test/tests/matmul_clamp_f16_qsi8d32p_qai4c32p_test.cpp
| Line | Branch | Exec | Source |
|---|---|---|---|
| 1 | // | ||
| 2 | // SPDX-FileCopyrightText: Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com> | ||
| 3 | // | ||
| 4 | // SPDX-License-Identifier: Apache-2.0 | ||
| 5 | // | ||
| 6 | |||
| 7 | #include <gtest/gtest.h> | ||
| 8 | |||
| 9 | #include <array> | ||
| 10 | #include <cstddef> | ||
| 11 | #include <cstdint> | ||
| 12 | #include <cstdlib> | ||
| 13 | #include <sstream> | ||
| 14 | #include <string> | ||
| 15 | #include <tuple> | ||
| 16 | |||
| 17 | #include "kai/ukernels/matmul/matmul_clamp_f16_qsi8d32p_qai4c32p/kai_matmul_clamp_f16_qsi8d32p1vlx4_qai4c32p4vlx4_1vlx4vl_sme2_mopa.h" | ||
| 18 | #include "kai/ukernels/matmul/matmul_clamp_f16_qsi8d32p_qai4c32p/kai_matmul_clamp_f16_qsi8d32p1x4_qai4c32p4vlx4_1x4vl_sme2_dot.h" | ||
| 19 | #include "kai/ukernels/matmul/matmul_clamp_f16_qsi8d32p_qai4c32p/kai_matmul_clamp_f16_qsi8d32p1x4_qai4c32p4x4_1x4_neon_dotprod.h" | ||
| 20 | #include "kai/ukernels/matmul/matmul_clamp_f16_qsi8d32p_qai4c32p/kai_matmul_clamp_f16_qsi8d32p1x8_qai4c32p4x8_1x4_neon_dotprod.h" | ||
| 21 | #include "kai/ukernels/matmul/matmul_clamp_f16_qsi8d32p_qai4c32p/kai_matmul_clamp_f16_qsi8d32p4x4_qai4c32p4x4_8x4_neon_dotprod.h" | ||
| 22 | #include "kai/ukernels/matmul/matmul_clamp_f16_qsi8d32p_qai4c32p/kai_matmul_clamp_f16_qsi8d32p4x8_qai4c32p4x8_8x4_neon_i8mm.h" | ||
| 23 | #include "kai/ukernels/matmul/matmul_clamp_f16_qsi8d32p_qai4c32p/kai_matmul_clamp_f16_qsi8d32p_qai4c32p_interface.h" | ||
| 24 | #include "kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32pscalef32_f16_neon.h" | ||
| 25 | #include "kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon.h" | ||
| 26 | #include "kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qai4c32ps1s0nrx4_qau4c32s0s1_f32_f32_f32_neon.h" | ||
| 27 | #include "kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qai4c32ps1s0nrx4_qau4c32s1s0_f32_f32_f32_neon.h" | ||
| 28 | #include "test/common/abi_checker.hpp" | ||
| 29 | #include "test/common/buffer.hpp" | ||
| 30 | #include "test/common/compare.hpp" | ||
| 31 | #include "test/common/cpu_info.hpp" | ||
| 32 | #include "test/common/data_format.hpp" | ||
| 33 | #include "test/common/float16.hpp" | ||
| 34 | #include "test/common/int4.hpp" | ||
| 35 | #include "test/common/matmul_test_common.hpp" | ||
| 36 | #include "test/common/matrix_portion.hpp" | ||
| 37 | #include "test/common/memory.hpp" | ||
| 38 | #include "test/common/round.hpp" | ||
| 39 | #include "test/common/seed.hpp" | ||
| 40 | #include "test/common/test_suite.hpp" | ||
| 41 | #include "test/reference/cast.hpp" | ||
| 42 | #include "test/reference/clamp.hpp" | ||
| 43 | #include "test/reference/fill.hpp" | ||
| 44 | #include "test/reference/matmul.hpp" | ||
| 45 | #include "test/reference/pack.hpp" | ||
| 46 | #include "test/reference/quantize.hpp" | ||
| 47 | |||
| 48 | namespace kai::test { | ||
| 49 | |||
| 50 | // Interface for the LHS and RHS packed size and packing micro-kernels | ||
| 51 | using kai_get_lhs_packed_size_func_t = decltype(&kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32pscalef32_f16_neon); | ||
| 52 | using kai_get_rhs_packed_size_func_t = | ||
| 53 | decltype(&kai_get_rhs_packed_size_rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon); | ||
| 54 | using kai_get_lhs_packed_offset_func_t = decltype(&kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32pscalef32_f16_neon); | ||
| 55 | using kai_get_rhs_packed_offset_func_t = | ||
| 56 | decltype(&kai_get_rhs_packed_offset_rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon); | ||
| 57 | using kai_get_lhs_offset_func_t = decltype(&kai_get_lhs_offset_lhs_quant_pack_qsi8d32pscalef32_f16_neon); | ||
| 58 | using kai_get_rhs_offset_func_t = decltype(&kai_get_rhs_offset_rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon); | ||
| 59 | using kai_run_lhs_pack_func_f16_t = decltype(&kai_run_lhs_quant_pack_qsi8d32pscalef32_f16_neon); | ||
| 60 | using kai_run_rhs_pack_func_t = decltype(&kai_run_rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon); | ||
| 61 | |||
| 62 | // Micro-kernel interface | ||
| 63 | struct kai_qai4c32p_pack_functions { | ||
| 64 | kai_get_rhs_packed_size_func_t packed_size; | ||
| 65 | kai_get_rhs_packed_offset_func_t get_packed_offset; | ||
| 66 | kai_get_rhs_offset_func_t get_offset; | ||
| 67 | kai_run_rhs_pack_func_t run_pack; | ||
| 68 | }; | ||
| 69 | |||
| 70 | struct kai_qsi8d32p_f16_pack_functions { | ||
| 71 | kai_get_lhs_packed_size_func_t packed_size; | ||
| 72 | kai_get_lhs_packed_offset_func_t get_packed_offset; | ||
| 73 | kai_get_lhs_offset_func_t get_offset; | ||
| 74 | kai_run_lhs_pack_func_f16_t run_pack; | ||
| 75 | }; | ||
| 76 | |||
| 77 | ✗ | static const std::array< | |
| 78 | UkernelMatmulPackVariant< | ||
| 79 | kai_matmul_clamp_f16_qsi8d32p_qai4c32p_ukernel, kai_qsi8d32p_f16_pack_functions, kai_qai4c32p_pack_functions>, | ||
| 80 | 8> | ||
| 81 | 0/4 | 3 | variants_kai_matmul_clamp_f16_qsi8d32p_qai4c32p = { |
| 82 | 3/6 | 10 | {UKERNEL_MATMUL_PACK_VARIANT( |
| 83 | clamp_f16_qsi8d32p1x8_qai4c32p4x8_1x4_neon_dotprod, cpu_has_dotprod, | ||
| 84 | lhs_quant_pack_qsi8d32pscalef32_f16_neon, rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon, true), | ||
| 85 | 3/6 | 3 | UKERNEL_MATMUL_PACK_VARIANT( |
| 86 | clamp_f16_qsi8d32p4x8_qai4c32p4x8_8x4_neon_i8mm, cpu_has_i8mm, lhs_quant_pack_qsi8d32pscalef32_f16_neon, | ||
| 87 | rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon, true), | ||
| 88 | 3/6 | 3 | UKERNEL_MATMUL_PACK_VARIANT( |
| 89 | clamp_f16_qsi8d32p4x4_qai4c32p4x4_8x4_neon_dotprod, cpu_has_dotprod, | ||
| 90 | lhs_quant_pack_qsi8d32pscalef32_f16_neon, rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon, true), | ||
| 91 | 3/6 | 3 | UKERNEL_MATMUL_PACK_VARIANT( |
| 92 | clamp_f16_qsi8d32p1x4_qai4c32p4x4_1x4_neon_dotprod, cpu_has_dotprod, | ||
| 93 | lhs_quant_pack_qsi8d32pscalef32_f16_neon, rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon, true), | ||
| 94 | 3/6 | 3 | UKERNEL_MATMUL_PACK_VARIANT( |
| 95 | clamp_f16_qsi8d32p1x4_qai4c32p4vlx4_1x4vl_sme2_dot, cpu_has_sme2, lhs_quant_pack_qsi8d32pscalef32_f16_neon, | ||
| 96 | rhs_pack_nxk_qai4c32ps1s0nrx4_qau4c32s1s0_f32_f32_f32_neon, false), | ||
| 97 | 3/6 | 3 | UKERNEL_MATMUL_PACK_VARIANT( |
| 98 | clamp_f16_qsi8d32p1vlx4_qai4c32p4vlx4_1vlx4vl_sme2_mopa, cpu_has_sme2, | ||
| 99 | lhs_quant_pack_qsi8d32pscalef32_f16_neon, rhs_pack_nxk_qai4c32ps1s0nrx4_qau4c32s1s0_f32_f32_f32_neon, | ||
| 100 | false), | ||
| 101 | 3/6 | 3 | UKERNEL_MATMUL_PACK_VARIANT( |
| 102 | clamp_f16_qsi8d32p1x4_qai4c32p4vlx4_1x4vl_sme2_dot, cpu_has_sme2, lhs_quant_pack_qsi8d32pscalef32_f16_neon, | ||
| 103 | rhs_pack_nxk_qai4c32ps1s0nrx4_qau4c32s0s1_f32_f32_f32_neon, true), | ||
| 104 | 3/6 | 3 | UKERNEL_MATMUL_PACK_VARIANT( |
| 105 | clamp_f16_qsi8d32p1vlx4_qai4c32p4vlx4_1vlx4vl_sme2_mopa, cpu_has_sme2, | ||
| 106 | lhs_quant_pack_qsi8d32pscalef32_f16_neon, rhs_pack_nxk_qai4c32ps1s0nrx4_qau4c32s0s1_f32_f32_f32_neon, | ||
| 107 | true)}}; | ||
| 108 | |||
| 109 | 28 | static const auto test_matmul_shapes = testing::Values( | |
| 110 | 3 | MatMulShape{1, 64, 32}, // | |
| 111 | 3 | MatMulShape{1, 63, 32}, // | |
| 112 | 3 | MatMulShape{1, 65, 32}, // | |
| 113 | 3 | MatMulShape{1, 64, 64}, // | |
| 114 | 3 | MatMulShape{1, 64, 128}, // | |
| 115 | 3 | MatMulShape{1, 128, 32}, // | |
| 116 | 3 | MatMulShape{1, 128, 128}, // | |
| 117 | 3 | MatMulShape{1, 2, 32}, // | |
| 118 | 3 | MatMulShape{1, 3, 32}, // | |
| 119 | 3 | MatMulShape{1, 4, 32}, // | |
| 120 | 3 | MatMulShape{1, 5, 32}, // | |
| 121 | 3 | MatMulShape{3, 3, 32}, // | |
| 122 | 3 | MatMulShape{4, 4, 32}, // | |
| 123 | 3 | MatMulShape{5, 5, 32}, // | |
| 124 | 3 | MatMulShape{32, 128, 32}, // | |
| 125 | 3 | MatMulShape{15, 64, 64}, // | |
| 126 | 3 | MatMulShape{17, 64, 64}, // | |
| 127 | 3 | MatMulShape{16, 63, 64}, // | |
| 128 | 3 | MatMulShape{16, 64, 64}, // | |
| 129 | 3 | MatMulShape{16, 65, 64}, // | |
| 130 | 3 | MatMulShape{32, 64, 64}, // | |
| 131 | 3 | MatMulShape{16, 32, 64}, // | |
| 132 | 3 | MatMulShape{8, 32, 64}, // | |
| 133 | 3 | MatMulShape{15, 32, 32}, // | |
| 134 | 6 | MatMulShape{77, 99, 64} // | |
| 135 | ); | ||
| 136 | |||
| 137 | 10 | static const auto test_portions = testing::Values( | |
| 138 | 3 | MatrixPortion(0, 0, 1, 1), // Full matrix. | |
| 139 | 3 | MatrixPortion(0, 0, 1, 0.25), // Leftmost portion. | |
| 140 | 3 | MatrixPortion(0, 0.75, 1, 1), // Rightmost portion. | |
| 141 | 3 | MatrixPortion(0, 0.5, 1, 0.8), // Somewhere in the middle. | |
| 142 | 3 | MatrixPortion(0.75, 0.75, 1, 1), // Bottom-right corner. | |
| 143 | 3 | MatrixPortion(0.75, 0, 1, 1), // Partial rows | |
| 144 | 6 | MatrixPortion(0.4, 0.5, 0.6, 0.8) // Somewhere in the middle. | |
| 145 | ); | ||
| 146 | |||
| 147 | 3 | static const auto test_block_lengths = testing::Values(32, 64); | |
| 148 | |||
| 149 | // Executes the LHS packing micro-kernel. | ||
| 150 | 50148 | static inline Buffer pack_lhs_qsi8d32p_f16( | |
| 151 | const kai_qsi8d32p_f16_pack_functions& pack_interface, size_t M, size_t K, size_t bl, size_t mr, size_t kr, | ||
| 152 | size_t sr, const Buffer& lhs_f16, size_t stride, size_t rect_start_row, size_t rect_height) { | ||
| 153 | 50148 | const auto imp_packed_lhs_size = pack_interface.packed_size(M, K, bl, mr, kr, sr); | |
| 154 | 50148 | Buffer imp_packed_lhs(imp_packed_lhs_size, 0); | |
| 155 | |||
| 156 | 1/2 | 50148 | auto lhs_offset = pack_interface.get_offset(rect_start_row, stride); |
| 157 | 1/2 | 50148 | auto lhs_packed_offset = pack_interface.get_packed_offset(rect_start_row, K, bl, mr, kr, sr); |
| 158 | |||
| 159 | 1/2 | 50148 | abi_check( |
| 160 | 50148 | pack_interface.run_pack, rect_height, K, bl, mr, kr, sr, 0, | |
| 161 | 1/2 | 50148 | reinterpret_cast<const uint8_t*>(lhs_f16.data() + lhs_offset), stride, |
| 162 | 1/2 | 50148 | imp_packed_lhs.data() + lhs_packed_offset); |
| 163 | |||
| 164 | 50148 | return (imp_packed_lhs); | |
| 165 | 50148 | } | |
| 166 | |||
| 167 | // Executes the RHS packing micro-kernel. | ||
| 168 | 12852 | static inline Buffer pack_rhs_qai4c32p( | |
| 169 | const kai_qai4c32p_pack_functions& pack_interface, size_t N, size_t K, size_t bl, size_t nr, size_t kr, size_t sr, | ||
| 170 | const Buffer& rhs_values_qai4, const bool has_bias, const Buffer& biases, const Buffer& rhs_scales, | ||
| 171 | const Buffer& rhs_zp, bool s0s1_input) { | ||
| 172 | // Cast the signed int4 values to their unsigned int4 representation. | ||
| 173 | 12852 | auto rhs_qau4s1s0 = cast_qsu4_qsi4(rhs_values_qai4.data(), N * K); | |
| 174 | |||
| 175 | 1/2 | 12852 | const auto imp_packed_rhs_size = pack_interface.packed_size(N, K, nr, kr, bl); |
| 176 | 1/2 | 12852 | Buffer imp_packed_rhs(imp_packed_rhs_size); |
| 177 | |||
| 178 | // Runs the RHS packing micro-kernel. | ||
| 179 | 12852 | kai_rhs_pack_nxk_qai4c32p_params params{}; | |
| 180 | 12852 | params.lhs_zero_point = 1; | |
| 181 | 12852 | params.rhs_zero_point = 8; | |
| 182 | |||
| 183 | 5/10 | 12852 | abi_check( |
| 184 | 12852 | pack_interface.run_pack, 1, N, K, nr, kr, sr, bl, | |
| 185 | 3/4 | 12852 | reinterpret_cast<const uint8_t*>(s0s1_input ? convert_s0s1_s1s0(rhs_qau4s1s0).data() : rhs_qau4s1s0.data()), |
| 186 | 2/2 | 12852 | rhs_zp.data(), has_bias ? biases.data() : nullptr, rhs_scales.data(), imp_packed_rhs.data(), 0, &params); |
| 187 | |||
| 188 | 12852 | return (imp_packed_rhs); | |
| 189 | 12852 | } | |
| 190 | |||
| 191 | class MatMulTest_f16_qsi8d32p_qai4c32p | ||
| 192 | : public ::testing::TestWithParam<MatMulClampTestPortionedParamsWithBias_WithBL> {}; | ||
| 193 | |||
| 194 | 8/16 | 84006 | TEST_P(MatMulTest_f16_qsi8d32p_qai4c32p, LhsPackedWithSameBlockdepth) { |
| 195 | // Verify that the LHS quant-and-pack int8 kernel behaves the same for the int4 and int8 matmul kernels | ||
| 196 | // when the block depth is the same across different values of kr and sr. | ||
| 197 | |||
| 198 | 20899032 | const auto& [variant_index, matmul_shape, bl, portion, clamp_keep_ratio, has_bias] = GetParam(); | |
| 199 | 67200 | const auto& ukernel_variant = variants_kai_matmul_clamp_f16_qsi8d32p_qai4c32p.at(variant_index); | |
| 200 | |||
| 201 | 3/4 | 33600 | if (ukernel_variant.ukernel.fn_is_supported && !ukernel_variant.ukernel.fn_is_supported()) { |
| 202 | 3/6 | 8400 | GTEST_SKIP() << "Unsupported CPU feature"; |
| 203 | } | ||
| 204 | |||
| 205 | 50400 | const size_t M = matmul_shape.m; | |
| 206 | 50400 | const size_t N = matmul_shape.n; | |
| 207 | 50400 | const size_t K = matmul_shape.k; | |
| 208 | |||
| 209 | 4/4 | 50400 | if (K % bl != 0) { |
| 210 | 3/6 | 6552 | GTEST_SKIP() << "K must be a multiple of bl"; |
| 211 | } | ||
| 212 | |||
| 213 | 18648 | const auto mr = ukernel_variant.ukernel.interface.get_mr(); | |
| 214 | 18648 | const auto nr = ukernel_variant.ukernel.interface.get_nr(); | |
| 215 | 18648 | const auto kr = ukernel_variant.ukernel.interface.get_kr(); | |
| 216 | 18648 | const auto sr = ukernel_variant.ukernel.interface.get_sr(); | |
| 217 | |||
| 218 | 18648 | auto m_step = ukernel_variant.ukernel.interface.get_m_step(); | |
| 219 | 3/14 | 18648 | ASSERT_TRUE(m_step % mr == 0); |
| 220 | |||
| 221 | 18648 | auto n_step = ukernel_variant.ukernel.interface.get_n_step(); | |
| 222 | 3/14 | 18648 | ASSERT_TRUE(n_step % nr == 0); |
| 223 | |||
| 224 | 37296 | const auto rect = portion.compute_portion(M, N, m_step, n_step); | |
| 225 | |||
| 226 | // Seed the random generator. | ||
| 227 | 1/2 | 18648 | auto& feed = seed_stream(current_test_key()); |
| 228 | |||
| 229 | // Generates input data. | ||
| 230 | 18648 | const auto ref_lhs = fill_random<Float16>(M * K, feed()); | |
| 231 | |||
| 232 | // Runs the LHS packing micro-kernel. | ||
| 233 | 1/2 | 18648 | const auto lhs_start_row = rect.start_row(); |
| 234 | 18648 | auto lhs_stride = K * sizeof(uint16_t); | |
| 235 | |||
| 236 | 1/2 | 18648 | auto imp_packed_lhs = pack_lhs_qsi8d32p_f16( |
| 237 | 2/4 | 37296 | ukernel_variant.lhs_pack_interface, M, K, bl, mr, kr, sr, ref_lhs, lhs_stride, lhs_start_row, rect.height()); |
| 238 | 2/4 | 37296 | auto lhs_packed_offset = ukernel_variant.lhs_pack_interface.get_packed_offset(lhs_start_row, K, bl, mr, kr, sr); |
| 239 | |||
| 240 | 18648 | const size_t kr_qsi8 = kr / sr; | |
| 241 | 18648 | const size_t sr_qsi8 = 1; | |
| 242 | |||
| 243 | 1/2 | 18648 | auto imp_packed_lhs_qsi8 = pack_lhs_qsi8d32p_f16( |
| 244 | 37296 | ukernel_variant.lhs_pack_interface, M, K, bl, mr, kr_qsi8, sr_qsi8, ref_lhs, lhs_stride, lhs_start_row, | |
| 245 | 1/2 | 18648 | rect.height()); |
| 246 | 18648 | auto lhs_qsi8_packed_offset = | |
| 247 | 2/4 | 37296 | ukernel_variant.lhs_pack_interface.get_packed_offset(lhs_start_row, K, bl, mr, kr_qsi8, sr_qsi8); |
| 248 | |||
| 249 | 4/16 | 18648 | ASSERT_EQ(lhs_qsi8_packed_offset, lhs_packed_offset); |
| 250 | |||
| 251 | 1/2 | 18648 | auto* imp_packed_lhs_ptr = reinterpret_cast<const uint8_t*>(imp_packed_lhs.data()); |
| 252 | 1/2 | 18648 | auto* imp_packed_lhs_qsi8_ptr = reinterpret_cast<const uint8_t*>(imp_packed_lhs_qsi8.data()); |
| 253 | 5/8 | 20656440 | for (size_t i = 0; i < ukernel_variant.lhs_pack_interface.packed_size(M, K, bl, mr, kr, sr); i++) { |
| 254 | 4/16 | 20637792 | ASSERT_EQ(imp_packed_lhs_ptr[i], imp_packed_lhs_qsi8_ptr[i]); |
| 255 | 20637792 | } | |
| 256 | 33600 | } | |
| 257 | |||
| 258 | 8/16 | 84006 | TEST_P(MatMulTest_f16_qsi8d32p_qai4c32p, EndToEnd) { |
| 259 | 193704 | const auto& [variant_index, matmul_shape, bl, portion, clamp_keep_ratio, has_bias] = GetParam(); | |
| 260 | 67200 | const auto& ukernel_variant = variants_kai_matmul_clamp_f16_qsi8d32p_qai4c32p.at(variant_index); | |
| 261 | |||
| 262 | 3/4 | 33600 | if (ukernel_variant.ukernel.fn_is_supported && !ukernel_variant.ukernel.fn_is_supported()) { |
| 263 | 3/6 | 8400 | GTEST_SKIP() << "Unsupported CPU feature"; |
| 264 | } | ||
| 265 | |||
| 266 | 50400 | const size_t M = matmul_shape.m; | |
| 267 | 50400 | const size_t N = matmul_shape.n; | |
| 268 | 50400 | const size_t K = matmul_shape.k; | |
| 269 | |||
| 270 | 4/4 | 50400 | if (K % bl != 0) { |
| 271 | 3/6 | 6552 | GTEST_SKIP() << "K must be a multiple of bl"; |
| 272 | } | ||
| 273 | |||
| 274 | 18648 | const auto mr = ukernel_variant.ukernel.interface.get_mr(); | |
| 275 | 18648 | const auto nr = ukernel_variant.ukernel.interface.get_nr(); | |
| 276 | 18648 | const auto kr = ukernel_variant.ukernel.interface.get_kr(); | |
| 277 | 18648 | const auto sr = ukernel_variant.ukernel.interface.get_sr(); | |
| 278 | |||
| 279 | 4/4 | 18648 | if (mr == 1 && M > 1) { |
| 280 | 3/6 | 5796 | GTEST_SKIP() << "Kernel does not support M != 1"; |
| 281 | } | ||
| 282 | |||
| 283 | 12852 | auto m_step = ukernel_variant.ukernel.interface.get_m_step(); | |
| 284 | 3/14 | 12852 | ASSERT_TRUE(m_step % mr == 0); |
| 285 | |||
| 286 | 12852 | auto n_step = ukernel_variant.ukernel.interface.get_n_step(); | |
| 287 | 3/14 | 12852 | ASSERT_TRUE(n_step % nr == 0); |
| 288 | |||
| 289 | 25704 | const auto rect = portion.compute_portion(M, N, m_step, n_step); | |
| 290 | 2/4 | 12852 | if (rect.height() == 0 || rect.width() == 0) { |
| 291 | ✗ | GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; | |
| 292 | } | ||
| 293 | |||
| 294 | // Seed the random generator. | ||
| 295 | 1/2 | 12852 | auto& feed = seed_stream(current_test_key()); |
| 296 | |||
| 297 | // Generates input data. | ||
| 298 | 12852 | const auto ref_lhs_f16 = fill_random<Float16>(M * K, feed()); | |
| 299 | 2/4 | 12852 | const auto ref_rhs = fill_random<float>(N * K, feed()); |
| 300 | 12852 | Buffer ref_biases; | |
| 301 | |||
| 302 | 2/2 | 12852 | if (has_bias) { |
| 303 | 2/4 | 6426 | ref_biases = fill_random<float>(N, feed()); |
| 304 | 6426 | } | |
| 305 | // For the reference implementation, cast the FP16 input to FP32 and the FP32 output back to FP16, because the matmul | ||
| 306 | // implementation accumulates in FP32 and casts the result to FP16. | ||
| 307 | 3/6 | 12852 | const auto ref_lhs = cast<float, Float16>(ref_lhs_f16.data(), ref_lhs_f16.size() * 8 / size_in_bits<Float16>); |
| 308 | |||
| 309 | // Runs the reference implementation. | ||
| 310 | // * Quantizes the LHS matrix using 8-bit symmetric quantization. | ||
| 311 | // * Quantizes the RHS matrix using 8-bit asymmetric quantization. | ||
| 312 | // * Performs GEMM. | ||
| 313 | 1/2 | 12852 | QuantizationInfo lhs_qinfo{}; |
| 314 | lhs_qinfo.quant_width = bl; | ||
| 315 | lhs_qinfo.dst_type = DataType::QSI8; | ||
| 316 | lhs_qinfo.scale_type = DataType::FP32; | ||
| 317 | const auto [ref_lhs_quant, lhs_qoutputs] = quantize_dynamic(ref_lhs.data(), DataType::FP32, M, K, lhs_qinfo); | ||
| 318 | |||
| 319 | QuantizationInfo rhs_qinfo{}; | ||
| 320 | rhs_qinfo.quant_width = bl; | ||
| 321 | rhs_qinfo.dst_type = DataType::QAI4; | ||
| 322 | rhs_qinfo.scale_type = DataType::FP32; | ||
| 323 | rhs_qinfo.zero_point_type = DataType::I32; | ||
| 324 | const auto [ref_rhs_quant, rhs_qoutputs] = quantize_dynamic(ref_rhs.data(), DataType::FP32, N, K, rhs_qinfo); | ||
| 325 | const auto ref_dst_no_clamp = | ||
| 326 | matmul_nt_t_quantized<int8_t, float, int32_t, Int4, float, int32_t, float, float, int32_t, float>( | ||
| 327 | M, N, K, ref_lhs_quant.data(), lhs_qoutputs.scales.data(), nullptr, 1, bl, ref_rhs_quant.data(), | ||
| 328 | rhs_qoutputs.scales.data(), rhs_qoutputs.zero_points.data(), 1, bl, has_bias ? ref_biases.data() : nullptr, | ||
| 329 | nullptr, nullptr, 1); | ||
| 330 | |||
| 331 | // Clamps the reference output. | ||
| 332 | const auto [clamp_min, clamp_max] = find_clamp_range<float>(ref_dst_no_clamp.data(), M * N, clamp_keep_ratio); | ||
| 333 | const auto ref_dst_float = clamp<float>(ref_dst_no_clamp.data(), M * N, clamp_min, clamp_max); | ||
| 334 | |||
| 335 | // Cast the reference output to F16 | ||
| 336 | auto ref_dst = cast<Float16, float>(ref_dst_float.data(), ref_dst_float.size() * 8 / size_in_bits<float>); | ||
| 337 | |||
| 338 | // Runs the LHS packing micro-kernel. | ||
| 339 | const auto lhs_start_row = rect.start_row(); | ||
| 340 | auto imp_packed_lhs = pack_lhs_qsi8d32p_f16( | ||
| 341 | ukernel_variant.lhs_pack_interface, M, K, bl, mr, kr, sr, ref_lhs_f16, K * sizeof(uint16_t), lhs_start_row, | ||
| 342 | rect.height()); | ||
| 343 | auto lhs_packed_offset = ukernel_variant.lhs_pack_interface.get_packed_offset(lhs_start_row, K, bl, mr, kr, sr); | ||
| 344 | auto lhs_matmul_offset = ukernel_variant.ukernel.interface.get_lhs_packed_offset(lhs_start_row, K, bl); | ||
| 345 | |||
| 346 | ASSERT_EQ(lhs_packed_offset, lhs_matmul_offset); | ||
| 347 | |||
| 348 | // Prepare the offsets as the RHS packing micro-kernel expects the scaled zero-points in float. | ||
| 349 | const size_t num_blocks_per_row = round_up_division(K, bl); | ||
| 350 | const size_t ref_zp_size = N * num_blocks_per_row; | ||
| 351 | const size_t ref_zp_size_in_bytes = ref_zp_size * sizeof(float); | ||
| 352 | Buffer ref_rhs_zp_f32(ref_zp_size_in_bytes); | ||
| 353 | for (size_t i = 0; i < ref_zp_size; ++i) { | ||
| 354 | reinterpret_cast<float*>(ref_rhs_zp_f32.data())[i] = | ||
| 355 | -reinterpret_cast<const int32_t*>(rhs_qoutputs.zero_points.data())[i] * | ||
| 356 | reinterpret_cast<const float*>(rhs_qoutputs.scales.data())[i]; | ||
| 357 | } | ||
| 358 | |||
| 359 | const auto rhs_start_row = rect.start_col(); | ||
| 360 | auto imp_packed_rhs = pack_rhs_qai4c32p( | ||
| 361 | ukernel_variant.rhs_pack_interface, N, K, bl, nr, kr, sr, ref_rhs_quant, has_bias, ref_biases, | ||
| 362 | rhs_qoutputs.scales, ref_rhs_zp_f32, ukernel_variant.rhs_s0s1_input); | ||
| 363 | auto rhs_packed_offset = ukernel_variant.rhs_pack_interface.get_packed_offset(rhs_start_row, K, nr, kr, bl); | ||
| 364 | auto rhs_matmul_offset = ukernel_variant.ukernel.interface.get_rhs_packed_offset(rhs_start_row, K, bl); | ||
| 365 | ASSERT_EQ(rhs_packed_offset, rhs_matmul_offset); | ||
| 366 | |||
| 367 | const auto dst_stride_row = N * sizeof(uint16_t); | ||
| 368 | const auto dst_stride_col = sizeof(uint16_t); | ||
| 369 | const auto dst_offset = | ||
| 370 | ukernel_variant.ukernel.interface.get_dst_offset(rect.start_row(), rect.start_col(), dst_stride_row); | ||
| 371 | const auto ref_dst_offset = rect.start_row() * dst_stride_row + rect.start_col() * dst_stride_col; | ||
| 372 | ASSERT_EQ(dst_offset, ref_dst_offset); | ||
| 373 | |||
| 374 | // Runs the GEMM micro-kernel. | ||
| 375 | const auto imp_dst_size = ukernel_variant.ukernel.interface.get_dst_size(M, N); | ||
| 376 | ASSERT_EQ(imp_dst_size, ref_dst.size()); | ||
| 377 | Buffer imp_dst(imp_dst_size); | ||
| 378 | abi_check( | ||
| 379 | ukernel_variant.ukernel.interface.run_matmul, rect.height(), rect.width(), K, bl, | ||
| 380 | imp_packed_lhs.data() + lhs_matmul_offset, imp_packed_rhs.data() + rhs_matmul_offset, | ||
| 381 | reinterpret_cast<float*>(imp_dst.data() + dst_offset), dst_stride_row, dst_stride_col, clamp_min, clamp_max); | ||
| 382 | |||
| 383 | // Compares the output of the micro-kernels against the output of the reference implementation for the portion | ||
| 384 | // tested. | ||
| 385 | DefaultMismatchHandler handler(0, 0.1, 0, 0.05); | ||
| 386 | DataFormat dst_format = DataFormat(DataType::FP16); | ||
| 387 | const auto success = compare(imp_dst.data(), ref_dst.data(), dst_format, M, N, rect, handler); | ||
| 388 | ASSERT_TRUE(success); | ||
| 389 | ✗ | } | |
| 390 | |||
| 391 | 57/122 | 277209 | INSTANTIATE_TEST_SUITE_P( |
| 392 | MatMul, MatMulTest_f16_qsi8d32p_qai4c32p, | ||
| 393 | testing::Combine( | ||
| 394 | testing::Range<size_t>(0, variants_kai_matmul_clamp_f16_qsi8d32p_qai4c32p.size()), test_matmul_shapes, | ||
| 395 | test_block_lengths, // | ||
| 396 | test_portions, // | ||
| 397 | testing::ValuesIn(std::initializer_list<float>({1.0f, 0.9f, 0.5f})), // clamp_keep_ratio | ||
| 398 | testing::Bool()), // | ||
| 399 | [](const auto& info) { | ||
| 400 | const auto variant_idx = std::get<0>(info.param); | ||
| 401 | const std::string name{variants_kai_matmul_clamp_f16_qsi8d32p_qai4c32p.at(variant_idx).ukernel.name}; | ||
| 402 | const auto shape = std::get<MatMulShape>(info.param); | ||
| 403 | const auto bl = std::get<2>(info.param); | ||
| 404 | const auto portion = std::get<3>(info.param); | ||
| 405 | const auto clamp_keep_ratio = std::get<4>(info.param); | ||
| 406 | const auto has_bias = std::get<5>(info.param); | ||
| 407 | |||
| 408 | std::ostringstream sstream; | ||
| 409 | sstream << name << "__"; | ||
| 410 | PrintTo(shape, &sstream); | ||
| 411 | sstream << "__BL_" << bl << "_"; | ||
| 412 | if (has_bias) { | ||
| 413 | sstream << "_withBias_"; | ||
| 414 | } else { | ||
| 415 | sstream << "_noBias_"; | ||
| 416 | } | ||
| 417 | if (variants_kai_matmul_clamp_f16_qsi8d32p_qai4c32p.at(variant_idx).rhs_s0s1_input) { | ||
| 418 | sstream << "_RHS_s0s1__"; | ||
| 419 | } else { | ||
| 420 | sstream << "_RHS_s1s0__"; | ||
| 421 | } | ||
| 422 | sstream << "__clamp_keep_ratio_" << static_cast<int>(clamp_keep_ratio * 100); | ||
| 423 | PrintTo(portion, &sstream); | ||
| 424 | |||
| 425 | return sstream.str(); | ||
| 426 | }); | ||
| 427 | |||
| 428 | } // namespace kai::test | ||
| 429 |
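
For orientation, the sketch below condenses the call sequence exercised by the `EndToEnd` test above: quantize-and-pack the FP16 LHS, pack the 4-bit RHS with its scales, scaled zero-points and optional bias, then run the clamped matmul micro-kernel. It is a minimal outline under stated assumptions, not verified sample code: the wrapper `run_f16_qsi8d32p_qai4c32p` is hypothetical, the `kai_*` function names are inferred from the headers included at the top of the file following the usual KleidiAI naming convention, and the argument order mirrors the `abi_check(...)` calls in the listing; buffer sizing, tiling offsets and error handling are simplified.

```cpp
// Illustrative outline only. The kai_* names are inferred from the headers included above
// (standard KleidiAI naming convention) and the argument order mirrors the abi_check(...) calls
// in the listing; this wrapper is hypothetical and has not been compiled against the library.
#include <cstddef>
#include <cstdint>
#include <vector>

#include "kai/ukernels/matmul/matmul_clamp_f16_qsi8d32p_qai4c32p/kai_matmul_clamp_f16_qsi8d32p1x8_qai4c32p4x8_1x4_neon_dotprod.h"
#include "kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32pscalef32_f16_neon.h"
#include "kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon.h"

void run_f16_qsi8d32p_qai4c32p(
    size_t M, size_t N, size_t K, size_t bl,       // bl = quantization block length along K
    const uint16_t* lhs_f16,                       // M x K activations, FP16
    const uint8_t* rhs_qau4,                       // N x K unsigned 4-bit weights (s0s1 ordering)
    const float* rhs_scales, const float* rhs_zp,  // per-block scales and scaled zero-points
    const float* bias,                             // N biases, or nullptr
    uint16_t* dst_f16, float clamp_min, float clamp_max) {
    // Blocking parameters come from the matmul micro-kernel and must be forwarded to both packers.
    const size_t mr = kai_get_mr_matmul_clamp_f16_qsi8d32p1x8_qai4c32p4x8_1x4_neon_dotprod();
    const size_t nr = kai_get_nr_matmul_clamp_f16_qsi8d32p1x8_qai4c32p4x8_1x4_neon_dotprod();
    const size_t kr = kai_get_kr_matmul_clamp_f16_qsi8d32p1x8_qai4c32p4x8_1x4_neon_dotprod();
    const size_t sr = kai_get_sr_matmul_clamp_f16_qsi8d32p1x8_qai4c32p4x8_1x4_neon_dotprod();

    // Dynamically quantize (8-bit, per-block symmetric) and pack the FP16 LHS.
    std::vector<uint8_t> lhs_packed(
        kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32pscalef32_f16_neon(M, K, bl, mr, kr, sr));
    kai_run_lhs_quant_pack_qsi8d32pscalef32_f16_neon(
        M, K, bl, mr, kr, sr, /*m_idx_start=*/0, reinterpret_cast<const uint8_t*>(lhs_f16),
        K * sizeof(uint16_t), lhs_packed.data());

    // Pack the asymmetric 4-bit RHS together with its scales, scaled zero-points and optional bias.
    kai_rhs_pack_nxk_qai4c32p_params params{};
    params.lhs_zero_point = 1;
    params.rhs_zero_point = 8;
    std::vector<uint8_t> rhs_packed(
        kai_get_rhs_packed_size_rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon(N, K, nr, kr, bl));
    kai_run_rhs_pack_nxk_qai4c32p_qau4c32s0s1_f32_f32_f32_neon(
        /*num_groups=*/1, N, K, nr, kr, sr, bl, rhs_qau4, rhs_zp, bias, rhs_scales,
        rhs_packed.data(), /*extra_bytes=*/0, &params);

    // Run the clamped matmul micro-kernel over the full output. The test passes the FP16
    // destination through the interface's float* parameter, so the same cast is kept here.
    kai_run_matmul_clamp_f16_qsi8d32p1x8_qai4c32p4x8_1x4_neon_dotprod(
        M, N, K, bl, lhs_packed.data(), rhs_packed.data(), reinterpret_cast<float*>(dst_f16),
        /*dst_stride_row=*/N * sizeof(uint16_t), /*dst_stride_col=*/sizeof(uint16_t),
        clamp_min, clamp_max);
}
```

The SME2 variants in the table above follow the same sequence, but with the `rhs_pack_nxk_qai4c32ps1s0nrx4_*` RHS packing kernels in place of the `qau4c32s0s1` one.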