KleidiAI Coverage Report


Directory: ./
File: kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm.c
Date: 2025-10-20 13:18:31
            Coverage    Exec    Excl    Total
Lines:         98.3%      59      11       71
Functions:    100.0%      16       0       16
Branches:      50.0%       1      22       24

Line Branch Exec Source
1 //
2 // SPDX-FileCopyrightText: Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
3 //
4 // SPDX-License-Identifier: Apache-2.0
5 //
6 #if !defined(__aarch64__) && !defined(__ARM_FEATURE_MATMUL_INT8) && !defined(_M_ARM64)
7 #error "I8mm extension required to compile this micro-kernel"
8 #else // Architectural features check.
9
10 #include "kai_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm.h"
11
12 #include <stddef.h>
13 #include <stdint.h>
14
15 #include "kai/kai_common.h"
16
17 typedef struct {
18 float* dst;
19 const void* lhs_packed;
20 const void* rhs_packed;
21 const float* clamp_vals;
22 size_t dst_stride_row;
23 size_t m;
24 size_t n;
25 size_t num_blocks;
26 size_t num_subblocks;
27 } KernelArgs;
28
29 void kai_kernel_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(KernelArgs* args_ptr);
30
31 // Compute args
32 static const size_t kai_m_step = 4;
33 static const size_t kai_n_step = 8;
34 // Packing args
35 static const size_t kai_mr = 4;
36 static const size_t kai_nr = 8;
37 static const size_t kai_kr = 16;
38 static const size_t kai_sr = 2;
39 // LHS format args (num. bytes per value, multiplier, zero_point (if asymmetric))
40 static const size_t kai_num_bytes_qvalue_lhs = 1;
41 static const size_t kai_num_bytes_multiplier_lhs = 4;
42 static const size_t kai_num_bytes_zp_lhs = 4;
43 // RHS format args (num. bytes per value, multiplier, zero_point (if asymmetric), and reduction sum (if LHS is
44 // asymmetric))
45 static const size_t kai_num_bytes_recip_qvalue_rhs = 2;
46 static const size_t kai_num_bytes_multiplier_rhs = 2;
47 static const size_t kai_num_bytes_rsum_rhs = 4;
48 // DST format args
49 static const size_t kai_num_bytes_dst_value = 4;
50 // Extra args
51 static const size_t kai_num_bytes_bias = 4;
52 static const size_t kai_k_multiple_of = 32;
53 static const size_t kai_bl = 32;
54
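(Illustrative aside, not part of the coverage data.) With the default
block length bl = 32, the format args above determine the packed sizes;
a sketch of the per-block arithmetic, assuming bl = 32:

    /* RHS block: 32 4-bit values packed 2 per byte -> 32 / 2 = 16 bytes,
     * plus one 2-byte (FP16) multiplier            -> 18 bytes per block.
     * LHS row:   k_internal int8 bytes, plus one 4-byte FP32 multiplier
     * and one 4-byte zero point (asymmetric per-row quantization). */
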
55 108 inline static size_t kai_get_k_roundedup(size_t k) {
56 108 return kai_roundup(k, kai_k_multiple_of);
57 }
58
59 108 inline static size_t kai_get_num_bytes_per_block_rhs(size_t bl) {
60 KAI_ASSUME((bl % kai_bl) == 0);
61 108 size_t num_bytes_per_block_rhs = (bl / kai_num_bytes_recip_qvalue_rhs) + kai_num_bytes_multiplier_rhs;
62 216 return num_bytes_per_block_rhs;
63 108 }
64
65 271 inline static size_t kai_get_num_blocks_per_row(size_t k, size_t bl) {
66 KAI_ASSUME((bl % kai_bl) == 0);
67
68 271 return kai_roundup(k, bl) / bl;
69 }
70
71 108 inline static size_t kai_get_lhs_packed_stride(size_t k) {
72 108 const size_t k_internal = kai_get_k_roundedup(k);
73 108 size_t lhs_packed_stride = kai_mr * ((k_internal * kai_num_bytes_qvalue_lhs) + kai_num_bytes_multiplier_lhs);
74 // Since the LHS matrix is asymmetric with per-row quantization, we must also include
75 // the number of bytes needed to hold the zero-point values
76 108 lhs_packed_stride += kai_mr * kai_num_bytes_zp_lhs;
77
78 216 return lhs_packed_stride;
79 108 }
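
A worked example for the LHS stride (illustrative only), taking k = 256:

    /* k_internal        = kai_roundup(256, 32)       = 256
     * lhs_packed_stride = 4 * (256 * 1 + 4) + 4 * 4  = 1056 bytes */

so one stride covers kai_mr = 4 packed rows: the int8 values plus a
4-byte FP32 multiplier and a 4-byte zero point per row.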
80
81 108 inline static size_t kai_get_rhs_packed_stride(size_t k, size_t bl) {
82 KAI_ASSUME((bl % kai_bl) == 0);
83
84 108 const size_t num_blocks_per_row = kai_get_num_blocks_per_row(k, bl);
85 108 const size_t num_bytes_per_block = kai_get_num_bytes_per_block_rhs(bl);
86
87 108 size_t rhs_packed_stride = kai_nr * (num_bytes_per_block * num_blocks_per_row);
88 // Since the LHS matrix is asymmetric with per-row quantization, we also include
89 // the number of bytes for the reduction sums
90 108 rhs_packed_stride += kai_nr * kai_num_bytes_rsum_rhs;
91 // Since the bias is packed with the RHS matrix, the stride is adjusted with the number of bytes of the bias
92 108 rhs_packed_stride += kai_nr * kai_num_bytes_bias;
93
94 216 return rhs_packed_stride;
95 108 }
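
Continuing the worked example (illustrative only) with k = 256, bl = 32:

    /* num_blocks_per_row  = kai_roundup(256, 32) / 32     = 8
     * num_bytes_per_block = 32 / 2 + 2                    = 18
     * rhs_packed_stride   = 8 * (18 * 8) + 8 * 4 + 8 * 4  = 1216 bytes */

so one stride covers kai_nr = 8 packed columns, including their 4-byte
reduction sums and 4-byte bias values.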
96
97 112 size_t kai_get_m_step_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(void) {
98 112 return kai_m_step;
99 }
100
101 112 size_t kai_get_n_step_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(void) {
102 112 return kai_n_step;
103 }
104
105 112 size_t kai_get_mr_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(void) {
106 112 return kai_mr;
107 }
108
109 112 size_t kai_get_nr_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(void) {
110 112 return kai_nr;
111 }
112
113 112 size_t kai_get_kr_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(void) {
114 112 return kai_kr;
115 }
116
117 112 size_t kai_get_sr_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(void) {
118 112 return kai_sr;
119 }
120
121 108 size_t kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(size_t m_idx, size_t k) {
122 KAI_ASSUME((m_idx % kai_m_step) == 0);
123
124 108 return (m_idx / kai_mr) * kai_get_lhs_packed_stride(k);
125 }
126
127 108 size_t kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(
128 size_t n_idx, size_t k, size_t bl) {
129 KAI_ASSUME((k % bl) == 0);
130 KAI_ASSUME((n_idx % kai_n_step) == 0);
131
132 108 return (n_idx / kai_nr) * kai_get_rhs_packed_stride(k, bl);
133 }
134
135 108 size_t kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(
136 size_t m_idx, size_t n_idx, size_t dst_stride) {
137 KAI_ASSUME((m_idx % kai_m_step) == 0);
138 KAI_ASSUME((n_idx % kai_n_step) == 0);
139
140 108 return (n_idx * kai_num_bytes_dst_value) + m_idx * dst_stride;
141 }
142
143 108 size_t kai_get_dst_size_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(size_t m, size_t n) {
144 108 return m * n * kai_num_bytes_dst_value;
145 }
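
To show how the getters above compose, here is a minimal calling sketch
in C. It is an illustration, not KleidiAI documentation: run_tiled and
its tiling policy are hypothetical, and lhs_packed / rhs_packed are
assumed to come from the matching qai8dxp / qsi4c32p packing routines.

    #include <float.h>
    #include <stddef.h>

    #include "kai_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm.h"

    /* Walk the output in m_step x n_step tiles, e.g. to split work
     * across threads. A single call with m_idx = n_idx = 0 and the
     * full m, n would be equally valid. bl must be a multiple of 32
     * and must divide k. */
    static void run_tiled(
        size_t m, size_t n, size_t k, size_t bl,
        const void* lhs_packed, const void* rhs_packed,
        float* dst, size_t dst_stride_row) {
        const size_t m_step = kai_get_m_step_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm();
        const size_t n_step = kai_get_n_step_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm();

        for (size_t m_idx = 0; m_idx < m; m_idx += m_step) {
            for (size_t n_idx = 0; n_idx < n; n_idx += n_step) {
                const size_t lhs_off =
                    kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(m_idx, k);
                const size_t rhs_off =
                    kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(n_idx, k, bl);
                const size_t dst_off =
                    kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(
                        m_idx, n_idx, dst_stride_row);

                kai_run_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(
                    (m - m_idx) < m_step ? (m - m_idx) : m_step,  /* rows in this tile */
                    (n - n_idx) < n_step ? (n - n_idx) : n_step,  /* cols in this tile */
                    k, bl,
                    (const char*)lhs_packed + lhs_off,
                    (const char*)rhs_packed + rhs_off,
                    (float*)((char*)dst + dst_off),
                    dst_stride_row, sizeof(float),
                    -FLT_MAX, FLT_MAX);  /* no clamping */
            }
        }
    }

A typical dst_stride_row is n * sizeof(float), matching a dense
row-major output.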
146
147 163 void kai_run_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(
148 size_t m, //
149 size_t n, //
150 size_t k, //
151 size_t bl, //
152 const void* restrict lhs_packed, //
153 const void* restrict rhs_packed, //
154 float* restrict dst, // NOLINT(readability-non-const-parameter)
155 size_t dst_stride_row, //
156 size_t dst_stride_col, //
157 float scalar_min, //
158 float scalar_max) {
159 KAI_ASSUME(dst_stride_col == sizeof(float));
160 KAI_ASSUME((k % bl) == 0);
161 KAI_ASSUME((bl % kai_bl) == 0);
162
163 1/2 163 if (m == 0) {
✓ Branch 0 taken 163 times.
✗ Branch 1 not taken.
164 return;
165 }
166 163 const size_t num_subblocks = bl / kai_bl;
167 163 const size_t num_blocks = kai_get_num_blocks_per_row(k, bl);
168 163 const float clamp_vals[2] = {scalar_min, scalar_max};
169
170 163 KernelArgs args;
171
172 163 args.dst = dst;
173 163 args.lhs_packed = lhs_packed;
174 163 args.rhs_packed = rhs_packed;
175 163 args.clamp_vals = clamp_vals;
176 163 args.dst_stride_row = dst_stride_row;
177 163 args.m = m;
178 163 args.n = n;
179 163 args.num_blocks = num_blocks;
180 163 args.num_subblocks = num_subblocks;
181
182 163 kai_kernel_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(&args);
183 163 }
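
Coverage note: the only branch not taken in this file is the m == 0
early return above ("Branch 1 not taken"). A minimal test along these
lines (hypothetical; lhs_packed, rhs_packed and dst stand for any
validly packed buffers) would exercise it:

    /* m == 0: the kernel must return without touching dst. */
    kai_run_matmul_clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8_neon_i8mm(
        0, 8, 32, 32, lhs_packed, rhs_packed, dst,
        8 * sizeof(float), sizeof(float), -FLT_MAX, FLT_MAX);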
184
185 #endif // Architectural features check.
186