KleidiAI Coverage Report


Directory: ./
File: kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm.c
Date: 2025-10-20 13:18:31
            Coverage    Exec    Excl    Total
Lines:         97.8%      45       5       51
Functions:    100.0%      14       0       14
Branches:      50.0%       1      10       12

Line Branch Exec Source
1 //
2 // SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
3 //
4 // SPDX-License-Identifier: Apache-2.0
5 //
6
7 // Do not flag the overlength string literals used for the inline assembly blocks
8 #pragma GCC diagnostic ignored "-Woverlength-strings"
9
10 #if !defined(__aarch64__) || !defined(__ARM_FEATURE_MATMUL_INT8)
11 #error "I8mm extension required to compile this micro-kernel"
12 #else // Architectural features check.
13
14 #include "kai_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm.h"
15
16 #include <stddef.h>
17 #include <stdint.h>
18
19 #include "kai/kai_common.h"
20
21 // Compute args
22 static const size_t kai_m_step = 16;
23 static const size_t kai_n_step = 4;
24 // Packing args
25 static const size_t kai_mr = 4;
26 static const size_t kai_nr = 4;
27 static const size_t kai_kr = 8;
28 static const size_t kai_sr = 1;
29 // LHS format args (num. bytes per value, multiplier, zero_point (if asymmetric))
30 static const size_t kai_num_bytes_qvalue_lhs = 1;
31 static const size_t kai_num_bytes_multiplier_lhs = 4;
32 static const size_t kai_num_bytes_zp_lhs = 4;
33 // RHS format args (num. bytes per value, multiplier, zero_point (if asymmetric), and reduction sum (if LHS is
34 // asymmetric))
35 static const size_t kai_num_bytes_qvalue_rhs = 1;
36 static const size_t kai_num_bytes_multiplier_rhs = 4;
37 static const size_t kai_num_bytes_rsum_rhs = 4;
38 // DST format args
39 static const size_t kai_num_bytes_dst_value = 4;
40 // Extra args
41 static const size_t kai_num_bytes_bias = 4;
42 static const size_t kai_k_multiple_of = 32;
43
44 617 inline static size_t kai_k_roundedup(size_t k) {
45 617 return kai_roundup(k, kai_k_multiple_of);
46 }
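The helper above defers to kai_roundup() from kai_common.h. As a minimal editorial sketch (not the library's implementation), assuming kai_roundup(v, m) returns the smallest multiple of m that is greater than or equal to v:

#include <stddef.h>

// Hypothetical stand-in for kai_roundup(), for illustration only: k is padded
// up to the next multiple of kai_k_multiple_of (32), so k = 100 yields 128.
static size_t roundup_sketch(size_t v, size_t m) {
    return ((v + m - 1) / m) * m;
}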
47
48 231 inline static size_t kai_lhs_packed_stride(size_t k) {
49 231 const size_t k_internal = kai_k_roundedup(k);
50 231 size_t lhs_packed_stride = kai_mr * ((k_internal * kai_num_bytes_qvalue_lhs) + kai_num_bytes_multiplier_lhs);
51 // Since the LHS matrix is asymmetric with per-row quantization, we must include
52 // the number of bytes needed to hold the zero-point value
53 231 lhs_packed_stride += kai_mr * kai_num_bytes_zp_lhs;
54
55 462 return lhs_packed_stride;
56 231 }
57
58 231 inline static size_t kai_rhs_packed_stride(size_t k) {
59 231 const size_t k_internal = kai_k_roundedup(k);
60 231 size_t rhs_packed_stride = kai_nr * (k_internal * kai_num_bytes_qvalue_rhs);
61 231 rhs_packed_stride += kai_nr * kai_num_bytes_multiplier_rhs;
62 // Since the LHS matrix is asymmetric with per-row quantization, we also include
63 // the number of bytes for the reduction sums
64 231 rhs_packed_stride += kai_nr * kai_num_bytes_rsum_rhs;
65 // Since the bias is packed with the RHS matrix, the stride also includes the number of bytes of the bias
66 231 rhs_packed_stride += kai_nr * kai_num_bytes_bias;
67
68 462 return rhs_packed_stride;
69 231 }
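To make the packed-stride arithmetic concrete, here is a small self-checking editorial sketch that re-evaluates both formulas for k = 100 using the byte-size constants defined above (it is not code from this file):

#include <assert.h>
#include <stddef.h>

// For k = 100, k_internal = kai_k_roundedup(100) = 128, so:
//   LHS stride: mr * (k_internal * 1 + 4) + mr * 4 = 4 * 132 + 16 = 544 bytes
//   RHS stride: nr * k_internal + nr * (4 + 4 + 4) = 512 + 48     = 560 bytes
static void stride_example(void) {
    const size_t k_internal = ((100 + 31) / 32) * 32;
    const size_t lhs_stride = 4 * ((k_internal * 1) + 4) + 4 * 4;
    const size_t rhs_stride = 4 * (k_internal * 1) + 4 * 4 + 4 * 4 + 4 * 4;
    assert(lhs_stride == 544);
    assert(rhs_stride == 560);
}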
70
71 231 size_t kai_get_m_step_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(void) {
72 231 return kai_m_step;
73 }
74
75 231 size_t kai_get_n_step_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(void) {
76 231 return kai_n_step;
77 }
78
79 231 size_t kai_get_mr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(void) {
80 231 return kai_mr;
81 }
82
83 231 size_t kai_get_nr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(void) {
84 231 return kai_nr;
85 }
86
87 308 size_t kai_get_kr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(void) {
88 308 return kai_kr;
89 }
90
91 308 size_t kai_get_sr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(void) {
92 308 return kai_sr;
93 }
94
95 231 size_t kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(size_t m_idx, size_t k) {
96 KAI_ASSUME((m_idx % kai_m_step) == 0);
97
98 231 return (m_idx / kai_mr) * kai_lhs_packed_stride(k);
99 }
100
101 231 size_t kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(size_t n_idx, size_t k) {
102 KAI_ASSUME((n_idx % kai_n_step) == 0);
103
104 231 return (n_idx / kai_nr) * kai_rhs_packed_stride(k);
105 }
106
107 154 size_t kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(
108 size_t m_idx, size_t n_idx, size_t dst_stride) {
109 KAI_ASSUME((m_idx % kai_m_step) == 0);
110 KAI_ASSUME((n_idx % kai_n_step) == 0);
111
112 154 return (n_idx * kai_num_bytes_dst_value) + m_idx * dst_stride;
113 }
114
115 154 size_t kai_get_dst_size_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(size_t m, size_t n) {
116 154 return m * n * kai_num_bytes_dst_value;
117 }
118
119 155 void kai_run_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(
120 size_t m, //
121 size_t n, //
122 size_t k, //
123 const void* restrict lhs_packed, //
124 const void* restrict rhs_packed, //
125 float* restrict dst, // NOLINT(readability-non-const-parameter)
126 size_t dst_stride_row, //
127 size_t dst_stride_col, //
128 float scalar_min, //
129 float scalar_max) {
130 KAI_ASSUME(dst_stride_col == sizeof(float));
131
132 1/2 155 if (m == 0) {
✓ Branch 0 taken 155 times.
✗ Branch 1 not taken.
133 return;
134 }
135
136 155 const size_t k_internal = kai_k_roundedup(k);
137 155 const size_t num_blocks = k_internal / kai_k_multiple_of;
138 155 const float clamp_vals[2] = {scalar_min, scalar_max};
139
140 310 __asm__ __volatile__(
141 "mov x13, %x[m]\n"
142 "mov x12, #0x80\n"
143 "mov x20, #0x20\n"
144 "cmp x13, #0x10\n"
145 "madd x12, %x[num_blocks], x12, x20\n"
146 "blt 14f\n"
147 "1:" // Row loop
148 "mov x11, %x[rhs_packed]\n"
149 "mov x10, %x[n]\n"
150 "add x9, %x[dst], %x[dst_stride_row], LSL #4\n"
151 "2:" // Column loop
152 "mov x27, %x[lhs_packed]\n"
153 "movi v31.4s, #0x0\n"
154 "movi v30.4s, #0x0\n"
155 "mov x23, %x[num_blocks]\n"
156 "movi v29.4s, #0x0\n"
157 "movi v28.4s, #0x0\n"
158 "movi v27.4s, #0x0\n"
159 "movi v26.4s, #0x0\n"
160 "add x22, x27, x12\n"
161 "add x21, x22, x12\n"
162 "add x20, x21, x12\n"
163 "movi v25.4s, #0x0\n"
164 "movi v24.4s, #0x0\n"
165 "movi v23.4s, #0x0\n"
166 "movi v22.4s, #0x0\n"
167 "movi v21.4s, #0x0\n"
168 "movi v20.4s, #0x0\n"
169 "movi v19.4s, #0x0\n"
170 "movi v18.4s, #0x0\n"
171 "movi v17.4s, #0x0\n"
172 "movi v16.4s, #0x0\n"
173 "3:" // Sub block loop
174 "ldr q2, [x11, #0x0]\n"
175 "ldr q1, [x11, #0x10]\n"
176 "subs x23, x23, #0x1\n"
177 "ldr q5, [x27, #0x0]\n"
178 "ldr q9, [x27, #0x10]\n"
179 "ldr q8, [x22, #0x0]\n"
180 "ldr q7, [x22, #0x10]\n"
181 "ldr q4, [x21, #0x0]\n"
182 "ldr q14, [x21, #0x10]\n"
183 "ldr q3, [x20, #0x0]\n"
184 "ldr q0, [x20, #0x10]\n"
185 ".inst 0x4e82a4bf // smmla v31.4s, v5.16b, v2.16b\n"
186 ".inst 0x4e81a4be // smmla v30.4s, v5.16b, v1.16b\n"
187 "ldr q6, [x11, #0x20]\n"
188 "ldr q5, [x11, #0x30]\n"
189 ".inst 0x4e82a53d // smmla v29.4s, v9.16b, v2.16b\n"
190 ".inst 0x4e81a53c // smmla v28.4s, v9.16b, v1.16b\n"
191 "ldr q13, [x27, #0x20]\n"
192 "ldr q12, [x27, #0x30]\n"
193 ".inst 0x4e82a51b // smmla v27.4s, v8.16b, v2.16b\n"
194 ".inst 0x4e81a51a // smmla v26.4s, v8.16b, v1.16b\n"
195 "ldr q11, [x22, #0x20]\n"
196 "ldr q10, [x22, #0x30]\n"
197 ".inst 0x4e82a4f9 // smmla v25.4s, v7.16b, v2.16b\n"
198 ".inst 0x4e81a4f8 // smmla v24.4s, v7.16b, v1.16b\n"
199 "ldr q9, [x21, #0x20]\n"
200 "ldr q8, [x21, #0x30]\n"
201 ".inst 0x4e82a497 // smmla v23.4s, v4.16b, v2.16b\n"
202 ".inst 0x4e81a496 // smmla v22.4s, v4.16b, v1.16b\n"
203 "ldr q7, [x20, #0x20]\n"
204 "ldr q4, [x20, #0x30]\n"
205 ".inst 0x4e82a5d5 // smmla v21.4s, v14.16b, v2.16b\n"
206 ".inst 0x4e81a5d4 // smmla v20.4s, v14.16b, v1.16b\n"
207 "ldr q15, [x11, #0x40]\n"
208 "ldr q14, [x11, #0x50]\n"
209 ".inst 0x4e82a473 // smmla v19.4s, v3.16b, v2.16b\n"
210 ".inst 0x4e81a472 // smmla v18.4s, v3.16b, v1.16b\n"
211 "ldr q3, [x27, #0x40]\n"
212 ".inst 0x4e82a411 // smmla v17.4s, v0.16b, v2.16b\n"
213 "ldr q2, [x27, #0x50]\n"
214 ".inst 0x4e81a410 // smmla v16.4s, v0.16b, v1.16b\n"
215 "ldr q1, [x22, #0x40]\n"
216 "ldr q0, [x22, #0x50]\n"
217 ".inst 0x4e86a5bf // smmla v31.4s, v13.16b, v6.16b\n"
218 ".inst 0x4e85a5be // smmla v30.4s, v13.16b, v5.16b\n"
219 "ldr q13, [x21, #0x40]\n"
220 ".inst 0x4e86a59d // smmla v29.4s, v12.16b, v6.16b\n"
221 ".inst 0x4e85a59c // smmla v28.4s, v12.16b, v5.16b\n"
222 "ldr q12, [x21, #0x50]\n"
223 ".inst 0x4e86a57b // smmla v27.4s, v11.16b, v6.16b\n"
224 ".inst 0x4e85a57a // smmla v26.4s, v11.16b, v5.16b\n"
225 "ldr q11, [x20, #0x40]\n"
226 ".inst 0x4e86a559 // smmla v25.4s, v10.16b, v6.16b\n"
227 ".inst 0x4e85a558 // smmla v24.4s, v10.16b, v5.16b\n"
228 "ldr q10, [x20, #0x50]\n"
229 ".inst 0x4e86a537 // smmla v23.4s, v9.16b, v6.16b\n"
230 ".inst 0x4e85a536 // smmla v22.4s, v9.16b, v5.16b\n"
231 "ldr q9, [x11, #0x60]\n"
232 ".inst 0x4e86a515 // smmla v21.4s, v8.16b, v6.16b\n"
233 ".inst 0x4e85a514 // smmla v20.4s, v8.16b, v5.16b\n"
234 "ldr q8, [x11, #0x70]\n"
235 "add x11, x11, #0x80\n"
236 ".inst 0x4e86a4f3 // smmla v19.4s, v7.16b, v6.16b\n"
237 ".inst 0x4e85a4f2 // smmla v18.4s, v7.16b, v5.16b\n"
238 "ldr q7, [x27, #0x60]\n"
239 ".inst 0x4e86a491 // smmla v17.4s, v4.16b, v6.16b\n"
240 "ldr q6, [x27, #0x70]\n"
241 ".inst 0x4e85a490 // smmla v16.4s, v4.16b, v5.16b\n"
242 "ldr q5, [x22, #0x60]\n"
243 "ldr q4, [x22, #0x70]\n"
244 ".inst 0x4e8fa47f // smmla v31.4s, v3.16b, v15.16b\n"
245 ".inst 0x4e8ea47e // smmla v30.4s, v3.16b, v14.16b\n"
246 "ldr q3, [x21, #0x60]\n"
247 ".inst 0x4e8fa45d // smmla v29.4s, v2.16b, v15.16b\n"
248 ".inst 0x4e8ea45c // smmla v28.4s, v2.16b, v14.16b\n"
249 "ldr q2, [x21, #0x70]\n"
250 "add x27, x27, #0x80\n"
251 ".inst 0x4e8fa43b // smmla v27.4s, v1.16b, v15.16b\n"
252 ".inst 0x4e8ea43a // smmla v26.4s, v1.16b, v14.16b\n"
253 "ldr q1, [x20, #0x60]\n"
254 "add x22, x22, #0x80\n"
255 ".inst 0x4e8fa419 // smmla v25.4s, v0.16b, v15.16b\n"
256 ".inst 0x4e8ea418 // smmla v24.4s, v0.16b, v14.16b\n"
257 "ldr q0, [x20, #0x70]\n"
258 "add x21, x21, #0x80\n"
259 ".inst 0x4e8fa5b7 // smmla v23.4s, v13.16b, v15.16b\n"
260 ".inst 0x4e8ea5b6 // smmla v22.4s, v13.16b, v14.16b\n"
261 "add x20, x20, #0x80\n"
262 ".inst 0x4e8fa595 // smmla v21.4s, v12.16b, v15.16b\n"
263 ".inst 0x4e8ea594 // smmla v20.4s, v12.16b, v14.16b\n"
264 ".inst 0x4e8fa573 // smmla v19.4s, v11.16b, v15.16b\n"
265 ".inst 0x4e8ea572 // smmla v18.4s, v11.16b, v14.16b\n"
266 ".inst 0x4e8fa551 // smmla v17.4s, v10.16b, v15.16b\n"
267 ".inst 0x4e8ea550 // smmla v16.4s, v10.16b, v14.16b\n"
268 ".inst 0x4e89a4ff // smmla v31.4s, v7.16b, v9.16b\n"
269 ".inst 0x4e88a4fe // smmla v30.4s, v7.16b, v8.16b\n"
270 ".inst 0x4e89a4dd // smmla v29.4s, v6.16b, v9.16b\n"
271 ".inst 0x4e88a4dc // smmla v28.4s, v6.16b, v8.16b\n"
272 ".inst 0x4e89a4bb // smmla v27.4s, v5.16b, v9.16b\n"
273 ".inst 0x4e88a4ba // smmla v26.4s, v5.16b, v8.16b\n"
274 ".inst 0x4e89a499 // smmla v25.4s, v4.16b, v9.16b\n"
275 ".inst 0x4e88a498 // smmla v24.4s, v4.16b, v8.16b\n"
276 ".inst 0x4e89a477 // smmla v23.4s, v3.16b, v9.16b\n"
277 ".inst 0x4e88a476 // smmla v22.4s, v3.16b, v8.16b\n"
278 ".inst 0x4e89a455 // smmla v21.4s, v2.16b, v9.16b\n"
279 ".inst 0x4e88a454 // smmla v20.4s, v2.16b, v8.16b\n"
280 ".inst 0x4e89a433 // smmla v19.4s, v1.16b, v9.16b\n"
281 ".inst 0x4e88a432 // smmla v18.4s, v1.16b, v8.16b\n"
282 ".inst 0x4e89a411 // smmla v17.4s, v0.16b, v9.16b\n"
283 ".inst 0x4e88a410 // smmla v16.4s, v0.16b, v8.16b\n"
284 "bgt 3b\n"
285 "ldr q7, [x11, #0x0]\n"
286 "ld1 { v4.4s }, [x27]\n"
287 "uzp1 v3.2d, v31.2d, v30.2d\n"
288 "uzp2 v2.2d, v31.2d, v30.2d\n"
289 "ldr q6, [x11, #0x10]\n"
290 "uzp1 v1.2d, v29.2d, v28.2d\n"
291 "uzp2 v0.2d, v29.2d, v28.2d\n"
292 "add x27, x27, #0x10\n"
293 "ldr q28, [x27, #0x0]\n"
294 "add x11, x11, #0x20\n"
295 "mla v3.4s, v7.4s, v4.s[0]\n"
296 "mla v2.4s, v7.4s, v4.s[1]\n"
297 "mla v1.4s, v7.4s, v4.s[2]\n"
298 "mla v0.4s, v7.4s, v4.s[3]\n"
299 "fmul v31.4s, v6.4s, v28.s[0]\n"
300 "fmul v30.4s, v6.4s, v28.s[1]\n"
301 "fmul v29.4s, v6.4s, v28.s[2]\n"
302 "fmul v28.4s, v6.4s, v28.s[3]\n"
303 "scvtf v3.4s, v3.4s\n"
304 "scvtf v2.4s, v2.4s\n"
305 "scvtf v1.4s, v1.4s\n"
306 "scvtf v0.4s, v0.4s\n"
307 "fmul v31.4s, v3.4s, v31.4s\n"
308 "fmul v30.4s, v2.4s, v30.4s\n"
309 "fmul v29.4s, v1.4s, v29.4s\n"
310 "fmul v28.4s, v0.4s, v28.4s\n"
311 "ld1 { v5.4s }, [x22]\n"
312 "uzp1 v4.2d, v27.2d, v26.2d\n"
313 "uzp2 v3.2d, v27.2d, v26.2d\n"
314 "add x22, x22, #0x10\n"
315 "ldr q2, [x22, #0x0]\n"
316 "uzp1 v1.2d, v25.2d, v24.2d\n"
317 "uzp2 v0.2d, v25.2d, v24.2d\n"
318 "mla v4.4s, v7.4s, v5.s[0]\n"
319 "mla v3.4s, v7.4s, v5.s[1]\n"
320 "mla v1.4s, v7.4s, v5.s[2]\n"
321 "mla v0.4s, v7.4s, v5.s[3]\n"
322 "fmul v27.4s, v6.4s, v2.s[0]\n"
323 "fmul v26.4s, v6.4s, v2.s[1]\n"
324 "fmul v25.4s, v6.4s, v2.s[2]\n"
325 "scvtf v4.4s, v4.4s\n"
326 "fmul v24.4s, v6.4s, v2.s[3]\n"
327 "scvtf v3.4s, v3.4s\n"
328 "scvtf v1.4s, v1.4s\n"
329 "scvtf v0.4s, v0.4s\n"
330 "fmul v27.4s, v4.4s, v27.4s\n"
331 "fmul v26.4s, v3.4s, v26.4s\n"
332 "fmul v25.4s, v1.4s, v25.4s\n"
333 "fmul v24.4s, v0.4s, v24.4s\n"
334 "ld1 { v5.4s }, [x21]\n"
335 "uzp1 v4.2d, v23.2d, v22.2d\n"
336 "uzp2 v3.2d, v23.2d, v22.2d\n"
337 "add x21, x21, #0x10\n"
338 "ldr q2, [x21, #0x0]\n"
339 "uzp1 v1.2d, v21.2d, v20.2d\n"
340 "uzp2 v0.2d, v21.2d, v20.2d\n"
341 "mla v4.4s, v7.4s, v5.s[0]\n"
342 "mla v3.4s, v7.4s, v5.s[1]\n"
343 "mla v1.4s, v7.4s, v5.s[2]\n"
344 "mla v0.4s, v7.4s, v5.s[3]\n"
345 "fmul v23.4s, v6.4s, v2.s[0]\n"
346 "fmul v22.4s, v6.4s, v2.s[1]\n"
347 "fmul v21.4s, v6.4s, v2.s[2]\n"
348 "scvtf v4.4s, v4.4s\n"
349 "fmul v20.4s, v6.4s, v2.s[3]\n"
350 "scvtf v3.4s, v3.4s\n"
351 "scvtf v1.4s, v1.4s\n"
352 "scvtf v0.4s, v0.4s\n"
353 "fmul v23.4s, v4.4s, v23.4s\n"
354 "fmul v22.4s, v3.4s, v22.4s\n"
355 "fmul v21.4s, v1.4s, v21.4s\n"
356 "fmul v20.4s, v0.4s, v20.4s\n"
357 "ld1 { v5.4s }, [x20]\n"
358 "uzp1 v4.2d, v19.2d, v18.2d\n"
359 "uzp2 v3.2d, v19.2d, v18.2d\n"
360 "add x20, x20, #0x10\n"
361 "ldr q2, [x20, #0x0]\n"
362 "uzp1 v1.2d, v17.2d, v16.2d\n"
363 "uzp2 v0.2d, v17.2d, v16.2d\n"
364 "mla v4.4s, v7.4s, v5.s[0]\n"
365 "mla v3.4s, v7.4s, v5.s[1]\n"
366 "mla v1.4s, v7.4s, v5.s[2]\n"
367 "mla v0.4s, v7.4s, v5.s[3]\n"
368 "fmul v19.4s, v6.4s, v2.s[0]\n"
369 "fmul v18.4s, v6.4s, v2.s[1]\n"
370 "fmul v17.4s, v6.4s, v2.s[2]\n"
371 "scvtf v4.4s, v4.4s\n"
372 "fmul v16.4s, v6.4s, v2.s[3]\n"
373 "scvtf v3.4s, v3.4s\n"
374 "scvtf v1.4s, v1.4s\n"
375 "scvtf v0.4s, v0.4s\n"
376 "fmul v19.4s, v4.4s, v19.4s\n"
377 "fmul v18.4s, v3.4s, v18.4s\n"
378 "fmul v17.4s, v1.4s, v17.4s\n"
379 "fmul v16.4s, v0.4s, v16.4s\n"
380 "ldr q2, [x11, #0x0]\n"
381 "ld1r { v1.4s }, [%x[clamp_vals]]\n"
382 "add x20, %x[clamp_vals], #0x4\n"
383 "cmp x10, #0x4\n"
384 "ld1r { v0.4s }, [x20]\n"
385 "add x11, x11, #0x10\n"
386 "fadd v31.4s, v31.4s, v2.4s\n"
387 "fadd v30.4s, v30.4s, v2.4s\n"
388 "fadd v29.4s, v29.4s, v2.4s\n"
389 "fadd v28.4s, v28.4s, v2.4s\n"
390 "fadd v27.4s, v27.4s, v2.4s\n"
391 "fadd v26.4s, v26.4s, v2.4s\n"
392 "fadd v25.4s, v25.4s, v2.4s\n"
393 "fadd v24.4s, v24.4s, v2.4s\n"
394 "fadd v23.4s, v23.4s, v2.4s\n"
395 "fadd v22.4s, v22.4s, v2.4s\n"
396 "fadd v21.4s, v21.4s, v2.4s\n"
397 "fadd v20.4s, v20.4s, v2.4s\n"
398 "fadd v19.4s, v19.4s, v2.4s\n"
399 "fadd v18.4s, v18.4s, v2.4s\n"
400 "fadd v17.4s, v17.4s, v2.4s\n"
401 "fadd v16.4s, v16.4s, v2.4s\n"
402 "fmax v31.4s, v31.4s, v1.4s\n"
403 "fmax v30.4s, v30.4s, v1.4s\n"
404 "fmax v29.4s, v29.4s, v1.4s\n"
405 "fmax v28.4s, v28.4s, v1.4s\n"
406 "fmax v27.4s, v27.4s, v1.4s\n"
407 "fmax v26.4s, v26.4s, v1.4s\n"
408 "fmax v25.4s, v25.4s, v1.4s\n"
409 "fmax v24.4s, v24.4s, v1.4s\n"
410 "fmax v23.4s, v23.4s, v1.4s\n"
411 "fmax v22.4s, v22.4s, v1.4s\n"
412 "fmax v21.4s, v21.4s, v1.4s\n"
413 "fmax v20.4s, v20.4s, v1.4s\n"
414 "fmax v19.4s, v19.4s, v1.4s\n"
415 "fmax v18.4s, v18.4s, v1.4s\n"
416 "fmax v17.4s, v17.4s, v1.4s\n"
417 "fmax v16.4s, v16.4s, v1.4s\n"
418 "fmin v31.4s, v31.4s, v0.4s\n"
419 "fmin v30.4s, v30.4s, v0.4s\n"
420 "fmin v29.4s, v29.4s, v0.4s\n"
421 "fmin v28.4s, v28.4s, v0.4s\n"
422 "fmin v27.4s, v27.4s, v0.4s\n"
423 "fmin v26.4s, v26.4s, v0.4s\n"
424 "fmin v25.4s, v25.4s, v0.4s\n"
425 "fmin v24.4s, v24.4s, v0.4s\n"
426 "fmin v23.4s, v23.4s, v0.4s\n"
427 "fmin v22.4s, v22.4s, v0.4s\n"
428 "fmin v21.4s, v21.4s, v0.4s\n"
429 "fmin v20.4s, v20.4s, v0.4s\n"
430 "fmin v19.4s, v19.4s, v0.4s\n"
431 "fmin v18.4s, v18.4s, v0.4s\n"
432 "fmin v17.4s, v17.4s, v0.4s\n"
433 "fmin v16.4s, v16.4s, v0.4s\n"
434 "blt 8f\n"
435 "mov x20, %x[dst]\n"
436 "str q31, [x20, #0x0]\n"
437 "add x20, x20, %x[dst_stride_row]\n"
438 "str q30, [x20, #0x0]\n"
439 "add x20, x20, %x[dst_stride_row]\n"
440 "str q29, [x20, #0x0]\n"
441 "add x20, x20, %x[dst_stride_row]\n"
442 "str q28, [x20, #0x0]\n"
443 "add x20, x20, %x[dst_stride_row]\n"
444 "str q27, [x20, #0x0]\n"
445 "add x20, x20, %x[dst_stride_row]\n"
446 "str q26, [x20, #0x0]\n"
447 "add x20, x20, %x[dst_stride_row]\n"
448 "str q25, [x20, #0x0]\n"
449 "add x20, x20, %x[dst_stride_row]\n"
450 "str q24, [x20, #0x0]\n"
451 "add x20, x20, %x[dst_stride_row]\n"
452 "str q23, [x20, #0x0]\n"
453 "add x20, x20, %x[dst_stride_row]\n"
454 "str q22, [x20, #0x0]\n"
455 "add x20, x20, %x[dst_stride_row]\n"
456 "str q21, [x20, #0x0]\n"
457 "add x20, x20, %x[dst_stride_row]\n"
458 "str q20, [x20, #0x0]\n"
459 "add x20, x20, %x[dst_stride_row]\n"
460 "str q19, [x20, #0x0]\n"
461 "add x20, x20, %x[dst_stride_row]\n"
462 "str q18, [x20, #0x0]\n"
463 "add x20, x20, %x[dst_stride_row]\n"
464 "str q17, [x20, #0x0]\n"
465 "add x20, x20, %x[dst_stride_row]\n"
466 "str q16, [x20, #0x0]\n"
467 "b 13f\n"
468 "8:" // Partial output
469 "mov x28, %x[dst]\n"
470 "add x26, x28, %x[dst_stride_row], LSL #2\n"
471 "add x25, x26, %x[dst_stride_row], LSL #1\n"
472 "add x24, x26, %x[dst_stride_row]\n"
473 "add x23, x25, %x[dst_stride_row]\n"
474 "add x22, x28, %x[dst_stride_row], LSL #1\n"
475 "add x21, x28, %x[dst_stride_row]\n"
476 "add x20, x22, %x[dst_stride_row]\n"
477 "add x27, x23, %x[dst_stride_row]\n"
478 "tbz x10, #1, 9f\n"
479 "st1 { v24.d }[0], [x23], #0x8\n"
480 "st1 { v25.d }[0], [x25], #0x8\n"
481 "st1 { v26.d }[0], [x24], #0x8\n"
482 "st1 { v27.d }[0], [x26], #0x8\n"
483 "st1 { v28.d }[0], [x20], #0x8\n"
484 "st1 { v29.d }[0], [x22], #0x8\n"
485 "st1 { v30.d }[0], [x21], #0x8\n"
486 "st1 { v31.d }[0], [x28], #0x8\n"
487 "tbz x10, #0, 10f\n"
488 "st1 { v24.s }[2], [x23]\n"
489 "st1 { v25.s }[2], [x25]\n"
490 "st1 { v26.s }[2], [x24]\n"
491 "st1 { v27.s }[2], [x26]\n"
492 "st1 { v28.s }[2], [x20]\n"
493 "st1 { v29.s }[2], [x22]\n"
494 "st1 { v30.s }[2], [x21]\n"
495 "st1 { v31.s }[2], [x28]\n"
496 "b 10f\n"
497 "9:" // Output block 0: partial_1_0
498 "st1 { v24.s }[0], [x23]\n"
499 "st1 { v25.s }[0], [x25]\n"
500 "st1 { v26.s }[0], [x24]\n"
501 "st1 { v27.s }[0], [x26]\n"
502 "st1 { v28.s }[0], [x20]\n"
503 "st1 { v29.s }[0], [x22]\n"
504 "st1 { v30.s }[0], [x21]\n"
505 "st1 { v31.s }[0], [x28]\n"
506 "10:" // Output block 0: Done
507 "add x26, x27, %x[dst_stride_row], LSL #2\n"
508 "add x25, x27, %x[dst_stride_row], LSL #1\n"
509 "add x24, x26, %x[dst_stride_row], LSL #1\n"
510 "add x23, x27, %x[dst_stride_row]\n"
511 "add x22, x25, %x[dst_stride_row]\n"
512 "add x21, x26, %x[dst_stride_row]\n"
513 "add x20, x24, %x[dst_stride_row]\n"
514 "tbz x10, #1, 11f\n"
515 "st1 { v16.d }[0], [x20], #0x8\n"
516 "st1 { v17.d }[0], [x24], #0x8\n"
517 "st1 { v18.d }[0], [x21], #0x8\n"
518 "st1 { v19.d }[0], [x26], #0x8\n"
519 "st1 { v20.d }[0], [x22], #0x8\n"
520 "st1 { v21.d }[0], [x25], #0x8\n"
521 "st1 { v22.d }[0], [x23], #0x8\n"
522 "st1 { v23.d }[0], [x27], #0x8\n"
523 "tbz x10, #0, 12f\n"
524 "st1 { v16.s }[2], [x20]\n"
525 "st1 { v17.s }[2], [x24]\n"
526 "st1 { v18.s }[2], [x21]\n"
527 "st1 { v19.s }[2], [x26]\n"
528 "st1 { v20.s }[2], [x22]\n"
529 "st1 { v21.s }[2], [x25]\n"
530 "st1 { v22.s }[2], [x23]\n"
531 "st1 { v23.s }[2], [x27]\n"
532 "b 12f\n"
533 "11:" // Output block 1: partial_1_0
534 "st1 { v16.s }[0], [x20]\n"
535 "st1 { v17.s }[0], [x24]\n"
536 "st1 { v18.s }[0], [x21]\n"
537 "st1 { v19.s }[0], [x26]\n"
538 "st1 { v20.s }[0], [x22]\n"
539 "st1 { v21.s }[0], [x25]\n"
540 "st1 { v22.s }[0], [x23]\n"
541 "st1 { v23.s }[0], [x27]\n"
542 "12:" // Output block 1: Done
543 "13:" // Output stage exit
544 "subs x10, x10, #0x4\n"
545 "add %x[dst], %x[dst], #0x10\n"
546 "bgt 2b\n"
547 "mov x20, #0x4\n"
548 "sub x13, x13, #0x10\n"
549 "cmp x13, #0x10\n"
550 "mov %x[dst], x9\n"
551 "madd %x[lhs_packed], x20, x12, %x[lhs_packed]\n"
552 "bge 1b\n"
553 "14:" // Row loop skip
554 "cbz x13, 23f\n"
555 "15:" // Row tail: Row loop
556 "mov x26, %x[rhs_packed]\n"
557 "mov x25, %x[n]\n"
558 "add x24, %x[dst], %x[dst_stride_row], LSL #2\n"
559 "16:" // Row tail: Column loop
560 "mov x27, %x[lhs_packed]\n"
561 "movi v31.4s, #0x0\n"
562 "movi v30.4s, #0x0\n"
563 "mov x20, %x[num_blocks]\n"
564 "movi v29.4s, #0x0\n"
565 "movi v28.4s, #0x0\n"
566 "17:" // Row tail: Sub block loop
567 "ldr q19, [x26, #0x0]\n"
568 "ldr q18, [x26, #0x10]\n"
569 "subs x20, x20, #0x1\n"
570 "ldr q17, [x27, #0x0]\n"
571 "ldr q16, [x27, #0x10]\n"
572 "ldr q27, [x26, #0x20]\n"
573 "ldr q26, [x26, #0x30]\n"
574 "ldr q25, [x27, #0x20]\n"
575 "ldr q24, [x27, #0x30]\n"
576 "ldr q23, [x26, #0x40]\n"
577 "ldr q22, [x26, #0x50]\n"
578 ".inst 0x4e93a63f // smmla v31.4s, v17.16b, v19.16b\n"
579 ".inst 0x4e92a63e // smmla v30.4s, v17.16b, v18.16b\n"
580 "ldr q21, [x27, #0x40]\n"
581 "ldr q20, [x27, #0x50]\n"
582 ".inst 0x4e93a61d // smmla v29.4s, v16.16b, v19.16b\n"
583 ".inst 0x4e92a61c // smmla v28.4s, v16.16b, v18.16b\n"
584 "ldr q19, [x26, #0x60]\n"
585 "ldr q18, [x26, #0x70]\n"
586 "add x26, x26, #0x80\n"
587 "ldr q17, [x27, #0x60]\n"
588 "ldr q16, [x27, #0x70]\n"
589 "add x27, x27, #0x80\n"
590 ".inst 0x4e9ba73f // smmla v31.4s, v25.16b, v27.16b\n"
591 ".inst 0x4e9aa73e // smmla v30.4s, v25.16b, v26.16b\n"
592 ".inst 0x4e9ba71d // smmla v29.4s, v24.16b, v27.16b\n"
593 ".inst 0x4e9aa71c // smmla v28.4s, v24.16b, v26.16b\n"
594 ".inst 0x4e97a6bf // smmla v31.4s, v21.16b, v23.16b\n"
595 ".inst 0x4e96a6be // smmla v30.4s, v21.16b, v22.16b\n"
596 ".inst 0x4e97a69d // smmla v29.4s, v20.16b, v23.16b\n"
597 ".inst 0x4e96a69c // smmla v28.4s, v20.16b, v22.16b\n"
598 ".inst 0x4e93a63f // smmla v31.4s, v17.16b, v19.16b\n"
599 ".inst 0x4e92a63e // smmla v30.4s, v17.16b, v18.16b\n"
600 ".inst 0x4e93a61d // smmla v29.4s, v16.16b, v19.16b\n"
601 ".inst 0x4e92a61c // smmla v28.4s, v16.16b, v18.16b\n"
602 "bgt 17b\n"
603 "ldr q18, [x26, #0x0]\n"
604 "ld1 { v17.4s }, [x27]\n"
605 "uzp1 v24.2d, v31.2d, v30.2d\n"
606 "uzp2 v23.2d, v31.2d, v30.2d\n"
607 "ldr q22, [x26, #0x10]\n"
608 "uzp1 v21.2d, v29.2d, v28.2d\n"
609 "uzp2 v20.2d, v29.2d, v28.2d\n"
610 "add x27, x27, #0x10\n"
611 "ldr q16, [x27, #0x0]\n"
612 "add x26, x26, #0x20\n"
613 "mla v24.4s, v18.4s, v17.s[0]\n"
614 "mla v23.4s, v18.4s, v17.s[1]\n"
615 "mla v21.4s, v18.4s, v17.s[2]\n"
616 "mla v20.4s, v18.4s, v17.s[3]\n"
617 "fmul v19.4s, v22.4s, v16.s[0]\n"
618 "fmul v18.4s, v22.4s, v16.s[1]\n"
619 "fmul v17.4s, v22.4s, v16.s[2]\n"
620 "fmul v16.4s, v22.4s, v16.s[3]\n"
621 "scvtf v24.4s, v24.4s\n"
622 "scvtf v23.4s, v23.4s\n"
623 "scvtf v21.4s, v21.4s\n"
624 "scvtf v20.4s, v20.4s\n"
625 "fmul v31.4s, v24.4s, v19.4s\n"
626 "fmul v30.4s, v23.4s, v18.4s\n"
627 "fmul v29.4s, v21.4s, v17.4s\n"
628 "fmul v28.4s, v20.4s, v16.4s\n"
629 "ldr q18, [x26, #0x0]\n"
630 "ld1r { v17.4s }, [%x[clamp_vals]]\n"
631 "add x20, %x[clamp_vals], #0x4\n"
632 "cmp x25, #0x4\n"
633 "ld1r { v16.4s }, [x20]\n"
634 "add x26, x26, #0x10\n"
635 "fadd v31.4s, v31.4s, v18.4s\n"
636 "fadd v30.4s, v30.4s, v18.4s\n"
637 "fadd v29.4s, v29.4s, v18.4s\n"
638 "fadd v28.4s, v28.4s, v18.4s\n"
639 "fmax v31.4s, v31.4s, v17.4s\n"
640 "fmax v30.4s, v30.4s, v17.4s\n"
641 "fmax v29.4s, v29.4s, v17.4s\n"
642 "fmax v28.4s, v28.4s, v17.4s\n"
643 "fmin v31.4s, v31.4s, v16.4s\n"
644 "fmin v30.4s, v30.4s, v16.4s\n"
645 "fmin v29.4s, v29.4s, v16.4s\n"
646 "fmin v28.4s, v28.4s, v16.4s\n"
647 "blt 19f\n"
648 "mov x20, %x[dst]\n"
649 "cmp x13, #0x1\n"
650 "str q31, [x20, #0x0]\n"
651 "add x20, x20, %x[dst_stride_row]\n"
652 "ble 22f\n"
653 "cmp x13, #0x2\n"
654 "str q30, [x20, #0x0]\n"
655 "add x20, x20, %x[dst_stride_row]\n"
656 "ble 22f\n"
657 "cmp x13, #0x3\n"
658 "str q29, [x20, #0x0]\n"
659 "add x20, x20, %x[dst_stride_row]\n"
660 "ble 22f\n"
661 "str q28, [x20, #0x0]\n"
662 "b 22f\n"
663 "19:" // Row tail: Partial output
664 "mov x23, %x[dst]\n"
665 "cmp x13, #0x1\n"
666 "add x22, x23, %x[dst_stride_row]\n"
667 "csel x22, x22, x23, GT\n"
668 "cmp x13, #0x2\n"
669 "add x21, x23, %x[dst_stride_row], LSL #1\n"
670 "csel x21, x21, x22, GT\n"
671 "cmp x13, #0x3\n"
672 "add x20, x21, %x[dst_stride_row]\n"
673 "csel x20, x20, x21, GT\n"
674 "tbz x25, #1, 20f\n"
675 "st1 { v28.d }[0], [x20], #0x8\n"
676 "st1 { v29.d }[0], [x21], #0x8\n"
677 "st1 { v30.d }[0], [x22], #0x8\n"
678 "st1 { v31.d }[0], [x23], #0x8\n"
679 "tbz x25, #0, 21f\n"
680 "st1 { v28.s }[2], [x20]\n"
681 "st1 { v29.s }[2], [x21]\n"
682 "st1 { v30.s }[2], [x22]\n"
683 "st1 { v31.s }[2], [x23]\n"
684 "b 21f\n"
685 "20:" // Row tail: Output block 0: partial_1_0
686 "st1 { v28.s }[0], [x20]\n"
687 "st1 { v29.s }[0], [x21]\n"
688 "st1 { v30.s }[0], [x22]\n"
689 "st1 { v31.s }[0], [x23]\n"
690 "21:" // Row tail: Output block 0: Done
691 "22:" // Row tail: Output stage exit
692 "subs x25, x25, #0x4\n"
693 "add %x[dst], %x[dst], #0x10\n"
694 "bgt 16b\n"
695 "subs x13, x13, #0x4\n"
696 "add %x[lhs_packed], %x[lhs_packed], x12\n"
697 "mov %x[dst], x24\n"
698 "bgt 15b\n"
699 "23:" // Row tail: Row loop skip
700 : [dst] "+&r"(dst), [lhs_packed] "+&r"(lhs_packed)
701 155 : [clamp_vals] "r"(clamp_vals), [dst_stride_row] "r"(dst_stride_row), [m] "r"(m), [n] "r"(n),
702 155 [num_blocks] "r"(num_blocks), [rhs_packed] "r"(rhs_packed)
703 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14",
704 "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
705 "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27",
706 "x28");
707 155 }
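A hedged call-site sketch for the kernel above (editorial, not from this file): dst_stride_col must equal sizeof(float), as asserted at the top of the function, while scalar_min and scalar_max give the clamp range applied just before the stores:

#include <float.h>
#include <stddef.h>

#include "kai_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm.h"

// Run the full matmul with an effectively disabled clamp. lhs_packed and
// rhs_packed are assumed to come from the matching qai8dxp/qsi8cxp packing
// micro-kernels (not shown here).
static void run_sketch(
    size_t m, size_t n, size_t k, const void* lhs_packed, const void* rhs_packed, float* dst) {
    kai_run_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm(
        m, n, k, lhs_packed, rhs_packed, dst,
        /* dst_stride_row */ n * sizeof(float),
        /* dst_stride_col */ sizeof(float),
        /* scalar_min */ -FLT_MAX,
        /* scalar_max */ FLT_MAX);
}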
708
709 #endif // Architectural features check.
710