KleidiAI Coverage Report


Directory: ./
File: kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4cxp/kai_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm.c
Date: 2025-10-20 13:18:31
            Coverage   Exec   Excl   Total
Lines:         97.6%     41      7      49
Functions:    100.0%     14      0      14
Branches:      50.0%      1     14      16

Line Branch Exec Source
1 //
2 // SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
3 //
4 // SPDX-License-Identifier: Apache-2.0
5 //
6
7 // Do not flag up inline assembly blocks
8 #pragma GCC diagnostic ignored "-Woverlength-strings"
9
10 #if !defined(__ARM_FEATURE_MATMUL_INT8)
11 #error "I8mm extension required to compile this micro-kernel"
12 #else
13 #include "kai_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm.h"
14
15 #include <arm_neon.h>
16 #include <stddef.h>
17 #include <stdint.h>
18
19 #include "kai/kai_common.h"
20
21 static const size_t kai_m_step = 8;
22 static const size_t kai_n_step = 4;
23 static const size_t kai_mr = 4;
24 static const size_t kai_nr = 4;
25 static const size_t kai_kr = 16;
26 static const size_t kai_sr = 2;
27 static const size_t kai_num_bytes_multiplier_lhs = sizeof(float);
28 static const size_t kai_num_bytes_multiplier_rhs = sizeof(float);
29 static const size_t kai_num_bytes_offset_lhs = sizeof(int32_t);
30 static const size_t kai_num_bytes_sum_rhs = sizeof(int32_t);
31 static const size_t kai_num_bytes_bias = sizeof(float);
32
33 641 inline static size_t kai_k_roundedup(size_t k) {
34 // Since we pack a float and int32 value at the end of the row,
35 // we must make sure that k is a multiple of 4 for alignment
36 641 size_t kr_sr_roundedup4 = kai_roundup(kai_kr * kai_sr, 4);
37 1282 return kai_roundup(k, kr_sr_roundedup4);
38 641 }
39
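A minimal worked example of the rounding above, assuming kai_roundup(a, b) from kai_common.h rounds a up to the nearest multiple of b:

    // kai_kr * kai_sr = 16 * 2 = 32, and kai_roundup(32, 4) = 32,
    // so k is rounded up to the next multiple of 32:
    //   kai_k_roundedup(256) == 256
    //   kai_k_roundedup(100) == 128
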
40 240 inline static size_t kai_lhs_packed_stride(size_t k) {
41 240 const size_t k_internal = kai_k_roundedup(k);
42
43 KAI_ASSERT((k_internal % 2) == 0);
44
45 480 return kai_mr * (k_internal * sizeof(int8_t) + kai_num_bytes_multiplier_lhs + kai_num_bytes_offset_lhs);
46 240 }
47
48 240 inline static size_t kai_rhs_packed_stride(size_t k) {
49 240 const size_t k_internal = kai_k_roundedup(k);
50
51 KAI_ASSERT((k_internal % 2) == 0);
52
53 480 return kai_nr * ((k_internal / 2) + kai_num_bytes_multiplier_rhs + kai_num_bytes_sum_rhs + kai_num_bytes_bias);
54 240 }
55
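To make the two stride formulas concrete, a worked example for k = 256 (already a multiple of 32, so k_internal = 256); the reading of each term is editorial, based on the constants above:

    // LHS: kai_mr * (k_internal * sizeof(int8_t) + 4 + 4)
    //    = 4 * (256 + 4 + 4) = 1056 bytes per block of kai_mr packed rows
    //      (int8 data plus, per row, a float multiplier and an int32 offset).
    // RHS: kai_nr * (k_internal / 2 + 4 + 4 + 4)
    //    = 4 * (128 + 4 + 4 + 4) = 560 bytes per block of kai_nr packed columns
    //      (two int4 values per byte plus, per column, a float multiplier,
    //       an int32 sum and a float bias).
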
56 320 size_t kai_get_m_step_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(void) {
57 320 return kai_m_step;
58 }
59
60 320 size_t kai_get_n_step_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(void) {
61 320 return kai_n_step;
62 }
63
64 240 size_t kai_get_mr_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(void) {
65 240 return kai_mr;
66 }
67
68 240 size_t kai_get_nr_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(void) {
69 240 return kai_nr;
70 }
71
72 320 size_t kai_get_kr_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(void) {
73 320 return kai_kr;
74 }
75
76 320 size_t kai_get_sr_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(void) {
77 320 return kai_sr;
78 }
79
80 240 size_t kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(size_t m_idx, size_t k) {
81 KAI_ASSERT((m_idx % kai_m_step) == 0);
82
83 240 return (m_idx / kai_mr) * kai_lhs_packed_stride(k);
84 }
85
86 240 size_t kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(size_t n_idx, size_t k) {
87 KAI_ASSERT((n_idx % kai_n_step) == 0);
88
89 240 return (n_idx / kai_nr) * kai_rhs_packed_stride(k);
90 }
91
92 160 size_t kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(
93 size_t m_idx, size_t n_idx, size_t dst_stride) {
94 KAI_ASSERT((m_idx % kai_m_step) == 0);
95 KAI_ASSERT((n_idx % kai_n_step) == 0);
96
97 160 return (n_idx * sizeof(float)) + m_idx * dst_stride;
98 }
99
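A short worked example, assuming a dense row-major f32 output so that dst_stride = n * sizeof(float):

    // m_idx = 8, n_idx = 4, n = 64:
    // offset = 4 * sizeof(float) + 8 * (64 * sizeof(float))
    //        = 16 + 2048 = 2064 bytes into dst
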
100 160 size_t kai_get_dst_size_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(size_t m, size_t n) {
101 160 return m * n * sizeof(float);
102 }
103
104 161 void kai_run_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(
105 size_t m, size_t n, size_t k, const void* lhs_packed, const void* rhs_packed,
106 float* dst, // NOLINT(readability-non-const-parameter)
107 size_t dst_stride_row, size_t dst_stride_col, float scalar_min, float scalar_max) {
108 KAI_ASSERT(dst_stride_col == sizeof(float));
109
110 1/2 161 if (m == 0) {
✓ Branch 0 taken 161 times.
✗ Branch 1 not taken.
111 return;
112 }
113
114 161 const size_t k_internal = kai_k_roundedup(k);
115
116 161 size_t num_blocks = k_internal / 32;
117
118 161 float clamp_vals[2] = {scalar_min, scalar_max};
119
120 322 __asm__ __volatile__(
121 "mov x12, %x[m]\n"
122 "mov x11, #0x80\n"
123 "movi v11.16b, #0xf0\n"
124 "mov x20, #0x20\n"
125 "cmp x12, #0x8\n"
126 "madd x11, %x[num_blocks], x11, x20\n"
127 "blt 10f\n"
128 "1:" // Row loop
129 "mov x10, %x[rhs_packed]\n"
130 "mov x9, %x[n]\n"
131 "add x28, %x[dst], %x[dst_stride_row], LSL #3\n"
132 "2:" // Column loop
133 "mov x22, %x[lhs_packed]\n"
134 "movi v10.4s, #0x0\n"
135 "movi v9.4s, #0x0\n"
136 "mov x21, %x[num_blocks]\n"
137 "movi v8.4s, #0x0\n"
138 "movi v7.4s, #0x0\n"
139 "movi v6.4s, #0x0\n"
140 "movi v5.4s, #0x0\n"
141 "add x20, x22, x11\n"
142 "movi v4.4s, #0x0\n"
143 "movi v3.4s, #0x0\n"
144 "3:" // Sub block loop
145 "ldr q2, [x10, #0x0]\n"
146 "ldr q1, [x10, #0x10]\n"
147 "subs x21, x21, #0x1\n"
148 "ldr q20, [x22, #0x0]\n"
149 "ldr q19, [x22, #0x10]\n"
150 "ldr q18, [x20, #0x0]\n"
151 "ldr q0, [x20, #0x10]\n"
152 "ldr q31, [x10, #0x20]\n"
153 "ldr q30, [x10, #0x30]\n"
154 "shl v17.16b, v2.16b, #0x4\n"
155 "shl v16.16b, v1.16b, #0x4\n"
156 "ldr q29, [x22, #0x20]\n"
157 "ldr q28, [x22, #0x30]\n"
158 "and v2.16b, v2.16b, v11.16b\n"
159 "and v1.16b, v1.16b, v11.16b\n"
160 "ldr q27, [x20, #0x20]\n"
161 "ldr q26, [x20, #0x30]\n"
162 "add x10, x10, #0x40\n"
163 "ldr q25, [x22, #0x40]\n"
164 "ldr q24, [x22, #0x50]\n"
165 ".inst 0x4e91a68a // smmla v10.4s, v20.16b, v17.16b\n"
166 ".inst 0x4e90a689 // smmla v9.4s, v20.16b, v16.16b\n"
167 "ldr q23, [x20, #0x40]\n"
168 "ldr q22, [x20, #0x50]\n"
169 ".inst 0x4e91a668 // smmla v8.4s, v19.16b, v17.16b\n"
170 ".inst 0x4e90a667 // smmla v7.4s, v19.16b, v16.16b\n"
171 "ldr q21, [x22, #0x60]\n"
172 "ldr q20, [x22, #0x70]\n"
173 ".inst 0x4e91a646 // smmla v6.4s, v18.16b, v17.16b\n"
174 ".inst 0x4e90a645 // smmla v5.4s, v18.16b, v16.16b\n"
175 "ldr q19, [x20, #0x60]\n"
176 "ldr q18, [x20, #0x70]\n"
177 ".inst 0x4e91a404 // smmla v4.4s, v0.16b, v17.16b\n"
178 ".inst 0x4e90a403 // smmla v3.4s, v0.16b, v16.16b\n"
179 "shl v17.16b, v31.16b, #0x4\n"
180 "shl v16.16b, v30.16b, #0x4\n"
181 "add x22, x22, #0x80\n"
182 "add x20, x20, #0x80\n"
183 "and v31.16b, v31.16b, v11.16b\n"
184 "and v30.16b, v30.16b, v11.16b\n"
185 ".inst 0x4e91a7aa // smmla v10.4s, v29.16b, v17.16b\n"
186 ".inst 0x4e90a7a9 // smmla v9.4s, v29.16b, v16.16b\n"
187 ".inst 0x4e91a788 // smmla v8.4s, v28.16b, v17.16b\n"
188 ".inst 0x4e90a787 // smmla v7.4s, v28.16b, v16.16b\n"
189 ".inst 0x4e91a766 // smmla v6.4s, v27.16b, v17.16b\n"
190 ".inst 0x4e90a765 // smmla v5.4s, v27.16b, v16.16b\n"
191 ".inst 0x4e91a744 // smmla v4.4s, v26.16b, v17.16b\n"
192 ".inst 0x4e90a743 // smmla v3.4s, v26.16b, v16.16b\n"
193 ".inst 0x4e82a72a // smmla v10.4s, v25.16b, v2.16b\n"
194 ".inst 0x4e81a729 // smmla v9.4s, v25.16b, v1.16b\n"
195 ".inst 0x4e82a708 // smmla v8.4s, v24.16b, v2.16b\n"
196 ".inst 0x4e81a707 // smmla v7.4s, v24.16b, v1.16b\n"
197 ".inst 0x4e82a6e6 // smmla v6.4s, v23.16b, v2.16b\n"
198 ".inst 0x4e81a6e5 // smmla v5.4s, v23.16b, v1.16b\n"
199 ".inst 0x4e82a6c4 // smmla v4.4s, v22.16b, v2.16b\n"
200 ".inst 0x4e81a6c3 // smmla v3.4s, v22.16b, v1.16b\n"
201 ".inst 0x4e9fa6aa // smmla v10.4s, v21.16b, v31.16b\n"
202 ".inst 0x4e9ea6a9 // smmla v9.4s, v21.16b, v30.16b\n"
203 ".inst 0x4e9fa688 // smmla v8.4s, v20.16b, v31.16b\n"
204 ".inst 0x4e9ea687 // smmla v7.4s, v20.16b, v30.16b\n"
205 ".inst 0x4e9fa666 // smmla v6.4s, v19.16b, v31.16b\n"
206 ".inst 0x4e9ea665 // smmla v5.4s, v19.16b, v30.16b\n"
207 ".inst 0x4e9fa644 // smmla v4.4s, v18.16b, v31.16b\n"
208 ".inst 0x4e9ea643 // smmla v3.4s, v18.16b, v30.16b\n"
209 "bgt 3b\n"
210 "ldr q25, [x10, #0x0]\n"
211 "ld1 { v17.4s }, [x22]\n"
212 "uzp1 v23.2d, v10.2d, v9.2d\n"
213 "uzp2 v22.2d, v10.2d, v9.2d\n"
214 "ldr q24, [x10, #0x10]\n"
215 "uzp1 v21.2d, v8.2d, v7.2d\n"
216 "uzp2 v20.2d, v8.2d, v7.2d\n"
217 "add x22, x22, #0x10\n"
218 "ldr q16, [x22, #0x0]\n"
219 "add x10, x10, #0x20\n"
220 "mla v23.4s, v25.4s, v17.s[0]\n"
221 "mla v22.4s, v25.4s, v17.s[1]\n"
222 "mla v21.4s, v25.4s, v17.s[2]\n"
223 "mla v20.4s, v25.4s, v17.s[3]\n"
224 "fmul v19.4s, v24.4s, v16.s[0]\n"
225 "fmul v18.4s, v24.4s, v16.s[1]\n"
226 "fmul v17.4s, v24.4s, v16.s[2]\n"
227 "fmul v16.4s, v24.4s, v16.s[3]\n"
228 "scvtf v23.4s, v23.4s\n"
229 "scvtf v22.4s, v22.4s\n"
230 "scvtf v21.4s, v21.4s\n"
231 "scvtf v20.4s, v20.4s\n"
232 "fmul v10.4s, v23.4s, v19.4s\n"
233 "fmul v9.4s, v22.4s, v18.4s\n"
234 "fmul v8.4s, v21.4s, v17.4s\n"
235 "fmul v7.4s, v20.4s, v16.4s\n"
236 "ld1 { v17.4s }, [x20]\n"
237 "uzp1 v23.2d, v6.2d, v5.2d\n"
238 "uzp2 v22.2d, v6.2d, v5.2d\n"
239 "add x20, x20, #0x10\n"
240 "ldr q16, [x20, #0x0]\n"
241 "uzp1 v21.2d, v4.2d, v3.2d\n"
242 "uzp2 v20.2d, v4.2d, v3.2d\n"
243 "mla v23.4s, v25.4s, v17.s[0]\n"
244 "mla v22.4s, v25.4s, v17.s[1]\n"
245 "mla v21.4s, v25.4s, v17.s[2]\n"
246 "mla v20.4s, v25.4s, v17.s[3]\n"
247 "fmul v19.4s, v24.4s, v16.s[0]\n"
248 "fmul v18.4s, v24.4s, v16.s[1]\n"
249 "fmul v17.4s, v24.4s, v16.s[2]\n"
250 "scvtf v23.4s, v23.4s\n"
251 "fmul v16.4s, v24.4s, v16.s[3]\n"
252 "scvtf v22.4s, v22.4s\n"
253 "scvtf v21.4s, v21.4s\n"
254 "scvtf v20.4s, v20.4s\n"
255 "fmul v6.4s, v23.4s, v19.4s\n"
256 "fmul v5.4s, v22.4s, v18.4s\n"
257 "fmul v4.4s, v21.4s, v17.4s\n"
258 "fmul v3.4s, v20.4s, v16.4s\n"
259 "ldr q18, [x10, #0x0]\n"
260 "ld1r { v17.4s }, [%x[clamp_vals]]\n"
261 "add x20, %x[clamp_vals], #0x4\n"
262 "cmp x9, #0x4\n"
263 "ld1r { v16.4s }, [x20]\n"
264 "add x10, x10, #0x10\n"
265 "fadd v10.4s, v10.4s, v18.4s\n"
266 "fadd v9.4s, v9.4s, v18.4s\n"
267 "fadd v8.4s, v8.4s, v18.4s\n"
268 "fadd v7.4s, v7.4s, v18.4s\n"
269 "fadd v6.4s, v6.4s, v18.4s\n"
270 "fadd v5.4s, v5.4s, v18.4s\n"
271 "fadd v4.4s, v4.4s, v18.4s\n"
272 "fadd v3.4s, v3.4s, v18.4s\n"
273 "fmax v10.4s, v10.4s, v17.4s\n"
274 "fmax v9.4s, v9.4s, v17.4s\n"
275 "fmax v8.4s, v8.4s, v17.4s\n"
276 "fmax v7.4s, v7.4s, v17.4s\n"
277 "fmax v6.4s, v6.4s, v17.4s\n"
278 "fmax v5.4s, v5.4s, v17.4s\n"
279 "fmax v4.4s, v4.4s, v17.4s\n"
280 "fmax v3.4s, v3.4s, v17.4s\n"
281 "fmin v10.4s, v10.4s, v16.4s\n"
282 "fmin v9.4s, v9.4s, v16.4s\n"
283 "fmin v8.4s, v8.4s, v16.4s\n"
284 "fmin v7.4s, v7.4s, v16.4s\n"
285 "fmin v6.4s, v6.4s, v16.4s\n"
286 "fmin v5.4s, v5.4s, v16.4s\n"
287 "fmin v4.4s, v4.4s, v16.4s\n"
288 "fmin v3.4s, v3.4s, v16.4s\n"
289 "blt 6f\n"
290 "mov x20, %x[dst]\n"
291 "str q10, [x20, #0x0]\n"
292 "add x20, x20, %x[dst_stride_row]\n"
293 "str q9, [x20, #0x0]\n"
294 "add x20, x20, %x[dst_stride_row]\n"
295 "str q8, [x20, #0x0]\n"
296 "add x20, x20, %x[dst_stride_row]\n"
297 "str q7, [x20, #0x0]\n"
298 "add x20, x20, %x[dst_stride_row]\n"
299 "str q6, [x20, #0x0]\n"
300 "add x20, x20, %x[dst_stride_row]\n"
301 "str q5, [x20, #0x0]\n"
302 "add x20, x20, %x[dst_stride_row]\n"
303 "str q4, [x20, #0x0]\n"
304 "add x20, x20, %x[dst_stride_row]\n"
305 "str q3, [x20, #0x0]\n"
306 "b 9f\n"
307 "6:" // Partial output
308 "mov x27, %x[dst]\n"
309 "add x26, x27, %x[dst_stride_row], LSL #2\n"
310 "add x25, x26, %x[dst_stride_row], LSL #1\n"
311 "add x24, x26, %x[dst_stride_row]\n"
312 "add x23, x25, %x[dst_stride_row]\n"
313 "add x22, x27, %x[dst_stride_row], LSL #1\n"
314 "add x21, x27, %x[dst_stride_row]\n"
315 "add x20, x22, %x[dst_stride_row]\n"
316 "tbz x9, #1, 7f\n"
317 "st1 { v3.d }[0], [x23], #0x8\n"
318 "st1 { v4.d }[0], [x25], #0x8\n"
319 "st1 { v5.d }[0], [x24], #0x8\n"
320 "st1 { v6.d }[0], [x26], #0x8\n"
321 "st1 { v7.d }[0], [x20], #0x8\n"
322 "st1 { v8.d }[0], [x22], #0x8\n"
323 "st1 { v9.d }[0], [x21], #0x8\n"
324 "st1 { v10.d }[0], [x27], #0x8\n"
325 "tbz x9, #0, 8f\n"
326 "st1 { v3.s }[2], [x23]\n"
327 "st1 { v4.s }[2], [x25]\n"
328 "st1 { v5.s }[2], [x24]\n"
329 "st1 { v6.s }[2], [x26]\n"
330 "st1 { v7.s }[2], [x20]\n"
331 "st1 { v8.s }[2], [x22]\n"
332 "st1 { v9.s }[2], [x21]\n"
333 "st1 { v10.s }[2], [x27]\n"
334 "b 8f\n"
335 "7:" // Output block 0: partial_1_0
336 "st1 { v3.s }[0], [x23]\n"
337 "st1 { v4.s }[0], [x25]\n"
338 "st1 { v5.s }[0], [x24]\n"
339 "st1 { v6.s }[0], [x26]\n"
340 "st1 { v7.s }[0], [x20]\n"
341 "st1 { v8.s }[0], [x22]\n"
342 "st1 { v9.s }[0], [x21]\n"
343 "st1 { v10.s }[0], [x27]\n"
344 "8:" // Output block 0: Done
345 "9:" // Output stage exit
346 "subs x9, x9, #0x4\n"
347 "add %x[dst], %x[dst], #0x10\n"
348 "bgt 2b\n"
349 "mov x20, #0x2\n"
350 "sub x12, x12, #0x8\n"
351 "cmp x12, #0x8\n"
352 "mov %x[dst], x28\n"
353 "madd %x[lhs_packed], x20, x11, %x[lhs_packed]\n"
354 "bge 1b\n"
355 "10:" // Row loop skip
356 "cbz x12, 19f\n"
357 "11:" // Row tail: Row loop
358 "mov x26, %x[rhs_packed]\n"
359 "mov x25, %x[n]\n"
360 "add x24, %x[dst], %x[dst_stride_row], LSL #2\n"
361 "12:" // Row tail: Column loop
362 "mov x22, %x[lhs_packed]\n"
363 "movi v10.4s, #0x0\n"
364 "movi v9.4s, #0x0\n"
365 "mov x20, %x[num_blocks]\n"
366 "movi v8.4s, #0x0\n"
367 "movi v7.4s, #0x0\n"
368 "13:" // Row tail: Sub block loop
369 "ldr q31, [x26, #0x0]\n"
370 "ldr q30, [x26, #0x10]\n"
371 "subs x20, x20, #0x1\n"
372 "ldr q29, [x22, #0x0]\n"
373 "ldr q28, [x22, #0x10]\n"
374 "ldr q27, [x26, #0x20]\n"
375 "ldr q26, [x26, #0x30]\n"
376 "add x26, x26, #0x40\n"
377 "ldr q25, [x22, #0x20]\n"
378 "ldr q24, [x22, #0x30]\n"
379 "shl v23.16b, v31.16b, #0x4\n"
380 "shl v22.16b, v30.16b, #0x4\n"
381 "ldr q21, [x22, #0x40]\n"
382 "ldr q20, [x22, #0x50]\n"
383 "and v31.16b, v31.16b, v11.16b\n"
384 "and v30.16b, v30.16b, v11.16b\n"
385 "ldr q19, [x22, #0x60]\n"
386 "ldr q18, [x22, #0x70]\n"
387 "shl v17.16b, v27.16b, #0x4\n"
388 "shl v16.16b, v26.16b, #0x4\n"
389 ".inst 0x4e97a7aa // smmla v10.4s, v29.16b, v23.16b\n"
390 ".inst 0x4e96a7a9 // smmla v9.4s, v29.16b, v22.16b\n"
391 "and v27.16b, v27.16b, v11.16b\n"
392 "add x22, x22, #0x80\n"
393 ".inst 0x4e97a788 // smmla v8.4s, v28.16b, v23.16b\n"
394 ".inst 0x4e96a787 // smmla v7.4s, v28.16b, v22.16b\n"
395 "and v26.16b, v26.16b, v11.16b\n"
396 ".inst 0x4e91a72a // smmla v10.4s, v25.16b, v17.16b\n"
397 ".inst 0x4e90a729 // smmla v9.4s, v25.16b, v16.16b\n"
398 ".inst 0x4e91a708 // smmla v8.4s, v24.16b, v17.16b\n"
399 ".inst 0x4e90a707 // smmla v7.4s, v24.16b, v16.16b\n"
400 ".inst 0x4e9fa6aa // smmla v10.4s, v21.16b, v31.16b\n"
401 ".inst 0x4e9ea6a9 // smmla v9.4s, v21.16b, v30.16b\n"
402 ".inst 0x4e9fa688 // smmla v8.4s, v20.16b, v31.16b\n"
403 ".inst 0x4e9ea687 // smmla v7.4s, v20.16b, v30.16b\n"
404 ".inst 0x4e9ba66a // smmla v10.4s, v19.16b, v27.16b\n"
405 ".inst 0x4e9aa669 // smmla v9.4s, v19.16b, v26.16b\n"
406 ".inst 0x4e9ba648 // smmla v8.4s, v18.16b, v27.16b\n"
407 ".inst 0x4e9aa647 // smmla v7.4s, v18.16b, v26.16b\n"
408 "bgt 13b\n"
409 "ldr q18, [x26, #0x0]\n"
410 "ld1 { v17.4s }, [x22]\n"
411 "uzp1 v24.2d, v10.2d, v9.2d\n"
412 "uzp2 v23.2d, v10.2d, v9.2d\n"
413 "ldr q22, [x26, #0x10]\n"
414 "uzp1 v21.2d, v8.2d, v7.2d\n"
415 "uzp2 v20.2d, v8.2d, v7.2d\n"
416 "add x22, x22, #0x10\n"
417 "ldr q16, [x22, #0x0]\n"
418 "add x26, x26, #0x20\n"
419 "mla v24.4s, v18.4s, v17.s[0]\n"
420 "mla v23.4s, v18.4s, v17.s[1]\n"
421 "mla v21.4s, v18.4s, v17.s[2]\n"
422 "mla v20.4s, v18.4s, v17.s[3]\n"
423 "fmul v19.4s, v22.4s, v16.s[0]\n"
424 "fmul v18.4s, v22.4s, v16.s[1]\n"
425 "fmul v17.4s, v22.4s, v16.s[2]\n"
426 "fmul v16.4s, v22.4s, v16.s[3]\n"
427 "scvtf v24.4s, v24.4s\n"
428 "scvtf v23.4s, v23.4s\n"
429 "scvtf v21.4s, v21.4s\n"
430 "scvtf v20.4s, v20.4s\n"
431 "fmul v10.4s, v24.4s, v19.4s\n"
432 "fmul v9.4s, v23.4s, v18.4s\n"
433 "fmul v8.4s, v21.4s, v17.4s\n"
434 "fmul v7.4s, v20.4s, v16.4s\n"
435 "ldr q18, [x26, #0x0]\n"
436 "ld1r { v17.4s }, [%x[clamp_vals]]\n"
437 "add x20, %x[clamp_vals], #0x4\n"
438 "cmp x25, #0x4\n"
439 "ld1r { v16.4s }, [x20]\n"
440 "add x26, x26, #0x10\n"
441 "fadd v10.4s, v10.4s, v18.4s\n"
442 "fadd v9.4s, v9.4s, v18.4s\n"
443 "fadd v8.4s, v8.4s, v18.4s\n"
444 "fadd v7.4s, v7.4s, v18.4s\n"
445 "fmax v10.4s, v10.4s, v17.4s\n"
446 "fmax v9.4s, v9.4s, v17.4s\n"
447 "fmax v8.4s, v8.4s, v17.4s\n"
448 "fmax v7.4s, v7.4s, v17.4s\n"
449 "fmin v10.4s, v10.4s, v16.4s\n"
450 "fmin v9.4s, v9.4s, v16.4s\n"
451 "fmin v8.4s, v8.4s, v16.4s\n"
452 "fmin v7.4s, v7.4s, v16.4s\n"
453 "blt 15f\n"
454 "mov x20, %x[dst]\n"
455 "cmp x12, #0x1\n"
456 "str q10, [x20, #0x0]\n"
457 "add x20, x20, %x[dst_stride_row]\n"
458 "ble 18f\n"
459 "cmp x12, #0x2\n"
460 "str q9, [x20, #0x0]\n"
461 "add x20, x20, %x[dst_stride_row]\n"
462 "ble 18f\n"
463 "cmp x12, #0x3\n"
464 "str q8, [x20, #0x0]\n"
465 "add x20, x20, %x[dst_stride_row]\n"
466 "ble 18f\n"
467 "str q7, [x20, #0x0]\n"
468 "b 18f\n"
469 "15:" // Row tail: Partial output
470 "mov x23, %x[dst]\n"
471 "cmp x12, #0x1\n"
472 "add x22, x23, %x[dst_stride_row]\n"
473 "csel x22, x22, x23, GT\n"
474 "cmp x12, #0x2\n"
475 "add x21, x23, %x[dst_stride_row], LSL #1\n"
476 "csel x21, x21, x22, GT\n"
477 "cmp x12, #0x3\n"
478 "add x20, x21, %x[dst_stride_row]\n"
479 "csel x20, x20, x21, GT\n"
480 "tbz x25, #1, 16f\n"
481 "st1 { v7.d }[0], [x20], #0x8\n"
482 "st1 { v8.d }[0], [x21], #0x8\n"
483 "st1 { v9.d }[0], [x22], #0x8\n"
484 "st1 { v10.d }[0], [x23], #0x8\n"
485 "tbz x25, #0, 17f\n"
486 "st1 { v7.s }[2], [x20]\n"
487 "st1 { v8.s }[2], [x21]\n"
488 "st1 { v9.s }[2], [x22]\n"
489 "st1 { v10.s }[2], [x23]\n"
490 "b 17f\n"
491 "16:" // Row tail: Output block 0: partial_1_0
492 "st1 { v7.s }[0], [x20]\n"
493 "st1 { v8.s }[0], [x21]\n"
494 "st1 { v9.s }[0], [x22]\n"
495 "st1 { v10.s }[0], [x23]\n"
496 "17:" // Row tail: Output block 0: Done
497 "18:" // Row tail: Output stage exit
498 "subs x25, x25, #0x4\n"
499 "add %x[dst], %x[dst], #0x10\n"
500 "bgt 12b\n"
501 "subs x12, x12, #0x4\n"
502 "add %x[lhs_packed], %x[lhs_packed], x11\n"
503 "mov %x[dst], x24\n"
504 "bgt 11b\n"
505 "19:" // Row tail: Row loop skip
506 : [dst] "+&r"(dst), [lhs_packed] "+&r"(lhs_packed)
507 161 : [clamp_vals] "r"(clamp_vals), [dst_stride_row] "r"(dst_stride_row), [m] "r"(m), [n] "r"(n),
508 161 [num_blocks] "r"(num_blocks), [rhs_packed] "r"(rhs_packed)
509 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18",
510 "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11",
511 "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28");
512 161 }
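
The sub-block loops above lean on one nibble-unpack idiom: shifting a packed byte left by four moves the low 4-bit weight into the byte's high half, while masking with 0xF0 keeps the high 4-bit weight in place, so both halves reach the SMMLA instructions as int8 values carrying a factor of 16 (assumed here to be compensated in the per-channel scales produced at pack time). A minimal intrinsics sketch of the same idiom, not taken from this file:

    #include <arm_neon.h>

    // Accumulate the two nibble halves of one packed RHS chunk against the two
    // LHS k-sub-blocks they correspond to (pairing assumed from the packing layout).
    static inline int32x4_t mmla_qsi4_chunk(
        int32x4_t acc, int8x16_t rhs_qsi4, int8x16_t lhs_k0, int8x16_t lhs_k1) {
        const int8x16_t rhs_lo = vshlq_n_s8(rhs_qsi4, 4);                      // low nibbles -> high half
        const int8x16_t rhs_hi = vandq_s8(rhs_qsi4, vdupq_n_s8((int8_t)0xF0)); // high nibbles in place
        acc = vmmlaq_s32(acc, lhs_k0, rhs_lo);  // SMMLA: 2x8 by 2x8 (transposed) int8 -> 2x2 int32
        acc = vmmlaq_s32(acc, lhs_k1, rhs_hi);
        return acc;
    }
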
513 #endif // Architectural feature check
514
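A hedged usage sketch (not part of this file) showing how the exported entry points compose; it assumes lhs_packed and rhs_packed were produced by the matching qai8dxp4x8 / qsi4cxp4x8 packing micro-kernels:

    #include <float.h>
    #include <stddef.h>

    #include "kai_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm.h"

    // Compute a full m x n f32 output from pre-packed operands, clamping only
    // to the finite float range.
    static void matmul_f32_int4(
        size_t m, size_t n, size_t k, const void* lhs_packed, const void* rhs_packed, float* dst) {
        const size_t dst_stride_row = n * sizeof(float);
        kai_run_matmul_clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm(
            m, n, k, lhs_packed, rhs_packed, dst, dst_stride_row,
            /*dst_stride_col=*/sizeof(float), /*scalar_min=*/-FLT_MAX, /*scalar_max=*/FLT_MAX);
    }

For tiled or multithreaded dispatch, kai_get_lhs_packed_offset_*, kai_get_rhs_packed_offset_* and kai_get_dst_offset_* give the byte offsets of the tile starting at (m_idx, n_idx), with m_idx a multiple of m_step (8) and n_idx a multiple of n_step (4).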