KleidiAI Coverage Report


Directory: ./
File: kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.c
Date: 2025-10-20 13:18:31
             Coverage   Exec   Excl   Total
Lines:          97.6%     41     19      61
Functions:     100.0%     16      0      16
Branches:       50.0%      1     38      40

Line Branch Exec Source
1 //
2 // SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
3 //
4 // SPDX-License-Identifier: Apache-2.0
5 //
6
7 // Do not flag up inline assembly blocks
8 #pragma GCC diagnostic ignored "-Woverlength-strings"
9
10 #if !defined(__ARM_FEATURE_MATMUL_INT8)
11 #error "i8mm extension required to compile this micro-kernel"
12 #else
13 #include "kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.h"
14
15 #include <arm_neon.h>
16 #include <stddef.h>
17 #include <stdint.h>
18
19 #include "kai/kai_common.h"
20
21 static const size_t kai_m_step = 16;
22 static const size_t kai_n_step = 4;
23 static const size_t kai_mr = 4;
24 static const size_t kai_nr = 4;
25 static const size_t kai_kr = 16;
26 static const size_t kai_sr = 2;
27 static const size_t kai_bl = 32;
28 static const size_t kai_num_bytes_multiplier = sizeof(uint16_t);
29
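These constants pin down the blocking scheme: each outer iteration produces a 16x4 (m_step x n_step) tile of the f32 output, packed operands are laid out in panels of mr = 4 rows / nr = 4 columns, and quantization scales apply per 32-element (bl) block along K, stored as fp16 (hence the uint16_t multiplier size). A hypothetical helper, not part of the KleidiAI API, sketching the resulting tile arithmetic:

    /* Hypothetical illustration (not KleidiAI API): how many 16x4 output
     * tiles cover an m x n result, using ceiling division on the steps.
     * The kernel itself also handles the m/n tails internally. */
    static size_t num_output_tiles(size_t m, size_t n) {
        const size_t m_tiles = (m + kai_m_step - 1) / kai_m_step;
        const size_t n_tiles = (n + kai_n_step - 1) / kai_n_step;
        return m_tiles * n_tiles;
    }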
30 92 inline static size_t kai_num_bytes_per_block_lhs(void) {
31 92 return kai_bl * sizeof(int8_t) + kai_num_bytes_multiplier;
32 }
33
34 92 inline static size_t kai_num_bytes_per_block_rhs(void) {
35 92 return (kai_bl / 2) * sizeof(int8_t) + kai_num_bytes_multiplier;
36 }
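With bl = 32 these evaluate to fixed sizes: an LHS block holds 32 int8 values plus one fp16 scale (34 bytes), and an RHS block holds 32 int4 values packed two per byte plus one fp16 scale (18 bytes). A minimal sketch checking those figures, assuming the definitions above:

    #include <assert.h>

    /* Sketch: block sizes implied by the functions above for bl == 32. */
    static void check_block_sizes(void) {
        assert(kai_num_bytes_per_block_lhs() == 34); /* 32 int8 + fp16 scale */
        assert(kai_num_bytes_per_block_rhs() == 18); /* 16 packed bytes + fp16 scale */
    }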
37
38 184 inline static size_t kai_num_blocks_per_row(size_t k) {
39 KAI_ASSUME((k % kai_bl) == 0);
40 184 return k / kai_bl;
41 }
42
43 92 inline static size_t kai_lhs_packed_stride(size_t k) {
44 92 return kai_mr * kai_num_blocks_per_row(k) * kai_num_bytes_per_block_lhs();
45 }
46
47 92 inline static size_t kai_rhs_packed_stride(size_t k) {
48 KAI_ASSUME((k % 2) == 0);
49 KAI_ASSUME((k % kai_kr) == 0);
50 KAI_ASSUME((k % kai_bl) == 0);
51
52 92 const size_t num_blocks_per_row = kai_num_blocks_per_row(k);
53 92 const size_t num_bytes_per_block = kai_num_bytes_per_block_rhs();
54
55 184 return kai_nr * (num_bytes_per_block * num_blocks_per_row);
56 92 }
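For example, with k = 256 and bl = 32 there are 8 blocks per row, so one packed LHS panel (mr = 4 rows) spans 4 * 8 * 34 = 1088 bytes and one packed RHS panel (nr = 4 columns) spans 4 * 8 * 18 = 576 bytes. A minimal sketch of that arithmetic, under the same assumptions:

    #include <assert.h>

    /* Sketch: panel strides for k = 256, following the definitions above. */
    static void stride_example(void) {
        const size_t k = 256;                            /* 8 blocks of 32 */
        assert(kai_lhs_packed_stride(k) == 4 * 8 * 34);  /* 1088 bytes */
        assert(kai_rhs_packed_stride(k) == 4 * 8 * 18);  /*  576 bytes */
    }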
57
58 144 size_t kai_get_m_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(void) {
59 144 return kai_m_step;
60 }
61
62 144 size_t kai_get_n_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(void) {
63 144 return kai_n_step;
64 }
65
66 96 size_t kai_get_mr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(void) {
67 96 return kai_mr;
68 }
69
70 96 size_t kai_get_nr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(void) {
71 96 return kai_nr;
72 }
73
74 144 size_t kai_get_kr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(void) {
75 144 return kai_kr;
76 }
77
78 96 size_t kai_get_sr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(void) {
79 96 return kai_sr;
80 }
81
82 92 size_t kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(
83 size_t m_idx, size_t k, size_t bl) {
84 KAI_ASSUME(bl == kai_bl);
85 KAI_ASSUME((k % 2) == 0);
86 KAI_ASSUME((k % kai_kr) == 0);
87 KAI_ASSUME((k % bl) == 0);
88 KAI_ASSUME((m_idx % kai_m_step) == 0);
89
90 92 return (m_idx / kai_mr) * kai_lhs_packed_stride(k);
91 }
92
93 92 size_t kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(
94 size_t n_idx, size_t k, size_t bl) {
95 KAI_ASSUME(bl == kai_bl);
96 KAI_ASSUME((k % 2) == 0);
97 KAI_ASSUME((k % kai_kr) == 0);
98 KAI_ASSUME((k % bl) == 0);
99 KAI_ASSUME((n_idx % kai_n_step) == 0);
100
101 92 return (n_idx / kai_nr) * kai_rhs_packed_stride(k);
102 }
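Note that although the indices must be multiples of m_step = 16 and n_step = 4, the offsets advance in units of mr/nr panels: m_idx = 16 skips four LHS panels, n_idx = 4 one RHS panel. A hedged sketch reusing the k = 256 figures above (panel strides 1088 and 576 bytes):

    #include <assert.h>

    /* Sketch: packed offsets for k = 256, bl = 32. */
    static void offset_example(void) {
        const size_t k = 256;
        /* m_idx = 16 -> (16 / 4) * 1088 = 4352 bytes into the packed LHS. */
        assert(kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(16, k, 32) == 4352);
        /* n_idx = 4 -> (4 / 4) * 576 = 576 bytes into the packed RHS. */
        assert(kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(4, k, 32) == 576);
    }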
103
104 46 size_t kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(
105 size_t m_idx, size_t n_idx, size_t dst_stride) {
106 KAI_ASSUME((m_idx % kai_m_step) == 0);
107 KAI_ASSUME((n_idx % kai_n_step) == 0);
108
109 46 return (n_idx * sizeof(float)) + m_idx * dst_stride;
110 }
111
112 46 size_t kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(size_t m, size_t n) {
113 46 return m * n * sizeof(float);
114 }
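The destination is a plain row-major f32 matrix, with dst_stride the row stride in bytes. For instance, with n = 64 columns (256 bytes per row), the tile starting at (m_idx, n_idx) = (16, 4) begins 16 bytes into row 16. A sketch under those assumed dimensions:

    #include <assert.h>

    /* Sketch: dst addressing for a 32 x 64 f32 output (row stride 256 B). */
    static void dst_example(void) {
        const size_t dst_stride = 64 * sizeof(float); /* 256 bytes per row */
        assert(kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(16, 4, dst_stride)
            == 4 * sizeof(float) + 16 * 256);         /* 4112 bytes */
        assert(kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(32, 64)
            == 32 * 64 * sizeof(float));
    }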
115
116 47 void kai_run_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(
117 size_t m, size_t n, size_t k, size_t bl, const void* lhs_packed, const void* rhs_packed,
118 float* dst, // NOLINT(readability-non-const-parameter)
119 size_t dst_stride_row, size_t dst_stride_col, float scalar_min, float scalar_max) {
120 KAI_ASSUME(bl == kai_bl);
121 KAI_ASSUME(k % kai_bl == 0);
122 KAI_ASSUME(dst_stride_col == sizeof(float));
123
124 1/2 47 if (m == 0) {
✓ Branch 0 taken 47 times.
✗ Branch 1 not taken.
125 return;
126 }
127
128 47 const size_t num_blocks = k / kai_bl;
129 47 float clamp_vals[2] = {scalar_min, scalar_max};
130
131 94 __asm__ __volatile__(
132 "mov x13, %x[m]\n"
133 "mov x12, #0x88\n"
134 "cmp x13, #0x10\n"
135 "mul x12, %x[num_blocks], x12\n"
136 "blt 14f\n"
137 "1:" // Row loop
138 "mov x11, %x[rhs_packed]\n"
139 "mov x10, %x[n]\n"
140 "add x9, %x[dst], %x[dst_stride_row], LSL #4\n"
141 "2:" // Column loop
142 "mov x27, %x[lhs_packed]\n"
143 "movi v31.16b, #0x0\n"
144 "movi v30.16b, #0x0\n"
145 "mov x23, %x[num_blocks]\n"
146 "movi v29.16b, #0x0\n"
147 "movi v28.16b, #0x0\n"
148 "movi v27.16b, #0x0\n"
149 "movi v26.16b, #0x0\n"
150 "add x22, x27, x12\n"
151 "add x21, x22, x12\n"
152 "movi v25.16b, #0x0\n"
153 "movi v24.16b, #0x0\n"
154 "add x20, x21, x12\n"
155 "movi v23.16b, #0x0\n"
156 "movi v22.16b, #0x0\n"
157 "movi v21.16b, #0x0\n"
158 "movi v20.16b, #0x0\n"
159 "movi v19.16b, #0x0\n"
160 "movi v18.16b, #0x0\n"
161 "movi v17.16b, #0x0\n"
162 "movi v16.16b, #0x0\n"
163 "3:" // Block loop
164 "ldr d0, [x11, #0x0]\n"
165 "ldr d3, [x27, #0x0]\n"
166 "add x11, x11, #0x8\n"
167 "add x27, x27, #0x8\n"
168 "ldr q12, [x11, #0x0]\n"
169 "ldr q4, [x11, #0x10]\n"
170 "movi v5.4s, #0x0\n"
171 "movi v14.4s, #0x0\n"
172 "ldr q9, [x27, #0x0]\n"
173 "ldr q10, [x27, #0x10]\n"
174 "movi v7.4s, #0x0\n"
175 "movi v8.4s, #0x0\n"
176 "ldr q2, [x11, #0x20]\n"
177 "ldr q11, [x11, #0x30]\n"
178 "movi v1.16b, #0xf0\n"
179 "fcvtl v6.4s, v0.4h\n"
180 "ldr q15, [x27, #0x20]\n"
181 "shl v13.16b, v12.16b, #0x4\n"
182 "shl v0.16b, v4.16b, #0x4\n"
183 "add x11, x11, #0x40\n"
184 "and v12.16b, v12.16b, v1.16b\n"
185 "and v4.16b, v4.16b, v1.16b\n"
186 "fcvtl v3.4s, v3.4h\n"
187 ".inst 0x4e8da525 // smmla v5.4s, v9.16b, v13.16b\n"
188 ".inst 0x4e80a52e // smmla v14.4s, v9.16b, v0.16b\n"
189 ".inst 0x4e8da547 // smmla v7.4s, v10.16b, v13.16b\n"
190 ".inst 0x4e80a548 // smmla v8.4s, v10.16b, v0.16b\n"
191 "shl v10.16b, v2.16b, #0x4\n"
192 "shl v9.16b, v11.16b, #0x4\n"
193 "and v2.16b, v2.16b, v1.16b\n"
194 "and v11.16b, v11.16b, v1.16b\n"
195 "ldr q1, [x27, #0x30]\n"
196 ".inst 0x4e8aa5e5 // smmla v5.4s, v15.16b, v10.16b\n"
197 ".inst 0x4e89a5ee // smmla v14.4s, v15.16b, v9.16b\n"
198 "ldr q15, [x27, #0x40]\n"
199 ".inst 0x4e8aa427 // smmla v7.4s, v1.16b, v10.16b\n"
200 ".inst 0x4e89a428 // smmla v8.4s, v1.16b, v9.16b\n"
201 "ldr q1, [x27, #0x50]\n"
202 ".inst 0x4e8ca5e5 // smmla v5.4s, v15.16b, v12.16b\n"
203 ".inst 0x4e84a5ee // smmla v14.4s, v15.16b, v4.16b\n"
204 "ldr q15, [x27, #0x60]\n"
205 ".inst 0x4e8ca427 // smmla v7.4s, v1.16b, v12.16b\n"
206 ".inst 0x4e84a428 // smmla v8.4s, v1.16b, v4.16b\n"
207 "ldr q1, [x27, #0x70]\n"
208 "add x27, x27, #0x80\n"
209 ".inst 0x4e82a5e5 // smmla v5.4s, v15.16b, v2.16b\n"
210 ".inst 0x4e8ba5ee // smmla v14.4s, v15.16b, v11.16b\n"
211 "fmul v15.4s, v6.4s, v3.s[0]\n"
212 ".inst 0x4e82a427 // smmla v7.4s, v1.16b, v2.16b\n"
213 ".inst 0x4e8ba428 // smmla v8.4s, v1.16b, v11.16b\n"
214 "uzp1 v1.2d, v5.2d, v14.2d\n"
215 "uzp2 v5.2d, v5.2d, v14.2d\n"
216 "fmul v14.4s, v6.4s, v3.s[1]\n"
217 "scvtf v1.4s, v1.4s, #0x4\n"
218 "scvtf v5.4s, v5.4s, #0x4\n"
219 "fmla v31.4s, v1.4s, v15.4s\n"
220 "fmul v15.4s, v6.4s, v3.s[2]\n"
221 "fmul v3.4s, v6.4s, v3.s[3]\n"
222 "uzp1 v1.2d, v7.2d, v8.2d\n"
223 "uzp2 v8.2d, v7.2d, v8.2d\n"
224 "fmla v30.4s, v5.4s, v14.4s\n"
225 "scvtf v1.4s, v1.4s, #0x4\n"
226 "scvtf v8.4s, v8.4s, #0x4\n"
227 "fmla v29.4s, v1.4s, v15.4s\n"
228 "fmla v28.4s, v8.4s, v3.4s\n"
229 "ldr d5, [x22, #0x0]\n"
230 "add x22, x22, #0x8\n"
231 "movi v3.4s, #0x0\n"
232 "movi v1.4s, #0x0\n"
233 "ldr q15, [x22, #0x0]\n"
234 "ldr q7, [x22, #0x10]\n"
235 "movi v14.4s, #0x0\n"
236 "movi v8.4s, #0x0\n"
237 "fcvtl v5.4s, v5.4h\n"
238 ".inst 0x4e8da5e3 // smmla v3.4s, v15.16b, v13.16b\n"
239 ".inst 0x4e80a5e1 // smmla v1.4s, v15.16b, v0.16b\n"
240 "ldr q15, [x22, #0x20]\n"
241 ".inst 0x4e8da4ee // smmla v14.4s, v7.16b, v13.16b\n"
242 ".inst 0x4e80a4e8 // smmla v8.4s, v7.16b, v0.16b\n"
243 "ldr q7, [x22, #0x30]\n"
244 ".inst 0x4e8aa5e3 // smmla v3.4s, v15.16b, v10.16b\n"
245 ".inst 0x4e89a5e1 // smmla v1.4s, v15.16b, v9.16b\n"
246 "ldr q15, [x22, #0x40]\n"
247 ".inst 0x4e8aa4ee // smmla v14.4s, v7.16b, v10.16b\n"
248 ".inst 0x4e89a4e8 // smmla v8.4s, v7.16b, v9.16b\n"
249 "ldr q7, [x22, #0x50]\n"
250 ".inst 0x4e8ca5e3 // smmla v3.4s, v15.16b, v12.16b\n"
251 ".inst 0x4e84a5e1 // smmla v1.4s, v15.16b, v4.16b\n"
252 "ldr q15, [x22, #0x60]\n"
253 ".inst 0x4e8ca4ee // smmla v14.4s, v7.16b, v12.16b\n"
254 ".inst 0x4e84a4e8 // smmla v8.4s, v7.16b, v4.16b\n"
255 "ldr q7, [x22, #0x70]\n"
256 "add x22, x22, #0x80\n"
257 ".inst 0x4e82a5e3 // smmla v3.4s, v15.16b, v2.16b\n"
258 ".inst 0x4e8ba5e1 // smmla v1.4s, v15.16b, v11.16b\n"
259 "fmul v15.4s, v6.4s, v5.s[0]\n"
260 ".inst 0x4e82a4ee // smmla v14.4s, v7.16b, v2.16b\n"
261 ".inst 0x4e8ba4e8 // smmla v8.4s, v7.16b, v11.16b\n"
262 "uzp1 v7.2d, v3.2d, v1.2d\n"
263 "uzp2 v1.2d, v3.2d, v1.2d\n"
264 "fmul v3.4s, v6.4s, v5.s[1]\n"
265 "scvtf v7.4s, v7.4s, #0x4\n"
266 "scvtf v1.4s, v1.4s, #0x4\n"
267 "fmla v27.4s, v7.4s, v15.4s\n"
268 "fmul v15.4s, v6.4s, v5.s[2]\n"
269 "fmul v7.4s, v6.4s, v5.s[3]\n"
270 "uzp1 v5.2d, v14.2d, v8.2d\n"
271 "uzp2 v14.2d, v14.2d, v8.2d\n"
272 "fmla v26.4s, v1.4s, v3.4s\n"
273 "scvtf v5.4s, v5.4s, #0x4\n"
274 "scvtf v14.4s, v14.4s, #0x4\n"
275 "fmla v25.4s, v5.4s, v15.4s\n"
276 "fmla v24.4s, v14.4s, v7.4s\n"
277 "ldr d1, [x21, #0x0]\n"
278 "add x21, x21, #0x8\n"
279 "movi v8.4s, #0x0\n"
280 "movi v5.4s, #0x0\n"
281 "ldr q3, [x21, #0x0]\n"
282 "ldr q7, [x21, #0x10]\n"
283 "movi v14.4s, #0x0\n"
284 "movi v15.4s, #0x0\n"
285 "fcvtl v1.4s, v1.4h\n"
286 ".inst 0x4e8da468 // smmla v8.4s, v3.16b, v13.16b\n"
287 ".inst 0x4e80a465 // smmla v5.4s, v3.16b, v0.16b\n"
288 "ldr q3, [x21, #0x20]\n"
289 ".inst 0x4e8da4ee // smmla v14.4s, v7.16b, v13.16b\n"
290 ".inst 0x4e80a4ef // smmla v15.4s, v7.16b, v0.16b\n"
291 "ldr q7, [x21, #0x30]\n"
292 ".inst 0x4e8aa468 // smmla v8.4s, v3.16b, v10.16b\n"
293 ".inst 0x4e89a465 // smmla v5.4s, v3.16b, v9.16b\n"
294 "ldr q3, [x21, #0x40]\n"
295 ".inst 0x4e8aa4ee // smmla v14.4s, v7.16b, v10.16b\n"
296 ".inst 0x4e89a4ef // smmla v15.4s, v7.16b, v9.16b\n"
297 "ldr q7, [x21, #0x50]\n"
298 ".inst 0x4e8ca468 // smmla v8.4s, v3.16b, v12.16b\n"
299 ".inst 0x4e84a465 // smmla v5.4s, v3.16b, v4.16b\n"
300 "ldr q3, [x21, #0x60]\n"
301 ".inst 0x4e8ca4ee // smmla v14.4s, v7.16b, v12.16b\n"
302 ".inst 0x4e84a4ef // smmla v15.4s, v7.16b, v4.16b\n"
303 "ldr q7, [x21, #0x70]\n"
304 "add x21, x21, #0x80\n"
305 ".inst 0x4e82a468 // smmla v8.4s, v3.16b, v2.16b\n"
306 ".inst 0x4e8ba465 // smmla v5.4s, v3.16b, v11.16b\n"
307 "fmul v3.4s, v6.4s, v1.s[0]\n"
308 ".inst 0x4e82a4ee // smmla v14.4s, v7.16b, v2.16b\n"
309 ".inst 0x4e8ba4ef // smmla v15.4s, v7.16b, v11.16b\n"
310 "uzp1 v7.2d, v8.2d, v5.2d\n"
311 "uzp2 v8.2d, v8.2d, v5.2d\n"
312 "fmul v5.4s, v6.4s, v1.s[1]\n"
313 "scvtf v7.4s, v7.4s, #0x4\n"
314 "scvtf v8.4s, v8.4s, #0x4\n"
315 "fmla v23.4s, v7.4s, v3.4s\n"
316 "fmul v3.4s, v6.4s, v1.s[2]\n"
317 "fmul v1.4s, v6.4s, v1.s[3]\n"
318 "uzp1 v7.2d, v14.2d, v15.2d\n"
319 "uzp2 v14.2d, v14.2d, v15.2d\n"
320 "fmla v22.4s, v8.4s, v5.4s\n"
321 "scvtf v7.4s, v7.4s, #0x4\n"
322 "scvtf v14.4s, v14.4s, #0x4\n"
323 "fmla v21.4s, v7.4s, v3.4s\n"
324 "fmla v20.4s, v14.4s, v1.4s\n"
325 "ldr d3, [x20, #0x0]\n"
326 "add x20, x20, #0x8\n"
327 "movi v15.4s, #0x0\n"
328 "movi v8.4s, #0x0\n"
329 "ldr q5, [x20, #0x0]\n"
330 "ldr q14, [x20, #0x10]\n"
331 "movi v1.4s, #0x0\n"
332 "movi v7.4s, #0x0\n"
333 "fcvtl v3.4s, v3.4h\n"
334 ".inst 0x4e8da4af // smmla v15.4s, v5.16b, v13.16b\n"
335 ".inst 0x4e80a4a8 // smmla v8.4s, v5.16b, v0.16b\n"
336 "ldr q5, [x20, #0x20]\n"
337 ".inst 0x4e8da5c1 // smmla v1.4s, v14.16b, v13.16b\n"
338 "ldr q13, [x20, #0x30]\n"
339 ".inst 0x4e80a5c7 // smmla v7.4s, v14.16b, v0.16b\n"
340 "ldr q14, [x20, #0x40]\n"
341 "ldr q0, [x20, #0x50]\n"
342 ".inst 0x4e8aa4af // smmla v15.4s, v5.16b, v10.16b\n"
343 ".inst 0x4e89a4a8 // smmla v8.4s, v5.16b, v9.16b\n"
344 "ldr q5, [x20, #0x60]\n"
345 ".inst 0x4e8aa5a1 // smmla v1.4s, v13.16b, v10.16b\n"
346 "ldr q10, [x20, #0x70]\n"
347 "add x20, x20, #0x80\n"
348 ".inst 0x4e89a5a7 // smmla v7.4s, v13.16b, v9.16b\n"
349 "fmul v13.4s, v6.4s, v3.s[0]\n"
350 "fmul v9.4s, v6.4s, v3.s[1]\n"
351 ".inst 0x4e8ca5cf // smmla v15.4s, v14.16b, v12.16b\n"
352 ".inst 0x4e84a5c8 // smmla v8.4s, v14.16b, v4.16b\n"
353 "fmul v14.4s, v6.4s, v3.s[2]\n"
354 "fmul v6.4s, v6.4s, v3.s[3]\n"
355 ".inst 0x4e8ca401 // smmla v1.4s, v0.16b, v12.16b\n"
356 ".inst 0x4e84a407 // smmla v7.4s, v0.16b, v4.16b\n"
357 ".inst 0x4e82a4af // smmla v15.4s, v5.16b, v2.16b\n"
358 ".inst 0x4e8ba4a8 // smmla v8.4s, v5.16b, v11.16b\n"
359 ".inst 0x4e82a541 // smmla v1.4s, v10.16b, v2.16b\n"
360 ".inst 0x4e8ba547 // smmla v7.4s, v10.16b, v11.16b\n"
361 "uzp1 v4.2d, v15.2d, v8.2d\n"
362 "uzp2 v2.2d, v15.2d, v8.2d\n"
363 "scvtf v4.4s, v4.4s, #0x4\n"
364 "uzp1 v8.2d, v1.2d, v7.2d\n"
365 "uzp2 v0.2d, v1.2d, v7.2d\n"
366 "scvtf v2.4s, v2.4s, #0x4\n"
367 "fmla v19.4s, v4.4s, v13.4s\n"
368 "scvtf v8.4s, v8.4s, #0x4\n"
369 "scvtf v0.4s, v0.4s, #0x4\n"
370 "fmla v18.4s, v2.4s, v9.4s\n"
371 "fmla v17.4s, v8.4s, v14.4s\n"
372 "fmla v16.4s, v0.4s, v6.4s\n"
373 "subs x23, x23, #0x1\n"
374 "bgt 3b\n"
375 "ld1r { v1.4s }, [%x[clamp_vals]]\n"
376 "add x20, %x[clamp_vals], #0x4\n"
377 "cmp x10, #0x4\n"
378 "ld1r { v0.4s }, [x20]\n"
379 "fmax v31.4s, v31.4s, v1.4s\n"
380 "fmax v30.4s, v30.4s, v1.4s\n"
381 "fmax v29.4s, v29.4s, v1.4s\n"
382 "fmax v28.4s, v28.4s, v1.4s\n"
383 "fmax v27.4s, v27.4s, v1.4s\n"
384 "fmax v26.4s, v26.4s, v1.4s\n"
385 "fmax v25.4s, v25.4s, v1.4s\n"
386 "fmax v24.4s, v24.4s, v1.4s\n"
387 "fmax v23.4s, v23.4s, v1.4s\n"
388 "fmax v22.4s, v22.4s, v1.4s\n"
389 "fmax v21.4s, v21.4s, v1.4s\n"
390 "fmax v20.4s, v20.4s, v1.4s\n"
391 "fmax v19.4s, v19.4s, v1.4s\n"
392 "fmax v18.4s, v18.4s, v1.4s\n"
393 "fmax v17.4s, v17.4s, v1.4s\n"
394 "fmax v16.4s, v16.4s, v1.4s\n"
395 "fmin v31.4s, v31.4s, v0.4s\n"
396 "fmin v30.4s, v30.4s, v0.4s\n"
397 "fmin v29.4s, v29.4s, v0.4s\n"
398 "fmin v28.4s, v28.4s, v0.4s\n"
399 "fmin v27.4s, v27.4s, v0.4s\n"
400 "fmin v26.4s, v26.4s, v0.4s\n"
401 "fmin v25.4s, v25.4s, v0.4s\n"
402 "fmin v24.4s, v24.4s, v0.4s\n"
403 "fmin v23.4s, v23.4s, v0.4s\n"
404 "fmin v22.4s, v22.4s, v0.4s\n"
405 "fmin v21.4s, v21.4s, v0.4s\n"
406 "fmin v20.4s, v20.4s, v0.4s\n"
407 "fmin v19.4s, v19.4s, v0.4s\n"
408 "fmin v18.4s, v18.4s, v0.4s\n"
409 "fmin v17.4s, v17.4s, v0.4s\n"
410 "fmin v16.4s, v16.4s, v0.4s\n"
411 "blt 8f\n"
412 "mov x20, %x[dst]\n"
413 "str q31, [x20, #0x0]\n"
414 "add x20, x20, %x[dst_stride_row]\n"
415 "str q30, [x20, #0x0]\n"
416 "add x20, x20, %x[dst_stride_row]\n"
417 "str q29, [x20, #0x0]\n"
418 "add x20, x20, %x[dst_stride_row]\n"
419 "str q28, [x20, #0x0]\n"
420 "add x20, x20, %x[dst_stride_row]\n"
421 "str q27, [x20, #0x0]\n"
422 "add x20, x20, %x[dst_stride_row]\n"
423 "str q26, [x20, #0x0]\n"
424 "add x20, x20, %x[dst_stride_row]\n"
425 "str q25, [x20, #0x0]\n"
426 "add x20, x20, %x[dst_stride_row]\n"
427 "str q24, [x20, #0x0]\n"
428 "add x20, x20, %x[dst_stride_row]\n"
429 "str q23, [x20, #0x0]\n"
430 "add x20, x20, %x[dst_stride_row]\n"
431 "str q22, [x20, #0x0]\n"
432 "add x20, x20, %x[dst_stride_row]\n"
433 "str q21, [x20, #0x0]\n"
434 "add x20, x20, %x[dst_stride_row]\n"
435 "str q20, [x20, #0x0]\n"
436 "add x20, x20, %x[dst_stride_row]\n"
437 "str q19, [x20, #0x0]\n"
438 "add x20, x20, %x[dst_stride_row]\n"
439 "str q18, [x20, #0x0]\n"
440 "add x20, x20, %x[dst_stride_row]\n"
441 "str q17, [x20, #0x0]\n"
442 "add x20, x20, %x[dst_stride_row]\n"
443 "str q16, [x20, #0x0]\n"
444 "b 13f\n"
445 "8:" // Partial output
446 "mov x28, %x[dst]\n"
447 "add x26, x28, %x[dst_stride_row], LSL #2\n"
448 "add x25, x26, %x[dst_stride_row], LSL #1\n"
449 "add x24, x26, %x[dst_stride_row]\n"
450 "add x23, x25, %x[dst_stride_row]\n"
451 "add x22, x28, %x[dst_stride_row], LSL #1\n"
452 "add x21, x28, %x[dst_stride_row]\n"
453 "add x20, x22, %x[dst_stride_row]\n"
454 "add x27, x23, %x[dst_stride_row]\n"
455 "tbz x10, #1, 9f\n"
456 "st1 { v24.d }[0], [x23], #0x8\n"
457 "st1 { v25.d }[0], [x25], #0x8\n"
458 "st1 { v26.d }[0], [x24], #0x8\n"
459 "st1 { v27.d }[0], [x26], #0x8\n"
460 "st1 { v28.d }[0], [x20], #0x8\n"
461 "st1 { v29.d }[0], [x22], #0x8\n"
462 "st1 { v30.d }[0], [x21], #0x8\n"
463 "st1 { v31.d }[0], [x28], #0x8\n"
464 "tbz x10, #0, 10f\n"
465 "st1 { v24.s }[2], [x23]\n"
466 "st1 { v25.s }[2], [x25]\n"
467 "st1 { v26.s }[2], [x24]\n"
468 "st1 { v27.s }[2], [x26]\n"
469 "st1 { v28.s }[2], [x20]\n"
470 "st1 { v29.s }[2], [x22]\n"
471 "st1 { v30.s }[2], [x21]\n"
472 "st1 { v31.s }[2], [x28]\n"
473 "b 10f\n"
474 "9:" // Output block 0: partial_1_0
475 "st1 { v24.s }[0], [x23]\n"
476 "st1 { v25.s }[0], [x25]\n"
477 "st1 { v26.s }[0], [x24]\n"
478 "st1 { v27.s }[0], [x26]\n"
479 "st1 { v28.s }[0], [x20]\n"
480 "st1 { v29.s }[0], [x22]\n"
481 "st1 { v30.s }[0], [x21]\n"
482 "st1 { v31.s }[0], [x28]\n"
483 "10:" // Output block 0: Done
484 "add x26, x27, %x[dst_stride_row], LSL #2\n"
485 "add x25, x27, %x[dst_stride_row], LSL #1\n"
486 "add x24, x26, %x[dst_stride_row], LSL #1\n"
487 "add x23, x27, %x[dst_stride_row]\n"
488 "add x22, x25, %x[dst_stride_row]\n"
489 "add x21, x26, %x[dst_stride_row]\n"
490 "add x20, x24, %x[dst_stride_row]\n"
491 "tbz x10, #1, 11f\n"
492 "st1 { v16.d }[0], [x20], #0x8\n"
493 "st1 { v17.d }[0], [x24], #0x8\n"
494 "st1 { v18.d }[0], [x21], #0x8\n"
495 "st1 { v19.d }[0], [x26], #0x8\n"
496 "st1 { v20.d }[0], [x22], #0x8\n"
497 "st1 { v21.d }[0], [x25], #0x8\n"
498 "st1 { v22.d }[0], [x23], #0x8\n"
499 "st1 { v23.d }[0], [x27], #0x8\n"
500 "tbz x10, #0, 12f\n"
501 "st1 { v16.s }[2], [x20]\n"
502 "st1 { v17.s }[2], [x24]\n"
503 "st1 { v18.s }[2], [x21]\n"
504 "st1 { v19.s }[2], [x26]\n"
505 "st1 { v20.s }[2], [x22]\n"
506 "st1 { v21.s }[2], [x25]\n"
507 "st1 { v22.s }[2], [x23]\n"
508 "st1 { v23.s }[2], [x27]\n"
509 "b 12f\n"
510 "11:" // Output block 1: partial_1_0
511 "st1 { v16.s }[0], [x20]\n"
512 "st1 { v17.s }[0], [x24]\n"
513 "st1 { v18.s }[0], [x21]\n"
514 "st1 { v19.s }[0], [x26]\n"
515 "st1 { v20.s }[0], [x22]\n"
516 "st1 { v21.s }[0], [x25]\n"
517 "st1 { v22.s }[0], [x23]\n"
518 "st1 { v23.s }[0], [x27]\n"
519 "12:" // Output block 1: Done
520 "13:" // Output stage exit
521 "subs x10, x10, #0x4\n"
522 "add %x[dst], %x[dst], #0x10\n"
523 "bgt 2b\n"
524 "mov x20, #0x4\n"
525 "sub x13, x13, #0x10\n"
526 "cmp x13, #0x10\n"
527 "mov %x[dst], x9\n"
528 "madd %x[lhs_packed], x20, x12, %x[lhs_packed]\n"
529 "bge 1b\n"
530 "14:" // Row loop skip
531 "cbz x13, 23f\n"
532 "15:" // Row tail: Row loop
533 "mov x26, %x[rhs_packed]\n"
534 "mov x25, %x[n]\n"
535 "add x24, %x[dst], %x[dst_stride_row], LSL #2\n"
536 "16:" // Row tail: Column loop
537 "movi v31.16b, #0x0\n"
538 "movi v30.16b, #0x0\n"
539 "mov x27, %x[lhs_packed]\n"
540 "mov x20, %x[num_blocks]\n"
541 "movi v29.16b, #0x0\n"
542 "movi v28.16b, #0x0\n"
543 "17:" // Row tail: Block loop
544 "ldr d16, [x26, #0x0]\n"
545 "ldr d10, [x27, #0x0]\n"
546 "add x26, x26, #0x8\n"
547 "add x27, x27, #0x8\n"
548 "ldr q9, [x26, #0x0]\n"
549 "ldr q8, [x26, #0x10]\n"
550 "movi v7.4s, #0x0\n"
551 "movi v6.4s, #0x0\n"
552 "ldr q5, [x27, #0x0]\n"
553 "ldr q4, [x27, #0x10]\n"
554 "movi v3.4s, #0x0\n"
555 "movi v2.4s, #0x0\n"
556 "ldr q1, [x26, #0x20]\n"
557 "ldr q0, [x26, #0x30]\n"
558 "movi v27.16b, #0xf0\n"
559 "fcvtl v26.4s, v16.4h\n"
560 "ldr q23, [x27, #0x20]\n"
561 "ldr q22, [x27, #0x30]\n"
562 "shl v21.16b, v9.16b, #0x4\n"
563 "shl v20.16b, v8.16b, #0x4\n"
564 "ldr q25, [x27, #0x40]\n"
565 "ldr q24, [x27, #0x50]\n"
566 "and v9.16b, v9.16b, v27.16b\n"
567 "and v8.16b, v8.16b, v27.16b\n"
568 "ldr q19, [x27, #0x60]\n"
569 "ldr q18, [x27, #0x70]\n"
570 "shl v17.16b, v1.16b, #0x4\n"
571 "shl v16.16b, v0.16b, #0x4\n"
572 ".inst 0x4e95a4a7 // smmla v7.4s, v5.16b, v21.16b\n"
573 ".inst 0x4e94a4a6 // smmla v6.4s, v5.16b, v20.16b\n"
574 "and v1.16b, v1.16b, v27.16b\n"
575 "add x26, x26, #0x40\n"
576 ".inst 0x4e95a483 // smmla v3.4s, v4.16b, v21.16b\n"
577 ".inst 0x4e94a482 // smmla v2.4s, v4.16b, v20.16b\n"
578 "and v0.16b, v0.16b, v27.16b\n"
579 "add x27, x27, #0x80\n"
580 "fcvtl v10.4s, v10.4h\n"
581 ".inst 0x4e91a6e7 // smmla v7.4s, v23.16b, v17.16b\n"
582 ".inst 0x4e90a6e6 // smmla v6.4s, v23.16b, v16.16b\n"
583 ".inst 0x4e91a6c3 // smmla v3.4s, v22.16b, v17.16b\n"
584 ".inst 0x4e90a6c2 // smmla v2.4s, v22.16b, v16.16b\n"
585 "fmul v23.4s, v26.4s, v10.s[0]\n"
586 "fmul v22.4s, v26.4s, v10.s[1]\n"
587 "fmul v21.4s, v26.4s, v10.s[2]\n"
588 "fmul v20.4s, v26.4s, v10.s[3]\n"
589 ".inst 0x4e89a727 // smmla v7.4s, v25.16b, v9.16b\n"
590 ".inst 0x4e88a726 // smmla v6.4s, v25.16b, v8.16b\n"
591 ".inst 0x4e89a703 // smmla v3.4s, v24.16b, v9.16b\n"
592 ".inst 0x4e88a702 // smmla v2.4s, v24.16b, v8.16b\n"
593 ".inst 0x4e81a667 // smmla v7.4s, v19.16b, v1.16b\n"
594 ".inst 0x4e80a666 // smmla v6.4s, v19.16b, v0.16b\n"
595 ".inst 0x4e81a643 // smmla v3.4s, v18.16b, v1.16b\n"
596 ".inst 0x4e80a642 // smmla v2.4s, v18.16b, v0.16b\n"
597 "uzp1 v19.2d, v7.2d, v6.2d\n"
598 "uzp2 v18.2d, v7.2d, v6.2d\n"
599 "scvtf v19.4s, v19.4s, #0x4\n"
600 "uzp1 v17.2d, v3.2d, v2.2d\n"
601 "uzp2 v16.2d, v3.2d, v2.2d\n"
602 "scvtf v18.4s, v18.4s, #0x4\n"
603 "fmla v31.4s, v19.4s, v23.4s\n"
604 "scvtf v17.4s, v17.4s, #0x4\n"
605 "scvtf v16.4s, v16.4s, #0x4\n"
606 "fmla v30.4s, v18.4s, v22.4s\n"
607 "fmla v29.4s, v17.4s, v21.4s\n"
608 "fmla v28.4s, v16.4s, v20.4s\n"
609 "subs x20, x20, #0x1\n"
610 "bgt 17b\n"
611 "ld1r { v17.4s }, [%x[clamp_vals]]\n"
612 "add x20, %x[clamp_vals], #0x4\n"
613 "cmp x25, #0x4\n"
614 "ld1r { v16.4s }, [x20]\n"
615 "fmax v31.4s, v31.4s, v17.4s\n"
616 "fmax v30.4s, v30.4s, v17.4s\n"
617 "fmax v29.4s, v29.4s, v17.4s\n"
618 "fmax v28.4s, v28.4s, v17.4s\n"
619 "fmin v31.4s, v31.4s, v16.4s\n"
620 "fmin v30.4s, v30.4s, v16.4s\n"
621 "fmin v29.4s, v29.4s, v16.4s\n"
622 "fmin v28.4s, v28.4s, v16.4s\n"
623 "blt 19f\n"
624 "mov x20, %x[dst]\n"
625 "cmp x13, #0x1\n"
626 "str q31, [x20, #0x0]\n"
627 "add x20, x20, %x[dst_stride_row]\n"
628 "ble 22f\n"
629 "cmp x13, #0x2\n"
630 "str q30, [x20, #0x0]\n"
631 "add x20, x20, %x[dst_stride_row]\n"
632 "ble 22f\n"
633 "cmp x13, #0x3\n"
634 "str q29, [x20, #0x0]\n"
635 "add x20, x20, %x[dst_stride_row]\n"
636 "ble 22f\n"
637 "str q28, [x20, #0x0]\n"
638 "b 22f\n"
639 "19:" // Row tail: Partial output
640 "mov x23, %x[dst]\n"
641 "cmp x13, #0x1\n"
642 "add x22, x23, %x[dst_stride_row]\n"
643 "csel x22, x22, x23, GT\n"
644 "cmp x13, #0x2\n"
645 "add x21, x23, %x[dst_stride_row], LSL #1\n"
646 "csel x21, x21, x22, GT\n"
647 "cmp x13, #0x3\n"
648 "add x20, x21, %x[dst_stride_row]\n"
649 "csel x20, x20, x21, GT\n"
650 "tbz x25, #1, 20f\n"
651 "st1 { v28.d }[0], [x20], #0x8\n"
652 "st1 { v29.d }[0], [x21], #0x8\n"
653 "st1 { v30.d }[0], [x22], #0x8\n"
654 "st1 { v31.d }[0], [x23], #0x8\n"
655 "tbz x25, #0, 21f\n"
656 "st1 { v28.s }[2], [x20]\n"
657 "st1 { v29.s }[2], [x21]\n"
658 "st1 { v30.s }[2], [x22]\n"
659 "st1 { v31.s }[2], [x23]\n"
660 "b 21f\n"
661 "20:" // Row tail: Output block 0: partial_1_0
662 "st1 { v28.s }[0], [x20]\n"
663 "st1 { v29.s }[0], [x21]\n"
664 "st1 { v30.s }[0], [x22]\n"
665 "st1 { v31.s }[0], [x23]\n"
666 "21:" // Row tail: Output block 0: Done
667 "22:" // Row tail: Output stage exit
668 "subs x25, x25, #0x4\n"
669 "add %x[dst], %x[dst], #0x10\n"
670 "bgt 16b\n"
671 "subs x13, x13, #0x4\n"
672 "add %x[lhs_packed], %x[lhs_packed], x12\n"
673 "mov %x[dst], x24\n"
674 "bgt 15b\n"
675 "23:" // Row tail: Row loop skip
676 : [dst] "+&r"(dst), [lhs_packed] "+&r"(lhs_packed)
677 47 : [clamp_vals] "r"(clamp_vals), [dst_stride_row] "r"(dst_stride_row), [m] "r"(m), [n] "r"(n),
678 47 [num_blocks] "r"(num_blocks), [rhs_packed] "r"(rhs_packed)
679 : "cc", "memory", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v2", "v20",
680 "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7",
681 "v8", "v9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x9");
682 47 }
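The block loop above decodes the packed int4 RHS without a separate unpack pass: "shl ..., #0x4" moves each low nibble into the high nibble, "and" with the 0xf0 mask isolates each high nibble, so both operands enter SMMLA as the original signed 4-bit values scaled by 16, and the final "scvtf ..., #0x4" converts the accumulator with 4 fractional bits, dividing that factor of 16 back out. A hedged NEON-intrinsics sketch of the decode step (illustrative only; the kernel does this in assembly):

    #include <arm_neon.h>

    /* Sketch: each packed byte holds two signed 4-bit values. Shifting left
     * by 4 places the low nibble in the high position; masking with 0xf0
     * keeps the high nibble. Both outputs equal the original int4 values
     * times 16, which the kernel later undoes via "scvtf ..., #0x4". */
    static inline void decode_int4_pair(int8x16_t packed, int8x16_t* lo, int8x16_t* hi) {
        const int8x16_t mask = vreinterpretq_s8_u8(vdupq_n_u8(0xf0));
        *lo = vshlq_n_s8(packed, 4); /* low nibbles,  scaled by 16 */
        *hi = vandq_s8(packed, mask); /* high nibbles, scaled by 16 */
    }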
683
684 #endif // Architectural feature check
685
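Taken together, a caller queries the packed offsets per tile and then invokes the kernel, which loops over the full m and n ranges itself (including the row tail). A minimal hedged sketch of the call sequence, assuming lhs_p and rhs_p already hold data packed to this kernel's layout:

    #include <float.h>

    /* Sketch: one call covers the whole m x n output; clamping disabled
     * here by passing the full float range. dst is a row-major m x n
     * f32 buffer. */
    void run_tile(size_t m, size_t n, size_t k,
                  const void* lhs_p, const void* rhs_p, float* dst) {
        const size_t dst_stride = n * sizeof(float);
        kai_run_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm(
            m, n, k, /*bl=*/32, lhs_p, rhs_p, dst,
            /*dst_stride_row=*/dst_stride, /*dst_stride_col=*/sizeof(float),
            /*scalar_min=*/-FLT_MAX, /*scalar_max=*/FLT_MAX);
    }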