//
// SPDX-FileCopyrightText: Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
//
// SPDX-License-Identifier: Apache-2.0
//

#if (!defined(__aarch64__) || !defined(__ARM_FEATURE_SVE2)) && !defined(_M_ARM64)
#error "This file must be compiled for AArch64, FEAT_SVE2"
#else  // Architectural features check.

#include "kai_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa.h"

#include <stddef.h>

#include "kai/kai_common.h"

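// Argument block passed to the micro-kernel declared below (implemented outside this
// translation unit). The hexadecimal comments record each field's byte offset within the
// struct under the AArch64 LP64 ABI.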
typedef struct {
    float* dst;              // 0
    const void* lhs_packed;  // 0x8
    const void* rhs_packed;  // 0x10
    size_t dst_stride_row;   // 0x18
    size_t m;                // 0x20
    size_t n;                // 0x28
    size_t lhs_stride;       // 0x30
    size_t rhs_stride;       // 0x38
    size_t rhs_row_bytes;    // 0x40
    size_t m_blk;            // 0x48
    size_t dst_inc;          // 0x50
    float clamp_min;         // 0x58
    float clamp_max;         // 0x5c
} KernelArgs;

void kai_kernel_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(KernelArgs* args_ptr);

// Compute args
static const size_t kai_m_step = 1;  // multiple of vector length
static const size_t kai_n_step = 4;  // multiple of vector length
// Packing args
static const size_t kai_mr = 1;  // multiple of vector length
static const size_t kai_nr = 4;  // multiple of vector length
static const size_t kai_kr = 4;
static const size_t kai_sr = 1;
// LHS format args (num. bytes per value, multiplier, zero_point (if asymmetric))
static const size_t kai_num_bytes_qvalue_lhs = 1;
static const size_t kai_num_bytes_multiplier_lhs = 4;
static const size_t kai_num_bytes_zp_lhs = 4;
// RHS format args (num. bytes per value, multiplier, zero_point (if asymmetric), and reduction sum (if LHS is
// asymmetric))
static const size_t kai_num_bytes_qvalue_rhs = 1;
static const size_t kai_num_bytes_multiplier_rhs = 4;
static const size_t kai_num_bytes_rsum_rhs = 4;
// DST format args
static const size_t kai_num_bytes_dst_value = 4;
// Extra args
static const size_t kai_num_bytes_bias = 4;
static const size_t kai_k_multiple_of = 32;

inline static size_t kai_k_roundedup(size_t k) {
    // Round up k to be a multiple of 32.
    return kai_roundup(k, kai_k_multiple_of);
}

inline static size_t kai_get_lhs_packed_stride(size_t k) {
    const size_t k_internal = kai_k_roundedup(k);
    KAI_ASSERT((k_internal % kai_k_multiple_of) == 0);
    const size_t mr = kai_get_mr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa();
    size_t lhs_packed_stride = mr * ((k_internal * kai_num_bytes_qvalue_lhs) + kai_num_bytes_multiplier_lhs);
    // Since the LHS matrix is asymmetric with per-row quantization, we must also include
    // the number of bytes that hold the zero-point value.
    lhs_packed_stride += mr * kai_num_bytes_zp_lhs;

    return lhs_packed_stride;
}

inline static size_t kai_get_rhs_packed_stride(size_t k) {
    const size_t k_internal = kai_k_roundedup(k);
    KAI_ASSERT((k_internal % kai_k_multiple_of) == 0);
    const size_t nr = kai_get_nr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa();
    size_t rhs_packed_stride = nr * (k_internal * kai_num_bytes_qvalue_rhs);
    rhs_packed_stride += nr * kai_num_bytes_multiplier_rhs;
    // Since the LHS matrix is quantized asymmetrically with per-row quantization, we also include
    // the number of bytes for the reduction sum.
    rhs_packed_stride += nr * kai_num_bytes_rsum_rhs;
    // Since the bias is packed with the RHS matrix, the stride is adjusted by the number of bytes of the bias.
    rhs_packed_stride += nr * kai_num_bytes_bias;

    return rhs_packed_stride;
}
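
// Illustrative packed strides, assuming a 512-bit streaming vector length, i.e.
// kai_get_sme_vector_length_u8() == 64, so mr == 16 and nr == 64. For k == 1000,
// k_internal == 1024, giving:
//   lhs_packed_stride = 16 * (1024 * 1 + 4) + 16 * 4 = 16512 bytes
//   rhs_packed_stride = 64 * (1024 * 1) + 64 * 4 + 64 * 4 + 64 * 4 = 66304 bytes
// The actual values depend on the SME vector length of the target CPU.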

size_t kai_get_m_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(void) {
    return kai_m_step * kai_get_sme_vector_length_u8() / kai_kr;
}

size_t kai_get_n_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(void) {
    return kai_n_step * kai_get_sme_vector_length_u8() / kai_kr;
}

size_t kai_get_mr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(void) {
    return kai_mr * kai_get_sme_vector_length_u8() / kai_kr;
}

size_t kai_get_nr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(void) {
    return kai_nr * kai_get_sme_vector_length_u8() / kai_kr;
}
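
// Note: because kai_m_step == kai_mr and kai_n_step == kai_nr, the m/n step sizes reported
// above match mr/nr. With a 512-bit streaming vector length they evaluate to 16 rows and
// 64 columns per step; other vector lengths scale these values proportionally.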

size_t kai_get_kr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(void) {
    return kai_kr;
}

size_t kai_get_sr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(void) {
    return kai_sr;
}

size_t kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(size_t m_idx, size_t k) {
    KAI_ASSERT((m_idx % kai_get_m_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa()) == 0);

    const size_t mr = kai_get_mr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa();

    return (m_idx / mr) * kai_get_lhs_packed_stride(k);
}

size_t kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(size_t n_idx, size_t k) {
    KAI_ASSERT((n_idx % kai_get_n_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa()) == 0);

    const size_t nr = kai_get_nr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa();

    return (n_idx / nr) * kai_get_rhs_packed_stride(k);
}
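
// m_idx and n_idx are row/column indices into the original (unpacked) matrices and must be
// multiples of the m/n step sizes; the two helpers above return byte offsets into the packed
// buffers, advancing one full packed-block stride per group of mr rows or nr columns.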

size_t kai_get_dst_offset_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(
    size_t m_idx, size_t n_idx, size_t dst_stride) {
    KAI_ASSERT((m_idx % kai_get_m_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa()) == 0);
    KAI_ASSERT((n_idx % kai_get_n_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa()) == 0);

    return ((n_idx * kai_num_bytes_dst_value) + m_idx * dst_stride);
}
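
// The value returned above is a byte offset: for a row-major float destination where
// dst_stride == n * sizeof(float), it addresses element (m_idx, n_idx) of the output matrix.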

size_t kai_get_dst_size_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(size_t m, size_t n) {
    return (m * n * kai_num_bytes_dst_value);
}

void kai_run_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(
    size_t m,                         //
    size_t n,                         //
    size_t k,                         //
    const void* restrict lhs_packed,  //
    const void* restrict rhs_packed,  //
    float* restrict dst,              // NOLINT(readability-non-const-parameter)
    size_t dst_stride_row,            //
    size_t dst_stride_col,            //
    float scalar_min,                 //
    float scalar_max) {
    KAI_UNUSED(dst_stride_col);
    KAI_ASSERT(n > 0);
    KAI_ASSERT(m > 0);

    const size_t mr = kai_get_mr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa();
    const size_t nr = kai_get_nr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa();

    KernelArgs args;
    const size_t k_internal = kai_k_roundedup(k);
    args.dst = dst;
    args.lhs_packed = lhs_packed;
    args.rhs_packed = rhs_packed;
    args.dst_stride_row = dst_stride_row;
    args.m = m;
    args.n = n;
    args.lhs_stride = kai_get_lhs_packed_stride(k);
    args.rhs_stride = kai_get_rhs_packed_stride(k);
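    // Derived block sizes: bytes of quantized values in one packed RHS column block
    // (nr columns of k_internal int8 values), bytes of quantized values in one packed LHS
    // row block (mr rows of k_internal int8 values), and the byte increment from one block
    // of mr destination rows to the next.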
    args.rhs_row_bytes = nr * k_internal;
    args.m_blk = mr * k_internal;
    args.dst_inc = mr * dst_stride_row;
    args.clamp_min = scalar_min;
    args.clamp_max = scalar_max;

    kai_kernel_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(&args);
}
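
// Usage sketch (illustrative only): the LHS and RHS buffers must already be packed in the
// qai8dxp1vlx4 / qsi8cxp4vlx4 layouts by the matching packing routines, and the destination
// is row-major f32. A call that clamps to the full float range might look like:
//
//   const size_t dst_stride_row = n * sizeof(float);
//   kai_run_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme_mopa(
//       m, n, k, lhs_packed, rhs_packed, dst, dst_stride_row, sizeof(float), -FLT_MAX, FLT_MAX);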

#endif  // Architectural feature check