ggml-cpu: add floating-point conversion kernels

This commit is contained in:
taimur-10x 2025-11-11 21:02:03 +05:00 committed by Ludovic Henry
parent 28fcd3ec8e
commit 96128a9d67
1 changed file with 49 additions and 6 deletions

View File

@@ -3296,12 +3296,30 @@ void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) {
__m128 y_vec = _mm_cvtph_ps(x_vec); __m128 y_vec = _mm_cvtph_ps(x_vec);
_mm_storeu_ps(y + i, y_vec); _mm_storeu_ps(y + i, y_vec);
} }
#elif defined(__riscv_zvfh) #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfhmin)
for (int vl; i < n; i += vl) { // calculate step size
vl = __riscv_vsetvl_e16m1(n - i); const int epr = __riscv_vsetvlmax_e16m4();
vfloat16m1_t vx = __riscv_vle16_v_f16m1((_Float16 *)&x[i], vl); const int step = epr * 2;
vfloat32m2_t vy = __riscv_vfwcvt_f_f_v_f32m2(vx, vl); const int np = (n & ~(step - 1));
__riscv_vse32_v_f32m2(&y[i], vy, vl);
// unroll by 2
for (; i < np; i += step) {
vfloat16m4_t ax0 = __riscv_vle16_v_f16m4((const _Float16*)x + i, epr);
vfloat32m8_t ay0 = __riscv_vfwcvt_f_f_v_f32m8(ax0, epr);
__riscv_vse32_v_f32m8(y + i, ay0, epr);
vfloat16m4_t ax1 = __riscv_vle16_v_f16m4((const _Float16*)x + i + epr, epr);
vfloat32m8_t ay1 = __riscv_vfwcvt_f_f_v_f32m8(ax1, epr);
__riscv_vse32_v_f32m8(y + i + epr, ay1, epr);
}
// leftovers
int vl;
for (i = np; i < n; i += vl) {
vl = __riscv_vsetvl_e16m4(n - i);
vfloat16m4_t ax0 = __riscv_vle16_v_f16m4((const _Float16*)x + i, vl);
vfloat32m8_t ay0 = __riscv_vfwcvt_f_f_v_f32m8(ax0, vl);
__riscv_vse32_v_f32m8(y + i, ay0, vl);
} }
#endif #endif
@@ -3347,6 +3365,31 @@ void ggml_cpu_bf16_to_fp32(const ggml_bf16_t * x, float * y, int64_t n) {
(const __m128i *)(x + i))), (const __m128i *)(x + i))),
16))); 16)));
} }
#elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfbfmin)
// calculate step size
const int epr = __riscv_vsetvlmax_e16m4();
const int step = epr * 2;
const int np = (n & ~(step - 1));
// unroll by 2
for (; i < np; i += step) {
vbfloat16m4_t ax0 = __riscv_vle16_v_bf16m4((const __bf16*)x + i, epr);
vfloat32m8_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m8(ax0, epr);
__riscv_vse32_v_f32m8(y + i, ay0, epr);
vbfloat16m4_t ax1 = __riscv_vle16_v_bf16m4((const __bf16*)x + i + epr, epr);
vfloat32m8_t ay1 = __riscv_vfwcvtbf16_f_f_v_f32m8(ax1, epr);
__riscv_vse32_v_f32m8(y + i + epr, ay1, epr);
}
// leftovers
int vl;
for (i = np; i < n; i += vl) {
vl = __riscv_vsetvl_e16m4(n - i);
vbfloat16m4_t ax0 = __riscv_vle16_v_bf16m4((const __bf16*)x + i, vl);
vfloat32m8_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m8(ax0, vl);
__riscv_vse32_v_f32m8(y + i, ay0, vl);
}
#endif #endif
for (; i < n; i++) { for (; i < n; i++) {
y[i] = GGML_BF16_TO_FP32(x[i]); y[i] = GGML_BF16_TO_FP32(x[i]);