Merge branch 'master' into dev-refactoring
commit 12c75f17c4
@@ -1,5 +1,6 @@
#include "log.h"

#include <chrono>
#include <condition_variable>
#include <cstdarg>
#include <cstdio>
@@ -3,6 +3,7 @@
#include "log.h"
#include "llama.h"

#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstring>
@@ -3,6 +3,7 @@
#include "log.h"
#include "llama.h"

#include <chrono>
#include <algorithm>
#include <array>
#include <atomic>
Binary file not shown.
@@ -254,12 +254,12 @@ export default function ChatMessage({
🔄 Regenerate
</button>
)}
<CopyButton
className="badge btn-mini show-on-hover mr-2"
content={msg.content}
/>
</>
)}
<CopyButton
className="badge btn-mini show-on-hover mr-2"
content={msg.content}
/>
</div>
)}
</div>
@@ -742,7 +742,7 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int64_t k)
y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
}
}
#elif defined(__wasm_simd128__)
#elif defined __wasm_simd128__
for (int i = 0; i < nb; i++) {
v128_t srcv [8];
v128_t asrcv[8];
@@ -1030,7 +1030,7 @@ void quantize_row_q8_1(const float * restrict x, void * restrict vy, int64_t k)

y[i].s = GGML_FP32_TO_FP16(d * vaddvq_s32(accv));
}
#elif defined(__wasm_simd128__)
#elif defined __wasm_simd128__
for (int i = 0; i < nb; i++) {
v128_t srcv [8];
v128_t asrcv[8];
@@ -1644,7 +1644,87 @@ static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -1
|
|||
//===================================== Q8_K ==============================================
|
||||
|
||||
void quantize_row_q8_K(const float * restrict x, void * restrict y, int64_t k) {
|
||||
#ifdef __wasm_simd128__
|
||||
assert(k % QK_K == 0);
|
||||
const int64_t nb = k / QK_K;
|
||||
block_q8_K * restrict yc = y; // Cast to proper type
|
||||
|
||||
for (int i = 0; i < nb; i++) {
|
||||
const float * x_block = x + i * QK_K;
|
||||
|
||||
v128_t min_vec = wasm_v128_load(x_block);
|
||||
v128_t max_vec = min_vec;
|
||||
|
||||
for (int j = 4; j < QK_K; j += 4) {
|
||||
v128_t x_vec = wasm_v128_load(x_block + j);
|
||||
max_vec = wasm_f32x4_pmax(max_vec, x_vec);
|
||||
min_vec = wasm_f32x4_pmin(min_vec, x_vec);
|
||||
}
|
||||
max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 2, 3, 0, 1));
|
||||
max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 1, 0, 3, 2));
|
||||
min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 2, 3, 0, 1));
|
||||
min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 1, 0, 3, 2));
|
||||
float max = wasm_f32x4_extract_lane(max_vec, 0);
|
||||
float min = wasm_f32x4_extract_lane(min_vec, 0);
|
||||
float amax = -min > max ? min : max;
|
||||
|
||||
if (amax == 0.0f) {
|
||||
yc[i].d = 0.0f;
|
||||
const v128_t zero = wasm_i8x16_splat(0);
|
||||
for (int j = 0; j < QK_K; j += 16) {
|
||||
wasm_v128_store(yc[i].qs + j, zero);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
const float iscale = -127.0f / amax;
|
||||
const v128_t scale_vec = wasm_f32x4_splat(iscale);
|
||||
|
||||
// Process 16 elements per iteration
|
||||
for (int j = 0, jb = 0; j < QK_K; j += 16, jb++) {
|
||||
// Load and quantize 16 floats
|
||||
v128_t x0 = wasm_v128_load(x_block + j);
|
||||
v128_t x1 = wasm_v128_load(x_block + j + 4);
|
||||
v128_t x2 = wasm_v128_load(x_block + j + 8);
|
||||
v128_t x3 = wasm_v128_load(x_block + j + 12);
|
||||
|
||||
v128_t q0 = wasm_f32x4_nearest(wasm_f32x4_mul(x0, scale_vec));
|
||||
v128_t q1 = wasm_f32x4_nearest(wasm_f32x4_mul(x1, scale_vec));
|
||||
v128_t q2 = wasm_f32x4_nearest(wasm_f32x4_mul(x2, scale_vec));
|
||||
v128_t q3 = wasm_f32x4_nearest(wasm_f32x4_mul(x3, scale_vec));
|
||||
|
||||
// Convert to i32 with saturation
|
||||
v128_t i0 = wasm_i32x4_trunc_sat_f32x4(q0);
|
||||
v128_t i1 = wasm_i32x4_trunc_sat_f32x4(q1);
|
||||
v128_t i2 = wasm_i32x4_trunc_sat_f32x4(q2);
|
||||
v128_t i3 = wasm_i32x4_trunc_sat_f32x4(q3);
|
||||
|
||||
// Pack into 16 i8 values
|
||||
v128_t i8 = wasm_i8x16_narrow_i16x8(
|
||||
wasm_i16x8_narrow_i32x4(i0, i1),
|
||||
wasm_i16x8_narrow_i32x4(i2, i3)
|
||||
);
|
||||
wasm_v128_store(yc[i].qs + j, i8);
|
||||
|
||||
// Calculate bsums using SIMD
|
||||
v128_t sum16 = wasm_i16x8_add(
|
||||
wasm_i16x8_extend_low_i8x16(i8),
|
||||
wasm_i16x8_extend_high_i8x16(i8)
|
||||
);
|
||||
v128_t sum32 = wasm_i32x4_add(
|
||||
wasm_i32x4_extend_low_i16x8(sum16),
|
||||
wasm_i32x4_extend_high_i16x8(sum16)
|
||||
);
|
||||
sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 2, 3, 0, 1));
|
||||
sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 1, 0, 3, 2));
|
||||
yc[i].bsums[jb] = wasm_i32x4_extract_lane(sum32, 0);
|
||||
}
|
||||
|
||||
yc[i].d = 1.0f / iscale;
|
||||
}
|
||||
#else
|
||||
quantize_row_q8_K_ref(x, y, k);
|
||||
#endif
|
||||
}
|
||||
|
||||
//===================================== Dot products =================================
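
For reference, the WASM path above computes per 256-value super-block what the following scalar sketch does. This is a simplified stand-in (not the actual quantize_row_q8_K_ref): QK_K is assumed to be 256 and the struct mirrors only the fields used above.

#include <assert.h>
#include <math.h>
#include <stdint.h>

#define QK_K 256

// simplified stand-in for ggml's block_q8_K (only the fields used above)
typedef struct {
    float   d;               // dequantization scale
    int8_t  qs[QK_K];        // quantized values
    int16_t bsums[QK_K/16];  // sum of each group of 16 quants
} q8_K_block;

// hypothetical scalar equivalent of the SIMD quantization above
static void quantize_row_q8_K_scalar(const float * x, q8_K_block * y, int64_t k) {
    assert(k % QK_K == 0);
    const int64_t nb = k / QK_K;
    for (int64_t i = 0; i < nb; i++) {
        const float * xb = x + i*QK_K;
        // pick the signed value with the largest magnitude, as the SIMD path does
        float min = xb[0], max = xb[0];
        for (int j = 1; j < QK_K; j++) {
            if (xb[j] < min) min = xb[j];
            if (xb[j] > max) max = xb[j];
        }
        const float amax = -min > max ? min : max;
        if (amax == 0.0f) {
            y[i].d = 0.0f;
            for (int j = 0; j < QK_K;    j++) y[i].qs[j]    = 0;
            for (int j = 0; j < QK_K/16; j++) y[i].bsums[j] = 0;
            continue;
        }
        const float iscale = -127.0f/amax;
        for (int jb = 0; jb < QK_K/16; jb++) {
            int sum = 0;
            for (int l = 0; l < 16; l++) {
                const int j = 16*jb + l;
                int q = (int) nearbyintf(iscale*xb[j]);  // round to nearest
                if (q >  127) q =  127;                  // saturate like the narrowing ops
                if (q < -128) q = -128;
                y[i].qs[j] = (int8_t) q;
                sum += q;
            }
            y[i].bsums[jb] = (int16_t) sum;
        }
        y[i].d = 1.0f/iscale;
    }
}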
|
||||
|
|
@@ -2002,6 +2082,94 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r
|
|||
}
|
||||
|
||||
sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
|
||||
#elif defined __wasm_simd128__
|
||||
v128_t sumv = wasm_f32x4_splat(0.0f);
|
||||
|
||||
const v128_t m4b = wasm_i8x16_splat(0x0F);
|
||||
const v128_t s8b = wasm_i8x16_splat(0x8);
|
||||
|
||||
for (; ib + 1 < nb; ib += 2) {
|
||||
const block_q4_0 * restrict x0 = &x[ib];
|
||||
const block_q4_0 * restrict x1 = &x[ib + 1];
|
||||
const block_q8_0 * restrict y0 = &y[ib];
|
||||
const block_q8_0 * restrict y1 = &y[ib + 1];
|
||||
|
||||
// Load and process x0
|
||||
v128_t v0_0 = wasm_v128_load(x0->qs);
|
||||
v128_t v0_0l = wasm_v128_and(v0_0, m4b);
|
||||
v128_t v0_0h = wasm_u8x16_shr(v0_0, 4);
|
||||
v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b);
|
||||
v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b);
|
||||
|
||||
// Load y0 vectors
|
||||
v128_t y0_l = wasm_v128_load(y0->qs);
|
||||
v128_t y0_h = wasm_v128_load(y0->qs + 16);
|
||||
|
||||
// Extend to i16x8 and compute dot products
|
||||
v128_t dx0l = wasm_i16x8_extend_low_i8x16(v0_0ls);
|
||||
v128_t dx0h = wasm_i16x8_extend_high_i8x16(v0_0ls);
|
||||
v128_t dx0hl = wasm_i16x8_extend_low_i8x16(v0_0hs);
|
||||
v128_t dx0hh = wasm_i16x8_extend_high_i8x16(v0_0hs);
|
||||
|
||||
v128_t dy0ll = wasm_i16x8_extend_low_i8x16(y0_l);
|
||||
v128_t dy0lh = wasm_i16x8_extend_high_i8x16(y0_l);
|
||||
v128_t dy0hl = wasm_i16x8_extend_low_i8x16(y0_h);
|
||||
v128_t dy0hh = wasm_i16x8_extend_high_i8x16(y0_h);
|
||||
|
||||
v128_t dp0 = wasm_i32x4_add(
|
||||
wasm_i32x4_add(
|
||||
wasm_i32x4_dot_i16x8(dx0l, dy0ll),
|
||||
wasm_i32x4_dot_i16x8(dx0h, dy0lh)
|
||||
),
|
||||
wasm_i32x4_add(
|
||||
wasm_i32x4_dot_i16x8(dx0hl, dy0hl),
|
||||
wasm_i32x4_dot_i16x8(dx0hh, dy0hh)
|
||||
)
|
||||
);
|
||||
|
||||
// Load and process x1
|
||||
v128_t v0_1 = wasm_v128_load(x1->qs);
|
||||
v128_t v0_1l = wasm_v128_and(v0_1, m4b);
|
||||
v128_t v0_1h = wasm_u8x16_shr(v0_1, 4);
|
||||
v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b);
|
||||
v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b);
|
||||
|
||||
// Load y1 vectors
|
||||
v128_t y1_l = wasm_v128_load(y1->qs);
|
||||
v128_t y1_h = wasm_v128_load(y1->qs + 16);
|
||||
|
||||
// Extend to i16x8 and compute dot products
|
||||
v128_t dx1l = wasm_i16x8_extend_low_i8x16(v0_1ls);
|
||||
v128_t dx1h = wasm_i16x8_extend_high_i8x16(v0_1ls);
|
||||
v128_t dx1hl = wasm_i16x8_extend_low_i8x16(v0_1hs);
|
||||
v128_t dx1hh = wasm_i16x8_extend_high_i8x16(v0_1hs);
|
||||
|
||||
v128_t dy1ll = wasm_i16x8_extend_low_i8x16(y1_l);
|
||||
v128_t dy1lh = wasm_i16x8_extend_high_i8x16(y1_l);
|
||||
v128_t dy1hl = wasm_i16x8_extend_low_i8x16(y1_h);
|
||||
v128_t dy1hh = wasm_i16x8_extend_high_i8x16(y1_h);
|
||||
|
||||
v128_t dp1 = wasm_i32x4_add(
|
||||
wasm_i32x4_add(
|
||||
wasm_i32x4_dot_i16x8(dx1l, dy1ll),
|
||||
wasm_i32x4_dot_i16x8(dx1h, dy1lh)
|
||||
),
|
||||
wasm_i32x4_add(
|
||||
wasm_i32x4_dot_i16x8(dx1hl, dy1hl),
|
||||
wasm_i32x4_dot_i16x8(dx1hh, dy1hh)
|
||||
)
|
||||
);
|
||||
|
||||
// Accumulate results with scaling
|
||||
float scale0 = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d);
|
||||
float scale1 = GGML_FP16_TO_FP32(x1->d) * GGML_FP16_TO_FP32(y1->d);
|
||||
|
||||
sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp0), wasm_f32x4_splat(scale0)));
|
||||
sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp1), wasm_f32x4_splat(scale1)));
|
||||
}
|
||||
|
||||
sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
|
||||
wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
|
||||
#elif defined(__AVX2__)
|
||||
// Initialize accumulator with zeros
|
||||
__m256 acc = _mm256_setzero_ps();
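
For orientation, the per-block arithmetic that the WASM path above (and the AVX2 path that follows) vectorizes boils down to the scalar sketch below. The block layouts are the usual q4_0/q8_0 ones, but the types here are simplified stand-ins with the fp16 scales kept as plain floats.

#include <stdint.h>

#define QK4_0 32
#define QK8_0 32

// simplified stand-ins for ggml's block_q4_0 / block_q8_0
typedef struct { float d; uint8_t qs[QK4_0/2]; } q4_0_block;
typedef struct { float d; int8_t  qs[QK8_0];   } q8_0_block;

// hypothetical scalar equivalent of one q4_0 x q8_0 block dot product:
// 32 weights stored as 16 bytes of packed nibbles (biased by 8) times
// 32 int8 quants, scaled by the product of the two block scales
static float dot_q4_0_q8_0_block(const q4_0_block * x, const q8_0_block * y) {
    int sumi = 0;
    for (int j = 0; j < QK4_0/2; ++j) {
        const int v0 = (x->qs[j] & 0x0F) - 8;  // low nibble  -> weights 0..15
        const int v1 = (x->qs[j] >>   4) - 8;  // high nibble -> weights 16..31
        sumi += v0*y->qs[j] + v1*y->qs[j + QK8_0/2];
    }
    return x->d * y->d * (float) sumi;
}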
|
||||
|
|
@@ -2688,10 +2856,10 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * r
}

sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__wasm_simd128__)
#elif defined __wasm_simd128__
v128_t sumv = wasm_f32x4_splat(0.0f);

uint32_t qh;
uint32_t qh_;
uint64_t tmp[4];

// TODO: check if unrolling this is better
@@ -2702,12 +2870,12 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * r
const v128_t m4b = wasm_i8x16_splat(0x0F);

// extract the 5th bit
memcpy(&qh, x0->qh, sizeof(qh));
memcpy(&qh_, x0->qh, sizeof(qh_));

tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
tmp[3] = table_b2b_1[(qh >> 24) ];
tmp[0] = table_b2b_1[(qh_ >> 0) & 0xFF];
tmp[1] = table_b2b_1[(qh_ >> 8) & 0xFF];
tmp[2] = table_b2b_1[(qh_ >> 16) & 0xFF];
tmp[3] = table_b2b_1[(qh_ >> 24) ];

const v128_t qhl = wasm_v128_load(tmp + 0);
const v128_t qhh = wasm_v128_load(tmp + 2);
@@ -3049,12 +3217,12 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * r
}

sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
#elif defined(__wasm_simd128__)
#elif defined __wasm_simd128__
v128_t sumv = wasm_f32x4_splat(0.0f);

float summs = 0.0f;

uint32_t qh;
uint32_t qh_;
uint64_t tmp[4];

// TODO: check if unrolling this is better
@@ -3067,12 +3235,12 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * r
const v128_t m4b = wasm_i8x16_splat(0x0F);

// extract the 5th bit
memcpy(&qh, x0->qh, sizeof(qh));
memcpy(&qh_, x0->qh, sizeof(qh_));

tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
tmp[3] = table_b2b_0[(qh >> 24) ];
tmp[0] = table_b2b_0[(qh_ >> 0) & 0xFF];
tmp[1] = table_b2b_0[(qh_ >> 8) & 0xFF];
tmp[2] = table_b2b_0[(qh_ >> 16) & 0xFF];
tmp[3] = table_b2b_0[(qh_ >> 24) ];

const v128_t qhl = wasm_v128_load(tmp + 0);
const v128_t qhh = wasm_v128_load(tmp + 2);
@@ -3565,6 +3733,45 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r
|
|||
}
|
||||
|
||||
sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
|
||||
#elif defined __wasm_simd128__
|
||||
v128_t sumv = wasm_f32x4_splat(0.0f);
|
||||
|
||||
for (; ib < nb; ++ib) {
|
||||
const block_q8_0 * restrict x0 = &x[ib];
|
||||
const block_q8_0 * restrict y0 = &y[ib];
|
||||
|
||||
const v128_t x0_0 = wasm_v128_load(x0->qs);
|
||||
const v128_t x0_1 = wasm_v128_load(x0->qs + 16);
|
||||
const v128_t y0_0 = wasm_v128_load(y0->qs);
|
||||
const v128_t y0_1 = wasm_v128_load(y0->qs + 16);
|
||||
|
||||
// Extend 8-bit to 16-bit
|
||||
const v128_t x0_0l = wasm_i16x8_extend_low_i8x16(x0_0);
|
||||
const v128_t x0_0h = wasm_i16x8_extend_high_i8x16(x0_0);
|
||||
const v128_t x0_1l = wasm_i16x8_extend_low_i8x16(x0_1);
|
||||
const v128_t x0_1h = wasm_i16x8_extend_high_i8x16(x0_1);
|
||||
|
||||
const v128_t y0_0l = wasm_i16x8_extend_low_i8x16(y0_0);
|
||||
const v128_t y0_0h = wasm_i16x8_extend_high_i8x16(y0_0);
|
||||
const v128_t y0_1l = wasm_i16x8_extend_low_i8x16(y0_1);
|
||||
const v128_t y0_1h = wasm_i16x8_extend_high_i8x16(y0_1);
|
||||
|
||||
// Compute dot products
|
||||
const v128_t dx0_0 = wasm_i32x4_dot_i16x8(x0_0l, y0_0l);
|
||||
const v128_t dx0_1 = wasm_i32x4_dot_i16x8(x0_0h, y0_0h);
|
||||
const v128_t dx1_0 = wasm_i32x4_dot_i16x8(x0_1l, y0_1l);
|
||||
const v128_t dx1_1 = wasm_i32x4_dot_i16x8(x0_1h, y0_1h);
|
||||
|
||||
// Sum all dot products
|
||||
const v128_t sum_dots = wasm_i32x4_add(wasm_i32x4_add(dx0_0, dx0_1), wasm_i32x4_add(dx1_0, dx1_1));
|
||||
|
||||
// Convert to float and accumulate
|
||||
const float scale = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d);
|
||||
sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(sum_dots), wasm_f32x4_splat(scale)));
|
||||
}
|
||||
|
||||
sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
|
||||
wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
|
||||
#elif defined(__AVX2__)
|
||||
// Initialize accumulator with zeros
|
||||
__m256 acc = _mm256_setzero_ps();
|
||||
|
|
@@ -4439,6 +4646,106 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r
|
|||
|
||||
*s = hsum_float_8(acc);
|
||||
|
||||
#elif defined __wasm_simd128__
|
||||
float sumf = 0;
|
||||
|
||||
for (int i = 0; i < nb; ++i) {
|
||||
const uint8_t * q2 = x[i].qs;
|
||||
const int8_t * q8 = y[i].qs;
|
||||
const uint8_t * sc = x[i].scales;
|
||||
|
||||
// Vectorized summs calculation
|
||||
v128_t summs_vec = wasm_i32x4_splat(0);
|
||||
{
|
||||
v128_t sc_vec = wasm_v128_load(sc);
|
||||
v128_t sc_upper = wasm_u8x16_shr(sc_vec, 4);
|
||||
|
||||
v128_t sc_low = wasm_u16x8_extend_low_u8x16(sc_upper);
|
||||
v128_t sc_high = wasm_u16x8_extend_high_u8x16(sc_upper);
|
||||
|
||||
v128_t bsums1 = wasm_v128_load(&y[i].bsums[0]);
|
||||
v128_t bsums2 = wasm_v128_load(&y[i].bsums[8]);
|
||||
|
||||
summs_vec = wasm_i32x4_add(
|
||||
wasm_i32x4_add(wasm_i32x4_dot_i16x8(sc_low, bsums1),
|
||||
wasm_i32x4_dot_i16x8(sc_high, bsums2)),
|
||||
summs_vec
|
||||
);
|
||||
|
||||
summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 2, 3, 0, 1));
|
||||
summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 1, 0, 3, 2));
|
||||
}
|
||||
int32_t summs = wasm_i32x4_extract_lane(summs_vec, 0);
|
||||
|
||||
// Vectorized isum calculation
|
||||
int32_t isum = 0;
|
||||
const uint8_t * sc_ptr = sc;
|
||||
const int k_iters = QK_K/128;
|
||||
|
||||
for (int k = 0; k < k_iters; ++k) {
|
||||
v128_t isum_vec = wasm_i32x4_splat(0);
|
||||
int shift = 0;
|
||||
|
||||
for (int j = 0; j < 4; ++j) {
|
||||
const int d0 = (sc_ptr[0] & 0xF);
|
||||
const int d1 = (sc_ptr[1] & 0xF);
|
||||
sc_ptr += 2;
|
||||
|
||||
// Process first 16 elements
|
||||
v128_t q2_0 = wasm_v128_load(q2);
|
||||
v128_t q8_0 = wasm_v128_load(q8);
|
||||
v128_t q2_shift_0 = wasm_u8x16_shr(q2_0, shift);
|
||||
v128_t q2_bits_0 = wasm_v128_and(q2_shift_0, wasm_i8x16_splat(0x03));
|
||||
|
||||
// Process next 16 elements
|
||||
v128_t q2_1 = wasm_v128_load(q2 + 16);
|
||||
v128_t q8_1 = wasm_v128_load(q8 + 16);
|
||||
v128_t q2_shift_1 = wasm_u8x16_shr(q2_1, shift);
|
||||
v128_t q2_bits_1 = wasm_v128_and(q2_shift_1, wasm_i8x16_splat(0x03));
|
||||
|
||||
// Calculate dot products
|
||||
v128_t p0 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q8_0),
|
||||
wasm_i16x8_extend_low_i8x16(q2_bits_0)
|
||||
);
|
||||
v128_t p1 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q8_0),
|
||||
wasm_i16x8_extend_high_i8x16(q2_bits_0)
|
||||
);
|
||||
v128_t p2 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q8_1),
|
||||
wasm_i16x8_extend_low_i8x16(q2_bits_1)
|
||||
);
|
||||
v128_t p3 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q8_1),
|
||||
wasm_i16x8_extend_high_i8x16(q2_bits_1)
|
||||
);
|
||||
|
||||
// Accumulate scaled results
|
||||
v128_t scaled = wasm_i32x4_add(
|
||||
wasm_i32x4_mul(wasm_i32x4_add(p0, p1), wasm_i32x4_splat(d0)),
|
||||
wasm_i32x4_mul(wasm_i32x4_add(p2, p3), wasm_i32x4_splat(d1))
|
||||
);
|
||||
|
||||
isum_vec = wasm_i32x4_add(isum_vec, scaled);
|
||||
q8 += 32;
|
||||
shift += 2;
|
||||
}
|
||||
q2 += 32;
|
||||
|
||||
// Horizontal sum of isum_vec
|
||||
isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 2, 3, 0, 1));
|
||||
isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 1, 0, 3, 2));
|
||||
isum += wasm_i32x4_extract_lane(isum_vec, 0);
|
||||
}
|
||||
|
||||
const float dall = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
|
||||
const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
|
||||
sumf += dall * isum - dmin * summs;
|
||||
}
|
||||
|
||||
*s = sumf;
|
||||
|
||||
#elif defined __riscv_v_intrinsic
|
||||
|
||||
float sumf = 0;
|
||||
|
|
@@ -5121,6 +5428,94 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r
|
|||
|
||||
*s = hsum_float_8(acc);
|
||||
|
||||
#elif defined __wasm_simd128__
|
||||
int8_t aux8[QK_K];
|
||||
float sums[8] = {0};
|
||||
uint32_t auxs[4];
|
||||
|
||||
float sumf = 0;
|
||||
for (int i = 0; i < nb; ++i) {
|
||||
const uint8_t * restrict q3 = x[i].qs;
|
||||
const uint8_t * restrict hm = x[i].hmask;
|
||||
const int8_t * restrict q8 = y[i].qs;
|
||||
|
||||
// Process blocks with SIMD
|
||||
int8_t * a = aux8;
|
||||
uint8_t m = 1;
|
||||
for (int j = 0; j < QK_K; j += 128) {
|
||||
for (int shift = 0; shift <= 6; shift += 2) {
|
||||
v128_t v_m = wasm_i8x16_splat(m);
|
||||
for (int l = 0; l < 32; l += 16) {
|
||||
v128_t v_q3 = wasm_v128_load(q3 + l);
|
||||
v128_t v_shift = wasm_i8x16_shr(v_q3, shift);
|
||||
v128_t v_low2 = wasm_v128_and(v_shift, wasm_i8x16_splat(0x03));
|
||||
|
||||
v128_t v_hm = wasm_v128_load(hm + l);
|
||||
v128_t v_mask = wasm_v128_and(v_hm, v_m);
|
||||
v_mask = wasm_i8x16_ne(v_mask, wasm_i8x16_splat(0));
|
||||
|
||||
v_low2 = wasm_i8x16_sub(v_low2, wasm_v128_and(wasm_i8x16_splat(4), wasm_v128_not(v_mask)));
|
||||
wasm_v128_store(a + l, v_low2);
|
||||
}
|
||||
a += 32;
|
||||
m <<= 1;
|
||||
}
|
||||
q3 += 32;
|
||||
}
|
||||
|
||||
// Extract scales
|
||||
memcpy(auxs, x[i].scales, 12);
|
||||
uint32_t tmp = auxs[2];
|
||||
auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
|
||||
auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
|
||||
auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
|
||||
auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
|
||||
const int8_t * scales = (const int8_t *)auxs;
|
||||
|
||||
// SIMD dot product with register accumulators
|
||||
v128_t v_acc0 = wasm_i32x4_splat(0);
|
||||
v128_t v_acc1 = wasm_i32x4_splat(0);
|
||||
a = aux8;
|
||||
for (int j = 0; j < QK_K/16; ++j) {
|
||||
const v128_t v_scale = wasm_i16x8_splat(scales[j] - 32);
|
||||
|
||||
// Process 16 elements per iteration
|
||||
for (int k = 0; k < 2; ++k) {
|
||||
const v128_t v_q8 = wasm_i16x8_load8x8(q8);
|
||||
const v128_t v_a = wasm_i16x8_load8x8(a);
|
||||
|
||||
v128_t v_prod = wasm_i16x8_mul(v_q8, v_a);
|
||||
v_prod = wasm_i16x8_mul(v_prod, v_scale);
|
||||
|
||||
v_acc0 = wasm_i32x4_add(v_acc0, wasm_i32x4_extend_low_i16x8(v_prod));
|
||||
v_acc1 = wasm_i32x4_add(v_acc1, wasm_i32x4_extend_high_i16x8(v_prod));
|
||||
|
||||
q8 += 8;
|
||||
a += 8;
|
||||
}
|
||||
}
|
||||
|
||||
// Accumulate results
|
||||
const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
|
||||
const v128_t v_d = wasm_f32x4_splat(d);
|
||||
v128_t v_sum = wasm_f32x4_add(
|
||||
wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc0), v_d),
|
||||
wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc1), v_d)
|
||||
);
|
||||
|
||||
// Accumulate into sums vector
|
||||
wasm_v128_store(sums, wasm_f32x4_add(wasm_v128_load(sums), v_sum));
|
||||
}
|
||||
|
||||
// Horizontal sum
|
||||
v128_t v_sum = wasm_f32x4_add(wasm_v128_load(sums), wasm_v128_load(sums + 4));
|
||||
sumf = wasm_f32x4_extract_lane(v_sum, 0) +
|
||||
wasm_f32x4_extract_lane(v_sum, 1) +
|
||||
wasm_f32x4_extract_lane(v_sum, 2) +
|
||||
wasm_f32x4_extract_lane(v_sum, 3);
|
||||
|
||||
*s = sumf;
|
||||
|
||||
#elif defined __riscv_v_intrinsic
|
||||
|
||||
uint32_t aux[3];
|
||||
|
|
@@ -5646,7 +6041,7 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
}
}
*s = sumf;
#elif __ARM_NEON
#elif defined __ARM_NEON
const uint8x16_t m4b = vdupq_n_u8(0xf);
const int32x4_t mzero = vdupq_n_s32(0);
@@ -5709,6 +6104,107 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
|
|||
|
||||
*s = sumf;
|
||||
|
||||
#elif defined __wasm_simd128__
|
||||
const uint8_t * scales = (const uint8_t*)&utmp[0];
|
||||
float sumf = 0;
|
||||
|
||||
for (int i = 0; i < nb; ++i) {
|
||||
const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
|
||||
const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Corrected sign
|
||||
|
||||
const uint8_t * restrict q4 = x[i].qs;
|
||||
const int8_t * restrict q8 = y[i].qs;
|
||||
|
||||
// Process scales and mins
|
||||
memcpy(utmp, x[i].scales, 12);
|
||||
utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
|
||||
const uint32_t uaux = utmp[1] & kmask1;
|
||||
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
|
||||
utmp[2] = uaux;
|
||||
utmp[0] &= kmask1;
|
||||
|
||||
// Sum mins * q8sums
|
||||
int32_t sumi = 0;
|
||||
const int16_t * restrict q8sums = y[i].bsums;
|
||||
const uint8_t * m = (const uint8_t *)&utmp[2];
|
||||
for (int j = 0; j < 16; j += 2) {
|
||||
sumi += (q8sums[j] + q8sums[j+1]) * m[j/2];
|
||||
}
|
||||
sumf -= dmin * sumi;
|
||||
|
||||
int32_t sumi1 = 0;
|
||||
int32_t sumi2 = 0;
|
||||
|
||||
for (int j = 0; j < QK_K/64; ++j) {
|
||||
// Load 64 4-bit weights (32 bytes)
|
||||
const v128_t q4x0 = wasm_v128_load(q4);
|
||||
const v128_t q4x1 = wasm_v128_load(q4 + 16);
|
||||
q4 += 32;
|
||||
|
||||
// Split into low/high nibbles
|
||||
const v128_t q4l0 = wasm_v128_and(q4x0, wasm_i8x16_splat(0x0F));
|
||||
const v128_t q4h0 = wasm_u8x16_shr(q4x0, 4);
|
||||
const v128_t q4l1 = wasm_v128_and(q4x1, wasm_i8x16_splat(0x0F));
|
||||
const v128_t q4h1 = wasm_u8x16_shr(q4x1, 4);
|
||||
|
||||
// Load 64 8-bit values (64 bytes)
|
||||
const v128_t q8x0 = wasm_v128_load(q8);
|
||||
const v128_t q8x1 = wasm_v128_load(q8 + 16);
|
||||
const v128_t q8x2 = wasm_v128_load(q8 + 32);
|
||||
const v128_t q8x3 = wasm_v128_load(q8 + 48);
|
||||
q8 += 64;
|
||||
|
||||
// Low nibble products
|
||||
v128_t vacc1 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q4l0),
|
||||
wasm_i16x8_extend_low_i8x16(q8x0)
|
||||
);
|
||||
vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q4l0),
|
||||
wasm_i16x8_extend_high_i8x16(q8x0)
|
||||
));
|
||||
vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q4l1),
|
||||
wasm_i16x8_extend_low_i8x16(q8x1)
|
||||
));
|
||||
vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q4l1),
|
||||
wasm_i16x8_extend_high_i8x16(q8x1)
|
||||
));
|
||||
|
||||
// High nibble products
|
||||
v128_t vacc2 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q4h0),
|
||||
wasm_i16x8_extend_low_i8x16(q8x2)
|
||||
);
|
||||
vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q4h0),
|
||||
wasm_i16x8_extend_high_i8x16(q8x2)
|
||||
));
|
||||
vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q4h1),
|
||||
wasm_i16x8_extend_low_i8x16(q8x3)
|
||||
));
|
||||
vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q4h1),
|
||||
wasm_i16x8_extend_high_i8x16(q8x3)
|
||||
));
|
||||
|
||||
// Accumulate scaled results
|
||||
int32_t vacc1_sum = wasm_i32x4_extract_lane(vacc1, 0) + wasm_i32x4_extract_lane(vacc1, 1) +
|
||||
wasm_i32x4_extract_lane(vacc1, 2) + wasm_i32x4_extract_lane(vacc1, 3);
|
||||
sumi1 += vacc1_sum * scales[2*j];
|
||||
|
||||
int32_t vacc2_sum = wasm_i32x4_extract_lane(vacc2, 0) + wasm_i32x4_extract_lane(vacc2, 1) +
|
||||
wasm_i32x4_extract_lane(vacc2, 2) + wasm_i32x4_extract_lane(vacc2, 3);
|
||||
sumi2 += vacc2_sum * scales[2*j+1];
|
||||
}
|
||||
|
||||
sumf += d * (sumi1 + sumi2);
|
||||
}
|
||||
|
||||
*s = sumf;
|
||||
|
||||
#elif defined __AVX2__
|
||||
|
||||
const __m256i m4 = _mm256_set1_epi8(0xF);
|
||||
|
|
@@ -6459,6 +6955,118 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
|
|||
|
||||
*s = hsum_float_8(acc) + summs;
|
||||
|
||||
#elif defined __wasm_simd128__
|
||||
//const uint8_t * scales = (const uint8_t*)&utmp[0];
|
||||
float sumf = 0;
|
||||
|
||||
for (int i = 0; i < nb; ++i) {
|
||||
const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
|
||||
const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Fixed sign
|
||||
|
||||
const uint8_t * restrict q5 = x[i].qs;
|
||||
const uint8_t * restrict qh = x[i].qh;
|
||||
const int8_t * restrict q8 = y[i].qs;
|
||||
|
||||
// Process scales and mins
|
||||
memcpy(utmp, x[i].scales, 12);
|
||||
utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
|
||||
const uint32_t uaux = utmp[1] & kmask1;
|
||||
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
|
||||
utmp[2] = uaux;
|
||||
utmp[0] &= kmask1;
|
||||
|
||||
// Sum mins * q8sums
|
||||
int32_t sumi_mins = 0;
|
||||
const int16_t * restrict q8sums = y[i].bsums;
|
||||
const uint8_t * m = (const uint8_t *)&utmp[2];
|
||||
for (int j = 0; j < 16; j += 2) {
|
||||
sumi_mins += (q8sums[j] + q8sums[j+1]) * m[j/2];
|
||||
}
|
||||
sumf -= dmin * sumi_mins; // Correct subtraction
|
||||
|
||||
v128_t qh0 = wasm_v128_load(qh);
|
||||
v128_t qh1 = wasm_v128_load(qh + 16);
|
||||
const uint8_t * sc = (const uint8_t *)utmp;
|
||||
|
||||
int32_t sumi = 0;
|
||||
|
||||
for (int j = 0; j < QK_K/64; ++j) {
|
||||
const int shift = j * 2;
|
||||
v128_t qh_shift0 = wasm_u8x16_shr(qh0, shift);
|
||||
v128_t qh_shift1 = wasm_u8x16_shr(qh1, shift);
|
||||
|
||||
v128_t qh_low0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x01)), 4);
|
||||
v128_t qh_high0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x02)), 3);
|
||||
v128_t qh_low1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x01)), 4);
|
||||
v128_t qh_high1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x02)), 3);
|
||||
|
||||
v128_t q5_0 = wasm_v128_load(q5);
|
||||
v128_t q5_1 = wasm_v128_load(q5 + 16);
|
||||
q5 += 32;
|
||||
|
||||
v128_t q5l_0 = wasm_v128_or(wasm_v128_and(q5_0, wasm_i8x16_splat(0x0F)), qh_low0);
|
||||
v128_t q5h_0 = wasm_v128_or(wasm_u8x16_shr(q5_0, 4), qh_high0);
|
||||
v128_t q5l_1 = wasm_v128_or(wasm_v128_and(q5_1, wasm_i8x16_splat(0x0F)), qh_low1);
|
||||
v128_t q5h_1 = wasm_v128_or(wasm_u8x16_shr(q5_1, 4), qh_high1);
|
||||
|
||||
v128_t q8_0 = wasm_v128_load(q8);
|
||||
v128_t q8_1 = wasm_v128_load(q8 + 16);
|
||||
v128_t q8_2 = wasm_v128_load(q8 + 32);
|
||||
v128_t q8_3 = wasm_v128_load(q8 + 48);
|
||||
q8 += 64;
|
||||
|
||||
// Process low quants
|
||||
v128_t pl0 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q5l_0),
|
||||
wasm_i16x8_extend_low_i8x16(q8_0)
|
||||
);
|
||||
pl0 = wasm_i32x4_add(pl0, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q5l_0),
|
||||
wasm_i16x8_extend_high_i8x16(q8_0)
|
||||
));
|
||||
v128_t pl1 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q5l_1),
|
||||
wasm_i16x8_extend_low_i8x16(q8_1)
|
||||
);
|
||||
pl1 = wasm_i32x4_add(pl1, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q5l_1),
|
||||
wasm_i16x8_extend_high_i8x16(q8_1)
|
||||
));
|
||||
v128_t sum_low = wasm_i32x4_add(pl0, pl1);
|
||||
|
||||
// Process high quants
|
||||
v128_t ph0 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q5h_0),
|
||||
wasm_i16x8_extend_low_i8x16(q8_2)
|
||||
);
|
||||
ph0 = wasm_i32x4_add(ph0, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q5h_0),
|
||||
wasm_i16x8_extend_high_i8x16(q8_2)
|
||||
));
|
||||
v128_t ph1 = wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_low_i8x16(q5h_1),
|
||||
wasm_i16x8_extend_low_i8x16(q8_3)
|
||||
);
|
||||
ph1 = wasm_i32x4_add(ph1, wasm_i32x4_dot_i16x8(
|
||||
wasm_i16x8_extend_high_i8x16(q5h_1),
|
||||
wasm_i16x8_extend_high_i8x16(q8_3)
|
||||
));
|
||||
v128_t sum_high = wasm_i32x4_add(ph0, ph1);
|
||||
|
||||
// Accumulate with scale factors
|
||||
int32_t sl = wasm_i32x4_extract_lane(sum_low, 0) + wasm_i32x4_extract_lane(sum_low, 1) +
|
||||
wasm_i32x4_extract_lane(sum_low, 2) + wasm_i32x4_extract_lane(sum_low, 3);
|
||||
int32_t sh = wasm_i32x4_extract_lane(sum_high, 0) + wasm_i32x4_extract_lane(sum_high, 1) +
|
||||
wasm_i32x4_extract_lane(sum_high, 2) + wasm_i32x4_extract_lane(sum_high, 3);
|
||||
|
||||
sumi += sl * sc[2*j] + sh * sc[2*j+1];
|
||||
}
|
||||
|
||||
sumf += d * sumi;
|
||||
}
|
||||
|
||||
*s = sumf;
|
||||
|
||||
#elif defined __riscv_v_intrinsic
|
||||
|
||||
const uint8_t * scales = (const uint8_t*)&utmp[0];
|
||||
|
|
@@ -7122,6 +7730,85 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
|
|||
|
||||
*s = hsum_float_8(acc);
|
||||
|
||||
#elif defined __wasm_simd128__
|
||||
int8_t aux8[QK_K] __attribute__((aligned(16)));
|
||||
int32_t aux32[8] __attribute__((aligned(16))) = {0};
|
||||
float sums[8] __attribute__((aligned(16))) = {0};
|
||||
|
||||
for (int i = 0; i < nb; ++i) {
|
||||
// Unpack 6-bit quantized data into aux8 (unchanged)
|
||||
const uint8_t * restrict q4 = x[i].ql;
|
||||
const uint8_t * restrict qh = x[i].qh;
|
||||
int8_t * a = aux8;
|
||||
for (int j = 0; j < QK_K; j += 128) {
|
||||
for (int l = 0; l < 32; ++l) {
|
||||
a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
|
||||
a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
|
||||
a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
|
||||
a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
|
||||
}
|
||||
a += 128;
|
||||
q4 += 64;
|
||||
qh += 32;
|
||||
}
|
||||
|
||||
const int8_t * restrict a_ptr = aux8;
|
||||
const int8_t * restrict q8 = y[i].qs;
|
||||
v128_t acc0 = wasm_i32x4_splat(0);
|
||||
v128_t acc1 = wasm_i32x4_splat(0);
|
||||
|
||||
for (int j = 0; j < QK_K/16; ++j) {
|
||||
const int scale = x[i].scales[j];
|
||||
const v128_t vscale = wasm_i32x4_splat(scale);
|
||||
|
||||
// Load 16 elements from a and q8
|
||||
const v128_t a_vec = wasm_v128_load(a_ptr);
|
||||
const v128_t q8_vec = wasm_v128_load(q8);
|
||||
|
||||
// Process low 8 elements
|
||||
v128_t a_low = wasm_i16x8_extend_low_i8x16(a_vec);
|
||||
v128_t q8_low = wasm_i16x8_extend_low_i8x16(q8_vec);
|
||||
v128_t prod_low = wasm_i16x8_mul(a_low, q8_low);
|
||||
v128_t prod_lo_lo = wasm_i32x4_extend_low_i16x8(prod_low);
|
||||
v128_t prod_lo_hi = wasm_i32x4_extend_high_i16x8(prod_low);
|
||||
|
||||
// Process high 8 elements
|
||||
v128_t a_high = wasm_i16x8_extend_high_i8x16(a_vec);
|
||||
v128_t q8_high = wasm_i16x8_extend_high_i8x16(q8_vec);
|
||||
v128_t prod_high = wasm_i16x8_mul(a_high, q8_high);
|
||||
v128_t prod_hi_lo = wasm_i32x4_extend_low_i16x8(prod_high);
|
||||
v128_t prod_hi_hi = wasm_i32x4_extend_high_i16x8(prod_high);
|
||||
|
||||
// Scale and accumulate
|
||||
prod_lo_lo = wasm_i32x4_mul(prod_lo_lo, vscale);
|
||||
prod_lo_hi = wasm_i32x4_mul(prod_lo_hi, vscale);
|
||||
prod_hi_lo = wasm_i32x4_mul(prod_hi_lo, vscale);
|
||||
prod_hi_hi = wasm_i32x4_mul(prod_hi_hi, vscale);
|
||||
|
||||
acc0 = wasm_i32x4_add(acc0, wasm_i32x4_add(prod_lo_lo, prod_hi_lo));
|
||||
acc1 = wasm_i32x4_add(acc1, wasm_i32x4_add(prod_lo_hi, prod_hi_hi));
|
||||
|
||||
a_ptr += 16;
|
||||
q8 += 16;
|
||||
}
|
||||
|
||||
// Store accumulated results
|
||||
wasm_v128_store(&aux32[0], acc0);
|
||||
wasm_v128_store(&aux32[4], acc1);
|
||||
|
||||
const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
|
||||
for (int l = 0; l < 8; ++l) {
|
||||
sums[l] += d * aux32[l];
|
||||
}
|
||||
}
|
||||
|
||||
// Sum final results
|
||||
float sumf = 0;
|
||||
for (int l = 0; l < 8; ++l) {
|
||||
sumf += sums[l];
|
||||
}
|
||||
*s = sumf;
|
||||
|
||||
#elif defined __riscv_v_intrinsic
|
||||
|
||||
float sumf = 0;
|
||||
|
|
|
|||
|
|
@@ -7,10 +7,8 @@
#include "ggml-cpu-impl.h"
#include "ggml-cpu.h"
#include "ggml-impl.h"
#include "ggml-quants.h"
#include "ggml-cpu-quants.h"
#include "ggml-threading.h"
#include "amx/amx.h"
#include "ggml.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
@@ -1291,7 +1289,7 @@ struct ggml_threadpool {
atomic_int n_graph; // incremented when there is work to be done (i.e each graph)
atomic_int GGML_CACHE_ALIGN n_barrier;
atomic_int GGML_CACHE_ALIGN n_barrier_passed;
atomic_int current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.
atomic_int GGML_CACHE_ALIGN current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.

// these are atomic as an annotation for thread-sanitizer
atomic_bool stop; // Used for stopping the threadpool altogether
@@ -7490,6 +7488,7 @@ UseGgmlGemm1:;
if (src1->type != vec_dot_type) {
char * wdata = params->wdata;

const size_t nbw0 = ggml_type_size(vec_dot_type);
const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
const size_t nbw2 = nbw1*ne11;
const size_t nbw3 = nbw2*ne12;
@@ -7497,6 +7496,7 @@ UseGgmlGemm1:;
assert(params->wsize >= ne13*nbw3);
GGML_ASSERT(src1->type == GGML_TYPE_F32);

#if 0
for (int64_t i13 = 0; i13 < ne13; ++i13) {
for (int64_t i12 = 0; i12 < ne12; ++i12) {
for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
@@ -7506,6 +7506,20 @@ UseGgmlGemm1:;
}
}
}
#else
for (int64_t i13 = 0; i13 < ne13; ++i13) {
for (int64_t i12 = 0; i12 < ne12; ++i12) {
for (int64_t i11 = 0; i11 < ne11; ++i11) {
size_t bs = ggml_blck_size(vec_dot_type);
int64_t ne10_block_start = (ith * ne10/bs) / nth;
int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
(void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
(ne10_block_end - ne10_block_start) * bs);
}
}
}
#endif
}

if (ith == 0) {
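
The #else branch above splits each src1 row into ne10/bs quantization blocks and hands every thread a contiguous block range. A small standalone example of how that partition works out, with made-up sizes (ne10 is assumed to be a multiple of bs):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const int64_t ne10 = 4096;  // elements per src1 row (hypothetical)
    const int64_t bs   = 32;    // block size of vec_dot_type, e.g. Q8_0
    const int     nth  = 8;     // number of threads

    for (int ith = 0; ith < nth; ++ith) {
        // divide the ne10/bs blocks evenly across threads, as in the diff above
        const int64_t block_start = ( ith      * (ne10/bs)) / nth;
        const int64_t block_end   = ((ith + 1) * (ne10/bs)) / nth;
        printf("thread %d converts blocks [%3lld, %3lld) -> elements [%4lld, %4lld)\n",
               ith, (long long) block_start, (long long) block_end,
               (long long) (block_start*bs), (long long) (block_end*bs));
    }
    return 0;
}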
@@ -7593,7 +7607,6 @@ UseGgmlGemm2:;
if ((nr0 % 2 != 0) || (ne11 % 2 != 0) || ((ir0_end - ir0_start) % 2 != 0) || ((ir1_end - ir1_start) % 2 != 0)) {
num_rows_per_vec_dot = 1;
}

ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);

if (nth >= nchunk0 * nchunk1) {
@@ -7606,6 +7619,84 @@ UseGgmlGemm2:;
|
|||
|
||||
// ggml_compute_forward_mul_mat_id
|
||||
|
||||
#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ids->ne[0]*ids->ne[1] + (i1)]
|
||||
|
||||
struct mmid_row_mapping {
|
||||
int32_t i1;
|
||||
int32_t i2;
|
||||
};
|
||||
|
||||
static void ggml_compute_forward_mul_mat_id_one_chunk(
|
||||
struct ggml_tensor * dst,
|
||||
const struct ggml_tensor * src0,
|
||||
const struct ggml_tensor * src1,
|
||||
const struct ggml_tensor * ids,
|
||||
const int64_t cur_a,
|
||||
const int64_t ir0_start,
|
||||
const int64_t ir0_end,
|
||||
const int64_t ir1_start,
|
||||
const int64_t ir1_end,
|
||||
const char * src0_cur,
|
||||
const struct mmid_row_mapping * matrix_rows,
|
||||
const size_t row_size,
|
||||
const bool src1_cont,
|
||||
const void * wdata) {
|
||||
|
||||
GGML_TENSOR_BINARY_OP_LOCALS
|
||||
|
||||
const enum ggml_type type = src0->type;
|
||||
|
||||
ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot;
|
||||
enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
|
||||
|
||||
const int64_t blck_0 = 16;
|
||||
const int64_t blck_1 = 16;
|
||||
|
||||
float tmp[16];
|
||||
|
||||
for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
|
||||
for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
|
||||
for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ++ir1) {
|
||||
const int64_t _i12 = ir1; // logical row index for this expert
|
||||
|
||||
struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12);
|
||||
const int id = row_mapping.i1; // selected expert index
|
||||
|
||||
const int64_t i11 = id % ne11;
|
||||
const int64_t i12 = row_mapping.i2; // row index in src1
|
||||
|
||||
const int64_t i1 = id; // selected expert index
|
||||
const int64_t i2 = i12; // row
|
||||
|
||||
// desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
|
||||
// if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
|
||||
// the original src1 data pointer, so we should index using the indices directly
|
||||
// TODO: this is a bit of a hack, we should probably have a better way to handle this
|
||||
const char * src1_col = (const char *) wdata +
|
||||
(src1_cont || src1->type != vec_dot_type
|
||||
? (i11 + i12*ne11)*row_size
|
||||
: (i11*nb11 + i12*nb12));
|
||||
|
||||
float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2));
|
||||
|
||||
for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) {
|
||||
vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1);
|
||||
}
|
||||
|
||||
memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir0_end) - iir0)*sizeof(float));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void * incr_ptr_aligned(void ** p, size_t size, size_t align) {
|
||||
|
||||
void * ptr = *p;
|
||||
ptr = (void *) GGML_PAD((uintptr_t) ptr, align);
|
||||
*p = (void *) ((char *) ptr + size);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static void ggml_compute_forward_mul_mat_id(
|
||||
const struct ggml_compute_params * params,
|
||||
struct ggml_tensor * dst) {
|
||||
|
|
@@ -7623,7 +7714,6 @@ static void ggml_compute_forward_mul_mat_id(

const bool src1_cont = ggml_is_contiguous(src1);

ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot;
enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float;
@@ -7641,21 +7731,27 @@ static void ggml_compute_forward_mul_mat_id(
|
|||
const int n_ids = ids->ne[0]; // n_expert_used
|
||||
const int n_as = ne02; // n_expert
|
||||
|
||||
char * wdata_src1_end = (src1->type == vec_dot_type) ?
|
||||
(char *) params->wdata :
|
||||
(char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
|
||||
void * wdata_cur = params->wdata;
|
||||
|
||||
struct mmid_row_mapping {
|
||||
int32_t i1;
|
||||
int32_t i2;
|
||||
};
|
||||
if (src1->type != vec_dot_type) {
|
||||
incr_ptr_aligned(&wdata_cur, ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
|
||||
}
|
||||
|
||||
int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
|
||||
struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *)(matrix_row_counts + n_as); // [n_as][ne11]
|
||||
int64_t * matrix_row_counts = // [n_as]
|
||||
incr_ptr_aligned(&wdata_cur, n_as*sizeof(int64_t), sizeof(int64_t));
|
||||
|
||||
struct mmid_row_mapping * matrix_rows = // [n_as][ids->ne[0]*ids->ne[1]]
|
||||
incr_ptr_aligned(&wdata_cur, n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping), sizeof(int64_t));
|
||||
|
||||
char (*atomic_current_chunk)[CACHE_LINE_SIZE] = // [n_as]
|
||||
incr_ptr_aligned(&wdata_cur, CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE);
|
||||
|
||||
GGML_ASSERT(params->wsize >= (size_t)((char *) wdata_cur - (char *) params->wdata));
|
||||
|
||||
if (src1->type != vec_dot_type) {
|
||||
char * wdata = params->wdata;
|
||||
|
||||
const size_t nbw0 = ggml_type_size(vec_dot_type);
|
||||
const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
|
||||
const size_t nbw2 = nbw1*ne11;
|
||||
const size_t nbw3 = nbw2*ne12;
|
||||
|
|
@@ -7663,19 +7759,32 @@ static void ggml_compute_forward_mul_mat_id(
|
|||
assert(params->wsize >= ne13*nbw3);
|
||||
GGML_ASSERT(src1->type == GGML_TYPE_F32);
|
||||
|
||||
#if 0
|
||||
for (int64_t i13 = 0; i13 < ne13; ++i13) {
|
||||
for (int64_t i12 = 0; i12 < ne12; ++i12) {
|
||||
for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
|
||||
for (int64_t i12 = ith; i12 < ne12; i12 += nth) {
|
||||
for (int64_t i11 = 0; i11 < ne11; ++i11) {
|
||||
from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
|
||||
(void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
|
||||
ne10);
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
for (int64_t i13 = 0; i13 < ne13; ++i13) {
|
||||
for (int64_t i12 = 0; i12 < ne12; ++i12) {
|
||||
for (int64_t i11 = 0; i11 < ne11; ++i11) {
|
||||
size_t bs = ggml_blck_size(vec_dot_type);
|
||||
int64_t ne10_block_start = (ith * ne10/bs) / nth;
|
||||
int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
|
||||
from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
|
||||
(void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
|
||||
(ne10_block_end - ne10_block_start) * bs);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne12 + (i1)]
|
||||
|
||||
if (ith == 0) {
|
||||
// initialize matrix_row_counts
|
||||
memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
|
||||
|
|
@@ -7693,9 +7802,14 @@ static void ggml_compute_forward_mul_mat_id(
}
}

// reset current_chunk
for (int cur_a = ith; cur_a < n_as; cur_a += nth) {
atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
*current_chunk_ctr = nth;
}

ggml_barrier(params->threadpool);

// compute each matrix multiplication in sequence
for (int cur_a = 0; cur_a < n_as; ++cur_a) {
const int64_t cne1 = matrix_row_counts[cur_a];
@@ -7703,84 +7817,64 @@ static void ggml_compute_forward_mul_mat_id(
|
|||
continue;
|
||||
}
|
||||
|
||||
const char * src0_cur = (const char *) src0->data + cur_a*nb02;
|
||||
|
||||
const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
|
||||
const char * src0_cur = (const char *) src0->data + cur_a * nb02;
|
||||
const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
|
||||
const size_t row_size = ggml_row_size(vec_dot_type, ne10);
|
||||
|
||||
const int64_t nr0 = ne01; // src0 rows
|
||||
const int64_t nr1 = cne1; // src1 rows
|
||||
const int64_t nr0 = ne01;
|
||||
const int64_t nr1 = cne1;
|
||||
|
||||
// distribute the thread work across the inner or outer loop based on which one is larger
|
||||
int chunk_size = 16;
|
||||
if (nr0 == 1 || nr1 == 1) {
|
||||
chunk_size = 64;
|
||||
}
|
||||
|
||||
const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
|
||||
const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
|
||||
#if defined(__aarch64__)
|
||||
// disable for ARM
|
||||
const bool disable_chunking = true;
|
||||
#else
|
||||
// disable for NUMA
|
||||
const bool disable_chunking = ggml_is_numa();
|
||||
#endif // defined(__aarch64__)
|
||||
|
||||
const int64_t ith0 = ith % nth0;
|
||||
const int64_t ith1 = ith / nth0;
|
||||
int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
|
||||
int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
|
||||
|
||||
const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
|
||||
const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
|
||||
if (nchunk0 * nchunk1 < nth * 4 || disable_chunking) {
|
||||
nchunk0 = nr0 > nr1 ? nth : 1;
|
||||
nchunk1 = nr0 > nr1 ? 1 : nth;
|
||||
}
|
||||
|
||||
const int64_t ir010 = dr0*ith0;
|
||||
const int64_t ir011 = MIN(ir010 + dr0, nr0);
|
||||
const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
|
||||
const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
|
||||
|
||||
const int64_t ir110 = dr1*ith1;
|
||||
const int64_t ir111 = MIN(ir110 + dr1, nr1);
|
||||
int current_chunk = ith;
|
||||
|
||||
// threads with no work simply yield (not sure if it helps)
|
||||
//if (ir010 >= ir011 || ir110 >= ir111) {
|
||||
// sched_yield();
|
||||
// continue;
|
||||
//}
|
||||
atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
|
||||
|
||||
// block-tiling attempt
|
||||
const int64_t blck_0 = 16;
|
||||
const int64_t blck_1 = 16;
|
||||
while (current_chunk < nchunk0 * nchunk1) {
|
||||
const int64_t ith0 = current_chunk % nchunk0;
|
||||
const int64_t ith1 = current_chunk / nchunk0;
|
||||
|
||||
// attempt to reduce false-sharing (does not seem to make a difference)
|
||||
float tmp[16];
|
||||
const int64_t ir0_start = dr0 * ith0;
|
||||
const int64_t ir0_end = MIN(ir0_start + dr0, nr0);
|
||||
|
||||
for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
|
||||
for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
|
||||
for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
|
||||
const int64_t _i12 = ir1; // logical row index for this expert
|
||||
const int64_t ir1_start = dr1 * ith1;
|
||||
const int64_t ir1_end = MIN(ir1_start + dr1, nr1);
|
||||
|
||||
struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12);
|
||||
const int id = row_mapping.i1; // selected expert index
|
||||
ggml_compute_forward_mul_mat_id_one_chunk(
|
||||
dst, src0, src1, ids, cur_a,
|
||||
ir0_start, ir0_end, ir1_start, ir1_end,
|
||||
src0_cur, matrix_rows, row_size, src1_cont, wdata
|
||||
);
|
||||
|
||||
const int64_t i11 = id % ne11;
|
||||
const int64_t i12 = row_mapping.i2; // row index in src1
|
||||
|
||||
const int64_t i1 = id; // selected expert index
|
||||
const int64_t i2 = i12; // row
|
||||
|
||||
// desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
|
||||
// if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
|
||||
// the original src1 data pointer, so we should index using the indices directly
|
||||
// TODO: this is a bit of a hack, we should probably have a better way to handle this
|
||||
const char * src1_col = (const char *) wdata +
|
||||
(src1_cont || src1->type != vec_dot_type
|
||||
? (i11 + i12*ne11)*row_size
|
||||
: (i11*nb11 + i12*nb12));
|
||||
|
||||
float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2));
|
||||
|
||||
//for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
|
||||
// vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
|
||||
//}
|
||||
|
||||
for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
|
||||
vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1);
|
||||
}
|
||||
|
||||
memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
|
||||
}
|
||||
if (nth >= nchunk0 * nchunk1) {
|
||||
break;
|
||||
}
|
||||
|
||||
current_chunk = atomic_fetch_add_explicit(current_chunk_ctr, 1, memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#undef MMID_MATRIX_ROW
|
||||
}
|
||||
|
||||
// ggml_compute_forward_out_prod
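
The rewrite above replaces the fixed per-thread row split with chunked scheduling: each thread starts on chunk ith and then pulls further chunk indices from a per-expert atomic counter, which the earlier hunk resets to nth. A minimal standalone sketch of that pattern, where process_chunk and ctx are hypothetical placeholders:

#include <stdatomic.h>

typedef void (*process_chunk_fn)(int chunk, void * ctx);

// minimal sketch of the work-stealing chunk loop used above:
// the counter starts at nth, so chunks 0..nth-1 are implicitly claimed
// by thread ids 0..nth-1 and the counter hands out the rest
static void run_chunks(atomic_int * counter, int ith, int nth, int nchunks,
                       process_chunk_fn process_chunk, void * ctx) {
    int current = ith;
    while (current < nchunks) {
        process_chunk(current, ctx);
        if (nth >= nchunks) {
            break;  // at most one chunk per thread: nothing left to steal
        }
        current = atomic_fetch_add_explicit(counter, 1, memory_order_relaxed);
    }
}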
|
||||
|
|
@@ -13713,14 +13807,19 @@ struct ggml_cplan ggml_graph_plan(
cur = 0;
const struct ggml_tensor * src0 = node->src[0];
const struct ggml_tensor * src1 = node->src[1];
const struct ggml_tensor * ids = node->src[2];
const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
if (src1->type != vec_dot_type) {
cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
}
const int n_as = src0->ne[2];
cur += GGML_PAD(cur, sizeof(int64_t)); // align
cur += n_as * sizeof(int64_t); // matrix_row_counts
cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
// src1
if (src1->type != vec_dot_type) {
cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)) + sizeof(int64_t);
}
// matrix_row_counts
cur += n_as * sizeof(int64_t) + sizeof(int64_t);
// matrix_rows
cur += n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping) + sizeof(int64_t);
// atomic_current_chunk
cur += CACHE_LINE_SIZE*n_as + CACHE_LINE_SIZE;
} break;
case GGML_OP_OUT_PROD:
{
@@ -178,11 +178,11 @@ static ggml_cuda_device_info ggml_cuda_init() {
int major_version = 0;
size_t version_length = 0;
if (rocblas_get_version_string_size(&version_length) == rocblas_status_success) {
std::string version(version_length, '\0');
std::vector<char> version(version_length+1, '\0');
if (rocblas_get_version_string(version.data(), version.size()) == rocblas_status_success) {
version.resize(::strlen(version.c_str()));
version.resize(::strlen(version.data()));
int parsed_value = 0;
if (std::from_chars(version.c_str(), version.c_str() + version.length(), parsed_value).ec == std::errc()) {
if (std::from_chars(version.data(), version.data() + version.size(), parsed_value).ec == std::errc()) {
major_version = parsed_value;
}
}
@@ -149,5 +149,5 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
return !fp16_mma_hardware_available(cc) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
}

return (!GGML_CUDA_CC_IS_RDNA3(cc) && !GGML_CUDA_CC_IS_CDNA(cc) && !GGML_CUDA_CC_IS_GCN(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
return (!GGML_CUDA_CC_IS_RDNA3(cc) && !GGML_CUDA_CC_IS_CDNA(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
}
@@ -1 +1 @@
08b538031f7f944e84f472483ef5d26bf5190ead
98a61a0d0b43cba06c3ac1c603813639552a0701