diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp
index 4bc5315220..615299774a 100644
--- a/ggml/src/ggml-cpu/repack.cpp
+++ b/ggml/src/ggml-cpu/repack.cpp
@@ -660,12 +660,12 @@ void ggml_gemv_q6_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs,
                     int offset = ((k / 2) % 2) + j * 2;
                     for (int i = 0; i < blocklen; ++i) {
                         const int hbits_index = k * ncols_interleaved * blocklen + j * blocklen + i;
-                        const int lbits_index = (hbits_index / 32) * 64 + (hbits_index % 32);
+                        const int lbits_index = hbits_index + (k/4) * 256;
 
-                        int8_t v0 = (int8_t)((b_ptr[l].qh[hbits_index] & 3) << 4) | (b_ptr[l].ql[lbits_index] & 0xF) - 32;
-                        int8_t v1 = (int8_t)(((b_ptr[l].qh[hbits_index] >> 2 ) & 3) << 4) | (b_ptr[l].ql[lbits_index + 32] & 0xF) - 32;
-                        int8_t v2 = (int8_t)(((b_ptr[l].qh[hbits_index] >> 4 ) & 3) << 4) | ((b_ptr[l].ql[lbits_index] >> 4) & 0xF) - 32;
-                        int8_t v3 = (int8_t)(((b_ptr[l].qh[hbits_index] >> 6 ) & 3) << 4) | ((b_ptr[l].ql[lbits_index + 32] >> 4) & 0xF) - 32;
+                        int8_t v0 = (int8_t)(((b_ptr[l].qh[hbits_index] & 3) << 4) | (b_ptr[l].ql[lbits_index] & 0xF)) - 32;
+                        int8_t v1 = (int8_t)((((b_ptr[l].qh[hbits_index] >> 2 ) & 3) << 4) | (b_ptr[l].ql[lbits_index + 256] & 0xF)) - 32;
+                        int8_t v2 = (int8_t)((((b_ptr[l].qh[hbits_index] >> 4 ) & 3) << 4) | ((b_ptr[l].ql[lbits_index] >> 4) & 0xF)) - 32;
+                        int8_t v3 = (int8_t)((((b_ptr[l].qh[hbits_index] >> 6 ) & 3) << 4) | ((b_ptr[l].ql[lbits_index + 256] >> 4) & 0xF)) - 32;
 
                         sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i]);
                         sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i + 32]);
@@ -684,6 +684,7 @@
             }
 
             for (int j = 0; j < ncols_interleaved; j++) {
                 s[x * ncols_interleaved + j] = sumf[j];
+            }
         }
     }
@@ -1240,21 +1241,21 @@ void ggml_gemm_q6_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs,
                         int offset = ((k / 2) % 2) + j * 2;
                         for (int i = 0; i < blocklen; ++i){
                             const int hbits_index = k * ncols_interleaved * blocklen + j * blocklen + i;
-                            const int lbits_index = (hbits_index / 32) * 64 + (hbits_index % 32);
+                            const int lbits_index = hbits_index + (k/4) * 256;
 
-                            int8_t v0 = (int8_t)((b_ptr[l].qh[hbits_index] & 3) << 4) | (b_ptr[l].ql[lbits_index] & 0xF) - 32;
-                            int8_t v1 = (int8_t)(((b_ptr[l].qh[hbits_index] >> 2 ) & 3) << 4) | (b_ptr[l].ql[lbits_index + 32] & 0xF) - 32;
-                            int8_t v2 = (int8_t)(((b_ptr[l].qh[hbits_index] >> 4 ) & 3) << 4) | ((b_ptr[l].ql[lbits_index] >> 4) & 0xF) - 32;
-                            int8_t v3 = (int8_t)(((b_ptr[l].qh[hbits_index] >> 6 ) & 3) << 4) | ((b_ptr[l].ql[lbits_index + 32] >> 4) & 0xF) - 32;
+                            int8_t v0 = (int8_t)(((b_ptr[l].qh[hbits_index] & 3) << 4) | (b_ptr[l].ql[lbits_index] & 0xF)) - 32;
+                            int8_t v1 = (int8_t)((((b_ptr[l].qh[hbits_index] >> 2 ) & 3) << 4) | (b_ptr[l].ql[lbits_index + 256] & 0xF)) - 32;
+                            int8_t v2 = (int8_t)((((b_ptr[l].qh[hbits_index] >> 4 ) & 3) << 4) | ((b_ptr[l].ql[lbits_index] >> 4) & 0xF)) - 32;
+                            int8_t v3 = (int8_t)((((b_ptr[l].qh[hbits_index] >> 6 ) & 3) << 4) | ((b_ptr[l].ql[lbits_index + 256] >> 4) & 0xF)) - 32;
 
                             sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i]);
                             sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]);
                             sumi3 = (v2 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 256]);
                             sumi4 = (v3 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 384]);
-                            sumi1 = sumi1 * (scales_0[offset] & 0xF);
-                            sumi2 = sumi2 * (scales_1[offset] & 0xF);
-                            sumi3 = sumi3 * (scales_2[offset] & 0xF);
-                            sumi4 = sumi4 * (scales_3[offset] & 0xF);
+                            sumi1 = sumi1 * (scales_0[offset]);
+                            sumi2 = sumi2 * (scales_1[offset]);
+                            sumi3 = sumi3 * (scales_2[offset]);
+                            sumi4 = sumi4 * (scales_3[offset]);
                             sumi += sumi1 + sumi2 + sumi3 + sumi4;
                         }
                         sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m];
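
Note on the two behavioral changes above (a standalone sketch, not part of the patch; all values below are made up for illustration). The v0-v3 rewrite is an operator-precedence fix: in C, binary "-" binds tighter than "|", and the (int8_t) cast covered only the high-bits term, so the old expressions computed hi | (lo - 32) instead of (hi | lo) - 32. The scales change matters because Q6_K sub-block scales are stored as signed 8-bit values, so masking them with 0xF silently truncated (and sign-mangled) them.

// Standalone illustration of both issues; not part of the patch.
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint8_t qh = 0x03;  // two high bits of a 6-bit quant (arbitrary value)
    uint8_t ql = 0x05;  // four low bits (arbitrary value)

    // Old form: '-' binds tighter than '|', and the cast only covers the
    // high-bit term, so this evaluates as 0x30 | (5 - 32) = -11.
    int8_t v_old = (int8_t)((qh & 3) << 4) | (ql & 0xF) - 32;

    // Fixed form: assemble the 6-bit value first, then subtract the
    // Q6_K zero point of 32: (0x30 | 5) - 32 = 21.
    int8_t v_new = (int8_t)(((qh & 3) << 4) | (ql & 0xF)) - 32;

    // Q6_K scales are signed 8-bit; masking with 0xF mangles negative
    // scales, e.g. -9 (0xF7) becomes 7.
    int8_t scale = -9;

    printf("v_old=%d v_new=%d scale&0xF=%d scale=%d\n",
           v_old, v_new, scale & 0xF, scale);
    // prints: v_old=-11 v_new=21 scale&0xF=7 scale=-9
    return 0;
}

The lbits_index change, by contrast, is a layout fix rather than a C-semantics one: per the new indexing visible in the diff, the companion low-nibble byte of the interleaved ql array sits 256 bytes away rather than 32.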