diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index 5b6e6226..9051863b 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -1536,7 +1536,7 @@ static void ggml_cuda_op_mul_mat(
             const size_t nbytes_data    = ggml_nbytes(src0);
             const size_t nbytes_padding = ggml_row_size(src0->type, MATRIX_ROW_PADDING - ne00 % MATRIX_ROW_PADDING);
             dev[id].src0_dd = dev[id].src0_dd_alloc.alloc(ctx.pool(id), nbytes_data + nbytes_padding);
-            CUDA_CHECK(cudaMemsetAsync(dev[id].src0_dd + nbytes_data , 0, nbytes_padding, stream));
+            CUDA_CHECK(cudaMemsetAsync(dev[id].src0_dd, 0, nbytes_data + nbytes_padding, stream));
         }
 
         // If src0 is on a temporary compute buffer (partial offloading) there may be some padding that needs to be cleared:
diff --git a/ggml/src/ggml-cuda/quantize.cu b/ggml/src/ggml-cuda/quantize.cu
index 45408ce8..65c7e5f1 100644
--- a/ggml/src/ggml-cuda/quantize.cu
+++ b/ggml/src/ggml-cuda/quantize.cu
@@ -84,7 +84,8 @@ static __global__ void quantize_mmq_q8_1(
         }
     }
 
-    const float d_inv = 127.0f / amax;
+    const float d = amax/127.f;
+    const float d_inv = d > 0 ? 1/d : 0.f;
     char4 q;
     q.x = roundf(xi.x*d_inv);
     q.y = roundf(xi.y*d_inv);
@@ -106,8 +107,6 @@ static __global__ void quantize_mmq_q8_1(
             return;
         }
 
-        const float d = 1.0f / d_inv;
-
         y[ib].d2s6[iqs/64] = d;
 
         return;
@@ -117,8 +116,6 @@ static __global__ void quantize_mmq_q8_1(
         return;
     }
 
-    const float d = 1.0f / d_inv;
-
     if (ds_layout == MMQ_Q8_1_DS_LAYOUT_DS4) {
         y[ib].ds4[iqs/32] = make_half2(d, sum);
     } else {