mirror of https://github.com/ikawrakow/ik_llama.cpp.git
Fix CPU + CUDA
but CUDA is somehow not 100% correct as I get a slightly different PPL (lower!)
@@ -3173,7 +3173,16 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
                 ggml_cuda_op_relu(ctx, dst);
                 break;
             case GGML_UNARY_OP_SIGMOID:
-                ggml_cuda_op_sigmoid(ctx, dst);
+                if (i + 4 < cgraph->n_nodes &&
+                    cgraph->nodes[i+1]->op == GGML_OP_RESHAPE &&
+                    cgraph->nodes[i+2]->op == GGML_OP_ADD &&
+                    cgraph->nodes[i+3]->op == GGML_OP_GROUPED_TOPK &&
+                    cgraph->nodes[i+4]->op == GGML_OP_GET_ROWS) {
+                    cuda_bailingmoev2_experts(ctx, cgraph->nodes[i+4], cgraph->nodes[i+3]);
+                    i += 4;
+                } else {
+                    ggml_cuda_op_sigmoid(ctx, dst);
+                }
                 break;
             case GGML_UNARY_OP_HARDSIGMOID:
                 ggml_cuda_op_hardsigmoid(ctx, dst);
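
Note on the fused path: the dispatcher peeks ahead in the compute graph and, when it sees the sigmoid -> reshape -> add -> grouped_topk -> get_rows chain produced by the BailingMoeV2 router, replaces all five nodes with one fused call. A minimal standalone sketch of this kind of window matching, with a hypothetical op enum and node type standing in for the ggml ones:

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins for ggml's op enum and graph node; the real code
    // checks cgraph->nodes[i+k]->op in exactly the same way.
    enum Op { OP_SIGMOID, OP_RESHAPE, OP_ADD, OP_GROUPED_TOPK, OP_GET_ROWS, OP_MUL };
    struct Node { Op op; };

    // True if nodes[i .. i+4] form the sigmoid->reshape->add->grouped_topk->get_rows chain.
    static bool matches_router_chain(const std::vector<Node> & nodes, size_t i) {
        return i + 4 < nodes.size() &&
               nodes[i+1].op == OP_RESHAPE &&
               nodes[i+2].op == OP_ADD &&
               nodes[i+3].op == OP_GROUPED_TOPK &&
               nodes[i+4].op == OP_GET_ROWS;
    }

    int main() {
        std::vector<Node> g = {{OP_SIGMOID}, {OP_RESHAPE}, {OP_ADD}, {OP_GROUPED_TOPK}, {OP_GET_ROWS}, {OP_MUL}};
        for (size_t i = 0; i < g.size(); ++i) {
            if (g[i].op == OP_SIGMOID && matches_router_chain(g, i)) {
                std::printf("fusing nodes %zu..%zu\n", i, i + 4);
                i += 4; // skip the fused nodes, like the i += 4 in the dispatcher
            }
        }
        return 0;
    }
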
@@ -25,25 +25,8 @@ struct store {
     constexpr static bool has_thresh = false;
 };
 
-template<ggml_sort_order order, typename Store, typename dst_t>
-static __global__ void k_argsort_f32_T(const float * x, dst_t * dst, const int ncols, int ncols_pad, int ntop, Store s) {
-                                       // int min_experts, float thresh_experts) {
-    // bitonic sort
-    int col = threadIdx.x;
-    int row = blockIdx.y;
-
-    if (col >= ncols_pad) {
-        return;
-    }
-
-    const float * x_row = x + row * ncols;
-    extern __shared__ int dst_row[];
-
-    // initialize indices
-    dst_row[col] = col;
-
-    __syncthreads();
-
+template<ggml_sort_order order>
+static __device__ __forceinline__ void sort(int ncols_pad, int ncols, int col, const float * x_row, int * dst_row) {
     for (int k = 2; k <= ncols_pad; k *= 2) {
         for (int j = k / 2; j > 0; j /= 2) {
             int ixj = col ^ j;
@@ -69,6 +52,28 @@ static __global__ void k_argsort_f32_T(const float * x, dst_t * dst, const int n
             __syncthreads();
         }
     }
+}
+
+template<ggml_sort_order order, typename Store, typename dst_t>
+static __global__ void k_argsort_f32_T(const float * x, dst_t * dst, const int ncols, int ncols_pad, int ntop, Store s) {
+                                       // int min_experts, float thresh_experts) {
+    // bitonic sort
+    int col = threadIdx.x;
+    int row = blockIdx.y;
+
+    if (col >= ncols_pad) {
+        return;
+    }
+
+    const float * x_row = x + row * ncols;
+    extern __shared__ int dst_row[];
+
+    // initialize indices
+    dst_row[col] = col;
+
+    __syncthreads();
+
+    sort<order>(ncols_pad, ncols, col, x_row, dst_row);
 
     if constexpr (Store::has_thresh) {
         __syncthreads();
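
The refactoring above pulls the bitonic compare-exchange network out of k_argsort_f32_T into a reusable __device__ helper sort<order>(), so the new kernels below can share it. For readers unfamiliar with the pattern, here is a serial host-side sketch of the same padded bitonic argsort (an illustration, not repo code); out-of-range pad indices always lose comparisons, mirroring the dst_row[col] >= ncols checks in the kernel:

    #include <cmath>
    #include <cstdio>
    #include <utility>
    #include <vector>

    // Serial bitonic argsort, descending, padded to a power of two.
    // An index >= ncols stands for -inf, so padding sinks to the end.
    static void bitonic_argsort_desc(const float * x, int * idx, int ncols, int ncols_pad) {
        for (int c = 0; c < ncols_pad; ++c) idx[c] = c;
        for (int k = 2; k <= ncols_pad; k *= 2) {
            for (int j = k / 2; j > 0; j /= 2) {
                // within one (k, j) stage all pairs (col, col^j) are disjoint,
                // which is why the CUDA kernel can run them on parallel threads
                for (int col = 0; col < ncols_pad; ++col) {
                    int ixj = col ^ j;
                    if (ixj <= col) continue;
                    float a = idx[col] < ncols ? x[idx[col]] : -INFINITY;
                    float b = idx[ixj] < ncols ? x[idx[ixj]] : -INFINITY;
                    bool forward = (col & k) == 0; // merge direction of this lane
                    if (forward ? (a < b) : (a > b)) std::swap(idx[col], idx[ixj]);
                }
            }
        }
    }

    int main() {
        const float x[5] = {0.3f, 0.9f, 0.1f, 0.7f, 0.5f};
        std::vector<int> idx(8); // next power of two above 5
        bitonic_argsort_desc(x, idx.data(), 5, 8);
        for (int c = 0; c < 5; ++c) std::printf("%d ", idx[c]); // 1 3 4 0 2
        std::printf("\n");
        return 0;
    }
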
@@ -92,7 +97,8 @@ static __global__ void k_argsort_f32_T(const float * x, dst_t * dst, const int n
 }
 
 template<ggml_sort_order order>
-static __global__ void k_topk_sum(const float * x, float * dst, const int ncols, int ncols_pad, int n_top_k) {
+static __global__ void k_argsort_f32_f32_i32(const float * x_biased, const float * x, float * weights, int * ids, const int ncols, int ncols_pad, int ntop,
+                                             size_t nb_ids) {
     // bitonic sort
     int col = threadIdx.x;
     int row = blockIdx.y;
@@ -101,7 +107,7 @@ static __global__ void k_topk_sum(const float * x, float * dst, const int ncols,
         return;
     }
 
-    const float * x_row = x + row * ncols;
+    const float * x_row = x_biased + row * ncols;
     extern __shared__ int dst_row[];
 
     // initialize indices
@@ -109,31 +115,42 @@ static __global__ void k_topk_sum(const float * x, float * dst, const int ncols,
 
     __syncthreads();
 
-    for (int k = 2; k <= ncols_pad; k *= 2) {
-        for (int j = k / 2; j > 0; j /= 2) {
-            int ixj = col ^ j;
-            if (ixj > col) {
-                if ((col & k) == 0) {
-                    if (dst_row[col] >= ncols ||
-                        (dst_row[ixj] < ncols && (order == GGML_SORT_ORDER_ASC ?
-                            x_row[dst_row[col]] > x_row[dst_row[ixj]] :
-                            x_row[dst_row[col]] < x_row[dst_row[ixj]]))
-                    ) {
-                        ggml_cuda_swap(dst_row[col], dst_row[ixj]);
-                    }
-                } else {
-                    if (dst_row[ixj] >= ncols ||
-                        (dst_row[col] < ncols && (order == GGML_SORT_ORDER_ASC ?
-                            x_row[dst_row[col]] < x_row[dst_row[ixj]] :
-                            x_row[dst_row[col]] > x_row[dst_row[ixj]]))
-                    ) {
-                        ggml_cuda_swap(dst_row[col], dst_row[ixj]);
-                    }
-                }
-            }
-            __syncthreads();
-        }
-    }
+    sort<order>(ncols_pad, ncols, col, x_row, dst_row);
+
+    if (col < ntop) {
+        weights[row * ntop + col] = x[row * ncols + dst_row[col]];
+        auto row_ids = (int *)((char *)ids + row*nb_ids);
+        row_ids[col] = dst_row[col];
+    }
+}
+
+template<ggml_sort_order order>
+static __global__ void k_topk_sum(float * x, const float * bias, float * x_p, float * dst, const int ncols, int ncols_pad, int n_top_k) {
+    // bitonic sort
+    int col = threadIdx.x;
+    int row = blockIdx.y;
+
+    if (col >= ncols_pad) {
+        return;
+    }
+
+    float * x_row = x + row * ncols;
+    extern __shared__ int dst_row[];
+
+    // initialize indices
+    dst_row[col] = col;
+    if (bias && x_p) {
+        float * x_p_row = x_p + row * ncols;
+        if (col < ncols) {
+            x_row[col] = 1/(1 + expf(-x_row[col]));
+            x_p_row[col] = x_row[col] + bias[col];
+        }
+        x_row = x_p_row;
+    }
+
+    __syncthreads();
+
+    sort<order>(ncols_pad, ncols, col, x_row, dst_row);
 
     float val = col < n_top_k ? x_row[dst_row[col]] : 0;
     val = warp_reduce_sum(val);
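
k_argsort_f32_f32_i32 is the interesting new kernel: it ranks a row by the biased scores (x_biased) but writes the unbiased values (x) as the routing weights of the selected experts. That is how bias-based sigmoid routers work: sigmoid(logit) + per-expert bias decides which experts win, while the plain sigmoid(logit) is what scales the expert outputs. A host-side sketch of those semantics, under that reading:

    #include <algorithm>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    // Select ntop experts by biased score, but emit the unbiased value as the weight.
    static void topk_biased(const std::vector<float> & biased, const std::vector<float> & plain,
                            int ntop, std::vector<int> & ids, std::vector<float> & weights) {
        std::vector<int> idx(biased.size());
        std::iota(idx.begin(), idx.end(), 0);
        std::partial_sort(idx.begin(), idx.begin() + ntop, idx.end(),
                          [&](int a, int b) { return biased[a] > biased[b]; });
        ids.assign(idx.begin(), idx.begin() + ntop);
        weights.resize(ntop);
        for (int j = 0; j < ntop; ++j) weights[j] = plain[ids[j]]; // weight stays unbiased
    }

    int main() {
        const std::vector<float> plain  = {0.70f, 0.40f, 0.60f}; // sigmoid(logit)
        const std::vector<float> biased = {0.70f, 0.90f, 0.65f}; // sigmoid(logit) + bias
        std::vector<int> ids; std::vector<float> weights;
        topk_biased(biased, plain, 2, ids, weights);
        for (size_t j = 0; j < ids.size(); ++j) std::printf("expert %d, weight %.2f\n", ids[j], weights[j]);
        // expert 1 wins on its bias but keeps weight 0.40; expert 0 follows with 0.70
        return 0;
    }
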
@@ -208,6 +225,29 @@ static void argsort_f32_T_cuda(const float * x, dst_t * dst, const int ncols, co
     }
 }
 
+static void argsort_f32_f32_i32_cuda(const float * x_biased, const float * x, float * weights, int * ids, const int ncols, const int nrows, int ntop,
+                                     size_t nb_ids, ggml_sort_order order, cudaStream_t stream) {
+    // bitonic sort requires ncols to be power of 2
+    const int ncols_pad = next_power_of_2(ncols);
+
+    const dim3 block_dims(ncols_pad, 1, 1);
+    const dim3 block_nums(1, nrows, 1);
+    const size_t shared_mem = ncols_pad * sizeof(int);
+
+    // FIXME: this limit could be raised by ~2-4x on Ampere or newer
+    GGML_ASSERT(shared_mem <= ggml_cuda_info().devices[ggml_cuda_get_device()].smpb);
+
+    if (order == GGML_SORT_ORDER_ASC) {
+        k_argsort_f32_f32_i32<GGML_SORT_ORDER_ASC><<<block_nums, block_dims, shared_mem, stream>>>(x_biased, x, weights, ids,
+                                                                                                   ncols, ncols_pad, ntop, nb_ids);
+    } else if (order == GGML_SORT_ORDER_DESC) {
+        k_argsort_f32_f32_i32<GGML_SORT_ORDER_DESC><<<block_nums, block_dims, shared_mem, stream>>>(x_biased, x, weights, ids,
+                                                                                                    ncols, ncols_pad, ntop, nb_ids);
+    } else {
+        GGML_ABORT("fatal error");
+    }
+}
+
 void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const float * src0_d = (const float *)src0->data;
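
The launcher pads ncols to a power of two because the bitonic network only operates on power-of-two sizes, and reserves one int of shared memory per padded column, hence the smpb assert. next_power_of_2 itself is not part of this diff; a typical bit-twiddling formulation (an assumption, not necessarily the repo's version) is:

    #include <cstdint>
    #include <cstdio>

    // Smallest power of two >= x, for x >= 1: smear the top set bit downward.
    static inline uint32_t next_power_of_2(uint32_t x) {
        x -= 1;
        x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
        x |= x >> 8;  x |= x >> 16;
        return x + 1;
    }

    int main() {
        std::printf("%u %u %u\n", next_power_of_2(5), next_power_of_2(8), next_power_of_2(9)); // 8 8 16
        return 0;
    }
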
@@ -246,7 +286,8 @@ void ggml_cuda_op_argsort_thresh(ggml_backend_cuda_context & ctx, ggml_tensor *
     argsort_f32_T_cuda(src0_d, (int *)dst_d, ncols, nrows, ncols, GGML_SORT_ORDER_DESC, min_experts, thresh, stream);
 }
 
-static void ggml_cuda_op_topk_sum(ggml_backend_cuda_context & ctx, const float * src, float * dst, int ncols, int nrows, int n_top_k) {
+static void ggml_cuda_op_topk_sum(ggml_backend_cuda_context & ctx, float * src, const float * bias, float * src_p, float * dst,
+                                  int ncols, int nrows, int n_top_k) {
 
     GGML_ASSERT(n_top_k <= ncols);
@@ -257,7 +298,7 @@ static void ggml_cuda_op_topk_sum(ggml_backend_cuda_context & ctx, const float *
     const size_t shared_mem = std::max(ncols_pad, WARP_SIZE) * sizeof(int);
     GGML_ASSERT(shared_mem <= ggml_cuda_info().devices[ggml_cuda_get_device()].smpb);
 
-    k_topk_sum<GGML_SORT_ORDER_DESC><<<block_nums, block_dims, shared_mem, ctx.stream()>>>(src, dst, ncols, ncols_pad, n_top_k);
+    k_topk_sum<GGML_SORT_ORDER_DESC><<<block_nums, block_dims, shared_mem, ctx.stream()>>>(src, bias, src_p, dst, ncols, ncols_pad, n_top_k);
 }
 
 void ggml_cuda_op_grouped_topk(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -291,7 +332,7 @@ void ggml_cuda_op_grouped_topk(ggml_backend_cuda_context & ctx, ggml_tensor * ds
     CUDA_CHECK(cudaGetLastError());
 #else
     ggml_cuda_pool_alloc<float> group_scores(ctx.pool(), nrows*n_groups);
-    ggml_cuda_op_topk_sum(ctx, (const float *)src->data, group_scores.get(), n_per_group, nrows*n_groups, nk);
+    ggml_cuda_op_topk_sum(ctx, (float *)src->data, nullptr, nullptr, group_scores.get(), n_per_group, nrows*n_groups, nk);
     CUDA_CHECK(cudaGetLastError());
 #endif
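
ggml_cuda_op_topk_sum now takes the raw router logits plus optional bias and scratch pointers: with bias and src_p set, k_topk_sum applies the sigmoid in place and stores sigmoid + bias into src_p before scoring; with both null, as in the grouped_topk call above, the old behaviour is preserved. The score of a group is the sum of its nk largest entries; a scalar sketch of that score:

    #include <algorithm>
    #include <cstdio>
    #include <functional>
    #include <vector>

    // Score of one expert group = sum of its nk largest entries,
    // which is what k_topk_sum reduces with warp_reduce_sum per row.
    static float group_topk_sum(std::vector<float> g, int nk) {
        std::partial_sort(g.begin(), g.begin() + nk, g.end(), std::greater<float>{});
        float s = 0.0f;
        for (int j = 0; j < nk; ++j) s += g[j];
        return s;
    }

    int main() {
        std::printf("%.2f\n", group_topk_sum({0.9f, 0.1f, 0.8f, 0.2f}, 2)); // 1.70
        return 0;
    }
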
@@ -310,3 +351,49 @@ void ggml_cuda_op_grouped_topk(ggml_backend_cuda_context & ctx, ggml_tensor * ds
     argsort_f32_T_cuda((const float *)src->data, (int *)dst->data, ne00, nrows, ne0, GGML_SORT_ORDER_DESC, -1, 0.0f, ctx.stream());
 
 }
+
+void cuda_bailingmoev2_experts(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * topk) {
+    auto topk_src = topk->src[0];
+    auto probs = topk_src->src[0]->src[0];
+    auto bias = topk_src->src[1];
+
+    auto nrows = ggml_nrows(probs);
+
+    int n_groups = topk->op_params[0];
+    int n_top_groups = topk->op_params[1];
+    int nk = topk->op_params[2];
+
+    int ne00 = probs->ne[0];
+    int ne0 = topk->ne[0];
+    GGML_ASSERT(ggml_is_contiguous(probs));
+    GGML_ASSERT(bias->ne[1] == 1);
+    GGML_ASSERT(bias->ne[0] == probs->ne[0]);
+    GGML_ASSERT(ne0 == dst->ne[1]);
+    GGML_ASSERT(ne0 <= ne00);
+    GGML_ASSERT(ne00%n_groups == 0);
+    int n_per_group = ne00/n_groups;
+    GGML_ASSERT(nk <= n_per_group);
+    GGML_ASSERT(n_top_groups <= n_groups);
+    int n_discarded_groups = n_groups - n_top_groups;
+
+    ggml_cuda_pool_alloc<float> group_scores(ctx.pool(), nrows*n_groups);
+    ggml_cuda_op_topk_sum(ctx, (float *)probs->data, (const float *)bias->data, (float *)topk_src->data, group_scores.get(),
+                          n_per_group, nrows*n_groups, nk);
+    CUDA_CHECK(cudaGetLastError());
+
+    ggml_cuda_pool_alloc<int> discarded_groups(ctx.pool(), nrows*n_discarded_groups);
+    argsort_f32_T_cuda(group_scores.get(), discarded_groups.get(), n_groups, nrows, n_discarded_groups, GGML_SORT_ORDER_ASC, -1, 0.0f, ctx.stream());
+    CUDA_CHECK(cudaGetLastError());
+
+    {
+        const dim3 block_dims(WARP_SIZE, 1, 1);
+        const dim3 block_nums(1, nrows, 1);
+        cudaStream_t stream = ctx.stream();
+        k_apply_mask<<<block_nums, block_dims, 0, stream>>>((float *)topk_src->data, discarded_groups.get(), n_discarded_groups, n_per_group, ne00);
+        CUDA_CHECK(cudaGetLastError());
+    }
+
+    argsort_f32_f32_i32_cuda((const float *)topk_src->data, (const float *)probs->data, (float *)dst->data, (int *)topk->data, ne00, nrows, ne0,
+                             topk->nb[1], GGML_SORT_ORDER_DESC, ctx.stream());
+}
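
Taken together, cuda_bailingmoev2_experts runs the whole router in four launches: (1) k_topk_sum fuses the sigmoid and bias into the group scores, (2) an ascending argsort of the group scores yields the n_discarded_groups worst groups, (3) k_apply_mask knocks the biased scores of those groups out of contention, and (4) k_argsort_f32_f32_i32 picks the final ne0 experts and their weights. k_apply_mask itself is not shown in this excerpt; a CPU sketch of its assumed per-row semantics:

    #include <cmath>
    #include <cstdio>

    // Assumed semantics of k_apply_mask for one row of ne00 biased scores:
    // every expert in a discarded group is pushed to -inf so it cannot
    // survive the final descending argsort. (The kernel is not in this diff.)
    static void apply_mask_row(float * biased_row, const int * discarded, int n_discarded, int n_per_group) {
        for (int g = 0; g < n_discarded; ++g) {
            float * grp = biased_row + discarded[g] * n_per_group;
            for (int j = 0; j < n_per_group; ++j) grp[j] = -INFINITY;
        }
    }

    int main() {
        float row[6] = {0.9f, 0.8f, 0.1f, 0.2f, 0.7f, 0.6f}; // 3 groups of 2
        const int discarded[1] = {1};                         // group 1 lost the group ranking
        apply_mask_row(row, discarded, 1, 2);
        for (int j = 0; j < 6; ++j) std::printf("%.1f ", row[j]); // 0.9 0.8 -inf -inf 0.7 0.6
        std::printf("\n");
        return 0;
    }
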
@@ -11,3 +11,5 @@ void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 void ggml_cuda_op_argsort_thresh(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 
 void ggml_cuda_op_grouped_topk(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void cuda_bailingmoev2_experts(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * topk);
@@ -41,47 +41,58 @@ inline std::vector<std::pair<float,int>> & get_work_buffer(size_t size) {
 
 }
 #ifdef __ARM_NEON
-inline float32x4_t v_biased_sigmoid(float32x4_t x, float32x4_t b) {
+inline float32x4_t v_sigmoid(float32x4_t x) {
     const float32x4_t one = vdupq_n_f32(1.0f);
     const float32x4_t zero = vdupq_n_f32(0.0f);
     const float32x4_t neg_x = vsubq_f32(zero, x);
     const float32x4_t exp_neg_x = v_expf(neg_x);
     const float32x4_t one_plus_exp_neg_x = vaddq_f32(one, exp_neg_x);
-    return vaddq_f32(b, vdivq_f32(one, one_plus_exp_neg_x));
+    return vdivq_f32(one, one_plus_exp_neg_x);
 }
 #endif
 #ifdef __AVX2__
-inline __m256 v_biased_sigmoid(__m256 x, __m256 b) {
+inline __m256 v_sigmoid(__m256 x) {
     const __m256 one = _mm256_set1_ps(1);
     const __m256 zero = _mm256_setzero_ps();
     const __m256 neg_x = _mm256_sub_ps(zero, x);
     const __m256 exp_neg_x = v_expf(neg_x);
     const __m256 one_plus_exp_neg_x = _mm256_add_ps(one, exp_neg_x);
-    return _mm256_add_ps(b, _mm256_div_ps(one, one_plus_exp_neg_x));
+    return _mm256_div_ps(one, one_plus_exp_neg_x);
 }
 #endif
 #if defined __AVX512F__ && defined __AVX512DQ__
-inline __m512 v_biased_sigmoid(__m512 x, __m512 b) {
+inline __m512 v_sigmoid(__m512 x) {
     const __m512 one = _mm512_set1_ps(1);
     const __m512 zero = _mm512_setzero_ps();
     const __m512 neg_x = _mm512_sub_ps(zero, x);
     const __m512 exp_neg_x = v_expf(neg_x);
     const __m512 one_plus_exp_neg_x = _mm512_add_ps(one, exp_neg_x);
-    return _mm512_add_ps(b, _mm512_div_ps(one, one_plus_exp_neg_x));
+    return _mm512_div_ps(one, one_plus_exp_neg_x);
 }
 #endif
-inline void biased_sigmoid(int n, const float * x, const float * bias, float * y) {
+inline void biased_sigmoid(int n, const float * x, const float * bias, float * y, float * z) {
     int i = 0;
 #if defined __AVX512F__ && defined __AVX512DQ__
-    for (; i + 15 < n; i += 16) _mm512_storeu_ps(y + i, v_biased_sigmoid(_mm512_loadu_ps(x + i), _mm512_loadu_ps(bias + i)));
+    for (; i + 15 < n; i += 16) {
+        auto v = v_sigmoid(_mm512_loadu_ps(x + i));
+        _mm512_storeu_ps(y + i, _mm512_add_ps(v, _mm512_loadu_ps(bias + i)));
+        _mm512_storeu_ps(z + i, v);
+    }
 #endif
 #if defined __AVX2__ && defined __FMA__
-    for (; i + 7 < n; i += 8) _mm256_storeu_ps(y + i, v_biased_sigmoid(_mm256_loadu_ps(x + i), _mm256_loadu_ps(bias + i)));
+    for (; i + 7 < n; i += 8) {
+        auto v = v_sigmoid(_mm256_loadu_ps(x + i));
+        _mm256_storeu_ps(y + i, _mm256_add_ps(v, _mm256_loadu_ps(bias + i)));
+        _mm256_storeu_ps(z + i, v);
+    }
 #endif
 #ifdef __ARM_NEON
-    for (; i + 3 < n; i += 4) vst1q_f32(y + i, v_biased_sigmoid(vld1q_f32(x + i), vld1q_f32(bias + i)));
 #endif
-    for (; i < n; ++i) y[i] = 1/(1 + expf(-x[i])) + bias[i];
+    for (; i < n; ++i) {
+        z[i] = 1/(1 + expf(-x[i]));
+        y[i] = z[i] + bias[i]; // selection score = sigmoid + bias, matching the SIMD paths above
+    }
 }
 }
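
On the CPU side, biased_sigmoid now fills two arrays per row: z gets sigmoid(x) (later the routing weight) and y gets sigmoid(x) + bias (used only for ranking). A scalar reference of the intended semantics, with a small check:

    #include <cmath>
    #include <cstdio>

    // Reference semantics of the new biased_sigmoid:
    //   z[i] = sigmoid(x[i])          -> becomes the routing weight
    //   y[i] = sigmoid(x[i]) + b[i]   -> used only to rank and select experts
    static void biased_sigmoid_ref(int n, const float * x, const float * b, float * y, float * z) {
        for (int i = 0; i < n; ++i) {
            z[i] = 1.0f / (1.0f + expf(-x[i]));
            y[i] = z[i] + b[i];
        }
    }

    int main() {
        const float x[2] = {0.0f, 2.0f}, b[2] = {0.1f, -0.1f};
        float y[2], z[2];
        biased_sigmoid_ref(2, x, b, y, z);
        std::printf("z = %.3f %.3f, y = %.3f %.3f\n", z[0], z[1], y[0], y[1]);
        // z = 0.500 0.881, y = 0.600 0.781
        return 0;
    }
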
@@ -214,17 +225,18 @@ void iqk_bailingmoev2_experts(struct ggml_tensor * dst, struct ggml_tensor * top
     GGML_ASSERT(nk <= n_per_group);
     GGML_ASSERT(n_top_groups <= n_groups);
 
-    size_t work_size = n_groups + n_per_group*n_top_groups + (ne00 + 1)/2;
+    size_t work_size = n_groups + n_per_group*n_top_groups + ne00;
     auto& aux = get_work_buffer(work_size);
 
     auto groups = aux.data() + n_per_group*n_top_groups;
-    auto values = (float *)(groups + n_groups);
+    auto biased_values = (float *)(groups + n_groups);
+    auto values = biased_values + ne00;
 
     auto bias = (const float *)t_bias->data;
 
     for (int ir = first; ir < last; ++ir) {
         auto data = (const float *)((const char *)probs->data + ir*probs->nb[1]);
-        biased_sigmoid(ne00, data, bias, values);
+        biased_sigmoid(ne00, data, bias, biased_values, values);
         //for (int j = 0; j < ne00; ++j) values[j] = 1/(1 + expf(-data[j])) + bias[j];
         auto weights = (float *)((char *)dst->data + ir*dst->nb[2]);
         auto ids = (int32_t *)((char *)topk->data + ir*topk->nb[1]);
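
The work_size arithmetic follows from aux being a vector of std::pair<float,int>, i.e. eight bytes or two floats per element on the usual ABI: the old layout carved out one float array of ne00 entries from the tail of the buffer, hence (ne00 + 1)/2 extra pairs, while the new layout needs biased_values and values back to back, 2*ne00 floats, hence ne00 extra pairs. A compile-time check of that size assumption:

    #include <utility>
    static_assert(sizeof(std::pair<float,int>) == 2 * sizeof(float), "aux pairs must hold exactly two floats");
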
@@ -237,21 +249,21 @@ void iqk_bailingmoev2_experts(struct ggml_tensor * dst, struct ggml_tensor * top
         }
         if (n_top_groups < n_groups) {
             for (int ig = 0; ig < n_groups; ++ig) {
-                groups[ig] = { group_score(n_per_group, nk, values + ig*n_per_group, (float *)aux.data()), ig };
+                groups[ig] = { group_score(n_per_group, nk, biased_values + ig*n_per_group, (float *)aux.data()), ig };
             }
             std::partial_sort(groups, groups + n_top_groups, groups + n_groups, std::greater<std::pair<float,int>>{});
 
             for (int ig = 0; ig < n_top_groups; ++ig) {
                 int i0 = n_per_group * ig;
                 int j0 = n_per_group * groups[ig].second;
-                for (int j = 0; j < n_per_group; ++j) aux[i0 + j] = { values[j0 + j], j0 + j };
+                for (int j = 0; j < n_per_group; ++j) aux[i0 + j] = { biased_values[j0 + j], j0 + j };
             }
         } else {
-            for (int j = 0; j < ne00; ++j) aux[j] = { values[j], j };
+            for (int j = 0; j < ne00; ++j) aux[j] = { biased_values[j], j };
         }
         std::partial_sort(aux.begin(), aux.begin() + ne0, aux.begin() + n_top_groups*n_per_group, std::greater<std::pair<float,int>>{});
         for (int j = 0; j < ne0; ++j) {
-            weights[j] = aux[j].first;
+            weights[j] = values[aux[j].second];
             ids[j] = aux[j].second;
         }
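
The final loop mirrors the CUDA kernel: ranking still uses the biased scores held in aux, but the emitted weight switches from aux[j].first (the biased score) to values[aux[j].second] (the plain sigmoid). For example, an expert with sigmoid score 0.40 and bias 0.50 is ranked at 0.90, yet contributes weight 0.40 if selected.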