diff --git a/ggml/src/ggml-cuda/binbcast.cu b/ggml/src/ggml-cuda/binbcast.cu
index 99a98fcbfcdb3..1c76566344a88 100644
--- a/ggml/src/ggml-cuda/binbcast.cu
+++ b/ggml/src/ggml-cuda/binbcast.cu
@@ -57,7 +57,11 @@ static __global__ void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst
         const int i10 = i0 % ne10;

         float result = src0_row ? (float) src0_row[i0] : 0.0f;
-        result = (..., (result = bin_op(result, (float)src1s[i_src1 + i10])));
+        if constexpr (sizeof...(src1_ptrs) > 0) {
+            result = (..., (result = bin_op(result, (float)src1s[i_src1 + i10])));
+        } else {
+            result = bin_op(result, (float)src1[i_src1 + i10]);
+        }

         dst_row[i0] = (dst_t) result;
     }
@@ -96,7 +100,11 @@ static __global__ void k_bin_bcast_unravel(const src0_t * src0, const src1_t *
         const int i10 = i0 % ne10;

         float result = src0_row ? (float) src0_row[i0] : 0.0f;
-        result = (..., (result = bin_op(result, (float)src1s[i_src1 + i10])));
+        if constexpr (sizeof...(src1_ptrs) > 0) {
+            result = (..., (result = bin_op(result, (float)src1s[i_src1 + i10])));
+        } else {
+            result = bin_op(result, (float)src1[i_src1 + i10]);
+        }

         dst_row[i0] = (dst_t) result;
     }
@@ -231,23 +239,43 @@ static void launch_bin_bcast_pack(const ggml_tensor * src0, const ggml_tensor *
         if (block_nums.z > 65535) {
             int block_num = (ne0 * ne1 * ne2 * ne3 + block_size - 1) / block_size;
-            k_bin_bcast_unravel<bin_op>
-                <<<block_num, block_size, 0, stream>>>(src0_dd, src1_dd, dst_dd,
-                                                       ne0, ne1, ne2, ne3,
-                                                       ne10, ne11, ne12, ne13,
-                                                       /* s0, */ s1, s2, s3,
-                                                       /* s00,*/ s01, s02, s03,
-                                                       /* s10,*/ s11, s12,s13,
-                                                       (const src1_t *) dst->src[I + 1]->data...);
+            if constexpr (sizeof...(I) > 0) {
+                k_bin_bcast_unravel<bin_op>
+                    <<<block_num, block_size, 0, stream>>>(src0_dd, src1_dd, dst_dd,
+                                                           ne0, ne1, ne2, ne3,
+                                                           ne10, ne11, ne12, ne13,
+                                                           /* s0, */ s1, s2, s3,
+                                                           /* s00,*/ s01, s02, s03,
+                                                           /* s10,*/ s11, s12,s13,
+                                                           (const src1_t *) dst->src[I + 1]->data...);
+            } else {
+                k_bin_bcast_unravel<bin_op>
+                    <<<block_num, block_size, 0, stream>>>(src0_dd, src1_dd, dst_dd,
+                                                           ne0, ne1, ne2, ne3,
+                                                           ne10, ne11, ne12, ne13,
+                                                           /* s0, */ s1, s2, s3,
+                                                           /* s00,*/ s01, s02, s03,
+                                                           /* s10,*/ s11, s12,s13);
+            }
         } else {
-            k_bin_bcast<bin_op>
-                <<<block_nums, block_dims, 0, stream>>>(src0_dd, src1_dd, dst_dd,
-                                                        ne0, ne1, ne2, ne3,
-                                                        ne10, ne11, ne12, ne13,
-                                                        /* s0, */ s1, s2, s3,
-                                                        /* s00,*/ s01, s02, s03,
-                                                        /* s10,*/ s11, s12,s13,
-                                                        (const src1_t *) dst->src[I + 1]->data...);
+            if constexpr (sizeof...(I) > 0) {
+                k_bin_bcast<bin_op>
+                    <<<block_nums, block_dims, 0, stream>>>(src0_dd, src1_dd, dst_dd,
+                                                            ne0, ne1, ne2, ne3,
+                                                            ne10, ne11, ne12, ne13,
+                                                            /* s0, */ s1, s2, s3,
+                                                            /* s00,*/ s01, s02, s03,
+                                                            /* s10,*/ s11, s12,s13,
+                                                            (const src1_t *) dst->src[I + 1]->data...);
+            } else {
+                k_bin_bcast<bin_op>
+                    <<<block_nums, block_dims, 0, stream>>>(src0_dd, src1_dd, dst_dd,
+                                                            ne0, ne1, ne2, ne3,
+                                                            ne10, ne11, ne12, ne13,
+                                                            /* s0, */ s1, s2, s3,
+                                                            /* s00,*/ s01, s02, s03,
+                                                            /* s10,*/ s11, s12,s13);
+            }
         }
     }
 }
@@ -327,7 +355,7 @@ static void ggml_cuda_op_bin_bcast(
 }

 void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_repeat>>(dst, dst->src[0], dst, nullptr, dst->src[0]->data, dst->data, ctx.stream());
+    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_repeat, 0>>(dst, dst->src[0], dst, nullptr, dst->src[0]->data, dst->data, ctx.stream());
 }

 void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 6a1b0fc936092..e06f95f0819ed 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -2827,7 +2827,7 @@ static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx,

     const ggml_tensor *add = nullptr;
     if (ops.size() == 3 && ops.begin()[2] == GGML_OP_ADD) {
-        add = cgraph->nodes[node_idx+1];
+        add = cgraph->nodes[node_idx+2];
     }

     GGML_ASSERT(rms_norm->src[0]->type == GGML_TYPE_F32);
diff --git a/ggml/src/ggml-cuda/norm.cu b/ggml/src/ggml-cuda/norm.cu
index 293f6f68e5e52..d5157d958b717 100644
--- a/ggml/src/ggml-cuda/norm.cu
+++ b/ggml/src/ggml-cuda/norm.cu
@@ -127,6 +127,7 @@ static __global__ void rms_norm_f32(const float * x, float * dst,
                                     const int add_nrows = 0,
                                     const int add_nchannels = 0,
                                     const int add_nsamples = 0) {
+
     const int nrows     = gridDim.x;
     const int nchannels = gridDim.y;
@@ -135,6 +136,8 @@
     const int sample = blockIdx.z;
     const int tid    = threadIdx.x;

+    static_assert(!do_add || do_multiply, "fusing add is not supported without multiplying");
+
     x   += sample*stride_sample + channel*stride_channel + row*stride_row;
     dst += ((sample*nchannels + channel)*nrows + row)*ncols;
@@ -185,9 +188,6 @@
         } else if constexpr (do_multiply) {
             const int mul_col = col % mul_ncols;
             dst[col] = scale * x[col] * mul[mul_col];
-        } else if constexpr (do_add) {
-            const int add_col = col % add_ncols;
-            dst[col] += add[add_col];
         } else {
             dst[col] = scale * x[col];
         }
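
Note on the binbcast.cu hunks: the old unguarded fold `result = (..., (result = bin_op(...)))` is ill-formed when the `src1_ptrs` pack is empty, because a unary comma fold over an empty pack collapses to `void()`, which cannot be assigned to `result`. The `if constexpr (sizeof...() > 0)` guards let the zero-operand instantiation (the repeat path, which has no src1 tensors to index via `dst->src[I + 1]`) compile and fall back to the plain `src1` pointer. Below is a minimal standalone sketch of the guard, not the ggml code; `apply_ops`, `op`, and the operands are hypothetical names.

#include <cstdio>

// Sketch of the empty-pack guard used in the patch (hypothetical names).
// With a non-empty pack, the comma fold applies `op` once per pack element;
// with an empty pack the fold would collapse to void(), which cannot be
// assigned to `result`, so that case takes a plain fallback instead.
static float op(float a, float b) { return a + b; }

template <typename... Extra>
static float apply_ops(float init, float fallback, Extra... extra) {
    float result = init;
    if constexpr (sizeof...(extra) > 0) {
        result = (..., (result = op(result, (float) extra)));
    } else {
        result = op(result, fallback);
    }
    return result;
}

int main() {
    std::printf("%g\n", apply_ops(1.0f, 2.0f));              // empty pack: 3
    std::printf("%g\n", apply_ops(1.0f, 2.0f, 3.0f, 4.0f));  // fold: (1+3)+4 = 8
    return 0;
}

Compiled as C++17 (e.g. g++ -std=c++17 or nvcc -std=c++17), this prints 3 and 8.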
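Note on the fusion fixes: in `ggml_cuda_can_fuse`, a three-op pattern {RMS_NORM, MUL, ADD} places the ADD node two entries after `node_idx` (as the `ops.begin()[2]` check implies), so `nodes[node_idx+1]` picked up the MUL node instead; `nodes[node_idx+2]` is the correct index. In norm.cu, the new `static_assert` makes "add without multiply" an invalid instantiation, which is why the `do_add`-only runtime branch could be deleted. A sketch of that compile-time pruning follows, with a hypothetical host-side stub in place of the real `__global__` kernel.

// Sketch: compile-time pruning of an unsupported template configuration,
// mirroring the static_assert added to rms_norm_f32 (hypothetical stub).
template <bool do_multiply, bool do_add>
void rms_norm_stub() {
    static_assert(!do_add || do_multiply, "fusing add is not supported without multiplying");
    // With do_add implying do_multiply, the kernel body only needs a
    // do_multiply path and a plain path; a do_add-only branch cannot occur.
}

int main() {
    rms_norm_stub<false, false>();  // plain rms_norm: ok
    rms_norm_stub<true,  false>();  // fused multiply: ok
    rms_norm_stub<true,  true>();   // fused multiply + add: ok
    // rms_norm_stub<false, true>(); // add without multiply: compile error
    return 0;
}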