diff --git a/examples/metal/metal.cpp b/examples/metal/metal.cpp
index 77aca94a3ec97..cdfe4bfe97865 100644
--- a/examples/metal/metal.cpp
+++ b/examples/metal/metal.cpp
@@ -40,8 +40,10 @@ int main(int argc, char ** argv) {
     // this allocates all Metal resources and memory buffers
     auto * ctx_metal = ggml_metal_init();
 
-    ggml_metal_add_buffer(ctx_metal, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data));
-    ggml_metal_add_buffer(ctx_metal, "eval", ggml_get_mem_buffer(ctx_eval), ggml_get_mem_size(ctx_eval));
+    const size_t max_size_data = ggml_get_max_tensor_size(ctx_data);
+    const size_t max_size_eval = ggml_get_max_tensor_size(ctx_eval);
+    ggml_metal_add_buffer(ctx_metal, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data), max_size_data);
+    ggml_metal_add_buffer(ctx_metal, "eval", ggml_get_mem_buffer(ctx_eval), ggml_get_mem_size(ctx_eval), max_size_eval);
 
     // main
     {
diff --git a/llama.cpp b/llama.cpp
index c165d3239e63f..dfbb85a682baa 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -19,6 +19,11 @@
 #ifdef GGML_USE_METAL
 #include "ggml-metal.h"
 #endif
+#ifdef GGML_USE_K_QUANTS
+#ifndef QK_K
+#define QK_K 256
+#endif
+#endif
 
 #include <array>
 #include <ctime>
@@ -2491,6 +2496,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         } else {
             new_type = quantized_type;
 #ifdef GGML_USE_K_QUANTS
+            if (quantized_type == GGML_TYPE_Q2_K || quantized_type == GGML_TYPE_Q3_K || quantized_type == GGML_TYPE_Q4_K ||
+                quantized_type == GGML_TYPE_Q5_K || quantized_type == GGML_TYPE_Q6_K) {
+                int nx = tensor.ne.at(0);
+                int ny = tensor.ne.at(1);
+                if (nx % QK_K != 0 || ny % QK_K != 0) {
+                    fprintf(stderr, "\n\n========================= Tensor sizes %d x %d are not divisible by %d\n", nx, ny, QK_K);
+                    fprintf(stderr, "This is required to be able to use k-quants for now!\n");
+                    fprintf(stderr, "========================================================================================\n\n");
+                    throw std::runtime_error("Unsupported tensor size encountered\n");
+                }
+            }
             if (tensor.name == "output.weight") {
                 new_type = GGML_TYPE_Q6_K;
             } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
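
For context, the new guard in llama_model_quantize_internal rejects any 2D tensor whose dimensions are not both multiples of the k-quant super-block size QK_K (256 by default). Below is a minimal standalone sketch of that divisibility rule; the helper name is_k_quantizable and the example shapes are illustrative assumptions, not part of the patch, which performs the check inline.

// Standalone sketch of the divisibility rule enforced by the patch above.
#include <cstdio>

#ifndef QK_K
#define QK_K 256   // k-quant super-block size, matching the patch default
#endif

// A 2D tensor of shape nx x ny can be k-quantized only when both
// dimensions are multiples of the super-block size QK_K.
static bool is_k_quantizable(int nx, int ny) {
    return nx % QK_K == 0 && ny % QK_K == 0;
}

int main() {
    // 4096 = 16 * 256, so a 4096 x 4096 weight matrix passes the check
    printf("4096 x 4096  -> %s\n", is_k_quantizable(4096, 4096)  ? "ok" : "rejected");
    // 50257 % 256 == 81, so a 50257 x 4096 vocab projection would be rejected
    printf("50257 x 4096 -> %s\n", is_k_quantizable(50257, 4096) ? "ok" : "rejected");
    return 0;
}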