Merge mainline llama.cpp (#3)
* Merging mainline - WIP

* Merging mainline - WIP

  AVX2 and CUDA appear to work. CUDA performance seems slightly (~1-2%) lower, as is so often the case with llama.cpp/ggml after some "improvements" have been made.

* Merging mainline - fix Metal

* Remove check

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
@@ -92,6 +92,11 @@ static bool gguf_ex_read_0(const std::string & fname) {
     struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);
 
+    if (!ctx) {
+        fprintf(stderr, "%s: failed to load '%s'\n", __func__, fname.c_str());
+        return false;
+    }
+
     printf("%s: version: %d\n", __func__, gguf_get_version(ctx));
     printf("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
     printf("%s: data offset: %zu\n", __func__, gguf_get_data_offset(ctx));
 
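The added guard matters because gguf_init_from_file returns NULL when the file cannot be opened or parsed; without the check, the printf calls that follow would dereference a NULL context. Below is a minimal standalone sketch of the same read path, assuming the ggml gguf API as used in this example (the read_header helper and main are hypothetical names, not part of the patch):

```cpp
#include <cstdio>

#include "ggml.h" // gguf_* API as used by this example

// Hypothetical helper mirroring the patched gguf_ex_read_0 above:
// open a GGUF file, print its header fields, and clean up.
static bool read_header(const char * fname) {
    struct gguf_init_params params = {
        /*.no_alloc =*/ false,
        /*.ctx      =*/ NULL, // only inspecting metadata, no tensor-data context needed
    };

    struct gguf_context * ctx = gguf_init_from_file(fname, params);
    if (!ctx) {
        // This is the failure case the new check guards against.
        fprintf(stderr, "failed to load '%s'\n", fname);
        return false;
    }

    printf("version: %d\n",      gguf_get_version(ctx));
    printf("alignment: %zu\n",   gguf_get_alignment(ctx));
    printf("data offset: %zu\n", gguf_get_data_offset(ctx));

    gguf_free(ctx); // release the context once done
    return true;
}

int main(int argc, char ** argv) {
    return (argc > 1 && read_header(argv[1])) ? 0 : 1;
}
```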