Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-01-26 17:20:01 +00:00)
Enable CUDA graphs for MoE models + GPT-OSS support (#689)
* gpt-oss: common
* gpt-oss: attention sinks, swiglu_oai
* gpt-oss: WIP llama
  Model loads and runs (CPU only), but PPL is much too high (~1500 for the 1st batch vs ~200 in mainline). Is it because of SWA, because of the vocab, or did I introduce a bug somewhere?
* gpt-oss: CPU seems to be working
  It was the SWA that was missing in the previous commit. There are issues with EOG tokens, so this still needs to be added.
* CUDA: ADD_ID
  Just a copy from mainline
* gpt-oss: Seems to be working on CUDA
* gpt-oss: add sinks to the attn-vec kernels
* CUDA: add head size of 64 to new mma
  Haven't turned it on yet, but observe slightly better PP and slightly worse TG performance with that.
* gpt-oss: add ability to use -fmoe (only CUDA for now)
* Move row sums to the right place
* Add sinks to iqk flash attention
* gpt_oss: Implement -fmoe on the CPU
* Simdify swiglu_oai
  Turning it off for now as performance becomes more variable, so perhaps I'm running into thermal throttling more often because of making the CPU work too hard.
* llama: factor out model loader
* Builds successfully
* It runs, but mmap does not work
* Fix llama_mmap so mmap works
* Minor
* Fix CUDA after latest changes
* Attempt to use CUDA graphs with MoE models - not working
* CUDA graphs WIP - still not working
* CUDA graphs - seems to be working
  Likely not all MLA variants are working. I no longer remember why I added the q8_0 cpy that transposes the tensor, but if really needed, this is now missing. Also missing is q6_0.
* Make q8_0 cache work for DeepSeek models with CUDA graphs
* cuda: cpy for q6_0
* Fix llama_mmap on non-Linux platforms
* Adding forgotten file
* Iterating on Windows build failures
* cuda: re-add q8_0 -> q8_0 transpose so mla = 2 can be used with CUDA graphs and q8_0 cache.
* Disable graphs without -fmoe
* Minor
* Turn graphs on by default
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
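Two of the gpt-oss specific pieces mentioned above lend themselves to small illustrations. First, swiglu_oai is the clamped SwiGLU variant used by gpt-oss FFN layers; the scalar sketch below shows that formulation only and is not the SIMD/CUDA kernel from this commit, and the function name plus the alpha/limit defaults (about 1.702 and 7.0, as in public gpt-oss reference code) are assumptions.

#include <algorithm>
#include <cmath>

// Scalar reference for the clamped SwiGLU ("swiglu_oai") variant; the name and
// the alpha/limit defaults are assumptions, not taken from this commit.
static void swiglu_oai_ref(const float * gate, const float * up, float * dst, int n,
                           float alpha = 1.702f, float limit = 7.0f) {
    for (int i = 0; i < n; ++i) {
        const float g = std::min(gate[i], limit);            // gate is clamped from above only
        const float u = std::clamp(up[i], -limit, limit);    // linear branch is clamped both ways
        const float s = g / (1.0f + std::exp(-alpha * g));   // SiLU with slope alpha
        dst[i] = s * (u + 1.0f);                             // extra +1 bias on the linear branch
    }
}

The clamping and the +1 on the linear branch are what distinguish this from plain SwiGLU. Second, the attention sinks referenced in the attn-vec and flash-attention bullets add one learned per-head logit to the softmax normalization without contributing a value vector; a minimal sketch of that softmax, again illustrative rather than the actual kernels:

#include <algorithm>
#include <cmath>

// Softmax over attention logits with a per-head "sink" logit that enters the
// denominator only, so it soaks up probability mass; illustrative sketch.
static void softmax_with_sink(const float * logits, int n, float sink, float * w) {
    float m = sink;
    for (int i = 0; i < n; ++i) m = std::max(m, logits[i]);   // running max for numerical stability
    float denom = std::exp(sink - m);                         // the sink term
    for (int i = 0; i < n; ++i) {
        w[i] = std::exp(logits[i] - m);
        denom += w[i];
    }
    for (int i = 0; i < n; ++i) w[i] /= denom;                // rows now sum to less than 1
}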
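For the CUDA graphs side, the core mechanism is to capture the per-token kernel sequence once and then replay it with a single launch. The sketch below uses the plain CUDA runtime API to illustrate that mechanism under stated assumptions; the helper and variable names are hypothetical and this is not the ggml-cuda integration added by the commit.

#include <cuda_runtime.h>

// launch_decode_kernels is a hypothetical stand-in for the full per-token
// kernel sequence of the model's compute graph.
static void eval_with_cuda_graph(cudaStream_t stream, bool & have_graph, cudaGraphExec_t & instance,
                                 void (*launch_decode_kernels)(cudaStream_t)) {
    if (!have_graph) {
        cudaGraph_t graph;
        // Record the whole kernel sequence once ...
        cudaStreamBeginCapture(stream, cudaStreamCaptureModeRelaxed);
        launch_decode_kernels(stream);
        cudaStreamEndCapture(stream, &graph);
        cudaGraphInstantiate(&instance, graph, nullptr, nullptr, 0);
        cudaGraphDestroy(graph);
        have_graph = true;
    }
    // ... then replay it with one launch per token, cutting per-kernel launch
    // overhead. The captured graph has to be rebuilt (or its nodes updated)
    // whenever shapes or kernel parameters change, which is why MoE routing
    // and the -fmoe fused path are the tricky part.
    cudaGraphLaunch(instance, stream);
}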
@@ -24,9 +24,9 @@ class common_chat_msg_parser {
        std::string prelude;
        std::vector<common_string_range> groups;
    };

    common_chat_msg_parser(const std::string & input, bool is_partial, const common_chat_syntax & syntax);

    // Accessors
    const std::string & input() const { return input_; }
    size_t pos() const { return pos_; }
@@ -42,7 +42,7 @@ class common_chat_msg_parser {
        }
        pos_ = pos;
    }

    void move_back(size_t n) {
        if (pos_ < n) {
            throw std::runtime_error("Can't move back that far!");
@@ -56,46 +56,46 @@ class common_chat_msg_parser {
    // Content manipulation
    void add_content(const std::string & content);
    void add_reasoning_content(const std::string & reasoning_content);

    // Tool call manipulation
    void add_tool_call(const common_chat_tool_call & tool_call);
    bool add_tool_call(const std::string & name, const std::string & id, const std::string & arguments);
    bool add_tool_call(const json & tool_call);
    bool add_tool_calls(const json & arr);
    void clear_tools();

    // Parsing utilities
    std::string consume_rest();
    bool try_consume_literal(const std::string & literal);
    void consume_literal(const std::string & literal);
    bool try_parse_reasoning(const std::string & start_think, const std::string & end_think);

    // Regex-based parsing methods (new)
    std::optional<find_regex_result> try_find_regex(const common_regex & regex, size_t from = std::string::npos, bool add_prelude_to_content = true);
    find_regex_result consume_regex(const common_regex & regex);
    std::optional<find_regex_result> try_consume_regex(const common_regex & regex);

    // Progressive parsing primitives (for Phase 4)
    std::optional<find_regex_result> try_find_literal(const std::string & literal);
    bool consume_spaces();
    void set_healing_marker(const std::string & marker);


    // Main parsing entry point
    void parse();

    // Finishing
    void finish();

    // Result extraction
    common_chat_msg result_and_reset();

    // Advanced JSON parsing (following original llama.cpp patterns)
    struct consume_json_result {
        json value;
        bool is_partial;
    };

    std::optional<common_json> try_consume_json();
    common_json consume_json();
    consume_json_result consume_json_with_dumped_args(
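To show how the primitives declared above fit together, here is a hedged sketch of a format-specific parser written only against the public methods whose signatures are visible in this header. The function itself, the regex pattern, the tag strings, and the hard-coded tool call are illustrative; this is not the actual parse_*_format() code, and it assumes common_regex can be constructed from a pattern string.

// Illustrative control flow only.
static void parse_example_format(common_chat_msg_parser & builder) {
    // Split off <think>...</think> reasoning, if the model emitted any.
    builder.try_parse_reasoning("<think>", "</think>");

    // Look for a tool-call opening tag; with add_prelude_to_content left at its
    // default, the text before the tag is presumably routed to content.
    static const common_regex tool_open("<tool_call>");
    if (!builder.try_find_regex(tool_open)) {
        builder.add_content(builder.consume_rest());   // plain content-only message
        return;
    }

    // A real parser would now read the JSON payload, e.g. via
    // consume_json_with_dumped_args(); the values below are made up.
    builder.add_tool_call(/*name*/ "get_weather", /*id*/ "", /*arguments*/ "{\"city\":\"Sofia\"}");
}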
@@ -112,8 +112,8 @@ private:
    void parse_kimi_k2_format();
    void parse_deepseek_r1_format();
    void parse_generic_format();


    // JSON parsing utilities (enhanced streaming support)
    struct json_parse_result {
        json value;
@@ -121,11 +121,11 @@ private:
        bool is_partial;
        std::string healing_marker;
    };

    // Partial detection utilities
    bool detect_partial_function_call(const std::string& content);
    void handle_partial_detection();

    // Legacy find_literal for compatibility
    std::optional<find_regex_result> try_find_literal_legacy(const std::string & literal);
};
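The partial-detection and healing-marker members above exist because streamed model output routinely stops in the middle of a JSON object. As a rough illustration of the kind of check detect_partial_function_call() could perform (a guess at intent, not the actual implementation):

#include <string>

// Naive balance check: more opened than closed braces/brackets suggests the
// tool-call JSON is still being streamed. Hypothetical sketch; it ignores
// braces inside string literals, which a real implementation must handle.
static bool looks_like_partial_json(const std::string & s) {
    int depth = 0;
    for (char c : s) {
        if (c == '{' || c == '[') depth++;
        if (c == '}' || c == ']') depth--;
    }
    return depth > 0;
}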
@@ -133,4 +133,4 @@ private:
// Main parsing function (public API)
common_chat_msg common_chat_parse(const std::string & input, bool is_partial, const common_chat_syntax & syntax);

// Content-only parsing for fallback scenarios (static internal function)