mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-01-26 09:09:50 +00:00
* Implement function calling / tools for ik_llama.cpp for Kimi K2
* Implement basic tool choice
* Backport llama.cpp tool calls support
* Enhance function calls with improved chat parser and string utilities
- Add new chat.h/chat.cpp and chat-parser.h/chat-parser.cpp for better chat handling
- Improve function calls parsing with fallback to llama.cpp builder pattern
- Add string utility functions (starts_with, ends_with, find_partial_stop)
- Update README with function calls testing instructions
- Enhance Kimi K2 parser and function calls documentation
- Add comprehensive test suite for function calls
- Update CMakeLists.txt and Makefile for new components
* Enhance function calling with unified streaming and parser improvements
- Fix streaming content cleanup to prevent function syntax in output
- Unify content extraction patterns with llama.cpp approach
- Improve Kimi K2 parser robustness and partial content handling
- Add comprehensive test coverage for function call scenarios
- Optimize chat message parsing and diff computation
* Replace hardcoded values in kimi_k2_parser.hpp with named constants
- Add compile-time constants for all token format markers
- Add compile-time constants for XML format markers
- Add compile-time constants for simple format patterns
- Replace all hardcoded string literals with named constants
- Use compile-time length calculation to avoid manual counting
- Improve maintainability and reduce magic numbers throughout parser
* Fix duplicate common_chat_parse definition
- Remove duplicate implementation from chat-parser.cpp
- Keep single implementation in chat.cpp following llama.cpp patterns
- Resolves linker error: multiple definition of common_chat_parse
* Fix JSON assertion failure in function call parsing
- Add proper validation that 'function' field is an object before accessing nested keys
- Handle missing 'arguments' field gracefully with default "{}"
- Prevents crash when parsing malformed tool call JSON structures
* Add comprehensive Qwen3 XML tool calling support with unit tests
- Implement Qwen3 XML parser with <tool_call>{"name": "func", "arguments": {...}}</tool_call> format
- Add model detection and routing for Qwen3 vs Kimi-K2 formats
- Create 8 comprehensive unit tests covering parsing, streaming, error handling
- Fix token format cleaning bug in kimi_k2_parser.hpp processing order
- Remove progressive parsing code and related utilities
- Add tool injection support for Qwen3 format in server utils
* Add DeepSeek R1 function calling support with comprehensive unit tests
- Implement complete DeepSeek R1 tool call parsing in common_chat_parser.cpp
- Add DeepSeek R1 model detection and tool injection in deepseek_r1_tools.hpp
- Update function_calls.hpp with DeepSeek R1 integration and content extraction
- Update documentation to reflect support for Kimi-K2, Qwen3, and DeepSeek R1 models
- Add comprehensive unit tests for DeepSeek R1 reasoning, tool calls, and integration
- Port exact implementation patterns from original llama.cpp for compatibility
Key features:
- Native DeepSeek R1 format: <|tool▁calls▁begin|>function<|tool▁sep|>name```json{}```<|tool▁call▁end|><|tool▁calls▁end|>
- Reasoning content extraction from <think>...</think> tags
- Multiple tool calls support with separate call blocks
- Model detection for deepseek-r1, deepseek_r1 naming patterns
- Integration with incremental parsing and streaming support
* Add partial parsing support for JSON and regex
- json-partial.h/cpp: JSON partial parsing functionality
- regex-partial.h/cpp: Regex partial parsing functionality
* Add format_chat integration tests for Qwen3 tool injection
- Add test_qwen3_format_chat_integration() to validate tool injection pipeline
- Test tool injection conditions and system message enhancement
- Verify JSON formatting and anti-preamble instructions
- Add comprehensive test documentation
Tests confirm tool injection works correctly - conversational preamble
issue is not in ik_llama.cpp but likely in UI configuration.
* Fix Qwen3 tool call parsing - pass model name to parser
Server was not passing model name to parse_chat_message_incremental(),
causing Qwen3 to fall back to Kimi-K2 parser and return tool calls
as content instead of proper tool_calls array.
* Fix non-streaming path to use model-specific parsing
Non-streaming responses were hardcoded to use Kimi-K2 format,
causing Qwen3 XML tool calls to be returned as content instead
of proper tool_calls array. Now uses same model detection as
streaming path for consistency.
165 lines
5.0 KiB
C++
165 lines
5.0 KiB
C++
// Chat support with builder pattern for llama.cpp compatibility
|
|
#pragma once

#include "common.h"

#include <cstddef>
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>
|
|
|
|
// Forward declarations
|
|
struct common_chat_templates;
|
|
|
|
// Basic data structures compatible with original llama.cpp
|
|
// Half-open [begin, end) offset range within a string.
struct common_string_range {
    size_t begin;  // inclusive start offset
    size_t end;    // exclusive end offset

    // A range may be empty (b == e) but never inverted.
    common_string_range(size_t b, size_t e) : begin(b), end(e) {
        if (b > e) {
            throw std::runtime_error("Invalid range");
        }
    }

    // prevent default ctor — bounds must always be given explicitly
    common_string_range() = delete;

    // True when the range covers zero characters.
    bool empty() const {
        return begin == end;
    }

    // Ranges compare equal when both bounds match.
    bool operator==(const common_string_range & other) const {
        return other.begin == begin && other.end == end;
    }
};
|
|
|
|
// A single tool/function invocation extracted from a model response.
struct common_chat_tool_call {
    std::string name;       // function name
    std::string arguments;  // raw argument string (JSON text, not parsed here)
    std::string id;         // call id; may be empty until assigned

    // Field-wise equality (comparison order has no observable effect).
    bool operator==(const common_chat_tool_call & other) const {
        return id == other.id
            && name == other.name
            && arguments == other.arguments;
    }

    bool operator!=(const common_chat_tool_call & other) const {
        return !(*this == other);
    }
};
|
|
|
|
// One typed fragment of a structured message body.
struct common_chat_msg_content_part {
    std::string type;  // part kind (e.g. "text") — presumably OpenAI-style parts; verify against callers
    std::string text;  // payload of this part

    // Equal when both the kind and the payload match.
    bool operator==(const common_chat_msg_content_part & other) const {
        return text == other.text && type == other.type;
    }
};
|
|
|
|
struct common_chat_msg {
|
|
std::string role;
|
|
std::string content;
|
|
std::vector<common_chat_msg_content_part> content_parts = {};
|
|
std::vector<common_chat_tool_call> tool_calls = {};
|
|
std::string reasoning_content;
|
|
std::string tool_name;
|
|
std::string tool_call_id;
|
|
|
|
bool empty() const {
|
|
return content.empty() && content_parts.empty() && tool_calls.empty() &&
|
|
reasoning_content.empty() && tool_name.empty() && tool_call_id.empty();
|
|
}
|
|
|
|
void ensure_tool_call_ids_set(std::vector<std::string> & ids_cache, const std::function<std::string()> & gen_tool_call_id) {
|
|
for (auto i = 0u; i < tool_calls.size(); i++) {
|
|
if (ids_cache.size() <= i) {
|
|
auto id = tool_calls[i].id;
|
|
if (id.empty()) {
|
|
id = gen_tool_call_id();
|
|
}
|
|
ids_cache.push_back(id);
|
|
}
|
|
tool_calls[i].id = ids_cache[i];
|
|
}
|
|
}
|
|
|
|
bool operator==(const common_chat_msg & other) const {
|
|
return role == other.role
|
|
&& content == other.content
|
|
&& content_parts == other.content_parts
|
|
&& tool_calls == other.tool_calls
|
|
&& reasoning_content == other.reasoning_content
|
|
&& tool_name == other.tool_name
|
|
&& tool_call_id == other.tool_call_id;
|
|
}
|
|
|
|
bool operator!=(const common_chat_msg & other) const {
|
|
return !(*this == other);
|
|
}
|
|
};
|
|
|
|
struct common_chat_msg_diff {
|
|
std::string reasoning_content_delta;
|
|
std::string content_delta;
|
|
size_t tool_call_index = std::string::npos;
|
|
common_chat_tool_call tool_call_delta;
|
|
|
|
static std::vector<common_chat_msg_diff> compute_diffs(const common_chat_msg & previous_msg, const common_chat_msg & new_msg);
|
|
|
|
bool operator==(const common_chat_msg_diff & other) const {
|
|
return content_delta == other.content_delta
|
|
&& tool_call_index == other.tool_call_index
|
|
&& tool_call_delta == other.tool_call_delta;
|
|
}
|
|
|
|
bool operator!=(const common_chat_msg_diff & other) const {
|
|
return !(*this == other);
|
|
}
|
|
};
|
|
|
|
// Description of a callable tool as advertised to the model.
struct common_chat_tool {
    std::string name;         // function name exposed to the model
    std::string description;  // human-readable summary of the tool
    std::string parameters;   // raw string — presumably a JSON schema of the arguments; verify against callers
};
|
|
|
|
// Request-level tool-choice policy (OpenAI-style "tool_choice").
// Kept as a plain enum so existing unscoped uses of the enumerators compile.
enum common_chat_tool_choice {
    COMMON_CHAT_TOOL_CHOICE_AUTO,      // model decides whether to call a tool
    COMMON_CHAT_TOOL_CHOICE_REQUIRED,  // model must produce a tool call
    COMMON_CHAT_TOOL_CHOICE_NONE,      // tool calling disabled
};
|
|
|
|
// Chat output formats this parser family can handle, selected per model.
enum common_chat_format {
    COMMON_CHAT_FORMAT_CONTENT_ONLY,  // plain content, no tool-call markup
    COMMON_CHAT_FORMAT_GENERIC,
    COMMON_CHAT_FORMAT_DEEPSEEK_R1,   // DeepSeek R1 native tool-call/reasoning format
    COMMON_CHAT_FORMAT_KIMI_K2, // Our custom format (keep last for backward compatibility)
};
|
|
|
|
// Per-request parsing configuration passed to common_chat_parse().
struct common_chat_syntax {
    common_chat_format format = COMMON_CHAT_FORMAT_KIMI_K2;  // default format for this fork
    bool enable_thinking = false;   // when true, extract reasoning content — presumably from <think> tags; confirm in parser
    bool enable_tool_calls = true;  // when false, tool-call markup is not parsed out
};
|
|
|
|
// Exception for partial parsing
|
|
// Exception for partial parsing: thrown when the input looks like an
// incomplete (still-streaming) message rather than a malformed one, so
// callers can wait for more data and retry instead of failing.
class common_chat_msg_partial_exception : public std::runtime_error {
public:
    // explicit: prevent accidental implicit conversion from std::string.
    explicit common_chat_msg_partial_exception(const std::string & message) : std::runtime_error(message) {}
};
|
|
|
|
// Bridge functions to integrate with existing ik_llama.cpp system
// TODO: Uncomment and implement during integration phase
// common_chat_msg ik_to_common_msg(const struct ik_chat_msg & ik_msg);
// struct ik_chat_msg common_to_ik_msg(const common_chat_msg & common_msg);

// Format detection from chat template: chooses a common_chat_format by
// inspecting the template text.
common_chat_format common_chat_format_detect(const std::string & chat_template);

// Human-readable name of a format value (for logging/diagnostics).
const char* common_chat_format_name(common_chat_format format);

// Main parsing function (entry point for original llama.cpp compatibility).
// Parses raw model output into a structured message; is_partial marks
// still-streaming input, syntax selects the format and parsing options.
// Single implementation lives in chat.cpp.
common_chat_msg common_chat_parse(const std::string & input, bool is_partial, const common_chat_syntax & syntax);

// Forward declare parser class (full definition in chat-parser.h)
class common_chat_msg_parser;
|
|
|