mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-04-27 09:53:40 +00:00
server: enable checkpoint for recurrent models
Create checkpoint after cancel; fix ban string and remove context during rewind; add checkpoint interval; only save the recurrent cache.
This commit is contained in:
@@ -32,6 +32,7 @@ struct server_slot {
|
||||
llama_batch batch_spec = {};
|
||||
llama_context * ctx_dft = nullptr;
|
||||
|
||||
bool released = false;
|
||||
slot_state state = SLOT_STATE_IDLE;
|
||||
slot_command command = SLOT_COMMAND_NONE;
|
||||
|
||||
@@ -45,6 +46,7 @@ struct server_slot {
|
||||
int32_t n_ctx = 0; // context size per slot
|
||||
int32_t n_past = 0;
|
||||
int32_t n_past_prompt = 0;
|
||||
int32_t n_past_offset = 0;
|
||||
int32_t n_decoded = 0;
|
||||
int32_t n_remaining = -1;
|
||||
int32_t n_discarded_prompt = 0;
|
||||
@@ -355,4 +357,6 @@ struct server_context {
|
||||
void create_checkpoint(server_slot & slot);
|
||||
|
||||
void apply_checkpoint(server_slot & slot);
|
||||
|
||||
void release_slot_after_final_response(server_slot & slot);
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user