server: stop processing the prompt when client disconnects (#1134)

implement generator-based API for task results

Update httplib.h to 0.27.0

Fix embedding error

Stop prompt processing when disconnected

Co-authored-by: firecoperana <firecoperana>
This commit is contained in:
firecoperana
2026-01-12 23:56:59 -06:00
committed by GitHub
parent d3e3ad40f9
commit 1a461525d5
24 changed files with 7654 additions and 4549 deletions

View File

@@ -28,6 +28,42 @@ int server_queue::post(server_task task) {
return task.id;
}
// Remove every pending task — in both the active and the deferred queue —
// whose id equals id_target. No locking here: this is invoked exclusively
// from post(), which already holds mutex_tasks.
void server_queue::cleanup_pending_task(int id_target) {
    const auto matches_target = [id_target](const server_task& task) {
        return task.id == id_target;
    };
    // erase-remove on each queue in turn
    auto purge = [&matches_target](auto& queue) {
        queue.erase(
            std::remove_if(queue.begin(), queue.end(), matches_target),
            queue.end());
    };
    purge(queue_tasks);
    purge(queue_tasks_deferred);
}
// multi-task version of post(): enqueue a whole batch of tasks under a
// single acquisition of mutex_tasks. Tasks without an id (-1) get a fresh
// one; cancel tasks additionally purge pending tasks for their target.
// Always returns 0.
int server_queue::post(std::vector<server_task>&& tasks, bool front) {
    std::unique_lock<std::mutex> lock(mutex_tasks);
    for (auto& t : tasks) {
        // assign a fresh id to tasks that do not carry one yet
        if (t.id == -1) {
            t.id = id++;
        }
        // a cancel task invalidates its target: drop matching pending tasks first
        if (t.type == SERVER_TASK_TYPE_CANCEL) {
            cleanup_pending_task(t.id_target);
        }
        QUE_DBG("new task, id = %d/%d, front = %d\n", t.id, (int)tasks.size(), front);
        // NOTE(review): with front == true each task is pushed to the head, so a
        // multi-element batch ends up in reverse relative order — confirm intended
        if (front) {
            queue_tasks.push_front(std::move(t));
        } else {
            queue_tasks.push_back(std::move(t));
        }
    }
    condition_tasks.notify_one();
    return 0;
}
void server_queue::defer(server_task&& task) {
std::unique_lock<std::mutex> lock(mutex_tasks);
queue_tasks_deferred.push_back(std::move(task));
@@ -68,7 +104,7 @@ void server_queue::start_loop() {
break;
}
server_task task = std::move(queue_tasks.front());
queue_tasks.erase(queue_tasks.begin());
queue_tasks.pop_front();
lock.unlock();
//LOG_VERBOSE("callback_new_task", { {"id_task", task.id} });
callback_new_task(std::move(task));
@@ -134,13 +170,21 @@ void server_queue::update_multitask(int id_multi, int id_sub, server_task_result
// Register id_task as one the HTTP layer is waiting on; send() only queues
// results whose id is in this set.
void server_response::add_waiting_task_id(int id_task) {
    //LOG_VERBOSE("waiting for task id", { {"id_task", id_task} });
    QUE_DBG("waiting for task id, id = %d\n", id_task);
    // BUGFIX: take mutex_results BEFORE logging waiting_task_ids.size() — the
    // unlocked size() read was a data race (add_waiting_tasks locks first too)
    std::unique_lock<std::mutex> lock(mutex_results);
    SRV_DBG("add task %d to waiting list. current waiting = %d (before add)\n", id_task, (int)waiting_task_ids.size());
    waiting_task_ids.insert(id_task);
}
void server_response::add_waiting_tasks(const std::vector<server_task>& tasks) {
std::unique_lock<std::mutex> lock(mutex_results);
for (const auto& task : tasks) {
SRV_DBG("add task %d to waiting list. current waiting = %d (before add)\n", task.id, (int)waiting_task_ids.size());
waiting_task_ids.insert(task.id);
}
}
void server_response::remove_waiting_task_id(int id_task) {
//LOG_VERBOSE("remove waiting for task id", { {"id_task", id_task} });
QUE_DBG("remove waiting for task id, id = %d\n", id_task);
@@ -153,14 +197,14 @@ server_task_result server_response::recv(int id_task) {
while (true) {
std::unique_lock<std::mutex> lock(mutex_results);
condition_results.wait(lock, [&] {
return !queue_results.empty();
return !queue_results_legacy.empty();
});
for (int i = 0; i < (int)queue_results.size(); i++) {
if (queue_results[i].id == id_task) {
assert(queue_results[i].id_multi == -1);
server_task_result res = queue_results[i];
queue_results.erase(queue_results.begin() + i);
for (int i = 0; i < (int)queue_results_legacy.size(); i++) {
if (queue_results_legacy[i].id == id_task) {
assert(queue_results_legacy[i].id_multi == -1);
server_task_result res = queue_results_legacy[i];
queue_results_legacy.erase(queue_results_legacy.begin() + i);
return res;
}
}
@@ -169,6 +213,41 @@ server_task_result server_response::recv(int id_task) {
// should never reach here
}
// Same as recv(), but with a timeout in seconds: blocks until a result for
// any id in id_tasks is available, returning it; returns nullptr if the
// timeout elapses first. Terminates the process if the queue is stopped
// while a caller is still waiting (the HTTP caller cannot be unwound).
server_task_result_ptr server_response::recv_with_timeout(const std::unordered_set<int>& id_tasks, int timeout) {
    while (true) {
        std::unique_lock<std::mutex> lock(mutex_results);

        // scan the queue for the first result addressed to one of our tasks
        auto it = std::find_if(queue_results.begin(), queue_results.end(),
            [&id_tasks](const server_task_result_ptr& r) {
                return id_tasks.find(r->id) != id_tasks.end();
            });
        if (it != queue_results.end()) {
            server_task_result_ptr res = std::move(*it);
            queue_results.erase(it);
            return res;
        }

        // nothing queued yet: sleep until notified or the timeout elapses
        const std::cv_status status = condition_results.wait_for(lock, std::chrono::seconds(timeout));
        if (!running) {
            SRV_DBG("%s : queue result stop\n", __func__);
            std::terminate(); // we cannot return here since the caller is HTTP code
        }
        if (status == std::cv_status::timeout) {
            return nullptr;
        }
    }
    // should never reach here
}
// Deregister a whole batch of task ids from the waiting set in one locked
// pass; results for ids no longer in the set will be dropped by send().
void server_response::remove_waiting_task_ids(const std::unordered_set<int>& id_tasks) {
    std::unique_lock<std::mutex> lock(mutex_results);
    for (const int id_task : id_tasks) {
        SRV_DBG("remove task %d from waiting list. current waiting = %d (before remove)\n", id_task, (int)waiting_task_ids.size());
        waiting_task_ids.erase(id_task);
    }
}
void server_response::send(server_task_result result) {
//LOG_VERBOSE("send new result", { {"id_task", result.id} });
QUE_DBG("send new result, id = %d\n", result.id);
@@ -184,9 +263,25 @@ void server_response::send(server_task_result result) {
}
if (result.id == id_task) {
//LOG_VERBOSE("queue_results.push_back", { {"id_task", id_task} });
//LOG_VERBOSE("queue_results_legacy.push_back", { {"id_task", id_task} });
QUE_DBG("queue_results.push_back, id = %d\n", id_task);
queue_results.push_back(result);
queue_results_legacy.push_back(std::move(result));
condition_results.notify_all();
return;
}
}
}
// Send a new result to a waiting id_task
void server_response::send(server_task_result_ptr&& result) {
SRV_DBG("sending result for task id = %d\n", result->id);
std::unique_lock<std::mutex> lock(mutex_results);
for (const auto& id_task : waiting_task_ids) {
if (result->id == id_task) {
SRV_DBG("task id = %d pushed to result queue\n", result->id);
queue_results.emplace_back(std::move(result));
condition_results.notify_all();
return;
}