Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-05-01 03:41:53 +00:00)
RPC: support multiple devices including cpu (#1024)
* RPC support multiple devices

* rpc : update documentation (#16441)

  Update the README file to match the newly added functionality of exposing
  multiple devices from a single server.

  Co-authored-by: Diego Devesa <slarengh@gmail.com>

  # Conflicts:
  #	examples/rpc/README.md

* Remove memory settings

* rpc : cache and reuse compute graphs (#15405)

  Store the last computed graph and reuse it when possible. Also, do not
  return a response from GRAPH_COMPUTE and assume it always completes
  successfully; if this is not the case, the server closes the connection.
  This saves us a network round trip to the server.

* Add -cpu to include cpu backend

---------

Co-authored-by: firecoperana <firecoperana>
Co-authored-by: Radoslav Gerganov <rgerganov@gmail.com>
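To make the multi-device encoding concrete, here is a small standalone sketch (not part of the commit; the endpoint and device count are assumed) of how a single `--rpc` endpoint fans out into the internal `host:port|index` entries that `add_rpc_devices` builds in the first hunk below:

```cpp
// Standalone illustration only: mirrors the "endpoint|device" encoding used by
// add_rpc_devices() in this commit. Endpoint and device count are made up.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

int main() {
    std::string endpoint = "192.168.88.10:50052"; // hypothetical rpc-server
    uint32_t dev_count = 2; // normally queried via ggml_backend_rpc_get_device_count()
    std::vector<std::string> entries;
    for (uint32_t d = 0; d < dev_count; ++d) {
        entries.push_back(endpoint + "|" + std::to_string(d));
    }
    for (const auto & e : entries) {
        printf("%s\n", e.c_str()); // 192.168.88.10:50052|0 and 192.168.88.10:50052|1
    }
    return 0;
}
```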
@@ -270,6 +270,28 @@ static std::string parse_device_list(const std::string& value) {
     return value;
 }
 
+static std::string add_rpc_devices(std::string& servers) {
+    std::string rpc_devices;
+    std::vector<std::string> rpc_servers = string_split(servers, ",");
+    if (rpc_servers.empty()) {
+        throw std::invalid_argument("no RPC servers specified");
+    }
+    for (auto& server : rpc_servers) {
+        uint32_t dev_count = ggml_backend_rpc_get_device_count(server.c_str());
+        uint32_t device = 0;
+        for (uint32_t i = 0; i < dev_count; ++i) {
+            const auto buft = ggml_backend_rpc_buffer_type(server.c_str(), device);
+            if (buft != nullptr) {
+                rpc_devices = rpc_devices + server + "|" + std::to_string(device) + ",";
+                ++device;
+            }
+        }
+    }
+    if (!rpc_devices.empty()) {
+        rpc_devices = rpc_devices.substr(0, rpc_devices.size() - 1); // remove trailing comma
+    }
+    return rpc_devices;
+}
+
 std::pair<long, std::vector<char>> common_remote_get_content(const std::string& url, const common_remote_params&) {
     if (!url.empty()) {
@@ -1296,15 +1318,12 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
     if (arg == "--rpc") {
         CHECK_ARG
 #ifdef GGML_USE_RPC
-        params.rpc_servers = argv[i];
-        std::string servers(params.rpc_servers);
-        size_t pos = 0;
-        while ((pos = servers.find(",")) != std::string::npos) {
-            std::string server = servers.substr(0, pos);
-            ggml_backend_rpc_buffer_type(server.c_str());
-            servers.erase(0, pos + 1);
-        }
-        ggml_backend_rpc_buffer_type(servers.c_str());
+        std::string servers(argv[i]);
+        servers = add_rpc_devices(servers);
+        if (servers.empty()) {
+            return false;
+        }
+        params.rpc_servers = servers;
 #endif
         return true;
     }
@@ -1319,10 +1338,6 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
     }
     if (arg == "--override-tensor" || arg == "-ot") {
         CHECK_ARG
-        /*for (auto endpoint : params.rpc_servers.split)
-        {
-
-        }*/
         if (!parse_buft_overrides(std::string{ argv[i] }, params.tensor_buft_overrides)) {
             fprintf(stderr, "error: Invalid tensor buffer type override: %s\n", argv[i]);
             invalid_param = true;
@@ -4,7 +4,7 @@
 > This example and the RPC backend are currently in a proof-of-concept development stage. As such, the functionality is fragile and
 > insecure. **Never run the RPC server on an open network or in a sensitive environment!**
 
-The `rpc-server` allows running `ggml` backend on a remote host.
+The `rpc-server` allows exposing `ggml` devices on a remote host.
 The RPC backend communicates with one or several instances of `rpc-server` and offloads computations to them.
 This can be used for distributed LLM inference with `llama.cpp` in the following way:
 
@@ -14,27 +14,34 @@ flowchart TD
     rpcb---|TCP|srvb
     rpcb-.-|TCP|srvn
     subgraph hostn[Host N]
-    srvn[rpc-server]-.-backend3["Backend (CUDA,Metal,etc.)"]
+    srvn[rpc-server]<-.->dev4["CUDA0"]
+    srvn[rpc-server]<-.->dev5["CPU"]
     end
     subgraph hostb[Host B]
-    srvb[rpc-server]---backend2["Backend (CUDA,Metal,etc.)"]
+    srvb[rpc-server]<-->dev3["Metal"]
     end
     subgraph hosta[Host A]
-    srva[rpc-server]---backend["Backend (CUDA,Metal,etc.)"]
+    srva[rpc-server]<-->dev["CUDA0"]
+    srva[rpc-server]<-->dev2["CUDA1"]
     end
     subgraph host[Main Host]
-    ggml[llama.cpp]---rpcb[RPC backend]
+    local["Local devices"]<-->ggml[llama-cli]
+    ggml[llama-cli]<-->rpcb[RPC backend]
     end
     style hostn stroke:#66,stroke-width:2px,stroke-dasharray: 5 5
+    classDef devcls fill:#5B9BD5
+    class local,dev,dev2,dev3,dev4,dev5 devcls
 ```
 
-Each host can run a different backend, e.g. one with CUDA and another with Metal.
-You can also run multiple `rpc-server` instances on the same host, each with a different backend.
+By default, `rpc-server` exposes all available accelerator devices on the host.
+If there are no accelerators, it exposes a single `CPU` device.
 
 ## Usage
 
-On each host, build the corresponding backend with `cmake` and add `-DGGML_RPC=ON` to the build options.
-For example, to build the CUDA backend with RPC support:
+### Remote hosts
+
+On each remote host, build the backends for each accelerator by adding `-DGGML_RPC=ON` to the build options.
+For example, to build the `rpc-server` with support for CUDA accelerators:
 
 ```bash
 mkdir build-rpc-cuda
@@ -43,36 +50,49 @@ cmake .. -DGGML_CUDA=ON -DGGML_RPC=ON
 cmake --build . --config Release
 ```
 
-Then, start the `rpc-server` with the backend:
+When started, the `rpc-server` will detect and expose all available `CUDA` devices:
 
 ```bash
-$ bin/rpc-server -p 50052
-create_backend: using CUDA backend
-ggml_cuda_init: GGML_CUDA_FORCE_MMQ:    no
-ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes
+$ bin/rpc-server
+ggml_cuda_init: GGML_CUDA_FORCE_MMQ:    no
+ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
 ggml_cuda_init: found 1 CUDA devices:
-  Device 0: NVIDIA T1200 Laptop GPU, compute capability 7.5, VMM: yes
-Starting RPC server on 0.0.0.0:50052
+  Device 0: NVIDIA GeForce RTX 5090, compute capability 12.0, VMM: yes
+Starting RPC server v3.0.0
+  endpoint       : 127.0.0.1:50052
+  local cache    : n/a
+Devices:
+  CUDA0: NVIDIA GeForce RTX 5090 (32109 MiB, 31588 MiB free)
 ```
 
-When using the CUDA backend, you can specify the device with the `CUDA_VISIBLE_DEVICES` environment variable, e.g.:
+You can control the set of exposed CUDA devices with the `CUDA_VISIBLE_DEVICES` environment variable or the `--device` command line option. The following two commands have the same effect:
 ```bash
 $ CUDA_VISIBLE_DEVICES=0 bin/rpc-server -p 50052
+$ bin/rpc-server --device CUDA0 -p 50052
 ```
-This way you can run multiple `rpc-server` instances on the same host, each with a different CUDA device.
 
+### Main host
 
-On the main host build `llama.cpp` only with `-DGGML_RPC=ON`:
+On the main host build `llama.cpp` with the backends for the local devices and add `-DGGML_RPC=ON` to the build options.
+Finally, when running `llama-cli` or `llama-server`, use the `--rpc` option to specify the host and port of each `rpc-server`:
 
 ```bash
-mkdir build-rpc
-cd build-rpc
-cmake .. -DGGML_RPC=ON
-cmake --build . --config Release
+$ llama-cli -hf ggml-org/gemma-3-1b-it-GGUF -ngl 99 --rpc 192.168.88.10:50052,192.168.88.11:50052
 ```
 
-Finally, use the `--rpc` option to specify the host and port of each `rpc-server`:
+By default, llama.cpp distributes model weights and the KV cache across all available devices -- both local and remote -- in proportion to each device's available memory.
+You can override this behavior with the `--tensor-split` option and set custom proportions when splitting tensor data across devices.
 
 ```bash
 $ bin/llama-cli -m ../models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name is" --repeat-penalty 1.0 -n 64 --rpc 192.168.88.10:50052,192.168.88.11:50052 -ngl 99
 ```
 
+By default, the cache is stored in the `$HOME/.cache/llama.cpp/rpc` directory and can be controlled via the `LLAMA_CACHE` environment variable.
+
+### Troubleshooting
+
+Use the `GGML_RPC_DEBUG` environment variable to enable debug messages from `rpc-server`:
+```bash
+$ GGML_RPC_DEBUG=1 bin/rpc-server
+```
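As a rough illustration of the proportional default described in the README hunk above, here is a toy sketch (device sizes and layer count are assumed; this is not code from the commit) of how layer shares scale with per-device free memory:

```cpp
// Toy sketch only: splits n_layers proportionally to assumed per-device free
// memory, in the spirit of the default behavior described above.
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> free_mib = { 24000.0, 8000.0, 8000.0 }; // assumed: one 24 GiB GPU, two 8 GiB
    double total = 0.0;
    for (double f : free_mib) total += f;
    int n_layers = 40; // assumed model depth
    for (size_t i = 0; i < free_mib.size(); ++i) {
        printf("device %zu: ~%.0f of %d layers\n", i, n_layers * free_mib[i] / total, n_layers);
    }
    return 0;
}
```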
@@ -32,6 +32,7 @@
 #include <fstream>
 #include <filesystem>
 #include <codecvt>
+#include <regex>
 
 namespace fs = std::filesystem;
 
@@ -145,22 +146,24 @@ static std::string fs_get_cache_directory() {
 }
 
 struct rpc_server_params {
     std::string host      = "127.0.0.1";
     int         port      = 50052;
-    size_t      backend_mem = 0;
     bool        use_cache = false;
+    bool        use_cpu   = false;
     int         n_threads = std::max(1U, std::thread::hardware_concurrency() / 2);
+    std::vector<std::string> devices;
 };
 
 static void print_usage(int /*argc*/, char** argv, rpc_server_params params) {
     fprintf(stderr, "Usage: %s [options]\n\n", argv[0]);
     fprintf(stderr, "options:\n");
     fprintf(stderr, "  -h, --help                show this help message and exit\n");
-    fprintf(stderr, "  -t, --threads             number of threads for the CPU backend (default: %d)\n", params.n_threads);
-    fprintf(stderr, "  -H HOST, --host HOST      host to bind to (default: %s)\n", params.host.c_str());
-    fprintf(stderr, "  -p PORT, --port PORT      port to bind to (default: %d)\n", params.port);
-    fprintf(stderr, "  -m MEM, --mem MEM         backend memory size (in MB)\n");
-    fprintf(stderr, "  -c, --cache               enable local file cache\n");
+    fprintf(stderr, "  -t, --threads N           number of threads for the CPU device (default: %d)\n", params.n_threads);
+    fprintf(stderr, "  -d, -dev, --device <dev1,dev2,...>  comma-separated list of devices\n");
+    fprintf(stderr, "  -cpu                      enable cpu backend\n");
+    fprintf(stderr, "  -h, -H, --host, --Host HOST  host to bind to (default: %s)\n", params.host.c_str());
+    fprintf(stderr, "  -p, -P, --port, --Port PORT  port to bind to (default: %d)\n", params.port);
+    fprintf(stderr, "  -c, --cache               enable local file cache\n");
     fprintf(stderr, "\n");
 }
 
@@ -168,7 +171,7 @@ static bool rpc_server_params_parse(int argc, char** argv, rpc_server_params& pa
     std::string arg;
     for (int i = 1; i < argc; i++) {
         arg = argv[i];
-        if (arg == "-H" || arg == "--host") {
+        if (arg == "-H" || arg == "-h" || arg == "--host" || arg == "--Host") {
             if (++i >= argc) {
                 return false;
             }
@@ -184,7 +187,25 @@ static bool rpc_server_params_parse(int argc, char** argv, rpc_server_params& pa
                 return false;
             }
         }
-        else if (arg == "-p" || arg == "--port") {
+        else if (arg == "-d" || arg == "-dev" || arg == "--device") {
+            if (++i >= argc) {
+                return false;
+            }
+            const std::regex regex{ R"([,/]+)" };
+            std::string dev_str = argv[i];
+            std::sregex_token_iterator iter(dev_str.begin(), dev_str.end(), regex, -1);
+            std::sregex_token_iterator end;
+            for (; iter != end; ++iter) {
+                try {
+                    params.devices.push_back(*iter);
+                }
+                catch (const std::exception&) {
+                    fprintf(stderr, "error: invalid device: %s\n", iter->str().c_str());
+                    return false;
+                }
+            }
+        }
+        else if (arg == "-p" || arg == "-P" || arg == "--port" || arg == "--Port") {
             if (++i >= argc) {
                 return false;
             }
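The `[,/]+` pattern above tokenizes the `--device` argument on commas or slashes. A minimal standalone check of that tokenization (the input string is assumed):

```cpp
// Minimal check of the [,/]+ tokenization used by the --device option above.
#include <cstdio>
#include <regex>
#include <string>

int main() {
    const std::regex sep{ R"([,/]+)" };
    std::string dev_str = "CUDA0,CUDA1/CPU"; // assumed command-line value
    std::sregex_token_iterator it(dev_str.begin(), dev_str.end(), sep, -1), end;
    for (; it != end; ++it) {
        printf("device: %s\n", it->str().c_str()); // CUDA0, CUDA1, CPU
    }
    return 0;
}
```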
@@ -196,11 +217,8 @@ static bool rpc_server_params_parse(int argc, char** argv, rpc_server_params& pa
         else if (arg == "-c" || arg == "--cache") {
             params.use_cache = true;
         }
-        else if (arg == "-m" || arg == "--mem") {
-            if (++i >= argc) {
-                return false;
-            }
-            params.backend_mem = std::stoul(argv[i]) * 1024 * 1024;
+        else if (arg == "-cpu") {
+            params.use_cpu = true;
         }
         else if (arg == "-h" || arg == "--help") {
             print_usage(argc, argv, params);
@@ -215,11 +233,18 @@ static bool rpc_server_params_parse(int argc, char** argv, rpc_server_params& pa
     return true;
 }
 
-static ggml_backend_t create_backend(const rpc_server_params& params) {
+static ggml_backend_t create_cpu_backend(const rpc_server_params& params) {
+    fprintf(stderr, "%s: using CPU backend\n", __func__);
+    ggml_backend_t backend = ggml_backend_cpu_init();
+    ggml_backend_cpu_set_n_threads(backend, params.n_threads);
+    return backend;
+}
+
+static ggml_backend_t create_gpu_backend(const rpc_server_params& params, uint32_t device) {
     ggml_backend_t backend = NULL;
 #ifdef GGML_USE_CUDA
-    fprintf(stderr, "%s: using CUDA backend\n", __func__);
-    backend = ggml_backend_cuda_init(0, nullptr); // init device 0
+    fprintf(stderr, "%s: using CUDA backend: CUDA%d\n", __func__, device);
+    backend = ggml_backend_cuda_init(device, nullptr); // init device
     if (!backend) {
         fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
     }
@@ -231,34 +256,113 @@ static ggml_backend_t create_backend(const rpc_server_params& params) {
     }
 #elif GGML_USE_VULKAN
     fprintf(stderr, "%s: using Vulkan backend\n", __func__);
-    backend = ggml_backend_vk_init(0); // init device 0
+    backend = ggml_backend_vk_init(device); // init device 0
     if (!backend) {
         fprintf(stderr, "%s: ggml_backend_vulkan_init() failed\n", __func__);
     }
 #elif GGML_USE_SYCL
     fprintf(stderr, "%s: using SYCL backend\n", __func__);
-    backend = ggml_backend_sycl_init(0); // init device 0
+    backend = ggml_backend_sycl_init(device); // init device 0
     if (!backend) {
         fprintf(stderr, "%s: ggml_backend_sycl_init() failed\n", __func__);
     }
 #endif
 
     // if there aren't GPU Backends fallback to CPU backend
-    if (!backend) {
-        fprintf(stderr, "%s: using CPU backend\n", __func__);
-        backend = ggml_backend_cpu_init();
-        ggml_backend_cpu_set_n_threads(backend, params.n_threads);
-    }
+    //if (!backend) {
+    //    fprintf(stderr, "%s: using CPU backend\n", __func__);
+    //    backend = ggml_backend_cpu_init();
+    //    ggml_backend_cpu_set_n_threads(backend, params.n_threads);
+    //}
     return backend;
 }
 
-static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
+static int32_t find_device_idx(const std::string& str) {
+    std::regex pattern(R"((\d+)$)"); // Match digits at the end
+    std::smatch matches;
+    int number = -1;
+    if (std::regex_search(str, matches, pattern)) {
+        number = std::stoi(matches[1]);
+    }
+    return number;
+}
+
+static size_t get_gpu_backend_count(const rpc_server_params& params) {
+    size_t count = 0;
+#if defined(GGML_USE_CUDA)
+    count = ggml_backend_cuda_get_device_count();
+#elif defined(GGML_USE_SYCL)
+    count = ggml_backend_sycl_get_device_count();
+#elif defined(GGML_USE_VULKAN)
+    count = ggml_backend_vk_get_device_count();
+#elif defined(GGML_USE_CANN)
+    return ggml_backend_cann_get_device_count();
+#endif
+    return count;
+}
+
+static std::vector<ggml_backend_t> get_devices(const rpc_server_params& params) {
+    std::vector<ggml_backend_t> devices;
+    if (!params.devices.empty()) {
+        for (auto device : params.devices) {
+            int32_t device_id;
+            ggml_backend_t dev;
+            if (params.use_cpu && device == "CPU" ) {
+                dev = create_cpu_backend(params);
+            } else {
+                device_id = find_device_idx(device);
+                if (device_id < 0) {
+                    fprintf(stderr, "error: unknown device: %s\n", device.c_str());
+                    continue;
+                }
+                dev = create_gpu_backend(params, device_id);
+            }
+            if (dev) {
+                devices.push_back(dev);
+            } else {
+                fprintf(stderr, "error: unknown device: %s\n", device.c_str());
+            }
+        }
+    }
+    else {
+        for (size_t i = 0; i < get_gpu_backend_count(params); i++) {
+            ggml_backend_t dev = create_gpu_backend(params, i);
+            if (dev) {
+                devices.push_back(dev);
+            }
+        }
+        // cpu backend at last
+        if (params.use_cpu || devices.empty()) {
+            ggml_backend_t dev = create_cpu_backend(params);
+            if (dev) {
+                devices.push_back(dev);
+            }
+        }
+    }
+    return devices;
+}
+
+static void get_cpu_backend_memory(size_t * free_mem, size_t * total_mem) {
+#ifdef _WIN32
+    MEMORYSTATUSEX status;
+    status.dwLength = sizeof(status);
+    GlobalMemoryStatusEx(&status);
+    *total_mem = status.ullTotalPhys;
+    *free_mem = status.ullAvailPhys;
+#else
+    long pages = sysconf(_SC_PHYS_PAGES);
+    long page_size = sysconf(_SC_PAGE_SIZE);
+    *total_mem = pages * page_size;
+    *free_mem = *total_mem;
+#endif
+}
+
+static void get_backend_memory(uint32_t device, size_t * free_mem, size_t * total_mem) {
 #ifdef GGML_USE_CUDA
-    ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
+    ggml_backend_cuda_get_device_memory(device, free_mem, total_mem);
 #elif GGML_USE_VULKAN
-    ggml_backend_vk_get_device_memory(0, free_mem, total_mem);
+    ggml_backend_vk_get_device_memory(device, free_mem, total_mem);
 #elif GGML_USE_SYCL
-    ggml_backend_sycl_get_device_memory(0, free_mem, total_mem);
+    ggml_backend_sycl_get_device_memory(device, free_mem, total_mem);
 #else
 #ifdef _WIN32
     MEMORYSTATUSEX status;
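`find_device_idx` above relies on the convention that backend device names end in their index. A quick standalone check of that behavior, using the same regex as the hunk:

```cpp
// Quick standalone check of the digits-at-the-end convention used by
// find_device_idx() above: "CUDA0" -> 0, "Vulkan12" -> 12, "CPU" -> -1.
#include <cstdint>
#include <cstdio>
#include <regex>
#include <string>

static int32_t find_device_idx(const std::string & str) {
    std::regex pattern(R"((\d+)$)"); // match digits at the end
    std::smatch matches;
    return std::regex_search(str, matches, pattern) ? std::stoi(matches[1]) : -1;
}

int main() {
    printf("%d %d %d\n", find_device_idx("CUDA0"), find_device_idx("Vulkan12"), find_device_idx("CPU"));
    return 0;
}
```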
@@ -292,20 +396,27 @@ int main(int argc, char * argv[]) {
         fprintf(stderr, "\n");
     }
 
-    ggml_backend_t backend = create_backend(params);
-    if (!backend) {
-        fprintf(stderr, "Failed to create backend\n");
+    auto devices = get_devices(params);
+    if (devices.empty()) {
+        fprintf(stderr, "No backend found\n");
         return 1;
     }
 
     std::string endpoint = params.host + ":" + std::to_string(params.port);
-    size_t free_mem, total_mem;
-    if (params.backend_mem > 0) {
-        free_mem = params.backend_mem;
-        total_mem = params.backend_mem;
-    }
-    else {
-        get_backend_memory(&free_mem, &total_mem);
+    std::vector<size_t> free_mem, total_mem;
+    for (size_t i = 0; i < devices.size(); i++) {
+        size_t free, total;
+        const char* name = ggml_backend_name(devices[i]);
+        if (std::string(name) == "CPU") {
+            get_cpu_backend_memory(&free, &total);
+        } else {
+            int32_t idx = find_device_idx(name);
+            get_backend_memory((uint32_t) idx, &free, &total);
+        }
+        free_mem.push_back(free);
+        total_mem.push_back(total);
     }
 
     const char * cache_dir = nullptr;
     std::string cache_dir_str;
     if (params.use_cache) {
@@ -316,14 +427,7 @@ int main(int argc, char * argv[]) {
         }
         cache_dir = cache_dir_str.c_str();
     }
-    printf("Starting RPC server v%d.%d.%d\n",
-        RPC_PROTO_MAJOR_VERSION,
-        RPC_PROTO_MINOR_VERSION,
-        RPC_PROTO_PATCH_VERSION);
-    printf("  endpoint       : %s\n", endpoint.c_str());
-    printf("  local cache    : %s\n", cache_dir ? cache_dir : "n/a");
-    printf("  backend memory : %zu MB\n", free_mem / (1024 * 1024));
-    ggml_backend_rpc_start_server(backend, endpoint.c_str(), cache_dir, free_mem, total_mem);
-    ggml_backend_free(backend);
+    ggml_backend_rpc_start_server(endpoint.c_str(), cache_dir, devices.size(), devices.data(),
+        free_mem.data(), total_mem.data());
     return 0;
 }
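The new `ggml_backend_rpc_start_server` used above (declared in the header hunk below) takes parallel arrays: one backend handle plus one free/total memory entry per exposed device. A hedged sketch of the calling convention only; the wrapper function and its arguments are placeholders:

```cpp
// Hedged sketch of the calling convention only, not code from the commit.
#include <string>
#include <vector>
#include "ggml-rpc.h" // assumes the patched header from this commit

void serve(const std::string & endpoint, std::vector<ggml_backend_t> & devices,
           std::vector<size_t> & free_mem, std::vector<size_t> & total_mem) {
    // invariant: devices, free_mem and total_mem are index-aligned, one entry per device
    ggml_backend_rpc_start_server(endpoint.c_str(), /*cache_dir=*/nullptr,
                                  devices.size(), devices.data(),
                                  free_mem.data(), total_mem.data());
}
```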
@@ -7,22 +7,22 @@
 extern "C" {
 #endif
 
-#define RPC_PROTO_MAJOR_VERSION    2
-#define RPC_PROTO_MINOR_VERSION    0
+#define RPC_PROTO_MAJOR_VERSION    3
+#define RPC_PROTO_MINOR_VERSION    5
 #define RPC_PROTO_PATCH_VERSION    1
 #define GGML_RPC_MAX_SERVERS       16
 
 // backend API
-GGML_API GGML_CALL ggml_backend_t ggml_backend_rpc_init(const char * endpoint);
+GGML_API GGML_CALL ggml_backend_t ggml_backend_rpc_init(const char * endpoint, uint32_t device);
 GGML_API GGML_CALL bool ggml_backend_is_rpc(ggml_backend_t backend);
 
-GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint);
+GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint, uint32_t device);
 
-GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total);
+GGML_API GGML_CALL uint32_t ggml_backend_rpc_get_device_count(const char* endpoint);
 
-GGML_API GGML_CALL void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint,
-                                                      const char * cache_dir,
-                                                      size_t free_mem, size_t total_mem);
+GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint, uint32_t device, size_t * free, size_t * total);
+
+GGML_API GGML_CALL void ggml_backend_rpc_start_server(const char * endpoint, const char* cache_dir, size_t device, ggml_backend_t * devices, size_t* free_mem, size_t* total_mem);
 
 #ifdef __cplusplus
 }
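A hedged client-side sketch against the declarations above: enumerate a server's devices, then query memory per device. The endpoint is a placeholder and error handling is omitted:

```cpp
// Hedged sketch against the per-device API declared in the header above.
#include <cstdio>
#include "ggml-rpc.h"

int main() {
    const char * endpoint = "127.0.0.1:50052"; // assumed rpc-server address
    uint32_t n = ggml_backend_rpc_get_device_count(endpoint);
    for (uint32_t d = 0; d < n; ++d) {
        size_t free_mem = 0, total_mem = 0;
        ggml_backend_rpc_get_device_memory(endpoint, d, &free_mem, &total_mem);
        printf("device %u: %zu / %zu bytes free\n", d, free_mem, total_mem);
    }
    return 0;
}
```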
@@ -999,15 +999,6 @@ GGML_CALL static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, v
     GGML_UNUSED(user_data);
 }
 
-#ifdef GGML_USE_RPC
-GGML_CALL static ggml_backend_t ggml_backend_reg_rpc_init(const char* params, void* user_data) {
-    return ggml_backend_rpc_init((const char*)user_data);
-
-    GGML_UNUSED(params);
-    GGML_UNUSED(user_data);
-}
-#endif
-
 // multi-buffer buffer
 
 struct ggml_backend_multi_buffer_context {
@@ -2159,6 +2150,7 @@ void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
 
 bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
     GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);
+    ggml_backend_sched_synchronize(sched);
 
     ggml_backend_sched_split_graph(sched, measure_graph);
 
@@ -2167,7 +2159,6 @@ bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph *
     }
 
     ggml_backend_sched_reset(sched);
-    ggml_backend_sched_synchronize(sched);
 
     return true;
 }
File diff suppressed because it is too large
@@ -271,6 +271,11 @@ struct llama_layer {
 
 struct llama_lora_adapter;
 
+struct rpc_device {
+    std::string endpoint;
+    uint32_t    device;
+};
+
 struct llama_model {
     e_model type = MODEL_UNKNOWN;
     llm_arch arch = LLM_ARCH_UNKNOWN;
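The struct above pairs an endpoint with a device index. A standalone sketch of the round trip between the `endpoint|index` wire form and the `RPCn[endpoint]` display name produced later by `create_rpc_name` (the entry string is assumed):

```cpp
// Standalone sketch of the round trip implied by rpc_device: parse the
// "endpoint|index" form, then render the "RPCn[endpoint]" name used later.
#include <cstdint>
#include <cstdio>
#include <string>

struct rpc_device {
    std::string endpoint;
    uint32_t    device;
};

int main() {
    std::string entry = "192.168.88.10:50052|1"; // assumed device entry
    size_t bar = entry.find('|');
    rpc_device rpc { entry.substr(0, bar), (uint32_t) std::stoi(entry.substr(bar + 1)) };
    std::string name = "RPC" + std::to_string(rpc.device) + "[" + rpc.endpoint + "]";
    printf("%s\n", name.c_str()); // RPC1[192.168.88.10:50052]
    return 0;
}
```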
@@ -299,7 +304,7 @@ struct llama_model {
     int main_gpu;
     int n_gpu_layers;
 
-    std::vector<std::string> rpc_servers;
+    std::vector<rpc_device> rpc_servers;
     std::vector<int32_t> devices;
 
     // gguf metadata
@@ -125,19 +125,6 @@
 // helpers
 //
 
-// trim whitespace from the beginning and end of a string
-//static std::string trim(const std::string & str) {
-// Fails for Chinese character
-//    size_t start = 0;
-//    size_t end = str.size();
-//    while (start < end && isspace(str[start])) {
-//        start += 1;
-//    }
-//    while (end > start && isspace(str[end - 1])) {
-//        end -= 1;
-//    }
-//    return str.substr(start, end - start);
-//}
-
 static bool is_utf8_whitespace(uint8_t c) {
     // Basic ASCII whitespace
@@ -155,38 +142,35 @@ static std::string trim(const std::string & str) {
 }
 
 
-static std::vector<std::string> llama_string_split(const std::string& str, const std::string& delimiter) {
+static std::vector<std::string> string_split(const std::string& str, const std::string& delimiter) {
     std::vector<std::string> parts;
     size_t start = 0;
     size_t end = str.find(delimiter);
 
     while (end != std::string::npos) {
         parts.push_back(str.substr(start, end - start));
         start = end + delimiter.length();
         end = str.find(delimiter, start);
     }
 
     parts.push_back(str.substr(start));
 
     return parts;
 
 }
 
 // extract ip and port from RPC[ip:port] for rpc and keep other device names
-static std::vector<std::string> extract_ip_from_rpc_device(std::vector<std::string> devices) {
-    std::vector<std::string> rpc_servers;
-    std::regex pattern("RPC\\[(.*?)\\]");
-    std::smatch matches;
-    for (auto device : devices) {
-        if (std::regex_search(device, matches, pattern)) {
-            rpc_servers.push_back(matches[1]);
-        } else {
-            rpc_servers.push_back(device);
+static std::vector<rpc_device> extract_device_from_rpc_device(std::vector<std::string> devices) {
+    std::vector<rpc_device> rpc_servers;
+    for (auto & device : devices) {
+        rpc_device rpc;
+        auto value = string_split(device, "|");
+        if (value.size() == 2) {
+            rpc.device = std::stoi(value[1]);
         }
+        rpc.endpoint = value[0];
+        rpc_servers.push_back(rpc);
     }
     return rpc_servers;
 }
 
 
 enum llm_chat_template {
     LLM_CHAT_TEMPLATE_CHATML,
     LLM_CHAT_TEMPLATE_LLAMA_2,
@@ -445,8 +429,10 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_
     int dev_count = (int)llama_get_device_count(model);
     int rpc_count = (int)model.rpc_servers.size();
     if (gpu >= dev_count - rpc_count) {
-        const char * endpoint = model.rpc_servers[gpu - dev_count + rpc_count].c_str();
-        return ggml_backend_rpc_buffer_type(endpoint);
+        int rpc_idx = gpu - dev_count + rpc_count;
+        rpc_device rpc = model.rpc_servers[rpc_idx];
+        const char * endpoint = rpc.endpoint.c_str();
+        return ggml_backend_rpc_buffer_type(endpoint, rpc.device);
     }
 #endif
 #if defined(GGML_USE_METAL)
@@ -504,8 +490,9 @@ static size_t llama_get_device_memory(const llama_model & model, int device) {
     if (device >= dev_count - rpc_count) {
         size_t total;
         size_t free;
-        const char * endpoint = model.rpc_servers[device - dev_count + rpc_count].c_str();
-        ggml_backend_rpc_get_device_memory(endpoint, &free, &total);
+        rpc_device rpc = model.rpc_servers[device - dev_count + rpc_count];
+        const char * endpoint = rpc.endpoint.c_str();
+        ggml_backend_rpc_get_device_memory(endpoint, rpc.device, &free, &total);
         return free;
     }
 #endif
@@ -1694,11 +1681,23 @@ static bool llm_load_tensors(
         int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1);
         for (int i = i_gpu_start; i < n_layer; ++i) {
             int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits.begin();
+#ifndef NDEBUG
+            ggml_backend_buffer_type_t buft = llama_default_buffer_type_offload(model, model.devices[layer_gpu]);
+            const char* name = ggml_backend_buft_name(buft);
+            LLAMA_LOG_DEBUG("load_tensors: layers %3d assigned to backend %s\n", i,
+                name);
+#endif
             model.buft_layer[i] = llama_default_buffer_type_offload(model, model.devices[layer_gpu]);
         }
         // assign the output layer
         if (n_gpu_layers > n_layer) {
             int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits.begin();
+#ifndef NDEBUG
+            ggml_backend_buffer_type_t buft = llama_default_buffer_type_offload(model, model.devices[layer_gpu]);
+            const char* name = ggml_backend_buft_name(buft);
+            LLAMA_LOG_DEBUG("load_tensors: output layers assigned to backend %s\n",
+                name);
+#endif
             model.buft_output = llama_default_buffer_type_offload(model, model.devices[layer_gpu]);
         } else {
             model.buft_output = llama_default_buffer_type_cpu(true);
@@ -4016,17 +4015,11 @@ int64_t llama_time_us(void) {
     return ggml_time_us();
 }
 
-static int32_t find_device_idx(const std::string& str) {
-    std::regex pattern(R"((\d+)$)"); // Match digits at the end
-    std::smatch matches;
-    int number = -1;
-    if (std::regex_search(str, matches, pattern)) {
-        number = std::stoi(matches[1]);
-    }
-    return number;
+static std::string create_rpc_name(std::string endpoint, uint32_t device) {
+    std::string dev_name = "RPC" + std::to_string(device) + "[" + std::string(endpoint) + "]";
+    return dev_name;
 }
 
 
 struct llama_model * llama_load_model_from_file(
         const char * path_model,
         struct llama_model_params params) {
@@ -4058,8 +4051,7 @@ struct llama_model * llama_load_model_from_file(
 
     std::vector<std::string> params_devices;
     if (params.devices && !striequals(params.devices, "")) {
-        params_devices = llama_string_split(params.devices, ",");
-        params_devices = extract_ip_from_rpc_device(params_devices);
+        params_devices = string_split(params.devices, ",");
     }
 
     std::map<std::string, int32_t> buffer_names;
@@ -4075,20 +4067,21 @@ struct llama_model * llama_load_model_from_file(
         gpu_names.push_back(std::string(name));
     }
     if (has_rpc) {
-        model->rpc_servers = llama_string_split(params.rpc_servers, ",");
+        model->rpc_servers = extract_device_from_rpc_device(string_split(params.rpc_servers, ","));
         for (auto rpc : model->rpc_servers) {
-            buffer_names.insert({ rpc, idx});
+            buffer_names.insert({ create_rpc_name(rpc.endpoint, rpc.device), idx});
             idx++;
         }
     }
     std::vector<std::string> device_names;
     if (params_devices.size()) {
         device_names = params_devices;
-    }
-    else {
+    } else {
         // add RPC servers at the front of the list to minimize the network transfers
         if (has_rpc) {
-            device_names = model->rpc_servers;
+            for (auto& it : model->rpc_servers) {
+                device_names.push_back(create_rpc_name(it.endpoint, it.device));
+            }
         }
         device_names.insert(device_names.end(), gpu_names.begin(), gpu_names.end());
     }
@@ -4096,8 +4089,7 @@ struct llama_model * llama_load_model_from_file(
     for (auto & device : device_names) {
         if (buffer_names.count(device)) {
             model->devices.push_back(buffer_names[device]);
-        }
-        else {
+        } else {
             LLAMA_LOG_ERROR("%s backend not available.\n", device.c_str());
         }
     }
@@ -4451,10 +4443,11 @@ struct llama_context * llama_new_context_with_model(
 
 #if defined(GGML_USE_RPC)
     if (model->n_gpu_layers > 0) {
-        for (const auto & endpoint : model->rpc_servers) {
-            ggml_backend_t backend = ggml_backend_rpc_init(endpoint.c_str());
+        for (const auto & device : model->rpc_servers) {
+            ggml_backend_t backend = ggml_backend_rpc_init(device.endpoint.c_str(), device.device);
             if (backend == nullptr) {
-                LLAMA_LOG_ERROR("%s: failed to initialize RPC to '%s'\n", __func__, endpoint.c_str());
+                LLAMA_LOG_ERROR("%s: failed to initialize RPC%d to '%s'\n", __func__, device.device,
+                    device.endpoint.c_str());
                 llama_free(ctx);
                 return nullptr;
             }