#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef GGML_USE_HIP
#define GGML_CUDA_NAME "ROCm"
#define GGML_CUBLAS_NAME "hipBLAS"
#elif defined(GGML_USE_MUSA)
#define GGML_CUDA_NAME "MUSA"
#define GGML_CUBLAS_NAME "muBLAS"
#else
#define GGML_CUDA_NAME "CUDA"
#define GGML_CUBLAS_NAME "cuBLAS"
#endif
#define GGML_CUDA_MAX_DEVICES 16

// backend API
GGML_BACKEND_API ggml_backend_t ggml_backend_cuda_init(int device);

GGML_BACKEND_API bool ggml_backend_is_cuda(ggml_backend_t backend);
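
// Example usage (illustrative sketch, not part of the API): initializing the
// backend on device 0. ggml_backend_cuda_init returns NULL on failure;
// ggml_backend_free is declared in ggml-backend.h.
//
//     ggml_backend_t backend = ggml_backend_cuda_init(0);
//     if (backend == NULL || !ggml_backend_is_cuda(backend)) {
//         // no usable CUDA device
//     }
//     // ... run graphs with the backend ...
//     ggml_backend_free(backend);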

// device buffer
GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);

// split tensor buffer that distributes matrices by rows across multiple devices
GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(int main_device, const float * tensor_split);
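
// Example usage (illustrative sketch): splitting weight matrices 60/40 across
// two devices. The proportions only need to be relative; they are normalized
// internally. Passing NULL for tensor_split appears to fall back to a default
// split based on device memory (implementation-defined behavior).
//
//     const float tensor_split[GGML_CUDA_MAX_DEVICES] = { 0.6f, 0.4f };
//     ggml_backend_buffer_type_t buft = ggml_backend_cuda_split_buffer_type(0, tensor_split);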

// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
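
// Example usage (illustrative sketch): allocating a pinned host buffer so that
// CPU<->GPU copies avoid pageable memory. ggml_backend_buft_alloc_buffer is
// declared in ggml-backend.h; the size below is a placeholder.
//
//     ggml_backend_buffer_type_t buft = ggml_backend_cuda_host_buffer_type();
//     ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 16*1024*1024);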

GGML_BACKEND_API int ggml_backend_cuda_get_device_count(void);
GGML_BACKEND_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
GGML_BACKEND_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
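
// Example usage (illustrative sketch): enumerating all devices and printing
// their description and free/total memory (printf requires <stdio.h>).
//
//     int n_devices = ggml_backend_cuda_get_device_count();
//     for (int i = 0; i < n_devices; i++) {
//         char desc[128];
//         size_t free, total;
//         ggml_backend_cuda_get_device_description(i, desc, sizeof(desc));
//         ggml_backend_cuda_get_device_memory(i, &free, &total);
//         printf("device %d: %s (%zu/%zu bytes free)\n", i, desc, free, total);
//     }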

GGML_BACKEND_API bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
GGML_BACKEND_API void ggml_backend_cuda_unregister_host_buffer(void * buffer);
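
// Example usage (illustrative sketch): pinning an existing host allocation to
// speed up transfers of data not allocated through ggml. Registration can fail
// (returning false), in which case the memory remains usable as ordinary
// pageable memory. `size` is a placeholder.
//
//     void * data = malloc(size);
//     if (!ggml_backend_cuda_register_host_buffer(data, size)) {
//         // proceed with unpinned memory
//     }
//     // ... use data for tensor I/O ...
//     ggml_backend_cuda_unregister_host_buffer(data);
//     free(data);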

GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cuda_reg(void);

#ifdef __cplusplus
}
#endif