Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2026-04-30 16:47:31 +03:00

Compare commits: b7490...compilade/ (12 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5f14aa8e43 | |
| | 0e1ccf15c7 | |
| | 5e25ddebff | |
| | fd05c51cec | |
| | b365c3ff01 | |
| | cb64222b0c | |
| | 6eb7081860 | |
| | 4117ae5557 | |
| | 65e96a2464 | |
| | 9496bbb808 | |
| | ddcb75dd8a | |
| | 52ab19df63 | |
@@ -1,4 +1,4 @@
-# Copilot Instructions for llama.cpp
+# Instructions for llama.cpp

 ## Repository Overview

@@ -12,6 +12,24 @@ llama.cpp is a large-scale C/C++ project for efficient LLM (Large Language Model
 - **Backends supported**: CPU (AVX/NEON/RVV optimized), CUDA, Metal, Vulkan, SYCL, ROCm, MUSA
 - **License**: MIT

+## Disclose AI Usage
+
+It is crucial to remind contributors that the project mandates disclosure of any AI usage in pull requests. This requirement stems from the potential for AI-generated code to include suboptimal optimizations and hidden bugs, owing to the inherent overconfidence in AI outputs.
+
+When generating significant portions of code, address this by:
+- Informing the user that AI-generated content may be rejected by maintainers.
+- Clearly marking AI-generated code in commit messages and comments.
+- Example of commit message: `[AI] Fix a race condition in ...`
+- Example of code comment: `// [AI] spawn a new thread ...`
+
+These measures apply to:
+- Changes resulting in large portions of code or complex logic.
+- Modifications or additions to public APIs in `llama.h`, `ggml.h`, or `mtmd.h`.
+- Backend-related changes, such as those involving CPU, CUDA, Metal, Vulkan, etc.
+- Modifications to `tools/server`.
+
+Note: These measures can be omitted for small fixes or trivial changes.
+
 ## Build Instructions

 ### Prerequisites

@@ -251,6 +269,7 @@ Primary tools:
 - **Cross-platform compatibility**: Test on Linux, macOS, Windows when possible
 - **Performance focus**: This is a performance-critical inference library
 - **API stability**: Changes to `include/llama.h` require careful consideration
+- **Disclose AI Usage**: Refer to the "Disclose AI Usage" earlier in this document

 ### Git Workflow
 - Always create feature branches from `master`
@@ -85,6 +85,9 @@ add_library(${TARGET} STATIC
     unicode.h
     )

+target_include_directories(${TARGET} PUBLIC . ../vendor)
+target_compile_features (${TARGET} PUBLIC cxx_std_17)
+
 if (BUILD_SHARED_LIBS)
     set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
 endif()
@@ -151,9 +154,7 @@ if (LLAMA_LLGUIDANCE)
     set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} llguidance ${LLGUIDANCE_PLATFORM_LIBS})
 endif ()

-target_include_directories(${TARGET} PUBLIC . ../vendor)
-target_compile_features (${TARGET} PUBLIC cxx_std_17)
-target_link_libraries (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads)
+target_link_libraries(${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads)


 #
@@ -2887,6 +2887,16 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.lora_init_without_apply = true;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}));
+    add_opt(common_arg(
+        {"--sleep-idle-seconds"}, "SECONDS",
+        string_format("number of seconds of idleness after which the server will sleep (default: %d; -1 = disabled)", params.sleep_idle_seconds),
+        [](common_params & params, int value) {
+            if (value == 0 || value < -1) {
+                throw std::invalid_argument("invalid value: cannot be 0 or less than -1");
+            }
+            params.sleep_idle_seconds = value;
+        }
+    ).set_examples({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--simple-io"},
         "use basic IO for better compatibility in subprocesses and limited consoles",
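The validator above accepts -1 (sleeping disabled, the default) or any positive number of seconds, and rejects 0 and anything below -1. A standalone restatement of the same rule, for illustration only (the function name is invented here, not part of the diff):

```cpp
#include <stdexcept>

// Mirrors the --sleep-idle-seconds check: -1 disables sleeping,
// positive values set the idle timeout in seconds; 0 and values
// below -1 are rejected.
static int validate_sleep_idle_seconds(int value) {
    if (value == 0 || value < -1) {
        throw std::invalid_argument("invalid value: cannot be 0 or less than -1");
    }
    return value;
}
```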
@@ -475,7 +475,8 @@ struct common_params {
     bool enable_chat_template = true;
     common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
     int reasoning_budget = -1;
     bool prefill_assistant = true; // if true, any trailing assistant message will be prefilled into the response
+    int sleep_idle_seconds = -1; // if >0, server will sleep after this many seconds of idle time

     std::vector<std::string> api_keys;

@@ -55,7 +55,7 @@ auto parser = build_chat_peg_native_parser([&](common_chat_peg_native_builder &
 ```

 For a more complete example, see `test_example_native()` in
-[tests/test-chat-peg-parser.cpp](tests/test-chat-peg-parser.cpp).
+[tests/test-chat-peg-parser.cpp](/tests/test-chat-peg-parser.cpp).

 ## Parsers/Combinators

@@ -175,7 +175,7 @@ Most model output can be placed in one of the following categories:
   (Qwen3-Coder, MiniMax M2) or pseudo-function calls (LFM2)

 To provide broad coverage,
-[`common/chat-peg-parser.h`](common/chat-peg-parser.h) contains builders and
+[`common/chat-peg-parser.h`](/common/chat-peg-parser.h) contains builders and
 mappers that help create parsers and visitors/extractors for these types. They
 require parsers to tag nodes to conform to an AST "shape". This normalization
 makes it easy to extract information and generalize parsing.
@@ -3076,8 +3076,11 @@ static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx,
         ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 3, node_idx + 9 })) {
         ggml_tensor * softmax = cgraph->nodes[node_idx];
         ggml_tensor * weights = cgraph->nodes[node_idx + 9];
+        ggml_tensor * get_rows = cgraph->nodes[node_idx + 4];
+        ggml_tensor * argsort = cgraph->nodes[node_idx + 2];
+        int n_expert = cgraph->nodes[node_idx]->src[0]->ne[0];

-        if (ggml_cuda_should_use_topk_moe(softmax, weights)) {
+        if (ggml_cuda_should_use_topk_moe(softmax, weights, get_rows, argsort, nullptr, n_expert)) {
             return true;
         }
     }
@@ -3085,7 +3088,11 @@ static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx,
     if (is_equal(topk_moe_ops, ops) && ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 3, node_idx + 4 })) {
         ggml_tensor * softmax = cgraph->nodes[node_idx];
         ggml_tensor * weights = cgraph->nodes[node_idx + 4];
-        if (ggml_cuda_should_use_topk_moe(softmax, weights)) {
+        ggml_tensor * get_rows = cgraph->nodes[node_idx + 4];
+        ggml_tensor * argsort = cgraph->nodes[node_idx + 2];
+        int n_expert = cgraph->nodes[node_idx]->src[0]->ne[0];
+
+        if (ggml_cuda_should_use_topk_moe(softmax, weights, get_rows, argsort, nullptr, n_expert)) {
             return true;
         }
     }
@@ -3094,8 +3101,11 @@ static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx,
         ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 1, node_idx + 5 })) {
         ggml_tensor * softmax = cgraph->nodes[node_idx + 4];
         ggml_tensor * weights = cgraph->nodes[node_idx + 5];
+        ggml_tensor * get_rows = cgraph->nodes[node_idx + 2];
+        ggml_tensor * argsort = cgraph->nodes[node_idx + 0];
+        int n_expert = cgraph->nodes[node_idx]->src[0]->ne[0];

-        if (ggml_cuda_should_use_topk_moe(softmax, weights)) {
+        if (ggml_cuda_should_use_topk_moe(softmax, weights, get_rows, argsort, nullptr, n_expert)) {
             return true;
         }
     }
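All three fusion variants run the same check and differ only in which graph nodes play each role. A hypothetical consolidation, purely illustrative (the helper and struct names are invented, not part of this diff):

```cpp
// Offsets (relative to node_idx) of the nodes taking part in the fused
// top-k MoE pattern; each TOPK_MOE_* variant supplies its own set.
struct topk_moe_offsets {
    int softmax, weights, get_rows, argsort;
};

static bool can_fuse_topk_moe_at(const ggml_cgraph * cgraph, int node_idx, topk_moe_offsets off) {
    ggml_tensor * softmax  = cgraph->nodes[node_idx + off.softmax];
    ggml_tensor * weights  = cgraph->nodes[node_idx + off.weights];
    ggml_tensor * get_rows = cgraph->nodes[node_idx + off.get_rows];
    ggml_tensor * argsort  = cgraph->nodes[node_idx + off.argsort];
    const int n_expert     = cgraph->nodes[node_idx]->src[0]->ne[0];
    return ggml_cuda_should_use_topk_moe(softmax, weights, get_rows, argsort, nullptr, n_expert);
}
```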
@@ -268,7 +268,23 @@ void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx,
     }
 }

-bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax, const ggml_tensor * weights, const ggml_tensor * clamp) {
+bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax,
+                                   const ggml_tensor * weights,
+                                   const ggml_tensor * get_rows,
+                                   const ggml_tensor * argsort,
+                                   const ggml_tensor * clamp,
+                                   int n_expert) {
+    ggml_tensor * probs = get_rows->src[0];
+    if (probs->op != GGML_OP_RESHAPE) {
+        return false;
+    }
+    probs = probs->src[0];
+    ggml_tensor * selection_probs = argsort->src[0];
+
+    if (probs != selection_probs) {
+        return false;
+    }
+
     float scale = 1.0f;
     float max_bias = 0.0f;

@@ -288,7 +304,6 @@ bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax, const ggml_tenso
         return false;
     }

-    const int n_expert = softmax->ne[0];
     // n_expert must be a power of 2
     if ((n_expert & (n_expert - 1)) != 0 || n_expert > 512) {
         return false;
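The retained power-of-two test uses the standard bit trick: a power of two has exactly one set bit, so `n & (n - 1)` clears it and yields zero. A self-contained check:

```cpp
#include <cassert>

// n is a power of two iff n > 0 and (n & (n - 1)) == 0.
static bool is_pow2(int n) {
    return n > 0 && (n & (n - 1)) == 0;
}

int main() {
    assert(is_pow2(512));   // the largest expert count the kernel accepts
    assert(!is_pow2(320));  // a non-power-of-two count rejects fusion
    return 0;
}
```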
@@ -11,6 +11,11 @@ void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx,
                            const bool delayed_softmax = false,
                            ggml_tensor * weight_clamp = nullptr);

-bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax, const ggml_tensor * weights, const ggml_tensor * clamp = nullptr);
+bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax,
+                                   const ggml_tensor * weights,
+                                   const ggml_tensor * get_rows,
+                                   const ggml_tensor * argsort,
+                                   const ggml_tensor * clamp,
+                                   int n_expert);

 std::initializer_list<enum ggml_op> ggml_cuda_topk_moe_ops(bool with_norm, bool delayed_softmax = false);
@@ -583,7 +583,7 @@ static rpc_tensor serialize_tensor(const ggml_tensor * tensor) {
     if (tensor->buffer) {
         ggml_backend_buffer_t buffer = tensor->buffer;
         ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
-        result.buffer = ctx->remote_ptr;
+        result.buffer = ctx != nullptr ? ctx->remote_ptr : 0;
     } else {
         result.buffer = 0;
     }

@@ -689,6 +689,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_gelu_quick[2];
     vk_pipeline pipeline_silu[2];
     vk_pipeline pipeline_relu[2];
+    vk_pipeline pipeline_xielu[2];
     vk_pipeline pipeline_neg[2];
     vk_pipeline pipeline_tanh[2];
     vk_pipeline pipeline_sigmoid[2];
@@ -990,6 +991,8 @@ struct vk_op_push_constants {
     uint32_t KY;
     float param1;
     float param2;
+    float param3;
+    float param4;
 };

 struct vk_op_glu_push_constants {
@@ -1258,6 +1261,7 @@ struct vk_op_im2col_push_constants {
     int32_t s0; int32_t s1;
     int32_t p0; int32_t p1;
     int32_t d0; int32_t d1;
+    uint32_t batch_IC;
 };

 struct vk_op_im2col_3d_push_constants {
@@ -3973,6 +3977,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
     CREATE_UNARY(gelu_quick)
     CREATE_UNARY(silu)
     CREATE_UNARY(relu)
+    CREATE_UNARY(xielu)
     CREATE_UNARY(neg)
     CREATE_UNARY(tanh)
     CREATE_UNARY(sigmoid)
@@ -5898,6 +5903,9 @@ static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context&
         std::cerr << "(" << buffer.buffer << ", " << buffer.offset << ", " << buffer.range << "), ";
     }
     std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))");
+    GGML_ASSERT(wg0 <= ctx->device->properties.limits.maxComputeWorkGroupCount[0] &&
+                wg1 <= ctx->device->properties.limits.maxComputeWorkGroupCount[1] &&
+                wg2 <= ctx->device->properties.limits.maxComputeWorkGroupCount[2]);
     GGML_ASSERT(ctx->descriptor_set_idx < ctx->descriptor_sets.size());
     GGML_ASSERT(descriptor_buffer_infos.size() <= MAX_PARAMETER_COUNT);
     GGML_ASSERT(pipeline->parameter_count == descriptor_buffer_infos.size());
@@ -8549,6 +8557,8 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_gelu_quick[dst->type == GGML_TYPE_F16];
         case GGML_UNARY_OP_RELU:
             return ctx->device->pipeline_relu[dst->type == GGML_TYPE_F16];
+        case GGML_UNARY_OP_XIELU:
+            return ctx->device->pipeline_xielu[dst->type == GGML_TYPE_F16];
         case GGML_UNARY_OP_NEG:
             return ctx->device->pipeline_neg[dst->type == GGML_TYPE_F16];
         case GGML_UNARY_OP_TANH:
@@ -9084,6 +9094,8 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
             const uint32_t batch = src1->ne[is_2D ? 3 : 2];

             elements = { OW * KW * KH, OH, batch * IC };
+            elements[1] = std::min(elements[1], ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
+            elements[2] = std::min(elements[2], ctx->device->properties.limits.maxComputeWorkGroupCount[2]);
         } break;
         case GGML_OP_IM2COL_3D:
         {
@@ -9695,14 +9707,14 @@ static void ggml_vk_opt_step_adamw(ggml_backend_vk_context * ctx, vk_context& su

     ggml_vk_op_f32_opt_step_adamw(
         ctx, subctx, dst,
-        { (uint32_t)n, 0, 0.0f, 0.0f }
+        { (uint32_t)n, 0, 0.0f, 0.0f, 0.0f, 0.0f }
     );
 }

 static void ggml_vk_opt_step_sgd(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
     const size_t n = ggml_nelements(dst->src[0]);

-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, src2, nullptr, dst, GGML_OP_OPT_STEP_SGD, { (uint32_t)n, 0, 0.0f, 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, src2, nullptr, dst, GGML_OP_OPT_STEP_SGD, { (uint32_t)n, 0, 0.0f, 0.0f, 0.0f, 0.0f });
 }

 static void ggml_vk_concat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -9788,6 +9800,7 @@ static void ggml_vk_arange(ggml_backend_vk_context * ctx, vk_context& subctx, gg
         1,
         ggml_get_op_params_f32(dst, 0),
         ggml_get_op_params_f32(dst, 2),
+        0.0f, 0.0f,
     };

     vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, dst, GGML_OP_ARANGE);
@@ -9809,6 +9822,7 @@ static void ggml_vk_fill(ggml_backend_vk_context * ctx, vk_context& subctx, ggml
         1,
         ggml_get_op_params_f32(dst, 0),
         0.0f,
+        0.0f, 0.0f,
     };

     vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, dst, GGML_OP_FILL);
@@ -9924,13 +9938,13 @@ static void ggml_vk_set_rows(ggml_backend_vk_context * ctx, vk_context& subctx,
 }

 static void ggml_vk_silu_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SILU_BACK, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SILU_BACK, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f, 0.0f, 0.0f });
 }

 static void ggml_vk_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
     float * op_params = (float *)dst->op_params;

-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f, 0.0f, 0.0f });
 }

 static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
@@ -9941,7 +9955,7 @@ static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx
     const float eps = float_op_params[1];
     const uint32_t group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);

-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_GROUP_NORM, { group_size, 0, eps, 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_GROUP_NORM, { group_size, 0, eps, 0.0f, 0.0f, 0.0f });
 }

 static uint32_t ggml_vk_rms_num_partials(ggml_backend_vk_context * ctx, const ggml_tensor *node) {
@@ -10110,16 +10124,26 @@ static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx,

 static void ggml_vk_rms_norm_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     float * op_params = (float *)dst->op_params;
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_RMS_NORM_BACK, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_RMS_NORM_BACK, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f, 0.0f, 0.0f });
 }

 static void ggml_vk_l2_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
     float * op_params = (float *)dst->op_params;
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_L2_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_L2_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f, 0.0f, 0.0f });
 }

 static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f, 0.0f, 0.0f });
+}
+
+static void ggml_vk_xielu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
+    float * op_params = (float *)dst->op_params;
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_UNARY,
+        {
+            (uint32_t)ggml_nelements(src0), 0,
+            op_params[1], op_params[2], op_params[3], op_params[4]
+        }
+    );
 }

 static void ggml_vk_glu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -10244,7 +10268,7 @@ static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx,

 static void ggml_vk_soft_max_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     float * op_params = (float *)dst->op_params;
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SOFT_MAX_BACK, { (uint32_t)src0->ne[0], (uint32_t)ggml_nrows(src0), op_params[0], op_params[1] });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SOFT_MAX_BACK, { (uint32_t)src0->ne[0], (uint32_t)ggml_nrows(src0), op_params[0], op_params[1], 0.0f, 0.0f });
 }

 static void ggml_vk_topk_moe(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_cgraph * cgraph, int node_idx) {
@@ -10541,11 +10565,11 @@ static void ggml_vk_cumsum(ggml_backend_vk_context * ctx, vk_context& subctx, co
 }

 static void ggml_vk_argmax(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_ARGMAX, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], 0.0f, 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_ARGMAX, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], 0.0f, 0.0f, 0.0f, 0.0f });
 }

 static void ggml_vk_count_equal(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_COUNT_EQUAL, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_COUNT_EQUAL, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f, 0.0f, 0.0f });
 }

 static void ggml_vk_solve_tri(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -10587,6 +10611,7 @@ static void ggml_vk_im2col(ggml_backend_vk_context * ctx, vk_context& subctx, co
     const uint32_t batch_offset = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32

     const uint32_t pelements = OW * KW * KH;
+    const uint32_t batch = src1->ne[is_2D ? 3 : 2];

     const ggml_backend_vk_buffer_context * d_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
     const vk_buffer d_buf = d_buf_ctx->dev_buffer;
@@ -10599,7 +10624,7 @@ static void ggml_vk_im2col(ggml_backend_vk_context * ctx, vk_context& subctx, co
         IC, IW, IH, OW, OH, KW, KH,
         pelements,
         IC * KH * KW,
-        s0, s1, p0, p1, d0, d1,
+        s0, s1, p0, p1, d0, d1, batch * IC
     });
 }

@@ -10804,7 +10829,7 @@ static void ggml_vk_conv_2d_dw(ggml_backend_vk_context * ctx, vk_context& subctx

 static void ggml_vk_leaky_relu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
     const float * op_params = (const float *)dst->op_params;
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_LEAKY_RELU, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f });
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_LEAKY_RELU, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f, 0.0f, 0.0f });
 }

 #ifdef GGML_VULKAN_RUN_TESTS
@@ -12050,6 +12075,9 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
         case GGML_UNARY_OP_TRUNC:
             ggml_vk_unary(ctx, compute_ctx, src0, node);
             break;
+        case GGML_UNARY_OP_XIELU:
+            ggml_vk_xielu(ctx, compute_ctx, src0, node);
+            break;
         default:
             return false;
         }
@@ -12920,24 +12948,43 @@ static bool ggml_vk_can_fuse_topk_moe(ggml_backend_vk_context * ctx, const struc

     const ggml_tensor * softmax;
     const ggml_tensor * weights;
+    const ggml_tensor * get_rows;
+    const ggml_tensor * argsort;

     switch (mode) {
         case TOPK_MOE_EARLY_SOFTMAX_NORM:
             softmax = cgraph->nodes[node_idx + 0];
             weights = cgraph->nodes[node_idx + 9];
+            get_rows = cgraph->nodes[node_idx + 4];
+            argsort = cgraph->nodes[node_idx + 2];
             break;
         case TOPK_MOE_EARLY_SOFTMAX:
             softmax = cgraph->nodes[node_idx + 0];
             weights = cgraph->nodes[node_idx + 4];
+            get_rows = cgraph->nodes[node_idx + 4];
+            argsort = cgraph->nodes[node_idx + 2];
             break;
         case TOPK_MOE_LATE_SOFTMAX:
             softmax = cgraph->nodes[node_idx + 4];
             weights = cgraph->nodes[node_idx + 5];
+            get_rows = cgraph->nodes[node_idx + 2];
+            argsort = cgraph->nodes[node_idx + 0];
             break;
         default:
             return false;
     }

+    ggml_tensor * probs = get_rows->src[0];
+    if (probs->op != GGML_OP_RESHAPE) {
+        return false;
+    }
+    probs = probs->src[0];
+    ggml_tensor * selection_probs = argsort->src[0];
+
+    if (probs != selection_probs) {
+        return false;
+    }
+
     const float * op_params = (const float *)softmax->op_params;

     float scale = op_params[0];
@@ -13502,7 +13549,8 @@ static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph *
                 !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_RMS_NORM && graph->nodes[j]->op == GGML_OP_MUL) &&
                 !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT && graph->nodes[j]->op == GGML_OP_ADD) &&
                 !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT_ID && graph->nodes[j]->op == GGML_OP_ADD_ID) &&
-                !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT_ID && graph->nodes[j]->op == GGML_OP_MUL)) {
+                !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT_ID && graph->nodes[j]->op == GGML_OP_MUL) &&
+                !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_ADD && graph->nodes[j]->op == GGML_OP_ADD)) {
                 ok = false;
                 break;
             }
@@ -13842,6 +13890,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
             case GGML_UNARY_OP_GELU_QUICK:
             case GGML_UNARY_OP_SILU:
             case GGML_UNARY_OP_RELU:
+            case GGML_UNARY_OP_XIELU:
             case GGML_UNARY_OP_NEG:
             case GGML_UNARY_OP_TANH:
            case GGML_UNARY_OP_SIGMOID:
@@ -14747,7 +14796,7 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
     } else if (tensor->op == GGML_OP_LOG) {
         tensor_clone = ggml_log(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_TRI) {
-        tensor_clone = ggml_tri(ggml_ctx, src_clone[0], ggml_get_op_params_i32(tensor, 0));
+        tensor_clone = ggml_tri(ggml_ctx, src_clone[0], (ggml_tri_type)ggml_get_op_params_i32(tensor, 0));
     } else if (tensor->op == GGML_OP_DIAG) {
         tensor_clone = ggml_diag(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_CLAMP) {
@@ -14835,6 +14884,13 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
             case GGML_UNARY_OP_RELU:
                 tensor_clone = ggml_relu(ggml_ctx, src_clone[0]);
                 break;
+            case GGML_UNARY_OP_XIELU:
+                tensor_clone = ggml_xielu(ggml_ctx, src_clone[0], 0, 0, 0, 0);
+                ggml_set_op_params_f32(tensor_clone, 1, ggml_get_op_params_f32(tensor, 1));
+                ggml_set_op_params_f32(tensor_clone, 2, ggml_get_op_params_f32(tensor, 2));
+                ggml_set_op_params_f32(tensor_clone, 3, ggml_get_op_params_f32(tensor, 3));
+                ggml_set_op_params_f32(tensor_clone, 4, ggml_get_op_params_f32(tensor, 4));
+                break;
             case GGML_UNARY_OP_NEG:
                 tensor_clone = ggml_neg(ggml_ctx, src_clone[0]);
                 break;
@@ -6,4 +6,6 @@ layout (push_constant) uniform parameter
     uint KY;
     float param1;
     float param2;
+    float param3;
+    float param4;
 } p;
@@ -19,6 +19,7 @@ layout (push_constant) uniform parameter
     int s0; int s1;
     int p0; int p1;
     int d0; int d1;
+    uint batch_IC;
 } p;

 layout(constant_id = 0) const uint BLOCK_SIZE = 32;
@@ -34,12 +35,12 @@ layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
 layout (buffer_reference) buffer D_ptr {D_TYPE d;};
 #endif

-void main() {
+void im2col(const uint y, const uint z) {
     const uint gidx = gl_GlobalInvocationID.x;

-    const uint oh = gl_GlobalInvocationID.y;
-    const uint batch = gl_GlobalInvocationID.z / p.IC;
-    const uint ic = gl_GlobalInvocationID.z % p.IC;
+    const uint oh = y;
+    const uint batch = z / p.IC;
+    const uint ic = z % p.IC;

     const uint src_base = ic * p.offset_delta + batch * p.batch_offset;
     const BDA_OFFSET_T dst_base = ((BDA_OFFSET_T(batch) * p.OH + oh) * p.OW) * p.CHW + BDA_OFFSET_T(ic) * (p.KW * p.KH);
@@ -101,3 +102,15 @@ void main() {
 #endif
     }
 }
+
+void main() {
+    uint y = gl_GlobalInvocationID.y;
+    while (y < p.OH) {
+        uint z = gl_GlobalInvocationID.z;
+        while (z < p.batch_IC) {
+            im2col(y, z);
+            z += gl_NumWorkGroups.z;
+        }
+        y += gl_NumWorkGroups.y;
+    }
+}
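The shader's new `main()` wraps `im2col` in grid-stride loops over `y` and `z`, so outputs larger than the device's `maxComputeWorkGroupCount` limits (which the host now clamps against with the `std::min` calls shown earlier in this diff) are still fully covered. A CPU-side C++ analogue of the same pattern, as a sketch only:

```cpp
#include <cstdint>
#include <functional>

// Grid-stride pattern: launch at most max_wg "workgroups" and let each one
// step through the remaining indices with stride max_wg, so full coverage
// does not require one workgroup per output row.
static void dispatch_rows(uint32_t total_rows, uint32_t max_wg,
                          const std::function<void(uint32_t)> & work) {
    for (uint32_t wg = 0; wg < max_wg && wg < total_rows; ++wg) {
        for (uint32_t y = wg; y < total_rows; y += max_wg) {
            work(y); // corresponds to im2col(y, z) in the shader
        }
    }
}
```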
@@ -11,36 +11,54 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
     const uint y_idx = i * QUANT_K + 16 * itid;
     const uint nibble_shift = 4 * (itid & 1);
     const uint ib32 = itid / 2; // 0..7

     uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
+    // Precompute db multiplication factors
+    float db_vals[NUM_ROWS];
     [[unroll]] for (uint n = 0; n < num_rows; ++n) {
         const float d = float(data_a[ibi].d);
-        const uint scale = (data_a[ibi].scales[ib32] >> nibble_shift) & 0xF;
-        const float db = d * (0.5 + scale) * 0.25;
-
+        const uint scale_raw = data_a[ibi].scales[ib32];
+        const uint scale = (scale_raw >> nibble_shift) & 0xF;
+        // Merge constant calculations d * (0.5 + scale) * 0.25 = d*0.125 + d*scale*0.25
+        db_vals[n] = d * (0.125f + float(scale) * 0.25f);
+        ibi += num_blocks_per_row;
+    }
+    ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
+    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+        // Preload grid and sign data for all l values
+        vec4 grid0_vals[2], grid1_vals[2];
+        uint sign_vals[2], sign7_vals[2];
         [[unroll]] for (uint l = 0; l < 2; ++l) {
             const uint qs = data_a[ibi].qs[2 * itid + l];
-            const uint sign = qs >> 9;
-            const uint sign7 = bitCount(sign);
-            const vec4 grid0 = vec4(unpack8(iq2xs_grid[qs & 511].x));
-            const vec4 grid1 = vec4(unpack8(iq2xs_grid[qs & 511].y));
-
-            [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
-                vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]);
-                vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]);
-
-                FLOAT_TYPE sum =
-                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x),
-                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y),
-                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z),
-                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w),
-                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x),
-                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y),
-                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z),
-                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign7 & 1) != 0 ? -grid1.w : grid1.w),
-                    FLOAT_TYPE(0.0)))))))));
-                temp[j][n] = fma(db, sum, temp[j][n]);
+            sign_vals[l] = qs >> 9;
+            sign7_vals[l] = bitCount(sign_vals[l]);
+            const uvec2 grid_data = iq2xs_grid[qs & 511];
+            grid0_vals[l] = vec4(unpack8(grid_data.x));
+            grid1_vals[l] = vec4(unpack8(grid_data.y));
+        }
+        // Preload B data for all j columns (reduce repeated index calculations)
+        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+            FLOAT_TYPE sum = FLOAT_TYPE(0.0);
+            [[unroll]] for (uint l = 0; l < 2; ++l) {
+                const uint sign = sign_vals[l];
+                const uint sign7 = sign7_vals[l];
+                const vec4 grid0 = grid0_vals[l];
+                const vec4 grid1 = grid1_vals[l];
+                // Precompute indices
+                const uint b_idx = (j * p.batch_stride_b + b_offset + y_idx) / 4 + 2 * l;
+                const vec4 b0 = vec4(data_b_v4[b_idx + 0]);
+                const vec4 b4 = vec4(data_b_v4[b_idx + 1]);
+                sum +=
+                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x),
+                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y),
+                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z),
+                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w),
+                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x),
+                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y),
+                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z),
+                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign7 & 1) != 0 ? -grid1.w : grid1.w),
+                    FLOAT_TYPE(0.0)))))))));
             }
+            temp[j][n] = fma(FLOAT_TYPE(db_vals[n]), sum, temp[j][n]);
         }
         ibi += num_blocks_per_row;
     }
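The merged factor is an exact rewrite, not an approximation: 0.125 + s*0.25 equals (0.5 + s)*0.25 for every integer scale s, and since 0.5, 0.25, and 0.125 are powers of two, the two forms match bit for bit before the single multiply by d. A quick compile-time check of the identity:

```cpp
// (0.5 + s) * 0.25 == 0.125 + s * 0.25 holds exactly in binary
// floating point for the 4-bit scales used here (s = 0..15).
static_assert((0.5 + 7.0)  * 0.25 == 0.125 + 7.0  * 0.25, "s = 7");
static_assert((0.5 + 15.0) * 0.25 == 0.125 + 15.0 * 0.25, "s = 15 (max)");
```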
@@ -853,6 +853,8 @@ void process_shaders() {
     string_to_spv("hardswish_f32", "hardswish.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
     string_to_spv("abs_f16", "abs.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
     string_to_spv("abs_f32", "abs.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("xielu_f16", "xielu.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
+    string_to_spv("xielu_f32", "xielu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});

     string_to_spv("tri_f16", "tri.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
     string_to_spv("tri_f32", "tri.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});

ggml/src/ggml-vulkan/vulkan-shaders/xielu.comp (new file, 35 lines)
@@ -0,0 +1,35 @@
+#version 450
+
+#include "generic_head.glsl"
+#include "types.glsl"
+
+#extension GL_EXT_control_flow_attributes : enable
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
+layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};
+
+void main() {
+    const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
+
+    if (i >= p.KX) {
+        return;
+    }
+
+    float x = float(data_a[i]);
+
+    float alpha_n = p.param1;
+    float alpha_p = p.param2;
+    float beta = p.param3;
+    float eps = p.param4;
+
+    if (x > 0.0f) {
+        x = alpha_p * x * x + beta * x;
+    } else {
+        const float min_x_eps = min(x, eps);
+        x = (exp(min_x_eps) - 1 - x) * alpha_n + beta * x;
+    }
+
+    data_d[i] = D_TYPE(x);
+}
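For reference, the activation computed by the new shader (with `param1..param4` carrying `alpha_n`, `alpha_p`, `beta`, and `eps`, exactly as read in `main()` above) transcribes to this plain C++ function; a sketch mirroring the shader math, not code from the PR:

```cpp
#include <algorithm>
#include <cmath>

// XIELU as evaluated by xielu.comp: quadratic-plus-linear for x > 0,
// an exp-based branch clamped at eps for x <= 0.
float xielu(float x, float alpha_n, float alpha_p, float beta, float eps) {
    if (x > 0.0f) {
        return alpha_p * x * x + beta * x;
    }
    const float min_x_eps = std::min(x, eps);
    return (std::exp(min_x_eps) - 1.0f - x) * alpha_n + beta * x;
}
```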
@@ -110,7 +110,6 @@ class SafetensorRemote:
     """

     BASE_DOMAIN = "https://huggingface.co"
-    ALIGNMENT = 8 # bytes

     @classmethod
     def get_list_tensors_hf_model(cls, model_id: str) -> dict[str, RemoteTensor]:
@@ -204,9 +203,6 @@ class SafetensorRemote:

         # Calculate the data start offset
         data_start_offset = 8 + metadata_length
-        alignment = SafetensorRemote.ALIGNMENT
-        if data_start_offset % alignment != 0:
-            data_start_offset += alignment - (data_start_offset % alignment)

         # Check if we have enough data to read the metadata
         if len(raw_data) < 8 + metadata_length:
@@ -298,7 +294,6 @@ class SafetensorsLocal:
     Custom parsing gives a bit more control over the memory usage.
     The official safetensors library doesn't expose file ranges.
     """
-    ALIGNMENT = 8 # bytes

     tensors: dict[str, LocalTensor]

@@ -316,9 +311,6 @@ class SafetensorsLocal:
             raise ValueError(f"Failed to parse safetensors metadata as JSON: {e}")

         data_start_offset = f.tell()
-        alignment = self.ALIGNMENT
-        if data_start_offset % alignment != 0:
-            data_start_offset += alignment - (data_start_offset % alignment)

         tensors: dict[str, LocalTensor] = {}
         for name, meta in metadata.items():
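The dropped rounding assumed the tensor buffer starts on an 8-byte boundary, which the safetensors layout does not call for: the file is an 8-byte little-endian header length N, then N bytes of JSON metadata, then the byte buffer immediately after (any alignment padding a writer adds is already counted inside N). The retained computation is simply, as a sketch:

```cpp
#include <cstdint>

// Safetensors data offset: 8-byte header length field + JSON metadata,
// with no extra padding (matching the retained Python line above).
static uint64_t data_start_offset(uint64_t metadata_length) {
    return 8 + metadata_length;
}
```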
@@ -5118,25 +5118,36 @@ struct test_top_k : public test_case {
     }
 };

+enum MoeGatingFunc {
+    GATING_FUNC_SOFTMAX,
+    GATING_FUNC_SIGMOID,
+    GATING_FUNC_SOFTMAX_WEIGHT,
+};
+
 struct test_topk_moe : public test_case {
     const std::array<int64_t, 4> ne;
     const int n_expert_used;
     const bool with_norm;
     const bool delayed_softmax;
+    const bool bias_probs;
+    const MoeGatingFunc gating_func;
+    const float scale_w;

     test_topk_moe(std::array<int64_t, 4> ne = { 10, 5, 1, 1 },
                   int n_expert_used = 1,
                   bool with_norm = false,
-                  bool delayed_softmax = false) :
+                  bool bias_probs = false,
+                  MoeGatingFunc gating_func = GATING_FUNC_SOFTMAX,
+                  float scale_w = 0.0f) :
         ne(ne),
         n_expert_used(n_expert_used),
         with_norm(with_norm),
-        delayed_softmax(delayed_softmax) {
+        bias_probs(bias_probs),
+        gating_func(gating_func),
+        scale_w(scale_w) {
         GGML_ASSERT(n_expert_used <= ne[0]);
         GGML_ASSERT(!(with_norm && delayed_softmax));
     }

-    std::string vars() override { return VARS_TO_STR4(ne, n_expert_used, with_norm, delayed_softmax); }
+    std::string vars() override { return VARS_TO_STR6(ne, n_expert_used, with_norm, bias_probs, gating_func, scale_w); }

     std::string op_desc(ggml_tensor * t) override {
         GGML_UNUSED(t);
@@ -5150,28 +5161,47 @@ struct test_topk_moe : public test_case {
         const int n_tokens = ne[1];

         ggml_tensor * logits = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne.data());
-        ggml_tensor * probs = delayed_softmax ? logits : ggml_soft_max(ctx, logits);
-        ggml_tensor * selected_experts = ggml_argsort_top_k(ctx, probs, n_expert_used); // [n_expert_used, n_tokens]
+        ggml_tensor * probs =
+            (gating_func == GATING_FUNC_SOFTMAX) ? ggml_soft_max(ctx, logits) :
+            (gating_func == GATING_FUNC_SIGMOID) ? ggml_sigmoid(ctx, logits) : logits;
+        ggml_set_name(probs, "probs");

-        ggml_tensor * out = ggml_get_rows(ctx, ggml_reshape_3d(ctx, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
+        ggml_tensor * selection_probs = probs;
+        if (bias_probs) {
+            ggml_tensor * exp_probs_b = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne.data());
+            ggml_set_name(exp_probs_b, "exp_probs_b");
+            selection_probs = ggml_add(ctx, probs, exp_probs_b);
+            ggml_set_name(selection_probs, "selection_probs");
+        }

-        if (delayed_softmax) {
-            out = ggml_reshape_2d(ctx, out, n_expert_used, n_tokens);
-            out = ggml_soft_max(ctx, out); // [n_expert_used, n_tokens]
-            out = ggml_reshape_3d(ctx, out, 1, n_expert_used, n_tokens);
+        ggml_tensor * selected_experts = ggml_argsort_top_k(ctx, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
+        ggml_set_name(selected_experts, "selected_experts");
+
+        ggml_tensor * weights = ggml_get_rows(ctx, ggml_reshape_3d(ctx, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
+        ggml_set_name(weights, "weights");
+
+        if (gating_func == GATING_FUNC_SOFTMAX_WEIGHT) {
+            weights = ggml_reshape_2d(ctx, weights, n_expert_used, n_tokens);
+            weights = ggml_soft_max(ctx, weights); // [n_expert_used, n_tokens]
+            weights = ggml_reshape_3d(ctx, weights, 1, n_expert_used, n_tokens);
         }

         if (with_norm) {
-            out = ggml_reshape_2d(ctx, out, n_expert_used, n_tokens);
-            ggml_tensor * weights_sum = ggml_sum_rows(ctx, out); // [1, n_tokens]
+            weights = ggml_reshape_2d(ctx, weights, n_expert_used, n_tokens);
+            ggml_tensor * weights_sum = ggml_sum_rows(ctx, weights); // [1, n_tokens]
+            ggml_set_name(weights_sum, "weights_sum");

             weights_sum = ggml_clamp(ctx, weights_sum, 6.103515625e-5, INFINITY);
-            out = ggml_div(ctx, out, weights_sum); // [n_expert_used, n_tokens]
-            out = ggml_reshape_3d(ctx, out, 1, n_expert_used, n_tokens);
+            weights = ggml_div(ctx, weights, weights_sum); // [n_expert_used, n_tokens]
+            weights = ggml_reshape_3d(ctx, weights, 1, n_expert_used, n_tokens);
         }

-        ggml_set_name(out, "out");
-        return out;
+        if (scale_w) {
+            weights = ggml_scale(ctx, weights, scale_w);
+        }
+
+        ggml_set_name(weights, "weights");
+        return weights;
     }
 };
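In equations, with logits $\ell$ and an optional expert bias $b$ (the `bias_probs` case), the graph above computes

$$
p = \begin{cases}
\operatorname{softmax}(\ell) & \texttt{GATING\_FUNC\_SOFTMAX} \\
\sigma(\ell) & \texttt{GATING\_FUNC\_SIGMOID} \\
\ell & \texttt{GATING\_FUNC\_SOFTMAX\_WEIGHT}
\end{cases}
\qquad s = p + b, \qquad E = \operatorname{top\text{-}k}(s), \qquad w = p_E,
$$

after which the `SOFTMAX_WEIGHT` mode applies $w \leftarrow \operatorname{softmax}(w)$, `with_norm` applies $w \leftarrow w / \max(\sum_i w_i,\, 2^{-14})$ (the clamp constant `6.103515625e-5` is exactly $2^{-14}$), and a nonzero `scale_w` multiplies $w$ at the end. Note the split: experts are selected by the biased scores $s$ but weighted by the unbiased $p$.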
@@ -5344,6 +5374,13 @@ struct test_sum : public test_case {
     float grad_eps() override {
         return 0.1f * sqrtf(ne[0]*ne[1]*ne[2]*ne[3]);
     }
+
+    // Don't center the distribution around zero. Helps to avoid catastrophic cancellation.
+    void initialize_tensors(ggml_context * ctx) override {
+        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
+            init_tensor_uniform(t, -0.9f, 1.1f);
+        }
+    }
 };

 // GGML_OP_SUM_ROWS
@@ -5410,6 +5447,13 @@ struct test_mean : public test_case {
     float grad_eps() override {
         return 0.1f * ne[0]*ne[1]*ne[2]*ne[3];
     }
+
+    // Don't center the distribution around zero. Helps to avoid catastrophic cancellation.
+    void initialize_tensors(ggml_context * ctx) override {
+        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
+            init_tensor_uniform(t, -0.9f, 1.1f);
+        }
+    }
 };

 // GGML_OP_UPSCALE
@@ -6886,6 +6930,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
     test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 2560}, {3, 3, 1, 2560}, 1, 1, 1, 1, 1, 1, true));
     test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 2560}, {3, 3, 2, 2560}, 1, 1, 1, 1, 1, 1, true));
     test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {5, 5, 1, 32}, {3, 4, 1, 32}, 1, 1, 0, 0, 1, 1, true));
+    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {2, 2, 1536, 729}, {2, 2, 1536, 4096}, 1, 1, 0, 0, 1, 1, true));

     // im2col 3D
     test_cases.emplace_back(new test_im2col_3d(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32));
@@ -7977,19 +8022,22 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
         }
     }

-    for (bool with_norm : {false, true}) {
-        test_cases.emplace_back(new test_topk_moe({8, 22, 1, 1}, 4, with_norm));
-        test_cases.emplace_back(new test_topk_moe({31, 22, 1, 1}, 8, with_norm));
-        test_cases.emplace_back(new test_topk_moe({32, 22, 1, 1}, 8, with_norm));
-        test_cases.emplace_back(new test_topk_moe({40, 22, 1, 1}, 8, with_norm));
-        test_cases.emplace_back(new test_topk_moe({71, 22, 1, 1}, 8, with_norm));
-        test_cases.emplace_back(new test_topk_moe({128, 1, 1, 1}, 128, with_norm));
-        test_cases.emplace_back(new test_topk_moe({129, 1, 1, 1}, 128, with_norm));
+    for (auto gate : {GATING_FUNC_SOFTMAX, GATING_FUNC_SIGMOID, GATING_FUNC_SOFTMAX_WEIGHT}) {
+        for (bool with_norm : {false, true}) {
+            for (bool bias_probs : {false, true}) {
+                for (float scale_w : {0.0f, 2.0f}) {
+                    test_cases.emplace_back(new test_topk_moe({8, 22, 1, 1}, 4, with_norm, bias_probs, gate, scale_w));
+                    test_cases.emplace_back(new test_topk_moe({31, 22, 1, 1}, 8, with_norm, bias_probs, gate, scale_w));
+                    test_cases.emplace_back(new test_topk_moe({32, 22, 1, 1}, 8, with_norm, bias_probs, gate, scale_w));
+                    test_cases.emplace_back(new test_topk_moe({40, 22, 1, 1}, 8, with_norm, bias_probs, gate, scale_w));
+                    test_cases.emplace_back(new test_topk_moe({71, 22, 1, 1}, 8, with_norm, bias_probs, gate, scale_w));
+                    test_cases.emplace_back(new test_topk_moe({128, 1, 1, 1}, 128, with_norm, bias_probs, gate, scale_w));
+                    test_cases.emplace_back(new test_topk_moe({129, 1, 1, 1}, 128, with_norm, bias_probs, gate, scale_w));
+                }
+            }
+        }
     }

     test_cases.emplace_back(new test_topk_moe({ 8, 22, 1, 1 }, 4, /*with_norm*/ false, /*delayed_softmax*/ true));
     test_cases.emplace_back(new test_topk_moe({ 32, 22, 1, 1 }, 8, /*with_norm*/ false, /*delayed_softmax*/ true));

 #if 0
     // these tests are disabled to save execution time, sbut they can be handy for debugging
     test_cases.emplace_back(new test_llama(2, true));
@@ -209,8 +209,6 @@ int main(int argc, char ** argv) {
         return 1;
     }

-    ctx_cli.ctx_server.init();
-
     console::spinner::stop();
     console::log("\n");

@@ -107,6 +107,8 @@ For detailed instructions, see the [test documentation](./tests/README.md).
 - Large-scale code base split into smaller files: https://github.com/ggml-org/llama.cpp/pull/17362
 - Introduction of router mode: https://github.com/ggml-org/llama.cpp/pull/17470
 - Speculative decoding: https://github.com/ggml-org/llama.cpp/pull/17808 and rework in https://github.com/ggml-org/llama.cpp/pull/17808
+- INI presets: https://github.com/ggml-org/llama.cpp/pull/17859 (+ refactoring: https://github.com/ggml-org/llama.cpp/pull/18169)
+- Sleeping mode: https://github.com/ggml-org/llama.cpp/pull/18228

@@ -1621,6 +1621,16 @@ Example of an error:
 }
 ```

+## Sleeping on Idle
+
+The server supports an automatic sleep mode that activates after a specified period of inactivity (no incoming tasks). This feature, introduced in [PR #18228](https://github.com/ggml-org/llama.cpp/pull/18228), can be enabled using the `--sleep-idle-seconds` command-line argument. It works seamlessly in both single-model and multi-model configurations.
+
+When the server enters sleep mode, the model and its associated memory (including the KV cache) are unloaded from RAM to conserve resources. Any new incoming task will automatically trigger the model to reload.
+
+Note that the following endpoints are exempt from being considered as incoming tasks. They do not trigger model reloading and do not reset the idle timer:
+- `GET /health`
+- `GET /props`
+
 ## More examples

 ### Interactive mode
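Internally the timeout reaches the task queue in milliseconds (`start_loop(params.sleep_idle_seconds * 1000)` later in this diff), and `wait_until_no_sleep()` blocks HTTP handlers until the model is reloaded. The queue implementation itself is not part of this excerpt; a minimal sketch of the shape such an idle-timeout loop takes, with every name here hypothetical:

```cpp
#include <chrono>
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

// Illustrative only: wait up to idle_ms for a task; on timeout enter the
// sleeping state, then wake and reload when the next task arrives.
struct idle_task_loop {
    std::mutex mtx;
    std::condition_variable cv;
    std::deque<std::function<void()>> tasks;

    void run(int idle_ms, const std::function<void(bool)> & on_sleeping) {
        std::unique_lock<std::mutex> lock(mtx);
        for (;;) {
            bool got_task = cv.wait_for(lock, std::chrono::milliseconds(idle_ms),
                                        [&] { return !tasks.empty(); });
            if (!got_task) {
                on_sleeping(true);  // e.g. destroy() the model and KV cache
                cv.wait(lock, [&] { return !tasks.empty(); });
                on_sleeping(false); // reload the model before serving
            }
            auto task = std::move(tasks.front());
            tasks.pop_front();
            lock.unlock();
            task();
            lock.lock();
        }
    }
};
```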
@@ -544,7 +544,9 @@ struct server_context_impl {

     server_metrics metrics;

-    json webui_settings = json::object();
+    // cached responses for HTTP API (read-only from HTTP threads)
+    json json_server_props = json::object();
+    json json_server_model_meta = json::object();

     // Necessary similarity of prompt for slot selection
     float slot_prompt_similarity = 0.0f;
@@ -554,8 +556,23 @@ struct server_context_impl {
     common_chat_templates_ptr chat_templates;
     oaicompat_parser_options oai_parser_opt;

+    bool sleeping = false;
+
+    ~server_context_impl() {
+        if (!sleeping) {
+            // destroy() is already called when entering sleeping state
+            // we don't call it again here to avoid double free
+            destroy();
+        }
+    }
+
+    void destroy() {
+        llama_init.reset();
+        ctx = nullptr;
+        model = nullptr;
+
         mtmd_free(mctx);
         mctx = nullptr;

         // Clear any sampling context
         for (server_slot & slot : slots) {
@@ -571,22 +588,29 @@ struct server_context_impl {
         llama_batch_free(batch);
     }

+    void handle_sleeping_state(bool new_state) {
+        GGML_ASSERT(sleeping != new_state);
+        if (new_state) {
+            SRV_INF("%s", "server is entering sleeping state\n");
+            destroy();
+        } else {
+            SRV_INF("%s", "server is exiting sleeping state\n");
+            if (!load_model(params_base)) {
+                GGML_ABORT("failed to reload model after sleeping");
+            }
+        }
+        sleeping = new_state;
+    }
+
     // load the model and initialize llama_context
+    // this may also be called to resume from sleeping state
     bool load_model(const common_params & params) {
+        bool is_resume = sleeping;
+
         SRV_INF("loading model '%s'\n", params.model.path.c_str());

         params_base = params;

-        webui_settings = json::object();
-        if (!params_base.webui_config_json.empty()) {
-            try {
-                webui_settings = json::parse(params_base.webui_config_json);
-            } catch (const std::exception & e) {
-                SRV_ERR("%s: failed to parse webui config: %s\n", __func__, e.what());
-                return false;
-            }
-        }
-
         llama_init = common_init_from_params(params_base);

         model = llama_init->model();
@@ -654,7 +678,9 @@ struct server_context_impl {

         std::string & mmproj_path = params_base.mmproj.path;
         if (!mmproj_path.empty()) {
-            mtmd_helper_log_set(common_log_default_callback, nullptr);
+            if (!is_resume) {
+                mtmd_helper_log_set(common_log_default_callback, nullptr);
+            }

             mtmd_context_params mparams = mtmd_context_params_default();
             mparams.use_gpu = params_base.mmproj_use_gpu;
@@ -699,19 +725,6 @@ struct server_context_impl {
             }
         }

         return true;
     }

-    // initialize slots and server-related data
-    void init() {
-        // wiring up server queues
-        queue_tasks.on_new_task([this](server_task && task) {
-            process_single_task(std::move(task));
-        });
-        queue_tasks.on_update_slots([this]() {
-            update_slots();
-        });
-
-        // Necessary similarity of prompt for slot selection
-        slot_prompt_similarity = params_base.slot_prompt_similarity;
-
@@ -726,6 +739,7 @@ struct server_context_impl {
             n_ctx_slot = n_ctx_train;
         }

+        slots.clear();
         for (int i = 0; i < params_base.n_parallel; i++) {
             server_slot slot;

@@ -742,13 +756,13 @@ struct server_context_impl {
                 slot.ctx_dft = llama_init_from_model(model_dft, cparams_dft);
                 if (slot.ctx_dft == nullptr) {
                     SRV_ERR("%s", "failed to create draft context\n");
-                    return;
+                    return false;
                 }

                 slot.spec = common_speculative_init(slot.ctx, slot.ctx_dft);
                 if (slot.spec == nullptr) {
                     SRV_ERR("%s", "failed to create speculator\n");
-                    return;
+                    return false;
                 }
                 for (auto & pair : params_base.speculative.replacements) {
                     common_speculative_add_replacement_tgt_dft(slot.spec, pair.first.c_str(), pair.second.c_str());
@@ -782,8 +796,6 @@ struct server_context_impl {
             batch = llama_batch_init(std::max(n_batch, params_base.n_parallel), 0, 1);
         }

-        metrics.init();
-
         if (params_base.cache_ram_mib != 0) {
             if (params_base.cache_ram_mib < 0) {
                 SRV_WRN("prompt cache is enabled, size limit: %s\n", "no limit");
@@ -832,6 +844,103 @@ struct server_context_impl {
         LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
             common_chat_templates_source(chat_templates.get()),
             common_chat_format_example(chat_templates.get(), params_base.use_jinja, params_base.default_template_kwargs).c_str());

+        if (!is_resume) {
+            return init();
+        }
+
+        return true;
+    }
+
+    // unlike load_model(), this is only called once during initialization
+    bool init() {
+        GGML_ASSERT(ctx != nullptr);
+        GGML_ASSERT(model != nullptr);
+        GGML_ASSERT(!sleeping);
+
+        // wiring up server queues
+        queue_tasks.on_new_task([this](server_task && task) {
+            process_single_task(std::move(task));
+        });
+        queue_tasks.on_update_slots([this]() {
+            update_slots();
+        });
+        queue_tasks.on_sleeping_state([this](bool sleeping) {
+            handle_sleeping_state(sleeping);
+        });
+
+        metrics.init();
+
+        if (!populate_json_responses()) {
+            SRV_ERR("%s", "failed to populate JSON responses\n");
+            return false;
+        }
+
+        return true;
+    }
+
+    bool populate_json_responses() {
+        // populate webui settings
+        json json_webui_settings = json::object();
+        {
+            if (!params_base.webui_config_json.empty()) {
+                try {
+                    json_webui_settings = json::parse(params_base.webui_config_json);
+                } catch (const std::exception & e) {
+                    SRV_ERR("%s: failed to parse webui config: %s\n", __func__, e.what());
+                    return false;
+                }
+            }
+        }
+
+        // populate server properties
+        {
+            task_params params;
+            params.sampling = params_base.sampling;
+            json default_generation_settings_for_props = json {
+                {"params", params.to_json(true)},
+                {"n_ctx", get_slot_n_ctx()},
+            };
+
+            json_server_props = {
+                { "default_generation_settings", default_generation_settings_for_props },
+                { "total_slots", params_base.n_parallel },
+                { "model_alias", model_name },
+                { "model_path", params_base.model.path },
+                { "modalities", json {
+                    {"vision", oai_parser_opt.allow_image},
+                    {"audio", oai_parser_opt.allow_audio},
+                } },
+                { "endpoint_slots", params_base.endpoint_slots },
+                { "endpoint_props", params_base.endpoint_props },
+                { "endpoint_metrics", params_base.endpoint_metrics },
+                { "webui", params_base.webui },
+                { "webui_settings", json_webui_settings },
+                { "chat_template", common_chat_templates_source(chat_templates.get()) },
+                { "bos_token", common_token_to_piece(ctx, llama_vocab_bos(vocab), /* special= */ true)},
+                { "eos_token", common_token_to_piece(ctx, llama_vocab_eos(vocab), /* special= */ true)},
+                { "build_info", build_info },
+            };
+            if (params_base.use_jinja) {
+                if (auto tool_use_src = common_chat_templates_source(chat_templates.get(), "tool_use")) {
+                    json_server_props["chat_template_tool_use"] = tool_use_src;
+                }
+            }
+        }
+
+        // populate model metadata
+        {
+            json_server_model_meta = {
+                {"vocab_type",  llama_vocab_type      (vocab)},
+                {"n_vocab",     llama_vocab_n_tokens  (vocab)},
+                {"n_ctx_train", llama_model_n_ctx_train(model)},
+                {"n_embd",      llama_model_n_embd    (model)},
+                {"n_params",    llama_model_n_params  (model)},
+                {"size",        llama_model_size      (model)},
+            };
+        }
+
+        return true;
+    }
+
     server_slot * get_slot_by_id(int id) {
@@ -2635,17 +2744,6 @@ struct server_context_impl {
         SRV_DBG("%s", "run slots completed\n");
     }

-    json model_meta() const {
-        return json {
-            {"vocab_type",  llama_vocab_type      (vocab)},
-            {"n_vocab",     llama_vocab_n_tokens  (vocab)},
-            {"n_ctx_train", llama_model_n_ctx_train(model)},
-            {"n_embd",      llama_model_n_embd    (model)},
-            {"n_params",    llama_model_n_params  (model)},
-            {"size",        llama_model_size      (model)},
-        };
-    }
-
     int get_slot_n_ctx() {
         return slots.back().n_ctx;
     }
@@ -2662,16 +2760,13 @@ struct server_context_impl {
 server_context::server_context() : impl(new server_context_impl()) {}
 server_context::~server_context() = default;

-void server_context::init() {
-    impl->init();
-}
-
 bool server_context::load_model(const common_params & params) {
     return impl->load_model(params);
 }

 void server_context::start_loop() {
-    impl->queue_tasks.start_loop();
+    auto & params = impl->params_base;
+    impl->queue_tasks.start_loop(params.sleep_idle_seconds * 1000);
 }

 void server_context::terminate() {
@@ -2698,10 +2793,17 @@ server_context_info server_context::get_info() const {
|
||||
|
||||
|
||||
// generator-like API for HTTP response generation
|
||||
// may have bypass_sleep = true if the task does not use ctx_server
|
||||
struct server_res_generator : server_http_res {
|
||||
server_response_reader rd;
|
||||
server_res_generator(server_context_impl & ctx_server)
|
||||
: rd(ctx_server.queue_tasks, ctx_server.queue_results, HTTP_POLLING_SECONDS) {}
|
||||
server_res_generator(server_context_impl & ctx_server, bool bypass_sleep = false)
|
||||
: rd(ctx_server.queue_tasks, ctx_server.queue_results, HTTP_POLLING_SECONDS) {
|
||||
// fast path in case sleeping is disabled
|
||||
bypass_sleep |= ctx_server.params_base.sleep_idle_seconds < 0;
|
||||
if (!bypass_sleep) {
|
||||
ctx_server.queue_tasks.wait_until_no_sleep();
|
||||
}
|
||||
}
|
||||
void ok(const json & response_data) {
|
||||
status = 200;
|
||||
data = safe_json_to_str(response_data);
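The `bypass_sleep` flag above lets handlers that never touch `ctx_server` (such as `/health`) respond without waking the model, while every other handler blocks in `wait_until_no_sleep()` until the wake-up completes. A self-contained sketch of that gate, with the hypothetical `Gate` and `ToyRes` types standing in for `server_queue` and `server_res_generator`:

// illustrative stand-ins only; the real logic lives in server_queue / server_res_generator
#include <condition_variable>
#include <mutex>

struct Gate {
    std::mutex m;
    std::condition_variable cv;
    bool sleeping = false;
    bool wake_requested = false;

    // called from request threads that need the model loaded
    void wait_until_no_sleep() {
        std::unique_lock<std::mutex> lock(m);
        if (!sleeping) {
            return;                                // fast path
        }
        wake_requested = true;                     // ask the main loop to reload
        cv.notify_one();
        cv.wait(lock, [&] { return !sleeping; });  // block until the reload finished
    }
};

struct ToyRes {
    // bypass_sleep = true for endpoints that never touch the model
    ToyRes(Gate & gate, bool bypass_sleep = false) {
        if (!bypass_sleep) {
            gate.wait_until_no_sleep();
        }
    }
};

int main() {
    Gate gate;
    ToyRes health(gate, /*bypass_sleep=*/true);  // never blocks, like GET /health
    ToyRes completion(gate);                     // would block while the server sleeps
}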
@@ -2719,6 +2821,7 @@ struct server_res_generator : server_http_res {
//

static std::unique_ptr<server_res_generator> handle_completions_impl(
        std::unique_ptr<server_res_generator> && res_ptr,
        server_context_impl & ctx_server,
        server_task_type type,
        const json & data,
@@ -2727,7 +2830,7 @@ static std::unique_ptr<server_res_generator> handle_completions_impl(
        task_response_type res_type) {
    GGML_ASSERT(type == SERVER_TASK_TYPE_COMPLETION || type == SERVER_TASK_TYPE_INFILL);

    auto res = std::make_unique<server_res_generator>(ctx_server);
    auto res = std::move(res_ptr);
    auto completion_id = gen_chatcmplid();
    auto & rd = res->rd;

@@ -2931,9 +3034,12 @@ static std::unique_ptr<server_res_generator> handle_completions_impl(
}

void server_routes::init_routes() {
    // IMPORTANT: all lambda functions must start with std::make_unique<server_res_generator>
    // this is to ensure that the server_res_generator handles the sleeping case correctly

    this->get_health = [this](const server_http_req &) {
        // error and loading states are handled by middleware
        auto res = std::make_unique<server_res_generator>(ctx_server);
        auto res = std::make_unique<server_res_generator>(ctx_server, true);
        res->ok({{"status", "ok"}});
        return res;
    };
@@ -3115,46 +3221,10 @@ void server_routes::init_routes() {
    };

    this->get_props = [this](const server_http_req &) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        json default_generation_settings_for_props;

        {
            task_params params;

            params.sampling = ctx_server.params_base.sampling;

            default_generation_settings_for_props = json {
                {"params", params.to_json(true)},
                {"n_ctx",  ctx_server.get_slot_n_ctx()},
            };
        }

        json data = {
            { "default_generation_settings", default_generation_settings_for_props },
            { "total_slots",      ctx_server.params_base.n_parallel },
            { "model_alias",      ctx_server.model_name },
            { "model_path",       ctx_server.params_base.model.path },
            { "modalities", json {
                {"vision", ctx_server.oai_parser_opt.allow_image},
                {"audio",  ctx_server.oai_parser_opt.allow_audio},
            } },
            { "endpoint_slots",   params.endpoint_slots },
            { "endpoint_props",   params.endpoint_props },
            { "endpoint_metrics", params.endpoint_metrics },
            { "webui",            params.webui },
            { "webui_settings",   ctx_server.webui_settings },
            { "chat_template",    common_chat_templates_source(ctx_server.chat_templates.get()) },
            { "bos_token", common_token_to_piece(ctx_server.ctx, llama_vocab_bos(ctx_server.vocab), /* special= */ true)},
            { "eos_token", common_token_to_piece(ctx_server.ctx, llama_vocab_eos(ctx_server.vocab), /* special= */ true)},
            { "build_info", build_info },
        };
        if (ctx_server.params_base.use_jinja) {
            if (auto tool_use_src = common_chat_templates_source(ctx_server.chat_templates.get(), "tool_use")) {
                data["chat_template_tool_use"] = tool_use_src;
            }
        }

        res->ok(data);
        auto res = std::make_unique<server_res_generator>(ctx_server, true);
        auto props = ctx_server.json_server_props;
        props["is_sleeping"] = ctx_server.queue_tasks.is_sleeping();
        res->ok(props);
        return res;
    };

@@ -3272,6 +3342,7 @@ void server_routes::init_routes() {

        std::vector<raw_buffer> files; // dummy
        return handle_completions_impl(
            std::move(res),
            ctx_server,
            SERVER_TASK_TYPE_INFILL,
            data,
@@ -3281,9 +3352,11 @@ void server_routes::init_routes() {
    };

    this->post_completions = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        std::vector<raw_buffer> files; // dummy
        const json body = json::parse(req.body);
        return handle_completions_impl(
            std::move(res),
            ctx_server,
            SERVER_TASK_TYPE_COMPLETION,
            body,
@@ -3293,9 +3366,11 @@ void server_routes::init_routes() {
    };

    this->post_completions_oai = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        std::vector<raw_buffer> files; // dummy
        const json body = json::parse(req.body);
        return handle_completions_impl(
            std::move(res),
            ctx_server,
            SERVER_TASK_TYPE_COMPLETION,
            body,
@@ -3305,6 +3380,7 @@ void server_routes::init_routes() {
    };

    this->post_chat_completions = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        std::vector<raw_buffer> files;
        json body = json::parse(req.body);
        json body_parsed = oaicompat_chat_params_parse(
@@ -3312,6 +3388,7 @@ void server_routes::init_routes() {
            ctx_server.oai_parser_opt,
            files);
        return handle_completions_impl(
            std::move(res),
            ctx_server,
            SERVER_TASK_TYPE_COMPLETION,
            body_parsed,
@@ -3321,6 +3398,7 @@ void server_routes::init_routes() {
    };

    this->post_anthropic_messages = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        std::vector<raw_buffer> files;
        json body = convert_anthropic_to_oai(json::parse(req.body));
        json body_parsed = oaicompat_chat_params_parse(
@@ -3328,6 +3406,7 @@ void server_routes::init_routes() {
            ctx_server.oai_parser_opt,
            files);
        return handle_completions_impl(
            std::move(res),
            ctx_server,
            SERVER_TASK_TYPE_COMPLETION,
            body_parsed,
@@ -3365,11 +3444,13 @@ void server_routes::init_routes() {
        return res;
    };

    // TODO: this endpoint is unsafe to access during model reloading (i.e. while waking up from sleeping)
    // how can we make it work even during load_model()?
    this->get_models = [this](const server_http_req &) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        json model_meta = nullptr;
        if (is_ready()) {
            model_meta = ctx_server.model_meta();
            model_meta = ctx_server.json_server_model_meta;
        }
        bool has_mtmd = ctx_server.mctx != nullptr;
        json models = {

@@ -22,9 +22,6 @@ struct server_context {
    server_context();
    ~server_context();

    // initialize slots and server-related data
    void init();

    // load the model and initialize llama_context
    // returns true on success
    bool load_model(const common_params & params);
@@ -35,7 +32,7 @@ struct server_context {
    // terminate main loop (will unblock start_loop)
    void terminate();

    // get the underlying llama_context
    // get the underlying llama_context; can return nullptr if sleeping
    llama_context * get_llama_context() const;

    // get a new response reader, used by CLI application

@@ -33,6 +33,7 @@ int server_queue::post(server_task && task, bool front) {
    } else {
        queue_tasks.push_back(std::move(task));
    }
    time_last_task = ggml_time_ms();
    condition_tasks.notify_one();
    return task_id;
}
@@ -54,6 +55,7 @@ int server_queue::post(std::vector<server_task> && tasks, bool front) {
            queue_tasks.push_back(std::move(task));
        }
    }
    time_last_task = ggml_time_ms();
    condition_tasks.notify_one();
    return 0;
}
@@ -62,6 +64,7 @@ void server_queue::defer(server_task && task) {
    std::unique_lock<std::mutex> lock(mutex_tasks);
    QUE_DBG("defer task, id = %d\n", task.id);
    queue_tasks_deferred.push_back(std::move(task));
    time_last_task = ggml_time_ms();
    condition_tasks.notify_one();
}

@@ -71,31 +74,52 @@ int server_queue::get_new_id() {
    return new_id;
}

void server_queue::on_new_task(std::function<void(server_task &&)> callback) {
    callback_new_task = std::move(callback);
}

void server_queue::on_update_slots(std::function<void(void)> callback) {
    callback_update_slots = std::move(callback);
}

void server_queue::pop_deferred_task() {
    std::unique_lock<std::mutex> lock(mutex_tasks);
    if (!queue_tasks_deferred.empty()) {
        queue_tasks.emplace_front(std::move(queue_tasks_deferred.front()));
        queue_tasks_deferred.pop_front();
    }
    time_last_task = ggml_time_ms();
    condition_tasks.notify_one();
}

void server_queue::wait_until_no_sleep() {
    std::unique_lock<std::mutex> lock(mutex_tasks);
    if (!sleeping) {
        return;
    } else {
        if (!req_stop_sleeping) {
            QUE_DBG("%s", "requesting to stop sleeping\n");
            req_stop_sleeping = true;
            condition_tasks.notify_one(); // only the main thread is waiting on this
        }
        QUE_DBG("%s", "waiting until no sleep\n");
        condition_tasks.wait(lock, [&]{
            return !sleeping;
        });
    }
}

void server_queue::terminate() {
    std::unique_lock<std::mutex> lock(mutex_tasks);
    running = false;
    condition_tasks.notify_all();
}

void server_queue::start_loop() {
void server_queue::start_loop(int64_t idle_sleep_ms) {
    running = true;
    time_last_task = ggml_time_ms();

    constexpr auto max_wait_time = std::chrono::seconds(1);
    auto should_sleep = [&]() -> bool {
        // caller must hold mutex_tasks
        if (idle_sleep_ms < 0) {
            return false;
        }
        int64_t now = ggml_time_ms();
        return (now - time_last_task) >= idle_sleep_ms;
    };

    while (true) {
        QUE_DBG("%s", "processing new tasks\n");
@@ -117,23 +141,53 @@ void server_queue::start_loop() {
            QUE_DBG("processing task, id = %d\n", task.id);
            callback_new_task(std::move(task));
        }

        // all tasks in the current loop are processed, slots data is now ready
        QUE_DBG("%s", "update slots\n");

        // this will run the main inference process for all slots
        callback_update_slots();
        {
            // update_slots() may take a while to finish; make sure that time is not counted as idle
            std::unique_lock<std::mutex> lock(mutex_tasks);
            time_last_task = ggml_time_ms();
        }

        QUE_DBG("%s", "waiting for new tasks\n");
        {
        while (true) {
            std::unique_lock<std::mutex> lock(mutex_tasks);
            if (!running) {
                QUE_DBG("%s", "terminate\n");
                return;
            if (!running || !queue_tasks.empty()) {
                break; // go back to process new tasks or terminate
            }
            if (queue_tasks.empty()) {

            // no tasks, check for sleeping state
            if (should_sleep()) {
                QUE_INF("%s", "entering sleeping state\n");
                sleeping = true;
                callback_sleeping_state(true);
                req_stop_sleeping = false;
                // wait until we are requested to exit the sleeping state
                condition_tasks.wait(lock, [&]{
                    return (!running || req_stop_sleeping);
                });
                if (!running) { // may have changed during sleep
                    break; // terminate
                }
                QUE_INF("%s", "exiting sleeping state\n");
                req_stop_sleeping = false;
                callback_sleeping_state(false);
                sleeping = false;
                time_last_task = ggml_time_ms();
                condition_tasks.notify_all(); // notify wait_until_no_sleep()
                break; // process new tasks
            } else {
                // wait for new tasks, or time out to re-check the sleeping condition
                bool res = condition_tasks.wait_for(lock, max_wait_time, [&]{
                    return (!queue_tasks.empty() || !running);
                });
                if (res) {
                    break; // new task arrived or terminate
                }
                // otherwise, loop again to check the sleeping condition
            }
        }
    }
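Taken together, `wait_until_no_sleep()` and the loop above form a small state machine: the main thread re-checks the idle deadline at most every `max_wait_time`, enters the sleeping state once `idle_sleep_ms` passes without a task, and then blocks until a request thread sets `req_stop_sleeping`. The toy program below reproduces that handshake end to end; all names and timings are illustrative, not the real implementation:

// compilable toy version of the idle-sleep handshake (illustrative only)
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

using namespace std::chrono_literals;

struct ToyQueue {
    std::mutex m;
    std::condition_variable cv;
    std::deque<int> tasks;
    bool running  = true;
    bool sleeping = false;
    bool req_wake = false;
    std::chrono::steady_clock::time_point last = std::chrono::steady_clock::now();

    void post(int t) {
        {
            std::lock_guard<std::mutex> lock(m);
            tasks.push_back(t);
            last = std::chrono::steady_clock::now();
        }
        cv.notify_one();
    }

    void wait_until_no_sleep() {                  // request side of the handshake
        std::unique_lock<std::mutex> lock(m);
        if (!sleeping) return;
        req_wake = true;
        cv.notify_one();
        cv.wait(lock, [&] { return !sleeping; });
    }

    void loop(std::chrono::milliseconds idle) {   // main-loop side of the handshake
        while (true) {
            std::unique_lock<std::mutex> lock(m);
            while (running && tasks.empty()) {
                if (std::chrono::steady_clock::now() - last >= idle) {
                    std::puts("entering sleep");  // the real server unloads the model here
                    sleeping = true;
                    cv.wait(lock, [&] { return !running || req_wake; });
                    if (!running) return;
                    std::puts("waking up");       // the real server reloads the model here
                    req_wake = false;
                    sleeping = false;
                    last = std::chrono::steady_clock::now();
                    cv.notify_all();              // release wait_until_no_sleep()
                } else {
                    // wake on new work, or time out to re-check the idle deadline
                    cv.wait_for(lock, 100ms, [&] { return !tasks.empty() || !running; });
                }
            }
            if (!running) return;
            std::printf("processing task %d\n", tasks.front());
            tasks.pop_front();
            last = std::chrono::steady_clock::now();
        }
    }
};

int main() {
    ToyQueue q;
    std::thread t([&] { q.loop(200ms); });
    std::this_thread::sleep_for(500ms);  // idle long enough to fall asleep
    q.wait_until_no_sleep();             // wake the loop before posting work
    q.post(1);
    std::this_thread::sleep_for(100ms);
    {
        std::lock_guard<std::mutex> lock(q.m);
        q.running = false;
    }
    q.cv.notify_all();
    t.join();
}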

@@ -12,7 +12,10 @@
struct server_queue {
private:
    int id = 0;
    bool running;
    bool running = false;
    bool sleeping = false;
    bool req_stop_sleeping = false;
    int64_t time_last_task = 0;

    // queues
    std::deque<server_task> queue_tasks;
@@ -24,6 +27,7 @@ private:
    // callback functions
    std::function<void(server_task &&)> callback_new_task;
    std::function<void(void)> callback_update_slots;
    std::function<void(bool)> callback_sleeping_state;

public:
    // Add a new task to the end of the queue
@@ -38,15 +42,18 @@ public:
    // Get the next id for creating a new task
    int get_new_id();

    // Register function to process a new task
    void on_new_task(std::function<void(server_task &&)> callback);

    // Register the function to be called when all slots data is ready to be processed
    void on_update_slots(std::function<void(void)> callback);

    // Call when the state of one slot has changed; it will move one task from the deferred queue to the main queue
    void pop_deferred_task();

    // if sleeping, request exiting the sleep state and wait until that is done
    // returns immediately if not sleeping
    void wait_until_no_sleep();

    bool is_sleeping() {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        return sleeping;
    }

    // end the start_loop routine
    void terminate();

@@ -56,8 +63,15 @@ public:
     * - Process the task (i.e. maybe copy data into slot)
     * - Check if multitask is finished
     * - Update all slots
     *
     * Sleeping procedure (disabled if idle_sleep_ms < 0):
     * - If there is no task after idle_sleep_ms, enter the sleeping state
     * - Call callback_sleeping_state(true)
     * - Wait until req_stop_sleeping is set to true
     * - Call callback_sleeping_state(false)
     * - Exit the sleeping state
     */
    void start_loop();
    void start_loop(int64_t idle_sleep_ms = -1);

    // for metrics
    size_t queue_tasks_deferred_size() {
@@ -65,6 +79,27 @@ public:
        return queue_tasks_deferred.size();
    }

    //
    // Functions below are not thread-safe and must only be used before start_loop() is called
    //

    // Register function to process a new task
    void on_new_task(std::function<void(server_task &&)> callback) {
        callback_new_task = std::move(callback);
    }

    // Register the function to be called when all slots data is ready to be processed
    void on_update_slots(std::function<void(void)> callback) {
        callback_update_slots = std::move(callback);
    }

    // Register callback for sleeping state changes
    // note: when entering the sleeping state, the callback is called AFTER sleeping is set to true;
    //       when leaving the sleeping state, the callback is called BEFORE sleeping is set to false
    void on_sleeping_state(std::function<void(bool)> callback) {
        callback_sleeping_state = std::move(callback);
    }

private:
    void cleanup_pending_task(int id_target);
};
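Since the registration functions are not synchronized, the intended usage is strictly phased: construct the queue, register every callback, then call `start_loop()`; only the thread-safe API may be used afterwards. A hedged wiring sketch (the `server_queue_like` and `server_task_like` stubs below merely mimic the interface above so the snippet compiles on its own):

// self-contained stubs mimicking the interface above (illustrative only)
#include <cstdio>
#include <functional>

struct server_task_like { int id = 0; };

struct server_queue_like {
    std::function<void(server_task_like &&)> callback_new_task;
    std::function<void(void)>                callback_update_slots;
    std::function<void(bool)>                callback_sleeping_state;

    void on_new_task(std::function<void(server_task_like &&)> cb) { callback_new_task = std::move(cb); }
    void on_update_slots(std::function<void(void)> cb)            { callback_update_slots = std::move(cb); }
    void on_sleeping_state(std::function<void(bool)> cb)          { callback_sleeping_state = std::move(cb); }
    void start_loop(long idle_sleep_ms = -1) { (void) idle_sleep_ms; /* event loop elided */ }
};

int main() {
    server_queue_like queue;

    // phase 1: single-threaded setup, before the loop runs
    queue.on_new_task([](server_task_like && task) {
        std::printf("dispatch task %d to a slot\n", task.id);
    });
    queue.on_update_slots([] {
        // run one batch of inference across all slots
    });
    queue.on_sleeping_state([](bool sleeping) {
        // entering sleep: free the model; leaving sleep: reload it
        std::printf("%s\n", sleeping ? "unload model" : "reload model");
    });

    // phase 2: run the loop; from here on, only the thread-safe API may be used
    queue.start_loop(/*idle_sleep_ms=*/60 * 1000);
}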

@@ -252,7 +252,6 @@ int main(int argc, char ** argv, char ** envp) {
        return 1;
    }

    ctx_server.init();
    ctx_http.is_ready.store(true);

    LOG_INF("%s: model loaded\n", __func__);
@@ -309,7 +308,11 @@ int main(int argc, char ** argv, char ** envp) {
        if (monitor_thread.joinable()) {
            monitor_thread.join();
        }
        llama_memory_breakdown_print(ctx_server.get_llama_context());

        auto * ll_ctx = ctx_server.get_llama_context();
        if (ll_ctx != nullptr) {
            llama_memory_breakdown_print(ll_ctx);
        }
    }

    return 0;

tools/server/tests/unit/test_sleep.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import pytest
import time
from utils import *

server = ServerPreset.tinyllama2()


@pytest.fixture(autouse=True)
def create_server():
    global server
    server = ServerPreset.tinyllama2()


def test_server_sleep():
    global server
    server.sleep_idle_seconds = 1
    server.start()

    # wait a bit so that the server can go to sleep
    time.sleep(2)

    # make sure these endpoints are still responsive after sleep
    res = server.make_request("GET", "/health")
    assert res.status_code == 200
    res = server.make_request("GET", "/props")
    assert res.status_code == 200
    assert res.body["is_sleeping"] == True

    # make a generation request to wake up the server
    res = server.make_request("POST", "/completion", data={
        "n_predict": 1,
        "prompt": "Hello",
    })
    assert res.status_code == 200

    # it should no longer be sleeping
    res = server.make_request("GET", "/props")
    assert res.status_code == 200
    assert res.body["is_sleeping"] == False

@@ -100,6 +100,7 @@ class ServerProcess:
    server_path: str | None = None
    mmproj_url: str | None = None
    media_path: str | None = None
    sleep_idle_seconds: int | None = None

    # session variables
    process: subprocess.Popen | None = None
@@ -230,6 +231,8 @@ class ServerProcess:
            server_args.extend(["--mmproj-url", self.mmproj_url])
        if self.media_path:
            server_args.extend(["--media-path", self.media_path])
        if self.sleep_idle_seconds is not None:
            server_args.extend(["--sleep-idle-seconds", self.sleep_idle_seconds])

        args = [str(arg) for arg in [server_path, *server_args]]
        print(f"tests: starting server with: {' '.join(args)}")