Compare commits


4 Commits
b4919 ... b4923

Author SHA1 Message Date
Sigbjørn Skjæret
108e53c2f1 llama : add support for GPT2, Bloom and CodeShell tied word embeddings (#12456)
* Add support for GPT2, Bloom and CodeShell tied word embeddings

* Deduplicate tied word embeddings weights

* Workaround for incorrect weight map

It appears transformer.wte.weight is in the weight map even though the weights are not there; remove it if the output weights are encountered first.

* check++

* fatfingers--
2025-03-19 09:08:49 +01:00
Sigbjørn Skjæret
a686171ea7 convert : Support chat_template.json (#12460) 2025-03-19 08:58:13 +01:00
Jeff Bolz
c446b2edd2 vulkan: Submit once enough matmul work has been recorded (#12406)
I've been seeing significantly worse performance for tg with flash attention
enabled vs disabled, and it seems to be related to the submit heuristic.
Change the heuristic to check how many bytes worth of weight matrix are
used and flush every 100MB, and ramp up after the first few submits.
This seems to resolve the issue, and also increases perf for non-FA a bit.
2025-03-19 08:26:26 +01:00
lhez
d84635b1b0 opencl: improve profiling (#12442)
* opencl: more profiling timing

* opencl: generate trace for profiling

* opencl: reduce profiling overhead

* Populate profiling timing info at the end rather than after each
  kernel run

* opencl: fix for chrome tracing
2025-03-18 12:54:55 -07:00
5 changed files with 155 additions and 57 deletions

View File: convert_hf_to_gguf.py

@@ -180,7 +180,8 @@ class Model:
             extra = sorted(tensor_names_from_parts.difference(self.tensor_names))
             missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map))
             if len(extra) == 0 and len(missing_files) > 0:
-                raise ValueError(f"Missing or incomplete model files: {missing_files}")
+                raise ValueError(f"Missing or incomplete model files: {missing_files}\n"
+                                 f"Missing tensors: {missing}")
             else:
                 raise ValueError("Mismatch between weight map and model parts for tensor names:\n"
                                  f"Missing tensors: {missing}\n"
@@ -1099,13 +1100,6 @@ class BloomModel(Model):
         tensors.append((self.map_tensor_name(name), data_torch))
 
-        if name == "word_embeddings.weight":
-            assert self.tensor_names is not None
-
-            # TODO: tie them at runtime, don't duplicate in the model file
-            if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
-                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
-
         return tensors
@@ -2423,10 +2417,6 @@ class GPT2Model(Model):
         tensors.append((new_name, data_torch))
 
-        # note: GPT2 output is tied to (same as) wte in original model
-        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
-            tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
-
         return tensors
@@ -2756,21 +2746,26 @@ class CodeShellModel(Model):
         self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
         self.gguf_writer.add_rope_scaling_factor(1.0)
 
+    _has_tok_embd = False
+
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
 
+        output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT)
+        tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD)
+
         new_name = self.map_tensor_name(name)
 
-        tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)]
+        # assuming token_embd.weight is seen before output.weight
+        if not self._has_tok_embd and new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
+            # even though the tensor file(s) does not contain the word embeddings they are still in the weight map
+            if self.tensor_names and "transformer.wte.weight" in self.tensor_names:
+                logger.debug(f"{tok_embd_name} not found before {output_name}, assuming they are tied")
+                self.tensor_names.remove("transformer.wte.weight")
+        elif new_name == tok_embd_name:
+            self._has_tok_embd = True
 
-        if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
-            assert self.tensor_names is not None
-
-            if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")):
-                # copy tok_embd.weight to output.weight
-                tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch))
-
-        return tensors
+        return [(new_name, data_torch)]
 
 
 @Model.register("InternLM2ForCausalLM")
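
A quick way to sanity-check the effect of this change on a converted model is to list the tensor names in the resulting GGUF: with tied word embeddings the file should contain token_embd.weight but no duplicated output.weight, since llama.cpp now initializes the output tensor from the token embedding at load time. A minimal sketch, assuming the gguf-py GGUFReader API and a hypothetical model path:

# Sketch: verify that a converted GGUF relies on tied embeddings.
# "model.gguf" is a hypothetical path; reader API as provided by gguf-py.
from gguf import GGUFReader

reader = GGUFReader("model.gguf")
names = {t.name for t in reader.tensors}

assert "token_embd.weight" in names
# With tied word embeddings there is no separate output tensor in the file;
# the runtime duplicates token_embd.weight into output at load time.
print("output.weight stored in file:", "output.weight" in names)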

View File: ggml/src/ggml-opencl/ggml-opencl.cpp

@@ -297,8 +297,27 @@ static int ggml_backend_opencl_n_devices = 0;
 struct ProfilingInfo {
     std::string op_name;
     std::string kernel_name;
-    // Kernel execution time in nanoseconds.
-    cl_ulong duration_ns;
+
+    cl_kernel kernel;
+    cl_event evt;
+
+    cl_ulong cmd_queued;
+    cl_ulong cmd_submit;
+    cl_ulong cmd_start;
+    cl_ulong cmd_end;
+    cl_ulong overhead_start;
+    cl_ulong overhead_end;
+
+    // For the times below, see spec for clGetEventProfilingInfo
+    // The time kernel spent in cmd queue - SUBMIT - QUEUED
+    cl_ulong cmd_queued_duration_ns;
+    // The time kernel spent for submission - START - SUBMIT
+    cl_ulong cmd_submit_duration_ns;
+    // Kernel execution time in nanoseconds - END - START
+    cl_ulong cmd_duration_ns;
+    // The time for the kernel to complete - COMPLETE - END
+    cl_ulong cmd_complete_duration_ns;
+    // Total time to finish the kernel - COMPLETE - QUEUED
+    cl_ulong cmd_total_duration_ns;
+
     // Global and local work sizes.
     size_t global_size[3];
     size_t local_size[3];
@@ -903,12 +922,56 @@ static void ggml_cl2_free(void) {
         return;
     }
 
+    // Populate profiling info
+    for (ProfilingInfo & info : g_profiling_info) {
+        cl_ulong cmd_queued;
+        cl_ulong cmd_submit;
+        cl_ulong cmd_start;
+        cl_ulong cmd_end;
+        cl_ulong cmd_complete;
+
+        CL_CHECK(clWaitForEvents(1, &info.evt));
+        CL_CHECK(clGetEventProfilingInfo(
+            info.evt, CL_PROFILING_COMMAND_QUEUED, sizeof(cl_ulong), &cmd_queued, NULL));
+        CL_CHECK(clGetEventProfilingInfo(
+            info.evt, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &cmd_submit, NULL));
+        CL_CHECK(clGetEventProfilingInfo(
+            info.evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &cmd_start, NULL));
+        CL_CHECK(clGetEventProfilingInfo(
+            info.evt, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &cmd_end, NULL));
+        CL_CHECK(clGetEventProfilingInfo(
+            info.evt, CL_PROFILING_COMMAND_COMPLETE, sizeof(cl_ulong), &cmd_complete, NULL));
+        CL_CHECK(clReleaseEvent(info.evt));
+
+        char kernel_name[512];
+        CL_CHECK(clGetKernelInfo(info.kernel, CL_KERNEL_FUNCTION_NAME,
+            sizeof(kernel_name), kernel_name, NULL));
+        info.kernel_name = kernel_name;
+
+        info.cmd_queued = cmd_queued;
+        info.cmd_submit = cmd_submit;
+        info.cmd_start = cmd_start;
+        info.cmd_end = cmd_end;
+
+        info.cmd_queued_duration_ns = cmd_submit - cmd_queued;
+        info.cmd_submit_duration_ns = cmd_start - cmd_submit;
+        info.cmd_duration_ns = cmd_end - cmd_start;
+        info.cmd_complete_duration_ns = cmd_complete - cmd_end;
+        info.cmd_total_duration_ns = cmd_complete - cmd_queued;
+    }
+
+    // Dump a csv
     float total_kernel_time = 0;
-    fprintf(fperf, "op name, kernel name, duration (ms), global size, local size, output size\n");
+    fprintf(fperf, "op name, kernel name, queued duration (ms), submit duration(ms), exec duration (ms), complete duration (ms), total duration (ms), global size, local size, output size\n");
     for (const ProfilingInfo & info : g_profiling_info) {
-        total_kernel_time += info.duration_ns/1.e6f;
-        fprintf(fperf, "%s,%s,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n",
-            info.op_name.c_str(), info.kernel_name.c_str(), info.duration_ns/1.e6f,
+        total_kernel_time += info.cmd_duration_ns/1.e6f;
+        fprintf(fperf, "%s,%s,%f,%f,%f,%f,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n",
+            info.op_name.c_str(), info.kernel_name.c_str(),
+            info.cmd_queued_duration_ns/1.e6f,
+            info.cmd_submit_duration_ns/1.e6f,
+            info.cmd_duration_ns/1.e6f,
+            info.cmd_complete_duration_ns/1.e6f,
+            info.cmd_total_duration_ns/1.e6f,
             info.global_size[0], info.global_size[1], info.global_size[2],
             info.local_size[0], info.local_size[1], info.local_size[2],
             info.output_size[0], info.output_size[1], info.output_size[2], info.output_size[3]);
@@ -916,6 +979,27 @@ static void ggml_cl2_free(void) {
     fclose(fperf);
 
     GGML_LOG_INFO("ggml_opencl: total kernel time: %f\n", total_kernel_time);
+
+    // Dump a simple chrome trace
+    FILE* ftrace = fopen("cl_trace.json", "w");
+    if (!ftrace) {
+        GGML_LOG_ERROR("Failed to open cl_trace.json\n");
+        return;
+    }
+
+    fprintf(ftrace, "[\n");
+    for (const ProfilingInfo & info : g_profiling_info) {
+        fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n",
+            info.kernel_name.c_str(), info.cmd_queued/1000);
+        fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n",
+            info.kernel_name.c_str(), info.cmd_submit/1000);
+
+        fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n",
+            info.kernel_name.c_str(), info.cmd_start/1000);
+        fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n",
+            info.kernel_name.c_str(), info.cmd_end/1000);
+    }
+    fclose(ftrace);
 #endif
 }
@@ -2062,25 +2146,14 @@ static void dump_tensor(ggml_backend_t backend, const struct ggml_tensor * tenso
 // Profiling utility
 //------------------------------------------------------------------------------
 #ifdef GGML_OPENCL_PROFILING
-void populateProfilingInfo(
+static void populateProfilingInfo(
         ProfilingInfo& info, cl_event evt, cl_kernel kernel,
         size_t global_size[3], size_t local_size[3],
         const ggml_tensor * tensor) {
-    cl_ulong start;
-    cl_ulong end;
-    CL_CHECK(clWaitForEvents(1, &evt));
-    CL_CHECK(clGetEventProfilingInfo(
-        evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &start, NULL));
-    CL_CHECK(clGetEventProfilingInfo(
-        evt, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &end, NULL));
+    info.op_name = tensor->name;
+    info.kernel = kernel;
+    info.evt = evt;
 
-    char kernel_name[512];
-    CL_CHECK(clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME,
-        sizeof(kernel_name), kernel_name, NULL));
-
-    info.duration_ns = end - start;
-    info.op_name = tensor->name;
-    info.kernel_name = kernel_name;
     info.local_size[0] = local_size[0];
     info.local_size[1] = local_size[1];
     info.local_size[2] = local_size[2];
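
The trace dumped above uses the Chrome Trace Event JSON format (an array of B/E phase events with timestamps in microseconds), so cl_trace.json can be opened in chrome://tracing or Perfetto. A small sketch of post-processing it in Python follows; the file name comes from the code above, but note the writer leaves a trailing comma and no closing bracket, which the sketch patches up before parsing:

# Sketch: compute device-side kernel durations from cl_trace.json (see the fprintf calls above).
import json

with open("cl_trace.json") as f:
    text = f.read().rstrip().rstrip(",")  # writer emits "[\n" plus events ending in ",", no "]"
events = json.loads(text + "\n]")

begin_ts = {}
for ev in events:
    if ev.get("tid") != "Device":
        continue  # only look at kernel execution, not host-side queue/submit spans
    if ev["ph"] == "B":
        begin_ts[ev["name"]] = ev["ts"]
    elif ev["ph"] == "E" and ev["name"] in begin_ts:
        print(f'{ev["name"]}: {ev["ts"] - begin_ts.pop(ev["name"])} us')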

View File: ggml/src/ggml-vulkan/ggml-vulkan.cpp

@@ -8436,8 +8436,12 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg
     VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)");
     ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
 
+    uint64_t total_mat_mul_bytes = 0;
     for (int i = 0; i < cgraph->n_nodes; i++) {
         ggml_vk_build_graph(ctx, cgraph->nodes[i], i, nullptr, 0, true, false, false);
+        if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) {
+            total_mat_mul_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]);
+        }
     }
 
     if (ctx->device->need_compiles) {
         ggml_vk_load_shaders(ctx->device);
@@ -8458,17 +8462,27 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg
     bool first_node_in_batch = true; // true if next node will be first node in a batch
     int submit_node_idx = 0; // index to first node in a batch
 
-    // Submit work every nodes_per_submit nodes to overlap CPU cmdbuffer generation with GPU execution.
-    // Start with a smaller count to get work submitted right away, and increase it after each submit.
-    int nodes_per_submit = 20;
+    // Submit after enough work has accumulated, to overlap CPU cmdbuffer generation with GPU execution.
+    // Estimate the amount of matmul work by looking at the weight matrix size, and submit every 100MB
+    // (and scaled down based on model size, so smaller models submit earlier).
+    // Also submit at least every 100 nodes, in case there are workloads without as much matmul.
+    int nodes_per_submit = 100;
     int submitted_nodes = 0;
     int submit_count = 0;
+    uint64_t mul_mat_bytes = 0;
+    uint64_t mul_mat_bytes_per_submit = std::min(uint64_t(100*1000*1000), total_mat_mul_bytes / 40u);
+
     for (int i = 0; i < cgraph->n_nodes; i++) {
         if (first_node_in_batch) {
             submit_node_idx = i;
         }
 
-        bool submit = (submitted_nodes >= nodes_per_submit) || (i == last_node);
+        if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) {
+            mul_mat_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]);
+        }
+
+        bool submit = (submitted_nodes >= nodes_per_submit) ||
+                      (mul_mat_bytes >= mul_mat_bytes_per_submit) ||
+                      (i == last_node);
 
         bool enqueued = ggml_vk_build_graph(ctx, cgraph->nodes[i], i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i == last_node, submit);
@@ -8485,13 +8499,9 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg
         if (submit) {
             first_node_in_batch = true;
             submitted_nodes = 0;
-            switch (submit_count) {
-                case 0:
-                    nodes_per_submit = 50;
-                    break;
-                default:
-                    nodes_per_submit = 100;
-                    break;
+            mul_mat_bytes = 0;
+            if (submit_count < 3) {
+                mul_mat_bytes_per_submit *= 2;
             }
             submit_count++;
         }
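
Distilled from the hunks above: matmul weight bytes are accumulated per node, and a batch is submitted once roughly 100 MB (capped at 1/40 of the graph's total matmul bytes, and doubled after each of the first few submits) or 100 nodes have accumulated, or the last node is reached. An illustrative Python sketch of that decision logic, not the backend code itself (the real code only counts nodes that were actually enqueued):

# Sketch of the byte-based submit heuristic; returns the node indices at which a submit happens.
def plan_submits(node_matmul_bytes, total_matmul_bytes, nodes_per_submit=100):
    # node_matmul_bytes: weight-matrix bytes per node (0 for non-matmul nodes)
    bytes_per_submit = min(100 * 1000 * 1000, total_matmul_bytes // 40)
    submits = []
    nodes = 0
    acc_bytes = 0
    submit_count = 0
    last = len(node_matmul_bytes) - 1
    for i, b in enumerate(node_matmul_bytes):
        nodes += 1
        acc_bytes += b
        if nodes >= nodes_per_submit or acc_bytes >= bytes_per_submit or i == last:
            submits.append(i)
            nodes = 0
            acc_bytes = 0
            if submit_count < 3:
                bytes_per_submit *= 2  # ramp up after the first few submits
            submit_count += 1
    return submits

# Example: a small graph with two weight matrices submits early and then less often.
# plan_submits([0, 50_000_000, 0, 80_000_000], 130_000_000) -> [1, 3]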

View File: gguf-py/gguf/vocab.py

@@ -154,7 +154,12 @@ class SpecialVocab:
             return True
         with open(tokenizer_config_file, encoding = 'utf-8') as f:
             tokenizer_config = json.load(f)
-        chat_template = tokenizer_config.get('chat_template')
+        chat_template_alt = None
+        chat_template_file = path / 'chat_template.json'
+        if chat_template_file.is_file():
+            with open(chat_template_file, encoding = 'utf-8') as f:
+                chat_template_alt = json.load(f).get('chat_template')
+        chat_template = tokenizer_config.get('chat_template', chat_template_alt)
         if chat_template is None or isinstance(chat_template, (str, list)):
             self.chat_template = chat_template
         else:
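
Context for the change above: some Hugging Face model repos ship the chat template in a separate chat_template.json, a small JSON object with a single chat_template key, instead of putting it in tokenizer_config.json. The hunk keeps tokenizer_config.json authoritative and only falls back to the separate file. A minimal sketch of that lookup order; the directory name and template string are made-up examples:

# Sketch of the fallback implemented above; "my-model" and the template text are hypothetical.
import json
from pathlib import Path

path = Path("my-model")
# chat_template.json typically looks like:
#   {"chat_template": "{% for m in messages %}{{ m['role'] }}: {{ m['content'] }}\n{% endfor %}"}

with open(path / "tokenizer_config.json", encoding="utf-8") as f:
    tokenizer_config = json.load(f)

chat_template_alt = None
chat_template_file = path / "chat_template.json"
if chat_template_file.is_file():
    with open(chat_template_file, encoding="utf-8") as f:
        chat_template_alt = json.load(f).get("chat_template")

# tokenizer_config.json wins; chat_template.json only fills the gap
chat_template = tokenizer_config.get("chat_template", chat_template_alt)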

View File: src/llama-model.cpp

@@ -2020,7 +2020,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                     output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
-                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
@@ -2381,7 +2386,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                     output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
-                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
@@ -2407,7 +2417,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                 } break;
             case LLM_ARCH_CODESHELL:
                 {
-                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if tok embd is NULL, init from output
+                    if (tok_embd == NULL) {
+                        tok_embd = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);