Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2026-04-23 16:37:33 +03:00)

Compare commits: 1 commit, b2751 ... gg/fix-min

| Author | SHA1 | Date |
|---|---|---|
| | 8c259f6f3e | |

.github/workflows/bench.yml (vendored, 2 changes)
@@ -32,7 +32,7 @@ on:
- cron: '04 2 * * *'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.run_id }}-${{ github.event.inputs.sha }}
group: ${{ github.workflow }}-${{ github.ref || github.run_id }}-${{ github.event.inputs.sha }}
cancel-in-progress: true

jobs:
.github/workflows/server.yml (vendored, 29 changes)

@@ -23,7 +23,7 @@ on:
- cron: '2 4 * * *'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.run_id }}
group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
cancel-in-progress: true

jobs:
@@ -41,16 +41,23 @@ jobs:
sanitizer: ""
fail-fast: false # While -DLLAMA_SANITIZE_THREAD=ON is broken

container:
image: ubuntu:latest
ports:
- 8888
options: --cpus 4

steps:
- name: Dependencies
id: depends
run: |
sudo apt-get update
sudo apt-get -y install \
apt-get update
apt-get -y install \
build-essential \
xxd \
git \
cmake \
python3-pip \
curl \
wget \
language-pack-en \
@@ -63,17 +70,6 @@ jobs:
fetch-depth: 0
ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}

- name: Python setup
id: setup_python
uses: actions/setup-python@v5
with:
python-version: '3.11'

- name: Tests dependencies
id: test_dependencies
run: |
pip install -r examples/server/tests/requirements.txt

- name: Verify server deps
id: verify_server_deps
run: |
@@ -104,6 +100,10 @@ jobs:
-DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON ;
cmake --build . --config ${{ matrix.build_type }} -j $(nproc) --target server

- name: Tests dependencies
id: test_dependencies
run: |
pip install -r examples/server/tests/requirements.txt

- name: Tests
id: server_integration_tests
@@ -129,7 +129,6 @@ jobs:
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}

- name: libCURL
id: get_libcurl
@@ -43,7 +43,11 @@ else()
set(LLAMA_METAL_DEFAULT OFF)
endif()

set(LLAMA_LLAMAFILE_DEFAULT ON)
if (CMAKE_SYSTEM_NAME MATCHES "ANDROID")
set(LLAMA_LLAMAFILE_DEFAULT OFF)
else()
set(LLAMA_LLAMAFILE_DEFAULT ON)
endif()

# general
option(BUILD_SHARED_LIBS "build shared libraries" OFF)
Makefile (2 changes)

@@ -768,7 +768,7 @@ batched-bench: examples/batched-bench/batched-bench.cpp build-info.o ggml.
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

quantize: examples/quantize/quantize.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
quantize: examples/quantize/quantize.cpp build-info.o ggml.o llama.o $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -93,7 +93,6 @@ Typically finetunes of the base models below are supported as well.

- [X] LLaMA 🦙
- [x] LLaMA 2 🦙🦙
- [x] LLaMA 3 🦙🦙🦙
- [X] [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1)
- [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral)
- [x] [DBRX](https://huggingface.co/databricks/dbrx-instruct)
@@ -120,9 +119,8 @@ Typically finetunes of the base models below are supported as well.
- [x] [CodeShell](https://github.com/WisdomShell/codeshell)
- [x] [Gemma](https://ai.google.dev/gemma)
- [x] [Mamba](https://github.com/state-spaces/mamba)
- [x] [Grok-1](https://huggingface.co/keyfan/grok-1-hf)
- [x] [Xverse](https://huggingface.co/models?search=xverse)
- [x] [Command-R models](https://huggingface.co/models?search=CohereForAI/c4ai-command-r)
- [x] [Command-R](https://huggingface.co/CohereForAI/c4ai-command-r-v01)
- [x] [SEA-LION](https://huggingface.co/models?search=sea-lion)
- [x] [GritLM-7B](https://huggingface.co/GritLM/GritLM-7B) + [GritLM-8x7B](https://huggingface.co/GritLM/GritLM-8x7B)
- [x] [OLMo](https://allenai.org/olmo)
@@ -137,8 +135,6 @@ Typically finetunes of the base models below are supported as well.
- [x] [ShareGPT4V](https://huggingface.co/models?search=Lin-Chen/ShareGPT4V)
- [x] [MobileVLM 1.7B/3B models](https://huggingface.co/models?search=mobileVLM)
- [x] [Yi-VL](https://huggingface.co/models?search=Yi-VL)
- [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM)
- [x] [Moondream](https://huggingface.co/vikhyatk/moondream2)

**HTTP server**
@@ -160,9 +160,8 @@ function gg_run_test_scripts_debug {

set -e

# TODO: too slow, run on dedicated node
(cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
#(cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
(cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log

set +e
}
@@ -234,54 +234,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
return result;
}

bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
const char * sep = strchr(data, '=');
if (sep == nullptr || sep - data >= 128) {
fprintf(stderr, "%s: malformed KV override '%s'\n", __func__, data);
return false;
}
llama_model_kv_override kvo;
std::strncpy(kvo.key, data, sep - data);
kvo.key[sep - data] = 0;
sep++;
if (strncmp(sep, "int:", 4) == 0) {
sep += 4;
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
kvo.val_i64 = std::atol(sep);
} else if (strncmp(sep, "float:", 6) == 0) {
sep += 6;
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
kvo.val_f64 = std::atof(sep);
} else if (strncmp(sep, "bool:", 5) == 0) {
sep += 5;
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
if (std::strcmp(sep, "true") == 0) {
kvo.val_bool = true;
} else if (std::strcmp(sep, "false") == 0) {
kvo.val_bool = false;
} else {
fprintf(stderr, "%s: invalid boolean value for KV override '%s'\n", __func__, data);
return false;
}
} else if (strncmp(sep, "str:", 4) == 0) {
sep += 4;
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
if (strlen(sep) > 127) {
fprintf(stderr, "%s: malformed KV override '%s', value cannot exceed 127 chars\n", __func__, data);
return false;
}
strncpy(kvo.val_str, sep, 127);
kvo.val_str[127] = '\0';
} else {
fprintf(stderr, "%s: invalid type for KV override '%s'\n", __func__, data);
return false;
}
overrides.emplace_back(std::move(kvo));
return true;
}

bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param) {
llama_sampling_params & sparams = params.sparams;
llama_sampling_params& sparams = params.sparams;

if (arg == "-s" || arg == "--seed") {
if (++i >= argc) {
@@ -1135,10 +1089,6 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.n_print = std::stoi(argv[i]);
return true;
}
if (arg == "--check-tensors") {
params.check_tensors = true;
return true;
}
if (arg == "--ppl-output-type") {
if (++i >= argc) {
invalid_param = true;
@@ -1290,11 +1240,47 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
invalid_param = true;
return true;
}
if (!parse_kv_override(argv[i], params.kv_overrides)) {
char* sep = strchr(argv[i], '=');
if (sep == nullptr || sep - argv[i] >= 128) {
fprintf(stderr, "error: Malformed KV override: %s\n", argv[i]);
invalid_param = true;
return true;
}
struct llama_model_kv_override kvo;
std::strncpy(kvo.key, argv[i], sep - argv[i]);
kvo.key[sep - argv[i]] = 0;
sep++;
if (strncmp(sep, "int:", 4) == 0) {
sep += 4;
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
kvo.int_value = std::atol(sep);
}
else if (strncmp(sep, "float:", 6) == 0) {
sep += 6;
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
kvo.float_value = std::atof(sep);
}
else if (strncmp(sep, "bool:", 5) == 0) {
sep += 5;
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
if (std::strcmp(sep, "true") == 0) {
kvo.bool_value = true;
}
else if (std::strcmp(sep, "false") == 0) {
kvo.bool_value = false;
}
else {
fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
invalid_param = true;
return true;
}
}
else {
fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
invalid_param = true;
return true;
}
params.kv_overrides.push_back(kvo);
return true;
}
#ifndef LOG_DISABLE_LOGS
@@ -1565,10 +1551,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" path to dynamic lookup cache to use for lookup decoding (updated by generation)\n");
printf(" --override-kv KEY=TYPE:VALUE\n");
printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
printf(" types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
printf(" -ptc N, --print-token-count N\n");
printf(" print token count every N tokens (default: %d)\n", params.n_print);
printf(" --check-tensors check model tensor data for invalid values\n");
printf("\n");
#ifndef LOG_DISABLE_LOGS
log_print_usage();
@@ -1789,7 +1774,6 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
mparams.tensor_split = params.tensor_split;
mparams.use_mmap = params.use_mmap;
mparams.use_mlock = params.use_mlock;
mparams.check_tensors = params.check_tensors;
if (params.kv_overrides.empty()) {
mparams.kv_overrides = NULL;
} else {
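For reference, the helper removed above (b2751 side) folds the `KEY=TYPE:VALUE` parsing that the older code repeats inline in `gpt_params_find_arg`, `server.cpp`, and `quantize.cpp` further down. A minimal sketch of calling it directly, assuming the b2751-side declarations from `common.h`/`llama.h`; the standalone `main` and build wiring are illustrative only:

```cpp
// Hypothetical driver for parse_kv_override() as declared on the b2751 side.
#include <cstdio>
#include <vector>

#include "common.h"   // parse_kv_override, llama_model_kv_override
#include "llama.h"

int main() {
    std::vector<llama_model_kv_override> overrides;

    // Same example string as the --override-kv help text above.
    if (!parse_kv_override("tokenizer.ggml.add_bos_token=bool:false", overrides)) {
        fprintf(stderr, "failed to parse override\n");
        return 1;
    }

    // On that branch the tag is LLAMA_KV_OVERRIDE_TYPE_BOOL and val_bool is false.
    printf("key = %s, tag = %d\n", overrides[0].key, (int) overrides[0].tag);
    return 0;
}
```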
@@ -161,7 +161,6 @@ struct gpt_params {
bool dump_kv_cache = false; // dump the KV cache contents for debugging purposes
bool no_kv_offload = false; // disable KV offloading
bool warmup = true; // warmup run
bool check_tensors = false; // validate tensor data

std::string cache_type_k = "f16"; // KV cache data type for the K
std::string cache_type_v = "f16"; // KV cache data type for the V
@@ -171,8 +170,6 @@ struct gpt_params {
std::string image = ""; // path to an image file
};

bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);

bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params);

bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
@@ -234,7 +234,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
// INTERNAL, DO NOT USE
// USE LOG() INSTEAD
//
#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER)
#if !defined(_MSC_VER) or defined(__INTEL_LLVM_COMPILER)
#define LOG_IMPL(str, ...) \
do { \
if (LOG_TARGET != nullptr) \
@@ -257,7 +257,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
// INTERNAL, DO NOT USE
// USE LOG_TEE() INSTEAD
//
#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER)
#if !defined(_MSC_VER) or defined(__INTEL_LLVM_COMPILER)
#define LOG_TEE_IMPL(str, ...) \
do { \
if (LOG_TARGET != nullptr) \
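The only change to `log.h` swaps `||` for the alternative token `or` in the two guards. In standard C++ the alternative tokens behave exactly like their primary operators (C needs `<iso646.h>`), including inside preprocessor conditionals on conforming compilers, though older MSVC front ends are the usual holdout. A self-contained check, independent of the llama.cpp sources; the macro names are placeholders:

```cpp
// 'or' and '||' are the same operator in conforming C++; no extra header is required.
#include <cstdio>

int main() {
    bool msc = false, intel_llvm = true;

    if (!msc or intel_llvm) {      // equivalent to: if (!msc || intel_llvm)
        std::puts("runtime branch taken");
    }

#if !defined(SOME_UNSET_MACRO) or defined(ANOTHER_UNSET_MACRO)
    // Alternative tokens are also accepted here by conforming preprocessors.
    std::puts("preprocessor branch taken");
#endif
    return 0;
}
```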
@@ -23,7 +23,6 @@ struct Stats {
|
||||
};
|
||||
|
||||
struct StatParams {
|
||||
std::string dataset;
|
||||
std::string ofile = "imatrix.dat";
|
||||
int n_output_frequency = 10;
|
||||
int verbosity = 1;
|
||||
@@ -47,7 +46,7 @@ private:
|
||||
std::vector<float> m_src1_data;
|
||||
std::vector<char> m_ids; // the expert ids from ggml_mul_mat_id
|
||||
//
|
||||
void save_imatrix(const char * file_name, const char * dataset) const;
|
||||
void save_imatrix(const char * file_name) const;
|
||||
void keep_imatrix(int ncall) const;
|
||||
};
|
||||
|
||||
@@ -200,7 +199,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
|
||||
}
|
||||
|
||||
void IMatrixCollector::save_imatrix() const {
|
||||
save_imatrix(m_params.ofile.empty() ? "imatrix.dat" : m_params.ofile.c_str(), m_params.dataset.c_str());
|
||||
save_imatrix(m_params.ofile.empty() ? "imatrix.dat" : m_params.ofile.c_str());
|
||||
}
|
||||
|
||||
void IMatrixCollector::keep_imatrix(int ncall) const {
|
||||
@@ -208,33 +207,24 @@ void IMatrixCollector::keep_imatrix(int ncall) const {
|
||||
if (file_name.empty()) file_name = "imatrix.dat";
|
||||
file_name += ".at_";
|
||||
file_name += std::to_string(ncall);
|
||||
save_imatrix(file_name.c_str(), m_params.dataset.c_str());
|
||||
save_imatrix(file_name.c_str());
|
||||
}
|
||||
|
||||
void IMatrixCollector::save_imatrix(const char * fname, const char * dataset) const {
|
||||
void IMatrixCollector::save_imatrix(const char * fname) const {
|
||||
std::ofstream out(fname, std::ios::binary);
|
||||
int n_entries = m_stats.size();
|
||||
out.write((const char *) &n_entries, sizeof(n_entries));
|
||||
for (const auto & p : m_stats) {
|
||||
out.write((const char*)&n_entries, sizeof(n_entries));
|
||||
for (auto& p : m_stats) {
|
||||
int len = p.first.size();
|
||||
out.write((const char *) &len, sizeof(len));
|
||||
out.write((const char*)&len, sizeof(len));
|
||||
out.write(p.first.c_str(), len);
|
||||
out.write((const char *) &p.second.ncall, sizeof(p.second.ncall));
|
||||
out.write((const char*)&p.second.ncall, sizeof(p.second.ncall));
|
||||
int nval = p.second.values.size();
|
||||
out.write((const char *) &nval, sizeof(nval));
|
||||
if (nval > 0) out.write((const char *) p.second.values.data(), nval * sizeof(float));
|
||||
out.write((const char*)&nval, sizeof(nval));
|
||||
if (nval > 0) out.write((const char*)p.second.values.data(), nval*sizeof(float));
|
||||
}
|
||||
|
||||
// Write the number of call the matrix was computed with
|
||||
out.write((const char *) &m_last_call, sizeof(m_last_call));
|
||||
|
||||
// Write the dataset name at the end of the file to later on specify it in quantize
|
||||
int n_dataset = strlen(dataset);
|
||||
out.write((const char *) &n_dataset, sizeof(n_dataset));
|
||||
out.write(dataset, n_dataset);
|
||||
|
||||
if (m_params.verbosity > 0) {
|
||||
fprintf(stderr, "\n%s: stored collected data after %d chunks in %s\n", __func__, m_last_call, fname);
|
||||
fprintf(stderr, "\n%s: stored collected data after %d chunks in %s\n",__func__,m_last_call,fname);
|
||||
}
|
||||
}
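On the b2751 side, `save_imatrix` appends a small trailer after the per-tensor entries: the chunk counter `m_last_call` followed by the length-prefixed dataset filename, which `quantize` later reads back (see `load_imatrix` further down). A minimal sketch of parsing just that trailer, assuming the stream is already positioned past the last entry; the function name is illustrative:

```cpp
// Sketch: read the optional trailer of a b2751-era imatrix file.
#include <cstdio>
#include <fstream>
#include <string>
#include <vector>

static void read_imatrix_trailer(std::ifstream & in) {
    if (in.peek() == EOF) {
        return; // older files have no trailer
    }
    int last_call = 0;
    in.read((char *) &last_call, sizeof(last_call));     // chunks the matrix was computed on
    int dataset_len = 0;
    in.read((char *) &dataset_len, sizeof(dataset_len)); // length of the dataset filename
    std::vector<char> buf(dataset_len);
    in.read(buf.data(), dataset_len);
    std::string dataset(buf.begin(), buf.end());
    printf("computed on %d chunks, dataset = '%s'\n", last_call, dataset.c_str());
}
```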
|
||||
|
||||
@@ -557,29 +547,6 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
}
|
||||
|
||||
gpt_params params;
|
||||
params.n_batch = 512;
|
||||
if (!gpt_params_parse(args.size(), args.data(), params)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
params.logits_all = true;
|
||||
params.n_batch = std::min(params.n_batch, params.n_ctx);
|
||||
|
||||
print_build_info();
|
||||
|
||||
if (params.seed == LLAMA_DEFAULT_SEED) {
|
||||
params.seed = time(NULL);
|
||||
}
|
||||
|
||||
fprintf(stderr, "%s: seed = %u\n", __func__, params.seed);
|
||||
|
||||
std::mt19937 rng(params.seed);
|
||||
if (params.random_prompt) {
|
||||
params.prompt = gpt_random_prompt(rng);
|
||||
}
|
||||
|
||||
sparams.dataset = params.prompt_file;
|
||||
g_collector.set_parameters(std::move(sparams));
|
||||
|
||||
if (!combine_files.empty()) {
|
||||
@@ -618,6 +585,28 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
}
|
||||
|
||||
gpt_params params;
|
||||
params.n_batch = 512;
|
||||
if (!gpt_params_parse(args.size(), args.data(), params)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
params.logits_all = true;
|
||||
params.n_batch = std::min(params.n_batch, params.n_ctx);
|
||||
|
||||
print_build_info();
|
||||
|
||||
if (params.seed == LLAMA_DEFAULT_SEED) {
|
||||
params.seed = time(NULL);
|
||||
}
|
||||
|
||||
fprintf(stderr, "%s: seed = %u\n", __func__, params.seed);
|
||||
|
||||
std::mt19937 rng(params.seed);
|
||||
if (params.random_prompt) {
|
||||
params.prompt = gpt_random_prompt(rng);
|
||||
}
|
||||
|
||||
llama_backend_init();
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
|
||||
@@ -104,7 +104,6 @@ static std::string format(const char * fmt, ...) {
|
||||
#define TN_POS_EMBD "%s.position_embd.weight"
|
||||
#define TN_CLASS_EMBD "v.class_embd"
|
||||
#define TN_PATCH_EMBD "v.patch_embd.weight"
|
||||
#define TN_PATCH_BIAS "v.patch_embd.bias"
|
||||
#define TN_ATTN_K "%s.blk.%d.attn_k.%s"
|
||||
#define TN_ATTN_Q "%s.blk.%d.attn_q.%s"
|
||||
#define TN_ATTN_V "%s.blk.%d.attn_v.%s"
|
||||
@@ -426,7 +425,6 @@ struct clip_vision_model {
|
||||
// embeddings
|
||||
struct ggml_tensor * class_embedding;
|
||||
struct ggml_tensor * patch_embeddings;
|
||||
struct ggml_tensor * patch_bias;
|
||||
struct ggml_tensor * position_embeddings;
|
||||
|
||||
struct ggml_tensor * pre_ln_w;
|
||||
@@ -503,11 +501,6 @@ struct clip_ctx {
|
||||
bool use_gelu = false;
|
||||
int32_t ftype = 1;
|
||||
|
||||
bool has_class_embedding = true;
|
||||
bool has_pre_norm = true;
|
||||
bool has_post_norm = false;
|
||||
bool has_patch_bias = false;
|
||||
|
||||
struct gguf_context * ctx_gguf;
|
||||
struct ggml_context * ctx_data;
|
||||
|
||||
@@ -533,7 +526,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
|
||||
const int patch_size = hparams.patch_size;
|
||||
const int num_patches = ((image_size / patch_size) * (image_size / patch_size));
|
||||
const int num_patches_per_side = image_size / patch_size; GGML_UNUSED(num_patches_per_side);
|
||||
const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
|
||||
const int num_positions = num_patches + 1;
|
||||
const int hidden_size = hparams.hidden_size;
|
||||
const int n_head = hparams.n_head;
|
||||
const int d_head = hidden_size / n_head;
|
||||
@@ -564,23 +557,16 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
|
||||
inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
|
||||
inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));
|
||||
|
||||
if (ctx->has_patch_bias) {
|
||||
// inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
|
||||
inp = ggml_add(ctx0, inp, model.patch_bias);
|
||||
}
|
||||
|
||||
// concat class_embeddings and patch_embeddings
|
||||
struct ggml_tensor * embeddings = inp;
|
||||
if (ctx->has_class_embedding) {
|
||||
embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
|
||||
embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
|
||||
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
|
||||
embeddings = ggml_acc(ctx0, embeddings, inp,
|
||||
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
|
||||
}
|
||||
struct ggml_tensor * embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
|
||||
ggml_set_name(embeddings, "embeddings");
|
||||
ggml_set_input(embeddings);
|
||||
|
||||
embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
|
||||
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
|
||||
|
||||
embeddings = ggml_acc(ctx0, embeddings, inp,
|
||||
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
|
||||
|
||||
struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
|
||||
ggml_set_name(positions, "positions");
|
||||
@@ -590,7 +576,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
|
||||
ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));
|
||||
|
||||
// pre-layernorm
|
||||
if (ctx->has_pre_norm) {
|
||||
{
|
||||
embeddings = ggml_norm(ctx0, embeddings, eps);
|
||||
ggml_set_name(embeddings, "pre_ln");
|
||||
|
||||
@@ -678,14 +664,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
|
||||
embeddings = cur;
|
||||
}
|
||||
|
||||
// post-layernorm
|
||||
if (ctx->has_post_norm) {
|
||||
embeddings = ggml_norm(ctx0, embeddings, eps);
|
||||
ggml_set_name(embeddings, "post_ln");
|
||||
|
||||
embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
|
||||
}
|
||||
|
||||
// llava projector
|
||||
{
|
||||
embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
|
||||
@@ -1170,39 +1148,12 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
|
||||
new_clip->has_class_embedding = true;
|
||||
} catch (const std::exception& e) {
|
||||
new_clip->has_class_embedding = false;
|
||||
}
|
||||
|
||||
try {
|
||||
vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
|
||||
vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
|
||||
new_clip->has_pre_norm = true;
|
||||
} catch (std::exception & e) {
|
||||
new_clip->has_pre_norm = false;
|
||||
}
|
||||
|
||||
try {
|
||||
vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
|
||||
vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
|
||||
new_clip->has_post_norm = true;
|
||||
} catch (std::exception & e) {
|
||||
new_clip->has_post_norm = false;
|
||||
}
|
||||
|
||||
try {
|
||||
vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
|
||||
new_clip->has_patch_bias = true;
|
||||
} catch (std::exception & e) {
|
||||
new_clip->has_patch_bias = false;
|
||||
}
|
||||
|
||||
try {
|
||||
vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
|
||||
vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
|
||||
vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
|
||||
vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
|
||||
vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
|
||||
} catch(const std::exception& e) {
|
||||
LOG_TEE("%s: failed to load vision model tensors\n", __func__);
|
||||
}
|
||||
@@ -1374,7 +1325,7 @@ bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length
|
||||
}
|
||||
|
||||
// Linear interpolation between two points
|
||||
inline float clip_lerp(float s, float e, float t) {
|
||||
inline float lerp(float s, float e, float t) {
|
||||
return s + (e - s) * t;
|
||||
}
|
||||
// Bilinear resize function
|
||||
@@ -1396,17 +1347,17 @@ static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int ta
|
||||
float y_lerp = py - y_floor;
|
||||
|
||||
for (int c = 0; c < 3; c++) {
|
||||
float top = clip_lerp(
|
||||
float top = lerp(
|
||||
static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
|
||||
static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
|
||||
x_lerp
|
||||
);
|
||||
float bottom = clip_lerp(
|
||||
float bottom = lerp(
|
||||
static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
|
||||
static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
|
||||
x_lerp
|
||||
);
|
||||
dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(clip_lerp(top, bottom, y_lerp));
|
||||
dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
|
||||
}
|
||||
}
|
||||
}
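The renamed helper is ordinary linear interpolation; `bilinear_resize` evaluates it twice along x and once along y for each channel. A quick standalone check of the formula:

```cpp
// Quick check of the linear-interpolation helper used by the bilinear resize above.
#include <cstdio>

static float lerp(float s, float e, float t) { return s + (e - s) * t; }

int main() {
    // Interpolating a pixel channel a quarter of the way from 10 to 30.
    printf("%.2f\n", lerp(10.0f, 30.0f, 0.25f)); // prints 15.00
    return 0;
}
```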
|
||||
|
||||
@@ -1,6 +1,6 @@
set(TARGET quantize)
add_executable(${TARGET} quantize.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama common ${CMAKE_THREAD_LIBS_INIT})
target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT})
target_include_directories(${TARGET} PRIVATE ../../common)
target_compile_features(${TARGET} PRIVATE cxx_std_11)
@@ -8,6 +8,7 @@
|
||||
#include <unordered_map>
|
||||
#include <fstream>
|
||||
#include <cmath>
|
||||
#include <algorithm>
|
||||
|
||||
struct quant_option {
|
||||
std::string name;
|
||||
@@ -52,10 +53,6 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
|
||||
{ "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
|
||||
};
|
||||
|
||||
static const char * const LLM_KV_QUANTIZE_IMATRIX_FILE = "quantize.imatrix.file";
|
||||
static const char * const LLM_KV_QUANTIZE_IMATRIX_DATASET = "quantize.imatrix.dataset";
|
||||
static const char * const LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES = "quantize.imatrix.entries_count";
|
||||
static const char * const LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS = "quantize.imatrix.chunks_count";
|
||||
|
||||
static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std::string & ftype_str_out) {
|
||||
std::string ftype_str;
|
||||
@@ -116,7 +113,7 @@ static void usage(const char * executable) {
|
||||
exit(1);
|
||||
}
|
||||
|
||||
static int load_imatrix(const std::string & imatrix_file, std::string & imatrix_dataset, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
|
||||
static void load_imatrix(const std::string & imatrix_file, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
|
||||
std::ifstream in(imatrix_file.c_str(), std::ios::binary);
|
||||
if (!in) {
|
||||
printf("%s: failed to open %s\n",__func__, imatrix_file.c_str());
|
||||
@@ -163,33 +160,18 @@ static int load_imatrix(const std::string & imatrix_file, std::string & imatrix_
|
||||
printf("%s: loaded data (size = %6d, ncall = %6d) for '%s'\n", __func__, int(e.size()), ncall, name.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
// latest imatrix version contains the dataset filename at the end of the file
|
||||
int m_last_call = 0;
|
||||
if (in.peek() != EOF) {
|
||||
in.read((char *)&m_last_call, sizeof(m_last_call));
|
||||
int dataset_len;
|
||||
in.read((char *)&dataset_len, sizeof(dataset_len));
|
||||
std::vector<char> dataset_as_vec(dataset_len);
|
||||
in.read(dataset_as_vec.data(), dataset_len);
|
||||
imatrix_dataset.assign(dataset_as_vec.begin(), dataset_as_vec.end());
|
||||
printf("%s: imatrix dataset='%s'\n", __func__, imatrix_dataset.c_str());
|
||||
}
|
||||
printf("%s: loaded %d importance matrix entries from %s computed on %d chunks\n", __func__, int(imatrix_data.size()), imatrix_file.c_str(), m_last_call);
|
||||
return m_last_call;
|
||||
printf("%s: loaded %d importance matrix entries from %s\n", __func__, int(imatrix_data.size()), imatrix_file.c_str());
|
||||
}
|
||||
|
||||
static int prepare_imatrix(const std::string & imatrix_file,
|
||||
std::string & imatrix_dataset,
|
||||
static void prepare_imatrix(const std::string & imatrix_file,
|
||||
const std::vector<std::string> & included_weights,
|
||||
const std::vector<std::string> & excluded_weights,
|
||||
std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
|
||||
int m_last_call = -1;
|
||||
if (!imatrix_file.empty()) {
|
||||
m_last_call = load_imatrix(imatrix_file, imatrix_dataset, imatrix_data);
|
||||
load_imatrix(imatrix_file, imatrix_data);
|
||||
}
|
||||
if (imatrix_data.empty()) {
|
||||
return m_last_call;
|
||||
return;
|
||||
}
|
||||
if (!excluded_weights.empty()) {
|
||||
for (auto& name : excluded_weights) {
|
||||
@@ -215,7 +197,6 @@ static int prepare_imatrix(const std::string & imatrix_file,
|
||||
if (!imatrix_data.empty()) {
|
||||
printf("%s: have %d importance matrix entries\n", __func__, int(imatrix_data.size()));
|
||||
}
|
||||
return m_last_call;
|
||||
}
|
||||
|
||||
static ggml_type parse_ggml_type(const char * arg) {
|
||||
@@ -230,6 +211,43 @@ static ggml_type parse_ggml_type(const char * arg) {
|
||||
return result;
|
||||
}
|
||||
|
||||
static bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
|
||||
const char* sep = strchr(data, '=');
|
||||
if (sep == nullptr || sep - data >= 128) {
|
||||
fprintf(stderr, "%s: malformed KV override '%s'\n", __func__, data);
|
||||
return false;
|
||||
}
|
||||
llama_model_kv_override kvo;
|
||||
std::strncpy(kvo.key, data, sep - data);
|
||||
kvo.key[sep - data] = 0;
|
||||
sep++;
|
||||
if (strncmp(sep, "int:", 4) == 0) {
|
||||
sep += 4;
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
|
||||
kvo.int_value = std::atol(sep);
|
||||
} else if (strncmp(sep, "float:", 6) == 0) {
|
||||
sep += 6;
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
|
||||
kvo.float_value = std::atof(sep);
|
||||
} else if (strncmp(sep, "bool:", 5) == 0) {
|
||||
sep += 5;
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
|
||||
if (std::strcmp(sep, "true") == 0) {
|
||||
kvo.bool_value = true;
|
||||
} else if (std::strcmp(sep, "false") == 0) {
|
||||
kvo.bool_value = false;
|
||||
} else {
|
||||
fprintf(stderr, "%s: invalid boolean value for KV override '%s'\n", __func__, data);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
fprintf(stderr, "%s: invalid type for KV override '%s'\n", __func__, data);
|
||||
return false;
|
||||
}
|
||||
overrides.emplace_back(std::move(kvo));
|
||||
return true;
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
if (argc < 3) {
|
||||
usage(argv[0]);
|
||||
@@ -298,43 +316,10 @@ int main(int argc, char ** argv) {
|
||||
usage(argv[0]);
|
||||
}
|
||||
|
||||
std::string imatrix_dataset;
|
||||
std::unordered_map<std::string, std::vector<float>> imatrix_data;
|
||||
int m_last_call = prepare_imatrix(imatrix_file, imatrix_dataset, included_weights, excluded_weights, imatrix_data);
|
||||
prepare_imatrix(imatrix_file, included_weights, excluded_weights, imatrix_data);
|
||||
if (!imatrix_data.empty()) {
|
||||
params.imatrix = &imatrix_data;
|
||||
{
|
||||
llama_model_kv_override kvo;
|
||||
std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_FILE);
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
|
||||
strncpy(kvo.val_str, imatrix_file.c_str(), 127);
|
||||
kvo.val_str[127] = '\0';
|
||||
kv_overrides.emplace_back(std::move(kvo));
|
||||
}
|
||||
if (!imatrix_dataset.empty()) {
|
||||
llama_model_kv_override kvo;
|
||||
std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_DATASET);
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
|
||||
strncpy(kvo.val_str, imatrix_dataset.c_str(), 127);
|
||||
kvo.val_str[127] = '\0';
|
||||
kv_overrides.emplace_back(std::move(kvo));
|
||||
}
|
||||
|
||||
{
|
||||
llama_model_kv_override kvo;
|
||||
std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES);
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
|
||||
kvo.val_i64 = imatrix_data.size();
|
||||
kv_overrides.emplace_back(std::move(kvo));
|
||||
}
|
||||
|
||||
if (m_last_call > 0) {
|
||||
llama_model_kv_override kvo;
|
||||
std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS);
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
|
||||
kvo.val_i64 = m_last_call;
|
||||
kv_overrides.emplace_back(std::move(kvo));
|
||||
}
|
||||
}
|
||||
if (!kv_overrides.empty()) {
|
||||
kv_overrides.emplace_back();
|
||||
|
||||
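On the b2751 side, `quantize` stamps the imatrix provenance into the output model under the `quantize.imatrix.*` keys listed above. A sketch of reading the string-valued keys back through the public gguf API, assuming a gguf-era `ggml.h`; the model filename is a placeholder:

```cpp
// Sketch: inspect the quantize.imatrix.* metadata recorded by the b2751-side quantize tool.
#include <cstdio>
#include "ggml.h"

int main() {
    struct gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ nullptr };
    struct gguf_context * ctx = gguf_init_from_file("model-q4_k_m.gguf", params);
    if (!ctx) {
        fprintf(stderr, "failed to open model\n");
        return 1;
    }

    const char * keys[] = { "quantize.imatrix.file", "quantize.imatrix.dataset" };
    for (const char * key : keys) {
        const int id = gguf_find_key(ctx, key);
        if (id >= 0) {
            printf("%s = %s\n", key, gguf_get_val_str(ctx, id));
        }
    }

    gguf_free(ctx);
    return 0;
}
```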
@@ -90,8 +90,7 @@ export default function () {
"model": model,
"stream": true,
"seed": 42,
"max_tokens": max_tokens,
"stop": ["<|im_end|>"] // This is temporary for phi-2 base (i.e. not instructed) since the server expects that the model always to emit BOS
"max_tokens": max_tokens
}

const params = {method: 'POST', body: JSON.stringify(payload)};
@@ -1207,27 +1207,6 @@ struct server_context {
|
||||
LOG_VERBOSE("eos token found", {});
|
||||
}
|
||||
|
||||
auto n_ctx_train = llama_n_ctx_train(model);
|
||||
if (slot.params.n_predict < 1 && slot.n_predict < 1 && slot.ga_n == 1
|
||||
&& slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
|
||||
LOG_WARNING("n_predict is not set and self-context extend is disabled."
|
||||
" Limiting generated tokens to n_ctx_train to avoid EOS-less generation infinite loop", {
|
||||
{ "id_slot", slot.id },
|
||||
{ "params.n_predict", slot.params.n_predict },
|
||||
{ "slot.n_prompt_tokens", slot.n_prompt_tokens },
|
||||
{ "slot.n_decoded", slot.n_decoded },
|
||||
{ "slot.n_predict", slot.n_predict },
|
||||
{ "n_slots", params.n_parallel },
|
||||
{ "slot.n_ctx", slot.n_ctx },
|
||||
{ "n_ctx", n_ctx },
|
||||
{ "n_ctx_train", n_ctx_train },
|
||||
{ "ga_n", slot.ga_n },
|
||||
});
|
||||
slot.truncated = true;
|
||||
slot.stopped_limit = true;
|
||||
slot.has_next_token = false; // stop prediction
|
||||
}
|
||||
|
||||
LOG_VERBOSE("next token", {
|
||||
{"id_slot", slot.id},
|
||||
{"id_task", slot.id_task},
|
||||
@@ -2162,7 +2141,7 @@ struct server_context {
|
||||
});
|
||||
|
||||
// process the created batch of tokens
|
||||
for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
|
||||
for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
|
||||
const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
|
||||
|
||||
for (auto & slot : slots) {
|
||||
@@ -2392,7 +2371,7 @@ static void server_print_usage(const char * argv0, const gpt_params & params, co
|
||||
printf(" -n, --n-predict maximum tokens to predict (default: %d)\n", params.n_predict);
|
||||
printf(" --override-kv KEY=TYPE:VALUE\n");
|
||||
printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
|
||||
printf(" types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
|
||||
printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
|
||||
printf(" -gan N, --grp-attn-n N set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`\n");
|
||||
printf(" -gaw N, --grp-attn-w N set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`\n");
|
||||
printf(" --chat-template JINJA_TEMPLATE\n");
|
||||
@@ -2823,11 +2802,43 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
if (!parse_kv_override(argv[i], params.kv_overrides)) {
|
||||
char * sep = strchr(argv[i], '=');
|
||||
if (sep == nullptr || sep - argv[i] >= 128) {
|
||||
fprintf(stderr, "error: Malformed KV override: %s\n", argv[i]);
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
|
||||
struct llama_model_kv_override kvo;
|
||||
std::strncpy(kvo.key, argv[i], sep - argv[i]);
|
||||
kvo.key[sep - argv[i]] = 0;
|
||||
sep++;
|
||||
if (strncmp(sep, "int:", 4) == 0) {
|
||||
sep += 4;
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
|
||||
kvo.int_value = std::atol(sep);
|
||||
} else if (strncmp(sep, "float:", 6) == 0) {
|
||||
sep += 6;
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
|
||||
kvo.float_value = std::atof(sep);
|
||||
} else if (strncmp(sep, "bool:", 5) == 0) {
|
||||
sep += 5;
|
||||
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
|
||||
if (std::strcmp(sep, "true") == 0) {
|
||||
kvo.bool_value = true;
|
||||
} else if (std::strcmp(sep, "false") == 0) {
|
||||
kvo.bool_value = false;
|
||||
} else {
|
||||
fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.kv_overrides.push_back(kvo);
|
||||
} else {
|
||||
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||
server_print_usage(argv[0], default_params, default_sparams);
|
||||
|
||||
@@ -1784,14 +1784,12 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) {

void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
// reset state for the next run
if (!sched->is_reset) {
size_t hash_size = sched->hash_set.size;
memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); // NOLINT
memset(sched->tensor_backend_id, -1, sizeof(sched->tensor_backend_id[0]) * hash_size);
memset(sched->tensor_copies, 0, sizeof(sched->tensor_copies[0]) * hash_size);
size_t hash_size = sched->hash_set.size;
memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); // NOLINT
memset(sched->tensor_backend_id, -1, sizeof(sched->tensor_backend_id[0]) * hash_size);
memset(sched->tensor_copies, 0, sizeof(sched->tensor_copies[0]) * hash_size);

sched->is_reset = true;
}
sched->is_reset = true;
sched->is_alloc = false;
}
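The b2751 side guards the three `memset` calls behind the `is_reset` flag so that back-to-back resets stay cheap; the older code wipes unconditionally. A toy sketch of that lazy-reset pattern, detached from the scheduler types:

```cpp
// Lazy-reset pattern: skip the expensive wipe when nothing ran since the last reset.
#include <cstring>
#include <vector>

struct scratch_table {
    std::vector<int> slots;
    bool is_reset = false;

    explicit scratch_table(size_t n) : slots(n, -1), is_reset(true) {}

    void reset() {
        if (!is_reset) {
            std::memset(slots.data(), -1, slots.size() * sizeof(int));
            is_reset = true;
        }
    }
    void use(size_t i, int v) {
        slots[i] = v;
        is_reset = false; // the next reset has to wipe again
    }
};
```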
ggml-quants.c (284 changes)
@@ -12383,287 +12383,3 @@ void quantize_row_iq2_s(const float * restrict x, void * restrict vy, int64_t k)
|
||||
block_iq2_s * restrict y = vy;
|
||||
quantize_row_iq2_s_reference(x, y, k);
|
||||
}
|
||||
|
||||
static bool validate_float(float f, size_t i) {
|
||||
if (isinf(f)) {
|
||||
fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (isnan(f)) {
|
||||
fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool isinf_fp16(ggml_fp16_t f) {
|
||||
return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) == 0;
|
||||
}
|
||||
|
||||
static bool isnan_fp16(ggml_fp16_t f) {
|
||||
return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) != 0;
|
||||
}
|
||||
|
||||
static bool validate_fp16(ggml_fp16_t f, size_t i) {
|
||||
if (isinf_fp16(f)) {
|
||||
fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (isnan_fp16(f)) {
|
||||
fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#define VALIDATE_ROW_DATA_D_F16_IMPL(type, data, nb) \
|
||||
const type * q = (const type *) (data); \
|
||||
for (size_t i = 0; i < (nb); ++i) { \
|
||||
if (!validate_fp16(q[i].d, i)) { \
|
||||
return false; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define VALIDATE_ROW_DATA_DM_F16_IMPL(type, data, nb, d, m) \
|
||||
const type * q = (const type *) (data); \
|
||||
for (size_t i = 0; i < (nb); ++i) { \
|
||||
if (!validate_fp16(q[i].d, i) || !validate_fp16(q[i].m, i)) { \
|
||||
return false; \
|
||||
} \
|
||||
}
|
||||
|
||||
bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) {
|
||||
if (type < 0 || type >= GGML_TYPE_COUNT) {
|
||||
fprintf(stderr, "%s: invalid type %d\n", __func__, type);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (nbytes % ggml_type_size(type) != 0) {
|
||||
fprintf(stderr, "%s: invalid size %zu for type %d\n", __func__, nbytes, type);
|
||||
return false;
|
||||
}
|
||||
|
||||
const size_t nb = nbytes/ggml_type_size(type);
|
||||
|
||||
switch (type) {
|
||||
case GGML_TYPE_F16:
|
||||
{
|
||||
const ggml_fp16_t * f = (const ggml_fp16_t *) data;
|
||||
size_t i = 0;
|
||||
#if defined(__AVX2__)
|
||||
for (; i + 15 < nb; i += 16) {
|
||||
__m256i v = _mm256_loadu_si256((const __m256i *)(f + i));
|
||||
__m256i vexp = _mm256_and_si256(v, _mm256_set1_epi16(0x7c00));
|
||||
__m256i cmp = _mm256_cmpeq_epi16(vexp, _mm256_set1_epi16(0x7c00));
|
||||
int mask = _mm256_movemask_epi8(cmp);
|
||||
if (mask) {
|
||||
for (size_t j = 0; j < 16; ++j) {
|
||||
if (!validate_fp16(f[i + j], i + j)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
GGML_UNREACHABLE();
|
||||
}
|
||||
}
|
||||
#elif defined(__ARM_NEON)
|
||||
for (; i + 7 < nb; i += 8) {
|
||||
uint16x8_t v = vld1q_u16(f + i);
|
||||
uint16x8_t vexp = vandq_u16(v, vdupq_n_u16(0x7c00));
|
||||
uint16x8_t cmp = vceqq_u16(vexp, vdupq_n_u16(0x7c00));
|
||||
uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(cmp, 4)), 0);
|
||||
if (mask) {
|
||||
for (size_t j = 0; j < 8; ++j) {
|
||||
if (!validate_fp16(f[i + j], i + j)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
GGML_UNREACHABLE();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
for (; i < nb; ++i) {
|
||||
if (!validate_fp16(f[i], i)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} break;
|
||||
case GGML_TYPE_F32:
|
||||
{
|
||||
const float * f = (const float *) data;
|
||||
size_t i = 0;
|
||||
#if defined(__AVX2__)
|
||||
for (; i + 7 < nb; i += 8) {
|
||||
__m256i v = _mm256_loadu_si256((const __m256i *)(f + i));
|
||||
__m256i vexp = _mm256_and_si256(v, _mm256_set1_epi32(0x7f800000));
|
||||
__m256i cmp = _mm256_cmpeq_epi32(vexp, _mm256_set1_epi32(0x7f800000));
|
||||
int mask = _mm256_movemask_epi8(cmp);
|
||||
if (mask) {
|
||||
for (size_t j = 0; j < 8; ++j) {
|
||||
if (!validate_float(f[i + j], i + j)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
GGML_UNREACHABLE();
|
||||
}
|
||||
}
|
||||
#elif defined(__ARM_NEON)
|
||||
for (; i + 3 < nb; i += 4) {
|
||||
uint32x4_t v = vld1q_u32((const uint32_t *)f + i);
|
||||
uint32x4_t vexp = vandq_u32(v, vdupq_n_u32(0x7f800000));
|
||||
uint32x4_t cmp = vceqq_u32(vexp, vdupq_n_u32(0x7f800000));
|
||||
uint64_t mask = vget_lane_u64(vreinterpret_u64_u16(vshrn_n_u32(cmp, 8)), 0);
|
||||
if (mask) {
|
||||
for (size_t j = 0; j < 4; ++j) {
|
||||
if (!validate_float(f[i + j], i + j)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
GGML_UNREACHABLE();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
for (; i < nb; ++i) {
|
||||
if (!validate_float(f[i], i)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} break;
|
||||
case GGML_TYPE_F64:
|
||||
{
|
||||
const double * f = (const double *) data;
|
||||
for (size_t i = 0; i < nb; ++i) {
|
||||
if (!validate_float(f[i], i)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} break;
|
||||
case GGML_TYPE_Q4_0:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_q4_0, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_Q4_1:
|
||||
{
|
||||
VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_1, data, nb, d, m);
|
||||
} break;
|
||||
case GGML_TYPE_Q5_0:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_0, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_Q5_1:
|
||||
{
|
||||
VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_1, data, nb, d, m);
|
||||
} break;
|
||||
case GGML_TYPE_Q8_0:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_q8_0, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_Q2_K:
|
||||
{
|
||||
VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin);
|
||||
} break;
|
||||
case GGML_TYPE_Q3_K:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_q3_K, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_Q4_K:
|
||||
{
|
||||
#ifdef GGML_QKK_64
|
||||
VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d[0], d[1]);
|
||||
#else
|
||||
VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d, dmin);
|
||||
#endif
|
||||
} break;
|
||||
case GGML_TYPE_Q5_K:
|
||||
{
|
||||
#ifdef GGML_QKK_64
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_K, data, nb);
|
||||
#else
|
||||
VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_K, data, nb, d, dmin);
|
||||
#endif
|
||||
} break;
|
||||
case GGML_TYPE_Q6_K:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_q6_K, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_Q8_K:
|
||||
{
|
||||
const block_q8_K * q = (const block_q8_K *) data;
|
||||
for (size_t i = 0; i < nb; ++i) {
|
||||
if (!validate_float(q[i].d, i)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} break;
|
||||
case GGML_TYPE_IQ1_S:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_iq1_s, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_IQ1_M:
|
||||
{
|
||||
const block_iq1_m * q = (const block_iq1_m *) data;
|
||||
for (size_t i = 0; i < nb; ++i) {
|
||||
#if QK_K == 64
|
||||
if (!validate_fp16(q[i].d, i)) {
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
iq1m_scale_t scale;
|
||||
const uint16_t * sc = (const uint16_t *)q[i].scales;
|
||||
scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
|
||||
if (!validate_fp16(scale.f16, i)) {
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
} break;
|
||||
case GGML_TYPE_IQ2_XXS:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xxs, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_IQ2_XS:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xs, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_IQ2_S:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_s, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_IQ3_XXS:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_xxs, data, nb);
|
||||
} break;
|
||||
|
||||
case GGML_TYPE_IQ3_S:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_s, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_IQ4_XS:
|
||||
#if QK_K != 64
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_xs, data, nb);
|
||||
} break;
|
||||
#endif
|
||||
// with QK_K == 64, iq4_xs is iq4_nl
|
||||
case GGML_TYPE_IQ4_NL:
|
||||
{
|
||||
VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb);
|
||||
} break;
|
||||
case GGML_TYPE_I8:
|
||||
case GGML_TYPE_I16:
|
||||
case GGML_TYPE_I32:
|
||||
case GGML_TYPE_I64:
|
||||
// nothing to validate
|
||||
break;
|
||||
default:
|
||||
{
|
||||
fprintf(stderr, "%s: invalid type %d\n", __func__, type);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
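The validators above classify IEEE 754 binary16 values purely from the bit pattern: an all-ones exponent field (0x7c00) means infinity when the mantissa is zero and NaN otherwise. A standalone check of those predicates:

```cpp
// Bit-level binary16 classification, mirroring isinf_fp16 / isnan_fp16 above.
#include <cstdint>
#include <cstdio>

static bool fp16_is_inf(uint16_t f) { return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) == 0; }
static bool fp16_is_nan(uint16_t f) { return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) != 0; }

int main() {
    const uint16_t pos_inf = 0x7c00, quiet_nan = 0x7e00, one = 0x3c00;
    printf("inf: %d nan: %d finite: %d\n",
           fp16_is_inf(pos_inf), fp16_is_nan(quiet_nan),
           !fp16_is_inf(one) && !fp16_is_nan(one));
    return 0;
}
```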
|
||||
|
||||
ggml.c (43 changes)
@@ -858,6 +858,18 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
|
||||
// simd mappings
|
||||
//
|
||||
|
||||
#if defined(__ARM_NEON)
|
||||
#if !defined(__aarch64__)
|
||||
|
||||
// 64-bit compatibility
|
||||
|
||||
inline static float vaddvq_f32(float32x4_t v) {
|
||||
return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// we define a common set of C macros which map to specific intrinsics based on the current architecture
|
||||
// we then implement the fundamental computation operations below using only these macros
|
||||
// adding support for new architectures requires to define the corresponding SIMD macros
|
||||
@@ -20614,7 +20626,7 @@ static void gguf_free_kv(struct gguf_kv * kv) {
|
||||
}
|
||||
|
||||
struct gguf_context * gguf_init_empty(void) {
|
||||
struct gguf_context * ctx = GGML_CALLOC(1, sizeof(struct gguf_context));
|
||||
struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
|
||||
|
||||
memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
|
||||
ctx->header.version = GGUF_VERSION;
|
||||
@@ -20659,7 +20671,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
|
||||
|
||||
bool ok = true;
|
||||
|
||||
struct gguf_context * ctx = GGML_CALLOC(1, sizeof(struct gguf_context));
|
||||
struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
|
||||
|
||||
// read the header
|
||||
{
|
||||
@@ -20696,13 +20708,9 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
|
||||
|
||||
// read the kv pairs
|
||||
{
|
||||
const uint64_t n_kv = ctx->header.n_kv;
|
||||
ctx->kv = GGML_MALLOC(ctx->header.n_kv * sizeof(struct gguf_kv));
|
||||
|
||||
// header.n_kv will hold the actual value of pairs that were successfully read in the loop below
|
||||
ctx->header.n_kv = 0;
|
||||
ctx->kv = GGML_CALLOC(n_kv, sizeof(struct gguf_kv));
|
||||
|
||||
for (uint64_t i = 0; i < n_kv; ++i) {
|
||||
for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
|
||||
struct gguf_kv * kv = &ctx->kv[i];
|
||||
|
||||
//fprintf(stderr, "%s: reading kv %d\n", __func__, i);
|
||||
@@ -20751,7 +20759,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
|
||||
return NULL;
|
||||
}
|
||||
|
||||
kv->value.arr.data = GGML_CALLOC(kv->value.arr.n, gguf_type_size(kv->value.arr.type));
|
||||
kv->value.arr.data = GGML_MALLOC(kv->value.arr.n * gguf_type_size(kv->value.arr.type));
|
||||
|
||||
ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type), &offset);
|
||||
} break;
|
||||
@@ -20765,7 +20773,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
|
||||
return NULL;
|
||||
}
|
||||
|
||||
kv->value.arr.data = GGML_CALLOC(kv->value.arr.n, sizeof(struct gguf_str));
|
||||
kv->value.arr.data = GGML_MALLOC(kv->value.arr.n * sizeof(struct gguf_str));
|
||||
|
||||
for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
|
||||
ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
|
||||
@@ -20781,8 +20789,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
|
||||
if (!ok) {
|
||||
break;
|
||||
}
|
||||
|
||||
ctx->header.n_kv++;
|
||||
}
|
||||
|
||||
if (!ok) {
|
||||
@@ -20795,7 +20801,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
|
||||
|
||||
// read the tensor infos
|
||||
{
|
||||
ctx->infos = GGML_CALLOC(ctx->header.n_tensors, sizeof(struct gguf_tensor_info));
|
||||
ctx->infos = GGML_MALLOC(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));
|
||||
|
||||
for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
|
||||
struct gguf_tensor_info * info = &ctx->infos[i];
|
||||
@@ -20816,7 +20822,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
|
||||
ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
|
||||
ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
|
||||
|
||||
// TODO: return an error instead of crashing with GGML_ASSERT
|
||||
gguf_tensor_info_sanitize(info);
|
||||
|
||||
if (!ok) {
|
||||
@@ -20987,7 +20992,7 @@ void gguf_free(struct gguf_context * ctx) {
|
||||
GGML_FREE(ctx->infos);
|
||||
}
|
||||
|
||||
GGML_FREE(ctx);
|
||||
GGML_ALIGNED_FREE(ctx);
|
||||
}
|
||||
|
||||
const char * gguf_type_name(enum gguf_type type) {
|
||||
@@ -21298,7 +21303,7 @@ void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_ty
|
||||
ctx->kv[idx].type = GGUF_TYPE_ARRAY;
|
||||
ctx->kv[idx].value.arr.type = type;
|
||||
ctx->kv[idx].value.arr.n = n;
|
||||
ctx->kv[idx].value.arr.data = GGML_CALLOC(n, gguf_type_size(type));
|
||||
ctx->kv[idx].value.arr.data = GGML_MALLOC(n*gguf_type_size(type));
|
||||
memcpy(ctx->kv[idx].value.arr.data, data, n*gguf_type_size(type));
|
||||
}
|
||||
|
||||
@@ -21308,7 +21313,7 @@ void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char **
|
||||
ctx->kv[idx].type = GGUF_TYPE_ARRAY;
|
||||
ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
|
||||
ctx->kv[idx].value.arr.n = n;
|
||||
ctx->kv[idx].value.arr.data = GGML_CALLOC(n, sizeof(struct gguf_str));
|
||||
ctx->kv[idx].value.arr.data = GGML_MALLOC(n*sizeof(struct gguf_str));
|
||||
for (int i = 0; i < n; i++) {
|
||||
struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
|
||||
str->n = strlen(data[i]);
|
||||
@@ -21335,7 +21340,7 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
|
||||
case GGUF_TYPE_ARRAY:
|
||||
{
|
||||
if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
|
||||
const char ** data = GGML_CALLOC(src->kv[i].value.arr.n, sizeof(char *));
|
||||
const char ** data = GGML_MALLOC(src->kv[i].value.arr.n*sizeof(char *));
|
||||
for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
|
||||
data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
|
||||
}
|
||||
@@ -21423,7 +21428,7 @@ struct gguf_buf {
|
||||
|
||||
static struct gguf_buf gguf_buf_init(size_t size) {
|
||||
struct gguf_buf buf = {
|
||||
/*buf.data =*/ size == 0 ? NULL : GGML_CALLOC(1, size),
|
||||
/*buf.data =*/ size == 0 ? NULL : GGML_MALLOC(size),
|
||||
/*buf.size =*/ size,
|
||||
/*buf.offset =*/ 0,
|
||||
};
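Several of the ggml.c hunks above switch between a calloc-style helper and plain malloc. Two properties differ: `calloc(n, size)` zero-initializes and must fail when the product overflows `size_t`, while `malloc(n * size)` does neither. A standalone illustration of the overflow case (what the wrapped malloc returns is allocator-defined):

```cpp
// calloc rejects an element count whose byte total does not fit in size_t;
// the equivalent malloc expression silently wraps around first.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
    const size_t n    = SIZE_MAX / 2 + 1; // deliberately absurd element count
    const size_t size = 4;

    void * a = calloc(n, size);  // NULL: the product overflows
    void * b = malloc(n * size); // n * size wraps to 0 here; result is allocator-defined

    printf("calloc -> %p, malloc(wrapped) -> %p\n", a, b);
    free(a);
    free(b);
    return 0;
}
```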
|
||||
|
||||
ggml.h (2 changes)

@@ -762,8 +762,6 @@ extern "C" {
// use this to compute the memory overhead of a tensor
GGML_API size_t ggml_tensor_overhead(void);

GGML_API bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes);

// main

GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
llama.cpp (163 changes)
@@ -75,7 +75,6 @@
|
||||
#include <forward_list>
|
||||
#include <fstream>
|
||||
#include <functional>
|
||||
#include <future>
|
||||
#include <initializer_list>
|
||||
#include <locale>
|
||||
#include <map>
|
||||
@@ -2883,7 +2882,6 @@ namespace GGUFMeta {
|
||||
case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool";
|
||||
case LLAMA_KV_OVERRIDE_TYPE_INT: return "int";
|
||||
case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
|
||||
case LLAMA_KV_OVERRIDE_TYPE_STR: return "str";
|
||||
}
|
||||
return "unknown";
|
||||
}
|
||||
@@ -2895,16 +2893,13 @@ namespace GGUFMeta {
|
||||
__func__, override_type_to_str(ovrd->tag), ovrd->key);
|
||||
switch (ovrd->tag) {
|
||||
case LLAMA_KV_OVERRIDE_TYPE_BOOL: {
|
||||
LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
|
||||
LLAMA_LOG_INFO("%s\n", ovrd->bool_value ? "true" : "false");
|
||||
} break;
|
||||
case LLAMA_KV_OVERRIDE_TYPE_INT: {
|
||||
LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
|
||||
LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->int_value);
|
||||
} break;
|
||||
case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
|
||||
LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
|
||||
} break;
|
||||
case LLAMA_KV_OVERRIDE_TYPE_STR: {
|
||||
LLAMA_LOG_INFO("%s\n", ovrd->val_str);
|
||||
LLAMA_LOG_INFO("%.6f\n", ovrd->float_value);
|
||||
} break;
|
||||
default:
|
||||
// Shouldn't be possible to end up here, but just in case...
|
||||
@@ -2923,7 +2918,7 @@ namespace GGUFMeta {
|
||||
static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
|
||||
try_override(OT & target, const struct llama_model_kv_override * ovrd) {
|
||||
if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
|
||||
target = ovrd->val_bool;
|
||||
target = ovrd->bool_value;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@@ -2933,7 +2928,7 @@ namespace GGUFMeta {
|
||||
static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
|
||||
try_override(OT & target, const struct llama_model_kv_override * ovrd) {
|
||||
if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
|
||||
target = ovrd->val_i64;
|
||||
target = ovrd->int_value;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@@ -2943,7 +2938,7 @@ namespace GGUFMeta {
|
||||
static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
|
||||
try_override(T & target, const struct llama_model_kv_override * ovrd) {
|
||||
if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
|
||||
target = ovrd->val_f64;
|
||||
target = ovrd->float_value;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@@ -2952,11 +2947,12 @@ namespace GGUFMeta {
|
||||
template<typename OT>
|
||||
static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
|
||||
try_override(T & target, const struct llama_model_kv_override * ovrd) {
|
||||
if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
|
||||
target = ovrd->val_str;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
(void)target;
|
||||
(void)ovrd;
|
||||
if (!ovrd) { return false; }
|
||||
// Currently, we should never end up here so it would be a bug if we do.
|
||||
throw std::runtime_error(format("Unsupported attempt to override string type for metadata key %s\n",
|
||||
ovrd ? ovrd->key : "NULL"));
|
||||
}
|
||||
|
||||
static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
|
||||
@@ -2989,7 +2985,6 @@ struct llama_model_loader {
|
||||
size_t n_bytes = 0;
|
||||
|
||||
bool use_mmap = false;
|
||||
bool check_tensors;
|
||||
|
||||
llama_files files;
|
||||
llama_ftype ftype;
|
||||
@@ -3004,13 +2999,9 @@ struct llama_model_loader {
|
||||
|
||||
ggml_tensor * tensor;
|
||||
|
||||
llama_tensor_weight(const llama_file * file, uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
|
||||
llama_tensor_weight(uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
|
||||
const int tensor_idx = gguf_find_tensor(gguf_ctx, name);
|
||||
offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);
|
||||
|
||||
if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
|
||||
throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", name));
|
||||
}
|
||||
}
|
||||
};
|
||||
std::vector<llama_tensor_weight> weights;
|
||||
@@ -3023,7 +3014,7 @@ struct llama_model_loader {
|
||||
std::string arch_name;
|
||||
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
|
||||
|
||||
llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
|
||||
llama_model_loader(const std::string & fname, bool use_mmap, const struct llama_model_kv_override * param_overrides_p) {
|
||||
int trace = 0;
|
||||
if (getenv("LLAMA_TRACE")) {
|
||||
trace = atoi(getenv("LLAMA_TRACE"));
|
||||
@@ -3049,15 +3040,15 @@ struct llama_model_loader {
|
||||
get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
|
||||
llm_kv = LLM_KV(llm_arch_from_string(arch_name));
|
||||
|
||||
files.emplace_back(new llama_file(fname.c_str(), "rb"));
|
||||
contexts.emplace_back(ctx);
|
||||
|
||||
// Save tensors data offset of the main file.
|
||||
// For subsidiary files, `meta` tensor data offset must not be used,
|
||||
// so we build a unified tensors index for weights.
|
||||
for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
|
||||
weights.emplace_back(files.back().get(), 0, cur->name, meta, cur);
|
||||
weights.emplace_back(0, cur->name, meta, cur);
|
||||
}
|
||||
files.emplace_back(new llama_file(fname.c_str(), "rb"));
|
||||
contexts.emplace_back(ctx);
|
||||
|
||||
uint16_t n_split = 0;
|
||||
get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
|
||||
|
||||
@@ -3091,13 +3082,12 @@ struct llama_model_loader {
|
||||
throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path));
|
||||
}
|
||||
|
||||
files.emplace_back(new llama_file(split_path, "rb"));
|
||||
contexts.emplace_back(ctx);
|
||||
|
||||
// Save tensors data offset info of the shard.
|
||||
for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
|
||||
weights.emplace_back(files.back().get(), idx, cur->name, ctx_gguf, cur);
|
||||
weights.emplace_back(idx, cur->name, ctx_gguf, cur);
|
||||
}
|
||||
files.emplace_back(new llama_file(split_path, "rb"));
|
||||
contexts.emplace_back(ctx);
|
||||
|
||||
gguf_free(ctx_gguf);
|
||||
}
|
||||
@@ -3228,7 +3218,6 @@ struct llama_model_loader {
|
||||
}
|
||||
|
||||
this->use_mmap = use_mmap;
|
||||
this->check_tensors = check_tensors;
|
||||
}
|
||||
|
||||
~llama_model_loader() {
|
||||
@@ -3487,10 +3476,6 @@ struct llama_model_loader {
|
||||
file->seek(w.offs, SEEK_SET);
|
||||
file->read_raw(cur->data, ggml_nbytes(cur));
|
||||
}
|
||||
|
||||
if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) {
|
||||
throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
|
||||
}
|
||||
}
|
||||
|
||||
size_t size_done = 0;
|
||||
@@ -3507,8 +3492,6 @@ struct llama_model_loader {
|
||||
GGML_ASSERT(size_data != 0 && "call init_mappings() first");
|
||||
|
||||
std::vector<no_init<uint8_t>> read_buf;
|
||||
std::vector<std::future<std::pair<ggml_tensor *, bool>>> validation_result;
|
||||
|
||||
for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
|
||||
const auto * weight = get_weight(ggml_get_name(cur));
|
||||
if (weight == nullptr) {
|
||||
@@ -3530,66 +3513,37 @@ struct llama_model_loader {
|
||||
if (bufs_mmap.count(weight->idx)) {
|
||||
buf_mmap = bufs_mmap.at(weight->idx);
|
||||
}
|
||||
uint8_t * data = (uint8_t *) mapping->addr + weight->offs;
|
||||
|
||||
if (check_tensors) {
|
||||
validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
|
||||
return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size));
|
||||
}));
|
||||
}
|
||||
|
||||
GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
|
||||
if (buf_mmap && cur->data == nullptr) {
|
||||
ggml_backend_tensor_alloc(buf_mmap, cur, data);
|
||||
ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + weight->offs);
|
||||
if (lmlocks) {
|
||||
const auto & lmlock = lmlocks->at(weight->idx);
|
||||
lmlock->grow_to(weight->offs + n_size);
|
||||
lmlock->grow_to(weight->offs + ggml_nbytes(cur));
|
||||
}
|
||||
|
||||
auto & mmap_used = mmaps_used[weight->idx];
|
||||
mmap_used.first = std::min(mmap_used.first, weight->offs);
|
||||
mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
|
||||
} else {
|
||||
ggml_backend_tensor_set(cur, data, 0, n_size);
|
||||
ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + weight->offs, 0, n_size);
|
||||
}
|
||||
} else {
|
||||
GGML_ASSERT(weight->idx < files.size());
|
||||
const auto & file = files.at(weight->idx);
|
||||
if (ggml_backend_buffer_is_host(cur->buffer)) {
|
||||
file->seek(weight->offs, SEEK_SET);
|
||||
file->read_raw(cur->data, n_size);
|
||||
if (check_tensors) {
|
||||
validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
|
||||
return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size));
|
||||
}));
|
||||
}
|
||||
file->read_raw(cur->data, ggml_nbytes(cur));
|
||||
} else {
|
||||
read_buf.resize(n_size);
|
||||
read_buf.resize(ggml_nbytes(cur));
|
||||
file->seek(weight->offs, SEEK_SET);
|
||||
file->read_raw(read_buf.data(), n_size);
|
||||
file->read_raw(read_buf.data(), ggml_nbytes(cur));
|
||||
ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
|
||||
if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
|
||||
throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
size_done += n_size;
|
||||
}
|
||||
|
||||
// check validation results
|
||||
bool validation_failed = false;
|
||||
for (auto & future : validation_result) {
|
||||
auto result = future.get();
|
||||
if (!result.second) {
|
||||
LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first));
|
||||
validation_failed = true;
|
||||
}
|
||||
}
|
||||
if (validation_failed) {
|
||||
throw std::runtime_error("found tensors with invalid data");
|
||||
}
|
||||
|
||||
// check if this is the last call and do final cleanup
|
||||
if (size_done >= size_data) {
|
||||
// unmap offloaded tensors and metadata
|
||||
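The check_tensors branch above overlaps validation with I/O: each mmap'd or freshly read tensor is checked on a std::async task, and the futures are only drained after the copy loop, so a single failing tensor still surfaces as one exception. A self-contained sketch of that defer-then-join pattern, with generic check callbacks instead of tensors (all names here are illustrative):

// Sketch of the deferred-validation pattern: launch each check asynchronously,
// keep doing other work, then join all futures and fail once at the end.
#include <functional>
#include <future>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

void run_checks(const std::vector<std::pair<std::string, std::function<bool()>>> & checks) {
    std::vector<std::future<std::pair<std::string, bool>>> results;
    for (const auto & c : checks) {
        results.emplace_back(std::async(std::launch::async, [c] {
            return std::make_pair(c.first, c.second());   // run the check off the main thread
        }));
    }
    bool failed = false;
    for (auto & f : results) {
        auto r = f.get();                                 // join; rethrows exceptions from the task
        if (!r.second) {
            failed = true;                                // remember, but keep draining the rest
        }
    }
    if (failed) {
        throw std::runtime_error("found items with invalid data");
    }
}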
@@ -6016,7 +5970,7 @@ static bool llm_load_tensors(
// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
try {
llama_model_loader ml(fname, params.use_mmap, params.check_tensors, params.kv_overrides);
llama_model_loader ml(fname, params.use_mmap, params.kv_overrides);

model.hparams.vocab_only = params.vocab_only;

@@ -11473,10 +11427,6 @@ static int llama_decode_internal(
}
}

// Reset state for the next token before backend sync, to allow the CPU activities in the reset to
// overlap with device computation.
ggml_backend_sched_reset(lctx.sched);

return 0;
}

@@ -14405,20 +14355,14 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
}

static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
if (nthread < 2) {
// single-thread
size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
if (!ggml_validate_row_data(new_type, new_data, new_size)) {
throw std::runtime_error("quantized data validation failed");
}
return new_size;
}

std::mutex mutex;
int64_t counter = 0;
size_t new_size = 0;
bool valid = true;
auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size,
if (nthread < 2) {
// single-thread
return ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
}
auto compute = [&mutex, &counter, &new_size, new_type, f32_data, new_data, chunk_size,
nrows, n_per_row, imatrix]() {
const int64_t nrows_per_chunk = chunk_size / n_per_row;
size_t local_size = 0;
@@ -14433,17 +14377,7 @@ static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const floa
}
lock.unlock();
const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
local_size += this_size;

// validate the quantized data
const size_t row_size = ggml_row_size(new_type, n_per_row);
void * this_data = (char *) new_data + first_row * row_size;
if (!ggml_validate_row_data(new_type, this_data, this_size)) {
std::unique_lock<std::mutex> lock(mutex);
valid = false;
break;
}
local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
}
};
for (int it = 0; it < nthread - 1; ++it) {
@@ -14452,9 +14386,6 @@ static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const floa
compute();
for (auto & w : workers) { w.join(); }
workers.clear();
if (!valid) {
throw std::runtime_error("quantized data validation failed");
}
return new_size;
}
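On the multi-threaded path above, work is handed out in chunks of chunk_size / n_per_row rows through a mutex-protected counter; every worker (including the calling thread) loops, claiming the next chunk until the rows run out. A stripped-down sketch of that dispatch loop, with the actual quantization call replaced by a placeholder:

// Sketch of the chunked worker loop used by llama_tensor_quantize_internal:
// a shared counter hands out row chunks; each thread loops until rows run out.
#include <algorithm>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

void run_chunked(int64_t nrows, int64_t nrows_per_chunk, int nthread) {
    std::mutex mutex;
    int64_t counter = 0;
    auto compute = [&]() {
        while (true) {
            std::unique_lock<std::mutex> lock(mutex);
            int64_t first_row = counter;           // claim the next chunk
            counter += nrows_per_chunk;
            if (first_row >= nrows) {
                break;                             // nothing left to do
            }
            lock.unlock();
            int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
            (void) this_nrow;                      // placeholder: process rows [first_row, first_row + this_nrow)
        }
    };
    std::vector<std::thread> workers;
    for (int it = 0; it < nthread - 1; ++it) {
        workers.emplace_back(compute);
    }
    compute();                                     // the calling thread also takes part
    for (auto & w : workers) { w.join(); }
}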
@@ -14517,7 +14448,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
auto v = (std::vector<llama_model_kv_override>*)params->kv_overrides;
kv_overrides = v->data();
}
llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
llama_model_loader ml(fname_inp, use_mmap, kv_overrides);
ml.init_mappings(false); // no prefetching

llama_model model;
@@ -14555,13 +14486,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
for (auto & o : overrides) {
if (o.key[0] == 0) break;
if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
gguf_set_val_f32(ctx_out, o.key, o.val_f64);
gguf_set_val_f32(ctx_out, o.key, o.float_value);
} else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
gguf_set_val_i32(ctx_out, o.key, o.val_i64);
gguf_set_val_i32(ctx_out, o.key, o.int_value);
} else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
gguf_set_val_bool(ctx_out, o.key, o.val_bool);
} else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
gguf_set_val_str(ctx_out, o.key, o.val_str);
gguf_set_val_bool(ctx_out, o.key, o.bool_value);
} else {
LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
}
@@ -14880,7 +14809,7 @@ static int llama_apply_lora_from_file_internal(
std::unique_ptr<llama_model_loader> ml;
if (path_base_model) {
LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*check_tensors*/ false, /*kv_overrides*/ nullptr));
ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr));
ml->init_mappings(/*prefetch*/ false); // no prefetching
}

@@ -15139,7 +15068,6 @@ struct llama_model_params llama_model_default_params() {
/*.vocab_only =*/ false,
/*.use_mmap =*/ true,
/*.use_mlock =*/ false,
/*.check_tensors =*/ false,
};

#ifdef GGML_USE_METAL
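For orientation, on the side of this comparison that still carries the flag (b2751), check_tensors is opt-in through llama_model_params. A minimal sketch of turning it on; the model path is a placeholder and the surrounding boilerplate is illustrative, not taken from this diff:

// Sketch (b2751 API): ask the loader to validate tensor data while the model loads.
#include "llama.h"

int main(void) {
    llama_backend_init();
    llama_model_params mparams = llama_model_default_params();
    mparams.check_tensors = true;                                             // validate model tensor data
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);  // "model.gguf" is a placeholder path
    if (model == NULL) {
        llama_backend_free();
        return 1;                                                             // load (or validation) failed
    }
    llama_free_model(model);
    llama_backend_free();
    return 0;
}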
@@ -16198,8 +16126,6 @@ struct llama_data_file_context : llama_data_context {
*
*/
static void llama_state_get_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
llama_synchronize(ctx);

// copy rng
{
std::ostringstream rng_ss;
@@ -16352,8 +16278,6 @@ size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst) {

// Sets the state reading from the specified source address
size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src) {
llama_synchronize(ctx);

const uint8_t * inp = src;

// set rng
@@ -16658,8 +16582,6 @@ size_t llama_state_seq_get_size(struct llama_context* ctx, llama_seq_id seq_id)
}

static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_context & data_ctx, llama_seq_id seq_id) {
llama_synchronize(ctx);

const auto & kv_self = ctx->kv_self;
GGML_ASSERT(!kv_self.recurrent); // not implemented

@@ -16777,8 +16699,6 @@ size_t llama_state_seq_get_data(struct llama_context* ctx, uint8_t* dst, llama_s
}

size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, llama_seq_id dest_seq_id) {
llama_synchronize(ctx);

auto & kv_self = ctx->kv_self;
GGML_ASSERT(!kv_self.recurrent); // not implemented

@@ -17720,11 +17640,6 @@ const char * llama_print_system_info(void) {
s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
#ifdef GGML_USE_LLAMAFILE
s += "LAMMAFILE = 1 | ";
#else
s += "LAMMAFILE = 0 | ";
#endif

return s.c_str();
}
19
llama.h
@@ -195,19 +195,15 @@ extern "C" {
LLAMA_KV_OVERRIDE_TYPE_INT,
LLAMA_KV_OVERRIDE_TYPE_FLOAT,
LLAMA_KV_OVERRIDE_TYPE_BOOL,
LLAMA_KV_OVERRIDE_TYPE_STR,
};

struct llama_model_kv_override {
enum llama_model_kv_override_type tag;

char key[128];

enum llama_model_kv_override_type tag;
union {
int64_t val_i64;
double val_f64;
bool val_bool;
char val_str[128];
int64_t int_value;
double float_value;
bool bool_value;
};
};
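As a usage note, the override list that llama_model_params.kv_overrides points at is terminated by an entry whose key is an empty string (the o.key[0] == 0 check in the llama.cpp hunk above). A short sketch of building one integer override with the b2751 field names (val_i64); the key string is a placeholder:

// Sketch: one KV override plus the empty-key terminator (b2751 field names).
#include "llama.h"
#include <cstring>
#include <vector>

std::vector<llama_model_kv_override> make_overrides() {
    std::vector<llama_model_kv_override> kvo(2);                               // zero-initialized entries
    std::strncpy(kvo[0].key, "some.metadata.key", sizeof(kvo[0].key) - 1);     // placeholder key name
    kvo[0].tag     = LLAMA_KV_OVERRIDE_TYPE_INT;
    kvo[0].val_i64 = 8;
    kvo[1].key[0]  = 0;                                                        // empty key ends the list
    return kvo;  // pass kvo.data() via llama_model_params::kv_overrides
}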
@@ -236,10 +232,9 @@ extern "C" {
const struct llama_model_kv_override * kv_overrides;

// Keep the booleans together to avoid misalignment during copy-by-value.
bool vocab_only; // only load the vocabulary, no weights
bool use_mmap; // use mmap if possible
bool use_mlock; // force system to keep model in RAM
bool check_tensors; // validate model tensor data
bool vocab_only; // only load the vocabulary, no weights
bool use_mmap; // use mmap if possible
bool use_mlock; // force system to keep model in RAM
};

struct llama_context_params {
170
sgemm.cpp
@@ -50,6 +50,7 @@
#pragma GCC diagnostic ignored "-Wignored-attributes"

#include "sgemm.h"
#include <algorithm>
#include "ggml-impl.h"
#include "ggml-quants.h"

@@ -242,23 +243,23 @@ template <> inline __m512 load(const ggml_fp16_t *p) {
template <int KN, typename D, typename V, typename TA, typename TB, typename TC>
class tinyBLAS {
public:
tinyBLAS(int64_t k,
const TA *A, int64_t lda,
const TB *B, int64_t ldb,
TC *C, int64_t ldc,
tinyBLAS(int k,
const TA *A, int lda,
const TB *B, int ldb,
TC *C, int ldc,
int ith, int nth)
: A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
}

void matmul(int64_t m, int64_t n, int task) {
void matmul(int m, int n, int task) {
if (task == GGML_TASK_TYPE_COMPUTE)
mnpack(0, m, 0, n);
}

private:
NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t mc, nc, mp, np;
switch ((MIN(m - m0, 5) << 4) | MIN(n - n0, 5)) {
NOINLINE void mnpack(int m0, int m, int n0, int n) {
int mc, nc, mp, np;
switch ((std::min(m - m0, 5) << 4) | std::min(n - n0, 5)) {
#if VECTOR_REGISTERS == 32
case 0x55:
mc = 5;
@@ -408,27 +409,27 @@ class tinyBLAS {
}

template <int RM, int RN>
NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t ytiles = (m - m0) / RM;
int64_t xtiles = (n - n0) / RN;
int64_t tiles = xtiles * ytiles;
int64_t duty = (tiles + nth - 1) / nth;
int64_t start = duty * ith;
int64_t end = start + duty;
NOINLINE void gemm(int m0, int m, int n0, int n) {
int ytiles = (m - m0) / RM;
int xtiles = (n - n0) / RN;
int tiles = xtiles * ytiles;
int duty = (tiles + nth - 1) / nth;
int start = duty * ith;
int end = start + duty;
if (end > tiles)
end = tiles;
for (int64_t job = start; job < end; ++job) {
int64_t ii = m0 + job / xtiles * RM;
int64_t jj = n0 + job % xtiles * RN;
for (int job = start; job < end; ++job) {
int ii = m0 + job / xtiles * RM;
int jj = n0 + job % xtiles * RN;
D Cv[RN][RM] = {};
for (int64_t l = 0; l < k; l += KN)
for (int64_t j = 0; j < RN; ++j)
for (int64_t i = 0; i < RM; ++i)
for (int l = 0; l < k; l += KN)
for (int j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i)
Cv[j][i] = madd(load<V>(A + lda * (ii + i) + l),
load<V>(B + ldb * (jj + j) + l),
Cv[j][i]);
for (int64_t j = 0; j < RN; ++j)
for (int64_t i = 0; i < RM; ++i)
for (int j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i)
C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
}
}
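Both sides of the gemm hunk above share the same work split: the output is cut into RM x RN tiles, duty = (tiles + nth - 1) / nth tiles go to each thread, and thread ith walks tile indices [duty * ith, min(duty * (ith + 1), tiles)). A sketch of just that partitioning arithmetic, without the SIMD inner loops; RM/RN are plain parameters here rather than template arguments, and the per-tile body is a placeholder:

// Sketch of the tile partitioning used by tinyBLAS::gemm (indexing only, no SIMD).
#include <cstdint>

void partition(int64_t m0, int64_t m, int64_t n0, int64_t n,
               int64_t RM, int64_t RN, int ith, int nth) {
    int64_t ytiles = (m - m0) / RM;            // tiles along the rows of C
    int64_t xtiles = (n - n0) / RN;            // tiles along the columns of C
    int64_t tiles  = xtiles * ytiles;
    int64_t duty   = (tiles + nth - 1) / nth;  // ceil(tiles / nth) tiles per thread
    int64_t start  = duty * ith;
    int64_t end    = start + duty;
    if (end > tiles) end = tiles;
    for (int64_t job = start; job < end; ++job) {
        int64_t ii = m0 + job / xtiles * RM;   // top row of this tile
        int64_t jj = n0 + job % xtiles * RN;   // left column of this tile
        (void) ii; (void) jj;                  // placeholder: compute the RM x RN tile of C at (ii, jj)
    }
}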
@@ -436,10 +437,10 @@ class tinyBLAS {
const TA *const A;
const TB *const B;
TC *const C;
const int64_t k;
const int64_t lda;
const int64_t ldb;
const int64_t ldc;
const int k;
const int lda;
const int ldb;
const int ldc;
const int ith;
const int nth;
};
@@ -451,23 +452,23 @@ class tinyBLAS {
template <typename TA>
class tinyBLAS_Q0_ARM {
public:
tinyBLAS_Q0_ARM(int64_t k,
const TA *A, int64_t lda,
const block_q8_0 *B, int64_t ldb,
float *C, int64_t ldc,
tinyBLAS_Q0_ARM(int k,
const TA *A, int lda,
const block_q8_0 *B, int ldb,
float *C, int ldc,
int ith, int nth)
: A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
}

void matmul(int64_t m, int64_t n, int task) {
void matmul(int m, int n, int task) {
if (task == GGML_TASK_TYPE_COMPUTE)
mnpack(0, m, 0, n);
}

private:
NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t mc, nc, mp, np;
switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3ll)) {
NOINLINE void mnpack(int m0, int m, int n0, int n) {
int mc, nc, mp, np;
switch ((std::min(m - m0, 3) << 4) | std::min(n - n0, 3)) {
case 0x33:
mc = 3;
nc = 3;
@@ -523,22 +524,22 @@ class tinyBLAS_Q0_ARM {
}

template <int RM, int RN>
NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t ytiles = (m - m0) / RM;
int64_t xtiles = (n - n0) / RN;
int64_t tiles = xtiles * ytiles;
int64_t duty = (tiles + nth - 1) / nth;
int64_t start = duty * ith;
int64_t end = start + duty;
NOINLINE void gemm(int m0, int m, int n0, int n) {
int ytiles = (m - m0) / RM;
int xtiles = (n - n0) / RN;
int tiles = xtiles * ytiles;
int duty = (tiles + nth - 1) / nth;
int start = duty * ith;
int end = start + duty;
if (end > tiles)
end = tiles;
for (int64_t job = start; job < end; ++job) {
int64_t ii = m0 + job / xtiles * RM;
int64_t jj = n0 + job % xtiles * RN;
for (int job = start; job < end; ++job) {
int ii = m0 + job / xtiles * RM;
int jj = n0 + job % xtiles * RN;
float32x4_t Cv[RN][RM] = {};
for (int64_t l = 0; l < k; ++l)
for (int64_t j = 0; j < RN; ++j)
for (int64_t i = 0; i < RM; ++i)
for (int l = 0; l < k; ++l)
for (int j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i)
Cv[j][i] = vmlaq_n_f32(Cv[j][i],
vcvtq_f32_s32(vdotq_s32(
vdotq_s32(vdupq_n_s32(0),
@@ -548,8 +549,8 @@ class tinyBLAS_Q0_ARM {
load_hi(B + ldb * (jj + j) + l))),
unhalf(A[lda * (ii + i) + l].d) *
unhalf(B[ldb * (jj + j) + l].d));
for (int64_t j = 0; j < RN; ++j)
for (int64_t i = 0; i < RM; ++i)
for (int j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i)
C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
}
}
@@ -576,10 +577,10 @@ class tinyBLAS_Q0_ARM {
const TA *const A;
const block_q8_0 *const B;
float *const C;
const int64_t k;
const int64_t lda;
const int64_t ldb;
const int64_t ldc;
const int k;
const int lda;
const int ldb;
const int ldc;
const int ith;
const int nth;
};
@@ -589,23 +590,23 @@ class tinyBLAS_Q0_ARM {
template <typename TA, typename TB, typename TC>
class tinyBLAS_Q0_AVX2 {
public:
tinyBLAS_Q0_AVX2(int64_t k,
const TA *A, int64_t lda,
const TB *B, int64_t ldb,
TC *C, int64_t ldc,
tinyBLAS_Q0_AVX2(int k,
const TA *A, int lda,
const TB *B, int ldb,
TC *C, int ldc,
int ith, int nth)
: A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
}

void matmul(int64_t m, int64_t n, int task) {
void matmul(int m, int n, int task) {
if (task == GGML_TASK_TYPE_COMPUTE)
mnpack(0, m, 0, n);
}

private:
void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t mc, nc, mp, np;
switch ((MIN(m - m0, 4) << 4) | MIN(n - n0, 4)) {
void mnpack(int m0, int m, int n0, int n) {
int mc, nc, mp, np;
switch ((std::min(m - m0, 4) << 4) | std::min(n - n0, 4)) {
#if VECTOR_REGISTERS == 32
case 0x44:
mc = 4;
@@ -713,22 +714,22 @@ class tinyBLAS_Q0_AVX2 {
}

template <int RM, int RN>
NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t ytiles = (m - m0) / RM;
int64_t xtiles = (n - n0) / RN;
int64_t tiles = xtiles * ytiles;
int64_t duty = (tiles + nth - 1) / nth;
int64_t start = duty * ith;
int64_t end = start + duty;
NOINLINE void gemm(int m0, int m, int n0, int n) {
int ytiles = (m - m0) / RM;
int xtiles = (n - n0) / RN;
int tiles = xtiles * ytiles;
int duty = (tiles + nth - 1) / nth;
int start = duty * ith;
int end = start + duty;
if (end > tiles)
end = tiles;
for (int64_t job = start; job < end; ++job) {
int64_t ii = m0 + job / xtiles * RM;
int64_t jj = n0 + job % xtiles * RN;
for (int job = start; job < end; ++job) {
int ii = m0 + job / xtiles * RM;
int jj = n0 + job % xtiles * RN;
__m256 Cv[RN][RM] = {};
for (int64_t l = 0; l < k; ++l)
for (int64_t j = 0; j < RN; ++j)
for (int64_t i = 0; i < RM; ++i)
for (int l = 0; l < k; ++l)
for (int j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i)
Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) *
unhalf(B[ldb * (jj + j) + l].d)),
updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
@@ -736,8 +737,8 @@ class tinyBLAS_Q0_AVX2 {
_mm256_sign_epi8(load(B + ldb * (jj + j) + l),
load(A + lda * (ii + i) + l))),
Cv[j][i]);
for (int64_t j = 0; j < RN; ++j)
for (int64_t i = 0; i < RM; ++i)
for (int j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i)
C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
}
}
@@ -770,10 +771,10 @@ class tinyBLAS_Q0_AVX2 {
const TA *const A;
const TB *const B;
TC *const C;
const int64_t k;
const int64_t lda;
const int64_t ldb;
const int64_t ldc;
const int k;
const int lda;
const int ldb;
const int ldc;
const int ith;
const int nth;
};
@@ -812,8 +813,8 @@ class tinyBLAS_Q0_AVX2 {
* @param Ctype is GGML data type of `C`
* @return true if this function was able to service the matmul request
*/
bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda, const void *B, int64_t ldb, void *C,
int64_t ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype) {
bool llamafile_sgemm(int m, int n, int k, const void *A, int lda, const void *B, int ldb, void *C,
int ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype) {

assert(m >= 0);
assert(n >= 0);
@@ -823,6 +824,9 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
assert(ldc >= m);
assert(nth > 0);
assert(ith < nth);
assert(1ll * lda * m <= 0x7fffffff);
assert(1ll * ldb * n <= 0x7fffffff);
assert(1ll * ldc * n <= 0x7fffffff);

if (Ctype != GGML_TYPE_F32)
return false;
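The three asserts added in this hunk bound the index arithmetic once the interface goes back to plain int: the largest offsets the kernels form are on the order of lda*m, ldb*n and ldc*n, and each product is checked against 0x7fffffff (INT32_MAX) using 64-bit arithmetic before any int indexing runs. The same guard written as a standalone helper; the name and bool-returning style are illustrative:

// Sketch: reject shapes whose index arithmetic would overflow 32-bit ints,
// mirroring the asserts added above (0x7fffffff == INT32_MAX).
#include <cstdint>

static bool sgemm_shape_fits_int32(int64_t m, int64_t n, int64_t lda, int64_t ldb, int64_t ldc) {
    return lda * m <= INT32_MAX &&   // largest offset into A
           ldb * n <= INT32_MAX &&   // largest offset into B
           ldc * n <= INT32_MAX;     // largest offset into C
}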
6
sgemm.h
@@ -1,13 +1,11 @@
#pragma once
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif

bool llamafile_sgemm(int64_t, int64_t, int64_t, const void *, int64_t,
const void *, int64_t, void *, int64_t, int, int,
int, int, int, int);
bool llamafile_sgemm(int, int, int, const void *, int, const void *, int,
void *, int, int, int, int, int, int, int);

#ifdef __cplusplus
}