Mirror of https://github.com/ggerganov/llama.cpp.git
Compare commits: gguf-write...master-b77 (20 commits)
| SHA1 |
|------------|
| b772bba42e |
| 0728c5a8b9 |
| 1215ed7d5c |
| 2dbf518911 |
| 9d2382b3e4 |
| a113689571 |
| 11f3ca06b8 |
| 9baf9ef304 |
| 8a88e5855c |
| a9559bf77b |
| ee1b497c98 |
| d73b8d48b4 |
| 34ae1caf7f |
| d91f3f0c55 |
| 65cdf34bdc |
| edcc7ae7d2 |
| 7c529cede6 |
| 1a941869cb |
| b5472ea0ad |
| 6df1f5940f |
.github/ISSUE_TEMPLATE/custom.md (vendored, new file, 185 lines)
@@ -0,0 +1,185 @@
---
name: Issue and enhancement template
about: Used to report issues and request enhancements for llama.cpp
title: "[User] Insert summary of your issue or enhancement."
labels: ''
assignees: ''

---

# Prerequisites

Please answer the following questions for yourself before submitting an issue.

- [ ] I am running the latest code. Development is very rapid, so there are no tagged versions as of now.
- [ ] I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md).
- [ ] I [searched using keywords relevant to my issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/filtering-and-searching-issues-and-pull-requests) to make sure that I am creating a new issue that is not already open (or closed).
- [ ] I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions) and have a new bug or useful enhancement to share.

# Expected Behavior

Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do.

# Current Behavior

Please provide a detailed written description of what `llama.cpp` did instead.

# Environment and Context

Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except under certain specific conditions.

* Physical (or virtual) hardware you are using, e.g. for Linux:

`$ lscpu`

* Operating System, e.g. for Linux:

`$ uname -a`

* SDK version, e.g. for Linux:

```
$ python3 --version
$ make --version
$ g++ --version
```

# Failure Information (for bugs)

Please help provide information about the failure if this is a bug. If it is not a bug, please remove the rest of this template.

# Steps to Reproduce

Please provide detailed steps for reproducing the issue. We are not sitting in front of your screen, so the more detail the better.

1. step 1
2. step 2
3. step 3
4. etc.

# Failure Logs

Please include any relevant log snippets or files. If it works under one configuration but not under another, please provide logs for both configurations and their corresponding outputs so it is easy to see where behavior changes.

Also, please try to **avoid using screenshots** if at all possible. Instead, copy/paste the console output and use [GitHub's markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) to cleanly format your logs for easy readability.

Example environment info:
```
llama.cpp$ git log | head -1
commit 2af23d30434a677c6416812eea52ccc0af65119c

llama.cpp$ lscpu | egrep "AMD|Flags"
Vendor ID:       AuthenticAMD
Model name:      AMD Ryzen Threadripper 1950X 16-Core Processor
Flags:           fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid amd_dcm aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb hw_pstate ssbd ibpb vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 xsaves clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif overflow_recov succor smca sme sev
Virtualization:  AMD-V

llama.cpp$ python3 --version
Python 3.10.9

llama.cpp$ pip list | egrep "torch|numpy|sentencepiece"
numpy         1.24.2
numpydoc      1.5.0
sentencepiece 0.1.97
torch         1.13.1
torchvision   0.14.1

llama.cpp$ make --version | head -1
GNU Make 4.3

$ md5sum ./models/65B/ggml-model-q4_0.bin
dbdd682cce80e2d6e93cefc7449df487  ./models/65B/ggml-model-q4_0.bin
```

Example run with the Linux command [perf](https://www.brendangregg.com/perf.html)
```
llama.cpp$ perf stat ./main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p "Please close your issue when it has been answered."
main: seed = 1679149377
llama_model_load: loading model from './models/65B/ggml-model-q4_0.bin' - please wait ...
llama_model_load: n_vocab = 32000
llama_model_load: n_ctx   = 512
llama_model_load: n_embd  = 8192
llama_model_load: n_mult  = 256
llama_model_load: n_head  = 64
llama_model_load: n_layer = 80
llama_model_load: n_rot   = 128
llama_model_load: f16     = 2
llama_model_load: n_ff    = 22016
llama_model_load: n_parts = 8
llama_model_load: ggml ctx size = 41477.73 MB
llama_model_load: memory_size = 2560.00 MB, n_mem = 40960
llama_model_load: loading model part 1/8 from './models/65B/ggml-model-q4_0.bin'
llama_model_load: .......................................................................................... done
llama_model_load: model size =  4869.09 MB / num tensors = 723
llama_model_load: loading model part 2/8 from './models/65B/ggml-model-q4_0.bin.1'
llama_model_load: .......................................................................................... done
llama_model_load: model size =  4869.09 MB / num tensors = 723
llama_model_load: loading model part 3/8 from './models/65B/ggml-model-q4_0.bin.2'
llama_model_load: .......................................................................................... done
llama_model_load: model size =  4869.09 MB / num tensors = 723
llama_model_load: loading model part 4/8 from './models/65B/ggml-model-q4_0.bin.3'
llama_model_load: .......................................................................................... done
llama_model_load: model size =  4869.09 MB / num tensors = 723
llama_model_load: loading model part 5/8 from './models/65B/ggml-model-q4_0.bin.4'
llama_model_load: .......................................................................................... done
llama_model_load: model size =  4869.09 MB / num tensors = 723
llama_model_load: loading model part 6/8 from './models/65B/ggml-model-q4_0.bin.5'
llama_model_load: .......................................................................................... done
llama_model_load: model size =  4869.09 MB / num tensors = 723
llama_model_load: loading model part 7/8 from './models/65B/ggml-model-q4_0.bin.6'
llama_model_load: .......................................................................................... done
llama_model_load: model size =  4869.09 MB / num tensors = 723
llama_model_load: loading model part 8/8 from './models/65B/ggml-model-q4_0.bin.7'
llama_model_load: .......................................................................................... done
llama_model_load: model size =  4869.09 MB / num tensors = 723

system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |

main: prompt: 'Please close your issue when it has been answered.'
main: number of tokens in prompt = 11
     1 -> ''
 12148 -> 'Please'
  3802 -> ' close'
   596 -> ' your'
  2228 -> ' issue'
   746 -> ' when'
   372 -> ' it'
   756 -> ' has'
  1063 -> ' been'
  7699 -> ' answered'
 29889 -> '.'

sampling parameters: temp = 0.800000, top_k = 40, top_p = 0.950000, repeat_last_n = 64, repeat_penalty = 1.300000


Please close your issue when it has been answered.
@duncan-donut: I'm trying to figure out what kind of "support" you need for this script and why, exactly? Is there a question about how the code works that hasn't already been addressed in one or more comments below this ticket, or are we talking something else entirely like some sorta bugfixing job because your server setup is different from mine??
I can understand if your site needs to be running smoothly and you need help with a fix of sorts but there should really be nothing wrong here that the code itself could not handle. And given that I'm getting reports about how it works perfectly well on some other servers, what exactly are we talking? A detailed report will do wonders in helping us get this resolved for ya quickly so please take your time and describe the issue(s) you see as clearly & concisely as possible!!
@duncan-donut: I'm not sure if you have access to cPanel but you could try these instructions. It is worth a shot! Let me know how it goes (or what error message, exactly!) when/if ya give that code a go? [end of text]


main: mem per token = 71159620 bytes
main:     load time = 19309.95 ms
main:   sample time =   168.62 ms
main:  predict time = 223895.61 ms / 888.47 ms per token
main:    total time = 246406.42 ms

 Performance counter stats for './main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p Please close your issue when it has been answered.':

       3636882.89 msec task-clock                #   14.677 CPUs utilized
            13509      context-switches          #    3.714 /sec
             2436      cpu-migrations            #    0.670 /sec
         10476679      page-faults               #    2.881 K/sec
   13133115082869      cycles                    #    3.611 GHz                      (16.77%)
      29314462753      stalled-cycles-frontend   #    0.22% frontend cycles idle     (16.76%)
   10294402631459      stalled-cycles-backend    #   78.39% backend cycles idle      (16.74%)
   23479217109614      instructions              #    1.79  insn per cycle
                                                 #    0.44  stalled cycles per insn  (16.76%)
    2353072268027      branches                  #  647.002 M/sec                    (16.77%)
       1998682780      branch-misses             #    0.08% of all branches          (16.76%)

     247.802177522 seconds time elapsed

    3618.573072000 seconds user
      18.491698000 seconds sys
```
.github/workflows/build.yml (vendored, new file, 632 lines)
@@ -0,0 +1,632 @@
name: CI

on:
  workflow_dispatch: # allows manual triggering
    inputs:
      create_release:
        description: 'Create new release'
        required: true
        type: boolean
  push:
    branches:
      - master
    paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu']
  pull_request:
    types: [opened, synchronize, reopened]
    paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu']

env:
  BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
  GGML_NLOOP: 3
  GGML_NITER: 1
  GGML_N_THREADS: 1

jobs:
  ubuntu-focal-make:
    runs-on: ubuntu-20.04

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential gcc-8

      - name: Build
        id: make_build
        run: |
          CC=gcc-8 make

  ubuntu-latest-cmake:
    runs-on: ubuntu-latest

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential

      - name: Build
        id: cmake_build
        run: |
          mkdir build
          cd build
          cmake ..
          cmake --build . --config Release

      - name: Test
        id: cmake_test
        run: |
          cd build
          ctest --verbose --timeout 900

  ubuntu-latest-cmake-sanitizer:
    runs-on: ubuntu-latest

    continue-on-error: true

    strategy:
      matrix:
        sanitizer: [ADDRESS, THREAD, UNDEFINED]
        build_type: [Debug, Release]

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential

      - name: Build
        id: cmake_build
        run: |
          mkdir build
          cd build
          cmake .. -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
          cmake --build . --config ${{ matrix.build_type }}

      - name: Test
        id: cmake_test
        run: |
          cd build
          ctest --verbose --timeout 900

  ubuntu-latest-cmake-mpi:
    runs-on: ubuntu-latest

    continue-on-error: true

    strategy:
      matrix:
        mpi_library: [mpich, libopenmpi-dev]

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential ${{ matrix.mpi_library }}

      - name: Build
        id: cmake_build
        run: |
          mkdir build
          cd build
          cmake -DLLAMA_MPI=ON ..
          cmake --build . --config Release

      - name: Test
        id: cmake_test
        run: |
          cd build
          ctest --verbose

  macOS-latest-make:
    runs-on: macos-latest

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Dependencies
        id: depends
        continue-on-error: true
        run: |
          brew update

      - name: Build
        id: make_build
        run: |
          make

  macOS-latest-cmake:
    runs-on: macos-latest

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Dependencies
        id: depends
        continue-on-error: true
        run: |
          brew update

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          mkdir build
          cd build
          cmake -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF ..
          cmake --build . --config Release

      - name: Test
        id: cmake_test
        run: |
          cd build
          ctest --verbose --timeout 900

  windows-latest-cmake:
    runs-on: windows-latest

    env:
      OPENBLAS_VERSION: 0.3.23
      OPENCL_VERSION: 2023.04.17
      CLBLAST_VERSION: 1.6.0

    strategy:
      matrix:
        include:
          - build: 'noavx'
            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF'
          - build: 'avx2'
            defines: '-DLLAMA_BUILD_SERVER=ON'
          - build: 'avx'
            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'
          - build: 'clblast'
            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_CLBLAST=ON -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/clblast"'
          - build: 'openblas'
            defines: '-DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - name: Download OpenCL SDK
        id: get_opencl
        if: ${{ matrix.build == 'clblast' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/opencl.zip -L "https://github.com/KhronosGroup/OpenCL-SDK/releases/download/v${env:OPENCL_VERSION}/OpenCL-SDK-v${env:OPENCL_VERSION}-Win-x64.zip"
          mkdir $env:RUNNER_TEMP/opencl
          tar.exe -xvf $env:RUNNER_TEMP/opencl.zip --strip-components=1 -C $env:RUNNER_TEMP/opencl

      - name: Download CLBlast
        id: get_clblast
        if: ${{ matrix.build == 'clblast' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/clblast.7z -L "https://github.com/CNugteren/CLBlast/releases/download/${env:CLBLAST_VERSION}/CLBlast-${env:CLBLAST_VERSION}-windows-x64.7z"
          curl.exe -o $env:RUNNER_TEMP/CLBlast.LICENSE.txt -L "https://github.com/CNugteren/CLBlast/raw/${env:CLBLAST_VERSION}/LICENSE"
          7z x "-o${env:RUNNER_TEMP}" $env:RUNNER_TEMP/clblast.7z
          rename-item $env:RUNNER_TEMP/CLBlast-${env:CLBLAST_VERSION}-windows-x64 clblast
          foreach ($f in (gci -Recurse -Path "$env:RUNNER_TEMP/clblast" -Filter '*.cmake')) {
            $txt = Get-Content -Path $f -Raw
            $txt.Replace('C:/vcpkg/packages/opencl_x64-windows/', "$($env:RUNNER_TEMP.Replace('\','/'))/opencl/") | Set-Content -Path $f -Encoding UTF8
          }

      - name: Download OpenBLAS
        id: get_openblas
        if: ${{ matrix.build == 'openblas' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/openblas.zip -L "https://github.com/xianyi/OpenBLAS/releases/download/v${env:OPENBLAS_VERSION}/OpenBLAS-${env:OPENBLAS_VERSION}-x64.zip"
          curl.exe -o $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt -L "https://github.com/xianyi/OpenBLAS/raw/v${env:OPENBLAS_VERSION}/LICENSE"
          mkdir $env:RUNNER_TEMP/openblas
          tar.exe -xvf $env:RUNNER_TEMP/openblas.zip -C $env:RUNNER_TEMP/openblas
          $vcdir = $(vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath)
          $msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim()))
          $lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe')
          & $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll

      - name: Build
        id: cmake_build
        run: |
          mkdir build
          cd build
          cmake .. ${{ matrix.defines }}
          cmake --build . --config Release

      - name: Add clblast.dll
        id: add_clblast_dll
        if: ${{ matrix.build == 'clblast' }}
        run: |
          cp $env:RUNNER_TEMP/clblast/lib/clblast.dll ./build/bin/Release
          cp $env:RUNNER_TEMP/CLBlast.LICENSE.txt ./build/bin/Release/CLBlast-${env:CLBLAST_VERSION}.txt

      - name: Add libopenblas.dll
        id: add_libopenblas_dll
        if: ${{ matrix.build == 'openblas' }}
        run: |
          cp $env:RUNNER_TEMP/openblas/bin/libopenblas.dll ./build/bin/Release/openblas.dll
          cp $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt ./build/bin/Release/OpenBLAS-${env:OPENBLAS_VERSION}.txt

      - name: Check AVX512F support
        id: check_avx512f
        if: ${{ matrix.build == 'avx512' }}
        continue-on-error: true
        run: |
          cd build
          $vcdir = $(vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath)
          $msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim()))
          $cl = $(join-path $msvc 'bin\Hostx64\x64\cl.exe')
          echo 'int main(void){unsigned int a[4];__cpuid(a,7);return !(a[1]&65536);}' >> avx512f.c
          & $cl /O2 /GS- /kernel avx512f.c /link /nodefaultlib /entry:main
          .\avx512f.exe && echo "AVX512F: YES" && ( echo HAS_AVX512F=1 >> $env:GITHUB_ENV ) || echo "AVX512F: NO"

      - name: Test
        id: cmake_test
        if: ${{ matrix.build != 'clblast' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }} # Test AVX-512 only when possible
        run: |
          cd build
          ctest -C Release --verbose --timeout 900

      - name: Get commit hash
        id: commit
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: pr-mpt/actions-commit-hash@v2

      - name: Pack artifacts
        id: pack_artifacts
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        run: |
          Copy-Item LICENSE .\build\bin\Release\llama.cpp.txt
          7z a llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-x64.zip .\build\bin\Release\*

      - name: Upload artifacts
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: actions/upload-artifact@v3
        with:
          path: |
            llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-x64.zip

  windows-latest-cmake-cublas:
    runs-on: windows-latest

    strategy:
      matrix:
        cuda: ['12.1.0', '11.7.1']
        build: ['cublas']

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v1

      - uses: Jimver/cuda-toolkit@v0.2.10
        id: cuda-toolkit
        with:
          cuda: ${{ matrix.cuda }}
          # TODO(green-sky): _dev seems to fail, and non dev are not enough
          #sub-packages: '["nvcc", "cudart", "cublas", "cudart_dev", "cublas_dev"]'

      - name: Build
        id: cmake_build
        run: |
          mkdir build
          cd build
          cmake .. -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUBLAS=ON
          cmake --build . --config Release

      - name: Get commit hash
        id: commit
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: pr-mpt/actions-commit-hash@v2

      - name: Pack artifacts
        id: pack_artifacts
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        run: |
          7z a llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip .\build\bin\Release\*

      - name: Upload artifacts
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: actions/upload-artifact@v3
        with:
          path: |
            llama-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip

      - name: Copy and pack CUDA runtime
        if: ${{ matrix.cuda == '12.1.0' }}
        # TODO(green-sky): paths are cuda 12 specific
        run: |
          echo "CUDA install location: ${{ steps.cuda-toolkit.outputs.CUDA_PATH }}"
          mkdir '.\build\bin\cudart\'
          cp "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}\bin\cudart64_12.dll" '.\build\bin\cudart\'
          cp "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}\bin\cublas64_12.dll" '.\build\bin\cudart\'
          cp "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}\bin\cublasLt64_12.dll" '.\build\bin\cudart\'
          7z a cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip .\build\bin\cudart\*

      - name: Copy and pack CUDA runtime
        if: ${{ matrix.cuda == '11.7.1' }}
        # TODO(green-sky): paths are cuda 11 specific
        run: |
          echo "CUDA install location: ${{ steps.cuda-toolkit.outputs.CUDA_PATH }}"
          mkdir '.\build\bin\cudart\'
          ls "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}\bin"
          cp "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}\bin\cudart64_110.dll" '.\build\bin\cudart\'
          cp "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}\bin\cublas64_11.dll" '.\build\bin\cudart\'
          cp "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}\bin\cublasLt64_11.dll" '.\build\bin\cudart\'
          7z a cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip .\build\bin\cudart\*

      - name: Upload CUDA runtime
        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
        uses: actions/upload-artifact@v3
        with:
          path: |
            cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip

  release:
    if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}

    runs-on: ubuntu-latest

    needs:
      - ubuntu-focal-make
      - ubuntu-latest-cmake
      - macOS-latest-make
      - macOS-latest-cmake
      - windows-latest-cmake
      - windows-latest-cmake-cublas

    steps:
      - name: Download artifacts
        id: download-artifact
        uses: actions/download-artifact@v3

      - name: Get commit hash
        id: commit
        uses: pr-mpt/actions-commit-hash@v2

      - name: Create release
        id: create_release
        uses: anzz1/action-create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}

      - name: Upload release
        id: upload_release
        uses: actions/github-script@v3
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const path = require('path');
            const fs = require('fs');
            const release_id = '${{ steps.create_release.outputs.id }}';
            for (let file of await fs.readdirSync('./artifact')) {
              if (path.extname(file) === '.zip') {
                console.log('uploadReleaseAsset', file);
                await github.repos.uploadReleaseAsset({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  release_id: release_id,
                  name: file,
                  data: await fs.readFileSync(`./artifact/${file}`)
                });
              }
            }

#  ubuntu-latest-gcc:
#    runs-on: ubuntu-latest
#
#    strategy:
#      matrix:
#        build: [Debug, Release]
#
#    steps:
#      - name: Clone
#        uses: actions/checkout@v1
#
#      - name: Dependencies
#        run: |
#          sudo apt-get update
#          sudo apt-get install build-essential
#          sudo apt-get install cmake
#
#      - name: Configure
#        run: cmake . -DCMAKE_BUILD_TYPE=${{ matrix.build }}
#
#      - name: Build
#        run: |
#          make
#
#  ubuntu-latest-clang:
#    runs-on: ubuntu-latest
#
#    strategy:
#      matrix:
#        build: [Debug, Release]
#
#    steps:
#      - name: Clone
#        uses: actions/checkout@v1
#
#      - name: Dependencies
#        run: |
#          sudo apt-get update
#          sudo apt-get install build-essential
#          sudo apt-get install cmake
#
#      - name: Configure
#        run: cmake . -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang
#
#      - name: Build
#        run: |
#          make
#
#  ubuntu-latest-gcc-sanitized:
#    runs-on: ubuntu-latest
#
#    strategy:
#      matrix:
#        sanitizer: [ADDRESS, THREAD, UNDEFINED]
#
#    steps:
#      - name: Clone
#        uses: actions/checkout@v1
#
#      - name: Dependencies
#        run: |
#          sudo apt-get update
#          sudo apt-get install build-essential
#          sudo apt-get install cmake
#
#      - name: Configure
#        run: cmake . -DCMAKE_BUILD_TYPE=Debug -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON
#
#      - name: Build
#        run: |
#          make
#
#  windows:
#    runs-on: windows-latest
#
#    strategy:
#      matrix:
#        build: [Release]
#        arch: [Win32, x64]
#        include:
#          - arch: Win32
#            s2arc: x86
#          - arch: x64
#            s2arc: x64
#
#    steps:
#      - name: Clone
#        uses: actions/checkout@v1
#
#      - name: Add msbuild to PATH
#        uses: microsoft/setup-msbuild@v1
#
#      - name: Configure
#        run: >
#          cmake -S . -B ./build -A ${{ matrix.arch }}
#          -DCMAKE_BUILD_TYPE=${{ matrix.build }}
#
#      - name: Build
#        run: |
#          cd ./build
#          msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }}
#
#      - name: Upload binaries
#        uses: actions/upload-artifact@v1
#        with:
#          name: llama-bin-${{ matrix.arch }}
#          path: build/bin/${{ matrix.build }}
#
#  windows-blas:
#    runs-on: windows-latest
#
#    strategy:
#      matrix:
#        build: [Release]
#        arch: [Win32, x64]
#        blas: [ON]
#        include:
#          - arch: Win32
#            obzip: https://github.com/xianyi/OpenBLAS/releases/download/v0.3.21/OpenBLAS-0.3.21-x86.zip
#            s2arc: x86
#          - arch: x64
#            obzip: https://github.com/xianyi/OpenBLAS/releases/download/v0.3.21/OpenBLAS-0.3.21-x64.zip
#            s2arc: x64
#
#    steps:
#      - name: Clone
#        uses: actions/checkout@v1
#
#      - name: Add msbuild to PATH
#        uses: microsoft/setup-msbuild@v1
#
#      - name: Fetch OpenBLAS
#        if: matrix.blas == 'ON'
#        run: |
#          C:/msys64/usr/bin/wget.exe -qO blas.zip ${{ matrix.obzip }}
#          7z x blas.zip -oblas -y
#          copy blas/include/cblas.h .
#          copy blas/include/openblas_config.h .
#          echo "blasdir=$env:GITHUB_WORKSPACE/blas" >> $env:GITHUB_ENV
#
#      - name: Configure
#        run: >
#          cmake -S . -B ./build -A ${{ matrix.arch }}
#          -DCMAKE_BUILD_TYPE=${{ matrix.build }}
#          -DLLAMA_SUPPORT_OPENBLAS=${{ matrix.blas }}
#          -DCMAKE_LIBRARY_PATH="$env:blasdir/lib"
#
#      - name: Build
#        run: |
#          cd ./build
#          msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }}
#
#      - name: Copy libopenblas.dll
#        if: matrix.blas == 'ON'
#        run: copy "$env:blasdir/bin/libopenblas.dll" build/bin/${{ matrix.build }}
#
#      - name: Upload binaries
#        if: matrix.blas == 'ON'
#        uses: actions/upload-artifact@v1
#        with:
#          name: llama-blas-bin-${{ matrix.arch }}
#          path: build/bin/${{ matrix.build }}
#
#  emscripten:
#    runs-on: ubuntu-latest
#
#    strategy:
#      matrix:
#        build: [Release]
#
#    steps:
#      - name: Clone
#        uses: actions/checkout@v1
#
#      - name: Dependencies
#        run: |
#          wget -q https://github.com/emscripten-core/emsdk/archive/master.tar.gz
#          tar -xvf master.tar.gz
#          emsdk-master/emsdk update
#          emsdk-master/emsdk install latest
#          emsdk-master/emsdk activate latest
#
#      - name: Configure
#        run: echo "tmp"
#
#      - name: Build
#        run: |
#          pushd emsdk-master
#          source ./emsdk_env.sh
#          popd
#          emcmake cmake . -DCMAKE_BUILD_TYPE=${{ matrix.build }}
#          make
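The sanitizer job above sets one CMake flag per sanitizer from its matrix. For reference, a local reproduction of one of these CI configurations might look like the following sketch; the flag names are taken directly from the workflow, and the build directory name is arbitrary:

```
# Reproduce the ubuntu-latest-cmake-sanitizer job (AddressSanitizer, Debug)
mkdir build-asan
cd build-asan
cmake .. -DLLAMA_SANITIZE_ADDRESS=ON -DCMAKE_BUILD_TYPE=Debug
cmake --build . --config Debug
ctest --verbose --timeout 900   # same test invocation the workflow uses
```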
.github/workflows/docker.yml (vendored, new file, 65 lines)
@@ -0,0 +1,65 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# GitHub recommends pinning actions to a commit SHA.
# To get a newer version, you will need to update the SHA.
# You can also reference a tag or branch, but the action may change without warning.

name: Publish Docker image

on:
  pull_request:
  push:
    branches:
      - master

jobs:
  push_to_registry:
    name: Push Docker image to Docker Hub
    if: github.event.pull_request.draft == false

    runs-on: ubuntu-latest
    env:
      COMMIT_SHA: ${{ github.sha }}
    strategy:
      matrix:
        config:
          - { tag: "light", dockerfile: ".devops/main.Dockerfile" }
          - { tag: "full", dockerfile: ".devops/full.Dockerfile" }
    steps:
      - name: Check out the repo
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push Docker image (versioned)
        if: github.event_name == 'push'
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          platforms: linux/amd64,linux/arm64
          tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
          file: ${{ matrix.config.dockerfile }}

      - name: Build and push Docker image (tagged)
        uses: docker/build-push-action@v4
        with:
          context: .
          push: ${{ github.event_name == 'push' }}
          platforms: linux/amd64,linux/arm64
          tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}"
          file: ${{ matrix.config.dockerfile }}
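Note that although the job is named after Docker Hub, the login step targets ghcr.io, so the images land on the GitHub Container Registry. Assuming the tags shown in the workflow, pulling them would look like:

```
docker pull ghcr.io/ggerganov/llama.cpp:full
docker pull ghcr.io/ggerganov/llama.cpp:light
```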
.github/workflows/editorconfig.yml (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
name: EditorConfig Checker

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master

jobs:
  editorconfig:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: editorconfig-checker/action-editorconfig-checker@main
      - run: editorconfig-checker
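The check is just the `editorconfig-checker` binary run from the repository root, so the same validation can be done locally before pushing, assuming the checker is installed on your machine:

```
cd llama.cpp
editorconfig-checker
```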
.github/workflows/tidy-post.yml (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
name: clang-tidy review post comments

on:
  workflow_dispatch:
    workflows: ["clang-tidy-review"]
    types:
      - completed

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: ZedThree/clang-tidy-review/post@v0.13.0
        # lgtm_comment_body, max_comments, and annotations need to be set on the posting workflow in a split setup
        with:
          # adjust options as necessary
          lgtm_comment_body: ''
          annotations: false
          max_comments: 25
.github/workflows/tidy-review.yml (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
name: clang-tidy-review

on:
  pull_request:
    branches:
      - master

jobs:
  clang-tidy-review:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - uses: ZedThree/clang-tidy-review@v0.13.0
        id: review
        with:
          lgtm_comment_body: ''
          build_dir: build
          cmake_command: cmake . -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=on
          split_workflow: true

      - uses: ZedThree/clang-tidy-review/upload@v0.13.0
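The review workflow drives clang-tidy through the `cmake_command` shown above, which exports `compile_commands.json`. A rough local equivalent, assuming clang-tidy and LLVM's `run-clang-tidy` wrapper are installed:

```
cmake . -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=on
run-clang-tidy -p build
```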
.gitignore (vendored, 2 changes)
@@ -1,7 +1,6 @@
 *.o
 *.a
 *.so
-*.gguf
 .DS_Store
 .build/
 .cache/
@@ -46,7 +45,6 @@ models-mnt
 /server
 /Pipfile
 /embd-input-test
-/gguf
 /libllama.so
 build-info.h
 arm_neon.h

CMakeLists.txt

@@ -67,11 +67,13 @@ endif()
 option(LLAMA_ACCELERATE              "llama: enable Accelerate framework"             ON)
 option(LLAMA_BLAS                    "llama: use BLAS"                                OFF)
 set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
-option(LLAMA_CUBLAS                  "llama: use cuBLAS"                              OFF)
+option(LLAMA_CUBLAS                  "llama: use CUDA"                                OFF)
+#option(LLAMA_CUDA_CUBLAS            "llama: use cuBLAS for prompt processing"        OFF)
+set(LLAMA_CUDA_MMQ_Y      "64" CACHE STRING "llama: y tile size for mmq CUDA kernels")
 option(LLAMA_CUDA_FORCE_DMMV         "llama: use dmmv instead of mmvq CUDA kernels"   OFF)
 set(LLAMA_CUDA_DMMV_X      "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
 set(LLAMA_CUDA_MMV_Y        "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
-option(LLAMA_CUDA_DMMV_F16           "llama: use 16 bit floats for dmmv CUDA kernels" OFF)
+option(LLAMA_CUDA_F16                "llama: use 16 bit floats for some calculations" OFF)
 set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K")
 option(LLAMA_CLBLAST                 "llama: use CLBlast"                             OFF)
 option(LLAMA_METAL                   "llama: use Metal"                               OFF)
@@ -251,6 +253,10 @@ if (LLAMA_CUBLAS)
         set(GGML_SOURCES_CUDA ggml-cuda.cu ggml-cuda.h)

         add_compile_definitions(GGML_USE_CUBLAS)
+#        if (LLAMA_CUDA_CUBLAS)
+#            add_compile_definitions(GGML_CUDA_CUBLAS)
+#        endif()
+        add_compile_definitions(GGML_CUDA_MMQ_Y=${LLAMA_CUDA_MMQ_Y})
         if (LLAMA_CUDA_FORCE_DMMV)
             add_compile_definitions(GGML_CUDA_FORCE_DMMV)
         endif()
@@ -259,8 +265,8 @@ if (LLAMA_CUBLAS)
         if (DEFINED LLAMA_CUDA_DMMV_Y)
             add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_DMMV_Y}) # for backwards compatibility
         endif()
-        if (LLAMA_CUDA_DMMV_F16)
-            add_compile_definitions(GGML_CUDA_DMMV_F16)
+        if (LLAMA_CUDA_F16 OR LLAMA_CUDA_DMMV_F16)
+            add_compile_definitions(GGML_CUDA_F16)
         endif()
         add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
@@ -271,10 +277,14 @@ if (LLAMA_CUBLAS)
         endif()

     if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
         # 52 == lowest CUDA 12 standard
         # 60 == f16 CUDA intrinsics
         # 61 == integer CUDA intrinsics
+        # 70 == (assumed) compute capability at which unrolling a loop in mul_mat_q kernels is faster
         if (LLAMA_CUDA_DMMV_F16)
-            set(CMAKE_CUDA_ARCHITECTURES "60;61") # needed for f16 CUDA intrinsics
+            set(CMAKE_CUDA_ARCHITECTURES "60;61;70") # needed for f16 CUDA intrinsics
         else()
-            set(CMAKE_CUDA_ARCHITECTURES "52;61") # lowest CUDA 12 standard + lowest for integer intrinsics
+            set(CMAKE_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics
         endif()
     endif()
     message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
@@ -357,6 +367,7 @@ if (LLAMA_ALL_WARNINGS)
             -Wshadow
             -Wstrict-prototypes
             -Wpointer-arith
+            -Wmissing-prototypes
         )
         set(cxx_flags
             -Wall
@@ -496,6 +507,8 @@ endif()
 add_library(ggml OBJECT
             ggml.c
             ggml.h
+            ggml-alloc.c
+            ggml-alloc.h
             ${GGML_SOURCES_CUDA}
             ${GGML_SOURCES_OPENCL}
             ${GGML_SOURCES_METAL}
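Taken together, the CMake changes rename `LLAMA_CUDA_DMMV_F16` to the broader `LLAMA_CUDA_F16` (the old name is still honored via the `OR` condition) and expose the new `LLAMA_CUDA_MMQ_Y` tile size. A hypothetical CUDA build using the new options could be configured as:

```
mkdir build
cd build
cmake .. -DLLAMA_CUBLAS=ON -DLLAMA_CUDA_F16=ON -DLLAMA_CUDA_MMQ_Y=64
cmake --build . --config Release
```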
Makefile (32 changes)
@@ -1,5 +1,5 @@
 # Define the default target now so that it is always the first target
-BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch simple server embd-input-test gguf
+BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch simple server embd-input-test

 # Binaries only useful for tests
 TEST_TARGETS = tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0
@@ -63,7 +63,8 @@ ifdef LLAMA_SERVER_VERBOSE
 endif

 # warnings
-CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith
+CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith \
+            -Wmissing-prototypes
 CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar

 # OS specific
@@ -193,7 +194,7 @@ ifdef LLAMA_CUBLAS
 	CXXFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
 	LDFLAGS  += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
 	OBJS     += ggml-cuda.o
-	NVCCFLAGS = --forward-unknown-to-host-compiler
+	NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math
 ifdef LLAMA_CUDA_NVCC
 	NVCC = $(LLAMA_CUDA_NVCC)
 else
@@ -219,14 +220,25 @@ else ifdef LLAMA_CUDA_DMMV_Y
 else
 	NVCCFLAGS += -DGGML_CUDA_MMV_Y=1
 endif # LLAMA_CUDA_MMV_Y
+ifdef LLAMA_CUDA_F16
+	NVCCFLAGS += -DGGML_CUDA_F16
+endif # LLAMA_CUDA_F16
 ifdef LLAMA_CUDA_DMMV_F16
-	NVCCFLAGS += -DGGML_CUDA_DMMV_F16
+	NVCCFLAGS += -DGGML_CUDA_F16
 endif # LLAMA_CUDA_DMMV_F16
 ifdef LLAMA_CUDA_KQUANTS_ITER
 	NVCCFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
 else
 	NVCCFLAGS += -DK_QUANTS_PER_ITERATION=2
 endif
+ifdef LLAMA_CUDA_MMQ_Y
+	NVCCFLAGS += -DGGML_CUDA_MMQ_Y=$(LLAMA_CUDA_MMQ_Y)
+else
+	NVCCFLAGS += -DGGML_CUDA_MMQ_Y=64
+endif # LLAMA_CUDA_MMQ_Y
+#ifdef LLAMA_CUDA_CUBLAS
+#	NVCCFLAGS += -DGGML_CUDA_CUBLAS
+#endif # LLAMA_CUDA_CUBLAS
 ifdef LLAMA_CUDA_CCBIN
 	NVCCFLAGS += -ccbin $(LLAMA_CUDA_CCBIN)
 endif
@@ -317,7 +329,12 @@ $(info )
 ggml.o: ggml.c ggml.h ggml-cuda.h
 	$(CC)  $(CFLAGS)   -c $< -o $@

-llama.o: llama.cpp ggml.h ggml-cuda.h ggml-metal.h llama.h llama-util.h
+ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h
+	$(CC)  $(CFLAGS)   -c $< -o $@
+
+OBJS += ggml-alloc.o
+
+llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h llama-util.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@

 common.o: examples/common.cpp examples/common.h
@@ -330,7 +347,7 @@ libllama.so: llama.o ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)

 clean:
-	rm -vf *.o *.so *.dll main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch embd-input-test gguf build-info.h $(TEST_TARGETS)
+	rm -vf *.o *.so *.dll main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch embd-input-test build-info.h $(TEST_TARGETS)

 #
 # Examples
@@ -370,9 +387,6 @@ $(LIB_PRE)embdinput$(DSO_EXT): examples/embd-input/embd-input.h examples/embd-in
 embd-input-test: $(LIB_PRE)embdinput$(DSO_EXT) examples/embd-input/embd-input-test.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %$(DSO_EXT),$(filter-out %.h,$(filter-out %.hpp,$^))) -o $@ $(LDFLAGS) -L. -lembdinput

-gguf: examples/gguf/gguf.cpp build-info.h ggml.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
-
 train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
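The Makefile exposes the same knobs as `ifdef`-checked variables, so the equivalent of the CMake invocation above would be, for example:

```
make LLAMA_CUBLAS=1 LLAMA_CUDA_F16=1 LLAMA_CUDA_MMQ_Y=64
```

Note that with `LLAMA_CUBLAS` set, the diff now also passes `-use_fast_math` to nvcc automatically.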
README.md (22 changes)
@@ -77,6 +77,7 @@ as the main playground for developing new features for the [ggml](https://github
 **Supported models:**

 - [X] LLaMA 🦙
+- [x] LLaMA 2 🦙🦙
 - [X] [Alpaca](https://github.com/ggerganov/llama.cpp#instruction-mode-with-alpaca)
 - [X] [GPT4All](https://github.com/ggerganov/llama.cpp#using-gpt4all)
 - [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
@@ -399,12 +400,16 @@ Building the program with BLAS support may lead to some performance improvements

   The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. The following compilation options are also available to tweak performance:

+<!---
+  | LLAMA_CUDA_CUBLAS       | Boolean                |   false | Use cuBLAS instead of custom CUDA kernels for prompt processing. Faster for all quantization formats except for q4_0 and q8_0, especially for k-quants. Increases VRAM usage (700 MiB for 7b, 970 MiB for 13b, 1430 MiB for 33b). |
+--->
   | Option                  | Legal values           | Default | Description |
   |-------------------------|------------------------|---------|-------------|
+  | LLAMA_CUDA_MMQ_Y        | Positive integer >= 32 |      64 | Tile size in y direction when using the custom CUDA kernels for prompt processing. Higher values can be faster depending on the amount of shared memory available. Power of 2 heavily recommended. |
   | LLAMA_CUDA_FORCE_DMMV   | Boolean                |   false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
   | LLAMA_CUDA_DMMV_X       | Positive integer >= 32 |      32 | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
-  | LLAMA_CUDA_MMV_Y        | Positive integer       |       1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. Does not affect k-quants. |
-  | LLAMA_CUDA_DMMV_F16     | Boolean                |   false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels. Can improve performance on relatively recent GPUs. |
+  | LLAMA_CUDA_MMV_Y        | Positive integer       |       1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. Does not affect k-quants. |
+  | LLAMA_CUDA_F16          | Boolean                |   false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
   | LLAMA_CUDA_KQUANTS_ITER | 1 or 2                 |       2 | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |

 - #### CLBlast
@@ -650,6 +655,19 @@ python3 convert.py pygmalion-7b/ --outtype q4_1
 - The LLaMA models are officially distributed by Facebook and will **never** be provided through this repository.
 - Refer to [Facebook's LLaMA repository](https://github.com/facebookresearch/llama/pull/73/files) if you need to request access to the model data.

+### Obtaining and using the Facebook LLaMA 2 model
+
+- Refer to [Facebook's LLaMA download page](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) if you want to access the model data.
+- Alternatively, if you want to save time and space, you can download already converted and quantized models from [TheBloke](https://huggingface.co/TheBloke), including:
+  - [LLaMA 2 7B base](https://huggingface.co/TheBloke/Llama-2-7B-GGML)
+  - [LLaMA 2 13B base](https://huggingface.co/TheBloke/Llama-2-13B-GGML)
+  - [LLaMA 2 70B base](https://huggingface.co/TheBloke/Llama-2-70B-GGML)
+  - [LLaMA 2 7B chat](https://huggingface.co/TheBloke/Llama-2-7B-chat-GGML)
+  - [LLaMA 2 13B chat](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML)
+  - [LLaMA 2 70B chat](https://huggingface.co/TheBloke/Llama-2-70B-chat-GGML)
+- Specify `-eps 1e-5` for best generation quality
+- Specify `-gqa 8` for 70B models to work
+
 ### Verifying the model files

 Please verify the [sha256 checksums](SHA256SUMS) of all downloaded model files to confirm that you have the correct model data files before creating an issue relating to your model files.
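Combining the two LLaMA 2 usage notes from the added README section into one command line, a 70B run would pass both flags; the model path and prompt here are only illustrative:

```
./main -m ./models/llama-2-70b-chat.ggmlv3.q4_0.bin -gqa 8 -eps 1e-5 -p "Hello"
```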
constants.py (deleted file, 33 lines)
@@ -1,33 +0,0 @@
GGUF_MAGIC             = 0x47475546
GGUF_VERSION           = 1
GGUF_DEFAULT_ALIGNMENT = 32

# general
KEY_GENERAL_ARCHITECTURE         = "general.architecture"
KEY_GENERAL_QUANTIZATION_VERSION = "general.quantization_version"
KEY_GENERAL_NAME                 = "general.name"
KEY_GENERAL_AUTHOR               = "general.author"
KEY_GENERAL_URL                  = "general.url"
KEY_GENERAL_DESCRIPTION          = "general.description"
KEY_GENERAL_FILE_TYPE            = "general.file_type"
KEY_GENERAL_LICENSE              = "general.license"
KEY_GENERAL_SOURCE_URL           = "general.source.url"
KEY_GENERAL_SOURCE_HF_REPO       = "general.source.hugginface.repository"

# LLM
KEY_LLM_CONTEXT_LENGTH        = "{llm}.context_length"
KEY_LLM_EMBEDDING_LENGTH      = "{llm}.embedding_length"
KEY_LLM_LAYER_COUNT           = "{llm}.layer_count"
KEY_LLM_FEED_FORWARD_LENGTH   = "{llm}.feed_forward_length"
KEY_LLM_USE_PARALLEL_RESIDUAL = "{llm}.use_parallel_residual"
KEY_LLM_TENSOR_DATA_LAYOUT    = "{llm}.tensor_data_layout"

# attention
KEY_ATTENTION_HEAD_COUNT     = "{llm}.attention.head_count"
KEY_ATTENTION_HEAD_COUNT_KV  = "{llm}.attention.head_count_kv"
KEY_ATTENTION_MAX_ALIBI_BIAS = "{llm}.attention.max_alibi_bias"
KEY_ATTENTION_CLAMP_KQV      = "{llm}.attention.clamp_kqv"

# RoPE
KEY_ROPE_DIMENSION_COUNT = "{llm}.rope.dimension_count"
KEY_ROPE_SCALE           = "{llm}.rope.scale"
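The `{llm}` placeholder in these key templates is evidently meant to be filled with an architecture name; a plain `str.format` call does it. A minimal sketch, where the `arch` value is illustrative:

```python
# Key template from the (now removed) constants.py above.
KEY_LLM_CONTEXT_LENGTH = "{llm}.context_length"

arch = "llama"  # illustrative architecture name
key = KEY_LLM_CONTEXT_LENGTH.format(llm=arch)
print(key)  # prints: llama.context_length
```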
convert-new.py (deleted file, 949 lines)
@@ -1,949 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import argparse
|
||||
import concurrent.futures
|
||||
import copy
|
||||
import enum
|
||||
import faulthandler
|
||||
import functools
|
||||
import io
|
||||
import itertools
|
||||
import json
|
||||
import math
|
||||
import mmap
|
||||
import pickle
|
||||
import re
|
||||
import signal
|
||||
import struct
|
||||
import sys
|
||||
import zipfile
|
||||
import numpy as np
|
||||
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, TypeVar, Union)
|
||||
from sentencepiece import SentencePieceProcessor # type: ignore
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing_extensions import TypeAlias
|
||||
|
||||
if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
|
||||
faulthandler.register(signal.SIGUSR1)
|
||||
|
||||
NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class UnquantizedDataType:
|
||||
name: str
|
||||
|
||||
DT_F16 = UnquantizedDataType('F16')
|
||||
DT_F32 = UnquantizedDataType('F32')
|
||||
DT_I32 = UnquantizedDataType('I32')
|
||||
DT_BF16 = UnquantizedDataType('BF16')
|
||||
|
||||
DataType = Union[UnquantizedDataType]
|
||||
|
||||
DATA_TYPE_TO_FTYPE: Dict[DataType, int] = {
|
||||
DT_F32: 0,
|
||||
DT_F16: 1,
|
||||
}
|
||||
|
||||
FTYPE_TO_DATA_TYPE: Dict[int, DataType] = \
|
||||
{ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()}
|
||||
|
||||
DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = {
|
||||
DT_BF16: np.dtype(np.uint16),
|
||||
DT_F16: np.dtype(np.float16),
|
||||
DT_F32: np.dtype(np.float32),
|
||||
DT_I32: np.dtype(np.int32),
|
||||
}
|
||||
|
||||
NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = \
|
||||
{dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()}
|
||||
|
||||
class GGMLFileType(enum.Enum):
|
||||
AllF32 = 0
|
||||
MostlyF16 = 1 # except 1d tensors
|
||||
|
||||
def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType:
|
||||
if len(tensor.shape) == 1:
|
||||
# 1D tensors are always F32.
|
||||
return DT_F32
|
||||
elif self == GGMLFileType.AllF32:
|
||||
return DT_F32
|
||||
elif self == GGMLFileType.MostlyF16:
|
||||
return DT_F16
|
||||
else:
|
||||
raise ValueError(self)
|
||||
|
||||
# TODO: this is LLaMA specific
|
||||
def make_tensors_list() -> List[str]:
|
||||
ret = [
|
||||
'tok_embeddings.weight',
|
||||
'norm.weight',
|
||||
'output.weight',
|
||||
]
|
||||
for i in range(80): # maximum number of layer
|
||||
ret += [
|
||||
f'layers.{i}.attention.wq.weight',
|
||||
f'layers.{i}.attention.wk.weight',
|
||||
f'layers.{i}.attention.wv.weight',
|
||||
f'layers.{i}.attention.wo.weight',
|
||||
f'layers.{i}.attention_norm.weight',
|
||||
f'layers.{i}.feed_forward.w1.weight',
|
||||
f'layers.{i}.feed_forward.w2.weight',
|
||||
f'layers.{i}.feed_forward.w3.weight',
|
||||
f'layers.{i}.ffn_norm.weight',
|
||||
]
|
||||
return ret
|
||||
|
||||
# TODO: this should be generalized for non-LLaMA models
|
||||
TENSORS_LIST = make_tensors_list()
|
||||
TENSORS_SET = set(TENSORS_LIST)
|
||||
|
||||
def find_n_mult(n_ff: int, n_embd: int) -> int:
|
||||
# hardcoded magic range
|
||||
for n_mult in range(256, 1, -1):
|
||||
calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
|
||||
if calc_ff == n_ff:
|
||||
return n_mult
|
||||
raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")
|
||||
|
||||
|
||||
@dataclass
|
||||
class Params:
|
||||
n_vocab: int
|
||||
n_embd: int
|
||||
n_mult: int
|
||||
n_head: int
|
||||
n_layer: int
|
||||
|
||||
@staticmethod
|
||||
def guessed(model: 'LazyModel') -> 'Params':
|
||||
# try transformer naming first
|
||||
n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape
|
||||
|
||||
# try transformer naming first
|
||||
if "model.layers.0.self_attn.q_proj.weight" in model:
|
||||
n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
|
||||
elif "model.layers.0.self_attn.W_pack.weight" in model: # next: try baichuan naming
|
||||
n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
|
||||
else:
|
||||
n_layer=next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)
|
||||
|
||||
if n_layer < 1:
|
||||
raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n"
|
||||
"Suggestion: provide 'config.json' of the model in the same directory containing model files.")
|
||||
|
||||
n_head=n_embd // 128 # guessed
|
||||
|
||||
return Params(
|
||||
n_vocab = n_vocab,
|
||||
n_embd = n_embd,
|
||||
n_mult = 256,
|
||||
n_head = n_head,
|
||||
n_layer = n_layer,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
|
||||
config = json.load(open(config_path))
|
||||
|
||||
n_vocab = config["vocab_size"];
|
||||
n_embd = config["hidden_size"];
|
||||
n_head = config["num_attention_heads"];
|
||||
n_layer = config["num_hidden_layers"];
|
||||
n_ff = config["intermediate_size"];
|
||||
|
||||
n_mult = find_n_mult(n_ff, n_embd);
|
||||
|
||||
return Params(
|
||||
n_vocab = n_vocab,
|
||||
n_embd = n_embd,
|
||||
n_mult = n_mult,
|
||||
n_head = n_head,
|
||||
n_layer = n_layer,
|
||||
)
|
||||
|
||||

    # LLaMA v2 70B params.json
    # {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1}
    @staticmethod
    def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
        config = json.load(open(config_path))

        n_vocab = config["vocab_size"]
        n_embd  = config["dim"]
        n_head  = config["n_heads"]
        n_layer = config["n_layers"]
        n_mult  = config["multiple_of"]

        if n_vocab == -1:
            n_vocab = model["tok_embeddings.weight"].shape[0]

        return Params(
            n_vocab = n_vocab,
            n_embd  = n_embd,
            n_mult  = n_mult,
            n_head  = n_head,
            n_layer = n_layer,
        )

    @staticmethod
    def load(model_plus: 'ModelPlus') -> 'Params':
        hf_config_path   = model_plus.paths[0].parent / "config.json"
        orig_config_path = model_plus.paths[0].parent / "params.json"

        if hf_config_path.exists():
            params = Params.loadHFTransformerJson(model_plus.model, hf_config_path)
        elif orig_config_path.exists():
            params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path)
        else:
            params = Params.guessed(model_plus.model)

        print(f'params: n_vocab:{params.n_vocab} n_embd:{params.n_embd} n_mult:{params.n_mult} n_head:{params.n_head} n_layer:{params.n_layer}')
        return params

class SentencePieceVocab:
    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], vocabtype: Optional[str]) -> None:
        self.vocabtype = vocabtype
        if self.vocabtype == "bpe":
            self.sentencepiece_tokenizer = json.loads(open(str(fname_tokenizer)).read())
        else:
            self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))

        added_tokens: Dict[str, int]
        if fname_added_tokens is not None:
            added_tokens = json.load(open(fname_added_tokens))
        else:
            added_tokens = {}

        if self.vocabtype == "bpe":
            vocab_size: int = len(self.sentencepiece_tokenizer)
        else:
            vocab_size: int = self.sentencepiece_tokenizer.vocab_size()

        expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
        actual_ids   = sorted(added_tokens.values())
        if expected_ids != actual_ids:
            raise Exception(f"Expected added token IDs to be sequential and start at {vocab_size}; got {actual_ids}")

        items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
        self.added_tokens_list = [text for (text, idx) in items]
        self.vocab_size_base: int = vocab_size
        self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer    = fname_tokenizer
        self.fname_added_tokens = fname_added_tokens

    def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
        tokenizer = self.sentencepiece_tokenizer
        if self.vocabtype == "bpe":
            from transformers.models.gpt2 import tokenization_gpt2
            byte_encoder = tokenization_gpt2.bytes_to_unicode()
            byte_decoder = {v: k for k, v in byte_encoder.items()}
            for i, item in enumerate(tokenizer):
                text: bytes
                text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]])
                score: float = -i
                yield text, score
        else:
            for i in range(tokenizer.vocab_size()):
                text: bytes
                if tokenizer.is_unknown(i):
                    text = " \u2047 ".encode("utf-8")
                elif tokenizer.is_control(i):
                    text = b""
                elif tokenizer.is_byte(i):
                    piece = tokenizer.id_to_piece(i)
                    if len(piece) != 6:
                        raise Exception(f"Invalid token: {piece}")
                    byte_value = int(piece[3:-1], 16)
                    text = struct.pack("B", byte_value)
                else:
                    text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
                score: float = tokenizer.get_score(i)
                yield text, score
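
    # Note on the byte-token branch above: SentencePiece renders byte-fallback
    # tokens as six-character pieces such as "<0x0A>"; piece[3:-1] extracts the
    # two hex digits ("0A"), and struct.pack("B", 10) turns them into b"\n".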
    def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score

    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
        yield from self.sentencepiece_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"


Vocab = Union[SentencePieceVocab]


def permute(weights: NDArray, n_head: int) -> NDArray:
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                   .swapaxes(1, 2)
                   .reshape(weights.shape))
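
# Illustrative sketch of what permute() does (toy shapes, not real model
# sizes): the row axis is viewed as [n_head, 2, head_dim // 2], the middle two
# axes are swapped, and the result is flattened back, which regroups the two
# rotary half-dimensions within each attention head:
#   w = np.arange(8 * 4, dtype=np.float32).reshape(8, 4)   # 2 heads of dim 4
#   assert permute(w, 2).shape == (8, 4)  # same shape, rows reordered per head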


class Tensor(metaclass=ABCMeta):
    data_type: DataType

    @abstractmethod
    def astype(self, data_type: DataType) -> 'Tensor': ...
    @abstractmethod
    def permute(self, n_head: int) -> 'Tensor': ...
    @abstractmethod
    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
    @abstractmethod
    def part(self, n_part: int) -> 'UnquantizedTensor': ...
    @abstractmethod
    def to_ggml(self) -> 'GGMLCompatibleTensor': ...


def bf16_to_fp32(bf16_arr: np.ndarray) -> np.ndarray:
    assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}"
    fp32_arr = bf16_arr.astype(np.uint32) << 16
    return fp32_arr.view(np.float32)
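
# Example of the widening trick above: 1.0 in bfloat16 has the bit pattern
# 0x3F80; shifting it into the high half gives 0x3F800000, which is exactly
# the IEEE-754 float32 encoding of 1.0 (bfloat16 is float32 truncated to its
# top 16 bits, so this direction needs no rounding).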


class UnquantizedTensor(Tensor):
    def __init__(self, ndarray: NDArray) -> None:
        assert isinstance(ndarray, np.ndarray)
        self.ndarray   = ndarray
        self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype]

    def astype(self, data_type: DataType) -> Tensor:
        dtype = DATA_TYPE_TO_NUMPY[data_type]
        if self.data_type == DT_BF16:
            self.ndarray = bf16_to_fp32(self.ndarray)
        return UnquantizedTensor(self.ndarray.astype(dtype))

    def to_ggml(self) -> 'UnquantizedTensor':
        return self

    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor':
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head))

    def part(self, n_part: int) -> 'UnquantizedTensor':
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])

    def permute(self, n_head: int) -> 'UnquantizedTensor':
        return UnquantizedTensor(permute(self.ndarray, n_head))


def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray:
    tensor = lazy_tensor.load()
    assert isinstance(tensor, UnquantizedTensor)

    # double-check:
    actual_shape = list(tensor.ndarray.shape)
    assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape)
    if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype:
        if convert:
            tensor.ndarray = tensor.ndarray.astype(expected_dtype)
        else:
            raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got {tensor.ndarray.dtype}')

    return tensor.ndarray


GGMLCompatibleTensor = Union[UnquantizedTensor]


class DeferredPermutedTensor(Tensor):
    def __init__(self, base: Tensor, n_head: int) -> None:
        self.base      = base
        self.n_head    = n_head
        self.data_type = self.base.data_type

    def astype(self, data_type: DataType) -> Tensor:
        return self.base.astype(data_type).permute(self.n_head)

    def to_ggml(self) -> GGMLCompatibleTensor:
        return self.base.to_ggml().permute(self.n_head)

    def permute(self, n_head: int) -> Tensor:
        raise Exception("shouldn't permute twice")


@dataclass
class LazyTensor:
    _load: Callable[[], Tensor]
    shape: List[int]
    data_type: DataType
    description: str

    def load(self) -> Tensor:
        ret = self._load()
        assert ret.data_type == self.data_type, (self.data_type, ret.data_type, self.description)
        return ret

    def astype(self, data_type: DataType) -> 'LazyTensor':
        self.validate_conversion_to(data_type)

        def load() -> Tensor:
            return self.load().astype(data_type)
        return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}')

    def validate_conversion_to(self, data_type: DataType) -> None:
        if data_type == self.data_type:
            return


LazyModel = Dict[str, LazyTensor]


@dataclass
class ModelPlus:
    model: LazyModel
    paths: List[Path]  # Where this was read from.
    format: Literal['ggml', 'torch', 'safetensors']
    vocab: Optional[Vocab]  # For GGML models (which have vocab built in), the vocab.


def merge_sharded(models: List[LazyModel]) -> LazyModel:
    # Original LLaMA models have each file contain one part of each tensor.
    # Use a dict instead of a set to preserve order.
    names = {name: None for model in models for name in model}

    def convert(name: str) -> LazyTensor:
        lazy_tensors: List[LazyTensor] = [model[name] for model in models]
        if len(lazy_tensors) == 1:
            # only one file; don't go through this procedure since there might
            # be quantized tensors
            return lazy_tensors[0]
        if len(lazy_tensors[0].shape) == 1:
            # the tensor is just duplicated in every file
            return lazy_tensors[0]
        if name.startswith('tok_embeddings.') or \
           name.endswith('.attention.wo.weight') or \
           name.endswith('.feed_forward.w2.weight'):
            # split by columns
            axis = 1
        else:
            # split by rows
            axis = 0
        concatenated_shape = list(lazy_tensors[0].shape)
        concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors)

        def load() -> UnquantizedTensor:
            ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors]
            concatenated: NDArray = np.concatenate(ndarrays, axis=axis)
            return UnquantizedTensor(concatenated)
        description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]'
        return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description)
    return {name: convert(name) for name in names}
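
# Illustrative example of the axis choice above (toy sizes, not a real model):
# two shards of 'layers.0.attention.wq.weight' of shape [2048, 4096] are
# row-split, so they concatenate along axis 0 into [4096, 4096]; the
# column-split 'layers.0.attention.wo.weight' shards of shape [4096, 2048]
# concatenate along axis 1 into the same [4096, 4096].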


def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus:
    formats = set(mp.format for mp in models_plus)
    assert len(formats) == 1, "different formats?"
    format = formats.pop()
    paths = [path for mp in models_plus for path in mp.paths]
    # Use the first non-None vocab, if any.
    try:
        vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None)
    except StopIteration:
        vocab = None

    if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
        # Transformers models put different tensors in different files, but
        # don't split individual tensors between files.
        model: LazyModel = {}
        for mp in models_plus:
            model.update(mp.model)
    else:
        model = merge_sharded([mp.model for mp in models_plus])

    return ModelPlus(model, paths, format, vocab)


def permute_lazy(lazy_tensor: LazyTensor, n_head: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute(n_head)
    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)


def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute_part(n_part, n_head)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)


def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().part(n_part)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description)


def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
    out: LazyModel = {}
    out["tok_embeddings.weight"] = model["model.embed_tokens.weight"]
    out["norm.weight"]   = model["model.norm.weight"]
    out["output.weight"] = model["lm_head.weight"]

    for i in itertools.count():
        if f"model.layers.{i}.self_attn.q_proj.weight" in model:
            out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
            out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head)
            out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
        elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
            out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
            out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head)
            out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
        else:
            break

        out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"]

        out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"]
        out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"]
        out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"]

        out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"]
        out[f"layers.{i}.ffn_norm.weight"]       = model[f"model.layers.{i}.post_attention_layernorm.weight"]
    return out


# Functionality that simulates `torch.load` but where individual tensors are
# only loaded into memory on demand, not all at once.
# PyTorch can't do this natively as of time of writing:
# - https://github.com/pytorch/pytorch/issues/64327
# This allows us to de-shard without multiplying RAM usage, and also
# conveniently drops the PyTorch dependency (though we still need numpy).


@dataclass
class LazyStorageKind:
    data_type: DataType


@dataclass
class LazyStorage:
    load: Callable[[int, int], NDArray]
    kind: LazyStorageKind
    description: str


class LazyUnpickler(pickle.Unpickler):
    def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile):
        super().__init__(fp)
        self.data_base_path = data_base_path
        self.zip_file = zip_file

    def persistent_load(self, pid: Any) -> Any:
        assert pid[0] == 'storage'
        assert isinstance(pid[1], LazyStorageKind)
        data_type = pid[1].data_type
        filename_stem = pid[2]
        filename = self.data_base_path + '/' + filename_stem
        info = self.zip_file.getinfo(filename)

        def load(offset: int, elm_count: int) -> NDArray:
            dtype = DATA_TYPE_TO_NUMPY.get(data_type)
            if dtype is None:
                raise Exception("tensor stored in unsupported format")
            fp = self.zip_file.open(info)
            fp.seek(offset * dtype.itemsize)
            size = elm_count * dtype.itemsize
            data = fp.read(size)
            assert len(data) == size
            return np.frombuffer(data, dtype)
        description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}'
        return LazyStorage(load=load, kind=pid[1], description=description)

    # @staticmethod
    def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any,  # pyright: ignore[reportSelfClsParameterName]
                               requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor:
        assert isinstance(storage, LazyStorage)

        def load() -> UnquantizedTensor:
            elm_count = stride[0] * size[0]
            return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size))
        description = f'pickled storage_offset={storage_offset} in {storage.description}'
        return LazyTensor(load, list(size), storage.kind.data_type, description)

    # @staticmethod
    def rebuild_from_type_v2(func, new_type, args, state):
        return func(*args)

    CLASSES: Dict[Any, Any] = {
        ('torch._tensor', '_rebuild_from_type_v2'): rebuild_from_type_v2,
        ('torch._utils', '_rebuild_tensor_v2'): lazy_rebuild_tensor_v2,
        ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16),
        ('torch', 'HalfStorage'): LazyStorageKind(DT_F16),
        ('torch', 'FloatStorage'): LazyStorageKind(DT_F32),
        ('torch', 'IntStorage'): LazyStorageKind(DT_I32),
        ('torch', 'Tensor'): LazyTensor,
    }

    def find_class(self, module: str, name: str) -> Any:
        if not module.startswith('torch'):
            return super().find_class(module, name)
        return self.CLASSES[(module, name)]
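
# Sketch of the checkpoint layout assumed above (a hypothetical model.pt): a
# PyTorch save file is a zip archive containing e.g. 'archive/data.pkl' plus
# one raw storage file per tensor ('archive/data/0', 'archive/data/1', ...);
# each persistent ID in the pickle carries its storage kind (pid[1]) and the
# storage file's stem (pid[2]), which is how persistent_load() finds the data.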


def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus:
    zf = zipfile.ZipFile(outer_fp)
    pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')]
    assert len(pickle_paths) == 1, pickle_paths
    pickle_fp = zf.open(pickle_paths[0], 'r')
    unpickler = LazyUnpickler(pickle_fp,
                              data_base_path=pickle_paths[0][:-4],
                              zip_file=zf)
    model = unpickler.load()
    as_dict = dict(model.items())
    return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None)


SAFETENSORS_DATA_TYPES: Dict[str, DataType] = {
    'BF16': DT_BF16,
    'F16':  DT_F16,
    'F32':  DT_F32,
    'I32':  DT_I32,
}


def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus:
    header_size, = struct.unpack('<Q', fp.read(8))
    header: Dict[str, Dict[str, Any]] = json.loads(fp.read(header_size))
    # Use mmap for the actual data to avoid race conditions with the file offset.
    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
    byte_buf = mapped[8 + header_size:]

    def convert(info: Dict[str, Any]) -> LazyTensor:
        data_type = SAFETENSORS_DATA_TYPES[info['dtype']]
        numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
        shape: List[int] = info['shape']
        begin, end = info['data_offsets']
        assert 0 <= begin <= end <= len(byte_buf)
        assert end - begin == math.prod(shape) * numpy_dtype.itemsize
        buf = byte_buf[begin:end]

        def load() -> UnquantizedTensor:
            return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
        description = f'safetensors begin={begin} end={end} type={data_type} path={path}'
        return LazyTensor(load, shape, data_type, description)
    model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'}
    return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None)
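
# Sketch of the safetensors layout parsed above (a made-up one-tensor file):
# the first 8 bytes are a little-endian uint64 header size N, followed by N
# bytes of JSON such as
#   {"w": {"dtype": "F32", "shape": [2, 2], "data_offsets": [0, 16]}}
# and then the raw tensor bytes, with data_offsets relative to the end of the
# header.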


def must_read(fp: IO[bytes], length: int) -> bytes:
    ret = fp.read(length)
    if len(ret) < length:
        raise Exception("unexpectedly reached end of file")
    return ret


@functools.lru_cache(maxsize=None)
def lazy_load_file(path: Path) -> ModelPlus:
    fp = open(path, 'rb')
    first8 = fp.read(8)
    fp.seek(0)
    if first8[:2] == b'PK':
        # A zip file, i.e. PyTorch format
        return lazy_load_torch_file(fp, path)
    elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024:
        # Probably safetensors
        return lazy_load_safetensors_file(fp, path)
    else:
        raise ValueError(f"unknown format: {path}")
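
# Format-detection example (hypothetical files): a PyTorch checkpoint is a zip
# archive, so it begins with the bytes b'PK'; a safetensors file begins with
# its little-endian 8-byte header size, which for any plausible header is far
# below the 16 MiB cutoff used above.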


In = TypeVar('In')
Out = TypeVar('Out')


def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]:
    '''Parallel map, but with backpressure. If the caller doesn't call `next`
    fast enough, this will stop calling `func` at some point rather than
    letting results pile up in memory. Specifically, there is a max of one
    output value buffered per thread.'''
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures: List[concurrent.futures.Future[Out]] = []
        items_rev = list(iterable)[::-1]
        for i in range(min(concurrency, len(items_rev))):
            futures.append(executor.submit(func, items_rev.pop()))
        while futures:
            result = futures.pop(0).result()
            if items_rev:
                futures.append(executor.submit(func, items_rev.pop()))
            yield result
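
# Illustrative usage (toy workload, not part of the converter): results come
# back in input order, and at most `concurrency` calls run ahead of the
# consumer, so memory stays bounded even if the consumer is slow:
#   for squared in bounded_parallel_map(lambda x: x * x, range(10), concurrency=4):
#       print(squared)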


def check_vocab_size(params: Params, vocab: Vocab) -> None:
    if params.n_vocab != vocab.vocab_size:
        assert isinstance(vocab, SentencePieceVocab)
        if params.n_vocab == vocab.vocab_size_base:
            print("Ignoring added_tokens.json since model matches vocab size without it.")
            vocab.added_tokens_list = []
            vocab.vocab_size = vocab.vocab_size_base
            return
        msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}"
        if vocab.fname_added_tokens is not None:
            msg += f" combined with {vocab.fname_added_tokens}"
        msg += f" has {vocab.vocab_size})."
        if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20 and vocab.fname_added_tokens is None:
            msg += f" Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})."
        raise Exception(msg)


class OutputFile:
    def __init__(self, fname_out: Path) -> None:
        self.fout = open(fname_out, "wb")

    def write_file_header(self, params: Params, file_type: GGMLFileType) -> None:
        self.fout.write(b"ggjt"[::-1])  # magic
        values = [
            1,  # file version
            params.n_vocab,
            params.n_embd,
            params.n_mult,
            params.n_head,
            params.n_layer,
            params.n_embd // params.n_head,  # rot (obsolete)
            file_type.value,
        ]
        self.fout.write(struct.pack("i" * len(values), *values))

    def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None:
        sname = name.encode('utf-8')
        self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type]))
        self.fout.write(struct.pack("i" * len(shape), *shape[::-1]))
        self.fout.write(sname)
        self.fout.seek((self.fout.tell() + 31) & -32)
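        # Alignment note: (tell + 31) & -32 rounds the write position up to
        # the next multiple of 32 (e.g. offset 100 -> 128), so each tensor's
        # data starts on a 32-byte boundary.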

    def write_vocab(self, vocab: Vocab) -> None:
        for text, score in vocab.all_tokens():
            self.fout.write(struct.pack("i", len(text)))
            self.fout.write(text)
            self.fout.write(struct.pack("f", score))

    @staticmethod
    def write_vocab_only(fname_out: Path, vocab: Vocab) -> None:
        params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0, n_head=1, n_layer=0)
        of = OutputFile(fname_out)
        of.write_file_header(params, file_type=GGMLFileType.AllF32)
        of.write_vocab(vocab)
        of.fout.close()

    @staticmethod
    def write_all(fname_out: Path, params: Params, file_type: GGMLFileType, model: LazyModel, vocab: Vocab) -> None:
        check_vocab_size(params, vocab)
        of = OutputFile(fname_out)
        of.write_file_header(params, file_type)
        print("Writing vocab...")
        of.write_vocab(vocab)

        def do_item(item: Tuple[str, LazyTensor]) -> NDArray:
            name, lazy_tensor = item
            return lazy_tensor.load().to_ggml().ndarray

        ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8)
        for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
            padi = len(str(len(model)))
            print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}")
            of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type)
            ndarray.tofile(of.fout)
        of.fout.close()


def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType:
    wq_type = model["layers.0.attention.wq.weight"].data_type
    if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)):
        return GGMLFileType.AllF32
    if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16):
        return GGMLFileType.MostlyF16
    name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()}
    raise Exception(f"Unexpected combination of types: {name_to_type}")


def do_necessary_conversions(model: LazyModel, params: Params) -> LazyModel:
    if "lm_head.weight" in model:
        model = convert_transformers_to_orig(model, params)
    model = filter_and_sort_tensors(model)

    return model


def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
    return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
            for (name, tensor) in model.items()}


def nth_multifile_path(path: Path, n: int) -> Optional[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the nth path in the model.
    '''
    # Support the following patterns:
    patterns: List[Tuple[str, str]] = [
        # - x.00.pth, x.01.pth, etc.
        (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'),
        # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc.
        (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'),
        # x.bin, x.bin.1, etc.
        (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}')
    ]
    for regex, replacement in patterns:
        if re.search(regex, path.name):
            new_path = path.with_name(re.sub(regex, replacement, path.name))
            if new_path.exists():
                return new_path
    return None
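
# Illustrative examples (hypothetical file names; a path is only returned if
# it actually exists on disk next to the input):
#   nth_multifile_path(Path('consolidated.00.pth'), 1)  -> consolidated.01.pth
#   nth_multifile_path(Path('x-00001-of-00002.bin'), 2) -> x-00002-of-00002.bin
#   nth_multifile_path(Path('foo.bin'), 1)              -> foo.bin.1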


def find_multifile_paths(path: Path) -> List[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the whole list of paths in the model.
    '''
    ret: List[Path] = []
    for i in itertools.count():
        nth_path = nth_multifile_path(path, i)
        if nth_path is None:
            break
        ret.append(nth_path)
    if not ret:
        # No matches.  This should only happen if the file was named, e.g.,
        # foo.0, and there was no file named foo.  Oh well, try to process it
        # as a single file.
        return [path]
    return ret


def load_some_model(path: Path) -> ModelPlus:
    '''Load a model of any supported format.'''
    # Be extra-friendly and accept either a file or a directory:
    if path.is_dir():
        # Check if it's a set of safetensors files first
        files = list(path.glob("model-00001-of-*.safetensors"))
        if not files:
            # Try the PyTorch patterns too, with lower priority
            globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
            files = [file for glob in globs for file in path.glob(glob)]
        if not files:
            # Try GGML too, but with lower priority, since if both a non-GGML
            # model and a GGML model exist in the same directory, we assume the
            # latter was converted from the former.
            files = list(path.glob("ggml-model*.bin*"))
        if not files:
            raise Exception(f"Can't find model in directory {path}")
        if len(files) > 1:
            raise Exception(f"Found multiple models in {path}, not sure which to pick: {files}")
        path = files[0]

    paths = find_multifile_paths(path)
    models_plus: List[ModelPlus] = []
    for path in paths:
        print(f"Loading model file {path}")
        models_plus.append(lazy_load_file(path))

    model_plus = merge_multifile_models(models_plus)
    return model_plus


def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
    return {name: model[name] for name in TENSORS_LIST if name in model}


def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
    print(f"vocabtype: {vocabtype}")
    # Be extra-friendly and accept either a file or a directory.  Also, if it's
    # a directory, it might be the model directory, and tokenizer.model might
    # be in the parent of that.
    if path.is_dir():
        vocab_file = "tokenizer.model"
        if vocabtype == 'bpe':
            vocab_file = "vocab.json"
        path2 = path / vocab_file
        # Use `.parent` instead of /.. to handle the symlink case better.
        path3 = path.parent / vocab_file
        if path2.exists():
            path = path2
        elif path3.exists():
            path = path3
        else:
            raise FileNotFoundError(
                f"Could not find {vocab_file} in {path} or its parent; "
                "if it's in another directory, pass the directory as --vocab-dir")
    added_tokens_path = path.parent / "added_tokens.json"
    print(f"Loading vocab file {path}")
    return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None,
                              vocabtype)


def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
    namestr = {
        GGMLFileType.AllF32:    "f32",
        GGMLFileType.MostlyF16: "f16",
    }[file_type]
    ret = model_paths[0].parent / f"ggml-model-{namestr}.bin"
    if ret in model_paths:
        sys.stderr.write(
            f"Error: Default output path ({ret}) would overwrite the input. "
            "Please explicitly specify a path using --outfile.\n")
        sys.exit(1)
    return ret


def do_dump_model(model_plus: ModelPlus) -> None:
    print(f"model_plus.paths = {model_plus.paths!r}")
    print(f"model_plus.format = {model_plus.format!r}")
    print(f"model_plus.vocab = {model_plus.vocab!r}")
    for name, lazy_tensor in model_plus.model.items():
        print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}")


def main(args_in: Optional[List[str]] = None) -> None:
    parser = argparse.ArgumentParser(description="Convert a LLaMA model to a GGML compatible file")
    parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model")
    parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
    parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
    parser.add_argument("--outtype", choices=["f32", "f16"], help="output format (default: based on input)")
    parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
    parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
    parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
    parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format (default: spm)")
    args = parser.parse_args(args_in)

    vocab: Vocab
    if args.dump_single:
        model_plus = lazy_load_file(args.model)
        do_dump_model(model_plus)
    elif args.vocab_only:
        vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
        assert args.outfile, "need --outfile if using --vocab-only"
        outfile = args.outfile
        OutputFile.write_vocab_only(outfile, vocab)
        print(f"Wrote {outfile}")
    else:
        model_plus = load_some_model(args.model)
        if args.dump:
            do_dump_model(model_plus)
            return
        if model_plus.vocab is not None and args.vocab_dir is None:
            vocab = model_plus.vocab
        else:
            vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
            vocab = load_vocab(vocab_dir, args.vocabtype)

        params = Params.load(model_plus)
        model = model_plus.model
        model = do_necessary_conversions(model, params)
        output_type = pick_output_type(model, args.outtype)
        model = convert_to_output_type(model, output_type)
        outfile = args.outfile or default_outfile(model_plus.paths, output_type)

        OutputFile.write_all(outfile, params, output_type, model, vocab)
        print(f"Wrote {outfile}")


if __name__ == '__main__':
    main()

convert.py · 96 changed lines · mode changed: executable file → normal file

@@ -133,7 +133,7 @@ TENSORS_SET = set(TENSORS_LIST)
 
 def find_n_mult(n_ff: int, n_embd: int) -> int:
     # hardcoded magic range
-    for n_mult in range(256, 1, -1):
+    for n_mult in range(8192, 1, -1):
         calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
         if calc_ff == n_ff:
             return n_mult
@@ -141,11 +141,12 @@ def find_n_mult(n_ff: int, n_embd: int) -> int:
 
 @dataclass
 class Params:
-    n_vocab: int
-    n_embd:  int
-    n_mult:  int
-    n_head:  int
-    n_layer: int
+    n_vocab:   int
+    n_embd:    int
+    n_mult:    int
+    n_head:    int
+    n_layer:   int
+    n_kv_head: Optional[int]  # This parameter is only used for Llama 2
 
     @staticmethod
     def guessed(model: 'LazyModel') -> 'Params':
@@ -167,11 +168,12 @@ class Params:
         n_head=n_embd // 128 # guessed
 
         return Params(
-            n_vocab = n_vocab,
-            n_embd  = n_embd,
-            n_mult  = 256,
-            n_head  = n_head,
-            n_layer = n_layer,
+            n_vocab   = n_vocab,
+            n_embd    = n_embd,
+            n_mult    = 256,
+            n_head    = n_head,
+            n_layer   = n_layer,
+            n_kv_head = None,
         )
 
     @staticmethod
@@ -183,15 +185,17 @@ class Params:
         n_head  = config["num_attention_heads"];
         n_layer = config["num_hidden_layers"];
         n_ff    = config["intermediate_size"];
+        n_kv_head = config.get("num_key_value_heads")
 
         n_mult = find_n_mult(n_ff, n_embd);
 
         return Params(
-            n_vocab = n_vocab,
-            n_embd  = n_embd,
-            n_mult  = n_mult,
-            n_head  = n_head,
-            n_layer = n_layer,
+            n_vocab   = n_vocab,
+            n_embd    = n_embd,
+            n_mult    = n_mult,
+            n_head    = n_head,
+            n_layer   = n_layer,
+            n_kv_head = n_kv_head,
         )
 
     # LLaMA v2 70B params.json
@@ -200,21 +204,22 @@ class Params:
     def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
         config = json.load(open(config_path))
 
-        n_vocab = config["vocab_size"];
-        n_embd  = config["dim"];
-        n_head  = config["n_heads"];
-        n_layer = config["n_layers"];
-        n_mult  = config["multiple_of"];
+        n_vocab = config["vocab_size"];
+        n_embd  = config["dim"];
+        n_head  = config["n_heads"];
+        n_layer = config["n_layers"];
+        n_mult  = config["multiple_of"];
 
         if n_vocab == -1:
             n_vocab = model["tok_embeddings.weight"].shape[0]
 
         return Params(
-            n_vocab = n_vocab,
-            n_embd  = n_embd,
-            n_mult  = n_mult,
-            n_head  = n_head,
-            n_layer = n_layer,
+            n_vocab   = n_vocab,
+            n_embd    = n_embd,
+            n_mult    = n_mult,
+            n_head    = n_head,
+            n_layer   = n_layer,
+            n_kv_head = None,
         )
 
     @staticmethod
@@ -317,10 +322,12 @@ class GGMLVocab:
 Vocab = Union[SentencePieceVocab, GGMLVocab]
 
 
-def permute(weights: NDArray, n_head: int) -> NDArray:
+def permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray:
+    if n_kv_head is not None and n_head != n_kv_head:
+        n_head //= n_kv_head
     return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
-            .swapaxes(1, 2)
-            .reshape(weights.shape))
+                .swapaxes(1, 2)
+                .reshape(weights.shape))
 
 
 def dequantize_q4(qvalues_pack32: NDArray, scales: NDArray, addends: Optional[NDArray], g_idx: Optional[NDArray]) -> NDArray:
@@ -368,7 +375,7 @@ class Tensor(metaclass=ABCMeta):
     @abstractmethod
     def astype(self, data_type: DataType) -> 'Tensor': ...
     @abstractmethod
-    def permute(self, n_head: int) -> 'Tensor': ...
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'Tensor': ...
     @abstractmethod
     def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
     @abstractmethod
@@ -406,8 +413,8 @@ class UnquantizedTensor(Tensor):
         r = self.ndarray.shape[0] // 3
         return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])
 
-    def permute(self, n_head: int) -> 'UnquantizedTensor':
-        return UnquantizedTensor(permute(self.ndarray, n_head))
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'UnquantizedTensor':
+        return UnquantizedTensor(permute(self.ndarray, n_head, n_kv_head))
 
 
 def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray:
@@ -455,26 +462,27 @@ class GGMLQuantizedTensor(Tensor):
     def to_ggml(self) -> 'GGMLQuantizedTensor':
         return self
 
-    def permute(self, n_head: int) -> 'GGMLQuantizedTensor':
-        return GGMLQuantizedTensor(permute(self.ndarray, n_head), self.shape, self.data_type)
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'GGMLQuantizedTensor':
+        return GGMLQuantizedTensor(permute(self.ndarray, n_head, n_kv_head), self.shape, self.data_type)
 
 
 GGMLCompatibleTensor = Union[UnquantizedTensor, GGMLQuantizedTensor]
 
 
 class DeferredPermutedTensor(Tensor):
-    def __init__(self, base: Tensor, n_head: int) -> None:
+    def __init__(self, base: Tensor, n_head: int, n_kv_head: Optional[int] = None) -> None:
         self.base = base
         self.n_head = n_head
+        self.n_kv_head = n_kv_head
         self.data_type = self.base.data_type
 
     def astype(self, data_type: DataType) -> Tensor:
-        return self.base.astype(data_type).permute(self.n_head)
+        return self.base.astype(data_type).permute(self.n_head, self.n_kv_head)
 
     def to_ggml(self) -> GGMLCompatibleTensor:
-        return self.base.to_ggml().permute(self.n_head)
+        return self.base.to_ggml().permute(self.n_head, self.n_kv_head)
 
-    def permute(self, n_head: int) -> Tensor:
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor:
         raise Exception("shouldn't permute twice")
@@ -566,8 +574,8 @@ class GPTQForLLaMaQuantizedTensor(Tensor):
         ret.data_type = QuantizedDataType(groupsize=new_groupsize, have_addends=True, have_g_idx=False)
         return ret
 
-    def permute(self, n_head: int) -> Tensor:
-        return DeferredPermutedTensor(self, n_head)
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor:
+        return DeferredPermutedTensor(self, n_head, n_kv_head)
 
     def to_ggml(self) -> GGMLQuantizedTensor:
         # The output format looks like this:
@@ -698,10 +706,10 @@ def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus:
     return ModelPlus(model, paths, format, vocab)
 
 
-def permute_lazy(lazy_tensor: LazyTensor, n_head: int) -> LazyTensor:
+def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_kv_head: Optional[int] = None) -> LazyTensor:
     def load() -> Tensor:
-        return lazy_tensor.load().permute(n_head)
-    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)
+        return lazy_tensor.load().permute(n_head, n_kv_head)
+    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_kv_head}) ' + lazy_tensor.description)
 
 def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
     def load() -> Tensor:
@@ -726,7 +734,7 @@ def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
     for i in itertools.count():
         if f"model.layers.{i}.self_attn.q_proj.weight" in model:
             out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
-            out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head)
+            out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_kv_head)
             out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
         elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
             out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
@@ -352,7 +352,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
 #ifdef GGML_USE_CUBLAS
             params.main_gpu = std::stoi(argv[i]);
 #else
-            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n");
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n");
 #endif
         } else if (arg == "--tensor-split" || arg == "-ts") {
             if (++i >= argc) {
@@ -376,13 +376,19 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 }
             }
 #else
-            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
 #endif // GGML_USE_CUBLAS
+        } else if (arg == "--mul-mat-q" || arg == "-mmq") {
+#ifdef GGML_USE_CUBLAS
+            params.mul_mat_q = true;
+#else
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n");
+#endif // GGML_USE_CUBLAS
         } else if (arg == "--low-vram" || arg == "-lv") {
 #ifdef GGML_USE_CUBLAS
             params.low_vram = true;
 #else
-            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
 #endif // GGML_USE_CUBLAS
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
@@ -402,8 +408,14 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.antiprompt.push_back(argv[i]);
         } else if (arg == "--perplexity") {
             params.perplexity = true;
-        } else if (arg == "--perplexity-lines") {
-            params.perplexity_lines = true;
+        } else if (arg == "--hellaswag") {
+            params.hellaswag = true;
+        } else if (arg == "--hellaswag-tasks") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.hellaswag_tasks = std::stoi(argv[i]);
         } else if (arg == "--ignore-eos") {
             params.logit_bias[llama_token_eos()] = -INFINITY;
         } else if (arg == "--no-penalize-nl") {
@@ -559,8 +571,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, "                        not recommended: doubles context memory required and no measurable increase in quality\n");
     fprintf(stdout, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
     fprintf(stdout, "  --perplexity          compute perplexity over each ctx window of the prompt\n");
-    fprintf(stdout, "  --perplexity-lines    compute perplexity over each line of the prompt\n");
-    fprintf(stdout, "  --keep                number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
+    fprintf(stdout, "  --hellaswag           compute HellaSwag score over random tasks from datafile supplied with -f\n");
+    fprintf(stdout, "  --hellaswag-tasks N   number of tasks to use when computing the HellaSwag score (default: %d)\n", params.hellaswag_tasks);
+    fprintf(stdout, "  --keep N              number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
     fprintf(stdout, "  --chunks N            max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
     if (llama_mlock_supported()) {
         fprintf(stdout, "  --mlock               force system to keep model in RAM rather than swapping or compressing\n");
@@ -578,6 +591,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
     fprintf(stdout, "  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n" );
     fprintf(stdout, "  -lv, --low-vram       don't allocate VRAM scratch buffer\n" );
+    fprintf(stdout, "  -mmq, --mul-mat-q     use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n" );
+    fprintf(stdout, "                        Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n" );
+    fprintf(stdout, "                        is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n" );
 #endif
     fprintf(stdout, "  --mtest               compute maximum memory usage\n");
     fprintf(stdout, "  --export              export the computation graph to 'llama.ggml'\n");
@@ -630,6 +646,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     lparams.main_gpu     = params.main_gpu;
     lparams.tensor_split = params.tensor_split;
     lparams.low_vram     = params.low_vram;
+    lparams.mul_mat_q    = params.mul_mat_q;
     lparams.seed         = params.seed;
     lparams.f16_kv       = params.memory_f16;
     lparams.use_mmap     = params.use_mmap;
@@ -70,7 +70,11 @@ struct gpt_params {
     std::string lora_adapter = "";  // lora adapter path
     std::string lora_base    = "";  // base model path for the lora adapter
 
-    bool low_vram          = false; // if true, reduce VRAM usage at the cost of performance
+    bool   hellaswag       = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
+    size_t hellaswag_tasks = 400;   // number of tasks to use when computing the HellaSwag score
+
+    bool low_vram          = false; // if true, reduce VRAM usage at the cost of performance
+    bool mul_mat_q         = false; // if true, use experimental mul_mat_q kernels
     bool memory_f16        = true;  // use f16 instead of f32 for memory kv
     bool random_prompt     = false; // do not randomize prompt if none provided
     bool use_color         = false; // use color to distinguish generations and inputs
@@ -86,7 +90,6 @@ struct gpt_params {
     bool instruct          = false; // instruction mode (used for Alpaca models)
     bool penalize_nl       = true;  // consider newlines as a repeatable token
     bool perplexity        = false; // compute perplexity over the prompt
-    bool perplexity_lines  = false; // compute perplexity over each line of the prompt
     bool use_mmap          = true;  // use mmap for faster loads
     bool use_mlock         = false; // use mlock to keep model in memory
     bool mem_test          = false; // compute maximum memory usage
@@ -1,393 +0,0 @@
#include "ggml.h"

#include <cstdio>
#include <cinttypes>
#include <string>
#include <sstream>
#include <fstream>
#include <vector>

template<typename T>
static std::string to_string(const T & val) {
    std::stringstream ss;
    ss << val;
    return ss.str();
}

void gguf_ex_write_str(std::ofstream & fout, const std::string & val) {
    const int32_t n = val.size();
    fout.write((const char *) &n, sizeof(n));
    fout.write(val.c_str(), n);
}

void gguf_ex_write_i32(std::ofstream & fout, int32_t val) {
    fout.write((const char *) &val, sizeof(val));
}

void gguf_ex_write_u64(std::ofstream & fout, size_t val) {
    fout.write((const char *) &val, sizeof(val));
}

template<typename T>
void gguf_ex_write_val(std::ofstream & fout, const std::string & key, enum gguf_type type, const T & val) {
    gguf_ex_write_str(fout, key);
    fout.write((const char *) &type, sizeof(type));
    fout.write((const char *) &val, sizeof(val));

    fprintf(stdout, "%s: write param: %s = %s\n", __func__, key.c_str(), to_string(val).c_str());
}

template<>
void gguf_ex_write_val<std::string>(std::ofstream & fout, const std::string & key, enum gguf_type type, const std::string & val) {
    gguf_ex_write_str(fout, key);
    fout.write((const char *) &type, sizeof(type));

    const int32_t n = val.size();
    fout.write((const char *) &n, sizeof(n));
    fout.write(val.c_str(), n);

    fprintf(stdout, "%s: write param: %s = %s\n", __func__, key.c_str(), val.c_str());
}

template<typename T>
void gguf_ex_write_arr(std::ofstream & fout, const std::string & key, enum gguf_type type, const std::vector<T> & val) {
    gguf_ex_write_str(fout, key);
    {
        const enum gguf_type tarr = GGUF_TYPE_ARRAY;
        fout.write((const char *) &tarr, sizeof(tarr));
    }

    const int32_t n = val.size();
    fout.write((const char *) &type, sizeof(type));
    fout.write((const char *) &n, sizeof(n));
    fout.write((const char *) val.data(), n * sizeof(T));

    fprintf(stdout, "%s: write param: %s = [", __func__, key.c_str());
    for (int i = 0; i < n; ++i) {
        fprintf(stdout, "%s", to_string(val[i]).c_str());
        if (i < n - 1) {
            fprintf(stdout, ", ");
        }
    }
    fprintf(stdout, "]\n");
}

template<>
void gguf_ex_write_arr<std::string>(std::ofstream & fout, const std::string & key, enum gguf_type type, const std::vector<std::string> & val) {
    gguf_ex_write_str(fout, key);
    {
        const enum gguf_type tarr = GGUF_TYPE_ARRAY;
        fout.write((const char *) &tarr, sizeof(tarr));
    }

    const int32_t n = val.size();
    fout.write((const char *) &type, sizeof(type));
    fout.write((const char *) &n, sizeof(n));
    for (int i = 0; i < n; ++i) {
        const int32_t nstr = val[i].size();
        fout.write((const char *) &nstr, sizeof(nstr));
        fout.write(val[i].c_str(), nstr);
    }

    fprintf(stdout, "%s: write param: %s = [", __func__, key.c_str());
    for (int i = 0; i < n; ++i) {
        fprintf(stdout, "%s", val[i].c_str());
        if (i < n - 1) {
            fprintf(stdout, ", ");
        }
    }
    fprintf(stdout, "]\n");
}

bool gguf_ex_write(const std::string & fname) {
    std::ofstream fout(fname.c_str(), std::ios::binary);

    {
        const int32_t magic = GGUF_MAGIC;
        fout.write((const char *) &magic, sizeof(magic));
    }

    {
        const int32_t version = GGUF_VERSION;
        fout.write((const char *) &version, sizeof(version));
    }

    // NOTE: these have to match the output below!
    const int n_tensors = 10;
    const int n_kv      = 12;

    fout.write((const char*) &n_tensors, sizeof(n_tensors));
    fout.write((const char*) &n_kv, sizeof(n_kv));

    fprintf(stdout, "%s: write header\n", __func__);

    // kv data
    {
        gguf_ex_write_val< uint8_t>(fout, "some.parameter.uint8",   GGUF_TYPE_UINT8,   0x12);
        gguf_ex_write_val<  int8_t>(fout, "some.parameter.int8",    GGUF_TYPE_INT8,   -0x13);
        gguf_ex_write_val<uint16_t>(fout, "some.parameter.uint16",  GGUF_TYPE_UINT16,  0x1234);
        gguf_ex_write_val< int16_t>(fout, "some.parameter.int16",   GGUF_TYPE_INT16,  -0x1235);
        gguf_ex_write_val<uint32_t>(fout, "some.parameter.uint32",  GGUF_TYPE_UINT32,  0x12345678);
        gguf_ex_write_val< int32_t>(fout, "some.parameter.int32",   GGUF_TYPE_INT32,  -0x12345679);

        gguf_ex_write_val<float>   (fout, "some.parameter.float32", GGUF_TYPE_FLOAT32, 0.123456789f);
        gguf_ex_write_val<bool>    (fout, "some.parameter.bool",    GGUF_TYPE_BOOL,    true);

        gguf_ex_write_val<std::string>(fout, "some.parameter.string", GGUF_TYPE_STRING, "hello world");

        gguf_ex_write_arr<int16_t>    (fout, "some.parameter.arr.i16", GGUF_TYPE_INT16,   { 1, 2, 3, 4, });
        gguf_ex_write_arr<float>      (fout, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, { 3.145f, 2.718f, 1.414f, });
        gguf_ex_write_arr<std::string>(fout, "some.parameter.arr.str", GGUF_TYPE_STRING,  { "hello", "world", "!" });
    }

    uint64_t offset_tensor = 0;

    struct ggml_init_params params = {
        /*.mem_size   =*/ 128ull*1024ull*1024ull,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };

    struct ggml_context * ctx_data = ggml_init(params);

    // tensor infos
    for (int i = 0; i < n_tensors; ++i) {
        const std::string name = "tensor_" + to_string(i);

        int64_t ne[GGML_MAX_DIMS] = { 1 };
        int32_t n_dims = rand() % GGML_MAX_DIMS + 1;

        for (int j = 0; j < n_dims; ++j) {
            ne[j] = rand() % 10 + 1;
        }

        struct ggml_tensor * cur = ggml_new_tensor(ctx_data, GGML_TYPE_F32, n_dims, ne);
        ggml_set_name(cur, name.c_str());

        {
            float * data = (float *) cur->data;
            for (int j = 0; j < ggml_nelements(cur); ++j) {
                data[j] = 100 + i;
            }
        }

        fprintf(stdout, "%s: tensor: %s, %d dims, ne = [", __func__, name.c_str(), n_dims);
        for (int j = 0; j < 4; ++j) {
            fprintf(stdout, "%s%3d", j == 0 ? "" : ", ", (int) cur->ne[j]);
        }
        fprintf(stdout, "], offset_tensor = %6" PRIu64 "\n", offset_tensor);

        gguf_ex_write_str(fout, name);
        gguf_ex_write_i32(fout, n_dims);
        for (int j = 0; j < n_dims; ++j) {
            gguf_ex_write_i32(fout, cur->ne[j]);
        }
        gguf_ex_write_i32(fout, cur->type);
        gguf_ex_write_u64(fout, offset_tensor);

        offset_tensor += GGML_PAD(ggml_nbytes(cur), GGUF_DEFAULT_ALIGNMENT);
    }

    const uint64_t offset_data = GGML_PAD((uint64_t) fout.tellp(), GGUF_DEFAULT_ALIGNMENT);

    fprintf(stdout, "%s: data offset = %" PRIu64 "\n", __func__, offset_data);

    {
        const size_t pad = offset_data - fout.tellp();

        for (size_t j = 0; j < pad; ++j) {
            fout.put(0);
        }
    }

    for (int i = 0; i < n_tensors; ++i) {
        fprintf(stdout, "%s: writing tensor %d data\n", __func__, i);

        const std::string name = "tensor_" + to_string(i);

        struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str());

        fout.write((const char *) cur->data, ggml_nbytes(cur));

        {
            const size_t pad = GGML_PAD(ggml_nbytes(cur), GGUF_DEFAULT_ALIGNMENT) - ggml_nbytes(cur);

            for (size_t j = 0; j < pad; ++j) {
                fout.put(0);
            }
        }
    }

    fout.close();

    fprintf(stdout, "%s: wrote file '%s'\n", __func__, fname.c_str());

    ggml_free(ctx_data);

    return true;
}
// just read tensor info
|
||||
bool gguf_ex_read_0(const std::string & fname) {
|
||||
struct gguf_init_params params = {
|
||||
/*.no_alloc = */ false,
|
||||
/*.ctx = */ NULL,
|
||||
};
|
||||
|
||||
struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);
|
||||
|
||||
fprintf(stdout, "%s: version: %d\n", __func__, gguf_get_version(ctx));
|
||||
fprintf(stdout, "%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
|
||||
fprintf(stdout, "%s: data offset: %zu\n", __func__, gguf_get_data_offset(ctx));
|
||||
|
||||
// kv
|
||||
{
|
||||
const int n_kv = gguf_get_n_kv(ctx);
|
||||
|
||||
fprintf(stdout, "%s: n_kv: %d\n", __func__, n_kv);
|
||||
|
||||
for (int i = 0; i < n_kv; ++i) {
|
||||
const char * key = gguf_get_key(ctx, i);
|
||||
|
||||
fprintf(stdout, "%s: kv[%d]: key = %s\n", __func__, i, key);
|
||||
}
|
||||
}
|
||||
|
||||
// tensor info
|
||||
{
|
||||
const int n_tensors = gguf_get_n_tensors(ctx);
|
||||
|
||||
fprintf(stdout, "%s: n_tensors: %d\n", __func__, n_tensors);
|
||||
|
||||
for (int i = 0; i < n_tensors; ++i) {
|
||||
const char * name = gguf_get_tensor_name (ctx, i);
|
||||
const size_t offset = gguf_get_tensor_offset(ctx, i);
|
||||
|
||||
fprintf(stdout, "%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
|
||||
}
|
||||
}
|
||||
|
||||
gguf_free(ctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
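The readers walk the KV store by index; a lookup for one specific key can be built from the same accessors. This is a sketch (the helper name is hypothetical, not part of the example):

```
#include <cstring>

// sketch: return the index of `key` in the KV store, or -1 if it is absent
static int gguf_find_key_idx(struct gguf_context * ctx, const char * key) {
    const int n_kv = gguf_get_n_kv(ctx);
    for (int i = 0; i < n_kv; ++i) {
        if (strcmp(gguf_get_key(ctx, i), key) == 0) {
            return i;
        }
    }
    return -1;
}
```
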
// read and create ggml_context containing the tensors and their data
bool gguf_ex_read_1(const std::string & fname) {
    struct ggml_context * ctx_data = NULL;

    struct gguf_init_params params = {
        /*.no_alloc = */ false,
        /*.ctx      = */ &ctx_data,
    };

    struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);

    fprintf(stdout, "%s: version:     %d\n",  __func__, gguf_get_version(ctx));
    fprintf(stdout, "%s: alignment:   %zu\n", __func__, gguf_get_alignment(ctx));
    fprintf(stdout, "%s: data offset: %zu\n", __func__, gguf_get_data_offset(ctx));

    // kv
    {
        const int n_kv = gguf_get_n_kv(ctx);

        fprintf(stdout, "%s: n_kv: %d\n", __func__, n_kv);

        for (int i = 0; i < n_kv; ++i) {
            const char * key = gguf_get_key(ctx, i);

            fprintf(stdout, "%s: kv[%d]: key = %s\n", __func__, i, key);
        }
    }

    // tensor info
    {
        const int n_tensors = gguf_get_n_tensors(ctx);

        fprintf(stdout, "%s: n_tensors: %d\n", __func__, n_tensors);

        for (int i = 0; i < n_tensors; ++i) {
            const char * name   = gguf_get_tensor_name  (ctx, i);
            const size_t offset = gguf_get_tensor_offset(ctx, i);

            fprintf(stdout, "%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
        }
    }

    // data
    {
        const int n_tensors = gguf_get_n_tensors(ctx);

        for (int i = 0; i < n_tensors; ++i) {
            fprintf(stdout, "%s: reading tensor %d data\n", __func__, i);

            const std::string name = "tensor_" + to_string(i);

            struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str());

            fprintf(stdout, "%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n",
                    __func__, i, cur->n_dims, cur->name, cur->data);

            // check data
            {
                const float * data = (const float *) cur->data;
                for (int j = 0; j < ggml_nelements(cur); ++j) {
                    if (data[j] != 100 + i) {
                        fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]);
                        return false;
                    }
                }
            }
        }
    }

    fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data));

    ggml_free(ctx_data);
    gguf_free(ctx);

    return true;
}

// read just the tensor info and mmap the data in user code
bool gguf_ex_read_2(const std::string & fname) {
    struct ggml_context * ctx_data = NULL;

    struct gguf_init_params params = {
        /*.no_alloc = */ true,
        /*.ctx      = */ &ctx_data,
    };

    struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);

    // TODO: mmap based on tensor infos

    fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data));

    ggml_free(ctx_data);
    gguf_free(ctx);

    return true;
}

int main(int argc, char ** argv) {
    if (argc < 3) {
        fprintf(stdout, "usage: %s data.gguf r|w\n", argv[0]);
        return -1;
    }

    const std::string fname(argv[1]);
    const std::string mode (argv[2]);

    GGML_ASSERT((mode == "r" || mode == "w") && "mode must be r or w");

    if (mode == "w") {
        GGML_ASSERT(gguf_ex_write(fname) && "failed to write gguf file");
    } else if (mode == "r") {
        GGML_ASSERT(gguf_ex_read_0(fname) && "failed to read gguf file");
        GGML_ASSERT(gguf_ex_read_1(fname) && "failed to read gguf file");
        GGML_ASSERT(gguf_ex_read_2(fname) && "failed to read gguf file");
    }

    return 0;
}
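Assuming the example builds to a binary named `gguf`, a typical round trip is `./gguf test.gguf w` to write the sample file, followed by `./gguf test.gguf r`, which runs all three readers against it in sequence.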
@@ -202,9 +202,9 @@ Example usage: `--top-p 0.95`

- `--tfs N`: Enable tail free sampling with parameter z (default: 1.0, 1.0 = disabled).

Tail free sampling (TFS) is a text generation technique that aims to reduce the impact of less likely tokens, which may be less relevant, less coherent, or nonsensical, on the output. The method adjusts the logits (token probabilities) by raising them to the power of the parameter z. A higher value of z (e.g., 2.0) will further suppress less likely tokens from the tail of the distribution, while a value of 1.0 disables the effect of TFS. By setting the parameter z, you can control how much the probabilities of less likely tokens are reduced.
Tail free sampling (TFS) is a text generation technique that aims to reduce the impact of less likely tokens, which may be less relevant, less coherent, or nonsensical, on the output. Similar to Top-P it tries to determine the bulk of the most likely tokens dynamically. But TFS filters out logits based on the second derivative of their probabilities. Adding tokens is stopped after the sum of the second derivatives reaches the parameter z. In short: TFS looks at how quickly the probabilities of the tokens decrease and cuts off the tail of unlikely tokens using the parameter z. Typical values for z are in the range of 0.9 to 0.95. A value of 1.0 would include all tokens, and thus disables the effect of TFS.

Example usage: `--tfs 2.0`
Example usage: `--tfs 0.95`
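To make the updated description concrete, here is a minimal, self-contained sketch of the TFS cutoff (an illustration of the idea, not the llama.cpp sampler): the sorted probabilities' absolute second differences are normalized and accumulated until their sum exceeds z, and everything past that point is dropped.

```
#include <algorithm>
#include <cmath>
#include <functional>
#include <vector>

// keep the head of a sorted probability distribution according to TFS;
// the caller re-normalizes the surviving probabilities before sampling
static std::vector<float> tfs_filter(std::vector<float> probs, float z) {
    std::sort(probs.begin(), probs.end(), std::greater<float>());

    const size_t n = probs.size();
    if (z >= 1.0f || n <= 2) {
        return probs; // z = 1.0 disables TFS
    }

    // absolute second derivatives of the sorted distribution
    std::vector<float> d2(n - 2);
    float sum = 0.0f;
    for (size_t i = 0; i < n - 2; ++i) {
        d2[i] = std::fabs(probs[i] - 2.0f*probs[i + 1] + probs[i + 2]);
        sum += d2[i];
    }
    if (sum == 0.0f) {
        return probs; // flat tail: nothing to cut
    }

    // accumulate normalized curvature mass until it exceeds z
    size_t last = n - 2;
    float cum = 0.0f;
    for (size_t i = 0; i < d2.size(); ++i) {
        cum += d2[i] / sum;
        if (cum > z) {
            last = i;
            break;
        }
    }

    probs.resize(last + 1);
    return probs;
}
```
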

### Locally Typical Sampling

@@ -121,8 +121,23 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
    printf("\n");
}

void perplexity_lines(llama_context * ctx, const gpt_params & params) {
    // Calculates perplexity over each line of the prompt
void hellaswag_score(llama_context * ctx, const gpt_params & params) {
    // Calculates hellaswag score (acc_norm) from prompt
    //
    // Data extracted from the HellaSwag validation dataset (MIT license) https://github.com/rowanz/hellaswag/blob/master/data/hellaswag_val.jsonl
    // All used data fields are preprocessed as in https://github.com/EleutherAI/lm-evaluation-harness/blob/df3da98c5405deafd519c2ddca52bb7c3fe36bef/lm_eval/tasks/hellaswag.py#L62-L68
    //
    // All 10042 tasks should be extracted to keep the results standardized like other implementations.
    //
    // Datafile layout:
    //   ['??'] denotes json fields
    //   6 lines per task:
    //   ['activity_label'] + ": " + ['ctx'] - The first part of the query, the context
    //   ['label']                           - The index of the best common sense ending, aka the gold ending
    //   ['endings'][0]                      - Endings added to the first part of the query
    //   ['endings'][1]
    //   ['endings'][2]
    //   ['endings'][3]

    std::vector<std::string> prompt_lines;
    std::istringstream strstream(params.prompt);
@@ -132,63 +147,149 @@ void perplexity_lines(llama_context * ctx, const gpt_params & params) {
        prompt_lines.push_back(line);
    }

    if (prompt_lines.size() % 6 != 0) {
        fprintf(stderr, "%s : number of lines in prompt not a multiple of 6.\n", __func__);
        return;
    }

    size_t hs_task_count = prompt_lines.size()/6;
    fprintf(stderr, "%s : loaded %lu tasks from prompt.\n", __func__, hs_task_count);

    // This is needed as usual for LLaMA models
    bool prepend_bos = true;

    // Number of tasks to use when computing the score
    if (params.hellaswag_tasks < hs_task_count) {
        hs_task_count = params.hellaswag_tasks;
    }

    // The tasks should be randomized so the score stabilizes quickly.
    bool randomize_tasks = true;

    // The random seed should not impact the final result if the computation is done over enough tasks, so kept hardcoded for now
    std::mt19937 rng(1);

    // Dataholder for hellaswag tasks
    struct hs_data_t {
        std::string context;
        size_t gold_ending_idx;
        std::string ending[4];
        size_t ending_logprob_count[4];
        double ending_logprob[4];
    };

    fprintf(stderr, "%s : selecting %lu %s tasks.\n", __func__, hs_task_count, (randomize_tasks ? "randomized" : "the first"));

    // Select and read data from prompt lines
    hs_data_t * hs_data = new hs_data_t[hs_task_count];
    for (size_t i = 0; i < hs_task_count; i++) {
        size_t idx = i;

        // Select a random example of those left in the prompt
        if (randomize_tasks) {
            std::uniform_int_distribution<size_t> dist(0, prompt_lines.size()/6 - 1);
            idx = dist(rng);
        }

        hs_data[i].context = prompt_lines[idx*6];
        hs_data[i].gold_ending_idx = std::stoi(prompt_lines[idx*6 + 1]);
        for (size_t j = 0; j < 4; j++) {
            hs_data[i].ending[j] = " " + prompt_lines[idx*6 + 2 + j];
        }

        // Delete the selected random example from the prompt
        if (randomize_tasks) {
            prompt_lines.erase(std::next(prompt_lines.begin(), idx*6), std::next(prompt_lines.begin(), idx*6 + 6));
        }
    }

    fprintf(stderr, "%s : calculating hellaswag score over selected tasks.\n", __func__);
    printf("\ntask\tacc_norm\n");

    double acc = 0.0f;
    const int n_vocab = llama_n_vocab(ctx);

    int counttotal = 0;
    size_t n_lines = prompt_lines.size();
    for (size_t task_idx = 0; task_idx < hs_task_count; task_idx++) {

        double nll = 0.0;
        // Tokenize the context to count tokens
        std::vector<int> context_embd = ::llama_tokenize(ctx, hs_data[task_idx].context, prepend_bos);
        size_t context_size = context_embd.size();

        fprintf(stderr, "%s: calculating perplexity over %lu lines\n", __func__, n_lines);
        for (size_t ending_idx = 0; ending_idx < 4; ending_idx++) {

            printf("\nLine\tPPL line\tPPL cumulative\n");
            // Tokenize the query
            std::vector<int> query_embd = ::llama_tokenize(ctx, hs_data[task_idx].context + hs_data[task_idx].ending[ending_idx], prepend_bos);
            size_t query_size = query_embd.size();

            for (size_t i = 0; i < n_lines; ++i) {
            // Stop if query won't fit the ctx window
            if (query_size > (size_t)params.n_ctx) {
                fprintf(stderr, "%s : number of tokens in query %lu > n_ctx\n", __func__, query_size);
                return;
            }

            // Tokenize and insert BOS at start
            std::vector<int> batch_embd = ::llama_tokenize(ctx, prompt_lines[i], true);
            // Speedup small evaluations by evaluating at least 32 tokens
            if (query_size < 32) {
                query_embd.resize(32);
            }

            size_t batch_size = batch_embd.size();
            // Evaluate the query
            if (llama_eval(ctx, query_embd.data(), query_embd.size(), 0, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return;
            }

            // Stop if line is too long
            if (batch_size > (size_t)params.n_ctx) {
                fprintf(stderr, "%s : tokens in line %lu > n_ctx\n", __func__, i);
                return;
            const auto query_logits = llama_get_logits(ctx);
            std::vector<float> logits;
            logits.insert(logits.end(), query_logits, query_logits + query_size * n_vocab);

            hs_data[task_idx].ending_logprob_count[ending_idx] = 0;
            hs_data[task_idx].ending_logprob[ending_idx] = 0.0f;

            // Calculate the logprobs over the ending
            for (size_t j = context_size - 1; j < query_size - 1; j++) {
                // Calculate probability of next token, given the previous ones.
                const std::vector<float> tok_logits(
                    logits.begin() + (j + 0) * n_vocab,
                    logits.begin() + (j + 1) * n_vocab);

                const float prob = softmax(tok_logits)[query_embd[j + 1]];

                hs_data[task_idx].ending_logprob[ending_idx] += std::log(prob);
                hs_data[task_idx].ending_logprob_count[ending_idx]++;
            }

            // Calculate the mean token logprob for acc_norm
            hs_data[task_idx].ending_logprob[ending_idx] /= hs_data[task_idx].ending_logprob_count[ending_idx];

            // printf("task %lu, ending %lu, whole_len %lu, context_len %lu, ending_logprob_count %lu, ending_logprob %.4f\n",
            //        task_idx, ending_idx, whole_size, context_size, hs_data[task_idx].ending_logprob_count[ending_idx], hs_data[task_idx].ending_logprob[ending_idx]);
        }

        if (llama_eval(ctx, batch_embd.data(), batch_size, 0, params.n_threads)) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return;
        // Find the ending with maximum logprob
        size_t ending_logprob_max_idx = -1;
        double ending_logprob_max_val = -INFINITY;
        for (size_t j = 0; j < 4; j++) {
            if (hs_data[task_idx].ending_logprob[j] > ending_logprob_max_val) {
                ending_logprob_max_idx = j;
                ending_logprob_max_val = hs_data[task_idx].ending_logprob[j];
            }
        }

        const auto batch_logits = llama_get_logits(ctx);
        std::vector<float> logits;
        logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
        // printf("max logprob ending idx %lu, gold ending idx %lu\n", ending_logprob_max_idx, hs_data[task_idx].gold_ending_idx);

        double nllline = 0.0;
        int countline = 0;

        // Perplexity over second half of the line
        for (size_t j = batch_size/2; j < batch_size - 1; ++j) {
            // Calculate probability of next token, given the previous ones.
            const std::vector<float> tok_logits(
                logits.begin() + (j + 0) * n_vocab,
                logits.begin() + (j + 1) * n_vocab);

            const float prob = softmax(tok_logits)[batch_embd[j + 1]];

            nllline += -std::log(prob);
            ++countline;
        // If the gold ending got the maximum logprob, add one accuracy point
        if (ending_logprob_max_idx == hs_data[task_idx].gold_ending_idx) {
            acc += 1.0;
        }

        nll += nllline;
        counttotal += countline;

        // perplexity is e^(average negative log-likelihood)
        printf("%lu\t%.8lf\t%.8lf\n", i + 1, std::exp(nllline/countline), std::exp(nll / counttotal));
        // Print the accumulated accuracy mean x 100
        printf("%li\t%.8lf\n", task_idx + 1, acc/double(task_idx + 1)*100.0);
        fflush(stdout);
    }

    delete [] hs_data;

    printf("\n");
}

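In other words, each ending is scored by its length-normalized log-likelihood, `(1/m) * sum_j log p(token_j | context, previous ending tokens)` over its `m` ending tokens; a task counts as correct when the gold ending attains the maximum score, and the printed `acc_norm` column is the running mean of that accuracy times 100.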
@@ -240,8 +341,8 @@ int main(int argc, char ** argv) {
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    if (params.perplexity_lines) {
        perplexity_lines(ctx, params);
    if (params.hellaswag) {
        hellaswag_score(ctx, params);
    } else {
        perplexity(ctx, params);
    }

@@ -26,6 +26,7 @@ int main(int argc, char ** argv) {
    auto lparams = llama_context_default_params();

    lparams.n_ctx    = params.n_ctx;
    lparams.n_gqa    = params.n_gqa;
    lparams.seed     = params.seed;
    lparams.f16_kv   = params.memory_f16;
    lparams.use_mmap = params.use_mmap;

examples/server-llama2-13B.sh (new file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash

set -e

cd "$(dirname "$0")/.." || exit

# Specify the model you want to use here:
MODEL="${MODEL:-./models/llama-2-13b-chat.ggmlv3.q5_K_M.bin}"
PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat-system.txt}

# Adjust to the number of CPU cores you want to use.
N_THREAD="${N_THREAD:-12}"

# Note: you can also override the generation options by specifying them on the command line:
GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 4096 --batch-size 1024}"


# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
./server $GEN_OPTIONS \
    --model "$MODEL" \
    --threads "$N_THREAD" \
    --rope-freq-scale 1.0 \
    "$@"

# I used this to test the model with mps, but omitted it from the general purpose. If you want to use it, just specify it on the command line.
# -ngl 1 \
examples/server/chat-llama2.sh (new file, 109 lines)
@@ -0,0 +1,109 @@
#!/bin/bash

API_URL="${API_URL:-http://127.0.0.1:8080}"

CHAT=(
    "Hello, Assistant."
    "Hello. How may I help you today?"
)

INSTRUCTION="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."

trim() {
    shopt -s extglob
    set -- "${1##+([[:space:]])}"
    printf "%s" "${1%%+([[:space:]])}"
}

trim_trailing() {
    shopt -s extglob
    printf "%s" "${1%%+([[:space:]])}"
}

format_prompt() {
    if [[ "${#CHAT[@]}" -eq 0 ]]; then
        echo -n "[INST] <<SYS>>\n${INSTRUCTION}\n<</SYS>>"
    else
        LAST_INDEX=$(( ${#CHAT[@]} - 1 ))
        echo -n "${CHAT[$LAST_INDEX]}\n[INST] $1 [/INST]"
    fi
}
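# For reference, format_prompt follows the Llama 2 chat template: with an
# empty CHAT it emits the system header "[INST] <<SYS>>\n...\n<</SYS>>";
# on later turns it emits the previous answer followed by
# "[INST] <question> [/INST]" (hypothetical question shown):
#   Hello. How may I help you today?\n[INST] What is llama.cpp? [/INST]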

tokenize() {
    curl \
        --silent \
        --request POST \
        --url "${API_URL}/tokenize" \
        --header "Content-Type: application/json" \
        --data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \
    | jq '.tokens[]'
}

N_KEEP=$(tokenize "[INST] <<SYS>>\n${INSTRUCTION}\n<</SYS>>" | wc -l)

chat_completion() {
    PROMPT="$(trim_trailing "$(format_prompt "$1")")"
    DATA="$(echo -n "$PROMPT" | jq -Rs --argjson n_keep $N_KEEP '{
        prompt: .,
        temperature: 0.2,
        top_k: 40,
        top_p: 0.9,
        n_keep: $n_keep,
        n_predict: 1024,
        stop: ["[INST]"],
        stream: true
    }')"

    # Create a temporary file to hold the Python output
    TEMPFILE=$(mktemp)

    exec 3< <(curl \
        --silent \
        --no-buffer \
        --request POST \
        --url "${API_URL}/completion" \
        --header "Content-Type: application/json" \
        --data-raw "${DATA}")

    python -c "
import json
import sys

answer = ''
while True:
    line = sys.stdin.readline()
    if not line:
        break
    if line.startswith('data: '):
        json_content = line[6:].strip()
        content = json.loads(json_content)['content']
        sys.stdout.write(content)
        sys.stdout.flush()
        answer += content

answer = answer.rstrip('\n')

# Write the answer to the temporary file
with open('$TEMPFILE', 'w') as f:
    f.write(answer)
" <&3

    exec 3<&-

    # Read the answer from the temporary file
    ANSWER=$(cat $TEMPFILE)

    # Clean up the temporary file
    rm $TEMPFILE

    printf "\n"

    CHAT+=("$1" "$(trim "$ANSWER")")
}

while true; do
    echo -en "\033[0;32m"  # Green color
    read -r -e -p "> " QUESTION
    echo -en "\033[0m"     # Reset color
    chat_completion "${QUESTION}"
done
@@ -631,6 +631,9 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
    fprintf(stdout, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
    fprintf(stdout, "  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n");
    fprintf(stdout, "  -lv, --low-vram       don't allocate VRAM scratch buffer\n");
    fprintf(stdout, "  -mmq, --mul-mat-q     use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n");
    fprintf(stdout, "                        Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n");
    fprintf(stdout, "                        is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n");
#endif
    fprintf(stdout, "  -m FNAME, --model FNAME\n");
    fprintf(stdout, "                        model path (default: %s)\n", params.model.c_str());
@@ -827,7 +830,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
            }
        }
#else
        LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.", {});
        LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
#endif // GGML_USE_CUBLAS
    }
    else if (arg == "--low-vram" || arg == "-lv")
@@ -835,7 +838,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
#ifdef GGML_USE_CUBLAS
        params.low_vram = true;
#else
        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
        LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n", {});
#endif // GGML_USE_CUBLAS
    }
    else if (arg == "--mul-mat-q" || arg == "-mmq")
    {
#ifdef GGML_USE_CUBLAS
        params.mul_mat_q = true;
#else
        LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n", {});
#endif // GGML_USE_CUBLAS
    }
    else if (arg == "--main-gpu" || arg == "-mg")

ggml-alloc.c (new file, 541 lines)
@@ -0,0 +1,541 @@
#include "ggml-alloc.h"
|
||||
#include "ggml.h"
|
||||
#include <assert.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#define UNUSED(x) (void)(x)
|
||||
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
||||
|
||||
//#define GGML_ALLOCATOR_DEBUG
|
||||
|
||||
//#define AT_PRINTF printf
|
||||
#define AT_PRINTF(...) ((void)0)
|
||||
|
||||
struct hash_node {
|
||||
struct ggml_tensor * t;
|
||||
int n_children;
|
||||
int n_views;
|
||||
};
|
||||
|
||||
static size_t hash(void * p) {
|
||||
return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
|
||||
}
|
||||
|
||||
static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_tensor * t) {
|
||||
size_t h = hash(t);
|
||||
|
||||
// linear probing
|
||||
size_t i = h;
|
||||
while (hash_table[i].t != NULL) {
|
||||
if (hash_table[i].t == t) {
|
||||
return &hash_table[i];
|
||||
}
|
||||
i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
|
||||
if (i == h) {
|
||||
// hash table is full
|
||||
GGML_ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
hash_table[i].t = t;
|
||||
return &hash_table[i];
|
||||
}
|
||||
|
||||
// TODO: GGML_PAD ?
|
||||
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
|
||||
assert(alignment && !(alignment & (alignment - 1))); // power of 2
|
||||
size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
|
||||
return offset + align;
|
||||
}
|
||||
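// worked example for aligned_offset() above, with alignment 16 (illustrative values):
//   aligned_offset(NULL,  0, 16) ==  0   (already aligned)
//   aligned_offset(NULL, 13, 16) == 16   (rounded up to the next multiple)
//   aligned_offset(NULL, 16, 16) == 16   (already aligned)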

struct free_block {
    void * addr;
    size_t size;
};

#define MAX_FREE_BLOCKS 128

struct ggml_allocr {
    void * data;
    size_t size;
    size_t alignment;
    int n_free_blocks;
    struct free_block free_blocks[MAX_FREE_BLOCKS];
    struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE];
    size_t max_size;
    bool measure;

#ifdef GGML_ALLOCATOR_DEBUG
    struct ggml_tensor * allocated_tensors[1024];
#endif
};

#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i] == NULL) {
            alloc->allocated_tensors[i] = tensor;
            return;
        }
    }
    GGML_ASSERT(!"out of allocated_tensors");
}
static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i] == tensor ||
            (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
            alloc->allocated_tensors[i] = NULL;
            return;
        }
    }
    printf("tried to free tensor %s not found\n", tensor->name);
    GGML_ASSERT(!"tensor not found");
}
#endif

static size_t ggml_allocator_get_alloc_size(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    return ggml_nbytes(tensor);

    UNUSED(alloc);
}

void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);

    size_t max_avail = 0;

    // find the best fitting free block
    int best_fit_block = -1;
    size_t best_fit_size = SIZE_MAX;
    for (int i = 0; i < alloc->n_free_blocks; i++) {
        struct free_block * block = &alloc->free_blocks[i];
        max_avail = MAX(max_avail, block->size);
        if (block->size >= size && block->size <= best_fit_size) {
            best_fit_block = i;
            best_fit_size = block->size;
        }
    }

    AT_PRINTF("block %d\n", best_fit_block);

    if (best_fit_block == -1) {
        fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
                __func__, size, max_avail);
        GGML_ASSERT(!"not enough space in the buffer");
        return;
    }
    struct free_block * block = &alloc->free_blocks[best_fit_block];
    void * addr = block->addr;
    block->addr = (char*)block->addr + size;
    block->size -= size;
    if (block->size == 0) {
        // remove block if empty
        alloc->n_free_blocks--;
        for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
            alloc->free_blocks[j] = alloc->free_blocks[j+1];
        }
    }

    tensor->data = addr;

#ifdef GGML_ALLOCATOR_DEBUG
    add_allocated_tensor(alloc, tensor);
    size_t cur_max = (char*)addr - (char*)alloc->data + size;
    if (cur_max > alloc->max_size) {
        printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
        for (int i = 0; i < 1024; i++) {
            if (alloc->allocated_tensors[i]) {
                printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
            }
        }
        printf("\n");
    }
#endif

    alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
}

// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_allocator_free_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    void * ptr = tensor->data;

    if (ptr < alloc->data || (char*)ptr >= (char*)alloc->data + alloc->max_size) {
        // the tensor was not allocated in this buffer
        // this can happen because the graph allocator will try to free weights and other tensors from different buffers
        // the easiest way to deal with this is just to ignore it
        return;
    }

    size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
    size = aligned_offset(NULL, size, alloc->alignment);
    AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);

#ifdef GGML_ALLOCATOR_DEBUG
    remove_allocated_tensor(alloc, tensor);
#endif

    // see if we can merge with an existing block
    for (int i = 0; i < alloc->n_free_blocks; i++) {
        struct free_block * block = &alloc->free_blocks[i];
        // check if ptr is at the end of the block
        if ((char*)block->addr + block->size == ptr) {
            block->size += size;
            // check if we can merge with the next block
            if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
                block->size += alloc->free_blocks[i+1].size;
                alloc->n_free_blocks--;
                for (int j = i+1; j < alloc->n_free_blocks; j++) {
                    alloc->free_blocks[j] = alloc->free_blocks[j+1];
                }
            }
            return;
        }
        // check if ptr is at the beginning of the block
        if ((char*)ptr + size == block->addr) {
            block->addr = ptr;
            block->size += size;
            // check if we can merge with the previous block
            if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
                alloc->free_blocks[i-1].size += block->size;
                alloc->n_free_blocks--;
                for (int j = i; j < alloc->n_free_blocks; j++) {
                    alloc->free_blocks[j] = alloc->free_blocks[j+1];
                }
            }
            return;
        }
    }
    // otherwise, add a new block
    GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
    // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
    int insert_pos = 0;
    while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
        insert_pos++;
    }
    // shift all blocks from insert_pos onward to make room for the new block
    for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
        alloc->free_blocks[i] = alloc->free_blocks[i-1];
    }
    // insert the new block
    alloc->free_blocks[insert_pos].addr = ptr;
    alloc->free_blocks[insert_pos].size = size;
    alloc->n_free_blocks++;
}

void ggml_allocr_reset(struct ggml_allocr * alloc) {
    alloc->n_free_blocks = 1;
    size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
    alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
    alloc->free_blocks[0].size = alloc->size - align_offset;
}

struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment) {
    struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);

    *alloc = (struct ggml_allocr){
        /*.data          = */ data,
        /*.size          = */ size,
        /*.alignment     = */ alignment,
        /*.n_free_blocks = */ 0,
        /*.free_blocks   = */ {{0}},
        /*.hash_table    = */ {{0}},
        /*.max_size      = */ 0,
        /*.measure       = */ false,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {0},
#endif
    };

    ggml_allocr_reset(alloc);

    return alloc;
}

// address and size of the buffer when measuring
// it needs to be large enough to fit all the tensors, but it cannot overlap with other existing buffers
static void * const MEASURE_BASE_ADDR = (void *) 0x1000;
static const size_t MEASURE_MAX_SIZE = 1ULL<<40; // 1 TB

struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) {
    struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);

    *alloc = (struct ggml_allocr){
        /*.data          = */ MEASURE_BASE_ADDR,
        /*.size          = */ MEASURE_MAX_SIZE,
        /*.alignment     = */ alignment,
        /*.n_free_blocks = */ 0,
        /*.free_blocks   = */ {{0}},
        /*.hash_table    = */ {{0}},
        /*.max_size      = */ 0,
        /*.measure       = */ true,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {0},
#endif
    };

    ggml_allocr_reset(alloc);

    return alloc;
}

void ggml_allocr_free(struct ggml_allocr * alloc) {
    free(alloc);
}

bool ggml_allocr_is_measure(struct ggml_allocr * alloc) {
    return alloc->measure;
}

//////////// compute graph allocator

static bool ggml_is_view(struct ggml_tensor * t) {
    return t->op == GGML_OP_RESHAPE || t->op == GGML_OP_VIEW || t->op == GGML_OP_TRANSPOSE ||
           t->op == GGML_OP_PERMUTE || t->op == GGML_OP_CPY;
}

static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
    if (a->type != b->type) {
        return false;
    }
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        if (a->ne[i] != b->ne[i]) {
            return false;
        }
        if (a->nb[i] != b->nb[i]) {
            return false;
        }
    }
    return true;
}

static struct ggml_tensor * get_view_parent(struct ggml_tensor * t) {
    switch (t->op) {
        case GGML_OP_PERMUTE:
        case GGML_OP_RESHAPE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_VIEW:
            return t->src[0];
        case GGML_OP_CPY:
            return t->src[1];
        default:
            return NULL;
    }
}

static struct ggml_tensor * get_view_source(struct ggml_tensor * t) {
    struct ggml_tensor * parent = t;
    do {
        parent = get_view_parent(parent);
    } while (ggml_is_view(parent));
    return parent;
}

static bool ggml_op_can_inplace(enum ggml_op op) {
    switch (op) {
        case GGML_OP_SCALE:
        case GGML_OP_DIAG_MASK_ZERO:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ADD:
        case GGML_OP_ADD1:
        case GGML_OP_ACC:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_LOG:
        case GGML_OP_UNARY:
        case GGML_OP_ROPE:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SET:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_CONT:
            return true;

        default:
            return false;
    }
}

static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) {
    struct hash_node * ht = alloc->hash_table;
    if (node->data == NULL) {
        if (ggml_is_view(node)) {
            size_t offset;
            switch (node->op) {
                case GGML_OP_VIEW:
                    memcpy(&offset, node->op_params, sizeof(size_t));
                    node->data = (char *) node->src[0]->data + offset;
                    break;
                case GGML_OP_PERMUTE:
                case GGML_OP_RESHAPE:
                case GGML_OP_TRANSPOSE:
                    node->data = node->src[0]->data;
                    break;
                case GGML_OP_CPY:
                    node->data = node->src[1]->data;
                    break;
                default:
                    GGML_ASSERT(!"unknown view op");
                    break;
            }
        } else {
            // see if we can reuse a parent's buffer (inplace)
            if (ggml_op_can_inplace(node->op)) {
                for (int i = 0; i < GGML_MAX_SRC; i++) {
                    struct ggml_tensor * parent = node->src[i];
                    if (parent == NULL) {
                        break;
                    }
                    struct hash_node * p_hn = hash_get(ht, parent);
                    if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) {
                        if (ggml_is_view(parent)) {
                            struct ggml_tensor * view_src = get_view_source(parent);
                            struct hash_node * view_src_hn = hash_get(ht, view_src);
                            if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
                                // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
                                // the parent's data that it will need later (same layout requirement). the problem is that then
                                // we cannot free the tensor because the original address of the allocation is lost.
                                // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
                                // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
                                AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
                                node->data = parent->data;
                                return;
                            }
                        }
                        else {
                            AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
                            node->data = parent->data;
                        }
                        return;
                    }
                }
            }
            ggml_allocr_alloc(alloc, node);
        }
    }
}

static size_t ggml_allocator_alloc_graph_tensors_n(
    struct ggml_allocr * alloc,
    struct ggml_cgraph ** graphs, int n_graphs,
    struct ggml_tensor *** inputs, struct ggml_tensor *** outputs) {

    // reset hash table
    struct hash_node * ht = alloc->hash_table;
    memset(ht, 0, sizeof(struct hash_node) * GGML_GRAPH_HASHTABLE_SIZE);

    // count number of children and views
    for (int g = 0; g < n_graphs; g++) {
        struct ggml_cgraph * gf = graphs[g];
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            if (ggml_is_view(node)) {
                struct ggml_tensor * view_src = get_view_source(node);
                hash_get(ht, view_src)->n_views += 1;
            }

            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                hash_get(ht, parent)->n_children += 1;
            }
        }
    }

    // allocate tensors
    for (int g = 0; g < n_graphs; g++) {
        struct ggml_cgraph * gf = graphs[g];
        AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
        // graph inputs are allocated first to ensure that they are not overwritten by each other
        if (inputs != NULL && inputs[g] != NULL) {
            for (int i = 0; inputs[g][i] != NULL; i++) {
                struct ggml_tensor * input = inputs[g][i];
                AT_PRINTF("input: %s\n", input->name);
                allocate_node(alloc, input);
            }
        }
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            // allocate parents (leafs)
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                allocate_node(alloc, parent);
            }

            // allocate node
            allocate_node(alloc, node);

            AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                AT_PRINTF("%s", parent->name);
                if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
                    AT_PRINTF(", ");
                }
            }
            AT_PRINTF("\n");

            // update parents
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                struct hash_node * p_hn = hash_get(ht, parent);
                p_hn->n_children -= 1;

                //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);

                if (p_hn->n_children == 0 && p_hn->n_views == 0) {
                    if (ggml_is_view(parent)) {
                        struct ggml_tensor * view_src = get_view_source(parent);
                        struct hash_node * view_src_hn = hash_get(ht, view_src);
                        view_src_hn->n_views -= 1;
                        AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src->n_children, view_src->n_views);
                        if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
                            ggml_allocator_free_tensor(alloc, view_src);
                        }
                    }
                    else {
                        if (parent->data != node->data) {
                            ggml_allocator_free_tensor(alloc, parent);
                        }
                    }
                }
            }
            AT_PRINTF("\n");
        }
        // free graph outputs here that wouldn't be freed otherwise because they have no children
        if (outputs != NULL && outputs[g] != NULL) {
            for (int i = 0; outputs[g][i] != NULL; i++) {
                struct ggml_tensor * output = outputs[g][i];
                AT_PRINTF("output: %s\n", output->name);
                ggml_allocator_free_tensor(alloc, output);
            }
        }
    }

    return alloc->max_size;
}

size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph) {
    return ggml_allocator_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
}
ggml-alloc.h (new file, 22 lines)
@@ -0,0 +1,22 @@
#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif


GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment);
GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment);

GGML_API void   ggml_allocr_free(struct ggml_allocr * alloc);
GGML_API bool   ggml_allocr_is_measure(struct ggml_allocr * alloc);
GGML_API void   ggml_allocr_reset(struct ggml_allocr * alloc);
GGML_API void   ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor);
GGML_API size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph);


#ifdef __cplusplus
}
#endif
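A minimal usage sketch of this API (not from the repository): measure the graph once to learn the peak size, then allocate a real buffer of that size and run the allocation again on a freshly built graph. `build_graph` and the alignment value are illustrative assumptions.

```
#include <stdlib.h>
#include "ggml.h"
#include "ggml-alloc.h"

struct ggml_cgraph * build_graph(struct ggml_context * ctx); // hypothetical user code

void plan_and_allocate(struct ggml_context * ctx) {
    const size_t alignment = 32; // illustrative value

    // measure pass: no real buffer is touched, only the peak size is recorded
    struct ggml_allocr * measure = ggml_allocr_new_measure(alignment);
    const size_t size = ggml_allocr_alloc_graph(measure, build_graph(ctx));
    ggml_allocr_free(measure);

    // real pass: rebuild the graph (the measure pass assigned placeholder
    // addresses to the tensors) and allocate it inside an actual buffer
    void * buf = malloc(size);
    struct ggml_allocr * alloc = ggml_allocr_new(buf, size, alignment);
    ggml_allocr_alloc_graph(alloc, build_graph(ctx));

    // ... evaluate the graph; the allocator can be reused across evaluations
    // by calling ggml_allocr_reset(alloc) before each new graph ...

    ggml_allocr_free(alloc);
    free(buf);
}
```
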
ggml-cuda.cu (1994 changes): file diff suppressed because it is too large
@@ -27,6 +27,7 @@ void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
void ggml_cuda_set_main_device(int main_device);
void ggml_cuda_set_mul_mat_q(bool mul_mat_q);
void ggml_cuda_set_scratch_size(size_t scratch_size);
void ggml_cuda_free_scratch(void);
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);

ggml.c (590 changes)
@@ -3698,6 +3698,7 @@ static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = {
|
||||
};
|
||||
static_assert(GGML_TYPE_COUNT == 19, "GGML_TYPE_SIZE is outdated");
|
||||
|
||||
|
||||
static const char * GGML_TYPE_NAME[GGML_TYPE_COUNT] = {
|
||||
[GGML_TYPE_F32] = "f32",
|
||||
[GGML_TYPE_F16] = "f16",
|
||||
@@ -4556,10 +4557,12 @@ static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml
|
||||
|
||||
static struct ggml_tensor * ggml_new_tensor_impl(
|
||||
struct ggml_context * ctx,
|
||||
enum ggml_type type,
|
||||
int n_dims,
|
||||
const int64_t* ne,
|
||||
void* data) {
|
||||
enum ggml_type type,
|
||||
int n_dims,
|
||||
const int64_t * ne,
|
||||
void * data) {
|
||||
|
||||
assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
|
||||
|
||||
size_t data_size = 0;
|
||||
|
||||
@@ -4647,22 +4650,22 @@ static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int3
|
||||
|
||||
struct ggml_tensor * ggml_new_tensor(
|
||||
struct ggml_context * ctx,
|
||||
enum ggml_type type,
|
||||
int n_dims,
|
||||
const int64_t * ne) {
|
||||
enum ggml_type type,
|
||||
int n_dims,
|
||||
const int64_t * ne) {
|
||||
return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL);
|
||||
}
|
||||
|
||||
struct ggml_tensor * ggml_new_tensor_1d(
|
||||
struct ggml_context * ctx,
|
||||
enum ggml_type type,
|
||||
enum ggml_type type,
|
||||
int64_t ne0) {
|
||||
return ggml_new_tensor(ctx, type, 1, &ne0);
|
||||
}
|
||||
|
||||
struct ggml_tensor * ggml_new_tensor_2d(
|
||||
struct ggml_context * ctx,
|
||||
enum ggml_type type,
|
||||
enum ggml_type type,
|
||||
int64_t ne0,
|
||||
int64_t ne1) {
|
||||
const int64_t ne[2] = { ne0, ne1 };
|
||||
@@ -4671,7 +4674,7 @@ struct ggml_tensor * ggml_new_tensor_2d(
|
||||
|
||||
struct ggml_tensor * ggml_new_tensor_3d(
|
||||
struct ggml_context * ctx,
|
||||
enum ggml_type type,
|
||||
enum ggml_type type,
|
||||
int64_t ne0,
|
||||
int64_t ne1,
|
||||
int64_t ne2) {
|
||||
@@ -4981,11 +4984,6 @@ enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
|
||||
return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
|
||||
}
|
||||
|
||||
static void ggml_set_unary_op(struct ggml_tensor * tensor, enum ggml_unary_op op) {
|
||||
GGML_ASSERT(tensor->op = GGML_OP_UNARY);
|
||||
ggml_set_op_params_i32(tensor, 0, (int32_t) op);
|
||||
}
|
||||
|
||||
const char * ggml_get_name(const struct ggml_tensor * tensor) {
|
||||
return tensor->name;
|
||||
}
|
||||
@@ -6242,6 +6240,27 @@ struct ggml_tensor * ggml_reshape_4d(
|
||||
|
||||
// ggml_view_1d
|
||||
|
||||
static struct ggml_tensor * ggml_view_tensor_offset(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
int n_dims,
|
||||
const int64_t * ne,
|
||||
size_t offset) {
|
||||
// don't calculate an offset from an unallocated tensor
|
||||
void * data = NULL;
|
||||
if (a->data != NULL) {
|
||||
data = (char *) a->data + offset;
|
||||
}
|
||||
|
||||
struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, data);
|
||||
|
||||
ggml_format_name(result, "%s (view)", a->name);
|
||||
|
||||
ggml_set_op_params(result, &offset, sizeof(offset));
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
struct ggml_tensor * ggml_view_1d(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
@@ -6254,10 +6273,7 @@ struct ggml_tensor * ggml_view_1d(
|
||||
is_node = true;
|
||||
}
|
||||
|
||||
struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);
|
||||
ggml_format_name(result, "%s (view)", a->name);
|
||||
|
||||
ggml_set_op_params(result, &offset, sizeof(offset));
|
||||
struct ggml_tensor * result = ggml_view_tensor_offset(ctx, a, 1, &ne0, offset);
|
||||
|
||||
result->op = GGML_OP_VIEW;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6284,10 +6300,7 @@ struct ggml_tensor * ggml_view_2d(
|
||||
|
||||
const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };
|
||||
|
||||
struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset);
|
||||
ggml_format_name(result, "%s (view)", a->name);
|
||||
|
||||
ggml_set_op_params(result, &offset, sizeof(offset));
|
||||
struct ggml_tensor * result = ggml_view_tensor_offset(ctx, a, 2, ne, offset);
|
||||
|
||||
result->nb[1] = nb1;
|
||||
result->nb[2] = result->nb[1]*ne1;
|
||||
@@ -6320,10 +6333,7 @@ struct ggml_tensor * ggml_view_3d(
|
||||
|
||||
const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, 1 };
|
||||
|
||||
struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, (char *) a->data + offset);
|
||||
ggml_format_name(result, "%s (view)", a->name);
|
||||
|
||||
ggml_set_op_params(result, &offset, sizeof(offset));
|
||||
struct ggml_tensor * result = ggml_view_tensor_offset(ctx, a, 3, ne, offset);
|
||||
|
||||
result->nb[1] = nb1;
|
||||
result->nb[2] = nb2;
|
||||
@@ -6358,10 +6368,7 @@ struct ggml_tensor * ggml_view_4d(
|
||||
|
||||
const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, ne3 };
|
||||
|
||||
struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, (char *) a->data + offset);
|
||||
ggml_format_name(result, "%s (view)", a->name);
|
||||
|
||||
ggml_set_op_params(result, &offset, sizeof(offset));
|
||||
struct ggml_tensor * result = ggml_view_tensor_offset(ctx, a, 4, ne, offset);
|
||||
|
||||
result->nb[1] = nb1;
|
||||
result->nb[2] = nb2;
|
||||
@@ -6745,6 +6752,18 @@ struct ggml_tensor * ggml_rope_inplace(
|
||||
return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, true);
|
||||
}
|
||||
|
||||
struct ggml_tensor * ggml_rope_custom(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
int n_past,
|
||||
int n_dims,
|
||||
int mode,
|
||||
int n_ctx,
|
||||
float freq_base,
|
||||
float freq_scale) {
|
||||
return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, freq_base, freq_scale, false);
|
||||
}
|
||||
|
||||
struct ggml_tensor * ggml_rope_custom_inplace(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
@@ -7225,7 +7244,7 @@ static struct ggml_tensor * ggml_unary_impl(
|
||||
|
||||
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
|
||||
|
||||
ggml_set_unary_op(result, op);
|
||||
ggml_set_op_params_i32(result, 0, (int32_t) op);
|
||||
|
||||
result->op = GGML_OP_UNARY;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -18296,513 +18315,6 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
struct gguf_str {
|
||||
uint32_t n;
|
||||
char * data;
|
||||
};
|
||||
|
||||
static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
|
||||
[GGUF_TYPE_UINT8] = sizeof(uint8_t),
|
||||
[GGUF_TYPE_INT8] = sizeof(int8_t),
|
||||
[GGUF_TYPE_UINT16] = sizeof(uint16_t),
|
||||
[GGUF_TYPE_INT16] = sizeof(int16_t),
|
||||
[GGUF_TYPE_UINT32] = sizeof(uint32_t),
|
||||
[GGUF_TYPE_INT32] = sizeof(int32_t),
|
||||
[GGUF_TYPE_FLOAT32] = sizeof(float),
|
||||
[GGUF_TYPE_BOOL] = sizeof(bool),
|
||||
[GGUF_TYPE_STRING] = sizeof(struct gguf_str),
|
||||
[GGUF_TYPE_ARRAY] = 0, // undefined
|
||||
};
|
||||
static_assert(GGUF_TYPE_COUNT == 10, "GGUF_TYPE_COUNT != 10");
|
||||
|
||||
union gguf_value {
|
||||
uint8_t uint8;
|
||||
int8_t int8;
|
||||
uint16_t uint16;
|
||||
int16_t int16;
|
||||
uint32_t uint32;
|
||||
int32_t int32;
|
||||
float float32;
|
||||
bool bool_;
|
||||
|
||||
struct gguf_str str;
|
||||
|
||||
struct {
|
||||
enum gguf_type type;
|
||||
|
||||
uint32_t n;
|
||||
void * data;
|
||||
} arr;
|
||||
};
|
||||
|
||||
struct gguf_kv {
|
||||
struct gguf_str key;
|
||||
|
||||
uint32_t n_bytes; // TODO: is this actually needed?
|
||||
|
||||
enum gguf_type type;
|
||||
union gguf_value value;
|
||||
};
|
||||
|
||||
struct gguf_header {
|
||||
uint32_t magic;
|
||||
uint32_t version;
|
||||
uint32_t n_tensors;
|
||||
uint32_t n_kv;
|
||||
|
||||
struct gguf_kv * kv;
|
||||
};
|
||||
|
||||
struct gguf_tensor_info {
|
||||
struct gguf_str name;
|
||||
|
||||
uint32_t n_dims;
|
||||
uint32_t ne[GGML_MAX_DIMS];
|
||||
uint32_t n_elms; // TODO: is this needed?
|
||||
|
||||
enum ggml_type type;
|
||||
|
||||
uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
|
||||
};
|
||||
|
||||
struct gguf_context {
|
||||
struct gguf_header header;
|
||||
struct gguf_tensor_info * infos;
|
||||
|
||||
size_t alignment;
|
||||
size_t offset; // offset of `data` from beginning of file
|
||||
size_t size_data; // size of `data` in bytes
|
||||
|
||||
//uint8_t * padding;
|
||||
uint8_t * data;
|
||||
};
|
||||
|
||||
static bool gguf_fread_el(void * dst, size_t size, FILE * file, size_t * offset) {
|
||||
const size_t n = fread(dst, 1, size, file);
|
||||
*offset += n;
|
||||
return n == size;
|
||||
}
|
||||
|
||||
static bool gguf_fread_str(struct gguf_str * p, FILE * file, size_t * offset) {
|
||||
p->n = 0;
|
||||
p->data = NULL;
|
||||
|
||||
bool ok = true;
|
||||
|
||||
// TODO: how to avoid mallocs for strings?
|
||||
ok = ok && gguf_fread_el(&p->n, sizeof(p->n), file, offset); p->data = calloc(p->n + 1, 1);
|
||||
ok = ok && gguf_fread_el( p->data, p->n, file, offset);
|
||||
|
||||
return ok;
|
||||
}
|
||||
|
||||
struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
|
||||
FILE * file = fopen(fname, "rb");
|
||||
if (!file) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// offset from start of file
|
||||
size_t offset = 0;
|
||||
|
||||
uint32_t magic = 0;
|
||||
|
||||
// check the magic before making allocations
|
||||
{
|
||||
gguf_fread_el(&magic, sizeof(magic), file, &offset);
|
||||
|
||||
if (magic != GGUF_MAGIC) {
|
||||
fprintf(stderr, "%s: invalid magic number %08x\n", __func__, magic);
|
||||
fclose(file);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
bool ok = true;
|
||||
|
||||
struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
|
||||
|
||||
// read the header
|
||||
{
|
||||
ctx->header.magic = magic;
|
||||
ctx->header.kv = NULL;
|
||||
|
||||
ctx->infos = NULL;
|
||||
ctx->data = NULL;
|
||||
|
||||
ok = ok && gguf_fread_el(&ctx->header.version, sizeof(ctx->header.version), file, &offset);
|
||||
ok = ok && gguf_fread_el(&ctx->header.n_tensors, sizeof(ctx->header.n_tensors), file, &offset);
|
||||
ok = ok && gguf_fread_el(&ctx->header.n_kv, sizeof(ctx->header.n_kv), file, &offset);
|
||||
|
||||
if (!ok) {
|
||||
fprintf(stderr, "%s: failed to read header\n", __func__);
|
||||
fclose(file);
|
||||
gguf_free(ctx);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// read the kv pairs
|
||||
{
|
||||
ctx->header.kv = GGML_ALIGNED_MALLOC(ctx->header.n_kv * sizeof(struct gguf_kv));
|
||||
|
||||
for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
|
||||
struct gguf_kv * kv = &ctx->header.kv[i];
|
||||
|
||||
            //fprintf(stderr, "%s: reading kv %d\n", __func__, i);

            ok = ok && gguf_fread_str(&kv->key, file, &offset);
          //ok = ok && gguf_fread_el (&kv->n_bytes, sizeof(kv->n_bytes), file, &offset);
            ok = ok && gguf_fread_el (&kv->type, sizeof(kv->type), file, &offset);

            //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);

            switch (kv->type) {
                case GGUF_TYPE_UINT8:   ok = ok && gguf_fread_el (&kv->value.uint8,   sizeof(kv->value.uint8),   file, &offset); break;
                case GGUF_TYPE_INT8:    ok = ok && gguf_fread_el (&kv->value.int8,    sizeof(kv->value.int8),    file, &offset); break;
                case GGUF_TYPE_UINT16:  ok = ok && gguf_fread_el (&kv->value.uint16,  sizeof(kv->value.uint16),  file, &offset); break;
                case GGUF_TYPE_INT16:   ok = ok && gguf_fread_el (&kv->value.int16,   sizeof(kv->value.int16),   file, &offset); break;
                case GGUF_TYPE_UINT32:  ok = ok && gguf_fread_el (&kv->value.uint32,  sizeof(kv->value.uint32),  file, &offset); break;
                case GGUF_TYPE_INT32:   ok = ok && gguf_fread_el (&kv->value.int32,   sizeof(kv->value.int32),   file, &offset); break;
                case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (&kv->value.float32, sizeof(kv->value.float32), file, &offset); break;
                case GGUF_TYPE_BOOL:    ok = ok && gguf_fread_el (&kv->value.bool_,   sizeof(kv->value.bool_),   file, &offset); break;
                case GGUF_TYPE_STRING:  ok = ok && gguf_fread_str(&kv->value.str, file, &offset); break;
                case GGUF_TYPE_ARRAY:
                    {
                        ok = ok && gguf_fread_el(&kv->value.arr.type, sizeof(kv->value.arr.type), file, &offset);
                        ok = ok && gguf_fread_el(&kv->value.arr.n,    sizeof(kv->value.arr.n),    file, &offset);

                        switch (kv->value.arr.type) {
                            case GGUF_TYPE_UINT8:
                            case GGUF_TYPE_INT8:
                            case GGUF_TYPE_UINT16:
                            case GGUF_TYPE_INT16:
                            case GGUF_TYPE_UINT32:
                            case GGUF_TYPE_INT32:
                            case GGUF_TYPE_FLOAT32:
                            case GGUF_TYPE_BOOL:
                                {
                                    kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
                                    ok = ok && gguf_fread_el(kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], file, &offset);
                                } break;
                            case GGUF_TYPE_STRING:
                                {
                                    kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str));
                                    for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                                        ok = ok && gguf_fread_str(&((struct gguf_str *) kv->value.arr.data)[j], file, &offset);
                                    }
                                } break;
                            case GGUF_TYPE_ARRAY:
                            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
                        };
                    } break;
                case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
            };

            if (!ok) {
                break;
            }
        }

        if (!ok) {
            fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
            fclose(file);
            gguf_free(ctx);
            return NULL;
        }
    }

    // read the tensor infos
    {
        ctx->infos = GGML_ALIGNED_MALLOC(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));

        for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                info->ne[j] = 1;
            }

            ok = ok && gguf_fread_str(&info->name, file, &offset);
            ok = ok && gguf_fread_el (&info->n_dims, sizeof(info->n_dims), file, &offset);
            for (uint32_t j = 0; j < info->n_dims; ++j) {
                ok = ok && gguf_fread_el(&info->ne[j], sizeof(info->ne[j]), file, &offset);
            }
          //ok = ok && gguf_fread_el (&info->n_elms, sizeof(info->n_elms), file, &offset);
            ok = ok && gguf_fread_el (&info->type,   sizeof(info->type),   file, &offset);
            ok = ok && gguf_fread_el (&info->offset, sizeof(info->offset), file, &offset);

            if (!ok) {
                fprintf(stderr, "%s: failed to read tensor info\n", __func__);
                fclose(file);
                gguf_free(ctx);
                return NULL;
            }
        }
    }

    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;

    // TODO: determine new alignment from kv if available

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset_pad = offset % ctx->alignment;

        if (offset_pad != 0) {
            offset += ctx->alignment - offset_pad;
            fseek(file, offset, SEEK_SET);
        }
    }
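The padding step above is the same arithmetic as the GGML_PAD macro defined in ggml.h (it appears later in this diff). A minimal standalone sketch of it, assuming only that the alignment is a power of two:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

// same formula as GGML_PAD in ggml.h; valid only for power-of-two n
#define PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

int main(void) {
    const size_t alignment = 32; // GGUF_DEFAULT_ALIGNMENT

    // e.g. a data section starting at byte 1234 is pushed to the next
    // 32-byte boundary, exactly like the fseek() above
    size_t offset = 1234;
    const size_t offset_pad = offset % alignment;
    if (offset_pad != 0) {
        offset += alignment - offset_pad;
    }

    assert(offset == PAD(1234, alignment)); // both yield 1248
    printf("padded offset: %zu\n", offset);
    return 0;
}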
    // store the current file offset - this is where the data section starts
    ctx->offset = offset;

    // compute the total size of the data section, taking into account the alignment
    {
        ctx->size_data = 0;
        for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            const int64_t ne =
                (int64_t) info->ne[0] *
                (int64_t) info->ne[1] *
                (int64_t) info->ne[2] *
                (int64_t) info->ne[3];

            if (ne % ggml_blck_size(info->type) != 0) {
                fprintf(stderr, "%s: tensor '%s' number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
                        __func__, info->name.data, ne, ggml_blck_size(info->type));
                fclose(file);
                gguf_free(ctx);
                return NULL;
            }

            const size_t size_cur = (ne*ggml_type_size(info->type))/ggml_blck_size(info->type);

            ctx->size_data += GGML_PAD(size_cur, ctx->alignment);
        }
    }
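For a concrete instance of the size computation: ggml_type_size() is the byte size of one quantization block and ggml_blck_size() is the number of elements that block covers. The sketch below plugs in illustrative values for Q4_0, whose block packs 32 weights into 18 bytes (a 2-byte fp16 scale plus 16 bytes of 4-bit quants); the constants are assumptions for the example, not calls into ggml:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    // illustrative values for GGML_TYPE_Q4_0:
    // ggml_type_size() -> 18 bytes per block, ggml_blck_size() -> 32 elements
    const int64_t type_size = 18;
    const int64_t blck_size = 32;

    // a 4096 x 4096 tensor, roughly one 7B attention weight matrix
    const int64_t ne = 4096LL*4096;

    if (ne % blck_size != 0) {
        fprintf(stderr, "number of elements is not a multiple of the block size\n");
        return 1;
    }

    const int64_t size_cur = ne*type_size/blck_size; // 9437184 bytes = 9 MiB
    printf("tensor data size: %" PRId64 " bytes\n", size_cur);
    return 0;
}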
    // load the tensor data only if requested
    if (params.ctx != NULL) {
        // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
        // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
        // the ggml_tensor structs to the appropriate locations in the binary blob

        // compute the exact size needed for the new ggml_context
        const size_t mem_size =
            params.no_alloc ?
            (ctx->header.n_tensors    )*ggml_tensor_overhead() :
            (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size_data;

        struct ggml_init_params pdata = {
            .mem_size   = mem_size,
            .mem_buffer = NULL,
            .no_alloc   = params.no_alloc,
        };

        *params.ctx = ggml_init(pdata);

        struct ggml_context * ctx_data = *params.ctx;

        struct ggml_tensor * data = NULL;

        if (params.no_alloc == false) {
            data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size_data);

            ok = ok && data != NULL;

            // read the binary blob with the tensor data
            ok = ok && gguf_fread_el(data->data, ctx->size_data, file, &offset);

            if (!ok) {
                fprintf(stderr, "%s: failed to read tensor data\n", __func__);
                fclose(file);
                ggml_free(ctx_data);
                gguf_free(ctx);
                return NULL;
            }

            ctx->data = data->data;
        }

        ggml_set_no_alloc(ctx_data, true);

        // create the tensors
        for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
            const int64_t ne[GGML_MAX_DIMS] = {
                ctx->infos[i].ne[0],
                ctx->infos[i].ne[1],
                ctx->infos[i].ne[2],
                ctx->infos[i].ne[3],
            };

            struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);

            ok = ok && cur != NULL;

            ggml_set_name(cur, ctx->infos[i].name.data);

            if (!ok) {
                break;
            }

            // point the data member to the appropriate location in the binary blob using the tensor infos
            if (params.no_alloc == false) {
              //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
                cur->data = (char *) data->data + ctx->infos[i].offset;               // offset from data
            }
        }

        if (!ok) {
            fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
            fclose(file);
            ggml_free(ctx_data);
            gguf_free(ctx);
            return NULL;
        }

        ggml_set_no_alloc(ctx_data, params.no_alloc);
    }

    fclose(file);

    return ctx;
}
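A minimal caller of gguf_init_from_file(), sketched on the assumption that a file named model.gguf exists; with no_alloc unset and a non-NULL ctx, the tensor data ends up inside the returned ggml_context:

#include <stdio.h>

#include "ggml.h"

int load_example(void) {
    struct ggml_context * ctx_data = NULL;

    struct gguf_init_params params = {
        /*.no_alloc =*/ false,
        /*.ctx      =*/ &ctx_data,
    };

    // "model.gguf" is a placeholder path
    struct gguf_context * gguf = gguf_init_from_file("model.gguf", params);
    if (gguf == NULL) {
        fprintf(stderr, "failed to load model.gguf\n");
        return 1;
    }

    // ... inspect metadata and tensors here ...

    // two-step cleanup: gguf_free() releases only the metadata, while the
    // tensor structs and the binary blob live in the ggml_context
    gguf_free(gguf);
    ggml_free(ctx_data);
    return 0;
}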
void gguf_free(struct gguf_context * ctx) {
    if (ctx == NULL) {
        return;
    }

    if (ctx->header.kv) {
        // free string memory - not great..
        for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
            struct gguf_kv * kv = &ctx->header.kv[i];

            if (kv->key.data) {
                free(kv->key.data);
            }

            if (kv->type == GGUF_TYPE_STRING) {
                if (kv->value.str.data) {
                    free(kv->value.str.data);
                }
            }

            if (kv->type == GGUF_TYPE_ARRAY) {
                if (kv->value.arr.data) {
                    if (kv->value.arr.type == GGUF_TYPE_STRING) {
                        for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                            struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
                            if (str->data) {
                                free(str->data);
                            }
                        }
                    }
                    free(kv->value.arr.data);
                }
            }
        }

        GGML_ALIGNED_FREE(ctx->header.kv);
    }

    if (ctx->infos) {
        for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            if (info->name.data) {
                free(info->name.data);
            }
        }

        GGML_ALIGNED_FREE(ctx->infos);
    }

    GGML_ALIGNED_FREE(ctx);
}

int gguf_get_version(struct gguf_context * ctx) {
    return ctx->header.version;
}

size_t gguf_get_alignment(struct gguf_context * ctx) {
    return ctx->alignment;
}

size_t gguf_get_data_offset(struct gguf_context * ctx) {
    return ctx->offset;
}

void * gguf_get_data(struct gguf_context * ctx) {
    return ctx->data;
}

int gguf_get_n_kv(struct gguf_context * ctx) {
    return ctx->header.n_kv;
}

const char * gguf_get_key(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].key.data;
}

enum gguf_type gguf_get_type(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].type;
}

uint8_t gguf_get_val_u8(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.uint8;
}

int8_t gguf_get_val_i8(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.int8;
}

uint16_t gguf_get_val_u16(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.uint16;
}

int16_t gguf_get_val_i16(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.int16;
}

uint32_t gguf_get_val_u32(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.uint32;
}

int32_t gguf_get_val_i32(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.int32;
}

float gguf_get_val_f32(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.float32;
}

bool gguf_get_val_bool(struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.bool_;
}

const char * gguf_get_val_str (struct gguf_context * ctx, int i) {
    return ctx->header.kv[i].value.str.data;
}

int gguf_get_n_tensors(struct gguf_context * ctx) {
    return ctx->header.n_tensors;
}

size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i) {
    return ctx->infos[i].offset;
}

char * gguf_get_tensor_name(struct gguf_context * ctx, int i) {
    return ctx->infos[i].name.data;
}
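Taken together, these accessors are enough to dump a file's metadata keys and tensor table. A small sketch (error handling elided) that assumes a gguf_context obtained as in the loader above:

#include <stdio.h>

#include "ggml.h"

void gguf_dump(struct gguf_context * gguf) {
    printf("version:   %d\n",  gguf_get_version(gguf));
    printf("alignment: %zu\n", gguf_get_alignment(gguf));

    const int n_kv = gguf_get_n_kv(gguf);
    for (int i = 0; i < n_kv; ++i) {
        // print only the key and type id; reading the value requires
        // switching on gguf_get_type(), as the loader above does
        printf("kv[%d]: key = %s, type = %d\n", i, gguf_get_key(gguf, i), (int) gguf_get_type(gguf, i));
    }

    const int n_tensors = gguf_get_n_tensors(gguf);
    for (int i = 0; i < n_tensors; ++i) {
        printf("tensor[%d]: name = %s, offset = %zu\n",
                i, gguf_get_tensor_name(gguf, i), gguf_get_tensor_offset(gguf, i));
    }
}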
////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}
ggml.h (76 changes)
@@ -202,14 +202,10 @@
 #define GGML_MAX_OP_PARAMS 32
 #define GGML_DEFAULT_N_THREADS 4
 
 #define GGML_EXIT_SUCCESS 0
 #define GGML_EXIT_ABORTED 1
 
-#define GGUF_MAGIC   0x47475546 // "GGUF"
-#define GGUF_VERSION 1
-
-#define GGUF_DEFAULT_ALIGNMENT 32
 
 #define GGML_UNUSED(x) (void)(x)
 
 #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
@@ -1174,7 +1170,18 @@ extern "C" {
             int                   mode,
             int                   n_ctx);
 
-    // custom RoPE, in-place, returns view(a)
+    // custom RoPE
+    GGML_API struct ggml_tensor * ggml_rope_custom(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            int                   n_past,
+            int                   n_dims,
+            int                   mode,
+            int                   n_ctx,
+            float                 freq_base,
+            float                 freq_scale);
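Going by the declaration alone, a hedged sketch of a call that reproduces the default RoPE behavior explicitly (base 10000, no frequency scaling); the context, tensor, and parameter values are assumptions for illustration:

// assumes a valid ggml_context `ctx0` and an activation tensor `cur`
static struct ggml_tensor * apply_rope_explicit(struct ggml_context * ctx0, struct ggml_tensor * cur) {
    const int n_past = 0;    // no cached context yet
    const int n_dims = 128;  // rotary dims, e.g. n_embd / n_head
    const int mode   = 0;    // standard RoPE (not the NeoX/GLM variants)
    const int n_ctx  = 2048; // training context length

    // freq_base 10000 and freq_scale 1.0 should match plain ggml_rope()
    return ggml_rope_custom(ctx0, cur, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f);
}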
+
+    // in-place, returns view(a)
     GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
@@ -1615,63 +1622,6 @@ extern "C" {
 
     GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
 
-    //
-    // gguf
-    //
-
-    // TODO: can be removed if the API is extended for writing
-    enum gguf_type {
-        GGUF_TYPE_UINT8   = 0,
-        GGUF_TYPE_INT8    = 1,
-        GGUF_TYPE_UINT16  = 2,
-        GGUF_TYPE_INT16   = 3,
-        GGUF_TYPE_UINT32  = 4,
-        GGUF_TYPE_INT32   = 5,
-        GGUF_TYPE_FLOAT32 = 6,
-        GGUF_TYPE_BOOL    = 7,
-        GGUF_TYPE_STRING  = 8,
-        GGUF_TYPE_ARRAY   = 9,
-        GGUF_TYPE_COUNT,  // marks the end of the enum
-    };
-
-    struct gguf_context;
-
-    struct gguf_init_params {
-        bool no_alloc;
-
-        // if not NULL, create a ggml_context and allocate the tensor data in it
-        struct ggml_context ** ctx;
-    };
-
-    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
-  //GGML_API struct gguf_context * gguf_init_from_buffer(..);
-    GGML_API void gguf_free(struct gguf_context * ctx);
-
-    GGML_API int    gguf_get_version    (struct gguf_context * ctx);
-    GGML_API size_t gguf_get_alignment  (struct gguf_context * ctx);
-    GGML_API size_t gguf_get_data_offset(struct gguf_context * ctx);
-    GGML_API void * gguf_get_data       (struct gguf_context * ctx);
-
-    GGML_API int          gguf_get_n_kv(struct gguf_context * ctx);
-    GGML_API const char * gguf_get_key (struct gguf_context * ctx, int i);
-    GGML_API void         gguf_get_val (struct gguf_context * ctx, int i, void * val);
-
-    GGML_API uint8_t      gguf_get_val_u8  (struct gguf_context * ctx, int i);
-    GGML_API int8_t       gguf_get_val_i8  (struct gguf_context * ctx, int i);
-    GGML_API uint16_t     gguf_get_val_u16 (struct gguf_context * ctx, int i);
-    GGML_API int16_t      gguf_get_val_i16 (struct gguf_context * ctx, int i);
-    GGML_API uint32_t     gguf_get_val_u32 (struct gguf_context * ctx, int i);
-    GGML_API int32_t      gguf_get_val_i32 (struct gguf_context * ctx, int i);
-    GGML_API float        gguf_get_val_f32 (struct gguf_context * ctx, int i);
-    GGML_API bool         gguf_get_val_bool(struct gguf_context * ctx, int i);
-    GGML_API const char * gguf_get_val_str (struct gguf_context * ctx, int i);
-    GGML_API int          gguf_get_arr_n   (struct gguf_context * ctx, int i);
-    GGML_API void         gguf_get_arr_data(struct gguf_context * ctx, int i, void * data);
-
-    GGML_API int    gguf_get_n_tensors    (struct gguf_context * ctx);
-    GGML_API size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i);
-    GGML_API char * gguf_get_tensor_name  (struct gguf_context * ctx, int i);
-
     //
     // system info
     //
gguf.py (283 deletions)
@@ -1,283 +0,0 @@
"""TODOs
1. Implement writers for known architectures, LLaMA in particular.
2. Add docstrings from the format specs.
3. After development is done, convert it to a proper pip-installable Python package, and possibly move it to its own repo under ggml-org.
"""

import struct
import constants
from enum import IntEnum
from typing import Any, IO, List

import numpy as np


class GGMLQuantizationType(IntEnum):
    F32 = 0
    F16 = 1
    Q4_0 = 2
    Q4_1 = 3
    # Q4_2 = 4 # support has been removed
    # Q4_3 = 5 # support has been removed
    Q5_0 = 6
    Q5_1 = 7
    Q8_0 = 8
    Q8_1 = 9
    Q2_K = 10
    Q3_K = 11
    Q4_K = 12
    Q5_K = 13
    Q6_K = 14
    Q8_K = 15


class GGUFValueType(IntEnum):
    UINT8 = 0
    INT8 = 1
    UINT16 = 2
    INT16 = 3
    UINT32 = 4
    INT32 = 5
    FLOAT32 = 6
    BOOL = 7
    STRING = 8
    ARRAY = 9

    @staticmethod
    def get_type(val):
        if isinstance(val, str):
            return GGUFValueType.STRING
        elif isinstance(val, list):
            return GGUFValueType.ARRAY
        elif isinstance(val, float):
            return GGUFValueType.FLOAT32
        elif isinstance(val, bool):
            return GGUFValueType.BOOL
        else:
            return GGUFValueType.INT32


class GGUFWriter:
    def __init__(self, fout: IO):
        self.fout = fout
        self.offset_tensor = 0
        self.tensors: List[np.ndarray] = []

    def write_header(self, tensor_count: int, metadata_kv_count: int):
        self.fout.write(struct.pack("<I", constants.GGUF_MAGIC))
        self.fout.write(struct.pack("<I", constants.GGUF_VERSION))
        self.fout.write(struct.pack("<I", tensor_count))
        self.fout.write(struct.pack("<I", metadata_kv_count))

    @classmethod
    def open(cls, path: str) -> "GGUFWriter":
        f = open(path, "wb")
        return cls(f)

    def write_key(self, key: str):
        self.write_val(key, GGUFValueType.STRING)

    def write_uint8(self, key: str, val: int):
        self.write_key(key)
        self.write_val(val, GGUFValueType.UINT8)

    def write_int8(self, key: str, val: int):
        self.write_key(key)
        self.write_val(val, GGUFValueType.INT8)

    def write_uint16(self, key: str, val: int):
        self.write_key(key)
        self.write_val(val, GGUFValueType.UINT16)

    def write_int16(self, key: str, val: int):
        self.write_key(key)
        self.write_val(val, GGUFValueType.INT16)

    def write_uint32(self, key: str, val: int):
        self.write_key(key)
        self.write_val(val, GGUFValueType.UINT32)

    def write_int32(self, key: str, val: int):
        self.write_key(key)
        self.write_val(val, GGUFValueType.INT32)

    def write_float32(self, key: str, val: float):
        self.write_key(key)
        self.write_val(val, GGUFValueType.FLOAT32)

    def write_bool(self, key: str, val: bool):
        self.write_key(key)
        self.write_val(val, GGUFValueType.BOOL)

    def write_string(self, key: str, val: str):
        self.write_key(key)
        self.write_val(val, GGUFValueType.STRING)

    def write_array(self, key: str, val: list):
        if not isinstance(val, list):
            raise ValueError("Value must be a list for array type")

        self.write_key(key)
        self.write_val(val, GGUFValueType.ARRAY)

    def write_val(self, val: Any, vtype: GGUFValueType = None):
        if vtype is None:
            vtype = GGUFValueType.get_type(val)

        self.fout.write(struct.pack("<I", vtype))

        if vtype == GGUFValueType.UINT8:
            self.fout.write(struct.pack("<B", val))
        elif vtype == GGUFValueType.INT8:
            self.fout.write(struct.pack("<b", val))
        elif vtype == GGUFValueType.UINT16:
            self.fout.write(struct.pack("<H", val))
        elif vtype == GGUFValueType.INT16:
            self.fout.write(struct.pack("<h", val))
        elif vtype == GGUFValueType.UINT32:
            self.fout.write(struct.pack("<I", val))
        elif vtype == GGUFValueType.INT32:
            self.fout.write(struct.pack("<i", val))
        elif vtype == GGUFValueType.FLOAT32:
            self.fout.write(struct.pack("<f", val))
        elif vtype == GGUFValueType.BOOL:
            self.fout.write(struct.pack("?", val))
        elif vtype == GGUFValueType.STRING:
            encoded_val = val.encode("utf8")
            self.fout.write(struct.pack("<I", len(encoded_val)))
            self.fout.write(encoded_val)
        elif vtype == GGUFValueType.ARRAY:
            self.fout.write(struct.pack("<I", len(val)))
            for item in val:
                self.write_val(item)
        else:
            raise ValueError("Invalid GGUF metadata value type")

    @staticmethod
    def ggml_pad(x: int, n: int) -> int:
        return ((x + n - 1) // n) * n

    def write_tensor_info(self, name: str, tensor: np.ndarray):
        self.write_val(name, GGUFValueType.STRING)
        n_dims = len(tensor.shape)
        self.write_val(n_dims, GGUFValueType.INT32)
        for i in range(n_dims):
            self.write_val(tensor.shape[n_dims - 1 - i], GGUFValueType.INT32)

        assert tensor.dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now"
        dtype = GGMLQuantizationType.F32 if tensor.dtype == np.float32 else GGMLQuantizationType.F16
        self.write_val(dtype, GGUFValueType.INT32)
        self.fout.write(struct.pack("<Q", self.offset_tensor))
        self.offset_tensor += GGUFWriter.ggml_pad(tensor.nbytes, constants.GGUF_DEFAULT_ALIGNMENT)

        offset_data = GGUFWriter.ggml_pad(self.fout.tell(), constants.GGUF_DEFAULT_ALIGNMENT)
        pad = offset_data - self.fout.tell()
        self.fout.write(bytes([0] * pad))

        self.tensors.append(tensor)

    def write_tensors(self):
        for tensor in self.tensors:
            tensor.tofile(self.fout)
            pad = GGUFWriter.ggml_pad(tensor.nbytes, constants.GGUF_DEFAULT_ALIGNMENT) - tensor.nbytes
            self.fout.write(bytes([0] * pad))

    def flush(self):
        self.fout.flush()

    def close(self):
        self.fout.close()

    def write_architecture(self, architecture: str):
        self.write_string(constants.KEY_GENERAL_ARCHITECTURE,
                          architecture)

    def write_author(self, author: str):
        self.write_string(constants.KEY_GENERAL_AUTHOR, author)

    def write_url(self, url: str):
        self.write_string(constants.KEY_GENERAL_URL, url)

    def write_description(self, description: str):
        self.write_string(constants.KEY_GENERAL_DESCRIPTION, description)

    def write_file_type(self, file_type: str):
        self.write_string(constants.KEY_GENERAL_FILE_TYPE, file_type)

    def write_source_url(self, url: str):
        self.write_string(constants.KEY_GENERAL_SOURCE_URL, url)

    def write_source_hf_repo(self, repo: str):
        self.write_string(constants.KEY_GENERAL_SOURCE_HF_REPO, repo)

    def write_name(self, name: str):
        self.write_string(constants.KEY_GENERAL_NAME, name)

    def write_quantization_version(self, quantization_version: GGMLQuantizationType):
        self.write_uint32(
            constants.KEY_GENERAL_QUANTIZATION_VERSION, quantization_version)

    def write_context_length(self, llm: str, length: int):
        self.write_uint32(
            constants.KEY_LLM_CONTEXT_LENGTH.format(llm=llm), length)

    def write_embedding_length(self, llm: str, length: int):
        self.write_uint32(
            constants.KEY_LLM_EMBEDDING_LENGTH.format(llm=llm), length)

    def write_layer_count(self, llm: str, length: int):
        self.write_uint32(
            constants.KEY_LLM_LAYER_COUNT.format(llm=llm), length)

    def write_feed_forward_length(self, llm: str, length: int):
        self.write_uint32(
            constants.KEY_LLM_FEED_FORWARD_LENGTH.format(llm=llm), length)

    def write_parallel_residual(self, llm: str, use: bool):
        self.write_bool(
            constants.KEY_LLM_USE_PARALLEL_RESIDUAL.format(llm=llm), use)

    def write_tensor_data_layout(self, llm: str, layout: str):
        self.write_string(
            constants.KEY_LLM_TENSOR_DATA_LAYOUT.format(llm=llm), layout)

    def write_head_count(self, llm: str, count: int):
        self.write_uint32(
            constants.KEY_ATTENTION_HEAD_COUNT.format(llm=llm), count)

    def write_head_count_kv(self, llm: str, count: int):
        self.write_uint32(
            constants.KEY_ATTENTION_HEAD_COUNT_KV.format(llm=llm), count)

    def write_max_alibi_bias(self, llm: str, bias: float):
        self.write_float32(
            constants.KEY_ATTENTION_MAX_ALIBI_BIAS.format(llm=llm), bias)

    def write_clamp_kqv(self, llm: str, value: float):
        self.write_float32(
            constants.KEY_ATTENTION_CLAMP_KQV.format(llm=llm), value)

    def write_rope_dimension_count(self, llm: str, count: int):
        self.write_uint32(
            constants.KEY_ROPE_DIMENSION_COUNT.format(llm=llm), count)

    def write_rope_scale(self, llm: str, value: float):
        self.write_float32(constants.KEY_ROPE_SCALE.format(llm=llm), value)


# Example usage:
if __name__ == "__main__":
    # Example usage with a file
    gguf_writer = GGUFWriter.open("example.gguf")
    gguf_writer.write_header(2, 3)

    gguf_writer.write_architecture("llama")
    gguf_writer.write_uint32("answer", 42)  # Write a 32-bit integer
    gguf_writer.write_float32("answer_in_float", 42.0)  # Write a 32-bit float
    tensor1 = np.random.random(size=(7, 10)).astype(np.float32)
    tensor2 = np.random.random(size=(16, 12)).astype(np.float16)
    gguf_writer.write_tensor_info("tensor1", tensor1)
    gguf_writer.write_tensor_info("tensor2", tensor2)
    gguf_writer.write_tensors()

    gguf_writer.close()
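Everything the writer emits is little-endian, so the C loader earlier on this page can read it back directly. As a cross-check, a self-contained sketch that validates just the header of the example.gguf produced above (it assumes a little-endian host, matching the "<I" struct format):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    FILE * f = fopen("example.gguf", "rb");
    if (f == NULL) {
        return 1;
    }

    // header layout written by GGUFWriter.write_header(): four uint32 values
    uint32_t magic, version, n_tensors, n_kv;
    if (fread(&magic,     sizeof(magic),     1, f) != 1 ||
        fread(&version,   sizeof(version),   1, f) != 1 ||
        fread(&n_tensors, sizeof(n_tensors), 1, f) != 1 ||
        fread(&n_kv,      sizeof(n_kv),      1, f) != 1) {
        fclose(f);
        return 1;
    }

    // 0x47475546 is GGUF_MAGIC from ggml.h
    printf("magic ok: %s, version: %u, tensors: %u, kv: %u\n",
            magic == 0x47475546 ? "yes" : "no", version, n_tensors, n_kv);

    fclose(f);
    return 0;
}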
k_quants.c (62 changes)
@@ -39,6 +39,8 @@
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
+#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
+
 //
 // 2-6 bit quantization in super-blocks
 //
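For context on the mechanical replacement in the hunks below: _mm256_set_m128i is reportedly absent from some older GCC and MSVC headers, so the macro rebuilds the same operation from the cast-and-insert intrinsics that every AVX compiler provides. A standalone check (compile with -mavx):

#include <immintrin.h>
#include <stdio.h>

#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)

int main(void) {
    // low lane from b, high lane from a - the semantics of _mm256_set_m128i(a, b)
    const __m128i lo = _mm_set1_epi32(1);
    const __m128i hi = _mm_set1_epi32(2);

    __m256i v = MM256_SET_M128I(hi, lo);

    int out[8];
    _mm256_storeu_si256((__m256i *) out, v);
    // expected: 1 1 1 1 2 2 2 2
    printf("%d %d %d %d %d %d %d %d\n", out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7]);
    return 0;
}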
@@ -1353,7 +1355,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
         const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
         const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
         const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
-        const __m256i scales[2] = {_mm256_set_m128i(l_scales, l_scales), _mm256_set_m128i(h_scales, h_scales)};
+        const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
 
         __m256i sumi = _mm256_setzero_si256();
 
@@ -1421,7 +1423,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
         const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
 
         // sumf += -dmin * summs in 32bits*8
-        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(_mm256_set_m128i(summs_1, summs_0))), acc);
+        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
 
         const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
         const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
@@ -1493,7 +1495,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
         }
 
         // sumf += dall * isum - dmin * summs in 32bits
-        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
         acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
     }
 
@@ -1644,8 +1646,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
         summs += dmin * smin;
 
         const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
-        const __m256i q2_0 = _mm256_and_si256(_mm256_set_m128i(_mm_srli_epi16(q2bits, 2), q2bits), m3);
-        const __m256i q2_1 = _mm256_and_si256(_mm256_set_m128i(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
+        const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
+        const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
 
         const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
         const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
@@ -1709,10 +1711,10 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
         const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
         const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
 
-        const __m256i p_0 = _mm256_set_m128i(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
-        const __m256i p_1 = _mm256_set_m128i(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
-        const __m256i p_2 = _mm256_set_m128i(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
-        const __m256i p_3 = _mm256_set_m128i(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
+        const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
+        const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
+        const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
+        const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
 
         acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
         acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
@@ -1917,7 +1919,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
         const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
         const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
         const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
-        const __m256i scales[2] = {_mm256_set_m128i(l_scales, l_scales), _mm256_set_m128i(h_scales, h_scales)};
+        const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
 
         // high bit
         const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
@@ -2128,7 +2130,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
         }
 
         // multiply with block scale and accumulate
-        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
         acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
 
     }
@@ -2303,13 +2305,13 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
         aux16[0] = a & 0x0f0f;
         aux16[1] = (a >> 4) & 0x0f0f;
 
-        const __m256i scale_0 = _mm256_set_m128i(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
-        const __m256i scale_1 = _mm256_set_m128i(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
+        const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
+        const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
 
         memcpy(&aux64, x[i].hmask, 8);
 
         const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
-        __m256i q3h_0 = _mm256_set_m128i(_mm_srli_epi16(haux, 2), haux);
+        __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
         __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
         q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
         q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
@@ -2318,7 +2320,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
         const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
 
         // prepare low and high bits
-        const __m256i q3aux = _mm256_set_m128i(_mm_srli_epi16(q3bits, 2), q3bits);
+        const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
         const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
         const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
 
@@ -2429,7 +2431,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
 
         p16_0 = _mm_add_epi32(p16_0, p16_2);
         p16_1 = _mm_add_epi32(p16_1, p16_3);
-        __m256i p16 = _mm256_set_m128i(p16_1, p16_0);
+        __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
 
         // multiply with block scale and accumulate
         acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
@@ -2620,7 +2622,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
         acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
 
         const __m128i sc128  = _mm256_extracti128_si256(mins_and_scales, 0);
-        const __m256i scales = _mm256_set_m128i(sc128, sc128);
+        const __m256i scales = MM256_SET_M128I(sc128, sc128);
 
         __m256i sumi = _mm256_setzero_si256();
 
@@ -2727,7 +2729,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
         }
 
         __m256 vd = _mm256_set1_ps(d);
-        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
         acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
 
     }
@@ -2968,11 +2970,11 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
 
         const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
         const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
-        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(_mm256_set_m128i(p32_1, p32_0))), acc);
+        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
 
         const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
         const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
-        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(_mm256_set_m128i(p32_3, p32_2))), acc);
+        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
 
     }
 
@@ -3160,7 +3162,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
         summs += dmin * _mm_extract_epi32(hsum, 0);
 
         const __m128i sc128  = _mm256_extracti128_si256(mins_and_scales, 0);
-        const __m256i scales = _mm256_set_m128i(sc128, sc128);
+        const __m256i scales = MM256_SET_M128I(sc128, sc128);
 
         const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
         __m256i hmask = mone;
@@ -3299,7 +3301,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
         }
 
         __m256 vd = _mm256_set1_ps(d);
-        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
         acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
 
     }
@@ -3462,13 +3464,13 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
 
         const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
 
-        const __m256i scale_l = _mm256_set_m128i(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
-        const __m256i scale_h = _mm256_set_m128i(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
+        const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
+        const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
 
         int64_t aux64;
         memcpy(&aux64, x[i].qh, 8);
         const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
-        const __m256i haux256 = _mm256_set_m128i(_mm_srli_epi16(haux128, 2), haux128);
+        const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
 
         const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
         const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
@@ -3543,7 +3545,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
         const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
         const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
 
-        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_set_m128i(dot_1, dot_0))), acc);
+        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
 
     }
 
@@ -3925,7 +3927,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
 
         }
 
-        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
         acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
     }
 
@@ -4083,8 +4085,8 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
         const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
         const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
 
-        const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(_mm256_set_m128i(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
-        const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_set_m128i(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
+        const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
+        const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
 
         const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
         const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
@@ -4177,7 +4179,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
         sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
         sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
 
-        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(_mm256_set_m128i(sumi_1, sumi_0))), acc);
+        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
     }
 
     *s = hsum_float_8(acc);
llama.cpp (276 changes)
@@ -56,8 +56,14 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
+#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL)
+#include "ggml-alloc.h"
+#define LLAMA_USE_ALLOCATOR
+#else
 #define LLAMA_USE_SCRATCH
 #define LLAMA_MAX_SCRATCH_BUFFERS 16
+#endif
+
 
 // available llama models
 enum e_model {
@@ -327,13 +333,22 @@ struct llama_model {
 
 struct llama_context {
     llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
-#ifdef GGML_USE_METAL
     ~llama_context() {
+        if (model_owner) {
+            delete &model;
+        }
+#ifdef GGML_USE_METAL
         if (ctx_metal) {
             ggml_metal_free(ctx_metal);
         }
-    }
 #endif
+#ifdef LLAMA_USE_ALLOCATOR
+        if (alloc) {
+            ggml_allocr_free(alloc);
+        }
+#endif
+    }
 
     std::mt19937 rng;
 
     bool has_evaluated_once = false;
@@ -371,7 +386,17 @@ struct llama_context {
     // memory buffers used to evaluate the model
     // TODO: move in llama_state
     llama_ctx_buffer buf_compute;
 
+#ifdef LLAMA_USE_ALLOCATOR
+    llama_ctx_buffer buf_alloc;
+    ggml_allocr * alloc = NULL;
+#endif
+
+#ifdef LLAMA_USE_SCRATCH
     llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
+    int    buf_last = 0;
+    size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
+#endif
 
 #ifdef GGML_USE_METAL
     ggml_metal_context * ctx_metal = NULL;
@@ -381,9 +406,6 @@ struct llama_context {
     ggml_mpi_context * ctx_mpi = NULL;
 #endif
 
-    int    buf_last = 0;
-    size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
-
     void use_buf(struct ggml_context * ctx, int i) {
 #if defined(LLAMA_USE_SCRATCH)
         size_t last_size = 0;
@@ -879,6 +901,7 @@ struct llama_context_params llama_context_default_params() {
         /*.progress_callback           =*/ nullptr,
         /*.progress_callback_user_data =*/ nullptr,
         /*.low_vram                    =*/ false,
+        /*.mul_mat_q                   =*/ false,
         /*.f16_kv                      =*/ true,
         /*.logits_all                  =*/ false,
         /*.vocab_only                  =*/ false,
@@ -1006,6 +1029,7 @@ static void llama_model_load_internal(
         int n_gpu_layers,
         int main_gpu,
         const float * tensor_split,
+        const bool mul_mat_q,
         float rope_freq_base,
         float rope_freq_scale,
         bool low_vram,
@@ -1134,9 +1158,11 @@ static void llama_model_load_internal(
     }
 
     (void) main_gpu;
+    (void) mul_mat_q;
#if defined(GGML_USE_CUBLAS)
     fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
     ggml_cuda_set_main_device(main_gpu);
+    ggml_cuda_set_mul_mat_q(mul_mat_q);
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
#elif defined(GGML_USE_CLBLAST)
@@ -1230,12 +1256,16 @@ static void llama_model_load_internal(
     const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;
 
     // this is the total memory required to run the inference
-    const size_t mem_required =
+    size_t mem_required =
         ctx_size +
-        mmapped_size - vram_weights + // weights in VRAM not in memory
+        mmapped_size - vram_weights; // weights in VRAM not in memory
+
+#ifndef LLAMA_USE_ALLOCATOR
+    mem_required +=
         MEM_REQ_SCRATCH0(hparams.n_ctx).at(model.type) +
         MEM_REQ_SCRATCH1().at(model.type) +
         MEM_REQ_EVAL().at(model.type);
+#endif
 
     // this is the memory required by one llama_state
     const size_t mem_required_state =
@@ -1341,6 +1371,7 @@ static bool llama_model_load(
         int n_gpu_layers,
         int main_gpu,
         const float * tensor_split,
+        const bool mul_mat_q,
         float rope_freq_base,
         float rope_freq_scale,
         bool low_vram,
@@ -1351,7 +1382,8 @@ static bool llama_model_load(
         llama_progress_callback progress_callback,
         void *progress_callback_user_data) {
     try {
-        llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers, main_gpu, tensor_split, rope_freq_base, rope_freq_scale, low_vram, memory_type,
+        llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers,
+                main_gpu, tensor_split, mul_mat_q, rope_freq_base, rope_freq_scale, low_vram, memory_type,
                 use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
         return true;
     } catch (const std::exception & err) {
@@ -1360,32 +1392,15 @@ static bool llama_model_load(
     }
 }
 
-// evaluate the transformer
-//
-//   - lctx:      llama context
-//   - tokens:    new batch of tokens to process
-//   - embd       embeddings input
-//   - n_tokens   number of tokens
-//   - n_past:    the context size so far
-//   - n_threads: number of threads to use
-//
-static bool llama_eval_internal(
+static struct ggml_cgraph * llama_build_graph(
          llama_context & lctx,
      const llama_token * tokens,
            const float * embd,
                    int   n_tokens,
-                   int   n_past,
-                   int   n_threads,
-            const char * cgraph_fname) {
+                   int   n_past) {
 
     LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
 
-#ifdef GGML_USE_MPI
-    ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
-#endif
-
-    const int64_t t_start_us = ggml_time_us();
-
     const int N = n_tokens;
 
     const auto & model = lctx.model;
@@ -1401,10 +1416,8 @@ static bool llama_eval_internal(
     const int64_t n_head      = hparams.n_head;
     const int64_t n_head_kv   = hparams.n_head_kv;
     const int64_t n_embd_head = hparams.n_embd_head();
-    const int64_t n_vocab     = hparams.n_vocab;
     const int64_t n_embd_gqa  = hparams.n_embd_gqa();
 
-
     LLAMA_ASSERT(n_embd_head == hparams.n_rot);
 
     const float freq_base = hparams.rope_freq_base;
@@ -1416,26 +1429,35 @@ static bool llama_eval_internal(
     auto & mem_per_token = lctx.mem_per_token;
     auto & buf_compute   = lctx.buf_compute;
 
-
     struct ggml_init_params params = {
         /*.mem_size   =*/ buf_compute.size,
         /*.mem_buffer =*/ buf_compute.addr,
         /*.no_alloc   =*/ false,
     };
 
+#ifdef LLAMA_USE_ALLOCATOR
+    params.no_alloc = true;
+#endif
+
     struct ggml_context * ctx0 = ggml_init(params);
 
     ggml_cgraph * gf = ggml_new_graph(ctx0);
 
-    // for big prompts, if BLAS is enabled, it is better to use only one thread
-    // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
-    n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
-
     struct ggml_tensor * cur;
     struct ggml_tensor * inpL;
 
     if (tokens) {
         struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
 
+#ifdef LLAMA_USE_ALLOCATOR
+        ggml_allocr_alloc(lctx.alloc, inp_tokens);
+        if (!ggml_allocr_is_measure(lctx.alloc)) {
+            memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+        }
+#else
         memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+#endif
         ggml_set_name(inp_tokens, "inp_tokens");
 
         inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
@@ -1445,7 +1467,15 @@ static bool llama_eval_internal(
 #endif
 
         inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
 
+#ifdef LLAMA_USE_ALLOCATOR
+        ggml_allocr_alloc(lctx.alloc, inpL);
+        if (!ggml_allocr_is_measure(lctx.alloc)) {
+            memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+        }
+#else
         memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+#endif
     }
 
     const int i_gpu_start = n_layer - n_gpu_layers;
@@ -1472,6 +1502,17 @@ static bool llama_eval_internal(
     }
 #endif // GGML_USE_CUBLAS
 
+    struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+#ifdef LLAMA_USE_ALLOCATOR
+    ggml_allocr_alloc(lctx.alloc, KQ_scale);
+    if (!ggml_allocr_is_measure(lctx.alloc)) {
+        ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+    }
+#else
+    ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+#endif
+    ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
+
     for (int il = 0; il < n_layer; ++il) {
         ggml_format_name(inpL, "layer_inp_%d", il);
 
@@ -1567,9 +1608,6 @@ static bool llama_eval_internal(
             ggml_set_name(KQ, "KQ");
 
             // KQ_scaled = KQ / sqrt(n_embd_head)
-            struct ggml_tensor * KQ_scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head));
-            ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
-
             // KQ_scaled shape [n_past + N, N, n_head, 1]
             struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
             offload_func_kq(KQ_scaled);
@@ -1685,9 +1723,6 @@ static bool llama_eval_internal(
 
     lctx.use_buf(ctx0, 0);
 
-    // used at the end to optionally extract the embeddings
-    struct ggml_tensor * embeddings = NULL;
-
     // norm
     {
         cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
@@ -1698,8 +1733,6 @@ static bool llama_eval_internal(
         cur = ggml_mul(ctx0, cur, model.norm);
         // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
         ggml_set_name(cur, "result_norm");
-
-        embeddings = cur;
     }
 
     // lm_head
@@ -1711,23 +1744,103 @@ static bool llama_eval_internal(
     // logits -> probs
     //cur = ggml_soft_max_inplace(ctx0, cur);
 
     // run the computation
     ggml_build_forward_expand(gf, cur);
 
-    // fprintf(stderr, "graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf.n_nodes, gf.n_leafs);
+    if (mem_per_token == 0) {
+        mem_per_token = ggml_used_mem(ctx0)/N;
+    }
+
+#if 0
+    printf("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
+            ggml_used_mem(ctx0)/1024.0/1024.0,
+            lctx.get_buf_max_mem(0)/1024.0/1024.0,
+            lctx.get_buf_max_mem(1)/1024.0/1024.0,
+            lctx.work_buffer.size()/1024.0/1024.0,
+            n_past, N);
+#endif
+
+    ggml_free(ctx0);
+
+    return gf;
+}
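With graph construction factored out, each evaluation reduces to reset, build, allocate, compute. A condensed sketch of the flow that llama_eval_internal below implements under LLAMA_USE_ALLOCATOR; ggml_graph_compute_helper is assumed to be llama.cpp's existing CPU dispatch wrapper, and all error handling plus the Metal/MPI/BLAS branches are omitted:

// condensed sketch of the per-eval flow, not a drop-in replacement
static void eval_sketch(llama_context & lctx, const llama_token * tokens, int n_tokens, int n_past, int n_threads) {
    ggml_allocr_reset(lctx.alloc);             // reuse the same buffer on every call

    // record the ops; with no_alloc set, no intermediate data is allocated yet
    ggml_cgraph * gf = llama_build_graph(lctx, tokens, NULL, n_tokens, n_past);

    ggml_allocr_alloc_graph(lctx.alloc, gf);   // assign addresses inside buf_alloc

    // run the graph on the CPU backend (assumed helper)
    ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
}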
+
+// evaluate the transformer
+//
+//   - lctx:      llama context
+//   - tokens:    new batch of tokens to process
+//   - embd       embeddings input
+//   - n_tokens   number of tokens
+//   - n_past:    the context size so far
+//   - n_threads: number of threads to use
+//
+static bool llama_eval_internal(
+         llama_context & lctx,
+     const llama_token * tokens,
+           const float * embd,
+                   int   n_tokens,
+                   int   n_past,
+                   int   n_threads,
+            const char * cgraph_fname) {
+
+    LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
+
+    const int64_t t_start_us = ggml_time_us();
+
+#ifdef GGML_USE_MPI
+    ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
+#endif
+
+    const int N = n_tokens;
+
+    const auto & model   = lctx.model;
+    const auto & hparams = model.hparams;
+
+    const auto & kv_self = lctx.kv_self;
+
+    LLAMA_ASSERT(!!kv_self.ctx);
+
+    const int64_t n_embd  = hparams.n_embd;
+    const int64_t n_vocab = hparams.n_vocab;
+
+#ifdef LLAMA_USE_ALLOCATOR
+    ggml_allocr_reset(lctx.alloc);
+#endif
+
+    ggml_cgraph * gf = llama_build_graph(lctx, tokens, embd, n_tokens, n_past);
+
+#ifdef LLAMA_USE_ALLOCATOR
+    ggml_allocr_alloc_graph(lctx.alloc, gf);
+#endif
+
+    // fprintf(stderr, "graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
+
+    // for big prompts, if BLAS is enabled, it is better to use only one thread
+    // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
+    n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
+
+    struct ggml_tensor * res        = gf->nodes[gf->n_nodes - 1];
+    struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+
+    LLAMA_ASSERT(strcmp(res->name,        "result_output") == 0);
+    LLAMA_ASSERT(strcmp(embeddings->name, "result_norm")   == 0);
+
+#if GGML_USE_MPI
+    const int64_t n_layer = hparams.n_layer;
+    ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
+#endif
+
 #ifdef GGML_USE_METAL
     if (lctx.ctx_metal && N == 1) {
-        if (!ggml_metal_if_optimized(lctx.ctx_metal)) {
-            ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf);
-        }
+        // TODO: disabled until #2413 is resolved
+        //if (!ggml_metal_if_optimized(lctx.ctx_metal)) {
+        //    ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf);
+        //}
         ggml_metal_set_n_cb     (lctx.ctx_metal, n_threads);
         ggml_metal_graph_compute(lctx.ctx_metal, gf);
-        ggml_metal_get_tensor   (lctx.ctx_metal, cur);
+        ggml_metal_get_tensor   (lctx.ctx_metal, res);
         if (!lctx.embedding.empty()) {
             ggml_metal_get_tensor(lctx.ctx_metal, embeddings);
         }
     } else {
         // IMPORTANT:
         // Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fallback to vanilla
@@ -1758,8 +1871,6 @@ static bool llama_eval_internal(
     // update kv token count
     lctx.kv_self.n = n_past + N;
 
-    struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
-
     if (cgraph_fname) {
         ggml_graph_export(gf, cgraph_fname);
     }
@@ -1797,21 +1908,6 @@ static bool llama_eval_internal(
         memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
     }
 
-    if (mem_per_token == 0) {
-        mem_per_token = ggml_used_mem(ctx0)/N;
-    }
-
-#if 0
-    printf("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
-            ggml_used_mem(ctx0)/1024.0/1024.0,
-            lctx.get_buf_max_mem(0)/1024.0/1024.0,
-            lctx.get_buf_max_mem(1)/1024.0/1024.0,
-            lctx.work_buffer.size()/1024.0/1024.0,
-            n_past, N);
-#endif
-
-    ggml_free(ctx0);
-
     // measure the performance only for the single-token evals
     if (N == 1) {
         lctx.t_eval_us += ggml_time_us() - t_start_us;
@@ -1923,7 +2019,9 @@ struct llama_tokenizer {
             if (token == vocab_.token_to_id.end()) {
                 // output any symbols that did not form tokens as bytes.
                 for (int j = 0; j < (int) symbol.n; ++j) {
-                    llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
+                    // NOTE: old version, before #2420 - not sure what are the implications of this
+                    //llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
+                    llama_vocab::id token_id = vocab_.token_to_id.at(std::string(1, symbol.text[j]));
                    output.push_back(token_id);
                 }
             } else {
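The replaced line encodes the byte-fallback convention of the original LLaMA sentencepiece vocabulary, where ids 0-2 are <unk>, <s> and </s> and the 256 byte tokens follow immediately; that vocabulary layout is an assumption here. A sketch of the old fixed mapping, for comparison with the table lookup that replaces it:

// old scheme (pre #2420): a raw byte maps to id byte + 3, because ids 0..2
// are assumed reserved for <unk>, <s>, </s> in the original LLaMA vocabulary
static inline int byte_token_id_old(unsigned char byte) {
    return (int) byte + 3;
}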
@@ -3100,7 +3198,7 @@ struct llama_model * llama_load_model_from_file(
     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
 
     if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers,
-                params.main_gpu, params.tensor_split, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
+                params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
                 memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
                 params.progress_callback_user_data)) {
         delete model;
@@ -3177,10 +3275,47 @@ struct llama_context * llama_new_context_with_model(
         ctx->embedding.resize(hparams.n_embd);
     }
 
-    ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead());
+#ifdef LLAMA_USE_ALLOCATOR
+    {
+        static const size_t tensor_alignment = 32;
+        // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
+        ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead());
+
+        // create measure allocator
+        ctx->alloc = ggml_allocr_new_measure(tensor_alignment);
+
+        // build worst-case graph
+        int n_tokens = std::min((int)hparams.n_ctx, params.n_batch);
+        int n_past = hparams.n_ctx - n_tokens;
+        llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
+        ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past);
+
+        // measure memory requirements for the graph
+        size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;
+
+        fprintf(stderr, "%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
+
+        // debug - for comparison with scratch buffer
+        //size_t prev_req =
+        //    MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) +
+        //    MEM_REQ_SCRATCH1().at(ctx->model.type) +
+        //    MEM_REQ_EVAL().at(ctx->model.type);
+        //fprintf(stderr, "%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0);
+
+        // recreate allocator with exact memory requirements
+        ggml_allocr_free(ctx->alloc);
+
+        ctx->buf_alloc.resize(alloc_size);
+        ctx->alloc = ggml_allocr_new(ctx->buf_alloc.addr, ctx->buf_alloc.size, tensor_alignment);
+    }
+#else
+    ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead());
+#endif
+
+#ifdef LLAMA_USE_SCRATCH
     ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type));
     ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
+#endif
 }
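The block above is an instance of ggml-alloc's measure-then-commit idiom. Stripped of the llama specifics, the pattern looks like this (build_worst_case_graph is a hypothetical stand-in for whatever builds the largest graph you will ever run):

#include <stdlib.h>

// sketch of the measure-then-commit idiom from ggml-alloc
static struct ggml_allocr * make_exact_allocator(void) {
    static const size_t tensor_alignment = 32;

    // pass 1: a measure allocator records peak usage without touching memory
    struct ggml_allocr * alloc = ggml_allocr_new_measure(tensor_alignment);
    struct ggml_cgraph * gf    = build_worst_case_graph(); // hypothetical helper
    const size_t alloc_size    = ggml_allocr_alloc_graph(alloc, gf) + tensor_alignment;
    ggml_allocr_free(alloc);

    // pass 2: a real allocator over a buffer of exactly the measured size;
    // the buffer must outlive the allocator
    void * buf = malloc(alloc_size);
    return ggml_allocr_new(buf, alloc_size, tensor_alignment);
}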
 
 #ifdef GGML_USE_METAL
@@ -3250,9 +3385,6 @@ struct llama_context * llama_init_from_file(
 }
 
 void llama_free(struct llama_context * ctx) {
-    if (ctx->model_owner) {
-        delete &ctx->model;
-    }
     delete ctx;
 }
 
@@ -3662,7 +3794,7 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
     const auto & kv_self = ctx->kv_self;
     const auto & hparams = ctx->model.hparams;
     const int    n_layer = hparams.n_layer;
-    const int    n_embd  = hparams.n_embd;
+    const int    n_embd  = hparams.n_embd_gqa();
     const int    n_ctx   = hparams.n_ctx;
 
     const size_t kv_size = kv_self.buf.size;
@@ -3765,7 +3897,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
     const auto & kv_self = ctx->kv_self;
     const auto & hparams = ctx->model.hparams;
     const int    n_layer = hparams.n_layer;
-    const int    n_embd  = hparams.n_embd;
+    const int    n_embd  = hparams.n_embd_gqa();
     const int    n_ctx   = hparams.n_ctx;
 
     size_t kv_size;
llama.h (1 change)
@@ -108,6 +108,7 @@ extern "C" {
|
||||
|
||||
// Keep the booleans together to avoid misalignment during copy-by-value.
|
||||
bool low_vram; // if true, reduce VRAM usage at the cost of performance
|
||||
bool mul_mat_q; // if true, use experimental mul_mat_q kernels
|
||||
bool f16_kv; // use fp16 for KV cache
|
||||
bool logits_all; // the llama_eval() call computes all logits, not just the last one
|
||||
bool vocab_only; // only load the vocabulary, no weights
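A hedged sketch of a caller opting in to the new flag, using the model/context entry points visible elsewhere in this diff; the model path is a placeholder and llama_free_model is assumed to be the matching cleanup call:

#include "llama.h"

int main(void) {
    struct llama_context_params params = llama_context_default_params();
    params.mul_mat_q = true; // opt in to the experimental kernels

    // "model.bin" is a placeholder path for an actual model file
    struct llama_model * model = llama_load_model_from_file("model.bin", params);
    if (model == NULL) {
        return 1;
    }

    struct llama_context * ctx = llama_new_context_with_model(model, params);
    if (ctx == NULL) {
        llama_free_model(model);
        return 1;
    }

    // ... evaluate tokens as usual ...

    llama_free(ctx);
    llama_free_model(model);
    return 0;
}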