Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2026-02-05 13:53:23 +02:00)
Compare commits
103 Commits
| SHA1 |
|---|
| d80fb71f8b |
| 2f8bd2b901 |
| bc5ba007b2 |
| 958367bf53 |
| 40f2555797 |
| 167a515651 |
| c39665f589 |
| 0a1c750c80 |
| 190a37d797 |
| 2d3aba9ee8 |
| 80273a306d |
| c19af0acb1 |
| ac113a0fee |
| 4c9388fb96 |
| 873279b159 |
| c8c07d658a |
| 19d900a756 |
| 11d47057a5 |
| c421ac072d |
| 4ff7fe1fb3 |
| 6b8447352d |
| 674804a996 |
| e94a138d64 |
| e01c67affe |
| 994cfb1acb |
| 94008cc760 |
| dbd5f2f573 |
| f594bc80ba |
| d5ebd79c76 |
| 55e47786e3 |
| bc21975084 |
| 1db8c84fc6 |
| 45f097645e |
| 7cab2083c7 |
| cda0e4b648 |
| afd9909a64 |
| 87421a23e8 |
| 60ce97c9d8 |
| 8901755ba3 |
| 6f55bccbb8 |
| 17bb928080 |
| 9f45fc1e99 |
| 99bd4ac28c |
| 3752217ed5 |
| f010b77a37 |
| 2194200278 |
| 73afe681aa |
| 9e04102448 |
| dbf18e4de9 |
| 66c2c93082 |
| 10433e8b45 |
| 1f66b699c4 |
| 0e41b300ed |
| cd60b88bf7 |
| becfd387f6 |
| 755a9b2bf0 |
| 223c25a72f |
| fbc98b748e |
| dcdd535302 |
| 4c42f93b22 |
| a89f75e1b7 |
| 13dca2a54a |
| d4c19c0f5c |
| c7181bd294 |
| 92be9f1216 |
| edc265661c |
| 1bde94dd02 |
| 95c76e8e92 |
| 11ac9800af |
| 943d20b411 |
| 96776405a1 |
| 7eee341bee |
| 0e9f760eb1 |
| cf8e0a3bb9 |
| c7499c557c |
| c81f3bbb05 |
| e7022064ab |
| 3dc48fe75a |
| dca1d4b58a |
| 458367a906 |
| fa42aa6d89 |
| 6374743747 |
| f1af42fa8c |
| 6279dac039 |
| d5ac8cf2f2 |
| 96b6912103 |
| d5cb86844f |
| f4b2dcdf49 |
| b6d6c5289f |
| b0915d5b51 |
| 8c475b97b8 |
| 58b16695e1 |
| 905f5485b2 |
| 71967c2a6d |
| 17880771ad |
| 55951c018d |
| ff565769f2 |
| f3fdcfaa79 |
| 133c7b46b3 |
| d5ed2b929d |
| 1bb8a64ebf |
| fabdc3bda3 |
| eee39bdc96 |

.devops/full-musa.Dockerfile (Normal file, 26 lines changed)
@@ -0,0 +1,26 @@
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG MUSA_VERSION=rc3.1.0
# Target the MUSA build image
ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

FROM ${BASE_MUSA_DEV_CONTAINER} AS build

RUN apt-get update && \
apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1

COPY requirements.txt requirements.txt
COPY requirements requirements

RUN pip install --upgrade pip setuptools wheel \
&& pip install -r requirements.txt

WORKDIR /app

COPY . .

RUN cmake -B build -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
cmake --build build --config Release -j$(nproc) && \
cp build/bin/* .

ENTRYPOINT ["/app/.devops/tools.sh"]

.devops/llama-cli-musa.Dockerfile (Normal file, 30 lines changed)
@@ -0,0 +1,30 @@
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG MUSA_VERSION=rc3.1.0
# Target the MUSA build image
ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the MUSA runtime image
ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_MUSA_DEV_CONTAINER} AS build

RUN apt-get update && \
apt-get install -y build-essential git cmake

WORKDIR /app

COPY . .

RUN cmake -B build -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
cmake --build build --config Release --target llama-cli -j$(nproc)

FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime

RUN apt-get update && \
apt-get install -y libgomp1

COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
COPY --from=build /app/build/src/libllama.so /libllama.so
COPY --from=build /app/build/bin/llama-cli /llama-cli

ENTRYPOINT [ "/llama-cli" ]

.devops/llama-server-musa.Dockerfile (Normal file, 35 lines changed)
@@ -0,0 +1,35 @@
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG MUSA_VERSION=rc3.1.0
# Target the MUSA build image
ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the MUSA runtime image
ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_MUSA_DEV_CONTAINER} AS build

RUN apt-get update && \
apt-get install -y build-essential git cmake libcurl4-openssl-dev

WORKDIR /app

COPY . .

RUN cmake -B build -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
cmake --build build --config Release --target llama-server -j$(nproc)

FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime

RUN apt-get update && \
apt-get install -y libcurl4-openssl-dev libgomp1 curl

COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
COPY --from=build /app/build/src/libllama.so /libllama.so
COPY --from=build /app/build/bin/llama-server /llama-server

# Must be set to 0.0.0.0 so it can listen to requests from host machine
ENV LLAMA_ARG_HOST=0.0.0.0

HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]

ENTRYPOINT [ "/llama-server" ]

.github/workflows/build.yml (vendored, 5 lines changed)
@@ -19,6 +19,11 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
cancel-in-progress: true

# Fine-grant permission
# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
permissions:
contents: write # for creating release

env:
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
GGML_NLOOP: 3

.github/workflows/close-issue.yml (vendored, 5 lines changed)
@@ -3,6 +3,11 @@ on:
schedule:
- cron: "42 0 * * *"

# Fine-grant permission
# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
permissions:
issues: write

jobs:
close-issues:
runs-on: ubuntu-latest

.github/workflows/docker.yml (vendored, 3 lines changed)
@@ -43,6 +43,9 @@ jobs:
- { tag: "light-cuda", dockerfile: ".devops/llama-cli-cuda.Dockerfile", platforms: "linux/amd64" }
- { tag: "server-cuda", dockerfile: ".devops/llama-server-cuda.Dockerfile", platforms: "linux/amd64" }
- { tag: "full-cuda", dockerfile: ".devops/full-cuda.Dockerfile", platforms: "linux/amd64" }
- { tag: "light-musa", dockerfile: ".devops/llama-cli-musa.Dockerfile", platforms: "linux/amd64" }
- { tag: "server-musa", dockerfile: ".devops/llama-server-musa.Dockerfile", platforms: "linux/amd64" }
- { tag: "full-musa", dockerfile: ".devops/full-musa.Dockerfile", platforms: "linux/amd64" }
# Note: the rocm images are failing due to a compiler error and are disabled until this is fixed to allow the workflow to complete
#- { tag: "light-rocm", dockerfile: ".devops/llama-cli-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
#- { tag: "server-rocm", dockerfile: ".devops/llama-server-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }

.github/workflows/nix-ci-aarch64.yml (vendored, 7 lines changed)
@@ -21,6 +21,13 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
cancel-in-progress: true

# Fine-grant permission
# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
permissions:
# https://github.com/DeterminateSystems/nix-installer-action?tab=readme-ov-file#with-flakehub
id-token: write
contents: read

jobs:
nix-build-aarch64:
runs-on: ubuntu-latest

.github/workflows/nix-ci.yml (vendored, 7 lines changed)
@@ -12,6 +12,13 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
cancel-in-progress: true

# Fine-grant permission
# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
permissions:
# https://github.com/DeterminateSystems/nix-installer-action?tab=readme-ov-file#with-flakehub
id-token: write
contents: read

jobs:
nix-eval:
strategy:

CMakeLists.txt
@@ -63,7 +63,7 @@ option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF)
option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF)

# utils
option(LLAMA_BUILD_COMMON "llama: build common utils library" ON)
option(LLAMA_BUILD_COMMON "llama: build common utils library" ${LLAMA_STANDALONE})

# extra artifacts
option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
@@ -88,6 +88,10 @@ if (NOT DEFINED GGML_LLAMAFILE)
set(GGML_LLAMAFILE_DEFAULT ON)
endif()

if (NOT DEFINED GGML_AMX)
set(GGML_AMX ON)
endif()

if (NOT DEFINED GGML_CUDA_GRAPHS)
set(GGML_CUDA_GRAPHS_DEFAULT ON)
endif()
@@ -201,12 +205,12 @@ if (LLAMA_BUILD_COMMON)
add_subdirectory(common)
endif()

if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
include(CTest)
add_subdirectory(tests)
endif()

if (LLAMA_BUILD_EXAMPLES)
if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_EXAMPLES)
add_subdirectory(examples)
add_subdirectory(pocs)
endif()

CONTRIBUTING.md
@@ -1,24 +1,23 @@
# Pull requests (for contributors)

- Test your changes:
- Using the commands in the [`tests`](tests) folder. For instance, running the `./tests/test-backend-ops` command tests different backend implementations of the GGML library
- Using the commands in the [`tests`](tests) folder. For instance, running the `./tests/test-backend-ops` command tests different backend implementations of the `ggml` library
- Execute [the full CI locally on your machine](ci/README.md) before publishing
- Please rate the complexity of your PR (i.e. `Review Complexity : Low`, `Review Complexity : Medium`, `Review Complexity : High`). This makes it easier for maintainers to triage the PRs.
- The PR template has a series of review complexity checkboxes `[ ]` that [you can mark as](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/about-task-lists) `[X]` for your convenience
- Consider allowing write access to your branch for faster review
- Optionally rate the complexity of your PR (i.e. `Review Complexity : Low`, `Review Complexity : Medium`, `Review Complexity : High`). This makes it easier for maintainers to triage the PRs
- Consider allowing write access to your branch for faster reviews, as reviewers can push commits directly
- If your PR becomes stale, don't hesitate to ping the maintainers in the comments

# Pull requests (for collaborators)

- Squash-merge PRs
- Use the following format for the squashed commit title: `<module> : <commit title> (#<issue_number>)`. For example: `utils : fix typo in utils.py (#1234)`
- Optionally, pick a `<module>` from here: https://github.com/ggerganov/llama.cpp/wiki/Modules
- Optionally pick a `<module>` from here: https://github.com/ggerganov/llama.cpp/wiki/Modules

# Coding guidelines

- Avoid adding third-party dependencies, extra files, extra headers, etc.
- Always consider cross-compatibility with other operating systems and architectures
- Avoid fancy looking modern STL constructs, use basic `for` loops, avoid templates, keep it simple
- Avoid fancy-looking modern STL constructs, use basic `for` loops, avoid templates, keep it simple
- There are no strict rules for the code style, but try to follow the patterns in the code (indentation, spaces, etc.). Vertical alignment makes things more readable and easier to batch edit
- Clean-up any trailing whitespaces, use 4 spaces for indentation, brackets on the same line, `void * ptr`, `int & a`
- Naming usually optimizes for common prefix (see https://github.com/ggerganov/ggml/pull/302#discussion_r1243240963)

Makefile (24 lines changed)
@@ -93,11 +93,6 @@ GGML_METAL := 1
DEPRECATE_WARNING := 1
endif

ifdef LLAMA_OPENMP
GGML_OPENMP := 1
DEPRECATE_WARNING := 1
endif

ifdef LLAMA_RPC
GGML_RPC := 1
DEPRECATE_WARNING := 1
@@ -584,6 +579,11 @@ ifndef GGML_NO_LLAMAFILE
OBJ_GGML += ggml/src/llamafile/sgemm.o
endif

ifndef GGML_NO_AMX
MK_CPPFLAGS += -DGGML_USE_AMX
OBJ_GGML += ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o
endif

ifdef GGML_RPC
MK_CPPFLAGS += -DGGML_USE_RPC
OBJ_GGML += ggml/src/ggml-rpc.o
@@ -1087,6 +1087,19 @@ ggml/src/llamafile/sgemm.o: \
$(CXX) $(CXXFLAGS) -c $< -o $@
endif # GGML_NO_LLAMAFILE

ifndef GGML_NO_AMX
ggml/src/ggml-amx.o: \
ggml/src/ggml-amx.cpp \
ggml/include/ggml-amx.h
$(CXX) $(CXXFLAGS) -c $< -o $@

ggml/src/ggml-amx/mmq.o: \
ggml/src/ggml-amx/mmq.cpp \
ggml/src/ggml-amx/mmq.h \
ggml/include/ggml.h
$(CXX) $(CXXFLAGS) -c $< -o $@
endif

ifdef GGML_RPC
ggml/src/ggml-rpc.o: \
ggml/src/ggml-rpc.cpp \
@@ -1238,6 +1251,7 @@ clean:
rm -vrf ggml/src/ggml-metal-embed.metal
rm -vrf ggml/src/ggml-cuda/*.o
rm -vrf ggml/src/ggml-cuda/template-instances/*.o
rm -vrf ggml/src/ggml-amx/*.o
rm -rvf $(BUILD_TARGETS)
rm -rvf $(TEST_TARGETS)
rm -f vulkan-shaders-gen ggml/src/ggml-vulkan-shaders.hpp ggml/src/ggml-vulkan-shaders.cpp

README.md (13 lines changed)
@@ -29,9 +29,9 @@ variety of hardware - locally and in the cloud.

- Plain C/C++ implementation without any dependencies
- Apple silicon is a first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks
- AVX, AVX2 and AVX512 support for x86 architectures
- AVX, AVX2, AVX512 and AMX support for x86 architectures
- 1.5-bit, 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory use
- Custom CUDA kernels for running LLMs on NVIDIA GPUs (support for AMD GPUs via HIP)
- Custom CUDA kernels for running LLMs on NVIDIA GPUs (support for AMD GPUs via HIP and Moore Threads MTT GPUs via MUSA)
- Vulkan and SYCL backend support
- CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity

@@ -93,6 +93,7 @@ Typically finetunes of the base models below are supported as well.
- [x] [FalconMamba Models](https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a)
- [x] [Jais](https://huggingface.co/inceptionai/jais-13b-chat)
- [x] [Bielik-11B-v2.3](https://huggingface.co/collections/speakleash/bielik-11b-v23-66ee813238d9b526a072408a)
- [x] [RWKV-6](https://github.com/BlinkDL/RWKV-LM)

(instructions for supporting more models: [HOWTO-add-model.md](./docs/development/HOWTO-add-model.md))

@@ -122,6 +123,7 @@ Typically finetunes of the base models below are supported as well.
- Rust (nicer API): [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp)
- Rust (more direct bindings): [utilityai/llama-cpp-rs](https://github.com/utilityai/llama-cpp-rs)
- C#/.NET: [SciSharp/LLamaSharp](https://github.com/SciSharp/LLamaSharp)
- C#/VB.NET (more features - community license): [LM-Kit.NET](https://docs.lm-kit.com/lm-kit-net/index.html)
- Scala 3: [donderom/llm4s](https://github.com/donderom/llm4s)
- Clojure: [phronmophobic/llama.clj](https://github.com/phronmophobic/llama.clj)
- React Native: [mybigday/llama.rn](https://github.com/mybigday/llama.rn)
@@ -130,6 +132,8 @@ Typically finetunes of the base models below are supported as well.
- Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart)
- PHP (API bindings and features built on top of llama.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggerganov/llama.cpp/pull/6326)
- Guile Scheme: [guile_llama_cpp](https://savannah.nongnu.org/projects/guile-llama-cpp)
- Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift)
- Swift [ShenghaiWang/SwiftLlama](https://github.com/ShenghaiWang/SwiftLlama)

**UI:**

@@ -169,6 +173,8 @@ Unless otherwise noted these projects are open-source with permissive licensing:
- [AIKit](https://github.com/sozercan/aikit) (MIT)
- [LARS - The LLM & Advanced Referencing Solution](https://github.com/abgulati/LARS) (AGPL)
- [LLMUnity](https://github.com/undreamai/LLMUnity) (MIT)
- [Llama Assistant](https://github.com/vietanhdev/llama-assistant) (GPL)
- [PocketPal AI - An iOS and Android App](https://github.com/a-ghorbani/pocketpal-ai) (MIT)

*(to have a project listed here, it should clearly state that it depends on `llama.cpp`)*

@@ -184,6 +190,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:

- [Paddler](https://github.com/distantmagic/paddler) - Stateful load balancer custom-tailored for llama.cpp
- [GPUStack](https://github.com/gpustack/gpustack) - Manage GPU clusters for running LLMs
- [llama_cpp_canister](https://github.com/onicai/llama_cpp_canister) - llama.cpp as a smart contract on the Internet Computer, using WebAssembly

**Games:**
- [Lucy's Labyrinth](https://github.com/MorganRO8/Lucys_Labyrinth) - A simple maze game where agents controlled by an AI model will try to trick you.
@@ -412,7 +419,7 @@ Please refer to [Build llama.cpp locally](./docs/build.md)
| [BLAS](./docs/build.md#blas-build) | All |
| [BLIS](./docs/backend/BLIS.md) | All |
| [SYCL](./docs/backend/SYCL.md) | Intel and Nvidia GPU |
| [MUSA](./docs/build.md#musa) | Moore Threads GPU |
| [MUSA](./docs/build.md#musa) | Moore Threads MTT GPU |
| [CUDA](./docs/build.md#cuda) | Nvidia GPU |
| [hipBLAS](./docs/build.md#hipblas) | AMD GPU |
| [Vulkan](./docs/build.md#vulkan) | GPU |

ci/run.sh
@@ -1,4 +1,4 @@
#/bin/bash
#!/bin/bash
#
# sample usage:
#
@@ -53,7 +53,7 @@ if [ ! -z ${GG_BUILD_SYCL} ]; then
exit 1
fi

CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_SYCL=1 DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON"
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_SYCL=1 -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON"
fi

if [ ! -z ${GG_BUILD_VULKAN} ]; then
@@ -751,7 +751,8 @@ function gg_run_rerank_tiny {

model_f16="${path_models}/ggml-model-f16.gguf"

(time ./bin/llama-embedding --model ${model_f16} -p "what is panda?</s><s>hi\nwhat is panda?</s><s>it's a bear\nwhat is panda?</s><s>The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log
# for this model, the SEP token is "</s>"
(time ./bin/llama-embedding --model ${model_f16} -p "what is panda?</s></s>hi\nwhat is panda?</s></s>it's a bear\nwhat is panda?</s></s>The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log

# sample output
# rerank score 0: 0.029
@@ -774,7 +775,7 @@ function gg_run_rerank_tiny {

check_score "rerank score 0" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 0")" "0.00" "0.05" | tee -a $OUT/${ci}-rk-f16.log
check_score "rerank score 1" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 1")" "0.00" "0.05" | tee -a $OUT/${ci}-rk-f16.log
check_score "rerank score 2" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 2")" "0.10" "0.15" | tee -a $OUT/${ci}-rk-f16.log
check_score "rerank score 2" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 2")" "0.10" "0.30" | tee -a $OUT/${ci}-rk-f16.log

set +e
}

common/arg.cpp (1182 lines changed): file diff suppressed because it is too large.

common/arg.h (44 lines changed)
@@ -10,7 +10,7 @@
|
||||
// CLI argument parsing
|
||||
//
|
||||
|
||||
struct llama_arg {
|
||||
struct common_arg {
|
||||
std::set<enum llama_example> examples = {LLAMA_EXAMPLE_COMMON};
|
||||
std::vector<const char *> args;
|
||||
const char * value_hint = nullptr; // help text or example for arg value
|
||||
@@ -18,60 +18,60 @@ struct llama_arg {
|
||||
const char * env = nullptr;
|
||||
std::string help;
|
||||
bool is_sparam = false; // is current arg a sampling param?
|
||||
void (*handler_void) (gpt_params & params) = nullptr;
|
||||
void (*handler_string) (gpt_params & params, const std::string &) = nullptr;
|
||||
void (*handler_str_str)(gpt_params & params, const std::string &, const std::string &) = nullptr;
|
||||
void (*handler_int) (gpt_params & params, int) = nullptr;
|
||||
void (*handler_void) (common_params & params) = nullptr;
|
||||
void (*handler_string) (common_params & params, const std::string &) = nullptr;
|
||||
void (*handler_str_str)(common_params & params, const std::string &, const std::string &) = nullptr;
|
||||
void (*handler_int) (common_params & params, int) = nullptr;
|
||||
|
||||
llama_arg(
|
||||
common_arg(
|
||||
const std::initializer_list<const char *> & args,
|
||||
const char * value_hint,
|
||||
const std::string & help,
|
||||
void (*handler)(gpt_params & params, const std::string &)
|
||||
void (*handler)(common_params & params, const std::string &)
|
||||
) : args(args), value_hint(value_hint), help(help), handler_string(handler) {}
|
||||
|
||||
llama_arg(
|
||||
common_arg(
|
||||
const std::initializer_list<const char *> & args,
|
||||
const char * value_hint,
|
||||
const std::string & help,
|
||||
void (*handler)(gpt_params & params, int)
|
||||
void (*handler)(common_params & params, int)
|
||||
) : args(args), value_hint(value_hint), help(help), handler_int(handler) {}
|
||||
|
||||
llama_arg(
|
||||
common_arg(
|
||||
const std::initializer_list<const char *> & args,
|
||||
const std::string & help,
|
||||
void (*handler)(gpt_params & params)
|
||||
void (*handler)(common_params & params)
|
||||
) : args(args), help(help), handler_void(handler) {}
|
||||
|
||||
// support 2 values for arg
|
||||
llama_arg(
|
||||
common_arg(
|
||||
const std::initializer_list<const char *> & args,
|
||||
const char * value_hint,
|
||||
const char * value_hint_2,
|
||||
const std::string & help,
|
||||
void (*handler)(gpt_params & params, const std::string &, const std::string &)
|
||||
void (*handler)(common_params & params, const std::string &, const std::string &)
|
||||
) : args(args), value_hint(value_hint), value_hint_2(value_hint_2), help(help), handler_str_str(handler) {}
|
||||
|
||||
llama_arg & set_examples(std::initializer_list<enum llama_example> examples);
|
||||
llama_arg & set_env(const char * env);
|
||||
llama_arg & set_sparam();
|
||||
common_arg & set_examples(std::initializer_list<enum llama_example> examples);
|
||||
common_arg & set_env(const char * env);
|
||||
common_arg & set_sparam();
|
||||
bool in_example(enum llama_example ex);
|
||||
bool get_value_from_env(std::string & output);
|
||||
bool has_value_from_env();
|
||||
std::string to_string();
|
||||
};
|
||||
|
||||
struct gpt_params_context {
|
||||
struct common_params_context {
|
||||
enum llama_example ex = LLAMA_EXAMPLE_COMMON;
|
||||
gpt_params & params;
|
||||
std::vector<llama_arg> options;
|
||||
common_params & params;
|
||||
std::vector<common_arg> options;
|
||||
void(*print_usage)(int, char **) = nullptr;
|
||||
gpt_params_context(gpt_params & params) : params(params) {}
|
||||
common_params_context(common_params & params) : params(params) {}
|
||||
};
|
||||
|
||||
// parse input arguments from CLI
|
||||
// if one argument has invalid value, it will automatically display usage of the specific argument (and not the full usage message)
|
||||
bool gpt_params_parse(int argc, char ** argv, gpt_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr);
|
||||
bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr);
|
||||
|
||||
// function to be used by test-arg-parser
|
||||
gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr);
|
||||
common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr);
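
As a hedged illustration of the renamed argument API above (formerly `llama_arg` / `gpt_params_*`), a typical entry point might now look like the sketch below; the `main` scaffolding and the sample option in the comment are illustrative, not part of the diff.

```cpp
// Minimal sketch of the renamed argument API. Assumes the declarations shown
// above in common/arg.h and common/common.h; error handling is illustrative.
#include "arg.h"
#include "common.h"

int main(int argc, char ** argv) {
    common_params params;

    // parse CLI arguments into common_params; on an invalid value the parser
    // prints the usage of the specific argument (see the comment above)
    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
        return 1;
    }

    common_init(); // initialize logging and print build info (declared in common.h)

    // internally, each option is now a common_arg (formerly llama_arg), e.g.:
    // common_arg({"-t", "--threads"}, "N", "number of threads",
    //            [](common_params & p, int v) { p.cpuparams.n_threads = v; });
    return 0;
}
```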
@@ -12,6 +12,7 @@
|
||||
|
||||
#include <algorithm>
|
||||
#include <cinttypes>
|
||||
#include <climits>
|
||||
#include <cmath>
|
||||
#include <codecvt>
|
||||
#include <cstdarg>
|
||||
@@ -23,10 +24,10 @@
|
||||
#include <regex>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <vector>
|
||||
#include <thread>
|
||||
|
||||
#if defined(__APPLE__) && defined(__MACH__)
|
||||
#include <sys/types.h>
|
||||
@@ -362,10 +363,10 @@ bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREAD
|
||||
return true;
|
||||
}
|
||||
|
||||
void gpt_init() {
|
||||
void common_init() {
|
||||
llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
|
||||
if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_thold) {
|
||||
gpt_log_add(gpt_log_main(), level, "%s", text);
|
||||
if (LOG_DEFAULT_LLAMA <= common_log_verbosity_thold) {
|
||||
common_log_add(common_log_main(), level, "%s", text);
|
||||
}
|
||||
}, NULL);
|
||||
|
||||
@@ -378,7 +379,7 @@ void gpt_init() {
|
||||
LOG_INF("build: %d (%s) with %s for %s%s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, LLAMA_BUILD_TARGET, build_type);
|
||||
}
|
||||
|
||||
std::string gpt_params_get_system_info(const gpt_params & params) {
|
||||
std::string common_params_get_system_info(const common_params & params) {
|
||||
std::ostringstream os;
|
||||
|
||||
os << "system_info: n_threads = " << params.cpuparams.n_threads;
|
||||
@@ -400,17 +401,19 @@ std::string gpt_params_get_system_info(const gpt_params & params) {
|
||||
// String utils
|
||||
//
|
||||
|
||||
std::vector<std::string> string_split(std::string input, char separator) {
|
||||
std::vector<std::string> parts;
|
||||
size_t separator_pos = input.find(separator);
|
||||
while (separator_pos != std::string::npos) {
|
||||
std::string part = input.substr(0, separator_pos);
|
||||
parts.emplace_back(part);
|
||||
input = input.substr(separator_pos + 1);
|
||||
separator_pos = input.find(separator);
|
||||
}
|
||||
parts.emplace_back(input);
|
||||
return parts;
|
||||
std::string string_format(const char * fmt, ...) {
|
||||
va_list ap;
|
||||
va_list ap2;
|
||||
va_start(ap, fmt);
|
||||
va_copy(ap2, ap);
|
||||
int size = vsnprintf(NULL, 0, fmt, ap);
|
||||
GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
|
||||
std::vector<char> buf(size + 1);
|
||||
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
|
||||
GGML_ASSERT(size2 == size);
|
||||
va_end(ap2);
|
||||
va_end(ap);
|
||||
return std::string(buf.data(), size);
|
||||
}
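
The new `string_format` helper above is a printf-style formatter that returns a `std::string`. A small usage sketch (the message and variables are made up):

```cpp
// Hypothetical call site for the string_format helper defined above.
int         n_tokens = 128;
double      t_ms     = 42.5;
std::string line     = string_format("decoded %d tokens in %.1f ms", n_tokens, t_ms);
// line == "decoded 128 tokens in 42.5 ms"
```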
std::string string_strip(const std::string & str) {
|
||||
@@ -493,7 +496,7 @@ std::string string_from(const struct llama_context * ctx, const std::vector<llam
|
||||
first = false;
|
||||
}
|
||||
|
||||
auto detokenized = llama_token_to_piece(ctx, token);
|
||||
auto detokenized = common_token_to_piece(ctx, token);
|
||||
|
||||
detokenized.erase(
|
||||
std::remove_if(
|
||||
@@ -524,7 +527,7 @@ std::string string_from(const struct llama_context * ctx, const struct llama_bat
|
||||
first = false;
|
||||
}
|
||||
|
||||
auto detokenized = llama_token_to_piece(ctx, batch.token[i]);
|
||||
auto detokenized = common_token_to_piece(ctx, batch.token[i]);
|
||||
|
||||
detokenized.erase(
|
||||
std::remove_if(
|
||||
@@ -819,16 +822,16 @@ std::string fs_get_cache_file(const std::string & filename) {
|
||||
//
|
||||
// Model utils
|
||||
//
|
||||
struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
|
||||
llama_init_result iparams;
|
||||
auto mparams = llama_model_params_from_gpt_params(params);
|
||||
struct common_init_result common_init_from_params(common_params & params) {
|
||||
common_init_result iparams;
|
||||
auto mparams = common_model_params_to_llama(params);
|
||||
|
||||
llama_model * model = nullptr;
|
||||
|
||||
if (!params.hf_repo.empty() && !params.hf_file.empty()) {
|
||||
model = llama_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
|
||||
model = common_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
|
||||
} else if (!params.model_url.empty()) {
|
||||
model = llama_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
|
||||
model = common_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
|
||||
} else {
|
||||
model = llama_load_model_from_file(params.model.c_str(), mparams);
|
||||
}
|
||||
@@ -838,7 +841,32 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
|
||||
return iparams;
|
||||
}
|
||||
|
||||
auto cparams = llama_context_params_from_gpt_params(params);
|
||||
if (params.reranking) {
|
||||
bool ok = true;
|
||||
|
||||
if (llama_token_bos(model) == LLAMA_TOKEN_NULL) {
|
||||
LOG_WRN("%s: warning: model does not have a BOS token, reranking will not work\n", __func__);
|
||||
ok = false;
|
||||
}
|
||||
|
||||
if (llama_token_eos(model) == LLAMA_TOKEN_NULL) {
|
||||
LOG_WRN("%s: warning: model does not have an EOS token, reranking will not work\n", __func__);
|
||||
ok = false;
|
||||
}
|
||||
|
||||
if (llama_token_sep(model) == LLAMA_TOKEN_NULL) {
|
||||
LOG_WRN("%s: warning: model does not have a SEP token, reranking will not work\n", __func__);
|
||||
ok = false;
|
||||
}
|
||||
|
||||
if (!ok) {
|
||||
llama_free_model(model);
|
||||
|
||||
return iparams;
|
||||
}
|
||||
}
|
||||
|
||||
auto cparams = common_context_params_to_llama(params);
|
||||
|
||||
llama_context * lctx = llama_new_context_with_model(model, cparams);
|
||||
if (lctx == NULL) {
|
||||
@@ -851,10 +879,11 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
|
||||
if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
|
||||
if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_n_layer(model);
|
||||
|
||||
const auto cvec = llama_control_vector_load(params.control_vectors);
|
||||
const auto cvec = common_control_vector_load(params.control_vectors);
|
||||
if (cvec.n_embd == -1) {
|
||||
llama_free(lctx);
|
||||
llama_free_model(model);
|
||||
|
||||
return iparams;
|
||||
}
|
||||
|
||||
@@ -867,13 +896,14 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
|
||||
if (err) {
|
||||
llama_free(lctx);
|
||||
llama_free_model(model);
|
||||
|
||||
return iparams;
|
||||
}
|
||||
}
|
||||
|
||||
// load and optionally apply lora adapters
|
||||
for (auto & la : params.lora_adapters) {
|
||||
llama_lora_adapter_container loaded_la;
|
||||
common_lora_adapter_container loaded_la;
|
||||
loaded_la.path = la.path;
|
||||
loaded_la.scale = la.scale;
|
||||
loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str());
|
||||
@@ -886,10 +916,10 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
|
||||
iparams.lora_adapters.push_back(loaded_la); // copy to list of loaded adapters
|
||||
}
|
||||
if (!params.lora_init_without_apply) {
|
||||
llama_lora_adapters_apply(lctx, iparams.lora_adapters);
|
||||
common_lora_adapters_apply(lctx, iparams.lora_adapters);
|
||||
}
|
||||
|
||||
if (params.sparams.ignore_eos && llama_token_eos(model) == -1) {
|
||||
if (params.sparams.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) {
|
||||
LOG_WRN("%s: warning: model does not have an EOS token, ignoring --ignore-eos\n", __func__);
|
||||
params.sparams.ignore_eos = false;
|
||||
}
|
||||
@@ -912,7 +942,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
|
||||
}
|
||||
|
||||
if (llama_model_has_encoder(model)) {
|
||||
llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
|
||||
llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
|
||||
llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
|
||||
if (decoder_start_token_id == -1) {
|
||||
decoder_start_token_id = bos;
|
||||
@@ -921,7 +951,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
|
||||
tmp.push_back(decoder_start_token_id);
|
||||
}
|
||||
if (llama_model_has_decoder(model)) {
|
||||
llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
|
||||
llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
|
||||
}
|
||||
llama_kv_cache_clear(lctx);
|
||||
llama_synchronize(lctx);
|
||||
@@ -930,10 +960,11 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
|
||||
|
||||
iparams.model = model;
|
||||
iparams.context = lctx;
|
||||
|
||||
return iparams;
|
||||
}
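
For callers, the renamed initializer is used roughly as in the hedged sketch below; it relies only on the fields shown above (`model`, `context`) and assumes `params` was already filled in, for example by `common_params_parse`.

```cpp
// Hedged usage sketch of common_init_from_params (formerly llama_init_from_gpt_params).
static bool load_model_and_context(common_params & params, llama_model ** out_model, llama_context ** out_ctx) {
    common_init_result llama_init = common_init_from_params(params);
    if (llama_init.model == nullptr || llama_init.context == nullptr) {
        // loading failed, e.g. a missing BOS/EOS/SEP token when reranking is requested
        return false;
    }
    *out_model = llama_init.model;
    *out_ctx   = llama_init.context;
    return true;
}
```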
void llama_lora_adapters_apply(struct llama_context * ctx, std::vector<llama_lora_adapter_container> & lora_adapters) {
|
||||
void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_lora_adapter_container> & lora_adapters) {
|
||||
llama_lora_adapter_clear(ctx);
|
||||
for (auto & la : lora_adapters) {
|
||||
if (la.scale != 0.0f) {
|
||||
@@ -942,7 +973,7 @@ void llama_lora_adapters_apply(struct llama_context * ctx, std::vector<llama_lor
|
||||
}
|
||||
}
|
||||
|
||||
struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & params) {
|
||||
struct llama_model_params common_model_params_to_llama(const common_params & params) {
|
||||
auto mparams = llama_model_default_params();
|
||||
|
||||
if (params.n_gpu_layers != -1) {
|
||||
@@ -991,10 +1022,10 @@ static ggml_type kv_cache_type_from_str(const std::string & s) {
|
||||
return GGML_TYPE_Q5_1;
|
||||
}
|
||||
|
||||
throw std::runtime_error("Invalid cache type: " + s);
|
||||
throw std::runtime_error("Unsupported cache type: " + s);
|
||||
}
|
||||
|
||||
struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) {
|
||||
struct llama_context_params common_context_params_to_llama(const common_params & params) {
|
||||
auto cparams = llama_context_default_params();
|
||||
|
||||
cparams.n_ctx = params.n_ctx;
|
||||
@@ -1003,7 +1034,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
|
||||
cparams.n_ubatch = params.n_ubatch;
|
||||
cparams.n_threads = params.cpuparams.n_threads;
|
||||
cparams.n_threads_batch = params.cpuparams_batch.n_threads == -1 ?
|
||||
params.cpuparams.n_threads : params.cpuparams_batch.n_threads;
|
||||
params.cpuparams.n_threads : params.cpuparams_batch.n_threads;
|
||||
cparams.logits_all = params.logits_all;
|
||||
cparams.embeddings = params.embedding;
|
||||
cparams.rope_scaling_type = params.rope_scaling_type;
|
||||
@@ -1084,7 +1115,7 @@ static bool curl_perform_with_retry(const std::string& url, CURL* curl, int max_
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool llama_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
|
||||
static bool common_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
|
||||
|
||||
// Initialize libcurl
|
||||
std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> curl(curl_easy_init(), &curl_easy_cleanup);
|
||||
@@ -1154,15 +1185,15 @@ static bool llama_download_file(const std::string & url, const std::string & pat
|
||||
}
|
||||
|
||||
// Send a HEAD request to retrieve the etag and last-modified headers
|
||||
struct llama_load_model_from_url_headers {
|
||||
struct common_load_model_from_url_headers {
|
||||
std::string etag;
|
||||
std::string last_modified;
|
||||
};
|
||||
llama_load_model_from_url_headers headers;
|
||||
common_load_model_from_url_headers headers;
|
||||
{
|
||||
typedef size_t(*CURLOPT_HEADERFUNCTION_PTR)(char *, size_t, size_t, void *);
|
||||
auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
|
||||
llama_load_model_from_url_headers *headers = (llama_load_model_from_url_headers *) userdata;
|
||||
common_load_model_from_url_headers *headers = (common_load_model_from_url_headers *) userdata;
|
||||
|
||||
static std::regex header_regex("([^:]+): (.*)\r\n");
|
||||
static std::regex etag_regex("ETag", std::regex_constants::icase);
|
||||
@@ -1298,7 +1329,7 @@ static bool llama_download_file(const std::string & url, const std::string & pat
|
||||
return true;
|
||||
}
|
||||
|
||||
struct llama_model * llama_load_model_from_url(
|
||||
struct llama_model * common_load_model_from_url(
|
||||
const char * model_url,
|
||||
const char * path_model,
|
||||
const char * hf_token,
|
||||
@@ -1309,7 +1340,7 @@ struct llama_model * llama_load_model_from_url(
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!llama_download_file(model_url, path_model, hf_token)) {
|
||||
if (!common_download_file(model_url, path_model, hf_token)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -1362,7 +1393,7 @@ struct llama_model * llama_load_model_from_url(
|
||||
char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
|
||||
llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split);
|
||||
|
||||
return llama_download_file(split_url, split_path, hf_token);
|
||||
return common_download_file(split_url, split_path, hf_token);
|
||||
}, idx));
|
||||
}
|
||||
|
||||
@@ -1377,7 +1408,7 @@ struct llama_model * llama_load_model_from_url(
|
||||
return llama_load_model_from_file(path_model, params);
|
||||
}
|
||||
|
||||
struct llama_model * llama_load_model_from_hf(
|
||||
struct llama_model * common_load_model_from_hf(
|
||||
const char * repo,
|
||||
const char * model,
|
||||
const char * path_model,
|
||||
@@ -1397,12 +1428,12 @@ struct llama_model * llama_load_model_from_hf(
|
||||
model_url += "/resolve/main/";
|
||||
model_url += model;
|
||||
|
||||
return llama_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
|
||||
return common_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
struct llama_model * llama_load_model_from_url(
|
||||
struct llama_model * common_load_model_from_url(
|
||||
const char * /*model_url*/,
|
||||
const char * /*path_model*/,
|
||||
const char * /*hf_token*/,
|
||||
@@ -1411,7 +1442,7 @@ struct llama_model * llama_load_model_from_url(
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
struct llama_model * llama_load_model_from_hf(
|
||||
struct llama_model * common_load_model_from_hf(
|
||||
const char * /*repo*/,
|
||||
const char * /*model*/,
|
||||
const char * /*path_model*/,
|
||||
@@ -1427,11 +1458,11 @@ struct llama_model * llama_load_model_from_hf(
|
||||
// Batch utils
|
||||
//
|
||||
|
||||
void llama_batch_clear(struct llama_batch & batch) {
|
||||
void common_batch_clear(struct llama_batch & batch) {
|
||||
batch.n_tokens = 0;
|
||||
}
|
||||
|
||||
void llama_batch_add(
|
||||
void common_batch_add(
|
||||
struct llama_batch & batch,
|
||||
llama_token id,
|
||||
llama_pos pos,
|
||||
@@ -1454,15 +1485,15 @@ void llama_batch_add(
|
||||
// Vocab utils
|
||||
//
|
||||
|
||||
std::vector<llama_token> llama_tokenize(
|
||||
std::vector<llama_token> common_tokenize(
|
||||
const struct llama_context * ctx,
|
||||
const std::string & text,
|
||||
bool add_special,
|
||||
bool parse_special) {
|
||||
return llama_tokenize(llama_get_model(ctx), text, add_special, parse_special);
|
||||
return common_tokenize(llama_get_model(ctx), text, add_special, parse_special);
|
||||
}
|
||||
|
||||
std::vector<llama_token> llama_tokenize(
|
||||
std::vector<llama_token> common_tokenize(
|
||||
const struct llama_model * model,
|
||||
const std::string & text,
|
||||
bool add_special,
|
||||
@@ -1481,7 +1512,7 @@ std::vector<llama_token> llama_tokenize(
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
|
||||
std::string common_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
|
||||
std::string piece;
|
||||
piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\n'
|
||||
const int n_chars = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special);
|
||||
@@ -1497,7 +1528,7 @@ std::string llama_token_to_piece(const struct llama_context * ctx, llama_token t
|
||||
return piece;
|
||||
}
|
||||
|
||||
std::string llama_detokenize(llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
|
||||
std::string common_detokenize(llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
|
||||
std::string text;
|
||||
text.resize(std::max(text.capacity(), tokens.size()));
|
||||
int32_t n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
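
Taken together, the renamed vocab helpers above keep their previous behaviour. A hedged round-trip sketch (the prompt string and the printing are illustrative, and a valid context is assumed):

```cpp
// Hedged round-trip sketch using the renamed vocab helpers (formerly llama_tokenize,
// llama_token_to_piece, llama_detokenize). Assumes a valid llama_context * ctx and <cstdio>.
static void demo_vocab_roundtrip(llama_context * ctx) {
    // tokenize -> inspect pieces -> detokenize
    std::vector<llama_token> toks = common_tokenize(ctx, "Hello world", /*add_special*/ true, /*parse_special*/ true);
    for (llama_token t : toks) {
        printf("%6d -> '%s'\n", t, common_token_to_piece(ctx, t, /*special*/ true).c_str());
    }
    const std::string round_trip = common_detokenize(ctx, toks, /*special*/ false);
    printf("round trip: '%s'\n", round_trip.c_str());
}
```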
@@ -1517,15 +1548,15 @@ std::string llama_detokenize(llama_context * ctx, const std::vector<llama_token>
|
||||
// Chat template utils
|
||||
//
|
||||
|
||||
bool llama_chat_verify_template(const std::string & tmpl) {
|
||||
bool common_chat_verify_template(const std::string & tmpl) {
|
||||
llama_chat_message chat[] = {{"user", "test"}};
|
||||
int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
|
||||
return res >= 0;
|
||||
}
|
||||
|
||||
std::string llama_chat_apply_template(const struct llama_model * model,
|
||||
std::string common_chat_apply_template(const struct llama_model * model,
|
||||
const std::string & tmpl,
|
||||
const std::vector<llama_chat_msg> & msgs,
|
||||
const std::vector<common_chat_msg> & msgs,
|
||||
bool add_ass) {
|
||||
int alloc_size = 0;
|
||||
bool fallback = false; // indicate if we must fallback to default chatml
|
||||
@@ -1567,42 +1598,42 @@ std::string llama_chat_apply_template(const struct llama_model * model,
|
||||
return formatted_chat;
|
||||
}
|
||||
|
||||
std::string llama_chat_format_single(const struct llama_model * model,
|
||||
std::string common_chat_format_single(const struct llama_model * model,
|
||||
const std::string & tmpl,
|
||||
const std::vector<llama_chat_msg> & past_msg,
|
||||
const llama_chat_msg & new_msg,
|
||||
const std::vector<common_chat_msg> & past_msg,
|
||||
const common_chat_msg & new_msg,
|
||||
bool add_ass) {
|
||||
std::ostringstream ss;
|
||||
auto fmt_past_msg = past_msg.empty() ? "" : llama_chat_apply_template(model, tmpl, past_msg, false);
|
||||
std::vector<llama_chat_msg> chat_new(past_msg);
|
||||
auto fmt_past_msg = past_msg.empty() ? "" : common_chat_apply_template(model, tmpl, past_msg, false);
|
||||
std::vector<common_chat_msg> chat_new(past_msg);
|
||||
// if the past_msg ends with a newline, we must preserve it in the formatted version
|
||||
if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
|
||||
ss << "\n";
|
||||
};
|
||||
// format chat with new_msg
|
||||
chat_new.push_back(new_msg);
|
||||
auto fmt_new_msg = llama_chat_apply_template(model, tmpl, chat_new, add_ass);
|
||||
auto fmt_new_msg = common_chat_apply_template(model, tmpl, chat_new, add_ass);
|
||||
// get the diff part
|
||||
ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string llama_chat_format_example(const struct llama_model * model,
|
||||
std::string common_chat_format_example(const struct llama_model * model,
|
||||
const std::string & tmpl) {
|
||||
std::vector<llama_chat_msg> msgs = {
|
||||
std::vector<common_chat_msg> msgs = {
|
||||
{"system", "You are a helpful assistant"},
|
||||
{"user", "Hello"},
|
||||
{"assistant", "Hi there"},
|
||||
{"user", "How are you?"},
|
||||
};
|
||||
return llama_chat_apply_template(model, tmpl, msgs, true);
|
||||
return common_chat_apply_template(model, tmpl, msgs, true);
|
||||
}
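
As a hedged illustration of the renamed chat helpers, building a conversation and formatting it with the model's template might look like the sketch below; the messages mirror the example just above, and passing an empty template string is assumed (as in upstream llama.cpp) to select the model's built-in chat template.

```cpp
// Hedged sketch of the renamed chat-template helpers. Assumes a loaded llama_model * model.
static std::string demo_chat_prompt(const llama_model * model) {
    std::vector<common_chat_msg> msgs = {
        {"system",    "You are a helpful assistant"},
        {"user",      "Hello"},
        {"assistant", "Hi there"},
        {"user",      "How are you?"},
    };
    // add_ass = true appends the assistant prefix so generation can continue the reply;
    // an empty tmpl is assumed to fall back to the model's built-in chat template
    return common_chat_apply_template(model, /*tmpl*/ "", msgs, /*add_ass*/ true);
}
```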
//
|
||||
// KV cache utils
|
||||
//
|
||||
|
||||
void llama_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
|
||||
void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
|
||||
static const char slot_chars[] = ".123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+";
|
||||
|
||||
printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d",
|
||||
@@ -1625,7 +1656,7 @@ void llama_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
|
||||
printf("\n=== Done dumping\n");
|
||||
}
|
||||
|
||||
void llama_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size) {
|
||||
void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size) {
|
||||
static const char slot_chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
|
||||
|
||||
printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d\n",
|
||||
@@ -1677,7 +1708,7 @@ void llama_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_siz
|
||||
// Embedding utils
|
||||
//
|
||||
|
||||
void llama_embd_normalize(const float * inp, float * out, int n, int embd_norm) {
|
||||
void common_embd_normalize(const float * inp, float * out, int n, int embd_norm) {
|
||||
double sum = 0.0;
|
||||
|
||||
switch (embd_norm) {
|
||||
@@ -1711,7 +1742,7 @@ void llama_embd_normalize(const float * inp, float * out, int n, int embd_norm)
|
||||
}
|
||||
}
|
||||
|
||||
float llama_embd_similarity_cos(const float * embd1, const float * embd2, int n){
|
||||
float common_embd_similarity_cos(const float * embd1, const float * embd2, int n){
|
||||
double sum = 0.0;
|
||||
double sum1 = 0.0;
|
||||
double sum2 = 0.0;
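
A hedged sketch of how the renamed embedding utilities in this hunk are typically combined: normalize two raw embedding vectors, then score them with cosine similarity (the array sizes and data are illustrative placeholders).

```cpp
// Hedged sketch combining the renamed embedding helpers shown in this hunk.
static float demo_embd_similarity(const float * raw_a, const float * raw_b, int n_embd) {
    std::vector<float> a(n_embd), b(n_embd);

    // 2 = euclidean normalization, matching the default embd_normalize in common_params
    common_embd_normalize(raw_a, a.data(), n_embd, 2);
    common_embd_normalize(raw_b, b.data(), n_embd, 2);

    return common_embd_similarity_cos(a.data(), b.data(), n_embd);
}
```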
@@ -1737,8 +1768,8 @@ float llama_embd_similarity_cos(const float * embd1, const float * embd2, int n)
|
||||
// Control vector utils
|
||||
//
|
||||
|
||||
static llama_control_vector_data llama_control_vector_load_one(const llama_control_vector_load_info & load_info) {
|
||||
llama_control_vector_data result = { -1, {} };
|
||||
static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) {
|
||||
common_control_vector_data result = { -1, {} };
|
||||
|
||||
ggml_context * ctx = nullptr;
|
||||
struct gguf_init_params meta_gguf_params = {
|
||||
@@ -1822,11 +1853,11 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
|
||||
return result;
|
||||
}
|
||||
|
||||
llama_control_vector_data llama_control_vector_load(const std::vector<llama_control_vector_load_info> & load_infos) {
|
||||
llama_control_vector_data result = { -1, {} };
|
||||
common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
|
||||
common_control_vector_data result = { -1, {} };
|
||||
|
||||
for (const auto & info : load_infos) {
|
||||
auto cur = llama_control_vector_load_one(info);
|
||||
auto cur = common_control_vector_load_one(info);
|
||||
|
||||
if (cur.n_embd == -1) {
|
||||
result.n_embd = -1;
|
||||
@@ -1918,7 +1949,7 @@ void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const cha
|
||||
}
|
||||
}
|
||||
|
||||
void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const llama_context * lctx,
|
||||
void yaml_dump_non_result_info(FILE * stream, const common_params & params, const llama_context * lctx,
|
||||
const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc) {
|
||||
const auto & sparams = params.sparams;
|
||||
|
||||
@@ -2060,6 +2091,8 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
|
||||
fprintf(stream, "top_k: %d # default: 40\n", sparams.top_k);
|
||||
fprintf(stream, "top_p: %f # default: 0.95\n", sparams.top_p);
|
||||
fprintf(stream, "min_p: %f # default: 0.0\n", sparams.min_p);
|
||||
fprintf(stream, "xtc_probability: %f # default: 0.0\n", sparams.xtc_probability);
|
||||
fprintf(stream, "xtc_threshold: %f # default: 0.1\n", sparams.xtc_threshold);
|
||||
fprintf(stream, "typ_p: %f # default: 1.0\n", sparams.typ_p);
|
||||
fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false");
|
||||
fprintf(stream, "display_prompt: %s # default: true\n", params.display_prompt ? "true" : "false");
|
||||
|
||||

common/common.h (165 lines changed)
@@ -24,12 +24,12 @@
|
||||
|
||||
#define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"
|
||||
|
||||
struct llama_lora_adapter_info {
|
||||
struct common_lora_adapter_info {
|
||||
std::string path;
|
||||
float scale;
|
||||
};
|
||||
|
||||
struct llama_lora_adapter_container : llama_lora_adapter_info {
|
||||
struct common_lora_adapter_container : common_lora_adapter_info {
|
||||
struct llama_lora_adapter * adapter;
|
||||
};
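
A hedged sketch of how these renamed LoRA types are filled in by a caller before initialization; the file path and scale are placeholders, and `lora_adapters` refers to the `common_params` member shown further down in this header.

```cpp
// Hedged sketch: requesting a LoRA adapter through the renamed common_* types.
static void demo_add_lora(common_params & params) {
    common_lora_adapter_info la;
    la.path  = "lora-adapter.gguf"; // placeholder path
    la.scale = 1.0f;
    params.lora_adapters.push_back(la);
    // common_init_from_params() then loads it into a common_lora_adapter_container
    // and applies it unless lora_init_without_apply is set.
}
```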
@@ -39,7 +39,7 @@ extern char const * LLAMA_COMMIT;
|
||||
extern char const * LLAMA_COMPILER;
|
||||
extern char const * LLAMA_BUILD_TARGET;
|
||||
|
||||
struct llama_control_vector_load_info;
|
||||
struct common_control_vector_load_info;
|
||||
|
||||
//
|
||||
// CPU utils
|
||||
@@ -82,14 +82,16 @@ enum llama_example {
|
||||
LLAMA_EXAMPLE_COUNT,
|
||||
};
|
||||
|
||||
enum gpt_sampler_type {
|
||||
GPT_SAMPLER_TYPE_NONE = 0,
|
||||
GPT_SAMPLER_TYPE_TOP_K = 1,
|
||||
GPT_SAMPLER_TYPE_TOP_P = 2,
|
||||
GPT_SAMPLER_TYPE_MIN_P = 3,
|
||||
GPT_SAMPLER_TYPE_TFS_Z = 4,
|
||||
GPT_SAMPLER_TYPE_TYPICAL_P = 5,
|
||||
GPT_SAMPLER_TYPE_TEMPERATURE = 6,
|
||||
enum common_sampler_type {
|
||||
COMMON_SAMPLER_TYPE_NONE = 0,
|
||||
COMMON_SAMPLER_TYPE_TOP_K = 1,
|
||||
COMMON_SAMPLER_TYPE_TOP_P = 2,
|
||||
COMMON_SAMPLER_TYPE_MIN_P = 3,
|
||||
COMMON_SAMPLER_TYPE_TFS_Z = 4,
|
||||
COMMON_SAMPLER_TYPE_TYPICAL_P = 5,
|
||||
COMMON_SAMPLER_TYPE_TEMPERATURE = 6,
|
||||
COMMON_SAMPLER_TYPE_XTC = 7,
|
||||
COMMON_SAMPLER_TYPE_INFILL = 8,
|
||||
};
|
||||
|
||||
// dimensionality reduction methods, used by cvector-generator
|
||||
@@ -99,7 +101,7 @@ enum dimre_method {
|
||||
};
|
||||
|
||||
// sampler parameters
|
||||
struct gpt_sampler_params {
|
||||
struct common_sampler_params {
|
||||
uint32_t seed = LLAMA_DEFAULT_SEED; // the seed used to initialize llama_sampler
|
||||
|
||||
int32_t n_prev = 64; // number of previous tokens to remember
|
||||
@@ -108,6 +110,8 @@ struct gpt_sampler_params {
|
||||
int32_t top_k = 40; // <= 0 to use vocab size
|
||||
float top_p = 0.95f; // 1.0 = disabled
|
||||
float min_p = 0.05f; // 0.0 = disabled
|
||||
float xtc_probability = 0.00f; // 0.0 = disabled
|
||||
float xtc_threshold = 0.10f; // > 0.5 disables XTC
|
||||
float tfs_z = 1.00f; // 1.0 = disabled
|
||||
float typ_p = 1.00f; // typical_p, 1.0 = disabled
|
||||
float temp = 0.80f; // <= 0.0 to sample greedily, 0.0 to not output probabilities
|
||||
@@ -124,13 +128,15 @@ struct gpt_sampler_params {
|
||||
bool ignore_eos = false;
|
||||
bool no_perf = false; // disable performance metrics
|
||||
|
||||
std::vector<enum gpt_sampler_type> samplers = {
|
||||
GPT_SAMPLER_TYPE_TOP_K,
|
||||
GPT_SAMPLER_TYPE_TFS_Z,
|
||||
GPT_SAMPLER_TYPE_TYPICAL_P,
|
||||
GPT_SAMPLER_TYPE_TOP_P,
|
||||
GPT_SAMPLER_TYPE_MIN_P,
|
||||
GPT_SAMPLER_TYPE_TEMPERATURE
|
||||
|
||||
std::vector<enum common_sampler_type> samplers = {
|
||||
COMMON_SAMPLER_TYPE_TOP_K,
|
||||
COMMON_SAMPLER_TYPE_TFS_Z,
|
||||
COMMON_SAMPLER_TYPE_TYPICAL_P,
|
||||
COMMON_SAMPLER_TYPE_TOP_P,
|
||||
COMMON_SAMPLER_TYPE_MIN_P,
|
||||
COMMON_SAMPLER_TYPE_XTC,
|
||||
COMMON_SAMPLER_TYPE_TEMPERATURE,
|
||||
};
|
||||
|
||||
std::string grammar; // optional BNF-like grammar to constrain sampling
|
||||
@@ -141,7 +147,7 @@ struct gpt_sampler_params {
|
||||
std::string print() const;
|
||||
};
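
As a hedged example of the renamed sampler configuration, a caller can override the default chain and its parameters through the `sparams` member of `common_params` (the renamed `gpt_params`, defined just below); the concrete values are illustrative, not recommendations.

```cpp
// Hedged sketch: overriding the default sampler chain via the renamed types.
static common_params make_demo_params() {
    common_params params;
    params.sparams.samplers = {
        COMMON_SAMPLER_TYPE_TOP_K,
        COMMON_SAMPLER_TYPE_MIN_P,
        COMMON_SAMPLER_TYPE_XTC,
        COMMON_SAMPLER_TYPE_TEMPERATURE,
    };
    params.sparams.top_k           = 40;
    params.sparams.min_p           = 0.05f;
    params.sparams.xtc_probability = 0.5f; // 0.0 disables XTC (see defaults above)
    params.sparams.temp            = 0.8f;
    return params;
}
```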
struct gpt_params {
|
||||
struct common_params {
|
||||
int32_t n_predict = -1; // new tokens to predict
|
||||
int32_t n_ctx = 0; // context size
|
||||
int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
|
||||
@@ -183,7 +189,7 @@ struct gpt_params {
|
||||
enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings
|
||||
enum llama_attention_type attention_type = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings
|
||||
|
||||
struct gpt_sampler_params sparams;
|
||||
struct common_sampler_params sparams;
|
||||
|
||||
std::string model = ""; // model path // NOLINT
|
||||
std::string model_draft = ""; // draft model for speculative decoding // NOLINT
|
||||
@@ -208,9 +214,9 @@ struct gpt_params {
|
||||
std::vector<llama_model_kv_override> kv_overrides;
|
||||
|
||||
bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_lora_adapter_apply)
|
||||
std::vector<llama_lora_adapter_info> lora_adapters; // lora adapter path with user defined scale
|
||||
std::vector<common_lora_adapter_info> lora_adapters; // lora adapter path with user defined scale
|
||||
|
||||
std::vector<llama_control_vector_load_info> control_vectors; // control vector with user defined scale
|
||||
std::vector<common_control_vector_load_info> control_vectors; // control vector with user defined scale
|
||||
|
||||
int32_t verbosity = 0;
|
||||
int32_t control_vector_layer_start = -1; // layer range for control vector
|
||||
@@ -268,21 +274,21 @@ struct gpt_params {
|
||||
|
||||
// embedding
|
||||
bool embedding = false; // get only sentence embedding
|
||||
int32_t embd_normalize = 2; // normalisation for embendings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
|
||||
int32_t embd_normalize = 2; // normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
|
||||
std::string embd_out = ""; // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix
|
||||
std::string embd_sep = "\n"; // separator of embendings
|
||||
std::string embd_sep = "\n"; // separator of embeddings
|
||||
bool reranking = false; // enable reranking support on server
|
||||
|
||||
// server params
|
||||
int32_t port = 8080; // server listens on this network port
|
||||
int32_t timeout_read = 600; // http read timeout in seconds
|
||||
int32_t timeout_write = timeout_read; // http write timeout in seconds
|
||||
int n_threads_http = -1; // number of threads to process HTTP requests (TODO: support threadpool)
|
||||
int32_t n_threads_http = -1; // number of threads to process HTTP requests (TODO: support threadpool)
|
||||
int32_t n_cache_reuse = 0; // min chunk size to reuse from the cache via KV shifting
|
||||
|
||||
std::string hostname = "127.0.0.1";
|
||||
std::string public_path = ""; // NOLINT
|
||||
std::string chat_template = ""; // NOLINT
|
||||
std::string system_prompt = ""; // NOLINT
|
||||
bool enable_chat_template = true;
|
||||
|
||||
std::vector<std::string> api_keys;
|
||||
@@ -290,7 +296,10 @@ struct gpt_params {
|
||||
std::string ssl_file_key = ""; // NOLINT
|
||||
std::string ssl_file_cert = ""; // NOLINT
|
||||
|
||||
bool endpoint_slots = true;
|
||||
// "advanced" endpoints are disabled by default for better security
|
||||
bool webui = true;
|
||||
bool endpoint_slots = false;
|
||||
bool endpoint_props = false; // only control POST requests, not GET
|
||||
bool endpoint_metrics = false;
|
||||
|
||||
bool log_json = false;
|
||||
@@ -345,20 +354,31 @@ struct gpt_params {
|
||||
|
||||
// call once at the start of a program if it uses libcommon
|
||||
// initializes the logging system and prints info about the build
|
||||
void gpt_init();
|
||||
void common_init();
|
||||
|
||||
std::string gpt_params_get_system_info(const gpt_params & params);
|
||||
std::string common_params_get_system_info(const common_params & params);
|
||||
|
||||
bool parse_cpu_range(const std::string& range, bool(&boolmask)[GGML_MAX_N_THREADS]);
|
||||
bool parse_cpu_mask(const std::string& mask, bool(&boolmask)[GGML_MAX_N_THREADS]);
|
||||
void postprocess_cpu_params(cpu_params& cpuparams, const cpu_params* role_model = nullptr);
|
||||
bool parse_cpu_range(const std::string & range, bool(&boolmask)[GGML_MAX_N_THREADS]);
|
||||
bool parse_cpu_mask(const std::string & mask, bool(&boolmask)[GGML_MAX_N_THREADS]);
|
||||
void postprocess_cpu_params(cpu_params & cpuparams, const cpu_params * role_model = nullptr);
|
||||
bool set_process_priority(enum ggml_sched_priority prio);
|
||||
|
||||
//
|
||||
// String utils
|
||||
//
|
||||
|
||||
std::vector<std::string> string_split(std::string input, char separator);
|
||||
#ifdef __GNUC__
|
||||
#ifdef __MINGW32__
|
||||
#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
|
||||
#else
|
||||
#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
|
||||
#endif
|
||||
#else
|
||||
#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...)
|
||||
#endif
|
||||
|
||||
LLAMA_COMMON_ATTRIBUTE_FORMAT(1, 2)
|
||||
std::string string_format(const char * fmt, ...);
|
||||
|
||||
std::string string_strip(const std::string & str);
|
||||
std::string string_get_sortable_timestamp();
|
||||
@@ -367,6 +387,7 @@ void string_replace_all(std::string & s, const std::string & search, const std::
|
||||
|
||||
template<class T>
|
||||
static std::vector<T> string_split(const std::string & str, char delim) {
|
||||
static_assert(!std::is_same<T, std::string>::value, "Please use the specialized version for std::string");
|
||||
std::vector<T> values;
|
||||
std::istringstream str_stream(str);
|
||||
std::string token;
|
||||
@@ -379,6 +400,22 @@ static std::vector<T> string_split(const std::string & str, char delim) {
|
||||
return values;
|
||||
}
|
||||
|
||||
template<>
|
||||
std::vector<std::string> string_split<std::string>(const std::string & input, char separator)
|
||||
{
|
||||
std::vector<std::string> parts;
|
||||
size_t begin_pos = 0;
|
||||
size_t separator_pos = input.find(separator);
|
||||
while (separator_pos != std::string::npos) {
|
||||
std::string part = input.substr(begin_pos, separator_pos - begin_pos);
|
||||
parts.emplace_back(part);
|
||||
begin_pos = separator_pos + 1;
|
||||
separator_pos = input.find(separator, begin_pos);
|
||||
}
|
||||
parts.emplace_back(input.substr(begin_pos, separator_pos - begin_pos));
|
||||
return parts;
|
||||
}
|
||||
|
||||
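For reference, the two overloads above behave differently: the std::string specialization keeps every field, including empty ones, while the generic template converts each field through a stream. A minimal usage sketch (hypothetical values, assuming common.h is included):

// specialized version: returns the raw substrings, empty fields included
std::vector<std::string> parts = string_split<std::string>("a,,c", ',');   // {"a", "", "c"}

// generic version: each field is converted via operator>> on a stream
std::vector<int> nums = string_split<int>("1,2,3", ',');                   // {1, 2, 3}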
bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
|
||||
void string_process_escapes(std::string & input);
|
||||
|
||||
@@ -401,29 +438,29 @@ std::string fs_get_cache_file(const std::string & filename);
|
||||
// Model utils
|
||||
//
|
||||
|
||||
struct llama_init_result {
|
||||
struct common_init_result {
|
||||
struct llama_model * model = nullptr;
|
||||
struct llama_context * context = nullptr;
|
||||
std::vector<llama_lora_adapter_container> lora_adapters;
|
||||
std::vector<common_lora_adapter_container> lora_adapters;
|
||||
};
|
||||
|
||||
struct llama_init_result llama_init_from_gpt_params(gpt_params & params);
|
||||
struct common_init_result common_init_from_params(common_params & params);
|
||||
|
||||
struct llama_model_params llama_model_params_from_gpt_params (const gpt_params & params);
|
||||
struct llama_context_params llama_context_params_from_gpt_params (const gpt_params & params);
|
||||
struct llama_model_params common_model_params_to_llama (const common_params & params);
|
||||
struct llama_context_params common_context_params_to_llama(const common_params & params);
|
||||
struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params);
|
||||
|
||||
struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params);
|
||||
struct llama_model * llama_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct llama_model_params & params);
|
||||
struct llama_model * common_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params);
|
||||
struct llama_model * common_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct llama_model_params & params);
|
||||
|
||||
// clear LoRA adapters from context, then apply new list of adapters
|
||||
void llama_lora_adapters_apply(struct llama_context * ctx, std::vector<llama_lora_adapter_container> & lora_adapters);
|
||||
void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_lora_adapter_container> & lora_adapters);
|
||||
|
||||
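A minimal sketch of how the renamed initialization helpers fit together, using only the declarations shown above; the model path and the surrounding setup are hypothetical:

common_params params;
params.model = "models/7B/model.gguf";             // hypothetical path

common_init_result llama_init = common_init_from_params(params);
llama_model   * model = llama_init.model;
llama_context * ctx   = llama_init.context;

// adapters loaded with lora_init_without_apply can be applied to the context later
common_lora_adapters_apply(ctx, llama_init.lora_adapters);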
// Batch utils
|
||||
|
||||
void llama_batch_clear(struct llama_batch & batch);
|
||||
void common_batch_clear(struct llama_batch & batch);
|
||||
|
||||
void llama_batch_add(
|
||||
void common_batch_add(
|
||||
struct llama_batch & batch,
|
||||
llama_token id,
|
||||
llama_pos pos,
|
||||
@@ -436,13 +473,13 @@ void llama_batch_add(
|
||||
|
||||
// tokenizes a string into a vector of tokens
|
||||
// should work similar to Python's `tokenizer.encode`
|
||||
std::vector<llama_token> llama_tokenize(
|
||||
std::vector<llama_token> common_tokenize(
|
||||
const struct llama_context * ctx,
|
||||
const std::string & text,
|
||||
bool add_special,
|
||||
bool parse_special = false);
|
||||
|
||||
std::vector<llama_token> llama_tokenize(
|
||||
std::vector<llama_token> common_tokenize(
|
||||
const struct llama_model * model,
|
||||
const std::string & text,
|
||||
bool add_special,
|
||||
@@ -450,7 +487,7 @@ std::vector<llama_token> llama_tokenize(
|
||||
|
||||
// tokenizes a token into a piece, optionally renders special/control tokens
|
||||
// should work similar to Python's `tokenizer.id_to_piece`
|
||||
std::string llama_token_to_piece(
|
||||
std::string common_token_to_piece(
|
||||
const struct llama_context * ctx,
|
||||
llama_token token,
|
||||
bool special = true);
|
||||
@@ -458,7 +495,7 @@ std::string llama_token_to_piece(
|
||||
// detokenizes a vector of tokens into a string
|
||||
// should work similar to Python's `tokenizer.decode`
|
||||
// optionally renders special/control tokens
|
||||
std::string llama_detokenize(
|
||||
std::string common_detokenize(
|
||||
llama_context * ctx,
|
||||
const std::vector<llama_token> & tokens,
|
||||
bool special = true);
|
||||
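A short sketch of the encode/decode round trip with the renamed helpers; ctx is assumed to be an existing llama_context * and the prompt is made up:

std::vector<llama_token> toks = common_tokenize(ctx, "Hello world", /*add_special=*/true);

for (llama_token t : toks) {
    printf("%s", common_token_to_piece(ctx, t).c_str());   // render token by token
}

std::string text = common_detokenize(ctx, toks);            // or decode the whole vector at once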
@@ -468,31 +505,31 @@ std::string llama_detokenize(
|
||||
//
|
||||
|
||||
// same with llama_chat_message, but uses std::string
|
||||
struct llama_chat_msg {
|
||||
struct common_chat_msg {
|
||||
std::string role;
|
||||
std::string content;
|
||||
};
|
||||
|
||||
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
|
||||
bool llama_chat_verify_template(const std::string & tmpl);
|
||||
bool common_chat_verify_template(const std::string & tmpl);
|
||||
|
||||
// CPP wrapper for llama_chat_apply_template
|
||||
// If the built-in template is not supported, we default to chatml
|
||||
// If the custom "tmpl" is not supported, we throw an error
|
||||
std::string llama_chat_apply_template(const struct llama_model * model,
|
||||
std::string common_chat_apply_template(const struct llama_model * model,
|
||||
const std::string & tmpl,
|
||||
const std::vector<llama_chat_msg> & chat,
|
||||
const std::vector<common_chat_msg> & chat,
|
||||
bool add_ass);
|
||||
|
||||
// Format single message, while taking into account the position of that message in chat history
|
||||
std::string llama_chat_format_single(const struct llama_model * model,
|
||||
std::string common_chat_format_single(const struct llama_model * model,
|
||||
const std::string & tmpl,
|
||||
const std::vector<llama_chat_msg> & past_msg,
|
||||
const llama_chat_msg & new_msg,
|
||||
const std::vector<common_chat_msg> & past_msg,
|
||||
const common_chat_msg & new_msg,
|
||||
bool add_ass);
|
||||
|
||||
// Returns an example of formatted chat
|
||||
std::string llama_chat_format_example(const struct llama_model * model,
|
||||
std::string common_chat_format_example(const struct llama_model * model,
|
||||
const std::string & tmpl);
|
||||
|
||||
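A hedged usage sketch of the chat helpers above; model and tmpl are assumed to exist already, and the messages are illustrative only:

std::vector<common_chat_msg> chat = {
    { "system", "You are a helpful assistant." },
    { "user",   "Hello!"                       },
};

if (common_chat_verify_template(tmpl)) {
    std::string prompt = common_chat_apply_template(model, tmpl, chat, /*add_ass=*/true);
}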
//
|
||||
@@ -500,31 +537,31 @@ std::string llama_chat_format_example(const struct llama_model * model,
|
||||
//
|
||||
|
||||
// Dump the KV cache view with the number of sequences per cell.
|
||||
void llama_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);
|
||||
void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);
|
||||
|
||||
// Dump the KV cache view showing individual sequences in each cell (long output).
|
||||
void llama_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
|
||||
void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
|
||||
|
||||
//
|
||||
// Embedding utils
|
||||
//
|
||||
|
||||
void llama_embd_normalize(const float * inp, float * out, int n, int embd_norm = 2);
|
||||
void common_embd_normalize(const float * inp, float * out, int n, int embd_norm = 2);
|
||||
|
||||
float llama_embd_similarity_cos(const float * embd1, const float * embd2, int n);
|
||||
float common_embd_similarity_cos(const float * embd1, const float * embd2, int n);
|
||||
|
||||
//
|
||||
// Control vector utils
|
||||
//
|
||||
|
||||
struct llama_control_vector_data {
|
||||
struct common_control_vector_data {
|
||||
int n_embd;
|
||||
|
||||
// stores data for layers [1, n_layer] where n_layer = data.size() / n_embd
|
||||
std::vector<float> data;
|
||||
};
|
||||
|
||||
struct llama_control_vector_load_info {
|
||||
struct common_control_vector_load_info {
|
||||
float strength;
|
||||
|
||||
std::string fname;
|
||||
@@ -532,7 +569,7 @@ struct llama_control_vector_load_info {
|
||||
|
||||
// Load control vectors, scale each by strength, and add them together.
|
||||
// On error, returns {-1, empty}
|
||||
llama_control_vector_data llama_control_vector_load(const std::vector<llama_control_vector_load_info> & load_infos);
|
||||
common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos);
|
||||
|
||||
//
|
||||
// Split utils
|
||||
@@ -551,5 +588,5 @@ void yaml_dump_vector_int (FILE * stream, const char * prop_name, const std
|
||||
void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const char * data);
|
||||
|
||||
void yaml_dump_non_result_info(
|
||||
FILE * stream, const gpt_params & params, const llama_context * lctx,
|
||||
FILE * stream, const common_params & params, const llama_context * lctx,
|
||||
const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
|
||||
|
||||
@@ -611,7 +611,7 @@ private:
|
||||
}
|
||||
return join_seq();
|
||||
};
|
||||
return _add_rule(name, "\"\\\"\" " + to_rule(transform()) + " \"\\\"\" space");
|
||||
return _add_rule(name, "\"\\\"\" (" + to_rule(transform()) + ") \"\\\"\" space");
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
100
common/log.cpp
@@ -8,10 +8,10 @@
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
int gpt_log_verbosity_thold = LOG_DEFAULT_LLAMA;
|
||||
int common_log_verbosity_thold = LOG_DEFAULT_LLAMA;
|
||||
|
||||
void gpt_log_set_verbosity_thold(int verbosity) {
|
||||
gpt_log_verbosity_thold = verbosity;
|
||||
void common_log_set_verbosity_thold(int verbosity) {
|
||||
common_log_verbosity_thold = verbosity;
|
||||
}
|
||||
|
||||
#define LOG_COL_DEFAULT "\033[0m"
|
||||
@@ -29,16 +29,16 @@ static int64_t t_us() {
|
||||
}
|
||||
|
||||
// colors
|
||||
enum gpt_log_col : int {
|
||||
GPT_LOG_COL_DEFAULT = 0,
|
||||
GPT_LOG_COL_BOLD,
|
||||
GPT_LOG_COL_RED,
|
||||
GPT_LOG_COL_GREEN,
|
||||
GPT_LOG_COL_YELLOW,
|
||||
GPT_LOG_COL_BLUE,
|
||||
GPT_LOG_COL_MAGENTA,
|
||||
GPT_LOG_COL_CYAN,
|
||||
GPT_LOG_COL_WHITE,
|
||||
enum common_log_col : int {
|
||||
COMMON_LOG_COL_DEFAULT = 0,
|
||||
COMMON_LOG_COL_BOLD,
|
||||
COMMON_LOG_COL_RED,
|
||||
COMMON_LOG_COL_GREEN,
|
||||
COMMON_LOG_COL_YELLOW,
|
||||
COMMON_LOG_COL_BLUE,
|
||||
COMMON_LOG_COL_MAGENTA,
|
||||
COMMON_LOG_COL_CYAN,
|
||||
COMMON_LOG_COL_WHITE,
|
||||
};
|
||||
|
||||
// disable colors by default
|
||||
@@ -54,7 +54,7 @@ static std::vector<const char *> g_col = {
|
||||
"",
|
||||
};
|
||||
|
||||
struct gpt_log_entry {
|
||||
struct common_log_entry {
|
||||
enum ggml_log_level level;
|
||||
|
||||
bool prefix;
|
||||
@@ -71,7 +71,7 @@ struct gpt_log_entry {
|
||||
if (!fcur) {
|
||||
// stderr displays DBG messages only when their verbosity level is not higher than the threshold
|
||||
// these messages will still be logged to a file
|
||||
if (level == GGML_LOG_LEVEL_DEBUG && gpt_log_verbosity_thold < LOG_DEFAULT_DEBUG) {
|
||||
if (level == GGML_LOG_LEVEL_DEBUG && common_log_verbosity_thold < LOG_DEFAULT_DEBUG) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -86,19 +86,19 @@ struct gpt_log_entry {
|
||||
if (timestamp) {
|
||||
// [M.s.ms.us]
|
||||
fprintf(fcur, "%s%d.%02d.%03d.%03d%s ",
|
||||
g_col[GPT_LOG_COL_BLUE],
|
||||
g_col[COMMON_LOG_COL_BLUE],
|
||||
(int) (timestamp / 1000000 / 60),
|
||||
(int) (timestamp / 1000000 % 60),
|
||||
(int) (timestamp / 1000 % 1000),
|
||||
(int) (timestamp % 1000),
|
||||
g_col[GPT_LOG_COL_DEFAULT]);
|
||||
g_col[COMMON_LOG_COL_DEFAULT]);
|
||||
}
|
||||
|
||||
switch (level) {
|
||||
case GGML_LOG_LEVEL_INFO: fprintf(fcur, "%sI %s", g_col[GPT_LOG_COL_GREEN], g_col[GPT_LOG_COL_DEFAULT]); break;
|
||||
case GGML_LOG_LEVEL_WARN: fprintf(fcur, "%sW %s", g_col[GPT_LOG_COL_MAGENTA], "" ); break;
|
||||
case GGML_LOG_LEVEL_ERROR: fprintf(fcur, "%sE %s", g_col[GPT_LOG_COL_RED], "" ); break;
|
||||
case GGML_LOG_LEVEL_DEBUG: fprintf(fcur, "%sD %s", g_col[GPT_LOG_COL_YELLOW], "" ); break;
|
||||
case GGML_LOG_LEVEL_INFO: fprintf(fcur, "%sI %s", g_col[COMMON_LOG_COL_GREEN], g_col[COMMON_LOG_COL_DEFAULT]); break;
|
||||
case GGML_LOG_LEVEL_WARN: fprintf(fcur, "%sW %s", g_col[COMMON_LOG_COL_MAGENTA], "" ); break;
|
||||
case GGML_LOG_LEVEL_ERROR: fprintf(fcur, "%sE %s", g_col[COMMON_LOG_COL_RED], "" ); break;
|
||||
case GGML_LOG_LEVEL_DEBUG: fprintf(fcur, "%sD %s", g_col[COMMON_LOG_COL_YELLOW], "" ); break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -107,18 +107,18 @@ struct gpt_log_entry {
|
||||
fprintf(fcur, "%s", msg.data());
|
||||
|
||||
if (level == GGML_LOG_LEVEL_WARN || level == GGML_LOG_LEVEL_ERROR || level == GGML_LOG_LEVEL_DEBUG) {
|
||||
fprintf(fcur, "%s", g_col[GPT_LOG_COL_DEFAULT]);
|
||||
fprintf(fcur, "%s", g_col[COMMON_LOG_COL_DEFAULT]);
|
||||
}
|
||||
|
||||
fflush(fcur);
|
||||
}
|
||||
};
|
||||
|
||||
struct gpt_log {
|
||||
struct common_log {
|
||||
// default capacity - will be expanded if needed
|
||||
gpt_log() : gpt_log(256) {}
|
||||
common_log() : common_log(256) {}
|
||||
|
||||
gpt_log(size_t capacity) {
|
||||
common_log(size_t capacity) {
|
||||
file = nullptr;
|
||||
prefix = false;
|
||||
timestamps = false;
|
||||
@@ -137,7 +137,7 @@ struct gpt_log {
|
||||
resume();
|
||||
}
|
||||
|
||||
~gpt_log() {
|
||||
~common_log() {
|
||||
pause();
|
||||
if (file) {
|
||||
fclose(file);
|
||||
@@ -158,12 +158,12 @@ private:
|
||||
int64_t t_start;
|
||||
|
||||
// ring buffer of entries
|
||||
std::vector<gpt_log_entry> entries;
|
||||
std::vector<common_log_entry> entries;
|
||||
size_t head;
|
||||
size_t tail;
|
||||
|
||||
// worker thread copies into this
|
||||
gpt_log_entry cur;
|
||||
common_log_entry cur;
|
||||
|
||||
public:
|
||||
void add(enum ggml_log_level level, const char * fmt, va_list args) {
|
||||
@@ -219,7 +219,7 @@ public:
|
||||
tail = (tail + 1) % entries.size();
|
||||
if (tail == head) {
|
||||
// expand the buffer
|
||||
std::vector<gpt_log_entry> new_entries(2*entries.size());
|
||||
std::vector<common_log_entry> new_entries(2*entries.size());
|
||||
|
||||
size_t new_tail = 0;
|
||||
|
||||
@@ -320,15 +320,15 @@ public:
|
||||
pause();
|
||||
|
||||
if (colors) {
|
||||
g_col[GPT_LOG_COL_DEFAULT] = LOG_COL_DEFAULT;
|
||||
g_col[GPT_LOG_COL_BOLD] = LOG_COL_BOLD;
|
||||
g_col[GPT_LOG_COL_RED] = LOG_COL_RED;
|
||||
g_col[GPT_LOG_COL_GREEN] = LOG_COL_GREEN;
|
||||
g_col[GPT_LOG_COL_YELLOW] = LOG_COL_YELLOW;
|
||||
g_col[GPT_LOG_COL_BLUE] = LOG_COL_BLUE;
|
||||
g_col[GPT_LOG_COL_MAGENTA] = LOG_COL_MAGENTA;
|
||||
g_col[GPT_LOG_COL_CYAN] = LOG_COL_CYAN;
|
||||
g_col[GPT_LOG_COL_WHITE] = LOG_COL_WHITE;
|
||||
g_col[COMMON_LOG_COL_DEFAULT] = LOG_COL_DEFAULT;
|
||||
g_col[COMMON_LOG_COL_BOLD] = LOG_COL_BOLD;
|
||||
g_col[COMMON_LOG_COL_RED] = LOG_COL_RED;
|
||||
g_col[COMMON_LOG_COL_GREEN] = LOG_COL_GREEN;
|
||||
g_col[COMMON_LOG_COL_YELLOW] = LOG_COL_YELLOW;
|
||||
g_col[COMMON_LOG_COL_BLUE] = LOG_COL_BLUE;
|
||||
g_col[COMMON_LOG_COL_MAGENTA] = LOG_COL_MAGENTA;
|
||||
g_col[COMMON_LOG_COL_CYAN] = LOG_COL_CYAN;
|
||||
g_col[COMMON_LOG_COL_WHITE] = LOG_COL_WHITE;
|
||||
} else {
|
||||
for (size_t i = 0; i < g_col.size(); i++) {
|
||||
g_col[i] = "";
|
||||
@@ -355,47 +355,47 @@ public:
|
||||
// public API
|
||||
//
|
||||
|
||||
struct gpt_log * gpt_log_init() {
|
||||
return new gpt_log;
|
||||
struct common_log * common_log_init() {
|
||||
return new common_log;
|
||||
}
|
||||
|
||||
struct gpt_log * gpt_log_main() {
|
||||
static struct gpt_log log;
|
||||
struct common_log * common_log_main() {
|
||||
static struct common_log log;
|
||||
|
||||
return &log;
|
||||
}
|
||||
|
||||
void gpt_log_pause(struct gpt_log * log) {
|
||||
void common_log_pause(struct common_log * log) {
|
||||
log->pause();
|
||||
}
|
||||
|
||||
void gpt_log_resume(struct gpt_log * log) {
|
||||
void common_log_resume(struct common_log * log) {
|
||||
log->resume();
|
||||
}
|
||||
|
||||
void gpt_log_free(struct gpt_log * log) {
|
||||
void common_log_free(struct common_log * log) {
|
||||
delete log;
|
||||
}
|
||||
|
||||
void gpt_log_add(struct gpt_log * log, enum ggml_log_level level, const char * fmt, ...) {
|
||||
void common_log_add(struct common_log * log, enum ggml_log_level level, const char * fmt, ...) {
|
||||
va_list args;
|
||||
va_start(args, fmt);
|
||||
log->add(level, fmt, args);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
void gpt_log_set_file(struct gpt_log * log, const char * file) {
|
||||
void common_log_set_file(struct common_log * log, const char * file) {
|
||||
log->set_file(file);
|
||||
}
|
||||
|
||||
void gpt_log_set_colors(struct gpt_log * log, bool colors) {
|
||||
void common_log_set_colors(struct common_log * log, bool colors) {
|
||||
log->set_colors(colors);
|
||||
}
|
||||
|
||||
void gpt_log_set_prefix(struct gpt_log * log, bool prefix) {
|
||||
void common_log_set_prefix(struct common_log * log, bool prefix) {
|
||||
log->set_prefix(prefix);
|
||||
}
|
||||
|
||||
void gpt_log_set_timestamps(struct gpt_log * log, bool timestamps) {
|
||||
void common_log_set_timestamps(struct common_log * log, bool timestamps) {
|
||||
log->set_timestamps(timestamps);
|
||||
}
|
||||
|
||||
36
common/log.h
@@ -14,23 +14,23 @@
|
||||
#define LOG_DEFAULT_LLAMA 0
|
||||
|
||||
// needed by the LOG_TMPL macro to avoid computing log arguments if the verbosity is lower
|
||||
// set via gpt_log_set_verbosity()
|
||||
extern int gpt_log_verbosity_thold;
|
||||
// set via common_log_set_verbosity()
|
||||
extern int common_log_verbosity_thold;
|
||||
|
||||
void gpt_log_set_verbosity_thold(int verbosity); // not thread-safe
|
||||
void common_log_set_verbosity_thold(int verbosity); // not thread-safe
|
||||
|
||||
// the gpt_log uses an internal worker thread to print/write log messages
|
||||
// the common_log uses an internal worker thread to print/write log messages
|
||||
// when the worker thread is paused, incoming log messages are discarded
|
||||
struct gpt_log;
|
||||
struct common_log;
|
||||
|
||||
struct gpt_log * gpt_log_init();
|
||||
struct gpt_log * gpt_log_main(); // singleton, automatically destroys itself on exit
|
||||
void gpt_log_pause (struct gpt_log * log); // pause the worker thread, not thread-safe
|
||||
void gpt_log_resume(struct gpt_log * log); // resume the worker thread, not thread-safe
|
||||
void gpt_log_free (struct gpt_log * log);
|
||||
struct common_log * common_log_init();
|
||||
struct common_log * common_log_main(); // singleton, automatically destroys itself on exit
|
||||
void common_log_pause (struct common_log * log); // pause the worker thread, not thread-safe
|
||||
void common_log_resume(struct common_log * log); // resume the worker thread, not thread-safe
|
||||
void common_log_free (struct common_log * log);
|
||||
|
||||
LOG_ATTRIBUTE_FORMAT(3, 4)
|
||||
void gpt_log_add(struct gpt_log * log, enum ggml_log_level level, const char * fmt, ...);
|
||||
void common_log_add(struct common_log * log, enum ggml_log_level level, const char * fmt, ...);
|
||||
|
||||
// defaults: file = NULL, colors = false, prefix = false, timestamps = false
|
||||
//
|
||||
@@ -54,10 +54,10 @@ void gpt_log_add(struct gpt_log * log, enum ggml_log_level level, const char * f
|
||||
// D - debug (stderr, V = LOG_DEFAULT_DEBUG)
|
||||
//
|
||||
|
||||
void gpt_log_set_file (struct gpt_log * log, const char * file); // not thread-safe
|
||||
void gpt_log_set_colors (struct gpt_log * log, bool colors); // not thread-safe
|
||||
void gpt_log_set_prefix (struct gpt_log * log, bool prefix); // whether to output prefix to each log
|
||||
void gpt_log_set_timestamps(struct gpt_log * log, bool timestamps); // whether to output timestamps in the prefix
|
||||
void common_log_set_file (struct common_log * log, const char * file); // not thread-safe
|
||||
void common_log_set_colors (struct common_log * log, bool colors); // not thread-safe
|
||||
void common_log_set_prefix (struct common_log * log, bool prefix); // whether to output prefix to each log
|
||||
void common_log_set_timestamps(struct common_log * log, bool timestamps); // whether to output timestamps in the prefix
|
||||
|
||||
// helper macros for logging
|
||||
// use these to avoid computing log arguments if the verbosity of the log is higher than the threshold
|
||||
@@ -66,13 +66,13 @@ void gpt_log_set_timestamps(struct gpt_log * log, bool timestamps); // w
|
||||
//
|
||||
// LOG_DBG("this is a debug message: %d\n", expensive_function());
|
||||
//
|
||||
// this will avoid calling expensive_function() if LOG_DEFAULT_DEBUG > gpt_log_verbosity_thold
|
||||
// this will avoid calling expensive_function() if LOG_DEFAULT_DEBUG > common_log_verbosity_thold
|
||||
//
|
||||
|
||||
#define LOG_TMPL(level, verbosity, ...) \
|
||||
do { \
|
||||
if ((verbosity) <= gpt_log_verbosity_thold) { \
|
||||
gpt_log_add(gpt_log_main(), (level), __VA_ARGS__); \
|
||||
if ((verbosity) <= common_log_verbosity_thold) { \
|
||||
common_log_add(common_log_main(), (level), __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
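The point of routing everything through LOG_TMPL is that the format arguments are only evaluated when the verbosity check passes. A sketch of the effect, with expensive_function() as a hypothetical costly call (LOG_DBG itself is defined outside this hunk in terms of LOG_TMPL):

// with common_log_verbosity_thold below LOG_DEFAULT_DEBUG, the condition inside
// LOG_TMPL is false, so expensive_function() is never called
LOG_DBG("this is a debug message: %d\n", expensive_function());

// the macro roughly expands to this for a debug-level message:
if (LOG_DEFAULT_DEBUG <= common_log_verbosity_thold) {
    common_log_add(common_log_main(), GGML_LOG_LEVEL_DEBUG, "this is a debug message: %d\n", expensive_function());
}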
@@ -8,7 +8,7 @@
|
||||
#include <fstream>
|
||||
#include <thread>
|
||||
|
||||
void llama_ngram_cache_update(llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max,
|
||||
void common_ngram_cache_update(common_ngram_cache & ngram_cache, int ngram_min, int ngram_max,
|
||||
std::vector<llama_token> & inp, int nnew, bool print_progress) {
|
||||
const int64_t t_start_ms = ggml_time_ms();
|
||||
const int64_t inp_size = inp.size();
|
||||
@@ -20,16 +20,16 @@ void llama_ngram_cache_update(llama_ngram_cache & ngram_cache, int ngram_min, in
|
||||
const int64_t i_start = std::max(inp_size - nnew, ngram_size);
|
||||
for (int64_t i = i_start; i < inp_size; ++i) {
|
||||
const int64_t ngram_start = i - ngram_size;
|
||||
llama_ngram ngram(&inp[ngram_start], ngram_size);
|
||||
common_ngram ngram(&inp[ngram_start], ngram_size);
|
||||
const llama_token token = inp[i];
|
||||
|
||||
llama_ngram_cache::iterator part_it = ngram_cache.find(ngram);
|
||||
common_ngram_cache::iterator part_it = ngram_cache.find(ngram);
|
||||
if (part_it == ngram_cache.end()) {
|
||||
llama_ngram_cache_part part;
|
||||
common_ngram_cache_part part;
|
||||
part.emplace(token, 1);
|
||||
ngram_cache.emplace(ngram, part);
|
||||
} else {
|
||||
llama_ngram_cache_part::iterator token_count_it = part_it->second.find(token);
|
||||
common_ngram_cache_part::iterator token_count_it = part_it->second.find(token);
|
||||
if (token_count_it == part_it->second.end()) {
|
||||
part_it->second.emplace(token, 1);
|
||||
} else {
|
||||
@@ -62,12 +62,12 @@ constexpr int draft_min_sample_size_strict[LLAMA_NGRAM_MAX] = { 4, 3, 2, 2};
|
||||
constexpr int draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66};
|
||||
|
||||
// Helper function that tries to draft a token from only the static ngram cache:
|
||||
static llama_token try_draft(llama_ngram_cache & nc_static, const llama_ngram ngram_static) {
|
||||
llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
|
||||
static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram ngram_static) {
|
||||
common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
|
||||
if (part_static_it == nc_static.end()) {
|
||||
return -1;
|
||||
}
|
||||
const llama_ngram_cache_part part_static = part_static_it->second;
|
||||
const common_ngram_cache_part part_static = part_static_it->second;
|
||||
|
||||
int max_count_static = 0;
|
||||
int sum_count_static = 0;
|
||||
@@ -95,19 +95,19 @@ static llama_token try_draft(llama_ngram_cache & nc_static, const llama_ngram ng
|
||||
|
||||
// Try to draft a token from primary cache (context/dynamic), validate with static cache:
|
||||
static llama_token try_draft(
|
||||
llama_ngram_cache & nc_primary, const std::vector<llama_ngram> & ngrams_primary, llama_ngram_cache_part & part_static,
|
||||
common_ngram_cache & nc_primary, const std::vector<common_ngram> & ngrams_primary, common_ngram_cache_part & part_static,
|
||||
const int * min_sample_size, const int * min_percent) {
|
||||
|
||||
llama_token drafted_token = -1;
|
||||
|
||||
for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) {
|
||||
const llama_ngram ngram_primary = ngrams_primary[i];
|
||||
const common_ngram ngram_primary = ngrams_primary[i];
|
||||
|
||||
llama_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
|
||||
common_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
|
||||
if (part_primary_it == nc_primary.end()) {
|
||||
continue;
|
||||
}
|
||||
const llama_ngram_cache_part part_primary = part_primary_it->second;
|
||||
const common_ngram_cache_part part_primary = part_primary_it->second;
|
||||
|
||||
int max_count_primary = 0;
|
||||
int max_count_static = 0;
|
||||
@@ -117,7 +117,7 @@ static llama_token try_draft(
|
||||
for (std::pair<llama_token, int> token_count_primary : part_primary) {
|
||||
const llama_token token = token_count_primary.first;
|
||||
|
||||
llama_ngram_cache_part::iterator token_count_static_it = part_static.find(token);
|
||||
common_ngram_cache_part::iterator token_count_static_it = part_static.find(token);
|
||||
|
||||
const int32_t count_primary = token_count_primary.second;
|
||||
const int32_t count_static = token_count_static_it != part_static.end() ? 100*token_count_static_it->second : 1;
|
||||
@@ -142,9 +142,9 @@ static llama_token try_draft(
|
||||
return drafted_token;
|
||||
}
|
||||
|
||||
void llama_ngram_cache_draft(
|
||||
void common_ngram_cache_draft(
|
||||
std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
|
||||
llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static
|
||||
common_ngram_cache & nc_context, common_ngram_cache & nc_dynamic, common_ngram_cache & nc_static
|
||||
) {
|
||||
GGML_ASSERT(draft.size() == 1);
|
||||
const int inp_size = inp.size();
|
||||
@@ -157,21 +157,21 @@ void llama_ngram_cache_draft(
|
||||
llama_token drafted_token = -1;
|
||||
|
||||
const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1;
|
||||
llama_ngram ngram_static;
|
||||
common_ngram ngram_static;
|
||||
for (int j = ngram_start_static; j < ngram_start_static + LLAMA_NGRAM_STATIC; ++j) {
|
||||
ngram_static.tokens[j-ngram_start_static] = get_token(inp, draft, j);
|
||||
}
|
||||
llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
|
||||
llama_ngram_cache_part part_static;
|
||||
common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
|
||||
common_ngram_cache_part part_static;
|
||||
if (part_static_it != nc_static.end()) {
|
||||
part_static = part_static_it->second;
|
||||
}
|
||||
|
||||
// cd = context + dynamic
|
||||
std::vector<llama_ngram> ngrams_cd;
|
||||
std::vector<common_ngram> ngrams_cd;
|
||||
for (int ngram_size_cd = ngram_min; ngram_size_cd <= ngram_max; ++ngram_size_cd) {
|
||||
const int ngram_start_cd = inp_size-ngram_size_cd + draft.size()-1;
|
||||
llama_ngram ngram_cd;
|
||||
common_ngram ngram_cd;
|
||||
for (int j = ngram_start_cd; j < ngram_start_cd + ngram_size_cd; ++j) {
|
||||
ngram_cd.tokens[j-ngram_start_cd] = get_token(inp, draft, j);
|
||||
}
|
||||
@@ -196,16 +196,16 @@ void llama_ngram_cache_draft(
|
||||
}
|
||||
}
|
||||
|
||||
void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename) {
|
||||
void common_ngram_cache_save(common_ngram_cache & ngram_cache, std::string & filename) {
|
||||
std::ofstream file_out(filename, std::ios::binary);
|
||||
for (std::pair<llama_ngram, llama_ngram_cache_part> item : ngram_cache) {
|
||||
const llama_ngram ngram = item.first;
|
||||
llama_ngram_cache_part token_counts = item.second;
|
||||
for (std::pair<common_ngram, common_ngram_cache_part> item : ngram_cache) {
|
||||
const common_ngram ngram = item.first;
|
||||
common_ngram_cache_part token_counts = item.second;
|
||||
GGML_ASSERT(!token_counts.empty());
|
||||
const int32_t ntokens = token_counts.size();
|
||||
GGML_ASSERT(ntokens > 0);
|
||||
|
||||
file_out.write(reinterpret_cast<const char *>(&ngram), sizeof(llama_ngram));
|
||||
file_out.write(reinterpret_cast<const char *>(&ngram), sizeof(common_ngram));
|
||||
file_out.write(reinterpret_cast<const char *>(&ntokens), sizeof(int32_t));
|
||||
for (std::pair<llama_token, int32_t> item2 : token_counts) {
|
||||
const llama_token token = item2.first;
|
||||
@@ -219,14 +219,14 @@ void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filen
|
||||
|
||||
}
|
||||
|
||||
llama_ngram_cache llama_ngram_cache_load(std::string & filename) {
|
||||
common_ngram_cache common_ngram_cache_load(std::string & filename) {
|
||||
std::ifstream hashmap_file(filename, std::ios::binary);
|
||||
if (!hashmap_file) {
|
||||
throw std::ifstream::failure("Unable to open file " + filename);
|
||||
}
|
||||
llama_ngram_cache ngram_cache;
|
||||
common_ngram_cache ngram_cache;
|
||||
|
||||
llama_ngram ngram;
|
||||
common_ngram ngram;
|
||||
int32_t ntokens;
|
||||
llama_token token;
|
||||
int32_t count;
|
||||
@@ -235,11 +235,11 @@ llama_ngram_cache llama_ngram_cache_load(std::string & filename) {
|
||||
char * ntokensc = reinterpret_cast<char*>(&ntokens);
|
||||
char * tokenc = reinterpret_cast<char*>(&token);
|
||||
char * countc = reinterpret_cast<char*>(&count);
|
||||
while(hashmap_file.read(ngramc, sizeof(llama_ngram))) {
|
||||
while(hashmap_file.read(ngramc, sizeof(common_ngram))) {
|
||||
GGML_ASSERT(!hashmap_file.eof());
|
||||
GGML_ASSERT(hashmap_file.read(ntokensc, sizeof(int32_t)));
|
||||
GGML_ASSERT(ntokens > 0);
|
||||
llama_ngram_cache_part token_counts;
|
||||
common_ngram_cache_part token_counts;
|
||||
|
||||
for (int i = 0; i < ntokens; ++i) {
|
||||
GGML_ASSERT(!hashmap_file.eof());
|
||||
@@ -257,12 +257,12 @@ llama_ngram_cache llama_ngram_cache_load(std::string & filename) {
|
||||
return ngram_cache;
|
||||
}
|
||||
|
||||
void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add) {
|
||||
for (std::pair<llama_ngram, llama_ngram_cache_part> ngram_part : ngram_cache_add) {
|
||||
const llama_ngram ngram = ngram_part.first;
|
||||
llama_ngram_cache_part part = ngram_part.second;
|
||||
void common_ngram_cache_merge(common_ngram_cache & ngram_cache_target, common_ngram_cache & ngram_cache_add) {
|
||||
for (std::pair<common_ngram, common_ngram_cache_part> ngram_part : ngram_cache_add) {
|
||||
const common_ngram ngram = ngram_part.first;
|
||||
common_ngram_cache_part part = ngram_part.second;
|
||||
|
||||
llama_ngram_cache::iterator part_merged_it = ngram_cache_target.find(ngram);
|
||||
common_ngram_cache::iterator part_merged_it = ngram_cache_target.find(ngram);
|
||||
if (part_merged_it == ngram_cache_target.end()) {
|
||||
ngram_cache_target.emplace(ngram, part);
|
||||
continue;
|
||||
@@ -273,7 +273,7 @@ void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram
|
||||
const int32_t count = token_count.second;
|
||||
GGML_ASSERT(count > 0);
|
||||
|
||||
llama_ngram_cache_part::iterator token_count_merged_it = part_merged_it->second.find(token);
|
||||
common_ngram_cache_part::iterator token_count_merged_it = part_merged_it->second.find(token);
|
||||
if (token_count_merged_it == part_merged_it->second.end()) {
|
||||
part_merged_it->second.emplace(token, count);
|
||||
continue;
|
||||
|
||||
@@ -12,22 +12,22 @@
|
||||
|
||||
// Data structures to map n-grams to empirical token probabilities:
|
||||
|
||||
struct llama_ngram {
|
||||
struct common_ngram {
|
||||
llama_token tokens[LLAMA_NGRAM_MAX];
|
||||
|
||||
llama_ngram() {
|
||||
common_ngram() {
|
||||
for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
|
||||
tokens[i] = -1;
|
||||
}
|
||||
}
|
||||
|
||||
llama_ngram(const llama_token * input, const int ngram_size) {
|
||||
common_ngram(const llama_token * input, const int ngram_size) {
|
||||
for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
|
||||
tokens[i] = i < ngram_size ? input[i] : -1;
|
||||
}
|
||||
}
|
||||
|
||||
bool operator==(const llama_ngram & other) const {
|
||||
bool operator==(const common_ngram & other) const {
|
||||
for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
|
||||
if (tokens[i] != other.tokens[i]) {
|
||||
return false;
|
||||
@@ -37,28 +37,28 @@ struct llama_ngram {
|
||||
}
|
||||
};
|
||||
|
||||
struct llama_token_hash_function {
|
||||
struct common_token_hash_function {
|
||||
size_t operator()(const llama_token token) const {
|
||||
// see https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
|
||||
return token * 11400714819323198485llu;
|
||||
}
|
||||
};
|
||||
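The multiplier above is the 64-bit golden-ratio constant: with φ = (1 + √5)/2 ≈ 1.6180339887, ⌊2^64 / φ⌋ = ⌊18446744073709551616 / 1.6180339887…⌋ = 11400714819323198485 (0x9E3779B97F4A7C15). Multiplying a token id by this odd constant scatters consecutive ids roughly uniformly across the 64-bit range, which is what lets the result be used directly as the unordered_map hash.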
|
||||
struct llama_ngram_hash_function {
|
||||
size_t operator()(const llama_ngram & ngram) const {
|
||||
size_t hash = llama_token_hash_function{}(ngram.tokens[0]);
|
||||
struct common_ngram_hash_function {
|
||||
size_t operator()(const common_ngram & ngram) const {
|
||||
size_t hash = common_token_hash_function{}(ngram.tokens[0]);
|
||||
for (int i = 1; i < LLAMA_NGRAM_MAX; ++i) {
|
||||
hash ^= llama_token_hash_function{}(ngram.tokens[i]);
|
||||
hash ^= common_token_hash_function{}(ngram.tokens[i]);
|
||||
}
|
||||
return hash;
|
||||
}
|
||||
};
|
||||
|
||||
// token -> number of times token has been seen
|
||||
typedef std::unordered_map<llama_token, int32_t> llama_ngram_cache_part;
|
||||
typedef std::unordered_map<llama_token, int32_t> common_ngram_cache_part;
|
||||
|
||||
// n-gram -> empirical distribution of following tokens
|
||||
typedef std::unordered_map<llama_ngram, llama_ngram_cache_part, llama_ngram_hash_function> llama_ngram_cache;
|
||||
typedef std::unordered_map<common_ngram, common_ngram_cache_part, common_ngram_hash_function> common_ngram_cache;
|
||||
|
||||
|
||||
// Update an ngram cache with tokens.
|
||||
@@ -70,8 +70,8 @@ typedef std::unordered_map<llama_ngram, llama_ngram_cache_part, llama_ngram_hash
|
||||
//
|
||||
// In order to get correct results inp_data can ONLY BE APPENDED TO.
|
||||
// Changes in the middle need a complete rebuild.
|
||||
void llama_ngram_cache_update(
|
||||
llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max, std::vector<llama_token> & inp_data, int nnew, bool print_progress);
|
||||
void common_ngram_cache_update(
|
||||
common_ngram_cache & ngram_cache, int ngram_min, int ngram_max, std::vector<llama_token> & inp_data, int nnew, bool print_progress);
|
||||
|
||||
// Try to draft tokens from ngram caches.
|
||||
// inp: the tokens generated so far.
|
||||
@@ -81,21 +81,21 @@ void llama_ngram_cache_update(
|
||||
// nc_context: ngram cache based on current context.
|
||||
// nc_dynamic: ngram cache based on previous user generations.
|
||||
// nc_static: ngram cache generated from a large text corpus, used for validation.
|
||||
void llama_ngram_cache_draft(
|
||||
void common_ngram_cache_draft(
|
||||
std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
|
||||
llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static);
|
||||
common_ngram_cache & nc_context, common_ngram_cache & nc_dynamic, common_ngram_cache & nc_static);
|
||||
|
||||
// Save an ngram cache to a file.
|
||||
// ngram_cache: the ngram cache to save.
|
||||
// filename: the path under which to save the ngram cache.
|
||||
void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename);
|
||||
void common_ngram_cache_save(common_ngram_cache & ngram_cache, std::string & filename);
|
||||
|
||||
// Load an ngram cache saved with llama_ngram_cache_save.
|
||||
// Load an ngram cache saved with common_ngram_cache_save.
|
||||
// filename: the path from which to load the ngram cache.
|
||||
// returns: an ngram cache containing the information saved to filename.
|
||||
llama_ngram_cache llama_ngram_cache_load(std::string & filename);
|
||||
common_ngram_cache common_ngram_cache_load(std::string & filename);
|
||||
|
||||
// Merge two ngram caches.
|
||||
// ngram_cache_target: the ngram cache to which to add the information from ngram_cache_add.
|
||||
// ngram_cache_add: the ngram cache to add to ngram_cache_target.
|
||||
void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add);
|
||||
void common_ngram_cache_merge(common_ngram_cache & ngram_cache_target, common_ngram_cache & ngram_cache_add);
|
||||
|
||||
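Putting the declarations above together, a rough (hypothetical) call pattern for lookahead-style drafting; inp is the token history, last_token the most recently sampled token, and the file paths are placeholder std::string variables:

common_ngram_cache nc_context;                                         // rebuilt from the current context
common_ngram_cache nc_dynamic;                                         // accumulated across generations
common_ngram_cache nc_static  = common_ngram_cache_load(path_static);  // corpus-derived, used for validation

// index the tokens generated so far (inp may only be appended to between calls)
common_ngram_cache_update(nc_context, /*ngram_min=*/1, /*ngram_max=*/LLAMA_NGRAM_MAX, inp, (int) inp.size(), /*print_progress=*/false);

// draft a continuation; the draft vector starts with exactly one token
std::vector<llama_token> draft = { last_token };
common_ngram_cache_draft(inp, draft, /*n_draft=*/16, /*ngram_min=*/1, /*ngram_max=*/LLAMA_NGRAM_MAX, nc_context, nc_dynamic, nc_static);

// fold what was learned into the dynamic cache and persist it
common_ngram_cache_merge(nc_dynamic, nc_context);
common_ngram_cache_save(nc_dynamic, path_dynamic);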
@@ -98,8 +98,8 @@ struct ring_buffer {
|
||||
std::vector<T> data;
|
||||
};
|
||||
|
||||
struct gpt_sampler {
|
||||
gpt_sampler_params params;
|
||||
struct common_sampler {
|
||||
common_sampler_params params;
|
||||
|
||||
struct llama_sampler * grmr;
|
||||
struct llama_sampler * chain;
|
||||
@@ -125,26 +125,26 @@ struct gpt_sampler {
|
||||
}
|
||||
};
|
||||
|
||||
std::string gpt_sampler_params::print() const {
|
||||
std::string common_sampler_params::print() const {
|
||||
char result[1024];
|
||||
|
||||
snprintf(result, sizeof(result),
|
||||
"\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n"
|
||||
"\ttop_k = %d, tfs_z = %.3f, top_p = %.3f, min_p = %.3f, typical_p = %.3f, temp = %.3f\n"
|
||||
"\ttop_k = %d, tfs_z = %.3f, top_p = %.3f, min_p = %.3f, xtc_probability = %.3f, xtc_threshold = %.3f, typical_p = %.3f, temp = %.3f\n"
|
||||
"\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f",
|
||||
penalty_last_n, penalty_repeat, penalty_freq, penalty_present,
|
||||
top_k, tfs_z, top_p, min_p, typ_p, temp,
|
||||
top_k, tfs_z, top_p, min_p, xtc_probability, xtc_threshold, typ_p, temp,
|
||||
mirostat, mirostat_eta, mirostat_tau);
|
||||
|
||||
return std::string(result);
|
||||
}
|
||||
|
||||
struct gpt_sampler * gpt_sampler_init(const struct llama_model * model, const struct gpt_sampler_params & params) {
|
||||
struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_sampler_params & params) {
|
||||
llama_sampler_chain_params lparams = llama_sampler_chain_default_params();
|
||||
|
||||
lparams.no_perf = params.no_perf;
|
||||
|
||||
auto * result = new gpt_sampler {
|
||||
auto * result = new common_sampler {
|
||||
/* .params = */ params,
|
||||
/* .grmr = */ llama_sampler_init_grammar(model, params.grammar.c_str(), "root"),
|
||||
/* .chain = */ llama_sampler_chain_init(lparams),
|
||||
@@ -171,60 +171,52 @@ struct gpt_sampler * gpt_sampler_init(const struct llama_model * model, const st
|
||||
params.penalize_nl,
|
||||
params.ignore_eos));
|
||||
|
||||
if (params.temp > 0.0f) {
|
||||
if (params.mirostat == 0) {
|
||||
for (const auto & cnstr : params.samplers) {
|
||||
switch (cnstr) {
|
||||
case GPT_SAMPLER_TYPE_TOP_K:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k));
|
||||
break;
|
||||
case GPT_SAMPLER_TYPE_TOP_P:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep));
|
||||
break;
|
||||
case GPT_SAMPLER_TYPE_MIN_P:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep));
|
||||
break;
|
||||
case GPT_SAMPLER_TYPE_TFS_Z:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_tail_free(params.tfs_z, params.min_keep));
|
||||
break;
|
||||
case GPT_SAMPLER_TYPE_TYPICAL_P:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep));
|
||||
break;
|
||||
case GPT_SAMPLER_TYPE_TEMPERATURE:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
|
||||
break;
|
||||
default:
|
||||
GGML_ASSERT(false && "unknown sampler type");
|
||||
}
|
||||
if (params.mirostat == 0) {
|
||||
for (const auto & cnstr : params.samplers) {
|
||||
switch (cnstr) {
|
||||
case COMMON_SAMPLER_TYPE_TOP_K:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k));
|
||||
break;
|
||||
case COMMON_SAMPLER_TYPE_TOP_P:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep));
|
||||
break;
|
||||
case COMMON_SAMPLER_TYPE_MIN_P:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep));
|
||||
break;
|
||||
case COMMON_SAMPLER_TYPE_XTC:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
|
||||
break;
|
||||
case COMMON_SAMPLER_TYPE_TFS_Z:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_tail_free(params.tfs_z, params.min_keep));
|
||||
break;
|
||||
case COMMON_SAMPLER_TYPE_TYPICAL_P:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep));
|
||||
break;
|
||||
case COMMON_SAMPLER_TYPE_TEMPERATURE:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
|
||||
break;
|
||||
case COMMON_SAMPLER_TYPE_INFILL:
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_infill (model));
|
||||
break;
|
||||
default:
|
||||
GGML_ASSERT(false && "unknown sampler type");
|
||||
}
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_softmax());
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
|
||||
} else if (params.mirostat == 1) {
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp));
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(llama_n_vocab(model), params.seed, params.mirostat_tau, params.mirostat_eta, 100));
|
||||
} else if (params.mirostat == 2) {
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp));
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta));
|
||||
} else {
|
||||
GGML_ASSERT(false && "unknown mirostat version");
|
||||
}
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
|
||||
} else if (params.mirostat == 1) {
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp));
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(llama_n_vocab(model), params.seed, params.mirostat_tau, params.mirostat_eta, 100));
|
||||
} else if (params.mirostat == 2) {
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp));
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta));
|
||||
} else {
|
||||
if (params.n_probs > 0) {
|
||||
// some use cases require to sample greedily, but still obtain the probabilities of the top tokens
|
||||
// ref: https://github.com/ggerganov/llama.cpp/pull/9605
|
||||
//
|
||||
// the following will not produce exactly the same probs as applying softmax to the full vocabulary, but
|
||||
// it is much faster, since we avoid sorting all tokens and should give a good approximation
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_k(params.n_probs));
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_softmax());
|
||||
}
|
||||
llama_sampler_chain_add(result->chain, llama_sampler_init_greedy());
|
||||
GGML_ASSERT(false && "unknown mirostat version");
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
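On the top-k/softmax shortcut used for greedy sampling with n_probs > 0: restricting the softmax to the k highest logits only changes the normalizer, i.e. p̂_i = exp(z_i) / Σ_{j ∈ top-k} exp(z_j) versus p_i = exp(z_i) / Σ_{j ∈ V} exp(z_j), so each reported probability is overestimated by the (usually small) tail mass that was cut off, while the ranking of the top tokens and the greedily selected token are unchanged. That is the sense in which the comment above calls it a fast, good approximation.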
void gpt_sampler_free(struct gpt_sampler * gsmpl) {
|
||||
void common_sampler_free(struct common_sampler * gsmpl) {
|
||||
if (gsmpl) {
|
||||
llama_sampler_free(gsmpl->grmr);
|
||||
|
||||
@@ -234,7 +226,7 @@ void gpt_sampler_free(struct gpt_sampler * gsmpl) {
|
||||
}
|
||||
}
|
||||
|
||||
void gpt_sampler_accept(struct gpt_sampler * gsmpl, llama_token token, bool accept_grammar) {
|
||||
void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar) {
|
||||
if (accept_grammar) {
|
||||
llama_sampler_accept(gsmpl->grmr, token);
|
||||
}
|
||||
@@ -244,14 +236,14 @@ void gpt_sampler_accept(struct gpt_sampler * gsmpl, llama_token token, bool acce
|
||||
gsmpl->prev.push_back(token);
|
||||
}
|
||||
|
||||
void gpt_sampler_reset(struct gpt_sampler * gsmpl) {
|
||||
void common_sampler_reset(struct common_sampler * gsmpl) {
|
||||
llama_sampler_reset(gsmpl->grmr);
|
||||
|
||||
llama_sampler_reset(gsmpl->chain);
|
||||
}
|
||||
|
||||
struct gpt_sampler * gpt_sampler_clone(gpt_sampler * gsmpl) {
|
||||
return new gpt_sampler {
|
||||
struct common_sampler * common_sampler_clone(common_sampler * gsmpl) {
|
||||
return new common_sampler {
|
||||
/* .params = */ gsmpl->params,
|
||||
/* .grmr = */ llama_sampler_clone(gsmpl->grmr),
|
||||
/* .chain = */ llama_sampler_clone(gsmpl->chain),
|
||||
@@ -261,7 +253,7 @@ struct gpt_sampler * gpt_sampler_clone(gpt_sampler * gsmpl) {
|
||||
};
|
||||
}
|
||||
|
||||
void gpt_perf_print(const struct llama_context * ctx, const struct gpt_sampler * gsmpl) {
|
||||
void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl) {
|
||||
// TODO: measure grammar performance
|
||||
|
||||
if (gsmpl) {
|
||||
@@ -272,7 +264,7 @@ void gpt_perf_print(const struct llama_context * ctx, const struct gpt_sampler *
|
||||
}
|
||||
}
|
||||
|
||||
llama_token gpt_sampler_sample(struct gpt_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first) {
|
||||
llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first) {
|
||||
gsmpl->set_logits(ctx, idx);
|
||||
|
||||
auto & grmr = gsmpl->grmr;
|
||||
@@ -318,21 +310,21 @@ llama_token gpt_sampler_sample(struct gpt_sampler * gsmpl, struct llama_context
|
||||
return cur_p.data[cur_p.selected].id;
|
||||
}
|
||||
|
||||
uint32_t gpt_sampler_get_seed(const struct gpt_sampler * gsmpl) {
|
||||
uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) {
|
||||
return llama_sampler_get_seed(gsmpl->chain);
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
llama_token_data_array * gpt_sampler_get_candidates(struct gpt_sampler * gsmpl) {
|
||||
llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl) {
|
||||
return &gsmpl->cur_p;
|
||||
}
|
||||
|
||||
llama_token gpt_sampler_last(const struct gpt_sampler * gsmpl) {
|
||||
llama_token common_sampler_last(const struct common_sampler * gsmpl) {
|
||||
return gsmpl->prev.rat(0);
|
||||
}
|
||||
|
||||
std::string gpt_sampler_print(const struct gpt_sampler * gsmpl) {
|
||||
std::string common_sampler_print(const struct common_sampler * gsmpl) {
|
||||
std::string result = "logits ";
|
||||
|
||||
for (int i = 0; i < llama_sampler_chain_n(gsmpl->chain); i++) {
|
||||
@@ -343,7 +335,7 @@ std::string gpt_sampler_print(const struct gpt_sampler * gsmpl) {
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string gpt_sampler_prev_str(gpt_sampler * gsmpl, llama_context * ctx_main, int n) {
|
||||
std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx_main, int n) {
|
||||
n = std::min(n, (int) gsmpl->prev.size());
|
||||
|
||||
if (n <= 0) {
|
||||
@@ -358,63 +350,69 @@ std::string gpt_sampler_prev_str(gpt_sampler * gsmpl, llama_context * ctx_main,
|
||||
|
||||
GGML_ASSERT(id != LLAMA_TOKEN_NULL && "null token in the sampling history - should not happen");
|
||||
|
||||
result += llama_token_to_piece(ctx_main, id);
|
||||
result += common_token_to_piece(ctx_main, id);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
char gpt_sampler_type_to_chr(enum gpt_sampler_type cnstr) {
|
||||
char common_sampler_type_to_chr(enum common_sampler_type cnstr) {
|
||||
switch (cnstr) {
|
||||
case GPT_SAMPLER_TYPE_TOP_K: return 'k';
|
||||
case GPT_SAMPLER_TYPE_TFS_Z: return 'f';
|
||||
case GPT_SAMPLER_TYPE_TYPICAL_P: return 'y';
|
||||
case GPT_SAMPLER_TYPE_TOP_P: return 'p';
|
||||
case GPT_SAMPLER_TYPE_MIN_P: return 'm';
|
||||
case GPT_SAMPLER_TYPE_TEMPERATURE: return 't';
|
||||
case COMMON_SAMPLER_TYPE_TOP_K: return 'k';
|
||||
case COMMON_SAMPLER_TYPE_TFS_Z: return 'f';
|
||||
case COMMON_SAMPLER_TYPE_TYPICAL_P: return 'y';
|
||||
case COMMON_SAMPLER_TYPE_TOP_P: return 'p';
|
||||
case COMMON_SAMPLER_TYPE_MIN_P: return 'm';
|
||||
case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't';
|
||||
case COMMON_SAMPLER_TYPE_XTC: return 'x';
|
||||
case COMMON_SAMPLER_TYPE_INFILL: return 'i';
|
||||
default : return '?';
|
||||
}
|
||||
}
|
||||
|
||||
std::string gpt_sampler_type_to_str(enum gpt_sampler_type cnstr) {
|
||||
std::string common_sampler_type_to_str(enum common_sampler_type cnstr) {
|
||||
switch (cnstr) {
|
||||
case GPT_SAMPLER_TYPE_TOP_K: return "top_k";
|
||||
case GPT_SAMPLER_TYPE_TFS_Z: return "tfs_z";
|
||||
case GPT_SAMPLER_TYPE_TYPICAL_P: return "typ_p";
|
||||
case GPT_SAMPLER_TYPE_TOP_P: return "top_p";
|
||||
case GPT_SAMPLER_TYPE_MIN_P: return "min_p";
|
||||
case GPT_SAMPLER_TYPE_TEMPERATURE: return "temperature";
|
||||
case COMMON_SAMPLER_TYPE_TOP_K: return "top_k";
|
||||
case COMMON_SAMPLER_TYPE_TFS_Z: return "tfs_z";
|
||||
case COMMON_SAMPLER_TYPE_TYPICAL_P: return "typ_p";
|
||||
case COMMON_SAMPLER_TYPE_TOP_P: return "top_p";
|
||||
case COMMON_SAMPLER_TYPE_MIN_P: return "min_p";
|
||||
case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature";
|
||||
case COMMON_SAMPLER_TYPE_XTC: return "xtc";
|
||||
case COMMON_SAMPLER_TYPE_INFILL: return "infill";
|
||||
default : return "";
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<gpt_sampler_type> gpt_sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) {
|
||||
std::unordered_map<std::string, gpt_sampler_type> sampler_canonical_name_map {
|
||||
{ "top_k", GPT_SAMPLER_TYPE_TOP_K },
|
||||
{ "top_p", GPT_SAMPLER_TYPE_TOP_P },
|
||||
{ "typ_p", GPT_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "min_p", GPT_SAMPLER_TYPE_MIN_P },
|
||||
{ "tfs_z", GPT_SAMPLER_TYPE_TFS_Z },
|
||||
{ "temperature", GPT_SAMPLER_TYPE_TEMPERATURE },
|
||||
std::vector<common_sampler_type> common_sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) {
|
||||
std::unordered_map<std::string, common_sampler_type> sampler_canonical_name_map {
|
||||
{ "top_k", COMMON_SAMPLER_TYPE_TOP_K },
|
||||
{ "top_p", COMMON_SAMPLER_TYPE_TOP_P },
|
||||
{ "typ_p", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "min_p", COMMON_SAMPLER_TYPE_MIN_P },
|
||||
{ "tfs_z", COMMON_SAMPLER_TYPE_TFS_Z },
|
||||
{ "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE },
|
||||
{ "xtc", COMMON_SAMPLER_TYPE_XTC },
|
||||
{ "infill", COMMON_SAMPLER_TYPE_INFILL },
|
||||
};
|
||||
|
||||
// since samplers names are written multiple ways
|
||||
// make it ready for both system names and input names
|
||||
std::unordered_map<std::string, gpt_sampler_type> sampler_alt_name_map {
|
||||
{ "top-k", GPT_SAMPLER_TYPE_TOP_K },
|
||||
{ "top-p", GPT_SAMPLER_TYPE_TOP_P },
|
||||
{ "nucleus", GPT_SAMPLER_TYPE_TOP_P },
|
||||
{ "typical-p", GPT_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "typical", GPT_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "typ-p", GPT_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "typ", GPT_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "min-p", GPT_SAMPLER_TYPE_MIN_P },
|
||||
{ "tfs-z", GPT_SAMPLER_TYPE_TFS_Z },
|
||||
{ "tfs", GPT_SAMPLER_TYPE_TFS_Z },
|
||||
{ "temp", GPT_SAMPLER_TYPE_TEMPERATURE },
|
||||
std::unordered_map<std::string, common_sampler_type> sampler_alt_name_map {
|
||||
{ "top-k", COMMON_SAMPLER_TYPE_TOP_K },
|
||||
{ "top-p", COMMON_SAMPLER_TYPE_TOP_P },
|
||||
{ "nucleus", COMMON_SAMPLER_TYPE_TOP_P },
|
||||
{ "typical-p", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "typical", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "typ-p", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "typ", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ "min-p", COMMON_SAMPLER_TYPE_MIN_P },
|
||||
{ "tfs-z", COMMON_SAMPLER_TYPE_TFS_Z },
|
||||
{ "tfs", COMMON_SAMPLER_TYPE_TFS_Z },
|
||||
{ "temp", COMMON_SAMPLER_TYPE_TEMPERATURE },
|
||||
};
|
||||
|
||||
std::vector<gpt_sampler_type> samplers;
|
||||
std::vector<common_sampler_type> samplers;
|
||||
samplers.reserve(names.size());
|
||||
|
||||
for (const auto & name : names) {
|
||||
@@ -434,17 +432,19 @@ std::vector<gpt_sampler_type> gpt_sampler_types_from_names(const std::vector<std
|
||||
return samplers;
|
||||
}
|
||||
|
||||
std::vector<gpt_sampler_type> gpt_sampler_types_from_chars(const std::string & chars) {
|
||||
std::unordered_map<char, gpt_sampler_type> sampler_name_map = {
|
||||
{ gpt_sampler_type_to_chr(GPT_SAMPLER_TYPE_TOP_K), GPT_SAMPLER_TYPE_TOP_K },
|
||||
{ gpt_sampler_type_to_chr(GPT_SAMPLER_TYPE_TFS_Z), GPT_SAMPLER_TYPE_TFS_Z },
|
||||
{ gpt_sampler_type_to_chr(GPT_SAMPLER_TYPE_TYPICAL_P), GPT_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ gpt_sampler_type_to_chr(GPT_SAMPLER_TYPE_TOP_P), GPT_SAMPLER_TYPE_TOP_P },
|
||||
{ gpt_sampler_type_to_chr(GPT_SAMPLER_TYPE_MIN_P), GPT_SAMPLER_TYPE_MIN_P },
|
||||
{ gpt_sampler_type_to_chr(GPT_SAMPLER_TYPE_TEMPERATURE), GPT_SAMPLER_TYPE_TEMPERATURE }
|
||||
std::vector<common_sampler_type> common_sampler_types_from_chars(const std::string & chars) {
|
||||
std::unordered_map<char, common_sampler_type> sampler_name_map = {
|
||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_K), COMMON_SAMPLER_TYPE_TOP_K },
|
||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TFS_Z), COMMON_SAMPLER_TYPE_TFS_Z },
|
||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P), COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P), COMMON_SAMPLER_TYPE_TOP_P },
|
||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P), COMMON_SAMPLER_TYPE_MIN_P },
|
||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE },
|
||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC), COMMON_SAMPLER_TYPE_XTC },
|
||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_INFILL), COMMON_SAMPLER_TYPE_INFILL },
|
||||
};
|
||||
|
||||
std::vector<gpt_sampler_type> samplers;
|
||||
std::vector<common_sampler_type> samplers;
|
||||
samplers.reserve(chars.size());
|
||||
|
||||
for (const auto & c : chars) {
|
||||
|
||||
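For readers following the rename, here is a short, hedged example of how the two parsing helpers above could be driven. The header path, the program itself, and the single-character shorthand values are assumptions based on the declarations visible in this diff, not code from the commit range.

```cpp
// Minimal, hypothetical driver for the renamed parsing helpers.
// Assumes the post-rename declarations from common/sampling.h (next hunk)
// and the single-character codes produced by common_sampler_type_to_chr.
#include <cstdio>
#include <string>
#include <vector>

#include "sampling.h"

int main() {
    // canonical and alternative spellings both resolve when allow_alt_names is true
    const std::vector<std::string> names = { "top-k", "typical", "min_p", "temperature" };
    const std::vector<common_sampler_type> from_names = common_sampler_types_from_names(names, /*allow_alt_names=*/true);

    // single-character shorthand, e.g. 'k' for top_k, 'y' for typ_p, 't' for temperature (assumed codes)
    const std::vector<common_sampler_type> from_chars = common_sampler_types_from_chars("kypmt");

    for (const auto cnstr : from_names) {
        printf("%s\n", common_sampler_type_to_str(cnstr).c_str());
    }
    return (from_names.size() == names.size() && !from_chars.empty()) ? 0 : 1;
}
```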
@@ -7,7 +7,7 @@
#include <string>
#include <vector>

// gpt_sampler extends llama_sampler with additional functionality:
// common_sampler extends llama_sampler with additional functionality:
//
// - grammar support
// - custom sampler logic based on the parameters
@@ -23,30 +23,30 @@
// token in order to verify if it fits the grammar. And only if the token doesn't fit the grammar, the
// grammar constraints are applied to the full vocabulary and the token is resampled.
//
// The gpt_sampler also maintains a container with the last accepted tokens. In the future, this can
// The common_sampler also maintains a container with the last accepted tokens. In the future, this can
// be moved into the core llama library.
//
// For convenience, the gpt_sampler also maintains a container with the current candidate tokens.
// For convenience, the common_sampler also maintains a container with the current candidate tokens.
// This can be used to access the probabilities of the rest of the non-sampled tokens.
//
// TODO: measure grammar performance
//

struct gpt_sampler;
struct common_sampler;

// llama_sampler API overloads

struct gpt_sampler * gpt_sampler_init(const struct llama_model * model, const struct gpt_sampler_params & params);
struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_sampler_params & params);

void gpt_sampler_free(struct gpt_sampler * gsmpl);
void common_sampler_free(struct common_sampler * gsmpl);

// if accept_grammar is true, the token is accepted both by the sampling chain and the grammar
void gpt_sampler_accept(struct gpt_sampler * gsmpl, llama_token token, bool accept_grammar);
void gpt_sampler_reset (struct gpt_sampler * gsmpl);
struct gpt_sampler * gpt_sampler_clone (struct gpt_sampler * gsmpl);
void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar);
void common_sampler_reset (struct common_sampler * gsmpl);
struct common_sampler * common_sampler_clone (struct common_sampler * gsmpl);

// arguments can be nullptr to skip printing
void gpt_perf_print(const struct llama_context * ctx, const struct gpt_sampler * gsmpl);
void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl);

// extended sampling implementation:
//
@@ -58,26 +58,26 @@ void gpt_perf_print(const struct llama_context * ctx, const struct gpt_sampler *
// if grammar_first is true, the grammar is applied before the samplers (slower)
// useful in cases where all the resulting candidates (not just the sampled one) must fit the grammar
//
llama_token gpt_sampler_sample(struct gpt_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first = false);
llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first = false);

uint32_t gpt_sampler_get_seed(const struct gpt_sampler * gsmpl);
uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl);

// helpers

// access the internal list of current candidate tokens
llama_token_data_array * gpt_sampler_get_candidates(struct gpt_sampler * gsmpl);
llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl);

// get the last accepted token
llama_token gpt_sampler_last(const struct gpt_sampler * gsmpl);
llama_token common_sampler_last(const struct common_sampler * gsmpl);

// print the sampler chain into a string
std::string gpt_sampler_print(const struct gpt_sampler * gsmpl);
std::string common_sampler_print(const struct common_sampler * gsmpl);

// get a string representation of the last accepted tokens
std::string gpt_sampler_prev_str(gpt_sampler * gsmpl, llama_context * ctx, int n);
std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx, int n);

char gpt_sampler_type_to_chr(enum gpt_sampler_type cnstr);
std::string gpt_sampler_type_to_str(enum gpt_sampler_type cnstr);
char common_sampler_type_to_chr(enum common_sampler_type cnstr);
std::string common_sampler_type_to_str(enum common_sampler_type cnstr);

std::vector<enum gpt_sampler_type> gpt_sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names);
std::vector<enum gpt_sampler_type> gpt_sampler_types_from_chars(const std::string & chars);
std::vector<enum common_sampler_type> common_sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names);
std::vector<enum common_sampler_type> common_sampler_types_from_chars(const std::string & chars);

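The renamed header keeps the same shape as the old gpt_ API. As a hedged illustration only (not code from this commit range), a typical lifecycle pieced together from the calls that appear in the example diffs further down might look like the sketch below; it assumes a loaded llama_model/llama_context, that the sampling parameters come from common_params::sparams, and it elides the llama_decode feedback loop.

```cpp
// Hedged lifecycle sketch for the renamed sampler API. The helper function
// name is hypothetical; the API calls are the ones declared above.
#include <cstdio>

#include "common.h"
#include "sampling.h"

static void generate_n(llama_model * model, llama_context * ctx,
                       const common_params & params, int n_predict) {
    common_sampler * smpl = common_sampler_init(model, params.sparams);

    printf("sampler seed:  %u\n", common_sampler_get_seed(smpl));
    printf("sampler chain: %s\n", common_sampler_print(smpl).c_str());

    for (int i = 0; i < n_predict; ++i) {
        // sample from the logits of the last decoded token (idx == -1)
        const llama_token id = common_sampler_sample(smpl, ctx, -1);

        // accept it so that repetition penalties and the grammar see it
        common_sampler_accept(smpl, id, /*accept_grammar=*/true);

        printf("%s", common_token_to_piece(ctx, id).c_str());

        // ... feed `id` back through llama_decode() before the next iteration ...
    }

    common_perf_print(ctx, smpl);
    common_sampler_free(smpl);
}
```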
@@ -573,6 +573,9 @@ class Model:
|
||||
if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
|
||||
# ref: https://huggingface.co/BAAI/bge-small-en-v1.5
|
||||
res = "bert-bge"
|
||||
if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7":
|
||||
# ref: https://huggingface.co/BAAI/bge-large-zh-v1.5
|
||||
res = "bert-bge-large"
|
||||
if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
|
||||
# ref: https://huggingface.co/mosaicml/mpt-7b
|
||||
res = "mpt"
|
||||
@@ -2864,6 +2867,9 @@ class Rwkv6Model(Model):
|
||||
self.gguf_writer.add_token_list(tokens)
|
||||
self.gguf_writer.add_token_types(toktypes)
|
||||
special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
|
||||
special_vocab.chat_template = "rwkv-world"
|
||||
# hack: Add '\n\n' as the EOT token to make it chat normally
|
||||
special_vocab._set_special_token("eot", 261)
|
||||
special_vocab.add_to_gguf(self.gguf_writer)
|
||||
|
||||
def set_gguf_parameters(self):
|
||||
|
||||
@@ -72,6 +72,7 @@ models = [
|
||||
{"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", },
|
||||
{"name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", },
|
||||
{"name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", },
|
||||
{"name": "bert-bge-large", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/BAAI/bge-large-zh-v1.5", },
|
||||
{"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
|
||||
{"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
|
||||
{"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
|
||||
|
||||
@@ -348,6 +348,9 @@ if __name__ == '__main__':
|
||||
if ".base_layer.weight" in name:
|
||||
continue
|
||||
logger.error(f"Unexpected name '{name}': Not a lora_A or lora_B tensor")
|
||||
if ".embed_tokens.weight" in name or ".lm_head.weight" in name:
|
||||
logger.error("Embeddings is present in the adapter. This can be due to new tokens added during fine tuning")
|
||||
logger.error("Hint: if you are using TRL, make sure not to call setup_chat_format()")
|
||||
sys.exit(1)
|
||||
|
||||
if base_name in tensor_map:
|
||||
|
||||
109
docs/android.md
@@ -2,55 +2,82 @@
|
||||
# Android
|
||||
|
||||
## Build on Android using Termux
|
||||
[Termux](https://github.com/termux/termux-app#installation) is a method to execute `llama.cpp` on an Android device (no root required).
|
||||
```
|
||||
apt update && apt upgrade -y
|
||||
apt install git make cmake
|
||||
```
|
||||
|
||||
It's recommended to move your model inside the `~/` directory for best performance:
|
||||
```
|
||||
cd storage/downloads
|
||||
mv model.gguf ~/
|
||||
```
|
||||
[Termux](https://termux.dev/en/) is an Android terminal emulator and Linux environment app (no root required). As of writing, Termux is available experimentally in the Google Play Store; otherwise, it may be obtained directly from the project repo or on F-Droid.
|
||||
|
||||
[Get the code](https://github.com/ggerganov/llama.cpp#get-the-code) & [follow the Linux build instructions](https://github.com/ggerganov/llama.cpp#build) to build `llama.cpp`.
|
||||
|
||||
## Building the Project using Android NDK
|
||||
Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake.
|
||||
|
||||
Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux:
|
||||
```
|
||||
$ mkdir build-android
|
||||
$ cd build-android
|
||||
$ export NDK=<your_ndk_directory>
|
||||
$ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod ..
|
||||
$ make
|
||||
```
|
||||
|
||||
Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (if Android 11+ then run the command twice).
|
||||
|
||||
Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission:
|
||||
|
||||
(Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`)
|
||||
```
|
||||
$cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/
|
||||
$cd /data/data/com.termux/files/home/bin
|
||||
$chmod +x ./*
|
||||
```
|
||||
|
||||
Download model [llama-2-7b-chat.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_M.gguf), and push it to `/sdcard/llama.cpp/`, then move it to `/data/data/com.termux/files/home/model/`
|
||||
With Termux, you can install and run `llama.cpp` as if the environment were Linux. Once in the Termux shell:
|
||||
|
||||
```
|
||||
$mv /sdcard/llama.cpp/llama-2-7b-chat.Q4_K_M.gguf /data/data/com.termux/files/home/model/
|
||||
$ apt update && apt upgrade -y
|
||||
$ apt install git cmake
|
||||
```
|
||||
|
||||
Now, you can start chatting:
|
||||
Then, follow the [build instructions](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md), specifically for CMake.
|
||||
|
||||
Once the binaries are built, download your model of choice (e.g., from Hugging Face). It's recommended to place it in the `~/` directory for best performance:
|
||||
|
||||
```
|
||||
$cd /data/data/com.termux/files/home/bin
|
||||
$./llama-cli -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml
|
||||
$ curl -L {model-url} -o ~/{model}.gguf
|
||||
```
|
||||
|
||||
Here's a demo of an interactive session running on Pixel 5 phone:
|
||||
Then, if you are not already in the repo directory, `cd` into `llama.cpp` and:
|
||||
|
||||
```
|
||||
$ ./build/bin/llama-simple -m ~/{model}.gguf -c {context-size} -p "{your-prompt}"
|
||||
```
|
||||
|
||||
Here, we show `llama-simple`, but any of the executables under `examples` should work, in theory. Be sure to set `context-size` to a reasonable number (say, 4096) to start with; otherwise, memory could spike and kill your terminal.
|
||||
|
||||
To see what it might look like visually, here's an old demo of an interactive session running on a Pixel 5 phone:
|
||||
|
||||
https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4
|
||||
|
||||
## Cross-compile using Android NDK
|
||||
It's possible to build `llama.cpp` for Android on your host system via CMake and the Android NDK. If you are interested in this path, ensure you already have an environment prepared to cross-compile programs for Android (i.e., install the Android SDK). Note that, unlike desktop environments, the Android environment ships with a limited set of native libraries, and so only those libraries are available to CMake when building with the Android NDK (see: https://developer.android.com/ndk/guides/stable_apis.)
|
||||
|
||||
Once you're ready and have cloned `llama.cpp`, invoke the following in the project directory:
|
||||
|
||||
```
|
||||
$ cmake \
|
||||
-DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
|
||||
-DANDROID_ABI=arm64-v8a \
|
||||
-DANDROID_PLATFORM=android-28 \
|
||||
-DCMAKE_C_FLAGS="-march=armv8.7a" \
|
||||
-DCMAKE_CXX_FLAGS="-march=armv8.7a" \
|
||||
-DGGML_OPENMP=OFF \
|
||||
-DGGML_LLAMAFILE=OFF \
|
||||
-B build-android
|
||||
```
|
||||
|
||||
Notes:
|
||||
- While later versions of Android NDK ship with OpenMP, it must still be installed by CMake as a dependency, which is not supported at this time
|
||||
- `llamafile` does not appear to support Android devices (see: https://github.com/Mozilla-Ocho/llamafile/issues/325)
|
||||
|
||||
The above command should configure `llama.cpp` with the most performant options for modern devices. Even if your device is not running `armv8.7a`, `llama.cpp` includes runtime checks for available CPU features it can use.
|
||||
|
||||
Feel free to adjust the Android ABI for your target. Once the project is configured:
|
||||
|
||||
```
|
||||
$ cmake --build build-android --config Release -j{n}
|
||||
$ cmake --install build-android --prefix {install-dir} --config Release
|
||||
```
|
||||
|
||||
After installing, go ahead and download the model of your choice to your host system. Then:
|
||||
|
||||
```
|
||||
$ adb shell "mkdir /data/local/tmp/llama.cpp"
|
||||
$ adb push {install-dir} /data/local/tmp/llama.cpp/
|
||||
$ adb push {model}.gguf /data/local/tmp/llama.cpp/
|
||||
$ adb shell
|
||||
```
|
||||
|
||||
In the `adb shell`:
|
||||
|
||||
```
|
||||
$ cd /data/local/tmp/llama.cpp
|
||||
$ LD_LIBRARY_PATH=lib ./bin/llama-simple -m {model}.gguf -c {context-size} -p "{your-prompt}"
|
||||
```
|
||||
|
||||
That's it!
|
||||
|
||||
Be aware that Android will not find the library path `lib` on its own, so we must specify `LD_LIBRARY_PATH` in order to run the installed executables. Android does support `RPATH` in later API levels, so this could change in the future. Refer to the previous section for information about `context-size` (very important!) and running other `examples`.
|
||||
|
||||
@@ -198,6 +198,8 @@ The following compilation options are also available to tweak performance:

### MUSA

This provides GPU acceleration using the MUSA cores of your Moore Threads MTT GPU. Make sure to have the MUSA SDK installed. You can download it from here: [MUSA SDK](https://developer.mthreads.com/sdk/download/musa).

- Using `make`:
  ```bash
  make GGML_MUSA=1
@@ -209,6 +211,12 @@ The following compilation options are also available to tweak performance:
  cmake --build build --config Release
  ```

The environment variable [`MUSA_VISIBLE_DEVICES`](https://docs.mthreads.com/musa-sdk/musa-sdk-doc-online/programming_guide/Z%E9%99%84%E5%BD%95/) can be used to specify which GPU(s) will be used.

The environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1` can be used to enable unified memory in Linux. This allows swapping to system RAM instead of crashing when the GPU VRAM is exhausted.

Most of the compilation options available for CUDA should also be available for MUSA, though they haven't been thoroughly tested yet.

### hipBLAS

This provides BLAS acceleration on HIP-supported AMD GPUs.
@@ -19,8 +19,11 @@ Additionally, there the following images, similar to the above:
- `ghcr.io/ggerganov/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
- `ghcr.io/ggerganov/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
- `ghcr.io/ggerganov/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
- `ghcr.io/ggerganov/llama.cpp:full-musa`: Same as `full` but compiled with MUSA support. (platforms: `linux/amd64`)
- `ghcr.io/ggerganov/llama.cpp:light-musa`: Same as `light` but compiled with MUSA support. (platforms: `linux/amd64`)
- `ghcr.io/ggerganov/llama.cpp:server-musa`: Same as `server` but compiled with MUSA support. (platforms: `linux/amd64`)

The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](../.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](../.github/workflows/docker.yml). If you need different settings (for example, a different CUDA or ROCm library, you'll need to build the images locally for now).
The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](../.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](../.github/workflows/docker.yml). If you need different settings (for example, a different CUDA, ROCm or MUSA library), you'll need to build the images locally for now.

## Usage

@@ -84,3 +87,37 @@ docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run
docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
docker run --gpus all -v /path/to/models:/models local/llama.cpp:server-cuda -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1
```

## Docker With MUSA

Assuming one has the [mt-container-toolkit](https://developer.mthreads.com/musa/native) properly installed on Linux, `muBLAS` should be accessible inside the container.

## Building Docker locally

```bash
docker build -t local/llama.cpp:full-musa -f .devops/full-musa.Dockerfile .
docker build -t local/llama.cpp:light-musa -f .devops/llama-cli-musa.Dockerfile .
docker build -t local/llama.cpp:server-musa -f .devops/llama-server-musa.Dockerfile .
```

You may want to pass in some different `ARGS`, depending on the MUSA environment supported by your container host, as well as the GPU architecture.

The defaults are:

- `MUSA_VERSION` set to `rc3.1.0`

The resulting images are essentially the same as the non-MUSA images:

1. `local/llama.cpp:full-musa`: This image includes both the main executable file and the tools to convert LLaMA models into ggml format and quantize them to 4-bit.
2. `local/llama.cpp:light-musa`: This image only includes the main executable file.
3. `local/llama.cpp:server-musa`: This image only includes the server executable file.

## Usage

After building locally, usage is similar to the non-MUSA examples, but you'll need to set `mthreads` as the default Docker runtime. This can be done by executing `(cd /usr/bin/musa && sudo ./docker setup $PWD)` and verifying the changes by executing `docker info | grep mthreads` on the host machine. You will also want to use the `--n-gpu-layers` flag.

```bash
docker run -v /path/to/models:/models local/llama.cpp:full-musa --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
docker run -v /path/to/models:/models local/llama.cpp:light-musa -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
docker run -v /path/to/models:/models local/llama.cpp:server-musa -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1
```

@@ -15,13 +15,13 @@ static void print_usage(int, char ** argv) {
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
int is_pp_shared = params.is_pp_shared;
|
||||
|
||||
@@ -36,7 +36,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// initialize the model
|
||||
|
||||
llama_model_params model_params = llama_model_params_from_gpt_params(params);
|
||||
llama_model_params model_params = common_model_params_to_llama(params);
|
||||
|
||||
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
|
||||
|
||||
@@ -45,7 +45,7 @@ int main(int argc, char ** argv) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
|
||||
llama_context_params ctx_params = common_context_params_to_llama(params);
|
||||
|
||||
// ensure enough sequences are available
|
||||
ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end());
|
||||
@@ -74,7 +74,6 @@ int main(int argc, char ** argv) {
|
||||
batch.n_seq_id + i,
|
||||
batch.seq_id + i,
|
||||
batch.logits + i,
|
||||
0, 0, 0, // unused
|
||||
};
|
||||
|
||||
const int ret = llama_decode(ctx, batch_view);
|
||||
@@ -92,7 +91,7 @@ int main(int argc, char ** argv) {
|
||||
// warm up
|
||||
{
|
||||
for (int i = 0; i < 16; ++i) {
|
||||
llama_batch_add(batch, 0, i, { 0 }, false);
|
||||
common_batch_add(batch, 0, i, { 0 }, false);
|
||||
}
|
||||
|
||||
if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
|
||||
@@ -122,11 +121,11 @@ int main(int argc, char ** argv) {
|
||||
continue;
|
||||
}
|
||||
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
for (int i = 0; i < pp; ++i) {
|
||||
for (int j = 0; j < (is_pp_shared ? 1 : pl); ++j) {
|
||||
llama_batch_add(batch, 0, i, { j }, false);
|
||||
common_batch_add(batch, 0, i, { j }, false);
|
||||
}
|
||||
}
|
||||
batch.logits[batch.n_tokens - 1] = true;
|
||||
@@ -151,10 +150,10 @@ int main(int argc, char ** argv) {
|
||||
const auto t_tg_start = ggml_time_us();
|
||||
|
||||
for (int i = 0; i < tg; ++i) {
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
for (int j = 0; j < pl; ++j) {
|
||||
llama_batch_add(batch, 0, pp + i, { j }, true);
|
||||
common_batch_add(batch, 0, pp + i, { j }, true);
|
||||
}
|
||||
|
||||
if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
|
||||
|
||||
@@ -15,16 +15,16 @@ static void print_usage(int, char ** argv) {
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
params.prompt = "Hello my name is";
|
||||
params.n_predict = 32;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
// number of parallel batches
|
||||
int n_parallel = params.n_parallel;
|
||||
@@ -39,7 +39,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// initialize the model
|
||||
|
||||
llama_model_params model_params = llama_model_params_from_gpt_params(params);
|
||||
llama_model_params model_params = common_model_params_to_llama(params);
|
||||
|
||||
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
|
||||
|
||||
@@ -51,13 +51,13 @@ int main(int argc, char ** argv) {
|
||||
// tokenize the prompt
|
||||
|
||||
std::vector<llama_token> tokens_list;
|
||||
tokens_list = ::llama_tokenize(model, params.prompt, true);
|
||||
tokens_list = common_tokenize(model, params.prompt, true);
|
||||
|
||||
const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;
|
||||
|
||||
// initialize the context
|
||||
|
||||
llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
|
||||
llama_context_params ctx_params = common_context_params_to_llama(params);
|
||||
|
||||
ctx_params.n_ctx = n_kv_req;
|
||||
ctx_params.n_batch = std::max(n_predict, n_parallel);
|
||||
@@ -94,7 +94,7 @@ int main(int argc, char ** argv) {
|
||||
LOG("\n");
|
||||
|
||||
for (auto id : tokens_list) {
|
||||
LOG("%s", llama_token_to_piece(ctx, id).c_str());
|
||||
LOG("%s", common_token_to_piece(ctx, id).c_str());
|
||||
}
|
||||
|
||||
// create a llama_batch
|
||||
@@ -108,7 +108,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// evaluate the initial prompt
|
||||
for (size_t i = 0; i < tokens_list.size(); ++i) {
|
||||
llama_batch_add(batch, tokens_list[i], i, seq_ids, false);
|
||||
common_batch_add(batch, tokens_list[i], i, seq_ids, false);
|
||||
}
|
||||
GGML_ASSERT(batch.n_tokens == (int) tokens_list.size());
|
||||
|
||||
@@ -123,8 +123,8 @@ int main(int argc, char ** argv) {
|
||||
decoder_start_token_id = llama_token_bos(model);
|
||||
}
|
||||
|
||||
llama_batch_clear(batch);
|
||||
llama_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
|
||||
common_batch_clear(batch);
|
||||
common_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
|
||||
}
|
||||
|
||||
// llama_decode will output logits only for the last token of the prompt
|
||||
@@ -161,7 +161,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
while (n_cur <= n_predict) {
|
||||
// prepare the next batch
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
// sample the next token for each parallel sequence / stream
|
||||
for (int32_t i = 0; i < n_parallel; ++i) {
|
||||
@@ -185,15 +185,15 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// if there is only one stream, we print immediately to stdout
|
||||
if (n_parallel == 1) {
|
||||
LOG("%s", llama_token_to_piece(ctx, new_token_id).c_str());
|
||||
LOG("%s", common_token_to_piece(ctx, new_token_id).c_str());
|
||||
}
|
||||
|
||||
streams[i] += llama_token_to_piece(ctx, new_token_id);
|
||||
streams[i] += common_token_to_piece(ctx, new_token_id);
|
||||
|
||||
i_batch[i] = batch.n_tokens;
|
||||
|
||||
// push this new token for next evaluation
|
||||
llama_batch_add(batch, new_token_id, n_cur, { i }, true);
|
||||
common_batch_add(batch, new_token_id, n_cur, { i }, true);
|
||||
|
||||
n_decode += 1;
|
||||
}
|
||||
|
||||
@@ -872,7 +872,7 @@ static std::string basename(const std::string &path) {
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
struct train_params params = get_default_train_params();
|
||||
if (!params_parse(argc, argv, ¶ms)) {
|
||||
|
||||
@@ -31,7 +31,7 @@ template <class Iter>
|
||||
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
|
||||
std::string ret;
|
||||
for (; begin != end; ++begin) {
|
||||
ret += llama_token_to_piece(ctx, *begin);
|
||||
ret += common_token_to_piece(ctx, *begin);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -272,8 +272,8 @@ struct tokenized_prompt {
|
||||
|
||||
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
|
||||
const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
|
||||
tokens_pos = ::llama_tokenize(ctx, pos, add_bos, true);
|
||||
tokens_neg = ::llama_tokenize(ctx, neg, add_bos, true);
|
||||
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
|
||||
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
|
||||
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
|
||||
padding_seq(ctx, tokens_pos, max_seq_len);
|
||||
padding_seq(ctx, tokens_neg, max_seq_len);
|
||||
@@ -281,7 +281,7 @@ struct tokenized_prompt {
|
||||
|
||||
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
|
||||
// TODO: customize padding token
|
||||
std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
|
||||
std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
|
||||
llama_token pad_tok = pad_tokens.back();
|
||||
while (tokens.size() < len) {
|
||||
tokens.push_back(pad_tok);
|
||||
@@ -339,7 +339,7 @@ static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
|
||||
|
||||
static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
|
||||
llama_kv_cache_clear(ctx);
|
||||
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
|
||||
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
|
||||
fprintf(stderr, "%s : failed to eval\n", __func__);
|
||||
return false;
|
||||
}
|
||||
@@ -370,7 +370,7 @@ static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const
|
||||
* Load prompt files and completion file.
|
||||
* Then format each pair of prompt + completion to make an entry.
|
||||
*/
|
||||
static int prepare_entries(gpt_params & params, train_context & ctx_train) {
|
||||
static int prepare_entries(common_params & params, train_context & ctx_train) {
|
||||
// load prompts
|
||||
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
|
||||
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
|
||||
@@ -388,9 +388,9 @@ static int prepare_entries(gpt_params & params, train_context & ctx_train) {
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -413,7 +413,7 @@ int main(int argc, char ** argv) {
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the model to get hparams
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
|
||||
@@ -28,7 +28,7 @@ static std::vector<std::string> split_lines(const std::string & s, const std::st
|
||||
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
|
||||
size_t n_tokens = tokens.size();
|
||||
for (size_t i = 0; i < n_tokens; i++) {
|
||||
llama_batch_add(batch, tokens[i], i, { seq_id }, true);
|
||||
common_batch_add(batch, tokens[i], i, { seq_id }, true);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,18 +74,18 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
|
||||
}
|
||||
|
||||
float * out = output + embd_pos * n_embd;
|
||||
llama_embd_normalize(embd, out, n_embd, embd_norm);
|
||||
common_embd_normalize(embd, out, n_embd, embd_norm);
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_EMBEDDING)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EMBEDDING)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
params.embedding = true;
|
||||
// For non-causal models, batch size must be equal to ubatch size
|
||||
@@ -95,7 +95,7 @@ int main(int argc, char ** argv) {
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the model
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -122,7 +122,7 @@ int main(int argc, char ** argv) {
|
||||
// print system information
|
||||
{
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
}
|
||||
|
||||
// split the prompt into lines
|
||||
@@ -135,7 +135,7 @@ int main(int argc, char ** argv) {
|
||||
// tokenize the prompts and trim
|
||||
std::vector<std::vector<int32_t>> inputs;
|
||||
for (const auto & prompt : prompts) {
|
||||
auto inp = ::llama_tokenize(ctx, prompt, true, true);
|
||||
auto inp = common_tokenize(ctx, prompt, true, true);
|
||||
if (inp.size() > n_batch) {
|
||||
LOG_ERR("%s: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
|
||||
__func__, (long long int) inp.size(), (long long int) n_batch);
|
||||
@@ -159,7 +159,7 @@ int main(int argc, char ** argv) {
|
||||
LOG_INF("%s: prompt %d: '%s'\n", __func__, i, prompts[i].c_str());
|
||||
LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, inputs[i].size());
|
||||
for (int j = 0; j < (int) inputs[i].size(); j++) {
|
||||
LOG("%6d -> '%s'\n", inputs[i][j], llama_token_to_piece(ctx, inputs[i][j]).c_str());
|
||||
LOG("%6d -> '%s'\n", inputs[i][j], common_token_to_piece(ctx, inputs[i][j]).c_str());
|
||||
}
|
||||
LOG("\n\n");
|
||||
}
|
||||
@@ -199,7 +199,7 @@ int main(int argc, char ** argv) {
|
||||
batch_decode(ctx, batch, out, s, n_embd, params.embd_normalize);
|
||||
e += pooling_type == LLAMA_POOLING_TYPE_NONE ? batch.n_tokens : s;
|
||||
s = 0;
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
}
|
||||
|
||||
// add to batch
|
||||
@@ -263,7 +263,7 @@ int main(int argc, char ** argv) {
|
||||
LOG("\n");
|
||||
for (int i = 0; i < n_prompts; i++) {
|
||||
for (int j = 0; j < n_prompts; j++) {
|
||||
float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
float sim = common_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
LOG("%6.2f ", sim);
|
||||
}
|
||||
LOG("%1.10s", prompts[i].c_str());
|
||||
@@ -296,7 +296,7 @@ int main(int argc, char ** argv) {
|
||||
for (int i = 0;;) { // at least two iteration (n_embd_count > 1)
|
||||
LOG(" [");
|
||||
for (int j = 0;;) { // at least two iteration (n_embd_count > 1)
|
||||
float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
float sim = common_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
LOG("%6.2f", sim);
|
||||
j++;
|
||||
if (j < n_embd_count) LOG(", "); else break;
|
||||
|
||||
@@ -126,12 +126,12 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
    return true;
}

static bool run(llama_context * ctx, const gpt_params & params) {
static bool run(llama_context * ctx, const common_params & params) {
    const bool add_bos = llama_add_bos_token(llama_get_model(ctx));

    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);

    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
        LOG_ERR("%s : failed to eval\n", __func__);
        return false;
    }
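Several hunks in this range also drop the trailing position and sequence-id arguments from `llama_batch_get_one`. A hedged sketch of the new call shape, assuming the two-argument signature shown above; the helper function name is hypothetical:

```cpp
// Hedged sketch only; mirrors the post-change call shape in the hunk above.
#include <cstdio>
#include <string>
#include <vector>

#include "common.h"
#include "llama.h"

static bool eval_prompt(llama_context * ctx, const std::string & prompt) {
    std::vector<llama_token> tokens = common_tokenize(ctx, prompt, /*add_special=*/true);

    // positions and sequence ids are no longer passed here; the context tracks them
    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
        fprintf(stderr, "%s : failed to eval\n", __func__);
        return false;
    }
    return true;
}
```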
@@ -142,13 +142,13 @@ static bool run(llama_context * ctx, const gpt_params & params) {
|
||||
int main(int argc, char ** argv) {
|
||||
callback_data cb_data;
|
||||
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
llama_backend_init();
|
||||
llama_numa_init(params.numa);
|
||||
@@ -160,7 +160,7 @@ int main(int argc, char ** argv) {
|
||||
params.warmup = false;
|
||||
|
||||
// init
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -172,7 +172,7 @@ int main(int argc, char ** argv) {
|
||||
// print system information
|
||||
{
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
LOG_INF("\n");
|
||||
}
|
||||
|
||||
|
||||
@@ -128,7 +128,7 @@ struct lora_merge_ctx {
|
||||
|
||||
lora_merge_ctx(
|
||||
std::string & base_fname,
|
||||
std::vector<llama_lora_adapter_info> & lora_files,
|
||||
std::vector<common_lora_adapter_info> & lora_files,
|
||||
std::string & outfile,
|
||||
int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
|
||||
fout.exceptions(std::ofstream::failbit); // fail fast on write errors
|
||||
@@ -314,9 +314,9 @@ struct lora_merge_ctx {
            // optionally dequantize it
            printf("%s : + dequantize base tensor from %s to F32\n", __func__, ggml_type_name(base->type));
            auto nels = ggml_nelements(inp_base);
            ggml_type_traits_t qtype = ggml_internal_get_type_traits(base->type);
            const auto * qtype = ggml_get_type_traits(base->type);
            std::vector<uint8_t> dequant_buf(nels * sizeof(float));
            qtype.to_float(read_buf.data(), (float *)dequant_buf.data(), nels);
            qtype->to_float(read_buf.data(), (float *)dequant_buf.data(), nels);
            ggml_backend_tensor_set(inp_base, dequant_buf.data(), 0, dequant_buf.size());
        } else {
            ggml_backend_tensor_set(inp_base, read_buf.data(), 0, ggml_nbytes(inp_base));
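This hunk switches from `ggml_internal_get_type_traits` (returned by value) to `ggml_get_type_traits`, which returns a pointer to the traits table. A hedged sketch of dequantizing a buffer with the new accessor; the helper function and the guard on `to_float` are assumptions, not part of this diff:

```cpp
// Hedged sketch: dequantize a quantized buffer to F32 via the pointer-based
// traits accessor shown above. nels should be a multiple of the block size
// for quantized types.
#include <vector>

#include "ggml.h"

static std::vector<float> dequantize_to_f32(const void * qdata, ggml_type type, int64_t nels) {
    std::vector<float> out(nels);

    const auto * traits = ggml_get_type_traits(type);
    if (traits->to_float) {
        traits->to_float(qdata, out.data(), nels);   // e.g. Q4_K -> F32
    }
    return out;
}
```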
@@ -400,9 +400,9 @@ static void print_usage(int, char ** argv) {
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ static void write_table_header(std::ofstream & file) {
|
||||
file << "| -------- | ----------- |\n";
|
||||
}
|
||||
|
||||
static void write_table_entry(std::ofstream & file, const llama_arg & opt) {
|
||||
static void write_table_entry(std::ofstream & file, const common_arg & opt) {
|
||||
file << "| `";
|
||||
// args
|
||||
for (const auto & arg : opt.args) {
|
||||
@@ -40,7 +40,7 @@ static void write_table_entry(std::ofstream & file, const llama_arg & opt) {
|
||||
file << "` | " << md_help << " |\n";
|
||||
}
|
||||
|
||||
static void write_table(std::ofstream & file, std::vector<llama_arg *> & opts) {
|
||||
static void write_table(std::ofstream & file, std::vector<common_arg *> & opts) {
|
||||
write_table_header(file);
|
||||
for (const auto & opt : opts) {
|
||||
write_table_entry(file, *opt);
|
||||
@@ -50,12 +50,12 @@ static void write_table(std::ofstream & file, std::vector<llama_arg *> & opts) {
|
||||
static void export_md(std::string fname, llama_example ex) {
|
||||
std::ofstream file(fname, std::ofstream::out | std::ofstream::trunc);
|
||||
|
||||
gpt_params params;
|
||||
auto ctx_arg = gpt_params_parser_init(params, ex);
|
||||
common_params params;
|
||||
auto ctx_arg = common_params_parser_init(params, ex);
|
||||
|
||||
std::vector<llama_arg *> common_options;
|
||||
std::vector<llama_arg *> sparam_options;
|
||||
std::vector<llama_arg *> specific_options;
|
||||
std::vector<common_arg *> common_options;
|
||||
std::vector<common_arg *> sparam_options;
|
||||
std::vector<common_arg *> specific_options;
|
||||
for (auto & opt : ctx_arg.options) {
|
||||
// in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching current example
|
||||
if (opt.is_sparam) {
|
||||
|
||||
@@ -15,11 +15,11 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
|
||||
llama_batch batch = llama_batch_init(llama_n_batch(ctx), 0, 1);
|
||||
|
||||
for (uint64_t i = 0; i < sentences.size(); i++) {
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
const std::string input_string = instruction + sentences[i];
|
||||
|
||||
std::vector<llama_token> inputs = llama_tokenize(model, input_string, true, false);
|
||||
std::vector<llama_token> inputs = common_tokenize(model, input_string, true, false);
|
||||
|
||||
const int32_t n_toks = inputs.size();
|
||||
|
||||
@@ -28,7 +28,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
|
||||
// inputs.push_back(llama_token_eos(model));
|
||||
|
||||
// we want to ignore instruction tokens for mean pooling
|
||||
const int32_t n_inst = llama_tokenize(model, instruction, true, false).size();
|
||||
const int32_t n_inst = common_tokenize(model, instruction, true, false).size();
|
||||
|
||||
#ifdef GRIT_DEBUG
|
||||
// debug tokens - should be matching as referenced in the GritLM sample
|
||||
@@ -40,7 +40,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
|
||||
|
||||
// add input to batch (this increments n_tokens)
|
||||
for (int32_t j = 0; j < n_toks; j++) {
|
||||
llama_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst);
|
||||
common_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst);
|
||||
}
|
||||
|
||||
// clear previous kv_cache values (irrelevant for embeddings)
|
||||
@@ -75,7 +75,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
|
||||
}
|
||||
|
||||
std::vector<float> emb_norm(emb_unorm.size());
|
||||
llama_embd_normalize(emb_unorm.data(), emb_norm.data(), n_embd);
|
||||
common_embd_normalize(emb_unorm.data(), emb_norm.data(), n_embd);
|
||||
result.push_back(emb_norm);
|
||||
|
||||
#ifdef GRIT_DEBUG
|
||||
@@ -105,16 +105,16 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std
|
||||
|
||||
llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1);
|
||||
|
||||
std::vector<llama_token> inputs = llama_tokenize(model, prompt, false, true);
|
||||
std::vector<llama_token> inputs = common_tokenize(model, prompt, false, true);
|
||||
int32_t i_current_token = 0;
|
||||
|
||||
while (true) {
|
||||
llama_batch_clear(bat);
|
||||
common_batch_clear(bat);
|
||||
{
|
||||
const int32_t n_inputs = inputs.size();
|
||||
|
||||
for (int32_t i = 0; i < n_inputs; i++) {
|
||||
llama_batch_add(bat, inputs[i], i_current_token++, { 0 }, i == n_inputs - 1);
|
||||
common_batch_add(bat, inputs[i], i_current_token++, { 0 }, i == n_inputs - 1);
|
||||
}
|
||||
}
|
||||
inputs.clear();
|
||||
@@ -127,7 +127,7 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std
|
||||
break;
|
||||
}
|
||||
|
||||
std::string piece = llama_token_to_piece(ctx, token);
|
||||
std::string piece = common_token_to_piece(ctx, token);
|
||||
if (stream) {
|
||||
std::printf("%s", piece.c_str());
|
||||
std::fflush(stdout);
|
||||
@@ -152,16 +152,16 @@ static std::string gritlm_instruction(const std::string & instruction) {
|
||||
}
|
||||
|
||||
int main(int argc, char * argv[]) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
llama_model_params mparams = llama_model_params_from_gpt_params(params);
|
||||
llama_context_params cparams = llama_context_params_from_gpt_params(params);
|
||||
llama_model_params mparams = common_model_params_to_llama(params);
|
||||
llama_context_params cparams = common_context_params_to_llama(params);
|
||||
|
||||
llama_backend_init();
|
||||
|
||||
@@ -199,10 +199,10 @@ int main(int argc, char * argv[]) {
|
||||
|
||||
const int n_embd = llama_n_embd(model);
|
||||
|
||||
const float cosine_sim_q0_d0 = llama_embd_similarity_cos(q_rep[0].data(), d_rep[0].data(), n_embd);
|
||||
const float cosine_sim_q0_d1 = llama_embd_similarity_cos(q_rep[0].data(), d_rep[1].data(), n_embd);
|
||||
const float cosine_sim_q1_d0 = llama_embd_similarity_cos(q_rep[1].data(), d_rep[0].data(), n_embd);
|
||||
const float cosine_sim_q1_d1 = llama_embd_similarity_cos(q_rep[1].data(), d_rep[1].data(), n_embd);
|
||||
const float cosine_sim_q0_d0 = common_embd_similarity_cos(q_rep[0].data(), d_rep[0].data(), n_embd);
|
||||
const float cosine_sim_q0_d1 = common_embd_similarity_cos(q_rep[0].data(), d_rep[1].data(), n_embd);
|
||||
const float cosine_sim_q1_d0 = common_embd_similarity_cos(q_rep[1].data(), d_rep[0].data(), n_embd);
|
||||
const float cosine_sim_q1_d1 = common_embd_similarity_cos(q_rep[1].data(), d_rep[1].data(), n_embd);
|
||||
|
||||
std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[0].c_str(), documents[0].c_str(), cosine_sim_q0_d0);
|
||||
std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[0].c_str(), documents[1].c_str(), cosine_sim_q0_d1);
|
||||
|
||||
@@ -37,13 +37,13 @@ struct Stats {
|
||||
class IMatrixCollector {
|
||||
public:
|
||||
IMatrixCollector() = default;
|
||||
void set_params(gpt_params params) { m_params = std::move(params); }
|
||||
void set_params(common_params params) { m_params = std::move(params); }
|
||||
bool collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data);
|
||||
void save_imatrix(int ncall = -1) const;
|
||||
bool load_imatrix(const char * file_name);
|
||||
private:
|
||||
std::unordered_map<std::string, Stats> m_stats;
|
||||
gpt_params m_params;
|
||||
common_params m_params;
|
||||
std::mutex m_mutex;
|
||||
int m_last_call = 0;
|
||||
std::vector<float> m_src1_data;
|
||||
@@ -428,7 +428,7 @@ static void process_logits(
|
||||
}
|
||||
}
|
||||
|
||||
static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
|
||||
static bool compute_imatrix(llama_context * ctx, const common_params & params) {
|
||||
const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
|
||||
GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx)));
|
||||
const int n_ctx = llama_n_ctx(ctx);
|
||||
@@ -436,7 +436,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
|
||||
auto tim1 = std::chrono::high_resolution_clock::now();
|
||||
LOG_INF("%s: tokenizing the input ..\n", __func__);
|
||||
|
||||
std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);
|
||||
std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true);
|
||||
|
||||
auto tim2 = std::chrono::high_resolution_clock::now();
|
||||
LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
|
||||
@@ -496,6 +496,8 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
|
||||
// clear the KV cache
|
||||
llama_kv_cache_clear(ctx);
|
||||
|
||||
llama_batch batch = llama_batch_init(n_batch, 0, 1);
|
||||
|
||||
for (int j = 0; j < num_batches; ++j) {
|
||||
const int batch_start = start + j * n_batch;
|
||||
const int batch_size = std::min(end - batch_start, n_batch);
|
||||
@@ -508,9 +510,14 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
|
||||
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
|
||||
}
|
||||
|
||||
// TODO: use batch.logits to save computations instead of relying on logits_all == true
|
||||
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
|
||||
common_batch_clear(batch);
|
||||
for (int i = 0; i < batch_size; i++) {
|
||||
common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
|
||||
}
|
||||
|
||||
if (llama_decode(ctx, batch)) {
|
||||
LOG_ERR("%s : failed to eval\n", __func__);
|
||||
llama_batch_free(batch);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -523,6 +530,8 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
|
||||
}
|
||||
}
|
||||
|
||||
llama_batch_free(batch);
|
||||
|
||||
const auto t_end = std::chrono::high_resolution_clock::now();
|
||||
|
||||
if (i == 0) {
|
||||
@@ -568,17 +577,17 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
params.n_ctx = 512;
|
||||
params.logits_all = true;
|
||||
params.escape = false;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_IMATRIX, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_IMATRIX, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
params.n_batch = std::min(params.n_batch, params.n_ctx);
|
||||
|
||||
@@ -607,7 +616,7 @@ int main(int argc, char ** argv) {
|
||||
params.warmup = false;
|
||||
|
||||
// init
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -625,7 +634,7 @@ int main(int argc, char ** argv) {
|
||||
// print system information
|
||||
{
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
}
|
||||
|
||||
if (!compute_imatrix(ctx, params)) {
|
||||
|
||||
@@ -35,8 +35,8 @@
|
||||
|
||||
static llama_context ** g_ctx;
|
||||
static llama_model ** g_model;
|
||||
static gpt_sampler ** g_smpl;
|
||||
static gpt_params * g_params;
|
||||
static common_sampler ** g_smpl;
|
||||
static common_params * g_params;
|
||||
static std::vector<llama_token> * g_input_tokens;
|
||||
static std::ostringstream * g_output_ss;
|
||||
static std::vector<llama_token> * g_output_tokens;
|
||||
@@ -44,7 +44,7 @@ static std::vector<llama_token> * g_output_tokens;
|
||||
static bool is_interacting = false;
|
||||
|
||||
static void write_logfile(
|
||||
const llama_context * ctx, const gpt_params & params, const llama_model * model,
|
||||
const llama_context * ctx, const common_params & params, const llama_model * model,
|
||||
const std::vector<llama_token> & input_tokens, const std::string & output,
|
||||
const std::vector<llama_token> & output_tokens
|
||||
) {
|
||||
@@ -95,12 +95,12 @@ static void sigint_handler(int signo) {
|
||||
} else {
|
||||
console::cleanup();
|
||||
LOG("\n");
|
||||
gpt_perf_print(*g_ctx, *g_smpl);
|
||||
common_perf_print(*g_ctx, *g_smpl);
|
||||
write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
|
||||
|
||||
// make sure all logs are flushed
|
||||
LOG("Interrupted by user\n");
|
||||
gpt_log_pause(gpt_log_main());
|
||||
common_log_pause(common_log_main());
|
||||
|
||||
_exit(130);
|
||||
}
|
||||
@@ -109,14 +109,14 @@ static void sigint_handler(int signo) {
|
||||
#endif
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
g_params = ¶ms;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_INFILL)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_INFILL)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
auto & sparams = params.sparams;
|
||||
|
||||
@@ -166,7 +166,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
llama_model * model = nullptr;
|
||||
llama_context * ctx = nullptr;
|
||||
gpt_sampler * smpl = nullptr;
|
||||
common_sampler * smpl = nullptr;
|
||||
|
||||
g_model = &model;
|
||||
g_ctx = &ctx;
|
||||
@@ -174,7 +174,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// load the model and apply lora adapter, if any
|
||||
LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
model = llama_init.model;
|
||||
ctx = llama_init.context;
|
||||
@@ -195,21 +195,21 @@ int main(int argc, char ** argv) {
|
||||
// print system information
|
||||
{
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
}
|
||||
const bool add_bos = llama_add_bos_token(model);
|
||||
GGML_ASSERT(!llama_add_eos_token(model));
|
||||
|
||||
std::vector<llama_token> embd_inp;
|
||||
std::vector<llama_token> embd_end;
|
||||
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
|
||||
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
|
||||
std::vector<llama_token> inp_pfx = common_tokenize(ctx, params.input_prefix, false);
|
||||
std::vector<llama_token> inp_sfx = common_tokenize(ctx, params.input_suffix, false);
|
||||
|
||||
GGML_ASSERT(llama_token_prefix(model) >= 0);
|
||||
GGML_ASSERT(llama_token_suffix(model) >= 0);
|
||||
GGML_ASSERT(llama_token_fim_pre(model) >= 0);
|
||||
GGML_ASSERT(llama_token_fim_suf(model) >= 0);
|
||||
|
||||
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
|
||||
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
|
||||
inp_pfx.insert(inp_pfx.begin(), llama_token_fim_pre(model));
|
||||
inp_sfx.insert(inp_sfx.begin(), llama_token_fim_suf(model));
|
||||
|
||||
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
||||
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
||||
@@ -218,7 +218,7 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
|
||||
|
||||
const llama_token middle_token = llama_token_middle(model);
|
||||
const llama_token middle_token = llama_token_fim_mid(model);
|
||||
if (middle_token >= 0) {
|
||||
embd_inp.push_back(middle_token);
|
||||
}
|
||||
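The infill example now uses the fim_pre/fim_suf/fim_mid special-token getters. A hedged sketch of the prefix-suffix-middle prompt layout it builds, assuming the default (non-`spm_infill`) ordering; the helper function itself is hypothetical:

```cpp
// Hedged sketch of the FIM prompt layout from the hunks above:
// <fim_pre> prefix <fim_suf> suffix <fim_mid>
#include <string>
#include <vector>

#include "common.h"
#include "llama.h"

static std::vector<llama_token> build_fim_prompt(const llama_model * model, llama_context * ctx,
                                                 const std::string & prefix, const std::string & suffix) {
    std::vector<llama_token> inp_pfx = common_tokenize(ctx, prefix, /*add_special=*/false);
    std::vector<llama_token> inp_sfx = common_tokenize(ctx, suffix, /*add_special=*/false);

    inp_pfx.insert(inp_pfx.begin(), llama_token_fim_pre(model));
    inp_sfx.insert(inp_sfx.begin(), llama_token_fim_suf(model));

    std::vector<llama_token> embd_inp = inp_pfx;
    embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());

    // only append the middle token if the model defines one
    const llama_token middle_token = llama_token_fim_mid(model);
    if (middle_token >= 0) {
        embd_inp.push_back(middle_token);
    }
    return embd_inp;
}
```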
@@ -257,13 +257,13 @@ int main(int argc, char ** argv) {
|
||||
LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
|
||||
LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
|
||||
for (int i = 0; i < (int) embd_inp.size(); i++) {
|
||||
LOG_INF("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", embd_inp[i], common_token_to_piece(ctx, embd_inp[i]).c_str());
|
||||
}
|
||||
|
||||
if (params.n_keep > 0) {
|
||||
LOG_INF("%s: static prompt based on n_keep: '", __func__);
|
||||
for (int i = 0; i < params.n_keep; i++) {
|
||||
LOG_CNT("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
|
||||
LOG_CNT("%s", common_token_to_piece(ctx, embd_inp[i]).c_str());
|
||||
}
|
||||
LOG_CNT("'\n");
|
||||
}
|
||||
@@ -298,11 +298,11 @@ int main(int argc, char ** argv) {
|
||||
LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
|
||||
}
|
||||
}
|
||||
smpl = gpt_sampler_init(model, sparams);
|
||||
smpl = common_sampler_init(model, sparams);
|
||||
|
||||
LOG_INF("sampler seed: %u\n", gpt_sampler_get_seed(smpl));
|
||||
LOG_INF("sampler seed: %u\n", common_sampler_get_seed(smpl));
|
||||
LOG_INF("sampler params: \n%s\n", sparams.print().c_str());
|
||||
LOG_INF("sampler chain: %s\n", gpt_sampler_print(smpl).c_str());
|
||||
LOG_INF("sampler chain: %s\n", common_sampler_print(smpl).c_str());
|
||||
|
||||
LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
|
||||
|
||||
@@ -396,7 +396,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());
|
||||
|
||||
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
|
||||
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) {
|
||||
LOG_ERR("%s : failed to eval\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
@@ -411,9 +411,9 @@ int main(int argc, char ** argv) {
|
||||
embd.clear();
|
||||
|
||||
if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
|
||||
const llama_token id = gpt_sampler_sample(smpl, ctx, -1);
|
||||
const llama_token id = common_sampler_sample(smpl, ctx, -1);
|
||||
|
||||
gpt_sampler_accept(smpl, id, true);
|
||||
common_sampler_accept(smpl, id, true);
|
||||
|
||||
// LOG_DBG("last: %s\n", string_from(ctx, smpl->prev.to_vector()).c_str());
|
||||
|
||||
@@ -434,7 +434,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// push the prompt in the sampling context in order to apply repetition penalties later
|
||||
// for the prompt, we don't apply grammar rules
|
||||
gpt_sampler_accept(smpl, embd_inp[n_consumed], false);
|
||||
common_sampler_accept(smpl, embd_inp[n_consumed], false);
|
||||
|
||||
++n_consumed;
|
||||
if ((int) embd.size() >= params.n_batch) {
|
||||
@@ -446,7 +446,7 @@ int main(int argc, char ** argv) {
|
||||
// display text
|
||||
if (input_echo) {
|
||||
for (auto id : embd) {
|
||||
const std::string token_str = llama_token_to_piece(ctx, id);
|
||||
const std::string token_str = common_token_to_piece(ctx, id);
|
||||
LOG("%s", token_str.c_str());
|
||||
|
||||
if (embd.size() > 1) {
|
||||
@@ -465,10 +465,10 @@ int main(int argc, char ** argv) {
|
||||
// if not currently processing queued inputs;
|
||||
if ((int) embd_inp.size() <= n_consumed) {
|
||||
// deal with eot token in infill mode
|
||||
if ((gpt_sampler_last(smpl) == llama_token_eot(model) || is_interacting) && params.interactive){
|
||||
if ((common_sampler_last(smpl) == llama_token_eot(model) || is_interacting) && params.interactive){
|
||||
if (is_interacting && !params.interactive_first) {
|
||||
// print an eot token
|
||||
LOG("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
|
||||
LOG("%s", common_token_to_piece(ctx, llama_token_eot(model)).c_str());
|
||||
}
|
||||
LOG("\n");
|
||||
console::set_display(console::user_input);
|
||||
@@ -505,11 +505,11 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
|
||||
// tokenize new prefix and suffix
|
||||
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
|
||||
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
|
||||
std::vector<llama_token> inp_pfx = common_tokenize(ctx, params.input_prefix, false);
|
||||
std::vector<llama_token> inp_sfx = common_tokenize(ctx, params.input_suffix, false);
|
||||
|
||||
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
|
||||
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
|
||||
inp_pfx.insert(inp_pfx.begin(), llama_token_fim_pre(model));
|
||||
inp_sfx.insert(inp_sfx.begin(), llama_token_fim_suf(model));
|
||||
|
||||
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
||||
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
||||
@@ -529,7 +529,7 @@ int main(int argc, char ** argv) {
|
||||
is_interacting = false;
|
||||
}
|
||||
// deal with end of generation tokens in interactive mode
|
||||
else if (llama_token_is_eog(model, gpt_sampler_last(smpl))) {
|
||||
else if (llama_token_is_eog(model, common_sampler_last(smpl))) {
|
||||
LOG_DBG("found EOS token\n");
|
||||
|
||||
if (params.interactive) {
|
||||
@@ -579,7 +579,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
const size_t original_size = embd_inp.size();
|
||||
|
||||
const auto line_inp = ::llama_tokenize(ctx, buffer, false);
|
||||
const auto line_inp = common_tokenize(ctx, buffer, false);
|
||||
LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());
|
||||
|
||||
embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
|
||||
@@ -587,7 +587,7 @@ int main(int argc, char ** argv) {
|
||||
for (size_t i = original_size; i < embd_inp.size(); ++i) {
|
||||
const llama_token token = embd_inp[i];
|
||||
output_tokens.push_back(token);
|
||||
output_ss << llama_token_to_piece(ctx, token);
|
||||
output_ss << common_token_to_piece(ctx, token);
|
||||
}
|
||||
|
||||
n_remain -= line_inp.size();
|
||||
@@ -601,7 +601,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
if (n_past > 0) {
|
||||
if (is_interacting) {
|
||||
gpt_sampler_reset(smpl);
|
||||
common_sampler_reset(smpl);
|
||||
}
|
||||
is_interacting = false;
|
||||
}
|
||||
@@ -620,17 +620,17 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
}
|
||||
if (!params.interactive && n_remain <= 0) {
|
||||
LOG("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
|
||||
LOG("%s", common_token_to_piece(ctx, llama_token_eot(model)).c_str());
|
||||
}
|
||||
|
||||
LOG("\n");
|
||||
gpt_perf_print(ctx, smpl);
|
||||
common_perf_print(ctx, smpl);
|
||||
write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
|
||||
|
||||
llama_free(ctx);
|
||||
llama_free_model(model);
|
||||
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
llama_backend_free();
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -540,7 +540,7 @@ class SchemaConverter:
|
||||
return self._add_rule(
|
||||
name,
|
||||
to_rule(transform()) if self._raw_pattern \
|
||||
else "\"\\\"\" " + to_rule(transform()) + " \"\\\"\" space")
|
||||
else "\"\\\"\" (" + to_rule(transform()) + ") \"\\\"\" space")
|
||||
|
||||
|
||||
def _resolve_ref(self, ref):
|
||||
|
||||
@@ -151,7 +151,7 @@ static std::string get_gpu_info() {
|
||||
int count = ggml_backend_sycl_get_device_count();
|
||||
for (int i = 0; i < count; i++) {
|
||||
char buf[128];
|
||||
ggml_sycl_get_device_description(i, buf, sizeof(buf));
|
||||
ggml_backend_sycl_get_device_description(i, buf, sizeof(buf));
|
||||
id += buf;
|
||||
if (i < count - 1) {
|
||||
id += "/";
|
||||
@@ -304,9 +304,9 @@ static void print_usage(int /* argc */, char ** argv) {
|
||||
printf(" --cpu-strict <0|1> (default: %s)\n", join(cmd_params_defaults.cpu_strict, ",").c_str());
|
||||
printf(" --poll <0...100> (default: %s)\n", join(cmd_params_defaults.poll, ",").c_str());
|
||||
printf(" -ngl, --n-gpu-layers <n> (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
|
||||
#ifdef GGML_USE_RPC
|
||||
printf(" -rpc, --rpc <rpc_servers> (default: %s)\n", join(cmd_params_defaults.rpc_servers, ",").c_str());
|
||||
#endif
|
||||
if (llama_supports_rpc()) {
|
||||
printf(" -rpc, --rpc <rpc_servers> (default: %s)\n", join(cmd_params_defaults.rpc_servers, ",").c_str());
|
||||
}
|
||||
printf(" -sm, --split-mode <none|layer|row> (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
|
||||
printf(" -mg, --main-gpu <i> (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
|
||||
printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
|
||||
@@ -497,14 +497,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
||||
}
|
||||
auto p = string_split<int>(argv[i], split_delim);
|
||||
params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
|
||||
#ifdef GGML_USE_RPC
|
||||
} else if (arg == "-rpc" || arg == "--rpc") {
|
||||
} else if (llama_supports_rpc() && (arg == "-rpc" || arg == "--rpc")) {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.rpc_servers.push_back(argv[i]);
|
||||
#endif
|
||||
} else if (arg == "-sm" || arg == "--split-mode") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
@@ -1430,7 +1428,7 @@ struct sql_printer : public printer {
|
||||
}
|
||||
};
|
||||
|
||||
static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
|
||||
static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_threads) {
|
||||
llama_set_n_threads(ctx, n_threads, n_threads);
|
||||
|
||||
const llama_model * model = llama_get_model(ctx);
|
||||
@@ -1446,14 +1444,14 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_bat
|
||||
for (int i = 1; i < n_tokens; i++) {
|
||||
tokens[i] = std::rand() % n_vocab;
|
||||
}
|
||||
llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens, n_past + n_processed, 0));
|
||||
llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens));
|
||||
n_processed += n_tokens;
|
||||
}
|
||||
|
||||
llama_synchronize(ctx);
|
||||
}
|
||||
|
||||
static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) {
|
||||
static void test_gen(llama_context * ctx, int n_gen, int n_threads) {
|
||||
llama_set_n_threads(ctx, n_threads, n_threads);
|
||||
|
||||
const llama_model * model = llama_get_model(ctx);
|
||||
@@ -1462,7 +1460,7 @@ static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads)
|
||||
llama_token token = llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;
|
||||
|
||||
for (int i = 0; i < n_gen; i++) {
|
||||
llama_decode(ctx, llama_batch_get_one(&token, 1, n_past + i, 0));
|
||||
llama_decode(ctx, llama_batch_get_one(&token, 1));
|
||||
llama_synchronize(ctx);
|
||||
token = std::rand() % n_vocab;
|
||||
}
|
||||
@@ -1598,13 +1596,13 @@ int main(int argc, char ** argv) {
|
||||
fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_count);
|
||||
}
|
||||
//test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
|
||||
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
|
||||
test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
|
||||
}
|
||||
if (t.n_gen > 0) {
|
||||
if (params.progress) {
|
||||
fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_count);
|
||||
}
|
||||
test_gen(ctx, 1, 0, t.n_threads);
|
||||
test_gen(ctx, 1, t.n_threads);
|
||||
}
|
||||
|
||||
for (int i = 0; i < params.reps; i++) {
|
||||
@@ -1616,13 +1614,13 @@ int main(int argc, char ** argv) {
|
||||
if (params.progress) {
|
||||
fprintf(stderr, "llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps);
|
||||
}
|
||||
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
|
||||
test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
|
||||
}
|
||||
if (t.n_gen > 0) {
|
||||
if (params.progress) {
|
||||
fprintf(stderr, "llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps);
|
||||
}
|
||||
test_gen(ctx, t.n_gen, t.n_prompt, t.n_threads);
|
||||
test_gen(ctx, t.n_gen, t.n_threads);
|
||||
}
|
||||
|
||||
uint64_t t_ns = get_time_ns() - t_start;
|
||||
|
||||
@@ -18,6 +18,7 @@ android {
|
||||
}
|
||||
externalNativeBuild {
|
||||
cmake {
|
||||
arguments += "-DLLAMA_BUILD_COMMON=ON"
|
||||
arguments += "-DCMAKE_BUILD_TYPE=Release"
|
||||
cppFlags += listOf()
|
||||
arguments += listOf()
|
||||
|
||||
@@ -186,11 +186,11 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model(
|
||||
for (nri = 0; nri < nr; nri++) {
|
||||
LOGi("Benchmark prompt processing (pp)");
|
||||
|
||||
llama_batch_clear(*batch);
|
||||
common_batch_clear(*batch);
|
||||
|
||||
const int n_tokens = pp;
|
||||
for (i = 0; i < n_tokens; i++) {
|
||||
llama_batch_add(*batch, 0, i, { 0 }, false);
|
||||
common_batch_add(*batch, 0, i, { 0 }, false);
|
||||
}
|
||||
|
||||
batch->logits[batch->n_tokens - 1] = true;
|
||||
@@ -210,9 +210,9 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model(
|
||||
const auto t_tg_start = ggml_time_us();
|
||||
for (i = 0; i < tg; i++) {
|
||||
|
||||
llama_batch_clear(*batch);
|
||||
common_batch_clear(*batch);
|
||||
for (j = 0; j < pl; j++) {
|
||||
llama_batch_add(*batch, 0, i, { j }, true);
|
||||
common_batch_add(*batch, 0, i, { j }, true);
|
||||
}
|
||||
|
||||
LOGi("llama_decode() text generation: %d", i);
|
||||
@@ -283,9 +283,6 @@ Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
};
|
||||
|
||||
if (embd) {
|
||||
@@ -357,7 +354,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1init(
|
||||
const auto context = reinterpret_cast<llama_context *>(context_pointer);
|
||||
const auto batch = reinterpret_cast<llama_batch *>(batch_pointer);
|
||||
|
||||
const auto tokens_list = llama_tokenize(context, text, 1);
|
||||
const auto tokens_list = common_tokenize(context, text, 1);
|
||||
|
||||
auto n_ctx = llama_n_ctx(context);
|
||||
auto n_kv_req = tokens_list.size() + (n_len - tokens_list.size());
|
||||
@@ -369,14 +366,14 @@ Java_android_llama_cpp_LLamaAndroid_completion_1init(
|
||||
}
|
||||
|
||||
for (auto id : tokens_list) {
|
||||
LOGi("%s", llama_token_to_piece(context, id).c_str());
|
||||
LOGi("%s", common_token_to_piece(context, id).c_str());
|
||||
}
|
||||
|
||||
llama_batch_clear(*batch);
|
||||
common_batch_clear(*batch);
|
||||
|
||||
// evaluate the initial prompt
|
||||
for (auto i = 0; i < tokens_list.size(); i++) {
|
||||
llama_batch_add(*batch, tokens_list[i], i, { 0 }, false);
|
||||
common_batch_add(*batch, tokens_list[i], i, { 0 }, false);
|
||||
}
|
||||
|
||||
// llama_decode will output logits only for the last token of the prompt
|
||||
@@ -419,7 +416,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop(
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto new_token_chars = llama_token_to_piece(context, new_token_id);
|
||||
auto new_token_chars = common_token_to_piece(context, new_token_id);
|
||||
cached_token_chars += new_token_chars;
|
||||
|
||||
jstring new_token = nullptr;
|
||||
@@ -431,8 +428,8 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop(
|
||||
new_token = env->NewStringUTF("");
|
||||
}
|
||||
|
||||
llama_batch_clear(*batch);
|
||||
llama_batch_add(*batch, new_token_id, n_cur, { 0 }, true);
|
||||
common_batch_clear(*batch);
|
||||
common_batch_add(*batch, new_token_id, n_cur, { 0 }, true);
|
||||
|
||||
env->CallVoidMethod(intvar_ncur, la_int_var_inc);
|
||||
|
||||
|
||||
@@ -46,7 +46,6 @@ actor LlamaContext {
|
||||
let sparams = llama_sampler_chain_default_params()
|
||||
self.sampling = llama_sampler_chain_init(sparams)
|
||||
llama_sampler_chain_add(self.sampling, llama_sampler_init_temp(0.4))
|
||||
llama_sampler_chain_add(self.sampling, llama_sampler_init_softmax())
|
||||
llama_sampler_chain_add(self.sampling, llama_sampler_init_dist(1234))
|
||||
}
|
||||
|
||||
|
||||
@@ -1,135 +1,783 @@
|
||||
" Requires an already running llama.cpp server
|
||||
" To install either copy or symlink to ~/.vim/autoload/llama.vim
|
||||
" Then start with either :call llama#doLlamaGen(),
|
||||
" or add a keybind to your vimrc such as
|
||||
" nnoremap Z :call llama#doLlamaGen()<CR>
|
||||
" Similarly, you could add an insert mode keybind with
|
||||
" inoremap <C-B> <Cmd>call llama#doLlamaGen()<CR>
|
||||
" LLM-based text completion using llama.cpp
|
||||
"
|
||||
" g:llama_api_url, g:llama_api_key and g:llama_overrides can be configured in your .vimrc
|
||||
" let g:llama_api_url = "192.168.1.10:8080"
|
||||
" llama_overrides can also be set through buffer/window scopes. For instance
|
||||
" autocmd filetype python let b:llama_overrides = {"temp": 0.2}
|
||||
" Could be added to your .vimrc to automatically set a lower temperature when
|
||||
" editing a python script
|
||||
" Additionally, an override dict can be stored at the top of a file
|
||||
" !*{"stop": ["User:"]}
|
||||
" Could be added to the start of your chatlog.txt to set the stopping token
|
||||
" These parameter dicts are merged together from lowest to highest priority:
|
||||
" server default -> g:llama_overrides -> w:llama_overrides ->
|
||||
" b:llama_overrides -> in file (!*) overrides
|
||||
" requires:
|
||||
"
|
||||
" - neovim or vim
|
||||
" - curl
|
||||
" - llama.cpp server instance
|
||||
" - FIM-compatible model
|
||||
"
|
||||
" sample config:
|
||||
"
|
||||
" - Tab - accept the current suggestion
|
||||
" - Shift+Tab - accept just the first line of the suggestion
|
||||
" - Ctrl+F - toggle FIM completion manually
|
||||
"
|
||||
" make symlink or copy this file to ~/.config/nvim/autoload/llama.vim
|
||||
"
|
||||
" start the llama.cpp server with a FIM-compatible model. for example:
|
||||
"
|
||||
" $ llama-server -m {model.gguf} --port 8012 -ngl 99 -fa -dt 0.1 --ubatch-size 512 --batch-size 1024 --cache-reuse 256
|
||||
"
|
||||
" --batch-size [512, model max context]
|
||||
"
|
||||
" adjust the batch size to control how much of the provided local context will be used during the inference
|
||||
" lower values will use smaller part of the context around the cursor, which will result in faster processing
|
||||
"
|
||||
" --ubatch-size [64, 2048]
|
||||
"
|
||||
" chunks the batch into smaller chunks for faster processing
|
||||
" depends on the specific hardware. use llama-bench to profile and determine the best size
|
||||
"
|
||||
" --cache-reuse (ge:llama_config.n_predict, 1024]
|
||||
"
|
||||
" this should be either 0 (disabled) or strictly larger than g:llama_config.n_predict
|
||||
" using non-zero value enables context reuse on the server side which dramatically improves the performance at
|
||||
" large contexts. a value of 256 should be good for all cases
|
||||
"
|
||||
" run this once to initialise llama.vim:
|
||||
"
|
||||
" :call llama#init()
|
||||
"
|
||||
" more info: https://github.com/ggerganov/llama.cpp/pull/9787
|
||||
"
|
||||
" Sublists (like logit_bias and stop) are overridden, not merged
|
||||
" Example override:
|
||||
" !*{"logit_bias": [[13, -5], [2, false]], "temperature": 1, "top_k": 5, "top_p": 0.5, "n_predict": 256, "repeat_last_n": 256, "repeat_penalty": 1.17647}
|
||||
if !exists("g:llama_api_url")
|
||||
let g:llama_api_url= "127.0.0.1:8080"
|
||||
endif
|
||||
if !exists("g:llama_overrides")
|
||||
let g:llama_overrides = {}
|
||||
endif
|
||||
const s:querydata = {"n_predict": 256, "stop": [ "\n" ], "stream": v:true }
|
||||
const s:curlcommand = ['curl','--data-raw', "{\"prompt\":\"### System:\"}", '--silent', '--no-buffer', '--request', 'POST', '--url', g:llama_api_url .. '/completion', '--header', "Content-Type: application/json"]
|
||||
let s:linedict = {}
|
||||
|
||||
func s:callbackHandler(bufn, channel, msg)
|
||||
if len(a:msg) < 3
|
||||
return
|
||||
elseif a:msg[0] == "d"
|
||||
let l:msg = a:msg[6:-1]
|
||||
else
|
||||
let l:msg = a:msg
|
||||
endif
|
||||
let l:decoded_msg = json_decode(l:msg)
|
||||
let l:newtext = split(l:decoded_msg['content'], "\n", 1)
|
||||
if len(l:newtext) > 0
|
||||
call setbufline(a:bufn, s:linedict[a:bufn], getbufline(a:bufn, s:linedict[a:bufn])[0] .. newtext[0])
|
||||
else
|
||||
echo "nothing genned"
|
||||
endif
|
||||
if len(newtext) > 1
|
||||
let l:failed = appendbufline(a:bufn, s:linedict[a:bufn], newtext[1:-1])
|
||||
let s:linedict[a:bufn] = s:linedict[a:bufn] + len(newtext)-1
|
||||
endif
|
||||
if has_key(l:decoded_msg, "stop") && l:decoded_msg.stop
|
||||
echo "Finished generation"
|
||||
endif
|
||||
endfunction
|
||||
" colors (adjust to your liking)
|
||||
highlight llama_hl_hint guifg=#ff772f ctermfg=202
|
||||
highlight llama_hl_info guifg=#77ff2f ctermfg=119
|
||||
|
||||
func llama#doLlamaGen()
|
||||
if exists("b:job")
|
||||
if job_status(b:job) == "run"
|
||||
call job_stop(b:job)
|
||||
return
|
||||
endif
|
||||
endif
|
||||
" general parameters:
|
||||
"
|
||||
" endpoint: llama.cpp server endpoint
|
||||
" n_prefix: number of lines before the cursor location to include in the local prefix
|
||||
" n_suffix: number of lines after the cursor location to include in the local suffix
|
||||
" n_predict: max number of tokens to predict
|
||||
" t_max_prompt_ms: max alloted time for the prompt processing (TODO: not yet supported)
|
||||
" t_max_predict_ms: max alloted time for the prediction
|
||||
" show_info: show extra info about the inference (0 - disabled, 1 - statusline, 2 - inline)
|
||||
" auto_fim: trigger FIM completion automatically on cursor movement
|
||||
" max_line_suffix: do not auto-trigger FIM completion if there are more than this number of characters to the right of the cursor
|
||||
"
|
||||
" ring buffer of chunks, accumulated with time upon:
|
||||
"
|
||||
" - completion request
|
||||
" - yank
|
||||
" - entering a buffer
|
||||
" - leaving a buffer
|
||||
" - writing a file
|
||||
"
|
||||
" parameters for the ring-buffer with extra context:
|
||||
"
|
||||
" ring_n_chunks: max number of chunks to pass as extra context to the server (0 to disable)
|
||||
" ring_chunk_size: max size of the chunks (in number of lines)
|
||||
" note: adjust these numbers so that you don't overrun your context
|
||||
" at ring_n_chunks = 64 and ring_chunk_size = 64 you need ~32k context
|
||||
" ring_scope: the range around the cursor position (in number of lines) for gathering chunks after FIM
|
||||
" ring_update_ms: how often to process queued chunks in normal mode
|
||||
"
|
||||
let s:default_config = {
|
||||
\ 'endpoint': 'http://127.0.0.1:8012/infill',
|
||||
\ 'n_prefix': 256,
|
||||
\ 'n_suffix': 64,
|
||||
\ 'n_predict': 128,
|
||||
\ 't_max_prompt_ms': 500,
|
||||
\ 't_max_predict_ms': 3000,
|
||||
\ 'show_info': 2,
|
||||
\ 'auto_fim': v:true,
|
||||
\ 'max_line_suffix': 8,
|
||||
\ 'ring_n_chunks': 64,
|
||||
\ 'ring_chunk_size': 64,
|
||||
\ 'ring_scope': 1024,
|
||||
\ 'ring_update_ms': 1000,
|
||||
\ }
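
" sizing note: assuming roughly 8 tokens per line of code, the defaults above allow up to
" ring_n_chunks * ring_chunk_size = 64 * 64 = 4096 lines of extra context, which is roughly
" the ~32k tokens of context mentioned above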
|
||||
|
||||
let l:cbuffer = bufnr("%")
|
||||
let s:linedict[l:cbuffer] = line('$')
|
||||
let l:buflines = getbufline(l:cbuffer, 1, 1000)
|
||||
let l:querydata = copy(s:querydata)
|
||||
call extend(l:querydata, g:llama_overrides)
|
||||
if exists("w:llama_overrides")
|
||||
call extend(l:querydata, w:llama_overrides)
|
||||
endif
|
||||
if exists("b:llama_overrides")
|
||||
call extend(l:querydata, b:llama_overrides)
|
||||
endif
|
||||
if l:buflines[0][0:1] == '!*'
|
||||
let l:userdata = json_decode(l:buflines[0][2:-1])
|
||||
call extend(l:querydata, l:userdata)
|
||||
let l:buflines = l:buflines[1:-1]
|
||||
endif
|
||||
let l:querydata.prompt = join(l:buflines, "\n")
|
||||
let l:curlcommand = copy(s:curlcommand)
|
||||
if exists("g:llama_api_key")
|
||||
call extend(l:curlcommand, ['--header', 'Authorization: Bearer ' .. g:llama_api_key])
|
||||
endif
|
||||
let l:curlcommand[2] = json_encode(l:querydata)
|
||||
let b:job = job_start(l:curlcommand, {"callback": function("s:callbackHandler", [l:cbuffer])})
|
||||
endfunction
|
||||
let g:llama_config = get(g:, 'llama_config', s:default_config)
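
" example: to change the defaults, define the full dict in your vimrc before the plugin is
" loaded - note that get() above does not merge with s:default_config, so a user-supplied
" g:llama_config is expected to provide every key (the values below are just the defaults
" with auto_fim switched off, shown for illustration):
"
"   let g:llama_config = {
"       \ 'endpoint':         'http://127.0.0.1:8012/infill',
"       \ 'n_prefix':         256,
"       \ 'n_suffix':         64,
"       \ 'n_predict':        128,
"       \ 't_max_prompt_ms':  500,
"       \ 't_max_predict_ms': 3000,
"       \ 'show_info':        2,
"       \ 'auto_fim':         v:false,
"       \ 'max_line_suffix':  8,
"       \ 'ring_n_chunks':    64,
"       \ 'ring_chunk_size':  64,
"       \ 'ring_scope':       1024,
"       \ 'ring_update_ms':   1000,
"       \ }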
|
||||
|
||||
" Echos the tokkenization of the provided string , or cursor to end of word
|
||||
" Onus is placed on the user to include the preceding space
|
||||
func llama#tokenizeWord(...)
|
||||
if (a:0 > 0)
|
||||
let l:input = a:1
|
||||
else
|
||||
exe "normal \"*ye"
|
||||
let l:input = @*
|
||||
endif
|
||||
let l:querydata = {"content": l:input}
|
||||
let l:curlcommand = copy(s:curlcommand)
|
||||
let l:curlcommand[2] = json_encode(l:querydata)
|
||||
let l:curlcommand[8] = g:llama_api_url .. "/tokenize"
|
||||
let s:token_job = job_start(l:curlcommand, {"callback": function("s:tokenizeWordCallback", [l:input])})
|
||||
endfunction
|
||||
|
||||
func s:tokenizeWordCallback(plaintext, channel, msg)
|
||||
echo '"' .. a:plaintext ..'" - ' .. string(json_decode(a:msg).tokens)
|
||||
endfunction
|
||||
|
||||
|
||||
" Echos the token count of the entire buffer (or provided string)
|
||||
" Example usage :echo llama#tokenCount()
|
||||
func llama#tokenCount(...)
|
||||
if (a:0 > 0)
|
||||
let l:buflines = a:1
|
||||
else
|
||||
let l:buflines = getline(1,1000)
|
||||
if l:buflines[0][0:1] == '!*'
|
||||
let l:buflines = l:buflines[1:-1]
|
||||
function! s:get_indent(str)
|
||||
let l:count = 0
|
||||
for i in range(len(a:str))
|
||||
if a:str[i] == "\t"
|
||||
let l:count += &tabstop - 1
|
||||
else
|
||||
break
|
||||
endif
|
||||
let l:buflines = join(l:buflines, "\n")
|
||||
endif
|
||||
let l:querydata = {"content": l:buflines}
|
||||
let l:curlcommand = copy(s:curlcommand)
|
||||
let l:curlcommand[2] = json_encode(l:querydata)
|
||||
let l:curlcommand[8] = g:llama_api_url .. "/tokenize"
|
||||
let s:token_job = job_start(l:curlcommand, {"callback": "s:tokenCountCallback"})
|
||||
endfor
|
||||
return l:count
|
||||
endfunction
|
||||
|
||||
func s:tokenCountCallback(channel, msg)
|
||||
let resp = json_decode(a:msg)
|
||||
echo len(resp.tokens)
|
||||
function! s:rand(i0, i1) abort
|
||||
return a:i0 + rand() % (a:i1 - a:i0 + 1)
|
||||
endfunction
|
||||
|
||||
function! llama#init()
|
||||
if !executable('curl')
|
||||
echohl WarningMsg
|
||||
echo 'llama.vim requires the "curl" command to be available'
|
||||
echohl None
|
||||
return
|
||||
endif
|
||||
|
||||
let s:pos_x = 0 " cursor position upon start of completion
|
||||
let s:pos_y = 0
|
||||
|
||||
let s:line_cur = ''
|
||||
|
||||
let s:line_cur_prefix = ''
|
||||
let s:line_cur_suffix = ''
|
||||
|
||||
let s:ring_chunks = [] " current set of chunks used as extra context
|
||||
let s:ring_queued = [] " chunks that are queued to be sent for processing
|
||||
let s:ring_n_evict = 0
|
||||
|
||||
let s:hint_shown = v:false
|
||||
let s:pos_y_pick = -9999 " last y where we picked a chunk
|
||||
let s:pos_dx = 0
|
||||
let s:content = []
|
||||
let s:can_accept = v:false
|
||||
|
||||
let s:timer_fim = -1
|
||||
let s:t_fim_start = reltime() " used to measure total FIM time
|
||||
let s:t_last_move = reltime() " last time the cursor moved
|
||||
|
||||
let s:current_job = v:null
|
||||
|
||||
let s:ghost_text_nvim = exists('*nvim_buf_get_mark')
|
||||
let s:ghost_text_vim = has('textprop')
|
||||
|
||||
if s:ghost_text_vim
|
||||
let s:hlgroup_hint = 'llama_hl_hint'
|
||||
let s:hlgroup_info = 'llama_hl_info'
|
||||
|
||||
if empty(prop_type_get(s:hlgroup_hint))
|
||||
call prop_type_add(s:hlgroup_hint, {'highlight': s:hlgroup_hint})
|
||||
endif
|
||||
if empty(prop_type_get(s:hlgroup_info))
|
||||
call prop_type_add(s:hlgroup_info, {'highlight': s:hlgroup_info})
|
||||
endif
|
||||
endif
|
||||
|
||||
augroup llama
|
||||
autocmd!
|
||||
autocmd InsertEnter * inoremap <expr> <silent> <C-F> llama#fim_inline(v:false)
|
||||
autocmd InsertLeavePre * call llama#fim_cancel()
|
||||
|
||||
autocmd CursorMoved * call s:on_move()
|
||||
autocmd CursorMovedI * call s:on_move()
|
||||
autocmd CompleteChanged * call llama#fim_cancel()
|
||||
|
||||
if g:llama_config.auto_fim
|
||||
autocmd CursorMovedI * call llama#fim(v:true)
|
||||
endif
|
||||
|
||||
" gather chunks upon yanking
|
||||
autocmd TextYankPost * if v:event.operator ==# 'y' | call s:pick_chunk(v:event.regcontents, v:false, v:true) | endif
|
||||
|
||||
" gather chunks upon entering/leaving a buffer
|
||||
autocmd BufEnter * call timer_start(100, {-> s:pick_chunk(getline(max([1, line('.') - g:llama_config.ring_chunk_size/2]), min([line('.') + g:llama_config.ring_chunk_size/2, line('$')])), v:true, v:true)})
|
||||
autocmd BufLeave * call s:pick_chunk(getline(max([1, line('.') - g:llama_config.ring_chunk_size/2]), min([line('.') + g:llama_config.ring_chunk_size/2, line('$')])), v:true, v:true)
|
||||
|
||||
" gather chunk upon saving the file
|
||||
autocmd BufWritePost * call s:pick_chunk(getline(max([1, line('.') - g:llama_config.ring_chunk_size/2]), min([line('.') + g:llama_config.ring_chunk_size/2, line('$')])), v:true, v:true)
|
||||
augroup END
|
||||
|
||||
silent! call llama#fim_cancel()
|
||||
|
||||
" init background update of the ring buffer
|
||||
if g:llama_config.ring_n_chunks > 0
|
||||
call s:ring_update()
|
||||
endif
|
||||
endfunction
|
||||
|
||||
" compute how similar two chunks of text are
|
||||
" 0 - no similarity, 1 - high similarity
|
||||
" TODO: figure out something better
|
||||
function! s:chunk_sim(c0, c1)
|
||||
let l:lines0 = len(a:c0)
|
||||
let l:lines1 = len(a:c1)
|
||||
|
||||
let l:common = 0
|
||||
|
||||
for l:line0 in a:c0
|
||||
for l:line1 in a:c1
|
||||
if l:line0 == l:line1
|
||||
let l:common += 1
|
||||
break
|
||||
endif
|
||||
endfor
|
||||
endfor
|
||||
|
||||
return 2.0 * l:common / (l:lines0 + l:lines1)
|
||||
endfunction
|
||||
|
||||
" pick a random chunk of size g:llama_config.ring_chunk_size from the provided text and queue it for processing
|
||||
"
|
||||
" no_mod - do not pick chunks from buffers with pending changes
|
||||
" do_evict - evict chunks that are very similar to the new one
|
||||
"
|
||||
function! s:pick_chunk(text, no_mod, do_evict)
|
||||
" do not pick chunks from buffers with pending changes or buffers that are not files
|
||||
if a:no_mod && (getbufvar(bufnr('%'), '&modified') || !buflisted(bufnr('%')) || !filereadable(expand('%')))
|
||||
return
|
||||
endif
|
||||
|
||||
" if the extra context option is disabled - do nothing
|
||||
if g:llama_config.ring_n_chunks <= 0
|
||||
return
|
||||
endif
|
||||
|
||||
" don't pick very small chunks
|
||||
if len(a:text) < 3
|
||||
return
|
||||
endif
|
||||
|
||||
if len(a:text) + 1 < g:llama_config.ring_chunk_size
|
||||
let l:chunk = a:text
|
||||
else
|
||||
let l:l0 = s:rand(0, max([0, len(a:text) - g:llama_config.ring_chunk_size/2]))
|
||||
let l:l1 = min([l:l0 + g:llama_config.ring_chunk_size/2, len(a:text)])
|
||||
|
||||
let l:chunk = a:text[l:l0:l:l1]
|
||||
endif
|
||||
|
||||
let l:chunk_str = join(l:chunk, "\n") . "\n"
|
||||
|
||||
" check if this chunk is already added
|
||||
let l:exist = v:false
|
||||
|
||||
for i in range(len(s:ring_chunks))
|
||||
if s:ring_chunks[i].data == l:chunk
|
||||
let l:exist = v:true
|
||||
break
|
||||
endif
|
||||
endfor
|
||||
|
||||
for i in range(len(s:ring_queued))
|
||||
if s:ring_queued[i].data == l:chunk
|
||||
let l:exist = v:true
|
||||
break
|
||||
endif
|
||||
endfor
|
||||
|
||||
if l:exist
|
||||
return
|
||||
endif
|
||||
|
||||
" evict queued chunks that are very similar to the new one
|
||||
for i in range(len(s:ring_queued) - 1, 0, -1)
|
||||
if s:chunk_sim(s:ring_queued[i].data, l:chunk) > 0.9
|
||||
if a:do_evict
|
||||
call remove(s:ring_queued, i)
|
||||
let s:ring_n_evict += 1
|
||||
else
|
||||
return
|
||||
endif
|
||||
endif
|
||||
endfor
|
||||
|
||||
" also from s:ring_chunks
|
||||
for i in range(len(s:ring_chunks) - 1, 0, -1)
|
||||
if s:chunk_sim(s:ring_chunks[i].data, l:chunk) > 0.9
|
||||
if a:do_evict
|
||||
call remove(s:ring_chunks, i)
|
||||
let s:ring_n_evict += 1
|
||||
else
|
||||
return
|
||||
endif
|
||||
endif
|
||||
endfor
|
||||
|
||||
" TODO: become parameter ?
|
||||
if len(s:ring_queued) == 16
|
||||
call remove(s:ring_queued, 0)
|
||||
endif
|
||||
|
||||
call add(s:ring_queued, {'data': l:chunk, 'str': l:chunk_str, 'time': reltime(), 'filename': expand('%')})
|
||||
|
||||
"let &statusline = 'extra context: ' . len(s:ring_chunks) . ' / ' . len(s:ring_queued)
|
||||
endfunction
|
||||
|
||||
" picks a queued chunk, sends it for processing and adds it to s:ring_chunks
|
||||
" called every g:llama_config.ring_update_ms
|
||||
function! s:ring_update()
|
||||
call timer_start(g:llama_config.ring_update_ms, {-> s:ring_update()})
|
||||
|
||||
" update only if in normal mode or if the cursor hasn't moved for a while
|
||||
if mode() !=# 'n' && reltimefloat(reltime(s:t_last_move)) < 3.0
|
||||
return
|
||||
endif
|
||||
|
||||
if len(s:ring_queued) == 0
|
||||
return
|
||||
endif
|
||||
|
||||
" move the first queued chunk to the ring buffer
|
||||
if len(s:ring_chunks) == g:llama_config.ring_n_chunks
|
||||
call remove(s:ring_chunks, 0)
|
||||
endif
|
||||
|
||||
call add(s:ring_chunks, remove(s:ring_queued, 0))
|
||||
|
||||
"let &statusline = 'updated context: ' . len(s:ring_chunks) . ' / ' . len(s:ring_queued)
|
||||
|
||||
" send asynchronous job with the new extra context so that it is ready for the next FIM
|
||||
let l:extra_context = []
|
||||
for l:chunk in s:ring_chunks
|
||||
call add(l:extra_context, {
|
||||
\ 'text': l:chunk.str,
|
||||
\ 'time': l:chunk.time,
|
||||
\ 'filename': l:chunk.filename
|
||||
\ })
|
||||
endfor
|
||||
|
||||
" no samplers needed here
|
||||
let l:request = json_encode({
|
||||
\ 'input_prefix': "",
|
||||
\ 'input_suffix': "",
|
||||
\ 'input_extra': l:extra_context,
|
||||
\ 'prompt': "",
|
||||
\ 'n_predict': 1,
|
||||
\ 'temperature': 0.0,
|
||||
\ 'stream': v:false,
|
||||
\ 'samplers': ["temperature"],
|
||||
\ 'cache_prompt': v:true,
|
||||
\ 't_max_prompt_ms': 1,
|
||||
\ 't_max_predict_ms': 1
|
||||
\ })
|
||||
|
||||
let l:curl_command = [
|
||||
\ "curl",
|
||||
\ "--silent",
|
||||
\ "--no-buffer",
|
||||
\ "--request", "POST",
|
||||
\ "--url", g:llama_config.endpoint,
|
||||
\ "--header", "Content-Type: application/json",
|
||||
\ "--data", l:request
|
||||
\ ]
|
||||
|
||||
" no callbacks because we don't need to process the response
|
||||
if s:ghost_text_nvim
|
||||
call jobstart(l:curl_command, {})
|
||||
elseif s:ghost_text_vim
|
||||
call job_start(l:curl_command, {})
|
||||
endif
|
||||
endfunction
|
||||
|
||||
" necessary for 'inoremap <expr>'
|
||||
function! llama#fim_inline(is_auto) abort
|
||||
call llama#fim(a:is_auto)
|
||||
return ''
|
||||
endfunction
|
||||
|
||||
" the main FIM call
|
||||
" takes local context around the cursor and sends it together with the extra context to the server for completion
|
||||
function! llama#fim(is_auto) abort
|
||||
" we already have a suggestion for the current cursor position
|
||||
if s:hint_shown && !a:is_auto
|
||||
call llama#fim_cancel()
|
||||
return
|
||||
endif
|
||||
|
||||
call llama#fim_cancel()
|
||||
|
||||
" avoid sending repeated requests too fast
|
||||
if reltimefloat(reltime(s:t_fim_start)) < 0.6
|
||||
if s:timer_fim != -1
|
||||
call timer_stop(s:timer_fim)
|
||||
let s:timer_fim = -1
|
||||
endif
|
||||
|
||||
let s:t_fim_start = reltime()
|
||||
let s:timer_fim = timer_start(600, {-> llama#fim(v:true)})
|
||||
return
|
||||
endif
|
||||
|
||||
let s:t_fim_start = reltime()
|
||||
|
||||
let s:content = []
|
||||
let s:can_accept = v:false
|
||||
|
||||
let s:pos_x = col('.') - 1
|
||||
let s:pos_y = line('.')
|
||||
let l:max_y = line('$')
|
||||
|
||||
let l:lines_prefix = getline(max([1, s:pos_y - g:llama_config.n_prefix]), s:pos_y - 1)
|
||||
let l:lines_suffix = getline(s:pos_y + 1, min([l:max_y, s:pos_y + g:llama_config.n_suffix]))
|
||||
|
||||
let s:line_cur = getline('.')
|
||||
|
||||
let s:line_cur_prefix = strpart(s:line_cur, 0, s:pos_x)
|
||||
let s:line_cur_suffix = strpart(s:line_cur, s:pos_x)
|
||||
|
||||
if a:is_auto && len(s:line_cur_suffix) > g:llama_config.max_line_suffix
|
||||
return
|
||||
endif
|
||||
|
||||
let l:prefix = ""
|
||||
\ . join(l:lines_prefix, "\n")
|
||||
\ . "\n"
|
||||
|
||||
let l:prompt = ""
|
||||
\ . s:line_cur_prefix
|
||||
|
||||
let l:suffix = ""
|
||||
\ . s:line_cur_suffix
|
||||
\ . "\n"
|
||||
\ . join(l:lines_suffix, "\n")
|
||||
\ . "\n"
|
||||
|
||||
" prepare the extra context data
|
||||
let l:extra_context = []
|
||||
for l:chunk in s:ring_chunks
|
||||
call add(l:extra_context, {
|
||||
\ 'text': l:chunk.str,
|
||||
\ 'time': l:chunk.time,
|
||||
\ 'filename': l:chunk.filename
|
||||
\ })
|
||||
endfor
|
||||
|
||||
" the indentation of the current line
|
||||
let l:indent = strlen(matchstr(s:line_cur_prefix, '^\s*'))
|
||||
|
||||
let l:request = json_encode({
|
||||
\ 'input_prefix': l:prefix,
|
||||
\ 'input_suffix': l:suffix,
|
||||
\ 'input_extra': l:extra_context,
|
||||
\ 'prompt': l:prompt,
|
||||
\ 'n_predict': g:llama_config.n_predict,
|
||||
\ 'n_indent': l:indent,
|
||||
\ 'top_k': 40,
|
||||
\ 'top_p': 0.99,
|
||||
\ 'stream': v:false,
|
||||
\ 'samplers': ["top_k", "top_p", "infill"],
|
||||
\ 'cache_prompt': v:true,
|
||||
\ 't_max_prompt_ms': g:llama_config.t_max_prompt_ms,
|
||||
\ 't_max_predict_ms': g:llama_config.t_max_predict_ms
|
||||
\ })
|
||||
|
||||
let l:curl_command = [
|
||||
\ "curl",
|
||||
\ "--silent",
|
||||
\ "--no-buffer",
|
||||
\ "--request", "POST",
|
||||
\ "--url", g:llama_config.endpoint,
|
||||
\ "--header", "Content-Type: application/json",
|
||||
\ "--data", l:request
|
||||
\ ]
|
||||
|
||||
if s:current_job != v:null
|
||||
if s:ghost_text_nvim
|
||||
call jobstop(s:current_job)
|
||||
elseif s:ghost_text_vim
|
||||
call job_stop(s:current_job)
|
||||
endif
|
||||
endif
|
||||
|
||||
" send the request asynchronously
|
||||
if s:ghost_text_nvim
|
||||
let s:current_job = jobstart(l:curl_command, {
|
||||
\ 'on_stdout': function('s:fim_on_stdout', [s:pos_x, s:pos_y, a:is_auto]),
|
||||
\ 'on_exit': function('s:fim_on_exit'),
|
||||
\ 'stdout_buffered': v:true
|
||||
\ })
|
||||
elseif s:ghost_text_vim
|
||||
let s:current_job = job_start(l:curl_command, {
|
||||
\ 'out_cb': function('s:fim_on_stdout', [s:pos_x, s:pos_y, a:is_auto]),
|
||||
\ 'exit_cb': function('s:fim_on_exit')
|
||||
\ })
|
||||
endif
|
||||
|
||||
" TODO: per-file location
|
||||
let l:delta_y = abs(s:pos_y - s:pos_y_pick)
|
||||
|
||||
" gather some extra context nearby and process it in the background
|
||||
" only gather chunks if the cursor has moved a lot
|
||||
" TODO: something more clever? reranking?
|
||||
if a:is_auto && l:delta_y > 32
|
||||
" expand the prefix even further
|
||||
call s:pick_chunk(getline(max([1, s:pos_y - g:llama_config.ring_scope]), max([1, s:pos_y - g:llama_config.n_prefix])), v:false, v:false)
|
||||
|
||||
" pick a suffix chunk
|
||||
call s:pick_chunk(getline(min([l:max_y, s:pos_y + g:llama_config.n_suffix]), min([l:max_y, s:pos_y + g:llama_config.n_suffix + g:llama_config.ring_chunk_size])), v:false, v:false)
|
||||
|
||||
let s:pos_y_pick = s:pos_y
|
||||
endif
|
||||
endfunction
|
||||
|
||||
" if first_line == v:true accept only the first line of the response
|
||||
function! llama#fim_accept(first_line)
|
||||
" insert the suggestion at the cursor location
|
||||
if s:can_accept && len(s:content) > 0
|
||||
call setline(s:pos_y, s:line_cur[:(s:pos_x - 1)] . s:content[0])
|
||||
if len(s:content) > 1
|
||||
if !a:first_line
|
||||
call append(s:pos_y, s:content[1:-1])
|
||||
endif
|
||||
endif
|
||||
|
||||
" move the cursor to the end of the accepted text
|
||||
if !a:first_line && len(s:content) > 1
|
||||
call cursor(s:pos_y + len(s:content) - 1, s:pos_x + s:pos_dx + 1)
|
||||
else
|
||||
call cursor(s:pos_y, s:pos_x + len(s:content[0]))
|
||||
endif
|
||||
endif
|
||||
|
||||
call llama#fim_cancel()
|
||||
endfunction
|
||||
|
||||
function! llama#fim_cancel()
|
||||
let s:hint_shown = v:false
|
||||
|
||||
" clear the virtual text
|
||||
let l:bufnr = bufnr('%')
|
||||
|
||||
if s:ghost_text_nvim
|
||||
let l:id_vt_fim = nvim_create_namespace('vt_fim')
|
||||
call nvim_buf_clear_namespace(l:bufnr, l:id_vt_fim, 0, -1)
|
||||
elseif s:ghost_text_vim
|
||||
call prop_remove({'type': s:hlgroup_hint, 'all': v:true})
|
||||
call prop_remove({'type': s:hlgroup_info, 'all': v:true})
|
||||
endif
|
||||
|
||||
" remove the mappings
|
||||
silent! iunmap <buffer> <Tab>
|
||||
silent! iunmap <buffer> <S-Tab>
|
||||
silent! iunmap <buffer> <Esc>
|
||||
endfunction
|
||||
|
||||
function! s:on_move()
|
||||
let s:t_last_move = reltime()
|
||||
|
||||
call llama#fim_cancel()
|
||||
endfunction
|
||||
|
||||
" callback that processes the FIM result from the server and displays the suggestion
|
||||
function! s:fim_on_stdout(pos_x, pos_y, is_auto, job_id, data, event = v:null)
|
||||
if s:ghost_text_nvim
|
||||
let l:raw = join(a:data, "\n")
|
||||
elseif s:ghost_text_vim
|
||||
let l:raw = a:data
|
||||
endif
|
||||
|
||||
if len(l:raw) == 0
|
||||
return
|
||||
endif
|
||||
|
||||
if a:pos_x != col('.') - 1 || a:pos_y != line('.')
|
||||
return
|
||||
endif
|
||||
|
||||
" show the suggestion only in insert mode
|
||||
if mode() !=# 'i'
|
||||
return
|
||||
endif
|
||||
|
||||
let s:pos_x = a:pos_x
|
||||
let s:pos_y = a:pos_y
|
||||
|
||||
let s:can_accept = v:true
|
||||
let l:has_info = v:false
|
||||
|
||||
if s:can_accept && v:shell_error
|
||||
if !a:is_auto
|
||||
call add(s:content, "<| curl error: is the server on? |>")
|
||||
endif
|
||||
let s:can_accept = v:false
|
||||
endif
|
||||
|
||||
let l:n_prompt = 0
|
||||
let l:t_prompt_ms = 1.0
|
||||
let l:s_prompt = 0
|
||||
|
||||
let l:n_predict = 0
|
||||
let l:t_predict_ms = 1.0
|
||||
let l:s_predict = 0
|
||||
|
||||
" get the generated suggestion
|
||||
if s:can_accept
|
||||
let l:response = json_decode(l:raw)
|
||||
|
||||
for l:part in split(get(l:response, 'content', ''), "\n", 1)
|
||||
call add(s:content, l:part)
|
||||
endfor
|
||||
|
||||
" remove trailing new lines
|
||||
while len(s:content) > 0 && s:content[-1] == ""
|
||||
call remove(s:content, -1)
|
||||
endwhile
|
||||
|
||||
let l:generation_settings = get(l:response, 'generation_settings', {})
|
||||
let l:n_ctx = get(l:generation_settings, 'n_ctx', 0)
|
||||
|
||||
let l:n_cached = get(l:response, 'tokens_cached', 0)
|
||||
let l:truncated = get(l:response, 'truncated', v:false)
|
||||
|
||||
" if response.timings is available
|
||||
if len(get(l:response, 'timings', {})) > 0
|
||||
let l:has_info = v:true
|
||||
let l:timings = get(l:response, 'timings', {})
|
||||
|
||||
let l:n_prompt = get(l:timings, 'prompt_n', 0)
|
||||
let l:t_prompt_ms = get(l:timings, 'prompt_ms', 1)
|
||||
let l:s_prompt = get(l:timings, 'prompt_per_second', 0)
|
||||
|
||||
let l:n_predict = get(l:timings, 'predicted_n', 0)
|
||||
let l:t_predict_ms = get(l:timings, 'predicted_ms', 1)
|
||||
let l:s_predict = get(l:timings, 'predicted_per_second', 0)
|
||||
endif
|
||||
endif
|
||||
|
||||
if len(s:content) == 0
|
||||
call add(s:content, "")
|
||||
let s:can_accept = v:false
|
||||
endif
|
||||
|
||||
if len(s:content) == 0
|
||||
return
|
||||
endif
|
||||
|
||||
" NOTE: the following is logic for discarding predictions that repeat existing text
|
||||
" the code is quite ugly and there is very likely a simpler and more canonical way to implement this
|
||||
"
|
||||
" still, I wonder if there is some better way that avoids having to do these special hacks?
|
||||
" on one hand, the LLM 'sees' the contents of the file before we start editing, so it is normal that it would
|
||||
" start generating whatever we have given it via the extra context. but on the other hand, it's not very
|
||||
" helpful to re-generate the same code that is already there
|
||||
|
||||
" truncate the suggestion if the first line is empty
|
||||
if len(s:content) == 1 && s:content[0] == ""
|
||||
let s:content = [""]
|
||||
endif
|
||||
|
||||
" ... and the next lines are repeated
|
||||
if len(s:content) > 1 && s:content[0] == "" && s:content[1:] == getline(s:pos_y + 1, s:pos_y + len(s:content) - 1)
|
||||
let s:content = [""]
|
||||
endif
|
||||
|
||||
" truncate the suggestion if it repeats the suffix
|
||||
if len(s:content) == 1 && s:content[0] == s:line_cur_suffix
|
||||
let s:content = [""]
|
||||
endif
|
||||
|
||||
" find the first non-empty line (strip whitespace)
|
||||
let l:cmp_y = s:pos_y + 1
|
||||
while l:cmp_y < line('$') && getline(l:cmp_y) =~? '^\s*$'
|
||||
let l:cmp_y += 1
|
||||
endwhile
|
||||
|
||||
if (s:line_cur_prefix . s:content[0]) == getline(l:cmp_y)
|
||||
" truncate the suggestion if it repeats the next line
|
||||
if len(s:content) == 1
|
||||
let s:content = [""]
|
||||
endif
|
||||
|
||||
" ... or if the second line of the suggestion is the prefix of line l:cmp_y + 1
|
||||
if len(s:content) == 2 && s:content[-1] == getline(l:cmp_y + 1)[:len(s:content[-1]) - 1]
|
||||
let s:content = [""]
|
||||
endif
|
||||
|
||||
" ... or if the middle chunk of lines of the suggestion is the same as [l:cmp_y + 1, l:cmp_y + len(s:content) - 1)
|
||||
if len(s:content) > 2 && join(s:content[1:-1], "\n") == join(getline(l:cmp_y + 1, l:cmp_y + len(s:content) - 1), "\n")
|
||||
let s:content = [""]
|
||||
endif
|
||||
endif
|
||||
|
||||
" keep only lines that have the same or larger whitespace prefix as s:line_cur_prefix
|
||||
"let l:indent = strlen(matchstr(s:line_cur_prefix, '^\s*'))
|
||||
"for i in range(1, len(s:content) - 1)
|
||||
" if strlen(matchstr(s:content[i], '^\s*')) < l:indent
|
||||
" let s:content = s:content[:i - 1]
|
||||
" break
|
||||
" endif
|
||||
"endfor
|
||||
|
||||
let s:pos_dx = len(s:content[-1])
|
||||
|
||||
let s:content[-1] .= s:line_cur_suffix
|
||||
|
||||
call llama#fim_cancel()
|
||||
|
||||
" display virtual text with the suggestion
|
||||
let l:bufnr = bufnr('%')
|
||||
|
||||
if s:ghost_text_nvim
|
||||
let l:id_vt_fim = nvim_create_namespace('vt_fim')
|
||||
endif
|
||||
|
||||
" construct the info message
|
||||
if g:llama_config.show_info > 0 && l:has_info
|
||||
let l:prefix = ' '
|
||||
|
||||
if l:truncated
|
||||
let l:info = printf("%s | WARNING: the context is full: %d / %d, increase the server context size or reduce g:llama_config.ring_n_chunks",
|
||||
\ g:llama_config.show_info == 2 ? l:prefix : 'llama.vim',
|
||||
\ l:n_cached, l:n_ctx
|
||||
\ )
|
||||
else
|
||||
let l:info = printf("%s | c: %d / %d, r: %d / %d, e: %d, q: %d / 16 | p: %d (%.2f ms, %.2f t/s) | g: %d (%.2f ms, %.2f t/s) | t: %.2f ms",
|
||||
\ g:llama_config.show_info == 2 ? l:prefix : 'llama.vim',
|
||||
\ l:n_cached, l:n_ctx, len(s:ring_chunks), g:llama_config.ring_n_chunks, s:ring_n_evict, len(s:ring_queued),
|
||||
\ l:n_prompt, l:t_prompt_ms, l:s_prompt,
|
||||
\ l:n_predict, l:t_predict_ms, l:s_predict,
|
||||
\ 1000.0 * reltimefloat(reltime(s:t_fim_start))
|
||||
\ )
|
||||
endif
|
||||
|
||||
if g:llama_config.show_info == 1
|
||||
" display the info in the statusline
|
||||
let &statusline = l:info
|
||||
let l:info = ''
|
||||
endif
|
||||
endif
|
||||
|
||||
" display the suggestion and append the info to the end of the first line
|
||||
if s:ghost_text_nvim
|
||||
call nvim_buf_set_extmark(l:bufnr, l:id_vt_fim, s:pos_y - 1, s:pos_x - 1, {
|
||||
\ 'virt_text': [[s:content[0], 'llama_hl_hint'], [l:info, 'llama_hl_info']],
|
||||
\ 'virt_text_win_col': virtcol('.') - 1
|
||||
\ })
|
||||
|
||||
call nvim_buf_set_extmark(l:bufnr, l:id_vt_fim, s:pos_y - 1, 0, {
|
||||
\ 'virt_lines': map(s:content[1:], {idx, val -> [[val, 'llama_hl_hint']]}),
|
||||
\ 'virt_text_win_col': virtcol('.')
|
||||
\ })
|
||||
elseif s:ghost_text_vim
|
||||
let l:new_suffix = s:content[0]
|
||||
if !empty(l:new_suffix)
|
||||
call prop_add(s:pos_y, s:pos_x + 1, {
|
||||
\ 'type': s:hlgroup_hint,
|
||||
\ 'text': l:new_suffix
|
||||
\ })
|
||||
endif
|
||||
for line in s:content[1:]
|
||||
call prop_add(s:pos_y, 0, {
|
||||
\ 'type': s:hlgroup_hint,
|
||||
\ 'text': line,
|
||||
\ 'text_padding_left': s:get_indent(line),
|
||||
\ 'text_align': 'below'
|
||||
\ })
|
||||
endfor
|
||||
if !empty(l:info)
|
||||
call prop_add(s:pos_y, 0, {
|
||||
\ 'type': s:hlgroup_info,
|
||||
\ 'text': l:info,
|
||||
\ 'text_padding_left': col('$'),
|
||||
\ 'text_wrap': 'truncate'
|
||||
\ })
|
||||
endif
|
||||
endif
|
||||
|
||||
" setup accept shortcuts
|
||||
inoremap <buffer> <Tab> <C-O>:call llama#fim_accept(v:false)<CR>
|
||||
inoremap <buffer> <S-Tab> <C-O>:call llama#fim_accept(v:true)<CR>
|
||||
|
||||
let s:hint_shown = v:true
|
||||
endfunction
|
||||
|
||||
function! s:fim_on_exit(job_id, exit_code, event = v:null)
|
||||
if a:exit_code != 0
|
||||
echom "Job failed with exit code: " . a:exit_code
|
||||
endif
|
||||
|
||||
let s:current_job = v:null
|
||||
endfunction
|
||||
|
||||
@@ -20,7 +20,7 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_toke
|
||||
if (n_eval > n_batch) {
|
||||
n_eval = n_batch;
|
||||
}
|
||||
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
|
||||
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
|
||||
LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
|
||||
return false;
|
||||
}
|
||||
@@ -37,21 +37,21 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
|
||||
|
||||
static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
|
||||
std::string str2 = str;
|
||||
std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
|
||||
std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
|
||||
eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
|
||||
return true;
|
||||
}
|
||||
|
||||
static const char * sample(struct gpt_sampler * smpl,
|
||||
static const char * sample(struct common_sampler * smpl,
|
||||
struct llama_context * ctx_llama,
|
||||
int * n_past) {
|
||||
const llama_token id = gpt_sampler_sample(smpl, ctx_llama, -1);
|
||||
gpt_sampler_accept(smpl, id, true);
|
||||
const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
|
||||
common_sampler_accept(smpl, id, true);
|
||||
static std::string ret;
|
||||
if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
|
||||
ret = "</s>";
|
||||
} else {
|
||||
ret = llama_token_to_piece(ctx_llama, id);
|
||||
ret = common_token_to_piece(ctx_llama, id);
|
||||
}
|
||||
eval_id(ctx_llama, id, n_past);
|
||||
return ret.c_str();
|
||||
@@ -120,7 +120,7 @@ static void print_usage(int, char ** argv) {
|
||||
LOG("\n note: a lower temperature value like 0.1 is recommended for better quality.\n");
|
||||
}
|
||||
|
||||
static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params, const std::string & fname) {
|
||||
static struct llava_image_embed * load_image(llava_context * ctx_llava, common_params * params, const std::string & fname) {
|
||||
|
||||
// load and preprocess the image
|
||||
llava_image_embed * embed = NULL;
|
||||
@@ -146,7 +146,7 @@ static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_para
|
||||
return embed;
|
||||
}
|
||||
|
||||
static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const std::string & prompt) {
|
||||
static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, common_params * params, const std::string & prompt) {
|
||||
int n_past = 0;
|
||||
|
||||
const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;
|
||||
@@ -159,16 +159,16 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
|
||||
user_prompt = prompt.substr(image_pos + std::string("<image>").length());
|
||||
LOG_INF("system_prompt: %s\n", system_prompt.c_str());
|
||||
if (params->verbose_prompt) {
|
||||
auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
|
||||
auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
|
||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
||||
}
|
||||
}
|
||||
LOG_INF("user_prompt: %s\n", user_prompt.c_str());
|
||||
if (params->verbose_prompt) {
|
||||
auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
|
||||
auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
|
||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -176,9 +176,9 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
|
||||
system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
|
||||
user_prompt = prompt + "\nASSISTANT:";
|
||||
if (params->verbose_prompt) {
|
||||
auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
|
||||
auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
|
||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -191,7 +191,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
|
||||
|
||||
LOG("\n");
|
||||
|
||||
struct gpt_sampler * smpl = gpt_sampler_init(ctx_llava->model, params->sparams);
|
||||
struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sparams);
|
||||
if (!smpl) {
|
||||
LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
|
||||
exit(1);
|
||||
@@ -211,15 +211,15 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
LOG("\n");
|
||||
}
|
||||
|
||||
static struct llama_model * llava_init(gpt_params * params) {
|
||||
static struct llama_model * llava_init(common_params * params) {
|
||||
llama_backend_init();
|
||||
llama_numa_init(params->numa);
|
||||
|
||||
llama_model_params model_params = llama_model_params_from_gpt_params(*params);
|
||||
llama_model_params model_params = common_model_params_to_llama(*params);
|
||||
|
||||
llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
|
||||
if (model == NULL) {
|
||||
@@ -229,7 +229,7 @@ static struct llama_model * llava_init(gpt_params * params) {
|
||||
return model;
|
||||
}
|
||||
|
||||
static struct llava_context * llava_init_context(gpt_params * params, llama_model * model) {
|
||||
static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
|
||||
const char * clip_path = params->mmproj.c_str();
|
||||
|
||||
auto prompt = params->prompt;
|
||||
@@ -240,7 +240,7 @@ static struct llava_context * llava_init_context(gpt_params * params, llama_mode
|
||||
auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
|
||||
|
||||
|
||||
llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
|
||||
llama_context_params ctx_params = common_context_params_to_llama(*params);
|
||||
ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings
|
||||
|
||||
llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);
|
||||
@@ -272,13 +272,13 @@ static void llava_free(struct llava_context * ctx_llava) {
|
||||
int main(int argc, char ** argv) {
|
||||
ggml_time_init();
|
||||
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
|
||||
print_usage(argc, argv);
|
||||
|
||||
@@ -401,6 +401,39 @@ bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, co
|
||||
return true;
|
||||
}
|
||||
|
||||
struct llava_embd_batch {
|
||||
std::vector<llama_pos> pos;
|
||||
std::vector<int32_t> n_seq_id;
|
||||
std::vector<llama_seq_id> seq_id_0;
|
||||
std::vector<llama_seq_id *> seq_ids;
|
||||
std::vector<int8_t> logits;
|
||||
llama_batch batch;
|
||||
llava_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
|
||||
pos .resize(n_tokens);
|
||||
n_seq_id.resize(n_tokens);
|
||||
seq_ids .resize(n_tokens + 1);
|
||||
logits .resize(n_tokens);
|
||||
seq_id_0.resize(1);
|
||||
seq_id_0[0] = seq_id;
|
||||
seq_ids [n_tokens] = nullptr;
|
||||
batch = {
|
||||
/*n_tokens =*/ n_tokens,
|
||||
/*tokens =*/ nullptr,
|
||||
/*embd =*/ embd,
|
||||
/*pos =*/ pos.data(),
|
||||
/*n_seq_id =*/ n_seq_id.data(),
|
||||
/*seq_id =*/ seq_ids.data(),
|
||||
/*logits =*/ logits.data(),
|
||||
};
|
||||
for (int i = 0; i < n_tokens; i++) {
|
||||
batch.pos [i] = pos_0 + i;
|
||||
batch.n_seq_id[i] = 1;
|
||||
batch.seq_id [i] = seq_id_0.data();
|
||||
batch.logits [i] = false;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) {
|
||||
int n_embd = llama_n_embd(llama_get_model(ctx_llama));
|
||||
|
||||
@@ -409,8 +442,9 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_
|
||||
if (n_eval > n_batch) {
|
||||
n_eval = n_batch;
|
||||
}
|
||||
llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, };
|
||||
if (llama_decode(ctx_llama, batch)) {
|
||||
float * embd = image_embed->embed+i*n_embd;
|
||||
llava_embd_batch llava_batch = llava_embd_batch(embd, n_eval, *n_past, 0);
|
||||
if (llama_decode(ctx_llama, llava_batch.batch)) {
|
||||
LOG_ERR("%s : failed to eval\n", __func__);
|
||||
return false;
|
||||
}
|
||||
@@ -432,7 +466,7 @@ struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * c
|
||||
bool image_embed_result = llava_image_embed_make_with_clip_img(ctx_clip, n_threads, img, &image_embed, &n_image_pos);
|
||||
if (!image_embed_result) {
|
||||
clip_image_u8_free(img);
|
||||
LOG_ERR("%s: coulnd't embed the image\n", __func__);
|
||||
LOG_ERR("%s: couldn't embed the image\n", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
@@ -25,11 +25,11 @@ static void show_additional_info(int /*argc*/, char ** argv) {
|
||||
LOG("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
|
||||
}
|
||||
|
||||
static struct llama_model * llava_init(gpt_params * params) {
|
||||
static struct llama_model * llava_init(common_params * params) {
|
||||
llama_backend_init();
|
||||
llama_numa_init(params->numa);
|
||||
|
||||
llama_model_params model_params = llama_model_params_from_gpt_params(*params);
|
||||
llama_model_params model_params = common_model_params_to_llama(*params);
|
||||
|
||||
llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
|
||||
if (model == NULL) {
|
||||
@@ -39,13 +39,13 @@ static struct llama_model * llava_init(gpt_params * params) {
|
||||
return model;
|
||||
}
|
||||
|
||||
static struct llava_context * llava_init_context(gpt_params * params, llama_model * model) {
|
||||
static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
|
||||
auto prompt = params->prompt;
|
||||
if (prompt.empty()) {
|
||||
prompt = "describe the image in detail.";
|
||||
}
|
||||
|
||||
llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
|
||||
llama_context_params ctx_params = common_context_params_to_llama(*params);
|
||||
if (params->n_ctx < 2048) {
|
||||
// warn user here, "Image processing requires at least 2048 context, setting context to 2048"
|
||||
LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n" , __func__);
|
||||
@@ -79,7 +79,7 @@ static void llava_free(struct llava_context * ctx_llava) {
|
||||
llama_backend_free();
|
||||
}
|
||||
|
||||
static struct clip_ctx * clip_init_context(gpt_params * params) {
|
||||
static struct clip_ctx * clip_init_context(common_params * params) {
|
||||
const char * clip_path = params->mmproj.c_str();
|
||||
|
||||
auto prompt = params->prompt;
|
||||
@@ -97,7 +97,7 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_toke
|
||||
if (n_eval > n_batch) {
|
||||
n_eval = n_batch;
|
||||
}
|
||||
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
|
||||
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
|
||||
LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
|
||||
return false;
|
||||
}
|
||||
@@ -114,7 +114,7 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
|
||||
|
||||
static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
|
||||
std::string str2 = str;
|
||||
std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
|
||||
std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
|
||||
return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
|
||||
}
|
||||
|
||||
@@ -129,7 +129,7 @@ static void process_eval_image_embed(struct llava_context * ctx_llava, const str
|
||||
llava_image_embed_free(slice_embed);
|
||||
}
|
||||
|
||||
static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, gpt_params * params, int &n_past) {
|
||||
static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, common_params * params, int &n_past) {
|
||||
std::string system_prompt;
|
||||
int idx = 0;
|
||||
int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
|
||||
@@ -162,22 +162,22 @@ static void process_image(struct llava_context * ctx_llava, struct llava_image_e
|
||||
LOG_INF("%s: image token past: %d\n", __func__, n_past);
|
||||
}
|
||||
|
||||
static const char * sample(struct gpt_sampler * smpl,
|
||||
static const char * sample(struct common_sampler * smpl,
|
||||
struct llama_context * ctx_llama,
|
||||
int * n_past) {
|
||||
const llama_token id = gpt_sampler_sample(smpl, ctx_llama, -1);
|
||||
gpt_sampler_accept(smpl, id, true);
|
||||
const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
|
||||
common_sampler_accept(smpl, id, true);
|
||||
static std::string ret;
|
||||
if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
|
||||
ret = "</s>";
|
||||
} else {
|
||||
ret = llama_token_to_piece(ctx_llama, id);
|
||||
ret = common_token_to_piece(ctx_llama, id);
|
||||
}
|
||||
eval_id(ctx_llama, id, n_past);
|
||||
return ret.c_str();
|
||||
}
|
||||
|
||||
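Editor's note: taken together, the renamed `common_*` helpers above form the usual sampling loop. A minimal sketch follows (illustration only; it assumes `model`, `ctx` and `params` are initialized and the prompt has already been decoded, as in the surrounding examples):

```cpp
struct common_sampler * smpl = common_sampler_init(model, params.sparams);

for (int i = 0; i < params.n_predict; ++i) {
    // sample from the logits of the last decoded token and update the sampler state
    llama_token id = common_sampler_sample(smpl, ctx, -1);
    common_sampler_accept(smpl, id, /* accept_grammar= */ true);

    if (llama_token_is_eog(model, id)) {
        break; // end of generation
    }

    printf("%s", common_token_to_piece(ctx, id).c_str());
    fflush(stdout);

    // feed the sampled token back in to get logits for the next step
    if (llama_decode(ctx, llama_batch_get_one(&id, 1))) {
        break; // decode failed
    }
}

common_sampler_free(smpl);
```
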
static struct llava_context * minicpmv_init(gpt_params * params, const std::string & fname, int &n_past){
|
||||
static struct llava_context * minicpmv_init(common_params * params, const std::string & fname, int &n_past){
|
||||
auto * ctx_clip = clip_init_context(params);
|
||||
auto * embeds = llava_image_embed_make_with_filename(ctx_clip, params->cpuparams.n_threads, fname.c_str());
|
||||
if (!embeds) {
|
||||
@@ -213,7 +213,7 @@ static struct llava_context * minicpmv_init(gpt_params * params, const std::stri
|
||||
return ctx_llava;
|
||||
}
|
||||
|
||||
static struct gpt_sampler * llama_init(struct llava_context * ctx_llava, gpt_params * params, const std::string & prompt, int & n_past, bool is_first = false){
|
||||
static struct common_sampler * llama_init(struct llava_context * ctx_llava, common_params * params, const std::string & prompt, int & n_past, bool is_first = false){
|
||||
std::string user_prompt = prompt;
|
||||
int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
|
||||
if (!is_first) {
|
||||
@@ -237,11 +237,11 @@ static struct gpt_sampler * llama_init(struct llava_context * ctx_llava, gpt_par
|
||||
|
||||
LOG_INF("\n");
|
||||
|
||||
struct gpt_sampler * smpl = gpt_sampler_init(ctx_llava->model, params->sparams);
|
||||
struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sparams);
|
||||
return smpl;
|
||||
}
|
||||
|
||||
static const char * llama_loop(struct llava_context * ctx_llava,struct gpt_sampler * smpl, int &n_past){
|
||||
static const char * llama_loop(struct llava_context * ctx_llava,struct common_sampler * smpl, int &n_past){
|
||||
|
||||
const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
|
||||
return tmp;
|
||||
@@ -250,13 +250,13 @@ static const char * llama_loop(struct llava_context * ctx_llava,struct gpt_sampl
|
||||
int main(int argc, char ** argv) {
|
||||
ggml_time_init();
|
||||
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
if (params.mmproj.empty() || (params.image.empty())) {
|
||||
show_additional_info(argc, argv);
|
||||
@@ -290,7 +290,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
fflush(stdout);
|
||||
}
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
}else {
|
||||
while (true) {
|
||||
LOG("<user>");
|
||||
@@ -309,7 +309,7 @@ int main(int argc, char ** argv) {
|
||||
if (strstr(response.c_str(), "<user>")) break; // minicpm-v
|
||||
fflush(stdout);
|
||||
}
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
}
|
||||
}
|
||||
printf("\n");
|
||||
|
||||
@@ -37,13 +37,13 @@ struct ngram_container {
|
||||
};
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
const int W = 15; // lookahead window
|
||||
const int N = 5; // n-gram size
|
||||
@@ -56,7 +56,7 @@ int main(int argc, char ** argv) {
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the target model
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -65,7 +65,7 @@ int main(int argc, char ** argv) {
|
||||
std::vector<llama_token> inp;
|
||||
std::vector<llama_token> all;
|
||||
|
||||
inp = ::llama_tokenize(ctx, params.prompt, true, true);
|
||||
inp = common_tokenize(ctx, params.prompt, true, true);
|
||||
all = inp;
|
||||
|
||||
const int max_context_size = llama_n_ctx(ctx);
|
||||
@@ -79,7 +79,7 @@ int main(int argc, char ** argv) {
|
||||
LOG("\n\n");
|
||||
|
||||
for (auto id : inp) {
|
||||
LOG("%s", llama_token_to_piece(ctx, id).c_str());
|
||||
LOG("%s", common_token_to_piece(ctx, id).c_str());
|
||||
}
|
||||
|
||||
fflush(stderr);
|
||||
@@ -89,8 +89,8 @@ int main(int argc, char ** argv) {
|
||||
const auto t_enc_start = ggml_time_us();
|
||||
|
||||
// eval the prompt
|
||||
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
|
||||
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
|
||||
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1));
|
||||
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1));
|
||||
|
||||
for (int s = 1; s < W + G + 1; ++s) {
|
||||
llama_kv_cache_seq_cp(ctx, 0, s, -1, -1);
|
||||
@@ -115,7 +115,7 @@ int main(int argc, char ** argv) {
|
||||
llama_batch batch = llama_batch_init(params.n_ctx, 0, W + G + 1);
|
||||
|
||||
// target model sampling context
|
||||
struct gpt_sampler * smpl = gpt_sampler_init(model, params.sparams);
|
||||
struct common_sampler * smpl = common_sampler_init(model, params.sparams);
|
||||
|
||||
// verification n-grams
|
||||
std::vector<ngram_data> ngrams_cur(G);
|
||||
@@ -156,12 +156,12 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// sample first token
|
||||
{
|
||||
id = gpt_sampler_sample(smpl, ctx, 0);
|
||||
id = common_sampler_sample(smpl, ctx, 0);
|
||||
|
||||
gpt_sampler_accept(smpl, id, true);
|
||||
common_sampler_accept(smpl, id, true);
|
||||
|
||||
{
|
||||
const std::string token_str = llama_token_to_piece(ctx, id);
|
||||
const std::string token_str = common_token_to_piece(ctx, id);
|
||||
|
||||
LOG("%s", token_str.c_str());
|
||||
fflush(stdout);
|
||||
@@ -172,7 +172,7 @@ int main(int argc, char ** argv) {
|
||||
// debug
|
||||
if (dump_kv_cache) {
|
||||
llama_kv_cache_view_update(ctx, &kvc_view);
|
||||
llama_kv_cache_dump_view_seqs(kvc_view, 40);
|
||||
common_kv_cache_dump_view_seqs(kvc_view, 40);
|
||||
}
|
||||
|
||||
// build the mask from https://lmsys.org/blog/2023-11-21-lookahead-decoding/
|
||||
@@ -201,10 +201,10 @@ int main(int argc, char ** argv) {
|
||||
// V V V V V V
|
||||
// id
|
||||
{
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
// current token - first token of the first level
|
||||
llama_batch_add(batch, id, n_past, seq_id_all, true);
|
||||
common_batch_add(batch, id, n_past, seq_id_all, true);
|
||||
|
||||
// verification n-grams - queue this before the lookahead tokens for less KV cache fragmentation
|
||||
{
|
||||
@@ -229,7 +229,7 @@ int main(int argc, char ** argv) {
|
||||
ngrams_cur[g].tokens [j + 1] = t;
|
||||
ngrams_cur[g].i_batch[j + 1] = batch.n_tokens;
|
||||
|
||||
llama_batch_add(batch, t, n_past + j + 1, { W + 1 + g }, true);
|
||||
common_batch_add(batch, t, n_past + j + 1, { W + 1 + g }, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -241,13 +241,13 @@ int main(int argc, char ** argv) {
|
||||
seq_id_look[j] = i + j + 1;
|
||||
}
|
||||
|
||||
llama_batch_add(batch, tokens_j[0][i], n_past + i, seq_id_look, false);
|
||||
common_batch_add(batch, tokens_j[0][i], n_past + i, seq_id_look, false);
|
||||
}
|
||||
|
||||
// fill the rest of the levels
|
||||
for (int j = 1; j < N - 1; j++) {
|
||||
for (int i = 0; i < W; i++) {
|
||||
llama_batch_add(batch, tokens_j[j][i], n_past + j + i, { i + 1 }, j == N - 2);
|
||||
common_batch_add(batch, tokens_j[j][i], n_past + j + i, { i + 1 }, j == N - 2);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -281,13 +281,13 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
|
||||
// sample the next token
|
||||
id = gpt_sampler_sample(smpl, ctx, i_batch);
|
||||
id = common_sampler_sample(smpl, ctx, i_batch);
|
||||
|
||||
gpt_sampler_accept(smpl, id, true);
|
||||
common_sampler_accept(smpl, id, true);
|
||||
|
||||
// print
|
||||
{
|
||||
const std::string token_str = llama_token_to_piece(ctx, id);
|
||||
const std::string token_str = common_token_to_piece(ctx, id);
|
||||
|
||||
if (v == 0) {
|
||||
LOG("%s", token_str.c_str());
|
||||
@@ -327,7 +327,7 @@ int main(int argc, char ** argv) {
|
||||
// print known n-grams starting with token id (debug)
|
||||
if (0 && v == 0) {
|
||||
if (ngrams_observed.cnt[id] > 0) {
|
||||
LOG("\n - %d n-grams starting with '%s'\n", ngrams_observed.cnt[id], llama_token_to_piece(ctx, id).c_str());
|
||||
LOG("\n - %d n-grams starting with '%s'\n", ngrams_observed.cnt[id], common_token_to_piece(ctx, id).c_str());
|
||||
}
|
||||
|
||||
for (int i = 0; i < ngrams_observed.cnt[id]; i++) {
|
||||
@@ -336,7 +336,7 @@ int main(int argc, char ** argv) {
|
||||
const int idx = id*(N - 1)*G + i*(N - 1);
|
||||
|
||||
for (int j = 0; j < N - 1; j++) {
|
||||
const std::string token_str = llama_token_to_piece(ctx, ngrams_observed.tokens[idx + j]);
|
||||
const std::string token_str = common_token_to_piece(ctx, ngrams_observed.tokens[idx + j]);
|
||||
|
||||
LOG("%s", token_str.c_str());
|
||||
}
|
||||
@@ -358,7 +358,7 @@ int main(int argc, char ** argv) {
|
||||
if (v == 0) {
|
||||
// sample from the last level
|
||||
for (int i = 0; i < W; i++) {
|
||||
tokens_j[N - 2][i] = gpt_sampler_sample(smpl, ctx, ngrams_cur.size()*(N-1) + W*(N - 2) + i);
|
||||
tokens_j[N - 2][i] = common_sampler_sample(smpl, ctx, ngrams_cur.size()*(N-1) + W*(N - 2) + i);
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < W; i++) {
|
||||
@@ -466,9 +466,9 @@ int main(int argc, char ** argv) {
|
||||
LOG_INF("n_accept = %d\n", n_accept);
|
||||
|
||||
LOG_INF("\n");
|
||||
gpt_perf_print(ctx, smpl);
|
||||
common_perf_print(ctx, smpl);
|
||||
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
|
||||
llama_kv_cache_view_free(&kvc_view);
|
||||
|
||||
|
||||
@@ -12,9 +12,9 @@
|
||||
#include <vector>
|
||||
|
||||
int main(int argc, char ** argv){
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ int main(int argc, char ** argv){
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the model
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -31,15 +31,15 @@ int main(int argc, char ** argv){
|
||||
|
||||
// tokenize the prompt
|
||||
std::vector<llama_token> inp;
|
||||
inp = ::llama_tokenize(ctx, params.prompt, true, true);
|
||||
inp = common_tokenize(ctx, params.prompt, true, true);
|
||||
fprintf(stderr, "%s: tokenization done\n", __func__);
|
||||
|
||||
|
||||
llama_ngram_cache ngram_cache;
|
||||
llama_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true);
|
||||
common_ngram_cache ngram_cache;
|
||||
common_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true);
|
||||
fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.lookup_cache_static.c_str());
|
||||
|
||||
llama_ngram_cache_save(ngram_cache, params.lookup_cache_static);
|
||||
common_ngram_cache_save(ngram_cache, params.lookup_cache_static);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -33,15 +33,15 @@ int main(int argc, char ** argv){
|
||||
}
|
||||
|
||||
fprintf(stderr, "lookup-merge: loading file %s\n", args[0].c_str());
|
||||
llama_ngram_cache ngram_cache_merged = llama_ngram_cache_load(args[0]);
|
||||
common_ngram_cache ngram_cache_merged = common_ngram_cache_load(args[0]);
|
||||
|
||||
for (size_t i = 1; i < args.size()-1; ++i) {
|
||||
fprintf(stderr, "lookup-merge: loading file %s\n", args[i].c_str());
|
||||
llama_ngram_cache ngram_cache = llama_ngram_cache_load(args[i]);
|
||||
common_ngram_cache ngram_cache = common_ngram_cache_load(args[i]);
|
||||
|
||||
llama_ngram_cache_merge(ngram_cache_merged, ngram_cache);
|
||||
common_ngram_cache_merge(ngram_cache_merged, ngram_cache);
|
||||
}
|
||||
|
||||
fprintf(stderr, "lookup-merge: saving file %s\n", args.back().c_str());
|
||||
llama_ngram_cache_save(ngram_cache_merged, args.back());
|
||||
common_ngram_cache_save(ngram_cache_merged, args.back());
|
||||
}
|
||||
|
||||
@@ -13,13 +13,13 @@
|
||||
#include <vector>
|
||||
|
||||
int main(int argc, char ** argv){
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
const int n_draft = params.n_draft;
|
||||
|
||||
@@ -28,18 +28,18 @@ int main(int argc, char ** argv){
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the model
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
|
||||
// tokenize the prompt
|
||||
std::vector<llama_token> inp;
|
||||
inp = ::llama_tokenize(ctx, params.prompt, true, true);
|
||||
inp = common_tokenize(ctx, params.prompt, true, true);
|
||||
|
||||
llama_ngram_cache ngram_cache_context;
|
||||
llama_ngram_cache ngram_cache_dynamic;
|
||||
llama_ngram_cache ngram_cache_static;
|
||||
common_ngram_cache ngram_cache_context;
|
||||
common_ngram_cache ngram_cache_dynamic;
|
||||
common_ngram_cache ngram_cache_static;
|
||||
int64_t t_draft_flat_us = 0;
|
||||
int64_t t_draft_us = 0;
|
||||
|
||||
@@ -48,7 +48,7 @@ int main(int argc, char ** argv){
|
||||
|
||||
if (!params.lookup_cache_static.empty()) {
|
||||
try {
|
||||
ngram_cache_static = llama_ngram_cache_load(params.lookup_cache_static);
|
||||
ngram_cache_static = common_ngram_cache_load(params.lookup_cache_static);
|
||||
} catch (std::ifstream::failure const &) {
|
||||
LOG_ERR("failed to open static lookup cache: %s", params.lookup_cache_static.c_str());
|
||||
exit(1);
|
||||
@@ -57,7 +57,7 @@ int main(int argc, char ** argv){
|
||||
|
||||
if (!params.lookup_cache_dynamic.empty()) {
|
||||
try {
|
||||
ngram_cache_dynamic = llama_ngram_cache_load(params.lookup_cache_dynamic);
|
||||
ngram_cache_dynamic = common_ngram_cache_load(params.lookup_cache_dynamic);
|
||||
} catch (std::ifstream::failure const &) {} // if the file does not exist it will simply be created at the end of the program
|
||||
}
|
||||
|
||||
@@ -86,7 +86,7 @@ int main(int argc, char ** argv){
|
||||
|
||||
{
|
||||
const int64_t t_start_draft_us = ggml_time_us();
|
||||
llama_ngram_cache_draft(pseudo_output, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
|
||||
common_ngram_cache_draft(pseudo_output, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
|
||||
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||
}
|
||||
|
||||
@@ -105,7 +105,7 @@ int main(int argc, char ** argv){
|
||||
|
||||
{
|
||||
const int64_t t_start_draft_us = ggml_time_us();
|
||||
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
|
||||
common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
|
||||
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||
}
|
||||
}
|
||||
@@ -115,7 +115,7 @@ int main(int argc, char ** argv){
|
||||
pseudo_output.push_back(inp_slice[pseudo_output.size()]);
|
||||
{
|
||||
const int64_t t_start_draft_us = ggml_time_us();
|
||||
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
|
||||
common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
|
||||
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||
}
|
||||
}
|
||||
@@ -133,7 +133,7 @@ int main(int argc, char ** argv){
|
||||
}
|
||||
|
||||
// After each chunk, update the dynamic ngram cache with the context ngram cache:
|
||||
llama_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
|
||||
common_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
|
||||
ngram_cache_context.clear();
|
||||
}
|
||||
|
||||
|
||||
@@ -13,13 +13,13 @@
|
||||
#include <vector>
|
||||
|
||||
int main(int argc, char ** argv){
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
// max. number of additional tokens to draft if match is found
|
||||
const int n_draft = params.n_draft;
|
||||
@@ -31,29 +31,29 @@ int main(int argc, char ** argv){
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the model
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
|
||||
// tokenize the prompt
|
||||
std::vector<llama_token> inp;
|
||||
inp = ::llama_tokenize(ctx, params.prompt, true, true);
|
||||
inp = common_tokenize(ctx, params.prompt, true, true);
|
||||
|
||||
llama_ngram_cache ngram_cache_context;
|
||||
llama_ngram_cache ngram_cache_dynamic;
|
||||
llama_ngram_cache ngram_cache_static;
|
||||
common_ngram_cache ngram_cache_context;
|
||||
common_ngram_cache ngram_cache_dynamic;
|
||||
common_ngram_cache ngram_cache_static;
|
||||
int64_t t_draft_flat_us = 0;
|
||||
int64_t t_draft_us = 0;
|
||||
|
||||
{
|
||||
// Fill up context ngram cache with tokens from user input:
|
||||
const int64_t t_start_draft_us = ggml_time_us();
|
||||
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, inp.size(), false);
|
||||
common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, inp.size(), false);
|
||||
|
||||
if (!params.lookup_cache_static.empty()) {
|
||||
try {
|
||||
ngram_cache_static = llama_ngram_cache_load(params.lookup_cache_static);
|
||||
ngram_cache_static = common_ngram_cache_load(params.lookup_cache_static);
|
||||
} catch (std::ifstream::failure const &) {
|
||||
LOG_ERR("failed to open static lookup cache: %s", params.lookup_cache_static.c_str());
|
||||
exit(1);
|
||||
@@ -62,7 +62,7 @@ int main(int argc, char ** argv){
|
||||
|
||||
if (!params.lookup_cache_dynamic.empty()) {
|
||||
try {
|
||||
ngram_cache_dynamic = llama_ngram_cache_load(params.lookup_cache_dynamic);
|
||||
ngram_cache_dynamic = common_ngram_cache_load(params.lookup_cache_dynamic);
|
||||
} catch (std::ifstream::failure const &) {} // if the file does not exist it will simply be created at the end of the program
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ int main(int argc, char ** argv){
|
||||
LOG("\n\n");
|
||||
|
||||
for (auto id : inp) {
|
||||
LOG("%s", llama_token_to_piece(ctx, id).c_str());
|
||||
LOG("%s", common_token_to_piece(ctx, id).c_str());
|
||||
}
|
||||
|
||||
fflush(stderr);
|
||||
@@ -89,8 +89,8 @@ int main(int argc, char ** argv){
|
||||
|
||||
const auto t_enc_start = ggml_time_us();
|
||||
|
||||
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
|
||||
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
|
||||
llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1));
|
||||
llama_decode(ctx, llama_batch_get_one(&inp.back(), 1));
|
||||
|
||||
const auto t_enc_end = ggml_time_us();
|
||||
|
||||
@@ -102,7 +102,7 @@ int main(int argc, char ** argv){
|
||||
|
||||
bool has_eos = false;
|
||||
|
||||
struct gpt_sampler * smpl = gpt_sampler_init(model, params.sparams);
|
||||
struct common_sampler * smpl = common_sampler_init(model, params.sparams);
|
||||
|
||||
std::vector<llama_token> draft;
|
||||
|
||||
@@ -117,7 +117,7 @@ int main(int argc, char ** argv){
|
||||
// debug
|
||||
if (dump_kv_cache) {
|
||||
llama_kv_cache_view_update(ctx, &kvc_view);
|
||||
llama_kv_cache_dump_view_seqs(kvc_view, 40);
|
||||
common_kv_cache_dump_view_seqs(kvc_view, 40);
|
||||
}
|
||||
|
||||
// print current draft sequence
|
||||
@@ -126,11 +126,11 @@ int main(int argc, char ** argv){
|
||||
int i_dft = 0;
|
||||
while (true) {
|
||||
// sample from the target model
|
||||
llama_token id = gpt_sampler_sample(smpl, ctx, i_dft);
|
||||
llama_token id = common_sampler_sample(smpl, ctx, i_dft);
|
||||
|
||||
gpt_sampler_accept(smpl, id, true);
|
||||
common_sampler_accept(smpl, id, true);
|
||||
|
||||
const std::string token_str = llama_token_to_piece(ctx, id);
|
||||
const std::string token_str = common_token_to_piece(ctx, id);
|
||||
|
||||
if (!params.use_color) {
|
||||
LOG("%s", token_str.c_str());
|
||||
@@ -152,7 +152,7 @@ int main(int argc, char ** argv){
|
||||
{
|
||||
// Update context ngram cache with the newly accepted token:
|
||||
const int64_t t_start_draft_us = ggml_time_us();
|
||||
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
|
||||
common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
|
||||
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||
}
|
||||
|
||||
@@ -178,7 +178,7 @@ int main(int argc, char ** argv){
|
||||
{
|
||||
// Update context ngram cache with the newly accepted token:
|
||||
const int64_t t_start_draft_us = ggml_time_us();
|
||||
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
|
||||
common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
|
||||
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||
}
|
||||
break;
|
||||
@@ -192,18 +192,18 @@ int main(int argc, char ** argv){
|
||||
// clean the cache of draft tokens that weren't accepted
|
||||
llama_kv_cache_seq_rm(ctx, 0, n_past, -1);
|
||||
|
||||
llama_batch_clear(batch_tgt);
|
||||
llama_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);
|
||||
common_batch_clear(batch_tgt);
|
||||
common_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);
|
||||
|
||||
// Draft already contains a single token sampled from the model:
|
||||
GGML_ASSERT(draft.size() == 1);
|
||||
GGML_ASSERT(draft[0] == inp.back());
|
||||
const int64_t t_start_draft_us = ggml_time_us();
|
||||
|
||||
llama_ngram_cache_draft(inp, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
|
||||
common_ngram_cache_draft(inp, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
|
||||
|
||||
for (size_t i = 1; i < draft.size(); ++i) {
|
||||
llama_batch_add(batch_tgt, draft[i], n_past + i, { 0 }, true);
|
||||
common_batch_add(batch_tgt, draft[i], n_past + i, { 0 }, true);
|
||||
}
|
||||
|
||||
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||
@@ -218,8 +218,8 @@ int main(int argc, char ** argv){
|
||||
auto t_dec_end = ggml_time_us();
|
||||
|
||||
// Update dynamic ngram cache with context ngram cache and save it to disk:
|
||||
llama_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
|
||||
llama_ngram_cache_save(ngram_cache_dynamic, params.lookup_cache_dynamic);
|
||||
common_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
|
||||
common_ngram_cache_save(ngram_cache_dynamic, params.lookup_cache_dynamic);
|
||||
|
||||
LOG("\n\n");
|
||||
|
||||
@@ -237,9 +237,9 @@ int main(int argc, char ** argv){
|
||||
LOG_INF("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
|
||||
|
||||
LOG_INF("\ntarget:\n\n");
|
||||
gpt_perf_print(ctx, smpl);
|
||||
common_perf_print(ctx, smpl);
|
||||
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
|
||||
llama_batch_free(batch_tgt);
|
||||
|
||||
|
||||
@@ -69,7 +69,7 @@ In this section, we cover the most commonly used options for running the `llama-
- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
- `-mli, --multiline-input`: Allows you to write or paste multiple lines without ending each in '\'
- `-t N, --threads N`: Set the number of threads to use during generation. For optimal performance, it is recommended to set this value to the number of physical CPU cores your system has.
- - `-ngl N, --n-gpu-layers N`: When compiled with GPU support, this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
- `-ngl N, --n-gpu-layers N`: When compiled with GPU support, this option allows offloading some layers to the GPU for computation. Generally results in increased performance.

## Input Prompts

@@ -241,6 +241,19 @@ The `--mirostat-ent` option sets the Mirostat target entropy (tau), which repres

Example usage: `--mirostat 2 --mirostat-lr 0.05 --mirostat-ent 3.0`

### XTC Sampling

- `--xtc-probability N`: Sets the chance for token removal (checked once on sampler start) (default: 0.0).
- `--xtc-threshold N`: Sets a minimum probability threshold for tokens to be removed (default: 0.1).

Exclude Top Choices (XTC) is a unique sampler that is designed to remove top tokens from consideration and avoid more obvious and repetitive outputs. With a chance of `xtc-probability` it searches for tokens with probabilities of `xtc-threshold` and above, then removes all such tokens except the least probable one.

By removing top tokens XTC can improve the variety of answers, break writing clichés and inhibit repetition, since clichés and repeated phrases are usually more likely to appear. By keeping the last token above the threshold, XTC ensures that the answer is still coherent. XTC is meant to be used for creative tasks, but feel free to experiment with different settings for different models.

Being experimental and unique, XTC is disabled by default. The recommended combination of samplers is Min-P followed by XTC on its default settings: `--sampling-seq mx --min-p 0.02 --xtc-probability 0.5`.

Example usage: `--xtc-probability 0.5 --xtc-threshold 0.1`
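
The following is an editor-provided sketch of the XTC filtering step in C++, to make the behaviour above concrete. It is an illustration only, not the llama.cpp sampler implementation; the `token_prob` struct and `xtc_filter` function are hypothetical names, and the candidate list is assumed to be sorted by descending probability:

```cpp
#include <random>
#include <vector>

struct token_prob {
    int   id;
    float p; // probability after softmax
};

// with chance `xtc_probability`, drop every candidate whose probability is
// >= `xtc_threshold`, except the least probable one above the threshold
static void xtc_filter(std::vector<token_prob> & cand, float xtc_probability,
                       float xtc_threshold, std::mt19937 & rng) {
    std::uniform_real_distribution<float> dist(0.0f, 1.0f);
    if (dist(rng) >= xtc_probability) {
        return; // sampler not triggered this time
    }
    // find the last (least probable) candidate above the threshold
    size_t last_above = 0;
    bool   found      = false;
    for (size_t i = 0; i < cand.size(); ++i) {
        if (cand[i].p >= xtc_threshold) {
            last_above = i;
            found      = true;
        }
    }
    // the cut only makes sense if at least two candidates clear the threshold
    if (found && last_above > 0) {
        cand.erase(cand.begin(), cand.begin() + last_above);
    }
}
```

In words: when the sampler triggers, every candidate above the threshold except the least probable one is discarded, which is why at least two candidates must clear the threshold for the cut to apply.
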
### Logit Bias

- `-l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS`: Modify the likelihood of a token appearing in the generated text completion.

@@ -284,10 +297,6 @@ These options help improve the performance and memory usage of the LLaMA models.

These flags attempt optimizations that help on some systems with non-uniform memory access. This currently consists of one of the above strategies, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root.

### Memory Float 32

- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. This doubles the context memory requirement and cached prompt file size but does not appear to increase generation quality in a measurable way. Not recommended.

### Batch Size

- `-b N, --batch-size N`: Set the batch size for prompt processing (default: `2048`). This large batch size benefits users who have BLAS installed and enabled it during the build. If you don't have BLAS enabled ("BLAS=0"), you can use a smaller number, such as 8, to see the prompt progress as it's evaluated in some situations.

@@ -33,8 +33,8 @@
|
||||
|
||||
static llama_context ** g_ctx;
|
||||
static llama_model ** g_model;
|
||||
static gpt_sampler ** g_smpl;
|
||||
static gpt_params * g_params;
|
||||
static common_sampler ** g_smpl;
|
||||
static common_params * g_params;
|
||||
static std::vector<llama_token> * g_input_tokens;
|
||||
static std::ostringstream * g_output_ss;
|
||||
static std::vector<llama_token> * g_output_tokens;
|
||||
@@ -63,7 +63,7 @@ static bool file_is_empty(const std::string & path) {
|
||||
}
|
||||
|
||||
static void write_logfile(
|
||||
const llama_context * ctx, const gpt_params & params, const llama_model * model,
|
||||
const llama_context * ctx, const common_params & params, const llama_model * model,
|
||||
const std::vector<llama_token> & input_tokens, const std::string & output,
|
||||
const std::vector<llama_token> & output_tokens
|
||||
) {
|
||||
@@ -114,12 +114,12 @@ static void sigint_handler(int signo) {
|
||||
} else {
|
||||
console::cleanup();
|
||||
LOG("\n");
|
||||
gpt_perf_print(*g_ctx, *g_smpl);
|
||||
common_perf_print(*g_ctx, *g_smpl);
|
||||
write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
|
||||
|
||||
// make sure all logs are flushed
|
||||
LOG("Interrupted by user\n");
|
||||
gpt_log_pause(gpt_log_main());
|
||||
common_log_pause(common_log_main());
|
||||
|
||||
_exit(130);
|
||||
}
|
||||
@@ -127,22 +127,22 @@ static void sigint_handler(int signo) {
|
||||
}
|
||||
#endif
|
||||
|
||||
static std::string chat_add_and_format(struct llama_model * model, std::vector<llama_chat_msg> & chat_msgs, const std::string & role, const std::string & content) {
|
||||
llama_chat_msg new_msg{role, content};
|
||||
auto formatted = llama_chat_format_single(model, g_params->chat_template, chat_msgs, new_msg, role == "user");
|
||||
static std::string chat_add_and_format(struct llama_model * model, std::vector<common_chat_msg> & chat_msgs, const std::string & role, const std::string & content) {
|
||||
common_chat_msg new_msg{role, content};
|
||||
auto formatted = common_chat_format_single(model, g_params->chat_template, chat_msgs, new_msg, role == "user");
|
||||
chat_msgs.push_back({role, content});
|
||||
LOG_DBG("formatted: '%s'\n", formatted.c_str());
|
||||
return formatted;
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
g_params = ¶ms;
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
auto & sparams = params.sparams;
|
||||
|
||||
@@ -187,9 +187,9 @@ int main(int argc, char ** argv) {
|
||||
|
||||
llama_model * model = nullptr;
|
||||
llama_context * ctx = nullptr;
|
||||
gpt_sampler * smpl = nullptr;
|
||||
common_sampler * smpl = nullptr;
|
||||
|
||||
std::vector<llama_chat_msg> chat_msgs;
|
||||
std::vector<common_chat_msg> chat_msgs;
|
||||
|
||||
g_model = &model;
|
||||
g_ctx = &ctx;
|
||||
@@ -197,7 +197,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// load the model and apply lora adapter, if any
|
||||
LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
model = llama_init.model;
|
||||
ctx = llama_init.context;
|
||||
@@ -246,7 +246,7 @@ int main(int argc, char ** argv) {
|
||||
// print chat template example in conversation mode
|
||||
if (params.conversation) {
|
||||
if (params.enable_chat_template) {
|
||||
LOG_INF("%s: chat template example:\n%s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());
|
||||
LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(model, params.chat_template).c_str());
|
||||
} else {
|
||||
LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
|
||||
}
|
||||
@@ -255,7 +255,7 @@ int main(int argc, char ** argv) {
|
||||
// print system information
|
||||
{
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
LOG_INF("\n");
|
||||
}
|
||||
|
||||
@@ -296,7 +296,7 @@ int main(int argc, char ** argv) {
|
||||
: params.prompt;
|
||||
if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
|
||||
LOG_DBG("tokenize the prompt\n");
|
||||
embd_inp = ::llama_tokenize(ctx, prompt, true, true);
|
||||
embd_inp = common_tokenize(ctx, prompt, true, true);
|
||||
} else {
|
||||
LOG_DBG("use session tokens\n");
|
||||
embd_inp = session_tokens;
|
||||
@@ -379,13 +379,13 @@ int main(int argc, char ** argv) {
|
||||
LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
|
||||
LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
|
||||
for (int i = 0; i < (int) embd_inp.size(); i++) {
|
||||
LOG_INF("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", embd_inp[i], common_token_to_piece(ctx, embd_inp[i]).c_str());
|
||||
}
|
||||
|
||||
if (params.n_keep > add_bos) {
|
||||
LOG_INF("%s: static prompt based on n_keep: '", __func__);
|
||||
for (int i = 0; i < params.n_keep; i++) {
|
||||
LOG_CNT("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
|
||||
LOG_CNT("%s", common_token_to_piece(ctx, embd_inp[i]).c_str());
|
||||
}
|
||||
LOG_CNT("'\n");
|
||||
}
|
||||
@@ -415,9 +415,9 @@ int main(int argc, char ** argv) {
|
||||
for (const auto & antiprompt : params.antiprompt) {
|
||||
LOG_INF("Reverse prompt: '%s'\n", antiprompt.c_str());
|
||||
if (params.verbose_prompt) {
|
||||
auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
|
||||
auto tmp = common_tokenize(ctx, antiprompt, false, true);
|
||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -430,9 +430,9 @@ int main(int argc, char ** argv) {
|
||||
if (!params.input_prefix.empty()) {
|
||||
LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
|
||||
if (params.verbose_prompt) {
|
||||
auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
|
||||
auto tmp = common_tokenize(ctx, params.input_prefix, true, true);
|
||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -440,23 +440,23 @@ int main(int argc, char ** argv) {
|
||||
if (!params.input_suffix.empty()) {
|
||||
LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
|
||||
if (params.verbose_prompt) {
|
||||
auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
|
||||
auto tmp = common_tokenize(ctx, params.input_suffix, false, true);
|
||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
smpl = gpt_sampler_init(model, sparams);
|
||||
smpl = common_sampler_init(model, sparams);
|
||||
if (!smpl) {
|
||||
LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
LOG_INF("sampler seed: %u\n", gpt_sampler_get_seed(smpl));
|
||||
LOG_INF("sampler seed: %u\n", common_sampler_get_seed(smpl));
|
||||
LOG_INF("sampler params: \n%s\n", sparams.print().c_str());
|
||||
LOG_INF("sampler chain: %s\n", gpt_sampler_print(smpl).c_str());
|
||||
LOG_INF("sampler chain: %s\n", common_sampler_print(smpl).c_str());
|
||||
|
||||
LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
|
||||
|
||||
@@ -521,14 +521,14 @@ int main(int argc, char ** argv) {
|
||||
|
||||
antiprompt_ids.reserve(params.antiprompt.size());
|
||||
for (const std::string & antiprompt : params.antiprompt) {
|
||||
antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
|
||||
antiprompt_ids.emplace_back(::common_tokenize(ctx, antiprompt, false, true));
|
||||
}
|
||||
|
||||
if (llama_model_has_encoder(model)) {
|
||||
int enc_input_size = embd_inp.size();
|
||||
llama_token * enc_input_buf = embd_inp.data();
|
||||
|
||||
if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size, 0, 0))) {
|
||||
if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size))) {
|
||||
LOG_ERR("%s : failed to eval\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
@@ -569,30 +569,30 @@ int main(int argc, char ** argv) {
|
||||
if (!params.ctx_shift){
|
||||
LOG_DBG("\n\n%s: context full and context shift is disabled => stopping\n", __func__);
|
||||
break;
|
||||
} else {
|
||||
if (params.n_predict == -2) {
|
||||
LOG_DBG("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
|
||||
break;
|
||||
}
|
||||
|
||||
const int n_left = n_past - params.n_keep;
|
||||
const int n_discard = n_left/2;
|
||||
|
||||
LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
|
||||
n_past, n_left, n_ctx, params.n_keep, n_discard);
|
||||
|
||||
llama_kv_cache_seq_rm (ctx, 0, params.n_keep , params.n_keep + n_discard);
|
||||
llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);
|
||||
|
||||
n_past -= n_discard;
|
||||
|
||||
LOG_DBG("after swap: n_past = %d\n", n_past);
|
||||
|
||||
LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());
|
||||
|
||||
LOG_DBG("clear session path\n");
|
||||
path_session.clear();
|
||||
}
|
||||
|
||||
if (params.n_predict == -2) {
|
||||
LOG_DBG("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
|
||||
break;
|
||||
}
|
||||
|
||||
const int n_left = n_past - params.n_keep;
|
||||
const int n_discard = n_left/2;
|
||||
|
||||
LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
|
||||
n_past, n_left, n_ctx, params.n_keep, n_discard);
|
||||
|
||||
llama_kv_cache_seq_rm (ctx, 0, params.n_keep , params.n_keep + n_discard);
|
||||
llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);
|
||||
|
||||
n_past -= n_discard;
|
||||
|
||||
LOG_DBG("after swap: n_past = %d\n", n_past);
|
||||
|
||||
LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());
|
||||
|
||||
LOG_DBG("clear session path\n");
|
||||
path_session.clear();
|
||||
}
|
||||
} else {
|
||||
// context extension via Self-Extend
|
||||
@@ -648,7 +648,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());
|
||||
|
||||
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
|
||||
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) {
|
||||
LOG_ERR("%s : failed to eval\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
@@ -679,9 +679,9 @@ int main(int argc, char ** argv) {
|
||||
LOG_DBG("saved session to %s\n", path_session.c_str());
|
||||
}
|
||||
|
||||
const llama_token id = gpt_sampler_sample(smpl, ctx, -1);
|
||||
const llama_token id = common_sampler_sample(smpl, ctx, -1);
|
||||
|
||||
gpt_sampler_accept(smpl, id, /* accept_grammar= */ true);
|
||||
common_sampler_accept(smpl, id, /* accept_grammar= */ true);
|
||||
|
||||
// LOG_DBG("last: %s\n", string_from(ctx, smpl->prev.to_vector()).c_str());
|
||||
|
||||
@@ -702,7 +702,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// push the prompt in the sampling context in order to apply repetition penalties later
|
||||
// for the prompt, we don't apply grammar rules
|
||||
gpt_sampler_accept(smpl, embd_inp[n_consumed], /* accept_grammar= */ false);
|
||||
common_sampler_accept(smpl, embd_inp[n_consumed], /* accept_grammar= */ false);
|
||||
|
||||
++n_consumed;
|
||||
if ((int) embd.size() >= params.n_batch) {
|
||||
@@ -714,7 +714,7 @@ int main(int argc, char ** argv) {
|
||||
// display text
|
||||
if (input_echo && display) {
|
||||
for (auto id : embd) {
|
||||
const std::string token_str = llama_token_to_piece(ctx, id, params.special);
|
||||
const std::string token_str = common_token_to_piece(ctx, id, params.special);
|
||||
|
||||
// Console/Stream Output
|
||||
LOG("%s", token_str.c_str());
|
||||
@@ -743,7 +743,7 @@ int main(int argc, char ** argv) {
|
||||
// check for reverse prompt in the last n_prev tokens
|
||||
if (!params.antiprompt.empty()) {
|
||||
const int n_prev = 32;
|
||||
const std::string last_output = gpt_sampler_prev_str(smpl, ctx, n_prev);
|
||||
const std::string last_output = common_sampler_prev_str(smpl, ctx, n_prev);
|
||||
|
||||
is_antiprompt = false;
|
||||
// Check if each of the reverse prompts appears at the end of the output.
|
||||
@@ -765,7 +765,7 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
|
||||
// check for reverse prompt using special tokens
|
||||
llama_token last_token = gpt_sampler_last(smpl);
|
||||
llama_token last_token = common_sampler_last(smpl);
|
||||
for (std::vector<llama_token> ids : antiprompt_ids) {
|
||||
if (ids.size() == 1 && last_token == ids[0]) {
|
||||
if (params.interactive) {
|
||||
@@ -782,13 +782,13 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
|
||||
// deal with end of generation tokens in interactive mode
|
||||
if (llama_token_is_eog(model, gpt_sampler_last(smpl))) {
|
||||
if (llama_token_is_eog(model, common_sampler_last(smpl))) {
|
||||
LOG_DBG("found an EOG token\n");
|
||||
|
||||
if (params.interactive) {
|
||||
if (!params.antiprompt.empty()) {
|
||||
// tokenize and inject first reverse prompt
|
||||
const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false, true);
|
||||
const auto first_antiprompt = common_tokenize(ctx, params.antiprompt.front(), false, true);
|
||||
embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
|
||||
is_antiprompt = true;
|
||||
}
|
||||
@@ -803,8 +803,8 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// if current token is not EOG, we add it to current assistant message
|
||||
if (params.conversation) {
|
||||
const auto id = gpt_sampler_last(smpl);
|
||||
assistant_ss << llama_token_to_piece(ctx, id, false);
|
||||
const auto id = common_sampler_last(smpl);
|
||||
assistant_ss << common_token_to_piece(ctx, id, false);
|
||||
}
|
||||
|
||||
if (n_past > 0 && is_interacting) {
|
||||
@@ -862,9 +862,9 @@ int main(int argc, char ** argv) {
|
||||
? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
|
||||
: std::move(buffer);
|
||||
// TODO: one inconvenient of current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
|
||||
const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
|
||||
const auto line_inp = ::llama_tokenize(ctx, user_inp, false, format_chat);
|
||||
const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
|
||||
const auto line_pfx = common_tokenize(ctx, params.input_prefix, false, true);
|
||||
const auto line_inp = common_tokenize(ctx, user_inp, false, format_chat);
|
||||
const auto line_sfx = common_tokenize(ctx, params.input_suffix, false, true);
|
||||
|
||||
LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());
|
||||
|
||||
@@ -882,7 +882,7 @@ int main(int argc, char ** argv) {
|
||||
for (size_t i = original_size; i < embd_inp.size(); ++i) {
|
||||
const llama_token token = embd_inp[i];
|
||||
output_tokens.push_back(token);
|
||||
output_ss << llama_token_to_piece(ctx, token);
|
||||
output_ss << common_token_to_piece(ctx, token);
|
||||
}
|
||||
|
||||
// reset assistant message
|
||||
@@ -899,7 +899,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
if (n_past > 0) {
|
||||
if (is_interacting) {
|
||||
gpt_sampler_reset(smpl);
|
||||
common_sampler_reset(smpl);
|
||||
}
|
||||
is_interacting = false;
|
||||
}
|
||||
@@ -925,10 +925,10 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
|
||||
LOG("\n\n");
|
||||
gpt_perf_print(ctx, smpl);
|
||||
common_perf_print(ctx, smpl);
|
||||
write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
|
||||
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
|
||||
llama_free(ctx);
|
||||
llama_free_model(model);
|
||||
|
||||
@@ -54,7 +54,7 @@ static std::vector<std::string> k_prompts = {
|
||||
struct client {
|
||||
~client() {
|
||||
if (smpl) {
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,7 +75,7 @@ struct client {
|
||||
std::string prompt;
|
||||
std::string response;
|
||||
|
||||
struct gpt_sampler * smpl = nullptr;
|
||||
struct common_sampler * smpl = nullptr;
|
||||
};
|
||||
|
||||
static void print_date_time() {
|
||||
@@ -103,13 +103,13 @@ static std::vector<std::string> split_string(const std::string& input, char deli
|
||||
int main(int argc, char ** argv) {
|
||||
srand(1234);
|
||||
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_PARALLEL)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PARALLEL)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
// number of simultaneous "clients" to simulate
|
||||
const int32_t n_clients = params.n_parallel;
|
||||
@@ -130,7 +130,7 @@ int main(int argc, char ** argv) {
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the target model
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -160,11 +160,11 @@ int main(int argc, char ** argv) {
|
||||
for (size_t i = 0; i < clients.size(); ++i) {
|
||||
auto & client = clients[i];
|
||||
client.id = i;
|
||||
client.smpl = gpt_sampler_init(model, params.sparams);
|
||||
client.smpl = common_sampler_init(model, params.sparams);
|
||||
}
|
||||
|
||||
std::vector<llama_token> tokens_system;
|
||||
tokens_system = ::llama_tokenize(ctx, k_system, true);
|
||||
tokens_system = common_tokenize(ctx, k_system, true);
|
||||
const int32_t n_tokens_system = tokens_system.size();
|
||||
|
||||
llama_seq_id g_seq_id = 0;
|
||||
@@ -189,7 +189,7 @@ int main(int argc, char ** argv) {
|
||||
LOG_INF("%s: Evaluating the system prompt ...\n", __func__);
|
||||
|
||||
for (int32_t i = 0; i < n_tokens_system; ++i) {
|
||||
llama_batch_add(batch, tokens_system[i], i, { 0 }, false);
|
||||
common_batch_add(batch, tokens_system[i], i, { 0 }, false);
|
||||
}
|
||||
|
||||
if (llama_decode(ctx, batch) != 0) {
|
||||
@@ -210,10 +210,10 @@ int main(int argc, char ** argv) {
|
||||
while (true) {
|
||||
if (dump_kv_cache) {
|
||||
llama_kv_cache_view_update(ctx, &kvc_view);
|
||||
llama_kv_cache_dump_view_seqs(kvc_view, 40);
|
||||
common_kv_cache_dump_view_seqs(kvc_view, 40);
|
||||
}
|
||||
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
// decode any currently ongoing sequences
|
||||
for (auto & client : clients) {
|
||||
@@ -223,7 +223,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
client.i_batch = batch.n_tokens;
|
||||
|
||||
llama_batch_add(batch, client.sampled, n_tokens_system + client.n_prompt + client.n_decoded, { client.id + 1 }, true);
|
||||
common_batch_add(batch, client.sampled, n_tokens_system + client.n_prompt + client.n_decoded, { client.id + 1 }, true);
|
||||
|
||||
client.n_decoded += 1;
|
||||
}
|
||||
@@ -252,14 +252,14 @@ int main(int argc, char ** argv) {
|
||||
client.prompt = client.input + "\nAssistant:";
|
||||
client.response = "";
|
||||
|
||||
gpt_sampler_reset(client.smpl);
|
||||
common_sampler_reset(client.smpl);
|
||||
|
||||
// do not prepend BOS because we have a system prompt!
|
||||
std::vector<llama_token> tokens_prompt;
|
||||
tokens_prompt = ::llama_tokenize(ctx, client.prompt, false);
|
||||
tokens_prompt = common_tokenize(ctx, client.prompt, false);
|
||||
|
||||
for (size_t i = 0; i < tokens_prompt.size(); ++i) {
|
||||
llama_batch_add(batch, tokens_prompt[i], i + n_tokens_system, { client.id + 1 }, false);
|
||||
common_batch_add(batch, tokens_prompt[i], i + n_tokens_system, { client.id + 1 }, false);
|
||||
}
|
||||
|
||||
// extract the logits only for the last token
|
||||
@@ -308,7 +308,6 @@ int main(int argc, char ** argv) {
|
||||
batch.n_seq_id + i,
|
||||
batch.seq_id + i,
|
||||
batch.logits + i,
|
||||
0, 0, 0, // unused
|
||||
};
|
||||
|
||||
const int ret = llama_decode(ctx, batch_view);
|
||||
@@ -340,9 +339,9 @@ int main(int argc, char ** argv) {
|
||||
//printf("client %d, seq %d, token %d, pos %d, batch %d\n",
|
||||
// client.id, client.seq_id, client.sampled, client.n_decoded, client.i_batch);
|
||||
|
||||
const llama_token id = gpt_sampler_sample(client.smpl, ctx, client.i_batch - i);
|
||||
const llama_token id = common_sampler_sample(client.smpl, ctx, client.i_batch - i);
|
||||
|
||||
gpt_sampler_accept(client.smpl, id, true);
|
||||
common_sampler_accept(client.smpl, id, true);
|
||||
|
||||
if (client.n_decoded == 1) {
|
||||
// start measuring generation time after the first token to make sure all concurrent clients
|
||||
@@ -350,7 +349,7 @@ int main(int argc, char ** argv) {
|
||||
client.t_start_gen = ggml_time_us();
|
||||
}
|
||||
|
||||
const std::string token_str = llama_token_to_piece(ctx, id);
|
||||
const std::string token_str = common_token_to_piece(ctx, id);
|
||||
|
||||
client.response += token_str;
|
||||
client.sampled = id;
|
||||
|
||||
@@ -15,17 +15,17 @@ static void print_usage(int, char ** argv) {
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
params.n_junk = 250;
|
||||
params.n_keep = 32;
|
||||
params.i_pos = -1;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_PASSKEY, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PASSKEY, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
int n_junk = params.n_junk;
|
||||
int n_keep = params.n_keep;
|
||||
@@ -61,7 +61,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// initialize the model
|
||||
|
||||
llama_model_params model_params = llama_model_params_from_gpt_params(params);
|
||||
llama_model_params model_params = common_model_params_to_llama(params);
|
||||
|
||||
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
|
||||
|
||||
@@ -72,7 +72,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// initialize the context
|
||||
|
||||
llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
|
||||
llama_context_params ctx_params = common_context_params_to_llama(params);
|
||||
|
||||
ctx_params.n_ctx = llama_n_ctx_train(model)*n_grp + n_keep;
|
||||
|
||||
@@ -92,10 +92,10 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// tokenize the prompt
|
||||
std::vector<llama_token> tokens_list;
|
||||
tokens_list = ::llama_tokenize(ctx, params.prompt, true);
|
||||
tokens_list = common_tokenize(ctx, params.prompt, true);
|
||||
|
||||
// tokenize the prefix and use it as a sink
|
||||
const int n_tokens_prefix = ::llama_tokenize(ctx, prompt_prefix, true).size();
|
||||
const int n_tokens_prefix = common_tokenize(ctx, prompt_prefix, true).size();
|
||||
|
||||
const int n_tokens_all = tokens_list.size();
|
||||
|
||||
@@ -137,10 +137,10 @@ int main(int argc, char ** argv) {
|
||||
n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1;
|
||||
}
|
||||
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
for (int j = 0; j < n_batch && i + j < n_tokens_all; j++) {
|
||||
llama_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
|
||||
common_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
|
||||
}
|
||||
|
||||
if (i + n_batch >= n_tokens_all) {
|
||||
@@ -171,10 +171,10 @@ int main(int argc, char ** argv) {
|
||||
|
||||
n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1;
|
||||
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
for (int j = 0; j < n_batch && i + j < n_tokens_all; j++) {
|
||||
llama_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
|
||||
common_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
|
||||
}
|
||||
|
||||
if (i + n_batch >= n_tokens_all) {
|
||||
@@ -229,15 +229,15 @@ int main(int argc, char ** argv) {
|
||||
break;
|
||||
}
|
||||
|
||||
LOG("%s", llama_token_to_piece(ctx, new_token_id).c_str());
|
||||
LOG("%s", common_token_to_piece(ctx, new_token_id).c_str());
|
||||
|
||||
n_decode += 1;
|
||||
|
||||
// prepare the next batch
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
// push this new token for next evaluation
|
||||
llama_batch_add(batch, new_token_id, n_past++, { 0 }, true);
|
||||
common_batch_add(batch, new_token_id, n_past++, { 0 }, true);
|
||||
}
|
||||
|
||||
n_cur += 1;
|
||||
|
||||
@@ -35,7 +35,7 @@ struct results_log_softmax {
|
||||
};
|
||||
|
||||
static void write_logfile(
|
||||
const llama_context * ctx, const gpt_params & params, const llama_model * model,
|
||||
const llama_context * ctx, const common_params & params, const llama_model * model,
|
||||
const struct results_perplexity & results
|
||||
) {
|
||||
if (params.logdir.empty()) {
|
||||
@@ -169,7 +169,7 @@ static void process_logits(
             break;
         }
         lock.unlock();
-        const results_log_softmax results = log_softmax(n_vocab, logits + i*n_vocab, tokens[i+1]);
+        const results_log_softmax results = log_softmax(n_vocab, logits + size_t(i)*n_vocab, tokens[i+1]);
         const double v = -results.log_softmax;
         local_nll += v;
         local_nll2 += v*v;
@@ -203,7 +203,7 @@ static void process_logits(std::ostream& out, int n_vocab, const float * logits,
             break;
         }
         lock.unlock();
-        const double v = log_softmax(n_vocab, logits + i*n_vocab, log_probs.data() + i*nv, tokens[i+1]);
+        const double v = log_softmax(n_vocab, logits + size_t(i)*n_vocab, log_probs.data() + i*nv, tokens[i+1]);
         local_nll += v;
         local_nll2 += v*v;
     }
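The `size_t(i)` casts above are the point of this hunk: with a large vocabulary the 32-bit product `i*n_vocab` can exceed `INT_MAX` before it is used as a pointer offset. A minimal, self-contained illustration — the vocabulary size and row index are made-up numbers for the example, not values from the patch:

```cpp
#include <climits>
#include <cstddef>
#include <cstdio>

int main() {
    const int n_vocab = 128256; // assumed vocabulary size of a modern large model
    const int i       = 20000;  // assumed row index into a flat logits buffer

    const long long true_offset = 1LL * i * n_vocab;            // 2,565,120,000
    const size_t    safe_offset = size_t(i) * size_t(n_vocab);  // what the patched code computes

    printf("true offset        : %lld\n", true_offset);
    printf("fits in 32-bit int : %s\n", true_offset <= INT_MAX ? "yes" : "no"); // prints "no"
    printf("size_t offset      : %zu\n", safe_offset);
    return 0;
}
```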
@@ -281,7 +281,9 @@ static std::pair<double, float> log_softmax(int n_vocab, const float * logits, c
|
||||
kld.sum_kld += sum;
|
||||
kld.sum_kld2 += sum*sum;
|
||||
++kld.count;
|
||||
if (imax == imax_base) ++kld.n_same_top;
|
||||
if (imax == imax_base) {
|
||||
++kld.n_same_top;
|
||||
}
|
||||
|
||||
const float p_base = expf(-nll_base);
|
||||
const float p = expf(-nll);
|
||||
@@ -323,7 +325,7 @@ static void process_logits(int n_vocab, const float * logits, const int * tokens
|
||||
break;
|
||||
}
|
||||
lock.unlock();
|
||||
std::pair<double, float> v = log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
|
||||
std::pair<double, float> v = log_softmax(n_vocab, logits + size_t(i)*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
|
||||
kld_values[i] = (float)v.first;
|
||||
p_diff_values[i] = v.second;
|
||||
}
|
||||
@@ -337,7 +339,7 @@ static void process_logits(int n_vocab, const float * logits, const int * tokens
|
||||
}
|
||||
}
|
||||
|
||||
static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & params) {
|
||||
static results_perplexity perplexity_v2(llama_context * ctx, const common_params & params) {
|
||||
// Download: https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip
|
||||
// Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
|
||||
// Output: `perplexity: 13.5106 [114/114]`
|
||||
@@ -348,7 +350,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
|
||||
|
||||
LOG_INF("%s: tokenizing the input ..\n", __func__);
|
||||
|
||||
std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);
|
||||
std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true);
|
||||
|
||||
const int n_ctx = llama_n_ctx(ctx);
|
||||
|
||||
@@ -383,9 +385,10 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
|
||||
const int n_chunk_max = (tokens.size() - calc_chunk + params.ppl_stride - 1) / params.ppl_stride;
|
||||
|
||||
const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
const int n_batch = params.n_batch;
|
||||
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
|
||||
int count = 0;
|
||||
double nll = 0.0;
|
||||
|
||||
@@ -405,14 +408,21 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
         // clear the KV cache
         llama_kv_cache_clear(ctx);

+        llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
         for (int j = 0; j < num_batches; ++j) {
             const int batch_start = start + j * n_batch;
             const int batch_size  = std::min(end - batch_start, n_batch);

+            common_batch_clear(batch);
+            for (int i = 0; i < batch_size; i++) {
+                common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
+            }
+
             //LOG_DBG("    Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
-            // TODO: use llama_batch.logits instead of relying on logits_all == true
-            if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
+            if (llama_decode(ctx, batch)) {
                 //LOG_ERR("%s : failed to eval\n", __func__);
+                llama_batch_free(batch);
                 return {tokens, -1, logit_history, prob_history};
             }
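For readers following the hunk above: the change drops the old `llama_batch_get_one()` shortcut and builds the batch explicitly, so every token's position, sequence id and logits flag are spelled out. A condensed sketch of that pattern, assuming the `common_batch_*` helpers from `common.h` are available (the helper names come from the diff; the surrounding function is illustrative only):

```cpp
#include <algorithm>
#include <vector>

#include "common.h"
#include "llama.h"

// Evaluate `tokens` in chunks of n_batch on sequence 0, requesting logits for
// every token (as the perplexity code does). Returns false if decoding fails.
static bool eval_tokens(llama_context * ctx, const std::vector<llama_token> & tokens, int n_batch) {
    llama_batch batch = llama_batch_init(n_batch, 0, 1);

    for (int start = 0; start < (int) tokens.size(); start += n_batch) {
        const int n = std::min(n_batch, (int) tokens.size() - start);

        common_batch_clear(batch);
        for (int i = 0; i < n; i++) {
            // position = absolute token index, sequence 0, logits requested
            common_batch_add(batch, tokens[start + i], start + i, { 0 }, true);
        }

        if (llama_decode(ctx, batch)) {
            llama_batch_free(batch);
            return false;
        }
    }

    llama_batch_free(batch);
    return true;
}
```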
@@ -424,14 +434,16 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
|
||||
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
|
||||
}
|
||||
|
||||
const auto batch_logits = llama_get_logits(ctx);
|
||||
logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
|
||||
const auto * batch_logits = llama_get_logits(ctx);
|
||||
logits.insert(logits.end(), batch_logits, batch_logits + size_t(batch_size) * n_vocab);
|
||||
|
||||
if (j == 0) {
|
||||
tokens[batch_start] = token_org;
|
||||
}
|
||||
}
|
||||
|
||||
llama_batch_free(batch);
|
||||
|
||||
const auto t_end = std::chrono::high_resolution_clock::now();
|
||||
|
||||
if (i == 0) {
|
||||
@@ -447,11 +459,10 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
|
||||
|
||||
//LOG_DBG("%s: using tokens %d...%d\n",__func__,params.n_ctx - params.ppl_stride + start, params.n_ctx + start);
|
||||
for (int j = n_ctx - params.ppl_stride - 1; j < n_ctx - 1; ++j) {
|
||||
|
||||
// Calculate probability of next token, given the previous ones.
|
||||
const std::vector<float> tok_logits(
|
||||
logits.begin() + (j + 0) * n_vocab,
|
||||
logits.begin() + (j + 1) * n_vocab);
|
||||
logits.begin() + size_t(j + 0) * n_vocab,
|
||||
logits.begin() + size_t(j + 1) * n_vocab);
|
||||
|
||||
const float prob = softmax(tok_logits)[tokens[start + j + 1]];
|
||||
logit_history[start + j + 1] = tok_logits[tokens[start + j + 1]];
|
||||
@@ -472,7 +483,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
|
||||
return {tokens, std::exp(nll / count), logit_history, prob_history};
|
||||
}
|
||||
|
||||
static results_perplexity perplexity(llama_context * ctx, const gpt_params & params, const int32_t n_ctx) {
|
||||
static results_perplexity perplexity(llama_context * ctx, const common_params & params, const int32_t n_ctx) {
|
||||
if (params.ppl_stride > 0) {
|
||||
return perplexity_v2(ctx, params);
|
||||
}
|
||||
@@ -500,7 +511,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
||||
auto tim1 = std::chrono::high_resolution_clock::now();
|
||||
LOG_INF("%s: tokenizing the input ..\n", __func__);
|
||||
|
||||
std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);
|
||||
std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true);
|
||||
|
||||
auto tim2 = std::chrono::high_resolution_clock::now();
|
||||
LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
|
||||
@@ -521,9 +532,10 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
||||
const int n_chunk_max = tokens.size() / n_ctx;
|
||||
|
||||
const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
const int n_batch = params.n_batch;
|
||||
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
|
||||
int count = 0;
|
||||
double nll = 0.0;
|
||||
double nll2 = 0.0;
|
||||
@@ -538,7 +550,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
||||
|
||||
std::vector<float> logits;
|
||||
if (num_batches > 1) {
|
||||
logits.reserve((size_t)n_ctx * n_vocab);
|
||||
logits.reserve(size_t(n_ctx) * n_vocab);
|
||||
}
|
||||
|
||||
LOG_INF("%s: calculating perplexity over %d chunks, n_ctx=%d, batch_size=%d, n_seq=%d\n", __func__, n_chunk, n_ctx, n_batch, n_seq);
|
||||
@@ -620,7 +632,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
||||
|
||||
if (num_batches > 1 && n_outputs > 0) {
|
||||
const auto * batch_logits = llama_get_logits(ctx);
|
||||
logits.insert(logits.end(), batch_logits, batch_logits + n_outputs * n_vocab);
|
||||
logits.insert(logits.end(), batch_logits, batch_logits + size_t(n_outputs) * n_vocab);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -661,7 +673,9 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
||||
} else {
|
||||
double av = nll/count;
|
||||
double av2 = nll2/count - av*av;
|
||||
if (av2 > 0) av2 = sqrt(av2/(count-1));
|
||||
if (av2 > 0) {
|
||||
av2 = sqrt(av2/(count-1));
|
||||
}
|
||||
LOG("%8d %.4lf %4lf %4lf\n", i*n_ctx, std::exp(nll / count), av, av2);
|
||||
}
|
||||
}
|
||||
@@ -686,10 +700,10 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
     return {tokens, ppl, logit_history, prob_history};
 }

-static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<float> & batch_logits, int32_t n_batch, int32_t n_vocab) {
+static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<float> & batch_logits, int n_batch, int n_vocab) {
     int prev_outputs = 0;
-    for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
-        const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
+    for (int i = 0; i < (int) batch.n_tokens; i += n_batch) {
+        const int n_tokens = std::min<int>(n_batch, batch.n_tokens - i);

         llama_batch batch_view = {
             n_tokens,
@@ -699,7 +713,6 @@ static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<
             batch.n_seq_id + i,
             batch.seq_id + i,
             batch.logits + i,
-            0, 0, 0, // unused
         };

         const int ret = llama_decode(ctx, batch_view);
@@ -713,7 +726,7 @@ static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<
             n_outputs += batch_view.logits[i] != 0;
         }

-        memcpy(batch_logits.data() + prev_outputs*n_vocab, llama_get_logits(ctx), n_outputs*n_vocab*sizeof(float));
+        memcpy(batch_logits.data() + size_t(prev_outputs)*n_vocab, llama_get_logits(ctx), size_t(n_outputs)*n_vocab*sizeof(float));

         prev_outputs += n_outputs;
     }
@@ -728,7 +741,9 @@ static void compute_logprobs(const float * batch_logits, int n_vocab, std::vecto
     if (eval_results.size() != eval_pairs.size()) {
         eval_results.resize(eval_pairs.size());
     }
-    if (eval_pairs.empty()) return;
+    if (eval_pairs.empty()) {
+        return;
+    }

     size_t max_threads = std::min((eval_pairs.size() + K_TOKEN_CHUNK - 1)/K_TOKEN_CHUNK, workers.size());

@@ -736,11 +751,13 @@ static void compute_logprobs(const float * batch_logits, int n_vocab, std::vecto
     auto compute = [&counter, &eval_pairs, &eval_results, batch_logits, n_vocab] () {
         float local_logprobs[K_TOKEN_CHUNK];
         while (true) {
-            size_t first = counter.fetch_add(K_TOKEN_CHUNK, std::memory_order_relaxed);
-            if (first >= eval_results.size()) break;
-            size_t last = std::min(first + K_TOKEN_CHUNK, eval_results.size());
+            const size_t first = counter.fetch_add(K_TOKEN_CHUNK, std::memory_order_relaxed);
+            if (first >= eval_results.size()) {
+                break;
+            }
+            const size_t last = std::min(first + K_TOKEN_CHUNK, eval_results.size());
             for (size_t i = first; i < last; ++i) {
-                auto logits = batch_logits + eval_pairs[i].first * n_vocab;
+                const auto * logits = batch_logits + eval_pairs[i].first * n_vocab;
                 float max_logit = logits[0];
                 for (int j = 1; j < n_vocab; ++j) {
                     max_logit = std::max(max_logit, logits[j]);
@@ -763,7 +780,7 @@ static void compute_logprobs(const float * batch_logits, int n_vocab, std::vecto
     }
 }

-static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
+static void hellaswag_score(llama_context * ctx, const common_params & params) {
     // Calculates hellaswag score (acc_norm) from prompt
     //
     // Data extracted from the HellaSwag validation dataset (MIT license) https://github.com/rowanz/hellaswag/blob/master/data/hellaswag_val.jsonl
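A note on the worker pattern in `compute_logprobs()` above, which this change keeps intact: a shared atomic counter hands out fixed-size chunks of indices to a pool of threads until the range is exhausted. A stripped-down, self-contained sketch of that idea — the chunk size, worker count and dummy per-item work are assumptions for illustration, not values from the code:

```cpp
#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
    constexpr size_t K_CHUNK = 4;     // stands in for K_TOKEN_CHUNK
    std::vector<int> results(23, 0);  // stands in for eval_results

    std::atomic<size_t> counter{0};
    auto worker = [&]() {
        while (true) {
            const size_t first = counter.fetch_add(K_CHUNK, std::memory_order_relaxed);
            if (first >= results.size()) {
                break;
            }
            const size_t last = std::min(first + K_CHUNK, results.size());
            for (size_t i = first; i < last; ++i) {
                results[i] = (int) i; // the real code computes a log-softmax here
            }
        }
    };

    std::vector<std::thread> workers;
    for (int t = 0; t < 4; ++t) {
        workers.emplace_back(worker);
    }
    for (auto & w : workers) {
        w.join();
    }

    printf("filled %zu slots\n", results.size());
    return 0;
}
```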
@@ -844,7 +861,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
||||
hs_cur.gold_ending_idx = std::stoi( prompt_lines[idx*6+1] );
|
||||
for (size_t j = 0; j < 4; j++) {
|
||||
hs_cur.ending[j] = prompt_lines[idx*6+2+j];
|
||||
hs_cur.seq_tokens[j] = ::llama_tokenize(ctx, hs_cur.context + " " + hs_cur.ending[j], true);
|
||||
hs_cur.seq_tokens[j] = common_tokenize(ctx, hs_cur.context + " " + hs_cur.ending[j], true);
|
||||
}
|
||||
|
||||
// determine the common prefix of the endings
|
||||
@@ -877,10 +894,11 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
||||
|
||||
double acc = 0.0f;
|
||||
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
const int n_ctx = llama_n_ctx(ctx);
|
||||
const int n_batch = params.n_batch;
|
||||
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
|
||||
const int max_tasks_per_batch = 32;
|
||||
const int max_seq = std::min(4*max_tasks_per_batch, (int) llama_n_seq_max(ctx));
|
||||
|
||||
@@ -888,7 +906,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
||||
|
||||
std::vector<float> tok_logits(n_vocab);
|
||||
// TODO: this could be made smaller; it's currently the worst-case size
|
||||
std::vector<float> batch_logits(n_vocab*n_ctx);
|
||||
std::vector<float> batch_logits(size_t(n_ctx)*n_vocab);
|
||||
|
||||
std::vector<std::pair<size_t, llama_token>> eval_pairs;
|
||||
std::vector<float> eval_results;
|
||||
@@ -900,7 +918,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
||||
size_t i1 = i0;
|
||||
size_t i_logits = 0; // this tells us how many logits were needed before this point in the batch
|
||||
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
// batch as much tasks as possible into the available context
|
||||
// each task has 4 unique sequence ids - one for each ending
|
||||
@@ -916,7 +934,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < hs_cur.common_prefix; ++i) {
|
||||
llama_batch_add(batch, hs_cur.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3 }, false);
|
||||
common_batch_add(batch, hs_cur.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3 }, false);
|
||||
}
|
||||
batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
|
||||
n_logits += 1;
|
||||
@@ -926,7 +944,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
||||
// TODO: don't evaluate the last token of each sequence
|
||||
for (size_t i = hs_cur.common_prefix; i < seq_tokens_size; ++i) {
|
||||
const bool needs_logits = i < seq_tokens_size - 1;
|
||||
llama_batch_add(batch, hs_cur.seq_tokens[s][i], i, { s0 + s }, needs_logits);
|
||||
common_batch_add(batch, hs_cur.seq_tokens[s][i], i, { s0 + s }, needs_logits);
|
||||
n_logits += needs_logits;
|
||||
}
|
||||
}
|
||||
@@ -975,7 +993,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
||||
auto & hs_cur = hs_data[i];
|
||||
|
||||
// get the logits of the last token of the common prefix
|
||||
std::memcpy(tok_logits.data(), batch_logits.data() + n_vocab*hs_cur.i_logits, n_vocab*sizeof(float));
|
||||
std::memcpy(tok_logits.data(), batch_logits.data() + hs_cur.i_logits*n_vocab, n_vocab*sizeof(float));
|
||||
|
||||
const auto first_probs = softmax(tok_logits);
|
||||
|
||||
@@ -1102,7 +1120,7 @@ static std::vector<winogrande_entry> load_winogrande_from_csv(const std::string
|
||||
* 0,Sarah was a much better surgeon than Maria so _ always got the easier cases.,Sarah,Maria,2
|
||||
*
|
||||
*/
|
||||
static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
||||
static void winogrande_score(llama_context * ctx, const common_params & params) {
|
||||
|
||||
constexpr int k_min_trailing_ctx = 3;
|
||||
|
||||
@@ -1136,8 +1154,8 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
||||
LOG_INF("%s : tokenizing selected tasks\n", __func__);
|
||||
|
||||
for (auto & task : data) {
|
||||
task.seq_tokens[0] = ::llama_tokenize(ctx, task.first + task.choices[0] + task.second, true);
|
||||
task.seq_tokens[1] = ::llama_tokenize(ctx, task.first + task.choices[1] + task.second, true);
|
||||
task.seq_tokens[0] = common_tokenize(ctx, task.first + task.choices[0] + task.second, true);
|
||||
task.seq_tokens[1] = common_tokenize(ctx, task.first + task.choices[1] + task.second, true);
|
||||
|
||||
task.common_prefix = 0;
|
||||
for (size_t k = 0; k < task.seq_tokens[0].size(); k++) {
|
||||
@@ -1152,16 +1170,17 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
||||
task.seq_tokens[0].size() - task.common_prefix +
|
||||
task.seq_tokens[1].size() - task.common_prefix;
|
||||
|
||||
task.n_base1 = ::llama_tokenize(ctx, task.first + task.choices[0], true).size();
|
||||
task.n_base2 = ::llama_tokenize(ctx, task.first + task.choices[1], true).size();
|
||||
task.n_base1 = common_tokenize(ctx, task.first + task.choices[0], true).size();
|
||||
task.n_base2 = common_tokenize(ctx, task.first + task.choices[1], true).size();
|
||||
}
|
||||
|
||||
LOG_INF("%s : calculating winogrande score over selected tasks.\n", __func__);
|
||||
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
const int n_ctx = llama_n_ctx(ctx);
|
||||
const int n_batch = params.n_batch;
|
||||
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
|
||||
const int max_tasks_per_batch = 128;
|
||||
const int max_seq = std::min(2*max_tasks_per_batch, (int) llama_n_seq_max(ctx));
|
||||
|
||||
@@ -1169,7 +1188,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
||||
|
||||
std::vector<float> tok_logits(n_vocab);
|
||||
// TODO: this could be made smaller; it's currently the worst-case size
|
||||
std::vector<float> batch_logits(n_vocab*n_ctx);
|
||||
std::vector<float> batch_logits(size_t(n_ctx)*n_vocab);
|
||||
|
||||
std::vector<std::pair<size_t, llama_token>> eval_pairs;
|
||||
std::vector<float> eval_results;
|
||||
@@ -1184,7 +1203,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
||||
size_t i1 = i0;
|
||||
size_t i_logits = 0;
|
||||
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
while (n_cur + (int) data[i1].required_tokens <= n_ctx) {
|
||||
int n_logits = 0;
|
||||
@@ -1194,7 +1213,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < data[i1].common_prefix; ++i) {
|
||||
llama_batch_add(batch, data[i1].seq_tokens[0][i], i, { s0 + 0, s0 + 1 }, false);
|
||||
common_batch_add(batch, data[i1].seq_tokens[0][i], i, { s0 + 0, s0 + 1 }, false);
|
||||
}
|
||||
batch.logits[batch.n_tokens - 1] = true;
|
||||
n_logits += 1;
|
||||
@@ -1202,7 +1221,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
||||
for (int s = 0; s < 2; ++s) {
|
||||
// TODO: end before the last token, no need to predict past the end of the sequences
|
||||
for (size_t i = data[i1].common_prefix; i < data[i1].seq_tokens[s].size(); ++i) {
|
||||
llama_batch_add(batch, data[i1].seq_tokens[s][i], i, { s0 + s }, true);
|
||||
common_batch_add(batch, data[i1].seq_tokens[s][i], i, { s0 + s }, true);
|
||||
n_logits += 1;
|
||||
}
|
||||
}
|
||||
@@ -1359,7 +1378,7 @@ static bool multiple_choice_prepare_one_task(llama_context * ctx, multiple_choic
|
||||
}
|
||||
return false;
|
||||
}
|
||||
task.seq_tokens.emplace_back(::llama_tokenize(ctx, task.question + " " + answer, true));
|
||||
task.seq_tokens.emplace_back(::common_tokenize(ctx, task.question + " " + answer, true));
|
||||
}
|
||||
auto min_len = task.seq_tokens.front().size();
|
||||
for (auto& seq : task.seq_tokens) {
|
||||
@@ -1403,7 +1422,7 @@ static bool multiple_choice_prepare_one_task(llama_context * ctx, multiple_choic
|
||||
// git@hf.co:datasets/Stevross/mmlu
|
||||
// https://huggingface.co/datasets/truthful_qa
|
||||
//
|
||||
static void multiple_choice_score(llama_context * ctx, const gpt_params & params) {
|
||||
static void multiple_choice_score(llama_context * ctx, const common_params & params) {
|
||||
|
||||
std::istringstream strstream(params.prompt);
|
||||
uint32_t n_task;
|
||||
@@ -1509,17 +1528,18 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
||||
|
||||
LOG("\ntask\tacc_norm\n");
|
||||
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
const int n_ctx = llama_n_ctx(ctx);
|
||||
const int n_batch = params.n_batch;
|
||||
|
||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx));
|
||||
|
||||
const int max_tasks_per_batch = 32;
|
||||
const int max_seq = std::min(4*max_tasks_per_batch, (int) llama_n_seq_max(ctx));
|
||||
|
||||
llama_batch batch = llama_batch_init(n_ctx, 0, max_seq);
|
||||
|
||||
std::vector<float> tok_logits(n_vocab);
|
||||
std::vector<float> batch_logits(n_vocab*n_ctx);
|
||||
std::vector<float> batch_logits(size_t(n_ctx)*n_vocab);
|
||||
|
||||
std::vector<std::pair<size_t, llama_token>> eval_pairs;
|
||||
std::vector<float> eval_results;
|
||||
@@ -1536,7 +1556,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
||||
size_t i1 = i0;
|
||||
size_t i_logits = 0; // this tells us how many logits were needed before this point in the batch
|
||||
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
|
||||
// batch as much tasks as possible into the available context
|
||||
// each task has 4 unique sequence ids - one for each ending
|
||||
@@ -1559,7 +1579,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
||||
|
||||
for (size_t i = 0; i < cur_task.common_prefix; ++i) {
|
||||
//llama_batch_add(batch, cur_task.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3}, false);
|
||||
llama_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false);
|
||||
common_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false);
|
||||
}
|
||||
batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
|
||||
n_logits += 1;
|
||||
@@ -1569,7 +1589,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
||||
// TODO: don't evaluate the last token of each sequence
|
||||
for (size_t i = cur_task.common_prefix; i < seq_tokens_size; ++i) {
|
||||
const bool needs_logits = i < seq_tokens_size - 1;
|
||||
llama_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, needs_logits);
|
||||
common_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, needs_logits);
|
||||
n_logits += needs_logits;
|
||||
}
|
||||
}
|
||||
@@ -1627,7 +1647,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
||||
//LOG("\n common_prefix: %zu\n", cur_task.common_prefix);
|
||||
|
||||
// get the logits of the last token of the common prefix
|
||||
std::memcpy(tok_logits.data(), batch_logits.data() + n_vocab*cur_task.i_logits, n_vocab*sizeof(float));
|
||||
std::memcpy(tok_logits.data(), batch_logits.data() + cur_task.i_logits*n_vocab, n_vocab*sizeof(float));
|
||||
|
||||
const auto first_probs = softmax(tok_logits);
|
||||
|
||||
@@ -1683,7 +1703,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
||||
LOG_INF("\n");
|
||||
}
|
||||
|
||||
static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
static void kl_divergence(llama_context * ctx, const common_params & params) {
|
||||
if (params.logits_file.empty()) {
|
||||
LOG_ERR("%s: you must provide a name of a file containing the log probabilities of the base model\n", __func__);
|
||||
return;
|
||||
@@ -1709,7 +1729,8 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
__func__, params.logits_file.c_str(), n_ctx, params.n_ctx);
|
||||
}
|
||||
|
||||
int n_vocab, n_chunk;
|
||||
int n_vocab;
|
||||
int n_chunk;
|
||||
in.read((char *)&n_vocab, sizeof(n_vocab));
|
||||
in.read((char *)&n_chunk, sizeof(n_chunk));
|
||||
if (in.fail()) {
|
||||
@@ -1720,7 +1741,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
LOG_ERR("%s: inconsistent vocabulary (%d vs %d)\n", __func__, n_vocab, llama_n_vocab(llama_get_model(ctx)));
|
||||
}
|
||||
|
||||
std::vector<llama_token> tokens(n_ctx * n_chunk);
|
||||
std::vector<llama_token> tokens(size_t(n_ctx) * n_chunk);
|
||||
if (in.read((char *)tokens.data(), tokens.size()*sizeof(tokens[0])).fail()) {
|
||||
LOG_ERR("%s: failed reading evaluation tokens from %s\n", __func__, params.logits_file.c_str());
|
||||
return;
|
||||
@@ -1737,7 +1758,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
std::vector<float> p_diff_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);
|
||||
std::vector<float> logits;
|
||||
if (num_batches > 1) {
|
||||
logits.reserve(n_ctx * n_vocab);
|
||||
logits.reserve(size_t(n_ctx) * n_vocab);
|
||||
}
|
||||
|
||||
std::vector<std::thread> workers(std::thread::hardware_concurrency() - 1);
|
||||
@@ -1778,6 +1799,8 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
// clear the KV cache
|
||||
llama_kv_cache_clear(ctx);
|
||||
|
||||
llama_batch batch = llama_batch_init(n_batch, 0, 1);
|
||||
|
||||
for (int j = 0; j < num_batches; ++j) {
|
||||
const int batch_start = start + j * n_batch;
|
||||
const int batch_size = std::min(end - batch_start, n_batch);
|
||||
@@ -1790,9 +1813,14 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
|
||||
}
|
||||
|
||||
// TODO: use llama_batch.logits instead of relying on logits_all == true
|
||||
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
|
||||
common_batch_clear(batch);
|
||||
for (int i = 0; i < batch_size; i++) {
|
||||
common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
|
||||
}
|
||||
|
||||
if (llama_decode(ctx, batch)) {
|
||||
LOG_ERR("%s : failed to eval\n", __func__);
|
||||
llama_batch_free(batch);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1801,10 +1829,12 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
|
||||
if (num_batches > 1) {
|
||||
const auto * batch_logits = llama_get_logits(ctx);
|
||||
logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
|
||||
logits.insert(logits.end(), batch_logits, batch_logits + size_t(batch_size) * n_vocab);
|
||||
}
|
||||
}
|
||||
|
||||
llama_batch_free(batch);
|
||||
|
||||
const auto t_end = std::chrono::high_resolution_clock::now();
|
||||
|
||||
if (i == 0) {
|
||||
@@ -1822,7 +1852,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
|
||||
const int first = n_ctx/2;
|
||||
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
|
||||
process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
|
||||
process_logits(n_vocab, all_logits + size_t(first)*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
|
||||
workers, log_probs_uint16, kld, kld_ptr, p_diff_ptr);
|
||||
p_diff_ptr += n_ctx - 1 - first;
|
||||
kld_ptr += n_ctx - 1 - first;
|
||||
@@ -1955,17 +1985,17 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
params.n_ctx = 512;
|
||||
params.logits_all = true;
|
||||
params.escape = false;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_PERPLEXITY)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PERPLEXITY)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
const int32_t n_ctx = params.n_ctx;
|
||||
|
||||
@@ -2004,7 +2034,7 @@ int main(int argc, char ** argv) {
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the model and apply lora adapter, if any
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -2023,7 +2053,7 @@ int main(int argc, char ** argv) {
|
||||
// print system information
|
||||
{
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
}
|
||||
|
||||
struct results_perplexity results;
|
||||
|
||||
@@ -142,7 +142,7 @@ static bool tensor_is_contiguous(const struct ggml_tensor * tensor) {
|
||||
}
|
||||
|
||||
static void test_roundtrip_on_chunk(
|
||||
const ggml_tensor * layer, int64_t offset, int64_t chunk_size, const ggml_type_traits_t & qfns, bool use_reference,
|
||||
const ggml_tensor * layer, int64_t offset, int64_t chunk_size, const ggml_type_traits & qfns, bool use_reference,
|
||||
float * input_scratch, char * quantized_scratch, float * output_scratch, error_stats & stats
|
||||
) {
|
||||
if (layer->type == GGML_TYPE_F16) {
|
||||
@@ -166,7 +166,7 @@ static void test_roundtrip_on_chunk(
|
||||
|
||||
// Run quantization function for a single layer and update error stats
|
||||
static void test_roundtrip_on_layer(
|
||||
std::string & name, bool print_layer_stats, const ggml_type_traits_t & qfns, bool use_reference,
|
||||
std::string & name, bool print_layer_stats, const ggml_type_traits & qfns, bool use_reference,
|
||||
const ggml_tensor * layer, std::vector<float> & input_scratch, std::vector<char> & quantized_scratch,
|
||||
std::vector<float> & output_scratch, error_stats & total_error, int max_thread = 0
|
||||
) {
|
||||
@@ -371,8 +371,8 @@ int main(int argc, char ** argv) {
|
||||
if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) {
|
||||
continue;
|
||||
}
|
||||
ggml_type_traits_t qfns = ggml_internal_get_type_traits(type);
|
||||
if (qfns.from_float && qfns.to_float) {
|
||||
const auto * qfns = ggml_get_type_traits(type);
|
||||
if (qfns->from_float && qfns->to_float) {
|
||||
if (params.verbose) {
|
||||
printf("testing %s ...\n", ggml_type_name(type));
|
||||
}
|
||||
@@ -393,7 +393,7 @@ int main(int argc, char ** argv) {
|
||||
test_roundtrip_on_layer(
|
||||
layer_name,
|
||||
params.per_layer_stats,
|
||||
qfns,
|
||||
*qfns,
|
||||
params.reference,
|
||||
kv_tensor.second,
|
||||
input_scratch,
|
||||
|
||||
@@ -77,7 +77,7 @@ static std::vector<chunk> chunk_file(const std::string & filename, int chunk_siz
|
||||
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
|
||||
size_t n_tokens = tokens.size();
|
||||
for (size_t i = 0; i < n_tokens; i++) {
|
||||
llama_batch_add(batch, tokens[i], i, { seq_id }, true);
|
||||
common_batch_add(batch, tokens[i], i, { seq_id }, true);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -107,18 +107,18 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
|
||||
}
|
||||
|
||||
float * out = output + batch.seq_id[i][0] * n_embd;
|
||||
llama_embd_normalize(embd, out, n_embd);
|
||||
common_embd_normalize(embd, out, n_embd);
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_RETRIEVAL, print_usage)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_RETRIEVAL, print_usage)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
common_init();
|
||||
|
||||
// For BERT models, batch size must be equal to ubatch size
|
||||
params.n_ubatch = params.n_batch;
|
||||
@@ -149,7 +149,7 @@ int main(int argc, char ** argv) {
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the model
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -176,7 +176,7 @@ int main(int argc, char ** argv) {
|
||||
// print system information
|
||||
{
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
}
|
||||
|
||||
// max batch size
|
||||
@@ -185,7 +185,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// tokenize the prompts and trim
|
||||
for (auto & chunk : chunks) {
|
||||
auto inp = ::llama_tokenize(ctx, chunk.textdata, true, false);
|
||||
auto inp = common_tokenize(ctx, chunk.textdata, true, false);
|
||||
if (inp.size() > n_batch) {
|
||||
LOG_ERR("%s: chunk size (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
|
||||
__func__, (long long int) inp.size(), (long long int) n_batch);
|
||||
@@ -204,7 +204,7 @@ int main(int argc, char ** argv) {
|
||||
LOG_INF("%s: prompt %d: '%s'\n", __func__, i, chunks[i].textdata.c_str());
|
||||
LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, chunks[i].tokens.size());
|
||||
for (int j = 0; j < (int) chunks[i].tokens.size(); j++) {
|
||||
LOG_INF("%6d -> '%s'\n", chunks[i].tokens[j], llama_token_to_piece(ctx, chunks[i].tokens[j]).c_str());
|
||||
LOG_INF("%6d -> '%s'\n", chunks[i].tokens[j], common_token_to_piece(ctx, chunks[i].tokens[j]).c_str());
|
||||
}
|
||||
LOG_INF("\n\n");
|
||||
}
|
||||
@@ -232,7 +232,7 @@ int main(int argc, char ** argv) {
|
||||
if (batch.n_tokens + n_toks > n_batch) {
|
||||
float * out = emb + p * n_embd;
|
||||
batch_decode(ctx, batch, out, s, n_embd);
|
||||
llama_batch_clear(batch);
|
||||
common_batch_clear(batch);
|
||||
p += s;
|
||||
s = 0;
|
||||
}
|
||||
@@ -260,20 +260,20 @@ int main(int argc, char ** argv) {
     while (true) {
         LOG("Enter query: ");
         std::getline(std::cin, query);
-        std::vector<int32_t> query_tokens = llama_tokenize(ctx, query, true);
+        std::vector<int32_t> query_tokens = common_tokenize(ctx, query, true);

         batch_add_seq(query_batch, query_tokens, 0);

         std::vector<float> query_emb(n_embd, 0);
         batch_decode(ctx, query_batch, query_emb.data(), 1, n_embd);

-        llama_batch_clear(query_batch);
+        common_batch_clear(query_batch);

         // compute cosine similarities
         {
             std::vector<std::pair<int, float>> similarities;
             for (int i = 0; i < n_chunks; i++) {
-                float sim = llama_embd_similarity_cos(chunks[i].embedding.data(), query_emb.data(), n_embd);
+                float sim = common_embd_similarity_cos(chunks[i].embedding.data(), query_emb.data(), n_embd);
                 similarities.push_back(std::make_pair(i, sim));
             }
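To make the retrieval step above concrete, here is a hedged sketch of turning the per-chunk similarities into a ranked top-k list. `common_embd_similarity_cos()` is the helper used in the diff; the function name, `top_k` handling and data layout are assumptions for illustration, not the example's actual code:

```cpp
#include <algorithm>
#include <utility>
#include <vector>

#include "common.h"

// Rank chunk embeddings against a query embedding, highest cosine similarity first.
static std::vector<std::pair<int, float>> rank_chunks(
        const std::vector<std::vector<float>> & chunk_embeddings,
        const std::vector<float> & query_emb, int n_embd, int top_k) {
    std::vector<std::pair<int, float>> similarities;
    for (int i = 0; i < (int) chunk_embeddings.size(); i++) {
        const float sim = common_embd_similarity_cos(chunk_embeddings[i].data(), query_emb.data(), n_embd);
        similarities.emplace_back(i, sim);
    }
    std::sort(similarities.begin(), similarities.end(),
              [](const auto & a, const auto & b) { return a.second > b.second; });
    if ((int) similarities.size() > top_k) {
        similarities.resize(top_k);
    }
    return similarities;
}
```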
@@ -151,7 +151,7 @@ int main(int argc, char * argv[]) {
         get_backend_memory(&free_mem, &total_mem);
     }
     printf("Starting RPC server on %s, backend memory: %zu MB\n", endpoint.c_str(), free_mem / (1024 * 1024));
-    start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem);
+    ggml_backend_rpc_start_server(backend, endpoint.c_str(), free_mem, total_mem);
     ggml_backend_free(backend);
     return 0;
 }
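For context on the rename above, a minimal host program built around `ggml_backend_rpc_start_server()` might look like the sketch below. This is not part of the diff: the header names, the CPU backend choice and the hard-coded endpoint and memory figures are assumptions for illustration.

```cpp
#include "ggml-backend.h"
#include "ggml-rpc.h"

int main() {
    ggml_backend_t backend = ggml_backend_cpu_init();

    const size_t free_mem  = 8ull  * 1024 * 1024 * 1024; // report 8 GiB free (made up)
    const size_t total_mem = 16ull * 1024 * 1024 * 1024; // report 16 GiB total (made up)

    // blocks and serves tensor operations for remote llama.cpp instances
    ggml_backend_rpc_start_server(backend, "0.0.0.0:50052", free_mem, total_mem);

    ggml_backend_free(backend);
    return 0;
}
```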
@@ -6,12 +6,12 @@
|
||||
#include <cstdio>
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
params.prompt = "The quick brown fox";
|
||||
params.sparams.seed = 1234;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ int main(int argc, char ** argv) {
|
||||
std::string result2;
|
||||
|
||||
// init
|
||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init = common_init_from_params(params);
|
||||
|
||||
llama_model * model = llama_init.model;
|
||||
llama_context * ctx = llama_init.context;
|
||||
@@ -42,15 +42,21 @@ int main(int argc, char ** argv) {
|
||||
|
||||
llama_sampler * smpl = llama_sampler_chain_init(sparams);
|
||||
|
||||
llama_sampler_chain_add(smpl, llama_sampler_init_softmax());
|
||||
llama_sampler_chain_add(smpl, llama_sampler_init_dist(params.sparams.seed));
|
||||
|
||||
// tokenize prompt
|
||||
auto tokens = llama_tokenize(ctx, params.prompt, true);
|
||||
auto tokens = common_tokenize(ctx, params.prompt, true);
|
||||
|
||||
// prepare the batch
|
||||
llama_batch batch = llama_batch_init(tokens.size(), 0, 1);
|
||||
for (size_t i = 0; i < tokens.size(); i++) {
|
||||
common_batch_add(batch, tokens[i], i, {0}, false);
|
||||
}
|
||||
batch.logits[batch.n_tokens - 1] = true; // generate next token
|
||||
|
||||
// evaluate prompt
|
||||
llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), n_past, 0));
|
||||
n_past += tokens.size();
|
||||
llama_decode(ctx, batch);
|
||||
n_past += batch.n_tokens;
|
||||
|
||||
// save state (rng, logits, embedding and kv_cache) to file
|
||||
{
|
||||
@@ -72,13 +78,17 @@ int main(int argc, char ** argv) {
|
||||
|
||||
for (auto i = 0; i < params.n_predict; i++) {
|
||||
auto next_token = llama_sampler_sample(smpl, ctx, -1);
|
||||
auto next_token_str = llama_token_to_piece(ctx, next_token);
|
||||
auto next_token_str = common_token_to_piece(ctx, next_token);
|
||||
|
||||
printf("%s", next_token_str.c_str());
|
||||
result0 += next_token_str;
|
||||
|
||||
if (llama_decode(ctx, llama_batch_get_one(&next_token, 1, n_past, 0))) {
|
||||
common_batch_clear(batch);
|
||||
common_batch_add(batch, next_token, n_past, {0}, true);
|
||||
|
||||
if (llama_decode(ctx, batch)) {
|
||||
fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
|
||||
llama_batch_free(batch);
|
||||
llama_free(ctx);
|
||||
llama_free_model(model);
|
||||
return 1;
|
||||
@@ -92,11 +102,10 @@ int main(int argc, char ** argv) {
|
||||
llama_free(ctx);
|
||||
|
||||
// make new context
|
||||
auto * ctx2 = llama_new_context_with_model(model, llama_context_params_from_gpt_params(params));
|
||||
auto * ctx2 = llama_new_context_with_model(model, common_context_params_to_llama(params));
|
||||
|
||||
llama_sampler * smpl2 = llama_sampler_chain_init(sparams);
|
||||
|
||||
llama_sampler_chain_add(smpl2, llama_sampler_init_softmax());
|
||||
llama_sampler_chain_add(smpl2, llama_sampler_init_dist(params.sparams.seed));
|
||||
|
||||
printf("\nsecond run: %s", params.prompt.c_str());
|
||||
@@ -128,13 +137,17 @@ int main(int argc, char ** argv) {
|
||||
// second run
|
||||
for (auto i = 0; i < params.n_predict; i++) {
|
||||
auto next_token = llama_sampler_sample(smpl2, ctx2, -1);
|
||||
auto next_token_str = llama_token_to_piece(ctx2, next_token);
|
||||
auto next_token_str = common_token_to_piece(ctx2, next_token);
|
||||
|
||||
printf("%s", next_token_str.c_str());
|
||||
result1 += next_token_str;
|
||||
|
||||
if (llama_decode(ctx2, llama_batch_get_one(&next_token, 1, n_past, 0))) {
|
||||
common_batch_clear(batch);
|
||||
common_batch_add(batch, next_token, n_past, {0}, true);
|
||||
|
||||
if (llama_decode(ctx2, batch)) {
|
||||
fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
|
||||
llama_batch_free(batch);
|
||||
llama_free(ctx2);
|
||||
llama_free_model(model);
|
||||
return 1;
|
||||
@@ -152,11 +165,10 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
|
||||
// make new context
|
||||
auto * ctx3 = llama_new_context_with_model(model, llama_context_params_from_gpt_params(params));
|
||||
auto * ctx3 = llama_new_context_with_model(model, common_context_params_to_llama(params));
|
||||
|
||||
llama_sampler * smpl3 = llama_sampler_chain_init(sparams);
|
||||
|
||||
llama_sampler_chain_add(smpl3, llama_sampler_init_softmax());
|
||||
llama_sampler_chain_add(smpl3, llama_sampler_init_dist(params.sparams.seed));
|
||||
|
||||
printf("\nsingle seq run: %s", params.prompt.c_str());
|
||||
@@ -216,13 +228,17 @@ int main(int argc, char ** argv) {
|
||||
// third run with seq 1 instead of 0
|
||||
for (auto i = 0; i < params.n_predict; i++) {
|
||||
auto next_token = llama_sampler_sample(smpl3, ctx3, -1);
|
||||
auto next_token_str = llama_token_to_piece(ctx3, next_token);
|
||||
auto next_token_str = common_token_to_piece(ctx3, next_token);
|
||||
|
||||
printf("%s", next_token_str.c_str());
|
||||
result2 += next_token_str;
|
||||
|
||||
if (llama_decode(ctx3, llama_batch_get_one(&next_token, 1, n_past, 1))) {
|
||||
common_batch_clear(batch);
|
||||
common_batch_add(batch, next_token, n_past, {1}, true);
|
||||
|
||||
if (llama_decode(ctx3, batch)) {
|
||||
fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
|
||||
llama_batch_free(batch);
|
||||
llama_free(ctx3);
|
||||
llama_free_model(model);
|
||||
return 1;
|
||||
@@ -236,6 +252,7 @@ int main(int argc, char ** argv) {
|
||||
llama_sampler_free(smpl2);
|
||||
llama_sampler_free(smpl3);
|
||||
|
||||
llama_batch_free(batch);
|
||||
llama_free(ctx3);
|
||||
llama_free_model(model);
@@ -18,6 +18,8 @@ The project is under active development, and we are [looking for feedback and co
|
||||
|
||||
## Usage
|
||||
|
||||
<!-- Note for contributors: The list below is generated by llama-gen-docs -->
|
||||
|
||||
**Common params**
|
||||
|
||||
| Argument | Explanation |
|
||||
@@ -58,8 +60,6 @@ The project is under active development, and we are [looking for feedback and co
|
||||
| `--yarn-attn-factor N` | YaRN: scale sqrt(t) or attention magnitude (default: 1.0)<br/>(env: LLAMA_ARG_YARN_ATTN_FACTOR) |
|
||||
| `--yarn-beta-slow N` | YaRN: high correction dim or alpha (default: 1.0)<br/>(env: LLAMA_ARG_YARN_BETA_SLOW) |
|
||||
| `--yarn-beta-fast N` | YaRN: low correction dim or beta (default: 32.0)<br/>(env: LLAMA_ARG_YARN_BETA_FAST) |
|
||||
| `-gan, --grp-attn-n N` | group-attention factor (default: 1)<br/>(env: LLAMA_ARG_GRP_ATTN_N) |
|
||||
| `-gaw, --grp-attn-w N` | group-attention width (default: 512.0)<br/>(env: LLAMA_ARG_GRP_ATTN_W) |
|
||||
| `-dkvc, --dump-kv-cache` | verbose print of the KV cache |
|
||||
| `-nkvo, --no-kv-offload` | disable KV offload<br/>(env: LLAMA_ARG_NO_KV_OFFLOAD) |
|
||||
| `-ctk, --cache-type-k TYPE` | KV cache data type for K (default: f16)<br/>(env: LLAMA_ARG_CACHE_TYPE_K) |
|
||||
@@ -100,7 +100,7 @@ The project is under active development, and we are [looking for feedback and co
|
||||
| Argument | Explanation |
|
||||
| -------- | ----------- |
|
||||
| `--samplers SAMPLERS` | samplers that will be used for generation in the order, separated by ';'<br/>(default: top_k;tfs_z;typ_p;top_p;min_p;temperature) |
|
||||
| `-s, --seed SEED` | RNG seed (default: 4294967295, use random seed for 4294967295) |
|
||||
| `-s, --seed SEED` | RNG seed (default: -1, use random seed for -1) |
|
||||
| `--sampling-seq SEQUENCE` | simplified sequence for samplers that will be used (default: kfypmt) |
|
||||
| `--ignore-eos` | ignore end of stream token and continue generating (implies --logit-bias EOS-inf) |
|
||||
| `--penalize-nl` | penalize newline tokens (default: false) |
|
||||
@@ -147,9 +147,11 @@ The project is under active development, and we are [looking for feedback and co
|
||||
| `--ssl-cert-file FNAME` | path to file a PEM-encoded SSL certificate<br/>(env: LLAMA_ARG_SSL_CERT_FILE) |
|
||||
| `-to, --timeout N` | server read/write timeout in seconds (default: 600)<br/>(env: LLAMA_ARG_TIMEOUT) |
|
||||
| `--threads-http N` | number of threads used to process HTTP requests (default: -1)<br/>(env: LLAMA_ARG_THREADS_HTTP) |
|
||||
| `-spf, --system-prompt-file FNAME` | set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications |
|
||||
| `--cache-reuse N` | min chunk size to attempt reusing from the cache via KV shifting (default: 0)<br/>(env: LLAMA_ARG_CACHE_REUSE) |
|
||||
| `--metrics` | enable prometheus compatible metrics endpoint (default: disabled)<br/>(env: LLAMA_ARG_ENDPOINT_METRICS) |
|
||||
| `--no-slots` | disables slots monitoring endpoint (default: enabled)<br/>(env: LLAMA_ARG_NO_ENDPOINT_SLOTS) |
|
||||
| `--slots` | enable slots monitoring endpoint (default: disabled)<br/>(env: LLAMA_ARG_ENDPOINT_SLOTS) |
|
||||
| `--props` | enable changing global properties via POST /props (default: disabled)<br/>(env: LLAMA_ARG_ENDPOINT_PROPS) |
|
||||
| `--no-slots` | disables slots monitoring endpoint<br/>(env: LLAMA_ARG_NO_ENDPOINT_SLOTS) |
|
||||
| `--slot-save-path PATH` | path to save slot kv cache (default: disabled) |
|
||||
| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted:<br/>https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template<br/>(env: LLAMA_ARG_CHAT_TEMPLATE) |
|
||||
| `-sps, --slot-prompt-similarity SIMILARITY` | how much the prompt of a request must match the prompt of a slot in order to use that slot (default: 0.50, 0.0 = disabled)<br/> |
|
||||
@@ -316,7 +318,18 @@ node index.js
|
||||
|
||||
- The prompt is a string or an array with the first element given as a string
|
||||
- The model's `tokenizer.ggml.add_bos_token` metadata is `true`
|
||||
- The system prompt is empty
|
||||
|
||||
These input shapes and data type are allowed for `prompt`:
|
||||
|
||||
- Single string: `"string"`
|
||||
- Single sequence of tokens: `[12, 34, 56]`
|
||||
- Mixed tokens and strings: `[12, 34, "string", 56, 78]`
|
||||
|
||||
Multiple prompts are also supported. In this case, the completion result will be an array.
|
||||
|
||||
- Only strings: `["string1", "string2"]`
|
||||
- Strings and sequences of tokens: `["string1", [12, 34, 56]]`
|
||||
- Mixed types: `[[12, 34, "string", 56, 78], [12, 34, 56], "string"]`
|
||||
|
||||
`temperature`: Adjust the randomness of the generated text. Default: `0.8`
@@ -332,6 +345,8 @@ node index.js
|
||||
|
||||
`n_predict`: Set the maximum number of tokens to predict when generating text. **Note:** May exceed the set limit slightly if the last token is a partial multibyte character. When 0, no tokens will be generated but the prompt is evaluated into the cache. Default: `-1`, where `-1` is infinity.
|
||||
|
||||
`n_indent`: Specify the minimum line indentation for the generated text in number of whitespace characters. Useful for code completion tasks. Default: `0`
|
||||
|
||||
`n_keep`: Specify the number of tokens from the prompt to retain when the context size is exceeded and tokens need to be discarded. The number excludes the BOS token.
|
||||
By default, this value is set to `0`, meaning no tokens are kept. Use `-1` to retain all tokens from the prompt.
|
||||
|
||||
@@ -374,14 +389,14 @@ node index.js
|
||||
|
||||
`min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0`
|
||||
|
||||
`t_max_predict_ms`: Set a time limit in milliseconds for the prediction (a.k.a. text-generation) phase. The timeout will trigger if the generation takes more than the specified time (measured since the first token was generated) and if a new-line character has already been generated. Useful for FIM applications. Default: `0`, which is disabled.
|
||||
|
||||
`image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "<BASE64_STRING>", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA.
|
||||
|
||||
`id_slot`: Assign the completion task to a specific slot. If it is -1, the task will be assigned to an idle slot. Default: `-1`
|
||||
|
||||
`cache_prompt`: Re-use KV cache from a previous request if possible. This way the common prefix does not have to be re-processed, only the suffix that differs between the requests. Because (depending on the backend) the logits are **not** guaranteed to be bit-for-bit identical for different batch sizes (prompt processing vs. token generation) enabling this option can cause nondeterministic results. Default: `false`
|
||||
|
||||
`system_prompt`: Change the system prompt (initial prompt of all slots), this is useful for chat applications. [See more](#change-system-prompt-on-runtime)
|
||||
|
||||
`samplers`: The order the samplers should be applied in. An array of strings representing sampler type names. If a sampler is not set, it will not be used. If a sampler is specified more than once, it will be applied multiple times. Default: `["top_k", "tfs_z", "typical_p", "top_p", "min_p", "temperature"]` - these are all the available values.
|
||||
|
||||
**Response format**
|
||||
@@ -519,34 +534,62 @@ Requires a reranker model (such as [bge-reranker-v2-m3](https://huggingface.co/B

 Takes a prefix and a suffix and returns the predicted completion as stream.

-*Options:*
+*Options:*

-`input_prefix`: Set the prefix of the code to infill.
+- `input_prefix`: Set the prefix of the code to infill.
+- `input_suffix`: Set the suffix of the code to infill.
+- `input_extra`: Additional context inserted before the FIM prefix.
+- `prompt`: Added after the `FIM_MID` token

-`input_suffix`: Set the suffix of the code to infill.
+`input_extra` is array of `{"filename": string, "text": string}` objects.

-It also accepts all the options of `/completion` except `stream` and `prompt`.
+The endpoint also accepts all the options of `/completion`.

-- **GET** `/props`: Return current server settings.
+If the model has `FIM_REPO` and `FIM_FILE_SEP` tokens, the [repo-level pattern](https://arxiv.org/pdf/2409.12186) is used:
+
+```txt
+<FIM_REP>myproject
+<FIM_SEP>{chunk 0 filename}
+{chunk 0 text}
+<FIM_SEP>{chunk 1 filename}
+{chunk 1 text}
+...
+<FIM_SEP>filename
+<FIM_PRE>[input_prefix]<FIM_SUF>[input_suffix]<FIM_MID>[prompt]
+```
+
+If the tokens are missing, then the extra context is simply prefixed at the start:
+
+```txt
+[input_extra]<FIM_PRE>[input_prefix]<FIM_SUF>[input_suffix]<FIM_MID>[prompt]
+```
+
+### **GET** `/props`: Get server global properties.
|
||||
This endpoint is public (no API key check). By default, it is read-only. To make POST request to change global properties, you need to start server with `--props`
|
||||
|
||||
**Response format**
|
||||
|
||||
```json
|
||||
{
|
||||
"assistant_name": "",
|
||||
"user_name": "",
|
||||
"default_generation_settings": { ... },
|
||||
"total_slots": 1,
|
||||
"chat_template": ""
|
||||
}
|
||||
```
|
||||
|
||||
- `assistant_name` - the required assistant name to generate the prompt in case you have specified a system prompt for all slots.
|
||||
- `user_name` - the required anti-prompt to generate the prompt in case you have specified a system prompt for all slots.
|
||||
- `default_generation_settings` - the default generation settings for the `/completion` endpoint, which has the same fields as the `generation_settings` response object from the `/completion` endpoint.
|
||||
- `total_slots` - the total number of slots for process requests (defined by `--parallel` option)
|
||||
- `chat_template` - the model's original Jinja2 prompt template
|
||||
|
||||
### POST `/props`: Change server global properties.
|
||||
|
||||
To use this endpoint with POST method, you need to start server with `--props`
|
||||
|
||||
*Options:*
|
||||
|
||||
- None yet
|
||||
|
||||
### POST `/v1/chat/completions`: OpenAI-compatible Chat Completions API
|
||||
|
||||
Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming modes are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with the OpenAI API spec are being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used.
|
||||
@@ -813,28 +856,6 @@ To know the `id` of the adapter, use GET `/lora-adapters`

## More examples

### Change system prompt on runtime

To use the server example to serve multiple chat-type clients while keeping the same system prompt, you can utilize the option `system_prompt`. This only needs to be used once.

`prompt`: Specify a context that you want all connecting clients to respect.

`anti_prompt`: Specify the word you want to use to instruct the model to stop. This must be sent to each client through the `/props` endpoint.

`assistant_name`: The bot's name, which is necessary for each client to generate the prompt. This must be sent to each client through the `/props` endpoint.

```json
{
    "system_prompt": {
        "prompt": "Transcript of a never ending dialog, where the User interacts with an Assistant.\nThe Assistant is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.\nUser: Recommend a nice restaurant in the area.\nAssistant: I recommend the restaurant \"The Golden Duck\". It is a 5 star restaurant with a great view of the city. The food is delicious and the service is excellent. The prices are reasonable and the portions are generous. The restaurant is located at 123 Main Street, New York, NY 10001. The phone number is (212) 555-1234. The hours are Monday through Friday from 11:00 am to 10:00 pm. The restaurant is closed on Saturdays and Sundays.\nUser: Who is Richard Feynman?\nAssistant: Richard Feynman was an American physicist who is best known for his work in quantum mechanics and particle physics. He was awarded the Nobel Prize in Physics in 1965 for his contributions to the development of quantum electrodynamics. He was a popular lecturer and author, and he wrote several books, including \"Surely You're Joking, Mr. Feynman!\" and \"What Do You Care What Other People Think?\".\nUser:",
        "anti_prompt": "User:",
        "assistant_name": "Assistant:"
    }
}
```

**NOTE**: You can do this automatically when starting the server by simply creating a .json file with these options and using the CLI option `-spf FNAME` or `--system-prompt-file FNAME`.

### Interactive mode

Check the sample in [chat.mjs](chat.mjs).

@@ -43,6 +43,8 @@
|
||||
top_k: 0, // <= 0 to use vocab size
|
||||
top_p: 1.0, // 1.0 = disabled
|
||||
min_p: 0.05, // 0 = disabled; recommended for non-english: ~ 0.4
|
||||
xtc_probability: 0.0, // 0 = disabled;
|
||||
xtc_threshold: 0.1, // > 0.5 disables XTC;
|
||||
tfs_z: 1.0, // 1.0 = disabled
|
||||
typical_p: 1.0, // 1.0 = disabled
|
||||
presence_penalty: 0.0, // 0.0 = disabled
|
||||
@@ -836,6 +838,8 @@ return html`
|
||||
${FloatField({ label: "TFS-Z", title: "Activates tail-free sampling, a method used to limit the prediction of tokens that are too frequent. The parameter z controls the strength of this limitation. A value of 1.0 means that this function is deactivated.", max: 1.0, min: 0.0, name: "tfs_z", step: 0.01, value: params.value.tfs_z })}
|
||||
${FloatField({ label: "Frequency Penalty", title: "A penalty that is applied based on the frequency with which certain tokens occur in the training data set. A higher value results in rare tokens being favoured.", max: 1.0, min: 0.0, name: "frequency_penalty", step: 0.01, value: params.value.frequency_penalty })}
|
||||
${FloatField({ label: "Typical-P", title: "Activates local typical sampling, a method used to limit the prediction of tokens that are atypical in the current context. The parameter p controls the strength of this limitation. A value of 1.0 means that this function is deactivated.", max: 1.0, min: 0.0, name: "typical_p", step: 0.01, value: params.value.typical_p })}
|
||||
${FloatField({ label: "XTC probability", title: "Sets the chance for token removal (checked once on sampler start)", max: 1.0, min: 0.0, name: "xtc_probability", step: 0.01, value: params.value.xtc_probability })}
|
||||
${FloatField({ label: "XTC threshold", title: "Sets a minimum probability threshold for tokens to be removed", max: 0.5, min: 0.0, name: "xtc_threshold", step: 0.01, value: params.value.xtc_threshold })}
|
||||
${IntField({ label: "Min Keep", title: "If greater than 0, samplers are forced to return N possible tokens at minimum. Default is 0", max: 10, min: 0, name: "min_keep", value: params.value.min_keep })}
|
||||
</fieldset>
|
||||
|
||||
@@ -1132,6 +1136,8 @@ document.addEventListener('DOMContentLoaded', (event) => {
|
||||
const snapSettings = {
|
||||
temperature: { snapValue: 1.0, snapRangeMultiplier: 6 },
|
||||
min_p: { snapValue: 0.05, snapRangeMultiplier: 2 },
|
||||
xtc_probability: { snapValue: 0.0, snapRangeMultiplier: 4 },
|
||||
xtc_threshold: { snapValue: 0.5, snapRangeMultiplier: 4 },
|
||||
top_p: { snapValue: 1.0, snapRangeMultiplier: 4 },
|
||||
tfs_z: { snapValue: 1.0, snapRangeMultiplier: 4 },
|
||||
typical_p: { snapValue: 1.0, snapRangeMultiplier: 4 },
|
||||
|
||||
@@ -307,6 +307,8 @@
|
||||
top_k: 40, // <= 0 to use vocab size
|
||||
top_p: 0.95, // 1.0 = disabled
|
||||
min_p: 0.05, // 0 = disabled
|
||||
xtc_probability: 0.0, // 0 = disabled;
|
||||
xtc_threshold: 0.1, // > 0.5 disables XTC;
|
||||
tfs_z: 1.0, // 1.0 = disabled
|
||||
typical_p: 1.0, // 1.0 = disabled
|
||||
presence_penalty: 0.0, // 0.0 = disabled
|
||||
@@ -1013,6 +1015,8 @@
|
||||
${FloatField({ label: "Typical P", max: 1.0, min: 0.0, name: "typical_p", step: 0.01, value: params.value.typical_p })}
|
||||
${FloatField({ label: "Presence penalty", max: 1.0, min: 0.0, name: "presence_penalty", step: 0.01, value: params.value.presence_penalty })}
|
||||
${FloatField({ label: "Frequency penalty", max: 1.0, min: 0.0, name: "frequency_penalty", step: 0.01, value: params.value.frequency_penalty })}
|
||||
${FloatField({ label: "XTC probability", max: 1.0, min: 0.0, name: "xtc_probability", step: 0.01, value: params.value.xtc_probability })}
|
||||
${FloatField({ label: "XTC threshold", max: 0.5, min: 0.0, name: "xtc_threshold", step: 0.01, value: params.value.xtc_threshold })}
|
||||
</fieldset>
|
||||
<hr />
|
||||
<fieldset class="three">
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -529,7 +529,7 @@ export class SchemaConverter {
|
||||
return joinSeq();
|
||||
};
|
||||
|
||||
return this._addRule(name, "\"\\\"\" " + toRule(transform()) + " \"\\\"\" space")
|
||||
return this._addRule(name, "\"\\\"\" (" + toRule(transform()) + ") \"\\\"\" space")
|
||||
}
|
||||
|
||||
_notStrings(strings) {
|
||||
|
||||
File diff suppressed because it is too large
@@ -13,6 +13,10 @@ Feature: llama.cpp server
|
||||
And 32 as batch size
|
||||
And 2 slots
|
||||
|
||||
# the prompt is 301 tokens
|
||||
# the slot context is 256/2 = 128 tokens
|
||||
# the prompt is truncated to keep the last 109 tokens
|
||||
# 64 tokens are generated thanks to shifting the context when it gets full
|
||||
Scenario: Inference with context shift
|
||||
And 64 server max tokens to predict
|
||||
Then the server is starting
|
||||
|
||||
36  examples/server/tests/features/infill.feature  Normal file
@@ -0,0 +1,36 @@
|
||||
@llama.cpp
|
||||
@infill
|
||||
Feature: llama.cpp server
|
||||
|
||||
# The current model is made by adding FIM tokens to the existing stories260K
|
||||
# We may want to use a better model in the future, maybe something like SmolLM 360M
|
||||
|
||||
Background: Server startup
|
||||
Given a server listening on localhost:8080
|
||||
And a model file tinyllamas/stories260K-infill.gguf from HF repo ggml-org/models
|
||||
And a model file test-model-infill.gguf
|
||||
And a model alias tinyllama-infill
|
||||
And 42 as server seed
|
||||
And 1024 as batch size
|
||||
And 1024 as ubatch size
|
||||
And 2048 KV cache size
|
||||
And 64 max tokens to predict
|
||||
And 0.0 temperature
|
||||
Then the server is starting
|
||||
Then the server is healthy
|
||||
|
||||
Scenario: Infill without input_extra
|
||||
Given a prompt "Complete this"
|
||||
And an infill input extra none none
|
||||
And an infill input prefix "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n int n_threads = llama_"
|
||||
And an infill input suffix "}\n"
|
||||
And an infill request with no api error
|
||||
Then 64 tokens are predicted matching One|day|she|saw|big|scary|bird
|
||||
|
||||
Scenario: Infill with input_extra
|
||||
Given a prompt "Complete this"
|
||||
And an infill input extra "llama.h" "LLAMA_API int32_t llama_n_threads();\n"
|
||||
And an infill input prefix "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n int n_threads = llama_"
|
||||
And an infill input suffix "}\n"
|
||||
And an infill request with no api error
|
||||
Then 64 tokens are predicted matching cuts|Jimmy|mom|came|into|the|room"
|
||||
@@ -5,7 +5,7 @@ Feature: Security
|
||||
Background: Server startup with an api key defined
|
||||
Given a server listening on localhost:8080
|
||||
And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
|
||||
And a server api key llama.cpp
|
||||
And a server api key THIS_IS_THE_KEY
|
||||
Then the server is starting
|
||||
Then the server is healthy
|
||||
|
||||
@@ -16,11 +16,11 @@ Feature: Security
|
||||
And a completion request with <api_error> api error
|
||||
|
||||
Examples: Prompts
|
||||
| api_key | api_error |
|
||||
| llama.cpp | no |
|
||||
| llama.cpp | no |
|
||||
| hackeme | raised |
|
||||
| | raised |
|
||||
| api_key | api_error |
|
||||
| THIS_IS_THE_KEY | no |
|
||||
| THIS_IS_THE_KEY | no |
|
||||
| hackeme | raised |
|
||||
| | raised |
|
||||
|
||||
Scenario Outline: OAI Compatibility
|
||||
Given a system prompt test
|
||||
@@ -32,10 +32,10 @@ Feature: Security
|
||||
Given an OAI compatible chat completions request with <api_error> api error
|
||||
|
||||
Examples: Prompts
|
||||
| api_key | api_error |
|
||||
| llama.cpp | no |
|
||||
| llama.cpp | no |
|
||||
| hackme | raised |
|
||||
| api_key | api_error |
|
||||
| THIS_IS_THE_KEY | no |
|
||||
| THIS_IS_THE_KEY | no |
|
||||
| hackme | raised |
|
||||
|
||||
Scenario Outline: OAI Compatibility (invalid response formats)
|
||||
Given a system prompt test
|
||||
@@ -55,7 +55,7 @@ Feature: Security
|
||||
|
||||
|
||||
Scenario Outline: CORS Options
|
||||
Given a user api key llama.cpp
|
||||
Given a user api key THIS_IS_THE_KEY
|
||||
When an OPTIONS request is sent from <origin>
|
||||
Then CORS header <cors_header> is set to <cors_header_value>
|
||||
|
||||
|
||||
@@ -80,6 +80,11 @@ def step_server_config(context, server_fqdn: str, server_port: str):
|
||||
context.lora_file = None
|
||||
context.disable_ctx_shift = False
|
||||
|
||||
# infill
|
||||
context.infill_input_extra = None
|
||||
context.infill_input_suffix = ''
|
||||
context.infill_input_prefix = ''
|
||||
|
||||
context.tasks_result = []
|
||||
context.concurrent_tasks = []
|
||||
context.prompts = []
|
||||
@@ -291,6 +296,28 @@ async def step_request_completion(context, api_error: Literal['raised'] | str):
|
||||
assert completion == api_error_code, f"completion must be an {api_error_code} status code: {completion}"
|
||||
|
||||
|
||||
@step('an infill request with {api_error} api error')
|
||||
@async_run_until_complete
|
||||
async def step_request_completion(context, api_error: Literal['raised'] | str):
|
||||
if api_error != 'no':
|
||||
raise ValueError(f'api_error={api_error} is not yet implemented')
|
||||
payload = {
|
||||
"prompt": context.prompts[0],
|
||||
"input_suffix": context.infill_input_suffix,
|
||||
"input_prefix": context.infill_input_prefix,
|
||||
"n_predict": context.n_predict,
|
||||
"seed": context.seed,
|
||||
"temperature": context.temperature,
|
||||
}
|
||||
if context.infill_input_extra is not None:
|
||||
payload['input_extra'] = context.infill_input_extra
|
||||
async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session:
|
||||
async with session.post(f'{context.base_url}/infill',
|
||||
json=payload) as response:
|
||||
assert response.status == 200
|
||||
context.tasks_result = [await response.json()]
|
||||
|
||||
|
||||
@step('{predicted_n:d} tokens are predicted matching {re_content}')
|
||||
def step_n_tokens_predicted_with_content(context, predicted_n, re_content):
|
||||
context.completion = context.tasks_result.pop()
|
||||
@@ -539,6 +566,25 @@ def step_a_prompt_prompt(context, prompt):
|
||||
context.n_prompts = len(context.prompts)
|
||||
|
||||
|
||||
# TODO: allow this to be repeated
|
||||
@step('an infill input extra {filename} {text}')
|
||||
def step_infill_input_extra(context, filename, text):
|
||||
if filename == 'none':
|
||||
context.infill_input_extra = None
|
||||
else:
|
||||
context.infill_input_extra = [{'filename': filename, 'text': text}]
|
||||
|
||||
|
||||
@step('an infill input suffix {text}')
|
||||
def step_infill_input_suffix(context, text):
|
||||
context.infill_input_suffix = text
|
||||
|
||||
|
||||
@step('an infill input prefix {text}')
|
||||
def step_infill_input_prefix(context, text):
|
||||
context.infill_input_prefix = text
|
||||
|
||||
|
||||
@step('{num_prompts:d} prompts {prompt} with seed {seed:d}')
|
||||
def step_many_prompts(context, num_prompts, prompt, seed):
|
||||
if context.seed is None:
|
||||
@@ -1299,7 +1345,8 @@ async def wait_for_slots_status(context,
|
||||
|
||||
async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session:
|
||||
while True:
|
||||
async with await session.get(f'{base_url}/slots', params=params) as slots_response:
|
||||
headers = {'Authorization': f'Bearer {context.server_api_key}'}
|
||||
async with await session.get(f'{base_url}/slots', params=params, headers=headers) as slots_response:
|
||||
status_code = slots_response.status
|
||||
slots = await slots_response.json()
|
||||
if context.debug:
|
||||
@@ -1387,6 +1434,7 @@ def start_server_background(context):
|
||||
context.server_path = os.environ['LLAMA_SERVER_BIN_PATH']
|
||||
server_listen_addr = context.server_fqdn
|
||||
server_args = [
|
||||
'--slots', # requires to get slot status via /slots endpoint
|
||||
'--host', server_listen_addr,
|
||||
'--port', context.server_port,
|
||||
]
|
||||
|
||||
@@ -24,6 +24,22 @@
|
||||
#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"
|
||||
|
||||
using json = nlohmann::ordered_json;
|
||||
using llama_tokens = std::vector<llama_token>;
|
||||
|
||||
#define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
|
||||
#define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
|
||||
#define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
|
||||
#define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
|
||||
|
||||
#define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define SRV_DBG(fmt, ...) LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
|
||||
#define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
#define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
|
||||
|
||||
// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
|
||||
enum error_type {
|
||||
@@ -52,12 +68,238 @@ static T json_value(const json & body, const std::string & key, const T & defaul
|
||||
}
|
||||
|
||||
//
|
||||
// chat template utils
|
||||
// tokenizer and input processing utils
|
||||
//
|
||||
|
||||
static bool json_is_array_of_numbers(const json & data) {
|
||||
if (data.is_array()) {
|
||||
for (const auto & e : data) {
|
||||
if (!e.is_number_integer()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// is array having BOTH numbers & strings?
|
||||
static bool json_is_array_of_mixed_numbers_strings(const json & data) {
|
||||
bool seen_string = false;
|
||||
bool seen_number = false;
|
||||
if (data.is_array()) {
|
||||
for (const auto & e : data) {
|
||||
seen_string |= e.is_string();
|
||||
seen_number |= e.is_number_integer();
|
||||
if (seen_number && seen_string) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* this handles 2 cases:
|
||||
* - only string, example: "string"
|
||||
* - mixed string and tokens, example: [12, 34, "string", 56, 78]
|
||||
*/
|
||||
static llama_tokens tokenize_mixed(const llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) {
|
||||
// If `add_bos` is true, we only add BOS, when json_prompt is a string,
|
||||
// or the first element of the json_prompt array is a string.
|
||||
llama_tokens prompt_tokens;
|
||||
|
||||
if (json_prompt.is_array()) {
|
||||
bool first = true;
|
||||
for (const auto & p : json_prompt) {
|
||||
if (p.is_string()) {
|
||||
auto s = p.template get<std::string>();
|
||||
|
||||
llama_tokens p;
|
||||
if (first) {
|
||||
p = common_tokenize(ctx, s, add_special, parse_special);
|
||||
first = false;
|
||||
} else {
|
||||
p = common_tokenize(ctx, s, false, parse_special);
|
||||
}
|
||||
|
||||
prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
|
||||
} else {
|
||||
if (first) {
|
||||
first = false;
|
||||
}
|
||||
|
||||
prompt_tokens.push_back(p.template get<llama_token>());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
auto s = json_prompt.template get<std::string>();
|
||||
prompt_tokens = common_tokenize(ctx, s, add_special, parse_special);
|
||||
}
|
||||
|
||||
return prompt_tokens;
|
||||
}
|
||||
|
||||
/**
|
||||
* break the input "prompt" object into multiple prompt if needed, then tokenize them
|
||||
* this supports these cases:
|
||||
* - "prompt": "string"
|
||||
* - "prompt": [12, 34, 56]
|
||||
* - "prompt": [12, 34, "string", 56, 78]
|
||||
* and multiple prompts (multi-tasks):
|
||||
* - "prompt": ["string1", "string2"]
|
||||
* - "prompt": ["string1", [12, 34, 56]]
|
||||
* - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56]]
|
||||
*/
|
||||
static std::vector<llama_tokens> tokenize_input_prompts(llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) {
|
||||
std::vector<llama_tokens> result;
|
||||
if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) {
|
||||
// string or mixed
|
||||
result.push_back(tokenize_mixed(ctx, json_prompt, add_special, parse_special));
|
||||
} else if (json_is_array_of_numbers(json_prompt)) {
|
||||
// array of tokens
|
||||
result.push_back(json_prompt.get<llama_tokens>());
|
||||
} else if (json_prompt.is_array()) {
|
||||
// array of prompts
|
||||
result.reserve(json_prompt.size());
|
||||
for (const auto & p : json_prompt) {
|
||||
if (p.is_string() || json_is_array_of_mixed_numbers_strings(p)) {
|
||||
result.push_back(tokenize_mixed(ctx, p, add_special, parse_special));
|
||||
} else if (json_is_array_of_numbers(p)) {
|
||||
// array of tokens
|
||||
result.push_back(p.get<llama_tokens>());
|
||||
} else {
|
||||
throw std::runtime_error("element of \"prompt\" must be a string, an list of tokens, or a list of mixed strings & tokens");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
throw std::runtime_error("\"prompt\" must be a string, an list of tokens, a list of mixed strings & tokens, or a list of prompts");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
//
|
||||
// template utils
|
||||
//
|
||||
|
||||
// format rerank task: [BOS]query[EOS][SEP]doc[EOS]
|
||||
static llama_tokens format_rerank(const struct llama_model * model, const llama_tokens & query, const llama_tokens & doc) {
|
||||
llama_tokens result;
|
||||
result.reserve(doc.size() + query.size() + 4);
|
||||
result.push_back(llama_token_bos(model));
|
||||
result.insert(result.end(), query.begin(), query.end());
|
||||
result.push_back(llama_token_eos(model));
|
||||
result.push_back(llama_token_sep(model));
|
||||
result.insert(result.end(), doc.begin(), doc.end());
|
||||
result.push_back(llama_token_eos(model));
|
||||
return result;
|
||||
}
|
||||
|
||||
// format infill task
|
||||
static llama_tokens format_infill(
|
||||
const llama_context * ctx,
|
||||
const json & input_prefix,
|
||||
const json & input_suffix,
|
||||
const json & input_extra,
|
||||
const int n_batch,
|
||||
const int n_predict,
|
||||
const int n_ctx,
|
||||
const bool spm_infill,
|
||||
const llama_tokens & tokens_prompt
|
||||
) {
|
||||
// TODO: optimize this block by reducing memory allocations and movement
|
||||
|
||||
// use FIM repo-level pattern:
|
||||
// ref: https://arxiv.org/pdf/2409.12186
|
||||
//
|
||||
// [FIM_REP]myproject
|
||||
// [FIM_SEP]filename0
|
||||
// extra chunk 0
|
||||
// [FIM_SEP]filename1
|
||||
// extra chunk 1
|
||||
// ...
|
||||
// [FIM_SEP]filename
|
||||
// [FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]prompt
|
||||
//
|
||||
llama_tokens extra_tokens;
|
||||
extra_tokens.reserve(n_ctx);
|
||||
|
||||
auto model = llama_get_model(ctx);
|
||||
auto tokens_prefix = tokenize_mixed(ctx, input_prefix, false, false);
|
||||
auto tokens_suffix = tokenize_mixed(ctx, input_suffix, false, false);
|
||||
|
||||
if (llama_token_fim_rep(model) != LLAMA_TOKEN_NULL) {
|
||||
// TODO: make project name an input
|
||||
static const auto k_fim_repo = common_tokenize(ctx, "myproject\n", false, false);
|
||||
|
||||
extra_tokens.push_back(llama_token_fim_rep(model));
|
||||
extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end());
|
||||
}
|
||||
for (const auto & chunk : input_extra) {
|
||||
// { "text": string, "filename": string }
|
||||
const std::string text = json_value(chunk, "text", std::string());
|
||||
const std::string filename = json_value(chunk, "filename", std::string("tmp"));
|
||||
|
||||
if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) {
|
||||
const auto k_fim_file = common_tokenize(ctx, filename + "\n", false, false);
|
||||
|
||||
extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model));
|
||||
extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
|
||||
} else {
|
||||
// chunk separator in binary form to avoid confusing the AI
|
||||
static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00};
|
||||
static const auto k_chunk_prefix_tokens = common_tokenize(ctx, k_chunk_prefix_str, false, false);
|
||||
|
||||
extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end());
|
||||
}
|
||||
|
||||
const auto chunk_tokens = common_tokenize(ctx, text, false, false);
|
||||
extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end());
|
||||
}
|
||||
|
||||
if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) {
|
||||
// TODO: current filename
|
||||
static const auto k_fim_file = common_tokenize(ctx, "filename\n", false, false);
|
||||
|
||||
extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model));
|
||||
extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
|
||||
}
|
||||
|
||||
// for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
|
||||
const int n_suffix_take = std::min<int>(tokens_suffix.size(), (n_batch/4));
|
||||
const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4) - 3);
|
||||
|
||||
// fill the rest of the context with extra chunks
|
||||
const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());
|
||||
|
||||
tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take);
|
||||
tokens_suffix.resize(n_suffix_take);
|
||||
|
||||
tokens_prefix.insert(tokens_prefix.begin(), llama_token_fim_pre(model));
|
||||
tokens_prefix.insert(tokens_prefix.end(), tokens_prompt.begin(), tokens_prompt.end());
|
||||
tokens_suffix.insert(tokens_suffix.begin(), llama_token_fim_suf(model));
|
||||
|
||||
auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix;
|
||||
auto embd_end = spm_infill ? tokens_prefix : tokens_suffix;
|
||||
|
||||
if (llama_add_bos_token(model)) {
|
||||
embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
|
||||
}
|
||||
|
||||
SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size());
|
||||
|
||||
// put the extra context before the FIM prefix
|
||||
embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end());
|
||||
|
||||
embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
|
||||
embd_inp.push_back(llama_token_fim_mid(model));
|
||||
|
||||
return embd_inp;
|
||||
}
|
||||
|
||||
// Format given chat. If tmpl is empty, we take the template from model metadata
|
||||
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
|
||||
std::vector<llama_chat_msg> chat;
|
||||
std::vector<common_chat_msg> chat;
|
||||
|
||||
for (size_t i = 0; i < messages.size(); ++i) {
|
||||
const auto & curr_msg = messages[i];
|
||||
@@ -84,12 +326,25 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
|
||||
chat.push_back({role, content});
|
||||
}
|
||||
|
||||
const auto formatted_chat = llama_chat_apply_template(model, tmpl, chat, true);
|
||||
const auto formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
|
||||
LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());
|
||||
|
||||
return formatted_chat;
|
||||
}
|
||||
|
||||
static std::string llama_get_chat_template(const struct llama_model * model) {
|
||||
std::string template_key = "tokenizer.chat_template";
|
||||
// call with NULL buffer to get the total size of the string
|
||||
int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0);
|
||||
if (res < 0) {
|
||||
return "";
|
||||
} else {
|
||||
std::vector<char> model_template(res, 0);
|
||||
llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
|
||||
return std::string(model_template.data(), model_template.size());
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// base64 utils (TODO: move to common in the future)
|
||||
//
|
||||
@@ -182,14 +437,14 @@ static std::string gen_chatcmplid() {
|
||||
// other common utils
|
||||
//
|
||||
|
||||
static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
|
||||
static size_t longest_common_prefix(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
|
||||
size_t i;
|
||||
for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static size_t common_part(const std::string & a, const std::string & b) {
|
||||
static size_t longest_common_prefix(const std::string & a, const std::string & b) {
|
||||
size_t i;
|
||||
for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}
|
||||
|
||||
@@ -216,24 +471,12 @@ static size_t find_partial_stop_string(const std::string &stop, const std::strin
|
||||
return std::string::npos;
|
||||
}
|
||||
|
||||
static bool json_is_array_of_numbers(const json & data) {
|
||||
if (data.is_array()) {
|
||||
for (const auto & e : data) {
|
||||
if (!e.is_number()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// TODO: reuse llama_detokenize
|
||||
template <class Iter>
|
||||
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
|
||||
std::string ret;
|
||||
for (; begin != end; ++begin) {
|
||||
ret += llama_token_to_piece(ctx, *begin);
|
||||
ret += common_token_to_piece(ctx, *begin);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -241,7 +484,7 @@ static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
|
||||
|
||||
// format incomplete utf-8 multibyte character for output
|
||||
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
|
||||
std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);
|
||||
std::string out = token == -1 ? "" : common_token_to_piece(ctx, token);
|
||||
|
||||
// if the size is 1 and first bit is 1, meaning it's a partial character
|
||||
// (size > 1 meaning it's already a known token)
|
||||
@@ -347,9 +590,9 @@ static json oaicompat_completion_params_parse(
|
||||
|
||||
// Handle "logprobs" field
|
||||
// TODO: The response format of this option is not yet OAI-compatible, but seems like no one really using it; We may need to fix it in the future
|
||||
if (body.contains("logprobs")) {
|
||||
if (json_value(body, "logprobs", false)) {
|
||||
llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
|
||||
} else if (body.contains("top_logprobs")) {
|
||||
} else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) {
|
||||
throw std::runtime_error("top_logprobs requires logprobs to be set to true");
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
set(TARGET llama-simple)
|
||||
add_executable(${TARGET} simple.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
|
||||
@@ -1,50 +1,112 @@
|
||||
#include "arg.h"
|
||||
#include "common.h"
|
||||
#include "log.h"
|
||||
#include "llama.h"
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
static void print_usage(int, char ** argv) {
|
||||
LOG("\nexample usage:\n");
|
||||
LOG("\n %s -m model.gguf -p \"Hello my name is\" -n 32\n", argv[0]);
|
||||
LOG("\n");
|
||||
printf("\nexample usage:\n");
|
||||
printf("\n %s -m model.gguf [-n n_predict] [-ngl n_gpu_layers] [prompt]\n", argv[0]);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
// path to the model gguf file
|
||||
std::string model_path;
|
||||
// prompt to generate text from
|
||||
std::string prompt = "Hello my name is";
|
||||
// number of layers to offload to the GPU
|
||||
int ngl = 99;
|
||||
// number of tokens to predict
|
||||
int n_predict = 32;
|
||||
|
||||
params.prompt = "Hello my name is";
|
||||
params.n_predict = 32;
|
||||
// parse command line arguments
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, print_usage)) {
|
||||
return 1;
|
||||
{
|
||||
int i = 1;
|
||||
for (; i < argc; i++) {
|
||||
if (strcmp(argv[i], "-m") == 0) {
|
||||
if (i + 1 < argc) {
|
||||
model_path = argv[++i];
|
||||
} else {
|
||||
print_usage(argc, argv);
|
||||
return 1;
|
||||
}
|
||||
} else if (strcmp(argv[i], "-n") == 0) {
|
||||
if (i + 1 < argc) {
|
||||
try {
|
||||
n_predict = std::stoi(argv[++i]);
|
||||
} catch (...) {
|
||||
print_usage(argc, argv);
|
||||
return 1;
|
||||
}
|
||||
} else {
|
||||
print_usage(argc, argv);
|
||||
return 1;
|
||||
}
|
||||
} else if (strcmp(argv[i], "-ngl") == 0) {
|
||||
if (i + 1 < argc) {
|
||||
try {
|
||||
ngl = std::stoi(argv[++i]);
|
||||
} catch (...) {
|
||||
print_usage(argc, argv);
|
||||
return 1;
|
||||
}
|
||||
} else {
|
||||
print_usage(argc, argv);
|
||||
return 1;
|
||||
}
|
||||
} else {
|
||||
// prompt starts here
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (model_path.empty()) {
|
||||
print_usage(argc, argv);
|
||||
return 1;
|
||||
}
|
||||
if (i < argc) {
|
||||
prompt = argv[i++];
|
||||
for (; i < argc; i++) {
|
||||
prompt += " ";
|
||||
prompt += argv[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
|
||||
// total length of the sequence including the prompt
|
||||
const int n_predict = params.n_predict;
|
||||
|
||||
// init LLM
|
||||
|
||||
llama_backend_init();
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// initialize the model
|
||||
|
||||
llama_model_params model_params = llama_model_params_from_gpt_params(params);
|
||||
llama_model_params model_params = llama_model_default_params();
|
||||
model_params.n_gpu_layers = ngl;
|
||||
|
||||
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
|
||||
llama_model * model = llama_load_model_from_file(model_path.c_str(), model_params);
|
||||
|
||||
if (model == NULL) {
|
||||
fprintf(stderr , "%s: error: unable to load model\n" , __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// tokenize the prompt
|
||||
|
||||
// find the number of tokens in the prompt
|
||||
const int n_prompt = -llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, true, true);
|
||||
|
||||
// allocate space for the tokens and tokenize the prompt
|
||||
std::vector<llama_token> prompt_tokens(n_prompt);
|
||||
if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) {
|
||||
fprintf(stderr, "%s: error: failed to tokenize the prompt\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// initialize the context
|
||||
|
||||
llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
|
||||
llama_context_params ctx_params = llama_context_default_params();
|
||||
// n_ctx is the context size
|
||||
ctx_params.n_ctx = n_prompt + n_predict - 1;
|
||||
// n_batch is the maximum number of tokens that can be processed in a single call to llama_decode
|
||||
ctx_params.n_batch = n_prompt;
|
||||
// enable performance counters
|
||||
ctx_params.no_perf = false;
|
||||
|
||||
llama_context * ctx = llama_new_context_with_model(model, ctx_params);
|
||||
|
||||
@@ -53,117 +115,87 @@ int main(int argc, char ** argv) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
// initialize the sampler
|
||||
|
||||
auto sparams = llama_sampler_chain_default_params();
|
||||
|
||||
sparams.no_perf = false;
|
||||
|
||||
llama_sampler * smpl = llama_sampler_chain_init(sparams);
|
||||
|
||||
llama_sampler_chain_add(smpl, llama_sampler_init_greedy());
|
||||
|
||||
// tokenize the prompt
|
||||
|
||||
std::vector<llama_token> tokens_list;
|
||||
tokens_list = ::llama_tokenize(ctx, params.prompt, true);
|
||||
|
||||
const int n_ctx = llama_n_ctx(ctx);
|
||||
const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size());
|
||||
|
||||
LOG("\n");
|
||||
LOG_INF("%s: n_predict = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, n_kv_req);
|
||||
|
||||
// make sure the KV cache is big enough to hold all the prompt and generated tokens
|
||||
if (n_kv_req > n_ctx) {
|
||||
LOG_ERR("%s: error: n_kv_req > n_ctx, the required KV cache size is not big enough\n", __func__);
|
||||
LOG_ERR("%s: either reduce n_predict or increase n_ctx\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// print the prompt token-by-token
|
||||
|
||||
LOG("\n");
|
||||
|
||||
for (auto id : tokens_list) {
|
||||
LOG("%s", llama_token_to_piece(ctx, id).c_str());
|
||||
for (auto id : prompt_tokens) {
|
||||
char buf[128];
|
||||
int n = llama_token_to_piece(model, id, buf, sizeof(buf), 0, true);
|
||||
if (n < 0) {
|
||||
fprintf(stderr, "%s: error: failed to convert token to piece\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
std::string s(buf, n);
|
||||
printf("%s", s.c_str());
|
||||
}
|
||||
|
||||
// create a llama_batch with size 512
|
||||
// we use this object to submit token data for decoding
|
||||
// prepare a batch for the prompt
|
||||
|
||||
llama_batch batch = llama_batch_init(512, 0, 1);
|
||||
|
||||
// evaluate the initial prompt
|
||||
for (size_t i = 0; i < tokens_list.size(); i++) {
|
||||
llama_batch_add(batch, tokens_list[i], i, { 0 }, false);
|
||||
}
|
||||
|
||||
// llama_decode will output logits only for the last token of the prompt
|
||||
batch.logits[batch.n_tokens - 1] = true;
|
||||
|
||||
if (llama_decode(ctx, batch) != 0) {
|
||||
LOG("%s: llama_decode() failed\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
llama_batch batch = llama_batch_get_one(prompt_tokens.data(), prompt_tokens.size());
|
||||
|
||||
// main loop
|
||||
|
||||
int n_cur = batch.n_tokens;
|
||||
int n_decode = 0;
|
||||
|
||||
const auto t_main_start = ggml_time_us();
|
||||
int n_decode = 0;
|
||||
llama_token new_token_id;
|
||||
|
||||
for (int n_pos = 0; n_pos + batch.n_tokens < n_prompt + n_predict; ) {
|
||||
// evaluate the current batch with the transformer model
|
||||
if (llama_decode(ctx, batch)) {
|
||||
fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1);
|
||||
return 1;
|
||||
}
|
||||
|
||||
n_pos += batch.n_tokens;
|
||||
|
||||
while (n_cur <= n_predict) {
|
||||
// sample the next token
|
||||
{
|
||||
const llama_token new_token_id = llama_sampler_sample(smpl, ctx, -1);
|
||||
new_token_id = llama_sampler_sample(smpl, ctx, -1);
|
||||
|
||||
// is it an end of generation?
|
||||
if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
|
||||
LOG("\n");
|
||||
|
||||
if (llama_token_is_eog(model, new_token_id)) {
|
||||
break;
|
||||
}
|
||||
|
||||
LOG("%s", llama_token_to_piece(ctx, new_token_id).c_str());
|
||||
char buf[128];
|
||||
int n = llama_token_to_piece(model, new_token_id, buf, sizeof(buf), 0, true);
|
||||
if (n < 0) {
|
||||
fprintf(stderr, "%s: error: failed to convert token to piece\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
std::string s(buf, n);
|
||||
printf("%s", s.c_str());
|
||||
fflush(stdout);
|
||||
|
||||
// prepare the next batch
|
||||
llama_batch_clear(batch);
|
||||
|
||||
// push this new token for next evaluation
|
||||
llama_batch_add(batch, new_token_id, n_cur, { 0 }, true);
|
||||
// prepare the next batch with the sampled token
|
||||
batch = llama_batch_get_one(&new_token_id, 1);
|
||||
|
||||
n_decode += 1;
|
||||
}
|
||||
|
||||
n_cur += 1;
|
||||
|
||||
// evaluate the current batch with the transformer model
|
||||
if (llama_decode(ctx, batch)) {
|
||||
LOG_ERR("%s : failed to eval, return code %d\n", __func__, 1);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
LOG("\n");
|
||||
printf("\n");
|
||||
|
||||
const auto t_main_end = ggml_time_us();
|
||||
|
||||
LOG_INF("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
|
||||
fprintf(stderr, "%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
|
||||
__func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));
|
||||
|
||||
LOG("\n");
|
||||
fprintf(stderr, "\n");
|
||||
llama_perf_sampler_print(smpl);
|
||||
llama_perf_context_print(ctx);
|
||||
fprintf(stderr, "\n");
|
||||
|
||||
LOG("\n");
|
||||
|
||||
llama_batch_free(batch);
|
||||
llama_sampler_free(smpl);
|
||||
llama_free(ctx);
|
||||
llama_free_model(model);
|
||||
|
||||
llama_backend_free();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -26,20 +26,25 @@ struct seq_draft {
|
||||
std::vector<llama_token> tokens;
|
||||
std::vector<std::vector<llama_token_data>> dists;
|
||||
|
||||
struct gpt_sampler * smpl = nullptr;
|
||||
struct common_sampler * smpl = nullptr;
|
||||
};
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
common_params params;
|
||||
|
||||
// needed to get candidate probs even for temp <= 0.0
|
||||
params.sparams.n_probs = 128;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_SPECULATIVE)) {
|
||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SPECULATIVE)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
gpt_init();
|
||||
if (params.n_predict < -1) {
|
||||
LOG_ERR("%s: --n-predict must be >= -1\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
common_init();
|
||||
|
||||
if (params.model_draft.empty()) {
|
||||
LOG_ERR("%s: --model-draft is required\n", __func__);
|
||||
@@ -66,7 +71,7 @@ int main(int argc, char ** argv) {
|
||||
llama_context * ctx_dft = NULL;
|
||||
|
||||
// load the target model
|
||||
llama_init_result llama_init_tgt = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init_tgt = common_init_from_params(params);
|
||||
model_tgt = llama_init_tgt.model;
|
||||
ctx_tgt = llama_init_tgt.context;
|
||||
|
||||
@@ -78,7 +83,7 @@ int main(int argc, char ** argv) {
|
||||
}
|
||||
|
||||
params.cpuparams_batch.n_threads = params.draft_cpuparams_batch.n_threads;
|
||||
llama_init_result llama_init_dft = llama_init_from_gpt_params(params);
|
||||
common_init_result llama_init_dft = common_init_from_params(params);
|
||||
model_dft = llama_init_dft.model;
|
||||
ctx_dft = llama_init_dft.context;
|
||||
|
||||
@@ -124,8 +129,8 @@ int main(int argc, char ** argv) {
|
||||
if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
|
||||
LOG_ERR("%s: draft model vocab must match target model to use speculation but ", __func__);
|
||||
LOG_ERR("token %d content differs - target '%s', draft '%s'\n", i,
|
||||
llama_token_to_piece(ctx_tgt, i).c_str(),
|
||||
llama_token_to_piece(ctx_dft, i).c_str());
|
||||
common_token_to_piece(ctx_tgt, i).c_str(),
|
||||
common_token_to_piece(ctx_dft, i).c_str());
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
@@ -134,7 +139,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
// Tokenize the prompt
|
||||
std::vector<llama_token> inp;
|
||||
inp = ::llama_tokenize(ctx_tgt, params.prompt, true, true);
|
||||
inp = common_tokenize(ctx_tgt, params.prompt, true, true);
|
||||
|
||||
const int max_context_size = llama_n_ctx(ctx_tgt);
|
||||
const int max_tokens_list_size = max_context_size - 4;
|
||||
@@ -147,7 +152,7 @@ int main(int argc, char ** argv) {
|
||||
LOG("\n\n");
|
||||
|
||||
for (auto id : inp) {
|
||||
LOG("%s", llama_token_to_piece(ctx_tgt, id).c_str());
|
||||
LOG("%s", common_token_to_piece(ctx_tgt, id).c_str());
|
||||
}
|
||||
|
||||
const int n_input = inp.size();
|
||||
@@ -155,9 +160,9 @@ int main(int argc, char ** argv) {
|
||||
const auto t_enc_start = ggml_time_us();
|
||||
|
||||
// eval the prompt with both models
|
||||
llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
|
||||
llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
|
||||
llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input, 0, 0));
|
||||
llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1));
|
||||
llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(), 1));
|
||||
llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input));
|
||||
|
||||
const auto t_enc_end = ggml_time_us();
|
||||
|
||||
@@ -178,20 +183,18 @@ int main(int argc, char ** argv) {
|
||||
bool has_eos = false;
|
||||
|
||||
// target model sampling context (reuse the llama_context's sampling instance)
|
||||
struct gpt_sampler * smpl = gpt_sampler_init(model_tgt, params.sparams);
|
||||
|
||||
struct llama_sampler * softmax = llama_sampler_init_softmax();
|
||||
struct common_sampler * smpl = common_sampler_init(model_tgt, params.sparams);
|
||||
|
||||
// draft sequence data
|
||||
std::vector<seq_draft> drafts(n_seq_dft);
|
||||
|
||||
for (int s = 0; s < n_seq_dft; ++s) {
|
||||
// allocate gpt_sampler for each draft sequence
|
||||
drafts[s].smpl = gpt_sampler_init(model_dft, params.sparams);
|
||||
// allocate llama_sampler for each draft sequence
|
||||
drafts[s].smpl = common_sampler_init(model_dft, params.sparams);
|
||||
}
|
||||
|
||||
llama_batch batch_dft = llama_batch_init(params.n_ctx, 0, 1);
|
||||
llama_batch batch_tgt = llama_batch_init(params.n_ctx, 0, n_seq_dft);
|
||||
llama_batch batch_dft = llama_batch_init(llama_n_batch(ctx_dft), 0, 1);
|
||||
llama_batch batch_tgt = llama_batch_init(llama_n_batch(ctx_tgt), 0, n_seq_dft);
|
||||
|
||||
const auto t_dec_start = ggml_time_us();
|
||||
|
||||
@@ -229,9 +232,9 @@ int main(int argc, char ** argv) {
|
||||
bool accept = false;
|
||||
if (params.sparams.temp > 0) {
|
||||
// stochastic verification
|
||||
gpt_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft], true);
|
||||
common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft], true);
|
||||
|
||||
auto & dist_tgt = *gpt_sampler_get_candidates(smpl);
|
||||
auto & dist_tgt = *common_sampler_get_candidates(smpl);
|
||||
|
||||
float p_tgt = 0.0f;
|
||||
float p_dft = 0.0f;
|
||||
@@ -277,13 +280,13 @@ int main(int argc, char ** argv) {
|
||||
s_keep = s;
|
||||
accept = true;
|
||||
token_id = drafts[s].tokens[i_dft];
|
||||
token_str = llama_token_to_piece(ctx_tgt, token_id);
|
||||
gpt_sampler_accept(smpl, token_id, true);
|
||||
token_str = common_token_to_piece(ctx_tgt, token_id);
|
||||
common_sampler_accept(smpl, token_id, true);
|
||||
|
||||
LOG_DBG("draft token %d of sequence %d (%d, '%s') accepted\n", i_dft, s, token_id, token_str.c_str());
|
||||
break;
|
||||
} else {
|
||||
LOG_DBG("draft token %d of sequence %d (%d, '%s') rejected\n", i_dft, s, drafts[s].tokens[i_dft], llama_token_to_piece(ctx_tgt, drafts[s].tokens[i_dft]).c_str());
|
||||
LOG_DBG("draft token %d of sequence %d (%d, '%s') rejected\n", i_dft, s, drafts[s].tokens[i_dft], common_token_to_piece(ctx_tgt, drafts[s].tokens[i_dft]).c_str());
|
||||
drafts[s].active = false;
|
||||
|
||||
// calculate residual probability
|
||||
@@ -349,19 +352,19 @@ int main(int argc, char ** argv) {
|
||||
const int idx = dist(rng);
|
||||
|
||||
token_id = dist_tgt.data[idx].id;
|
||||
gpt_sampler_accept(smpl, token_id, true);
|
||||
token_str = llama_token_to_piece(ctx_tgt, token_id);
|
||||
common_sampler_accept(smpl, token_id, true);
|
||||
token_str = common_token_to_piece(ctx_tgt, token_id);
|
||||
}
|
||||
} else {
|
||||
// greedy verification
|
||||
|
||||
// sample from the target model
|
||||
LOG_DBG("sampling target: s_keep = %3d, i_dft = %3d, i_batch_tgt = %3d\n", s_keep, i_dft, drafts[s_keep].i_batch_tgt[i_dft]);
|
||||
token_id = gpt_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft]);
|
||||
token_id = common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft]);
|
||||
|
||||
gpt_sampler_accept(smpl, token_id, true);
|
||||
common_sampler_accept(smpl, token_id, true);
|
||||
|
||||
token_str = llama_token_to_piece(ctx_tgt, token_id);
|
||||
token_str = common_token_to_piece(ctx_tgt, token_id);
|
||||
|
||||
for (int s = 0; s < n_seq_dft; ++s) {
|
||||
if (!drafts[s].active) {
|
||||
@@ -431,8 +434,8 @@ int main(int argc, char ** argv) {
|
||||
drafts[0].dists.push_back(std::vector<llama_token_data>());
|
||||
drafts[0].i_batch_tgt.push_back(0);
|
||||
|
||||
llama_batch_clear(batch_dft);
|
||||
llama_batch_add (batch_dft, token_id, n_past_dft, { 0 }, true);
|
||||
common_batch_clear(batch_dft);
|
||||
common_batch_add (batch_dft, token_id, n_past_dft, { 0 }, true);
|
||||
|
||||
llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1);
|
||||
// LOG_DBG("dft batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_dft, batch_dft).c_str());
|
||||
@@ -441,14 +444,14 @@ int main(int argc, char ** argv) {
|
||||
++n_past_dft;
|
||||
}
|
||||
|
||||
if (n_predict > params.n_predict || has_eos) {
|
||||
if ((params.n_predict >= 0 && n_predict > params.n_predict) || has_eos) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (drafts[0].smpl) {
|
||||
gpt_sampler_free(drafts[0].smpl);
|
||||
common_sampler_free(drafts[0].smpl);
|
||||
}
|
||||
drafts[0].smpl = gpt_sampler_clone(smpl);
|
||||
drafts[0].smpl = common_sampler_clone(smpl);
|
||||
|
||||
int n_seq_cur = 1;
|
||||
int n_past_cur = n_past_dft;
|
||||
@@ -461,8 +464,8 @@ int main(int argc, char ** argv) {
|
||||
drafts[0].drafting = true;
|
||||
drafts[0].i_batch_dft = 0;
|
||||
|
||||
llama_batch_clear(batch_tgt);
|
||||
llama_batch_add (batch_tgt, drafts[0].tokens[0], n_past_tgt, { 0 }, true);
|
||||
common_batch_clear(batch_tgt);
|
||||
common_batch_add (batch_tgt, drafts[0].tokens[0], n_past_tgt, { 0 }, true);
|
||||
|
||||
// sample n_draft tokens from the draft model using tree-based sampling
|
||||
for (int i = 0; i < n_draft; ++i) {
|
||||
@@ -477,13 +480,13 @@ int main(int argc, char ** argv) {
|
||||
continue;
|
||||
}
|
||||
|
||||
gpt_sampler_sample(drafts[s].smpl, ctx_dft, drafts[s].i_batch_dft, true);
|
||||
common_sampler_sample(drafts[s].smpl, ctx_dft, drafts[s].i_batch_dft, true);
|
||||
|
||||
const auto * cur_p = gpt_sampler_get_candidates(drafts[s].smpl);
|
||||
const auto * cur_p = common_sampler_get_candidates(drafts[s].smpl);
|
||||
|
||||
for (int k = 0; k < std::min(n_seq_dft + 3, (int) cur_p->size); ++k) {
|
||||
LOG_DBG(" - draft candidate %3d for seq %3d, pos %3d: %6d (%8.3f) '%s'\n",
|
||||
k, s, i, cur_p->data[k].id, cur_p->data[k].p, llama_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
|
||||
k, s, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
|
||||
}
|
||||
|
||||
std::vector<int> sa(1, s);
|
||||
@@ -518,9 +521,9 @@ int main(int argc, char ** argv) {
|
||||
drafts[n_seq_cur].i_batch_tgt = drafts[s].i_batch_tgt;
|
||||
|
||||
if (drafts[n_seq_cur].smpl) {
|
||||
gpt_sampler_free(drafts[n_seq_cur].smpl);
|
||||
common_sampler_free(drafts[n_seq_cur].smpl);
|
||||
}
|
||||
drafts[n_seq_cur].smpl = gpt_sampler_clone(drafts[s].smpl);
|
||||
drafts[n_seq_cur].smpl = common_sampler_clone(drafts[s].smpl);
|
||||
|
||||
sa.push_back(n_seq_cur);
|
||||
|
||||
@@ -536,7 +539,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
const int s = sa[is];
|
||||
|
||||
gpt_sampler_accept(drafts[s].smpl, id, true);
|
||||
common_sampler_accept(drafts[s].smpl, id, true);
|
||||
|
||||
drafts[s].tokens.push_back(id);
|
||||
// save cur_p.data into drafts[s].dists
|
||||
@@ -545,12 +548,12 @@ int main(int argc, char ** argv) {
|
||||
// add unique drafted tokens to the target batch
|
||||
drafts[s].i_batch_tgt.push_back(batch_tgt.n_tokens);
|
||||
|
||||
llama_batch_add(batch_tgt, id, n_past_tgt + i + 1, { s }, true);
|
||||
common_batch_add(batch_tgt, id, n_past_tgt + i + 1, { s }, true);
|
||||
|
||||
// add the token to the batch for batched decoding with the draft model
|
||||
drafts[s].i_batch_dft = batch_dft.n_tokens;
|
||||
|
||||
llama_batch_add(batch_dft, id, n_past_cur, { s }, true);
|
||||
common_batch_add(batch_dft, id, n_past_cur, { s }, true);
|
||||
|
||||
if (batch_tgt.n_tokens > n_draft) {
|
||||
drafts[s].drafting = false;
|
||||
@@ -617,14 +620,13 @@ int main(int argc, char ** argv) {
|
||||
|
||||
LOG_INF("\n");
|
||||
LOG_INF("target:\n\n");
|
||||
gpt_perf_print(ctx_tgt, smpl);
|
||||
common_perf_print(ctx_tgt, smpl);
|
||||
|
||||
gpt_sampler_free(smpl);
|
||||
common_sampler_free(smpl);
|
||||
for (int s = 0; s < n_seq_dft; ++s) {
|
||||
gpt_sampler_free(drafts[s].smpl);
|
||||
common_sampler_free(drafts[s].smpl);
|
||||
}
|
||||
|
||||
llama_sampler_free(softmax);
|
||||
llama_batch_free(batch_dft);
|
||||
|
||||
llama_free(ctx_tgt);
|
||||
|
||||
@@ -365,7 +365,7 @@ int main(int raw_argc, char ** raw_argv) {
|
||||
const bool parse_special = !no_parse_special;
|
||||
|
||||
std::vector<llama_token> tokens;
|
||||
tokens = ::llama_tokenize(model, prompt, add_bos, parse_special);
|
||||
tokens = common_tokenize(model, prompt, add_bos, parse_special);
|
||||
|
||||
if (printing_ids) {
|
||||
printf("[");
|
||||
@@ -380,7 +380,7 @@ int main(int raw_argc, char ** raw_argv) {
|
||||
} else {
|
||||
bool invalid_utf8 = false;
|
||||
printf("%6d -> '", tokens[i]);
|
||||
write_utf8_cstr_to_stdout(llama_token_to_piece(ctx, tokens[i]).c_str(), invalid_utf8);
|
||||
write_utf8_cstr_to_stdout(common_token_to_piece(ctx, tokens[i]).c_str(), invalid_utf8);
|
||||
if (invalid_utf8) {
|
||||
printf("' (utf-8 decode failure)\n");
|
||||
} else {
|
||||
|
||||
20  flake.lock  generated
@@ -5,11 +5,11 @@
|
||||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1726153070,
|
||||
"narHash": "sha256-HO4zgY0ekfwO5bX0QH/3kJ/h4KvUDFZg8YpkNwIbg1U=",
|
||||
"lastModified": 1727826117,
|
||||
"narHash": "sha256-K5ZLCyfO/Zj9mPFldf3iwS6oZStJcU4tSpiXTMYaaL0=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "bcef6817a8b2aa20a5a6dbb19b43e63c5bf8619a",
|
||||
"rev": "3d04084d54bedc3d6b8b736c70ef449225c361b1",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -20,11 +20,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1727348695,
|
||||
"narHash": "sha256-J+PeFKSDV+pHL7ukkfpVzCOO7mBSrrpJ3svwBFABbhI=",
|
||||
"lastModified": 1729256560,
|
||||
"narHash": "sha256-/uilDXvCIEs3C9l73JTACm4quuHUsIHcns1c+cHUJwA=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "1925c603f17fc89f4c8f6bf6f631a802ad85d784",
|
||||
"rev": "4c2fcb090b1f3e5b47eaa7bd33913b574a11e0a0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -36,14 +36,14 @@
|
||||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1725233747,
|
||||
"narHash": "sha256-Ss8QWLXdr2JCBPcYChJhz4xJm+h/xjl4G0c0XlP6a74=",
|
||||
"lastModified": 1727825735,
|
||||
"narHash": "sha256-0xHYkMkeLVQAMa7gvkddbPqpxph+hDzdu1XdGPJR+Os=",
|
||||
"type": "tarball",
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz"
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/fb192fec7cc7a4c26d51779e9bab07ce6fa5597a.tar.gz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz"
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/fb192fec7cc7a4c26d51779e9bab07ce6fa5597a.tar.gz"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
|
||||
@@ -99,6 +99,9 @@ option(GGML_AVX512 "ggml: enable AVX512" OFF)
|
||||
option(GGML_AVX512_VBMI "ggml: enable AVX512-VBMI" OFF)
|
||||
option(GGML_AVX512_VNNI "ggml: enable AVX512-VNNI" OFF)
|
||||
option(GGML_AVX512_BF16 "ggml: enable AVX512-BF16" OFF)
|
||||
option(GGML_AMX_TILE "ggml: enable AMX-TILE" OFF)
|
||||
option(GGML_AMX_INT8 "ggml: enable AMX-INT8" OFF)
|
||||
option(GGML_AMX_BF16 "ggml: enable AMX-BF16" OFF)
|
||||
option(GGML_FMA "ggml: enable FMA" ${INS_ENB})
|
||||
if (NOT MSVC)
|
||||
option(GGML_F16C "ggml: enable F16C" ${INS_ENB}) # in MSVC F16C is implied with AVX2/AVX512
|
||||
@@ -158,6 +161,7 @@ set (GGML_METAL_MACOSX_VERSION_MIN "" CACHE STRING
|
||||
set (GGML_METAL_STD "" CACHE STRING "ggml: metal standard version (-std flag)")
|
||||
option(GGML_OPENMP "ggml: use OpenMP" ON)
|
||||
option(GGML_RPC "ggml: use RPC" OFF)
|
||||
option(GGML_AMX "ggml: use AMX" OFF)
|
||||
option(GGML_SYCL "ggml: use SYCL" OFF)
|
||||
option(GGML_SYCL_F16 "ggml: use 16 bit floats for sycl calculations" OFF)
|
||||
set (GGML_SYCL_TARGET "INTEL" CACHE STRING
|
||||
|
||||
@@ -24,7 +24,7 @@ GGML_API void ggml_tallocr_alloc(struct ggml_tallocr * talloc, st
|
||||
// Graph allocator
|
||||
/*
|
||||
Example usage:
|
||||
ggml_gallocr_t galloc = ggml_gallocr_new(ggml_bacckend_cpu_buffer_type());
|
||||
ggml_gallocr_t galloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type());
|
||||
|
||||
// optional: create a worst-case graph and reserve the buffers to avoid reallocations
|
||||
ggml_gallocr_reserve(galloc, build_graph(max_batch));
|
||||
|
||||
25  ggml/include/ggml-amx.h  Normal file
@@ -0,0 +1,25 @@
|
||||
#pragma once
|
||||
|
||||
#include "ggml.h"
|
||||
#include "ggml-backend.h"
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// buffer_type API
|
||||
GGML_API ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void);
|
||||
|
||||
GGML_API bool ggml_backend_is_amx(ggml_backend_t backend);
|
||||
|
||||
// backend API
|
||||
GGML_API ggml_backend_t ggml_backend_amx_init(void);
|
||||
|
||||
GGML_API void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads);
|
||||
|
||||
GGML_API ggml_backend_reg_t ggml_backend_amx_reg(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -127,6 +127,8 @@ extern "C" {
         bool async;
         // pinned host buffer
         bool host_buffer;
+        // creating buffers from host ptr
+        bool buffer_from_host_ptr;
         // event synchronization
         bool events;
     };
@@ -168,6 +170,7 @@ extern "C" {

     // Functions that may be obtained using ggml_backend_reg_get_proc_address
     typedef ggml_backend_buffer_type_t (*ggml_backend_split_buffer_type_t)(const float *);
+    typedef void (*ggml_backend_set_n_threads_t)(ggml_backend_t, int);

     //
     // Backend registry
@@ -247,7 +250,7 @@ extern "C" {
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);

     // Initialize backend buffers from a measure graph
-    GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+    GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); // returns success

     GGML_API int            ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched);
     GGML_API ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i);
@@ -262,7 +265,7 @@ extern "C" {
     GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);

     // Allocate and compute graph on the backend scheduler
-    GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph); // returns success
     GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
     GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
     GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched);
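The ggml_backend_set_n_threads_t typedef added above is meant to be resolved at run time through ggml_backend_reg_get_proc_address rather than being part of the backend vtable; both the CPU and AMX registries in this changeset export the "ggml_backend_set_n_threads" symbol. A short sketch of the intended call pattern:

#include "ggml-backend.h"

// Set the thread count on any backend whose registry exposes
// "ggml_backend_set_n_threads"; do nothing if it does not.
static void set_backend_threads(ggml_backend_reg_t reg, ggml_backend_t backend, int n_threads) {
    ggml_backend_set_n_threads_t set_n_threads = (ggml_backend_set_n_threads_t)
        ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
    if (set_n_threads != nullptr) {
        set_n_threads(backend, n_threads);
    }
}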
@@ -17,6 +17,8 @@ GGML_API bool ggml_backend_is_blas(ggml_backend_t backend);
 // for openblas and blis, this will also set the number of threads used for blas operations
 GGML_API void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads);

+GGML_API ggml_backend_reg_t ggml_backend_blas_reg(void);
+

 #ifdef __cplusplus
 }
@@ -34,6 +34,8 @@ extern "C" {
  */
 #define GGML_CANN_MAX_DEVICES 16

+GGML_API ggml_backend_reg_t ggml_backend_cann_reg(void);
+
 /**
  * @brief Initializes the CANN backend for a specified device.
  *
@@ -43,7 +43,9 @@ GGML_API ggml_backend_t ggml_backend_metal_init(void);

 GGML_API bool ggml_backend_is_metal(ggml_backend_t backend);

-GGML_API ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size);
+GGML_DEPRECATED(
+        GGML_API ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
+        "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");

 GGML_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data);

@@ -57,6 +59,8 @@ GGML_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int fam
 // capture all command buffers committed the next time `ggml_backend_graph_compute` is called
 GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);

+GGML_API ggml_backend_reg_t ggml_backend_metal_reg(void);
+
 #ifdef __cplusplus
 }
 #endif
@@ -17,7 +17,11 @@ GGML_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * en

 GGML_API void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total);

-GGML_API void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem);
+GGML_API void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem);
+
+GGML_API ggml_backend_reg_t ggml_backend_rpc_reg(void);
+
+GGML_API ggml_backend_dev_t ggml_backend_rpc_add_device(const char * endpoint);

 #ifdef __cplusplus
 }
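start_rpc_server is renamed to the namespaced ggml_backend_rpc_start_server above. An illustrative sketch of serving a local CPU backend over RPC follows; the endpoint and memory figures are placeholders, not values taken from this changeset.

#include "ggml-backend.h"
#include "ggml-rpc.h"

int main() {
    ggml_backend_t backend = ggml_backend_cpu_init();

    const size_t free_mem  = 8ull  * 1024 * 1024 * 1024;   // advertised free memory
    const size_t total_mem = 16ull * 1024 * 1024 * 1024;   // advertised total memory

    // blocks and serves graph computations for remote clients on this endpoint
    ggml_backend_rpc_start_server(backend, "0.0.0.0:50052", free_mem, total_mem);

    ggml_backend_free(backend);
    return 0;
}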
@@ -19,6 +19,8 @@ extern "C" {
 // backend API
 GGML_API ggml_backend_t ggml_backend_sycl_init(int device);

+GGML_API bool ggml_backend_is_sycl(ggml_backend_t backend);
+
 // devide buffer
 GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device);

@@ -29,14 +31,19 @@ GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const fl
 GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);

 GGML_API void ggml_backend_sycl_print_sycl_devices(void);
-GGML_API void ggml_sycl_get_gpu_list(int *id_list, int max_len);
-GGML_API void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
+GGML_API void ggml_backend_sycl_get_gpu_list(int *id_list, int max_len);
+GGML_API void ggml_backend_sycl_get_device_description(int device,
+                                                       char *description,
+                                                       size_t description_size);
 GGML_API int  ggml_backend_sycl_get_device_count();
 GGML_API void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);

 // SYCL doesn't support registering host memory, keep here for reference
 // GGML_API bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
 // GGML_API void ggml_backend_sycl_unregister_host_buffer(void * buffer);

+GGML_API ggml_backend_reg_t ggml_backend_sycl_reg(void);
+
 #ifdef __cplusplus
 }
 #endif
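The device query helpers gain the ggml_backend_sycl_ prefix above. A short sketch of enumerating SYCL devices with the renamed calls; the output formatting is illustrative.

#include "ggml-sycl.h"
#include <cstdio>

static void list_sycl_devices() {
    const int n = ggml_backend_sycl_get_device_count();
    for (int i = 0; i < n; i++) {
        char desc[256];
        size_t free_mem = 0, total_mem = 0;
        ggml_backend_sycl_get_device_description(i, desc, sizeof(desc));
        ggml_backend_sycl_get_device_memory(i, &free_mem, &total_mem);
        printf("SYCL device %d: %s (%zu / %zu bytes free)\n", i, desc, free_mem, total_mem);
    }
}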
@@ -24,6 +24,8 @@ GGML_API ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
 // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
 GGML_API ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);

+GGML_API ggml_backend_reg_t ggml_backend_vk_reg(void);
+
 #ifdef __cplusplus
 }
 #endif
@@ -456,6 +456,7 @@ extern "C" {
         GGML_OP_SUM_ROWS,
         GGML_OP_MEAN,
         GGML_OP_ARGMAX,
+        GGML_OP_COUNT_EQUAL,
         GGML_OP_REPEAT,
         GGML_OP_REPEAT_BACK,
         GGML_OP_CONCAT,
@@ -994,6 +995,12 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor  * a);

+    // count number of equal elements in a and b
+    GGML_API struct ggml_tensor * ggml_count_equal(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            struct ggml_tensor  * b);
+
     // if a is the same shape as b, and a is not parameter, return a
     // otherwise, return a new tensor: repeat(a) to fit in b
     GGML_API struct ggml_tensor * ggml_repeat(
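ggml_count_equal, declared above together with GGML_OP_COUNT_EQUAL, counts matching elements of two same-shape tensors. A hedged sketch of building such a node; the I32 operand type and the single-element result are assumptions based on the comment, not spelled out in this diff.

#include "ggml.h"

// Build a node that counts how many elements of two same-shape I32 tensors match.
// Sketch only: context setup and graph evaluation are omitted.
static struct ggml_tensor * build_count_equal(struct ggml_context * ctx, int64_t n) {
    struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n);
    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n);

    // the result is expected to be a single-element tensor holding the number
    // of positions where a[i] == b[i]
    return ggml_count_equal(ctx, a, b);
}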
@@ -2481,6 +2488,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_avx512_vbmi(void);
     GGML_API int ggml_cpu_has_avx512_vnni(void);
     GGML_API int ggml_cpu_has_avx512_bf16(void);
+    GGML_API int ggml_cpu_has_amx_int8   (void);
     GGML_API int ggml_cpu_has_fma        (void);
     GGML_API int ggml_cpu_has_neon       (void);
     GGML_API int ggml_cpu_has_sve        (void);
@@ -2528,7 +2536,7 @@ extern "C" {
     typedef void (*ggml_gemm_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x,
                                  const void * GGML_RESTRICT y, int nr, int nc);

-    typedef struct {
+    struct ggml_type_traits {
         const char             * type_name;
         int64_t                  blck_size;
         int64_t                  blck_size_interleave; // interleave elements in blocks
@@ -2544,9 +2552,9 @@ extern "C" {
         int64_t                  ncols; // number of columns to process simultaneously
         ggml_gemv_t              gemv;
         ggml_gemm_t              gemm;
-    } ggml_type_traits_t;
+    };

-    GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
+    GGML_API const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type);

 #ifdef __cplusplus
 }
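The by-value ggml_internal_get_type_traits accessor is replaced above by ggml_get_type_traits, which returns a const pointer; the same call-site change appears in the ggml-blas.cpp hunk further down. A short sketch of the new pattern:

#include "ggml.h"

// Convert one quantized row to float using the new accessor; the caller passes
// a source row of the given type holding n elements.
static void dequantize_row(enum ggml_type type, const void * src, float * dst, int64_t n) {
    const struct ggml_type_traits * traits = ggml_get_type_traits(type);
    if (traits->to_float != nullptr) {
        traits->to_float(src, dst, n);
    }
}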
@@ -163,8 +163,8 @@ if (GGML_OPENMP)
         list(APPEND GGML_EXTRA_LIBS_PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX)

         if (GGML_MUSA)
-            list(APPEND GGML_EXTRA_INCLUDES     "/usr/lib/llvm-10/include/openmp")
-            list(APPEND GGML_EXTRA_LIBS_PRIVATE "/usr/lib/llvm-10/lib/libomp.so")
+            list(APPEND GGML_EXTRA_INCLUDES     "/usr/lib/llvm-14/lib/clang/14.0.0/include")
+            list(APPEND GGML_EXTRA_LIBS_PRIVATE "/usr/lib/llvm-14/lib/libomp.so")
         endif()
     else()
         message(WARNING "OpenMP not found")
@@ -190,22 +190,24 @@ if (GGML_BLAS)
|
||||
# see https://gitlab.kitware.com/cmake/cmake/-/issues/20268
|
||||
find_package(PkgConfig REQUIRED)
|
||||
if (${GGML_BLAS_VENDOR} MATCHES "Generic")
|
||||
pkg_check_modules(DepBLAS REQUIRED blas)
|
||||
pkg_check_modules(DepBLAS blas)
|
||||
elseif (${GGML_BLAS_VENDOR} MATCHES "OpenBLAS")
|
||||
# As of openblas v0.3.22, the 64-bit is named openblas64.pc
|
||||
pkg_check_modules(DepBLAS openblas64)
|
||||
if (NOT DepBLAS_FOUND)
|
||||
pkg_check_modules(DepBLAS REQUIRED openblas)
|
||||
pkg_check_modules(DepBLAS openblas)
|
||||
endif()
|
||||
elseif (${GGML_BLAS_VENDOR} MATCHES "FLAME")
|
||||
pkg_check_modules(DepBLAS REQUIRED blis)
|
||||
add_compile_definitions(GGML_BLAS_USE_BLIS)
|
||||
pkg_check_modules(DepBLAS blis)
|
||||
elseif (${GGML_BLAS_VENDOR} MATCHES "ATLAS")
|
||||
pkg_check_modules(DepBLAS REQUIRED blas-atlas)
|
||||
pkg_check_modules(DepBLAS blas-atlas)
|
||||
elseif (${GGML_BLAS_VENDOR} MATCHES "FlexiBLAS")
|
||||
pkg_check_modules(DepBLAS REQUIRED flexiblas_api)
|
||||
pkg_check_modules(DepBLAS flexiblas_api)
|
||||
elseif (${GGML_BLAS_VENDOR} MATCHES "Intel")
|
||||
add_compile_definitions(GGML_BLAS_USE_MKL)
|
||||
# all Intel* libraries share the same include path
|
||||
pkg_check_modules(DepBLAS REQUIRED mkl-sdl)
|
||||
pkg_check_modules(DepBLAS mkl-sdl)
|
||||
elseif (${GGML_BLAS_VENDOR} MATCHES "NVHPC")
|
||||
# this doesn't provide pkg-config
|
||||
# suggest to assign BLAS_INCLUDE_DIRS on your own
|
||||
@@ -265,6 +267,26 @@ if (GGML_LLAMAFILE)
|
||||
set(GGML_SOURCES_LLAMAFILE llamafile/sgemm.cpp)
|
||||
endif()
|
||||
|
||||
if (GGML_AMX)
|
||||
if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 11.0)
|
||||
else()
|
||||
set(GGML_AMX OFF)
|
||||
message(WARNING "AMX requires gcc version > 11.0. Turning off GGML_AMX.")
|
||||
endif()
|
||||
|
||||
if (GGML_AMX)
|
||||
message(STATUS "Using AMX")
|
||||
|
||||
list(APPEND GGML_CDEF_PUBLIC GGML_USE_AMX)
|
||||
|
||||
file(GLOB GGML_HEADERS_AMX "ggml-amx/*.h")
|
||||
list(APPEND GGML_HEADERS_AMX "../include/ggml-amx.h")
|
||||
|
||||
file(GLOB GGML_SOURCES_AMX "ggml-amx/*.cpp")
|
||||
list(APPEND GGML_SOURCES_AMX "ggml-amx.cpp")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (GGML_CUDA)
|
||||
cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES
|
||||
|
||||
@@ -1178,6 +1200,18 @@ elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LW
|
||||
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512BF16__>)
|
||||
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512BF16__>)
|
||||
endif()
|
||||
if (GGML_AMX_TILE)
|
||||
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_TILE__>)
|
||||
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_TILE__>)
|
||||
endif()
|
||||
if (GGML_AMX_INT8)
|
||||
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_INT8__>)
|
||||
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_INT8__>)
|
||||
endif()
|
||||
if (GGML_AMX_BF16)
|
||||
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_BF16__>)
|
||||
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_BF16__>)
|
||||
endif()
|
||||
elseif (GGML_AVX2)
|
||||
list(APPEND ARCH_FLAGS /arch:AVX2)
|
||||
elseif (GGML_AVX)
|
||||
@@ -1213,6 +1247,15 @@ elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LW
|
||||
if (GGML_AVX512_BF16)
|
||||
list(APPEND ARCH_FLAGS -mavx512bf16)
|
||||
endif()
|
||||
if (GGML_AMX_TILE)
|
||||
list(APPEND ARCH_FLAGS -mamx-tile)
|
||||
endif()
|
||||
if (GGML_AMX_INT8)
|
||||
list(APPEND ARCH_FLAGS -mamx-int8)
|
||||
endif()
|
||||
if (GGML_AMX_BF16)
|
||||
list(APPEND ARCH_FLAGS -mamx-bf16)
|
||||
endif()
|
||||
endif()
|
||||
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
|
||||
message(STATUS "PowerPC detected")
|
||||
@@ -1338,6 +1381,7 @@ add_library(ggml
|
||||
${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM}
|
||||
${GGML_SOURCES_BLAS} ${GGML_HEADERS_BLAS}
|
||||
${GGML_SOURCES_LLAMAFILE} ${GGML_HEADERS_LLAMAFILE}
|
||||
${GGML_SOURCES_AMX} ${GGML_HEADERS_AMX}
|
||||
${GGML_SOURCES_CANN} ${GGML_HEADERS_CANN}
|
||||
ggml-aarch64.c ggml-aarch64.h
|
||||
)
|
||||
@@ -1361,6 +1405,10 @@ if (MATH_LIBRARY)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (CMAKE_SYSTEM_NAME MATCHES "Android")
|
||||
list(APPEND GGML_EXTRA_LIBS_PRIVATE dl) # Must be linked explicitly
|
||||
endif()
|
||||
|
||||
list(REMOVE_DUPLICATES GGML_EXTRA_LIBS_PRIVATE)
|
||||
list(REMOVE_DUPLICATES GGML_EXTRA_LIBS_PUBLIC)
|
||||
target_link_libraries(ggml PRIVATE ${GGML_EXTRA_LIBS_PRIVATE} PUBLIC ${GGML_EXTRA_LIBS_PUBLIC})
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
//#define GGML_ALLOCATOR_DEBUG
|
||||
|
||||
//#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__)
|
||||
//#define AT_PRINTF(...) GGML_LOG_DEBUG(__VA_ARGS__)
|
||||
#define AT_PRINTF(...)
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tenso
|
||||
size = GGML_PAD(size, talloc->alignment);
|
||||
|
||||
if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) {
|
||||
fprintf(stderr, "%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
|
||||
GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
|
||||
__func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
|
||||
GGML_ABORT("not enough space in the buffer");
|
||||
}
|
||||
@@ -172,7 +172,7 @@ static size_t ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t siz
|
||||
best_fit_block = alloc->n_free_blocks - 1;
|
||||
} else {
|
||||
// this should never happen
|
||||
fprintf(stderr, "%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
|
||||
GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
|
||||
__func__, size, max_avail);
|
||||
GGML_ABORT("not enough space in the buffer");
|
||||
}
|
||||
@@ -209,16 +209,16 @@ static size_t ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t siz
|
||||
}
|
||||
}
|
||||
}
|
||||
fprintf(stderr, "max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
|
||||
GGML_LOG_DEBUG("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
|
||||
for (int i = 0; i < 1024; i++) {
|
||||
if (alloc->allocated_tensors[i].tensor) {
|
||||
fprintf(stderr, "%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
|
||||
GGML_LOG_DEBUG("%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
|
||||
alloc->allocated_tensors[i].offset,
|
||||
alloc->allocated_tensors[i].offset + ggml_nbytes(alloc->allocated_tensors[i].tensor),
|
||||
ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
|
||||
}
|
||||
}
|
||||
fprintf(stderr, "\n");
|
||||
GGML_LOG_DEBUG("\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -348,7 +348,6 @@ struct tensor_alloc {
|
||||
};
|
||||
|
||||
struct leaf_alloc {
|
||||
int buffer_id;
|
||||
struct tensor_alloc leaf;
|
||||
};
|
||||
|
||||
@@ -740,7 +739,6 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
|
||||
for (int i = 0; i < graph->n_leafs; i++) {
|
||||
struct ggml_tensor * leaf = graph->leafs[i];
|
||||
struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
|
||||
galloc->leaf_allocs[i].buffer_id = hn->buffer_id;
|
||||
if (leaf->view_src || leaf->data) {
|
||||
galloc->leaf_allocs[i].leaf.buffer_id = -1;
|
||||
galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
|
||||
@@ -768,13 +766,13 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
|
||||
// even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
|
||||
if (new_size > cur_size || galloc->buffers[i] == NULL) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
|
||||
GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
|
||||
#endif
|
||||
|
||||
ggml_backend_buffer_free(galloc->buffers[i]);
|
||||
galloc->buffers[i] = ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
|
||||
if (galloc->buffers[i] == NULL) {
|
||||
fprintf(stderr, "%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size);
|
||||
GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size);
|
||||
return false;
|
||||
}
|
||||
ggml_backend_buffer_set_usage(galloc->buffers[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
|
||||
@@ -825,14 +823,14 @@ static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_t
|
||||
static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
|
||||
if (galloc->n_nodes != graph->n_nodes) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: graph has different number of nodes\n", __func__);
|
||||
GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
if (galloc->n_leafs != graph->n_leafs) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: graph has different number of leafs\n", __func__);
|
||||
GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
@@ -843,7 +841,7 @@ static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph
|
||||
|
||||
if (!ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: node %s is not valid\n", __func__, node->name);
|
||||
GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
@@ -855,7 +853,7 @@ static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph
|
||||
}
|
||||
if (!ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
|
||||
GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
@@ -869,14 +867,14 @@ bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph)
|
||||
if (ggml_gallocr_needs_realloc(galloc, graph)) {
|
||||
if (galloc->n_buffers == 1) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: reallocating buffers automatically\n", __func__);
|
||||
GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
|
||||
#endif
|
||||
if (!ggml_gallocr_reserve(galloc, graph)) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
|
||||
GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
@@ -940,7 +938,7 @@ static bool alloc_tensor_range(struct ggml_context * ctx,
|
||||
ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
|
||||
if (buffer == NULL) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size);
|
||||
GGML_LOG_DEBUG("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size);
|
||||
#endif
|
||||
for (size_t i = 0; i < *n_buffers; i++) {
|
||||
ggml_backend_buffer_free((*buffers)[i]);
|
||||
@@ -990,7 +988,7 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte
|
||||
}
|
||||
|
||||
if (this_size > max_size) {
|
||||
fprintf(stderr, "%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
|
||||
GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
|
||||
__func__, t->name,
|
||||
ggml_backend_buft_name(buft),
|
||||
this_size, max_size);
|
||||
@@ -1022,7 +1020,7 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte
|
||||
|
||||
if (n_buffers == 0) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: all tensors in the context are already allocated\n", __func__);
|
||||
GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
|
||||
#endif
|
||||
return NULL;
|
||||
}
|
||||
|
||||
453
ggml/src/ggml-amx.cpp
Normal file
@@ -0,0 +1,453 @@
|
||||
#include "ggml-amx.h"
|
||||
#include "ggml-amx/common.h"
|
||||
#include "ggml-amx/mmq.h"
|
||||
#include "ggml-backend-impl.h"
|
||||
#include "ggml-impl.h"
|
||||
|
||||
#if defined(__gnu_linux__)
|
||||
#include <sys/syscall.h>
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <memory>
|
||||
|
||||
#if defined(__AMX_INT8__)
|
||||
|
||||
// AMX buffer interface
|
||||
static const char * ggml_backend_amx_buffer_get_name(ggml_backend_buffer_t buffer) {
|
||||
return "AMX";
|
||||
|
||||
GGML_UNUSED(buffer);
|
||||
}
|
||||
|
||||
static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
|
||||
free(buffer->context);
|
||||
}
|
||||
|
||||
static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
|
||||
return (void *)(buffer->context);
|
||||
}
|
||||
|
||||
static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
|
||||
memset((char *)tensor->data + offset, value, size);
|
||||
|
||||
GGML_UNUSED(buffer);
|
||||
}
|
||||
|
||||
static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
|
||||
if (qtype_has_amx_kernels(tensor->type)) {
|
||||
ggml_backend_amx_convert_weight(tensor, data, offset, size);
|
||||
} else {
|
||||
memcpy((char *)tensor->data + offset, data, size);
|
||||
}
|
||||
|
||||
GGML_UNUSED(buffer);
|
||||
}
|
||||
|
||||
static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
|
||||
GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
|
||||
memcpy(data, (const char *)tensor->data + offset, size);
|
||||
|
||||
GGML_UNUSED(buffer);
|
||||
}
|
||||
|
||||
static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
|
||||
if (ggml_backend_buffer_is_host(src->buffer)) {
|
||||
if (qtype_has_amx_kernels(src->type)) {
|
||||
ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_backend_amx_get_alloc_size(dst));
|
||||
} else {
|
||||
memcpy(dst->data, src->data, ggml_nbytes(src));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
GGML_UNUSED(buffer);
|
||||
}
|
||||
|
||||
static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
|
||||
memset(buffer->context, value, buffer->size);
|
||||
}
|
||||
|
||||
static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
|
||||
/* .get_name = */ ggml_backend_amx_buffer_get_name,
|
||||
/* .free_buffer = */ ggml_backend_amx_buffer_free_buffer,
|
||||
/* .get_base = */ ggml_backend_amx_buffer_get_base,
|
||||
/* .init_tensor = */ NULL, // no initialization required
|
||||
/* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor,
|
||||
/* .set_tensor = */ ggml_backend_amx_buffer_set_tensor,
|
||||
/* .get_tensor = */ ggml_backend_amx_buffer_get_tensor,
|
||||
/* .cpy_tensor = */ ggml_backend_amx_buffer_cpy_tensor,
|
||||
/* .clear = */ ggml_backend_amx_buffer_clear,
|
||||
/* .reset = */ NULL,
|
||||
};
|
||||
|
||||
static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
|
||||
return "AMX";
|
||||
|
||||
GGML_UNUSED(buft);
|
||||
}
|
||||
|
||||
static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
|
||||
void * data = aligned_alloc(TENSOR_ALIGNMENT, size);
|
||||
if (data == NULL) {
|
||||
fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size);
|
||||
}
|
||||
|
||||
static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
|
||||
return TENSOR_ALIGNMENT;
|
||||
|
||||
GGML_UNUSED(buft);
|
||||
}
|
||||
|
||||
static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
|
||||
return ggml_backend_amx_get_alloc_size(tensor);
|
||||
|
||||
GGML_UNUSED(buft);
|
||||
}
|
||||
|
||||
static bool ggml_backend_amx_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
|
||||
return false;
|
||||
|
||||
GGML_UNUSED(buft);
|
||||
}
|
||||
|
||||
ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() {
|
||||
static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = {
|
||||
/* .iface = */ {
|
||||
/* .get_name = */ ggml_backend_amx_buffer_type_get_name,
|
||||
/* .alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer,
|
||||
/* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment,
|
||||
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
|
||||
/* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size,
|
||||
/* .is_host = */ ggml_backend_amx_buffer_type_is_host,
|
||||
},
|
||||
/* .device = */ NULL,
|
||||
/* .context = */ NULL,
|
||||
};
|
||||
|
||||
return &ggml_backend_buffer_type_amx;
|
||||
}
|
||||
|
||||
// backend interface
|
||||
|
||||
static const char * ggml_backend_amx_name(ggml_backend_t backend) {
|
||||
return "AMX";
|
||||
|
||||
GGML_UNUSED(backend);
|
||||
}
|
||||
|
||||
static void ggml_backend_amx_free(ggml_backend_t backend) {
|
||||
ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend->context;
|
||||
delete ctx;
|
||||
delete backend;
|
||||
}
|
||||
|
||||
static ggml_backend_buffer_type_t ggml_backend_amx_get_default_buffer_type(ggml_backend_t backend) {
|
||||
return ggml_backend_amx_buffer_type();
|
||||
|
||||
GGML_UNUSED(backend);
|
||||
}
|
||||
|
||||
static enum ggml_status ggml_backend_amx_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
|
||||
ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend->context;
|
||||
|
||||
for (int i = 0; i < cgraph->n_nodes; i++) {
|
||||
struct ggml_tensor * node = cgraph->nodes[i];
|
||||
|
||||
switch (node->op) {
|
||||
case GGML_OP_MUL_MAT:
|
||||
ggml_backend_amx_mul_mat(ctx, node);
|
||||
break;
|
||||
|
||||
case GGML_OP_NONE:
|
||||
case GGML_OP_RESHAPE:
|
||||
case GGML_OP_VIEW:
|
||||
case GGML_OP_PERMUTE:
|
||||
case GGML_OP_TRANSPOSE:
|
||||
break;
|
||||
|
||||
default:
|
||||
fprintf(stderr, "%s: unsupported op %s\n", __func__, ggml_op_desc(node));
|
||||
GGML_ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
return GGML_STATUS_SUCCESS;
|
||||
|
||||
GGML_UNUSED(backend);
|
||||
}
|
||||
|
||||
static struct ggml_backend_i ggml_backend_amx_i = {
|
||||
/* .get_name = */ ggml_backend_amx_name,
|
||||
/* .free = */ ggml_backend_amx_free,
|
||||
/* .get_default_buffer_type = */ ggml_backend_amx_get_default_buffer_type,
|
||||
/* .set_tensor_async = */ NULL,
|
||||
/* .get_tensor_async = */ NULL,
|
||||
/* .cpy_tensor_async = */ NULL,
|
||||
/* .synchronize = */ NULL,
|
||||
/* .graph_plan_create = */ NULL,
|
||||
/* .graph_plan_free = */ NULL,
|
||||
/* .graph_plan_update = */ NULL,
|
||||
/* .graph_plan_compute = */ NULL,
|
||||
/* .graph_compute = */ ggml_backend_amx_graph_compute,
|
||||
/* .supports_op = */ NULL,
|
||||
/* .supports_buft = */ NULL,
|
||||
/* .offload_op = */ NULL,
|
||||
/* .event_record = */ NULL,
|
||||
/* .event_wait = */ NULL,
|
||||
};
|
||||
|
||||
static ggml_guid_t ggml_backend_amx_guid() {
|
||||
static ggml_guid guid = { 0x13, 0xb8, 0xa4, 0xc4, 0xba, 0xfe, 0x51, 0x67, 0x87, 0x44, 0x55, 0x15, 0xb2, 0x35, 0x62, 0x3e };
|
||||
return &guid;
|
||||
}
|
||||
|
||||
#define ARCH_GET_XCOMP_PERM 0x1022
|
||||
#define ARCH_REQ_XCOMP_PERM 0x1023
|
||||
#define XFEATURE_XTILECFG 17
|
||||
#define XFEATURE_XTILEDATA 18
|
||||
|
||||
static bool ggml_amx_init() {
|
||||
#if defined(__gnu_linux__)
|
||||
if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
|
||||
fprintf(stderr, "AMX is not ready to be used!\n");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
#elif defined(_WIN32)
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
ggml_backend_t ggml_backend_amx_init() {
|
||||
|
||||
// invoke a Linux system call to request access to AMX features
|
||||
ggml_amx_init();
|
||||
|
||||
// backend context
|
||||
ggml_backend_amx_context * ctx = new ggml_backend_amx_context;
|
||||
|
||||
// ggml amx backend
|
||||
ggml_backend_t backend = new ggml_backend {
|
||||
/* .guid = */ ggml_backend_amx_guid(),
|
||||
/* .interface = */ ggml_backend_amx_i,
|
||||
/* .device = */ ggml_backend_reg_dev_get(ggml_backend_amx_reg(), 0),
|
||||
/* .context = */ ctx,
|
||||
};
|
||||
|
||||
return backend;
|
||||
}
|
||||
|
||||
bool ggml_backend_is_amx(ggml_backend_t backend) {
|
||||
return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_amx_guid());
|
||||
}
|
||||
|
||||
void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads) {
|
||||
GGML_ASSERT(ggml_backend_is_amx(backend_amx));
|
||||
|
||||
ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend_amx->context;
|
||||
ctx->n_threads = n_threads;
|
||||
}
|
||||
|
||||
// device interface
|
||||
|
||||
static const char * ggml_backend_amx_device_get_name(ggml_backend_dev_t dev) {
|
||||
return "AMX";
|
||||
|
||||
GGML_UNUSED(dev);
|
||||
}
|
||||
|
||||
static const char * ggml_backend_amx_device_get_description(ggml_backend_dev_t dev) {
|
||||
return "Intel Advanced Matrix Extensions";
|
||||
|
||||
GGML_UNUSED(dev);
|
||||
}
|
||||
|
||||
static void ggml_backend_amx_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
|
||||
// TODO
|
||||
*free = 0;
|
||||
*total = 0;
|
||||
|
||||
GGML_UNUSED(dev);
|
||||
}
|
||||
|
||||
static enum ggml_backend_dev_type ggml_backend_amx_device_get_type(ggml_backend_dev_t dev) {
|
||||
return GGML_BACKEND_DEVICE_TYPE_CPU;
|
||||
|
||||
GGML_UNUSED(dev);
|
||||
}
|
||||
|
||||
static void ggml_backend_amx_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
|
||||
props->name = ggml_backend_amx_device_get_name(dev);
|
||||
props->description = ggml_backend_amx_device_get_description(dev);
|
||||
props->type = ggml_backend_amx_device_get_type(dev);
|
||||
ggml_backend_amx_device_get_memory(dev, &props->memory_free, &props->memory_total);
|
||||
|
||||
// `buffer_from_host_ptr` is intended to be used in mmap, when memory layout unchanged
|
||||
props->caps = {
|
||||
/* .async = */ false,
|
||||
/* .host_buffer = */ false,
|
||||
/* .buffer_from_host_ptr = */ false,
|
||||
/* .events = */ false,
|
||||
};
|
||||
}
|
||||
|
||||
static ggml_backend_t ggml_backend_amx_device_init(ggml_backend_dev_t dev, const char * params) {
|
||||
return ggml_backend_amx_init();
|
||||
|
||||
GGML_UNUSED(dev);
|
||||
GGML_UNUSED(params);
|
||||
}
|
||||
|
||||
static ggml_backend_buffer_type_t ggml_backend_amx_device_get_buffer_type(ggml_backend_dev_t dev) {
|
||||
return ggml_backend_amx_buffer_type();
|
||||
|
||||
GGML_UNUSED(dev);
|
||||
}
|
||||
|
||||
static bool ggml_backend_amx_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
|
||||
|
||||
// handle only 2d gemm for now
|
||||
auto is_contiguous_2d = [](const struct ggml_tensor * t) {
|
||||
return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
|
||||
};
|
||||
|
||||
switch (op->op) {
|
||||
case GGML_OP_NONE:
|
||||
case GGML_OP_RESHAPE:
|
||||
case GGML_OP_VIEW:
|
||||
case GGML_OP_PERMUTE:
|
||||
case GGML_OP_TRANSPOSE:
|
||||
return true;
|
||||
|
||||
case GGML_OP_MUL_MAT: {
|
||||
const struct ggml_tensor * src0 = op->src[0];
|
||||
const struct ggml_tensor * src1 = op->src[1];
|
||||
|
||||
const enum ggml_type type = src0->type;
|
||||
const int64_t ne0 = op->ne[0];
|
||||
|
||||
bool is_training = src0->grad || src1->grad;
|
||||
|
||||
// amx kernels enables for Q4_0, Q4_1, Q8_0, F16
|
||||
// Q4_K, Q5_K, Q6_K, IQ4_XS enabled for QK_K = 256
|
||||
bool has_amx_kernels = qtype_has_amx_kernels(type) || (type == GGML_TYPE_F16);
|
||||
|
||||
bool can_use_amx =
|
||||
is_contiguous_2d(src0) && // src0 must be contiguous
|
||||
is_contiguous_2d(src1) && // src1 must be contiguous
|
||||
!is_training && // inference only
|
||||
src1->type == GGML_TYPE_F32 && // src1 must be float32
|
||||
has_amx_kernels && // with amx kernel impls
|
||||
ne0 % (TILE_N * 2) == 0; // out_features is 32x
|
||||
|
||||
return can_use_amx;
|
||||
}
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
GGML_UNUSED(dev);
|
||||
}
|
||||
|
||||
static bool ggml_backend_amx_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
|
||||
return buft->iface.get_name == ggml_backend_amx_buffer_type_get_name;
|
||||
|
||||
GGML_UNUSED(dev);
|
||||
}
|
||||
|
||||
static const struct ggml_backend_device_i ggml_backend_amx_device_i = {
|
||||
/* .get_name = */ ggml_backend_amx_device_get_name,
|
||||
/* .get_description = */ ggml_backend_amx_device_get_description,
|
||||
/* .get_memory = */ ggml_backend_amx_device_get_memory,
|
||||
/* .get_type = */ ggml_backend_amx_device_get_type,
|
||||
/* .get_props = */ ggml_backend_amx_device_get_props,
|
||||
/* .init_backend = */ ggml_backend_amx_device_init,
|
||||
/* .get_buffer_type = */ ggml_backend_amx_device_get_buffer_type,
|
||||
/* .get_host_buffer_type = */ NULL,
|
||||
/* .buffer_from_host_ptr = */ NULL,
|
||||
/* .supports_op = */ ggml_backend_amx_device_supports_op,
|
||||
/* .supports_buft = */ ggml_backend_amx_device_supports_buft,
|
||||
/* .offload_op = */ NULL,
|
||||
/* .event_new = */ NULL,
|
||||
/* .event_free = */ NULL,
|
||||
/* .event_synchronize = */ NULL,
|
||||
};
|
||||
|
||||
// backend reg interface
|
||||
|
||||
static const char * ggml_backend_amx_reg_get_name(ggml_backend_reg_t reg) {
|
||||
return "AMX";
|
||||
|
||||
GGML_UNUSED(reg);
|
||||
}
|
||||
|
||||
static size_t ggml_backend_amx_reg_get_device_count(ggml_backend_reg_t reg) {
|
||||
return 1;
|
||||
|
||||
GGML_UNUSED(reg);
|
||||
}
|
||||
|
||||
static ggml_backend_dev_t ggml_backend_amx_reg_get_device(ggml_backend_reg_t reg, size_t index) {
|
||||
GGML_ASSERT(index == 0);
|
||||
|
||||
static ggml_backend_device ggml_backend_amx_device = {
|
||||
/* .iface = */ ggml_backend_amx_device_i,
|
||||
/* .reg = */ reg,
|
||||
/* .context = */ nullptr,
|
||||
};
|
||||
|
||||
return &ggml_backend_amx_device;
|
||||
|
||||
GGML_UNUSED(reg);
|
||||
GGML_UNUSED(index);
|
||||
}
|
||||
|
||||
static void * ggml_backend_amx_get_proc_address(ggml_backend_reg_t reg, const char * name) {
|
||||
if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) {
|
||||
return (void *)ggml_backend_amx_set_n_threads;
|
||||
}
|
||||
return NULL;
|
||||
|
||||
GGML_UNUSED(reg);
|
||||
GGML_UNUSED(name);
|
||||
}
|
||||
|
||||
static const struct ggml_backend_reg_i ggml_backend_amx_reg_i = {
|
||||
/* .get_name = */ ggml_backend_amx_reg_get_name,
|
||||
/* .get_device_count = */ ggml_backend_amx_reg_get_device_count,
|
||||
/* .get_device = */ ggml_backend_amx_reg_get_device,
|
||||
/* .get_proc_address = */ ggml_backend_amx_get_proc_address,
|
||||
};
|
||||
|
||||
ggml_backend_reg_t ggml_backend_amx_reg(void) {
|
||||
static struct ggml_backend_reg ggml_backend_amx_reg = {
|
||||
/* .iface = */ ggml_backend_amx_reg_i,
|
||||
/* .context = */ NULL,
|
||||
};
|
||||
|
||||
return &ggml_backend_amx_reg;
|
||||
}
|
||||
|
||||
#else // if defined(__AMX_INT8__)
|
||||
|
||||
ggml_backend_t ggml_backend_amx_init(void) {
|
||||
fprintf(stderr, "GGML is not compiled with AMX support!\n");
|
||||
return ggml_backend_t{};
|
||||
}
|
||||
|
||||
void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads) {
|
||||
fprintf(stderr, "GGML is not compiled with AMX support!\n");
|
||||
|
||||
GGML_UNUSED(backend_amx);
|
||||
GGML_UNUSED(n_threads);
|
||||
}
|
||||
|
||||
#endif
|
||||
93
ggml/src/ggml-amx/common.h
Normal file
@@ -0,0 +1,93 @@
|
||||
#pragma once
|
||||
|
||||
#include "ggml.h"
|
||||
#include "ggml-cpu-impl.h" // <immintrin.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
#include <type_traits>
|
||||
|
||||
#if defined(_OPENMP)
|
||||
#include <omp.h>
|
||||
#endif
|
||||
|
||||
#define TILE_M 16
|
||||
#define TILE_N 16
|
||||
#define TILE_K 32
|
||||
#define VNNI_BLK 4
|
||||
|
||||
#define AMX_BLK_SIZE 32
|
||||
|
||||
#define TMM0 0
|
||||
#define TMM1 1
|
||||
#define TMM2 2
|
||||
#define TMM3 3
|
||||
#define TMM4 4
|
||||
#define TMM5 5
|
||||
#define TMM6 6
|
||||
#define TMM7 7
|
||||
|
||||
// parallel routines
|
||||
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
|
||||
inline T div_up(T x, T y) { return (x + y - 1) / y; }
|
||||
|
||||
template <typename T>
|
||||
inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
|
||||
#if 0
|
||||
// onednn partition pattern
|
||||
T& n_my = n_end;
|
||||
if (nth <= 1 || n == 0) {
|
||||
n_start = 0;
|
||||
n_my = n;
|
||||
} else {
|
||||
T n1 = div_up(n, nth);
|
||||
T n2 = n1 - 1;
|
||||
T T1 = n - n2 * nth;
|
||||
n_my = ith < T1 ? n1 : n2;
|
||||
n_start = ith <= T1 ? ith*n1 : T1 * n1 + (ith - T1) * n2;
|
||||
}
|
||||
n_end += n_start;
|
||||
#else
|
||||
// pytorch aten partition pattern
|
||||
T n_my = div_up(n, nth);
|
||||
n_start = ith * n_my;
|
||||
n_end = std::min(n_start + n_my, n);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <typename func_t>
|
||||
inline void parallel_for(int nth, int n, const func_t& f) {
|
||||
#if defined(_OPENMP)
|
||||
#pragma omp parallel num_threads(nth)
|
||||
{
|
||||
//int nth = omp_get_num_threads();
|
||||
int ith = omp_get_thread_num();
|
||||
int tbegin, tend;
|
||||
balance211(n, nth, ith, tbegin, tend);
|
||||
f(tbegin, tend);
|
||||
}
|
||||
#else
|
||||
f(0, n);
|
||||
|
||||
GGML_UNUSED(nth);
|
||||
#endif
|
||||
}
|
||||
|
||||
// quantized types that have AMX support
|
||||
inline bool qtype_has_amx_kernels(const enum ggml_type type) {
|
||||
// TODO: fix padding for vnni format
|
||||
return (type == GGML_TYPE_Q4_0) ||
|
||||
(type == GGML_TYPE_Q4_1);
|
||||
//(type == GGML_TYPE_Q8_0) ||
|
||||
//(type == GGML_TYPE_Q4_K) ||
|
||||
//(type == GGML_TYPE_Q5_K) ||
|
||||
//(type == GGML_TYPE_Q6_K) ||
|
||||
//(type == GGML_TYPE_IQ4_XS);
|
||||
}
|
||||
|
||||
// ggml backend context
|
||||
struct ggml_backend_amx_context {
|
||||
int n_threads = GGML_DEFAULT_N_THREADS;
|
||||
std::unique_ptr<char[]> work_data;
|
||||
size_t work_size = 0;
|
||||
};
|
||||
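common.h above defines div_up, balance211 and parallel_for, which split n work items into contiguous per-thread ranges (via OpenMP when available, falling back to a single-threaded call otherwise). A hedged sketch of how a kernel can use them; the thread count and the per-row operation are illustrative and not taken from the AMX kernels themselves.

#include "ggml-amx/common.h"

// Scale n_rows rows across 8 threads; each lambda invocation receives a
// contiguous [begin, end) range computed by balance211.
static void scale_rows(float * rows, int n_rows) {
    parallel_for(/*nth=*/8, n_rows, [&](int begin, int end) {
        for (int i = begin; i < end; i++) {
            rows[i] *= 2.0f;   // stand-in for a real per-row AMX kernel
        }
    });
}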
2509
ggml/src/ggml-amx/mmq.cpp
Normal file
File diff suppressed because it is too large
17
ggml/src/ggml-amx/mmq.h
Normal file
@@ -0,0 +1,17 @@
|
||||
#pragma once
|
||||
#include "common.h"
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor);
|
||||
|
||||
void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
|
||||
|
||||
void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor * dst);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -88,6 +88,7 @@ extern "C" {
|
||||
|
||||
void (*free)(ggml_backend_t backend);
|
||||
|
||||
// Will be moved to the device interface
|
||||
// buffer allocation
|
||||
ggml_backend_buffer_type_t (*get_default_buffer_type)(ggml_backend_t backend);
|
||||
|
||||
@@ -112,17 +113,9 @@ extern "C" {
|
||||
|
||||
// IMPORTANT: these functions have been moved to the device interface and will be removed from the backend interface
|
||||
// new backends should implement the device interface instead
|
||||
|
||||
// These functions are being moved to the device interface
|
||||
// check if the backend can compute an operation
|
||||
bool (*supports_op) (ggml_backend_t backend, const struct ggml_tensor * op);
|
||||
|
||||
// check if the backend can use tensors allocated in a buffer type
|
||||
bool (*supports_buft)(ggml_backend_t backend, ggml_backend_buffer_type_t buft);
|
||||
|
||||
// check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
|
||||
// these should be expensive operations with large batch sizes that may benefit from running on this backend
|
||||
// even if the weight has to be copied from the CPU temporarily
|
||||
bool (*offload_op) (ggml_backend_t backend, const struct ggml_tensor * op);
|
||||
|
||||
// (optional) event synchronization
|
||||
@@ -184,9 +177,8 @@ extern "C" {
|
||||
// check if the backend can use tensors allocated in a buffer type
|
||||
bool (*supports_buft)(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft);
|
||||
|
||||
// check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
|
||||
// these should be expensive operations with large batch sizes that may benefit from running on this backend
|
||||
// even if the weight has to be copied from the CPU temporarily
|
||||
// (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer
|
||||
// these should be expensive operations that may benefit from running on this backend instead of the CPU backend
|
||||
bool (*offload_op)(ggml_backend_dev_t dev, const struct ggml_tensor * op);
|
||||
|
||||
// (optional) event synchronization
|
||||
|
||||
@@ -329,7 +329,6 @@ bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type
|
||||
if (backend->device) {
|
||||
return ggml_backend_dev_supports_buft(backend->device, buft);
|
||||
}
|
||||
|
||||
return backend->iface.supports_buft(backend, buft);
|
||||
}
|
||||
|
||||
@@ -379,7 +378,7 @@ void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst
|
||||
ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
|
||||
} else if (!ggml_backend_buffer_copy_tensor(src, dst)) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
|
||||
GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
|
||||
#endif
|
||||
size_t nbytes = ggml_nbytes(src);
|
||||
void * data = malloc(nbytes);
|
||||
@@ -463,6 +462,7 @@ enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) {
|
||||
}
|
||||
|
||||
void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) {
|
||||
memset(props, 0, sizeof(*props));
|
||||
device->iface.get_props(device, props);
|
||||
}
|
||||
|
||||
@@ -479,6 +479,10 @@ ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t devic
|
||||
}
|
||||
|
||||
ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) {
|
||||
if (device->iface.get_host_buffer_type == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return device->iface.get_host_buffer_type(device);
|
||||
}
|
||||
|
||||
@@ -495,7 +499,11 @@ bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buff
|
||||
}
|
||||
|
||||
bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
|
||||
return device->iface.offload_op(device, op);
|
||||
if (device->iface.offload_op != NULL) {
|
||||
return device->iface.offload_op(device, op);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Backend (reg)
|
||||
@@ -525,6 +533,38 @@ void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * na
|
||||
#include "ggml-cuda.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
#include "ggml-metal.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_SYCL
|
||||
#include "ggml-sycl.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_VULKAN
|
||||
#include "ggml-vulkan.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_BLAS
|
||||
#include "ggml-blas.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_RPC
|
||||
#include "ggml-rpc.h"
|
||||
#endif
|
||||
|
||||
#ifndef __AMX_INT8__
|
||||
#undef GGML_USE_AMX
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_AMX
|
||||
# include "ggml-amx.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_CANN
|
||||
#include "ggml-cann.h"
|
||||
#endif
|
||||
|
||||
struct ggml_backend_registry {
|
||||
std::vector<ggml_backend_reg_t> backends;
|
||||
std::vector<ggml_backend_dev_t> devices;
|
||||
@@ -533,15 +573,36 @@ struct ggml_backend_registry {
|
||||
#ifdef GGML_USE_CUDA
|
||||
register_backend(ggml_backend_cuda_reg());
|
||||
#endif
|
||||
#ifdef GGML_USE_METAL
|
||||
register_backend(ggml_backend_metal_reg());
|
||||
#endif
|
||||
#ifdef GGML_USE_SYCL
|
||||
register_backend(ggml_backend_sycl_reg());
|
||||
#endif
|
||||
#ifdef GGML_USE_VULKAN
|
||||
register_backend(ggml_backend_vk_reg());
|
||||
#endif
|
||||
#ifdef GGML_USE_BLAS
|
||||
register_backend(ggml_backend_blas_reg());
|
||||
#endif
|
||||
#ifdef GGML_USE_RPC
|
||||
register_backend(ggml_backend_rpc_reg());
|
||||
#endif
|
||||
#ifdef GGML_USE_AMX
|
||||
register_backend(ggml_backend_amx_reg());
|
||||
#endif
|
||||
#ifdef GGML_USE_CANN
|
||||
register_backend(ggml_backend_cann_reg());
|
||||
#endif
|
||||
|
||||
// TODO: kompute
|
||||
|
||||
register_backend(ggml_backend_cpu_reg());
|
||||
|
||||
// TODO: sycl, metal, vulkan, kompute, cann
|
||||
}
|
||||
|
||||
void register_backend(ggml_backend_reg_t reg) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: registered backend %s (%zu devices)\n",
|
||||
GGML_LOG_DEBUG("%s: registered backend %s (%zu devices)\n",
|
||||
__func__, ggml_backend_reg_name(reg), ggml_backend_reg_dev_count(reg));
|
||||
#endif
|
||||
backends.push_back(reg);
|
||||
@@ -552,7 +613,7 @@ struct ggml_backend_registry {
|
||||
|
||||
void register_device(ggml_backend_dev_t device) {
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device));
|
||||
GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device));
|
||||
#endif
|
||||
devices.push_back(device);
|
||||
}
|
||||
@@ -652,8 +713,6 @@ ggml_backend_t ggml_backend_init_best(void) {
|
||||
|
||||
// backend CPU
|
||||
|
||||
static const size_t TENSOR_ALIGNMENT = 32; // required for mmap as gguf only guarantees 32-byte alignment
|
||||
|
||||
static const char * ggml_backend_cpu_buffer_get_name(ggml_backend_buffer_t buffer) {
|
||||
return "CPU";
|
||||
|
||||
@@ -672,7 +731,7 @@ static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
|
||||
}
|
||||
|
||||
static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
|
||||
free(buffer->context);
|
||||
ggml_aligned_free(buffer->context, buffer->size);
|
||||
}
|
||||
|
||||
static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
|
||||
@@ -740,14 +799,19 @@ static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_ty
|
||||
}
|
||||
|
||||
static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
|
||||
size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned
|
||||
void * data = malloc(size); // TODO: use GGML_ALIGNED_MALLOC (move to ggml-impl.h)
|
||||
auto alloc_size = size;
|
||||
if (alloc_size == 0) {
|
||||
alloc_size = 1;
|
||||
}
|
||||
|
||||
void * data = ggml_aligned_malloc(alloc_size);
|
||||
|
||||
if (data == NULL) {
|
||||
fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
|
||||
GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, alloc_size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size);
|
||||
return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, alloc_size);
|
||||
}
|
||||
|
||||
static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
|
||||
@@ -806,7 +870,7 @@ static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_
|
||||
void * ptr;
|
||||
int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size);
|
||||
if (result != 0) {
|
||||
fprintf(stderr, "failed to allocate HBM buffer of size %zu\n", size);
|
||||
GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -1118,9 +1182,10 @@ static void ggml_backend_cpu_device_get_props(ggml_backend_dev_t dev, struct ggm
|
||||
props->type = ggml_backend_cpu_device_get_type(dev);
|
||||
ggml_backend_cpu_device_get_memory(dev, &props->memory_free, &props->memory_total);
|
||||
props->caps = {
|
||||
/* async */ false,
|
||||
/* host_buffer */ false,
|
||||
/* events */ false,
|
||||
/* .async = */ false,
|
||||
/* .host_buffer = */ false,
|
||||
/* .buffer_from_host_ptr = */ true,
|
||||
/* .events = */ false,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1153,7 +1218,7 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st
|
||||
op->type != GGML_TYPE_IQ1_S &&
|
||||
op->type != GGML_TYPE_IQ1_M; // missing type_traits.from_float
|
||||
case GGML_OP_MUL_MAT:
|
||||
return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_internal_get_type_traits(op->src[0]->type).vec_dot_type;
|
||||
return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_get_type_traits(op->src[0]->type)->vec_dot_type;
|
||||
case GGML_OP_ROPE_BACK:
|
||||
return op->src[2] == NULL && (op->op_params[2] & 4) == 0;
|
||||
case GGML_OP_IM2COL_BACK:
|
||||
@@ -1216,16 +1281,22 @@ static ggml_backend_dev_t ggml_backend_cpu_reg_get_device(ggml_backend_reg_t reg
|
||||
};
|
||||
|
||||
return &ggml_backend_cpu_device;
|
||||
}
|
||||
|
||||
static void * ggml_backend_cpu_get_proc_address(ggml_backend_reg_t reg, const char * name) {
|
||||
if (strcmp(name, "ggml_backend_set_n_threads") == 0) {
|
||||
return (void *)ggml_backend_cpu_set_n_threads;
|
||||
}
|
||||
return NULL;
|
||||
|
||||
GGML_UNUSED(reg);
|
||||
GGML_UNUSED(index);
|
||||
}
|
||||
|
||||
static const struct ggml_backend_reg_i ggml_backend_cpu_reg_i = {
|
||||
/* .get_name = */ ggml_backend_cpu_reg_get_name,
|
||||
/* .get_device_count = */ ggml_backend_cpu_reg_get_device_count,
|
||||
/* .get_device = */ ggml_backend_cpu_reg_get_device,
|
||||
/* .get_proc_address = */ NULL,
|
||||
/* .get_proc_address = */ ggml_backend_cpu_get_proc_address,
|
||||
};
|
||||
|
||||
ggml_backend_reg_t ggml_backend_cpu_reg(void) {
|
||||
@@ -1422,7 +1493,7 @@ static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, co
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n",
|
||||
GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n",
|
||||
__func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name);
|
||||
#endif
|
||||
|
||||
@@ -1511,13 +1582,13 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str
|
||||
for (int i = 0; i < graph->n_nodes; i++) {
|
||||
if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
|
||||
ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
|
||||
fprintf(stderr, "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
|
||||
GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
|
||||
sched->splits[cur_split].n_inputs);
|
||||
for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
|
||||
fprintf(stderr, "[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
|
||||
GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
|
||||
fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
|
||||
}
|
||||
fprintf(stderr, "\n");
|
||||
GGML_LOG_DEBUG("\n");
|
||||
cur_split++;
|
||||
}
|
||||
struct ggml_tensor * node = graph->nodes[i];
|
||||
@@ -1525,7 +1596,7 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str
|
||||
continue;
|
||||
}
|
||||
ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node);
|
||||
fprintf(stderr, "node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name,
|
||||
GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name,
|
||||
fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node));
|
||||
for (int j = 0; j < GGML_MAX_SRC; j++) {
|
||||
struct ggml_tensor * src = node->src[j];
|
||||
@@ -1533,10 +1604,10 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str
|
||||
continue;
|
||||
}
|
||||
ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src);
|
||||
fprintf(stderr, " %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
|
||||
GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
|
||||
fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
|
||||
}
|
||||
fprintf(stderr, "\n");
|
||||
GGML_LOG_DEBUG("\n");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2050,11 +2121,11 @@ static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
|
||||
// the re-allocation may cause the split inputs to be moved to a different address
|
||||
ggml_backend_sched_synchronize(sched);
|
||||
#ifndef NDEBUG
|
||||
fprintf(stderr, "%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
|
||||
GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
|
||||
#endif
|
||||
ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids);
|
||||
if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
|
||||
fprintf(stderr, "%s: failed to allocate graph\n", __func__);
|
||||
GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -2197,6 +2268,7 @@ ggml_backend_sched_t ggml_backend_sched_new(
|
||||
sched->backends[b] = backends[b];
|
||||
sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]);
|
||||
GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b]));
|
||||
|
||||
if (sched->n_copies > 1) {
|
||||
for (int c = 0; c < sched->n_copies; c++) {
|
||||
sched->events[b][c] = ggml_backend_event_new(backends[b]->device);
|
||||
@@ -2448,7 +2520,7 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
|
||||
struct ggml_context * ctx_unallocated = ggml_init(params);
|
||||
|
||||
if (ctx_allocated == NULL || ctx_unallocated == NULL) {
|
||||
fprintf(stderr, "failed to allocate context for graph copy\n");
|
||||
GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__);
|
||||
ggml_hash_set_free(&hash_set);
|
||||
free(node_copies);
|
||||
free(node_init);
|
||||
@@ -2471,7 +2543,7 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
|
||||
// allocate nodes
|
||||
ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
|
||||
if (buffer == NULL) {
|
||||
fprintf(stderr, "failed to allocate buffer for graph copy\n");
|
||||
GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__);
|
||||
ggml_hash_set_free(&hash_set);
|
||||
free(node_copies);
|
||||
free(node_init);
|
||||
|
||||
@@ -4,6 +4,7 @@

#include <future>
#include <vector>
+#include <cstring>

#if defined(GGML_USE_ACCELERATE)
#   include <Accelerate/Accelerate.h>
@@ -26,30 +27,6 @@ struct ggml_backend_blas_context {
#endif
};

-// helper function to determine if it is better to use BLAS or not
-// for large matrices, BLAS is faster
-static bool ggml_backend_blas_use_blas(const struct ggml_tensor * dst) {
-    const struct ggml_tensor * src0 = dst->src[0];
-    const struct ggml_tensor * src1 = dst->src[1];
-
-    const int64_t ne10 = src1->ne[0];
-
-    const int64_t ne0 = dst->ne[0];
-    const int64_t ne1 = dst->ne[1];
-
-    // TODO: find the optimal values for these
-    if (ggml_is_contiguous(src0) &&
-        ggml_is_contiguous(src1) &&
-        src1->type == GGML_TYPE_F32 &&
-        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
-
-        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
-        return true;
-    }
-
-    return false;
-}
-
static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) {
    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];
@@ -88,8 +65,8 @@ static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct gg

    // convert src0 to float
    if (type != GGML_TYPE_F32) {
-        ggml_type_traits_t type_traits = ggml_internal_get_type_traits(type);
-        ggml_to_float_t const to_float = type_traits.to_float;
+        const auto * type_traits = ggml_get_type_traits(type);
+        ggml_to_float_t const to_float = type_traits->to_float;

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
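The mul_mat hunk above switches from the internal ggml_internal_get_type_traits() call to the public ggml_get_type_traits() accessor, which returns a pointer to the type's traits, including its to_float converter. A rough, hedged sketch of using that accessor outside this file, assuming the ggml_get_type_traits() / ggml_to_float_t declarations from ggml.h and a tensor t with contiguous rows:

#include "ggml.h"
#include <cstring>
#include <vector>

// dequantize a single row of tensor `t` into 32-bit floats
static std::vector<float> row_to_float(const struct ggml_tensor * t, int64_t row) {
    std::vector<float> out(t->ne[0]);
    const char * src = (const char *) t->data + row * t->nb[1];

    if (t->type == GGML_TYPE_F32) {
        std::memcpy(out.data(), src, t->ne[0] * sizeof(float));
    } else {
        const auto * traits = ggml_get_type_traits(t->type);
        if (traits->to_float) {
            traits->to_float(src, out.data(), t->ne[0]);
        }
    }
    return out;
}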
@@ -235,7 +212,7 @@ static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct g

// backend interface

-static const char * ggml_backend_blas_name(ggml_backend_t backend) {
+static const char * ggml_backend_blas_get_name(ggml_backend_t backend) {
    return "BLAS";

    GGML_UNUSED(backend);
@@ -285,29 +262,8 @@ static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend,
    GGML_UNUSED(backend);
}

-static bool ggml_backend_blas_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
-    const struct ggml_tensor * src0 = op->src[0];
-    const struct ggml_tensor * src1 = op->src[1];
-
-    return (op->op == GGML_OP_MUL_MAT  && ggml_backend_blas_use_blas(op)) ||
-           (op->op == GGML_OP_OUT_PROD && op->src[0]->type == GGML_TYPE_F32 &&
-                                          op->src[1]->type == GGML_TYPE_F32 &&
-                                          ggml_is_matrix(src0) &&
-                                          ggml_is_matrix(src1) &&
-                                          ggml_is_contiguous(src0) &&
-                                          (ggml_is_contiguous(src1) || ggml_is_transposed(src1)));
-
-    GGML_UNUSED(backend);
-}
-
-static bool ggml_backend_blas_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
-    return ggml_backend_buft_is_host(buft);
-
-    GGML_UNUSED(backend);
-}
-
static struct ggml_backend_i blas_backend_i = {
-    /* .get_name                = */ ggml_backend_blas_name,
+    /* .get_name                = */ ggml_backend_blas_get_name,
    /* .free                    = */ ggml_backend_blas_free,
    /* .get_default_buffer_type = */ ggml_backend_blas_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,
@@ -319,8 +275,8 @@ static struct ggml_backend_i blas_backend_i = {
    /* .graph_plan_update       = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_blas_graph_compute,
-    /* .supports_op             = */ ggml_backend_blas_supports_op,
-    /* .supports_buft           = */ ggml_backend_blas_supports_buft,
+    /* .supports_op             = */ NULL,
+    /* .supports_buft           = */ NULL,
    /* .offload_op              = */ NULL,
    /* .event_record            = */ NULL,
    /* .event_wait              = */ NULL,
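Note that blas_backend_i now leaves .supports_op and .supports_buft as NULL: after this refactor those queries are answered by the device object added further down rather than by the backend itself. A hedged sketch of asking the same question through the device, assuming the ggml_backend_dev_supports_op() declaration from ggml-backend.h and that ggml_backend_blas_reg() is exported via ggml-blas.h as the definition below suggests:

#include "ggml-backend.h"
#include "ggml-blas.h"

// true if the BLAS device is willing to run this node (e.g. a large, contiguous F32 MUL_MAT)
static bool blas_can_run(const struct ggml_tensor * node) {
    ggml_backend_dev_t dev = ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0);
    return ggml_backend_dev_supports_op(dev, node);
}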
@@ -337,18 +293,18 @@ ggml_backend_t ggml_backend_blas_init(void) {
    ggml_backend_t backend = new ggml_backend {
        /* .guid      = */ ggml_backend_blas_guid(),
        /* .interface = */ blas_backend_i,
-        /* .device    = */ nullptr,
+        /* .device    = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0),
        /* .context   = */ ctx,
    };

-#if !defined(NDEBUG) && defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP)
+#if defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP)
    if (openblas_get_parallel() != OPENBLAS_OPENMP) {
-        fprintf(stderr, "%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__);
+        GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__);
    }
#endif

-#if !defined(NDEBUG) && defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP)
-    fprintf(stderr, "%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__);
+#if defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP)
+    GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__);
#endif

    return backend;
@@ -364,3 +320,205 @@ void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads)
    ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context;
    ctx->n_threads = n_threads;
}
+
+// device interface
+
+static const char * ggml_backend_blas_device_get_name(ggml_backend_dev_t dev) {
+    return "BLAS";
+
+    GGML_UNUSED(dev);
+}
+
+static const char * ggml_backend_blas_device_get_description(ggml_backend_dev_t dev) {
+#if defined(GGML_USE_ACCELERATE)
+    return "Accelerate";
+#elif defined(GGML_BLAS_USE_MKL)
+    return "MKL";
+#elif defined(GGML_BLAS_USE_BLIS)
+    return "BLIS";
+#elif defined(GGML_BLAS_USE_NVPL)
+    return "NVPL";
+#elif defined(OPENBLAS_VERSION)
+    return "OpenBLAS";
+#else
+    return "BLAS";
+#endif
+
+    GGML_UNUSED(dev);
+}
+
+static void ggml_backend_blas_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
+    // TODO
+    *free = 0;
+    *total = 0;
+
+    GGML_UNUSED(dev);
+}
+
+static enum ggml_backend_dev_type ggml_backend_blas_device_get_type(ggml_backend_dev_t dev) {
+    return GGML_BACKEND_DEVICE_TYPE_CPU;
+
+    GGML_UNUSED(dev);
+}
+
+static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
+    props->name        = ggml_backend_blas_device_get_name(dev);
+    props->description = ggml_backend_blas_device_get_description(dev);
+    props->type        = ggml_backend_blas_device_get_type(dev);
+    ggml_backend_blas_device_get_memory(dev, &props->memory_free, &props->memory_total);
+    props->caps = {
+        /* .async                = */ false,
+        /* .host_buffer          = */ false,
+        /* .buffer_from_host_ptr = */ true,
+        /* .events               = */ false,
+    };
+}
+
+static ggml_backend_t ggml_backend_blas_device_init(ggml_backend_dev_t dev, const char * params) {
+    return ggml_backend_blas_init();
+
+    GGML_UNUSED(dev);
+    GGML_UNUSED(params);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_blas_device_get_buffer_type(ggml_backend_dev_t dev) {
+    return ggml_backend_cpu_buffer_type();
+
+    GGML_UNUSED(dev);
+}
+
+static ggml_backend_buffer_t ggml_backend_blas_device_buffer_from_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
+    return ggml_backend_cpu_buffer_from_ptr(ptr, size);
+
+    GGML_UNUSED(dev);
+    GGML_UNUSED(max_tensor_size);
+}
+
+static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
+    const struct ggml_tensor * src0 = op->src[0];
+    const struct ggml_tensor * src1 = op->src[1];
+
+    switch (op->op) {
+        case GGML_OP_NONE:
+        case GGML_OP_RESHAPE:
+        case GGML_OP_VIEW:
+        case GGML_OP_PERMUTE:
+        case GGML_OP_TRANSPOSE:
+            return true;
+
+        case GGML_OP_MUL_MAT:
+        {
+            // BLAS usually is only faster for large matrices
+            const struct ggml_tensor * src0 = op->src[0];
+            const struct ggml_tensor * src1 = op->src[1];
+
+            const int64_t ne10 = src1->ne[0];
+
+            const int64_t ne0 = op->ne[0];
+            const int64_t ne1 = op->ne[1];
+
+            // TODO: find the optimal value
+            const int64_t min_batch = 32;
+
+            return ggml_is_contiguous(src0) &&
+                   ggml_is_contiguous(src1) &&
+                   src1->type == GGML_TYPE_F32 &&
+                   (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) &&
+                   (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
+        }
+
+        case GGML_OP_OUT_PROD:
+            return op->src[0]->type == GGML_TYPE_F32 &&
+                   op->src[1]->type == GGML_TYPE_F32 &&
+                   ggml_is_matrix(src0) &&
+                   ggml_is_matrix(src1) &&
+                   ggml_is_contiguous(src0) &&
+                   (ggml_is_contiguous(src1) || ggml_is_transposed(src1)) &&
+                   (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
+
+        default:
+            return false;
+    }
+
+    GGML_UNUSED(dev);
+}
+
+static bool ggml_backend_blas_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
+    return ggml_backend_buft_is_host(buft);
+
+    GGML_UNUSED(dev);
+}
+
+static const struct ggml_backend_device_i ggml_backend_blas_device_i = {
+    /* .get_name             = */ ggml_backend_blas_device_get_name,
+    /* .get_description      = */ ggml_backend_blas_device_get_description,
+    /* .get_memory           = */ ggml_backend_blas_device_get_memory,
+    /* .get_type             = */ ggml_backend_blas_device_get_type,
+    /* .get_props            = */ ggml_backend_blas_device_get_props,
+    /* .init_backend         = */ ggml_backend_blas_device_init,
+    /* .get_buffer_type      = */ ggml_backend_blas_device_get_buffer_type,
+    /* .get_host_buffer_type = */ NULL,
+    /* .buffer_from_host_ptr = */ ggml_backend_blas_device_buffer_from_ptr,
+    /* .supports_op          = */ ggml_backend_blas_device_supports_op,
+    /* .supports_buft        = */ ggml_backend_blas_device_supports_buft,
+    /* .offload_op           = */ NULL,
+    /* .event_new            = */ NULL,
+    /* .event_free           = */ NULL,
+    /* .event_synchronize    = */ NULL,
+};
+
+// backend reg interface
+
+static const char * ggml_backend_blas_reg_get_name(ggml_backend_reg_t reg) {
+    return "BLAS";
+
+    GGML_UNUSED(reg);
+}
+
+static size_t ggml_backend_blas_reg_get_device_count(ggml_backend_reg_t reg) {
+    return 1;
+
+    GGML_UNUSED(reg);
+}
+
+static ggml_backend_dev_t ggml_backend_blas_reg_get_device(ggml_backend_reg_t reg, size_t index) {
+    GGML_ASSERT(index == 0);
+
+    static ggml_backend_device ggml_backend_blas_device = {
+        /* .iface   = */ ggml_backend_blas_device_i,
+        /* .reg     = */ reg,
+        /* .context = */ nullptr,
+    };
+
+    return &ggml_backend_blas_device;
+
+    GGML_UNUSED(reg);
+    GGML_UNUSED(index);
+}
+
+static void * ggml_backend_blas_get_proc_address(ggml_backend_reg_t reg, const char * name) {
+    if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) {
+        return (void *)ggml_backend_blas_set_n_threads;
+    }
+    return NULL;
+
+    GGML_UNUSED(reg);
+    GGML_UNUSED(name);
+}
+
+static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = {
+    /* .get_name         = */ ggml_backend_blas_reg_get_name,
+    /* .get_device_count = */ ggml_backend_blas_reg_get_device_count,
+    /* .get_device       = */ ggml_backend_blas_reg_get_device,
+    /* .get_proc_address = */ ggml_backend_blas_get_proc_address,
+};
+
+ggml_backend_reg_t ggml_backend_blas_reg(void) {
+    static struct ggml_backend_reg ggml_backend_blas_reg = {
+        /* .iface   = */ ggml_backend_blas_reg_i,
+        /* .context = */ NULL,
+    };
+
+    return &ggml_backend_blas_reg;
+}
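Taken together, the added device and registry code lets generic callers reach the BLAS backend without any BLAS-specific API beyond the registry entry point. An end-to-end usage sketch, not part of the diff, assuming ggml_backend_dev_init(), ggml_backend_reg_get_proc_address(), ggml_backend_free() and ggml_backend_graph_compute() from ggml-backend.h:

#include "ggml-backend.h"
#include "ggml-blas.h"
#include <cstdio>

int main() {
    // look up the BLAS device through its registry entry and create a backend from it
    ggml_backend_reg_t reg = ggml_backend_blas_reg();
    ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, 0);
    ggml_backend_t backend = ggml_backend_dev_init(dev, /*params=*/nullptr);
    if (backend == nullptr) {
        fprintf(stderr, "failed to initialize the BLAS backend\n");
        return 1;
    }

    // the thread-count setter is exposed generically through get_proc_address (see above)
    typedef void (*set_n_threads_t)(ggml_backend_t, int);
    auto set_n_threads = (set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
    if (set_n_threads != nullptr) {
        set_n_threads(backend, 8);
    }

    // ... build a graph and run it with ggml_backend_graph_compute(backend, graph) ...

    ggml_backend_free(backend);
    return 0;
}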
Some files were not shown because too many files have changed in this diff.