Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2026-04-23 16:37:33 +03:00.

Compare commits: b4660 ... gg/specula (83 commits)
Commits (SHA1):

0f2bf55502
965ad1c08a
9626d9351a
b58934c183
63e489c025
63ac128563
5137da7b8c
09aaf4f1f5
73e2ed3ce3
f7b1116af1
c4d29baf32
2eea03d86a
0f2bbe6564
fe163d5bf3
818a340ea8
bf42a23d0a
c2ea16f260
6dde178248
fc10c38ded
22885105a6
c2cd24fbfd
68ff663a04
f355229692
fc1b0d0936
89daa2564f
300907b211
94b87f87b5
dbc2ec59b5
3d68f034da
38e32eb6a0
a4f011e8d0
a7b8ce2260
04045bb842
8a8c4ceb60
c1f958c038
c48f630d1c
bd6e55bfd3
c7f460ab88
27e8a23300
e4376270d9
3e69319772
a394039db0
be3bbd6215
31afcbee0e
5c4284d57b
bfd11a2344
0fb77f821f
e598697d63
fef0cbeadf
748ee9fe93
198b1ec611
c3d6af7cd2
369be5598a
4078c77f98
90e4dba461
a18f481f99
b9ab0a4d0b
7b891bdc86
81732619fd
507f9174fe
19b392d58d
0893e0114e
d7b31a9d84
9ac3457b39
c2a67efe38
b044a0fe3c
19d3c8293b
98f6b0fd1e
55ac8c7791
e6e6583199
aaa5505307
bdcf8b6a56
4d3465c5ae
d80be897ac
3ab410f55f
0cf867160c
d2fe216fb2
ed926d8833
2d219b389e
333820d749
c026ba3c23
7ee953a64a
ec3bc8270b
@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=12.6.0
+ARG CUDA_VERSION=12.4.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

@@ -17,10 +17,10 @@ Version: %( date "+%%Y%%m%%d" )
 Release: 1%{?dist}
 Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
 License: MIT
-Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
+Source0: https://github.com/ggml-org/llama.cpp/archive/refs/heads/master.tar.gz
 BuildRequires: coreutils make gcc-c++ git cuda-toolkit
 Requires: cuda-toolkit
-URL: https://github.com/ggerganov/llama.cpp
+URL: https://github.com/ggml-org/llama.cpp

 %define debug_package %{nil}
 %define source_date_epoch_from_changelog 0

@@ -18,10 +18,10 @@ Version: %( date "+%%Y%%m%%d" )
 Release: 1%{?dist}
 Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
 License: MIT
-Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
+Source0: https://github.com/ggml-org/llama.cpp/archive/refs/heads/master.tar.gz
 BuildRequires: coreutils make gcc-c++ git libstdc++-devel
 Requires: libstdc++
-URL: https://github.com/ggerganov/llama.cpp
+URL: https://github.com/ggml-org/llama.cpp

 %define debug_package %{nil}
 %define source_date_epoch_from_changelog 0

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG MUSA_VERSION=rc3.1.0
+ARG MUSA_VERSION=rc3.1.1
 # Target the MUSA build image
 ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

@@ -133,12 +133,12 @@ effectiveStdenv.mkDerivation (finalAttrs: {
     --replace '[bundle pathForResource:@"default" ofType:@"metallib"];' "@\"$out/bin/default.metallib\";"
   '';

-  # With PR#6015 https://github.com/ggerganov/llama.cpp/pull/6015,
+  # With PR#6015 https://github.com/ggml-org/llama.cpp/pull/6015,
   # `default.metallib` may be compiled with Metal compiler from XCode
   # and we need to escape sandbox on MacOS to access Metal compiler.
   # `xcrun` is used find the path of the Metal compiler, which is varible
   # and not on $PATH
-  # see https://github.com/ggerganov/llama.cpp/pull/6118 for discussion
+  # see https://github.com/ggml-org/llama.cpp/pull/6118 for discussion
   __noChroot = effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders;

   nativeBuildInputs =

@@ -220,7 +220,7 @@ effectiveStdenv.mkDerivation (finalAttrs: {
     broken = (useMetalKit && !effectiveStdenv.isDarwin);

     description = "Inference of LLaMA model in pure C/C++${descriptionSuffix}";
-    homepage = "https://github.com/ggerganov/llama.cpp/";
+    homepage = "https://github.com/ggml-org/llama.cpp/";
     license = lib.licenses.mit;

     # Accommodates `nix run` and `lib.getExe`

@@ -11,7 +11,7 @@ ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-co
 FROM ${BASE_ROCM_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
-# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
+# List from https://github.com/ggml-org/llama.cpp/pull/1087#issuecomment-1682807878
 # This is mostly tied to rocBLAS supported archs.
 # gfx803, gfx900, gfx1032, gfx1101, gfx1102,not officialy supported
 # gfx906 is deprecated
.github/ISSUE_TEMPLATE/020-enhancement.yml (6 changed lines)

@@ -6,7 +6,7 @@ body:
   - type: markdown
     attributes:
       value: |
-        [Please post your idea first in Discussion if there is not yet a consensus for this enhancement request. This will help to keep this issue tracker focused on enhancements that the community has agreed needs to be implemented.](https://github.com/ggerganov/llama.cpp/discussions/categories/ideas)
+        [Please post your idea first in Discussion if there is not yet a consensus for this enhancement request. This will help to keep this issue tracker focused on enhancements that the community has agreed needs to be implemented.](https://github.com/ggml-org/llama.cpp/discussions/categories/ideas)

   - type: checkboxes
     id: prerequisites

@@ -16,11 +16,11 @@ body:
       options:
         - label: I am running the latest code. Mention the version if possible as well.
           required: true
-        - label: I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md).
+        - label: I carefully followed the [README.md](https://github.com/ggml-org/llama.cpp/blob/master/README.md).
           required: true
         - label: I searched using keywords relevant to my issue to make sure that I am creating a new issue that is not already open (or closed).
           required: true
-        - label: I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new and useful enhancement to share.
+        - label: I reviewed the [Discussions](https://github.com/ggml-org/llama.cpp/discussions), and have a new and useful enhancement to share.
           required: true

   - type: textarea
.github/ISSUE_TEMPLATE/030-research.yml (2 changed lines)

@@ -6,7 +6,7 @@ body:
   - type: markdown
     attributes:
       value: |
-        Don't forget to check for any [duplicate research issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22)
+        Don't forget to check for any [duplicate research issue tickets](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22)

   - type: checkboxes
     id: research-stage
.github/ISSUE_TEMPLATE/040-refactor.yml (4 changed lines)

@@ -6,8 +6,8 @@ body:
   - type: markdown
     attributes:
       value: |
-        Don't forget to [check for existing refactor issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3Arefactoring) in case it's already covered.
-        Also you may want to check [Pull request refactor label as well](https://github.com/ggerganov/llama.cpp/pulls?q=is%3Aopen+is%3Apr+label%3Arefactoring) for duplicates too.
+        Don't forget to [check for existing refactor issue tickets](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3Arefactoring) in case it's already covered.
+        Also you may want to check [Pull request refactor label as well](https://github.com/ggml-org/llama.cpp/pulls?q=is%3Aopen+is%3Apr+label%3Arefactoring) for duplicates too.

   - type: textarea
     id: background-description
.github/ISSUE_TEMPLATE/config.yml (6 changed lines)

@@ -1,11 +1,11 @@
 blank_issues_enabled: true
 contact_links:
   - name: Got an idea?
-    url: https://github.com/ggerganov/llama.cpp/discussions/categories/ideas
+    url: https://github.com/ggml-org/llama.cpp/discussions/categories/ideas
     about: Pop it there. It may then become an enhancement ticket.
   - name: Got a question?
-    url: https://github.com/ggerganov/llama.cpp/discussions/categories/q-a
+    url: https://github.com/ggml-org/llama.cpp/discussions/categories/q-a
     about: Ask a question there!
   - name: Want to contribute?
-    url: https://github.com/ggerganov/llama.cpp/wiki/contribute
+    url: https://github.com/ggml-org/llama.cpp/wiki/contribute
     about: Head to the contribution guide page of the wiki for areas you can help with
.github/pull_request_template.md (2 changed lines)

@@ -1 +1 @@
-*Make sure to read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md) before submitting a PR*
+*Make sure to read the [contributing guidelines](https://github.com/ggml-org/llama.cpp/blob/master/CONTRIBUTING.md) before submitting a PR*
.github/workflows/bench.yml.disabled (12 changed lines)

@@ -1,5 +1,5 @@
 # TODO: there have been some issues with the workflow, so disabling for now
-# https://github.com/ggerganov/llama.cpp/issues/7893
+# https://github.com/ggml-org/llama.cpp/issues/7893
 #
 # Benchmark
 name: Benchmark

@@ -57,17 +57,7 @@ jobs:

     if: |
       inputs.gpu-series == 'Standard_NC4as_T4_v3'
-      || (
-        github.event_name == 'schedule'
-        && github.ref_name == 'master'
-        && github.repository_owner == 'ggerganov'
-      )
       || github.event_name == 'pull_request_target'
-      || (
-        github.event_name == 'push'
-        && github.event.ref == 'refs/heads/master'
-        && github.repository_owner == 'ggerganov'
-      )
     steps:
       - name: Clone
         id: checkout
.github/workflows/build.yml (38 changed lines)

@@ -129,7 +129,7 @@ jobs:
         run: |
           sysctl -a
           # Metal is disabled due to intermittent failures with Github runners not having a GPU:
-          # https://github.com/ggerganov/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
+          # https://github.com/ggml-org/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
           cmake -B build \
             -DCMAKE_BUILD_RPATH="@loader_path" \
             -DLLAMA_FATAL_WARNINGS=ON \

@@ -374,6 +374,8 @@ jobs:
       - name: Clone
         id: checkout
         uses: actions/checkout@v4
+        with:
+          fetch-depth: 0

       - name: ccache
         uses: hendrikmuhs/ccache-action@v1.2.16

@@ -401,7 +403,35 @@ jobs:
         run: |
           cd build
           # This is using llvmpipe and runs slower than other backends
-          ctest -L main --verbose --timeout 1800
+          ctest -L main --verbose --timeout 2700

+      - name: Determine tag name
+        id: tag
+        shell: bash
+        run: |
+          BUILD_NUMBER="$(git rev-list --count HEAD)"
+          SHORT_HASH="$(git rev-parse --short=7 HEAD)"
+          if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+            echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+          else
+            SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+            echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Pack artifacts
+        id: pack_artifacts
+        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
+        run: |
+          cp LICENSE ./build/bin/
+          cp examples/run/linenoise.cpp/LICENSE ./build/bin/LICENSE.linenoise.cpp
+          zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip ./build/bin/*
+
+      - name: Upload artifacts
+        if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
+        uses: actions/upload-artifact@v4
+        with:
+          path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip
+          name: llama-bin-ubuntu-vulkan-x64.zip

   ubuntu-22-cmake-hip:
     runs-on: ubuntu-22.04

@@ -443,7 +473,7 @@ jobs:

   ubuntu-22-cmake-musa:
     runs-on: ubuntu-22.04
-    container: mthreads/musa:rc3.1.0-devel-ubuntu22.04
+    container: mthreads/musa:rc3.1.1-devel-ubuntu22.04

     steps:
       - name: Clone

@@ -1345,8 +1375,10 @@ jobs:

     needs:
       - ubuntu-cpu-cmake
+      - ubuntu-22-cmake-vulkan
       - windows-latest-cmake
       - windows-2019-cmake-cuda
       - windows-latest-cmake-sycl
       - windows-latest-cmake-hip-release
       - macOS-latest-cmake-arm64
       - macOS-latest-cmake-x64
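As a worked example of the tag logic added above: on `master`, `git rev-list --count HEAD` supplies the build number, so a checkout with 4660 commits produces `name=b4660`, which is exactly the shape of the `b4660` label this compare view starts from. On a branch such as `gg/specula`, the slash is rewritten to `-` and the short hash is appended, e.g. `gg-specula-b4660-<hash>` (hash value hypothetical).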
.github/workflows/docker.yml (2 changed lines)

@@ -51,6 +51,8 @@ jobs:

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
+        with:
+          image: tonistiigi/binfmt:qemu-v7.0.0-28

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
.github/workflows/labeler.yml (2 changed lines)

@@ -11,7 +11,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
         with:
-          repository: "ggerganov/llama.cpp"
+          repository: "ggml-org/llama.cpp"
       - uses: actions/labeler@v5
         with:
           configuration-path: '.github/labeler.yml'
.gitignore (1 changed line)

@@ -98,6 +98,7 @@ examples/server/*.css.hpp
 examples/server/*.html.hpp
 examples/server/*.js.hpp
 examples/server/*.mjs.hpp
+examples/server/*.gz.hpp
 !build_64.sh
 !examples/*.bat
 !examples/*/*.kts
@@ -12,7 +12,7 @@

 - Squash-merge PRs
 - Use the following format for the squashed commit title: `<module> : <commit title> (#<issue_number>)`. For example: `utils : fix typo in utils.py (#1234)`
-- Optionally pick a `<module>` from here: https://github.com/ggerganov/llama.cpp/wiki/Modules
+- Optionally pick a `<module>` from here: https://github.com/ggml-org/llama.cpp/wiki/Modules
 - Consider adding yourself to [CODEOWNERS](CODEOWNERS)

 # Coding guidelines

@@ -40,14 +40,14 @@

 - Try to follow the existing patterns in the code (indentation, spaces, etc.). In case of doubt use `clang-format` to format the added code
 - For anything not covered in the current guidelines, refer to the [C++ Core Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines)
 - Tensors store data in row-major order. We refer to dimension 0 as columns, 1 as rows, 2 as matrices
-- Matrix multiplication is unconventional: [`C = ggml_mul_mat(ctx, A, B)`](https://github.com/ggerganov/llama.cpp/blob/880e352277fc017df4d5794f0c21c44e1eae2b84/ggml.h#L1058-L1064) means $C^T = A B^T \Leftrightarrow C = B A^T.$
+- Matrix multiplication is unconventional: [`C = ggml_mul_mat(ctx, A, B)`](https://github.com/ggml-org/llama.cpp/blob/880e352277fc017df4d5794f0c21c44e1eae2b84/ggml.h#L1058-L1064) means $C^T = A B^T \Leftrightarrow C = B A^T.$

 

 # Naming guidelines

 - Use `snake_case` for function, variable and type names
-- Naming usually optimizes for longest common prefix (see https://github.com/ggerganov/ggml/pull/302#discussion_r1243240963)
+- Naming usually optimizes for longest common prefix (see https://github.com/ggml-org/ggml/pull/302#discussion_r1243240963)

 ```cpp
 // not OK

@@ -122,4 +122,4 @@

 The Github issues, PRs and discussions contain a lot of information that can be useful to get familiar with the codebase. For convenience, some of the more important information is referenced from Github projects:

-https://github.com/ggerganov/llama.cpp/projects
+https://github.com/ggml-org/llama.cpp/projects
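Since the `ggml_mul_mat` convention quoted above trips people up, here is a minimal sketch of the dimension bookkeeping it implies. This is an editor's illustration assuming the public ggml C API (a valid `struct ggml_context * ctx` and `ggml_new_tensor_2d`); it is not part of the diff:

```cpp
// Sketch only. In ggml, ne[0] is the column count (row-major storage),
// and ggml_mul_mat(ctx, A, B) requires A->ne[0] == B->ne[0] (a shared k).
struct ggml_tensor * A = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64 /* k cols */, 32 /* m rows */);
struct ggml_tensor * B = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64 /* k cols */, 16 /* n rows */);

// C ends up with ne[0] == 32 (= m) and ne[1] == 16 (= n): in conventional
// notation C = B A^T, equivalently C^T = A B^T, matching the guideline above.
struct ggml_tensor * C = ggml_mul_mat(ctx, A, B);
```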
Makefile (10 changed lines)

@@ -1,5 +1,5 @@
 ifndef LLAMA_MAKEFILE
-$(error The Makefile build is deprecated. Use the CMake build instead. For more details, see https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md)
+$(error The Makefile build is deprecated. Use the CMake build instead. For more details, see https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md)
 endif

 # Define the default target now so that it is always the first target

@@ -463,7 +463,7 @@ endif
 ifneq '' '$(findstring mingw,$(shell $(CC) -dumpmachine))'
 	# The stack is only 16-byte aligned on Windows, so don't let gcc emit aligned moves.
 	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
-	# https://github.com/ggerganov/llama.cpp/issues/2922
+	# https://github.com/ggml-org/llama.cpp/issues/2922
 	MK_CFLAGS   += -Xassembler -muse-unaligned-vector-move
 	MK_CXXFLAGS += -Xassembler -muse-unaligned-vector-move

@@ -1078,8 +1078,8 @@ endif
 ifdef REMOVE_WARNING
 $(info !!! REMOVAL WARNING !!!)
 $(info The following LLAMA_ options have been removed and are no longer supported)
-$(info  - LLAMA_DISABLE_LOGS (https://github.com/ggerganov/llama.cpp/pull/9418))
-$(info  - LLAMA_SERVER_VERBOSE (https://github.com/ggerganov/llama.cpp/pull/9418))
+$(info  - LLAMA_DISABLE_LOGS (https://github.com/ggml-org/llama.cpp/pull/9418))
+$(info  - LLAMA_SERVER_VERBOSE (https://github.com/ggml-org/llama.cpp/pull/9418))
 $(info )
 endif

@@ -1364,7 +1364,7 @@ llama-server: \
 	examples/server/index.html.hpp \
 	examples/server/loading.html.hpp \
 	common/chat.cpp \
-	common/chat.hpp \
+	common/chat.h \
 	common/chat-template.hpp \
 	common/json.hpp \
 	common/minja.hpp \
README.md (75 changed lines)

@@ -3,26 +3,33 @@

 

 [](https://opensource.org/licenses/MIT)
-[](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml)
+[](https://github.com/ggml-org/llama.cpp/actions/workflows/server.yml)

-[Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml)
+[Roadmap](https://github.com/users/ggml-org/projects/7) / [Project status](https://github.com/ggml-org/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggml-org/llama.cpp/discussions/205) / [ggml](https://github.com/ggml-org/ggml)

 Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) in pure C/C++

+> [!IMPORTANT]
+> New `llama.cpp` package location: [ggml-org/llama.cpp](https://github.com/ggml-org/llama.cpp/pkgs/container/llama.cpp)
+>
+> Update your container URLs to: `ghcr.io/ggml-org/llama.cpp`
+>
+> More info: https://github.com/ggml-org/llama.cpp/discussions/11801
+
 ## Recent API changes

-- [Changelog for `libllama` API](https://github.com/ggerganov/llama.cpp/issues/9289)
-- [Changelog for `llama-server` REST API](https://github.com/ggerganov/llama.cpp/issues/9291)
+- [Changelog for `libllama` API](https://github.com/ggml-org/llama.cpp/issues/9289)
+- [Changelog for `llama-server` REST API](https://github.com/ggml-org/llama.cpp/issues/9291)

 ## Hot topics

-- **How to use [MTLResidencySet](https://developer.apple.com/documentation/metal/mtlresidencyset?language=objc) to keep the GPU memory active?** https://github.com/ggerganov/llama.cpp/pull/11427
+- **How to use [MTLResidencySet](https://developer.apple.com/documentation/metal/mtlresidencyset?language=objc) to keep the GPU memory active?** https://github.com/ggml-org/llama.cpp/pull/11427
 - **VS Code extension for FIM completions:** https://github.com/ggml-org/llama.vscode
-- Universal tool call support in `llama-server`: https://github.com/ggerganov/llama.cpp/pull/9639
+- Universal tool call support in `llama-server`: https://github.com/ggml-org/llama.cpp/pull/9639
 - Vim/Neovim plugin for FIM completions: https://github.com/ggml-org/llama.vim
-- Introducing GGUF-my-LoRA https://github.com/ggerganov/llama.cpp/discussions/10123
-- Hugging Face Inference Endpoints now support GGUF out of the box! https://github.com/ggerganov/llama.cpp/discussions/9669
-- Hugging Face GGUF editor: [discussion](https://github.com/ggerganov/llama.cpp/discussions/9268) | [tool](https://huggingface.co/spaces/CISCai/gguf-editor)
+- Introducing GGUF-my-LoRA https://github.com/ggml-org/llama.cpp/discussions/10123
+- Hugging Face Inference Endpoints now support GGUF out of the box! https://github.com/ggml-org/llama.cpp/discussions/9669
+- Hugging Face GGUF editor: [discussion](https://github.com/ggml-org/llama.cpp/discussions/9268) | [tool](https://huggingface.co/spaces/CISCai/gguf-editor)

 ----

@@ -39,7 +46,7 @@ range of hardware - locally and in the cloud.
 - Vulkan and SYCL backend support
 - CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity

-The `llama.cpp` project is the main playground for developing new features for the [ggml](https://github.com/ggerganov/ggml) library.
+The `llama.cpp` project is the main playground for developing new features for the [ggml](https://github.com/ggml-org/ggml) library.

 <details>
 <summary>Models</summary>

@@ -59,23 +66,23 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [X] [Falcon](https://huggingface.co/models?search=tiiuae/falcon)
 - [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) and [Chinese LLaMA-2 / Alpaca-2](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2)
 - [X] [Vigogne (French)](https://github.com/bofenghuang/vigogne)
-- [X] [BERT](https://github.com/ggerganov/llama.cpp/pull/5423)
+- [X] [BERT](https://github.com/ggml-org/llama.cpp/pull/5423)
 - [X] [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/)
 - [X] [Baichuan 1 & 2](https://huggingface.co/models?search=baichuan-inc/Baichuan) + [derivations](https://huggingface.co/hiyouga/baichuan-7b-sft)
 - [X] [Aquila 1 & 2](https://huggingface.co/models?search=BAAI/Aquila)
-- [X] [Starcoder models](https://github.com/ggerganov/llama.cpp/pull/3187)
+- [X] [Starcoder models](https://github.com/ggml-org/llama.cpp/pull/3187)
 - [X] [Refact](https://huggingface.co/smallcloudai/Refact-1_6B-fim)
-- [X] [MPT](https://github.com/ggerganov/llama.cpp/pull/3417)
-- [X] [Bloom](https://github.com/ggerganov/llama.cpp/pull/3553)
+- [X] [MPT](https://github.com/ggml-org/llama.cpp/pull/3417)
+- [X] [Bloom](https://github.com/ggml-org/llama.cpp/pull/3553)
 - [x] [Yi models](https://huggingface.co/models?search=01-ai/Yi)
 - [X] [StableLM models](https://huggingface.co/stabilityai)
 - [x] [Deepseek models](https://huggingface.co/models?search=deepseek-ai/deepseek)
 - [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen)
-- [x] [PLaMo-13B](https://github.com/ggerganov/llama.cpp/pull/3557)
+- [x] [PLaMo-13B](https://github.com/ggml-org/llama.cpp/pull/3557)
 - [x] [Phi models](https://huggingface.co/models?search=microsoft/phi)
-- [x] [PhiMoE](https://github.com/ggerganov/llama.cpp/pull/11003)
+- [x] [PhiMoE](https://github.com/ggml-org/llama.cpp/pull/11003)
 - [x] [GPT-2](https://huggingface.co/gpt2)
-- [x] [Orion 14B](https://github.com/ggerganov/llama.cpp/pull/5118)
+- [x] [Orion 14B](https://github.com/ggml-org/llama.cpp/pull/5118)
 - [x] [InternLM2](https://huggingface.co/models?search=internlm2)
 - [x] [CodeShell](https://github.com/WisdomShell/codeshell)
 - [x] [Gemma](https://ai.google.dev/gemma)

@@ -146,7 +153,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig)
 - Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart)
 - Flutter: [xuegao-tzx/Fllama](https://github.com/xuegao-tzx/Fllama)
-- PHP (API bindings and features built on top of llama.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggerganov/llama.cpp/pull/6326)
+- PHP (API bindings and features built on top of llama.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggml-org/llama.cpp/pull/6326)
 - Guile Scheme: [guile_llama_cpp](https://savannah.nongnu.org/projects/guile-llama-cpp)
 - Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift)
 - Swift [ShenghaiWang/SwiftLlama](https://github.com/ShenghaiWang/SwiftLlama)

@@ -235,6 +242,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 | [HIP](docs/build.md#hip) | AMD GPU |
 | [Vulkan](docs/build.md#vulkan) | GPU |
 | [CANN](docs/build.md#cann) | Ascend NPU |
+| [OpenCL](docs/backend/OPENCL.md) | Adreno GPU |

 ## Building the project

@@ -244,7 +252,7 @@ The project also includes many example programs and tools using the `llama` libr
 - Clone this repository and build locally, see [how to build](docs/build.md)
 - On MacOS or Linux, install `llama.cpp` via [brew, flox or nix](docs/install.md)
 - Use a Docker image, see [documentation for Docker](docs/docker.md)
-- Download pre-built binaries from [releases](https://github.com/ggerganov/llama.cpp/releases)
+- Download pre-built binaries from [releases](https://github.com/ggml-org/llama.cpp/releases)

 ## Obtaining and quantizing models

@@ -257,14 +265,14 @@ You can either manually download the GGUF file or directly use any `llama.cpp`-c

 After downloading a model, use the CLI tools to run it locally - see below.

-`llama.cpp` requires the model to be stored in the [GGUF](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md) file format. Models in other data formats can be converted to GGUF using the `convert_*.py` Python scripts in this repo.
+`llama.cpp` requires the model to be stored in the [GGUF](https://github.com/ggml-org/ggml/blob/master/docs/gguf.md) file format. Models in other data formats can be converted to GGUF using the `convert_*.py` Python scripts in this repo.

 The Hugging Face platform provides a variety of online tools for converting, quantizing and hosting models with `llama.cpp`:

 - Use the [GGUF-my-repo space](https://huggingface.co/spaces/ggml-org/gguf-my-repo) to convert to GGUF format and quantize model weights to smaller sizes
-- Use the [GGUF-my-LoRA space](https://huggingface.co/spaces/ggml-org/gguf-my-lora) to convert LoRA adapters to GGUF format (more info: https://github.com/ggerganov/llama.cpp/discussions/10123)
-- Use the [GGUF-editor space](https://huggingface.co/spaces/CISCai/gguf-editor) to edit GGUF meta data in the browser (more info: https://github.com/ggerganov/llama.cpp/discussions/9268)
-- Use the [Inference Endpoints](https://ui.endpoints.huggingface.co/) to directly host `llama.cpp` in the cloud (more info: https://github.com/ggerganov/llama.cpp/discussions/9669)
+- Use the [GGUF-my-LoRA space](https://huggingface.co/spaces/ggml-org/gguf-my-lora) to convert LoRA adapters to GGUF format (more info: https://github.com/ggml-org/llama.cpp/discussions/10123)
+- Use the [GGUF-editor space](https://huggingface.co/spaces/CISCai/gguf-editor) to edit GGUF meta data in the browser (more info: https://github.com/ggml-org/llama.cpp/discussions/9268)
+- Use the [Inference Endpoints](https://ui.endpoints.huggingface.co/) to directly host `llama.cpp` in the cloud (more info: https://github.com/ggml-org/llama.cpp/discussions/9669)

 To learn more about model quantization, [read this documentation](examples/quantize/README.md)

@@ -487,9 +495,9 @@ To learn more about model quantization, [read this documentation](examples/quant
 - Collaborators can push to branches in the `llama.cpp` repo and merge PRs into the `master` branch
 - Collaborators will be invited based on contributions
 - Any help with managing issues, PRs and projects is very appreciated!
-- See [good first issues](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions
+- See [good first issues](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions
 - Read the [CONTRIBUTING.md](CONTRIBUTING.md) for more information
-- Make sure to read this: [Inference at the edge](https://github.com/ggerganov/llama.cpp/discussions/205)
+- Make sure to read this: [Inference at the edge](https://github.com/ggml-org/llama.cpp/discussions/205)
 - A bit of backstory for those who are interested: [Changelog podcast](https://changelog.com/podcast/532)

 ## Other documentation

@@ -504,7 +512,7 @@ To learn more about model quantization, [read this documentation](examples/quant
 - [Running on Docker](docs/docker.md)
 - [Build on Android](docs/android.md)
 - [Performance troubleshooting](docs/development/token_generation_performance_tips.md)
-- [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)
+- [GGML tips & tricks](https://github.com/ggml-org/llama.cpp/wiki/GGML-Tips-&-Tricks)

 #### Seminal papers and background on the models

@@ -518,5 +526,18 @@ If your issue is with model generation quality, then please at least scan the fo
 - [Aligning language models to follow instructions](https://openai.com/research/instruction-following)
 - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155)

-#### References
+## Completions
+
+Command-line completion is available for some environments.
+
+#### Bash Completion
+
+```bash
+$ build/bin/llama-cli --completion-bash > ~/.llama-completion.bash
+$ source ~/.llama-completion.bash
+```
+
+Optionally this can be added to your `.bashrc` or `.bash_profile` to load it
+automatically. For example:
+
+```console
+$ echo "source ~/.llama-completion.bash" >> ~/.bashrc
+```
+
+## References
@@ -62,6 +62,6 @@ Beware that none of the topics under [Using llama.cpp securely](#using-llamacpp-
 <!-- normal version -->
 However, If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.

-Please disclose it as a private [security advisory](https://github.com/ggerganov/llama.cpp/security/advisories/new).
+Please disclose it as a private [security advisory](https://github.com/ggml-org/llama.cpp/security/advisories/new).

 A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure.
@@ -1,11 +1,11 @@
 # CI

-In addition to [Github Actions](https://github.com/ggerganov/llama.cpp/actions) `llama.cpp` uses a custom CI framework:
+In addition to [Github Actions](https://github.com/ggml-org/llama.cpp/actions) `llama.cpp` uses a custom CI framework:

 https://github.com/ggml-org/ci

 It monitors the `master` branch for new commits and runs the
-[ci/run.sh](https://github.com/ggerganov/llama.cpp/blob/master/ci/run.sh) script on dedicated cloud instances. This allows us
+[ci/run.sh](https://github.com/ggml-org/llama.cpp/blob/master/ci/run.sh) script on dedicated cloud instances. This allows us
 to execute heavier workloads compared to just using Github Actions. Also with time, the cloud instances will be scaled
 to cover various hardware architectures, including GPU and Apple Silicon instances.
@@ -57,8 +57,7 @@ add_library(${TARGET} STATIC
     arg.h
     base64.hpp
     chat.cpp
-    chat.hpp
-    chat-template.hpp
+    chat.h
     common.cpp
     common.h
     console.cpp

@@ -68,7 +67,8 @@ add_library(${TARGET} STATIC
     llguidance.cpp
     log.cpp
     log.h
-    minja.hpp
+    minja/chat-template.hpp
+    minja/minja.hpp
     ngram-cache.cpp
     ngram-cache.h
     sampling.cpp

@@ -96,6 +96,22 @@ if (LLAMA_LLGUIDANCE)
     include(ExternalProject)
     set(LLGUIDANCE_SRC ${CMAKE_BINARY_DIR}/llguidance/source)
     set(LLGUIDANCE_PATH ${LLGUIDANCE_SRC}/target/release)

+    # Set the correct library file extension based on platform
+    if (WIN32)
+        set(LLGUIDANCE_LIB_NAME "llguidance.lib")
+        # Add Windows-specific libraries
+        set(LLGUIDANCE_PLATFORM_LIBS
+            ws2_32  # Windows Sockets API
+            userenv # For GetUserProfileDirectoryW
+            ntdll   # For NT functions
+            bcrypt  # For BCryptGenRandom
+        )
+    else()
+        set(LLGUIDANCE_LIB_NAME "libllguidance.a")
+        set(LLGUIDANCE_PLATFORM_LIBS "")
+    endif()
+
     ExternalProject_Add(llguidance_ext
         GIT_REPOSITORY https://github.com/guidance-ai/llguidance
         # v0.6.12:

@@ -106,17 +122,18 @@ if (LLAMA_LLGUIDANCE)
         CONFIGURE_COMMAND ""
         BUILD_COMMAND cargo build --release
         INSTALL_COMMAND ""
-        BUILD_BYPRODUCTS ${LLGUIDANCE_PATH}/libllguidance.a ${LLGUIDANCE_PATH}/llguidance.h
+        BUILD_BYPRODUCTS ${LLGUIDANCE_PATH}/${LLGUIDANCE_LIB_NAME} ${LLGUIDANCE_PATH}/llguidance.h
         UPDATE_COMMAND ""
     )
     target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_LLGUIDANCE)

     add_library(llguidance STATIC IMPORTED)
-    set_target_properties(llguidance PROPERTIES IMPORTED_LOCATION ${LLGUIDANCE_PATH}/libllguidance.a)
+    set_target_properties(llguidance PROPERTIES IMPORTED_LOCATION ${LLGUIDANCE_PATH}/${LLGUIDANCE_LIB_NAME})
     add_dependencies(llguidance llguidance_ext)

     target_include_directories(${TARGET} PRIVATE ${LLGUIDANCE_PATH})
-    set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} llguidance)
+    # Add platform libraries to the main target
+    set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} llguidance ${LLGUIDANCE_PLATFORM_LIBS})
 endif ()

 target_include_directories(${TARGET} PUBLIC .)
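One detail worth noting in the hunk above: `BUILD_BYPRODUCTS` is updated to use the same `${LLGUIDANCE_LIB_NAME}` as `IMPORTED_LOCATION`. Keeping the two paths in sync matters because generators such as Ninja only let other targets depend on files that some rule declares as an output, so a byproduct path that no longer matched the imported library would break the `llguidance` target on Windows.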
common/arg.cpp (142 changed lines)

@@ -2,6 +2,7 @@

 #include "log.h"
 #include "sampling.h"
+#include "chat.h"

 #include <algorithm>
 #include <climits>

@@ -365,6 +366,112 @@ static void common_params_print_usage(common_params_context & ctx_arg) {
     print_options(specific_options);
 }

+static void common_params_print_completion(common_params_context & ctx_arg) {
+    std::vector<common_arg *> common_options;
+    std::vector<common_arg *> sparam_options;
+    std::vector<common_arg *> specific_options;
+
+    for (auto & opt : ctx_arg.options) {
+        if (opt.is_sparam) {
+            sparam_options.push_back(&opt);
+        } else if (opt.in_example(ctx_arg.ex)) {
+            specific_options.push_back(&opt);
+        } else {
+            common_options.push_back(&opt);
+        }
+    }
+
+    printf("_llama_completions() {\n");
+    printf("    local cur prev opts\n");
+    printf("    COMPREPLY=()\n");
+    printf("    cur=\"${COMP_WORDS[COMP_CWORD]}\"\n");
+    printf("    prev=\"${COMP_WORDS[COMP_CWORD-1]}\"\n\n");
+
+    printf("    opts=\"");
+    auto print_options = [](const std::vector<common_arg *> & options) {
+        for (const common_arg * opt : options) {
+            for (const char * arg : opt->args) {
+                printf("%s ", arg);
+            }
+        }
+    };
+
+    print_options(common_options);
+    print_options(sparam_options);
+    print_options(specific_options);
+    printf("\"\n\n");
+
+    printf("    case \"$prev\" in\n");
+    printf("        --model)\n");
+    printf("            COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
+    printf("            return 0\n");
+    printf("            ;;\n");
+    printf("        --grammar-file)\n");
+    printf("            COMPREPLY=( $(compgen -f -X '!*.gbnf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
+    printf("            return 0\n");
+    printf("            ;;\n");
+    printf("        --chat-template-file)\n");
+    printf("            COMPREPLY=( $(compgen -f -X '!*.jinja' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
+    printf("            return 0\n");
+    printf("            ;;\n");
+    printf("        *)\n");
+    printf("            COMPREPLY=( $(compgen -W \"${opts}\" -- \"$cur\") )\n");
+    printf("            return 0\n");
+    printf("            ;;\n");
+    printf("    esac\n");
+    printf("}\n\n");
+
+    std::set<std::string> executables = {
+        "llama-batched",
+        "llama-batched-bench",
+        "llama-bench",
+        "llama-cli",
+        "llama-convert-llama2c-to-ggml",
+        "llama-cvector-generator",
+        "llama-embedding",
+        "llama-eval-callback",
+        "llama-export-lora",
+        "llama-gbnf-validator",
+        "llama-gen-docs",
+        "llama-gguf",
+        "llama-gguf-hash",
+        "llama-gguf-split",
+        "llama-gritlm",
+        "llama-imatrix",
+        "llama-infill",
+        "llama-llava-cli",
+        "llama-llava-clip-quantize-cli",
+        "llama-lookahead",
+        "llama-lookup",
+        "llama-lookup-create",
+        "llama-lookup-merge",
+        "llama-lookup-stats",
+        "llama-minicpmv-cli",
+        "llama-parallel",
+        "llama-passkey",
+        "llama-perplexity",
+        "llama-q8dot",
+        "llama-quantize",
+        "llama-quantize-stats",
+        "llama-qwen2vl-cli",
+        "llama-retrieval",
+        "llama-run",
+        "llama-save-load-state",
+        "llama-server",
+        "llama-simple",
+        "llama-simple-chat",
+        "llama-speculative",
+        "llama-speculative-simple",
+        "llama-tokenize",
+        "llama-tts",
+        "llama-vdot"
+    };
+
+    for (const auto& exe : executables) {
+        printf("complete -F _llama_completions %s\n", exe.c_str());
+    }
+}
+
 static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & value) {
     std::vector<ggml_backend_dev_t> devices;
     auto dev_names = string_split<std::string>(value, ',');

@@ -426,6 +533,10 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e
         }
         exit(0);
     }
+    if (ctx_arg.params.completion) {
+        common_params_print_completion(ctx_arg);
+        exit(0);
+    }
 } catch (const std::invalid_argument & ex) {
     fprintf(stderr, "%s\n", ex.what());
     ctx_arg.params = params_org;

@@ -494,6 +605,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         exit(0);
     }
 ));
+add_opt(common_arg(
+    {"--completion-bash"},
+    "print source-able bash completion script for llama.cpp",
+    [](common_params & params) {
+        params.completion = true;
+    }
+));
 add_opt(common_arg(
     {"--verbose-prompt"},
     string_format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),

@@ -674,7 +792,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
 ));
 add_opt(common_arg(
     {"--no-context-shift"},
-    string_format("disables context shift on inifinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
+    string_format("disables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
     [](common_params & params) {
         params.ctx_shift = false;
     }

@@ -946,6 +1064,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         params.sampling.min_p = std::stof(value);
     }
 ).set_sparam());
+add_opt(common_arg(
+    {"--top-nsigma"}, "N",
+    string_format("top-n-sigma sampling (default: %.1f, -1.0 = disabled)", params.sampling.top_n_sigma),
+    [](common_params & params, const std::string & value) {
+        params.sampling.top_n_sigma = std::stof(value);
+    }
+).set_examples({LLAMA_EXAMPLE_MAIN}).set_sparam());
 add_opt(common_arg(
     {"--xtc-probability"}, "N",
     string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sampling.xtc_probability),
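The new `--top-nsigma` flag above only stores a value into `params.sampling.top_n_sigma`; the sampler that consumes it is outside this diff. As a rough sketch of the rule top-n-sigma sampling is usually described as applying (an editor's assumption based on the flag's help text, not the repository's implementation), the filter keeps only tokens whose logit lies within `n` standard deviations of the maximum logit:

```cpp
#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

// Sketch only: mask out tokens whose logit falls below max_logit - n_sigma * stddev.
void top_n_sigma_mask(std::vector<float> & logits, float n_sigma) {
    if (n_sigma < 0.0f || logits.empty()) return; // -1.0 = disabled, matching the flag

    const float max_l = *std::max_element(logits.begin(), logits.end());
    float mean = 0.0f;
    for (float l : logits) mean += l;
    mean /= logits.size();

    float var = 0.0f;
    for (float l : logits) var += (l - mean) * (l - mean);
    const float sigma  = std::sqrt(var / logits.size());
    const float thresh = max_l - n_sigma * sigma;

    // Masked tokens get probability zero after softmax.
    for (float & l : logits) {
        if (l < thresh) l = -std::numeric_limits<float>::infinity();
    }
}
```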
@@ -1445,7 +1570,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     "- isolate: only spawn threads on CPUs on the node that execution started on\n"
     "- numactl: use the CPU map provided by numactl\n"
     "if run without this previously, it is recommended to drop the system page cache before using this\n"
-    "see https://github.com/ggerganov/llama.cpp/issues/1437",
+    "see https://github.com/ggml-org/llama.cpp/issues/1437",
     [](common_params & params, const std::string & value) {
         /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
         else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }

@@ -1975,6 +2100,17 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         params.use_jinja = true;
     }
 ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_JINJA"));
+add_opt(common_arg(
+    {"--reasoning-format"}, "FORMAT",
+    "reasoning format (default: deepseek; allowed values: deepseek, none)\n"
+    "controls whether thought tags are extracted from the response, and in which format they're returned. 'none' leaves thoughts unparsed in `message.content`, 'deepseek' puts them in `message.reasoning_content` (for DeepSeek R1 & Command R7B only).\n"
+    "only supported for non-streamed responses",
+    [](common_params & params, const std::string & value) {
+        /**/ if (value == "deepseek") { params.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK; }
+        else if (value == "none") { params.reasoning_format = COMMON_REASONING_FORMAT_NONE; }
+        else { std::invalid_argument("invalid value"); }
+    }
+).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK"));
 add_opt(common_arg(
     {"--chat-template"}, "JINJA_TEMPLATE",
     string_format(

@@ -2112,7 +2248,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
 ).set_env("LLAMA_LOG_VERBOSITY"));
 add_opt(common_arg(
     {"--log-prefix"},
-    "Enable prefx in log messages",
+    "Enable prefix in log messages",
     [](common_params &) {
         common_log_set_prefix(common_log_main(), true);
     }
common/chat.cpp (1033 changed lines; diff suppressed because it is too large)
common/chat.h (new file, 134 lines)

@@ -0,0 +1,134 @@
// Chat support (incl. tool call grammar constraining & output parsing) w/ generic & custom template handlers.

#pragma once

#include "common.h"
#include <string>
#include <vector>

struct common_chat_templates;

struct common_chat_tool_call {
    std::string name;
    std::string arguments;
    std::string id;
};

struct common_chat_msg_content_part {
    std::string type;
    std::string text;
};

struct common_chat_msg {
    std::string role;
    std::string content;
    std::vector<common_chat_msg_content_part> content_parts = {};
    std::vector<common_chat_tool_call> tool_calls = {};
    std::string reasoning_content;
    std::string tool_name;
    std::string tool_call_id;
};

struct common_chat_tool {
    std::string name;
    std::string description;
    std::string parameters;
};

enum common_chat_tool_choice {
    COMMON_CHAT_TOOL_CHOICE_AUTO,
    COMMON_CHAT_TOOL_CHOICE_REQUIRED,
    COMMON_CHAT_TOOL_CHOICE_NONE,
};

enum common_chat_format {
    COMMON_CHAT_FORMAT_CONTENT_ONLY,
    COMMON_CHAT_FORMAT_GENERIC,
    COMMON_CHAT_FORMAT_MISTRAL_NEMO,
    COMMON_CHAT_FORMAT_LLAMA_3_X,
    COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS,
    COMMON_CHAT_FORMAT_DEEPSEEK_R1,
    COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING,
    COMMON_CHAT_FORMAT_FIREFUNCTION_V2,
    COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2,
    COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1,
    COMMON_CHAT_FORMAT_HERMES_2_PRO,
    COMMON_CHAT_FORMAT_COMMAND_R7B,
    COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING,

    COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
};

struct common_chat_templates_inputs {
    std::vector<common_chat_msg> messages;
    std::string grammar;
    std::string json_schema;
    bool add_generation_prompt = true;
    bool use_jinja = true;
    // Parameters below only supported when use_jinja is true
    std::vector<common_chat_tool> tools;
    common_chat_tool_choice tool_choice = COMMON_CHAT_TOOL_CHOICE_AUTO;
    bool parallel_tool_calls = false;
    bool extract_reasoning = true;
};

struct common_chat_params {
    common_chat_format format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
    std::string prompt;
    std::string grammar;
    bool grammar_lazy = false;
    std::vector<common_grammar_trigger> grammar_triggers;
    std::vector<std::string> preserved_tokens;
    std::vector<std::string> additional_stops;
};

// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
bool common_chat_verify_template(const std::string & tmpl, bool use_jinja);

void common_chat_templates_free(struct common_chat_templates * tmpls);

struct common_chat_templates_deleter { void operator()(common_chat_templates * tmpls) { common_chat_templates_free(tmpls); } };

typedef std::unique_ptr<struct common_chat_templates, common_chat_templates_deleter> common_chat_templates_ptr;

common_chat_templates_ptr common_chat_templates_init(
    const struct llama_model * model,
    const std::string & chat_template_override,
    const std::string & bos_token_override = "",
    const std::string & eos_token_override = "");

bool common_chat_templates_was_explicit(const struct common_chat_templates * tmpls);
const char * common_chat_templates_source(const struct common_chat_templates * tmpls, const char * variant = nullptr);


struct common_chat_params common_chat_templates_apply(
    const struct common_chat_templates * tmpls,
    const struct common_chat_templates_inputs & inputs);

// Format single message, while taking into account the position of that message in chat history
std::string common_chat_format_single(
    const struct common_chat_templates * tmpls,
    const std::vector<common_chat_msg> & past_msg,
    const common_chat_msg & new_msg,
    bool add_ass,
    bool use_jinja);

// Returns an example of formatted chat
std::string common_chat_format_example(
    const struct common_chat_templates * tmpls,
    bool use_jinja);

std::string common_chat_format_name(common_chat_format format);
common_chat_msg common_chat_parse( const std::string & input, common_chat_format format);

common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::string & tool_choice);

// Parses a JSON array of messages in OpenAI's chat completion API format.
// T can be std::string containing JSON or nlohmann::ordered_json
template <class T> std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const T & messages);
template <class T> T common_chat_msgs_to_json_oaicompat(const std::vector<common_chat_msg> & msgs, bool concat_typed_text = false);

// Parses a JSON array of tools in OpenAI's chat completion tool call API format.
// T can be std::string containing JSON or nlohmann::ordered_json
template <class T> std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const T & tools);
template <class T> T common_chat_tools_to_json_oaicompat(const std::vector<common_chat_tool> & tools);
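To make the shape of this new API concrete, here is a minimal usage sketch assembled only from the declarations above. It assumes a previously loaded `llama_model * model`; `raw_output` is a hypothetical string holding the model's generated text:

```cpp
// Sketch only; uses just the functions declared in common/chat.h above.
common_chat_templates_ptr tmpls = common_chat_templates_init(model, /* chat_template_override = */ "");

common_chat_templates_inputs inputs;
common_chat_msg msg;
msg.role    = "user";
msg.content = "Hello";
inputs.messages.push_back(msg);

// Render the prompt (and, when tools are involved, the grammar constraints).
common_chat_params cparams = common_chat_templates_apply(tmpls.get(), inputs);

// cparams.prompt is fed to generation; the raw completion is then parsed back
// into a structured message (content, reasoning_content, tool_calls).
common_chat_msg reply = common_chat_parse(raw_output, cparams.format);
```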
common/chat.hpp (deleted file, 52 lines)

@@ -1,52 +0,0 @@
// Chat support (incl. tool call grammar constraining & output parsing) w/ generic & custom template handlers.

#pragma once

#include "common.h"
#include <json.hpp>
#include <optional>
#include <string>
#include <vector>

using json = nlohmann::ordered_json;

struct common_chat_inputs {
    json messages;
    json tools;
    json tool_choice;
    json json_schema;
    bool parallel_tool_calls;
    bool stream;
    std::string grammar;
    bool add_generation_prompt = true;
};

enum common_chat_format {
    COMMON_CHAT_FORMAT_CONTENT_ONLY,
    COMMON_CHAT_FORMAT_GENERIC,
    COMMON_CHAT_FORMAT_MISTRAL_NEMO,
    COMMON_CHAT_FORMAT_LLAMA_3_X,
    COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS,
    COMMON_CHAT_FORMAT_DEEPSEEK_R1,
    COMMON_CHAT_FORMAT_FIREFUNCTION_V2,
    COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2,
    COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1,
    COMMON_CHAT_FORMAT_HERMES_2_PRO,
    COMMON_CHAT_FORMAT_COMMAND_R7B,

    COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
};

struct common_chat_params {
    common_chat_format format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
    json prompt;
    std::string grammar;
    bool grammar_lazy = false;
    std::vector<common_grammar_trigger> grammar_triggers;
    std::vector<std::string> preserved_tokens;
    std::vector<std::string> additional_stops;
};

struct common_chat_params common_chat_params_init(const common_chat_template & tmpl, const struct common_chat_inputs & params);
std::string common_chat_format_name(common_chat_format format);
common_chat_msg common_chat_parse( const std::string & input, common_chat_format format);
@@ -12,8 +12,6 @@
|
||||
#include "json.hpp"
|
||||
#include "json-schema-to-grammar.h"
|
||||
#include "llama.h"
|
||||
#include "chat.hpp"
|
||||
#include "chat-template.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cinttypes>
|
||||
@@ -1768,174 +1766,6 @@ std::string common_detokenize(const struct llama_vocab * vocab, const std::vecto
|
||||
return text;
|
||||
}
|
||||
|
||||
//
|
||||
// Chat template utils
|
||||
//
|
||||
|
||||
bool common_chat_verify_template(const std::string & tmpl, bool use_jinja) {
|
||||
if (use_jinja) {
|
||||
try {
|
||||
auto chat_template = common_chat_template(tmpl, "<s>", "</s>");
|
||||
common_chat_inputs inputs;
|
||||
inputs.messages = json::array({{
|
||||
{"role", "user"},
|
||||
{"content", "test"},
|
||||
}});
|
||||
common_chat_params_init(chat_template, inputs);
|
||||
return true;
|
||||
} catch (const std::exception & e) {
|
||||
LOG_ERR("%s: failed to apply template: %s\n", __func__, e.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
llama_chat_message chat[] = {{"user", "test"}};
|
||||
const int res = llama_chat_apply_template(tmpl.c_str(), chat, 1, true, nullptr, 0);
|
||||
return res >= 0;
|
||||
}
|
||||
|
||||
std::string common_chat_apply_template(
|
||||
const common_chat_template & tmpl,
|
||||
const std::vector<common_chat_msg> & msgs,
|
||||
bool add_ass,
|
||||
bool use_jinja) {
|
||||
if (use_jinja) {
|
||||
auto messages = json::array();
|
||||
for (const auto & msg : msgs) {
|
||||
messages.push_back({{"role", msg.role}, {"content", msg.content}});
|
||||
}
|
||||
common_chat_inputs inputs;
|
||||
inputs.messages = messages;
|
||||
inputs.add_generation_prompt = add_ass;
|
||||
return common_chat_params_init(tmpl, inputs).prompt;
|
||||
}
|
||||
|
||||
int alloc_size = 0;
|
||||
std::vector<llama_chat_message> chat;
|
||||
for (const auto & msg : msgs) {
|
||||
chat.push_back({msg.role.c_str(), msg.content.c_str()});
|
||||
alloc_size += (msg.role.size() + msg.content.size()) * 1.25;
|
||||
}
|
||||
|
||||
std::vector<char> buf(alloc_size);
|
||||
|
||||
// run the first time to get the total output length
|
||||
int32_t res = llama_chat_apply_template(tmpl.source().c_str(), chat.data(), chat.size(), add_ass, buf.data(), buf.size());
|
||||
|
||||
// error: chat template is not supported
|
||||
if (res < 0) {
|
||||
// if the custom "tmpl" is not supported, we throw an error
|
||||
// this is a bit redundant (for good), since we're not sure if user validated the custom template with llama_chat_verify_template()
|
||||
throw std::runtime_error("this custom template is not supported");
|
||||
}
|
||||
|
||||
// if it turns out that our buffer is too small, we resize it
|
||||
if ((size_t) res > buf.size()) {
|
||||
buf.resize(res);
|
||||
res = llama_chat_apply_template(tmpl.source().c_str(), chat.data(), chat.size(), add_ass, buf.data(), buf.size());
|
||||
}
|
||||
|
||||
std::string formatted_chat(buf.data(), res);
|
||||
return formatted_chat;
|
||||
}
|
||||
|
||||
std::string common_chat_format_single(
|
||||
const common_chat_template & tmpl,
|
||||
const std::vector<common_chat_msg> & past_msg,
|
||||
const common_chat_msg & new_msg,
|
||||
bool add_ass,
|
||||
bool use_jinja) {
|
||||
std::ostringstream ss;
|
||||
auto fmt_past_msg = past_msg.empty() ? "" : common_chat_apply_template(tmpl, past_msg, false, use_jinja);
|
||||
std::vector<common_chat_msg> chat_new(past_msg);
|
||||
// if the past_msg ends with a newline, we must preserve it in the formatted version
|
||||
if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
|
||||
ss << "\n";
|
||||
};
|
||||
// format chat with new_msg
|
||||
chat_new.push_back(new_msg);
|
||||
auto fmt_new_msg = common_chat_apply_template(tmpl, chat_new, add_ass, use_jinja);
|
||||
// get the diff part
|
||||
ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string common_chat_format_example(const common_chat_template & tmpl, bool use_jinja) {
    std::vector<common_chat_msg> msgs = {
        {"system",    "You are a helpful assistant", {}},
        {"user",      "Hello",                       {}},
        {"assistant", "Hi there",                    {}},
        {"user",      "How are you?",                {}},
    };
    return common_chat_apply_template(tmpl, msgs, true, use_jinja);
}

#define CHATML_TEMPLATE_SRC \
    "{%- for message in messages -%}\n" \
    "  {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' -}}\n" \
    "{%- endfor -%}\n" \
    "{%- if add_generation_prompt -%}\n" \
    "  {{- '<|im_start|>assistant\n' -}}\n" \
    "{%- endif -%}"

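// Illustrative only (assumed rendering, not part of the source): applying
// CHATML_TEMPLATE_SRC to the four example messages in common_chat_format_example()
// above produces roughly:
//
//   <|im_start|>system
//   You are a helpful assistant<|im_end|>
//   <|im_start|>user
//   Hello<|im_end|>
//   <|im_start|>assistant
//   Hi there<|im_end|>
//   <|im_start|>user
//   How are you?<|im_end|>
//   <|im_start|>assistant
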
common_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override)
{
    std::string default_template_src;
    std::string template_tool_use_src;

    bool has_explicit_template = !chat_template_override.empty();
    if (chat_template_override.empty()) {
        auto str = llama_model_chat_template(model, /* name */ nullptr);
        if (str) {
            default_template_src = str;
            has_explicit_template = true;
        }
        str = llama_model_chat_template(model, /* name */ "tool_use");
        if (str) {
            template_tool_use_src = str;
            has_explicit_template = true;
        }
    } else {
        default_template_src = chat_template_override;
    }
    if (default_template_src.empty() || default_template_src == "chatml") {
        if (!template_tool_use_src.empty()) {
            default_template_src = template_tool_use_src;
        } else {
            default_template_src = CHATML_TEMPLATE_SRC;
        }
    }
    auto vocab = llama_model_get_vocab(model);
    const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
        if (token == LLAMA_TOKEN_NULL) {
            if (default_template_src.find(jinja_variable_name) != std::string::npos
                || template_tool_use_src.find(jinja_variable_name) != std::string::npos) {
                LOG_WRN("%s: warning: vocab does not have a %s token, jinja template won't work as intended.\n", __func__, name);
            }
            return std::string();
        } else {
            return common_token_to_piece(vocab, token, true);
        }
    };
    auto token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
    auto token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
    try {
        return {
            has_explicit_template,
            std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos),
            template_tool_use_src.empty()
                ? nullptr
                : std::make_unique<minja::chat_template>(template_tool_use_src, token_bos, token_eos),
        };
    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to parse chat template: %s\n", __func__, e.what());
        return {
            has_explicit_template,
            std::make_unique<minja::chat_template>(CHATML_TEMPLATE_SRC, token_bos, token_eos),
            nullptr,
        };
    }
}

//
// KV cache utils
//

@@ -140,6 +140,7 @@ struct common_params_sampling {
     int32_t dry_allowed_length = 2;    // tokens extending repetitions beyond this receive penalty
     int32_t dry_penalty_last_n = -1;   // how many tokens to scan for repetitions (0 = disable penalty, -1 = context size)
     int32_t mirostat           = 0;    // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
+    float   top_n_sigma        = -1.00f; // -1.0 = disabled
     float   mirostat_tau       = 5.00f; // target entropy
     float   mirostat_eta       = 0.10f; // learning rate
     bool    ignore_eos         = false;

@@ -177,10 +178,10 @@ struct common_params_speculative {
 
     int32_t n_ctx        = 0;     // draft context size
     int32_t n_max        = 16;    // maximum number of tokens to draft during speculative decoding
-    int32_t n_min        = 5;     // minimum number of draft tokens to use for speculative decoding
+    int32_t n_min        = 0;     // minimum number of draft tokens to use for speculative decoding
     int32_t n_gpu_layers = -1;    // number of layers to store in VRAM for the draft model (-1 - use default)
     float   p_split      = 0.1f;  // speculative decoding split probability
-    float   p_min        = 0.9f;  // minimum speculative decoding probability (greedy)
+    float   p_min        = 0.75f; // minimum speculative decoding probability (greedy)
 
     struct cpu_params cpuparams;
     struct cpu_params cpuparams_batch;

@@ -202,6 +203,11 @@ struct common_params_vocoder {
     bool use_guide_tokens = false; // enable guide tokens to improve TTS accuracy // NOLINT
 };
 
+enum common_reasoning_format {
+    COMMON_REASONING_FORMAT_NONE,
+    COMMON_REASONING_FORMAT_DEEPSEEK, // Extract thinking tag contents and return as `message.reasoning_content`
+};
+
 struct common_params {
     int32_t n_predict = -1;   // new tokens to predict
     int32_t n_ctx     = 4096; // context size

@@ -292,6 +298,7 @@ struct common_params {
     bool kl_divergence = false; // compute KL divergence
 
     bool usage         = false; // print usage
+    bool completion    = false; // print source-able completion script
     bool use_color     = false; // use color to distinguish generations and inputs
     bool special       = false; // enable special token output
     bool interactive   = false; // interactive mode

@@ -346,6 +353,7 @@ struct common_params {
     std::string chat_template = "";   // NOLINT
     bool use_jinja = false;           // NOLINT
     bool enable_chat_template = true;
+    common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
 
     std::vector<std::string> api_keys;
 
@@ -424,13 +432,13 @@ bool set_process_priority(enum ggml_sched_priority prio);
 //
 
 #ifdef __GNUC__
-#ifdef __MINGW32__
-#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
-#else
-#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
-#endif
+# if defined(__MINGW32__) && !defined(__clang__)
+#  define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+# else
+#  define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
+# endif
 #else
-#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...)
+# define LLAMA_COMMON_ATTRIBUTE_FORMAT(...)
 #endif
 
 LLAMA_COMMON_ATTRIBUTE_FORMAT(1, 2)
@@ -608,62 +616,6 @@ std::string common_detokenize(
         const std::vector<llama_token> & tokens,
         bool special = true);
 
-//
-// Chat template utils
-//
-
-struct common_tool_call {
-    std::string name;
-    std::string arguments;
-    std::string id;
-};
-
-// same with llama_chat_message, but uses std::string
-struct common_chat_msg {
-    std::string role;
-    std::string content;
-    std::vector<common_tool_call> tool_calls;
-    std::string tool_plan = "";
-};
-
-// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
-bool common_chat_verify_template(const std::string & tmpl, bool use_jinja);
-
-namespace minja {
-    class chat_template;
-}
-
-typedef minja::chat_template common_chat_template;
-
-struct common_chat_templates {
-    bool has_explicit_template; // Model had builtin template or template overridde was specified.
-    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
-    std::unique_ptr<common_chat_template> template_tool_use;
-};
-
-// CPP wrapper for llama_chat_apply_template
-// If the built-in template is not supported, we default to chatml
-// If the custom "tmpl" is not supported, we throw an error
-std::string common_chat_apply_template(
-    const common_chat_template & tmpl,
-    const std::vector<common_chat_msg> & chat,
-    bool add_ass,
-    bool use_jinja);
-
-// Format single message, while taking into account the position of that message in chat history
-std::string common_chat_format_single(
-    const common_chat_template & tmpl,
-    const std::vector<common_chat_msg> & past_msg,
-    const common_chat_msg & new_msg,
-    bool add_ass,
-    bool use_jinja);
-
-// Returns an example of formatted chat
-std::string common_chat_format_example(
-    const common_chat_template & tmpl, bool use_jinja);
-
-common_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override);
-
 //
 // KV cache utils
 //

@@ -254,10 +254,10 @@ llama_sampler * llama_sampler_init_llg(const llama_vocab * vocab, const char * g
         };
     }
 
-    return new llama_sampler{
+    return llama_sampler_init(
         /* .iface = */ &llama_sampler_llg_i,
-        /* .ctx   = */ ctx,
-    };
+        /* .ctx   = */ ctx
+    );
 }
 
 #else

@@ -1,5 +1,6 @@
 #include "log.h"
 
+#include <chrono>
 #include <condition_variable>
 #include <cstdarg>
 #include <cstdio>

@@ -2,6 +2,7 @@
 
 #include "ggml.h" // for ggml_log_level
 
+#define LOG_CLR_TO_EOL  "\033[K\r"
 #define LOG_COL_DEFAULT "\033[0m"
 #define LOG_COL_BOLD    "\033[1m"
 #define LOG_COL_RED     "\033[31m"

@@ -14,7 +15,7 @@
 
 #ifndef __GNUC__
 # define LOG_ATTRIBUTE_FORMAT(...)
-#elif defined(__MINGW32__)
+#elif defined(__MINGW32__) && !defined(__clang__)
 # define LOG_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
 #else
 # define LOG_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))

@@ -249,16 +249,30 @@ class chat_template {
                 inputs.add_generation_prompt = false;
                 full = apply(inputs);
             }
 
-            if (full.find(prefix) != 0) {
-                if (prefix.rfind(eos_token_) == prefix.size() - eos_token_.size()) {
-                    prefix = prefix.substr(0, prefix.size() - eos_token_.size());
+            auto eos_pos_last = full.rfind(eos_token_);
+            if (eos_pos_last == prefix.size() - eos_token_.size() ||
+                (full[full.size() - 1] == '\n' && (eos_pos_last == full.size() - eos_token_.size() - 1))) {
+                full = full.substr(0, eos_pos_last);
+            }
+            size_t common_prefix_length = 0;
+            for (size_t i = 0; i < prefix.size() && i < full.size(); ++i) {
+                if (prefix[i] != full[i]) {
+                    break;
+                }
+                if (prefix[i] == '<') {
+                    // DeepSeek R1's template (as of 20250209) adds a trailing <think> if add_generation_prompt,
+                    // but it removes thinking tags for past messages.
+                    // The prefix and full strings diverge at <think> vs. <|tool▁calls▁begin|>, we avoid consuming the leading <.
+                    continue;
+                }
+                common_prefix_length = i + 1;
             }
-            if (full.find(prefix) != 0) {
+            auto example = full.substr(common_prefix_length);
+            if (example.find("tool_name") == std::string::npos && example.find("some_value") == std::string::npos) {
                 fprintf(stderr, "Failed to infer a tool call example (possible template bug)\n");
+            } else {
+                tool_call_example_ = example;
             }
-            tool_call_example_ = full.substr(prefix.size());
         } catch (const std::exception & e) {
             fprintf(stderr, "Failed to generate tool call example: %s\n", e.what());
@@ -363,7 +377,7 @@ class chat_template {
         if (polyfill_tools) {
             adjusted_messages = add_system(inputs.messages,
                 "You can call any of the following tools to satisfy the user's requests: " + minja::Value(inputs.tools).dump(2, /* to_json= */ true) +
-                (!polyfill_tool_call_example || tool_call_example_.empty() ? "" : "\n\nExample tool call syntax:\n\n" + tool_call_example_));
+                (!polyfill_tool_call_example || tool_call_example_.empty() ? "" : "\n\nExample tool call syntax:\n\n" + tool_call_example_ + "\n\n"));
         } else {
             adjusted_messages = inputs.messages;
         }
@@ -1385,6 +1385,13 @@ static std::string strip(const std::string & s) {
     return s.substr(start, end - start + 1);
 }
 
+static std::string capitalize(const std::string & s) {
+    if (s.empty()) return s;
+    auto result = s;
+    result[0] = std::toupper(result[0]);
+    return result;
+}
+
 static std::string html_escape(const std::string & s) {
     std::string result;
     result.reserve(s.size());
@@ -1462,6 +1469,9 @@ public:
                 if (method->get_name() == "strip") {
                     vargs.expectArgs("strip method", {0, 0}, {0, 0});
                     return Value(strip(str));
+                } else if (method->get_name() == "capitalize") {
+                    vargs.expectArgs("capitalize method", {0, 0}, {0, 0});
+                    return Value(capitalize(str));
                 } else if (method->get_name() == "endswith") {
                     vargs.expectArgs("endswith method", {1, 1}, {0, 0});
                     auto suffix = vargs.args[0].get<std::string>();
@@ -1792,7 +1802,7 @@ private:
         auto left = parseStringConcat();
         if (!left) throw std::runtime_error("Expected left side of 'logical compare' expression");
 
-        static std::regex compare_tok(R"(==|!=|<=?|>=?|in\b|is\b|not[\r\n\s]+in\b)");
+        static std::regex compare_tok(R"(==|!=|<=?|>=?|in\b|is\b|not\s+in\b)");
         static std::regex not_tok(R"(not\b)");
         std::string op_str;
         while (!(op_str = consumeToken(compare_tok)).empty()) {

@@ -2171,7 +2181,7 @@ private:
     using TemplateTokenIterator = TemplateTokenVector::const_iterator;
 
     std::vector<std::string> parseVarNames() {
-        static std::regex varnames_regex(R"(((?:\w+)(?:[\r\n\s]*,[\r\n\s]*(?:\w+))*)[\r\n\s]*)");
+        static std::regex varnames_regex(R"(((?:\w+)(?:\s*,\s*(?:\w+))*)\s*)");
 
         std::vector<std::string> group;
         if ((group = consumeTokenGroups(varnames_regex)).empty()) throw std::runtime_error("Expected variable names");

@@ -2194,13 +2204,13 @@ private:
     }
 
     TemplateTokenVector tokenize() {
-        static std::regex comment_tok(R"(\{#([-~]?)([\s\S\r\n]*?)([-~]?)#\})");
+        static std::regex comment_tok(R"(\{#([-~]?)([\s\S]*?)([-~]?)#\})");
         static std::regex expr_open_regex(R"(\{\{([-~])?)");
-        static std::regex block_open_regex(R"(^\{%([-~])?[\s\n\r]*)");
+        static std::regex block_open_regex(R"(^\{%([-~])?\s*)");
         static std::regex block_keyword_tok(R"((if|else|elif|endif|for|endfor|generation|endgeneration|set|endset|block|endblock|macro|endmacro|filter|endfilter|break|continue)\b)");
         static std::regex non_text_open_regex(R"(\{\{|\{%|\{#)");
-        static std::regex expr_close_regex(R"([\s\n\r]*([-~])?\}\})");
-        static std::regex block_close_regex(R"([\s\n\r]*([-~])?%\})");
+        static std::regex expr_close_regex(R"(\s*([-~])?\}\})");
+        static std::regex block_close_regex(R"(\s*([-~])?%\})");
 
         TemplateTokenVector tokens;
         std::vector<std::string> group;
@@ -2284,7 +2294,7 @@ private:
                     auto post_space = parseBlockClose();
                     tokens.push_back(std::make_unique<EndGenerationTemplateToken>(location, pre_space, post_space));
                 } else if (keyword == "set") {
-                    static std::regex namespaced_var_regex(R"((\w+)[\s\n\r]*\.[\s\n\r]*(\w+))");
+                    static std::regex namespaced_var_regex(R"((\w+)\s*\.\s*(\w+))");
 
                     std::string ns;
                     std::vector<std::string> var_names;

@@ -2336,6 +2346,11 @@ private:
                     throw std::runtime_error("Unexpected block: " + keyword);
                 }
             } else if (std::regex_search(it, end, match, non_text_open_regex)) {
+                if (!match.position()) {
+                    if (match[0] != "{#")
+                        throw std::runtime_error("Internal error: Expected a comment");
+                    throw std::runtime_error("Missing end of comment tag");
+                }
                 auto text_end = it + match.position();
                 text = std::string(it, text_end);
                 it = text_end;
@@ -2400,7 +2415,7 @@ private:
 
             auto text = text_token->text;
             if (post_space == SpaceHandling::Strip) {
-                static std::regex trailing_space_regex(R"((\s|\r|\n)+$)");
+                static std::regex trailing_space_regex(R"(\s+$)");
                 text = std::regex_replace(text, trailing_space_regex, "");
             } else if (options.lstrip_blocks && it != end) {
                 auto i = text.size();

@@ -2410,7 +2425,7 @@ private:
                 }
             }
             if (pre_space == SpaceHandling::Strip) {
-                static std::regex leading_space_regex(R"(^(\s|\r|\n)+)");
+                static std::regex leading_space_regex(R"(^\s+)");
                 text = std::regex_replace(text, leading_space_regex, "");
             } else if (options.trim_blocks && (it - 1) != begin && !dynamic_cast<ExpressionTemplateToken*>((*(it - 2)).get())) {
                 if (text.length() > 0 && text[0] == '\n') {
@@ -134,11 +134,11 @@ std::string common_params_sampling::print() const {
     snprintf(result, sizeof(result),
             "\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n"
             "\tdry_multiplier = %.3f, dry_base = %.3f, dry_allowed_length = %d, dry_penalty_last_n = %d\n"
-            "\ttop_k = %d, top_p = %.3f, min_p = %.3f, xtc_probability = %.3f, xtc_threshold = %.3f, typical_p = %.3f, temp = %.3f\n"
+            "\ttop_k = %d, top_p = %.3f, min_p = %.3f, xtc_probability = %.3f, xtc_threshold = %.3f, typical_p = %.3f, top_n_sigma = %.3f, temp = %.3f\n"
            "\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f",
             penalty_last_n, penalty_repeat, penalty_freq, penalty_present,
             dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n,
-            top_k, top_p, min_p, xtc_probability, xtc_threshold, typ_p, temp,
+            top_k, top_p, min_p, xtc_probability, xtc_threshold, typ_p, top_n_sigma, temp,
             mirostat, mirostat_eta, mirostat_tau);
 
     return std::string(result);
@@ -151,12 +151,6 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
 
     lparams.no_perf = params.no_perf;
 
-    std::vector<const char *> trigger_words;
-    trigger_words.reserve(params.grammar_trigger_words.size());
-    for (const auto & str : params.grammar_trigger_words) {
-        trigger_words.push_back(str.word.c_str());
-    }
-
     struct llama_sampler * grmr;
     if (params.grammar.compare(0, 11, "%llguidance") == 0) {
 #ifdef LLAMA_USE_LLGUIDANCE

@@ -165,6 +159,12 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
         GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled");
 #endif // LLAMA_USE_LLGUIDANCE
     } else {
+        std::vector<const char *> trigger_words;
+        trigger_words.reserve(params.grammar_trigger_words.size());
+        for (const auto & str : params.grammar_trigger_words) {
+            trigger_words.push_back(str.word.c_str());
+        }
+
         grmr = params.grammar_lazy
              ? llama_sampler_init_grammar_lazy(vocab, params.grammar.c_str(), "root",
                    trigger_words.data(), trigger_words.size(),
@@ -188,45 +188,51 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
                 params.logit_bias.data()));
 
     if (params.mirostat == 0) {
-        for (const auto & cnstr : params.samplers) {
-            switch (cnstr) {
-                case COMMON_SAMPLER_TYPE_DRY:
-                    {
-                        std::vector<const char *> c_breakers;
-                        c_breakers.reserve(params.dry_sequence_breakers.size());
-                        for (const auto & str : params.dry_sequence_breakers) {
-                            c_breakers.push_back(str.c_str());
-                        }
-
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_dry      (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
-                    }
-                    break;
-                case COMMON_SAMPLER_TYPE_TOP_K:
-                    llama_sampler_chain_add(result->chain, llama_sampler_init_top_k    (params.top_k));
-                    break;
-                case COMMON_SAMPLER_TYPE_TOP_P:
-                    llama_sampler_chain_add(result->chain, llama_sampler_init_top_p    (params.top_p, params.min_keep));
-                    break;
-                case COMMON_SAMPLER_TYPE_MIN_P:
-                    llama_sampler_chain_add(result->chain, llama_sampler_init_min_p    (params.min_p, params.min_keep));
-                    break;
-                case COMMON_SAMPLER_TYPE_XTC:
-                    llama_sampler_chain_add(result->chain, llama_sampler_init_xtc      (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
-                    break;
-                case COMMON_SAMPLER_TYPE_TYPICAL_P:
-                    llama_sampler_chain_add(result->chain, llama_sampler_init_typical  (params.typ_p, params.min_keep));
-                    break;
-                case COMMON_SAMPLER_TYPE_TEMPERATURE:
-                    llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
-                    break;
-                case COMMON_SAMPLER_TYPE_INFILL:
-                    llama_sampler_chain_add(result->chain, llama_sampler_init_infill   (vocab));
-                    break;
-                case COMMON_SAMPLER_TYPE_PENALTIES:
-                    llama_sampler_chain_add(result->chain, llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
-                    break;
-                default:
-                    GGML_ASSERT(false && "unknown sampler type");
+        if (params.top_n_sigma >= 0) {
+            llama_sampler_chain_add(result->chain, llama_sampler_init_top_k       (params.top_k));
+            llama_sampler_chain_add(result->chain, llama_sampler_init_temp        (params.temp));
+            llama_sampler_chain_add(result->chain, llama_sampler_init_top_n_sigma (params.top_n_sigma));
+        } else {
+            for (const auto & cnstr : params.samplers) {
+                switch (cnstr) {
+                    case COMMON_SAMPLER_TYPE_DRY:
+                        {
+                            std::vector<const char *> c_breakers;
+                            c_breakers.reserve(params.dry_sequence_breakers.size());
+                            for (const auto & str : params.dry_sequence_breakers) {
+                                c_breakers.push_back(str.c_str());
+                            }
+
+                            llama_sampler_chain_add(result->chain, llama_sampler_init_dry      (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
+                        }
+                        break;
+                    case COMMON_SAMPLER_TYPE_TOP_K:
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_top_k    (params.top_k));
+                        break;
+                    case COMMON_SAMPLER_TYPE_TOP_P:
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_top_p    (params.top_p, params.min_keep));
+                        break;
+                    case COMMON_SAMPLER_TYPE_MIN_P:
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_min_p    (params.min_p, params.min_keep));
+                        break;
+                    case COMMON_SAMPLER_TYPE_XTC:
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_xtc      (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
+                        break;
+                    case COMMON_SAMPLER_TYPE_TYPICAL_P:
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_typical  (params.typ_p, params.min_keep));
+                        break;
+                    case COMMON_SAMPLER_TYPE_TEMPERATURE:
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
+                        break;
+                    case COMMON_SAMPLER_TYPE_INFILL:
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_infill   (vocab));
+                        break;
+                    case COMMON_SAMPLER_TYPE_PENALTIES:
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
+                        break;
+                    default:
+                        GGML_ASSERT(false && "unknown sampler type");
+                }
             }
         }
         llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
@@ -252,11 +252,6 @@ llama_tokens common_speculative_gen_draft(
         // add drafted token for each sequence
         const llama_token id = cur_p->data[0].id;
 
-        // only collect very high-confidence draft tokens
-        if (cur_p->data[0].p < params.p_min) {
-            break;
-        }
-
         common_sampler_accept(smpl, id, true);
 
         result.push_back(id);

@@ -265,6 +260,11 @@ llama_tokens common_speculative_gen_draft(
             break;
         }
 
+        // only collect very high-confidence draft tokens
+        if (cur_p->data[0].p < params.p_min) {
+            break;
+        }
+
         common_batch_add(batch, id, n_past + i + 1, { 0 }, true);
 
         // evaluate the drafted tokens on the draft model

@@ -9,7 +9,7 @@ struct common_speculative_params {
     int n_draft = 16;  // max drafted tokens
     int n_reuse = 256;
 
-    float p_min = 0.9f;  // min probabiliy required to accept a token in the draft
+    float p_min = 0.75f; // min probability required to accept a token in the draft
 };
 
 struct common_speculative * common_speculative_init(struct llama_context * ctx_dft);

@@ -558,7 +558,7 @@ class Model:
 
     # NOTE: this function is generated by convert_hf_to_gguf_update.py
     # do not modify it manually!
-    # ref: https://github.com/ggerganov/llama.cpp/pull/6920
+    # ref: https://github.com/ggml-org/llama.cpp/pull/6920
     # Marker: Start get_vocab_base_pre
     def get_vocab_base_pre(self, tokenizer) -> str:
         # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that

@@ -708,7 +708,7 @@ class Model:
             logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet")
             logger.warning("** - the pre-tokenization config has changed upstream")
             logger.warning("**   Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
-            logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
+            logger.warning("** ref: https://github.com/ggml-org/llama.cpp/pull/6920")
             logger.warning("**")
             logger.warning(f"** chkhsh: {chkhsh}")
             logger.warning("**************************************************************************************")

@@ -2835,7 +2835,7 @@ class InternLM2Model(Model):
         if chat_eos_token_id is not None:
             # For the chat model, we replace the eos with '<|im_end|>'.
             # TODO: this is a hack, should be fixed
-            # https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048
+            # https://github.com/ggml-org/llama.cpp/pull/6745#issuecomment-2067687048
             special_vocab.special_token_ids["eos"] = chat_eos_token_id
             logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}"
                            " in chat mode so that the conversation can end normally.")

@@ -8,7 +8,7 @@
 # provide the necessary information to llama.cpp via the GGUF header in order to implement
 # the same pre-tokenizer.
 #
-# ref: https://github.com/ggerganov/llama.cpp/pull/6920
+# ref: https://github.com/ggml-org/llama.cpp/pull/6920
 #
 # Instructions:
 #

@@ -246,7 +246,7 @@ src_func = f"""
             logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet")
             logger.warning("** - the pre-tokenization config has changed upstream")
             logger.warning("**   Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
-            logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
+            logger.warning("** ref: https://github.com/ggml-org/llama.cpp/pull/6920")
             logger.warning("**")
             logger.warning(f"** chkhsh: {{chkhsh}}")
             logger.warning("**************************************************************************************")

@@ -395,7 +395,7 @@ if __name__ == '__main__':
                     logger.error(f"Unexpected name '{name}': Not a lora_A or lora_B tensor")
                     if ".embed_tokens.weight" in name or ".lm_head.weight" in name:
                         logger.error("Embeddings is present in the adapter. This can be due to new tokens added during fine tuning")
-                        logger.error("Please refer to https://github.com/ggerganov/llama.cpp/pull/9948")
+                        logger.error("Please refer to https://github.com/ggml-org/llama.cpp/pull/9948")
                     sys.exit(1)
 
                 if base_name in tensor_map:

@@ -419,7 +419,7 @@ if __name__ == '__main__':
                 # some archs may have the same tensor for lm_head and output (tie word embeddings)
                 # in this case, adapters targeting lm_head will fail when using llama-export-lora
                 # therefore, we ignore them for now
-                # see: https://github.com/ggerganov/llama.cpp/issues/9065
+                # see: https://github.com/ggml-org/llama.cpp/issues/9065
                 if name == "lm_head.weight" and len(dest) == 0:
                     raise ValueError("lm_head is present in adapter, but is ignored in base model")
                 for dest_name, dest_data in dest:

@@ -12,7 +12,7 @@ $ apt update && apt upgrade -y
 $ apt install git cmake
 ```
 
-Then, follow the [build instructions](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md), specifically for CMake.
+Then, follow the [build instructions](https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md), specifically for CMake.
 
 Once the binaries are built, download your model of choice (e.g., from Hugging Face). It's recommended to place it in the `~/` directory for best performance:

docs/backend/OPENCL.md (new file, 205 lines)
@@ -0,0 +1,205 @@
# llama.cpp for OpenCL

- [Background](#background)
- [OS](#os)
- [Hardware](#hardware)
- [DataType Supports](#datatype-supports)
- [Model Preparation](#model-preparation)
- [CMake Options](#cmake-options)
- [Android](#android)
- [Windows 11 Arm64](#windows-11-arm64)
- [Known Issues](#known-issues)
- [TODO](#todo)

## Background

OpenCL (Open Computing Language) is an open, royalty-free standard for cross-platform, parallel programming of diverse accelerators found in supercomputers, cloud servers, personal computers, mobile devices and embedded platforms. OpenCL specifies a programming language (based on C99) for programming these devices and application programming interfaces (APIs) to control the platform and execute programs on the compute devices. Similar to CUDA, OpenCL has been widely used to program GPUs and is supported by most GPU vendors.

### Llama.cpp + OpenCL

The llama.cpp OpenCL backend is designed to enable llama.cpp on **Qualcomm Adreno GPUs** first and foremost, via OpenCL. Thanks to the portability of OpenCL, the backend can also run on certain Intel GPUs, although the performance is not optimal there.

## OS

| OS      | Status  | Verified                                 |
|---------|---------|------------------------------------------|
| Android | Support | Snapdragon 8 Gen 3, Snapdragon 8 Elite   |
| Windows | Support | Windows 11 Arm64 with Snapdragon X Elite |
| Linux   | Support | Ubuntu 22.04 WSL2 with Intel 12700H      |

## Hardware

### Adreno GPU

**Verified devices**

| Adreno GPU                      | Status  |
|:-------------------------------:|:-------:|
| Adreno 750 (Snapdragon 8 Gen 3) | Support |
| Adreno 830 (Snapdragon 8 Elite) | Support |
| Adreno X85 (Snapdragon X Elite) | Support |

## DataType Supports

| DataType | Status                     |
|:--------:|:--------------------------:|
| Q4_0     | Support                    |
| Q6_K     | Support, but not optimized |

## Model Preparation

You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model preparation.

Currently we support `Q4_0` quantization and have optimized for it. To achieve the best performance on an Adreno GPU, add `--pure` to `llama-quantize`. For example,

```sh
./llama-quantize --pure ggml-model-qwen2.5-3b-f16.gguf ggml-model-qwen-3b-Q4_0.gguf Q4_0
```

Since `Q6_K` is also supported, `Q4_0` quantization without `--pure` will also work. However, the performance will be worse compared to pure `Q4_0` quantization.

## CMake Options

The OpenCL backend has the following CMake options that control the behavior of the backend.

| CMake options                    | Default value | Description                               |
|:--------------------------------:|:-------------:|:------------------------------------------|
| `GGML_OPENCL_EMBED_KERNELS`      | `ON`          | Embed OpenCL kernels into the executable. |
| `GGML_OPENCL_USE_ADRENO_KERNELS` | `ON`          | Use kernels optimized for Adreno.         |

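For example, both options can be passed explicitly at configure time together with `-DGGML_OPENCL=ON` (the values shown are the defaults, so this invocation is purely illustrative):

```sh
cmake .. -G Ninja \
    -DGGML_OPENCL=ON \
    -DGGML_OPENCL_EMBED_KERNELS=ON \
    -DGGML_OPENCL_USE_ADRENO_KERNELS=ON
```
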
## Android

Ubuntu 22.04 is used for targeting Android. Make sure the following tools are accessible from the command line:

* Git
* CMake 3.29
* Ninja
* Python3

### I. Setup Environment

1. **Install NDK**

```sh
cd ~
wget https://dl.google.com/android/repository/commandlinetools-linux-8512546_latest.zip && \
unzip commandlinetools-linux-8512546_latest.zip && \
mkdir -p ~/android-sdk/cmdline-tools && \
mv cmdline-tools latest && \
mv latest ~/android-sdk/cmdline-tools/ && \
rm -rf commandlinetools-linux-8512546_latest.zip

yes | ~/android-sdk/cmdline-tools/latest/bin/sdkmanager "ndk;26.3.11579264"
```

2. **Install OpenCL Headers and Library**

```sh
mkdir -p ~/dev/llm
cd ~/dev/llm

git clone https://github.com/KhronosGroup/OpenCL-Headers && \
cd OpenCL-Headers && \
cp -r CL ~/android-sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include

cd ~/dev/llm

git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader && \
cd OpenCL-ICD-Loader && \
mkdir build_ndk26 && cd build_ndk26 && \
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_TOOLCHAIN_FILE=$HOME/android-sdk/ndk/26.3.11579264/build/cmake/android.toolchain.cmake \
    -DOPENCL_ICD_LOADER_HEADERS_DIR=$HOME/android-sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include \
    -DANDROID_ABI=arm64-v8a \
    -DANDROID_PLATFORM=24 \
    -DANDROID_STL=c++_shared && \
ninja && \
cp libOpenCL.so ~/android-sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android
```

### II. Build llama.cpp

```sh
cd ~/dev/llm

git clone https://github.com/ggml-org/llama.cpp && \
cd llama.cpp && \
mkdir build-android && cd build-android

cmake .. -G Ninja \
    -DCMAKE_TOOLCHAIN_FILE=$HOME/android-sdk/ndk/26.3.11579264/build/cmake/android.toolchain.cmake \
    -DANDROID_ABI=arm64-v8a \
    -DANDROID_PLATFORM=android-28 \
    -DBUILD_SHARED_LIBS=OFF \
    -DGGML_OPENCL=ON

ninja
```

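Once `ninja` finishes, one way to try the result on a device is over `adb` (an illustrative sketch; the binary location and model name are assumptions based on the build and quantization steps above):

```sh
adb push build-android/bin/llama-cli /data/local/tmp/
adb push ggml-model-qwen-3b-Q4_0.gguf /data/local/tmp/
adb shell 'cd /data/local/tmp && ./llama-cli -m ggml-model-qwen-3b-Q4_0.gguf -p "Hello"'
```
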
## Windows 11 Arm64

A Snapdragon X Elite device with Windows 11 Arm64 is used. Make sure the following tools are accessible from the command line:

* Git
* CMake 3.29
* Clang 19
* Ninja
* Visual Studio 2022

PowerShell is used for the following instructions.

### I. Setup Environment

1. **Install OpenCL Headers and Library**

```powershell
mkdir -p ~/dev/llm

cd ~/dev/llm
git clone https://github.com/KhronosGroup/OpenCL-Headers && cd OpenCL-Headers
mkdir build && cd build
cmake .. -G Ninja `
    -DBUILD_TESTING=OFF `
    -DOPENCL_HEADERS_BUILD_TESTING=OFF `
    -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF `
    -DCMAKE_INSTALL_PREFIX="$HOME/dev/llm/opencl"
cmake --build . --target install

cd ~/dev/llm
git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader && cd OpenCL-ICD-Loader
mkdir build && cd build
cmake .. -G Ninja `
    -DCMAKE_BUILD_TYPE=Release `
    -DCMAKE_PREFIX_PATH="$HOME/dev/llm/opencl" `
    -DCMAKE_INSTALL_PREFIX="$HOME/dev/llm/opencl"
cmake --build . --target install
```

### II. Build llama.cpp

```powershell
mkdir -p ~/dev/llm
cd ~/dev/llm

git clone https://github.com/ggml-org/llama.cpp && cd llama.cpp
mkdir build && cd build

cmake .. -G Ninja `
    -DCMAKE_TOOLCHAIN_FILE="$HOME/dev/llm/llama.cpp/cmake/arm64-windows-llvm.cmake" `
    -DCMAKE_BUILD_TYPE=Release `
    -DCMAKE_PREFIX_PATH="$HOME/dev/llm/opencl" `
    -DBUILD_SHARED_LIBS=OFF `
    -DGGML_OPENCL=ON
ninja
```

## Known Issues

- The Qwen2.5 0.5B model produces gibberish output with the Adreno kernels.

## TODO

- Fix Qwen2.5 0.5B
- Optimization for Q6_K
- Support and optimization for Q4_K

@@ -36,8 +36,8 @@ The following release is verified with good quality:
 
 |Commit ID|Tag|Release|Verified Platform| Update date|
 |-|-|-|-|-|
-|3bcd40b3c593d14261fb2abfabad3c0fb5b9e318|b4040 |[llama-b4040-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b4040/llama-b4040-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1| 2024-11-19|
-|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1||
+|3bcd40b3c593d14261fb2abfabad3c0fb5b9e318|b4040 |[llama-b4040-bin-win-sycl-x64.zip](https://github.com/ggml-org/llama.cpp/releases/download/b4040/llama-b4040-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1| 2024-11-19|
+|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggml-org/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1||
 
 
 ## News

@@ -58,7 +58,7 @@ The following release is verified with good quality:
 - 2024.3
   - Release binary files of Windows.
   - A blog is published: **Run LLM on all Intel GPUs Using llama.cpp**: [intel.com](https://www.intel.com/content/www/us/en/developer/articles/technical/run-llm-on-all-gpus-using-llama-cpp-artical.html) or [medium.com](https://medium.com/@jianyu_neo/run-llm-on-all-intel-gpus-using-llama-cpp-fd2e2dcbd9bd).
-  - New base line is ready: [tag b2437](https://github.com/ggerganov/llama.cpp/tree/b2437).
+  - New base line is ready: [tag b2437](https://github.com/ggml-org/llama.cpp/tree/b2437).
   - Support multiple cards: **--split-mode**: [none|layer]; not support [row], it's on developing.
   - Support to assign main GPU by **--main-gpu**, replace $GGML_SYCL_DEVICE.
   - Support detecting all GPUs with level-zero and same top **Max compute units**.

@@ -3,7 +3,7 @@
 **To get the Code:**
 
 ```bash
-git clone https://github.com/ggerganov/llama.cpp
+git clone https://github.com/ggml-org/llama.cpp
 cd llama.cpp
 ```
 

@@ -46,7 +46,7 @@ cmake --build build --config Release
 ```
 
 - Building for Windows (x86, x64 and arm64) with MSVC or clang as compilers:
-  - Install Visual Studio 2022, e.g. via the [Community Edition](https://visualstudio.microsoft.com/de/vs/community/). In the installer, select at least the following options (this also automatically installs the required additional tools like CMake,...):
+  - Install Visual Studio 2022, e.g. via the [Community Edition](https://visualstudio.microsoft.com/vs/community/). In the installer, select at least the following options (this also automatically installs the required additional tools like CMake,...):
     - Tab Workload: Desktop-development with C++
     - Tab Components (select quickly via search): C++-_CMake_ Tools for Windows, _Git_ for Windows, C++-_Clang_ Compiler for Windows, MS-Build Support for LLVM-Toolset (clang)
   - Please remember to always use a Developer Command Prompt / PowerShell for VS2022 for git, build, test

@@ -248,7 +248,7 @@ You have successfully set up CUDA on Fedora within a toolbox environment using t
 
 - **Building `llama.cpp`:**
 
-  - With CUDA installed, you can follow these [build instructions for `llama.cpp`](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) to compile it with CUDA support.
+  - With CUDA installed, you can follow these [build instructions for `llama.cpp`](https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md) to compile it with CUDA support.
   - Ensure that any CUDA-specific build flags or paths are correctly set in your build configuration.
 
 - **Using the Toolbox Environment:**

@@ -104,16 +104,16 @@ Note: to debug the inference graph: you can use [llama-eval-callback](/examples/
 
 ## GGUF specification
 
-https://github.com/ggerganov/ggml/blob/master/docs/gguf.md
+https://github.com/ggml-org/ggml/blob/master/docs/gguf.md
 
 ## Resources
 
-- YaRN RoPE scaling https://github.com/ggerganov/llama.cpp/pull/2268
-- support Baichuan serial models https://github.com/ggerganov/llama.cpp/pull/3009
-- support attention bias https://github.com/ggerganov/llama.cpp/pull/4283
-- Mixtral support https://github.com/ggerganov/llama.cpp/pull/4406
-- BERT embeddings https://github.com/ggerganov/llama.cpp/pull/5423
-- Grok-1 support https://github.com/ggerganov/llama.cpp/pull/6204
-- Command R Plus support https://github.com/ggerganov/llama.cpp/pull/6491
-- support arch DBRX https://github.com/ggerganov/llama.cpp/pull/6515
-- How to convert HuggingFace model to GGUF format https://github.com/ggerganov/llama.cpp/discussions/2948
+- YaRN RoPE scaling https://github.com/ggml-org/llama.cpp/pull/2268
+- support Baichuan serial models https://github.com/ggml-org/llama.cpp/pull/3009
+- support attention bias https://github.com/ggml-org/llama.cpp/pull/4283
+- Mixtral support https://github.com/ggml-org/llama.cpp/pull/4406
+- BERT embeddings https://github.com/ggml-org/llama.cpp/pull/5423
+- Grok-1 support https://github.com/ggml-org/llama.cpp/pull/6204
+- Command R Plus support https://github.com/ggml-org/llama.cpp/pull/6491
+- support arch DBRX https://github.com/ggml-org/llama.cpp/pull/6515
+- How to convert HuggingFace model to GGUF format https://github.com/ggml-org/llama.cpp/discussions/2948

@@ -7,21 +7,21 @@
 ## Images
 We have three Docker images available for this project:
 
-1. `ghcr.io/ggerganov/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`)
-2. `ghcr.io/ggerganov/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`)
-3. `ghcr.io/ggerganov/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`)
+1. `ghcr.io/ggml-org/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`)
+2. `ghcr.io/ggml-org/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`)
+3. `ghcr.io/ggml-org/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`)
 
 Additionally, there are the following images, similar to the above:
 
-- `ghcr.io/ggerganov/llama.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:light-cuda`: Same as `light` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-- `ghcr.io/ggerganov/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-- `ghcr.io/ggerganov/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-- `ghcr.io/ggerganov/llama.cpp:full-musa`: Same as `full` but compiled with MUSA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:light-musa`: Same as `light` but compiled with MUSA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:server-musa`: Same as `server` but compiled with MUSA support. (platforms: `linux/amd64`)
+- `ghcr.io/ggml-org/llama.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`)
+- `ghcr.io/ggml-org/llama.cpp:light-cuda`: Same as `light` but compiled with CUDA support. (platforms: `linux/amd64`)
+- `ghcr.io/ggml-org/llama.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`)
+- `ghcr.io/ggml-org/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
+- `ghcr.io/ggml-org/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
+- `ghcr.io/ggml-org/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
+- `ghcr.io/ggml-org/llama.cpp:full-musa`: Same as `full` but compiled with MUSA support. (platforms: `linux/amd64`)
+- `ghcr.io/ggml-org/llama.cpp:light-musa`: Same as `light` but compiled with MUSA support. (platforms: `linux/amd64`)
+- `ghcr.io/ggml-org/llama.cpp:server-musa`: Same as `server` but compiled with MUSA support. (platforms: `linux/amd64`)
 
 The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](../.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](../.github/workflows/docker.yml). If you need different settings (for example, a different CUDA, ROCm or MUSA library), you'll need to build the images locally for now.

@@ -32,25 +32,25 @@ The easiest way to download the models, convert them to ggml and optimize them i
 Replace `/path/to/models` below with the actual path where you downloaded the models.
 
 ```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-one "/models/" 7B
+docker run -v /path/to/models:/models ghcr.io/ggml-org/llama.cpp:full --all-in-one "/models/" 7B
 ```
 
 On completion, you are ready to play!
 
 ```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
+docker run -v /path/to/models:/models ghcr.io/ggml-org/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
 ```
 
 or with a light image:
 
 ```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
+docker run -v /path/to/models:/models ghcr.io/ggml-org/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
 ```
 
 or with a server image:
 
 ```bash
-docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggerganov/llama.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512
+docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggml-org/llama.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512
 ```
 
 ## Docker With CUDA

@@ -69,7 +69,7 @@ You may want to pass in some different `ARGS`, depending on the CUDA environment
 
 The defaults are:
 
-- `CUDA_VERSION` set to `12.6.0`
+- `CUDA_VERSION` set to `12.4.0`
 - `CUDA_DOCKER_ARCH` set to the cmake build default, which includes all the supported architectures
 
 The resulting images are essentially the same as the non-CUDA images:
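To override a default, pass it as a build argument when building the image locally, e.g. (the Dockerfile name below is an assumption for illustration; check `.devops/` for the actual file names):

```bash
docker build -t local/llama.cpp:full-cuda --build-arg CUDA_VERSION=12.4.0 -f .devops/full-cuda.Dockerfile .
```
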
@@ -104,7 +104,7 @@ You may want to pass in some different `ARGS`, depending on the MUSA environment
 
 The defaults are:
 
-- `MUSA_VERSION` set to `rc3.1.0`
+- `MUSA_VERSION` set to `rc3.1.1`
 
 The resulting images are essentially the same as the non-MUSA images:

@@ -7,7 +7,7 @@ On Mac and Linux, the homebrew package manager can be used via
 ```sh
 brew install llama.cpp
 ```
-The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668
+The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggml-org/llama.cpp/discussions/7668
 
 ## Nix

@@ -13,13 +13,15 @@ cmake -B build -DLLAMA_LLGUIDANCE=ON
 make -C build -j
 ```
 
+For Windows use `cmake --build build --config Release` instead of `make`.
+
 This requires the Rust compiler and the `cargo` tool to be [installed](https://www.rust-lang.org/tools/install).
 
 ## Interface
 
 There are no new command-line arguments or modifications to `common_params`. When enabled, grammars starting with `%llguidance` are passed to LLGuidance instead of the [current](../grammars/README.md) llama.cpp grammars. Additionally, JSON Schema requests (e.g., using the `-j` argument in `llama-cli`) are also passed to LLGuidance.
 
-For your existing GBNF grammars, you can use [gbnf_to_lark.py script](https://github.com/guidance-ai/llguidance/blob/main/scripts/gbnf_to_lark.py) to convert them to LLGuidance Lark-like format.
+For your existing GBNF grammars, you can use [gbnf_to_lark.py script](https://github.com/guidance-ai/llguidance/blob/main/python/llguidance/gbnf_to_lark.py) to convert them to LLGuidance Lark-like format.
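As a quick illustration (the grammar body here is a made-up example, not taken from the docs), an inline grammar can be routed to LLGuidance simply by starting it with the `%llguidance` prefix via the existing `--grammar` flag:

```sh
./llama-cli -m model.gguf -p "Answer yes or no: is the sky blue?" \
    --grammar '%llguidance
start: "yes" | "no"'
```
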
 ## Performance

@@ -3,9 +3,9 @@
 This example demonstrates how to generate a control vector using gguf models.
 
 Related PRs:
-- [Add support for control vectors](https://github.com/ggerganov/llama.cpp/pull/5970)
-- (Issue) [Generate control vector using llama.cpp](https://github.com/ggerganov/llama.cpp/issues/6880)
-- [Add cvector-generator example](https://github.com/ggerganov/llama.cpp/pull/7514)
+- [Add support for control vectors](https://github.com/ggml-org/llama.cpp/pull/5970)
+- (Issue) [Generate control vector using llama.cpp](https://github.com/ggml-org/llama.cpp/issues/6880)
+- [Add cvector-generator example](https://github.com/ggml-org/llama.cpp/pull/7514)
 
 ## Examples

@@ -1,7 +1,7 @@
 # llama.cpp/examples/imatrix
 
-Compute an importance matrix for a model and given text dataset. Can be used during quantization to enchance the quality of the quantized models.
-More information is available here: https://github.com/ggerganov/llama.cpp/pull/4861
+Compute an importance matrix for a model and given text dataset. Can be used during quantization to enhance the quality of the quantized models.
+More information is available here: https://github.com/ggml-org/llama.cpp/pull/4861
 
 ## Usage
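A typical invocation follows this shape (an illustrative sketch only; the model and dataset file names are placeholders, and the full set of options is described in the usage section of the README):

```sh
./llama-imatrix -m ggml-model-f16.gguf -f calibration-data.txt -o imatrix.dat
```
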
@@ -3,6 +3,7 @@
 #include "log.h"
 #include "llama.h"
 
+#include <chrono>
 #include <cmath>
 #include <cstdio>
 #include <cstring>

@@ -99,7 +100,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
     const float * data = is_host ? (const float *) src1->data : m_src1_data.data();
 
     // this has been adapted to the new format of storing merged experts in a single 3d tensor
-    // ref: https://github.com/ggerganov/llama.cpp/pull/6387
+    // ref: https://github.com/ggml-org/llama.cpp/pull/6387
     if (t->op == GGML_OP_MUL_MAT_ID) {
         // ids  -> [n_experts_used, n_tokens]
         // src1 -> [cols, n_expert_used, n_tokens]

@@ -876,8 +876,8 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
 struct test {
     static const std::string build_commit;
     static const int build_number;
-    static const std::string cpu_info;
-    static const std::string gpu_info;
+    const std::string cpu_info;
+    const std::string gpu_info;
     std::string model_filename;
     std::string model_type;
     uint64_t model_size;

@@ -903,7 +903,10 @@ struct test {
     std::string test_time;
     std::vector<uint64_t> samples_ns;
 
-    test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) {
+    test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) :
+        cpu_info(get_cpu_info()),
+        gpu_info(get_gpu_info()) {
+
         model_filename = inst.model;
         char buf[128];
         llama_model_desc(lmodel, buf, sizeof(buf));

@@ -1058,8 +1061,6 @@ struct test {
 
 const std::string test::build_commit = LLAMA_COMMIT;
 const int         test::build_number = LLAMA_BUILD_NUMBER;
-const std::string test::cpu_info     = get_cpu_info();
-const std::string test::gpu_info     = get_gpu_info();
 
 struct printer {
     virtual ~printer() {}

@@ -14,7 +14,7 @@ project("llama-android")
 #include(FetchContent)
 #FetchContent_Declare(
 #        llama
-#        GIT_REPOSITORY https://github.com/ggerganov/llama.cpp
+#        GIT_REPOSITORY https://github.com/ggml-org/llama.cpp
 #        GIT_TAG        master
 #)

@@ -3,9 +3,9 @@
 Local inference of llama.cpp on an iPhone. This is a sample app that can be used as a starting
 point for more advanced projects.
 
-For usage instructions and performance stats, check the following discussion: https://github.com/ggerganov/llama.cpp/discussions/4508
+For usage instructions and performance stats, check the following discussion: https://github.com/ggml-org/llama.cpp/discussions/4508
 
 
 
 Video demonstration:

@@ -39,7 +39,7 @@
 "
 "   :call llama#init()
 "
-" more info: https://github.com/ggerganov/llama.cpp/pull/9787
+" more info: https://github.com/ggml-org/llama.cpp/pull/9787
 "
 
 " colors (adjust to your liking)

@@ -26,7 +26,7 @@ python ./convert_hf_to_gguf.py ../MiniCPM-o-2_6/model
 ```
 
 Build llama.cpp using `CMake`:
-https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md
+https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md
 
 ```bash
 cmake -B build

@@ -6,7 +6,7 @@ Download [MiniCPM-Llama3-V-2_5](https://huggingface.co/openbmb/MiniCPM-Llama3-V-
 
 Clone llama.cpp:
 ```bash
-git clone https://github.com/ggerganov/llama.cpp
+git clone https://github.com/ggml-org/llama.cpp
 cd llama.cpp
 ```

@@ -4,4 +4,4 @@ Demonstration of lookahead decoding technique:
|
||||
|
||||
https://lmsys.org/blog/2023-11-21-lookahead-decoding/
|
||||
|
||||
More info: https://github.com/ggerganov/llama.cpp/pull/4207
|
||||
More info: https://github.com/ggml-org/llama.cpp/pull/4207
|
||||
|
||||
@@ -8,5 +8,5 @@ The key parameters for lookup decoding are `ngram_min`, `ngram_max` and `n_draft
|
||||
|
||||
More info:
|
||||
|
||||
https://github.com/ggerganov/llama.cpp/pull/4484
|
||||
https://github.com/ggerganov/llama.cpp/issues/4226
|
||||
https://github.com/ggml-org/llama.cpp/pull/4484
|
||||
https://github.com/ggml-org/llama.cpp/issues/4226
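
To make the lookup-decoding idea above concrete, here is a minimal sketch of n-gram lookup drafting (illustrative only — `lookup_draft` and its signature are assumptions, not the actual implementation behind the `ngram_min`/`ngram_max`/`n_draft` parameters): the trailing `ngram` tokens of the context are matched against earlier positions, and the tokens that followed the most recent match become the draft.

```cpp
#include <cstdint>
#include <vector>

using llama_token = int32_t; // stand-in for the real typedef

// Propose up to n_draft tokens by finding the most recent earlier occurrence
// of the context's trailing n-gram and copying what followed it.
static std::vector<llama_token> lookup_draft(const std::vector<llama_token> & ctx, size_t ngram, size_t n_draft) {
    std::vector<llama_token> draft;
    if (ctx.size() < ngram + 1) {
        return draft;
    }
    const size_t tail = ctx.size() - ngram; // start of the trailing n-gram
    for (size_t i = tail; i-- > 0; ) {      // scan from most recent to oldest
        bool match = true;
        for (size_t j = 0; j < ngram; ++j) {
            if (ctx[i + j] != ctx[tail + j]) { match = false; break; }
        }
        if (!match) {
            continue;
        }
        // copy the continuation that followed the matched n-gram
        for (size_t k = i + ngram; k < ctx.size() && draft.size() < n_draft; ++k) {
            draft.push_back(ctx[k]);
        }
        break;
    }
    return draft; // an empty draft means: fall back to normal decoding
}
```

The drafted tokens are then verified by the target model in a single batch, as in regular speculative decoding, so a wrong guess costs little while a correct guess saves full decoding steps.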

@@ -1,6 +1,6 @@
# llama.cpp/examples/main

This example program allows you to use various LLaMA language models easily and efficiently. It is specifically designed to work with the [llama.cpp](https://github.com/ggerganov/llama.cpp) project, which provides a plain C/C++ implementation with optional 4-bit quantization support for faster, lower memory inference, and is optimized for desktop CPUs. This program can be used to perform various inference tasks with LLaMA models, including generating text based on user-provided prompts and chat-like interactions with reverse prompts.
This example program allows you to use various LLaMA language models easily and efficiently. It is specifically designed to work with the [llama.cpp](https://github.com/ggml-org/llama.cpp) project, which provides a plain C/C++ implementation with optional 4-bit quantization support for faster, lower memory inference, and is optimized for desktop CPUs. This program can be used to perform various inference tasks with LLaMA models, including generating text based on user-provided prompts and chat-like interactions with reverse prompts.

## Table of Contents

@@ -37,7 +37,7 @@ Once downloaded, place your model in the models folder in llama.cpp.

##### Infinite text from a starting prompt (you can use `Ctrl-C` to stop it):
```bash
./llama-cli -m models\gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --ignore-eos -n -1
```

### Windows:
@@ -121,7 +121,7 @@ When --in-prefix or --in-suffix options are enabled the chat template ( --chat-t

### Chat templates

`--chat-template JINJA_TEMPLATE`: This option sets a custom jinja chat template. It accepts a string, not a file name. Default: template taken from model's metadata. Llama.cpp only supports [some pre-defined templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template). These include llama2, llama3, gemma, monarch, chatml, orion, vicuna, vicuna-orca, deepseek, command-r, zephyr. When --in-prefix or --in-suffix options are enabled the chat template ( --chat-template ) is disabled.
`--chat-template JINJA_TEMPLATE`: This option sets a custom jinja chat template. It accepts a string, not a file name. Default: template taken from model's metadata. Llama.cpp only supports [some pre-defined templates](https://github.com/ggml-org/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template). These include llama2, llama3, gemma, monarch, chatml, orion, vicuna, vicuna-orca, deepseek, command-r, zephyr. When --in-prefix or --in-suffix options are enabled the chat template ( --chat-template ) is disabled.

Example usage: `--chat-template gemma`

@@ -265,6 +265,14 @@ Being experimental and unique, XTC is disabled by default. The recommended combi

Example usage: `--xtc-probability 0.5 --xtc-threshold 0.1`

### Top-nσ Sampling

- `--top-nsigma N`: Limit the next token selection to a subset of tokens with pre-softmax logits that are within n * σ of the max logit (default: -1, -1 = disabled).

Top-nσ sampling is a text generation method that selects tokens based on a statistical threshold in pre-softmax logits. It works by only sampling from tokens with logits that are within n * σ of the maximum logit. This method helps maintain a stable sampling space regardless of temperature scaling, allowing it to perform well on reasoning tasks even at high temperatures. Without complex probability manipulation, it efficiently filters tokens directly on the pre-softmax logits. A higher value for top-nsigma (e.g., 5) will take more noisy tokens into consideration, while a lower value (e.g., 1) will focus on the more informative region of the sampling space.

Example usage: `--top-nsigma 1`
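
As a concrete illustration of the threshold rule described above (a minimal sketch only — the function name is made up and this is not the actual llama.cpp sampler code): compute the maximum, mean and standard deviation of the logits, then mask out everything more than n * σ below the maximum.

```cpp
#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

// Keep only tokens whose pre-softmax logit lies within n * sigma of the
// maximum logit; everything else is masked to -inf before softmax/sampling.
static void top_nsigma_filter(std::vector<float> & logits, float n) {
    const float max_logit = *std::max_element(logits.begin(), logits.end());

    float mean = 0.0f;
    for (float l : logits) {
        mean += l;
    }
    mean /= (float) logits.size();

    float var = 0.0f;
    for (float l : logits) {
        var += (l - mean) * (l - mean);
    }
    const float sigma = std::sqrt(var / (float) logits.size());

    const float threshold = max_logit - n * sigma;
    for (float & l : logits) {
        if (l < threshold) {
            l = -std::numeric_limits<float>::infinity();
        }
    }
}
```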

### Logit Bias

- `-l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS`: Modify the likelihood of a token appearing in the generated text completion.

@@ -4,7 +4,7 @@
#include "log.h"
#include "sampling.h"
#include "llama.h"
#include "chat-template.hpp"
#include "chat.h"

#include <cstdio>
#include <cstring>
@@ -158,7 +158,7 @@ int main(int argc, char ** argv) {
}

const llama_vocab * vocab = llama_model_get_vocab(model);
auto chat_templates = common_chat_templates_from_model(model, params.chat_template);
auto chat_templates = common_chat_templates_init(model, params.chat_template);

LOG_INF("%s: llama threadpool init, n_threads = %d\n", __func__, (int) params.cpuparams.n_threads);

@@ -201,7 +201,7 @@ int main(int argc, char ** argv) {
}

// auto enable conversation mode if chat template is available
const bool has_chat_template = chat_templates.has_explicit_template && chat_templates.template_default;
const bool has_chat_template = common_chat_templates_was_explicit(chat_templates.get());
if (params.conversation_mode == COMMON_CONVERSATION_MODE_AUTO) {
if (has_chat_template) {
LOG_INF("%s: chat template is available, enabling conversation mode (disable it with -no-cnv)\n", __func__);
@@ -219,7 +219,7 @@ int main(int argc, char ** argv) {
// print chat template example in conversation mode
if (params.conversation_mode) {
if (params.enable_chat_template) {
LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(*chat_templates.template_default, params.use_jinja).c_str());
LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(chat_templates.get(), params.use_jinja).c_str());
} else {
LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
}
@@ -264,9 +264,11 @@ int main(int argc, char ** argv) {
std::vector<llama_token> embd_inp;

auto chat_add_and_format = [&chat_msgs, &chat_templates](const std::string & role, const std::string & content) {
common_chat_msg new_msg{role, content, {}};
auto formatted = common_chat_format_single(*chat_templates.template_default, chat_msgs, new_msg, role == "user", g_params->use_jinja);
chat_msgs.push_back({role, content, {}});
common_chat_msg new_msg;
new_msg.role = role;
new_msg.content = content;
auto formatted = common_chat_format_single(chat_templates.get(), chat_msgs, new_msg, role == "user", g_params->use_jinja);
chat_msgs.push_back(new_msg);
LOG_DBG("formatted: '%s'\n", formatted.c_str());
return formatted;
};
@@ -755,11 +757,14 @@ int main(int argc, char ** argv) {

// check for reverse prompt using special tokens
llama_token last_token = common_sampler_last(smpl);
if (std::find(antiprompt_token.begin(), antiprompt_token.end(), last_token) != antiprompt_token.end()) {
if (params.interactive) {
is_interacting = true;
for (auto token : antiprompt_token) {
if (token == last_token) {
if (params.interactive) {
is_interacting = true;
}
is_antiprompt = true;
break;
}
is_antiprompt = true;
}

if (is_antiprompt) {

@@ -5,8 +5,8 @@ models ability to recall information from long contexts.

See the following PRs for more info:

- https://github.com/ggerganov/llama.cpp/pull/3856
- https://github.com/ggerganov/llama.cpp/pull/4810
- https://github.com/ggml-org/llama.cpp/pull/3856
- https://github.com/ggml-org/llama.cpp/pull/4810

### Usage

@@ -3,6 +3,7 @@
#include "log.h"
#include "llama.h"

#include <chrono>
#include <algorithm>
#include <array>
#include <atomic>

@@ -23,7 +23,7 @@ def create_completion(host, prompt, gbnf_grammar):
"""Calls the /completion API on llama-server.

See
https://github.com/ggerganov/llama.cpp/tree/HEAD/examples/server#api-endpoints
https://github.com/ggml-org/llama.cpp/tree/HEAD/examples/server#api-endpoints
"""
print(f" Request:\n Grammar:\n{textwrap.indent(gbnf_grammar, ' ')}\n Prompt:\n{textwrap.indent(prompt.rstrip(), ' ')}")
headers = {"Content-Type": "application/json"}

@@ -69,22 +69,22 @@ Several quantization methods are supported. They differ in the resulting model d
| 13B | ms/tok @ 8th | - | 73 | 82 | 98 | 105 | 128 |
| 13B | bits/weight | 16.0 | 4.5 | 5.0 | 5.5 | 6.0 | 8.5 |

- [k-quants](https://github.com/ggerganov/llama.cpp/pull/1684)
- [k-quants](https://github.com/ggml-org/llama.cpp/pull/1684)
- recent k-quants improvements and new i-quants
- [#2707](https://github.com/ggerganov/llama.cpp/pull/2707)
- [#2807](https://github.com/ggerganov/llama.cpp/pull/2807)
- [#4773 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4773)
- [#4856 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4856)
- [#4861 - importance matrix](https://github.com/ggerganov/llama.cpp/pull/4861)
- [#4872 - MoE models](https://github.com/ggerganov/llama.cpp/pull/4872)
- [#4897 - 2-bit quantization](https://github.com/ggerganov/llama.cpp/pull/4897)
- [#4930 - imatrix for all k-quants](https://github.com/ggerganov/llama.cpp/pull/4930)
- [#4951 - imatrix on the GPU](https://github.com/ggerganov/llama.cpp/pull/4957)
- [#4969 - imatrix for legacy quants](https://github.com/ggerganov/llama.cpp/pull/4969)
- [#4996 - k-quants tuning](https://github.com/ggerganov/llama.cpp/pull/4996)
- [#5060 - Q3_K_XS](https://github.com/ggerganov/llama.cpp/pull/5060)
- [#5196 - 3-bit i-quants](https://github.com/ggerganov/llama.cpp/pull/5196)
- [quantization tuning](https://github.com/ggerganov/llama.cpp/pull/5320), [another one](https://github.com/ggerganov/llama.cpp/pull/5334), and [another one](https://github.com/ggerganov/llama.cpp/pull/5361)
- [#2707](https://github.com/ggml-org/llama.cpp/pull/2707)
- [#2807](https://github.com/ggml-org/llama.cpp/pull/2807)
- [#4773 - 2-bit i-quants (inference)](https://github.com/ggml-org/llama.cpp/pull/4773)
- [#4856 - 2-bit i-quants (inference)](https://github.com/ggml-org/llama.cpp/pull/4856)
- [#4861 - importance matrix](https://github.com/ggml-org/llama.cpp/pull/4861)
- [#4872 - MoE models](https://github.com/ggml-org/llama.cpp/pull/4872)
- [#4897 - 2-bit quantization](https://github.com/ggml-org/llama.cpp/pull/4897)
- [#4930 - imatrix for all k-quants](https://github.com/ggml-org/llama.cpp/pull/4930)
- [#4951 - imatrix on the GPU](https://github.com/ggml-org/llama.cpp/pull/4957)
- [#4969 - imatrix for legacy quants](https://github.com/ggml-org/llama.cpp/pull/4969)
- [#4996 - k-quants tuning](https://github.com/ggml-org/llama.cpp/pull/4996)
- [#5060 - Q3_K_XS](https://github.com/ggml-org/llama.cpp/pull/5060)
- [#5196 - 3-bit i-quants](https://github.com/ggml-org/llama.cpp/pull/5196)
- [quantization tuning](https://github.com/ggml-org/llama.cpp/pull/5320), [another one](https://github.com/ggml-org/llama.cpp/pull/5334), and [another one](https://github.com/ggml-org/llama.cpp/pull/5361)

**Llama 2 7B**

@@ -3,7 +3,7 @@
Demonstration of simple retrieval technique based on cosine similarity

More info:
https://github.com/ggerganov/llama.cpp/pull/6193
https://github.com/ggml-org/llama.cpp/pull/6193

### How to use

@@ -24,7 +24,7 @@
#include <string>
#include <vector>

#include "chat-template.hpp"
#include "chat.h"
#include "common.h"
#include "json.hpp"
#include "linenoise.cpp/linenoise.h"
@@ -346,7 +346,7 @@ class HttpClient {
if (!output_file.empty()) {
output_file_partial = output_file + ".partial";
if (!out.open(output_file_partial, "ab")) {
printe("Failed to open file\n");
printe("Failed to open file for writing\n");

return 1;
}
@@ -535,8 +535,7 @@ class HttpClient {

static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
const std::string & progress_suffix) {
printe("\r%*s\r%s%s| %s", get_terminal_width(), " ", progress_prefix.c_str(), progress_bar.c_str(),
progress_suffix.c_str());
printe("\r" LOG_CLR_TO_EOL "%s%s| %s", progress_prefix.c_str(), progress_bar.c_str(), progress_suffix.c_str());
}
// Function to write data to a file
static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
@@ -558,7 +557,7 @@ class LlamaData {
llama_model_ptr model;
llama_sampler_ptr sampler;
llama_context_ptr context;
std::vector<llama_chat_message> messages;
std::vector<llama_chat_message> messages; // TODO: switch to common_chat_msg
std::list<std::string> msg_strs;
std::vector<char> fmtted;

@@ -797,16 +796,13 @@ class LlamaData {
llama_model_ptr initialize_model(Opt & opt) {
ggml_backend_load_all();
resolve_model(opt.model_);
printe(
"\r%*s"
"\rLoading model",
get_terminal_width(), " ");
printe("\r" LOG_CLR_TO_EOL "Loading model");
llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
if (!model) {
printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
}

printe("\r%*s\r", static_cast<int>(sizeof("Loading model")), " ");
printe("\r" LOG_CLR_TO_EOL);
return model;
}

@@ -838,44 +834,23 @@ static void add_message(const char * role, const std::string & text, LlamaData &
}

// Function to apply the chat template and resize `formatted` if needed
static int apply_chat_template(const common_chat_template & tmpl, LlamaData & llama_data, const bool append, bool use_jinja) {
if (use_jinja) {
json messages = json::array();
for (const auto & msg : llama_data.messages) {
messages.push_back({
{"role", msg.role},
{"content", msg.content},
});
}
try {
minja::chat_template_inputs tmpl_inputs;
tmpl_inputs.messages = messages;
tmpl_inputs.add_generation_prompt = append;

minja::chat_template_options tmpl_opts;
tmpl_opts.use_bos_token = false;
tmpl_opts.use_eos_token = false;

auto result = tmpl.apply(tmpl_inputs, tmpl_opts);
llama_data.fmtted.resize(result.size() + 1);
memcpy(llama_data.fmtted.data(), result.c_str(), result.size() + 1);
return result.size();
} catch (const std::exception & e) {
printe("failed to render the chat template: %s\n", e.what());
return -1;
}
}
int result = llama_chat_apply_template(
tmpl.source().c_str(), llama_data.messages.data(), llama_data.messages.size(), append,
append ? llama_data.fmtted.data() : nullptr, append ? llama_data.fmtted.size() : 0);
if (append && result > static_cast<int>(llama_data.fmtted.size())) {
llama_data.fmtted.resize(result);
result = llama_chat_apply_template(tmpl.source().c_str(), llama_data.messages.data(),
llama_data.messages.size(), append, llama_data.fmtted.data(),
llama_data.fmtted.size());
static int apply_chat_template(const struct common_chat_templates * tmpls, LlamaData & llama_data, const bool append, bool use_jinja) {
common_chat_templates_inputs inputs;
for (const auto & msg : llama_data.messages) {
common_chat_msg cmsg;
cmsg.role    = msg.role;
cmsg.content = msg.content;
inputs.messages.push_back(cmsg);
}
inputs.add_generation_prompt = append;
inputs.use_jinja = use_jinja;

return result;
auto chat_params = common_chat_templates_apply(tmpls, inputs);
// TODO: use other params for tool calls.
auto result = chat_params.prompt;
llama_data.fmtted.resize(result.size() + 1);
memcpy(llama_data.fmtted.data(), result.c_str(), result.size() + 1);
return result.size();
}

// Function to tokenize the prompt
@@ -969,10 +944,7 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str
static int read_user_input(std::string & user_input) {
static const char * prompt_prefix = "> ";
#ifdef WIN32
printf(
"\r%*s"
"\r" LOG_COL_DEFAULT "%s",
get_terminal_width(), " ", prompt_prefix);
printf("\r" LOG_CLR_TO_EOL LOG_COL_DEFAULT "%s", prompt_prefix);

std::getline(std::cin, user_input);
if (std::cin.eof()) {
@@ -1022,8 +994,8 @@ static int generate_response(LlamaData & llama_data, const std::string & prompt,
}

// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(const common_chat_template & tmpl, LlamaData & llama_data, const bool append, int & output_length, bool use_jinja) {
const int new_len = apply_chat_template(tmpl, llama_data, append, use_jinja);
static int apply_chat_template_with_error_handling(const common_chat_templates * tmpls, LlamaData & llama_data, const bool append, int & output_length, bool use_jinja) {
const int new_len = apply_chat_template(tmpls, llama_data, append, use_jinja);
if (new_len < 0) {
printe("failed to apply the chat template\n");
return -1;
@@ -1085,8 +1057,7 @@ static int get_user_input(std::string & user_input, const std::string & user) {
static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_jinja) {
int prev_len = 0;
llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
auto chat_templates = common_chat_templates_from_model(llama_data.model.get(), "");
GGML_ASSERT(chat_templates.template_default);
auto chat_templates = common_chat_templates_init(llama_data.model.get(), "");
static const bool stdout_a_terminal = is_stdout_a_terminal();
while (true) {
// Get user input
@@ -1097,7 +1068,7 @@ static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_

add_message("user", user.empty() ? user_input : user, llama_data);
int new_len;
if (apply_chat_template_with_error_handling(*chat_templates.template_default, llama_data, true, new_len, use_jinja) < 0) {
if (apply_chat_template_with_error_handling(chat_templates.get(), llama_data, true, new_len, use_jinja) < 0) {
return 1;
}

@@ -1112,7 +1083,7 @@ static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_
}

add_message("assistant", response, llama_data);
if (apply_chat_template_with_error_handling(*chat_templates.template_default, llama_data, false, prev_len, use_jinja) < 0) {
if (apply_chat_template_with_error_handling(chat_templates.get(), llama_data, false, prev_len, use_jinja) < 0) {
return 1;
}
}

@@ -5,7 +5,7 @@ option(LLAMA_SERVER_SSL "Build SSL support for the server" OFF)
include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})

if (MINGW)
# fix: https://github.com/ggerganov/llama.cpp/actions/runs/9651004652/job/26617901362?pr=8006
# fix: https://github.com/ggml-org/llama.cpp/actions/runs/9651004652/job/26617901362?pr=8006
add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
endif()

@@ -7,14 +7,14 @@ Set of LLM REST APIs and a simple web front end to interact with llama.cpp.
**Features:**
* LLM inference of F16 and quantized models on GPU and CPU
* [OpenAI API](https://github.com/openai/openai-openapi) compatible chat completions and embeddings routes
* Reranking endpoint (WIP: https://github.com/ggerganov/llama.cpp/pull/9510)
* Reranking endpoint (WIP: https://github.com/ggml-org/llama.cpp/pull/9510)
* Parallel decoding with multi-user support
* Continuous batching
* Multimodal (wip)
* Monitoring endpoints
* Schema-constrained JSON response format

The project is under active development, and we are [looking for feedback and contributors](https://github.com/ggerganov/llama.cpp/issues/4216).
The project is under active development, and we are [looking for feedback and contributors](https://github.com/ggml-org/llama.cpp/issues/4216).

## Usage

@@ -65,7 +65,7 @@ The project is under active development, and we are [looking for feedback and co
| `-np, --parallel N` | number of parallel sequences to decode (default: 1)<br/>(env: LLAMA_ARG_N_PARALLEL) |
| `--mlock` | force system to keep model in RAM rather than swapping or compressing<br/>(env: LLAMA_ARG_MLOCK) |
| `--no-mmap` | do not memory-map model (slower load but may reduce pageouts if not using mlock)<br/>(env: LLAMA_ARG_NO_MMAP) |
| `--numa TYPE` | attempt optimizations that help on some NUMA systems<br/>- distribute: spread execution evenly over all nodes<br/>- isolate: only spawn threads on CPUs on the node that execution started on<br/>- numactl: use the CPU map provided by numactl<br/>if run without this previously, it is recommended to drop the system page cache before using this<br/>see https://github.com/ggerganov/llama.cpp/issues/1437<br/>(env: LLAMA_ARG_NUMA) |
| `--numa TYPE` | attempt optimizations that help on some NUMA systems<br/>- distribute: spread execution evenly over all nodes<br/>- isolate: only spawn threads on CPUs on the node that execution started on<br/>- numactl: use the CPU map provided by numactl<br/>if run without this previously, it is recommended to drop the system page cache before using this<br/>see https://github.com/ggml-org/llama.cpp/issues/1437<br/>(env: LLAMA_ARG_NUMA) |
| `-dev, --device <dev1,dev2,..>` | comma-separated list of devices to use for offloading (none = don't offload)<br/>use --list-devices to see a list of available devices<br/>(env: LLAMA_ARG_DEVICE) |
| `--list-devices` | print list of available devices and exit |
| `-ngl, --gpu-layers, --n-gpu-layers N` | number of layers to store in VRAM<br/>(env: LLAMA_ARG_N_GPU_LAYERS) |
@@ -127,6 +127,7 @@ The project is under active development, and we are [looking for feedback and co
| `--grammar-file FNAME` | file to read grammar from |
| `-j, --json-schema SCHEMA` | JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object<br/>For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead |
| `--jinja` | Enable experimental Jinja templating engine (required for tool use) |
| `--reasoning-format FORMAT` | Controls extraction of model thinking traces and the format / field in which they are returned (default: `deepseek`; allowed values: `deepseek`, `none`; requires `--jinja`). `none` will leave thinking traces inline in `message.content` in a model-specific format, while `deepseek` will return them separately under `message.reasoning_content` |

**Example-specific params**

@@ -177,7 +178,7 @@ Example usage of docker compose with environment variables:
```yml
services:
llamacpp-server:
image: ghcr.io/ggerganov/llama.cpp:server
image: ghcr.io/ggml-org/llama.cpp:server
ports:
- 8080:8080
volumes:
@@ -220,7 +221,7 @@ services:
The project includes a web-based user interface that enables interaction with the model through the `/chat/completions` endpoint.

The web UI is developed using:
- `vue` framework for frontend development
- `react` framework for frontend development
- `tailwindcss` and `daisyui` for styling
- `vite` for build tooling

@@ -272,10 +273,10 @@ You can consume the endpoints with Postman or NodeJS with axios library. You can
### Docker

```bash
docker run -p 8080:8080 -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:server -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080
docker run -p 8080:8080 -v /path/to/models:/models ghcr.io/ggml-org/llama.cpp:server -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080

# or, with CUDA:
docker run -p 8080:8080 -v /path/to/models:/models --gpus all ghcr.io/ggerganov/llama.cpp:server-cuda -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080 --n-gpu-layers 99
docker run -p 8080:8080 -v /path/to/models:/models --gpus all ghcr.io/ggml-org/llama.cpp:server-cuda -m models/7B/ggml-model.gguf -c 512 --host 0.0.0.0 --port 8080 --n-gpu-layers 99
```

## Testing with CURL
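
For reference, a minimal request against a locally running server might look like this (an illustrative sketch; adjust host, port and payload to your setup):

```bash
curl --request POST \
    --url http://localhost:8080/completion \
    --header "Content-Type: application/json" \
    --data '{"prompt": "Building a website can be done in 10 simple steps:", "n_predict": 128}'
```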
@@ -1065,7 +1066,7 @@ print(completion.choices[0].text)

### POST `/v1/chat/completions`: OpenAI-compatible Chat Completions API

Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming modes are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with the OpenAI API spec are being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used.
Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming modes are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with the OpenAI API spec are being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggml-org/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used.

*Options:*

@@ -1119,7 +1120,7 @@ curl http://localhost:8080/v1/chat/completions \

*Tool call support*

[Function calling](https://platform.openai.com/docs/guides/function-calling) is supported for all models (see https://github.com/ggerganov/llama.cpp/pull/9639):
[Function calling](https://platform.openai.com/docs/guides/function-calling) is supported for all models (see https://github.com/ggml-org/llama.cpp/pull/9639):

- Requires `--jinja` flag
- Native tool call formats supported:
@@ -1136,61 +1137,252 @@ curl http://localhost:8080/v1/chat/completions \

| Template | Format |
|----------|--------|
| CohereForAI-c4ai-command-r-plus-default.jinja | generic tool calls |
| CohereForAI-c4ai-command-r-plus-rag.jinja | generic tool calls |
| CohereForAI-c4ai-command-r-plus-tool_use.jinja | generic tool calls |
| MiniMaxAI-MiniMax-Text-01.jinja | generic tool calls |
| NexaAIDev-Octopus-v2.jinja | generic tool calls |
| NousResearch-Hermes-2-Pro-Llama-3-8B-default.jinja | generic tool calls |
| NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja | hermes 2 pro tool calls |
| NousResearch-Hermes-2-Pro-Mistral-7B-default.jinja | generic tool calls |
| NousResearch-Hermes-2-Pro-Mistral-7B-tool_use.jinja | hermes 2 pro tool calls |
| NousResearch-Hermes-3-Llama-3.1-70B-default.jinja | generic tool calls |
| NousResearch-Hermes-3-Llama-3.1-70B-tool_use.jinja | hermes 2 pro tool calls |
| OrionStarAI-Orion-14B-Chat.jinja | generic tool calls |
| Qwen-QwQ-32B-Preview.jinja | hermes 2 pro tool calls |
| Qwen-Qwen2-7B-Instruct.jinja | generic tool calls |
| Qwen-Qwen2-VL-7B-Instruct.jinja | generic tool calls |
| Qwen-Qwen2.5-7B-Instruct.jinja | hermes 2 pro tool calls |
| Qwen-Qwen2.5-Math-7B-Instruct.jinja | hermes 2 pro tool calls |
| TheBloke-FusionNet_34Bx2_MoE-AWQ.jinja | generic tool calls |
| abacusai-Fewshot-Metamath-OrcaVicuna-Mistral.jinja | generic tool calls |
| bofenghuang-vigogne-2-70b-chat.jinja | generic tool calls |
| databricks-dbrx-instruct.jinja | generic tool calls |
| deepseek-ai-DeepSeek-Coder-V2-Instruct.jinja | generic tool calls |
| deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja | deepseek r1 tool calls |
| deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja | deepseek r1 tool calls |
| deepseek-ai-DeepSeek-R1-Distill-Qwen-7B.jinja | deepseek r1 tool calls |
| deepseek-ai-DeepSeek-V2.5.jinja | deepseek r1 tool calls |
| deepseek-ai-deepseek-coder-33b-instruct.jinja | generic tool calls |
| google-gemma-2-2b-it.jinja | generic tool calls |
| google-gemma-7b-it.jinja | generic tool calls |
| indischepartij-MiniCPM-3B-OpenHermes-2.5-v2.jinja | generic tool calls |
| mattshumer-Reflection-Llama-3.1-70B.jinja | generic tool calls |
| meetkai-functionary-medium-v3.2.jinja | functionary v3.2 tool calls |
| meta-llama-Llama-3.1-8B-Instruct.jinja | llama 3.x tool calls (w/ builtin tools) |
| meta-llama-Llama-3.2-3B-Instruct.jinja | llama 3.x tool calls |
| meta-llama-Llama-3.3-70B-Instruct.jinja | llama 3.x tool calls (w/ builtin tools) |
| meta-llama-Meta-Llama-3.1-8B-Instruct.jinja | llama 3.x tool calls (w/ builtin tools) |
| microsoft-Phi-3-medium-4k-instruct.jinja | generic tool calls |
| microsoft-Phi-3-mini-4k-instruct.jinja | generic tool calls |
| microsoft-Phi-3-small-8k-instruct.jinja | generic tool calls |
| microsoft-Phi-3.5-mini-instruct.jinja | generic tool calls |
| microsoft-Phi-3.5-vision-instruct.jinja | generic tool calls |
| mistralai-Mistral-7B-Instruct-v0.2.jinja | generic tool calls |
| mistralai-Mistral-Large-Instruct-2407.jinja | mistral nemo tool calls |
| mistralai-Mistral-Large-Instruct-2411.jinja | generic tool calls |
| mistralai-Mistral-Nemo-Instruct-2407.jinja | mistral nemo tool calls |
| mistralai-Mixtral-8x7B-Instruct-v0.1.jinja | generic tool calls |
| mlabonne-AlphaMonarch-7B.jinja | generic tool calls |
| nvidia-Llama-3.1-Nemotron-70B-Instruct-HF.jinja | llama 3.x tool calls (w/ builtin tools) |
| openchat-openchat-3.5-0106.jinja | generic tool calls |
| teknium-OpenHermes-2.5-Mistral-7B.jinja | generic tool calls |
| Almawave-Velvet-14B.jinja | Hermes 2 Pro |
| AtlaAI-Selene-1-Mini-Llama-3.1-8B.jinja | Llama 3.x |
| CohereForAI-aya-expanse-8b.jinja | Generic |
| CohereForAI-c4ai-command-r-plus-default.jinja | Generic |
| CohereForAI-c4ai-command-r-plus-rag.jinja | Generic |
| CohereForAI-c4ai-command-r-plus-tool_use.jinja | Generic |
| CohereForAI-c4ai-command-r7b-12-2024-default.jinja | Command R7B (extract reasoning) |
| CohereForAI-c4ai-command-r7b-12-2024-rag.jinja | Command R7B (extract reasoning) |
| CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja | Command R7B (extract reasoning) |
| CohereForAI-c4ai-command-r7b-12-2024.jinja | Generic |
| DavieLion-Llama-3.2-1B-SPIN-iter3.jinja | Generic |
| Delta-Vector-Rei-12B.jinja | Mistral Nemo |
| EpistemeAI-Mistral-Nemo-Instruct-12B-Philosophy-Math.jinja | Mistral Nemo |
| FlofloB-83k_continued_pretraining_Qwen2.5-0.5B-Instruct_Unsloth_merged_16bit.jinja | Hermes 2 Pro |
| FlofloB-test_continued_pretraining_Phi-3-mini-4k-instruct_Unsloth_merged_16bit.jinja | Generic |
| HelpingAI-HAI-SER.jinja | Generic |
| HuggingFaceTB-SmolLM2-1.7B-Instruct.jinja | Generic |
| HuggingFaceTB-SmolLM2-135M-Instruct.jinja | Generic |
| HuggingFaceTB-SmolLM2-360M-Instruct.jinja | Generic |
| INSAIT-Institute-BgGPT-Gemma-2-27B-IT-v1.0.jinja | Generic |
| Ihor-Text2Graph-R1-Qwen2.5-0.5b.jinja | Hermes 2 Pro |
| Infinigence-Megrez-3B-Instruct.jinja | Generic |
| Josephgflowers-TinyLlama_v1.1_math_code-world-test-1.jinja | Generic |
| LGAI-EXAONE-EXAONE-3.5-2.4B-Instruct.jinja | Generic |
| LGAI-EXAONE-EXAONE-3.5-7.8B-Instruct.jinja | Generic |
| LatitudeGames-Wayfarer-12B.jinja | Generic |
| Magpie-Align-Llama-3-8B-Magpie-Align-v0.1.jinja | Generic |
| Magpie-Align-Llama-3.1-8B-Magpie-Align-v0.1.jinja | Generic |
| MaziyarPanahi-calme-3.2-instruct-78b.jinja | Generic |
| MiniMaxAI-MiniMax-Text-01.jinja | Generic |
| MiniMaxAI-MiniMax-VL-01.jinja | Generic |
| NaniDAO-deepseek-r1-qwen-2.5-32B-ablated.jinja | DeepSeek R1 (extract reasoning) |
| NexaAIDev-Octopus-v2.jinja | Generic |
| NousResearch-Hermes-2-Pro-Llama-3-8B-default.jinja | Generic |
| NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja | Hermes 2 Pro |
| NousResearch-Hermes-2-Pro-Mistral-7B-default.jinja | Generic |
| NousResearch-Hermes-2-Pro-Mistral-7B-tool_use.jinja | Hermes 2 Pro |
| NousResearch-Hermes-3-Llama-3.1-70B-default.jinja | Generic |
| NousResearch-Hermes-3-Llama-3.1-70B-tool_use.jinja | Hermes 2 Pro |
| NovaSky-AI-Sky-T1-32B-Flash.jinja | Hermes 2 Pro |
| NovaSky-AI-Sky-T1-32B-Preview.jinja | Hermes 2 Pro |
| OnlyCheeini-greesychat-turbo.jinja | Generic |
| Orenguteng-Llama-3.1-8B-Lexi-Uncensored-V2.jinja | Llama 3.x |
| OrionStarAI-Orion-14B-Chat.jinja | Generic |
| PowerInfer-SmallThinker-3B-Preview.jinja | Generic |
| PrimeIntellect-INTELLECT-1-Instruct.jinja | Generic |
| Qwen-QVQ-72B-Preview.jinja | Generic |
| Qwen-QwQ-32B-Preview.jinja | Hermes 2 Pro |
| Qwen-Qwen1.5-7B-Chat.jinja | Generic |
| Qwen-Qwen2-7B-Instruct.jinja | Generic |
| Qwen-Qwen2-VL-72B-Instruct.jinja | Generic |
| Qwen-Qwen2-VL-7B-Instruct.jinja | Generic |
| Qwen-Qwen2.5-0.5B.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-1.5B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-14B-Instruct-1M.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-14B.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-32B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-32B.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-3B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-72B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-7B-Instruct-1M.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-7B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-7B.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-Coder-32B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-Coder-7B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-Math-1.5B.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-Math-7B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-VL-3B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-VL-72B-Instruct.jinja | Hermes 2 Pro |
| Qwen-Qwen2.5-VL-7B-Instruct.jinja | Hermes 2 Pro |
| RWKV-Red-Team-ARWKV-7B-Preview-0.1.jinja | Hermes 2 Pro |
| SakanaAI-TinySwallow-1.5B-Instruct.jinja | Hermes 2 Pro |
| SakanaAI-TinySwallow-1.5B.jinja | Hermes 2 Pro |
| Sao10K-70B-L3.3-Cirrus-x1.jinja | Llama 3.x |
| SentientAGI-Dobby-Mini-Leashed-Llama-3.1-8B.jinja | Llama 3.x |
| SentientAGI-Dobby-Mini-Unhinged-Llama-3.1-8B.jinja | Llama 3.x |
| Steelskull-L3.3-Damascus-R1.jinja | Llama 3.x |
| Steelskull-L3.3-MS-Nevoria-70b.jinja | Llama 3.x |
| Steelskull-L3.3-Nevoria-R1-70b.jinja | Llama 3.x |
| THUDM-glm-4-9b-chat.jinja | Generic |
| THUDM-glm-edge-1.5b-chat.jinja | Generic |
| Tarek07-Progenitor-V1.1-LLaMa-70B.jinja | Llama 3.x |
| TheBloke-FusionNet_34Bx2_MoE-AWQ.jinja | Generic |
| TinyLlama-TinyLlama-1.1B-Chat-v1.0.jinja | Generic |
| UCLA-AGI-Mistral7B-PairRM-SPPO-Iter3.jinja | Generic |
| ValiantLabs-Llama3.1-8B-Enigma.jinja | Llama 3.x |
| abacusai-Fewshot-Metamath-OrcaVicuna-Mistral.jinja | Generic |
| ai21labs-AI21-Jamba-1.5-Large.jinja | Generic |
| allenai-Llama-3.1-Tulu-3-405B-SFT.jinja | Generic |
| allenai-Llama-3.1-Tulu-3-405B.jinja | Generic |
| allenai-Llama-3.1-Tulu-3-8B.jinja | Generic |
| arcee-ai-Virtuoso-Lite.jinja | Hermes 2 Pro |
| arcee-ai-Virtuoso-Medium-v2.jinja | Hermes 2 Pro |
| arcee-ai-Virtuoso-Small-v2.jinja | Hermes 2 Pro |
| avemio-GRAG-NEMO-12B-ORPO-HESSIAN-AI.jinja | Generic |
| bespokelabs-Bespoke-Stratos-7B.jinja | Hermes 2 Pro |
| bfuzzy1-acheron-m1a-llama.jinja | Generic |
| bofenghuang-vigogne-2-70b-chat.jinja | Generic |
| bytedance-research-UI-TARS-72B-DPO.jinja | Generic |
| bytedance-research-UI-TARS-7B-DPO.jinja | Generic |
| bytedance-research-UI-TARS-7B-SFT.jinja | Generic |
| carsenk-phi3.5_mini_exp_825_uncensored.jinja | Generic |
| cyberagent-DeepSeek-R1-Distill-Qwen-14B-Japanese.jinja | DeepSeek R1 (extract reasoning) |
| cyberagent-DeepSeek-R1-Distill-Qwen-32B-Japanese.jinja | DeepSeek R1 (extract reasoning) |
| databricks-dbrx-instruct.jinja | Generic |
| deepseek-ai-DeepSeek-Coder-V2-Instruct.jinja | Generic |
| deepseek-ai-DeepSeek-Coder-V2-Lite-Base.jinja | Generic |
| deepseek-ai-DeepSeek-Coder-V2-Lite-Instruct.jinja | Generic |
| deepseek-ai-DeepSeek-R1-Distill-Llama-70B.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-R1-Distill-Qwen-1.5B.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-R1-Distill-Qwen-14B.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-R1-Distill-Qwen-7B.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-R1-Zero.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-R1.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-V2-Lite.jinja | Generic |
| deepseek-ai-DeepSeek-V2.5.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-DeepSeek-V3.jinja | DeepSeek R1 (extract reasoning) |
| deepseek-ai-deepseek-coder-33b-instruct.jinja | Generic |
| deepseek-ai-deepseek-coder-6.7b-instruct.jinja | Generic |
| deepseek-ai-deepseek-coder-7b-instruct-v1.5.jinja | Generic |
| deepseek-ai-deepseek-llm-67b-chat.jinja | Generic |
| deepseek-ai-deepseek-llm-7b-chat.jinja | Generic |
| dicta-il-dictalm2.0-instruct.jinja | Generic |
| ehristoforu-Falcon3-8B-Franken-Basestruct.jinja | Hermes 2 Pro |
| fireworks-ai-llama-3-firefunction-v2.jinja | FireFunction v2 |
| godlikehhd-alpaca_data_sampled_ifd_new_5200.jinja | Hermes 2 Pro |
| godlikehhd-alpaca_data_score_max_0.7_2600.jinja | Hermes 2 Pro |
| google-gemma-2-27b-it.jinja | Generic |
| google-gemma-2-2b-it.jinja | Generic |
| google-gemma-2-2b-jpn-it.jinja | Generic |
| google-gemma-7b-it.jinja | Generic |
| huihui-ai-DeepSeek-R1-Distill-Llama-70B-abliterated.jinja | DeepSeek R1 (extract reasoning) |
| huihui-ai-DeepSeek-R1-Distill-Llama-8B-abliterated.jinja | DeepSeek R1 (extract reasoning) |
| huihui-ai-DeepSeek-R1-Distill-Qwen-14B-abliterated-v2.jinja | DeepSeek R1 (extract reasoning) |
| huihui-ai-DeepSeek-R1-Distill-Qwen-32B-abliterated.jinja | DeepSeek R1 (extract reasoning) |
| huihui-ai-DeepSeek-R1-Distill-Qwen-7B-abliterated-v2.jinja | DeepSeek R1 (extract reasoning) |
| huihui-ai-Qwen2.5-14B-Instruct-1M-abliterated.jinja | Hermes 2 Pro |
| ibm-granite-granite-3.1-8b-instruct.jinja | Generic |
| indischepartij-MiniCPM-3B-OpenHermes-2.5-v2.jinja | Generic |
| inflatebot-MN-12B-Mag-Mell-R1.jinja | Generic |
| jinaai-ReaderLM-v2.jinja | Generic |
| kms7530-chemeng_qwen-math-7b_24_1_100_1_nonmath.jinja | Hermes 2 Pro |
| knifeayumu-Cydonia-v1.3-Magnum-v4-22B.jinja | Mistral Nemo |
| langgptai-qwen1.5-7b-chat-sa-v0.1.jinja | Generic |
| lightblue-DeepSeek-R1-Distill-Qwen-7B-Japanese.jinja | DeepSeek R1 (extract reasoning) |
| mattshumer-Reflection-Llama-3.1-70B.jinja | Generic |
| meetkai-functionary-medium-v3.1.jinja | Functionary v3.1 Llama 3.1 |
| meetkai-functionary-medium-v3.2.jinja | Functionary v3.2 |
| meta-llama-Llama-2-7b-chat-hf.jinja | Generic |
| meta-llama-Llama-3.1-8B-Instruct.jinja | Llama 3.x |
| meta-llama-Llama-3.2-11B-Vision-Instruct.jinja | Llama 3.x |
| meta-llama-Llama-3.2-1B-Instruct.jinja | Llama 3.x |
| meta-llama-Llama-3.2-3B-Instruct.jinja | Llama 3.x |
| meta-llama-Llama-3.3-70B-Instruct.jinja | Llama 3.x |
| meta-llama-Meta-Llama-3-8B-Instruct.jinja | Generic |
| meta-llama-Meta-Llama-3.1-8B-Instruct.jinja | Llama 3.x |
| microsoft-Phi-3-medium-4k-instruct.jinja | Generic |
| microsoft-Phi-3-mini-4k-instruct.jinja | Generic |
| microsoft-Phi-3-small-8k-instruct.jinja | Generic |
| microsoft-Phi-3.5-mini-instruct.jinja | Generic |
| microsoft-Phi-3.5-vision-instruct.jinja | Generic |
| microsoft-phi-4.jinja | Generic |
| migtissera-Tess-3-Mistral-Nemo-12B.jinja | Generic |
| ministral-Ministral-3b-instruct.jinja | Generic |
| mistralai-Codestral-22B-v0.1.jinja | Generic |
| mistralai-Mistral-7B-Instruct-v0.1.jinja | Generic |
| mistralai-Mistral-7B-Instruct-v0.2.jinja | Generic |
| mistralai-Mistral-7B-Instruct-v0.3.jinja | Mistral Nemo |
| mistralai-Mistral-Large-Instruct-2407.jinja | Mistral Nemo |
| mistralai-Mistral-Large-Instruct-2411.jinja | Generic |
| mistralai-Mistral-Nemo-Instruct-2407.jinja | Mistral Nemo |
| mistralai-Mistral-Small-24B-Instruct-2501.jinja | Generic |
| mistralai-Mixtral-8x7B-Instruct-v0.1.jinja | Generic |
| mkurman-Qwen2.5-14B-DeepSeek-R1-1M.jinja | Hermes 2 Pro |
| mlabonne-AlphaMonarch-7B.jinja | Generic |
| mlx-community-Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1-float32.jinja | Hermes 2 Pro |
| mlx-community-Qwen2.5-VL-7B-Instruct-8bit.jinja | Hermes 2 Pro |
| mobiuslabsgmbh-DeepSeek-R1-ReDistill-Qwen-1.5B-v1.1.jinja | DeepSeek R1 (extract reasoning) |
| netcat420-MFANNv0.20.jinja | Generic |
| netcat420-MFANNv0.24.jinja | Generic |
| netease-youdao-Confucius-o1-14B.jinja | Hermes 2 Pro |
| nvidia-AceMath-7B-RM.jinja | Hermes 2 Pro |
| nvidia-Eagle2-1B.jinja | Hermes 2 Pro |
| nvidia-Eagle2-9B.jinja | Hermes 2 Pro |
| nvidia-Llama-3.1-Nemotron-70B-Instruct-HF.jinja | Llama 3.x |
| onnx-community-DeepSeek-R1-Distill-Qwen-1.5B-ONNX.jinja | DeepSeek R1 (extract reasoning) |
| open-thoughts-OpenThinker-7B.jinja | Hermes 2 Pro |
| openchat-openchat-3.5-0106.jinja | Generic |
| pankajmathur-orca_mini_v6_8b.jinja | Generic |
| princeton-nlp-Mistral-7B-Base-SFT-RDPO.jinja | Generic |
| princeton-nlp-Mistral-7B-Instruct-DPO.jinja | Generic |
| princeton-nlp-Mistral-7B-Instruct-RDPO.jinja | Generic |
| prithivMLmods-Bellatrix-Tiny-1.5B-R1.jinja | Hermes 2 Pro |
| prithivMLmods-Bellatrix-Tiny-1B-R1.jinja | Llama 3.x |
| prithivMLmods-Bellatrix-Tiny-1B-v3.jinja | Generic |
| prithivMLmods-Bellatrix-Tiny-3B-R1.jinja | Llama 3.x |
| prithivMLmods-Blaze-14B-xElite.jinja | Generic |
| prithivMLmods-Calcium-Opus-14B-Elite2-R1.jinja | Hermes 2 Pro |
| prithivMLmods-Calme-Ties-78B.jinja | Generic |
| prithivMLmods-Calme-Ties2-78B.jinja | Generic |
| prithivMLmods-Calme-Ties3-78B.jinja | Generic |
| prithivMLmods-ChemQwen2-vL.jinja | Generic |
| prithivMLmods-GWQ2b.jinja | Generic |
| prithivMLmods-LatexMind-2B-Codec.jinja | Generic |
| prithivMLmods-Llama-3.2-6B-AlgoCode.jinja | Llama 3.x |
| prithivMLmods-Megatron-Opus-14B-Exp.jinja | Hermes 2 Pro |
| prithivMLmods-Megatron-Opus-14B-Stock.jinja | Hermes 2 Pro |
| prithivMLmods-Megatron-Opus-7B-Exp.jinja | Hermes 2 Pro |
| prithivMLmods-Omni-Reasoner-Merged.jinja | Hermes 2 Pro |
| prithivMLmods-Omni-Reasoner4-Merged.jinja | Hermes 2 Pro |
| prithivMLmods-Primal-Opus-14B-Optimus-v1.jinja | Hermes 2 Pro |
| prithivMLmods-QwQ-Math-IO-500M.jinja | Hermes 2 Pro |
| prithivMLmods-Qwen-7B-Distill-Reasoner.jinja | DeepSeek R1 (extract reasoning) |
| prithivMLmods-Qwen2.5-1.5B-DeepSeek-R1-Instruct.jinja | Hermes 2 Pro |
| prithivMLmods-Qwen2.5-14B-DeepSeek-R1-1M.jinja | Hermes 2 Pro |
| prithivMLmods-Qwen2.5-32B-DeepSeek-R1-Instruct.jinja | Hermes 2 Pro |
| prithivMLmods-Qwen2.5-7B-DeepSeek-R1-1M.jinja | Hermes 2 Pro |
| prithivMLmods-Triangulum-v2-10B.jinja | Hermes 2 Pro |
| qingy2024-Falcon3-2x10B-MoE-Instruct.jinja | Hermes 2 Pro |
| rubenroy-Zurich-14B-GCv2-5m.jinja | Hermes 2 Pro |
| rubenroy-Zurich-7B-GCv2-5m.jinja | Hermes 2 Pro |
| silma-ai-SILMA-Kashif-2B-Instruct-v1.0.jinja | Generic |
| simplescaling-s1-32B.jinja | Hermes 2 Pro |
| sometimesanotion-Lamarck-14B-v0.7.jinja | Hermes 2 Pro |
| sonthenguyen-zephyr-sft-bnb-4bit-DPO-mtbr-180steps.jinja | Generic |
| sthenno-tempesthenno-icy-0130.jinja | Generic |
| sumink-qwft.jinja | Hermes 2 Pro |
| teknium-OpenHermes-2.5-Mistral-7B.jinja | Generic |
| thirdeyeai-elevate360m.jinja | Generic |
| tiiuae-Falcon3-10B-Instruct.jinja | Hermes 2 Pro |
| unsloth-DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit.jinja | DeepSeek R1 (extract reasoning) |
| unsloth-DeepSeek-R1-Distill-Llama-8B.jinja | DeepSeek R1 (extract reasoning) |
| unsloth-DeepSeek-R1.jinja | DeepSeek R1 (extract reasoning) |
| unsloth-Mistral-Small-24B-Instruct-2501-unsloth-bnb-4bit.jinja | Generic |
| upstage-solar-pro-preview-instruct.jinja | Generic |
| whyhow-ai-PatientSeek.jinja | Generic |
| xwen-team-Xwen-72B-Chat.jinja | Hermes 2 Pro |
| xwen-team-Xwen-7B-Chat.jinja | Hermes 2 Pro |

This table can be generated with:

```bash
./build/bin/test-chat ../minja/build/tests/*.jinja 2>/dev/null
```

</details>

@@ -1202,11 +1394,20 @@ curl http://localhost:8080/v1/chat/completions \

```shell
# Native support:

llama-server --jinja -fa -hf bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M
llama-server --jinja -fa -hf bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q6_K_L
llama-server --jinja -fa -hf bartowski/functionary-small-v3.2-GGUF:Q4_K_M
llama-server --jinja -fa -hf bartowski/Llama-3.3-70B-Instruct-GGUF:Q4_K_M

# Native support for DeepSeek R1 works best w/ our own template (official template buggy)

llama-server --jinja -fa -hf bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q6_K_L \
--chat-template-file models/templates/llama-cpp-deepseek-r1.jinja

llama-server --jinja -fa -hf bartowski/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q4_K_M \
--chat-template-file models/templates/llama-cpp-deepseek-r1.jinja

# Native support requires the right template for these GGUFs:

llama-server --jinja -fa -hf bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M \
@@ -1236,17 +1437,17 @@ curl http://localhost:8080/v1/chat/completions \
{
"type":"function",
"function":{
"name":"get_current_weather",
"description":"Get the current weather in a given location",
"name":"python",
"description":"Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
"parameters":{
"type":"object",
"properties":{
"location":{
"code":{
"type":"string",
"description":"The city and state, e.g. San Francisco, CA"
"description":"The code to run in the ipython interpreter."
}
},
"required":["location"]
"required":["code"]
}
}
}
@@ -1254,7 +1455,7 @@ curl http://localhost:8080/v1/chat/completions \
"messages": [
{
"role": "user",
"content": "What is the weather like in Istanbul?."
"content": "Print a hello world message with python."
}
]
}'
@@ -1398,7 +1599,7 @@ Apart from error types supported by OAI, we also have custom types that are spec

### Legacy completion web UI

A new chat-based UI has replaced the old completion-based one since [this PR](https://github.com/ggerganov/llama.cpp/pull/10175). If you want to use the old completion, start the server with `--path ./examples/server/public_legacy`
A new chat-based UI has replaced the old completion-based one since [this PR](https://github.com/ggml-org/llama.cpp/pull/10175). If you want to use the old completion, start the server with `--path ./examples/server/public_legacy`

For example:

File diff suppressed because it is too large
Binary file not shown.
@@ -42,7 +42,7 @@ enum stop_type {
STOP_TYPE_LIMIT,
};

// state diagram: https://github.com/ggerganov/llama.cpp/pull/9283
// state diagram: https://github.com/ggml-org/llama.cpp/pull/9283
enum slot_state {
SLOT_STATE_IDLE,
SLOT_STATE_STARTED, // TODO: this state is only used for setting up the initial prompt processing; maybe merge it with launch_slot_with_task in the future
@@ -173,6 +173,7 @@ struct slot_params {
{"grammar_trigger_words", grammar_trigger_words},
{"grammar_trigger_tokens", sampling.grammar_trigger_tokens},
{"preserved_tokens", sampling.preserved_tokens},
{"chat_format", common_chat_format_name(oaicompat_chat_format)},
{"samplers", samplers},
{"speculative.n_max", speculative.n_max},
{"speculative.n_min", speculative.n_min},
@@ -273,7 +274,7 @@ struct server_task {
params.speculative.p_min = json_value(data, "speculative.p_min", defaults.speculative.p_min);

params.speculative.n_min = std::min(params.speculative.n_max, params.speculative.n_min);
params.speculative.n_min = std::max(params.speculative.n_min, 2);
params.speculative.n_min = std::max(params.speculative.n_min, 0);
params.speculative.n_max = std::max(params.speculative.n_max, 0);

// Use OpenAI API logprobs only if n_probs wasn't provided
@@ -328,30 +329,27 @@ struct server_task {
}

// process "json_schema" and "grammar"
if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
}
if (data.contains("json_schema") && !data.contains("grammar")) {
try {
auto schema = json_value(data, "json_schema", json::object());
LOG_DBG("JSON schema: %s\n", schema.dump(2).c_str());
SRV_DBG("JSON schema: %s\n", schema.dump(2).c_str());
params.sampling.grammar = json_schema_to_grammar(schema);
LOG_DBG("Converted grammar: %s\n", params.sampling.grammar.c_str());
SRV_DBG("Converted grammar: %s\n", params.sampling.grammar.c_str());
} catch (const std::exception & e) {
throw std::runtime_error(std::string("\"json_schema\": ") + e.what());
}
} else {
params.sampling.grammar = json_value(data, "grammar", defaults.sampling.grammar);
LOG_DBG("Grammar: %s\n", params.sampling.grammar.c_str());
SRV_DBG("Grammar: %s\n", params.sampling.grammar.c_str());
params.sampling.grammar_lazy = json_value(data, "grammar_lazy", defaults.sampling.grammar_lazy);
LOG_DBG("Grammar lazy: %s\n", params.sampling.grammar_lazy ? "true" : "false");
SRV_DBG("Grammar lazy: %s\n", params.sampling.grammar_lazy ? "true" : "false");
}

{
auto it = data.find("chat_format");
if (it != data.end()) {
params.oaicompat_chat_format = static_cast<common_chat_format>(it->get<int>());
LOG_INF("Chat format: %s\n", common_chat_format_name(params.oaicompat_chat_format).c_str());
SRV_INF("Chat format: %s\n", common_chat_format_name(params.oaicompat_chat_format).c_str());
} else {
params.oaicompat_chat_format = defaults.oaicompat_chat_format;
}
@@ -367,12 +365,12 @@ struct server_task {

auto ids = common_tokenize(vocab, trigger.word, /* add_special= */ false, /* parse_special= */ true);
if (ids.size() == 1) {
LOG_DBG("Grammar trigger token: %d (`%s`)\n", ids[0], trigger.word.c_str());
SRV_DBG("Grammar trigger token: %d (`%s`)\n", ids[0], trigger.word.c_str());
params.sampling.grammar_trigger_tokens.push_back(ids[0]);
params.sampling.preserved_tokens.insert(ids[0]);
continue;
}
LOG_DBG("Grammar trigger word: `%s`\n", trigger.word.c_str());
SRV_DBG("Grammar trigger word: `%s`\n", trigger.word.c_str());
params.sampling.grammar_trigger_words.push_back(trigger);
}
}
@@ -381,11 +379,11 @@ struct server_task {
for (const auto & t : *preserved_tokens) {
auto ids = common_tokenize(vocab, t.get<std::string>(), /* add_special= */ false, /* parse_special= */ true);
if (ids.size() == 1) {
LOG_DBG("Preserved token: %d\n", ids[0]);
SRV_DBG("Preserved token: %d\n", ids[0]);
params.sampling.preserved_tokens.insert(ids[0]);
} else {
// This may happen when using a tool call style meant for a model with special tokens to preserve on a model without said tokens.
LOG_WRN("Not preserved because more than 1 token (wrong chat template override?): %s\n", t.get<std::string>().c_str());
SRV_WRN("Not preserved because more than 1 token (wrong chat template override?): %s\n", t.get<std::string>().c_str());
}
}
}
|
||||
@@ -717,16 +715,26 @@ struct server_task_result_cmpl_final : server_task_result {
std::string finish_reason = "length";
common_chat_msg msg;
if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) {
LOG_DBG("Parsing chat message: %s\n", content.c_str());
SRV_DBG("Parsing chat message: %s\n", content.c_str());
msg = common_chat_parse(content, oaicompat_chat_format);
finish_reason = msg.tool_calls.empty() ? "stop" : "tool_calls";
} else {
msg.content = content;
}

json tool_calls;
json message {
{"role", "assistant"},
};
if (!msg.reasoning_content.empty()) {
message["reasoning_content"] = msg.reasoning_content;
}
if (msg.content.empty() && !msg.tool_calls.empty()) {
message["content"] = json();
} else {
message["content"] = msg.content;
}
if (!msg.tool_calls.empty()) {
tool_calls = json::array();
auto tool_calls = json::array();
for (const auto & tc : msg.tool_calls) {
tool_calls.push_back({
{"type", "function"},
@@ -737,15 +745,7 @@ struct server_task_result_cmpl_final : server_task_result {
{"id", tc.id},
});
}
}

json message {
{"content", msg.content},
{"tool_calls", tool_calls},
{"role", "assistant"},
};
if (!msg.tool_plan.empty()) {
message["tool_plan"] = msg.tool_plan;
message["tool_calls"] = tool_calls;
}

json choice {
@@ -1600,6 +1600,10 @@ struct server_queue {

while (true) {
std::unique_lock<std::mutex> lock(mutex_tasks);
if (!running) {
QUE_DBG("%s", "terminate\n");
return;
}
if (queue_tasks.empty()) {
lock.unlock();
break;
@@ -1620,11 +1624,11 @@ struct server_queue {
QUE_DBG("%s", "waiting for new tasks\n");
{
std::unique_lock<std::mutex> lock(mutex_tasks);
if (!running) {
QUE_DBG("%s", "terminate\n");
return;
}
if (queue_tasks.empty()) {
if (!running) {
QUE_DBG("%s", "terminate\n");
return;
}
condition_tasks.wait(lock, [&]{
return (!queue_tasks.empty() || !running);
});
@@ -1800,7 +1804,7 @@ struct server_context {
// Necessary similarity of prompt for slot selection
float slot_prompt_similarity = 0.0f;

common_chat_templates chat_templates;
common_chat_templates_ptr chat_templates;

~server_context() {
// Clear any sampling context
@@ -1884,45 +1888,17 @@ struct server_context {
llama_init_dft.context.reset();
}

if (params_base.chat_template.empty() && !validate_builtin_chat_template(params.use_jinja)) {
LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__);
chat_templates = common_chat_templates_from_model(model, "chatml");
} else {
chat_templates = common_chat_templates_from_model(model, params_base.chat_template);
chat_templates = common_chat_templates_init(model, params_base.chat_template);
try {
common_chat_format_example(chat_templates.get(), params.use_jinja);
} catch (const std::exception & e) {
SRV_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__);
chat_templates = common_chat_templates_init(model, "chatml");
}
GGML_ASSERT(chat_templates.template_default.get() != nullptr);

return true;
}

bool validate_builtin_chat_template(bool use_jinja) const {
llama_chat_message chat[] = {{"user", "test"}};

if (use_jinja) {
auto templates = common_chat_templates_from_model(model, "");
common_chat_inputs inputs;
inputs.messages = json::array({{
{"role", "user"},
{"content", "test"},
}});
GGML_ASSERT(templates.template_default);
try {
common_chat_params_init(*templates.template_default, inputs);
if (templates.template_tool_use) {
common_chat_params_init(*templates.template_tool_use, inputs);
}
return true;
} catch (const std::exception & e) {
SRV_ERR("failed to apply template: %s\n", e.what());
return false;
}
} else {
const char * tmpl = llama_model_chat_template(model, /* name */ nullptr);
const int32_t chat_res = llama_chat_apply_template(tmpl, chat, 1, true, nullptr, 0);
return chat_res > 0;
}
}

void init() {
const int32_t n_ctx_slot = n_ctx / params_base.n_parallel;

@@ -2069,8 +2045,8 @@ struct server_context {

if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) {
// Might be better to reject the request with a 400 ?
SLT_WRN(slot, "n_predict = %d exceeds server configuration, setting to %d", slot.params.n_predict, slot.n_predict);
slot.params.n_predict = slot.n_predict;
SLT_WRN(slot, "n_predict = %d exceeds server configuration, setting to %d", slot.n_predict, slot.n_predict);
}

if (slot.params.ignore_eos && has_eos_token) {
@@ -2275,7 +2251,7 @@ struct server_context {
for (size_t i = 0; i < std::min(max_probs, n_probs); i++) {
result.probs.push_back({
cur_p->data[i].id,
common_detokenize(ctx, {cur_p->data[i].id}, special),
common_token_to_piece(ctx, cur_p->data[i].id, special),
cur_p->data[i].p
});
}
@@ -2297,7 +2273,7 @@ struct server_context {
for (size_t i = 0; i < std::min(n_vocab, n_probs); i++) {
result.probs.push_back({
cur[i].id,
common_detokenize(ctx, {cur[i].id}, special),
common_token_to_piece(ctx, cur[i].id, special),
cur[i].p
});
}
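These two hunks swap `common_detokenize` for `common_token_to_piece` when filling per-token probability entries. For context, those entries surface through the `n_probs` field of a completion request; a hedged sketch follows (the response layout can vary between server versions, so treat the field names as an assumption):

```python
import requests

# Ask the server to report the top-3 candidate tokens for each sampled token.
resp = requests.post(
    "http://localhost:8080/completion",
    json={"prompt": "The capital of France is", "n_predict": 4, "n_probs": 3},
    timeout=60,
)
resp.raise_for_status()
# Each entry pairs a token piece with its probability, as built above.
for step in resp.json().get("completion_probabilities", []):
    print(step)
```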
@@ -3355,10 +3331,10 @@ static void log_server_request(const httplib::Request & req, const httplib::Resp

// reminder: this function is not covered by httplib's exception handler; if someone does more complicated stuff, think about wrapping it in try-catch

LOG_INF("request: %s %s %s %d\n", req.method.c_str(), req.path.c_str(), req.remote_addr.c_str(), res.status);
SRV_INF("request: %s %s %s %d\n", req.method.c_str(), req.path.c_str(), req.remote_addr.c_str(), res.status);

LOG_DBG("request: %s\n", req.body.c_str());
LOG_DBG("response: %s\n", res.body.c_str());
SRV_DBG("request: %s\n", req.body.c_str());
SRV_DBG("response: %s\n", res.body.c_str());
}

std::function<void(int)> shutdown_handler;
@@ -3649,7 +3625,7 @@ int main(int argc, char ** argv) {
}, {
{"name", "n_busy_slots_per_decode"},
{"help", "Average number of busy slots per llama_decode() call"},
{"value", (float) res_metrics->n_busy_slots_total / (float) res_metrics->n_decode_total}
{"value", (float) res_metrics->n_busy_slots_total / std::max((float) res_metrics->n_decode_total, 1.f)}
}}},
{"gauge", {{
{"name", "prompt_tokens_seconds"},
@@ -3815,13 +3791,15 @@ int main(int argc, char ** argv) {
{ "default_generation_settings", ctx_server.default_generation_settings_for_props },
{ "total_slots", ctx_server.params_base.n_parallel },
{ "model_path", ctx_server.params_base.model },
{ "chat_template", ctx_server.chat_templates.template_default->source() },
{ "bos_token", ctx_server.chat_templates.template_default->bos_token() },
{ "eos_token", ctx_server.chat_templates.template_default->eos_token() },
{ "chat_template", common_chat_templates_source(ctx_server.chat_templates.get()) },
{ "bos_token", common_token_to_piece(ctx_server.ctx, llama_vocab_bos(ctx_server.vocab), /* special= */ true)},
{ "eos_token", common_token_to_piece(ctx_server.ctx, llama_vocab_eos(ctx_server.vocab), /* special= */ true)},
{ "build_info", build_info },
};
if (ctx_server.params_base.use_jinja && ctx_server.chat_templates.template_tool_use) {
data["chat_template_tool_use"] = ctx_server.chat_templates.template_tool_use->source();
if (ctx_server.params_base.use_jinja) {
if (auto tool_use_src = common_chat_templates_source(ctx_server.chat_templates.get(), "tool_use")) {
data["chat_template_tool_use"] = tool_use_src;
}
}

res_ok(res, data);
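This hunk reshapes the `/props` payload: the template source now comes from `common_chat_templates_source()` and the BOS/EOS pieces from the vocabulary rather than the template wrapper. A hedged sketch of inspecting the result from a client (the local server address is an assumption):

```python
import requests

props = requests.get("http://localhost:8080/props", timeout=10).json()
print(props["chat_template"][:200])                        # active template source
print(repr(props["bos_token"]), repr(props["eos_token"]))  # vocab-derived pieces
# Present only when the server runs with --jinja and the model ships a
# dedicated tool-use template variant:
print("chat_template_tool_use" in props)
```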
@@ -3860,7 +3838,9 @@ int main(int argc, char ** argv) {

try {
const auto & prompt = data.at("prompt");
LOG_DBG("Prompt: %s\n", prompt.is_string() ? prompt.get<std::string>().c_str() : prompt.dump(2).c_str());
// TODO: this log can become very long, put it behind a flag or think about a more compact format
//SRV_DBG("Prompt: %s\n", prompt.is_string() ? prompt.get<std::string>().c_str() : prompt.dump(2).c_str());

std::vector<llama_tokens> tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
tasks.reserve(tokenized_prompts.size());
for (size_t i = 0; i < tokenized_prompts.size(); i++) {
@@ -4054,7 +4034,7 @@ int main(int argc, char ** argv) {
}

auto body = json::parse(req.body);
json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates);
json data = oaicompat_completion_params_parse(body, params.use_jinja, params.reasoning_format, ctx_server.chat_templates.get());

return handle_completions_impl(
SERVER_TASK_TYPE_COMPLETION,
@@ -4067,7 +4047,7 @@ int main(int argc, char ** argv) {
// same with handle_chat_completions, but without inference part
const auto handle_apply_template = [&ctx_server, &params, &res_ok](const httplib::Request & req, httplib::Response & res) {
auto body = json::parse(req.body);
json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates);
json data = oaicompat_completion_params_parse(body, params.use_jinja, params.reasoning_format, ctx_server.chat_templates.get());
res_ok(res, {{ "prompt", std::move(data.at("prompt")) }});
};
@@ -4254,6 +4234,11 @@ int main(int argc, char ** argv) {
// return;
//}

// if true, use TEI API format, otherwise use Jina API format
// Jina: https://jina.ai/reranker/
// TEI: https://huggingface.github.io/text-embeddings-inference/#/Text%20Embeddings%20Inference/rerank
bool is_tei_format = body.contains("texts");

json query;
if (body.count("query") == 1) {
query = body.at("query");
@@ -4266,7 +4251,8 @@ int main(int argc, char ** argv) {
return;
}

std::vector<std::string> documents = json_value(body, "documents", std::vector<std::string>());
std::vector<std::string> documents = json_value(body, "documents",
json_value(body, "texts", std::vector<std::string>()));
if (documents.empty()) {
res_error(res, format_error_response("\"documents\" must be a non-empty string array", ERROR_TYPE_INVALID_REQUEST));
return;
@@ -4311,7 +4297,12 @@ int main(int argc, char ** argv) {
}

// write JSON response
json root = format_response_rerank(body, responses);
json root = format_response_rerank(
body,
responses,
is_tei_format,
documents);

res_ok(res, root);
};
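Per the comments above, a `texts` key selects the TEI-style API while `documents` selects the Jina-style one. A hedged client-side sketch of the two request shapes (URL and payloads are illustrative; the response shapes follow the `format_response_rerank` changes later in this diff):

```python
import requests

BASE = "http://localhost:8080"  # assumed llama-server with a reranker model

# Jina style: "documents" in, {"results": [{"index": ..., "relevance_score": ...}]} out.
jina = requests.post(f"{BASE}/rerank", json={
    "query": "Machine learning is",
    "documents": ["ML is a field of AI.", "Paris is in France."],
}, timeout=60).json()
print(jina["results"])

# TEI style: "texts" in, a bare array of {"index": ..., "score": ...} out;
# "return_text": true additionally echoes each document back.
tei = requests.post(f"{BASE}/rerank", json={
    "query": "Machine learning is",
    "texts": ["ML is a field of AI.", "Paris is in France."],
    "return_text": True,
}, timeout=60).json()
print(tei)
```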
@@ -4376,6 +4367,9 @@ int main(int argc, char ** argv) {
res.set_content("Error: gzip is not supported by this browser", "text/plain");
} else {
res.set_header("Content-Encoding", "gzip");
// COEP and COOP headers, required by pyodide (python interpreter)
res.set_header("Cross-Origin-Embedder-Policy", "require-corp");
res.set_header("Cross-Origin-Opener-Policy", "same-origin");
res.set_content(reinterpret_cast<const char*>(index_html_gz), index_html_gz_len, "text/html; charset=utf-8");
}
return false;
@@ -4425,6 +4419,7 @@ int main(int argc, char ** argv) {

// clean up function, to be called before exit
auto clean_up = [&svr]() {
SRV_INF("%s: cleaning up before exit...\n", __func__);
svr->stop();
llama_backend_free();
};
@@ -4441,10 +4436,6 @@ int main(int argc, char ** argv) {
}

if (!was_bound) {
//LOG_ERROR("couldn't bind HTTP server socket", {
//    {"hostname", params.hostname},
//    {"port", params.port},
//});
LOG_ERR("%s: couldn't bind HTTP server socket, hostname: %s, port: %d\n", __func__, params.hostname.c_str(), params.port);
clean_up();
return 1;
@@ -4461,7 +4452,7 @@ int main(int argc, char ** argv) {

if (!ctx_server.load_model(params)) {
clean_up();
t.join();
// t.join(); // FIXME: see below
LOG_ERR("%s: exiting due to model loading error\n", __func__);
return 1;
}
@@ -4473,8 +4464,8 @@ int main(int argc, char ** argv) {

// print sample chat example to make it clear which template is used
LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
ctx_server.chat_templates.template_default->source().c_str(),
common_chat_format_example(*ctx_server.chat_templates.template_default, ctx_server.params_base.use_jinja).c_str());
common_chat_templates_source(ctx_server.chat_templates.get()),
common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str());

ctx_server.queue_tasks.on_new_task([&ctx_server](const server_task & task) {
ctx_server.process_single_task(task);
@@ -4485,13 +4476,10 @@ int main(int argc, char ** argv) {
});

shutdown_handler = [&](int) {
// this will unblock start_loop()
ctx_server.queue_tasks.terminate();
};

LOG_INF("%s: server is listening on http://%s:%d - starting the main loop\n", __func__, params.hostname.c_str(), params.port);

ctx_server.queue_tasks.start_loop();

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
struct sigaction sigint_action;
sigint_action.sa_handler = signal_handler;
@@ -4506,8 +4494,13 @@ int main(int argc, char ** argv) {
SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

LOG_INF("%s: server is listening on http://%s:%d - starting the main loop\n", __func__, params.hostname.c_str(), params.port);

// this call blocks the main thread until queue_tasks.terminate() is called
ctx_server.queue_tasks.start_loop();

clean_up();
t.join();
// t.join(); // FIXME: http thread may stuck if there is an on-going request. we don't need to care about this for now as the HTTP connection will already be closed at this point, but it's better to fix this

return 0;
}
@@ -48,7 +48,7 @@ DEBUG=1 ./tests.sh -s -v -x
To run all the tests in a file:

```shell
./tests.sh unit/test_chat_completion.py.py -v -x
./tests.sh unit/test_chat_completion.py -v -x
```

To run a single test:
@@ -21,6 +21,8 @@ def create_server():
(None, "Book", "What is the best book", 8, "^ blue", 23, 8, "length", True, "This is not a chat template, it is"),
("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", False, None),
("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", True, None),
(None, "Book", [{"type": "text", "text": "What is"}, {"type": "text", "text": "the best book"}], 8, "Whillicter", 79, 8, "length", False, None),
(None, "Book", [{"type": "text", "text": "What is"}, {"type": "text", "text": "the best book"}], 8, "Whillicter", 79, 8, "length", True, None),
]
)
def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason, jinja, chat_template):
@@ -44,7 +46,7 @@ def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_conte
assert res.body["usage"]["completion_tokens"] == n_predicted
choice = res.body["choices"][0]
assert "assistant" == choice["message"]["role"]
assert match_regex(re_content, choice["message"]["content"])
assert match_regex(re_content, choice["message"]["content"]), f'Expected {re_content}, got {choice["message"]["content"]}'
assert choice["finish_reason"] == finish_reason


@@ -169,6 +171,47 @@ def test_completion_with_response_format(response_format: dict, n_predicted: int
assert "error" in res.body


@pytest.mark.parametrize("jinja,json_schema,n_predicted,re_content", [
(False, {"const": "42"}, 6, "\"42\""),
(True, {"const": "42"}, 6, "\"42\""),
])
def test_completion_with_json_schema(jinja: bool, json_schema: dict, n_predicted: int, re_content: str):
global server
server.jinja = jinja
server.start()
res = server.make_request("POST", "/chat/completions", data={
"max_tokens": n_predicted,
"messages": [
{"role": "system", "content": "You are a coding assistant."},
{"role": "user", "content": "Write an example"},
],
"json_schema": json_schema,
})
assert res.status_code == 200, f'Expected 200, got {res.status_code}'
choice = res.body["choices"][0]
assert match_regex(re_content, choice["message"]["content"]), f'Expected {re_content}, got {choice["message"]["content"]}'


@pytest.mark.parametrize("jinja,grammar,n_predicted,re_content", [
(False, 'root ::= "a"{5,5}', 6, "a{5,5}"),
(True, 'root ::= "a"{5,5}', 6, "a{5,5}"),
])
def test_completion_with_grammar(jinja: bool, grammar: str, n_predicted: int, re_content: str):
global server
server.jinja = jinja
server.start()
res = server.make_request("POST", "/chat/completions", data={
"max_tokens": n_predicted,
"messages": [
{"role": "user", "content": "Does not matter what I say, does it?"},
],
"grammar": grammar,
})
assert res.status_code == 200, res.body
choice = res.body["choices"][0]
assert match_regex(re_content, choice["message"]["content"]), choice["message"]["content"]


@pytest.mark.parametrize("messages", [
None,
"string",
@@ -10,17 +10,20 @@ def create_server():
server = ServerPreset.jina_reranker_tiny()


TEST_DOCUMENTS = [
"A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.",
"Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.",
"Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.",
"Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine."
]


def test_rerank():
global server
server.start()
res = server.make_request("POST", "/rerank", data={
"query": "Machine learning is",
"documents": [
"A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.",
"Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.",
"Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.",
"Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine."
]
"documents": TEST_DOCUMENTS,
})
assert res.status_code == 200
assert len(res.body["results"]) == 4
@@ -38,6 +41,29 @@ def test_rerank():
assert least_relevant["index"] == 3


def test_rerank_tei_format():
global server
server.start()
res = server.make_request("POST", "/rerank", data={
"query": "Machine learning is",
"texts": TEST_DOCUMENTS,
})
assert res.status_code == 200
assert len(res.body) == 4

most_relevant = res.body[0]
least_relevant = res.body[0]
for doc in res.body:
if doc["score"] > most_relevant["score"]:
most_relevant = doc
if doc["score"] < least_relevant["score"]:
least_relevant = doc

assert most_relevant["score"] > least_relevant["score"]
assert most_relevant["index"] == 2
assert least_relevant["index"] == 3


@pytest.mark.parametrize("documents", [
[],
None,
@@ -92,6 +92,7 @@ def do_test_completion_with_required_tool_tiny(template_name: str, tool: dict, a
tool_calls = choice["message"].get("tool_calls")
assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}'
tool_call = tool_calls[0]
assert choice["message"].get("content") is None, f'Expected no content in {choice["message"]}'
expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"]
assert expected_function_name == tool_call["function"]["name"]
actual_arguments = tool_call["function"]["arguments"]
@@ -155,11 +156,11 @@ def test_completion_with_required_tool_tiny_slow(template_name: str, tool: dict,

(TEST_TOOL, "success", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
(PYTHON_TOOL, "code", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
(PYTHON_TOOL, "code", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"),
# (PYTHON_TOOL, "code", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"),

(TEST_TOOL, "success", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")),
(PYTHON_TOOL, "code", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")),
(PYTHON_TOOL, "code", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"),
# (PYTHON_TOOL, "code", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"),

(TEST_TOOL, "success", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
(PYTHON_TOOL, "code", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
@@ -175,7 +176,7 @@ def test_completion_with_required_tool_tiny_slow(template_name: str, tool: dict,

(TEST_TOOL, "success", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
(PYTHON_TOOL, "code", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
(PYTHON_TOOL, "code", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", "chatml"),
# (PYTHON_TOOL, "code", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", "chatml"),
# TODO: fix these
# (TEST_TOOL, "success", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
# (PYTHON_TOOL, "code", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
@@ -214,6 +215,7 @@ def test_completion_with_required_tool_real_model(tool: dict, argument_key: str
tool_calls = choice["message"].get("tool_calls")
assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}'
tool_call = tool_calls[0]
assert choice["message"].get("content") is None, f'Expected no content in {choice["message"]}'
expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"]
assert expected_function_name == tool_call["function"]["name"]
actual_arguments = tool_call["function"]["arguments"]
@@ -273,7 +275,6 @@ def test_completion_without_tool_call_slow(template_name: str, n_predict: int, t

@pytest.mark.slow
@pytest.mark.parametrize("hf_repo,template_override", [
("bartowski/c4ai-command-r7b-12-2024-GGUF:Q4_K_M", ("CohereForAI/c4ai-command-r7b-12-2024", "tool_use")),
("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"),

@@ -298,13 +299,16 @@ def test_completion_without_tool_call_slow(template_name: str, n_predict: int, t
("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"),

("bartowski/c4ai-command-r7b-12-2024-GGUF:Q6_K_L", ("CohereForAI/c4ai-command-r7b-12-2024", "tool_use")),

("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),

# Note: gemma-2-2b-it knows itself as "model", not "assistant", so we don't test the ill-suited chatml on it.
("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None),

# ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
# ("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
])
def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None):
def test_weather(hf_repo: str, template_override: str | Tuple[str, str | None] | None):
global server
n_predict = 512
server.n_slots = 1
@@ -323,6 +327,7 @@ def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None)
res = server.make_request("POST", "/chat/completions", data={
"max_tokens": n_predict,
"messages": [
{"role": "system", "content": "You are a chatbot that uses tools/functions. Dont overthink things."},
{"role": "user", "content": "What is the weather in Istanbul?"},
],
"tools": [WEATHER_TOOL],
@@ -332,6 +337,7 @@ def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None)
tool_calls = choice["message"].get("tool_calls")
assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}'
tool_call = tool_calls[0]
assert choice["message"].get("content") is None, f'Expected no content in {choice["message"]}'
assert tool_call["function"]["name"] == WEATHER_TOOL["function"]["name"]
actual_arguments = json.loads(tool_call["function"]["arguments"])
assert 'location' in actual_arguments, f"location not found in {json.dumps(actual_arguments)}"
@@ -340,22 +346,166 @@ def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None)
assert re.match('^Istanbul(, (TR|Turkey|Türkiye))?$', location), f'Expected Istanbul for location, got {location}'

@pytest.mark.slow
@pytest.mark.parametrize("result_override,n_predict,hf_repo,template_override", [
(None, 128, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"),
(None, 128, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None),
(None, 128, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"),
(None, 128, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
(None, 128, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")),
(None, 128, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)),
(None, 128, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
(None, 128, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
(None, 128, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"),
(None, 128, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),

# TODO: fix these (wrong results, either didn't respect decimal instruction or got wrong value)
("[\\s\\S]*?\\*\\*\\s*0.5($|\\*\\*)", 8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
# ("[\\s\\S]*?\\*\\*\\s*0.5($|\\*\\*)", 8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
])
def test_calc_result(result_override: str | None, n_predict: int, hf_repo: str, template_override: str | Tuple[str, str | None] | None):
global server
# n_predict = 512
server.n_slots = 1
server.jinja = True
server.n_ctx = 8192 * 2
server.n_predict = n_predict
server.model_hf_repo = hf_repo
server.model_hf_file = None
if isinstance(template_override, tuple):
(template_hf_repo, template_variant) = template_override
server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja"
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
elif isinstance(template_override, str):
server.chat_template = template_override
server.start(timeout_seconds=TIMEOUT_SERVER_START)
res = server.make_request("POST", "/chat/completions", data={
"max_tokens": n_predict,
"messages": [
{"role": "system", "content": "You are a chatbot that uses tools/functions. Dont overthink things, and provide very concise answers. Do not explain your reasoning to the user. Provide any numerical values back to the user with at most two decimals."},
{"role": "user", "content": "What's the y coordinate of a point on the unit sphere at angle 30 degrees?"},
{
"role": "assistant",
"content": None,
"tool_calls": [
{
"id": "call_6789",
"type": "function",
"function": {
"name": "calculate",
"arguments": "{\"expression\":\"sin(30 * pi / 180)\"}"
}
}
]
},
{
"role": "tool",
"name": "calculate",
"content": "0.55644242476",
"tool_call_id": "call_6789"
}
],
"tools": [
{
"type":"function",
"function":{
"name":"calculate",
"description":"A calculator function that computes values of arithmetic expressions in the Python syntax",
"parameters":{
"type":"object",
"properties":{
"expression":{
"type":"string",
"description":"An arithmetic expression to compute the value of (Python syntad, assuming all floats)"
}
},
"required":["expression"]
}
}
}
]
}, timeout=TIMEOUT_HTTP_REQUEST)
assert res.status_code == 200, f"Expected status code 200, got {res.status_code}"
choice = res.body["choices"][0]
tool_calls = choice["message"].get("tool_calls")
assert tool_calls is None, f'Expected no tool call in {choice["message"]}'
content = choice["message"].get("content")
assert content is not None, f'Expected content in {choice["message"]}'
if result_override is not None:
assert re.match(result_override, content), f'Expected {result_override}, got {content}'
else:
assert re.match('^[\\s\\S]*?The (y[ -])?coordinate [\\s\\S]*?is (approximately )?0\\.56\\b|^0\\.56$', content), \
f'Expected something like "The y coordinate is 0.56.", got {content}'

@pytest.mark.slow
@pytest.mark.parametrize("n_predict,reasoning_format,expect_content,expect_reasoning_content,hf_repo,template_override", [
(128, 'deepseek', "^The sum of 102 and 7 is 109.*", None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
(128, None, "^The sum of 102 and 7 is 109.*", None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),

(1024, 'deepseek', "To find the sum of.*", "I need to calculate the sum of 102 and 7.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
(1024, 'none', "^I need[\\s\\S]*?</think>\n?To find.*", None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),

(1024, 'deepseek', "To find the sum of.*", "First, I [\\s\\S]*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
])
def test_thoughts(n_predict: int, reasoning_format: Literal['deepseek', 'none'] | None, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None):
global server
server.n_slots = 1
server.reasoning_format = reasoning_format
server.jinja = True
server.n_ctx = 8192 * 2
server.n_predict = n_predict
server.model_hf_repo = hf_repo
server.model_hf_file = None
if isinstance(template_override, tuple):
(template_hf_repo, template_variant) = template_override
server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja"
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
elif isinstance(template_override, str):
server.chat_template = template_override
server.start(timeout_seconds=TIMEOUT_SERVER_START)
res = server.make_request("POST", "/chat/completions", data={
"max_tokens": n_predict,
"messages": [
{"role": "user", "content": "What's the sum of 102 and 7?"},
]
}, timeout=TIMEOUT_HTTP_REQUEST)
assert res.status_code == 200, f"Expected status code 200, got {res.status_code}"
choice = res.body["choices"][0]
assert choice["message"].get("tool_calls") is None, f'Expected no tool call in {choice["message"]}'

content = choice["message"].get("content")
if expect_content is None:
assert content is None, f'Expected no content in {choice["message"]}'
else:
assert re.match(expect_content, content), f'Expected {expect_content}, got {content}'

reasoning_content = choice["message"].get("reasoning_content")
if expect_reasoning_content is None:
assert reasoning_content is None, f'Expected no reasoning content in {choice["message"]}'
else:
assert re.match(expect_reasoning_content, reasoning_content), f'Expected {expect_reasoning_content}, got {reasoning_content}'

@pytest.mark.slow
@pytest.mark.parametrize("expected_arguments_override,hf_repo,template_override", [
(None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
# (None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", "chatml"),

(None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
(None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"),

(None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai-functionary-medium-v3.2", None)),
(None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"),

(None, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
('{"code":"print("}', "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"),
('{"code":"print("}', "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
(None, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"),

('{"code":"print("}', "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)),
(None, "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)),
(None, "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", "chatml"),

('{"code":"print("}', "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)),
('{"code":"print("}', "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"),
(None, "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"),

(None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None),
(None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"),
@@ -371,15 +521,13 @@ def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None)

# Note: gemma-2-2b-it knows itself as "model", not "assistant", so we don't test the ill-suited chatml on it.
(None, "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None),

# (None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
])
def test_hello_world_tool_call(expected_arguments_override: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None):
def test_hello_world(expected_arguments_override: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None):
global server
server.n_slots = 1
server.jinja = True
server.n_ctx = 8192
server.n_predict = 128
server.n_predict = 512 # High because of DeepSeek R1
server.model_hf_repo = hf_repo
server.model_hf_file = None
if isinstance(template_override, tuple):
@@ -406,6 +554,7 @@ def test_hello_world_tool_call(expected_arguments_override: str | None, hf_repo:
tool_calls = choice["message"].get("tool_calls")
assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}'
tool_call = tool_calls[0]
assert choice["message"].get("content") is None, f'Expected no content in {choice["message"]}'
assert tool_call["function"]["name"] == PYTHON_TOOL["function"]["name"]
actual_arguments = tool_call["function"]["arguments"]
if expected_arguments_override is not None:

@@ -78,6 +78,7 @@ class ServerProcess:
draft_max: int | None = None
no_webui: bool | None = None
jinja: bool | None = None
reasoning_format: Literal['deepseek', 'none'] | None = None
chat_template: str | None = None
chat_template_file: str | None = None

@@ -172,6 +173,8 @@ class ServerProcess:
server_args.append("--no-webui")
if self.jinja:
server_args.append("--jinja")
if self.reasoning_format is not None:
server_args.extend(("--reasoning-format", self.reasoning_format))
if self.chat_template:
server_args.extend(["--chat-template", self.chat_template])
if self.chat_template_file:

@@ -12,9 +12,7 @@
// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
#include "minja.hpp"
#include "chat.hpp"
#include "chat-template.hpp"
#include "chat.h"

#include <random>
#include <sstream>
@@ -347,41 +345,6 @@ static llama_tokens format_infill(
return embd_inp;
}

// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const common_chat_template & tmpl, const std::vector<json> & messages) {
std::vector<common_chat_msg> chat;

for (size_t i = 0; i < messages.size(); ++i) {
const auto & curr_msg = messages[i];

std::string role = json_value(curr_msg, "role", std::string(""));

std::string content;
if (curr_msg.contains("content")) {
if (curr_msg["content"].is_string()) {
content = curr_msg["content"].get<std::string>();
} else if (curr_msg["content"].is_array()) {
for (const auto & part : curr_msg["content"]) {
if (part.contains("text")) {
content += "\n" + part["text"].get<std::string>();
}
}
} else {
throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
}
} else {
throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
}

chat.push_back({role, content, /* tool_calls= */ {}});
}

const auto formatted_chat = common_chat_apply_template(tmpl, chat, true, /* use_jinja= */ false);
LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());

return formatted_chat;
}

//
// base64 utils (TODO: move to common in the future)
//
@@ -578,12 +541,10 @@ static json oaicompat_completion_params_parse(const json & body) {
static json oaicompat_completion_params_parse(
const json & body, /* openai api json semantics */
bool use_jinja,
const common_chat_templates & chat_templates)
common_reasoning_format reasoning_format,
const struct common_chat_templates * tmpls)
{
json llama_params;
const auto & tmpl = body.contains("tools") && chat_templates.template_tool_use
? *chat_templates.template_tool_use
: *chat_templates.template_default;

auto tools = json_value(body, "tools", json());
auto stream = json_value(body, "stream", false);
@@ -609,61 +570,58 @@ static json oaicompat_completion_params_parse(
llama_params["stop"] = json_value(body, "stop", json::array());
}

auto json_schema = json_value(body, "json_schema", json());
auto grammar = json_value(body, "grammar", std::string());
if (!json_schema.is_null() && !grammar.empty()) {
throw std::runtime_error("Cannot use both json_schema and grammar");
}

// Handle "response_format" field
if (body.contains("response_format")) {
json response_format = json_value(body, "response_format", json::object());
std::string response_type = json_value(response_format, "type", std::string());
if (response_type == "json_object") {
llama_params["json_schema"] = json_value(response_format, "schema", json::object());
json_schema = json_value(response_format, "schema", json::object());
} else if (response_type == "json_schema") {
json json_schema = json_value(response_format, "json_schema", json::object());
llama_params["json_schema"] = json_value(json_schema, "schema", json::object());
json_schema = json_value(json_schema, "schema", json::object());
} else if (!response_type.empty() && response_type != "text") {
throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
}
}

// Apply chat template to the list of messages
if (use_jinja) {
auto tool_choice = json_value(body, "tool_choice", std::string("auto"));
if (tool_choice != "none" && tool_choice != "auto" && tool_choice != "required") {
throw std::runtime_error("Invalid tool_choice: " + tool_choice);
}
if (tool_choice != "none" && llama_params.contains("grammar")) {
throw std::runtime_error("Cannot use custom grammar constraints with tools.");
}
common_chat_inputs inputs;
inputs.messages = body.at("messages");
inputs.tools = tools;
inputs.tool_choice = tool_choice;
inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false);
if (inputs.parallel_tool_calls && !tmpl.original_caps().supports_parallel_tool_calls) {
LOG_DBG("Disabling parallel_tool_calls because the template does not support it\n");
inputs.parallel_tool_calls = false;
}
inputs.stream = stream;
// TODO: support mixing schema w/ tools beyond generic format.
inputs.json_schema = json_value(llama_params, "json_schema", json());
auto chat_params = common_chat_params_init(tmpl, inputs);
common_chat_templates_inputs inputs;
inputs.messages = common_chat_msgs_parse_oaicompat(body.at("messages"));
inputs.tools = common_chat_tools_parse_oaicompat(tools);
inputs.tool_choice = common_chat_tool_choice_parse_oaicompat(json_value(body, "tool_choice", std::string("auto")));
inputs.json_schema = json_schema.is_null() ? "" : json_schema.dump();
inputs.grammar = grammar;
inputs.add_generation_prompt = true;
inputs.use_jinja = use_jinja;
inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false);
inputs.extract_reasoning = reasoning_format != COMMON_REASONING_FORMAT_NONE;
if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE && body.contains("grammar")) {
throw std::runtime_error("Cannot use custom grammar constraints with tools.");
}

llama_params["chat_format"] = static_cast<int>(chat_params.format);
llama_params["prompt"] = chat_params.prompt;
llama_params["grammar"] = chat_params.grammar;
llama_params["grammar_lazy"] = chat_params.grammar_lazy;
auto grammar_triggers = json::array();
for (const auto & trigger : chat_params.grammar_triggers) {
grammar_triggers.push_back({
{"word", trigger.word},
{"at_start", trigger.at_start},
});
}
llama_params["grammar_triggers"] = grammar_triggers;
llama_params["preserved_tokens"] = chat_params.preserved_tokens;
for (const auto & stop : chat_params.additional_stops) {
llama_params["stop"].push_back(stop);
}
} else {
llama_params["prompt"] = format_chat(tmpl, body.at("messages"));
// Apply chat template to the list of messages
auto chat_params = common_chat_templates_apply(tmpls, inputs);

llama_params["chat_format"] = static_cast<int>(chat_params.format);
llama_params["prompt"] = chat_params.prompt;
llama_params["grammar"] = chat_params.grammar;
llama_params["grammar_lazy"] = chat_params.grammar_lazy;
auto grammar_triggers = json::array();
for (const auto & trigger : chat_params.grammar_triggers) {
grammar_triggers.push_back({
{"word", trigger.word},
{"at_start", trigger.at_start},
});
}
llama_params["grammar_triggers"] = grammar_triggers;
llama_params["preserved_tokens"] = chat_params.preserved_tokens;
for (const auto & stop : chat_params.additional_stops) {
llama_params["stop"].push_back(stop);
}

// Handle "n" field
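The rewritten parser folds both `response_format` variants into a single `json_schema` value and rejects mixing a schema with a raw `grammar`. A hedged request-side sketch of the two accepted variants (endpoint and payloads are illustrative, not taken from this diff):

```python
import requests

BASE = "http://localhost:8080"

# "json_object" variant: an optional schema sits directly under response_format.
r1 = requests.post(f"{BASE}/v1/chat/completions", json={
    "messages": [{"role": "user", "content": "Reply with a JSON object."}],
    "response_format": {"type": "json_object"},
}, timeout=60)

# "json_schema" variant: the schema is nested one level deeper, OpenAI-style.
r2 = requests.post(f"{BASE}/v1/chat/completions", json={
    "messages": [{"role": "user", "content": "Answer with the number 42."}],
    "response_format": {
        "type": "json_schema",
        "json_schema": {"schema": {"const": "42"}},
    },
}, timeout=60)

# Sending both a schema and a raw "grammar" would be rejected by the parser above.
print(r1.status_code, r2.status_code)
```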
@@ -735,29 +693,51 @@ static json format_embeddings_response_oaicompat(const json & request, const jso
return res;
}

static json format_response_rerank(const json & request, const json & ranks) {
json data = json::array();
int32_t n_tokens = 0;
int i = 0;
for (const auto & rank : ranks) {
data.push_back(json{
{"index", i++},
{"relevance_score", json_value(rank, "score", 0.0)},
});
static json format_response_rerank(
const json & request,
const json & ranks,
bool is_tei_format,
std::vector<std::string> & texts) {
json res;
if (is_tei_format) {
// TEI response format
res = json::array();
bool return_text = json_value(request, "return_text", false);
for (const auto & rank : ranks) {
int index = json_value(rank, "index", 0);
json elem = json{
{"index", index},
{"score", json_value(rank, "score", 0.0)},
};
if (return_text) {
elem["text"] = std::move(texts[index]);
}
res.push_back(elem);
}
} else {
// Jina response format
json results = json::array();
int32_t n_tokens = 0;
for (const auto & rank : ranks) {
results.push_back(json{
{"index", json_value(rank, "index", 0)},
{"relevance_score", json_value(rank, "score", 0.0)},
});

n_tokens += json_value(rank, "tokens_evaluated", 0);
n_tokens += json_value(rank, "tokens_evaluated", 0);
}

res = json{
{"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
{"object", "list"},
{"usage", json{
{"prompt_tokens", n_tokens},
{"total_tokens", n_tokens}
}},
{"results", results}
};
}

json res = json {
{"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
{"object", "list"},
{"usage", json {
{"prompt_tokens", n_tokens},
{"total_tokens", n_tokens}
}},
{"results", data}
};

return res;
}

examples/server/webui/package-lock.json (generated, 17 changed lines)
@@ -8,10 +8,12 @@
"name": "webui",
"version": "0.0.0",
"dependencies": {
"@heroicons/react": "^2.2.0",
"@sec-ant/readable-stream": "^0.6.0",
"@vscode/markdown-it-katex": "^1.1.1",
"autoprefixer": "^10.4.20",
"daisyui": "^4.12.14",
"dexie": "^4.0.11",
"highlight.js": "^11.10.0",
"katex": "^0.16.15",
"postcss": "^8.4.49",
@@ -902,6 +904,15 @@
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
}
},
"node_modules/@heroicons/react": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/@heroicons/react/-/react-2.2.0.tgz",
"integrity": "sha512-LMcepvRaS9LYHJGsF0zzmgKCUim/X3N/DQKc4jepAXJ7l8QxJ1PmxJzqplF2Z3FE4PqBAIGyJAQ/w4B5dsqbtQ==",
"license": "MIT",
"peerDependencies": {
"react": ">= 16 || ^19.0.0-rc"
}
},
"node_modules/@humanfs/core": {
"version": "0.19.1",
"resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz",
@@ -2328,6 +2339,12 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/dexie": {
"version": "4.0.11",
"resolved": "https://registry.npmjs.org/dexie/-/dexie-4.0.11.tgz",
"integrity": "sha512-SOKO002EqlvBYYKQSew3iymBoN2EQ4BDw/3yprjh7kAfFzjBYkaMNa/pZvcA7HSWlcKSQb9XhPe3wKyQ0x4A8A==",
"license": "Apache-2.0"
},
"node_modules/didyoumean": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",

@@ -11,10 +11,12 @@
"preview": "vite preview"
},
"dependencies": {
"@heroicons/react": "^2.2.0",
"@sec-ant/readable-stream": "^0.6.0",
"@vscode/markdown-it-katex": "^1.1.1",
"autoprefixer": "^10.4.20",
"daisyui": "^4.12.14",
"dexie": "^4.0.11",
"highlight.js": "^11.10.0",
"katex": "^0.16.15",
"postcss": "^8.4.49",

@@ -1,8 +1,9 @@
import { HashRouter, Outlet, Route, Routes } from 'react-router';
import Header from './components/Header';
import Sidebar from './components/Sidebar';
import { AppContextProvider } from './utils/app.context';
import { AppContextProvider, useAppContext } from './utils/app.context';
import ChatScreen from './components/ChatScreen';
import SettingDialog from './components/SettingDialog';

function App() {
return (
@@ -22,13 +23,23 @@ function App() {
}

function AppLayout() {
const { showSettings, setShowSettings } = useAppContext();
return (
<>
<Sidebar />
<div className="chat-screen drawer-content grow flex flex-col h-screen w-screen mx-auto px-4">
<div
className="drawer-content grow flex flex-col h-screen w-screen mx-auto px-4 overflow-auto"
id="main-scroll"
>
<Header />
<Outlet />
</div>
{
<SettingDialog
show={showSettings}
onClose={() => setShowSettings(false)}
/>
}
</>
);
}

@@ -10,6 +10,7 @@ export const BASE_URL = new URL('.', document.baseURI).href

export const CONFIG_DEFAULT = {
// Note: in order not to introduce breaking changes, please keep the same data type (number, string, etc) if you want to change the default value. Do not use null or undefined for default value.
// Do not use nested objects, keep it single level. Prefix the key if you need to group them.
apiKey: '',
systemMessage: 'You are a helpful assistant.',
showTokensPerSecond: false,
@@ -36,6 +37,8 @@ export const CONFIG_DEFAULT = {
dry_penalty_last_n: -1,
max_tokens: -1,
custom: '', // custom json-stringified object
// experimental features
pyIntepreterEnabled: false,
};
export const CONFIG_INFO: Record<string, string> = {
apiKey: 'Set the API Key if you are using --api-key option for the server.',

examples/server/webui/src/components/CanvasPyInterpreter.tsx (new file, 195 lines)
@@ -0,0 +1,195 @@
|
||||
import { useEffect, useState } from 'react';
|
||||
import { useAppContext } from '../utils/app.context';
|
||||
import { OpenInNewTab, XCloseButton } from '../utils/common';
|
||||
import { CanvasType } from '../utils/types';
|
||||
import { PlayIcon, StopIcon } from '@heroicons/react/24/outline';
|
||||
import StorageUtils from '../utils/storage';
|
||||
|
||||
const canInterrupt = typeof SharedArrayBuffer === 'function';
|
||||
|
||||
// adapted from https://pyodide.org/en/stable/usage/webworker.html
|
||||
const WORKER_CODE = `
|
||||
importScripts("https://cdn.jsdelivr.net/pyodide/v0.27.2/full/pyodide.js");
|
||||
|
||||
let stdOutAndErr = [];
|
||||
|
||||
let pyodideReadyPromise = loadPyodide({
|
||||
stdout: (data) => stdOutAndErr.push(data),
|
||||
stderr: (data) => stdOutAndErr.push(data),
|
||||
});
|
||||
|
||||
let alreadySetBuff = false;
|
||||
|
||||
self.onmessage = async (event) => {
|
||||
stdOutAndErr = [];
|
||||
|
||||
// make sure loading is done
|
||||
const pyodide = await pyodideReadyPromise;
|
||||
const { id, python, context, interruptBuffer } = event.data;
|
||||
|
||||
if (interruptBuffer && !alreadySetBuff) {
|
||||
pyodide.setInterruptBuffer(interruptBuffer);
|
||||
alreadySetBuff = true;
|
||||
}
|
||||
|
||||
// Now load any packages we need, run the code, and send the result back.
|
||||
await pyodide.loadPackagesFromImports(python);
|
||||
|
||||
// make a Python dictionary with the data from content
|
||||
const dict = pyodide.globals.get("dict");
|
||||
const globals = dict(Object.entries(context));
|
||||
try {
|
||||
self.postMessage({ id, running: true });
|
||||
// Execute the python code in this context
|
||||
const result = pyodide.runPython(python, { globals });
|
||||
self.postMessage({ result, id, stdOutAndErr });
|
||||
} catch (error) {
|
||||
self.postMessage({ error: error.message, id });
|
||||
}
|
||||
interruptBuffer[0] = 0;
|
||||
};
`;

let worker: Worker;
const interruptBuffer = canInterrupt
  ? new Uint8Array(new SharedArrayBuffer(1))
  : null;

const startWorker = () => {
  if (!worker) {
    worker = new Worker(
      URL.createObjectURL(new Blob([WORKER_CODE], { type: 'text/javascript' }))
    );
  }
};

if (StorageUtils.getConfig().pyIntepreterEnabled) {
  startWorker();
}

const runCodeInWorker = (
  pyCode: string,
  callbackRunning: () => void
): {
  donePromise: Promise<string>;
  interrupt: () => void;
} => {
  startWorker();
  const id = Math.random() * 1e8;
  const context = {};
  if (interruptBuffer) {
    interruptBuffer[0] = 0;
  }

  const donePromise = new Promise<string>((resolve) => {
    worker.onmessage = (event) => {
      const { error, stdOutAndErr, running } = event.data;
      if (id !== event.data.id) return;
      if (running) {
        callbackRunning();
        return;
      } else if (error) {
        resolve(error.toString());
      } else {
        resolve(stdOutAndErr.join('\n'));
      }
    };
    worker.postMessage({ id, python: pyCode, context, interruptBuffer });
  });

  const interrupt = () => {
    console.log('Interrupting...');
    console.trace();
    if (interruptBuffer) {
      interruptBuffer[0] = 2;
    }
  };

  return { donePromise, interrupt };
};

export default function CanvasPyInterpreter() {
  const { canvasData, setCanvasData } = useAppContext();

  const [code, setCode] = useState(canvasData?.content ?? ''); // copy to avoid direct mutation
  const [running, setRunning] = useState(false);
  const [output, setOutput] = useState('');
  const [interruptFn, setInterruptFn] = useState<() => void>();
  const [showStopBtn, setShowStopBtn] = useState(false);

  const runCode = async (pycode: string) => {
    interruptFn?.();
    setRunning(true);
    setOutput('Loading Pyodide...');
    const { donePromise, interrupt } = runCodeInWorker(pycode, () => {
      setOutput('Running...');
      setShowStopBtn(canInterrupt);
    });
    setInterruptFn(() => interrupt);
    const out = await donePromise;
    setOutput(out);
    setRunning(false);
    setShowStopBtn(false);
  };

  // run code on mount
  useEffect(() => {
    setCode(canvasData?.content ?? '');
    runCode(canvasData?.content ?? '');
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [canvasData?.content]);

  if (canvasData?.type !== CanvasType.PY_INTERPRETER) {
    return null;
  }

  return (
    <div className="card bg-base-200 w-full h-full shadow-xl">
      <div className="card-body">
        <div className="flex justify-between items-center mb-4">
          <span className="text-lg font-bold">Python Interpreter</span>
          <XCloseButton
            className="bg-base-100"
            onClick={() => setCanvasData(null)}
          />
        </div>
        <div className="grid grid-rows-3 gap-4 h-full">
          <textarea
            className="textarea textarea-bordered w-full h-full font-mono"
            value={code}
            onChange={(e) => setCode(e.target.value)}
          ></textarea>
          <div className="font-mono flex flex-col row-span-2">
            <div className="flex items-center mb-2">
              <button
                className="btn btn-sm bg-base-100"
                onClick={() => runCode(code)}
                disabled={running}
              >
                <PlayIcon className="h-6 w-6" /> Run
              </button>
              {showStopBtn && (
                <button
                  className="btn btn-sm bg-base-100 ml-2"
                  onClick={() => interruptFn?.()}
                >
                  <StopIcon className="h-6 w-6" /> Stop
                </button>
              )}
              <span className="grow text-right text-xs">
                <OpenInNewTab href="https://github.com/ggerganov/llama.cpp/issues/11762">
                  Report a bug
                </OpenInNewTab>
              </span>
            </div>
            <textarea
              className="textarea textarea-bordered h-full dark-color"
              value={output}
              readOnly
            ></textarea>
          </div>
        </div>
      </div>
    </div>
  );
}
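A note on how the module above is driven: runCodeInWorker resolves with whatever the Python snippet printed (or with the error message), and interrupt works by writing 2 into the shared buffer, which pyodide's setInterruptBuffer treats as SIGINT and raises as KeyboardInterrupt inside the running code. A minimal, illustrative caller (the snippet and the timeout below are examples, not part of the diff):

    const { donePromise, interrupt } = runCodeInWorker('print(1 + 1)', () => {
      console.log('pyodide reported { running: true }');
    });
    setTimeout(interrupt, 5000); // best-effort stop; needs SharedArrayBuffer
    donePromise.then((out) => console.log(out)); // "2", or an error message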
examples/server/webui/src/components/ChatMessage.tsx
@@ -3,6 +3,7 @@ import { useAppContext } from '../utils/app.context';
import { Message, PendingMessage } from '../utils/types';
import { classNames } from '../utils/misc';
import MarkdownDisplay, { CopyButton } from './MarkdownDisplay';
import { ChevronLeftIcon, ChevronRightIcon } from '@heroicons/react/24/outline';

interface SplitMessage {
  content: PendingMessage['content'];
@@ -12,17 +13,24 @@ interface SplitMessage {

export default function ChatMessage({
  msg,
  siblingLeafNodeIds,
  siblingCurrIdx,
  id,
  scrollToBottom,
  onRegenerateMessage,
  onEditMessage,
  onChangeSibling,
  isPending,
}: {
  msg: Message | PendingMessage;
  siblingLeafNodeIds: Message['id'][];
  siblingCurrIdx: number;
  id?: string;
  scrollToBottom: (requiresNearBottom: boolean) => void;
  onRegenerateMessage(msg: Message): void;
  onEditMessage(msg: Message, content: string): void;
  onChangeSibling(sibling: Message['id']): void;
  isPending?: boolean;
}) {
  const { viewingConversation, replaceMessageAndGenerate, config } =
    useAppContext();
  const { viewingChat, config } = useAppContext();
  const [editingContent, setEditingContent] = useState<string | null>(null);
  const timings = useMemo(
    () =>
@@ -37,6 +45,8 @@ export default function ChatMessage({
        : null,
    [msg.timings]
  );
  const nextSibling = siblingLeafNodeIds[siblingCurrIdx + 1];
  const prevSibling = siblingLeafNodeIds[siblingCurrIdx - 1];

  // for reasoning model, we split the message into content and thought
  // TODO: implement this as remark/rehype plugin in the future
@@ -64,13 +74,7 @@ export default function ChatMessage({
    return { content: actualContent, thought, isThinking };
  }, [msg]);

  if (!viewingConversation) return null;

  const regenerate = async () => {
    replaceMessageAndGenerate(viewingConversation.id, msg.id, undefined, () =>
      scrollToBottom(true)
    );
  };
  if (!viewingChat) return null;

  return (
    <div className="group" id={id}>
@@ -92,7 +96,7 @@ export default function ChatMessage({
            <>
              <textarea
                dir="auto"
                className="textarea textarea-bordered bg-base-100 text-base-content w-[calc(90vw-8em)] lg:w-96"
                className="textarea textarea-bordered bg-base-100 text-base-content max-w-2xl w-[calc(90vw-8em)] h-24"
                value={editingContent}
                onChange={(e) => setEditingContent(e.target.value)}
              ></textarea>
@@ -105,13 +109,12 @@ export default function ChatMessage({
              </button>
              <button
                className="btn mt-2"
                onClick={() =>
                  replaceMessageAndGenerate(
                    viewingConversation.id,
                    msg.id,
                    editingContent
                  )
                }
                onClick={() => {
                  if (msg.content !== null) {
                    setEditingContent(null);
                    onEditMessage(msg as Message, editingContent);
                  }
                }}
              >
                Submit
              </button>
@@ -149,11 +152,46 @@ export default function ChatMessage({
              )}
            </summary>
            <div className="collapse-content">
              <MarkdownDisplay content={thought} />
              <MarkdownDisplay
                content={thought}
                isGenerating={isPending}
              />
            </div>
          </details>
        )}
        <MarkdownDisplay content={content} />

        {msg.extra && msg.extra.length > 0 && (
          <details
            className={classNames({
              'collapse collapse-arrow mb-4 bg-base-200': true,
              'bg-opacity-10': msg.role !== 'assistant',
            })}
          >
            <summary className="collapse-title">
              Extra content
            </summary>
            <div className="collapse-content">
              {msg.extra.map(
                (extra, i) =>
                  extra.type === 'textFile' ? (
                    <div key={extra.name}>
                      <b>{extra.name}</b>
                      <pre>{extra.content}</pre>
                    </div>
                  ) : extra.type === 'context' ? (
                    <div key={i}>
                      <pre>{extra.content}</pre>
                    </div>
                  ) : null // TODO: support other extra types
              )}
            </div>
          </details>
        )}

        <MarkdownDisplay
          content={content}
          isGenerating={isPending}
        />
      </div>
    </>
  )}
@@ -190,10 +228,35 @@ export default function ChatMessage({
      {msg.content !== null && (
        <div
          className={classNames({
            'mx-4 mt-2 mb-2': true,
            'text-right': msg.role === 'user',
            'flex items-center gap-2 mx-4 mt-2 mb-2': true,
            'flex-row-reverse': msg.role === 'user',
          })}
        >
          {siblingLeafNodeIds && siblingLeafNodeIds.length > 1 && (
            <div className="flex gap-1 items-center opacity-60 text-sm">
              <button
                className={classNames({
                  'btn btn-sm btn-ghost p-1': true,
                  'opacity-20': !prevSibling,
                })}
                onClick={() => prevSibling && onChangeSibling(prevSibling)}
              >
                <ChevronLeftIcon className="h-4 w-4" />
              </button>
              <span>
                {siblingCurrIdx + 1} / {siblingLeafNodeIds.length}
              </span>
              <button
                className={classNames({
                  'btn btn-sm btn-ghost p-1': true,
                  'opacity-20': !nextSibling,
                })}
                onClick={() => nextSibling && onChangeSibling(nextSibling)}
              >
                <ChevronRightIcon className="h-4 w-4" />
              </button>
            </div>
          )}
          {/* user message */}
          {msg.role === 'user' && (
            <button
@@ -210,18 +273,22 @@ export default function ChatMessage({
          {!isPending && (
            <button
              className="badge btn-mini show-on-hover mr-2"
              onClick={regenerate}
              onClick={() => {
                if (msg.content !== null) {
                  onRegenerateMessage(msg as Message);
                }
              }}
              disabled={msg.content === null}
            >
              🔄 Regenerate
            </button>
          )}
          <CopyButton
            className="badge btn-mini show-on-hover mr-2"
            content={msg.content}
          />
        </>
      )}
      <CopyButton
        className="badge btn-mini show-on-hover mr-2"
        content={msg.content}
      />
    </div>
  )}
</div>
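The hunk above elides the body of the thought/content split (the useMemo that returns { content: actualContent, thought, isThinking }). A rough sketch of what such a split can look like, assuming the model wraps its reasoning in <think>...</think> tags as DeepSeek-R1 does; the tag name and the partial-tag handling here are assumptions, not taken from the diff:

    // Hedged sketch: separate '<think>...</think>' reasoning from the answer.
    function splitThought(content: string) {
      const match = content.match(/<think>([\s\S]*?)(<\/think>|$)/);
      if (!match) return { content, thought: null, isThinking: false };
      const thought = match[1];
      const isThinking = match[2] === ''; // closing tag not generated yet
      const actualContent = content.replace(match[0], '').trim();
      return { content: actualContent, thought, isThinking };
    }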
examples/server/webui/src/components/ChatScreen.tsx
@@ -1,123 +1,265 @@
import { useEffect, useRef, useState } from 'react';
import { useAppContext } from '../utils/app.context';
import StorageUtils from '../utils/storage';
import { useNavigate } from 'react-router';
import { useEffect, useMemo, useRef, useState } from 'react';
import { CallbackGeneratedChunk, useAppContext } from '../utils/app.context';
import ChatMessage from './ChatMessage';
import { PendingMessage } from '../utils/types';
import { CanvasType, Message, PendingMessage } from '../utils/types';
import { classNames, throttle } from '../utils/misc';
import CanvasPyInterpreter from './CanvasPyInterpreter';
import StorageUtils from '../utils/storage';
import { useVSCodeContext } from '../utils/llama-vscode';

/**
 * A message display is a message node with additional information for rendering.
 * For example, siblings of the message node are stored as their last node (aka leaf node).
 */
export interface MessageDisplay {
  msg: Message | PendingMessage;
  siblingLeafNodeIds: Message['id'][];
  siblingCurrIdx: number;
  isPending?: boolean;
}

function getListMessageDisplay(
  msgs: Readonly<Message[]>,
  leafNodeId: Message['id']
): MessageDisplay[] {
  const currNodes = StorageUtils.filterByLeafNodeId(msgs, leafNodeId, true);
  const res: MessageDisplay[] = [];
  const nodeMap = new Map<Message['id'], Message>();
  for (const msg of msgs) {
    nodeMap.set(msg.id, msg);
  }
  // find leaf node from a message node
  const findLeafNode = (msgId: Message['id']): Message['id'] => {
    let currNode: Message | undefined = nodeMap.get(msgId);
    while (currNode) {
      if (currNode.children.length === 0) break;
      currNode = nodeMap.get(currNode.children.at(-1) ?? -1);
    }
    return currNode?.id ?? -1;
  };
  // traverse the current nodes
  for (const msg of currNodes) {
    const parentNode = nodeMap.get(msg.parent ?? -1);
    if (!parentNode) continue;
    const siblings = parentNode.children;
    if (msg.type !== 'root') {
      res.push({
        msg,
        siblingLeafNodeIds: siblings.map(findLeafNode),
        siblingCurrIdx: siblings.indexOf(msg.id),
      });
    }
  }
  return res;
}
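A worked example of the traversal above, as a sketch (field values are reduced to what the function reads; filterByLeafNodeId is assumed to return the root-to-leaf path):

    // Conversation where the assistant reply was regenerated once:
    //   root (id 0, children [1])
    //   └─ user msg (id 1, children [2, 3])
    //      ├─ assistant msg (id 2, children [])  <- first answer
    //      └─ assistant msg (id 3, children [])  <- regenerated answer
    // With leafNodeId = 3, currNodes is assumed to be [root, 1, 3]. For msg 3,
    // the parent's children are [2, 3], so siblingLeafNodeIds = [2, 3] (each
    // already a leaf) and siblingCurrIdx = 1; the UI renders "2 / 2" and the
    // left chevron switches the view to leaf 2 via onChangeSibling(2).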
const scrollToBottom = throttle(
  (requiresNearBottom: boolean, delay: number = 80) => {
    const mainScrollElem = document.getElementById('main-scroll');
    if (!mainScrollElem) return;
    const spaceToBottom =
      mainScrollElem.scrollHeight -
      mainScrollElem.scrollTop -
      mainScrollElem.clientHeight;
    if (!requiresNearBottom || spaceToBottom < 50) {
      setTimeout(
        () => mainScrollElem.scrollTo({ top: mainScrollElem.scrollHeight }),
        delay
      );
    }
  },
  80
);
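throttle is imported from '../utils/misc' and is not shown in this diff; a minimal leading-edge implementation compatible with the call above (an assumed sketch, not the actual helper):

    export function throttle<T extends unknown[]>(
      callback: (...args: T) => void,
      delay: number
    ): (...args: T) => void {
      let isWaiting = false;
      return (...args: T) => {
        if (isWaiting) return;
        callback(...args); // fire immediately, then ignore calls for delay ms
        isWaiting = true;
        setTimeout(() => {
          isWaiting = false;
        }, delay);
      };
    }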
export default function ChatScreen() {
  const {
    viewingConversation,
    viewingChat,
    sendMessage,
    isGenerating,
    stopGenerating,
    pendingMessages,
    canvasData,
    replaceMessageAndGenerate,
  } = useAppContext();
  const [inputMsg, setInputMsg] = useState('');
  const containerRef = useRef<HTMLDivElement>(null);
  const navigate = useNavigate();
  const inputRef = useRef<HTMLTextAreaElement>(null);

  const currConvId = viewingConversation?.id ?? '';
  const pendingMsg: PendingMessage | undefined = pendingMessages[currConvId];
  const { extraContext, clearExtraContext } = useVSCodeContext(
    inputRef,
    setInputMsg
  );
  // TODO: improve this when we have "upload file" feature
  const currExtra: Message['extra'] = extraContext ? [extraContext] : undefined;

  const scrollToBottom = (requiresNearBottom: boolean) => {
    if (!containerRef.current) return;
    const msgListElem = containerRef.current;
    const spaceToBottom =
      msgListElem.scrollHeight -
      msgListElem.scrollTop -
      msgListElem.clientHeight;
    if (!requiresNearBottom || spaceToBottom < 50) {
      setTimeout(
        () => msgListElem.scrollTo({ top: msgListElem.scrollHeight }),
        1
      );
    }
  // keep track of leaf node for rendering
  const [currNodeId, setCurrNodeId] = useState<number>(-1);
  const messages: MessageDisplay[] = useMemo(() => {
    if (!viewingChat) return [];
    else return getListMessageDisplay(viewingChat.messages, currNodeId);
  }, [currNodeId, viewingChat]);

  const currConvId = viewingChat?.conv.id ?? null;
  const pendingMsg: PendingMessage | undefined =
    pendingMessages[currConvId ?? ''];

  useEffect(() => {
    // reset to latest node when conversation changes
    setCurrNodeId(-1);
    // scroll to bottom when conversation changes
    scrollToBottom(false, 1);
  }, [currConvId]);

  const onChunk: CallbackGeneratedChunk = (currLeafNodeId?: Message['id']) => {
    if (currLeafNodeId) {
      setCurrNodeId(currLeafNodeId);
    }
    scrollToBottom(true);
  };

  // scroll to bottom when conversation changes
  useEffect(() => {
    scrollToBottom(false);
  }, [viewingConversation?.id]);

  const sendNewMessage = async () => {
    if (inputMsg.trim().length === 0 || isGenerating(currConvId)) return;
    const convId = viewingConversation?.id ?? StorageUtils.getNewConvId();
    if (inputMsg.trim().length === 0 || isGenerating(currConvId ?? '')) return;
    const lastInpMsg = inputMsg;
    setInputMsg('');
    if (!viewingConversation) {
      // if user is creating a new conversation, redirect to the new conversation
      navigate(`/chat/${convId}`);
    }
    scrollToBottom(false);
    // auto scroll as message is being generated
    const onChunk = () => scrollToBottom(true);
    if (!(await sendMessage(convId, inputMsg, onChunk))) {
    setCurrNodeId(-1);
    // get the last message node
    const lastMsgNodeId = messages.at(-1)?.msg.id ?? null;
    if (
      !(await sendMessage(
        currConvId,
        lastMsgNodeId,
        inputMsg,
        currExtra,
        onChunk
      ))
    ) {
      // restore the input message if failed
      setInputMsg(lastInpMsg);
    }
    // OK
    clearExtraContext();
  };

  const handleEditMessage = async (msg: Message, content: string) => {
    if (!viewingChat) return;
    setCurrNodeId(msg.id);
    scrollToBottom(false);
    await replaceMessageAndGenerate(
      viewingChat.conv.id,
      msg.parent,
      content,
      msg.extra,
      onChunk
    );
    setCurrNodeId(-1);
    scrollToBottom(false);
  };

  const handleRegenerateMessage = async (msg: Message) => {
    if (!viewingChat) return;
    setCurrNodeId(msg.parent);
    scrollToBottom(false);
    await replaceMessageAndGenerate(
      viewingChat.conv.id,
      msg.parent,
      null,
      msg.extra,
      onChunk
    );
    setCurrNodeId(-1);
    scrollToBottom(false);
  };

  const hasCanvas = !!canvasData;

  // due to some timing issues of StorageUtils.appendMsg(), we need to make sure the pendingMsg is not duplicated upon rendering (i.e. appears once in the saved conversation and once in the pendingMsg)
  const pendingMsgDisplay: MessageDisplay[] =
    pendingMsg && messages.at(-1)?.msg.id !== pendingMsg.id
      ? [
          {
            msg: pendingMsg,
            siblingLeafNodeIds: [],
            siblingCurrIdx: 0,
            isPending: true,
          },
        ]
      : [];

  return (
    <>
      {/* chat messages */}
      <div
        className={classNames({
          'grid lg:gap-8 grow transition-[300ms]': true,
          'grid-cols-[1fr_0fr] lg:grid-cols-[1fr_1fr]': hasCanvas, // adapted for mobile
          'grid-cols-[1fr_0fr]': !hasCanvas,
        })}
      >
        <div
          id="messages-list"
          className="flex flex-col grow overflow-y-auto"
          ref={containerRef}
          className={classNames({
            'flex flex-col w-full max-w-[900px] mx-auto': true,
            'hidden lg:flex': hasCanvas, // adapted for mobile
            flex: !hasCanvas,
          })}
        >
          <div className="mt-auto flex justify-center">
            {/* placeholder to shift the message to the bottom */}
            {viewingConversation ? '' : 'Send a message to start'}
          {/* chat messages */}
          <div id="messages-list" className="grow">
            <div className="mt-auto flex justify-center">
              {/* placeholder to shift the message to the bottom */}
              {viewingChat ? '' : 'Send a message to start'}
            </div>
            {[...messages, ...pendingMsgDisplay].map((msg) => (
              <ChatMessage
                key={msg.msg.id}
                msg={msg.msg}
                siblingLeafNodeIds={msg.siblingLeafNodeIds}
                siblingCurrIdx={msg.siblingCurrIdx}
                onRegenerateMessage={handleRegenerateMessage}
                onEditMessage={handleEditMessage}
                onChangeSibling={setCurrNodeId}
              />
            ))}
          </div>
          {viewingConversation?.messages.map((msg) => (
            <ChatMessage key={msg.id} msg={msg} scrollToBottom={scrollToBottom} />
          ))}

          {pendingMsg && (
            <ChatMessage
              msg={pendingMsg}
              scrollToBottom={scrollToBottom}
              isPending
              id="pending-msg"
            />
          {/* chat input */}
          <div className="flex flex-row items-center pt-8 pb-6 sticky bottom-0 bg-base-100">
            <textarea
              className="textarea textarea-bordered w-full"
              placeholder="Type a message (Shift+Enter to add a new line)"
              ref={inputRef}
              value={inputMsg}
              onChange={(e) => setInputMsg(e.target.value)}
              onKeyDown={(e) => {
                if (e.key === 'Enter' && e.shiftKey) return;
                if (e.key === 'Enter' && !e.shiftKey) {
                  e.preventDefault();
                  sendNewMessage();
                }
              }}
              id="msg-input"
              dir="auto"
            ></textarea>
            {isGenerating(currConvId ?? '') ? (
              <button
                className="btn btn-neutral ml-2"
                onClick={() => stopGenerating(currConvId ?? '')}
              >
                Stop
              </button>
            ) : (
              <button
                className="btn btn-primary ml-2"
                onClick={sendNewMessage}
                disabled={inputMsg.trim().length === 0}
              >
                Send
              </button>
            )}
          </div>
        </div>
        <div className="w-full sticky top-[7em] h-[calc(100vh-9em)]">
          {canvasData?.type === CanvasType.PY_INTERPRETER && (
            <CanvasPyInterpreter />
          )}
        </div>

        {/* chat input */}
        <div className="flex flex-row items-center mt-8 mb-6">
          <textarea
            className="textarea textarea-bordered w-full"
            placeholder="Type a message (Shift+Enter to add a new line)"
            value={inputMsg}
            onChange={(e) => setInputMsg(e.target.value)}
            onKeyDown={(e) => {
              if (e.key === 'Enter' && e.shiftKey) return;
              if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                sendNewMessage();
              }
            }}
            id="msg-input"
            dir="auto"
          ></textarea>
          {isGenerating(currConvId) ? (
            <button
              className="btn btn-neutral ml-2"
              onClick={() => stopGenerating(currConvId)}
            >
              Stop
            </button>
          ) : (
            <button
              className="btn btn-primary ml-2"
              onClick={sendNewMessage}
              disabled={inputMsg.trim().length === 0}
            >
              Send
            </button>
          )}
        </div>
      </>
      </div>
    );
}
examples/server/webui/src/components/Header.tsx
@@ -5,12 +5,11 @@ import { classNames } from '../utils/misc';
import daisyuiThemes from 'daisyui/src/theming/themes';
import { THEMES } from '../Config';
import { useNavigate } from 'react-router';
import SettingDialog from './SettingDialog';

export default function Header() {
  const navigate = useNavigate();
  const [selectedTheme, setSelectedTheme] = useState(StorageUtils.getTheme());
  const [showSettingDialog, setShowSettingDialog] = useState(false);
  const { setShowSettings } = useAppContext();

  const setTheme = (theme: string) => {
    StorageUtils.setTheme(theme);
@@ -26,12 +25,12 @@ export default function Header() {
    );
  }, [selectedTheme]);

  const { isGenerating, viewingConversation } = useAppContext();
  const isCurrConvGenerating = isGenerating(viewingConversation?.id ?? '');
  const { isGenerating, viewingChat } = useAppContext();
  const isCurrConvGenerating = isGenerating(viewingChat?.conv.id ?? '');

  const removeConversation = () => {
    if (isCurrConvGenerating || !viewingConversation) return;
    const convId = viewingConversation.id;
    if (isCurrConvGenerating || !viewingChat) return;
    const convId = viewingChat?.conv.id;
    if (window.confirm('Are you sure to delete this conversation?')) {
      StorageUtils.remove(convId);
      navigate('/');
@@ -39,9 +38,9 @@ export default function Header() {
  };

  const downloadConversation = () => {
    if (isCurrConvGenerating || !viewingConversation) return;
    const convId = viewingConversation.id;
    const conversationJson = JSON.stringify(viewingConversation, null, 2);
    if (isCurrConvGenerating || !viewingChat) return;
    const convId = viewingChat?.conv.id;
    const conversationJson = JSON.stringify(viewingChat, null, 2);
    const blob = new Blob([conversationJson], { type: 'application/json' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
@@ -54,7 +53,7 @@ export default function Header() {
  };

  return (
    <div className="flex flex-row items-center mt-6 mb-6">
    <div className="flex flex-row items-center pt-6 pb-6 sticky top-0 z-10 bg-base-100">
      {/* open sidebar button */}
      <label htmlFor="toggle-drawer" className="btn btn-ghost lg:hidden">
        <svg
@@ -76,40 +75,43 @@ export default function Header() {

      {/* action buttons (top right) */}
      <div className="flex items-center">
        <div v-if="messages.length > 0" className="dropdown dropdown-end">
          {/* "..." button */}
          <button
            tabIndex={0}
            role="button"
            className="btn m-1"
            disabled={isCurrConvGenerating}
          >
            <svg
              xmlns="http://www.w3.org/2000/svg"
              width="16"
              height="16"
              fill="currentColor"
              className="bi bi-three-dots-vertical"
              viewBox="0 0 16 16"
        {viewingChat && (
          <div className="dropdown dropdown-end">
            {/* "..." button */}
            <button
              tabIndex={0}
              role="button"
              className="btn m-1"
              disabled={isCurrConvGenerating}
            >
              <path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0m0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0m0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0" />
            </svg>
          </button>
          {/* dropdown menu */}
          <ul
            tabIndex={0}
            className="dropdown-content menu bg-base-100 rounded-box z-[1] w-52 p-2 shadow"
          >
            <li onClick={downloadConversation}>
              <a>Download</a>
            </li>
            <li className="text-error" onClick={removeConversation}>
              <a>Delete</a>
            </li>
          </ul>
        </div>
              <svg
                xmlns="http://www.w3.org/2000/svg"
                width="16"
                height="16"
                fill="currentColor"
                className="bi bi-three-dots-vertical"
                viewBox="0 0 16 16"
              >
                <path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0m0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0m0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0" />
              </svg>
            </button>
            {/* dropdown menu */}
            <ul
              tabIndex={0}
              className="dropdown-content menu bg-base-100 rounded-box z-[1] w-52 p-2 shadow"
            >
              <li onClick={downloadConversation}>
                <a>Download</a>
              </li>
              <li className="text-error" onClick={removeConversation}>
                <a>Delete</a>
              </li>
            </ul>
          </div>
        )}

        <div className="tooltip tooltip-bottom" data-tip="Settings">
          <button className="btn" onClick={() => setShowSettingDialog(true)}>
          <button className="btn" onClick={() => setShowSettings(true)}>
            {/* settings button */}
            <svg
              xmlns="http://www.w3.org/2000/svg"
@@ -172,11 +174,6 @@ export default function Header() {
          </div>
        </div>
      </div>

      <SettingDialog
        show={showSettingDialog}
        onClose={() => setShowSettingDialog(false)}
      />
    </div>
  );
}
examples/server/webui/src/components/MarkdownDisplay.tsx
@@ -9,8 +9,16 @@ import 'katex/dist/katex.min.css';
import { classNames, copyStr } from '../utils/misc';
import { ElementContent, Root } from 'hast';
import { visit } from 'unist-util-visit';
import { useAppContext } from '../utils/app.context';
import { CanvasType } from '../utils/types';

export default function MarkdownDisplay({ content }: { content: string }) {
export default function MarkdownDisplay({
  content,
  isGenerating,
}: {
  content: string;
  isGenerating?: boolean;
}) {
  const preprocessedContent = useMemo(
    () => preprocessLaTeX(content),
    [content]
@@ -21,8 +29,13 @@ export default function MarkdownDisplay({ content }: { content: string }) {
      rehypePlugins={[rehypeHightlight, rehypeKatex, rehypeCustomCopyButton]}
      components={{
        button: (props) => (
          <CopyCodeButton {...props} origContent={preprocessedContent} />
          <CodeBlockButtons
            {...props}
            isGenerating={isGenerating}
            origContent={preprocessedContent}
          />
        ),
        // note: do not use "pre", "p" or other basic html elements here, it will cause the node to re-render when the message is being generated (this should be a bug with react-markdown, not sure how to fix it)
      }}
    >
      {preprocessedContent}
@@ -30,11 +43,12 @@ export default function MarkdownDisplay({ content }: { content: string }) {
  );
}

const CopyCodeButton: React.ElementType<
const CodeBlockButtons: React.ElementType<
  React.ClassAttributes<HTMLButtonElement> &
    React.HTMLAttributes<HTMLButtonElement> &
    ExtraProps & { origContent: string }
> = ({ node, origContent }) => {
    ExtraProps & { origContent: string; isGenerating?: boolean }
> = ({ node, origContent, isGenerating }) => {
  const { config } = useAppContext();
  const startOffset = node?.position?.start.offset ?? 0;
  const endOffset = node?.position?.end.offset ?? 0;

@@ -47,14 +61,33 @@ const CopyCodeButton: React.ElementType<
    [origContent, startOffset, endOffset]
  );

  const codeLanguage = useMemo(
    () =>
      origContent
        .substring(startOffset, startOffset + 10)
        .match(/^```([^\n]+)\n/)?.[1] ?? '',
    [origContent, startOffset]
  );

  const canRunCode =
    !isGenerating &&
    config.pyIntepreterEnabled &&
    codeLanguage.startsWith('py');

  return (
    <div
      className={classNames({
        'text-right sticky top-4 mb-2 mr-2 h-0': true,
        'text-right sticky top-[7em] mb-2 mr-2 h-0': true,
        'display-none': !node?.position,
      })}
    >
      <CopyButton className="badge btn-mini" content={copiedContent} />
      {canRunCode && (
        <RunPyCodeButton
          className="badge btn-mini ml-2"
          content={copiedContent}
        />
      )}
    </div>
  );
};
@@ -81,6 +114,31 @@ export const CopyButton = ({
  );
};

export const RunPyCodeButton = ({
  content,
  className,
}: {
  content: string;
  className?: string;
}) => {
  const { setCanvasData } = useAppContext();
  return (
    <>
      <button
        className={className}
        onClick={() =>
          setCanvasData({
            type: CanvasType.PY_INTERPRETER,
            content,
          })
        }
      >
        ▶️ Run
      </button>
    </>
  );
};

/**
 * This injects the "button" element before each "pre" element.
 * The actual button will be replaced with a react component in the MarkdownDisplay.
@@ -94,9 +152,7 @@ function rehypeCustomCopyButton() {
    // replace current node
    preNode.properties.visited = 'true';
    node.tagName = 'div';
    node.properties = {
      className: 'relative my-4',
    };
    node.properties = {};
    // add node for button
    const btnNode: ElementContent = {
      type: 'element',
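rehypeCustomCopyButton appears only partially above (the hunk starts inside its visitor). For orientation, a sketch of the plugin skeleton around that fragment, assuming the usual rehype plugin shape and the visit helper imported above; the visited guard and the btnNode details are reconstructed, not verbatim:

    // Hedged sketch of the plugin shape around the fragment above.
    function rehypeCustomCopyButtonSketch() {
      return function (tree: Root) {
        visit(tree, 'element', (node) => {
          if (node.tagName !== 'pre' || node.properties.visited) return;
          const preNode = { ...node }; // keep the original <pre>
          // replace current node (as in the hunk): mark visited, unwrap to a div
          preNode.properties = { ...node.properties, visited: 'true' };
          node.tagName = 'div';
          node.properties = {};
          // add node for button: a placeholder <button> that MarkdownDisplay
          // later replaces with <CodeBlockButtons>
          const btnNode = {
            type: 'element',
            tagName: 'button',
            properties: {},
            children: [],
          } as ElementContent;
          node.children = [btnNode, preNode as ElementContent];
        });
      };
    }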
examples/server/webui/src/components/SettingDialog.tsx
@@ -3,17 +3,27 @@ import { useAppContext } from '../utils/app.context';
import { CONFIG_DEFAULT, CONFIG_INFO } from '../Config';
import { isDev } from '../Config';
import StorageUtils from '../utils/storage';
import { classNames, isBoolean, isNumeric, isString } from '../utils/misc';
import {
  BeakerIcon,
  ChatBubbleOvalLeftEllipsisIcon,
  Cog6ToothIcon,
  FunnelIcon,
  HandRaisedIcon,
  SquaresPlusIcon,
} from '@heroicons/react/24/outline';
import { OpenInNewTab } from '../utils/common';

type SettKey = keyof typeof CONFIG_DEFAULT;

const COMMON_SAMPLER_KEYS: SettKey[] = [
const BASIC_KEYS: SettKey[] = [
  'temperature',
  'top_k',
  'top_p',
  'min_p',
  'max_tokens',
];
const OTHER_SAMPLER_KEYS: SettKey[] = [
const SAMPLER_KEYS: SettKey[] = [
  'dynatemp_range',
  'dynatemp_exponent',
  'typical_p',
@@ -31,6 +41,223 @@ const PENALTY_KEYS: SettKey[] = [
  'dry_penalty_last_n',
];

enum SettingInputType {
  SHORT_INPUT,
  LONG_INPUT,
  CHECKBOX,
  CUSTOM,
}

interface SettingFieldInput {
  type: Exclude<SettingInputType, SettingInputType.CUSTOM>;
  label: string | React.ReactElement;
  help?: string | React.ReactElement;
  key: SettKey;
}

interface SettingFieldCustom {
  type: SettingInputType.CUSTOM;
  key: SettKey;
  component:
    | string
    | React.FC<{
        value: string | boolean | number;
        onChange: (value: string) => void;
      }>;
}

interface SettingSection {
  title: React.ReactElement;
  fields: (SettingFieldInput | SettingFieldCustom)[];
}

const ICON_CLASSNAME = 'w-4 h-4 mr-1 inline';

const SETTING_SECTIONS: SettingSection[] = [
  {
    title: (
      <>
        <Cog6ToothIcon className={ICON_CLASSNAME} />
        General
      </>
    ),
    fields: [
      {
        type: SettingInputType.SHORT_INPUT,
        label: 'API Key',
        key: 'apiKey',
      },
      {
        type: SettingInputType.LONG_INPUT,
        label: 'System Message (will be disabled if left empty)',
        key: 'systemMessage',
      },
      ...BASIC_KEYS.map(
        (key) =>
          ({
            type: SettingInputType.SHORT_INPUT,
            label: key,
            key,
          }) as SettingFieldInput
      ),
    ],
  },
  {
    title: (
      <>
        <FunnelIcon className={ICON_CLASSNAME} />
        Samplers
      </>
    ),
    fields: [
      {
        type: SettingInputType.SHORT_INPUT,
        label: 'Samplers queue',
        key: 'samplers',
      },
      ...SAMPLER_KEYS.map(
        (key) =>
          ({
            type: SettingInputType.SHORT_INPUT,
            label: key,
            key,
          }) as SettingFieldInput
      ),
    ],
  },
  {
    title: (
      <>
        <HandRaisedIcon className={ICON_CLASSNAME} />
        Penalties
      </>
    ),
    fields: PENALTY_KEYS.map((key) => ({
      type: SettingInputType.SHORT_INPUT,
      label: key,
      key,
    })),
  },
  {
    title: (
      <>
        <ChatBubbleOvalLeftEllipsisIcon className={ICON_CLASSNAME} />
        Reasoning
      </>
    ),
    fields: [
      {
        type: SettingInputType.CHECKBOX,
        label: 'Expand thought process by default for generating message',
        key: 'showThoughtInProgress',
      },
      {
        type: SettingInputType.CHECKBOX,
        label:
          'Exclude thought process when sending request to API (Recommended for DeepSeek-R1)',
        key: 'excludeThoughtOnReq',
      },
    ],
  },
  {
    title: (
      <>
        <SquaresPlusIcon className={ICON_CLASSNAME} />
        Advanced
      </>
    ),
    fields: [
      {
        type: SettingInputType.CUSTOM,
        key: 'custom', // dummy key, won't be used
        component: () => {
          const debugImportDemoConv = async () => {
            const res = await fetch('/demo-conversation.json');
            const demoConv = await res.json();
            StorageUtils.remove(demoConv.id);
            for (const msg of demoConv.messages) {
              StorageUtils.appendMsg(demoConv.id, msg);
            }
          };
          return (
            <button className="btn" onClick={debugImportDemoConv}>
              (debug) Import demo conversation
            </button>
          );
        },
      },
      {
        type: SettingInputType.CHECKBOX,
        label: 'Show tokens per second',
        key: 'showTokensPerSecond',
      },
      {
        type: SettingInputType.LONG_INPUT,
        label: (
          <>
            Custom JSON config (For more info, refer to{' '}
            <OpenInNewTab href="https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md">
              server documentation
            </OpenInNewTab>
            )
          </>
        ),
        key: 'custom',
      },
    ],
  },
  {
    title: (
      <>
        <BeakerIcon className={ICON_CLASSNAME} />
        Experimental
      </>
    ),
    fields: [
      {
        type: SettingInputType.CUSTOM,
        key: 'custom', // dummy key, won't be used
        component: () => (
          <>
            <p className="mb-8">
              Experimental features are not guaranteed to work correctly.
              <br />
              <br />
              If you encounter any problems, create a{' '}
              <OpenInNewTab href="https://github.com/ggerganov/llama.cpp/issues/new?template=019-bug-misc.yml">
                Bug (misc.)
              </OpenInNewTab>{' '}
              report on Github. Please also specify <b>webui/experimental</b> on
              the report title and include screenshots.
              <br />
              <br />
              Some features may require packages downloaded from CDN, so they
              need internet connection.
            </p>
          </>
        ),
      },
      {
        type: SettingInputType.CHECKBOX,
        label: (
          <>
            <b>Enable Python interpreter</b>
            <br />
            <small className="text-xs">
              This feature uses{' '}
              <OpenInNewTab href="https://pyodide.org">pyodide</OpenInNewTab>,
              downloaded from CDN. To use this feature, ask the LLM to generate
              python code inside a markdown code block. You will see a "Run"
              button on the code block, near the "Copy" button.
            </small>
          </>
        ),
        key: 'pyIntepreterEnabled',
      },
    ],
  },
];

export default function SettingDialog({
  show,
  onClose,
@@ -39,6 +266,7 @@ export default function SettingDialog({
  onClose: () => void;
}) {
  const { config, saveConfig } = useAppContext();
  const [sectionIdx, setSectionIdx] = useState(0);

  // clone the config object to prevent direct mutation
  const [localConfig, setLocalConfig] = useState<typeof CONFIG_DEFAULT>(
@@ -52,206 +280,148 @@ export default function SettingDialog({
  };

  const handleSave = () => {
    saveConfig(localConfig);
    // copy the local config to prevent direct mutation
    const newConfig: typeof CONFIG_DEFAULT = JSON.parse(
      JSON.stringify(localConfig)
    );
    // validate the config
    for (const key in newConfig) {
      const value = newConfig[key as SettKey];
      const mustBeBoolean = isBoolean(CONFIG_DEFAULT[key as SettKey]);
      const mustBeString = isString(CONFIG_DEFAULT[key as SettKey]);
      const mustBeNumeric = isNumeric(CONFIG_DEFAULT[key as SettKey]);
      if (mustBeString) {
        if (!isString(value)) {
          alert(`Value for ${key} must be string`);
          return;
        }
      } else if (mustBeNumeric) {
        const trimedValue = value.toString().trim();
        const numVal = Number(trimedValue);
        if (isNaN(numVal) || !isNumeric(numVal) || trimedValue.length === 0) {
          alert(`Value for ${key} must be numeric`);
          return;
        }
        // force conversion to number
        // @ts-expect-error this is safe
        newConfig[key] = numVal;
      } else if (mustBeBoolean) {
        if (!isBoolean(value)) {
          alert(`Value for ${key} must be boolean`);
          return;
        }
      } else {
        console.error(`Unknown default type for key ${key}`);
      }
    }
    if (isDev) console.log('Saving config', newConfig);
    saveConfig(newConfig);
    onClose();
  };

  const debugImportDemoConv = async () => {
    const res = await fetch('/demo-conversation.json');
    const demoConv = await res.json();
    StorageUtils.remove(demoConv.id);
    for (const msg of demoConv.messages) {
      StorageUtils.appendMsg(demoConv.id, msg);
    }
    onClose();
  const onChange = (key: SettKey) => (value: string | boolean) => {
    // note: we do not perform validation here, because we may get incomplete value as user is still typing it
    setLocalConfig({ ...localConfig, [key]: value });
  };

  return (
    <dialog className={`modal ${show ? 'modal-open' : ''}`}>
      <div className="modal-box">
    <dialog className={classNames({ modal: true, 'modal-open': show })}>
      <div className="modal-box w-11/12 max-w-3xl">
        <h3 className="text-lg font-bold mb-6">Settings</h3>
        <div className="h-[calc(90vh-12rem)] overflow-y-auto">
          <p className="opacity-40 mb-6">
            Settings below are saved in browser's localStorage
          </p>

          <SettingsModalShortInput
            configKey="apiKey"
            configDefault={CONFIG_DEFAULT}
            value={localConfig.apiKey}
            onChange={(value) =>
              setLocalConfig({ ...localConfig, apiKey: value })
            }
          />

          <label className="form-control mb-2">
            <div className="label">
              System Message (will be disabled if left empty)
            </div>
            <textarea
              className="textarea textarea-bordered h-24"
              placeholder={`Default: ${CONFIG_DEFAULT.systemMessage}`}
              value={localConfig.systemMessage}
              onChange={(e) =>
                setLocalConfig({
                  ...localConfig,
                  systemMessage: e.target.value,
                })
              }
            />
          </label>

          {COMMON_SAMPLER_KEYS.map((key) => (
            <SettingsModalShortInput
              key={key}
              configKey={key}
              configDefault={CONFIG_DEFAULT}
              value={localConfig[key]}
              onChange={(value) =>
                setLocalConfig({ ...localConfig, [key]: value })
              }
            />
          ))}

          <details className="collapse collapse-arrow bg-base-200 mb-2 overflow-visible">
            <summary className="collapse-title font-bold">
              Other sampler settings
            </summary>
            <div className="collapse-content">
              <SettingsModalShortInput
                label="Samplers queue"
                configKey="samplers"
                configDefault={CONFIG_DEFAULT}
                value={localConfig.samplers}
                onChange={(value) =>
                  setLocalConfig({ ...localConfig, samplers: value })
                }
              />
              {OTHER_SAMPLER_KEYS.map((key) => (
                <SettingsModalShortInput
                  key={key}
                  configKey={key}
                  configDefault={CONFIG_DEFAULT}
                  value={localConfig[key]}
                  onChange={(value) =>
                    setLocalConfig({ ...localConfig, [key]: value })
                  }
                />
              ))}
            </div>
          </details>

          <details className="collapse collapse-arrow bg-base-200 mb-2 overflow-visible">
            <summary className="collapse-title font-bold">
              Penalties settings
            </summary>
            <div className="collapse-content">
              {PENALTY_KEYS.map((key) => (
                <SettingsModalShortInput
                  key={key}
                  configKey={key}
                  configDefault={CONFIG_DEFAULT}
                  value={localConfig[key]}
                  onChange={(value) =>
                    setLocalConfig({ ...localConfig, [key]: value })
                  }
                />
              ))}
            </div>
          </details>

          <details className="collapse collapse-arrow bg-base-200 mb-2 overflow-visible">
            <summary className="collapse-title font-bold">
              Reasoning models
            </summary>
            <div className="collapse-content">
              <div className="flex flex-row items-center mb-2">
                <input
                  type="checkbox"
                  className="checkbox"
                  checked={localConfig.showThoughtInProgress}
                  onChange={(e) =>
                    setLocalConfig({
                      ...localConfig,
                      showThoughtInProgress: e.target.checked,
                    })
                  }
                />
                <span className="ml-4">
                  Expand thought process by default for generating message
                </span>
        <div className="flex flex-col md:flex-row h-[calc(90vh-12rem)]">
          {/* Left panel, showing sections - Desktop version */}
          <div className="hidden md:flex flex-col items-stretch pr-4 mr-4 border-r-2 border-base-200">
            {SETTING_SECTIONS.map((section, idx) => (
              <div
                key={idx}
                className={classNames({
                  'btn btn-ghost justify-start font-normal w-44 mb-1': true,
                  'btn-active': sectionIdx === idx,
                })}
                onClick={() => setSectionIdx(idx)}
                dir="auto"
              >
                {section.title}
              </div>
              <div className="flex flex-row items-center mb-2">
                <input
                  type="checkbox"
                  className="checkbox"
                  checked={localConfig.excludeThoughtOnReq}
                  onChange={(e) =>
                    setLocalConfig({
                      ...localConfig,
                      excludeThoughtOnReq: e.target.checked,
                    })
                  }
                />
                <span className="ml-4">
                  Exclude thought process when sending request to API
                  (Recommended for DeepSeek-R1)
                </span>
              </div>
            </div>
          </details>
            ))}
          </div>

          <details className="collapse collapse-arrow bg-base-200 mb-2 overflow-visible">
            <summary className="collapse-title font-bold">
              Advanced config
            </summary>
            <div className="collapse-content">
              {/* this button only shows in dev mode, used to import a demo conversation to test message rendering */}
              {isDev && (
                <div className="flex flex-row items-center mb-2">
                  <button className="btn" onClick={debugImportDemoConv}>
                    (debug) Import demo conversation
                  </button>
                </div>
              )}
              <div className="flex flex-row items-center mb-2">
                <input
                  type="checkbox"
                  className="checkbox"
                  checked={localConfig.showTokensPerSecond}
                  onChange={(e) =>
                    setLocalConfig({
                      ...localConfig,
                      showTokensPerSecond: e.target.checked,
                    })
                  }
                />
                <span className="ml-4">Show tokens per second</span>
              </div>
              <label className="form-control mb-2">
                <div className="label inline">
                  Custom JSON config (For more info, refer to{' '}
                  <a
                    className="underline"
                    href="https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md"
                    target="_blank"
                    rel="noopener noreferrer"
          {/* Left panel, showing sections - Mobile version */}
          <div className="md:hidden flex flex-row gap-2 mb-4">
            <details className="dropdown">
              <summary className="btn bt-sm w-full m-1">
                {SETTING_SECTIONS[sectionIdx].title}
              </summary>
              <ul className="menu dropdown-content bg-base-100 rounded-box z-[1] w-52 p-2 shadow">
                {SETTING_SECTIONS.map((section, idx) => (
                  <div
                    key={idx}
                    className={classNames({
                      'btn btn-ghost justify-start font-normal': true,
                      'btn-active': sectionIdx === idx,
                    })}
                    onClick={() => setSectionIdx(idx)}
                    dir="auto"
                  >
                    server documentation
                  </a>
                  )
                </div>
                <textarea
                  className="textarea textarea-bordered h-24"
                  placeholder='Example: { "mirostat": 1, "min_p": 0.1 }'
                  value={localConfig.custom}
                  onChange={(e) =>
                    setLocalConfig({ ...localConfig, custom: e.target.value })
                  }
                />
              </label>
            </div>
          </details>
                    {section.title}
                  </div>
                ))}
              </ul>
            </details>
          </div>

          {/* Right panel, showing setting fields */}
          <div className="grow overflow-y-auto px-4">
            {SETTING_SECTIONS[sectionIdx].fields.map((field, idx) => {
              const key = `${sectionIdx}-${idx}`;
              if (field.type === SettingInputType.SHORT_INPUT) {
                return (
                  <SettingsModalShortInput
                    key={key}
                    configKey={field.key}
                    value={localConfig[field.key]}
                    onChange={onChange(field.key)}
                    label={field.label as string}
                  />
                );
              } else if (field.type === SettingInputType.LONG_INPUT) {
                return (
                  <SettingsModalLongInput
                    key={key}
                    configKey={field.key}
                    value={localConfig[field.key].toString()}
                    onChange={onChange(field.key)}
                    label={field.label as string}
                  />
                );
              } else if (field.type === SettingInputType.CHECKBOX) {
                return (
                  <SettingsModalCheckbox
                    key={key}
                    configKey={field.key}
                    value={!!localConfig[field.key]}
                    onChange={onChange(field.key)}
                    label={field.label as string}
                  />
                );
              } else if (field.type === SettingInputType.CUSTOM) {
                return (
                  <div key={key} className="mb-2">
                    {typeof field.component === 'string'
                      ? field.component
                      : field.component({
                          value: localConfig[field.key],
                          onChange: onChange(field.key),
                        })}
                  </div>
                );
              }
            })}

            <p className="opacity-40 mb-6 text-sm mt-8">
              Settings are saved in browser's localStorage
            </p>
          </div>
        </div>

        <div className="modal-action">
@@ -270,37 +440,97 @@ export default function SettingDialog({
  );
}

function SettingsModalShortInput({
function SettingsModalLongInput({
  configKey,
  configDefault,
  value,
  onChange,
  label,
}: {
  configKey: SettKey;
  configDefault: typeof CONFIG_DEFAULT;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  value: any;
  value: string;
  onChange: (value: string) => void;
  label?: string;
}) {
  return (
    <label className="input input-bordered join-item grow flex items-center gap-2 mb-2">
      <div className="dropdown dropdown-hover">
        <div tabIndex={0} role="button" className="font-bold">
          {label || configKey}
        </div>
        <div className="dropdown-content menu bg-base-100 rounded-box z-10 w-64 p-2 shadow mt-4">
          {CONFIG_INFO[configKey] ?? '(no help message available)'}
        </div>
      </div>
      <input
        type="text"
        className="grow"
        placeholder={`Default: ${configDefault[configKey] || 'none'}`}
    <label className="form-control mb-2">
      <div className="label inline">{label || configKey}</div>
      <textarea
        className="textarea textarea-bordered h-24"
        placeholder={`Default: ${CONFIG_DEFAULT[configKey] || 'none'}`}
        value={value}
        onChange={(e) => onChange(e.target.value)}
      />
    </label>
  );
}

function SettingsModalShortInput({
  configKey,
  value,
  onChange,
  label,
}: {
  configKey: SettKey;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  value: any;
  onChange: (value: string) => void;
  label?: string;
}) {
  const helpMsg = CONFIG_INFO[configKey];

  return (
    <>
      {/* on mobile, we simply show the help message here */}
      {helpMsg && (
        <div className="block md:hidden mb-1">
          <b>{label || configKey}</b>
          <br />
          <p className="text-xs">{helpMsg}</p>
        </div>
      )}
      <label className="input input-bordered join-item grow flex items-center gap-2 mb-2">
        <div className="dropdown dropdown-hover">
          <div tabIndex={0} role="button" className="font-bold hidden md:block">
            {label || configKey}
          </div>
          {helpMsg && (
            <div className="dropdown-content menu bg-base-100 rounded-box z-10 w-64 p-2 shadow mt-4">
              {helpMsg}
            </div>
          )}
        </div>
        <input
          type="text"
          className="grow"
          placeholder={`Default: ${CONFIG_DEFAULT[configKey] || 'none'}`}
          value={value}
          onChange={(e) => onChange(e.target.value)}
        />
      </label>
    </>
  );
}

function SettingsModalCheckbox({
  configKey,
  value,
  onChange,
  label,
}: {
  configKey: SettKey;
  value: boolean;
  onChange: (value: boolean) => void;
  label: string;
}) {
  return (
    <div className="flex flex-row items-center mb-2">
      <input
        type="checkbox"
        className="toggle"
        checked={value}
        onChange={(e) => onChange(e.target.checked)}
      />
      <span className="ml-4">{label || configKey}</span>
    </div>
  );
}
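handleSave above derives each field's expected type from its default in CONFIG_DEFAULT, so the guards imported from '../utils/misc' only need to classify primitives. A compatible sketch (assumed implementations, not the actual helpers):

    export const isString = (x: unknown): boolean => typeof x === 'string';
    export const isBoolean = (x: unknown): boolean => typeof x === 'boolean';
    export const isNumeric = (n: unknown): boolean =>
      typeof n !== 'boolean' && !isNaN(Number(n)) && String(n).trim() !== '';

With these, typing '0.8' into a numeric field saves as the number 0.8, while 'abc' triggers the 'must be numeric' alert and aborts the save.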
examples/server/webui/src/components/Sidebar.tsx
@@ -1,4 +1,4 @@
import { useEffect, useMemo, useState } from 'react';
import { useEffect, useState } from 'react';
import { classNames } from '../utils/misc';
import { Conversation } from '../utils/types';
import StorageUtils from '../utils/storage';
@@ -7,16 +7,17 @@ import { useNavigate, useParams } from 'react-router';
export default function Sidebar() {
  const params = useParams();
  const navigate = useNavigate();
  const currConv = useMemo(
    () => StorageUtils.getOneConversation(params.convId ?? ''),
    [params.convId]
  );

  const [conversations, setConversations] = useState<Conversation[]>([]);
  const [currConv, setCurrConv] = useState<Conversation | null>(null);

  useEffect(() => {
    const handleConversationChange = () => {
      setConversations(StorageUtils.getAllConversations());
    StorageUtils.getOneConversation(params.convId ?? '').then(setCurrConv);
  }, [params.convId]);

  useEffect(() => {
    const handleConversationChange = async () => {
      setConversations(await StorageUtils.getAllConversations());
    };
    StorageUtils.onConversationChanged(handleConversationChange);
    handleConversationChange();
@@ -82,11 +83,11 @@ export default function Sidebar() {
            onClick={() => navigate(`/chat/${conv.id}`)}
            dir="auto"
          >
            <span className="truncate">{conv.messages[0].content}</span>
            <span className="truncate">{conv.name}</span>
          </div>
        ))}
        <div className="text-center text-xs opacity-40 mt-auto mx-4">
          Conversations are saved to browser's localStorage
          Conversations are saved to browser's IndexedDB
        </div>
      </div>
    </div>
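Sidebar (and app.context below) subscribe to conversation changes through StorageUtils rather than polling IndexedDB. A minimal sketch of what that pub/sub surface can look like; the listener array and the notify function name are assumptions, only onConversationChanged/offConversationChanged appear in the diff:

    type ConvListener = (convId: string) => void;
    const convListeners: ConvListener[] = [];

    export function onConversationChanged(listener: ConvListener) {
      convListeners.push(listener);
    }
    export function offConversationChanged(listener: ConvListener) {
      const idx = convListeners.indexOf(listener);
      if (idx !== -1) convListeners.splice(idx, 1);
    }
    // assumed to be called internally after every write, e.g. appendMsg or remove
    function notifyConversationChanged(convId: string) {
      for (const listener of convListeners) listener(convId);
    }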
@@ -45,6 +45,9 @@
/* Highlight.js */
[data-color-scheme='light'] {
  @include meta.load-css('highlight.js/styles/stackoverflow-light');
  .dark-color {
    @apply bg-base-content text-base-100;
  }
}
[data-color-scheme='dark'] {
  @include meta.load-css('highlight.js/styles/stackoverflow-dark');
@@ -52,6 +55,9 @@
[data-color-scheme='auto'] {
  @media (prefers-color-scheme: light) {
    @include meta.load-css('highlight.js/styles/stackoverflow-light');
    .dark-color {
      @apply bg-base-content text-base-100;
    }
  }
  @media (prefers-color-scheme: dark) {
    @include meta.load-css('highlight.js/styles/stackoverflow-dark');
@@ -1,5 +1,12 @@
 import React, { createContext, useContext, useEffect, useState } from 'react';
-import { APIMessage, Conversation, Message, PendingMessage } from './types';
+import {
+  APIMessage,
+  CanvasData,
+  Conversation,
+  Message,
+  PendingMessage,
+  ViewingChat,
+} from './types';
 import StorageUtils from './storage';
 import {
   filterThoughtFromMsgs,
@@ -7,46 +14,67 @@ import {
   getSSEStreamAsync,
 } from './misc';
 import { BASE_URL, CONFIG_DEFAULT, isDev } from '../Config';
-import { matchPath, useLocation } from 'react-router';
+import { matchPath, useLocation, useNavigate } from 'react-router';
 
 interface AppContextValue {
-  viewingConversation: Conversation | null;
+  // conversations and messages
+  viewingChat: ViewingChat | null;
   pendingMessages: Record<Conversation['id'], PendingMessage>;
   isGenerating: (convId: string) => boolean;
   sendMessage: (
-    convId: string,
+    convId: string | null,
+    leafNodeId: Message['id'] | null,
     content: string,
-    onChunk?: CallbackGeneratedChunk
+    extra: Message['extra'],
+    onChunk: CallbackGeneratedChunk
   ) => Promise<boolean>;
   stopGenerating: (convId: string) => void;
   replaceMessageAndGenerate: (
     convId: string,
-    origMsgId: Message['id'],
-    content?: string,
-    onChunk?: CallbackGeneratedChunk
+    parentNodeId: Message['id'], // the parent node of the message to be replaced
+    content: string | null,
+    extra: Message['extra'],
+    onChunk: CallbackGeneratedChunk
   ) => Promise<void>;
+
+  // canvas
+  canvasData: CanvasData | null;
+  setCanvasData: (data: CanvasData | null) => void;
+
+  // config
   config: typeof CONFIG_DEFAULT;
   saveConfig: (config: typeof CONFIG_DEFAULT) => void;
   showSettings: boolean;
   setShowSettings: (show: boolean) => void;
 }
 
-// for now, this callback is only used for scrolling to the bottom of the chat
-type CallbackGeneratedChunk = () => void;
+// this callback is used for scrolling to the bottom of the chat and switching to the last node
+export type CallbackGeneratedChunk = (currLeafNodeId?: Message['id']) => void;
 
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 const AppContext = createContext<AppContextValue>({} as any);
 
+const getViewingChat = async (convId: string): Promise<ViewingChat | null> => {
+  const conv = await StorageUtils.getOneConversation(convId);
+  if (!conv) return null;
+  return {
+    conv: conv,
+    // all messages from all branches, not filtered by last node
+    messages: await StorageUtils.getMessages(convId),
+  };
+};
+
 export const AppContextProvider = ({
   children,
 }: {
   children: React.ReactElement;
 }) => {
   const { pathname } = useLocation();
+  const navigate = useNavigate();
   const params = matchPath('/chat/:convId', pathname);
   const convId = params?.params?.convId;
 
-  const [viewingConversation, setViewingConversation] =
-    useState<Conversation | null>(null);
+  const [viewingChat, setViewingChat] = useState<ViewingChat | null>(null);
   const [pendingMessages, setPendingMessages] = useState<
     Record<Conversation['id'], PendingMessage>
   >({});
@@ -54,14 +82,19 @@ export const AppContextProvider = ({
     Record<Conversation['id'], AbortController>
   >({});
   const [config, setConfig] = useState(StorageUtils.getConfig());
+  const [canvasData, setCanvasData] = useState<CanvasData | null>(null);
   const [showSettings, setShowSettings] = useState(false);
 
   // handle change when the convId from URL is changed
   useEffect(() => {
-    const handleConversationChange = (changedConvId: string) => {
+    // also reset the canvas data
+    setCanvasData(null);
+    const handleConversationChange = async (changedConvId: string) => {
       if (changedConvId !== convId) return;
-      setViewingConversation(StorageUtils.getOneConversation(convId));
+      setViewingChat(await getViewingChat(changedConvId));
     };
     StorageUtils.onConversationChanged(handleConversationChange);
-    setViewingConversation(StorageUtils.getOneConversation(convId ?? ''));
+    getViewingChat(convId ?? '').then(setViewingChat);
     return () => {
       StorageUtils.offConversationChanged(handleConversationChange);
     };
@@ -99,23 +132,39 @@ export const AppContextProvider = ({
 
   const generateMessage = async (
     convId: string,
-    onChunk?: CallbackGeneratedChunk
+    leafNodeId: Message['id'],
+    onChunk: CallbackGeneratedChunk
   ) => {
     if (isGenerating(convId)) return;
 
     const config = StorageUtils.getConfig();
-    const currConversation = StorageUtils.getOneConversation(convId);
+    const currConversation = await StorageUtils.getOneConversation(convId);
     if (!currConversation) {
       throw new Error('Current conversation is not found');
     }
+
+    const currMessages = StorageUtils.filterByLeafNodeId(
+      await StorageUtils.getMessages(convId),
+      leafNodeId,
+      false
+    );
     const abortController = new AbortController();
     setAbort(convId, abortController);
 
+    if (!currMessages) {
+      throw new Error('Current messages are not found');
+    }
+
+    const pendingId = Date.now() + 1;
    let pendingMsg: PendingMessage = {
-      id: Date.now() + 1,
+      id: pendingId,
+      convId,
+      type: 'text',
+      timestamp: pendingId,
       role: 'assistant',
       content: null,
+      parent: leafNodeId,
+      children: [],
     };
     setPending(convId, pendingMsg);
 
@@ -125,7 +174,7 @@ export const AppContextProvider = ({
       ...(config.systemMessage.length === 0
         ? []
         : [{ role: 'system', content: config.systemMessage } as APIMessage]),
-      ...normalizeMsgsForAPI(currConversation?.messages ?? []),
+      ...normalizeMsgsForAPI(currMessages),
     ];
     if (config.excludeThoughtOnReq) {
       messages = filterThoughtFromMsgs(messages);
@@ -186,8 +235,7 @@ export const AppContextProvider = ({
           const lastContent = pendingMsg.content || '';
           if (addedContent) {
             pendingMsg = {
-              id: pendingMsg.id,
-              role: 'assistant',
+              ...pendingMsg,
               content: lastContent + addedContent,
             };
           }
@@ -202,7 +250,7 @@ export const AppContextProvider = ({
           };
         }
         setPending(convId, pendingMsg);
-        onChunk?.();
+        onChunk(); // don't need to switch node for pending message
       }
     } catch (err) {
       setPending(convId, null);
@@ -217,37 +265,55 @@ export const AppContextProvider = ({
       }
     }
 
-    if (pendingMsg.content) {
-      StorageUtils.appendMsg(currConversation.id, {
-        id: pendingMsg.id,
-        content: pendingMsg.content,
-        role: pendingMsg.role,
-        timings: pendingMsg.timings,
-      });
+    if (pendingMsg.content !== null) {
+      await StorageUtils.appendMsg(pendingMsg as Message, leafNodeId);
     }
     setPending(convId, null);
-    onChunk?.(); // trigger scroll to bottom
+    onChunk(pendingId); // trigger scroll to bottom and switch to the last node
   };
 
   const sendMessage = async (
-    convId: string,
+    convId: string | null,
+    leafNodeId: Message['id'] | null,
     content: string,
-    onChunk?: CallbackGeneratedChunk
+    extra: Message['extra'],
+    onChunk: CallbackGeneratedChunk
   ): Promise<boolean> => {
-    if (isGenerating(convId) || content.trim().length === 0) return false;
+    if (isGenerating(convId ?? '') || content.trim().length === 0) return false;
 
-    StorageUtils.appendMsg(convId, {
-      id: Date.now(),
-      role: 'user',
-      content,
-    });
+    if (convId === null || convId.length === 0 || leafNodeId === null) {
+      const conv = await StorageUtils.createConversation(
+        content.substring(0, 256)
+      );
+      convId = conv.id;
+      leafNodeId = conv.currNode;
+      // if user is creating a new conversation, redirect to the new conversation
+      navigate(`/chat/${convId}`);
+    }
+
+    const now = Date.now();
+    const currMsgId = now;
+    StorageUtils.appendMsg(
+      {
+        id: currMsgId,
+        timestamp: now,
+        type: 'text',
+        convId,
+        role: 'user',
+        content,
+        extra,
+        parent: leafNodeId,
+        children: [],
+      },
+      leafNodeId
+    );
+    onChunk(currMsgId);
 
     try {
-      await generateMessage(convId, onChunk);
+      await generateMessage(convId, currMsgId, onChunk);
       return true;
     } catch (_) {
-      // rollback
-      StorageUtils.popMsg(convId);
+      // TODO: rollback
    }
    return false;
  };
@@ -260,22 +326,35 @@ export const AppContextProvider = ({
   // if content is undefined, we remove last assistant message
   const replaceMessageAndGenerate = async (
     convId: string,
-    origMsgId: Message['id'],
-    content?: string,
-    onChunk?: CallbackGeneratedChunk
+    parentNodeId: Message['id'], // the parent node of the message to be replaced
+    content: string | null,
+    extra: Message['extra'],
+    onChunk: CallbackGeneratedChunk
   ) => {
     if (isGenerating(convId)) return;
 
-    StorageUtils.filterAndKeepMsgs(convId, (msg) => msg.id < origMsgId);
-    if (content) {
-      StorageUtils.appendMsg(convId, {
-        id: Date.now(),
-        role: 'user',
-        content,
-      });
+    if (content !== null) {
+      const now = Date.now();
+      const currMsgId = now;
+      StorageUtils.appendMsg(
+        {
+          id: currMsgId,
+          timestamp: now,
+          type: 'text',
+          convId,
+          role: 'user',
+          content,
+          extra,
+          parent: parentNodeId,
+          children: [],
+        },
+        parentNodeId
+      );
+      parentNodeId = currMsgId;
    }
+    onChunk(parentNodeId);
 
-    await generateMessage(convId, onChunk);
+    await generateMessage(convId, parentNodeId, onChunk);
   };
 
   const saveConfig = (config: typeof CONFIG_DEFAULT) => {
@@ -287,13 +366,17 @@ export const AppContextProvider = ({
     <AppContext.Provider
       value={{
         isGenerating,
-        viewingConversation,
+        viewingChat,
         pendingMessages,
         sendMessage,
         stopGenerating,
         replaceMessageAndGenerate,
+        canvasData,
+        setCanvasData,
         config,
         saveConfig,
+        showSettings,
+        setShowSettings,
       }}
     >
      {children}
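Usage sketch (not part of the diff): with the reworked signature above, passing `null` for both `convId` and `leafNodeId` is how a caller starts a brand-new conversation. The `ctx` stand-in and `startNewChat` below are illustrative names, assuming the context value is obtained via the app's context hook:

```ts
// Sketch: driving the reworked sendMessage signature.
// The ctx object stands in for the value provided by AppContext.
type CallbackGeneratedChunk = (currLeafNodeId?: number) => void;

type Ctx = {
  sendMessage: (
    convId: string | null,
    leafNodeId: number | null,
    content: string,
    extra: undefined, // Message['extra'] omitted in this sketch
    onChunk: CallbackGeneratedChunk
  ) => Promise<boolean>;
};

async function startNewChat(ctx: Ctx) {
  const onChunk: CallbackGeneratedChunk = (currLeafNodeId) => {
    // scroll to bottom; when a leaf id arrives, switch the view to that node
    if (currLeafNodeId !== undefined) console.log('new leaf:', currLeafNodeId);
  };
  // convId = null and leafNodeId = null make sendMessage create a new
  // conversation, append the user node under its root, and navigate to it
  const ok = await ctx.sendMessage(null, null, 'Hello!', undefined, onChunk);
  if (!ok) console.error('rejected: empty content or already generating');
}
```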
examples/server/webui/src/utils/common.tsx (new file, +38 lines)
@@ -0,0 +1,38 @@
+export const XCloseButton: React.ElementType<
+  React.ClassAttributes<HTMLButtonElement> &
+    React.HTMLAttributes<HTMLButtonElement>
+> = ({ className, ...props }) => (
+  <button className={`btn btn-square btn-sm ${className ?? ''}`} {...props}>
+    <svg
+      xmlns="http://www.w3.org/2000/svg"
+      className="h-6 w-6"
+      fill="none"
+      viewBox="0 0 24 24"
+      stroke="currentColor"
+    >
+      <path
+        strokeLinecap="round"
+        strokeLinejoin="round"
+        strokeWidth="2"
+        d="M6 18L18 6M6 6l12 12"
+      />
+    </svg>
+  </button>
+);
+
+export const OpenInNewTab = ({
+  href,
+  children,
+}: {
+  href: string;
+  children: string;
+}) => (
+  <a
+    className="underline"
+    href={href}
+    target="_blank"
+    rel="noopener noreferrer"
+  >
+    {children}
+  </a>
+);
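Usage sketch (not part of the diff): both helpers are plain presentational components, so a consumer simply forwards props. The `Banner` component and `onClose` handler below are hypothetical:

```tsx
// Hypothetical consumer of the two helpers above; names are illustrative.
import React from 'react';
import { OpenInNewTab, XCloseButton } from './utils/common';

function Banner({ onClose }: { onClose: () => void }) {
  return (
    <div className="alert flex items-center gap-2">
      <OpenInNewTab href="https://github.com/ggml-org/llama.cpp">
        llama.cpp
      </OpenInNewTab>
      {/* XCloseButton forwards className and all regular button props */}
      <XCloseButton className="ml-auto" onClick={onClose} />
    </div>
  );
}
```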
examples/server/webui/src/utils/llama-vscode.ts (new file, +62 lines)
@@ -0,0 +1,62 @@
+import { useEffect, useState } from 'react';
+import { MessageExtraContext } from './types';
+
+// Extra context when using llama.cpp WebUI from llama-vscode, inside an iframe
+// Ref: https://github.com/ggml-org/llama.cpp/pull/11940
+
+interface SetTextEvData {
+  text: string;
+  context: string;
+}
+
+/**
+ * To test it:
+ * window.postMessage({ command: 'setText', text: 'Spot the syntax error', context: 'def test()\n return 123' }, '*');
+ */
+
+export const useVSCodeContext = (
+  inputRef: React.RefObject<HTMLTextAreaElement>,
+  setInputMsg: (text: string) => void
+) => {
+  const [extraContext, setExtraContext] = useState<MessageExtraContext | null>(
+    null
+  );
+
+  // Accept setText message from a parent window and set inputMsg and extraContext
+  useEffect(() => {
+    const handleMessage = (event: MessageEvent) => {
+      if (event.data?.command === 'setText') {
+        const data: SetTextEvData = event.data;
+        setInputMsg(data?.text);
+        if (data?.context && data.context.length > 0) {
+          setExtraContext({
+            type: 'context',
+            content: data.context,
+          });
+        }
+        inputRef.current?.focus();
+      }
+    };
+
+    window.addEventListener('message', handleMessage);
+    return () => window.removeEventListener('message', handleMessage);
+  }, []);
+
+  // Add a keydown listener that sends the "escapePressed" message to the parent window
+  useEffect(() => {
+    const handleKeyDown = (event: KeyboardEvent) => {
+      if (event.key === 'Escape') {
+        window.parent.postMessage({ command: 'escapePressed' }, '*');
+      }
+    };
+
+    window.addEventListener('keydown', handleKeyDown);
+    return () => window.removeEventListener('keydown', handleKeyDown);
+  }, []);
+
+  return {
+    extraContext,
+    // call once the user message is sent, to clear the extra context
+    clearExtraContext: () => setExtraContext(null),
+  };
+};
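Usage sketch (not part of the diff): a chat input component would pass its textarea ref and setter into the hook, then attach and clear the received context on send. `ChatInput` and `send` are illustrative names:

```tsx
// Hypothetical wiring of useVSCodeContext inside a chat input component.
import React, { useRef, useState } from 'react';
import { useVSCodeContext } from './utils/llama-vscode';
import { MessageExtraContext } from './utils/types';

function ChatInput({
  send,
}: {
  send: (text: string, extra: MessageExtraContext[]) => void;
}) {
  const inputRef = useRef<HTMLTextAreaElement>(null);
  const [inputMsg, setInputMsg] = useState('');
  const { extraContext, clearExtraContext } = useVSCodeContext(
    inputRef,
    setInputMsg
  );

  const onSubmit = () => {
    // attach the context received from llama-vscode, then clear it
    send(inputMsg, extraContext ? [extraContext] : []);
    clearExtraContext();
    setInputMsg('');
  };

  return (
    <textarea
      ref={inputRef}
      value={inputMsg}
      onChange={(e) => setInputMsg(e.target.value)}
      onKeyDown={(e) => e.key === 'Enter' && onSubmit()}
    />
  );
}
```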
@@ -4,7 +4,6 @@ import { APIMessage, Message } from './types';
 
 // ponyfill for missing ReadableStream asyncIterator on Safari
 import { asyncIterator } from '@sec-ant/readable-stream/ponyfill/asyncIterator';
-import { isDev } from '../Config';
 
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 export const isString = (x: any) => !!x.toLowerCase;
@@ -23,7 +22,7 @@ export async function* getSSEStreamAsync(fetchResponse: Response) {
     .pipeThrough(new TextLineStream());
   // @ts-expect-error asyncIterator complains about type, but it should work
   for await (const line of asyncIterator(lines)) {
-    if (isDev) console.log({ line });
+    //if (isDev) console.log({ line });
     if (line.startsWith('data:') && !line.endsWith('[DONE]')) {
       const data = JSON.parse(line.slice(5));
       yield data;
@@ -54,12 +53,23 @@ export const copyStr = (textToCopy: string) => {
 
 /**
  * filter out redundant fields upon sending to API
+ * also format extra into text
  */
-export function normalizeMsgsForAPI(messages: Message[]) {
+export function normalizeMsgsForAPI(messages: Readonly<Message[]>) {
   return messages.map((msg) => {
+    let newContent = '';
+
+    for (const extra of msg.extra ?? []) {
+      if (extra.type === 'context') {
+        newContent += `${extra.content}\n\n`;
+      }
+    }
+
+    newContent += msg.content;
+
     return {
       role: msg.role,
-      content: msg.content,
+      content: newContent,
     };
   }) as APIMessage[];
 }
@@ -85,3 +95,26 @@ export function classNames(classes: Record<string, boolean>): string {
     .map(([key, _]) => key)
     .join(' ');
 }
+
+export const delay = (ms: number) =>
+  new Promise((resolve) => setTimeout(resolve, ms));
+
+export const throttle = <T extends unknown[]>(
+  callback: (...args: T) => void,
+  delay: number
+) => {
+  let isWaiting = false;
+
+  return (...args: T) => {
+    if (isWaiting) {
+      return;
+    }
+
+    callback(...args);
+    isWaiting = true;
+
+    setTimeout(() => {
+      isWaiting = false;
+    }, delay);
+  };
+};
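Usage sketch (not part of the diff): the `throttle` added above is leading-edge only — the first call fires immediately, and later calls inside the window are dropped rather than queued. The scroll handler and the 80 ms interval below are illustrative:

```ts
// Sketch: throttling a chat scroll handler with the helper above.
import { throttle } from './utils/misc';

const scrollToBottom = () => {
  document.getElementById('chat')?.scrollTo({ top: Number.MAX_SAFE_INTEGER });
};

// invoked at most once per 80 ms; extra calls in between are dropped
const onChunk = throttle(scrollToBottom, 80);

for (let i = 0; i < 10; i++) onChunk(); // only the first call fires immediately
```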
@@ -2,7 +2,8 @@
 // format: { [convId]: { id: string, lastModified: number, messages: [...] } }
 
 import { CONFIG_DEFAULT } from '../Config';
-import { Conversation, Message } from './types';
+import { Conversation, Message, TimingReport } from './types';
+import Dexie, { Table } from 'dexie';
 
 const event = new EventTarget();
 
@@ -17,85 +18,154 @@ const dispatchConversationChange = (convId: string) => {
   );
 };
 
+const db = new Dexie('LlamacppWebui') as Dexie & {
+  conversations: Table<Conversation>;
+  messages: Table<Message>;
+};
+
+// https://dexie.org/docs/Version/Version.stores()
+db.version(1).stores({
+  // Unlike SQL, you don’t need to specify all properties but only the one you wish to index.
+  conversations: '&id, lastModified',
+  messages: '&id, convId, [convId+id], timestamp',
+});
+
 // convId is a string prefixed with 'conv-'
 const StorageUtils = {
   /**
    * manage conversations
    */
-  getAllConversations(): Conversation[] {
-    const res = [];
-    for (const key in localStorage) {
-      if (key.startsWith('conv-')) {
-        res.push(JSON.parse(localStorage.getItem(key) ?? '{}'));
-      }
-    }
-    res.sort((a, b) => b.lastModified - a.lastModified);
-    return res;
+  async getAllConversations(): Promise<Conversation[]> {
+    await migrationLStoIDB().catch(console.error); // noop if already migrated
+    return (await db.conversations.toArray()).sort(
+      (a, b) => b.lastModified - a.lastModified
+    );
   },
   /**
    * can return null if convId does not exist
    */
-  getOneConversation(convId: string): Conversation | null {
-    return JSON.parse(localStorage.getItem(convId) || 'null');
+  async getOneConversation(convId: string): Promise<Conversation | null> {
+    return (await db.conversations.where('id').equals(convId).first()) ?? null;
   },
   /**
-   * if convId does not exist, create one
+   * get all message nodes in a conversation
    */
-  appendMsg(convId: string, msg: Message): void {
-    if (msg.content === null) return;
-    const conv = StorageUtils.getOneConversation(convId) || {
-      id: convId,
-      lastModified: Date.now(),
-      messages: [],
-    };
-    conv.messages.push(msg);
-    conv.lastModified = Date.now();
-    localStorage.setItem(convId, JSON.stringify(conv));
-    dispatchConversationChange(convId);
+  async getMessages(convId: string): Promise<Message[]> {
+    return await db.messages.where({ convId }).toArray();
+  },
+  /**
+   * use in conjunction with getMessages to filter messages by leafNodeId
+   * includeRoot: whether to include the root node in the result
+   * if node with leafNodeId does not exist, return the path with the latest timestamp
+   */
+  filterByLeafNodeId(
+    msgs: Readonly<Message[]>,
+    leafNodeId: Message['id'],
+    includeRoot: boolean
+  ): Readonly<Message[]> {
+    const res: Message[] = [];
+    const nodeMap = new Map<Message['id'], Message>();
+    for (const msg of msgs) {
+      nodeMap.set(msg.id, msg);
+    }
+    let startNode: Message | undefined = nodeMap.get(leafNodeId);
+    if (!startNode) {
+      // if not found, we return the path with the latest timestamp
+      let latestTime = -1;
+      for (const msg of msgs) {
+        if (msg.timestamp > latestTime) {
+          startNode = msg;
+          latestTime = msg.timestamp;
+        }
+      }
+    }
+    // traverse the path from leafNodeId to root
+    // startNode can never be undefined here
+    let currNode: Message | undefined = startNode;
+    while (currNode) {
+      if (currNode.type !== 'root' || (currNode.type === 'root' && includeRoot))
+        res.push(currNode);
+      currNode = nodeMap.get(currNode.parent ?? -1);
+    }
+    res.sort((a, b) => a.timestamp - b.timestamp);
+    return res;
+  },
+  /**
+   * create a new conversation with a default root node
+   */
+  async createConversation(name: string): Promise<Conversation> {
+    const now = Date.now();
+    const msgId = now;
+    const conv: Conversation = {
+      id: `conv-${now}`,
+      lastModified: now,
+      currNode: msgId,
+      name,
+    };
+    await db.conversations.add(conv);
+    // create a root node
+    await db.messages.add({
+      id: msgId,
+      convId: conv.id,
+      type: 'root',
+      timestamp: now,
+      role: 'system',
+      content: '',
+      parent: -1,
+      children: [],
+    });
+    return conv;
   },
   /**
-   * Get new conversation id
+   * if convId does not exist, throw an error
    */
-  getNewConvId(): string {
-    return `conv-${Date.now()}`;
+  async appendMsg(
+    msg: Exclude<Message, 'parent' | 'children'>,
+    parentNodeId: Message['id']
+  ): Promise<void> {
+    if (msg.content === null) return;
+    const { convId } = msg;
+    await db.transaction('rw', db.conversations, db.messages, async () => {
+      const conv = await StorageUtils.getOneConversation(convId);
+      const parentMsg = await db.messages
+        .where({ convId, id: parentNodeId })
+        .first();
+      // update the currNode of conversation
+      if (!conv) {
+        throw new Error(`Conversation ${convId} does not exist`);
+      }
+      if (!parentMsg) {
+        throw new Error(
+          `Parent message ID ${parentNodeId} does not exist in conversation ${convId}`
+        );
+      }
+      await db.conversations.update(convId, {
+        lastModified: Date.now(),
+        currNode: msg.id,
+      });
+      // update parent
+      await db.messages.update(parentNodeId, {
+        children: [...parentMsg.children, msg.id],
+      });
+      // create message
+      await db.messages.add({
+        ...msg,
+        parent: parentNodeId,
+        children: [],
+      });
+    });
+    dispatchConversationChange(convId);
   },
   /**
    * remove conversation by id
    */
-  remove(convId: string): void {
-    localStorage.removeItem(convId);
+  async remove(convId: string): Promise<void> {
+    await db.transaction('rw', db.conversations, db.messages, async () => {
+      await db.conversations.delete(convId);
+      await db.messages.where({ convId }).delete();
+    });
     dispatchConversationChange(convId);
   },
-  /**
-   * remove all conversations
-   */
-  filterAndKeepMsgs(
-    convId: string,
-    predicate: (msg: Message) => boolean
-  ): void {
-    const conv = StorageUtils.getOneConversation(convId);
-    if (!conv) return;
-    conv.messages = conv.messages.filter(predicate);
-    conv.lastModified = Date.now();
-    localStorage.setItem(convId, JSON.stringify(conv));
-    dispatchConversationChange(convId);
-  },
-  /**
-   * remove last message from conversation
-   */
-  popMsg(convId: string): Message | undefined {
-    const conv = StorageUtils.getOneConversation(convId);
-    if (!conv) return;
-    const msg = conv.messages.pop();
-    conv.lastModified = Date.now();
-    if (conv.messages.length === 0) {
-      StorageUtils.remove(convId);
-    } else {
-      localStorage.setItem(convId, JSON.stringify(conv));
-    }
-    dispatchConversationChange(convId);
-    return msg;
-  },
 
   // event listeners
   onConversationChanged(callback: CallbackConversationChanged) {
@@ -136,3 +206,79 @@ const StorageUtils = {
 };
 
 export default StorageUtils;
+
+// Migration from localStorage to IndexedDB
+
+// these are old types, LS prefix stands for LocalStorage
+interface LSConversation {
+  id: string; // format: `conv-{timestamp}`
+  lastModified: number; // timestamp from Date.now()
+  messages: LSMessage[];
+}
+interface LSMessage {
+  id: number;
+  role: 'user' | 'assistant' | 'system';
+  content: string;
+  timings?: TimingReport;
+}
+async function migrationLStoIDB() {
+  if (localStorage.getItem('migratedToIDB')) return;
+  const res: LSConversation[] = [];
+  for (const key in localStorage) {
+    if (key.startsWith('conv-')) {
+      res.push(JSON.parse(localStorage.getItem(key) ?? '{}'));
+    }
+  }
+  if (res.length === 0) return;
+  await db.transaction('rw', db.conversations, db.messages, async () => {
+    let migratedCount = 0;
+    for (const conv of res) {
+      const { id: convId, lastModified, messages } = conv;
+      const firstMsg = messages[0];
+      const lastMsg = messages.at(-1);
+      if (messages.length < 2 || !firstMsg || !lastMsg) {
+        console.log(
+          `Skipping conversation ${convId} with ${messages.length} messages`
+        );
+        continue;
+      }
+      const name = firstMsg.content ?? '(no messages)';
+      await db.conversations.add({
+        id: convId,
+        lastModified,
+        currNode: lastMsg.id,
+        name,
+      });
+      const rootId = messages[0].id - 2;
+      await db.messages.add({
+        id: rootId,
+        convId: convId,
+        type: 'root',
+        timestamp: rootId,
+        role: 'system',
+        content: '',
+        parent: -1,
+        children: [firstMsg.id],
+      });
+      for (let i = 0; i < messages.length; i++) {
+        const msg = messages[i];
+        await db.messages.add({
+          ...msg,
+          type: 'text',
+          convId: convId,
+          timestamp: msg.id,
+          parent: i === 0 ? rootId : messages[i - 1].id,
+          children: i === messages.length - 1 ? [] : [messages[i + 1].id],
+        });
+      }
+      migratedCount++;
+      console.log(
+        `Migrated conversation ${convId} with ${messages.length} messages`
+      );
    }
+    console.log(
+      `Migrated ${migratedCount} conversations from localStorage to IndexedDB`
+    );
+    localStorage.setItem('migratedToIDB', '1');
+  });
+}
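Usage sketch (not part of the diff): the new storage API chains createConversation, appendMsg, and filterByLeafNodeId together. The IDs and content below are illustrative:

```ts
// Sketch of the node-based storage flow introduced above.
import StorageUtils from './utils/storage';

async function demo() {
  const conv = await StorageUtils.createConversation('My chat');
  const userMsgId = Date.now();
  // append a user node under the root; appendMsg maintains parent/children links
  await StorageUtils.appendMsg(
    {
      id: userMsgId,
      convId: conv.id,
      type: 'text',
      timestamp: userMsgId,
      role: 'user',
      content: 'Hello!',
      parent: conv.currNode, // the root node created above
      children: [],
    },
    conv.currNode
  );
  // read back only the active branch, with the root node excluded
  const branch = StorageUtils.filterByLeafNodeId(
    await StorageUtils.getMessages(conv.id),
    userMsgId,
    false
  );
  console.log(branch.map((m) => m.content)); // ['Hello!']
}
```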
@@ -5,11 +5,60 @@ export interface TimingReport {
   predicted_ms: number;
 }
 
+/**
+ * What is conversation "branching"? It is a feature that allows the user to edit an old message in the history, while still keeping the conversation flow.
+ * Inspired by ChatGPT / Claude / Hugging Chat where you edit a message, a new branch of the conversation is created, and the old message is still visible.
+ *
+ * We use the same node-based structure like other chat UIs, where each message has a parent and children. A "root" message is the first message in a conversation, which will not be displayed in the UI.
+ *
+ * root
+ * ├── message 1
+ * │   └── message 2
+ * │       └── message 3
+ * └── message 4
+ *     └── message 5
+ *
+ * In the above example, assuming that user wants to edit message 2, a new branch will be created:
+ *
+ * ├── message 2
+ * │   └── message 3
+ * └── message 6
+ *
+ * Message 2 and 6 are siblings, and message 6 is the new branch.
+ *
+ * We only need to know the last node (aka leaf) to get the current branch. In the above example, message 5 is the leaf of branch containing message 4 and 5.
+ *
+ * For the implementation:
+ * - StorageUtils.getMessages() returns list of all nodes
+ * - StorageUtils.filterByLeafNodeId() filters the list of nodes from a given leaf node
+ */
+
+// Note: the term "message" and "node" are used interchangeably in this context
 export interface Message {
   id: number;
+  convId: string;
+  type: 'text' | 'root';
+  timestamp: number; // timestamp from Date.now()
   role: 'user' | 'assistant' | 'system';
   content: string;
   timings?: TimingReport;
+  extra?: MessageExtra[];
+  // node based system for branching
+  parent: Message['id'];
+  children: Message['id'][];
 }
 
+type MessageExtra = MessageExtraTextFile | MessageExtraContext; // TODO: will add more in the future
+
+export interface MessageExtraTextFile {
+  type: 'textFile';
+  name: string;
+  content: string;
+}
+
+export interface MessageExtraContext {
+  type: 'context';
+  content: string;
+}
+
 export type APIMessage = Pick<Message, 'role' | 'content'>;
@@ -17,9 +66,26 @@ export type APIMessage = Pick<Message, 'role' | 'content'>;
 export interface Conversation {
   id: string; // format: `conv-{timestamp}`
   lastModified: number; // timestamp from Date.now()
-  messages: Message[];
+  currNode: Message['id']; // the current message node being viewed
+  name: string;
 }
 
+export interface ViewingChat {
+  conv: Readonly<Conversation>;
+  messages: Readonly<Message[]>;
+}
+
 export type PendingMessage = Omit<Message, 'content'> & {
   content: string | null;
 };
+
+export enum CanvasType {
+  PY_INTERPRETER,
+}
+
+export interface CanvasPyInterpreter {
+  type: CanvasType.PY_INTERPRETER;
+  content: string;
+}
+
+export type CanvasData = CanvasPyInterpreter;
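Illustration (not part of the diff): a minimal, self-contained version of the branching model documented in the comment above, using a toy node shape rather than the real `Message` type. Walking parent pointers from a leaf to the root recovers exactly one branch, which is the property `filterByLeafNodeId` relies on:

```ts
// Toy walk-through of the node-based branching model.
type Node = { id: number; parent: number | null };

const nodes = new Map<number, Node>([
  [1, { id: 1, parent: null }], // message 1 (child of root)
  [2, { id: 2, parent: 1 }], // message 2
  [3, { id: 3, parent: 2 }], // message 3
  [4, { id: 4, parent: null }], // message 4 (start of the sibling branch)
  [5, { id: 5, parent: 4 }], // message 5
]);

// one leaf id identifies one branch: walk leaf -> root, then reverse
function branch(leafId: number): number[] {
  const path: number[] = [];
  let n = nodes.get(leafId);
  while (n) {
    path.push(n.id);
    n = n.parent === null ? undefined : nodes.get(n.parent);
  }
  return path.reverse();
}

console.log(branch(3)); // [1, 2, 3]
console.log(branch(5)); // [4, 5]
```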
@@ -72,5 +72,9 @@ export default defineConfig({
     proxy: {
       '/v1': 'http://localhost:8080',
     },
+    headers: {
+      'Cross-Origin-Embedder-Policy': 'require-corp',
+      'Cross-Origin-Opener-Policy': 'same-origin',
+    },
   },
 });
@@ -1,6 +1,6 @@
 # llama.cpp/example/simple-cmake-pkg
 
-This program builds [simple](../simple) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
+This program builds [simple](../simple) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggml-org/llama.cpp) in projects which live outside of the source tree.
 
 ## Building
 
@@ -13,7 +13,7 @@ When hardware acceleration libraries are used (e.g. CUDA, Metal, Vulkan, etc.),
 ### Build llama.cpp and install to llama.cpp/inst
 
 ```sh
-git clone https://github.com/ggerganov/llama.cpp
+git clone https://github.com/ggml-org/llama.cpp
 cd llama.cpp
 cmake -S . -B build
 cmake --build build
@@ -4,6 +4,6 @@ Demonstration of speculative decoding and tree-based speculative decoding techni
 
 More info:
 
-- https://github.com/ggerganov/llama.cpp/pull/2926
-- https://github.com/ggerganov/llama.cpp/pull/3624
-- https://github.com/ggerganov/llama.cpp/pull/5625
+- https://github.com/ggml-org/llama.cpp/pull/2926
+- https://github.com/ggml-org/llama.cpp/pull/3624
+- https://github.com/ggml-org/llama.cpp/pull/5625
@@ -36,7 +36,7 @@
 #     ```
 #     nixConfig = {
 #       extra-substituters = [
-#         # Populated by the CI in ggerganov/llama.cpp
+#         # Populated by the CI in ggml-org/llama.cpp
 #         "https://llama-cpp.cachix.org"
 #
 #         # A development cache for nixpkgs imported with `config.cudaSupport = true`.
@@ -56,11 +56,11 @@
 #     };
 #     ```
 
-# For inspection, use `nix flake show github:ggerganov/llama.cpp` or the nix repl:
+# For inspection, use `nix flake show github:ggml-org/llama.cpp` or the nix repl:
 #
 # ```bash
 # ❯ nix repl
-# nix-repl> :lf github:ggerganov/llama.cpp
+# nix-repl> :lf github:ggml-org/llama.cpp
 # Added 13 variables.
 # nix-repl> outputs.apps.x86_64-linux.quantize
 # { program = "/nix/store/00000000000000000000000000000000-llama.cpp/bin/llama-quantize"; type = "app"; }
@@ -176,7 +176,7 @@
 #
 # We could test all outputs e.g. as `checks = confg.packages`.
 #
-# TODO: Build more once https://github.com/ggerganov/llama.cpp/issues/6346 has been addressed
+# TODO: Build more once https://github.com/ggml-org/llama.cpp/issues/6346 has been addressed
 checks = {
   inherit (config.packages) default vulkan;
 };
Some files were not shown because too many files have changed in this diff.