Compare commits


4 Commits

Author           SHA1        Message                                                  Date
Georgi Gerganov  a1cdd29cd2  ggml : rms_norm in chunks                                2023-05-20 10:15:54 +03:00
Georgi Gerganov  5a317898e8  ggml : process mul mat rows in chunks                    2023-05-20 10:15:53 +03:00
Georgi Gerganov  8a203f9fa1  llama : fix compile warnings in llama_set_state_data()  2023-05-20 10:14:43 +03:00
Georgi Gerganov  4fd3e29297  ggml : fix scalar implementation of Q4_1 dot             2023-05-20 10:13:19 +03:00
6 changed files with 90 additions and 187 deletions

examples/common.cpp

@@ -338,36 +338,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
break;
}
params.input_suffix = argv[i];
} else if (arg == "--steering-add") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.steering_add = argv[i];
} else if (arg == "--steering-sub") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.steering_sub = argv[i];
} else if (arg == "--steering-mul") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.steering_mul = std::stof(argv[i]);
} else if (arg == "--steering-source") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.steering_source = std::stoi(argv[i]);
} else if (arg == "--steering-layer") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.steering_layer = std::stoi(argv[i]);
} else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
gpt_print_usage(argc, argv, default_params);
@@ -453,11 +423,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
}
fprintf(stderr, " -ngl N, --n-gpu-layers N\n");
fprintf(stderr, " number of layers to store in VRAM\n");
fprintf(stderr, " --steering-add add positive steering prompt\n");
fprintf(stderr, " --steering-sub add negative steering prompt\n");
fprintf(stderr, " --steering-mul steering strength (negative is reverse, default %.1f)\n", params.steering_mul);
fprintf(stderr, " --steering-source layer for steering source (default %d)\n", params.steering_source);
fprintf(stderr, " --steering-layer layer for steering insertion (default %d)\n", params.steering_layer);
fprintf(stderr, " --mtest compute maximum memory usage\n");
fprintf(stderr, " --verbose-prompt print prompt before generation\n");
fprintf(stderr, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n");

examples/common.h

@@ -71,12 +71,6 @@ struct gpt_params {
bool use_mlock = false; // use mlock to keep model in memory
bool mem_test = false; // compute maximum memory usage
bool verbose_prompt = false; // print prompt tokens before generation
std::string steering_add;
std::string steering_sub;
float steering_mul = 1.0f;
int steering_layer = 15;
int steering_source = 2;
};
bool gpt_params_parse(int argc, char ** argv, gpt_params & params);

examples/main/main.cpp

@@ -173,36 +173,6 @@ int main(int argc, char ** argv) {
return 1;
}
if (!params.steering_add.empty() || !params.steering_sub.empty())
{
fprintf(stderr, "%s: steering: ('%s' - '%s') * %f\n",
__func__, params.steering_add.c_str(), params.steering_sub.c_str(), params.steering_mul);
params.steering_add.insert(0, 1, ' ');
params.steering_sub.insert(0, 1, ' ');
auto add_tokens = ::llama_tokenize(ctx, params.steering_add, true);
auto sub_tokens = ::llama_tokenize(ctx, params.steering_sub, true);
if (add_tokens.size() != sub_tokens.size()) {
while (add_tokens.size() < sub_tokens.size()) {
add_tokens.push_back(llama_token_nl());
}
while (sub_tokens.size() < add_tokens.size()) {
sub_tokens.push_back(llama_token_nl());
}
}
llama_set_steering_write(ctx, params.steering_source, +1.0f);
llama_eval(ctx, add_tokens.data(), std::min((int)add_tokens.size(), n_ctx), 0, params.n_threads);
llama_set_steering_write(ctx, params.steering_source, -1.0f);
llama_eval(ctx, sub_tokens.data(), std::min((int)sub_tokens.size(), n_ctx), 0, params.n_threads);
llama_set_steering_read(ctx, params.steering_layer, params.steering_mul);
}
// debug message about similarity of saved session, if applicable
size_t n_matching_session_tokens = 0;
if (session_tokens.size()) {
@@ -429,8 +399,6 @@ int main(int argc, char ** argv) {
llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
}
//llama_set_steering_off(ctx);
llama_token id = 0;
{

ggml.c
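The first hunk below fixes the scalar Q4_1 x Q8_1 dot product. For reference (assuming the usual block layout, where a Q4_1 block carries an FP16 scale d, an FP16 bias m and 4-bit quants, and a Q8_1 block carries a scale d, 8-bit quants and the precomputed s = d*sum(q)), each block pair contributes

    sum_j (d_x*qx_j + m_x) * (d_y*qy_j) = (d_x*d_y) * sum_j qx_j*qy_j + m_x*s_y

i.e. (d_x*d_y)*sumi + m_x*s in the code, which is what the corrected line computes once the FP16-to-FP32 conversion is applied to the d and m fields. The remaining hunks switch ggml_compute_forward_rms_norm_f32 and ggml_compute_forward_mul_mat_q_f32 to a chunked row distribution driven by the new atomic counter (aic) in ggml_compute_params; see the sketch after this file's diff.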

@@ -2481,7 +2481,7 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void *
sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
}
sumf += (GGML_FP16_TO_FP32(x[i]).d*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
}
*s = sumf;
@@ -3590,6 +3590,9 @@ struct ggml_compute_params {
// work buffer for all threads
size_t wsize;
void * wdata;
// atomic counter used to distribute chunks of work
atomic_int * aic;
};
//
@@ -9030,18 +9033,20 @@ static void ggml_compute_forward_rms_norm_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
atomic_store(params->aic, 0);
return;
}
GGML_ASSERT(src0->nb[0] == sizeof(float));
const int ith = params->ith;
const int ith = params->ith; UNUSED(ith);
const int nth = params->nth;
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
const int64_t ne03 = src0->ne[3];
const int64_t ne03 = src0->ne[3]; UNUSED(ne03);
const size_t nb01 = src0->nb[1];
const size_t nb02 = src0->nb[2];
@@ -9053,30 +9058,45 @@ static void ggml_compute_forward_rms_norm_f32(
const float eps = 1e-6f; // TODO: make this a parameter
// TODO: optimize
for (int64_t i03 = 0; i03 < ne03; i03++) {
for (int64_t i02 = 0; i02 < ne02; i02++) {
for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
const int nr = ggml_nrows(src0);
const int dr = (nr + 8*nth - 1)/(8*nth);
ggml_float sum = 0.0;
for (int64_t i00 = 0; i00 < ne00; i00++) {
sum += (ggml_float)(x[i00] * x[i00]);
}
while (true) {
const int ir0 = atomic_fetch_add(params->aic, dr);
float mean = sum/ne00;
float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
memcpy(y, x, ne00 * sizeof(float));
// for (int i00 = 0; i00 < ne00; i00++) {
// y[i00] = x[i00];
// }
const float scale = 1.0f/sqrtf(mean + eps);
ggml_vec_scale_f32(ne00, y, scale);
for (int ir = ir0; ir < ir0 + dr; ++ir) {
if (ir >= nr) {
break;
}
// src0 indices
const int i03 = ir/(ne02*ne01);
const int i02 = (ir - i03*ne02*ne01)/ne01;
const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
ggml_float sum = 0.0;
for (int64_t i00 = 0; i00 < ne00; i00++) {
sum += (ggml_float)(x[i00] * x[i00]);
}
float mean = sum/ne00;
float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
memcpy(y, x, ne00 * sizeof(float));
// for (int i00 = 0; i00 < ne00; i00++) {
// y[i00] = x[i00];
// }
const float scale = 1.0f/sqrtf(mean + eps);
ggml_vec_scale_f32(ne00, y, scale);
}
if (ir0 + dr >= nr) {
break;
}
}
}
@@ -9751,7 +9771,7 @@ static void ggml_compute_forward_mul_mat_q_f32(
const int nb2 = dst->nb[2];
const int nb3 = dst->nb[3];
const int ith = params->ith;
const int ith = params->ith; UNUSED(ith);
const int nth = params->nth;
GGML_ASSERT(ne02 == ne12);
@@ -9867,6 +9887,8 @@ static void ggml_compute_forward_mul_mat_q_f32(
}
}
atomic_store(params->aic, 0);
return;
}
@@ -9874,43 +9896,48 @@ static void ggml_compute_forward_mul_mat_q_f32(
return;
}
// parallelize by src0 rows using ggml_vec_dot_q
// total rows in src0
const int nr = ne01*ne02*ne03;
// rows per thread
const int dr = (nr + nth - 1)/nth;
// row range for this thread
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
void * wdata = params->wdata;
const size_t row_size = ne00*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];
for (int ir = ir0; ir < ir1; ++ir) {
// src0 indices
const int i03 = ir/(ne02*ne01);
const int i02 = (ir - i03*ne02*ne01)/ne01;
const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
// parallelize by src0 rows using ggml_vec_dot_q
const int i13 = i03;
const int i12 = i02;
const int nr = ggml_nrows(src0);
const int dr = (nr + 8*nth - 1)/(8*nth);
const int i0 = i01;
const int i2 = i02;
const int i3 = i03;
while (true) {
const int ir0 = atomic_fetch_add(params->aic, dr);
void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*row_size));
for (int ir = ir0; ir < ir0 + dr; ++ir) {
if (ir >= nr) {
break;
}
float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
// src0 indices
const int i03 = ir/(ne02*ne01);
const int i02 = (ir - i03*ne02*ne01)/ne01;
const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
assert(ne00 % 32 == 0);
const int i13 = i03;
const int i12 = i02;
for (int64_t ic = 0; ic < ne11; ++ic) {
vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
const int i0 = i01;
const int i2 = i02;
const int i3 = i03;
void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*row_size));
float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
assert(ne00 % 32 == 0);
for (int64_t ic = 0; ic < ne11; ++ic) {
vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
}
}
if (ir0 + dr >= nr) {
break;
}
}
@@ -13749,6 +13776,7 @@ struct ggml_compute_state_shared {
// synchronization primitives
atomic_int n_ready;
atomic_int aic;
atomic_bool has_work;
atomic_bool stop; // stop all threads
};
@@ -13817,6 +13845,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
/*.spin =*/ GGML_LOCK_INITIALIZER,
/*.n_threads =*/ n_threads,
/*.n_ready =*/ 0,
/*.aic =*/ 0,
/*.has_work =*/ false,
/*.stop =*/ false,
};
@@ -13837,6 +13866,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
.nth = n_threads,
.wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
.wdata = cgraph->work ? cgraph->work->data : NULL,
.aic = &state_shared.aic,
},
.node = NULL,
.shared = &state_shared,
@@ -14126,6 +14156,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
/*.nth =*/ node->n_tasks,
/*.wsize =*/ cgraph->work ? ggml_nbytes(cgraph->work) : 0,
/*.wdata =*/ cgraph->work ? cgraph->work->data : NULL,
/*.aic =*/ &state_shared.aic,
};
ggml_compute_forward(&params, node);
@@ -14149,6 +14180,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
.nth = node->n_tasks,
.wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
.wdata = cgraph->work ? cgraph->work->data : NULL,
.aic = &state_shared.aic,
};
workers[j].node = node;
}
@@ -14164,6 +14196,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
}
params.type = GGML_TASK_COMPUTE;
params.aic = &state_shared.aic;
ggml_compute_forward(&params, node);
// wait for thread pool
@@ -14204,6 +14237,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
.nth = node->n_tasks,
.wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
.wdata = cgraph->work ? cgraph->work->data : NULL,
.aic = &state_shared.aic,
};
workers[j].node = node;
}
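The two "in chunks" commits replace the fixed interleaving (row i01 starting at ith, stepping by nth) with a shared atomic counter from which every thread claims blocks of rows. Below is a minimal standalone sketch of that pattern, using simplified names rather than the real ggml API; the real code resets the counter in GGML_TASK_INIT and performs rms_norm or the quantized mat-mul per row instead of printing.

    #include <stdatomic.h>
    #include <stdio.h>

    // stand-in for the per-row work (rms_norm / vec_dot_q over one row in ggml)
    static void process_row(int ir) {
        printf("row %d\n", ir);
    }

    // each worker repeatedly claims the next chunk of dr rows from the shared counter
    static void worker(atomic_int * aic, int nr, int nth) {
        // chunk size: about 1/8 of an even per-thread split
        const int dr = (nr + 8*nth - 1)/(8*nth);

        while (1) {
            const int ir0 = atomic_fetch_add(aic, dr);  // claim rows [ir0, ir0 + dr)

            for (int ir = ir0; ir < ir0 + dr; ++ir) {
                if (ir >= nr) {
                    break;
                }
                process_row(ir);
            }

            if (ir0 + dr >= nr) {
                break;  // all rows have been handed out
            }
        }
    }

    int main(void) {
        atomic_int aic = 0;   // shared counter; in ggml it lives in ggml_compute_state_shared
        worker(&aic, 10, 4);  // single-threaded demo; real worker threads would share aic
        return 0;
    }

Splitting the work into chunks of roughly 1/8 of an even per-thread share lets threads that finish early claim extra chunks, so one slow thread no longer determines the runtime of the whole operation.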

llama.cpp

@@ -32,7 +32,6 @@
#include <mutex>
#include <sstream>
#include <numeric>
#include <iostream>
#define LLAMA_USE_SCRATCH
#define LLAMA_MAX_SCRATCH_BUFFERS 16
@@ -230,15 +229,6 @@ struct llama_context {
// input embedding (1-dimensional array: [n_embd])
std::vector<float> embedding;
std::vector<float> steering_vector; // [n_ctx, n_embd]
int steering_layer = 0;
int steering_mode = 0;
float steering_mul = 0.0f;
#define STEERING_OFF 0
#define STEERING_WRITE 2
#define STEERING_READ 3
// memory buffers used to evaluate the model
// TODO: move in llama_state
llama_ctx_buffer buf_compute;
@@ -279,24 +269,6 @@ struct llama_context {
}
};
void llama_set_steering_off(struct llama_context * ctx) {
ctx->steering_mode = STEERING_OFF;
}
void llama_set_steering_write(struct llama_context * ctx, int layer, float mul) {
ctx->steering_mode = STEERING_WRITE;
ctx->steering_mul = mul;
ctx->steering_layer = layer;
}
void llama_set_steering_read(struct llama_context * ctx, int layer, float mul) {
ctx->steering_mode = STEERING_READ;
ctx->steering_mul = mul;
ctx->steering_layer = layer;
//FILE* steeringbin = fopen("steering.bin", "wb");
//fwrite(ctx->steering_vector.data(), sizeof(float), ctx->steering_vector.size(), steeringbin);
//fclose(steeringbin);
}
template <typename T>
static T checked_mul(T a, T b) {
T ret = a * b;
@@ -1180,13 +1152,6 @@ static bool llama_eval_internal(
ggml_set_name(embd, "embd");
memcpy(embd->data, tokens, N*ggml_element_size(embd));
struct ggml_tensor * steer;
if (lctx.steering_mode != STEERING_OFF) {
steer = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
//steer->data = lctx.steering_vector.data() + n_past * n_embd * sizeof(float);
memcpy(steer->data, lctx.steering_vector.data() + n_past * n_embd, ggml_nbytes(steer));
}
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
for (int il = 0; il < n_layer; ++il) {
@@ -1196,17 +1161,6 @@ static bool llama_eval_internal(
lctx.use_buf(ctx0, 0);
if (lctx.steering_mode != STEERING_OFF && il == lctx.steering_layer) {
struct ggml_tensor * scal = ggml_new_f32(ctx0, lctx.steering_mul);
if (lctx.steering_mode == STEERING_WRITE) {
ggml_build_forward_expand(&gf, ggml_cpy(ctx0,
ggml_add(ctx0, ggml_scale(ctx0, inpL, scal), steer), steer));
break;
}
// std::cout << "\nAdding steering vector to inpL " << il << "\n";
inpSA = ggml_add(ctx0, ggml_scale(ctx0, steer, scal), inpSA);
}
// norm
{
cur = ggml_rms_norm(ctx0, inpL);
@@ -1420,12 +1374,6 @@ static bool llama_eval_internal(
memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
}
if (lctx.steering_mode == STEERING_WRITE) {
memcpy(lctx.steering_vector.data() + n_past * n_embd, steer->data, ggml_nbytes(steer));
}
if (mem_per_token == 0) {
mem_per_token = ggml_used_mem(ctx0)/N;
}
@@ -2247,8 +2195,6 @@ struct llama_context * llama_init_from_file(
ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0().at(ctx->model.type));
ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
ctx->steering_vector.resize(hparams.n_ctx * hparams.n_embd);
}
return ctx;
@@ -2672,8 +2618,8 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
}
// Sets the state reading from the specified source address
size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
const uint8_t * inp = src;
size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
uint8_t * inp = src;
// set rng
{

llama.h

@@ -138,7 +138,7 @@ extern "C" {
// Set the state reading from the specified address
// Returns the number of bytes read
LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src);
LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src);
// Save/load session file
LLAMA_API bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
@@ -190,10 +190,6 @@ extern "C" {
LLAMA_API llama_token llama_token_eos();
LLAMA_API llama_token llama_token_nl();
LLAMA_API void llama_set_steering_off(struct llama_context * ctx);
LLAMA_API void llama_set_steering_write(struct llama_context * ctx, int layer, float mul);
LLAMA_API void llama_set_steering_read(struct llama_context * ctx, int layer, float mul);
// Sampling functions
/// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
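Besides dropping the steering declarations, the llama.h diff above changes llama_set_state_data() to take a non-const pointer, matching the llama.cpp change. A hedged caller-side sketch follows; llama_get_state_size() is assumed to be available in this header's state API, and the buffer handling is illustrative only.

    #include <stdint.h>
    #include <stdlib.h>
    #include "llama.h"

    // round-trip the context state through a heap buffer; after this change
    // the restore call takes uint8_t * rather than const uint8_t *
    static void state_roundtrip(struct llama_context * ctx) {
        const size_t n_state = llama_get_state_size(ctx);  // assumed present
        uint8_t * state = (uint8_t *) malloc(n_state);
        if (state == NULL) {
            return;
        }
        llama_copy_state_data(ctx, state);  // serialize current state
        llama_set_state_data(ctx, state);   // restore; src is now non-const
        free(state);
    }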