Skip to content

Commit 61733d3

Browse files
committed
all : prefer float over double where appropriate
1 parent f68345e commit 61733d3

File tree

7 files changed

+69
-65
lines changed

Makefile

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,12 +31,14 @@ endif
3131
#
3232

3333
# keep standard at C11 and C++11
34-
CFLAGS = -I. -O3 -DNDEBUG -std=c11 -fPIC \
35-
-Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith
36-
CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC \
37-
-Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion
34+
CFLAGS = -I. -O3 -DNDEBUG -std=c11 -fPIC
35+
CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
3836
LDFLAGS =
3937

38+
# warnings
39+
CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wno-unused-function
40+
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
41+
4042
# OS specific
4143
# TODO: support Windows
4244
ifeq ($(UNAME_S),Linux)

examples/embedding/embedding.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ int main(int argc, char ** argv) {
8989
const auto embeddings = llama_get_embeddings(ctx);
9090

9191
for (int i = 0; i < n_embd; i++) {
92-
printf("%f ", (double)embeddings[i]);
92+
printf("%f ", embeddings[i]);
9393
}
9494
printf("\n");
9595
}

examples/main/main.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ int main(int argc, char ** argv) {
210210
}
211211
}
212212
fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n",
213-
(double)params.temp, params.top_k, (double)params.top_p, params.repeat_last_n, (double)params.repeat_penalty);
213+
params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
214214
fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
215215
fprintf(stderr, "\n\n");
216216

@@ -275,10 +275,10 @@ int main(int argc, char ** argv) {
275275

276276
if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
277277
// out of user input, sample next token
278-
const int top_k = params.top_k;
279-
const double top_p = (double)params.top_p;
280-
const double temp = (double)params.temp;
281-
const double repeat_penalty = (double)params.repeat_penalty;
278+
const int32_t top_k = params.top_k;
279+
const float top_p = params.top_p;
280+
const float temp = params.temp;
281+
const float repeat_penalty = params.repeat_penalty;
282282

283283
llama_token id = 0;
284284

examples/perplexity/perplexity.cpp

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,15 @@
11
#include "common.h"
22
#include "llama.h"
33

4-
std::vector<double> softmax(const std::vector<float>& logits) {
5-
std::vector<double> probs(logits.size());
4+
std::vector<float> softmax(const std::vector<float>& logits) {
5+
std::vector<float> probs(logits.size());
66
float max_logit = logits[0];
77
for (float v : logits) max_logit = std::max(max_logit, v);
88
double sum_exp = 0.0;
99
for (size_t i = 0; i < logits.size(); i++) {
1010
// Subtract the maximum logit value from the current logit value for numerical stability
11-
float logit = logits[i] - max_logit;
12-
double exp_logit = std::exp((double)logit);
11+
const float logit = logits[i] - max_logit;
12+
const float exp_logit = std::expf(logit);
1313
sum_exp += exp_logit;
1414
probs[i] = exp_logit;
1515
}
@@ -24,14 +24,16 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
2424
auto tokens = ::llama_tokenize(ctx, params.prompt, true);
2525

2626
int count = 0;
27-
double nll = 0.0;
2827
int seq_count = tokens.size() / params.n_ctx;
2928

29+
double nll = 0.0;
30+
3031
fprintf(stderr, "%s : calculating perplexity over %d chunks\n", __func__, seq_count);
3132

3233
for (int i = 0; i < seq_count; ++i) {
3334
int start = i * params.n_ctx;
34-
int end = start + params.n_ctx - 1;
35+
int end = start + params.n_ctx - 1; // TODO: this is not optimal, e.g. it makes the batch 511 instead of 512
36+
// it is better to always be power of 2 for better performance
3537
std::vector<llama_token> embd(tokens.begin() + start, tokens.begin() + end);
3638
auto start_t = std::chrono::high_resolution_clock::now();
3739
if (llama_eval(ctx, embd.data(), embd.size(), 0, params.n_threads)) {
@@ -40,7 +42,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
4042
}
4143
auto end_t = std::chrono::high_resolution_clock::now();
4244
if (i == 0) {
43-
double seconds = std::chrono::duration<double>(end_t - start_t).count();
45+
const float seconds = std::chrono::duration<float>(end_t - start_t).count();
4446
printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0));
4547
}
4648
// We get the logits for all the tokens in the context window (params.n_ctx)
@@ -63,7 +65,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
6365
std::vector<float> tok_logits(
6466
logits + j * n_vocab,
6567
logits + (j + 1) * n_vocab);
66-
double prob = softmax(tok_logits)[tokens[start + j + 1]];
68+
const float prob = softmax(tok_logits)[tokens[start + j + 1]];
6769
nll += -std::log(prob);
6870
++count;
6971
}

ggml.c

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -150,10 +150,10 @@ typedef double ggml_float;
150150
//
151151
#include <arm_neon.h>
152152

153-
#define GGML_COMPUTE_FP16_TO_FP32(x) (x)
153+
#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
154154
#define GGML_COMPUTE_FP32_TO_FP16(x) (x)
155155

156-
#define GGML_FP16_TO_FP32(x) (x)
156+
#define GGML_FP16_TO_FP32(x) ((float) (x))
157157
#define GGML_FP32_TO_FP16(x) (x)
158158

159159
#else
@@ -322,7 +322,7 @@ inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
322322
// note: do not use these inside ggml.c
323323
// these are meant to be used via the ggml.h API
324324
float ggml_fp16_to_fp32(ggml_fp16_t x) {
325-
return GGML_FP16_TO_FP32(x);
325+
return (float) GGML_FP16_TO_FP32(x);
326326
}
327327

328328
ggml_fp16_t ggml_fp32_to_fp16(float x) {
@@ -566,7 +566,7 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int
566566
MAX(vgetq_lane_f32(amaxv[0], 2), vgetq_lane_f32(amaxv[0], 3)));
567567

568568
const float d = amax / ((1 << 3) - 1);
569-
const float id = d ? 1.0/d : 0.0;
569+
const float id = d ? 1.0f/d : 0.0f;
570570

571571
y[i].d = d;
572572

@@ -1001,7 +1001,7 @@ static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, in
10011001
} \
10021002
const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
10031003
const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
1004-
res = vaddvq_f32(vaddq_f32(t0, t1)); \
1004+
res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
10051005
}
10061006

10071007
#define GGML_F16_VEC GGML_F16x8
@@ -1505,7 +1505,7 @@ static inline __m512 dot_q4_0_oneblock_avx512(
15051505
#endif
15061506

15071507
inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
1508-
float sumf = 0.0f;
1508+
ggml_float sumf = 0.0;
15091509

15101510
#if defined(GGML_SIMD)
15111511
const int np = (n & ~(GGML_F16_STEP - 1));
@@ -1529,11 +1529,11 @@ inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t
15291529

15301530
// leftovers
15311531
for (int i = np; i < n; ++i) {
1532-
sumf += GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]);
1532+
sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
15331533
}
15341534
#else
15351535
for (int i = 0; i < n; ++i) {
1536-
sumf += GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]);
1536+
sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
15371537
}
15381538
#endif
15391539

@@ -1549,7 +1549,7 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void
15491549
const block_q4_0 * restrict x = vx;
15501550
const block_q4_0 * restrict y = vy;
15511551

1552-
float sumf = 0.0;
1552+
ggml_float sumf = 0.0;
15531553

15541554
#if defined(__ARM_NEON)
15551555
float sum0 = 0.0f;
@@ -1644,7 +1644,7 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void
16441644
#endif
16451645
}
16461646

1647-
sumf = sum0 + sum1;
1647+
sumf = (ggml_float)(sum0 + sum1);
16481648
#elif defined(__AVX512F__)
16491649
// Initialize accumulator with zeros
16501650
__m512 acc0 = _mm512_setzero_ps();
@@ -1936,7 +1936,7 @@ inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void
19361936
// compute GGML_VEC_DOT_UNROLL dot products at once
19371937
// xs - x row stride in bytes
19381938
inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
1939-
float sumf[GGML_VEC_DOT_UNROLL] = { 0.0f };
1939+
ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
19401940

19411941
ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
19421942

@@ -1972,13 +1972,13 @@ inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * re
19721972
// leftovers
19731973
for (int i = np; i < n; ++i) {
19741974
for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
1975-
sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]);
1975+
sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
19761976
}
19771977
}
19781978
#else
19791979
for (int i = 0; i < n; ++i) {
19801980
for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
1981-
sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]);
1981+
sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
19821982
}
19831983
}
19841984
#endif
@@ -6998,16 +6998,16 @@ static void ggml_compute_forward_rope_f32(
69986998
const int p = (mode == 0 ? n_past + i2 : i2);
69996999
for (int i1 = 0; i1 < ne1; i1++) {
70007000
for (int i0 = 0; i0 < n_dims; i0 += 2) {
7001-
const double theta = pow(10000.0, ((double)-i0)/n_dims);
7001+
const float theta = powf(10000.0, ((float)-i0)/n_dims);
70027002

7003-
const double cos_theta = cos(p*theta);
7004-
const double sin_theta = sin(p*theta);
7003+
const float cos_theta = cosf(p*theta);
7004+
const float sin_theta = sinf(p*theta);
70057005

70067006
const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
70077007
float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
70087008

7009-
double x0 = (double)src[0];
7010-
double x1 = (double)src[1];
7009+
const float x0 = src[0];
7010+
const float x1 = src[1];
70117011

70127012
dst_data[0] = x0*cos_theta - x1*sin_theta;
70137013
dst_data[1] = x0*sin_theta + x1*cos_theta;
@@ -7054,16 +7054,16 @@ static void ggml_compute_forward_rope_f16(
70547054
const int p = (mode == 0 ? n_past + i2 : i2);
70557055
for (int i1 = 0; i1 < ne1; i1++) {
70567056
for (int i0 = 0; i0 < n_dims; i0 += 2) {
7057-
const double theta = pow(10000.0, ((double)-i0)/n_dims);
7057+
const float theta = powf(10000.0, ((float)-i0)/n_dims);
70587058

7059-
const float cos_theta = cos(p*theta);
7060-
const float sin_theta = sin(p*theta);
7059+
const float cos_theta = cosf(p*theta);
7060+
const float sin_theta = sinf(p*theta);
70617061

70627062
const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
70637063
ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
70647064

7065-
float x0 = ggml_fp16_to_fp32(src[0]);
7066-
float x1 = ggml_fp16_to_fp32(src[1]);
7065+
const float x0 = ggml_fp16_to_fp32(src[0]);
7066+
const float x1 = ggml_fp16_to_fp32(src[1]);
70677067

70687068
dst_data[0] = ggml_fp32_to_fp16(x0*cos_theta - x1*sin_theta);
70697069
dst_data[1] = ggml_fp32_to_fp16(x0*sin_theta + x1*cos_theta);

llama.cpp

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -779,8 +779,8 @@ static bool llama_model_load(
779779

780780
// progress
781781
if (progress_callback) {
782-
double current_file_progress = double(size_t(fin.tellg()) - file_offset) / double(file_size - file_offset);
783-
double current_progress = (double(i) + current_file_progress) / double(n_parts);
782+
float current_file_progress = float(size_t(fin.tellg()) - file_offset) / float(file_size - file_offset);
783+
float current_progress = (float(i) + current_file_progress) / float(n_parts);
784784
progress_callback(current_progress, progress_callback_user_data);
785785
}
786786
if (model.n_loaded % 8 == 0) {
@@ -1240,12 +1240,12 @@ static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, co
12401240
// sampling
12411241
//
12421242

1243-
static void sample_top_k(std::vector<std::pair<double, llama_vocab::id>> & logits_id, int top_k) {
1243+
static void sample_top_k(std::vector<std::pair<float, llama_vocab::id>> & logits_id, int top_k) {
12441244
// find the top k tokens
12451245
std::partial_sort(
12461246
logits_id.begin(),
12471247
logits_id.begin() + top_k, logits_id.end(),
1248-
[](const std::pair<double, llama_vocab::id> & a, const std::pair<double, llama_vocab::id> & b) {
1248+
[](const std::pair<float, llama_vocab::id> & a, const std::pair<float, llama_vocab::id> & b) {
12491249
return a.first > b.first;
12501250
});
12511251

@@ -1256,51 +1256,51 @@ static llama_vocab::id llama_sample_top_p_top_k(
12561256
llama_context & lctx,
12571257
const std::vector<llama_vocab::id> & last_n_tokens,
12581258
int top_k,
1259-
double top_p,
1260-
double temp,
1261-
double repeat_penalty) {
1259+
float top_p,
1260+
float temp,
1261+
float repeat_penalty) {
12621262
auto & rng = lctx.rng;
12631263

12641264
const int n_logits = lctx.model.hparams.n_vocab;
12651265

12661266
const auto & logits = lctx.logits;
12671267
const auto * plogits = logits.data() + logits.size() - n_logits;
12681268

1269-
std::vector<std::pair<double, llama_vocab::id>> logits_id;
1269+
std::vector<std::pair<float, llama_vocab::id>> logits_id;
12701270
logits_id.reserve(n_logits);
12711271

12721272
{
1273-
const double scale = 1.0/temp;
1273+
const float scale = 1.0f/temp;
12741274
for (int i = 0; i < n_logits; ++i) {
12751275
// repetition penalty from ctrl paper (https://arxiv.org/abs/1909.05858)
12761276
// credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main
12771277
if (std::find(last_n_tokens.begin(), last_n_tokens.end(), i) != last_n_tokens.end()) {
12781278
// if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
12791279
if (plogits[i] < 0.0f) {
1280-
logits_id.push_back(std::make_pair((double)plogits[i]*scale*repeat_penalty, i));
1280+
logits_id.push_back(std::make_pair(plogits[i]*scale*repeat_penalty, i));
12811281
} else {
1282-
logits_id.push_back(std::make_pair((double)plogits[i]*scale/repeat_penalty, i));
1282+
logits_id.push_back(std::make_pair(plogits[i]*scale/repeat_penalty, i));
12831283
}
12841284
} else {
1285-
logits_id.push_back(std::make_pair((double)plogits[i]*scale, i));
1285+
logits_id.push_back(std::make_pair(plogits[i]*scale, i));
12861286
}
12871287
}
12881288
}
12891289

12901290
sample_top_k(logits_id, top_k);
12911291

1292-
double maxl = -std::numeric_limits<double>::infinity();
1292+
float maxl = -std::numeric_limits<float>::infinity();
12931293
for (const auto & kv : logits_id) {
12941294
maxl = std::max(maxl, kv.first);
12951295
}
12961296

12971297
// compute probs for the top k tokens
1298-
std::vector<double> probs;
1298+
std::vector<float> probs;
12991299
probs.reserve(logits_id.size());
13001300

13011301
double sum = 0.0;
13021302
for (const auto & kv : logits_id) {
1303-
double p = exp(kv.first - maxl);
1303+
const float p = expf(kv.first - maxl);
13041304
probs.push_back(p);
13051305
sum += p;
13061306
}
@@ -1590,7 +1590,7 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s
15901590
}
15911591

15921592
for (int i = 0; i < (int) hist_cur.size(); ++i) {
1593-
printf("%5.3f ", hist_cur[i] / (double)nelements);
1593+
printf("%5.3f ", hist_cur[i] / float(nelements));
15941594
}
15951595
printf("\n");
15961596
} else {
@@ -1613,7 +1613,7 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s
16131613

16141614
printf("%s: hist: ", __func__);
16151615
for (int i = 0; i < (int) hist_all.size(); ++i) {
1616-
printf("%5.3f ", hist_all[i] / (double)sum_all);
1616+
printf("%5.3f ", hist_all[i] / float(sum_all));
16171617
}
16181618
printf("\n");
16191619
}
@@ -1795,9 +1795,9 @@ llama_token llama_sample_top_p_top_k(
17951795
const llama_token * last_n_tokens_data,
17961796
int last_n_tokens_size,
17971797
int top_k,
1798-
double top_p,
1799-
double temp,
1800-
double repeat_penalty) {
1798+
float top_p,
1799+
float temp,
1800+
float repeat_penalty) {
18011801
const int64_t t_start_sample_us = ggml_time_us();
18021802

18031803
llama_token result = 0;

0 commit comments

Comments (0)