
Commit 95bf5f7

llama_sampling_sample with default args is more naively usable
* Batches populated by either llama_batch_get_one or llama_batch_add work with default args
  * Previously get_one could use the default argument
  * Previously add should usually have used the last index where logits[idx] == true
* This hopefully encourages the use of llama_batch_add
  * By giving expected results when using default arguments.
* Adds "negative indexing" feature to llama_get_logits_ith and llama_get_embeddings_ith
  * Believed to work with any currently well behaved program
  * Default arg now works for both cases (previously would give strange results for add case)
  * Any non-negative number is unaffected and behaves as previously
  * Negative arguments were previously invalid.
  * Implemented as a special case of indexing as suggested by @compilade in #6519
1 parent 855f544 commit 95bf5f7
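
For illustration (not part of the commit): a minimal sketch of the workflow this change targets, assuming the common-library helpers of this era (llama_batch_init, llama_batch_add, llama_decode, llama_sampling_sample) and placeholder names ctx, ctx_sampling and prompt_tokens. A batch built with llama_batch_add typically requests logits only for its last token, so the old default of idx = 0 pointed at an output slot that was never computed; idx = -1 now resolves to the last output.

// Sketch only; ctx, ctx_sampling and prompt_tokens are assumed to be set up elsewhere.
llama_batch batch = llama_batch_init(512, 0, 1);

for (int i = 0; i < (int) prompt_tokens.size(); ++i) {
    // request logits only for the last prompt token
    const bool need_logits = (i == (int) prompt_tokens.size() - 1);
    llama_batch_add(batch, prompt_tokens[i], i, { 0 }, need_logits);
}

llama_decode(ctx, batch);

// With the old default (idx = 0) this read a slot that batch.logits[0] never requested;
// with idx = -1 it samples from the last computed output, as expected.
llama_token id = llama_sampling_sample(ctx_sampling, ctx, nullptr);

llama_batch_free(batch);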

File tree

common/sampling.h
llama.cpp
llama.h

3 files changed: +34 -12 lines changed


common/sampling.h

Lines changed: 1 addition & 1 deletion
@@ -129,7 +129,7 @@ llama_token llama_sampling_sample(
         struct llama_sampling_context * ctx_sampling,
         struct llama_context * ctx_main,
         struct llama_context * ctx_cfg,
-        int idx = 0);
+        int idx = -1);
 
 // Prepares and adjusts the set of token candidates for sampling based on penalties, biases, and sampling parameters.
 llama_token_data_array llama_sampling_prepare(
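
For illustration, the effect at a call site (ctx and ctx_sampling are placeholder names; only the default argument changes, explicit non-negative indices behave as before):

// Explicit non-negative indices are unaffected by this commit:
const llama_token tok_at_5 = llama_sampling_sample(ctx_sampling, ctx, nullptr, 5);

// The default now resolves to the last output of the previous decode:
const llama_token tok_last = llama_sampling_sample(ctx_sampling, ctx, nullptr);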

llama.cpp

Lines changed: 29 additions & 9 deletions
@@ -2177,7 +2177,7 @@ struct llama_context {
 
     std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
     size_t  output_size = 0; // capacity (of tokens positions) for the output buffers
-    int32_t n_outputs   = 0; // number of actually-used outputs in the current ubatch
+    int32_t n_outputs   = 0; // number of actually-used outputs in the current ubatch or last logical batch
 
     bool logits_all = false;

@@ -10411,6 +10411,9 @@ static int llama_decode_internal(
         n_outputs_prev += lctx.n_outputs;
     }
 
+    // set to total number of outputs in the batch, for use in llama_get_logits_ith
+    lctx.n_outputs = n_outputs;
+
     // wait for the computation to finish (automatically done when obtaining the model output)
     //llama_synchronize(&lctx);

@@ -15511,23 +15514,31 @@ float * llama_get_logits(struct llama_context * ctx) {
 }
 
 float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
+    int32_t j = -1;
     llama_synchronize(ctx);
 
     try {
         if (ctx->logits == nullptr) {
             throw std::runtime_error("no logits");
         }
-        if ((size_t) i >= ctx->output_ids.size()) {
+
+        if (i < 0) {
+            j = ctx->n_outputs + i;
+            if (j < 0) {
+                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
+            }
+        } else if ((size_t) i >= ctx->output_ids.size()) {
             throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size()));
+        } else {
+            j = ctx->output_ids[i];
         }
-        const int32_t j = ctx->output_ids[i];
 
         if (j < 0) {
             throw std::runtime_error(format("batch.logits[%d] != true", i));
         }
-        if ((size_t) j >= ctx->output_size) {
+        if ((size_t) j >= ctx->n_outputs) {
             // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, output_size=%lu)", j, ctx->output_size));
+            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%lu)", j, ctx->n_outputs));
         }
 
         return ctx->logits + j*ctx->model.hparams.n_vocab;
@@ -15547,23 +15558,32 @@ float * llama_get_embeddings(struct llama_context * ctx) {
 }
 
 float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
+    int32_t j = -1;
+
     llama_synchronize(ctx);
 
     try {
         if (ctx->embd == nullptr) {
             throw std::runtime_error("no embeddings");
         }
-        if ((size_t) i >= ctx->output_ids.size()) {
+
+        if (i < 0) {
+            j = ctx->n_outputs + i;
+            if (j < 0) {
+                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
+            }
+        } else if ((size_t) i >= ctx->output_ids.size()) {
             throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size()));
+        } else {
+            j = ctx->output_ids[i];
         }
-        const int32_t j = ctx->output_ids[i];
 
         if (j < 0) {
             throw std::runtime_error(format("batch.logits[%d] != true", i));
         }
-        if ((size_t) j >= ctx->output_size) {
+        if ((size_t) j >= ctx->n_outputs) {
             // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, output_size=%lu)", j, ctx->output_size));
+            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%lu)", j, ctx->n_outputs));
         }
 
         return ctx->embd + j*ctx->model.hparams.n_embd;
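
For illustration, the lookup both getters now share can be read as the following standalone paraphrase (not code from the commit): a negative i counts back from n_outputs, the total number of outputs of the last decoded batch, while a non-negative i is still translated through output_ids.

#include <cstddef>
#include <cstdint>
#include <vector>

// Paraphrase of the index resolution above, for reading convenience only.
// n_outputs: outputs produced by the last decoded batch; output_ids: batch position -> output slot.
static int32_t resolve_output_index(int32_t i, int32_t n_outputs, const std::vector<int32_t> & output_ids) {
    int32_t j = -1;
    if (i < 0) {
        j = n_outputs + i;                     // -1 -> last output, -2 -> second-to-last, ...
    } else if ((size_t) i < output_ids.size()) {
        j = output_ids[i];                     // stays -1 if logits were not requested for position i
    }
    return (0 <= j && j < n_outputs) ? j : -1; // anything else is invalid; the getters return NULL
}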

llama.h

Lines changed: 4 additions & 2 deletions
@@ -684,8 +684,9 @@ extern "C" {
     // Cols: n_vocab
     LLAMA_API float * llama_get_logits(struct llama_context * ctx);
 
-    // Logits for the ith token. Equivalent to:
+    // Logits for the ith token. For positive indices, Equivalent to:
     // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
+    // Negative indicies can be used to access logits in reverse order, -1 is the last logit.
     // returns NULL for invalid ids.
     LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);

@@ -697,8 +698,9 @@ extern "C" {
     // Otherwise, returns NULL.
     LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
 
-    // Get the embeddings for the ith token. Equivalent to:
+    // Get the embeddings for the ith token. For positive indices, Equivalent to:
     // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
+    // Negative indicies can be used to access embeddings in reverse order, -1 is the last embedding.
     // shape: [n_embd] (1-dimensional)
     // returns NULL for invalid ids.
     LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
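
Taken together, a minimal usage sketch of the public API after this commit (ctx is a placeholder for a llama_context that has just decoded a batch requesting logits for several positions):

// After llama_decode(ctx, batch), where batch requested logits for more than one position:
float * logits_last = llama_get_logits_ith(ctx, -1); // last requested output
float * logits_prev = llama_get_logits_ith(ctx, -2); // second-to-last requested output
float * logits_pos5 = llama_get_logits_ith(ctx, 5);  // unchanged: NULL if batch.logits[5] != true

// The embeddings getter follows the same rule (embeddings must be enabled on the context):
float * embd_last = llama_get_embeddings_ith(ctx, -1);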
