// liblloyal embedding helpers - usage example:
//
//   // Check model supports embeddings
//   if (embedding::has_embeddings(model)) {
//     int32_t dim = embedding::dimension(model);
//
//     // Decode tokens with pooling enabled
//     decoder::decode_tokens(ctx, tokens, 0, 512);
//
//     // Extract normalized embeddings
//     auto vec = embedding::get(ctx, embedding::Normalize::L2);
//   }
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <llama/llama.h>
#include <stdexcept>
#include <vector>

// liblloyal common definitions & logging; provides LLOYAL_LOG_DEBUG
// (exact include path assumed here)
#include "common.hpp"
namespace embedding {

// Normalization modes for embedding vectors. Only L2 appears at the original
// call sites; `None` is the reconstructed pass-through mode.
enum class Normalize {
  None, // return raw embeddings as stored by llama.cpp
  L2,   // scale to unit length (dot product == cosine similarity)
};

// Check if model supports embeddings
inline bool has_embeddings(const llama_model *model) {
  if (!model) {
    return false;
  }
  int32_t n_embd = llama_model_n_embd(model);
  return n_embd > 0;
}
// Get embedding dimension for model
inline int32_t dimension(const llama_model *model) {
  if (!model) {
    return 0;
  }
  return llama_model_n_embd(model);
}
// Check if context has pooling enabled
inline bool has_pooling(llama_context *ctx) {
  if (!ctx) {
    return false;
  }
  return llama_pooling_type(ctx) != LLAMA_POOLING_TYPE_NONE;
}
// Get pooling type for context
inline int32_t pooling_type(llama_context *ctx) {
  if (!ctx) {
    return LLAMA_POOLING_TYPE_NONE;
  }
  return llama_pooling_type(ctx);
}
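
// Capability-check sketch (illustrative; `ctx` is assumed to be a valid
// llama_context created by the caller):
//
//   if (!embedding::has_pooling(ctx)) {
//     // no pooled sequence embedding available; fall back to per-token
//     // access via embedding::get_ith() instead of embedding::get()
//   }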
namespace detail {

// Apply L2 normalization to embedding vector (in-place)
inline void apply_l2_normalize(std::vector<float> &vec) {
  if (vec.empty())
    return;
  float norm_sq = 0.0f;
  for (float v : vec) {
    norm_sq += v * v;
  }
  float norm = std::sqrt(norm_sq);
  if (norm > 1e-8f) {
    for (float &v : vec) {
      v /= norm;
    }
  } else {
    // Near-zero vectors are left untouched to avoid division blow-up
    LLOYAL_LOG_DEBUG(
        "[embedding::detail::apply_l2_normalize] WARNING: near-zero norm");
  }
}

} // namespace detail
namespace detail {

// RAII guard: frees the wrapped llama_batch on scope exit (exception-safe)
struct BatchGuard {
  explicit BatchGuard(llama_batch &b) : batch(b) {}
  ~BatchGuard() { llama_batch_free(batch); }
  BatchGuard(const BatchGuard &) = delete;
  BatchGuard &operator=(const BatchGuard &) = delete;
  llama_batch &batch;
};

} // namespace detail
// Encode tokens for embedding extraction
inline void encode(llama_context *ctx, const llama_token *tokens,
                   int32_t n_tokens, int32_t n_batch) {
  LLOYAL_LOG_DEBUG("[embedding::encode] Encoding %d tokens", n_tokens);
  if (!ctx) {
    throw std::runtime_error("embedding::encode - NULL context");
  }
  if (!tokens || n_tokens <= 0) {
    throw std::runtime_error("embedding::encode - Invalid token array");
  }
  if (n_tokens > n_batch) {
    LLOYAL_LOG_DEBUG(
        "[embedding::encode] ERROR: %d tokens exceed batch size %d",
        n_tokens, n_batch);
    throw std::runtime_error(
        "embedding::encode - token count exceeds batch size (truncation not "
        "supported, increase n_batch or reduce input length)");
  }
  llama_batch batch = llama_batch_init(n_batch, 0, 1);
  detail::BatchGuard batch_guard(batch);
  // Queue every token on sequence 0 with outputs enabled: pooled embeddings
  // require logits for all positions
  for (int32_t i = 0; i < n_tokens; ++i) {
    batch.token[i] = tokens[i];
    batch.pos[i] = i;
    batch.n_seq_id[i] = 1;
    batch.seq_id[i][0] = 0;
    batch.logits[i] = true;
  }
  batch.n_tokens = n_tokens;
  if (llama_decode(ctx, batch) != 0) {
    throw std::runtime_error("embedding::encode - llama_decode failed");
  }
}
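
// Usage sketch for encode(): `tokens`/`n_tokens` are assumed to come from the
// caller's tokenizer, and n_batch must be >= n_tokens:
//
//   embedding::encode(ctx, tokens, n_tokens, /*n_batch=*/512);
//   // the pooled embedding for this batch is now readable via embedding::get()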
// Convenience overload over std::vector
inline void encode(llama_context *ctx, const std::vector<llama_token> &tokens,
                   int32_t n_batch) {
  encode(ctx, tokens.data(), static_cast<int32_t>(tokens.size()), n_batch);
}
// Get embeddings for last decoded batch
inline std::vector<float>
get(llama_context *ctx, Normalize normalize = Normalize::L2) {
  if (!ctx) {
    throw std::invalid_argument("embedding::get: ctx is null");
  }
  const llama_model *model = llama_get_model(ctx);
  if (!model) {
    throw std::runtime_error("embedding::get: failed to get model");
  }
  if (!has_pooling(ctx)) {
    LLOYAL_LOG_DEBUG(
        "[embedding::get] WARNING: pooling not enabled, embeddings may be "
        "invalid. Create context with pooling_type != NONE");
  }
  const float *embd_ptr = nullptr;
  if (has_pooling(ctx)) {
    // Pooled path: one embedding for the whole sequence
    embd_ptr = llama_get_embeddings_seq(ctx, 0);
    LLOYAL_LOG_DEBUG("[embedding::get] Using pooled sequence embeddings");
  } else {
    embd_ptr = llama_get_embeddings(ctx);
  }
  if (!embd_ptr) {
    LLOYAL_LOG_DEBUG(
        "[embedding::get] ERROR: embeddings unavailable. "
        "Ensure context was created with embeddings=true and "
        "tokens were encoded with logits=true for all tokens.");
    throw std::runtime_error(
        "embedding::get: embeddings unavailable (ensure embeddings=true in "
        "context params and use embedding::encode())");
  }
  int32_t n_embd = llama_model_n_embd(model);
  std::vector<float> embeddings(embd_ptr, embd_ptr + n_embd);
  if (normalize == Normalize::L2) {
    detail::apply_l2_normalize(embeddings);
  }
  LLOYAL_LOG_DEBUG(
      "[embedding::get] Extracted embeddings (dim=%d, normalize=%d)",
      n_embd, static_cast<int>(normalize));
  return embeddings;
}
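
// Usage sketch for get(): assumes `ctx` was created with embeddings enabled
// and a pooling type, and a batch was just encoded:
//
//   embedding::encode(ctx, tokens, /*n_batch=*/512);
//   std::vector<float> vec = embedding::get(ctx);  // L2-normalized by default
//   // vec.size() == embedding::dimension(model)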
// Get embeddings for specific sequence
inline std::vector<float>
get_seq(llama_context *ctx, llama_seq_id seq,
        Normalize normalize = Normalize::L2) {
  if (!ctx) {
    throw std::invalid_argument("embedding::get_seq: ctx is null");
  }
  const llama_model *model = llama_get_model(ctx);
  if (!model) {
    throw std::runtime_error("embedding::get_seq: failed to get model");
  }
  if (!has_pooling(ctx)) {
    LLOYAL_LOG_DEBUG(
        "[embedding::get_seq] WARNING: pooling not enabled, per-sequence "
        "embeddings may be unavailable");
  }
  const float *embd_ptr = llama_get_embeddings_seq(ctx, seq);
  if (!embd_ptr) {
    if (seq == 0) {
      // Fall back to the non-pooled path for the default sequence
      return get(ctx, normalize);
    }
    LLOYAL_LOG_DEBUG(
        "[embedding::get_seq] ERROR: embeddings unavailable for seq=%d",
        seq);
    throw std::runtime_error("embedding::get_seq: embeddings unavailable");
  }
  int32_t n_embd = llama_model_n_embd(model);
  std::vector<float> embeddings(embd_ptr, embd_ptr + n_embd);
  if (normalize == Normalize::L2) {
    detail::apply_l2_normalize(embeddings);
  }
  LLOYAL_LOG_DEBUG(
      "[embedding::get_seq] Extracted embeddings for seq=%d (dim=%d)",
      seq, n_embd);
  return embeddings;
}
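
// Usage sketch for get_seq(): assumes the caller decoded one document per
// sequence id (0 and 1 here) in a single batch:
//
//   auto doc0 = embedding::get_seq(ctx, /*seq=*/0);
//   auto doc1 = embedding::get_seq(ctx, /*seq=*/1);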
// Get embeddings for specific token index in last batch
inline std::vector<float>
get_ith(llama_context *ctx, int32_t idx,
        Normalize normalize = Normalize::L2) {
  if (!ctx) {
    throw std::invalid_argument("embedding::get_ith: ctx is null");
  }
  const llama_model *model = llama_get_model(ctx);
  if (!model) {
    throw std::runtime_error("embedding::get_ith: failed to get model");
  }
  const float *embd_ptr = llama_get_embeddings_ith(ctx, idx);
  if (!embd_ptr) {
    LLOYAL_LOG_DEBUG(
        "[embedding::get_ith] ERROR: embeddings unavailable for idx=%d",
        idx);
    throw std::runtime_error("embedding::get_ith: embeddings unavailable");
  }
  int32_t n_embd = llama_model_n_embd(model);
  std::vector<float> embeddings(embd_ptr, embd_ptr + n_embd);
  if (normalize == Normalize::L2) {
    detail::apply_l2_normalize(embeddings);
  }
  LLOYAL_LOG_DEBUG(
      "[embedding::get_ith] Extracted embeddings for idx=%d (dim=%d)",
      idx, n_embd);
  return embeddings;
}
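
// Usage sketch for get_ith(): with pooling disabled, extract the embedding of
// the last token (a common choice for causal models); `n_tokens` is the
// length of the batch just decoded:
//
//   auto last = embedding::get_ith(ctx, n_tokens - 1);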
// Compute cosine similarity between two embedding vectors.
// NOTE: this is a plain dot product, which equals cosine similarity only for
// L2-normalized inputs (the default Normalize::L2 in get/get_seq/get_ith).
inline float cosine_similarity(const std::vector<float> &a,
                               const std::vector<float> &b) {
  if (a.size() != b.size()) {
    LLOYAL_LOG_DEBUG(
        "[embedding::cosine_similarity] ERROR: dimension mismatch "
        "(%zu vs %zu)",
        a.size(), b.size());
    throw std::invalid_argument(
        "embedding::cosine_similarity: dimension mismatch");
  }
  if (a.empty()) {
    return 0.0f;
  }
  float dot = 0.0f;
  for (size_t i = 0; i < a.size(); ++i) {
    dot += a[i] * b[i];
  }
  return dot;
}
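
// End-to-end similarity sketch: `ta` and `tb` are two pre-tokenized inputs.
// Depending on the llama.cpp version/model, the KV cache may need clearing
// between encodes so the two inputs do not run together:
//
//   embedding::encode(ctx, ta, 512);
//   auto ea = embedding::get(ctx);
//   embedding::encode(ctx, tb, 512);
//   auto eb = embedding::get(ctx);
//   float sim = embedding::cosine_similarity(ea, eb);  // in [-1, 1]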
} // namespace embedding