#include <cassert>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>

#include <llama/llama.h>

/// Map string name to ggml_type enum (matches llama.cpp CLI -ctk/-ctv flags).
inline ggml_type from_str(const std::string& s) {
  if (s == "f32")    return GGML_TYPE_F32;
  if (s == "f16")    return GGML_TYPE_F16;
  if (s == "bf16")   return GGML_TYPE_BF16;
  if (s == "q8_0")   return GGML_TYPE_Q8_0;
  if (s == "q4_0")   return GGML_TYPE_Q4_0;
  if (s == "q4_1")   return GGML_TYPE_Q4_1;
  if (s == "iq4_nl") return GGML_TYPE_IQ4_NL;
  if (s == "q5_0")   return GGML_TYPE_Q5_0;
  if (s == "q5_1")   return GGML_TYPE_Q5_1;
  return GGML_TYPE_COUNT;  // sentinel for unknown type names
}
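// A minimal usage sketch (hypothetical helper, not part of the header):
// feed CLI-style type names into llama_context_params; type_k/type_v are the
// llama.cpp cache-type fields, and GGML_TYPE_COUNT marks an unknown name.
inline llama_context_params params_with_kv_types(const std::string& ctk,
                                                 const std::string& ctv) {
  llama_context_params p = llama_context_default_params();
  if (from_str(ctk) != GGML_TYPE_COUNT) p.type_k = from_str(ctk);  // e.g. "q8_0"
  if (from_str(ctv) != GGML_TYPE_COUNT) p.type_v = from_str(ctv);
  return p;
}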
/// Remove token range from KV cache sequence.
inline bool remove_range(llama_context *ctx, llama_seq_id seq, llama_pos p0,
                         llama_pos p1) {
  // ...
  llama_memory_t mem = llama_get_memory(ctx);
  bool success = llama_memory_seq_rm(mem, seq, p0, p1);
  // ... (debug log: "remove_range called BEFORE next llama_decode()")
  return success;
}
/// Get maximum position in KV cache sequence.
inline llama_pos pos_max(llama_context *ctx, llama_seq_id seq) {
  // ...
  llama_memory_t mem = llama_get_memory(ctx);
  llama_pos max_pos = llama_memory_seq_pos_max(mem, seq);
  return max_pos;  // -1 when the sequence is empty
}
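// A minimal usage sketch (hypothetical helper): roll back the last n tokens
// of a sequence so an alternative continuation can be decoded. As the header
// warns, the removal must happen BEFORE the next llama_decode(); p1 = -1
// means "through the end of the sequence".
inline bool rollback_tail(llama_context* ctx, llama_seq_id seq, llama_pos n) {
  llama_pos cur = pos_max(ctx, seq);   // -1 when the sequence is empty
  if (cur + 1 < n) return false;       // fewer than n tokens resident
  return remove_range(ctx, seq, cur - n + 1, -1);
}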
/// Copy KV cache from one sequence to another.
inline void seq_cp(llama_context *ctx, llama_seq_id src, llama_seq_id dst,
                   llama_pos p0 = 0, llama_pos p1 = -1) {
  // ...
  llama_memory_t mem = llama_get_memory(ctx);
  llama_memory_seq_cp(mem, src, dst, p0, p1);

  LLOYAL_LOG_DEBUG(
      "[kv::seq_cp] Copied seq %d → %d [%d, %d)", src, dst, p0, p1);
}
/// Keep only one sequence, removing all others.
inline void seq_keep(llama_context *ctx, llama_seq_id seq) {
  // ...
  llama_memory_t mem = llama_get_memory(ctx);
  llama_memory_seq_keep(mem, seq);
}
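// A minimal usage sketch (hypothetical helper): fork a shared prefix into a
// second sequence, decode divergent continuations, then keep only the winner.
// Assumes the context was created with n_seq_max >= 2.
inline void fork_and_settle(llama_context* ctx) {
  seq_cp(ctx, /*src=*/0, /*dst=*/1);   // defaults p0=0, p1=-1 copy everything
  // ... decode branch A on seq 0 and branch B on seq 1 ...
  seq_keep(ctx, /*seq=*/0);            // drop every sequence except seq 0
}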
static_assert(std::is_signed_v<llama_seq_id>,
              "llama_seq_id must be signed for NO_LEASE sentinel");

/// Sentinel value indicating a branch has no KV residency.
constexpr llama_seq_id NO_LEASE = static_cast<llama_seq_id>(-1);

/// Tenancy state — tracks seq_id vacancy and leases.
struct State {
  llama_context* ctx = nullptr;      ///< Context for KV operations (nullptr after drain)
  std::vector<llama_seq_id> vacant;  ///< Available seq_ids (LIFO stack)
  std::vector<uint8_t> leased;       ///< Bitmap: leased[seq] = 1 if issued
  llama_seq_id n_seq_max = 0;        ///< Total seq_id capacity (from llama_n_seq_max)
};
/// Initialize tenancy with all seq_ids vacant.
inline State init(llama_context* ctx, llama_seq_id n_seq_max) {
  // ...
  State s;
  s.ctx = ctx;
  s.n_seq_max = n_seq_max;
  s.leased.resize(static_cast<size_t>(n_seq_max), 0);
  s.vacant.reserve(static_cast<size_t>(n_seq_max));
  for (llama_seq_id i = n_seq_max; i-- > 0; ) {
    s.vacant.push_back(i);  // count down so seq 0 is on top of the LIFO stack
  }
  return s;
}
/// Acquire a seq_id from the vacant pool.
inline llama_seq_id acquire(State& s) {
  // ... (empty-pool handling elided in this listing)
  llama_seq_id seq = s.vacant.back();
  s.vacant.pop_back();
  s.leased[static_cast<size_t>(seq)] = 1;
  return seq;
}
/// Release a seq_id back to vacant — bookkeeping only, no KV calls.
inline void release(State& s, llama_seq_id seq) {
  assert(seq >= 0 && seq < s.n_seq_max && "release: seq out of range");
  assert(s.leased[static_cast<size_t>(seq)] && "release: seq not leased");
  s.leased[static_cast<size_t>(seq)] = 0;
  s.vacant.push_back(seq);
}
/// Evict a seq_id — strip all KV tags then release.
inline void evict(State& s, llama_seq_id seq) {
  assert(seq >= 0 && seq < s.n_seq_max && "evict: seq out of range");
  assert(s.leased[static_cast<size_t>(seq)] && "evict: seq not leased");
  // ... (strips all KV tags for seq, then releases it)
}
/// Nuclear retain — keep one seq, rebuild vacancy from scratch.
inline void retain(State& s, llama_seq_id keep) {
  assert(keep >= 0 && keep < s.n_seq_max && "retain: keep seq out of range");
  assert(s.leased[static_cast<size_t>(keep)] && "retain: keep seq not leased");
  // ... (seq_keep drops every other sequence's KV tags)
  for (llama_seq_id i = 0; i < s.n_seq_max; ++i) {
    if (i != keep) assert(pos_max(s.ctx, i) < 0 &&
                          "retain: seq_keep left dirty tags");
  }
  // Rebuild vacancy from scratch: only `keep` stays leased.
  s.vacant.clear();
  for (llama_seq_id i = s.n_seq_max; i-- > 0; ) {
    if (i == keep) {
      s.leased[static_cast<size_t>(i)] = 1;
    } else {
      s.leased[static_cast<size_t>(i)] = 0;
      s.vacant.push_back(i);
    }
  }
}
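// A minimal sketch of the intended retain() pattern (hypothetical helper):
// after exploring several leased branches, keep the winner and recycle every
// other seq_id in one call instead of evicting each loser individually.
inline void settle_on_winner(State& s, llama_seq_id winner) {
  retain(s, winner);  // winner stays leased; all other seq_ids return to vacant
}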
/// Evict every leased seq_id.
inline void evict_all(State& s) {
  for (llama_seq_id i = 0; i < s.n_seq_max; ++i) {
    if (s.leased[static_cast<size_t>(i)]) {
      // ... (evicts sequence i)
    }
  }
}
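// A minimal usage sketch (hypothetical helper) of the lease lifecycle,
// assuming the context was created with at least 4 sequences. evict() strips
// KV tags before releasing; release() alone is pure bookkeeping for a
// sequence whose KV residency is already gone.
inline void lease_roundtrip(llama_context* ctx) {
  State s = init(ctx, /*n_seq_max=*/4);
  llama_seq_id seq = acquire(s);
  if (seq == NO_LEASE) return;  // assumed sentinel when the pool is empty
  // ... decode into sequence `seq` ...
  evict(s, seq);                // KV tags stripped, seq_id back in the pool
}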
/// Get size needed to serialize sequence state.
inline size_t state_size(llama_context *ctx, llama_seq_id seq) {
  // ...
  llama_memory_t mem = llama_get_memory(ctx);
  llama_pos max_pos = llama_memory_seq_pos_max(mem, seq);
  if (max_pos < 0) {
    LLOYAL_LOG_DEBUG(
        "[kv::state_size] WARNING: KV cache is empty (max_pos=%d) " /* ... */,
        max_pos);
  }
  size_t size = llama_state_seq_get_size(ctx, seq);
  if (size == 0) {
    LLOYAL_LOG_DEBUG(
        "[kv::state_size] Per-sequence size query failed for seq=%d", seq);
    LLOYAL_LOG_DEBUG(
        "[kv::state_size] Attempting global state size (fallback)");
    size = llama_state_get_size(ctx);
    // ... (throws if both "size queries failed")
  }
  LLOYAL_LOG_DEBUG(
      "[kv::state_size] Per-sequence size for seq=%d: %zu bytes (%.1f MB)",
      seq, size, size / 1024.0 / 1024.0);
  return size;
}
/// Save sequence state to buffer.
inline size_t state_save(llama_context *ctx, llama_seq_id seq, uint8_t *dst,
                         size_t size) {
  if (!ctx || !dst || size == 0) {
    LLOYAL_LOG_DEBUG(
        "[kv::state_save] ERROR: invalid parameters (ctx=%p, dst=%p, size=%zu)",
        (void*)ctx, (void*)dst, size);
    return 0;
  }
  llama_memory_t mem = llama_get_memory(ctx);
  llama_pos max_pos = llama_memory_seq_pos_max(mem, seq);
  if (max_pos < 0) {
    LLOYAL_LOG_DEBUG(
        "[kv::state_save] WARNING: KV cache is empty (max_pos=%d) " /* ... */,
        max_pos);
  }
  size_t written = llama_state_seq_get_data(ctx, dst, size, seq);
  if (written == 0) {  // ... (logs "(possible KV fragmentation)")
    LLOYAL_LOG_DEBUG("[kv::state_save] Attempting global state save (fallback)");
    written = llama_state_get_data(ctx, dst, size);
    if (written > 0) {
      LLOYAL_LOG_DEBUG(
          "[kv::state_save] Global fallback succeeded: %zu bytes (%.1f MB)",
          written, written / 1024.0 / 1024.0);
      return written;
    }
    LLOYAL_LOG_DEBUG(
        "[kv::state_save] ERROR: Both per-sequence and global save failed");
    return 0;
  }
  LLOYAL_LOG_DEBUG(
      "[kv::state_save] Per-sequence saved %zu bytes (%.1f MB) for seq=%d",
      written, written / 1024.0 / 1024.0, seq);
  return written;
}
/// Restore sequence state from buffer.
inline size_t state_load(llama_context *ctx, llama_seq_id seq,
                         const uint8_t *src, size_t size) {
  if (!ctx || !src || size == 0) {
    LLOYAL_LOG_DEBUG(
        "[kv::state_load] ERROR: invalid parameters (ctx=%p, src=%p, size=%zu)",
        (void*)ctx, (void*)src, size);
    return 0;
  }
  llama_memory_t mem = llama_get_memory(ctx);
  llama_pos max_pos = llama_memory_seq_pos_max(mem, seq);
  if (max_pos < 0) {
    LLOYAL_LOG_DEBUG(
        "[kv::state_load] WARNING: KV cache is empty (max_pos=%d) "
        "- loading may crash on recurrent models",
        max_pos);
  }
  size_t read = llama_state_seq_set_data(ctx, src, size, seq);
  if (read == 0) {  // ... (logs "(possible fragmentation)")
    LLOYAL_LOG_DEBUG("[kv::state_load] Attempting global state restore (fallback)");
    read = llama_state_set_data(ctx, src, size);
    if (read > 0) {
      LLOYAL_LOG_DEBUG(
          "[kv::state_load] Global fallback succeeded: %zu bytes (%.1f MB)",
          read, read / 1024.0 / 1024.0);
      return read;
    }
    // ... (error log elided)
    return 0;
  }
  LLOYAL_LOG_DEBUG(
      "[kv::state_load] Per-sequence loaded %zu bytes (%.1f MB) for seq=%d",
      read, read / 1024.0 / 1024.0, seq);
  return read;
}
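// A minimal usage sketch (hypothetical helpers): in-memory snapshot/restore
// round trip for one sequence, e.g. to checkpoint before speculative
// decoding. state_size() may report the global-state size when the
// per-sequence query fails, which state_save() mirrors with its own fallback.
inline std::vector<uint8_t> snapshot_seq(llama_context* ctx, llama_seq_id seq) {
  std::vector<uint8_t> buf(state_size(ctx, seq));
  buf.resize(state_save(ctx, seq, buf.data(), buf.size()));  // shrink to written
  return buf;
}
inline bool restore_seq(llama_context* ctx, llama_seq_id seq,
                        const std::vector<uint8_t>& buf) {
  return state_load(ctx, seq, buf.data(), buf.size()) != 0;
}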
/// Get size needed to serialize global state.
inline size_t global_state_size(llama_context *ctx) {
  // ...
  size_t size = llama_state_get_size(ctx);
  // ... (logs size / 1024.0 / 1024.0 MB)
  return size;
}

/// Save global state to buffer.
inline size_t global_state_save(llama_context *ctx, uint8_t *dst, size_t size) {
  if (!ctx || !dst || size == 0) return 0;  // ... (error log elided)
  size_t written = llama_state_get_data(ctx, dst, size);
  // ... (logs written, written / 1024.0 / 1024.0 MB)
  return written;
}

/// Restore global state from buffer.
inline size_t global_state_load(llama_context *ctx, const uint8_t *src, size_t size) {
  if (!ctx || !src || size == 0) return 0;  // ... (error log elided)
  size_t read = llama_state_set_data(ctx, src, size);
  // ... (logs read / 1024.0 / 1024.0 MB)
  return read;
}
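// A minimal usage sketch (hypothetical helper): whole-context snapshot
// covering every sequence at once, useful when per-sequence saves keep
// hitting the fragmentation fallback anyway.
inline std::vector<uint8_t> snapshot_all(llama_context* ctx) {
  std::vector<uint8_t> buf(global_state_size(ctx));
  buf.resize(global_state_save(ctx, buf.data(), buf.size()));
  return buf;
}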
629 "[kv::build_info] ============================================");
631 "[kv::build_info] llama.cpp KV Sequence Operations Configuration");
633 "[kv::build_info] ============================================");
637 "[kv::build_info] Current MVP: n_seq_max=1 (single sequence only)");
640 llama_pos max_pos =
pos_max(ctx, 0);
649 if (snapshot_size > 0) {
651 "[kv::build_info] Estimated snapshot size: %zu bytes (%.1f MB)",
652 snapshot_size, snapshot_size / 1024.0 / 1024.0);
657 "[kv::build_info] Fragmentation fallback: per-sequence → global state");
659 "[kv::build_info] Critical: Call remove_range() BEFORE llama_decode()");
661 "[kv::build_info] ============================================");
/// Clear all KV cache (complete reset).
inline void clear_all(llama_context *ctx) {
  if (!ctx) {
    throw std::runtime_error("kv::clear_all - NULL context");
  }
  // ...
  llama_memory_clear(llama_get_memory(ctx), /*data=*/true);
}

/// Clear KV cache metadata only (fast reset).
inline void clear_metadata(llama_context *ctx) {
  if (!ctx) {
    throw std::runtime_error("kv::clear_metadata - NULL context");
  }
  // ...
  llama_memory_clear(llama_get_memory(ctx), /*data=*/false);
}
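// A minimal usage sketch (hypothetical helper): choosing between the two
// resets. The boolean passed to llama_memory_clear above is the only
// difference; the metadata-only variant skips zeroing the data buffers and
// is the cheaper reset between unrelated prompts.
inline void reset_context(llama_context* ctx, bool wipe_data) {
  if (wipe_data) clear_all(ctx);
  else           clear_metadata(ctx);
}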
inline void clear_and_reseed(llama_context *ctx,
                             const std::vector<llama_token> &original_sinks,
                             const std::vector<llama_token> &tail,
                             int32_t n_batch) {
  if (!ctx) {
    throw std::runtime_error("kv::clear_and_reseed - NULL context");
  }
  if (original_sinks.empty() && tail.empty()) {
    LLOYAL_LOG_DEBUG(
        "[kv::clear_and_reseed] ERROR: both sinks and tail are empty");
    throw std::runtime_error("kv::clear_and_reseed - no tokens to reseed");
  }

  LLOYAL_LOG_DEBUG(
      "[kv::clear_and_reseed] Starting reseed: %zu sinks + %zu tail = %zu total",
      original_sinks.size(), tail.size(), original_sinks.size() + tail.size());

  llama_memory_t mem = llama_get_memory(ctx);

  llama_pos max_pos_before = llama_memory_seq_pos_max(mem, 0);
  LLOYAL_LOG_DEBUG(
      "[kv::clear_and_reseed] Before clear: KV cache max_pos=%d", max_pos_before);

  llama_memory_clear(mem, true);

  llama_pos max_pos_after_clear = llama_memory_seq_pos_max(mem, 0);
  if (max_pos_after_clear != -1) {
    LLOYAL_LOG_DEBUG(
        "[kv::clear_and_reseed] WARNING: KV cache not empty after clear (max_pos=%d)",
        max_pos_after_clear);
  }

  if (!original_sinks.empty()) {
    LLOYAL_LOG_DEBUG(
        "[kv::clear_and_reseed] Re-decoding %zu sinks at position 0",
        original_sinks.size());
    if (/* ... batch decode of original_sinks fails ... */ false) {
      throw std::runtime_error("kv::clear_and_reseed - llama_decode failed on sinks");
    }
  }

  if (!tail.empty()) {
    int32_t tail_start_pos = static_cast<int32_t>(original_sinks.size());
    LLOYAL_LOG_DEBUG(
        "[kv::clear_and_reseed] Re-decoding %zu tail tokens at position %d",
        tail.size(), tail_start_pos);
    if (/* ... batch decode of tail fails ... */ false) {
      throw std::runtime_error("kv::clear_and_reseed - llama_decode failed on tail");
    }
  }

  llama_pos max_pos_after = llama_memory_seq_pos_max(mem, 0);
  int32_t expected_pos =
      static_cast<int32_t>(original_sinks.size() + tail.size()) - 1;
  LLOYAL_LOG_DEBUG(
      "[kv::clear_and_reseed] After reseed: KV cache max_pos=%d (expected %d)",
      max_pos_after, expected_pos);

  if (max_pos_after != expected_pos) {
    LLOYAL_LOG_DEBUG(
        "[kv::clear_and_reseed] WARNING: Unexpected final position (got %d, expected %d)",
        max_pos_after, expected_pos);
  }
}
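// A minimal usage sketch (hypothetical helper): StreamingLLM-style
// compaction. Keep the first few attention-sink tokens plus a recent tail
// and re-decode them at renumbered positions [0, sinks + tail). The sink and
// tail sizes are illustrative; the caller is assumed to track the full token
// history itself.
inline void compact_history(llama_context* ctx,
                            const std::vector<llama_token>& history,
                            int32_t n_batch) {
  const size_t n_sink = 4, n_tail = 256;
  if (history.size() <= n_sink + n_tail) return;  // nothing worth dropping
  std::vector<llama_token> sinks(history.begin(), history.begin() + n_sink);
  std::vector<llama_token> tail(history.end() - n_tail, history.end());
  clear_and_reseed(ctx, sinks, tail, n_batch);
}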
/// Write KV state to file with self-describing format.
inline size_t write_file(llama_context *ctx, llama_seq_id seq,
                         const std::string &filepath,
                         const std::vector<llama_token> &tokens) {
  // ...
  if (filepath.empty()) {
    // ... (error handling elided)
  }
  llama_memory_t mem = llama_get_memory(ctx);
  llama_pos max_pos = llama_memory_seq_pos_max(mem, seq);
  if (max_pos < 0) {
    LLOYAL_LOG_DEBUG("[kv::write_file] WARNING: KV cache is empty - skipping write");
    return 0;
  }
  size_t bytes = llama_state_seq_save_file(ctx, filepath.c_str(), seq,
                                           tokens.data(), tokens.size());
  // ... (logs filepath.c_str(), bytes, bytes / 1024.0 / 1024.0, ...)
  return bytes;
}
/// Data structure returned by read_file.
struct FileData {
  std::vector<llama_token> tokens;  ///< Tokens restored from file
  size_t bytes_read;                ///< Total bytes read from file
};

inline FileData read_file(llama_context *ctx, llama_seq_id seq,
                          const std::string &filepath) {
  if (!ctx) {
    throw std::runtime_error("[kv::read_file] null context");
  }
  if (filepath.empty()) {
    throw std::runtime_error("[kv::read_file] empty filepath");
  }
  // ...
  const uint32_t n_ctx = llama_n_ctx(ctx);
  std::vector<llama_token> tokens;
  tokens.resize(n_ctx);  // capacity for the stored token list
  size_t token_count = 0;
  size_t bytes = llama_state_seq_load_file(ctx, filepath.c_str(), seq, tokens.data(),
                                           tokens.size(), &token_count);
  if (bytes == 0) {
    throw std::runtime_error("[kv::read_file] failed to load from " + filepath);
  }
  tokens.resize(token_count);  // shrink to the actual token count
  LLOYAL_LOG_DEBUG(
      "[kv::read_file] Loaded %s: %zu bytes (%.1f MB), %zu tokens",
      filepath.c_str(), bytes, bytes / 1024.0 / 1024.0, token_count);
  return FileData{std::move(tokens), bytes};
}
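// A minimal usage sketch (hypothetical helper; the path "session.kv" is
// illustrative): disk round trip. write_file persists the KV state together
// with the token list, so read_file can hand both back and the caller can
// resume decoding at position fd.tokens.size().
inline std::vector<llama_token> reload_session(llama_context* ctx,
                                               const std::vector<llama_token>& toks) {
  write_file(ctx, /*seq=*/0, "session.kv", toks);
  clear_all(ctx);  // simulate a cold start
  FileData fd = read_file(ctx, /*seq=*/0, "session.kv");
  return fd.tokens;
}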
Reference (brief descriptions from the generated docs):

Logging and decoding (from companion headers):
  #define LLOYAL_LOG_DEBUG(...)
    liblloyal - Common definitions and logging.
  int many(llama_context *ctx, const llama_token *tokens, int32_t n_tokens, int32_t n_past, int32_t n_batch, llama_seq_id seq_id=0)
    Batch Decoding Operations: decode multiple tokens into the KV cache with auto-chunking.

Type mapping:
  ggml_type from_str(const std::string &s)
    Map string name to ggml_type enum (matches llama.cpp CLI -ctk/-ctv flags).

Sequence tenancy:
  constexpr llama_seq_id NO_LEASE
    Sentinel value indicating a branch has no KV residency.
  State init(llama_context *ctx, llama_seq_id n_seq_max)
    Initialize tenancy with all seq_ids vacant.
  llama_seq_id acquire(State &s)
    Acquire a seq_id from the vacant pool.
  void release(State &s, llama_seq_id seq)
    Release a seq_id back to vacant — bookkeeping only, no KV calls.
  void evict(State &s, llama_seq_id seq)
    Evict a seq_id — strip all KV tags then release.
  void evict_all(State &s)
    Evict every leased seq_id.
  void retain(State &s, llama_seq_id keep)
    Nuclear retain — keep one seq, rebuild vacancy from scratch.
  size_t available(const State &s)
    Number of vacant seq_ids available for acquisition.

KV cache operations:
  llama_pos pos_max(llama_context *ctx, llama_seq_id seq)
    Get maximum position in KV cache sequence.
  bool remove_range(llama_context *ctx, llama_seq_id seq, llama_pos p0, llama_pos p1)
    Remove token range from KV cache sequence.
  void seq_cp(llama_context *ctx, llama_seq_id src, llama_seq_id dst, llama_pos p0=0, llama_pos p1=-1)
    Copy KV cache from one sequence to another.
  void seq_keep(llama_context *ctx, llama_seq_id seq)
    Keep only one sequence, removing all others.
  void clear_all(llama_context *ctx)
    Clear all KV cache (complete reset).
  void clear_metadata(llama_context *ctx)
    Clear KV cache metadata only (fast reset).
  void clear_and_reseed(llama_context *ctx, const std::vector<llama_token> &original_sinks, const std::vector<llama_token> &tail, int32_t n_batch)
    Clear the cache, then re-decode sinks and tail at renumbered positions.
  void log_build_info(llama_context *ctx)
    Log KV cache build info and current state.

State serialization:
  size_t state_size(llama_context *ctx, llama_seq_id seq)
    Get size needed to serialize sequence state.
  size_t state_save(llama_context *ctx, llama_seq_id seq, uint8_t *dst, size_t size)
    Save sequence state to buffer.
  size_t state_load(llama_context *ctx, llama_seq_id seq, const uint8_t *src, size_t size)
    Restore sequence state from buffer.
  size_t global_state_size(llama_context *ctx)
    Get size needed to serialize global state.
  size_t global_state_save(llama_context *ctx, uint8_t *dst, size_t size)
    Save global state to buffer.
  size_t global_state_load(llama_context *ctx, const uint8_t *src, size_t size)
    Restore global state from buffer.

File persistence:
  size_t write_file(llama_context *ctx, llama_seq_id seq, const std::string &filepath, const std::vector<llama_token> &tokens)
    Write KV state to file with self-describing format.
  FileData read_file(llama_context *ctx, llama_seq_id seq, const std::string &filepath)
    Read KV state from file; returns the FileData struct below.

Data structures:
  struct FileData (returned by read_file):
    std::vector<llama_token> tokens   Tokens restored from file.
    size_t bytes_read                 Total bytes read from file.
  struct State (tenancy state — tracks seq_id vacancy and leases):
    llama_context *ctx                Context for KV operations (nullptr after drain).
    std::vector<llama_seq_id> vacant  Available seq_ids (LIFO stack).
    std::vector<uint8_t> leased       Bitmap: leased[seq] = 1 if issued.
    llama_seq_id n_seq_max            Total seq_id capacity (from llama_n_seq_max).