#pragma once

#include <llama/llama.h>

#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

// LLOYAL_LOG_DEBUG comes from liblloyal's common definitions/logging header.

// Model cache key combining file path and GPU configuration.
struct ModelKey {
  std::string canonPath; // Normalized file path (file:// prefix removed)
  int n_gpu_layers;      // Number of layers offloaded to GPU (-1 = all)
  bool use_mmap;         // Whether to use memory mapping

  bool operator==(const ModelKey &o) const {
    return canonPath == o.canonPath && n_gpu_layers == o.n_gpu_layers &&
           use_mmap == o.use_mmap;
  }
};

// Hash functor for ModelKey. (The functor's name and its exact combining
// scheme are not visible in the listing; this is one straightforward choice.)
struct ModelKeyHash {
  std::hash<std::string> Hs;

  // Compute hash for ModelKey by mixing the path hash with the GPU/mmap fields.
  size_t operator()(const ModelKey &k) const {
    size_t h = Hs(k.canonPath);
    h ^= std::hash<int>{}(k.n_gpu_layers) + 0x9e3779b9 + (h << 6) + (h >> 2);
    h ^= std::hash<bool>{}(k.use_mmap) + 0x9e3779b9 + (h << 6) + (h >> 2);
    return h;
  }
};

// Thread-safe registry for sharing llama_model instances.
class ModelRegistry {
public:
  // Acquire a model from cache or load it if not present.
  static std::shared_ptr<llama_model>
  acquire(const std::string &fsPath, const llama_model_params &params);

private:
  inline static std::mutex mu_;
  inline static std::unordered_map<ModelKey, std::weak_ptr<llama_model>,
                                   ModelKeyHash>
      cache_;

  // Build a cache key from the file path and the load parameters.
  static ModelKey makeKey(const std::string &fsPath,
                          const llama_model_params &params);
};

namespace detail {

// shared_ptr deleter: runs when the last reference to a model is released.
inline void freeModel(llama_model *model) {
  LLOYAL_LOG_DEBUG(
      "[ModelRegistry] Freeing model: ptr=%p (last reference released)",
      (void *)model);
  llama_model_free(model);
}

} // namespace detail

inline ModelKey ModelRegistry::makeKey(const std::string &fsPath,
                                       const llama_model_params &params) {
  // Normalize the path: strip an optional "file://" prefix.
  std::string canonPath = fsPath;
  const std::string filePrefix = "file://";
  if (canonPath.substr(0, filePrefix.length()) == filePrefix) {
    canonPath = canonPath.substr(filePrefix.length());
  }
  return {canonPath, params.n_gpu_layers, params.use_mmap};
}

inline std::shared_ptr<llama_model>
ModelRegistry::acquire(const std::string &fsPath,
                       const llama_model_params &params) {
  ModelKey key = makeKey(fsPath, params);

  LLOYAL_LOG_DEBUG(
      "[ModelRegistry] acquire: path=%s, n_gpu_layers=%d, use_mmap=%s",
      key.canonPath.c_str(), key.n_gpu_layers,
      key.use_mmap ? "true" : "false");

  std::lock_guard<std::mutex> lock(mu_);

  // Fast path: another owner still holds this model alive.
  auto cacheEntry = cache_.find(key);
  if (cacheEntry != cache_.end()) {
    if (auto existingModel = cacheEntry->second.lock()) {
      long refCount = existingModel.use_count();
      LLOYAL_LOG_DEBUG(
          "[ModelRegistry] Cache HIT - Reusing model: ptr=%p, refcount=%ld",
          (void *)existingModel.get(), refCount);
      return existingModel;
    }
    // The weak_ptr expired: all previous owners have released the model.
    LLOYAL_LOG_DEBUG("[ModelRegistry] Cache entry expired, removing stale entry");
    cache_.erase(cacheEntry);
  }

  LLOYAL_LOG_DEBUG("[ModelRegistry] Cache MISS - Loading NEW model from disk");
  LLOYAL_LOG_DEBUG("[ModelRegistry] n_gpu_layers=%d, mmap %s", key.n_gpu_layers,
                   key.use_mmap ? "enabled" : "disabled");

  llama_model *rawModel =
      llama_model_load_from_file(key.canonPath.c_str(), params);
  if (rawModel == nullptr) {
    LLOYAL_LOG_DEBUG(
        "[ModelRegistry] ERROR: llama_model_load_from_file returned NULL");
    return nullptr;
  }

  size_t modelSize = llama_model_size(rawModel);
  LLOYAL_LOG_DEBUG("[ModelRegistry] Loaded model: ptr=%p, size=%.2f MiB",
                   (void *)rawModel, modelSize / (1024.0 * 1024.0));

  // Wrap the raw pointer in a shared_ptr whose deleter calls llama_model_free,
  // and publish a weak_ptr so later acquire() calls can share this instance.
  auto sharedModel = std::shared_ptr<llama_model>(rawModel, detail::freeModel);
  cache_[key] = sharedModel;
  LLOYAL_LOG_DEBUG("[ModelRegistry] Cached new model as shared_ptr (refcount=1)");

  return sharedModel;
}
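A minimal usage sketch, assuming the registry header above is on the include path; the model path and parameter values are placeholders, not part of the original listing. It also shows the effect of makeKey stripping the file:// prefix, so both spellings resolve to the same cache entry.

// Usage sketch (hypothetical model path; assumes the ModelRegistry header
// above has been included).
#include <llama/llama.h>

int main() {
  llama_model_params params = llama_model_default_params();
  params.n_gpu_layers = -1;  // offload all layers
  params.use_mmap = true;

  // Both calls map to the same ModelKey, so the second acquire() reuses the
  // model loaded by the first instead of reading it from disk again.
  auto a = ModelRegistry::acquire("file:///models/example.gguf", params);
  auto b = ModelRegistry::acquire("/models/example.gguf", params);

  bool shared = a && b && a.get() == b.get();
  return shared ? 0 : 1;  // the model is freed once both shared_ptrs go away
}

Because the registry stores weak_ptr values, it never keeps a model alive on its own: once every caller drops its shared_ptr, detail::freeModel runs, and a later acquire() with the same key reloads the file from disk.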