#include <llama/llama.h>
#include "nlohmann/json.hpp"
inline FormatResult format(const llama_model *model,
                           const std::string &messages_json,
                           const std::string &template_override = "") {
  // ... (template processing elided; helper_result holds the output of
  //      format_chat_template_complete)
  if (helper_result.prompt.empty()) {
    LLOYAL_LOG_DEBUG(
        "[chat_template::format] Template processing failed, using fallback");
    // Fallback path: render each message as a plain "role: content" line.
    try {
      using json = nlohmann::ordered_json;
      json messages = json::parse(messages_json);
      std::string fallback;
      for (const auto &msg : messages) {
        if (msg.contains("role") && msg.contains("content")) {
          fallback += msg["role"].get<std::string>() + ": " +
                      msg["content"].get<std::string>() + "\n";
        }
      }
      // ...
      LLOYAL_LOG_DEBUG(
          "[chat_template::format] Using fallback format (%zu bytes)",
          /* size argument elided */);
      // ...
    } catch (const std::exception &e) {
      LLOYAL_LOG_DEBUG(
          "[chat_template::format] ERROR: Failed to parse messages JSON: %s",
          /* error argument elided */);
      // ...
    }
  }
  // ...
  LLOYAL_LOG_DEBUG(
      "[chat_template::format] Successfully formatted with %zu stop tokens",
      /* count argument elided */);
  // ...
} catch (const std::exception &e) {
  // ... (pairs with a function-level try elided above)
}
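Usage sketch (not part of the original listing): a minimal call site for format(). This assumes the functions live in a chat_template namespace, as the "[chat_template::format]" log prefixes suggest, and that FormatResult exposes the formatted prompt via a prompt member, mirroring ChatTemplateResult; both are assumptions, not confirmed by this listing.

// Hypothetical caller: builds the messages JSON and formats it.
void example(const llama_model *model) {
  nlohmann::ordered_json messages = nlohmann::ordered_json::array({
      {{"role", "system"}, {"content", "You are a helpful assistant."}},
      {{"role", "user"}, {"content", "Hello!"}},
  });

  // `chat_template` namespace and FormatResult::prompt are assumed.
  FormatResult result = chat_template::format(model, messages.dump());

  // If template processing fails, the prompt degrades to plain
  // "role: content" lines:
  //   system: You are a helpful assistant.
  //   user: Hello!
}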
inline bool validate(const std::string &template_str) {
  // ... (syntax check elided; outcome stored in isValid)
  LLOYAL_LOG_DEBUG(/* format string elided */
                   isValid ? "valid" : "invalid");
  // ...
} catch (const std::exception &e) {
  // ... (pairs with a function-level try elided above)
}
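Usage sketch (again illustrative, not from the listing): validating a user-supplied template before use. Given the catch block above, validate() presumably reports bad syntax via its return value rather than by throwing.

// Hypothetical caller; `chat_template` namespace assumed.
bool pick_template(const std::string &user_template, std::string &chosen) {
  if (chat_template::validate(user_template)) {
    chosen = user_template;   // user-supplied template parses cleanly
    return true;
  }
  return false;               // caller should fall back to the model's template
}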
Referenced declarations:

#define LLOYAL_LOG_DEBUG(...)
    From common.h: liblloyal - Common definitions and logging.

bool validate(const std::string &template_str)
    Validate chat template syntax.

FormatResult format(const llama_model *model, const std::string &messages_json, const std::string &template_override = "")
    Format chat messages using the model's chat template, with fallback.

ChatTemplateResult format_chat_template_complete(const llama_model *model, const std::string &messages_json, const std::string &template_override = "")
    Complete chat template processing with stop token detection.

bool validate_chat_template_helper(const std::string &template_str)
    Validate chat template syntax.

using json = nlohmann::ordered_json

struct ChatTemplateResult
    Result from complete chat template processing.
    std::string prompt
        Formatted chat prompt ready for tokenization.
    std::vector<std::string> additional_stops
        Template-specific stop tokens (e.g., "<|im_end|>", "<|eot_id|>").
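Tying the reference together, a sketch of consuming both ChatTemplateResult fields. generate_until is a hypothetical stand-in for a real tokenize-and-sample loop; only prompt and additional_stops come from this header.

#include <string>
#include <vector>

// Hypothetical helper: samples from the model, stopping when any of the
// given stop strings appears in the output.
std::string generate_until(const llama_model *model, const std::string &prompt,
                           const std::vector<std::string> &stops);

std::string run_chat(const llama_model *model, const std::string &messages_json) {
  // `chat_template` namespace assumed from the log prefixes.
  ChatTemplateResult res =
      chat_template::format_chat_template_complete(model, messages_json);

  // res.prompt is ready for tokenization; res.additional_stops carries
  // template-specific stop tokens such as "<|im_end|>" or "<|eot_id|>".
  return generate_until(model, res.prompt, res.additional_stops);
}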