29#include <llama/llama.h>
31#include <peg-parser.h>
144 const std::string& output,
145 common_chat_format format,
146 common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_NONE,
147 bool is_partial =
false,
148 const std::string& generation_prompt =
"",
149 const std::string& parser_data =
""
155 common_chat_parser_params syntax;
156 syntax.format = format;
158 syntax.generation_prompt = generation_prompt;
161 if (!parser_data.empty()) {
162 syntax.parser.load(parser_data);
166 common_chat_msg msg = common_chat_parse(output, is_partial, syntax);
172 for (
const auto& tc : msg.tool_calls) {
173 result.
tool_calls.push_back({tc.name, tc.arguments, tc.id});
176 }
catch (
const std::exception& e) {
177 LLOYAL_LOG_DEBUG(
"[chat_out::parse] Parse failed: %s, returning raw output", e.what());
202 const llama_model* model,
203 const std::string& output,
204 bool is_partial =
false
210 common_chat_templates_ptr tmpls = common_chat_templates_init(model,
"");
217 common_chat_templates_inputs inputs;
218 inputs.messages = {{.role =
"user", .
content =
""}};
219 inputs.add_generation_prompt =
true;
220 inputs.use_jinja =
true;
222 common_chat_params params = common_chat_templates_apply(tmpls.get(), inputs);
225 return parse(output, params.format, COMMON_REASONING_FORMAT_NONE, is_partial,
226 params.generation_prompt);
228 }
catch (
const std::exception& e) {
// Variadic debug-logging macro. As defined here it expands to nothing, so every
// LLOYAL_LOG_DEBUG(...) call site compiles away entirely (arguments are not
// evaluated). NOTE(review): this appears to be the logging-disabled stub —
// confirm whether a build flag elsewhere provides a real printf-style definition.
#define LLOYAL_LOG_DEBUG(...)
liblloyal - Common definitions and logging
Chat output parsing (tool calls, reasoning, content)
ParseResult parse(const std::string &output, common_chat_format format, common_reasoning_format reasoning_format=COMMON_REASONING_FORMAT_NONE, bool is_partial=false, const std::string &generation_prompt="", const std::string &parser_data="")
Parse model output with explicit format.
Result from parsing model output.
std::vector< ToolCall > tool_calls
Extracted tool calls (empty array if none)
std::string content
Main response text (visible to user)
std::string reasoning_content
Extracted thinking/reasoning blocks (empty if none)