#pragma once

#include "common.h"
#include "log.h"
#include "llama.h"
#include "arg.h"
#include "base64.hpp"
#include "mtmd.h"
#include "mtmd-helper.h"
#include "chat.h"

// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
// increase backlog size to avoid connection resets for >> 1 slots
#define CPPHTTPLIB_LISTEN_BACKLOG 512
// disable Nagle's algorithm
#define CPPHTTPLIB_TCP_NODELAY true
#include <cpp-httplib/httplib.h>

#define JSON_ASSERT GGML_ASSERT
#include <nlohmann/json.hpp>

#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <memory>
#include <cinttypes>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo"

using json = nlohmann::ordered_json;

// per-slot log helpers: prefix each message with the slot id and the current task id
#define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__)
#define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__)
#define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__)
#define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, ((slot).task ? (slot).task->id : -1), __VA_ARGS__)

// server-level log helpers
#define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_DBG(fmt, ...) LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)

// task-queue log helpers
#define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)

using raw_buffer = std::vector<uint8_t>;

// extract the value of a JSON key; if the key is missing, null, or has the wrong type, return the default value
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // fallback null check
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const & err) {
            LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value: %s\n", key.c_str(), json(default_value).type_name(), err.what());
            return default_value;
        }
    } else {
        return default_value;
    }
}

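// A minimal usage sketch (request body and values are illustrative):
//
//   json body = json::parse(R"({"n_predict": 128, "temperature": "hot"})");
//   int   n_predict = json_value(body, "n_predict",   64);   // present    -> 128
//   float temp      = json_value(body, "temperature", 0.8f); // wrong type -> warns, returns 0.8f
//   int   top_k     = json_value(body, "top_k",       40);   // missing    -> 40
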
const static std::string build_info("b" + std::to_string(LLAMA_BUILD_NUMBER) + "-" + LLAMA_COMMIT);

// thin wrapper around common_grammar_trigger with (de)serialization functions
struct server_grammar_trigger {
    common_grammar_trigger value;

    server_grammar_trigger() = default;
    server_grammar_trigger(const common_grammar_trigger & value) : value(value) {}
    server_grammar_trigger(const json & in) {
        value.type  = (common_grammar_trigger_type) in.at("type").get<int>();
        value.value = in.at("value").get<std::string>();
        if (value.type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) {
            value.token = (llama_token) in.at("token").get<int>();
        }
    }

    json to_json() const {
        json out {
            {"type", (int) value.type},
            {"value", value.value},
        };
        if (value.type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) {
            out["token"] = (int) value.token;
        }
        return out;
    }
};

//
// tokenizer and input processing utilities
//

static bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number_integer()) {
                return false;
            }
        }
        return true;
    }
    return false;
}

// is array having BOTH numbers & strings?
static bool json_is_array_of_mixed_numbers_strings(const json & data) {
    bool seen_string = false;
    bool seen_number = false;
    if (data.is_array()) {
        for (const auto & e : data) {
            seen_string |= e.is_string();
            seen_number |= e.is_number_integer();
            if (seen_number && seen_string) {
                return true;
            }
        }
    }
    return false;
}

// does the array contain any individual integers (token ids)?
static bool json_is_array_and_contains_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (e.is_number_integer()) {
                return true;
            }
        }
        return false;
    }
    return false;
}

// get value by path (key1 / key2)
static json json_get_nested_values(const std::vector<std::string> & paths, const json & js) {
    json result = json::object();

    for (const std::string & path : paths) {
        json current = js;
        const auto keys = string_split<std::string>(path, '/');
        bool valid_path = true;
        for (const std::string & k : keys) {
            if (valid_path && current.is_object() && current.contains(k)) {
                current = current[k];
            } else {
                valid_path = false;
            }
        }
        if (valid_path) {
            result[path] = current;
        }
    }
    return result;
}

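// Usage sketch (hypothetical input): nested keys are addressed with '/' and the
// values are returned under their full path; invalid paths are skipped silently:
//
//   json js  = json::parse(R"({"model": {"name": "llama"}, "seed": 42})");
//   json out = json_get_nested_values({"model/name", "seed", "missing/key"}, js);
//   // out == {"model/name": "llama", "seed": 42}
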
/**
 * this handles 2 cases:
 * - only string, example: "string"
 * - mixed string and tokens, example: [12, 34, "string", 56, 78]
 */
static llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) {
    // if `add_special` is true, special tokens (BOS) are only added when json_prompt is a string,
    // or when the first element of the json_prompt array is a string
    llama_tokens prompt_tokens;

    if (json_prompt.is_array()) {
        bool first = true;
        for (const auto & p : json_prompt) {
            if (p.is_string()) {
                auto s = p.template get<std::string>();

                // note: renamed from `p` to avoid shadowing the loop variable
                llama_tokens ids;
                if (first) {
                    ids = common_tokenize(vocab, s, add_special, parse_special);
                    first = false;
                } else {
                    ids = common_tokenize(vocab, s, false, parse_special);
                }

                prompt_tokens.insert(prompt_tokens.end(), ids.begin(), ids.end());
            } else {
                if (first) {
                    first = false;
                }

                prompt_tokens.push_back(p.template get<llama_token>());
            }
        }
    } else {
        auto s = json_prompt.template get<std::string>();
        prompt_tokens = common_tokenize(vocab, s, add_special, parse_special);
    }

    return prompt_tokens;
}

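// Usage sketch (token ids are illustrative, the real ids depend on the vocab):
//
//   json prompt = json::parse(R"([12, 34, "hello", 56])");
//   llama_tokens toks = tokenize_mixed(vocab, prompt, true, false);
//   // 12 and 34 are kept verbatim, "hello" is tokenized and spliced in between;
//   // add_special would only apply when tokenizing the first element
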
// return the last index of character that can form a valid string
// if the last character is potentially cut in half, return the index before the cut
// if validate_utf8(text) == text.size(), then the whole text is valid utf8
static size_t validate_utf8(const std::string & text) {
    size_t len = text.size();
    if (len == 0) return 0;

    // check the last few bytes to see if a multi-byte sequence was cut off
    for (size_t i = 1; i <= 4 && i <= len; ++i) {
        unsigned char c = text[len - i];
        if ((c & 0xE0) == 0xC0) {
            // 2-byte character start: 110xxxxx
            // needs at least 2 bytes
            if (i < 2) return len - i;
        } else if ((c & 0xF0) == 0xE0) {
            // 3-byte character start: 1110xxxx
            // needs at least 3 bytes
            if (i < 3) return len - i;
        } else if ((c & 0xF8) == 0xF0) {
            // 4-byte character start: 11110xxx
            // needs at least 4 bytes
            if (i < 4) return len - i;
        }
    }

    // if no cut-off multi-byte sequence was found, return full length
    return len;
}

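// Usage sketch: when streaming partial generations, send only the prefix that
// validate_utf8 reports as safe, so the client never receives a truncated
// multi-byte character:
//
//   size_t safe_len = validate_utf8(text);
//   std::string to_send = text.substr(0, safe_len);
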
//
// template utils
//

// format infill task
static llama_tokens format_infill(
        const llama_vocab * vocab,
        const json & input_prefix,
        const json & input_suffix,
        const json & input_extra,
        const int n_batch,
        const int n_predict,
        const int n_ctx,
        const bool spm_infill,
        const llama_tokens & tokens_prompt
    ) {
    // TODO: optimize this block by reducing memory allocations and movement

    // use FIM repo-level pattern:
    // ref: https://arxiv.org/pdf/2409.12186
    //
    // [FIM_REP]myproject
    // [FIM_SEP]filename0
    // extra chunk 0
    // [FIM_SEP]filename1
    // extra chunk 1
    // ...
    // [FIM_SEP]filename
    // [FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]prompt
    //
    llama_tokens extra_tokens;
    extra_tokens.reserve(n_ctx);

    auto tokens_prefix = tokenize_mixed(vocab, input_prefix, false, false);
    auto tokens_suffix = tokenize_mixed(vocab, input_suffix, false, false);

    if (llama_vocab_fim_rep(vocab) != LLAMA_TOKEN_NULL) {
        // TODO: make project name an input
        static const auto k_fim_repo = common_tokenize(vocab, "myproject\n", false, false);

        extra_tokens.push_back(llama_vocab_fim_rep(vocab));
        extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end());
    }
    for (const auto & chunk : input_extra) {
        // { "text": string, "filename": string }
        const std::string text     = json_value(chunk, "text",     std::string());
        const std::string filename = json_value(chunk, "filename", std::string("tmp"));

        if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) {
            const auto k_fim_file = common_tokenize(vocab, filename + "\n", false, false);

            extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab));
            extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
        } else {
            // chunk separator in binary form ("\n\n--- snippet ---\n\n") to avoid confusing the AI
            static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00};
            static const auto k_chunk_prefix_tokens = common_tokenize(vocab, k_chunk_prefix_str, false, false);

            extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end());
        }

        const auto chunk_tokens = common_tokenize(vocab, text, false, false);
        extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end());
    }

    if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) {
        // TODO: current filename
        static const auto k_fim_file = common_tokenize(vocab, "filename\n", false, false);

        extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab));
        extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
    }

    // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));

    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));

    // fill the rest of the context with extra chunks
    const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());

    tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take);
    tokens_suffix.resize(n_suffix_take);

    tokens_prefix.insert(tokens_prefix.begin(), llama_vocab_fim_pre(vocab));
    tokens_prefix.insert(tokens_prefix.end(),   tokens_prompt.begin(), tokens_prompt.end());
    tokens_suffix.insert(tokens_suffix.begin(), llama_vocab_fim_suf(vocab));

    auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix;
    auto embd_end = spm_infill ? tokens_prefix : tokens_suffix;

    if (llama_vocab_get_add_bos(vocab)) {
        embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab));
    }

    SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size());

    // put the extra context before the FIM prefix
    embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end());

    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
    embd_inp.push_back(llama_vocab_fim_mid(vocab));

    return embd_inp;
}

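// For reference, a hypothetical "input_extra" payload consumed above could look like:
//
//   [
//     {"filename": "utils.h",   "text": "inline int add(int a, int b);\n"},
//     {"filename": "utils.cpp", "text": "#include \"utils.h\"\n"}
//   ]
//
// Each chunk is prefixed with [FIM_SEP]filename when the vocab defines a FIM
// separator token, otherwise with the plain-text "--- snippet ---" delimiter.
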
//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline raw_buffer base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    raw_buffer ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}

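// Usage sketch: '=' padding (or any non-base64 byte) terminates the scan:
//
//   raw_buffer bytes = base64_decode("aGVsbG8="); // -> {'h','e','l','l','o'}
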
//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    return "chatcmpl-" + random_string();
}

static std::string gen_tool_call_id() {
    return random_string();
}

//
// other common utils
//

// convert a range of tokens to a string
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += common_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == LLAMA_TOKEN_NULL ? "" : common_token_to_piece(ctx, token);

    // if the size is 1 and the first bit is set, it's a partial character
    // (size > 1 means it's already a known token)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

// send a single server-sent event (SSE) frame to the client
static bool server_sent_event(httplib::DataSink & sink, const json & data) {
    const std::string str =
        "data: " +
        data.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n"; // an SSE message is terminated by a blank line (two line terminators in a row)

    LOG_DBG("data stream, to_send: %s", str.c_str());

    return sink.write(str.c_str(), str.size());
}

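// On the wire, each call emits one SSE frame (payload shown is illustrative):
//
//   data: {"content":"Hello"}\n\n
//
// The trailing blank line is what terminates the event on the client side.
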
//
// OAI utils
//

// used by /completions endpoint
static json oaicompat_completion_params_parse(const json & body) {
    json llama_params;

    if (!body.contains("prompt")) {
        throw std::runtime_error("\"prompt\" is required");
    }

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "echo" field
    if (json_value(body, "echo", false)) {
        throw std::runtime_error("Only no echo is supported");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "best_of", "suffix" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

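// Illustrative mapping (hypothetical request): a body of
//   {"prompt": "Hi", "stop": "###", "max_tokens": 16}
// is returned with "stop" normalized to an array and the other fields copied:
//   {"stop": ["###"], "prompt": "Hi", "max_tokens": 16}
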
struct oaicompat_parser_options {
    bool use_jinja;
    bool prefill_assistant;
    common_reasoning_format reasoning_format;
    std::map<std::string, std::string> chat_template_kwargs;
    common_chat_templates * tmpls;
    bool allow_image;
    bool allow_audio;
    bool enable_thinking = true;
};

// used by /chat/completions endpoint
static json oaicompat_chat_params_parse(
    json & body,
    const oaicompat_parser_options & opt,
    std::vector<raw_buffer> & out_files)
{
    json llama_params;

    auto tools       = json_value(body, "tools", json());
    auto has_tools   = tools.is_array() && !tools.empty();
    auto stream      = json_value(body, "stream", false);
    auto tool_choice = json_value(body, "tool_choice", std::string("auto"));

    if (!opt.use_jinja) {
        if (has_tools) {
            throw std::runtime_error("tools param requires --jinja flag");
        }
        if (tool_choice != "auto") {
            throw std::runtime_error("tool_choice param requires --jinja flag");
        }
    }

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    auto json_schema = json_value(body, "json_schema", json());
    auto grammar     = json_value(body, "grammar", std::string());
    if (!json_schema.is_null() && !grammar.empty()) {
        throw std::runtime_error("Cannot use both json_schema and grammar");
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format      = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            json_schema = json_value(response_format, "schema", json::object());
        } else if (response_type == "json_schema") {
            auto schema_wrapper = json_value(response_format, "json_schema", json::object());
            json_schema         = json_value(schema_wrapper, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
        }
    }

    // get input files (media parts are replaced by text markers in-place)
    if (!body.contains("messages")) {
        throw std::runtime_error("'messages' is required");
    }
    json & messages = body.at("messages");
    if (!messages.is_array()) {
        throw std::runtime_error("Expected 'messages' to be an array");
    }
    for (auto & msg : messages) {
        std::string role = json_value(msg, "role", std::string());
        if (role != "assistant" && !msg.contains("content")) {
            throw std::runtime_error("All non-assistant messages must contain 'content'");
        }
        if (role == "assistant") {
            if (!msg.contains("content") && !msg.contains("tool_calls")) {
                throw std::runtime_error("Assistant message must contain either 'content' or 'tool_calls'!");
            }
            if (!msg.contains("content")) {
                continue;
            }
        }
        json & content = msg.at("content");
        if (content.is_string() || content.is_null()) {
            continue;
        }

        if (!content.is_array()) {
            throw std::runtime_error("Expected 'content' to be a string or an array");
        }

        for (auto & p : content) {
            std::string type = json_value(p, "type", std::string());
            if (type == "image_url") {
                if (!opt.allow_image) {
                    throw std::runtime_error("image input is not supported - hint: if this is unexpected, you may need to provide the mmproj");
                }

                json image_url  = json_value(p, "image_url", json::object());
                std::string url = json_value(image_url, "url", std::string());
                if (string_starts_with(url, "http")) {
                    // download remote image
                    // TODO: maybe make these params configurable
                    common_remote_params params;
                    params.headers.push_back("User-Agent: llama.cpp/" + build_info);
                    params.max_size = 1024 * 1024 * 10; // 10MB
                    params.timeout  = 10; // seconds
                    SRV_INF("downloading image from '%s'\n", url.c_str());
                    auto res = common_remote_get_content(url, params);
                    if (200 <= res.first && res.first < 300) {
                        SRV_INF("downloaded %ld bytes\n", res.second.size());
                        raw_buffer data;
                        data.insert(data.end(), res.second.begin(), res.second.end());
                        out_files.push_back(data);
                    } else {
                        throw std::runtime_error("Failed to download image");
                    }

                } else {
                    // try to decode base64 image
                    std::vector<std::string> parts = string_split<std::string>(url, ',');
                    if (parts.size() != 2) {
                        throw std::runtime_error("Invalid image_url.url value");
                    } else if (!string_starts_with(parts[0], "data:image/")) {
                        throw std::runtime_error("Invalid image_url.url format: " + parts[0]);
                    } else if (!string_ends_with(parts[0], "base64")) {
                        throw std::runtime_error("image_url.url must be base64 encoded");
                    } else {
                        auto base64_data  = parts[1];
                        auto decoded_data = base64_decode(base64_data);
                        out_files.push_back(decoded_data);
                    }
                }

                // replace this chunk with a marker
                p["type"] = "text";
                p["text"] = mtmd_default_marker();
                p.erase("image_url");

            } else if (type == "input_audio") {
                if (!opt.allow_audio) {
                    throw std::runtime_error("audio input is not supported - hint: if this is unexpected, you may need to provide the mmproj");
                }

                json input_audio   = json_value(p, "input_audio", json::object());
                std::string data   = json_value(input_audio, "data", std::string());
                std::string format = json_value(input_audio, "format", std::string());
                // other formats may work internally, but only these are allowed to match the OAI spec
                if (format != "wav" && format != "mp3") {
                    throw std::runtime_error("input_audio.format must be either 'wav' or 'mp3'");
                }
                auto decoded_data = base64_decode(data); // expected to be base64 encoded
                out_files.push_back(decoded_data);

                // replace this chunk with a marker
                p["type"] = "text";
                p["text"] = mtmd_default_marker();
                p.erase("input_audio");

            } else if (type != "text") {
                throw std::runtime_error("unsupported content[].type");
            }
        }
    }

    common_chat_templates_inputs inputs;
    inputs.messages              = common_chat_msgs_parse_oaicompat(messages);
    inputs.tools                 = common_chat_tools_parse_oaicompat(tools);
    inputs.tool_choice           = common_chat_tool_choice_parse_oaicompat(tool_choice);
    inputs.json_schema           = json_schema.is_null() ? "" : json_schema.dump();
    inputs.grammar               = grammar;
    inputs.use_jinja             = opt.use_jinja;
    inputs.parallel_tool_calls   = json_value(body, "parallel_tool_calls", false);
    inputs.add_generation_prompt = json_value(body, "add_generation_prompt", true);
    inputs.reasoning_format      = opt.reasoning_format;
    inputs.enable_thinking       = opt.enable_thinking;
    if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) {
        if (body.contains("grammar")) {
            throw std::runtime_error("Cannot use custom grammar constraints with tools.");
        }
        llama_params["parse_tool_calls"] = true;
    }

    // merge the template args provided from the command line with the args provided in the user request
    auto chat_template_kwargs_object = json_value(body, "chat_template_kwargs", json::object());
    inputs.chat_template_kwargs = opt.chat_template_kwargs;
    for (const auto & item : chat_template_kwargs_object.items()) {
        inputs.chat_template_kwargs[item.key()] = item.value().dump();
    }

    // parse the "enable_thinking" kwarg to override the default value
    auto enable_thinking_kwarg = json_value(inputs.chat_template_kwargs, "enable_thinking", std::string(""));
    if (enable_thinking_kwarg == "true") {
        inputs.enable_thinking = true;
    } else if (enable_thinking_kwarg == "false") {
        inputs.enable_thinking = false;
    } else if (!enable_thinking_kwarg.empty() && enable_thinking_kwarg[0] == '"') {
        throw std::runtime_error("invalid type for \"enable_thinking\" (expected boolean, got string)");
    }

    // if the assistant message appears at the end of list, we do not add end-of-turn token
    // for ex. this can be useful to modify the reasoning process in reasoning models
    bool prefill_assistant_message = !inputs.messages.empty() && inputs.messages.back().role == "assistant" && opt.prefill_assistant;
    common_chat_msg last_message;
    if (prefill_assistant_message) {
        last_message = inputs.messages.back();
        inputs.messages.pop_back();

        // sanity check: at most one assistant message at the end of the list
        if (!inputs.messages.empty() && inputs.messages.back().role == "assistant") {
            throw std::runtime_error("Cannot have 2 or more assistant messages at the end of the list.");
        }

        // do not parse reasoning out of the prefilled content
        inputs.reasoning_format = COMMON_REASONING_FORMAT_NONE;

        if (inputs.enable_thinking) {
            throw std::runtime_error("Assistant response prefill is incompatible with enable_thinking.");
        }

        inputs.add_generation_prompt = true;
    }

    // apply the chat template to the list of messages
    auto chat_params = common_chat_templates_apply(opt.tmpls, inputs);

    // append the prefilled assistant message to the templated prompt
    if (prefill_assistant_message) {
        if (!last_message.content_parts.empty()) {
            for (auto & p : last_message.content_parts) {
                chat_params.prompt += p.text;
            }
        } else {
            chat_params.prompt += last_message.content;
        }
    }

    llama_params["chat_format"] = static_cast<int>(chat_params.format);
    llama_params["prompt"]      = chat_params.prompt;
    if (!chat_params.grammar.empty()) {
        llama_params["grammar"] = chat_params.grammar;
    }
    llama_params["grammar_lazy"] = chat_params.grammar_lazy;
    auto grammar_triggers = json::array();
    for (const auto & trigger : chat_params.grammar_triggers) {
        server_grammar_trigger ct(trigger);
        grammar_triggers.push_back(ct.to_json());
    }
    llama_params["grammar_triggers"] = grammar_triggers;
    llama_params["preserved_tokens"] = chat_params.preserved_tokens;
    llama_params["thinking_forced_open"] = chat_params.thinking_forced_open;
    for (const auto & stop : chat_params.additional_stops) {
        llama_params["stop"].push_back(stop);
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: the response format of this option is not yet fully OAI-compatible
    if (json_value(body, "logprobs", false)) {
        if (has_tools && stream) {
            throw std::runtime_error("logprobs is not supported with tools + stream");
        }
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Copy remaining properties to llama_params
    // This allows the user to pass llama.cpp-specific params like "mirostat" via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

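// Illustrative multimodal request (hypothetical): the image part below is
// downloaded or base64-decoded into out_files and replaced in-place by a text
// part holding mtmd_default_marker() before the chat template is applied:
//
//   {"messages": [{"role": "user", "content": [
//       {"type": "text",      "text": "What is in this image?"},
//       {"type": "image_url", "image_url": {"url": "data:image/png;base64,...."}}
//   ]}]}
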
static json format_embeddings_response_oaicompat(const json & request, const json & embeddings, bool use_base64 = false) {
    json data = json::array();
    int32_t n_tokens = 0;
    int i = 0;
    for (const auto & elem : embeddings) {
        json embedding_obj;

        if (use_base64) {
            const auto & vec = json_value(elem, "embedding", json::array()).get<std::vector<float>>();
            const char * data_ptr = reinterpret_cast<const char *>(vec.data());
            size_t data_size = vec.size() * sizeof(float);
            embedding_obj = {
                {"embedding", base64::encode(data_ptr, data_size)},
                {"index", i++},
                {"object", "embedding"},
                {"encoding_format", "base64"}
            };
        } else {
            embedding_obj = {
                {"embedding", json_value(elem, "embedding", json::array())},
                {"index", i++},
                {"object", "embedding"}
            };
        }
        data.push_back(embedding_obj);

        n_tokens += json_value(elem, "tokens_evaluated", 0);
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", n_tokens},
            {"total_tokens", n_tokens}
        }},
        {"data", data}
    };

    return res;
}

static json format_response_rerank(
        const json & request,
        const json & ranks,
        bool is_tei_format,
        std::vector<std::string> & texts,
        int top_n) {
    int32_t n_tokens = 0;
    bool return_text = is_tei_format && json_value(request, "return_text", false);
    std::vector<json> elements;
    std::string score_label = is_tei_format ? "score" : "relevance_score";
    for (const auto & rank : ranks) {
        int index = json_value(rank, "index", 0);
        json elem = json{
            {"index", index},
            {score_label, json_value(rank, "score", 0.0)},
        };
        n_tokens += json_value(rank, "tokens_evaluated", 0);
        if (return_text) {
            elem["text"] = std::move(texts[index]);
        }
        elements.push_back(elem);
    }

    // sort by score in descending order and keep only the top_n results
    std::sort(elements.begin(), elements.end(), [score_label](const json & a, const json & b) {
        return json_value(a, score_label, 0.0) > json_value(b, score_label, 0.0);
    });

    elements.resize(std::min(top_n, (int) elements.size()));
    json results = elements;

    if (is_tei_format) {
        return results;
    }

    json res = json{
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json{
            {"prompt_tokens", n_tokens},
            {"total_tokens", n_tokens}
        }},
        {"results", results}
    };

    return res;
}

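// Illustrative output with is_tei_format == false (scores are hypothetical):
//
//   {"model": "gpt-3.5-turbo", "object": "list",
//    "usage": {"prompt_tokens": 30, "total_tokens": 30},
//    "results": [{"index": 1, "relevance_score": 0.91},
//                {"index": 0, "relevance_score": 0.17}]}
//
// With is_tei_format == true, only the sorted array is returned and the score
// key is named "score".
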
static bool is_valid_utf8(const std::string & str) {
    const unsigned char * bytes = reinterpret_cast<const unsigned char *>(str.data());
    const unsigned char * end   = bytes + str.length();

    while (bytes < end) {
        if (*bytes <= 0x7F) {
            // 1-byte sequence (0xxxxxxx)
            bytes++;
        } else if ((*bytes & 0xE0) == 0xC0) {
            // 2-byte sequence (110xxxxx 10xxxxxx)
            if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80)
                return false;
            bytes += 2;
        } else if ((*bytes & 0xF0) == 0xE0) {
            // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80)
                return false;
            bytes += 3;
        } else if ((*bytes & 0xF8) == 0xF0) {
            // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 ||
                (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80)
                return false;
            bytes += 4;
        } else {
            // invalid UTF-8 lead byte
            return false;
        }
    }

    return true;
}

static json format_tokenizer_response(const json & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

static json format_logit_bias(const std::vector<llama_logit_bias> & logit_bias) {
    json data = json::array();
    for (const auto & lb : logit_bias) {
        data.push_back(json{
            {"bias", lb.bias},
            {"token", lb.token},
        });
    }
    return data;
}

static std::string safe_json_to_str(const json & data) {
    return data.dump(-1, ' ', false, json::error_handler_t::replace);
}

static std::vector<llama_token_data> get_token_probabilities(llama_context * ctx, int idx) {
    std::vector<llama_token_data> cur;
    const auto * logits = llama_get_logits_ith(ctx, idx);

    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    const int n_vocab = llama_vocab_n_tokens(vocab);

    cur.resize(n_vocab);
    for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
        cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
    }

    // sort tokens by logits in descending order
    std::sort(cur.begin(), cur.end(), [](const llama_token_data & a, const llama_token_data & b) {
        return a.logit > b.logit;
    });

    // apply softmax
    float max_l   = cur[0].logit;
    float cum_sum = 0.0f;
    for (size_t i = 0; i < cur.size(); ++i) {
        float p  = expf(cur[i].logit - max_l);
        cur[i].p = p;
        cum_sum += p;
    }
    for (size_t i = 0; i < cur.size(); ++i) {
        cur[i].p /= cum_sum;
    }

    return cur;
}

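// Note: the two loops above implement a numerically stable softmax,
// p_i = expf(l_i - max_l) / sum_j expf(l_j - max_l); subtracting the maximum
// logit avoids overflow in expf without changing the resulting distribution.
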
static bool are_lora_equal(
        const std::vector<common_adapter_lora_info> & l1,
        const std::vector<common_adapter_lora_info> & l2) {
    if (l1.size() != l2.size()) {
        return false;
    }
    for (size_t i = 0; i < l1.size(); ++i) {
        // we don't check lora.path to reduce the time complexity
        if (l1[i].scale != l2[i].scale || l1[i].ptr != l2[i].ptr) {
            return false;
        }
    }
    return true;
}

// get the ids of all adapters with a positive scale (i.e. currently enabled)
static std::vector<size_t> lora_get_enabled_ids(const std::vector<common_adapter_lora_info> & loras) {
    std::vector<size_t> enabled_ids;
    for (size_t i = 0; i < loras.size(); ++i) {
        if (loras[i].scale > 0) {
            enabled_ids.push_back(i);
        }
    }
    return enabled_ids;
}

// check whether every adapter with a non-zero scale is an activated LoRA (alora),
// i.e. one that defines invocation tokens
static bool lora_all_alora(const std::vector<common_adapter_lora_info> & loras) {
    bool found_alora = false;
    for (const auto & lora : loras) {
        if (lora.scale != 0) {
            if (llama_adapter_get_alora_n_invocation_tokens(lora.ptr) == 0) {
                return false;
            }
            found_alora = true;
        }
    }
    return found_alora;
}

// determine whether switching from the current lora config to the next one
// requires clearing the context cache
static bool lora_should_clear_cache(
        const std::vector<common_adapter_lora_info> & current,
        const std::vector<common_adapter_lora_info> & next) {

    // this should only be called when the two configs actually differ;
    // reaching this point with equal configs indicates a bug in the caller,
    // so make the failure loud instead of silently resetting the cache
    GGML_ASSERT(!are_lora_equal(current, next));

    // the cache can only be kept if the current config is inactive or purely
    // alora, and the next config is purely alora as well
    return (
        !(lora_get_enabled_ids(current).empty() || lora_all_alora(current)) ||
        !lora_all_alora(next));
}

// parse lora config from a JSON request; returns a copy of lora_base with updated scales
static std::vector<common_adapter_lora_info> parse_lora_request(
        const std::vector<common_adapter_lora_info> & lora_base,
        const json & data) {
    std::vector<common_adapter_lora_info> lora(lora_base);
    int max_idx = lora.size();

    // clear existing values
    for (auto & entry : lora) {
        entry.scale = 0.0f;
    }

    // set new values
    for (const auto & entry : data) {
        int id      = json_value(entry, "id", -1);
        float scale = json_value(entry, "scale", 0.0f);
        if (0 <= id && id < max_idx) {
            lora[id].scale = scale;
        } else {
            throw std::runtime_error("invalid adapter id");
        }
    }

    return lora;
}

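// Illustrative request body (hypothetical ids/scales) with two loaded adapters:
//
//   [{"id": 0, "scale": 0.5}, {"id": 1, "scale": 0.0}]
//
// Note that adapters omitted from the request keep the 0.0 scale assigned
// above, i.e. they end up disabled rather than retaining their previous value.
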
//
// utils for interacting with libmtmd
// (may need to refactor in near future)
//

/**
 * server_tokens is a helper to manage the input tokens and media chunks for the server.
 * it is made this way to simplify the logic of KV cache management.
 */
struct server_tokens {
    bool has_mtmd = false;

private:
    // map a **start** position in tokens to the media chunk occupying it
    std::unordered_map<llama_pos, mtmd::input_chunk_ptr> map_pos_to_media;

    // list of tokens
    // it can include LLAMA_TOKEN_NULL, which is used as a placeholder for positions
    // occupied by a media chunk; a mtmd_input_chunk can occupy multiple positions,
    // one llama_token per **position**
    // (do not access these members directly from outside, risking out-of-sync state)
    llama_tokens tokens;

    // for ex. with an input of 5 text tokens and 2 images:
    //      [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1]
    // pos  0   1   2   3   4   5      6      7      8      9
    // map_pos_to_media will contain: {5, img0}, {8, img1}

public:
    server_tokens() = default;
    ~server_tokens() = default;

    // prevent copying: media chunks are uniquely owned,
    // transfer ownership via std::move instead
    server_tokens(const server_tokens &) = delete;
    server_tokens & operator=(const server_tokens &) = delete;

    // allow moving
    server_tokens(server_tokens &&) = default;
    server_tokens & operator=(server_tokens &&) = default;

    // allow accessing elements using [] operator
    llama_token operator[](size_t index) { return tokens[index]; }
    const llama_token & operator[](size_t index) const { return tokens[index]; }

    server_tokens(mtmd::input_chunks & mtmd_chunks, bool has_mtmd) : has_mtmd(has_mtmd) {
        for (size_t i = 0; i < mtmd_chunks.size(); ++i) {
            push_back(mtmd_chunks[i]);
        }
    }

    server_tokens(const llama_tokens & tokens, bool has_mtmd) : has_mtmd(has_mtmd), tokens(tokens) {}

    // for debugging
    std::string str() const {
        std::ostringstream oss;
        oss << "tokens: ";
        for (const auto & t : tokens) {
            if (t == LLAMA_TOKEN_NULL) {
                oss << "<embd> ";
            } else {
                oss << t << " ";
            }
        }
        oss << "\n";
        oss << "image pos: ";
        for (const auto & it : map_pos_to_media) {
            oss << it.first << ", ";
        }
        return oss.str();
    }

    const mtmd::input_chunk_ptr & find_chunk(llama_pos pos) const {
        auto it = map_pos_to_media.find(pos);
        if (it != map_pos_to_media.end()) {
            return it->second;
        }
        throw std::runtime_error("Chunk not found");
    }

    void push_back(llama_token tok) {
        if (tok == LLAMA_TOKEN_NULL) {
            throw std::runtime_error("Invalid token");
        }
        tokens.emplace_back(tok);
    }

    // will create a copy of the chunk if it contains non-text data
    void push_back(const mtmd_input_chunk * chunk) {
        auto type = mtmd_input_chunk_get_type(chunk);
        if (type == MTMD_INPUT_CHUNK_TYPE_IMAGE || type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
            GGML_ASSERT(has_mtmd);
            const int n_pos = mtmd_input_chunk_get_n_pos(chunk);
            llama_pos start_pos = tokens.size();
            for (int i = 0; i < n_pos; ++i) {
                tokens.emplace_back(LLAMA_TOKEN_NULL);
            }
            mtmd::input_chunk_ptr new_chunk(mtmd_input_chunk_copy(chunk));
            map_pos_to_media[start_pos] = std::move(new_chunk);
        } else if (type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
            size_t n_tokens;
            const auto * text_tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens);
            for (size_t i = 0; i < n_tokens; ++i) {
                push_back(text_tokens[i]);
            }
        } else {
            GGML_ABORT("Invalid chunk type");
        }
    }

    // appends the tokens of another server_tokens, copying its media chunks
    void push_back(server_tokens & tokens) {
        size_t start_pos = size();
        for (size_t i = 0; i < tokens.size(); i++) {
            push_back(tokens[i]);
        }
        if (tokens.has_mtmd) {
            // the target must be mtmd-aware too, otherwise the media
            // placeholders appended above would have no backing chunks
            GGML_ASSERT(has_mtmd);
            // note: the iterator must be advanced, otherwise this loop never terminates
            for (auto it = tokens.map_pos_to_media.begin(); it != tokens.map_pos_to_media.end(); ++it) {
                auto * chunk = it->second.get();
                mtmd::input_chunk_ptr new_chunk(mtmd_input_chunk_copy(chunk));
                map_pos_to_media[start_pos + it->first] = std::move(new_chunk);
            }
        }
    }

    // for compatibility with context shift and prompt truncation
    void insert(const llama_tokens & inp_tokens) {
        GGML_ASSERT(!has_mtmd); // only allowed when mtmd is disabled
        tokens.insert(tokens.end(), inp_tokens.begin(), inp_tokens.end());
    }

    // for compatibility with speculative decoding, ctx shift, slot save/load
    const llama_tokens & get_text_tokens() const {
        GGML_ASSERT(!has_mtmd); // only allowed when mtmd is disabled
        return tokens;
    }

    // for compatibility with speculative decoding
    void set_token(llama_pos pos, llama_token id) {
        GGML_ASSERT(!has_mtmd); // only allowed when mtmd is disabled
        tokens[pos] = id;
    }

    size_t size() const {
        return tokens.size();
    }

    bool empty() const {
        return tokens.empty();
    }

    void clear() {
        tokens.clear();
    }

    void keep_first(size_t n) {
        GGML_ASSERT(n <= tokens.size());
        if (has_mtmd) {
            if (n == tokens.size()) {
                return; // nothing to do
            }
            // media chunks span multiple positions, so the cut must not land
            // in the middle of one; ex. with 5 text tokens and 2 images:
            //      [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1]
            // pos  0   1   2   3   4   5      6      7      8      9
            // cutting at a chunk boundary (n = 5 or n = 8) is fine,
            // cutting inside img0 (n = 6 or n = 7) must fail
            if (n > 0) {
                llama_token last_token = tokens[n - 1];
                // find_chunk() throws unless n - 1 is the start of a chunk
                if (last_token == LLAMA_TOKEN_NULL) {
                    find_chunk(n - 1);
                }
            }
            // remove all media chunks that fall outside the kept prefix
            for (auto it = map_pos_to_media.begin(); it != map_pos_to_media.end(); ) {
                llama_pos pos = it->first;
                if (pos >= (llama_pos) n) {
                    it = map_pos_to_media.erase(it);
                } else {
                    ++it;
                }
            }
        }
        tokens.resize(n);
    }

    std::string detokenize(const llama_context * ctx, bool special) const {
        llama_tokens text_tokens;
        text_tokens.reserve(tokens.size());
        for (const auto & t : tokens) {
            if (t != LLAMA_TOKEN_NULL) {
                text_tokens.push_back(t);
            }
        }
        return common_detokenize(ctx, text_tokens, special);
    }

    size_t get_common_prefix(const server_tokens & b) const {
        const size_t max_idx = std::min(tokens.size(), b.tokens.size());

        if (!has_mtmd) {
            for (size_t i = 0; i < max_idx; ++i) {
                if (tokens[i] == b.tokens[i]) {
                    continue;
                }

                return i;
            }

            return max_idx;
        }

        for (size_t i = 0; i < max_idx; ++i) {
            const llama_token ai = tokens[i];
            const llama_token bi = b.tokens[i];

            if (ai == LLAMA_TOKEN_NULL && bi == LLAMA_TOKEN_NULL) {
                const auto & a_chunk = find_chunk(i);
                const auto & b_chunk = b.find_chunk(i);

                GGML_ASSERT(a_chunk && b_chunk);

                const std::string id_ai = mtmd_input_chunk_get_id(a_chunk.get());
                const std::string id_bi = mtmd_input_chunk_get_id(b_chunk.get());

                const size_t pos_a = mtmd_input_chunk_get_n_pos(a_chunk.get());
                const size_t pos_b = mtmd_input_chunk_get_n_pos(b_chunk.get());

                if (id_ai == id_bi && pos_a == pos_b) {
                    GGML_ASSERT(pos_a > 0 && "Invalid media chunk");
                    i += pos_a - 1; // will be +1 by the for loop
                    continue;
                }

                return i;
            }

            if (ai == bi) {
                continue;
            }

            return i;
        }

        return max_idx;
    }

    // make sure all text tokens are within the vocab range
    bool validate(const struct llama_context * ctx) const {
        const llama_model * model = llama_get_model(ctx);
        const llama_vocab * vocab = llama_model_get_vocab(model);
        const int32_t n_vocab = llama_vocab_n_tokens(vocab);

        for (size_t i = 0; i < tokens.size(); ++i) {
            const auto & t = tokens[i];
            if (t == LLAMA_TOKEN_NULL) {
                try {
                    const auto & chunk = find_chunk(i);
                    size_t n_pos = mtmd_input_chunk_get_n_pos(chunk.get());
                    i += n_pos - 1; // will be +1 by the for loop
                } catch (const std::exception & e) {
                    return false;
                }
            } else if (t < 0 || t >= n_vocab) {
                return false;
            }
        }
        return true;
    }

    // encode and decode the media chunk that starts at n_past
    int32_t process_chunk(
                llama_context * ctx,
                mtmd_context * mctx,
                llama_pos n_past,
                int32_t seq_id,
                llama_pos & n_pos_out) const {
        const auto & chunk = find_chunk(n_past);
        const char * name = mtmd_input_chunk_get_type(chunk.get()) == MTMD_INPUT_CHUNK_TYPE_IMAGE
            ? "image" : "audio";
        SRV_INF("processing %s...\n", name);
        int32_t n_batch = llama_n_batch(ctx);
        int64_t t0 = ggml_time_ms();
        llama_pos new_n_past = n_past;
        int32_t result = mtmd_helper_eval_chunk_single(mctx, ctx,
            chunk.get(),
            n_past,
            seq_id,
            n_batch,
            true, // logits last
            &new_n_past);
        SRV_INF("%s processed in %" PRId64 " ms\n", name, ggml_time_ms() - t0);
        if (result != 0) {
            LOG_ERR("mtmd_helper_eval failed with status %d\n", result);
            n_pos_out = n_past;
            return result;
        }
        n_pos_out = new_n_past;
        return 0;
    }
};

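// A minimal usage sketch (assumes an initialized vocab; text-only, no media):
//
//   llama_tokens ids = common_tokenize(vocab, "hello world", true, true);
//   server_tokens cur(ids, /* has_mtmd */ false);
//   size_t n = cur.get_common_prefix(cur); // == cur.size()
//   cur.keep_first(n / 2);                 // truncation is always safe without media
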
// computes the FNV-1a hash of the data
static std::string fnv_hash(const uint8_t * data, size_t len) {
    const uint64_t fnv_prime = 0x100000001b3ULL;
    uint64_t hash = 0xcbf29ce484222325ULL;

    for (size_t i = 0; i < len; ++i) {
        hash ^= data[i];
        hash *= fnv_prime;
    }
    return std::to_string(hash);
}

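// For example, hashing the same image bytes twice always yields the same id,
// which is what the bitmap id assignment below relies on:
//
//   std::string a = fnv_hash(buf.data(), buf.size());
//   std::string b = fnv_hash(buf.data(), buf.size());
//   // a == b; different contents give different ids (barring 64-bit collisions)
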
static server_tokens process_mtmd_prompt(mtmd_context * mctx, std::string prompt, std::vector<raw_buffer> files) {
    mtmd::bitmaps bitmaps;
    for (auto & file : files) {
        mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(mctx, file.data(), file.size()));
        if (!bmp.ptr) {
            throw std::runtime_error("Failed to load image or audio file");
        }
        // calculate bitmap hash (for KV caching)
        std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
        bmp.set_id(hash.c_str());
        bitmaps.entries.push_back(std::move(bmp));
    }

    // process prompt
    std::vector<server_tokens> inputs;

    // multimodal
    mtmd_input_text inp_txt = {
        prompt.c_str(),
        /* add_special   */ true,
        /* parse_special */ true,
    };
    mtmd::input_chunks chunks(mtmd_input_chunks_init());
    auto bitmaps_c_ptr = bitmaps.c_ptr();
    int32_t tokenized = mtmd_tokenize(mctx,
                                      chunks.ptr.get(),
                                      &inp_txt,
                                      bitmaps_c_ptr.data(),
                                      bitmaps_c_ptr.size());
    if (tokenized != 0) {
        throw std::runtime_error("Failed to tokenize prompt");
    }
    auto result = server_tokens(chunks, true);
    return result;
}

/**
 * tokenize a single input "prompt" entry, supporting these cases:
 * - a plain string:                      "string"
 * - an array of tokens:                  [12, 34, 56]
 * - a mixed array of strings and tokens: [12, 34, "string", 56, 78]
 * - an object with a prompt string and optional base64-encoded media:
 *   {"prompt_string": "string", "multimodal_data": ["base64"]}
 * multimodal data requires a non-null mtmd context
 */
static server_tokens tokenize_input_subprompt(const llama_vocab * vocab, mtmd_context * mctx, const json & json_prompt, bool add_special, bool parse_special) {
    constexpr char JSON_STRING_PROMPT_KEY[] = "prompt_string";
    constexpr char JSON_MTMD_DATA_KEY[]     = "multimodal_data";
    const bool has_mtmd = mctx != nullptr;
    if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) {
        // string or mixed string/tokens
        llama_tokens tmp = tokenize_mixed(vocab, json_prompt, add_special, parse_special);
        return server_tokens(tmp, false);
    } else if (json_is_array_of_numbers(json_prompt)) {
        // array of tokens
        llama_tokens tmp = json_prompt.get<llama_tokens>();
        return server_tokens(tmp, false);
    } else if (json_prompt.contains(JSON_STRING_PROMPT_KEY)) {
        // JSON object with a prompt string
        if (json_prompt.contains(JSON_MTMD_DATA_KEY)) {
            if (!has_mtmd) {
                throw std::runtime_error("Multimodal data provided, but model does not support multimodal requests.");
            }

            // decode the base64-encoded media files
            std::vector<raw_buffer> files;
            for (const auto & entry : json_prompt.at(JSON_MTMD_DATA_KEY)) {
                files.push_back(base64_decode(entry));
            }
            return process_mtmd_prompt(mctx, json_prompt.at(JSON_STRING_PROMPT_KEY), files);
        } else {
            // no multimodal data, just tokenize the prompt string
            llama_tokens tmp = tokenize_mixed(vocab, json_prompt.at(JSON_STRING_PROMPT_KEY), add_special, parse_special);
            return server_tokens(tmp, false);
        }
    } else {
        throw std::runtime_error("\"prompt\" elements must be a string, a list of tokens, a JSON object containing a prompt string, or a list of mixed strings & tokens.");
    }
}

/**
 * break the input "prompt" object into multiple prompts if needed, then tokenize them
 * this supports these cases:
 * - "prompt": "string"
 * - "prompt": [12, 34, 56]
 * - "prompt": [12, 34, "string", 56, 78]
 * - "prompt": {"prompt_string": "string", "multimodal_data": ["base64"]}
 * and multiple prompts (multi-tasks):
 * - "prompt": ["string1", "string2"]
 * - "prompt": ["string1", [12, 34, 56]]
 * - "prompt": [[12, 34, 56], [78, 90, 12]]
 * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56], {"prompt_string": "string"}]
 */
static std::vector<server_tokens> tokenize_input_prompts(const llama_vocab * vocab, mtmd_context * mctx, const json & json_prompt, bool add_special, bool parse_special) {
    std::vector<server_tokens> result;
    if (json_prompt.is_array() && !json_is_array_and_contains_numbers(json_prompt)) {
        result.reserve(json_prompt.size());
        for (const auto & p : json_prompt) {
            result.push_back(tokenize_input_subprompt(vocab, mctx, p, add_special, parse_special));
        }
    } else {
        result.push_back(tokenize_input_subprompt(vocab, mctx, json_prompt, add_special, parse_special));
    }
    if (result.empty()) {
        throw std::runtime_error("\"prompt\" must not be empty");
    }
    return result;
}

// format a rerank task: uses the model's dedicated "rerank" template when available,
// otherwise falls back to the [BOS]query[EOS][SEP]doc[EOS] layout
static server_tokens format_rerank(const struct llama_model * model, const struct llama_vocab * vocab, mtmd_context * mctx, const std::string & query, const std::string & doc) {
    server_tokens result = {};

    const char * rerank_prompt = llama_model_chat_template(model, "rerank");

    if (rerank_prompt != nullptr) {
        std::string prompt = rerank_prompt;
        string_replace_all(prompt, "{query}",    query);
        string_replace_all(prompt, "{document}", doc);
        server_tokens tokens = tokenize_input_subprompt(vocab, mctx, prompt, false, true);
        result.push_back(tokens);
    } else {
        // no template: use the default layout, falling back to SEP when EOS is unavailable
        server_tokens query_tokens = tokenize_input_subprompt(vocab, mctx, query, false, false);
        server_tokens doc_tokens   = tokenize_input_subprompt(vocab, mctx, doc,   false, false);
        llama_token eos_token = llama_vocab_eos(vocab);
        if (eos_token == LLAMA_TOKEN_NULL) {
            eos_token = llama_vocab_sep(vocab);
        }

        if (llama_vocab_get_add_bos(vocab)) {
            result.push_back(llama_vocab_bos(vocab));
        }
        result.push_back(query_tokens);
        if (llama_vocab_get_add_eos(vocab)) {
            result.push_back(eos_token);
        }
        if (llama_vocab_get_add_sep(vocab)) {
            result.push_back(llama_vocab_sep(vocab));
        }
        result.push_back(doc_tokens);
        if (llama_vocab_get_add_eos(vocab)) {
            result.push_back(eos_token);
        }
    }

    return result;
}

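// Without a "rerank" template, the default layout produced above is the
// BERT-style pair
//
//   [BOS]query[EOS][SEP]doc[EOS]
//
// subject to the vocab's add_bos / add_eos / add_sep flags.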