// OpenAI-compatible API client factory for Duso
// Creates chat completion clients for any OpenAI-compatible endpoint

// ---------------------------------------------------------------------------
// Helper functions (endpoint-agnostic)
// ---------------------------------------------------------------------------

// Returns the assistant's text from a chat-completion response, or "" when
// there are no choices or the message carries no content.
function extract_text_content(response)
    if response.choices and len(response.choices) > 0 then
        var message = response.choices[0].message
        if message and message.content then
            return message.content
        end
    end
    return ""
end

// Returns the tool_calls array from a response, or [] when none were made.
function extract_tool_calls(response)
    if response.choices and len(response.choices) > 0 then
        var message = response.choices[0].message
        if message and message.tool_calls then
            return message.tool_calls
        end
    end
    return []
end

// Maps the cross-provider value "any" to OpenAI's "required";
// every other value passes through unchanged.
function map_tool_choice(tool_choice_value)
    if tool_choice_value == "any" then
        return "required"
    end
    return tool_choice_value
end

// Builds the full message list sent to the API, prepending the configured
// system prompt (if any) ahead of the conversation history.
function build_messages(config, messages)
    var result = []
    if config.system then
        // Note: OpenAI auto-caches prompts 1024+ tokens on GPT-4o and newer
        // cache_control config is accepted but ignored (automatic)
        push(result, {role = "system", content = config.system})
    end
    for msg in messages do
        push(result, msg)
    end
    return result
end

// Assembles the JSON request body for a chat-completion call.
// Optional fields (temperature, top_p, tools) are only included when set.
function build_request_body(config, messages)
    var full_messages = build_messages(config, messages)
    var body = {
        model = config.model,
        messages = full_messages,
        max_tokens = config.max_tokens
    }
    if config.temperature != nil then
        body.temperature = config.temperature
    end
    if config.top_p != nil then
        body.top_p = config.top_p
    end
    if config.tools then
        // Note: OpenAI auto-caches tools as part of 1024+ token prompts on GPT-4o+
        // cache_control config is accepted but ignored (automatic)
        body.tools = config.tools
        body.tool_choice = map_tool_choice(config.tool_choice)
    end
    return body
end

// ---------------------------------------------------------------------------
// Factory function - creates a complete client for any endpoint
// ---------------------------------------------------------------------------
//   endpoint        - chat-completions URL (POST target)
//   models_endpoint - model-listing URL (GET target)
//   options         - optional table of overrides:
//       default_model, default_max_tokens, default_temperature,
//       build_headers(key) -> headers table,
//       get_api_key(key, env_var) -> api key string,
//       key_env - name of the environment variable holding the API key
// Returns a client object: {prompt, session, models}.
function create_client(endpoint, models_endpoint, options)
    if not options then
        options = {}
    end

    var default_model = options.default_model or "gpt-4o-mini"
    var default_max_tokens = options.default_max_tokens or 2048
    var default_temperature = options.default_temperature or 1.0

    var build_headers_fn = options.build_headers or function(key)
        return {
            "Content-Type" = "application/json",
            "Authorization" = "Bearer " + key
        }
    end

    var get_api_key_fn = options.get_api_key or function(key, env_var)
        if key then
            return key
        end
        return env(env_var or "OPENAI_API_KEY")
    end

    // POSTs the session's current conversation to the endpoint and returns
    // the decoded JSON response. Throws on any non-200 status.
    function send_request(session)
        var request_body = build_request_body(session.config, session.messages)
        var api_response = fetch(endpoint, {
            method = "POST",
            headers = session.headers,
            body = format_json(request_body),
            timeout = session.config.timeout
        })
        if api_response.status != 200 then
            throw("API error: " + api_response.status + " - " + api_response.body)
        end
        return api_response.json()
    end

    // Folds a response's token usage into the session's running totals.
    // No-op when the response carries no usage block.
    function accumulate_usage(session, data)
        if data.usage then
            var new_in = session.usage.input_tokens + data.usage.prompt_tokens
            var new_out = session.usage.output_tokens + data.usage.completion_tokens
            session.usage = {input_tokens = new_in, output_tokens = new_out}
        end
    end

    // Runs registered tool handlers for each tool_call the model requested,
    // appending the assistant turn and tool results to session.messages and
    // re-querying until the model stops asking for tools or max_iterations
    // (default 10) is reached. Returns the final API response.
    function execute_tools_loop_impl(session, response, max_iterations)
        if not max_iterations then
            max_iterations = 10
        end
        var iteration = 0
        while iteration < max_iterations do
            var stop_reason = response.choices[0].finish_reason
            if stop_reason != "tool_calls" then
                break
            end
            iteration = iteration + 1
            var tool_calls = extract_tool_calls(response)
            if len(tool_calls) == 0 then
                break
            end

            // Record the assistant turn that requested the tools.
            var assistant_message = response.choices[0].message
            push(session.messages, {
                role = "assistant",
                content = assistant_message.content,
                tool_calls = assistant_message.tool_calls
            })

            // Execute each requested tool. Handler failures and missing
            // handlers are reported back to the model as tool output rather
            // than aborting the conversation.
            for tool_call in tool_calls do
                var func = tool_call["function"]
                var handler = session.config.tool_handlers[func.name]
                var result = nil
                if handler then
                    try
                        var args = parse_json(func.arguments)
                        result = handler(args)
                    catch (e)
                        result = "Error executing tool: " + format(e)
                    end
                else
                    result = "Tool handler not found: " + func.name
                end
                push(session.messages, {
                    role = "tool",
                    tool_call_id = tool_call.id,
                    content = tostring(result)
                })
            end

            response = send_request(session)
            accumulate_usage(session, response)
        end
        return response
    end

    // Creates a stateful chat session. user_config may override the model,
    // sampling settings, system prompt, tools, tool handling, and timeout.
    function session_impl(user_config)
        if not user_config then
            user_config = {}
        end
        var config = {
            model = user_config.model or default_model,
            max_tokens = user_config.max_tokens or default_max_tokens,
            temperature = user_config.temperature != nil ? user_config.temperature : default_temperature,
            system = user_config.system,
            tools = user_config.tools,
            tool_handlers = user_config.tool_handlers or {},
            auto_execute_tools = user_config.auto_execute_tools != nil ? user_config.auto_execute_tools : true,
            tool_choice = user_config.tool_choice or "auto",
            top_p = user_config.top_p,
            key = user_config.key,
            cache_control = user_config.cache_control,
            timeout = user_config.timeout or 30
        }

        // Convert standard tool definitions to OpenAI API schema
        if config.tools then
            var converted_tools = []
            for t in config.tools do
                if type(t) == "object" and t.name then
                    var tool_schema = {
                        type = "function",
                        "function" = {
                            name = t.name,
                            description = t.description or "",
                            parameters = {
                                type = "object",
                                properties = t.parameters or {},
                                required = t.required or []
                            }
                        }
                    }
                    push(converted_tools, tool_schema)
                    if t.handler then
                        config.tool_handlers[t.name] = t.handler
                    end
                else
                    // Assume it's already in OpenAI API format
                    push(converted_tools, t)
                end
            end
            config.tools = converted_tools
        end

        config.key = get_api_key_fn(config.key, options.key_env)
        if not config.key then
            throw("API key not set and key not provided")
        end
        var headers = build_headers_fn(config.key)

        // Build the session state FIRST so the method closures below can
        // reference session_obj.messages / session_obj.usage.
        // BUG FIX: the original closures captured bare `messages`/`usage`,
        // which were never declared in this scope - they existed only as
        // fields of session_obj - so the closures and execute_tools_loop_impl
        // (which writes session.messages) operated on different state.
        var session_obj = {
            messages = [],
            usage = {input_tokens = 0, output_tokens = 0},
            config = config,
            headers = headers
        }

        // Sends a user message, auto-executing tool calls when enabled, and
        // returns the assistant's final text reply.
        session_obj.prompt = function(user_message)
            push(session_obj.messages, {role = "user", content = user_message})
            var data = send_request(session_obj)
            accumulate_usage(session_obj, data)
            if config.auto_execute_tools and data.choices[0].finish_reason == "tool_calls" then
                data = execute_tools_loop_impl(session_obj, data)
            end
            var response_text = extract_text_content(data)
            var assistant_message = data.choices[0].message
            push(session_obj.messages, {role = "assistant", content = assistant_message.content})
            return response_text
        end

        // Appends a tool result for a tool call made by the last assistant
        // turn. Throws when the history is empty or the last turn is not
        // from the assistant.
        session_obj.add_tool_result = function(tool_call_id, result)
            if len(session_obj.messages) == 0 then
                throw("No messages in conversation")
            end
            var last_message = session_obj.messages[len(session_obj.messages) - 1]
            if last_message.role != "assistant" then
                throw("Last message must be from assistant to add tool result")
            end
            push(session_obj.messages, {
                role = "tool",
                tool_call_id = tool_call_id,
                content = tostring(result)
            })
        end

        // Re-queries the model on the current history (e.g. after manually
        // adding tool results) and returns the assistant's text reply.
        // NOTE(review): unlike prompt, this does not auto-execute tool calls
        // - confirm whether that asymmetry is intentional.
        session_obj.continue_conversation = function()
            if len(session_obj.messages) == 0 then
                throw("No messages in conversation")
            end
            var data = send_request(session_obj)
            accumulate_usage(session_obj, data)
            var response_text = extract_text_content(data)
            push(session_obj.messages, {role = "assistant", content = response_text})
            return response_text
        end

        // Resets the conversation history and usage counters.
        session_obj.clear = function()
            session_obj.messages = []
            session_obj.usage = {input_tokens = 0, output_tokens = 0}
            return nil
        end

        // Overrides a single config field for subsequent requests.
        session_obj.set = function(key, value)
            config[key] = value
            return nil
        end

        return session_obj
    end

    // One-shot prompt: creates a throwaway session and sends one message.
    function prompt_impl(message, user_config)
        if not user_config then
            user_config = {}
        end
        var sess = session_impl(user_config)
        return sess.prompt(message)
    end

    // Lists the models available at models_endpoint. Accepts an optional
    // explicit API key; otherwise resolves it via get_api_key_fn.
    function models_impl(key)
        var api_key = get_api_key_fn(key, options.key_env)
        if not api_key then
            throw("API key not set and key not provided")
        end
        var response = fetch(models_endpoint, {
            method = "GET",
            headers = build_headers_fn(api_key)
        })
        if response.status != 200 then
            throw("Failed to fetch models: " + response.status)
        end
        var data = response.json()
        return data.data
    end

    // Return client object
    return {
        prompt = prompt_impl,
        session = session_impl,
        models = models_impl
    }
end

// Default exports (OpenAI)
var default_client = create_client(
    "https://api.openai.com/v1/chat/completions",
    "https://api.openai.com/v1/models",
    {
        default_model = "gpt-4o-mini",
        key_env = "OPENAI_API_KEY"
    }
)

return {
    prompt = default_client.prompt,
    session = default_client.session,
    models = default_client.models,
    create_client = create_client
}