import Anthropic from "@anthropic-ai/sdk";
import { ENV } from "./env";

/** Chat roles in the OpenAI-style request/response shape used by this wrapper. */
export type Role = "system" | "user" | "assistant" | "tool" | "function";

export type TextContent = {
  type: "text";
  text: string;
};

export type ImageContent = {
  type: "image_url";
  image_url: {
    url: string;
    detail?: "auto" | "low" | "high";
  };
};

export type FileContent = {
  type: "file_url";
  file_url: {
    url: string;
    mime_type?:
      | "audio/mpeg"
      | "audio/wav"
      | "application/pdf"
      | "audio/mp4"
      | "video/mp4";
  };
};

export type MessageContent = string | TextContent | ImageContent | FileContent;

export type Message = {
  role: Role;
  content: MessageContent | MessageContent[];
  name?: string;
  tool_call_id?: string;
};

export type Tool = {
  type: "function";
  function: {
    name: string;
    description?: string;
    // JSON Schema object describing the tool's arguments.
    // (was bare `Record`, which does not compile — Record needs 2 type args)
    parameters?: Record<string, unknown>;
  };
};

export type ToolChoicePrimitive = "none" | "auto" | "required";
export type ToolChoiceByName = { name: string };
export type ToolChoiceExplicit = {
  type: "function";
  function: {
    name: string;
  };
};
export type ToolChoice =
  | ToolChoicePrimitive
  | ToolChoiceByName
  | ToolChoiceExplicit;

/**
 * Request parameters. Both camelCase and snake_case aliases are accepted for
 * compatibility; where both are supplied, the camelCase form wins (see
 * `invokeLLM`'s `maxTokens ?? max_tokens` fallback).
 */
export type InvokeParams = {
  messages: Message[];
  tools?: Tool[];
  toolChoice?: ToolChoice;
  tool_choice?: ToolChoice;
  maxTokens?: number;
  max_tokens?: number;
  outputSchema?: OutputSchema;
  output_schema?: OutputSchema;
  responseFormat?: ResponseFormat;
  response_format?: ResponseFormat;
  model?: string;
  temperature?: number;
};

export type ToolCall = {
  id: string;
  type: "function";
  function: {
    name: string;
    // Serialized JSON string, per the OpenAI tool-call convention.
    arguments: string;
  };
};

/** OpenAI-style chat-completion response shape returned by `invokeLLM`. */
export type InvokeResult = {
  id: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: Role;
      // (was bare `Array`, which does not compile — Array needs 1 type arg)
      content: string | Array<MessageContent>;
      tool_calls?: ToolCall[];
    };
    finish_reason: string | null;
  }>;
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};

export type JsonSchema = {
  name: string;
  // JSON Schema object (was bare `Record` — Record needs 2 type args).
  schema: Record<string, unknown>;
  strict?: boolean;
};

export type OutputSchema = JsonSchema;

export type ResponseFormat =
  | { type: "text" }
  | { type: "json_object" }
  | { type: "json_schema"; json_schema: JsonSchema };

// Extract plain text from a MessageContent value.
// Non-text parts (image_url / file_url) contribute "" and are effectively
// dropped; multiple parts are joined with newlines.
const contentToString = (content: MessageContent | MessageContent[]): string => {
  const parts = Array.isArray(content) ? content : [content];
  return parts
    .map(p => (typeof p === "string" ? p : p.type === "text" ? p.text : ""))
    .join("\n");
};

/**
 * Invoke the Anthropic Messages API with an OpenAI-style request and map the
 * response back into an OpenAI-style `InvokeResult`.
 *
 * - System messages are collapsed into Anthropic's top-level `system` param.
 * - Only text content is forwarded; image/file parts are stripped.
 * - NOTE(review): `tools` / `toolChoice` / `outputSchema` / `responseFormat`
 *   are accepted in `InvokeParams` but currently ignored here — confirm
 *   whether tool use and structured output should be forwarded to the API.
 * - NOTE(review): messages with role "tool" / "function" are passed through
 *   with their role cast to "user" | "assistant"; the API may reject them —
 *   verify against callers.
 *
 * @param params OpenAI-style request (messages, model, token limits, …).
 * @returns OpenAI-style completion result with token usage.
 * @throws Error when `ENV.anthropicApiKey` is not configured.
 */
export async function invokeLLM(params: InvokeParams): Promise<InvokeResult> {
  if (!ENV.anthropicApiKey) {
    throw new Error("ANTHROPIC_API_KEY is not configured");
  }

  const { messages, maxTokens, max_tokens, model, temperature } = params;

  // Anthropic takes the system prompt as a top-level param, not in the
  // messages array, so pull system messages out and join them.
  const systemParts = messages
    .filter(m => m.role === "system")
    .map(m => contentToString(m.content));
  const system = systemParts.length > 0 ? systemParts.join("\n\n") : undefined;

  const anthropicMessages = messages
    .filter(m => m.role !== "system")
    .map(m => ({
      role: m.role as "user" | "assistant",
      content: contentToString(m.content),
    }));

  const client = new Anthropic({ apiKey: ENV.anthropicApiKey });
  const response = await client.messages.create({
    model: model ?? "claude-sonnet-4-6",
    // camelCase alias wins over snake_case; default to 1024 output tokens.
    max_tokens: maxTokens ?? max_tokens ?? 1024,
    ...(system ? { system } : {}),
    ...(temperature != null ? { temperature } : {}),
    messages: anthropicMessages,
  });

  // Concatenate all text blocks in the response; other block types (e.g.
  // tool_use) are ignored.
  const textContent = response.content
    .filter((block): block is Anthropic.TextBlock => block.type === "text")
    .map(block => block.text)
    .join("");

  // Re-shape the Anthropic response into the OpenAI-style result.
  return {
    id: response.id,
    created: Math.floor(Date.now() / 1000),
    model: response.model,
    choices: [
      {
        index: 0,
        message: {
          role: "assistant",
          content: textContent,
        },
        finish_reason: response.stop_reason ?? null,
      },
    ],
    usage: {
      prompt_tokens: response.usage.input_tokens,
      completion_tokens: response.usage.output_tokens,
      total_tokens:
        response.usage.input_tokens + response.usage.output_tokens,
    },
  };
}