// llm.ts
  1. import Anthropic from "@anthropic-ai/sdk";
  2. import { ENV } from "./env";
/** Chat roles in the OpenAI-style wire format this module exposes. */
export type Role = "system" | "user" | "assistant" | "tool" | "function";

/** Plain-text message part. */
export type TextContent = {
  type: "text";
  text: string;
};

/** Image part referenced by URL (OpenAI-compatible shape). */
export type ImageContent = {
  type: "image_url";
  image_url: {
    url: string;
    detail?: "auto" | "low" | "high";
  };
};

/** File part referenced by URL, limited to the listed audio/pdf/video MIME types. */
export type FileContent = {
  type: "file_url";
  file_url: {
    url: string;
    mime_type?: "audio/mpeg" | "audio/wav" | "application/pdf" | "audio/mp4" | "video/mp4";
  };
};

/** One message part: a bare string shorthand or one of the typed parts above. */
export type MessageContent = string | TextContent | ImageContent | FileContent;

/** A single chat message; `content` may be one part or a list of parts. */
export type Message = {
  role: Role;
  content: MessageContent | MessageContent[];
  name?: string; // optional participant/function name
  tool_call_id?: string; // links a tool-role message to the ToolCall it answers
};
/**
 * OpenAI-style tool (function) declaration.
 * NOTE: `invokeLLM` in this file does not currently forward tools to the
 * Anthropic API — it destructures only messages/max tokens/model/temperature.
 */
export type Tool = {
  type: "function";
  function: {
    name: string;
    description?: string;
    // JSON Schema describing the function's arguments.
    parameters?: Record<string, unknown>;
  };
};

/** Shorthand tool-choice modes. */
export type ToolChoicePrimitive = "none" | "auto" | "required";

/** Select a tool by bare name. */
export type ToolChoiceByName = { name: string };

/** Fully-qualified OpenAI-style tool selection. */
export type ToolChoiceExplicit = {
  type: "function";
  function: {
    name: string;
  };
};

/** Any accepted tool-choice form. */
export type ToolChoice =
  | ToolChoicePrimitive
  | ToolChoiceByName
  | ToolChoiceExplicit;
/**
 * Parameters for `invokeLLM`. Several options accept both camelCase and
 * snake_case spellings (camelCase wins when both are set, per the
 * `maxTokens ?? max_tokens` fallback in `invokeLLM`).
 *
 * Only `messages`, `maxTokens`/`max_tokens`, `model`, and `temperature` are
 * currently read by `invokeLLM`; the tool- and schema-related fields are
 * accepted but not forwarded to the provider.
 */
export type InvokeParams = {
  messages: Message[];
  tools?: Tool[];
  toolChoice?: ToolChoice;
  tool_choice?: ToolChoice;
  maxTokens?: number;
  max_tokens?: number;
  outputSchema?: OutputSchema;
  output_schema?: OutputSchema;
  responseFormat?: ResponseFormat;
  response_format?: ResponseFormat;
  model?: string;
  temperature?: number;
};
/** An OpenAI-style tool invocation emitted by the model; `arguments` is a JSON string. */
export type ToolCall = {
  id: string;
  type: "function";
  function: {
    name: string;
    arguments: string;
  };
};

/**
 * Chat-completion-shaped result returned by `invokeLLM`.
 * `created` is a unix timestamp (seconds); `finish_reason` carries the
 * provider's stop reason verbatim (Anthropic values such as "end_turn",
 * not remapped to OpenAI's vocabulary).
 */
export type InvokeResult = {
  id: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: Role;
      content: string | Array<TextContent | ImageContent | FileContent>;
      tool_calls?: ToolCall[];
    };
    finish_reason: string | null;
  }>;
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};
/** Named JSON Schema for structured output (OpenAI `json_schema` shape). */
export type JsonSchema = {
  name: string;
  schema: Record<string, unknown>;
  strict?: boolean; // request strict schema adherence when supported
};

/** Alias kept for callers that use the `outputSchema` parameter spelling. */
export type OutputSchema = JsonSchema;

/** OpenAI-style response-format selector. */
export type ResponseFormat =
  | { type: "text" }
  | { type: "json_object" }
  | { type: "json_schema"; json_schema: JsonSchema };
  100. // Extract plain text from a MessageContent value
  101. const contentToString = (content: MessageContent | MessageContent[]): string => {
  102. const parts = Array.isArray(content) ? content : [content];
  103. return parts
  104. .map(p => (typeof p === "string" ? p : p.type === "text" ? p.text : ""))
  105. .join("\n");
  106. };
  107. export async function invokeLLM(params: InvokeParams): Promise<InvokeResult> {
  108. if (!ENV.anthropicApiKey) {
  109. throw new Error("ANTHROPIC_API_KEY is not configured");
  110. }
  111. const { messages, maxTokens, max_tokens, model, temperature } = params;
  112. // Anthropic takes system prompt as a top-level param, not in the messages array
  113. const systemParts = messages
  114. .filter(m => m.role === "system")
  115. .map(m => contentToString(m.content));
  116. const system = systemParts.length > 0 ? systemParts.join("\n\n") : undefined;
  117. const anthropicMessages = messages
  118. .filter(m => m.role !== "system")
  119. .map(m => ({
  120. role: m.role as "user" | "assistant",
  121. content: contentToString(m.content),
  122. }));
  123. const client = new Anthropic({ apiKey: ENV.anthropicApiKey });
  124. const response = await client.messages.create({
  125. model: model ?? "claude-sonnet-4-6",
  126. max_tokens: maxTokens ?? max_tokens ?? 1024,
  127. ...(system ? { system } : {}),
  128. ...(temperature != null ? { temperature } : {}),
  129. messages: anthropicMessages,
  130. });
  131. const textContent = response.content
  132. .filter((block): block is Anthropic.TextBlock => block.type === "text")
  133. .map(block => block.text)
  134. .join("");
  135. return {
  136. id: response.id,
  137. created: Math.floor(Date.now() / 1000),
  138. model: response.model,
  139. choices: [
  140. {
  141. index: 0,
  142. message: {
  143. role: "assistant",
  144. content: textContent,
  145. },
  146. finish_reason: response.stop_reason ?? null,
  147. },
  148. ],
  149. usage: {
  150. prompt_tokens: response.usage.input_tokens,
  151. completion_tokens: response.usage.output_tokens,
  152. total_tokens: response.usage.input_tokens + response.usage.output_tokens,
  153. },
  154. };
  155. }