diff --git a/package.json b/package.json index 2a11055..188980d 100644 --- a/package.json +++ b/package.json @@ -22,6 +22,7 @@ "typescript": "4.7.4" }, "dependencies": { + "@anthropic-ai/sdk": "^0.19.0", "@types/mustache": "^4.2.2", "@types/react": "^18.0.28", "@types/react-dom": "^18.0.11", diff --git a/src/complete/completers.ts b/src/complete/completers.ts index 8913bf6..d75a213 100644 --- a/src/complete/completers.ts +++ b/src/complete/completers.ts @@ -4,6 +4,7 @@ import { ChatGPTComplete } from "./completers/chatgpt/chatgpt"; import { JurassicJ2Complete } from "./completers/ai21/ai21"; import { GooseAIComplete } from "./completers/gooseai/gooseai"; import { OobaboogaComplete } from "./completers/oobabooga/oobabooga"; +import { AnthropicComplete } from "./completers/anthropic/anthropic"; export const available: Completer[] = [ new ChatGPTComplete(), @@ -11,4 +12,5 @@ export const available: Completer[] = [ new JurassicJ2Complete(), new GooseAIComplete(), new OobaboogaComplete(), + new AnthropicComplete(), ]; diff --git a/src/complete/completers/anthropic/anthropic.tsx b/src/complete/completers/anthropic/anthropic.tsx new file mode 100644 index 0000000..caf014d --- /dev/null +++ b/src/complete/completers/anthropic/anthropic.tsx @@ -0,0 +1,205 @@ +import { Completer, Model, Prompt } from "../../complete"; +import { Notice } from "obsidian"; +import { + SettingsUI as ProviderSettingsUI, + Settings as ProviderSettings, + parse_settings as parse_provider_settings, +} from "./provider_settings"; +import { + SettingsUI as ModelSettingsUI, + parse_settings as parse_model_settings, + Settings as ModelSettings, +} from "./model_settings"; +import Anthropic from "@anthropic-ai/sdk"; +import Mustache from "mustache"; + +class AnthropicAI implements Model { + id: string; + name: string; + description: string; + rate_limit_notice: Notice | null = null; + rate_limit_notice_timeout: number | null = null; + Settings = ModelSettingsUI; + + provider_settings: ProviderSettings; + 
+ constructor( + provider_settings: string, + id: string, + name: string, + description: string + ) { + this.id = id; + this.name = name; + this.description = description; + this.provider_settings = parse_provider_settings(provider_settings); + } + + + async prepare(prompt: Prompt, settings: ModelSettings): Promise { + // See the ChatGPT version of this for details + // TODO: make this work properly, with context from other notes + + return { + prefix: prompt.prefix.slice(-(settings.prompt_length || 4096)), + suffix: prompt.suffix.slice(0, settings.prompt_length || 4096), + }; + } + + + async generate_messages( + prompt: Prompt, + model_settings: { + user_prompt: string; + } + ): Promise<{ role: "assistant" | "user"; content: string }[]> { + return [ + { + role: "user", + content: Mustache.render(model_settings.user_prompt, prompt), + }, + ]; + } + + model_parameters(model_settings: ModelSettings): { + top_p: number | undefined; + top_k: any; + temperature: number | undefined + } { + return { + temperature: model_settings.temperature, + top_p: model_settings.top_p, + top_k: model_settings.top_k, + } + } + + + create_rate_limit_notice() { + if (this.rate_limit_notice) { + window.clearTimeout(this.rate_limit_notice_timeout!); + this.rate_limit_notice_timeout = window.setTimeout(() => { + this.rate_limit_notice?.hide(); + this.rate_limit_notice = null; + this.rate_limit_notice_timeout = null; + }, 5000); + } else { + this.rate_limit_notice = new Notice( + "Rate limit exceeded. Please wait a few minutes and try again." + ); + this.rate_limit_notice_timeout = window.setTimeout(() => { + this.rate_limit_notice?.hide(); + this.rate_limit_notice = null; + this.rate_limit_notice_timeout = null; + }, 5000); + } + } + + create_api_key_notice() { + const notice: any = new Notice("", 5000); + const notice_element = notice.noticeEl as HTMLElement; + notice_element.createEl("span", { + text: "Anthropic API key is invalid. 
Please double-check your ", + }); + notice_element.createEl("a", { + text: "API key", + href: "https://console.anthropic.com/settings/keys", + }); + notice_element.createEl("span", { + text: " in the plugin settings.", + }); + } + + parse_api_error(e: { status?: number }) { + if (e.status === 429) { + this.create_rate_limit_notice(); + throw new Error(); + } else if (e.status === 401) { + this.create_api_key_notice(); + throw new Error(); + } + throw e; + } + + get_api() { + return new Anthropic({ + apiKey: this.provider_settings.api_key, + baseURL: this.provider_settings.host_url, + }); + } + + async complete(prompt: Prompt, settings: string): Promise { + const model_settings = parse_model_settings(settings); + const api = this.get_api(); + + try { + const response = await api.messages.create({ + ...this.model_parameters(model_settings), + messages: await this.generate_messages(prompt, model_settings), + model: this.id, + max_tokens: 64, + system: model_settings.system_prompt, + }); + + return response.content[0].text; + } catch (e) { + this.parse_api_error(e); + throw e; + } + } + + async *iterate(prompt: Prompt, settings: string): AsyncGenerator { + const model_settings = parse_model_settings(settings); + const api = this.get_api(); + + try { + const stream = await api.messages.create({ + ...this.model_parameters(model_settings), + messages: await this.generate_messages(prompt, model_settings), + system: model_settings.system_prompt, + model: this.id, + max_tokens: 64, + stream: true, + }); + + for await (const response of stream) { + if (response.type === "content_block_delta") { + yield response.delta.text; + } + } + } catch (e) { + this.parse_api_error(e); + throw e; + } + } +} + +export class AnthropicComplete implements Completer { + id: string = "anthropic"; + name: string = "Anthropic AI"; + description: string = "Anthropic's AI language model"; + + async get_models(settings: string) { + return [ + new AnthropicAI( + settings, + "claude-3-haiku-20240307", 
+ "Claude 3 Haiku (recommended)", + "Fastest and most cost-effective model for Claude 3" + ), + new AnthropicAI( + settings, + "claude-3-sonnet-20240229", + "Claude 3 Sonnet", + "Balanced speed and intelligence model for Claude 3" + ), + new AnthropicAI( + settings, + "claude-3-opus-20240229", + "Claude 3 Opus", + "Most intelligent model for Claude 3" + ) + ]; + } + + Settings = ProviderSettingsUI; +} diff --git a/src/complete/completers/anthropic/model_settings.tsx b/src/complete/completers/anthropic/model_settings.tsx new file mode 100644 index 0000000..7f359fb --- /dev/null +++ b/src/complete/completers/anthropic/model_settings.tsx @@ -0,0 +1,141 @@ +import React from "react"; +import { z } from "zod"; +import SettingsItem from "../../../components/SettingsItem"; + +export const settings_schema = z.object({ + system_prompt: z.string(), + user_prompt: z.string(), + prompt_length: z.number().optional(), + temperature: z.number().optional(), + top_p: z.number().optional(), + top_k: z.number().optional(), +}); + +export type Settings = z.infer; + +const default_settings: Settings = { + system_prompt: + "You are trying to give a long suggestion on how to complete the user's message. Complete in the language of the original message. Write only the completion and nothing else. Do not include the user's text in your message. Only include the completion.", + user_prompt: "Continue the following:\n\n{{prefix}}", + prompt_length: 256, +}; + +export const parse_settings = (data: string | null): Settings => { + if (data == null) { + return default_settings; + } + try { + const settings: unknown = JSON.parse(data); + return settings_schema.parse(settings); + } catch (e) { + return default_settings; + } +} + +export function SettingsUI({ + settings, + saveSettings, +}: { + settings: string | null; + saveSettings: (settings: string) => void; +}) { + const parsed_settings = parse_settings(settings); + + return ( + <> + +

+ If you're getting rate limit errors, I can't really help. Anthropic + doesn't like you using their API too much. You can either{" "} + 
 upgrade your plan {" "} + or set up a fallback preset. A fallback will be used while the + plugin waits for the rate limit to reset; scroll down to the + "Presets" section to set one up.

+ +