Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Anthropic Claude 3 AI models to autocompletion #24

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
"typescript": "4.7.4"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.19.0",
"@types/mustache": "^4.2.2",
"@types/react": "^18.0.28",
"@types/react-dom": "^18.0.11",
Expand Down
2 changes: 2 additions & 0 deletions src/complete/completers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@ import { ChatGPTComplete } from "./completers/chatgpt/chatgpt";
import { JurassicJ2Complete } from "./completers/ai21/ai21";
import { GooseAIComplete } from "./completers/gooseai/gooseai";
import { OobaboogaComplete } from "./completers/oobabooga/oobabooga";
import { AnthropicComplete } from "./completers/anthropic/anthropic";

// Registry of all completion providers shown in the plugin's provider
// dropdown. Order here is the display order in the UI.
export const available: Completer[] = [
	new ChatGPTComplete(),
	new OpenAIComplete(),
	new JurassicJ2Complete(),
	new GooseAIComplete(),
	new OobaboogaComplete(),
	new AnthropicComplete(),
];
205 changes: 205 additions & 0 deletions src/complete/completers/anthropic/anthropic.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
import { Completer, Model, Prompt } from "../../complete";
import { Notice } from "obsidian";
import {
SettingsUI as ProviderSettingsUI,
Settings as ProviderSettings,
parse_settings as parse_provider_settings,
} from "./provider_settings";
import {
SettingsUI as ModelSettingsUI,
parse_settings as parse_model_settings,
Settings as ModelSettings,
} from "./model_settings";
import Anthropic from "@anthropic-ai/sdk";
import Mustache from "mustache";

/**
 * A single Anthropic Claude model exposed to the completion engine.
 * Wraps the Anthropic Messages API for both one-shot (`complete`) and
 * streaming (`iterate`) completions.
 */
class AnthropicAI implements Model {
	id: string;
	name: string;
	description: string;
	// One shared Notice for 429s so repeated errors extend a single toast
	// instead of stacking a new one per failed request.
	rate_limit_notice: Notice | null = null;
	rate_limit_notice_timeout: number | null = null;
	Settings = ModelSettingsUI;

	provider_settings: ProviderSettings;

	constructor(
		provider_settings: string,
		id: string,
		name: string,
		description: string
	) {
		this.id = id;
		this.name = name;
		this.description = description;
		this.provider_settings = parse_provider_settings(provider_settings);
	}

	/**
	 * Trim the prompt to the configured window before sending: keep the
	 * tail of the prefix and the head of the suffix.
	 * See the ChatGPT version of this for details.
	 * TODO: make this work properly, with context from other notes
	 */
	async prepare(prompt: Prompt, settings: ModelSettings): Promise<Prompt> {
		// `||` (not `??`) is deliberate: a prompt_length of 0 falls back to 4096.
		const window_size = settings.prompt_length || 4096;
		return {
			prefix: prompt.prefix.slice(-window_size),
			suffix: prompt.suffix.slice(0, window_size),
		};
	}

	/**
	 * Build the Messages-API message list from the user prompt template.
	 * The Mustache template may reference {{prefix}} and {{suffix}}.
	 */
	async generate_messages(
		prompt: Prompt,
		model_settings: {
			user_prompt: string;
		}
	): Promise<{ role: "assistant" | "user"; content: string }[]> {
		return [
			{
				role: "user",
				content: Mustache.render(model_settings.user_prompt, prompt),
			},
		];
	}

	// Sampling parameters shared by complete() and iterate(). Undefined
	// values are omitted by the SDK, so unset settings use API defaults.
	model_parameters(model_settings: ModelSettings): {
		top_p: number | undefined;
		top_k: number | undefined;
		temperature: number | undefined;
	} {
		return {
			temperature: model_settings.temperature,
			top_p: model_settings.top_p,
			top_k: model_settings.top_k,
		};
	}

	// Show (or keep alive) a single rate-limit toast for 5 seconds.
	create_rate_limit_notice() {
		if (this.rate_limit_notice) {
			// Already showing: just restart the hide timer below.
			window.clearTimeout(this.rate_limit_notice_timeout!);
		} else {
			this.rate_limit_notice = new Notice(
				"Rate limit exceeded. Please wait a few minutes and try again."
			);
		}
		this.rate_limit_notice_timeout = window.setTimeout(() => {
			this.rate_limit_notice?.hide();
			this.rate_limit_notice = null;
			this.rate_limit_notice_timeout = null;
		}, 5000);
	}

	// Toast with a link to the Anthropic console for bad API keys.
	create_api_key_notice() {
		const notice: any = new Notice("", 5000);
		const notice_element = notice.noticeEl as HTMLElement;
		notice_element.createEl("span", {
			text: "Anthropic API key is invalid. Please double-check your ",
		});
		notice_element.createEl("a", {
			text: "API key",
			href: "https://console.anthropic.com/settings/keys",
		});
		notice_element.createEl("span", {
			text: " in the plugin settings.",
		});
	}

	/**
	 * Map API errors to user-facing notices. Throws a message-less Error
	 * for handled statuses (the notice already carries the explanation);
	 * rethrows anything unrecognized untouched.
	 */
	parse_api_error(e: { status?: number }) {
		if (e.status === 429) {
			this.create_rate_limit_notice();
			throw new Error();
		}
		if (e.status === 401) {
			this.create_api_key_notice();
			throw new Error();
		}
		throw e;
	}

	// A fresh client per request; host_url allows proxying the API.
	get_api() {
		return new Anthropic({
			apiKey: this.provider_settings.api_key,
			baseURL: this.provider_settings.host_url,
		});
	}

	async complete(prompt: Prompt, settings: string): Promise<string> {
		const model_settings = parse_model_settings(settings);
		const api = this.get_api();

		try {
			const response = await api.messages.create({
				...this.model_parameters(model_settings),
				messages: await this.generate_messages(prompt, model_settings),
				model: this.id,
				// NOTE(review): completions are capped at 64 tokens here and
				// in iterate(); consider making this configurable.
				max_tokens: 64,
				system: model_settings.system_prompt,
			});

			// Guard against an empty or non-text first content block instead
			// of crashing on `.text` of undefined.
			const block = response.content[0];
			return block && block.type === "text" ? block.text : "";
		} catch (e) {
			this.parse_api_error(e);
			throw e;
		}
	}

	async *iterate(prompt: Prompt, settings: string): AsyncGenerator<string> {
		const model_settings = parse_model_settings(settings);
		const api = this.get_api();

		try {
			const stream = await api.messages.create({
				...this.model_parameters(model_settings),
				messages: await this.generate_messages(prompt, model_settings),
				system: model_settings.system_prompt,
				model: this.id,
				max_tokens: 64,
				stream: true,
			});

			for await (const event of stream) {
				// Only text deltas carry completion text; skip message/stop
				// bookkeeping events and any non-text delta kinds.
				if (
					event.type === "content_block_delta" &&
					event.delta.type === "text_delta"
				) {
					yield event.delta.text;
				}
			}
		} catch (e) {
			this.parse_api_error(e);
			throw e;
		}
	}
}

/**
 * The Anthropic provider entry: exposes the Claude 3 family as selectable
 * models, all sharing the same provider-level settings (API key, host).
 */
export class AnthropicComplete implements Completer {
	id: string = "anthropic";
	name: string = "Anthropic AI";
	description: string = "Anthropic's AI language model";

	Settings = ProviderSettingsUI;

	async get_models(settings: string) {
		// [model id, display name, description] — fastest/cheapest first.
		const catalog: [string, string, string][] = [
			[
				"claude-3-haiku-20240307",
				"Claude 3 Haiku (recommended)",
				"Fastest and most cost-effective model for Claude 3",
			],
			[
				"claude-3-sonnet-20240229",
				"Claude 3 Sonnet",
				"Balanced speed and intelligence model for Claude 3",
			],
			[
				"claude-3-opus-20240229",
				"Claude 3 Opus",
				"Most intelligent model for Claude 3",
			],
		];
		return catalog.map(
			([id, name, description]) =>
				new AnthropicAI(settings, id, name, description)
		);
	}
}
141 changes: 141 additions & 0 deletions src/complete/completers/anthropic/model_settings.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
import React from "react";
import { z } from "zod";
import SettingsItem from "../../../components/SettingsItem";

// Runtime schema for the per-model settings blob (stored as a JSON string).
// Unknown keys are stripped by zod's default object behavior.
export const settings_schema = z.object({
	system_prompt: z.string(),
	user_prompt: z.string(),
	// Characters of note text sent as context (see prepare()), not max_tokens.
	prompt_length: z.number().optional(),
	// Optional sampling parameters; unset values defer to API defaults.
	temperature: z.number().optional(),
	top_p: z.number().optional(),
	top_k: z.number().optional(),
});

export type Settings = z.infer<typeof settings_schema>;

// Defaults used when settings are missing or fail validation.
const default_settings: Settings = {
	system_prompt:
		"You are trying to give a long suggestion on how to complete the user's message. Complete in the language of the original message. Write only the completion and nothing else. Do not include the user's text in your message. Only include the completion.",
	user_prompt: "Continue the following:\n\n{{prefix}}",
	prompt_length: 256,
};

/**
 * Deserialize and validate a stored settings string. Any failure — missing
 * data, malformed JSON, or a schema mismatch — falls back to the defaults.
 */
export const parse_settings = (data: string | null): Settings => {
	if (data == null) {
		return default_settings;
	}
	let candidate: unknown;
	try {
		candidate = JSON.parse(data);
	} catch (e) {
		return default_settings;
	}
	const result = settings_schema.safeParse(candidate);
	return result.success ? result.data : default_settings;
};

/**
 * Settings panel for a Claude model. Renders the prompt templates and
 * sampling parameters, persisting the full settings object on every change.
 */
export function SettingsUI({
	settings,
	saveSettings,
}: {
	settings: string | null;
	saveSettings: (settings: string) => void;
}) {
	const parsed_settings = parse_settings(settings);

	// Merge a partial change into the current settings and persist.
	const update = (patch: Partial<Settings>) =>
		saveSettings(JSON.stringify({ ...parsed_settings, ...patch }));

	// A cleared numeric input parses to NaN, which JSON.stringify serializes
	// as null and the schema then rejects — silently resetting ALL settings.
	// Store undefined instead so the field is simply omitted.
	const toInt = (raw: string): number | undefined => {
		const value = parseInt(raw);
		return Number.isNaN(value) ? undefined : value;
	};
	const toFloat = (raw: string): number | undefined => {
		const value = parseFloat(raw);
		return Number.isNaN(value) ? undefined : value;
	};

	return (
		<>
			<SettingsItem name="Rate limits" />
			{/* Fixed copy-pasted OpenAI text/link from the ChatGPT settings. */}
			<p>
				If you're getting rate limit errors, Anthropic is limiting how
				quickly you can call their API. You can either{" "}
				<a href="https://console.anthropic.com/settings/limits">
					check your rate limits
				</a>{" "}
				or set up a fallback preset. A fallback will be used while the
				plugin waits for the rate limit to reset; scroll down to the
				"Presets" section to set one up.
			</p>
			<SettingsItem name="System prompt" />
			<textarea
				className="ai-complete-full-width"
				value={parsed_settings.system_prompt}
				onChange={(e) => update({ system_prompt: e.target.value })}
			/>
			<SettingsItem name="User prompt" />
			<textarea
				className="ai-complete-full-width"
				value={parsed_settings.user_prompt}
				onChange={(e) => update({ user_prompt: e.target.value })}
			/>
			{/* This edits prompt_length (how much note text is sent as
			    context, see prepare()), not the completion's max_tokens,
			    which is fixed in the completer — label fixed accordingly. */}
			<SettingsItem name="Prompt length">
				<input
					type="number"
					value={parsed_settings.prompt_length}
					onChange={(e) =>
						update({ prompt_length: toInt(e.target.value) })
					}
				/>
			</SettingsItem>
			<SettingsItem name="Temperature">
				<input
					type="number"
					value={parsed_settings.temperature}
					onChange={(e) =>
						update({ temperature: toFloat(e.target.value) })
					}
				/>
			</SettingsItem>
			<SettingsItem name="Top P">
				<input
					type="number"
					value={parsed_settings.top_p}
					onChange={(e) => update({ top_p: toFloat(e.target.value) })}
				/>
			</SettingsItem>
			<SettingsItem name="Top K">
				<input
					type="number"
					value={parsed_settings.top_k}
					// Fixed `top_K` typo: the schema stripped the unknown key,
					// so the Top K setting was never actually saved.
					onChange={(e) => update({ top_k: toFloat(e.target.value) })}
				/>
			</SettingsItem>
		</>
	);
}
Loading