Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

community[minor]: upgraded @mlc/web-llm dependency and updated its ChatModel #5637

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions libs/langchain-community/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@
"@langchain/scripts": "~0.0.14",
"@layerup/layerup-security": "^1.5.12",
"@mendable/firecrawl-js": "^0.0.13",
"@mlc-ai/web-llm": "^0.2.35",
"@mlc-ai/web-llm": "^0.2.40",
"@mozilla/readability": "^0.4.4",
"@neondatabase/serverless": "^0.9.1",
"@notionhq/client": "^2.2.10",
Expand Down Expand Up @@ -245,7 +245,7 @@
"@huggingface/inference": "^2.6.4",
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hey there! 👋 I noticed the update to the "@mlc-ai/web-llm" dependency in the package.json file. This change may impact the project's dependencies, so I'm flagging it for the maintainers to review. Keep up the great work! 🚀

"@layerup/layerup-security": "^1.5.12",
"@mendable/firecrawl-js": "^0.0.13",
"@mlc-ai/web-llm": "^0.2.35",
"@mlc-ai/web-llm": "^0.2.40",
"@mozilla/readability": "*",
"@neondatabase/serverless": "*",
"@notionhq/client": "^2.2.10",
Expand Down
30 changes: 10 additions & 20 deletions libs/langchain-community/src/chat_models/webllm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ export interface WebLLMCallOptions extends BaseLanguageModelCallOptions {}
export class ChatWebLLM extends SimpleChatModel<WebLLMCallOptions> {
static inputs: WebLLMInputs;

protected engine: webllm.EngineInterface;
protected engine: webllm.MLCEngine;

appConfig?: webllm.AppConfig;

Expand All @@ -63,40 +63,33 @@ export class ChatWebLLM extends SimpleChatModel<WebLLMCallOptions> {
this.chatOptions = inputs.chatOptions;
this.model = inputs.model;
this.temperature = inputs.temperature;
this.engine = new webllm.MLCEngine();
}

_llmType() {
return "web-llm";
}

async initialize(progressCallback?: webllm.InitProgressCallback) {
this.engine = new webllm.Engine();
if (progressCallback !== undefined) {
this.engine.setInitProgressCallback(progressCallback);
}
await this.reload(this.model, this.chatOptions, this.appConfig);
this.engine.setInitProgressCallback(() => {});
}

async reload(
modelId: string,
newAppConfig?: webllm.AppConfig,
newChatOpts?: webllm.ChatOptions
) {
if (this.engine !== undefined) {
await this.engine.reload(modelId, newAppConfig, newChatOpts);
} else {
throw new Error("Initialize model before reloading.");
}
await this.engine.reload(modelId, newChatOpts, newAppConfig);
}

async *_streamResponseChunks(
messages: BaseMessage[],
options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
await this.initialize();

const messagesInput: ChatCompletionMessageParam[] = messages.map(
(message) => {
if (typeof message.content !== "string") {
Expand Down Expand Up @@ -124,15 +117,12 @@ export class ChatWebLLM extends SimpleChatModel<WebLLMCallOptions> {
}
);

const stream = this.engine.chatCompletionAsyncChunkGenerator(
{
stream: true,
messages: messagesInput,
stop: options.stop,
logprobs: true,
},
{}
);
const stream = await this.engine.chat.completions.create({
stream: true,
messages: messagesInput,
stop: options.stop,
logprobs: true,
});
for await (const chunk of stream) {
// Last chunk has undefined content
const text = chunk.choices[0].delta.content ?? "";
Expand All @@ -146,7 +136,7 @@ export class ChatWebLLM extends SimpleChatModel<WebLLMCallOptions> {
},
}),
});
await runManager?.handleLLMNewToken(text ?? "");
await runManager?.handleLLMNewToken(text);
}
}

Expand Down
21 changes: 15 additions & 6 deletions yarn.lock
Original file line number Diff line number Diff line change
Expand Up @@ -9099,7 +9099,7 @@ __metadata:
"@langchain/scripts": ~0.0.14
"@layerup/layerup-security": ^1.5.12
"@mendable/firecrawl-js": ^0.0.13
"@mlc-ai/web-llm": ^0.2.35
"@mlc-ai/web-llm": ^0.2.40
"@mozilla/readability": ^0.4.4
"@neondatabase/serverless": ^0.9.1
"@notionhq/client": ^2.2.10
Expand Down Expand Up @@ -9266,7 +9266,7 @@ __metadata:
"@huggingface/inference": ^2.6.4
"@layerup/layerup-security": ^1.5.12
"@mendable/firecrawl-js": ^0.0.13
"@mlc-ai/web-llm": ^0.2.35
"@mlc-ai/web-llm": ^0.2.40
"@mozilla/readability": "*"
"@neondatabase/serverless": "*"
"@notionhq/client": ^2.2.10
Expand Down Expand Up @@ -10445,10 +10445,12 @@ __metadata:
languageName: node
linkType: hard

"@mlc-ai/web-llm@npm:^0.2.35":
version: 0.2.35
resolution: "@mlc-ai/web-llm@npm:0.2.35"
checksum: 03c1d1847340f88474e1eeed7a91cc09e29299a1216e378385ffe5479c203d39a8656d98c9187864322453a91f046b874d7073662ab04033527079d9bb29bee3
"@mlc-ai/web-llm@npm:^0.2.40":
version: 0.2.40
resolution: "@mlc-ai/web-llm@npm:0.2.40"
dependencies:
loglevel: ^1.9.1
checksum: 44d46178f7b7f899893ee8096fd4188b8c343589a10428c52f87b1b7e708f7a94b2b6315c8a6f8075f14d6d92aebfd8afc7f6d049a2ef60f8b8dc950b98a82e2
languageName: node
linkType: hard

Expand Down Expand Up @@ -28431,6 +28433,13 @@ __metadata:
languageName: node
linkType: hard

"loglevel@npm:^1.9.1":
version: 1.9.1
resolution: "loglevel@npm:1.9.1"
checksum: e1c8586108c4d566122e91f8a79c8df728920e3a714875affa5120566761a24077ec8ec9e5fc388b022e39fc411ec6e090cde1b5775871241b045139771eeb06
languageName: node
linkType: hard

"long@npm:*, long@npm:^5.2.1, long@npm:~5.2.3":
version: 5.2.3
resolution: "long@npm:5.2.3"
Expand Down
Loading