diff --git a/README.md b/README.md
index 3c0ac7e..c952137 100644
--- a/README.md
+++ b/README.md
@@ -17,8 +17,7 @@ By simplifying the structure and key functions, developers can quickly set up an
 ## How to use
 
 ### Option 1: Goto demo [AIQL](https://chat.aiql.com/)
-
-> The demo will use `Llama 3.1` by default
+> The demo will use `Qwen 2.5` by default
 
 ### Option 2: Download [Index](./index.html) and open it locally (recommended)
 
diff --git a/example/config/chatgpt.json b/example/config/chatgpt.json
index 2630624..a661054 100644
--- a/example/config/chatgpt.json
+++ b/example/config/chatgpt.json
@@ -4,6 +4,7 @@
     "url": "https://api.openai.com",
     "path": "/v1/chat/completions",
     "model": "gpt-4o-mini",
+    "max_tokens_type": "max_completion_tokens",
     "max_tokens_value": ""
   },
   "defaultChoiceStore": {
diff --git a/example/config/chatgptproxy.json b/example/config/chatgptproxy.json
index ad923fd..706d3f2 100644
--- a/example/config/chatgptproxy.json
+++ b/example/config/chatgptproxy.json
@@ -4,6 +4,7 @@
     "url": "https://api.aiql.com",
     "path": "/v1/chat/completions",
     "model": "gpt-4o-mini",
+    "max_tokens_type": "max_completion_tokens",
     "max_tokens_value": ""
   },
   "defaultChoiceStore": {
diff --git a/example/config/deepinfra.json b/example/config/deepinfra.json
index 34ab899..33c4fdb 100644
--- a/example/config/deepinfra.json
+++ b/example/config/deepinfra.json
@@ -8,13 +8,15 @@
   },
   "defaultChoiceStore": {
     "model": [
+      "meta-llama/Llama-3.2-11B-Vision-Instruct",
+      "meta-llama/Llama-3.2-90B-Vision-Instruct",
       "meta-llama/Meta-Llama-3.1-70B-Instruct",
       "meta-llama/Meta-Llama-3.1-405B-Instruct",
       "meta-llama/Meta-Llama-3.1-8B-Instruct",
       "mistralai/Mistral-7B-Instruct-v0.3",
       "mistralai/Mistral-Nemo-Instruct-2407",
       "01-ai/Yi-34B-Chat",
-      "Qwen/Qwen2-72B-Instruct"
+      "Qwen/Qwen2.5-72B-Instruct"
     ]
   }
 }
diff --git a/index.html b/index.html
index 86601af..c6615c2 100644
--- a/index.html
+++ b/index.html
@@ -90,7 +90,7 @@
     }
 
     #lottie {
-      height: calc(95vh - 20px);
+      height: calc(99vh - 50px);
       max-height: calc(100% - 100px);
     }
 
@@ -339,6 +339,7 @@
@@ -381,7 +382,7 @@
@@ -455,6 +456,16 @@
           {{ chatbotStore.max_tokens_value ? chatbotStore.max_tokens_value : "default" }}
+
+          {{"temperature: "}}
+          {{ chatbotStore.temperature ? chatbotStore.temperature :
+            "default" }}
+
+
+          {{"top_p: "}}
+          {{ chatbotStore.top_p ? chatbotStore.top_p :
+            "default" }}
+
           {{ chatbotStore.authPrefix }}
@@ -484,6 +495,17 @@
             variant="outlined">
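A note on the `max_tokens_type` value introduced in the config files above: OpenAI's newer chat models accept `max_completion_tokens` in place of the older `max_tokens` field, while many OpenAI-compatible and self-hosted endpoints still expect `max_tokens` or `max_new_tokens`. The app simply keys the request body off whatever field name the config supplies. A minimal sketch of that mapping (the `buildTokenLimit` helper is hypothetical, for illustration only):

```js
// Hypothetical helper mirroring how the config's max_tokens_type
// selects the field name sent to the API.
function buildTokenLimit(config) {
  const body = {};
  if (config.max_tokens_value) {
    // e.g. { max_completion_tokens: 4096 } for OpenAI,
    // or   { max_tokens: 32000 } for an OpenAI-compatible endpoint.
    body[config.max_tokens_type] = parseInt(config.max_tokens_value);
  }
  return body;
}
```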
return "grey" } + }, + downloadHistory() { + const blob = new Blob([JSON.stringify(this.conversation, null, 2)], { type: 'application/json' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = 'history.json'; + a.click(); + URL.revokeObjectURL(url); } } @@ -1006,15 +1037,17 @@
@@ -1006,15 +1037,17 @@
       model: [
         "gpt-4o-mini",
         "gpt-4o",
-        'gpt-4-turbo',
+        "gpt-4-turbo",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct",
         "meta-llama/Meta-Llama-3.1-70B-Instruct",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "mistralai/Mistral-7B-Instruct-v0.3",
         "mistralai/Mistral-Nemo-Instruct-2407",
-        'Qwen/Qwen2-72B-Instruct'],
+      ],
       authPrefix: ["Bearer", "Base", "Token"],
-      max_tokens_type: ["max_tokens", "max_new_tokens"],
+      max_tokens_type: ["max_tokens", "max_completion_tokens", "max_new_tokens"],
     }),
     persist: {
       enabled: true,
apiKey: "", url: "https://api2.aiql.com", path: "/chat/completions", - model: "meta-llama/Meta-Llama-3.1-70B-Instruct", + model: "Qwen/Qwen2.5-72B-Instruct", authPrefix: "Bearer", contentType: "application/json", max_tokens_type: "max_tokens", max_tokens_value: "32000", + temperature: "", + top_p: "", method: "POST", stream: true }), @@ -1094,6 +1129,14 @@
@@ -1094,6 +1129,14 @@
         body[chatbotStore.max_tokens_type] = parseInt(chatbotStore.max_tokens_value)
       }
 
+      if (chatbotStore.temperature) {
+        body["temperature"] = parseFloat(chatbotStore.temperature)
+      }
+
+      if (chatbotStore.top_p) {
+        body["top_p"] = parseFloat(chatbotStore.top_p)
+      }
+
       const request = {
         headers: headers,
         method: chatbotStore.method,
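With these two guards, the request body only carries sampling parameters the user has actually set: the store defaults `temperature: ""` and `top_p: ""` are falsy, so the fields are omitted and the server's own defaults apply. A condensed sketch of the resulting body assembly (the `model`/`messages`/`stream` wiring is abbreviated from the app, not quoted verbatim):

```js
// Sketch: optional fields are added only when truthy, so "" means
// "defer to the API's default" rather than sending 0 or null.
const body = {
  model: chatbotStore.model,
  messages: messages,
  stream: chatbotStore.stream
};
if (chatbotStore.max_tokens_value)
  body[chatbotStore.max_tokens_type] = parseInt(chatbotStore.max_tokens_value);
if (chatbotStore.temperature) body.temperature = parseFloat(chatbotStore.temperature);
if (chatbotStore.top_p) body.top_p = parseFloat(chatbotStore.top_p);
```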
@@ -1110,10 +1153,13 @@
       // Handle errors
       if (!completion.ok) {
         const errorData = await completion.json();
+        console.log(errorData.error?.message);
         if (errorData.error?.message)
-          snackbarStore.showErrorMessage(errorData.error.message);
+          snackbarStore.showErrorMessage(`${completion.status}: ${errorData.error.message}`);
+        else if (errorData.detail?.[0]?.msg)
+          snackbarStore.showErrorMessage(`${completion.status}${errorData.detail[0].loc ? " - " + errorData.detail[0].loc + ":" : ":"} ${errorData.detail[0].msg}`);
         else
-          snackbarStore.showErrorMessage(`${completion.status} ${completion.statusText}`);
+          snackbarStore.showErrorMessage(`${completion.status}: ${completion.statusText}`);
         return;
       }
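The new `else if` branch exists because OpenAI-style servers and FastAPI-based servers (common behind self-hosted OpenAI-compatible endpoints) report errors in different shapes. Note the optional chaining on `detail?.[0]` so the fallback branch cannot itself throw when neither shape is present. Roughly (illustrative payloads, not captured responses):

```js
// OpenAI-style error body:
const openAiStyleError = {
  error: { message: "Invalid API key", type: "invalid_request_error" }
};
// FastAPI validation-error body:
const fastApiStyleError = {
  detail: [{ loc: ["body", "temperature"], msg: "value is not a valid float", type: "type_error.float" }]
};
// The handler above prefers error.message, falls back to detail[0].msg
// (prefixed with its loc when present), then to the HTTP status text.
```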
@@ -1383,4 +1429,4 @@
-</html>
+</html>
\ No newline at end of file