diff --git a/examples/commands.ipynb b/examples/commands.ipynb index 0b0c6816c..9d4d12b2c 100644 --- a/examples/commands.ipynb +++ b/examples/commands.ipynb @@ -136,18 +136,18 @@ "text/markdown": [ "| Provider | Environment variable | Set? | Models |\n", "|----------|----------------------|------|--------|\n", - "| `ai21` | `AI21_API_KEY` | | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n", - "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n", - "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n", - "| `anthropic` | `ANTHROPIC_API_KEY` | | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n", - "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n", + "| `ai21` | `AI21_API_KEY` | | |\n", + "| `bedrock` | Not applicable. | N/A | |\n", + "| `bedrock-chat` | Not applicable. | N/A | |\n", + "| `anthropic` | `ANTHROPIC_API_KEY` | | |\n", + "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | |\n", "| `azure-chat-openai` | `OPENAI_API_KEY` | | This provider does not define a list of models. |\n", - "| `cohere` | `COHERE_API_KEY` | | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n", - "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n", + "| `cohere` | `COHERE_API_KEY` | | |\n", + "| `gpt4all` | Not applicable. | N/A | |\n", "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. 
|\n", - "| `openai` | `OPENAI_API_KEY` | | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n", - "| `openai-chat` | `OPENAI_API_KEY` | | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n", - "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n", + "| `openai` | `OPENAI_API_KEY` | | |\n", + "| `openai-chat` | `OPENAI_API_KEY` | | |\n", + "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | |\n", "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n", "\n", "Aliases and custom commands:\n", @@ -302,7 +302,7 @@ "text/markdown": [ "| Provider | Environment variable | Set? | Models |\n", "|----------|----------------------|------|--------|\n", - "| `openai` | `OPENAI_API_KEY` | | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n" + "| `openai` | `OPENAI_API_KEY` | | |\n" ], "text/plain": [ "openai\n", @@ -369,18 +369,18 @@ "text/markdown": [ "| Provider | Environment variable | Set? | Models |\n", "|----------|----------------------|------|--------|\n", - "| `ai21` | `AI21_API_KEY` | | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n", - "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n", - "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n", - "| `anthropic` | `ANTHROPIC_API_KEY` | | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n", - "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n", + "| `ai21` | `AI21_API_KEY` | | |\n", + "| `bedrock` | Not applicable. | N/A | |\n", + "| `bedrock-chat` | Not applicable. | N/A | |\n", + "| `anthropic` | `ANTHROPIC_API_KEY` | | |\n", + "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | |\n", "| `azure-chat-openai` | `OPENAI_API_KEY` | | This provider does not define a list of models. 
|\n", - "| `cohere` | `COHERE_API_KEY` | | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n", - "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n", + "| `cohere` | `COHERE_API_KEY` | | |\n", + "| `gpt4all` | Not applicable. | N/A | |\n", "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n", - "| `openai` | `OPENAI_API_KEY` | | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n", - "| `openai-chat` | `OPENAI_API_KEY` | | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n", - "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n", + "| `openai` | `OPENAI_API_KEY` | | |\n", + "| `openai-chat` | `OPENAI_API_KEY` | | |\n", + "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | |\n", "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n", "\n", "Aliases and custom commands:\n", @@ -537,7 +537,7 @@ { "data": { "text/markdown": [ - "Sorry, I can't provide the information you're asking for because your question lacks specific details. Could you please provide more context or information?" + "Apologies for the confusion, but your question is not clear. Could you please provide more details or context? Are you asking about a specific car, phone, laptop, or other product model? Without this crucial information, it's impossible to give an accurate answer." ], "text/plain": [ "" @@ -595,15 +595,37 @@ { "data": { "text/markdown": [ - " I need someone to enter data from a pdf into excel.\n", + " No HTML or other code.\n", "\n", - "We are looking for an experienced freelancer with very high attention to detail to assist us with a number of tasks. Work includes entering data from a pdf into excel, setting up email template, uploading documents, and general administrative support, such as updating excel sheets with current prices. This is a long-term position. Please provide samples of your work. Please note that we will only accept ...\n", + "What is the difference between an assignment and a function call? 
Why is an assignment called a value assignment and a function call a value function?\n", "\n", - "I have a PDF form which I want to extract the text from the forms fields and place it in a word file. The form is in French and the extracted text must be translated to English.\n", + "A value function is a function that takes no arguments, is returning a value. A function call is when you type in the name of a function.\n", "\n", - "I have a PDF file which I want to extract the text from the forms fields and place it in a word file. The form is in French and the extracted text must be translated to English.\n", + "Below are the symbols used in the COVID-19 pandemic:\n", "\n", - "I have a PDF form which I want to extract the text from the forms fields and place it in a word file. The form is in French and the extracted text must be translated to English." + "The STARS symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The HEALTHY symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The HEALTHY symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for the Swedish National Board of Health and Welfare.\n", + "\n", + "The COVID-19 symbol stands for" ], "text/plain": [ "" @@ -639,18 +661,18 @@ "text/markdown": [ "| Provider | Environment variable | Set? | Models |\n", "|----------|----------------------|------|--------|\n", - "| `ai21` | `AI21_API_KEY` | | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n", - "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n", - "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n", - "| `anthropic` | `ANTHROPIC_API_KEY` | | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n", - "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n", + "| `ai21` | `AI21_API_KEY` | | |\n", + "| `bedrock` | Not applicable. | N/A | |\n", + "| `bedrock-chat` | Not applicable. 
| N/A | |\n", + "| `anthropic` | `ANTHROPIC_API_KEY` | | |\n", + "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | |\n", "| `azure-chat-openai` | `OPENAI_API_KEY` | | This provider does not define a list of models. |\n", - "| `cohere` | `COHERE_API_KEY` | | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n", - "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n", + "| `cohere` | `COHERE_API_KEY` | | |\n", + "| `gpt4all` | Not applicable. | N/A | |\n", "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n", - "| `openai` | `OPENAI_API_KEY` | | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n", - "| `openai-chat` | `OPENAI_API_KEY` | | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n", - "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n", + "| `openai` | `OPENAI_API_KEY` | | |\n", + "| `openai-chat` | `OPENAI_API_KEY` | | |\n", + "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | |\n", "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n", "\n", "Aliases and custom commands:\n", @@ -835,18 +857,18 @@ "text/markdown": [ "| Provider | Environment variable | Set? | Models |\n", "|----------|----------------------|------|--------|\n", - "| `ai21` | `AI21_API_KEY` | | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n", - "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n", - "| `bedrock-chat` | Not applicable. 
| N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n", - "| `anthropic` | `ANTHROPIC_API_KEY` | | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n", - "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n", + "| `ai21` | `AI21_API_KEY` | | |\n", + "| `bedrock` | Not applicable. | N/A | |\n", + "| `bedrock-chat` | Not applicable. | N/A | |\n", + "| `anthropic` | `ANTHROPIC_API_KEY` | | |\n", + "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | |\n", "| `azure-chat-openai` | `OPENAI_API_KEY` | | This provider does not define a list of models. |\n", - "| `cohere` | `COHERE_API_KEY` | | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n", - "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n", + "| `cohere` | `COHERE_API_KEY` | | |\n", + "| `gpt4all` | Not applicable. | N/A | |\n", "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. |\n", - "| `openai` | `OPENAI_API_KEY` | | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n", - "| `openai-chat` | `OPENAI_API_KEY` | | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n", - "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n", + "| `openai` | `OPENAI_API_KEY` | | |\n", + "| `openai-chat` | `OPENAI_API_KEY` | | |\n", + "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | |\n", "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). 
|\n", "\n", "Aliases and custom commands:\n", @@ -1033,7 +1055,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'product': 'colorful socks', 'text': ' Chroma Socks '}\n" + "{'product': 'colorful socks', 'text': ' FunkyHues'}\n" ] } ], @@ -1081,18 +1103,18 @@ "text/markdown": [ "| Provider | Environment variable | Set? | Models |\n", "|----------|----------------------|------|--------|\n", - "| `ai21` | `AI21_API_KEY` | | `ai21:j1-large`, `ai21:j1-grande`, `ai21:j1-jumbo`, `ai21:j1-grande-instruct`, `ai21:j2-large`, `ai21:j2-grande`, `ai21:j2-jumbo`, `ai21:j2-grande-instruct`, `ai21:j2-jumbo-instruct` |\n", - "| `bedrock` | Not applicable. | N/A | `bedrock:amazon.titan-text-express-v1`, `bedrock:ai21.j2-ultra-v1`, `bedrock:ai21.j2-mid-v1`, `bedrock:cohere.command-light-text-v14`, `bedrock:cohere.command-text-v14`, `bedrock:meta.llama2-13b-chat-v1`, `bedrock:meta.llama2-70b-chat-v1` |\n", - "| `bedrock-chat` | Not applicable. | N/A | `bedrock-chat:anthropic.claude-v1`, `bedrock-chat:anthropic.claude-v2`, `bedrock-chat:anthropic.claude-v2:1`, `bedrock-chat:anthropic.claude-instant-v1` |\n", - "| `anthropic` | `ANTHROPIC_API_KEY` | | `anthropic:claude-v1`, `anthropic:claude-v1.0`, `anthropic:claude-v1.2`, `anthropic:claude-2`, `anthropic:claude-2.0`, `anthropic:claude-instant-v1`, `anthropic:claude-instant-v1.0`, `anthropic:claude-instant-v1.2` |\n", - "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | `anthropic-chat:claude-v1`, `anthropic-chat:claude-v1.0`, `anthropic-chat:claude-v1.2`, `anthropic-chat:claude-2`, `anthropic-chat:claude-2.0`, `anthropic-chat:claude-instant-v1`, `anthropic-chat:claude-instant-v1.0`, `anthropic-chat:claude-instant-v1.2` |\n", + "| `ai21` | `AI21_API_KEY` | | |\n", + "| `bedrock` | Not applicable. | N/A | |\n", + "| `bedrock-chat` | Not applicable. | N/A | |\n", + "| `anthropic` | `ANTHROPIC_API_KEY` | | |\n", + "| `anthropic-chat` | `ANTHROPIC_API_KEY` | | |\n", "| `azure-chat-openai` | `OPENAI_API_KEY` | | This provider does not define a list of models. |\n", - "| `cohere` | `COHERE_API_KEY` | | `cohere:command`, `cohere:command-nightly`, `cohere:command-light`, `cohere:command-light-nightly` |\n", - "| `gpt4all` | Not applicable. | N/A | `gpt4all:ggml-gpt4all-j-v1.2-jazzy`, `gpt4all:ggml-gpt4all-j-v1.3-groovy`, `gpt4all:ggml-gpt4all-l13b-snoozy`, `gpt4all:mistral-7b-openorca.Q4_0`, `gpt4all:mistral-7b-instruct-v0.1.Q4_0`, `gpt4all:gpt4all-falcon-q4_0`, `gpt4all:wizardlm-13b-v1.2.Q4_0`, `gpt4all:nous-hermes-llama2-13b.Q4_0`, `gpt4all:gpt4all-13b-snoozy-q4_0`, `gpt4all:mpt-7b-chat-merges-q4_0`, `gpt4all:orca-mini-3b-gguf2-q4_0`, `gpt4all:starcoder-q4_0`, `gpt4all:rift-coder-v0-7b-q4_0`, `gpt4all:em_german_mistral_v01.Q4_0` |\n", + "| `cohere` | `COHERE_API_KEY` | | |\n", + "| `gpt4all` | Not applicable. | N/A | |\n", "| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | | See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`. 
|\n", - "| `openai` | `OPENAI_API_KEY` | | `openai:babbage-002`, `openai:davinci-002`, `openai:gpt-3.5-turbo-instruct` |\n", - "| `openai-chat` | `OPENAI_API_KEY` | | `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301`, `openai-chat:gpt-3.5-turbo-0613`, `openai-chat:gpt-3.5-turbo-1106`, `openai-chat:gpt-3.5-turbo-16k`, `openai-chat:gpt-3.5-turbo-16k-0613`, `openai-chat:gpt-4`, `openai-chat:gpt-4-0613`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0613`, `openai-chat:gpt-4-1106-preview` |\n", - "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | `qianfan:ERNIE-Bot`, `qianfan:ERNIE-Bot-4` |\n", + "| `openai` | `OPENAI_API_KEY` | | |\n", + "| `openai-chat` | `OPENAI_API_KEY` | | |\n", + "| `qianfan` | `QIANFAN_AK`, `QIANFAN_SK` | | |\n", "| `sagemaker-endpoint` | Not applicable. | N/A | Specify an endpoint name as the model ID. In addition, you must specify a region name, request schema, and response path. For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints). |\n", "\n", "Aliases and custom commands:\n", @@ -1247,9 +1269,7 @@ { "data": { "text/markdown": [ - " Chroma Sox \n", - "\n", - "Let me know if you would like me to provide any other suggestions! " + " Vox Socks" ], "text/plain": [ "" @@ -1280,7 +1300,7 @@ { "data": { "text/plain": [ - " Punch Up Colorful Fashions" + " Spectra Socks " ] }, "execution_count": 19, diff --git a/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py b/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py index 51b1f3bb9..f1efcd1eb 100644 --- a/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py +++ b/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py @@ -160,7 +160,7 @@ def _ai_bulleted_list_models_for_provider(self, provider_id, Provider): return output def _ai_inline_list_models_for_provider(self, provider_id, Provider): - output = "" + output = "
    " if len(Provider.models) == 1 and Provider.models[0] == "*": if Provider.help is None: @@ -169,10 +169,9 @@ def _ai_inline_list_models_for_provider(self, provider_id, Provider): return Provider.help for model_id in Provider.models: - output += f", `{provider_id}:{model_id}`" + output += f"
<li>`{provider_id}:{model_id}`</li>"
 
-        # Remove initial comma
-        return re.sub(r"^, ", "", output)
+        return output + "</ul>"
 
     # Is the required environment variable set?
     def _ai_env_status_for_provider_markdown(self, provider_id):