Skip to content

Commit

Permalink
Update history to task
Browse files Browse the repository at this point in the history
Add persistent session_id
Delete init
  • Loading branch information
rahul-tuli committed Sep 25, 2023
1 parent 8773d27 commit 09eec2d
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 16 deletions.
36 changes: 21 additions & 15 deletions examples/chatbot-llm/chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,11 @@
Whether to display the token generation
speed or not [default:
no_show_tokens_per_sec]
--history / --no_history Whether to include history during prompt
generation or not [default: history]
--task TEXT The task to use for the pipeline. Choose any
of `chat`, `codegen`, `text-generation`
[default: chat]
--help Show this message and exit.
Installation: pip install deepsparse[transformers]
Examples:
Expand All @@ -61,11 +61,12 @@
4) Disable history
python chatbot.py models/llama/deployment \
--no_history
--task text-generation
"""
import click

from deepsparse import Pipeline
from deepsparse.tasks import SupportedTasks


@click.command(
Expand Down Expand Up @@ -103,18 +104,19 @@
help="Whether to display the token generation speed or not",
)
@click.option(
"--history/--no_history",
is_flag=True,
default=True,
help="Whether to include history during prompt generation or not",
"--task",
default="chat",
type=str,
help="The task to use for the pipeline. Choose any of "
"`chat`, `codegen`, `text-generation`",
)
def main(
model_path: str,
sequence_length: int,
sampling_temperature: float,
prompt_sequence_length: int,
show_tokens_per_sec: bool,
history: bool,
task: str,
):
"""
Command Line utility to interact with a text generation LLM in a chatbot style
Expand All @@ -123,21 +125,25 @@ def main(
python chatbot.py [OPTIONS] <MODEL_PATH>
"""
# chat pipeline, automatically adds history
task = "chat" if history else "text-generation"

session_ids = "chatbot_cli_session"

pipeline = Pipeline.create(
task=task,
task=task, # let pipeline determine if task is supported
model_path=model_path,
sequence_length=sequence_length,
sampling_temperature=sampling_temperature,
prompt_sequence_length=prompt_sequence_length,
)

# continue prompts until a keyboard interrupt
while True:
input_text = input("User: ")
response = pipeline(**{"sequences": [input_text]})
pipeline_inputs = {"prompt": [input_text]}

if SupportedTasks.is_chat(task):
pipeline_inputs["session_ids"] = session_ids

response = pipeline(**pipeline_inputs)
print("Bot: ", response.generations[0].text)
if show_tokens_per_sec:
times = pipeline.timer_manager.times
Expand Down
2 changes: 1 addition & 1 deletion src/deepsparse/tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,7 @@ def is_chat(cls, task: str) -> bool:
:param task: the name of the task to check whether it is a chat task
:return: True if it is a chat task, False otherwise
"""
return any([chat_task.matches(task) for chat_task in cls.chat])
return any(chat_task.matches(task) for chat_task in cls.chat)

@classmethod
def is_text_generation(cls, task: str) -> bool:
Expand Down

0 comments on commit 09eec2d

Please sign in to comment.