Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

This creates a memory actor for sharing memory across actors #82

Merged
merged 6 commits into from
Apr 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/jupyter-ai/jupyter_ai/actors/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ class ACTOR_TYPE(str, Enum):
DEFAULT = "default"
FILESYSTEM = "filesystem"
INDEX = 'index'
MEMORY = 'memory'

COMMANDS = {
'/fs': ACTOR_TYPE.FILESYSTEM,
Expand Down
10 changes: 6 additions & 4 deletions packages/jupyter-ai/jupyter_ai/actors/default.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import time
from uuid import uuid4
from jupyter_ai.actors.base import BaseActor, Logger
from jupyter_ai.actors.base import BaseActor, Logger, ACTOR_TYPE
from jupyter_ai.actors.memory import RemoteMemory
from jupyter_ai.models import AgentChatMessage, HumanChatMessage
from jupyter_ai_magics.providers import ChatOpenAINewProvider
from langchain import ConversationChain
Expand All @@ -14,6 +15,7 @@
HumanMessagePromptTemplate
)

SYSTEM_PROMPT = "The following is a friendly conversation between a human and an AI, whose name is Jupyter AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."


@ray.remote
Expand All @@ -24,16 +26,16 @@ def __init__(self, reply_queue: Queue, log: Logger):
provider = ChatOpenAINewProvider(model_id="gpt-3.5-turbo")

# Create a conversation memory
memory = ConversationBufferMemory(return_messages=True)
memory = RemoteMemory(actor_name=ACTOR_TYPE.MEMORY)
prompt_template = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template("The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."),
SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
chain = ConversationChain(
llm=provider,
prompt=prompt_template,
verbose=True,
verbose=True,
memory=memory
)
self.chat_provider = chain
Expand Down
90 changes: 90 additions & 0 deletions packages/jupyter-ai/jupyter_ai/actors/memory.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
from jupyter_ai.actors.base import Logger
from typing import Dict, Any, List
from langchain.schema import BaseMemory
import ray
from pydantic import PrivateAttr


@ray.remote
class MemoryActor:
    """Expose an arbitrary LangChain memory object as a Ray actor.

    Chains running inside other Ray actors can then share this single
    memory instance by talking to it through `RemoteMemory` (below).
    """

    def __init__(self, memory: BaseMemory, log: Logger):
        """Store the wrapped LangChain memory and a logger."""
        self.memory = memory
        self.log = log

    # --- read-only accessors, exposed as methods so they can be
    # --- invoked remotely via `.remote()` (properties cannot be)

    def get_chat_memory(self):
        """Return the underlying chat-message history object."""
        return self.memory.chat_memory

    def get_output_key(self):
        """Return the wrapped memory's output key."""
        return self.memory.output_key

    def get_input_key(self):
        """Return the wrapped memory's input key."""
        return self.memory.input_key

    def get_return_messages(self):
        """Return whether the memory yields message objects (vs. strings)."""
        return self.memory.return_messages

    def get_memory_variables(self):
        """Return the variable names this memory injects into prompts."""
        return self.memory.memory_variables

    # --- mutating / loading operations, delegated verbatim

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Persist one input/output exchange into the memory."""
        return self.memory.save_context(inputs, outputs)

    def clear(self):
        """Erase all stored conversation state."""
        return self.memory.clear()

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return the memory variables for the given chain inputs."""
        return self.memory.load_memory_variables(inputs)



class RemoteMemory(BaseMemory):
    """Wraps a MemoryActor into a LangChain memory class.

    This enables you to use a single distributed memory across multiple
    Ray actors running different chains: every property access and
    mutation is forwarded synchronously to the named memory actor.
    """

    # Name under which the backing MemoryActor was registered with Ray.
    actor_name: str
    # Resolved actor handle; private so pydantic does not treat it as a field.
    _actor: Any = PrivateAttr()

    def __init__(self, **data):
        """Validate fields via pydantic, then resolve the actor handle once."""
        super().__init__(**data)
        self._actor = ray.get_actor(self.actor_name)

    @property
    def memory_variables(self) -> List[str]:
        """Variable names the remote memory injects into prompts."""
        o = self._actor.get_memory_variables.remote()
        return ray.get(o)

    @property
    def output_key(self):
        """Output key of the remote memory."""
        o = self._actor.get_output_key.remote()
        return ray.get(o)

    @property
    def input_key(self):
        """Input key of the remote memory."""
        o = self._actor.get_input_key.remote()
        return ray.get(o)

    @property
    def return_messages(self):
        """Whether the remote memory yields message objects (vs. strings)."""
        o = self._actor.get_return_messages.remote()
        return ray.get(o)

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Persist one input/output exchange into the remote memory."""
        o = self._actor.save_context.remote(inputs, outputs)
        return ray.get(o)

    def clear(self) -> None:
        """Erase all state in the remote memory.

        Blocks until the actor has actually cleared, matching the
        synchronous semantics of save_context(); previously this was
        fire-and-forget, so a load issued immediately afterwards could
        observe stale history.
        """
        ray.get(self._actor.clear.remote())

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Fetch the memory variables for the given chain inputs."""
        o = self._actor.load_memory_variables.remote(inputs)
        return ray.get(o)
19 changes: 9 additions & 10 deletions packages/jupyter-ai/jupyter_ai/actors/router.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,19 @@
from jupyter_ai.actors.base import ACTOR_TYPE, COMMANDS, Logger
from jupyter_ai.actors.base import ACTOR_TYPE, COMMANDS, Logger, BaseActor
from jupyter_ai.models import ClearMessage

import ray
from ray.util.queue import Queue


@ray.remote
class Router():
"""Routes messages to the correct actor. To register new
actors, add the actor type in the `ACTOR_TYPE` enum and
add a corresponding command in the `COMMANDS` dictionary.
"""

def __init__(self, log: Logger, reply_queue: Queue):
self.log = log
self.reply_queue = reply_queue
class Router(BaseActor):
def __init__(self, reply_queue: Queue, log: Logger):
"""Routes messages to the correct actor.

To register new actors, add the actor type in the `ACTOR_TYPE` enum and
add a corresponding command in the `COMMANDS` dictionary.
"""
super().__init__(log=log, reply_queue=reply_queue)

def route_message(self, message):

Expand Down
7 changes: 7 additions & 0 deletions packages/jupyter-ai/jupyter_ai/extension.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
import asyncio
import queue
from langchain.memory import ConversationBufferWindowMemory
from jupyter_ai.actors.default import DefaultActor
from jupyter_ai.actors.filesystem import FileSystemActor
from jupyter_ai.actors.index import DocumentIndexActor
from jupyter_ai.actors.router import Router
from jupyter_ai.actors.memory import MemoryActor
from jupyter_ai.actors.base import ACTOR_TYPE
from jupyter_ai.reply_processor import ReplyProcessor
from jupyter_server.extension.application import ExtensionApp
Expand Down Expand Up @@ -122,10 +124,15 @@ def initialize_settings(self):
reply_queue=reply_queue,
log=self.log
)
memory_actor = MemoryActor.options(name=ACTOR_TYPE.MEMORY.value).remote(
memory=ConversationBufferWindowMemory(return_messages=True, k=2),
log=self.log
)
self.settings['router'] = router
self.settings["default_actor"] = default_actor
self.settings["index_actor"] = index_actor
self.settings["fs_actor"] = fs_actor
self.settings["memory_actor"] = memory_actor

reply_processor = ReplyProcessor(self.settings['chat_handlers'], reply_queue, log=self.log)
loop = asyncio.get_event_loop()
Expand Down