app.py
import os

import docx
import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from streamlit_chat import message

from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.callbacks import get_openai_callback
# from langchain import HuggingFaceHub  # only needed for the HuggingFace variant below
def main():
    load_dotenv()
    st.set_page_config(page_title="Chat with any files")
    st.header("💬 Chatbot")

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None
    if "processComplete" not in st.session_state:
        st.session_state.processComplete = None

    with st.sidebar:
        uploaded_files = st.file_uploader("Upload your file", type=['pdf', 'docx'], accept_multiple_files=True)
        openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
        process = st.button("Process")

    if process:
        if not openai_api_key:
            st.info("Please add your OpenAI API key to continue.")
            st.stop()
        files_text = get_files_text(uploaded_files)
        # split the raw text into chunks
        text_chunks = get_text_chunks(files_text)
        # create the vector store
        vectorstore = get_vectorstore(text_chunks)
        # create the conversation chain
        st.session_state.conversation = get_conversation_chain(vectorstore, openai_api_key)  # for OpenAI
        # st.session_state.conversation = get_conversation_chain(vectorstore)  # for HuggingFace
        st.session_state.processComplete = True

    if st.session_state.processComplete:
        user_question = st.chat_input("Ask a question about your files.")
        if user_question:
            handle_userinput(user_question)
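
# Launch locally with `streamlit run app.py`. load_dotenv() reads a local
# .env file at startup, presumably so credentials such as the
# HUGGINGFACEHUB_API_TOKEN for the commented HuggingFace variant can live
# there; the OpenAI key itself is taken from the sidebar text input.
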
def get_files_text(uploaded_files):
    text = ""
    for uploaded_file in uploaded_files:
        # lower-case the extension so "FILE.PDF" is still routed correctly
        file_extension = os.path.splitext(uploaded_file.name)[1].lower()
        if file_extension == ".pdf":
            text += get_pdf_text(uploaded_file)
        elif file_extension == ".docx":
            text += get_docx_text(uploaded_file)
        else:
            # anything else falls through to the CSV placeholder below
            text += get_csv_text(uploaded_file)
    return text
def get_pdf_text(pdf):
    pdf_reader = PdfReader(pdf)
    text = ""
    for page in pdf_reader.pages:
        # extract_text() can return None for image-only pages
        text += page.extract_text() or ""
    return text
def get_docx_text(file):
    doc = docx.Document(file)
    # join paragraph texts with spaces into a single string
    all_text = [para.text for para in doc.paragraphs]
    return ' '.join(all_text)
def get_csv_text(file):
    # placeholder: CSV extraction is not implemented, and the uploader
    # above only accepts .pdf and .docx anyway
    return "a"
def get_text_chunks(text):
    # split the text into overlapping chunks
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=900,
        chunk_overlap=100,
        length_function=len,
    )
    chunks = text_splitter.split_text(text)
    return chunks
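
# The 900/100 chunk size and overlap are tuning knobs, not fixed requirements;
# LangChain's RecursiveCharacterTextSplitter is a common drop-in alternative
# when documents lack clean newline boundaries.
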
def get_vectorstore(text_chunks):
    # uses the library's default sentence-transformers model
    embeddings = HuggingFaceEmbeddings()
    # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
    knowledge_base = FAISS.from_texts(text_chunks, embeddings)
    return knowledge_base
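
# The index lives in memory and is rebuilt on every "Process" click; FAISS
# supports local persistence if reuse across sessions is wanted (a sketch,
# path name is illustrative):
# knowledge_base.save_local("faiss_index")
# knowledge_base = FAISS.load_local("faiss_index", embeddings)
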
def get_conversation_chain(vectorstore, openai_api_key):
    llm = ChatOpenAI(openai_api_key=openai_api_key, model_name='gpt-3.5-turbo', temperature=0)
    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    return conversation_chain
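
# as_retriever() defaults to similarity search; the number of chunks fed to
# the LLM per question can be tuned, e.g.
# vectorstore.as_retriever(search_kwargs={"k": 4}).
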
# HuggingFace variant (requires the HuggingFaceHub import above and a
# HUGGINGFACEHUB_API_TOKEN in the environment):
# def get_conversation_chain(vectorstore):
#     llm = HuggingFaceHub(repo_id="google/flan-t5-large",
#                          model_kwargs={"temperature": 0.5, "max_length": 64})
#     memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
#     conversation_chain = ConversationalRetrievalChain.from_llm(
#         llm=llm,
#         retriever=vectorstore.as_retriever(),
#         memory=memory,
#     )
#     return conversation_chain
def handle_userinput(user_question):
    with get_openai_callback() as cb:
        response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    # layout of the input/response containers
    response_container = st.container()
    with response_container:
        for i, messages in enumerate(st.session_state.chat_history):
            if i % 2 == 0:
                message(messages.content, is_user=True, key=str(i))
            else:
                message(messages.content, key=str(i))
        st.write(f"Total Tokens: {cb.total_tokens}, "
                 f"Prompt Tokens: {cb.prompt_tokens}, "
                 f"Completion Tokens: {cb.completion_tokens}, "
                 f"Total Cost (USD): ${cb.total_cost}")

# HTML-template rendering variant (needs user_template/bot_template
# definitions, which are not part of this file):
# for i, message in enumerate(st.session_state.chat_history):
#     if i % 2 == 0:
#         st.write(user_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
#     else:
#         st.write(bot_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
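
# Note: get_openai_callback() only tracks OpenAI API calls, so the token and
# cost figures above stay at zero if the HuggingFace chain variant is used.
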
if __name__ == '__main__':
    main()