0% found this document useful (0 votes)
34 views2 pages

Chatbot Code

chatbot

Uploaded by

Tamer Mostafa
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as TXT or PDF, or read online on Scribd
0% found this document useful (0 votes)
34 views2 pages

Chatbot Code

chatbot

Uploaded by

Tamer Mostafa
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as TXT or PDF, or read online on Scribd
You are on page 1/ 2

"""Streamlit chatbot: upload a PDF, then ask questions answered via RAG.

Pipeline: extract PDF text -> split into chunks -> embed with OpenAI ->
index in FAISS -> similarity-search the user's question -> answer with a
"stuff" QA chain over the matched chunks.
"""

import os

import streamlit as st
from PyPDF2 import PdfReader

from langchain.text_splitter import RecursiveCharacterTextSplitter
# NOTE(review): the original import paths were garbled in extraction; these
# reconstructions match the langchain_community style used elsewhere in the
# file — confirm against the installed langchain version.
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain_community.chat_models import ChatOpenAI

# SECURITY: the original hard-coded a live-looking OpenAI API key in source.
# Never commit secrets; read the key from the environment instead (and revoke
# the leaked key).
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")

# --- UI: page header and PDF upload in the sidebar ---
st.header("My first Chatbot")

with st.sidebar:
    st.title("Your Documents")
    file = st.file_uploader(
        "Upload a PDF file and start asking questions", type="pdf"
    )

# --- Extract the text (only once a file has been uploaded) ---
if file is not None:
    pdf_reader = PdfReader(file)
    text = ""
    for page in pdf_reader.pages:
        # extract_text() may return "" for image-only pages; concatenation
        # is still safe in that case.
        text += page.extract_text()

    # --- Break the text into overlapping chunks for embedding ---
    text_splitter = RecursiveCharacterTextSplitter(
        separators="\n",
        chunk_size=1000,
        chunk_overlap=150,  # overlap preserves context across chunk edges
        length_function=len,
    )
    chunks = text_splitter.split_text(text)

    # Generate embeddings for each chunk.
    embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)

    # Create the FAISS vector store (in-memory index over the chunks).
    vector_store = FAISS.from_texts(chunks, embeddings)

    # Get the user's question.
    user_question = st.text_input("Type Your question here")

    # Similarity search + answer generation only when a question was typed.
    if user_question:
        match = vector_store.similarity_search(user_question)

        # Define the LLM (deterministic output via temperature=0).
        llm = ChatOpenAI(
            openai_api_key=OPENAI_API_KEY,
            temperature=0,
            max_tokens=1000,
            model_name="gpt-3.5-turbo",
        )

        # Chain: take the question, get relevant documents, pass them to
        # the LLM, generate the output. "stuff" puts all matched chunks
        # into a single prompt.
        chain = load_qa_chain(llm, chain_type="stuff")
        response = chain.run(input_documents=match, question=user_question)
        st.write(response)

You might also like