-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathdemo.py
48 lines (35 loc) · 1.29 KB
/
demo.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
# Setup
## Libraries
import openai
import os
from dotenv import load_dotenv, find_dotenv
from IPython.display import display, Markdown
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import DocArrayInMemorySearch
## Variables
_ = load_dotenv(find_dotenv())  # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
file = 'data/pdfs/ISLRv2_website.pdf'

# Load file — PyPDFLoader yields one Document per PDF page.
loader = PyPDFLoader(file)
document = loader.load()

# Split into overlapping chunks so each embedding covers a coherent span
# and neighbouring chunks share context across the cut.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150)
splits = text_splitter.split_documents(document)

# Convert into embeddings and store in an in-memory vector store.
embeddings = OpenAIEmbeddings()
db = DocArrayInMemorySearch.from_documents(
    splits,
    embeddings
)

# Query: retrieve the most similar chunks and stuff them into the prompt.
query = input("Your query, e.g. 'Explain bias-variance tradeoff for a linear regression model.'")
# BUG FIX: similarity_search's result-count parameter is `k`, not `limit`.
# The original `limit=10` was swallowed by **kwargs and ignored, so only the
# default k=4 chunks were ever retrieved.
similar_pages = db.similarity_search(query, k=10)
llm = ChatOpenAI(temperature=0.0)  # temperature 0 for deterministic answers
# Concatenate the retrieved page contents (same ""-join as before, idiomatic loop).
qpages = "".join(page.page_content for page in similar_pages)
response = llm.call_as_llm(f"{qpages} Question: {query}")
display(Markdown(response))