RAG.py (1.3 KB)

from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain import hub
from langchain.schema.runnable import RunnablePassthrough
from loadPDF import loadPDF

# Ask the local helper for the PDF to index, then load it page by page
PDFname = loadPDF()
loader = PyPDFLoader(PDFname)

# Based on https://python.langchain.com/docs/use_cases/question_answering/
# Split the loaded pages into 500-character chunks with no overlap
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
splits = text_splitter.split_documents(loader.load())

# Embed the chunks and store them in a Chroma vector store
vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()

# Prompt template from the LangChain hub
# https://smith.langchain.com/hub/rlm/rag-prompt
rag_prompt = hub.pull("rlm/rag-prompt")

# LLM
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

# RAG chain: retrieve context, fill the prompt, send it to the model
rag_chain = {"context": retriever, "question": RunnablePassthrough()} | rag_prompt | llm

# Interactive question loop
again = True
while again:
    question = input("What's your question? Type q to quit: ")
    if question == 'q':
        again = False
        break
    result = rag_chain.invoke(question)
    # The chain returns an AIMessage; its text is in .content
    print(result.content)
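
Note that the script imports a local loadPDF module that is not shown on this page, and that OpenAIEmbeddings and ChatOpenAI both expect the OPENAI_API_KEY environment variable to be set. Below is a minimal sketch of what loadPDF.py might look like, assuming it simply prompts the user for the path to a PDF; the actual helper in this repository may work differently.

# loadPDF.py -- hypothetical sketch, not the repository's actual helper
import os

def loadPDF():
    """Prompt for a PDF path and return it once it points at an existing .pdf file."""
    while True:
        path = input("Path to the PDF you want to query: ").strip()
        if os.path.isfile(path) and path.lower().endswith(".pdf"):
            return path
        print(f"'{path}' is not an existing PDF file, please try again.")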