File renamed without changes.
File renamed without changes.
1 change: 1 addition & 0 deletions notebooks/Chapter 1_ Prompt Chaining (Code Example).ipynb
@@ -0,0 +1 @@
{"cells":[{"cell_type":"code","source":["import os\n","from langchain_openai import ChatOpenAI\n","from langchain_core.prompts import ChatPromptTemplate\n","from langchain_core.output_parsers import StrOutputParser\n","\n","# For better security, load environment variables from a .env file\n","# from dotenv import load_dotenv\n","# load_dotenv()\n","# Make sure your OPENAI_API_KEY is set in the .env file\n","\n","# Initialize the Language Model (using ChatOpenAI is recommended)\n","llm = ChatOpenAI(temperature=0)\n","\n","# --- Prompt 1: Extract Information ---\n","prompt_extract = ChatPromptTemplate.from_template(\n"," \"Extract the technical specifications from the following text:\\n\\n{text_input}\"\n",")\n","\n","# --- Prompt 2: Transform to JSON ---\n","prompt_transform = ChatPromptTemplate.from_template(\n"," \"Transform the following specifications into a JSON object with 'cpu', 'memory', and 'storage' as keys:\\n\\n{specifications}\"\n",")\n","\n","# --- Build the Chain using LCEL ---\n","# The StrOutputParser() converts the LLM's message output to a simple string.\n","extraction_chain = prompt_extract | llm | StrOutputParser()\n","\n","# The full chain passes the output of the extraction chain into the 'specifications'\n","# variable for the transformation prompt.\n","full_chain = (\n"," {\"specifications\": extraction_chain}\n"," | prompt_transform\n"," | llm\n"," | StrOutputParser()\n",")\n","\n","# --- Run the Chain ---\n","input_text = \"The new laptop model features a 3.5 GHz octa-core processor, 16GB of RAM, and a 1TB NVMe SSD.\"\n","\n","# Execute the chain with the input text dictionary.\n","final_result = full_chain.invoke({\"text_input\": input_text})\n","\n","print(\"\\n--- Final JSON Output ---\")\n","print(final_result)"],"outputs":[],"execution_count":null,"metadata":{"id":"NhfdOggSqMrH"}}],"metadata":{"colab":{"provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"}},"nbformat":4,"nbformat_minor":0}