LangChain Expression Language (LCEL)¶
InĀ [1]:
Copied!
# Load the OpenAI API key from a local .env file and hand it to the client.
import os
import openai
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']  # raises KeyError if the key is not set
import os import openai from dotenv import load_dotenv, find_dotenv _ = load_dotenv(find_dotenv()) # read local .env file openai.api_key = os.environ['OPENAI_API_KEY']
InĀ [2]:
Copied!
# Pin pydantic to the version this notebook was tested with (uncomment to install).
#!pip install pydantic==1.10.8
#!pip install pydantic==1.10.8
InĀ [3]:
Copied!
# Core LCEL building blocks: prompt template, chat model, and string output parser.
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
from langchain.prompts import ChatPromptTemplate from langchain.chat_models import ChatOpenAI from langchain.schema.output_parser import StrOutputParser
Simple Chain¶
InĀ [4]:
Copied!
# Components of the simple chain: a one-variable prompt, the default
# ChatOpenAI model, and a parser that extracts the reply as a plain string.
prompt = ChatPromptTemplate.from_template(
"tell me a short joke about {topic}"
)
model = ChatOpenAI()
output_parser = StrOutputParser()
prompt = ChatPromptTemplate.from_template( "tell me a short joke about {topic}" ) model = ChatOpenAI() output_parser = StrOutputParser()
InĀ [8]:
Copied!
# LCEL pipe syntax: each stage's output feeds the next (prompt -> model -> parser).
chain = prompt | model | output_parser
chain = prompt | model | output_parser
InĀ [10]:
Copied!
# Run the chain end-to-end; the parser makes the result a printable string.
print(chain.invoke({"topic": "bears"}))
print(chain.invoke({"topic": "bears"}))
Why don't bears like fast food? Because they can't catch it!
More complex chain¶
We also use a RunnableMap to supply user-provided inputs to the prompt.
InĀ [11]:
Copied!
# Embeddings model and in-memory vector store for the retrieval example.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import DocArrayInMemorySearch
InĀ [12]:
Copied!
# Build a tiny in-memory vector store from two sentences (calls the OpenAI
# embeddings API) and expose it through the standard retriever interface.
vectorstore = DocArrayInMemorySearch.from_texts(
["harrison worked at kensho", "bears like to eat honey"],
embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
vectorstore = DocArrayInMemorySearch.from_texts( ["harrison worked at kensho", "bears like to eat honey"], embedding=OpenAIEmbeddings() ) retriever = vectorstore.as_retriever()
InĀ [13]:
Copied!
# Similarity search: both documents come back, the kensho sentence ranked first.
retriever.get_relevant_documents("where did harrison work?")
retriever.get_relevant_documents("where did harrison work?")
Out[13]:
[Document(page_content='harrison worked at kensho'), Document(page_content='bears like to eat honey')]
InĀ [14]:
Copied!
# Same search with the other topic: the honey sentence now ranks first.
retriever.get_relevant_documents("what do bears like to eat")
retriever.get_relevant_documents("what do bears like to eat")
Out[14]:
[Document(page_content='bears like to eat honey'), Document(page_content='harrison worked at kensho')]
InĀ [15]:
Copied!
# Prompt that restricts the model's answer to the retrieved context.
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template)
InĀ [16]:
Copied!
from langchain.schema.runnable import RunnableMap  # maps one input dict to several named outputs
from langchain.schema.runnable import RunnableMap
InĀ [17]:
Copied!
# The RunnableMap builds the {context, question} dict the prompt expects:
# context comes from retrieval, question is passed through unchanged.
chain = RunnableMap({
"context": lambda x: retriever.get_relevant_documents(x["question"]),
"question": lambda x: x["question"]
}) | prompt | model | output_parser
chain = RunnableMap({ "context": lambda x: retriever.get_relevant_documents(x["question"]), "question": lambda x: x["question"] }) | prompt | model | output_parser
InĀ [18]:
Copied!
chain.invoke({"question": "where did harrison work?"})  # retrieval-augmented answer
chain.invoke({"question": "where did harrison work?"})
Out[18]:
'Harrison worked at Kensho.'
InĀ [19]:
Copied!
# The map stage on its own, so we can inspect exactly what the prompt receives.
inputs = RunnableMap({
"context": lambda x: retriever.get_relevant_documents(x["question"]),
"question": lambda x: x["question"]
})
inputs = RunnableMap({ "context": lambda x: retriever.get_relevant_documents(x["question"]), "question": lambda x: x["question"] })
InĀ [20]:
Copied!
inputs.invoke({"question": "where did harrison work?"})  # shows the dict fed into the prompt
inputs.invoke({"question": "where did harrison work?"})
Out[20]:
{'context': [Document(page_content='harrison worked at kensho'), Document(page_content='bears like to eat honey')], 'question': 'where did harrison work?'}
Bind¶
and OpenAI Functions
InĀ [21]:
Copied!
# OpenAI function-calling schema: a single tool that looks up weather
# by airport code. The list is passed to the model via .bind(functions=...).
weather_search_schema = {
    "name": "weather_search",
    "description": "Search for weather given an airport code",
    "parameters": {
        "type": "object",
        "properties": {
            "airport_code": {
                "type": "string",
                "description": "The airport code to get the weather for",
            },
        },
        "required": ["airport_code"],
    },
}
functions = [weather_search_schema]
functions = [ { "name": "weather_search", "description": "Search for weather given an airport code", "parameters": { "type": "object", "properties": { "airport_code": { "type": "string", "description": "The airport code to get the weather for" }, }, "required": ["airport_code"] } } ]
InĀ [22]:
Copied!
# Minimal single-message chat prompt, plus a deterministic (temperature=0)
# model with the function schema bound so every call may emit a function_call.
prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}")
]
)
model = ChatOpenAI(temperature=0).bind(functions=functions)
prompt = ChatPromptTemplate.from_messages( [ ("human", "{input}") ] ) model = ChatOpenAI(temperature=0).bind(functions=functions)
InĀ [23]:
Copied!
runnable = prompt | model  # no output parser: we want the raw AIMessage (including function_call)
runnable = prompt | model
InĀ [24]:
Copied!
# The model answers with a weather_search function_call ({"airport_code": "SFO"}).
runnable.invoke({"input": "what is the weather in sf"})
runnable.invoke({"input": "what is the weather in sf"})
Out[24]:
AIMessage(content='', additional_kwargs={'function_call': {'name': 'weather_search', 'arguments': '{"airport_code":"SFO"}'}})
InĀ [25]:
Copied!
# Expanded tool list: the weather lookup from before plus a sports-news search.
# With two schemas bound, the model must choose which function fits the input.
def _tool_schema(name, description, arg_name, arg_description):
    """Build a one-argument OpenAI function schema with a required string arg."""
    return {
        "name": name,
        "description": description,
        "parameters": {
            "type": "object",
            "properties": {
                arg_name: {
                    "type": "string",
                    "description": arg_description,
                },
            },
            "required": [arg_name],
        },
    }

functions = [
    _tool_schema(
        "weather_search",
        "Search for weather given an airport code",
        "airport_code",
        "The airport code to get the weather for",
    ),
    _tool_schema(
        "sports_search",
        "Search for news of recent sport events",
        "team_name",
        "The sports team to search for",
    ),
]
functions = [ { "name": "weather_search", "description": "Search for weather given an airport code", "parameters": { "type": "object", "properties": { "airport_code": { "type": "string", "description": "The airport code to get the weather for" }, }, "required": ["airport_code"] } }, { "name": "sports_search", "description": "Search for news of recent sport events", "parameters": { "type": "object", "properties": { "team_name": { "type": "string", "description": "The sports team to search for" }, }, "required": ["team_name"] } } ]
InĀ [26]:
Copied!
model = model.bind(functions=functions)  # re-bind with the expanded two-function list
model = model.bind(functions=functions)
InĀ [27]:
Copied!
runnable = prompt | model  # rebuild the chain with the re-bound model
runnable = prompt | model
InĀ [28]:
Copied!
# A sports question now routes to sports_search instead of weather_search.
runnable.invoke({"input": "how did the patriots do yesterday?"})
runnable.invoke({"input": "how did the patriots do yesterday?"})
Out[28]:
AIMessage(content='', additional_kwargs={'function_call': {'name': 'sports_search', 'arguments': '{"team_name":"New England Patriots"}'}})
Fallbacks¶
InĀ [29]:
Copied!
# Completion-style LLM and json for the parsing-fallback demo.
from langchain.llms import OpenAI
import json
from langchain.llms import OpenAI import json
Note: Due to the deprecation of OpenAI's model text-davinci-001
on 4 January 2024, you'll be using OpenAI's recommended replacement model gpt-3.5-turbo-instruct
instead.
InĀ [30]:
Copied!
# A completion model piped straight into json.loads; this chain fails
# whenever the model's raw text is not a single valid JSON document.
simple_model = OpenAI(
temperature=0,
max_tokens=1000,
model="gpt-3.5-turbo-instruct"
)
simple_chain = simple_model | json.loads
simple_model = OpenAI( temperature=0, max_tokens=1000, model="gpt-3.5-turbo-instruct" ) simple_chain = simple_model | json.loads
InĀ [31]:
Copied!
# A prompt that tends to yield several JSON blobs back to back (invalid as one document).
challenge = "write three poems in a json blob, where each poem is a json blob of a title, author, and first line"
challenge = "write three poems in a json blob, where each poem is a json blob of a title, author, and first line"
InĀ [32]:
Copied!
simple_model.invoke(challenge)  # raw text: three separate JSON objects, not one document
simple_model.invoke(challenge)
Out[32]:
'\n\n{\n "title": "Autumn Leaves",\n "author": "Emily Dickinson",\n "first_line": "The leaves are falling, one by one"\n}\n\n{\n "title": "The Ocean\'s Song",\n "author": "Pablo Neruda",\n "first_line": "I hear the ocean\'s song, a symphony of waves"\n}\n\n{\n "title": "A Winter\'s Night",\n "author": "Robert Frost",\n "first_line": "The snow falls softly, covering the ground"\n}'
Note: The next line is expected to fail.
InĀ [33]:
Copied!
# Raises JSONDecodeError ("Extra data"): json.loads rejects multiple top-level objects.
simple_chain.invoke(challenge)
simple_chain.invoke(challenge)
--------------------------------------------------------------------------- JSONDecodeError Traceback (most recent call last) Cell In[33], line 1 ----> 1 simple_chain.invoke(challenge) File /usr/local/lib/python3.9/site-packages/langchain/schema/runnable/base.py:1113, in RunnableSequence.invoke(self, input, config) 1111 try: 1112 for i, step in enumerate(self.steps): -> 1113 input = step.invoke( 1114 input, 1115 # mark each step as a child run 1116 patch_config( 1117 config, callbacks=run_manager.get_child(f"seq:step:{i+1}") 1118 ), 1119 ) 1120 # finish the root run 1121 except BaseException as e: File /usr/local/lib/python3.9/site-packages/langchain/schema/runnable/base.py:2163, in RunnableLambda.invoke(self, input, config, **kwargs) 2161 """Invoke this runnable synchronously.""" 2162 if hasattr(self, "func"): -> 2163 return self._call_with_config( 2164 self._invoke, 2165 input, 2166 self._config(config, self.func), 2167 ) 2168 else: 2169 raise TypeError( 2170 "Cannot invoke a coroutine function synchronously." 2171 "Use `ainvoke` instead." 
2172 ) File /usr/local/lib/python3.9/site-packages/langchain/schema/runnable/base.py:633, in Runnable._call_with_config(self, func, input, config, run_type, **kwargs) 626 run_manager = callback_manager.on_chain_start( 627 dumpd(self), 628 input, 629 run_type=run_type, 630 name=config.get("run_name"), 631 ) 632 try: --> 633 output = call_func_with_variable_args( 634 func, input, run_manager, config, **kwargs 635 ) 636 except BaseException as e: 637 run_manager.on_chain_error(e) File /usr/local/lib/python3.9/site-packages/langchain/schema/runnable/config.py:173, in call_func_with_variable_args(func, input, run_manager, config, **kwargs) 171 if accepts_run_manager(func): 172 kwargs["run_manager"] = run_manager --> 173 return func(input, **kwargs) File /usr/local/lib/python3.9/site-packages/langchain/schema/runnable/base.py:2096, in RunnableLambda._invoke(self, input, run_manager, config) 2090 def _invoke( 2091 self, 2092 input: Input, 2093 run_manager: CallbackManagerForChainRun, 2094 config: RunnableConfig, 2095 ) -> Output: -> 2096 output = call_func_with_variable_args(self.func, input, run_manager, config) 2097 # If the output is a runnable, invoke it 2098 if isinstance(output, Runnable): File /usr/local/lib/python3.9/site-packages/langchain/schema/runnable/config.py:173, in call_func_with_variable_args(func, input, run_manager, config, **kwargs) 171 if accepts_run_manager(func): 172 kwargs["run_manager"] = run_manager --> 173 return func(input, **kwargs) File /usr/local/lib/python3.9/json/__init__.py:346, in loads(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw) 341 s = s.decode(detect_encoding(s), 'surrogatepass') 343 if (cls is None and object_hook is None and 344 parse_int is None and parse_float is None and 345 parse_constant is None and object_pairs_hook is None and not kw): --> 346 return _default_decoder.decode(s) 347 if cls is None: 348 cls = JSONDecoder File /usr/local/lib/python3.9/json/decoder.py:340, in 
JSONDecoder.decode(self, s, _w) 338 end = _w(s, end).end() 339 if end != len(s): --> 340 raise JSONDecodeError("Extra data", s, end) 341 return obj JSONDecodeError: Extra data: line 9 column 1 (char 125)
InĀ [34]:
Copied!
# Chat-model alternative whose string output parses as a single JSON object.
model = ChatOpenAI(temperature=0)
chain = model | StrOutputParser() | json.loads
model = ChatOpenAI(temperature=0) chain = model | StrOutputParser() | json.loads
InĀ [35]:
Copied!
chain.invoke(challenge)  # parses cleanly into a dict of three poems
chain.invoke(challenge)
Out[35]:
{'poem1': {'title': 'The Night Sky', 'author': 'Emily Dickinson', 'firstLine': 'The night is starry and the stars are blue.'}, 'poem2': {'title': 'Autumn Leaves', 'author': 'Robert Frost', 'firstLine': "My sorrow, when she's here with me, thinks these dark days of autumn rain are beautiful as days can be."}, 'poem3': {'title': 'Hope is the Thing with Feathers', 'author': 'Emily Dickinson', 'firstLine': 'Hope is the thing with feathers that perches in the soul.'}}
InĀ [36]:
Copied!
# Try simple_chain first; on error, fall back to the chat-model chain.
final_chain = simple_chain.with_fallbacks([chain])
final_chain = simple_chain.with_fallbacks([chain])
InĀ [37]:
Copied!
final_chain.invoke(challenge)  # simple_chain fails to parse; the fallback supplies the result
final_chain.invoke(challenge)
Out[37]:
{'poem1': {'title': 'The Rose', 'author': 'Emily Dickinson', 'firstLine': 'A rose by any other name would smell as sweet'}, 'poem2': {'title': 'The Road Not Taken', 'author': 'Robert Frost', 'firstLine': 'Two roads diverged in a yellow wood'}, 'poem3': {'title': 'Hope is the Thing with Feathers', 'author': 'Emily Dickinson', 'firstLine': 'Hope is the thing with feathers that perches in the soul'}}
Interface¶
InĀ [38]:
Copied!
# Rebuild the basic joke chain to demonstrate the common Runnable interface
# (invoke / batch / stream / ainvoke below).
prompt = ChatPromptTemplate.from_template(
"Tell me a short joke about {topic}"
)
model = ChatOpenAI()
output_parser = StrOutputParser()
chain = prompt | model | output_parser
prompt = ChatPromptTemplate.from_template( "Tell me a short joke about {topic}" ) model = ChatOpenAI() output_parser = StrOutputParser() chain = prompt | model | output_parser
InĀ [39]:
Copied!
chain.invoke({"topic": "bears"})  # synchronous single call
chain.invoke({"topic": "bears"})
Out[39]:
'Why do bears have hairy coats? \n\nFur protection!'
InĀ [40]:
Copied!
chain.batch([{"topic": "bears"}, {"topic": "frogs"}])  # one result per input dict
chain.batch([{"topic": "bears"}, {"topic": "frogs"}])
Out[40]:
["Why don't bears like fast food? Because they can't catch it!", 'Why are frogs so happy?\n\nBecause they eat whatever bugs them!']
InĀ [41]:
Copied!
# Stream the response incrementally, printing each chunk as it arrives.
for chunk in chain.stream({"topic": "bears"}):
    print(chunk)
for t in chain.stream({"topic": "bears"}): print(t)
Why don 't bears wear shoes ? Because they have bear feet !
InĀ [42]:
Copied!
# Async variant; top-level await is allowed in notebook cells.
response = await chain.ainvoke({"topic": "bears"})
response
response = await chain.ainvoke({"topic": "bears"}) response
Out[42]:
"Why did the bear break up with his girlfriend? \n\nBecause he couldn't bear the relationship anymore!"
InĀ [Ā ]:
Copied!
InĀ [Ā ]:
Copied!
InĀ [Ā ]:
Copied!
InĀ [Ā ]:
Copied!
InĀ [Ā ]:
Copied!
InĀ [Ā ]:
Copied!
Last update: 2024-10-23
Created: 2024-10-23