Lesson 2: LangGraph Components
In [1]:
from dotenv import load_dotenv
_ = load_dotenv()
In [2]:
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage
from langchain_openai import ChatOpenAI
from langchain_community.tools.tavily_search import TavilySearchResults
In [3]:
tool = TavilySearchResults(max_results=4) #increased number of results
print(type(tool))
print(tool.name)
<class 'langchain_community.tools.tavily_search.tool.TavilySearchResults'>
tavily_search_results_json
If you are not familiar with Python's typing annotations, you can refer to the Python documentation.
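For example, the Annotated type in AgentState below attaches operator.add as metadata, which LangGraph uses as a reducer: each messages update returned by a node is appended to the existing list rather than replacing it. A minimal sketch of that combining behavior in plain Python (independent of LangGraph):

import operator

existing = ["first message"]
update = ["second message"]

# operator.add concatenates the two lists, so state accumulates
# instead of being overwritten on each node return.
print(operator.add(existing, update))  # ['first message', 'second message']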
In [5]:
class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], operator.add]
Note: in take_action below, some logic was added to cover the case where the LLM returns a non-existent tool name. Even with function calling, LLMs can still occasionally hallucinate. Note that all that is done is instructing the LLM to try again! This is an advantage of an agentic organization.
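As a standalone illustration (not part of the lesson's code), here is roughly how that guard plays out on a hypothetical hallucinated tool call; the dict shape matches the tool_calls entries printed in the traces further below, and the tool name "weather_lookup" and id "call_123" are made up for the example:

tools = {"tavily_search_results_json": tool}  # the tool registry built in Agent.__init__

# hypothetical tool call with a made-up name and id
t = {"name": "weather_lookup", "args": {"query": "weather in sf"}, "id": "call_123"}

if t["name"] not in tools:
    result = "bad tool name, retry"  # returned as the tool result, so the LLM is told to try again
else:
    result = tools[t["name"]].invoke(t["args"])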
In [6]:
class Agent:
    def __init__(self, model, tools, system=""):
        self.system = system
        # build the graph: the llm node decides whether to call a tool,
        # the action node executes the requested tool call(s)
        graph = StateGraph(AgentState)
        graph.add_node("llm", self.call_openai)
        graph.add_node("action", self.take_action)
        graph.add_conditional_edges(
            "llm",
            self.exists_action,
            {True: "action", False: END}
        )
        graph.add_edge("action", "llm")
        graph.set_entry_point("llm")
        self.graph = graph.compile()
        self.tools = {t.name: t for t in tools}
        self.model = model.bind_tools(tools)

    def exists_action(self, state: AgentState):
        # route to the action node only if the last message contains tool calls
        result = state['messages'][-1]
        return len(result.tool_calls) > 0

    def call_openai(self, state: AgentState):
        messages = state['messages']
        if self.system:
            messages = [SystemMessage(content=self.system)] + messages
        message = self.model.invoke(messages)
        return {'messages': [message]}

    def take_action(self, state: AgentState):
        tool_calls = state['messages'][-1].tool_calls
        results = []
        for t in tool_calls:
            print(f"Calling: {t}")
            if t['name'] not in self.tools:      # check for bad tool name from LLM
                print("\n ....bad tool name....")
                result = "bad tool name, retry"  # instruct LLM to retry if bad
            else:
                result = self.tools[t['name']].invoke(t['args'])
            results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))
        print("Back to the model!")
        return {'messages': results}
In [7]:
prompt = """You are a smart research assistant. Use the search engine to look up information. \
You are allowed to make multiple calls (either together or in sequence). \
Only look up information when you are sure of what you want. \
If you need to look up some information before asking a follow up question, you are allowed to do that!
"""
model = ChatOpenAI(model="gpt-3.5-turbo") #reduce inference cost
abot = Agent(model, [tool], system=prompt)
prompt = """You are a smart research assistant. Use the search engine to look up information. \ You are allowed to make multiple calls (either together or in sequence). \ Only look up information when you are sure of what you want. \ If you need to look up some information before asking a follow up question, you are allowed to do that! """ model = ChatOpenAI(model="gpt-3.5-turbo") #reduce inference cost abot = Agent(model, [tool], system=prompt)
In [8]:
from IPython.display import Image
Image(abot.graph.get_graph().draw_png())
Out[8]:
(graph diagram: the llm node with a conditional edge to either the action node or END, and an edge from action back to llm)
In [9]:
messages = [HumanMessage(content="What is the weather in sf?")]
result = abot.graph.invoke({"messages": messages})
messages = [HumanMessage(content="What is the weather in sf?")] result = abot.graph.invoke({"messages": messages})
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'weather in San Francisco'}, 'id': 'call_gaEvnai9PEiSwxAOe5mbyi4X'}
Back to the model!
In [10]:
result
Out[10]:
{'messages': [HumanMessage(content='What is the weather in sf?'),
  AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_gaEvnai9PEiSwxAOe5mbyi4X', 'function': {'arguments': '{"query":"weather in San Francisco"}', 'name': 'tavily_search_results_json'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 153, 'total_tokens': 174}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-bdff7ca2-3cab-4365-9797-d98150b82e42-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'weather in San Francisco'}, 'id': 'call_gaEvnai9PEiSwxAOe5mbyi4X'}]),
  ToolMessage(content='[{\'url\': \'https://world-weather.info/forecast/usa/san_francisco/june-2024/\', \'content\': \'Extended weather forecast in San Francisco. Hourly Week 10 days 14 days 30 days Year. Detailed San Francisco Weather Forecast for June 2024 - day/night temperatures, precipitations - World-Weather.info.\'}, {\'url\': \'https://www.accuweather.com/en/us/san-francisco/94103/june-weather/347629\', \'content\': \'Get the monthly weather forecast for San Francisco, CA, including daily high/low, historical averages, to help you plan ahead.\'}, {\'url\': \'https://www.weatherapi.com/\', \'content\': "{\'location\': {\'name\': \'San Francisco\', \'region\': \'California\', \'country\': \'United States of America\', \'lat\': 37.78, \'lon\': -122.42, \'tz_id\': \'America/Los_Angeles\', \'localtime_epoch\': 1718872651, \'localtime\': \'2024-06-20 1:37\'}, \'current\': {\'last_updated_epoch\': 1718872200, \'last_updated\': \'2024-06-20 01:30\', \'temp_c\': 12.8, \'temp_f\': 55.0, \'is_day\': 0, \'condition\': {\'text\': \'Partly cloudy\', \'icon\': \'//cdn.weatherapi.com/weather/64x64/night/116.png\', \'code\': 1003}, \'wind_mph\': 11.9, \'wind_kph\': 19.1, \'wind_degree\': 280, \'wind_dir\': \'W\', \'pressure_mb\': 1013.0, \'pressure_in\': 29.92, \'precip_mm\': 0.02, \'precip_in\': 0.0, \'humidity\': 83, \'cloud\': 75, \'feelslike_c\': 11.4, \'feelslike_f\': 52.6, \'windchill_c\': 10.1, \'windchill_f\': 50.3, \'heatindex_c\': 11.6, \'heatindex_f\': 52.9, \'dewpoint_c\': 9.6, \'dewpoint_f\': 49.3, \'vis_km\': 16.0, \'vis_miles\': 9.0, \'uv\': 1.0, \'gust_mph\': 12.7, \'gust_kph\': 20.4}}"}, {\'url\': \'https://www.weathertab.com/en/c/e/06/united-states/california/san-francisco/\', \'content\': \'Temperature Forecast Normal. Avg High Temps 60 to 70 °F. Avg Low Temps 45 to 60 °F. Explore comprehensive June 2024 weather forecasts for San Francisco, including daily high and low temperatures, precipitation risks, and monthly temperature trends. Featuring detailed day-by-day forecasts, dynamic graphs of daily rain probabilities, and ...\'}]', name='tavily_search_results_json', tool_call_id='call_gaEvnai9PEiSwxAOe5mbyi4X'),
  AIMessage(content='The current weather in San Francisco is partly cloudy with a temperature of 55.0°F (12.8°C). The wind speed is 19.1 kph coming from the west, and there is a 75% cloud cover. The humidity is at 83%.', response_metadata={'token_usage': {'completion_tokens': 57, 'prompt_tokens': 842, 'total_tokens': 899}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-7359eb14-f0b5-4d89-8849-c2097c2a3e6d-0')]}
In [11]:
result['messages'][-1].content
Out[11]:
'The current weather in San Francisco is partly cloudy with a temperature of 55.0°F (12.8°C). The wind speed is 19.1 kph coming from the west, and there is a 75% cloud cover. The humidity is at 83%.'
In [12]:
messages = [HumanMessage(content="What is the weather in SF and LA?")]
result = abot.graph.invoke({"messages": messages})
messages = [HumanMessage(content="What is the weather in SF and LA?")] result = abot.graph.invoke({"messages": messages})
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'weather in San Francisco'}, 'id': 'call_8DXZWtfXkUYjSKJBHkwvpptb'}
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'weather in Los Angeles'}, 'id': 'call_oqQbdxbEQ6beq7VfeCUFU9ub'}
Back to the model!
In [13]:
result['messages'][-1].content
Out[13]:
'The current weather in San Francisco is 52.9°F with patchy rain nearby. The wind speed is 14.4 kph coming from the WSW direction. The humidity is at 88%. \n\nIn Los Angeles, the current temperature is 65.9°F with clear skies. The wind speed is 5.0 kph coming from the SSW direction, and the humidity is at 68%.'
In [14]:
# Note, the query was modified to produce more consistent results.
# Results may vary per run and over time as search information and models change.
query = "Who won the super bowl in 2024? In what state is the winning team headquarters located? \
What is the GDP of that state? Answer each question."
messages = [HumanMessage(content=query)]
model = ChatOpenAI(model="gpt-4o") # requires more advanced model
abot = Agent(model, [tool], system=prompt)
result = abot.graph.invoke({"messages": messages})
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'Super Bowl 2024 winner'}, 'id': 'call_DtGTmbfGKtlu7PmheNS9GeWw'}
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'State of Super Bowl 2024 winning team headquarters'}, 'id': 'call_nm0HjPpxPNrgcWw9Dvjjelwb'}
Back to the model!
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'GDP of Missouri 2023'}, 'id': 'call_IXTgXArU1CZJr36AHnkyy5OZ'}
Back to the model!
In [15]:
print(result['messages'][-1].content)
1. **Who won the Super Bowl in 2024?**
   - The Kansas City Chiefs won the Super Bowl in 2024.
2. **In what state is the winning team's headquarters located?**
   - The Kansas City Chiefs are headquartered in Missouri.
3. **What is the GDP of that state?**
   - As of 2023, the real GDP of Missouri was approximately $423.6 billion.
Last update: 2024-10-23
Created: 2024-10-23