Conversational agent
In [1]:
import os
import openai
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
In [2]:
from langchain.tools import tool
In [3]:
import requests
from pydantic import BaseModel, Field
import datetime

# Define the input schema
class OpenMeteoInput(BaseModel):
    latitude: float = Field(..., description="Latitude of the location to fetch weather data for")
    longitude: float = Field(..., description="Longitude of the location to fetch weather data for")

@tool(args_schema=OpenMeteoInput)
def get_current_temperature(latitude: float, longitude: float) -> str:
    """Fetch current temperature for given coordinates."""
    BASE_URL = "https://api.open-meteo.com/v1/forecast"

    # Parameters for the request
    params = {
        'latitude': latitude,
        'longitude': longitude,
        'hourly': 'temperature_2m',
        'forecast_days': 1,
    }

    # Make the request
    response = requests.get(BASE_URL, params=params)

    if response.status_code == 200:
        results = response.json()
    else:
        raise Exception(f"API Request failed with status code: {response.status_code}")

    current_utc_time = datetime.datetime.utcnow()
    time_list = [datetime.datetime.fromisoformat(time_str.replace('Z', '+00:00')) for time_str in results['hourly']['time']]
    temperature_list = results['hourly']['temperature_2m']

    closest_time_index = min(range(len(time_list)), key=lambda i: abs(time_list[i] - current_utc_time))
    current_temperature = temperature_list[closest_time_index]

    return f'The current temperature is {current_temperature}°C'
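The `@tool` decorator wraps the function in a LangChain tool object, and the metadata it derives from the signature, docstring, and `args_schema` is exactly what gets shown to the model. A quick optional sanity check (`name`, `description`, and `args` are standard attributes of LangChain tools):

# Inspect the metadata the model will see
print(get_current_temperature.name)         # 'get_current_temperature'
print(get_current_temperature.description)  # derived from the docstring
print(get_current_temperature.args)         # schema from OpenMeteoInput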
In [4]:
import wikipedia

@tool
def search_wikipedia(query: str) -> str:
    """Run Wikipedia search and get page summaries."""
    page_titles = wikipedia.search(query)
    summaries = []
    for page_title in page_titles[:3]:
        try:
            wiki_page = wikipedia.page(title=page_title, auto_suggest=False)
            summaries.append(f"Page: {page_title}\nSummary: {wiki_page.summary}")
        except (
            wikipedia.exceptions.PageError,
            wikipedia.exceptions.DisambiguationError,
        ):
            pass
    if not summaries:
        return "No good Wikipedia Search Result was found"
    return "\n\n".join(summaries)
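Tools can be exercised directly before wiring them into an agent; `run` is the standard invocation method on LangChain tools:

# Optional smoke test: invoke the tool by hand with a plain query string
print(search_wikipedia.run("langchain"))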
In [5]:
tools = [get_current_temperature, search_wikipedia]
In [6]:
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.tools.render import format_tool_to_openai_function
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
In [7]:
functions = [format_tool_to_openai_function(f) for f in tools]
model = ChatOpenAI(temperature=0).bind(functions=functions)
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful but sassy assistant"),
    ("user", "{input}"),
])
chain = prompt | model | OpenAIFunctionsAgentOutputParser()
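It is worth peeking at what `format_tool_to_openai_function` produced: in this legacy API, each entry is a plain dict in OpenAI's function-calling format with `name`, `description`, and `parameters` keys.

# Each converted tool is a JSON-schema-style function description
print(functions[0]["name"])  # 'get_current_temperature'
print(sorted(functions[0]))  # ['description', 'name', 'parameters']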
In [8]:
result = chain.invoke({"input": "what is the weather in sf?"})
In [9]:
result
Out[9]:
AgentActionMessageLog(tool='get_current_temperature', tool_input={'latitude': 37.7749, 'longitude': -122.4194}, log="\nInvoking: `get_current_temperature` with `{'latitude': 37.7749, 'longitude': -122.4194}`\n\n\n", message_log=[AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_temperature', 'arguments': '{"latitude":37.7749,"longitude":-122.4194}'}})])
In [10]:
result.tool
Out[10]:
'get_current_temperature'
In [11]:
result.tool_input
Out[11]:
{'latitude': 37.7749, 'longitude': -122.4194}
In [12]:
from langchain.prompts import MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful but sassy assistant"),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
In [13]:
chain = prompt | model | OpenAIFunctionsAgentOutputParser()
In [35]:
result1 = chain.invoke({
    "input": "what is the weather in sf?",
    "agent_scratchpad": []
})
In [36]:
result1
Out[36]:
AgentActionMessageLog(tool='get_current_temperature', tool_input={'latitude': 37.7749, 'longitude': -122.4194}, log="\nInvoking: `get_current_temperature` with `{'latitude': 37.7749, 'longitude': -122.4194}`\n\n\n", message_log=[AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_temperature', 'arguments': '{"latitude":37.7749,"longitude":-122.4194}'}})])
In [37]:
result1.tool
Out[37]:
'get_current_temperature'
In [38]:
observation = get_current_temperature(result1.tool_input)
In [39]:
observation
Out[39]:
'The current temperature is 13.7°C'
In [40]:
type(result1)
Out[40]:
langchain.schema.agent.AgentActionMessageLog
In [41]:
from langchain.agents.format_scratchpad import format_to_openai_functions
In [42]:
result1.message_log
Out[42]:
[AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_temperature', 'arguments': '{"latitude":37.7749,"longitude":-122.4194}'}})]
In [43]:
format_to_openai_functions([(result1, observation)])
Out[43]:
[AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_temperature', 'arguments': '{"latitude":37.7749,"longitude":-122.4194}'}}), FunctionMessage(content='The current temperature is 13.7°C', name='get_current_temperature')]
In [44]:
result2 = chain.invoke({
    "input": "what is the weather in sf?",
    "agent_scratchpad": format_to_openai_functions([(result1, observation)])
})
In [45]:
result2
Out[45]:
AgentFinish(return_values={'output': 'The current temperature in San Francisco is 13.7°C.'}, log='The current temperature in San Francisco is 13.7°C.')
In [46]:
from langchain.schema.agent import AgentFinish

def run_agent(user_input):
    intermediate_steps = []
    while True:
        result = chain.invoke({
            "input": user_input,
            "agent_scratchpad": format_to_openai_functions(intermediate_steps)
        })
        if isinstance(result, AgentFinish):
            return result
        tool = {
            "search_wikipedia": search_wikipedia,
            "get_current_temperature": get_current_temperature,
        }[result.tool]
        observation = tool.run(result.tool_input)
        intermediate_steps.append((result, observation))
In [47]:
from langchain.schema.runnable import RunnablePassthrough

runnable_pass_through = RunnablePassthrough.assign(
    agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])
)
runnable_pass_through.invoke({"intermediate_steps": [(result1, observation)]})
Out[47]:
{'intermediate_steps': [(AgentActionMessageLog(tool='get_current_temperature', tool_input={'latitude': 37.7749, 'longitude': -122.4194}, log="\nInvoking: `get_current_temperature` with `{'latitude': 37.7749, 'longitude': -122.4194}`\n\n\n", message_log=[AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_temperature', 'arguments': '{"latitude":37.7749,"longitude":-122.4194}'}})]), 'The current temperature is 13.7°C')], 'agent_scratchpad': [AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_temperature', 'arguments': '{"latitude":37.7749,"longitude":-122.4194}'}}), FunctionMessage(content='The current temperature is 13.7°C', name='get_current_temperature')]}
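`RunnablePassthrough.assign` passes the input dict through unchanged and adds the computed key alongside it, as the output above shows. A minimal toy illustration, independent of the agent:

# Input keys are preserved; 'double' is computed and added
demo = RunnablePassthrough.assign(double=lambda x: x["n"] * 2)
demo.invoke({"n": 3})  # -> {'n': 3, 'double': 6}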
In [48]:
# Simpler loop using RunnablePassthrough
agent_chain = runnable_pass_through | chain

def run_agent(user_input):
    intermediate_steps = []
    while True:
        result = agent_chain.invoke({  # use the agent chain instead of bare `chain`
            "input": user_input,
            "intermediate_steps": intermediate_steps
        })
        if isinstance(result, AgentFinish):
            return result
        tool = {
            "search_wikipedia": search_wikipedia,
            "get_current_temperature": get_current_temperature,
        }[result.tool]
        observation = tool.run(result.tool_input)
        intermediate_steps.append((result, observation))
In [49]:
run_agent("what is the weather in sf?")
run_agent("what is the weather is sf?")
Out[49]:
AgentFinish(return_values={'output': 'The current temperature in San Francisco is 13.7°C.'}, log='The current temperature in San Francisco is 13.7°C.')
In [50]:
run_agent("what is langchain?")
run_agent("what is langchain?")
Out[50]:
AgentFinish(return_values={'output': 'LangChain is a framework designed to simplify the creation of applications using large language models (LLMs). It is a language model integration framework with use-cases including document analysis and summarization, chatbots, and code analysis.'}, log='LangChain is a framework designed to simplify the creation of applications using large language models (LLMs). It is a language model integration framework with use-cases including document analysis and summarization, chatbots, and code analysis.')
In [51]:
run_agent("hi!")
run_agent("hi!")
Out[51]:
AgentFinish(return_values={'output': 'Well, hello there! How can I assist you today?'}, log='Well, hello there! How can I assist you today?')
Agents
In [52]:
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent_chain, tools=tools, verbose=True)
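`AgentExecutor` packages the same loop as `run_agent` with extra guard rails. A sketch of a more defensive configuration (both keyword arguments below exist on `AgentExecutor`; the values are illustrative and this variant is not used in the rest of the transcript):

# Illustrative defensive configuration, not executed here
guarded_executor = AgentExecutor(
    agent=agent_chain,
    tools=tools,
    verbose=True,
    max_iterations=5,            # cap the tool-calling loop
    handle_parsing_errors=True,  # recover from malformed function calls
)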
In [53]:
agent_executor.invoke({"input": "what is langchain?"})
agent_executor.invoke({"input": "what is langchain?"})
> Entering new AgentExecutor chain...

Invoking: `search_wikipedia` with `{'query': 'Langchain'}`

Page: LangChain
Summary: LangChain is a framework designed to simplify the creation of applications using large language models (LLMs). As a language model integration framework, LangChain's use-cases largely overlap with those of language models in general, including document analysis and summarization, chatbots, and code analysis.

Page: DataStax
Summary: DataStax, Inc. is a real-time data for AI company based in Santa Clara, California. Its product Astra DB is a cloud database-as-a-service based on Apache Cassandra. DataStax also offers DataStax Enterprise (DSE), an on-premises database built on Apache Cassandra, and Astra Streaming, a messaging and event streaming cloud service based on Apache Pulsar. As of June 2022, the company has roughly 800 customers distributed in over 50 countries.

Page: Sentence embedding
Summary: In natural language processing, a sentence embedding refers to a numeric representation of a sentence in the form of a vector of real numbers which encodes meaningful semantic information. State of the art embeddings are based on the learned hidden layer representation of dedicated sentence transformer models. BERT pioneered an approach involving the use of a dedicated [CLS] token prepended to the beginning of each sentence inputted into the model; the final hidden state vector of this token encodes information about the sentence and can be fine-tuned for use in sentence classification tasks. In practice however, BERT's sentence embedding with the [CLS] token achieves poor performance, often worse than simply averaging non-contextual word embeddings. SBERT later achieved superior sentence embedding performance by fine tuning BERT's [CLS] token embeddings through the usage of a siamese neural network architecture on the SNLI dataset. Other approaches are loosely based on the idea of distributional semantics applied to sentences. Skip-Thought trains an encoder-decoder structure for the task of neighboring sentences predictions. Though this has been shown to achieve worse performance than approaches such as InferSent or SBERT. An alternative direction is to aggregate word embeddings, such as those returned by Word2vec, into sentence embeddings. The most straightforward approach is to simply compute the average of word vectors, known as continuous bag-of-words (CBOW). However, more elaborate solutions based on word vector quantization have also been proposed. One such approach is the vector of locally aggregated word embeddings (VLAWE), which demonstrated performance improvements in downstream text classification tasks.

LangChain is a framework designed to simplify the creation of applications using large language models (LLMs). It is a language model integration framework with use-cases including document analysis and summarization, chatbots, and code analysis.

> Finished chain.
Out[53]:
{'input': 'what is langchain?', 'output': 'LangChain is a framework designed to simplify the creation of applications using large language models (LLMs). It is a language model integration framework with use-cases including document analysis and summarization, chatbots, and code analysis.'}
In [54]:
agent_executor.invoke({"input": "my name is bob"})
agent_executor.invoke({"input": "my name is bob"})
> Entering new AgentExecutor chain...
Nice to meet you, Bob! How can I assist you today?
> Finished chain.
Out[54]:
{'input': 'my name is bob', 'output': 'Nice to meet you, Bob! How can I assist you today?'}
In [55]:
agent_executor.invoke({"input": "what is my name"})
agent_executor.invoke({"input": "what is my name"})
> Entering new AgentExecutor chain...
I'm sorry, I don't have access to your personal information. How can I assist you today?
> Finished chain.
Out[55]:
{'input': 'what is my name', 'output': "I'm sorry, I don't have access to your personal information. How can I assist you today?"}
Chat with agents
In [56]:
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful but sassy assistant"),
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
In [57]:
agent_chain = RunnablePassthrough.assign(
    agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])
) | prompt | model | OpenAIFunctionsAgentOutputParser()
In [58]:
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(return_messages=True, memory_key="chat_history")
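`ConversationBufferMemory` simply accumulates messages under the configured key; `load_memory_variables` (the standard memory accessor) shows the buffer, which is empty at this point:

# Before any turns, the buffer holds no messages
memory.load_memory_variables({})  # -> {'chat_history': []}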
In [59]:
agent_executor = AgentExecutor(agent=agent_chain, tools=tools, verbose=True, memory=memory)
In [60]:
agent_executor.invoke({"input": "my name is bob"})
agent_executor.invoke({"input": "my name is bob"})
> Entering new AgentExecutor chain...
Nice to meet you, Bob! How can I assist you today?
> Finished chain.
Out[60]:
{'input': 'my name is bob', 'chat_history': [HumanMessage(content='my name is bob'), AIMessage(content='Nice to meet you, Bob! How can I assist you today?')], 'output': 'Nice to meet you, Bob! How can I assist you today?'}
In [61]:
agent_executor.invoke({"input": "whats my name"})
agent_executor.invoke({"input": "whats my name"})
> Entering new AgentExecutor chain...
Your name is Bob.
> Finished chain.
Out[61]:
{'input': 'whats my name', 'chat_history': [HumanMessage(content='my name is bob'), AIMessage(content='Nice to meet you, Bob! How can I assist you today?'), HumanMessage(content='whats my name'), AIMessage(content='Your name is Bob.')], 'output': 'Your name is Bob.'}
In [62]:
agent_executor.invoke({"input": "whats the weather in sf?"})
agent_executor.invoke({"input": "whats the weather in sf?"})
> Entering new AgentExecutor chain...

Invoking: `get_current_temperature` with `{'latitude': 37.7749, 'longitude': -122.4194}`

The current temperature is 13.7°C
The current temperature in San Francisco is 13.7°C.

> Finished chain.
Out[62]:
{'input': 'whats the weather in sf?', 'chat_history': [HumanMessage(content='my name is bob'), AIMessage(content='Nice to meet you, Bob! How can I assist you today?'), HumanMessage(content='whats my name'), AIMessage(content='Your name is Bob.'), HumanMessage(content='whats the weather in sf?'), AIMessage(content='The current temperature in San Francisco is 13.7°C.')], 'output': 'The current temperature in San Francisco is 13.7°C.'}
Create a chatbot
In [63]:
@tool
def create_your_own(query: str) -> str:
    """This function can do whatever you would like once you fill it in."""
    print(type(query))
    return query[::-1]
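As written, the placeholder tool just reverses its input, which makes it easy to verify in isolation:

# Direct invocation, bypassing the agent
create_your_own.run("stressed")  # prints <class 'str'>, returns 'desserts'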
In [64]:
tools = [get_current_temperature, search_wikipedia, create_your_own]
In [65]:
import panel as pn  # GUI
import param

pn.extension()

class cbfs(param.Parameterized):

    def __init__(self, tools, **params):
        super().__init__(**params)
        self.panels = []
        self.functions = [format_tool_to_openai_function(f) for f in tools]
        self.model = ChatOpenAI(temperature=0).bind(functions=self.functions)
        self.memory = ConversationBufferMemory(return_messages=True, memory_key="chat_history")
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a helpful but sassy assistant"),
            MessagesPlaceholder(variable_name="chat_history"),
            ("user", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])
        self.chain = RunnablePassthrough.assign(
            agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])
        ) | self.prompt | self.model | OpenAIFunctionsAgentOutputParser()
        self.qa = AgentExecutor(agent=self.chain, tools=tools, verbose=False, memory=self.memory)

    def convchain(self, query):
        if not query:
            return
        inp.value = ''  # clear the global input widget defined in the next cell
        result = self.qa.invoke({"input": query})
        self.answer = result['output']
        self.panels.extend([
            pn.Row('User:', pn.pane.Markdown(query, width=450)),
            pn.Row('ChatBot:', pn.pane.Markdown(self.answer, width=450, styles={'background-color': '#F6F6F6'}))
        ])
        return pn.WidgetBox(*self.panels, scroll=True)

    def clr_history(self, count=0):
        # Note: this clears only the local attribute, not self.memory
        self.chat_history = []
        return
In [66]:
cb = cbfs(tools)

inp = pn.widgets.TextInput(placeholder='Enter text here…')
conversation = pn.bind(cb.convchain, inp)

tab1 = pn.Column(
    pn.Row(inp),
    pn.layout.Divider(),
    pn.panel(conversation, loading_indicator=True, height=400),
    pn.layout.Divider(),
)
dashboard = pn.Column(
    pn.Row(pn.pane.Markdown('# QnA_Bot')),
    pn.Tabs(('Conversation', tab1))
)
dashboard
Out[66]:
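Inside a notebook the `dashboard` object renders inline. Outside one, it can be served as a standalone app with Panel's standard entry points; a sketch, assuming this code lives in a script:

# Serve programmatically...
pn.serve(dashboard)
# ...or mark it servable and run `panel serve <script>.py` from the shell:
# dashboard.servable()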
Last update: 2024-10-23
Created: 2024-10-23