from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_core.output_parsers import CommaSeparatedListOutputParser, PydanticOutputParser
from langchain_core.exceptions import OutputParserException
from langchain_core.pydantic_v1 import BaseModel, Field
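# A minimal usage sketch (not from the original article) showing how these imports are
# typically combined; the Joke schema and field names below are hypothetical.
class Joke(BaseModel):
    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")

pydantic_parser = PydanticOutputParser(pydantic_object=Joke)

joke_prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template(
        "Answer the user's query.\n{format_instructions}"
    ),
    HumanMessagePromptTemplate.from_template("{query}"),
]).partial(format_instructions=pydantic_parser.get_format_instructions())

try:
    # A full chain would be: joke_prompt | llm | pydantic_parser
    joke = pydantic_parser.parse('{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side."}')
    print(joke.punchline)
except OutputParserException as e:
    print(f"Failed to parse model output: {e}")

# CommaSeparatedListOutputParser simply splits "a, b, c" style output into a Python list
list_parser = CommaSeparatedListOutputParser()
print(list_parser.parse("red, green, blue"))  # -> ['red', 'green', 'blue']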
import os
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
def use_vector_store():
    if os.getenv("OPENAI_API_KEY") is None:
        print("Error: OPENAI_API_KEY environment variable not set.")
        return
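    # --- A minimal sketch of how this example might continue; the sample documents
    # and the query below are hypothetical, not taken from the original article ---
    # 1. Prepare a few sample documents
    docs = [
        Document(page_content="LangChain is a framework for building LLM applications."),
        Document(page_content="Chroma is a lightweight open-source vector database."),
        Document(page_content="Embeddings map text to dense numerical vectors."),
    ]
    # 2. Embed the documents and store them in an in-memory Chroma collection
    embeddings = OpenAIEmbeddings()
    vector_store = Chroma.from_documents(documents=docs, embedding=embeddings)
    # 3. Run a similarity search against the store
    results = vector_store.similarity_search("What is Chroma?", k=2)
    for doc in results:
        print(doc.page_content)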
import os
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
def use_retriever():
    if os.getenv("OPENAI_API_KEY") is None:
        print("Error: OPENAI_API_KEY environment variable not set.")
        return
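    # --- A minimal sketch of how the retriever example might continue; the sample
    # documents and query are hypothetical ---
    docs = [
        Document(page_content="LangChain retrievers expose a common interface for fetching documents."),
        Document(page_content="A vector store can be wrapped as a retriever via as_retriever()."),
    ]
    vector_store = Chroma.from_documents(documents=docs, embedding=OpenAIEmbeddings())
    # Wrap the vector store as a retriever that returns the top-2 matches
    retriever = vector_store.as_retriever(search_kwargs={"k": 2})
    retrieved_docs = retriever.invoke("How do I get a retriever from a vector store?")
    for doc in retrieved_docs:
        print(doc.page_content)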
import os
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
def use_retrieval_qa_chain():
    if os.getenv("OPENAI_API_KEY") is None:
        print("Error: OPENAI_API_KEY environment variable not set.")
        return
    # 1. Initialize the Chat Model used for question answering
    llm = ChatOpenAI(model_name="gpt-4o", temperature=0)
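    # --- A minimal sketch of the remaining steps; the sample documents, prompt
    # wording, and question are hypothetical ---
    # 2. Build a small vector store and a retriever over it
    docs = [
        Document(page_content="create_retrieval_chain combines a retriever with a document-combining chain."),
        Document(page_content="create_stuff_documents_chain stuffs the retrieved documents into the prompt context."),
    ]
    vector_store = Chroma.from_documents(documents=docs, embedding=OpenAIEmbeddings())
    retriever = vector_store.as_retriever()
    # 3. Prompt that receives the retrieved documents via {context}
    prompt = ChatPromptTemplate.from_messages([
        ("system", "Answer the question using only the following context:\n\n{context}"),
        ("human", "{input}"),
    ])
    # 4. Stuff-documents chain wrapped in a retrieval chain
    combine_docs_chain = create_stuff_documents_chain(llm, prompt)
    retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
    # 5. Ask a question; the generated answer is returned under the "answer" key
    response = retrieval_chain.invoke({"input": "What does create_retrieval_chain do?"})
    print(response["answer"])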
graph TD
A["用户输入 (问题)"] --> B{"Agent (LLM)"}
B --> C{Thought: 需要使用什么工具?}
C --> D{"Action: 使用某工具 (e.g., Google Search)"}
D --> E[Observation: 工具返回结果]
E --> B
B -- 如果问题解决 --> F[Final Answer: 最终答案]
B -- 如果需要更多工具 --> C
import os
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.tools.tavily_search import TavilySearchResults  # a simple search tool
from langchain.tools import tool  # decorator for defining custom tools
def use_agents_and_tools():
    if os.getenv("OPENAI_API_KEY") is None:
        print("Error: OPENAI_API_KEY environment variable not set.")
        return
    # Tavily API key for the search tool (optional; not needed if you skip the search tool)
    # export TAVILY_API_KEY="your_tavily_api_key_here"
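    # --- A sketch of how the agent might be wired up; the word_count_tool, the
    # prompt wording, and the questions below are reconstructed from the expected
    # output shown further down, not taken verbatim from the original article ---
    @tool
    def word_count_tool(text: str) -> int:
        """Count the number of words in the given text."""
        return len(text.split())

    # Register the custom tool; add the Tavily search tool only if its key is set
    tools = [word_count_tool]
    if os.getenv("TAVILY_API_KEY"):
        tools.append(TavilySearchResults(max_results=3))

    llm = ChatOpenAI(model_name="gpt-4o", temperature=0)
    # The prompt must expose an "agent_scratchpad" placeholder for the agent's intermediate steps
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful assistant. Use the available tools when needed."),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])

    # Build an OpenAI-functions agent and wrap it in an executor
    agent = create_openai_functions_agent(llm, tools, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

    questions = [
        "How many words are in the sentence 'Hello LangChain World'?",
        "Which city is hosting the 2024 Olympic Games?",
    ]
    for i, q in enumerate(questions):
        print(f"\n--- Question {i+1}: {q} ---")
        result = agent_executor.invoke({"input": q})
        print(f"Answer {i+1}: {result['output']}")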
# Expected output example (verbose=True prints the detailed reasoning trace):
# --- Question 1: How many words are in the sentence 'Hello LangChain World'? ---
# > Entering new AgentExecutor chain...
# {
#   "color": "green",
#   "kwargs": {
#     "log_or_template": "\nI need to count the number of words in the given text. The `word_count_tool` is suitable for this task.\n",
#     "log": "\nI need to count the number of words in the given text. The `word_count_tool` is suitable for this task.\n"
#   },
#   "type": "tool",
#   "name": "Thought"
# }
# {
#   "color": "yellow",
#   "kwargs": {
#     "log_or_template": "tool_code",
#     "log": "Calling tool \"word_count_tool\" with text: \"Hello LangChain World\"\n"
#   },
#   "type": "tool",
#   "name": "Action"
# }
# {
#   "tool_name": "word_count_tool",
#   "tool_input": "Hello LangChain World",
#   "log": "Input to word_count_tool: Hello LangChain World"
# }
# {
#   "color": "blue",
#   "kwargs": {
#     "log_or_template": "\nI have successfully used the `word_count_tool` and the result is 3. Now I can provide the final answer.\n",
#     "log": "\nI have successfully used the `word_count_tool` and the result is 3. Now I can provide the final answer.\n"
#   },
#   "type": "tool",
#   "name": "Observation"
# }
# > Finished chain.
# Answer 1: The sentence 'Hello LangChain World' contains 3 words.
#
# --- Question 2: Which city is hosting the 2024 Olympic Games? ---
# > Entering new AgentExecutor chain...
# {
#   "color": "green",
#   "kwargs": {
#     "log_or_template": "\nI need to find out the host city for the 2024 Olympic Games. I should use a search tool for this.\n",
#     "log": "\nI need to find out the host city for the 2024 Olympic Games. I should use a search tool for this.\n"
#   },
#   "type": "tool",
#   "name": "Thought"
# }
# {
#   "color": "yellow",
#   "kwargs": {
#     "log_or_template": "tool_code",
#     "log": "Calling tool \"tavily_search_results\" with text: \"2024 Olympics host city\"\n"
#   },
#   "type": "tool",
#   "name": "Action"
# }
# {
#   "tool_name": "tavily_search_results",
#   "tool_input": "2024 Olympics host city",
#   "log": "Calling tool \"tavily_search_results\" with text: \"2024 Olympics host city\""
# }
# [
#   {'url': '...', 'content': '...Host city: Paris'},
#   ...
# ]
# {
#   "color": "blue",
#   "kwargs": {
#     "log_or_template": "\nAccording to the search results, the 2024 Olympic Games will be held in Paris, France. I can give the final answer directly.\n",
#     "log": "\nAccording to the search results, the 2024 Olympic Games will be held in Paris, France. I can give the final answer directly.\n"
#   },
#   "type": "tool",
#   "name": "Observation"
# }
# > Finished chain.
# Answer 2: The 2024 Olympic Games will be held in Paris, France.
import os
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
def use_memory():
    if os.getenv("OPENAI_API_KEY") is None:
        print("Error: OPENAI_API_KEY environment variable not set.")
        return
    # 1. Initialize the Chat Model
    llm = ChatOpenAI(model_name="gpt-4o", temperature=0.7)
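    # --- A sketch of the setup used by the loop below; the prompt wording and the
    # example inputs are reconstructed from the expected output shown further down ---
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a knowledgeable AI assistant."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ])

    # 2. Buffer memory that stores the full conversation as message objects
    conv_memory = ConversationBufferMemory(return_messages=True)

    # 3. ConversationChain wires the LLM, the prompt, and the memory together
    conversation_chain = ConversationChain(
        llm=llm,
        prompt=prompt,
        memory=conv_memory,
        verbose=True,
    )

    inputs = [
        "Hello, what is your name?",
        "I am from Shanghai, China. Where are you from?",
        "What do you think of traditional Chinese culture?",
    ]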
    for i, user_input in enumerate(inputs):
        print(f"\n--- User Input {i+1}: {user_input} ---")
        response = conversation_chain.invoke({"input": user_input})
        print(f"AI Response {i+1}: {response['response']}")
    # Print the full conversation history stored in memory
    print("\n--- Full Conversation History in Memory ---")
    # conv_memory.buffer is a list of messages
    for msg in conv_memory.buffer:
        print(f"- {msg.type.capitalize()}: {msg.content}")
if __name__ == "__main__":
    use_memory()
# Expected output example (verbose=True prints more detail; the responses vary with the model and temperature):
# --- User Input 1: Hello, what is your name? ---
# > Entering new ConversationChain chain...
# Prompt after formatting:
# System: You are a knowledgeable AI assistant.
# Human: Hello, what is your name?
# > Finished chain.
# AI Response 1: I am a large language model, trained by Google.
#
# --- User Input 2: I am from Shanghai, China. Where are you from? ---
# > Entering new ConversationChain chain...
# Prompt after formatting:
# System: You are a knowledgeable AI assistant.
# Human: Hello, what is your name?
# AI: I am a large language model, trained by Google.
# Human: I am from Shanghai, China. Where are you from?
# > Finished chain.
# AI Response 2: I do not have a physical location; I am an AI that exists in servers and in my program code.
#
# --- User Input 3: What do you think of traditional Chinese culture? ---
# > Entering new ConversationChain chain...
# Prompt after formatting:
# System: You are a knowledgeable AI assistant.
# Human: Hello, what is your name?
# AI: I am a large language model, trained by Google.
# Human: I am from Shanghai, China. Where are you from?
# AI: I do not have a physical location; I am an AI that exists in servers and in my program code.
# Human: What do you think of traditional Chinese culture?
# > Finished chain.
# AI Response 3: Traditional Chinese culture is rich and varied, with a long history full of wisdom and artistic value. From Confucian thought and Taoist philosophy to poetry, opera, calligraphy, ink painting, traditional festivals, and exquisite crafts, it reflects the unique charm and deep heritage of the Chinese people. This cultural legacy has not only shaped Chinese ways of life and values but has also had a profound influence on world civilization.
#
# --- Full Conversation History in Memory ---
# - Human: Hello, what is your name?
# - Ai: I am a large language model, trained by Google.
# - Human: I am from Shanghai, China. Where are you from?
# - Ai: I do not have a physical location; I am an AI that exists in servers and in my program code.
# - Human: What do you think of traditional Chinese culture?
# - Ai: Traditional Chinese culture is rich and varied, with a long history full of wisdom and artistic value. From Confucian thought and Taoist philosophy to poetry, opera, calligraphy, ink painting, traditional festivals, and exquisite crafts, it reflects the unique charm and deep heritage of the Chinese people. This cultural legacy has not only shaped Chinese ways of life and values but has also had a profound influence on world civilization.