import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI, OpenAI
from langchain_core.messages import HumanMessage, SystemMessage

load_dotenv()  # load OPENAI_API_KEY from a .env file
# Instantiate an LLM and a ChatModel (the ChatModel is recommended)
llm = OpenAI(temperature=0.7)  # completion-style LLM, used in example 1 below
chat_model = ChatOpenAI(temperature=0.7, model="gpt-3.5-turbo")  # chat model
print("--- LLM 模块调用示例 ---")
# 1. LLM invocation (invoke)
response_llm = llm.invoke("What is the capital of France?")
print(f"LLM Response (invoke): {response_llm.strip()}")
# 2. ChatModel invocation (invoke)
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="What is the capital of France?"),
]
response_chat = chat_model.invoke(messages)
print(f"ChatModel Response (invoke): {response_chat.content}")
# 3. ChatModel streaming invocation (stream)
print("\nChatModel Response (stream):")
stream_messages = [HumanMessage(content="Tell me a short story about a brave knight.")]
for chunk in chat_model.stream(stream_messages):
    print(chunk.content, end="", flush=True)
print("\n")
# 4. ChatModel batch invocation (batch)
batch_inputs = [
    [HumanMessage(content="What is the color of the sky?")],
    [HumanMessage(content="What is the color of grass?")],
]
batch_responses = chat_model.batch(batch_inputs)
print("\nChatModel Response (batch):")
for res in batch_responses:
    print(f"- {res.content}")
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder, FewShotPromptTemplate
print("\n--- Prompts 模块调用示例 ---")
# 1. PromptTemplate (for completion-style LLMs)
prompt_template = PromptTemplate.from_template("Tell me a {adjective} story about a {noun}.")
formatted_prompt = prompt_template.invoke({"adjective": "funny", "noun": "cat"})
print(f"PromptTemplate result: {formatted_prompt}")
# 2. ChatPromptTemplate (for ChatModels)
# from_messages is the quickest way to build one
chat_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant that translates English to French."),
    ("human", "Translate this sentence: {sentence}"),
])
formatted_chat_prompt = chat_prompt.invoke({"sentence": "Hello, how are you?"})
print(f"ChatPromptTemplate result: {formatted_chat_prompt.messages}")
# 3. ChatPromptTemplate with MessagesPlaceholder (for memory or complex message injection)
from langchain_core.messages import HumanMessage, AIMessage

history = [
    HumanMessage(content="Hi there!"),
    AIMessage(content="Hello! How can I help you?"),
]
chat_prompt_with_history = ChatPromptTemplate.from_messages([
    ("system", "You are a friendly chatbot."),
    MessagesPlaceholder(variable_name="chat_history"),  # placeholder where the history messages are inserted
    ("human", "{input}"),
])
formatted_prompt_with_history = chat_prompt_with_history.invoke({
    "chat_history": history,
    "input": "What's the weather like today?",
})
print(f"ChatPromptTemplate (with history) result: {formatted_prompt_with_history.messages}")
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser, PydanticOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

print("\n--- Output Parsers module invocation examples ---")
chat_model_parser = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")  # model shared by the parser examples below
# 1. StrOutputParser
# This is the default behavior, but specifying it explicitly improves readability
chain_str = (
    ChatPromptTemplate.from_template("Tell me a fact about {animal}.")
    | chat_model_parser
    | StrOutputParser()
)
fact = chain_str.invoke({"animal": "dogs"})
print(f"StrOutputParser result: {fact}")
# 2. JsonOutputParser
json_prompt = ChatPromptTemplate.from_template(
    "Return a JSON object with the 'animal' and 'fact' about {animal}. "
    "Format: {{\"animal\": \"<animal_name>\", \"fact\": \"<fact_about_animal>\"}}"
)
chain_json = json_prompt | chat_model_parser | JsonOutputParser()
json_output = chain_json.invoke({"animal": "cats"})
print(f"JsonOutputParser result (type: {type(json_output)}): {json_output}")
# 3. PydanticOutputParser
class AnimalFact(BaseModel):
    animal: str = Field(description="The name of the animal.")
    fact: str = Field(description="An interesting fact about the animal.")

parser = PydanticOutputParser(pydantic_object=AnimalFact)
pydantic_prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer the user query. {format_instructions}"),
    ("human", "Tell me a fact about {animal}."),
]).partial(format_instructions=parser.get_format_instructions())  # inject the format instructions into the prompt
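The parser then closes the loop exactly like the earlier parser examples; a sketch following the same pattern (the query animal is arbitrary):

chain_pydantic = pydantic_prompt | chat_model_parser | parser
animal_fact = chain_pydantic.invoke({"animal": "owls"})  # returns an AnimalFact instance
print(f"PydanticOutputParser result (type: {type(animal_fact)}): {animal_fact}")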
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
print("\n--- Chains 模块调用示例 ---")
# The simplest chain: Prompt -> LLM -> Parser
llm_chain = (
    ChatPromptTemplate.from_template("Tell me a short {emotion} story about a {animal}.")
    | ChatOpenAI(temperature=0.7, model="gpt-3.5-turbo")
    | StrOutputParser()
)
story = llm_chain.invoke({"emotion": "happy", "animal": "rabbit"})
print(f"Simple chain result:\n{story}")
# A more complex chain: multiple LLM calls or intermediate steps
from langchain_core.runnables import RunnablePassthrough
# Helper used by the chain below: one possible implementation of summarize_text,
# condensing a passage with a second LLM call
def summarize_text(text: str) -> str:
    summarizer = (
        ChatPromptTemplate.from_template("Summarize the following text in one sentence:\n\n{text}")
        | ChatOpenAI(temperature=0, model="gpt-3.5-turbo")
        | StrOutputParser()
    )
    return summarizer.invoke({"text": text})

summary_chain = (
    {"text": llm_chain}  # feed the previous chain's output in under the 'text' key
    | RunnablePassthrough.assign(summary=lambda x: summarize_text(x["text"]))
    | ChatPromptTemplate.from_template(
        "Original Story: {text}\nSummary: {summary}\n\n"
        "Based on the summary, what is the main theme of the story?"
    )
    | ChatOpenAI(temperature=0, model="gpt-3.5-turbo")
    | StrOutputParser()
)
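Invoking summary_chain looks the same as invoking the simple chain; note the input keys are those of the inner llm_chain:

theme = summary_chain.invoke({"emotion": "happy", "animal": "rabbit"})
print(f"Complex chain result: {theme}")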
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains.retrieval import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
print("\n--- Retrieval 模块调用示例 ---")
# 1. Create a mock document
with open("example_document.txt", "w") as f:
    f.write(
        "LangChain is a framework for developing applications powered by large language models. "
        "It enables applications that are context-aware and can reason. "
        "LangChain also helps with data augmentation, agentic reasoning, and evaluation. "
        "Key components include LLMs, prompts, chains, agents, and memory. "
        "Vector databases like Chroma are often used for retrieval."
    )
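Steps 2 through 5 load the document, split it into chunks, embed the chunks, and expose a retriever. A minimal sketch using the modules imported above; the chunk_size and chunk_overlap values are illustrative assumptions:

# 2. Load the document
loader = TextLoader("example_document.txt")
docs = loader.load()

# 3. Split it into chunks (sizes are illustrative)
splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
splits = splitter.split_documents(docs)

# 4. Embed the chunks into a Chroma vector store
vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())

# 5. Expose the store as a retriever
retriever = vectorstore.as_retriever()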
# 6. Build the RAG chain
# a. Define a document-combining chain that knows how to handle the retrieved documents
document_chain = create_stuff_documents_chain(
    ChatOpenAI(model="gpt-3.5-turbo"),
    ChatPromptTemplate.from_template(
        "Answer the question based only on the provided context:\n\n{context}\n\nQuestion: {input}"
    ),
)
# b. Create the retrieval chain
retrieval_chain = create_retrieval_chain(retriever, document_chain)
# 7. Invoke the retrieval chain
response = retrieval_chain.invoke({"input": "What is LangChain useful for?"})
print(f"RAG chain response: {response['answer']}")
# print(f"Retrieved documents: {[d.page_content for d in response['context']]}")
# Clean up the mock document
os.remove("example_document.txt")
4.7 Agents
Agents give an LLM the ability to make decisions and to use tools.
Invocation methods:
Tool: wraps a function that the Agent is allowed to call.
create_react_agent: creates an Agent based on the ReAct (Reasoning and Acting) pattern.
from langchain.agents import AgentExecutor, create_react_agent
from langchain import hub
from langchain_core.tools import Tool
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
print("\n--- Agents 模块调用示例 ---")
# 1. Define tools
# Tool passes a single string to func, so each function parses a comma-separated pair
def multiply(inputs: str) -> float:
    """Multiplies two comma-separated numbers, e.g. '15.6, 3.2'."""
    a, b = (float(x.strip()) for x in inputs.split(","))
    return a * b
def add(inputs: str) -> float:
    """Adds two comma-separated numbers, e.g. '49.92, 10'."""
    a, b = (float(x.strip()) for x in inputs.split(","))
    return a + b
# Wrap the Python functions as LangChain tools
tools = [
    Tool(
        name="Multiply",
        func=multiply,
        description="Useful for multiplying two floating point numbers. Input should be two numbers separated by a comma.",
    ),
    Tool(
        name="Add",
        func=add,
        description="Useful for adding two floating point numbers. Input should be two numbers separated by a comma.",
    ),
]
# 2. Create the agent
# a. Load an agent prompt template from the LangChain Hub (or define your own)
# hub.pull requires the `langchainhub` package:
# pip install langchainhub
try:
    prompt = hub.pull("hwchase17/react")
except Exception:
    # Fall back to a generic ReAct template if the LangChain Hub is unreachable
    prompt = PromptTemplate.from_template("""Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought:{agent_scratchpad}""")
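Step 3 wires the prompt, model, and tools into an executable agent. A minimal sketch; the model choice and executor flags are assumptions:

# 3. Assemble the agent and wrap it in an executor
agent_llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")
agent = create_react_agent(agent_llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)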
# 4. Invoke the agent
question = "What is 15.6 multiplied by 3.2 and then added to 10?"
response = agent_executor.invoke({"input": question})
print(f"\nAgent's final response: {response['output']}")
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory, ConversationSummaryMemory
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
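Before the dialogue below can run, the memory and the conversation chain have to exist. A minimal sketch, assuming an LCEL chain that reads history from a ConversationBufferMemory (consistent with the explicit save_context calls below); the prompt wording is illustrative:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

memory = ConversationBufferMemory(return_messages=True, memory_key="chat_history")

conversation_with_memory = (
    RunnablePassthrough.assign(
        # pull the stored messages into the prompt's chat_history slot
        chat_history=lambda x: memory.load_memory_variables({})["chat_history"]
    )
    | ChatPromptTemplate.from_messages([
        ("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
    ])
    | ChatOpenAI(temperature=0.7, model="gpt-3.5-turbo")
    | StrOutputParser()
)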
print("\n--- 带记忆的对话示例 ---") user_input1 = "My name is Alice." response1 = conversation_with_memory.invoke({"input": user_input1}) memory.save_context({"input": user_input1}, {"output": response1}) # 手动保存上下文 print(f"用户: {user_input1}\nAI: {response1}")
user_input2 = "What did I just tell you my name was?" response2 = conversation_with_memory.invoke({"input": user_input2}) memory.save_context({"input": user_input2}, {"output": response2}) print(f"用户: {user_input2}\nAI: {response2}")
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser

print("\n--- Callbacks module invocation examples ---")
# 1. Define a custom callback handler
class MyCustomHandler(BaseCallbackHandler):
    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"--- LLM started with prompts: {prompts} ---")

    def on_llm_end(self, response, **kwargs):
        print(f"--- LLM ended with response: {response.generations[0][0].text[:50]}... ---")

    def on_chain_end(self, outputs, **kwargs):
        print(f"--- Chain ended with outputs: {outputs} ---")
# 2. Attach the callback handler to a chain
chain_with_callbacks = (
    ChatPromptTemplate.from_template("What is a good name for a company that makes {product}?")
    | ChatOpenAI(temperature=0.7, model="gpt-3.5-turbo")
    | StrOutputParser()
)
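The handler itself is passed at invocation time through the config argument; the product value is arbitrary:

result = chain_with_callbacks.invoke(
    {"product": "colorful socks"},
    config={"callbacks": [MyCustomHandler()]},  # fires on_llm_start / on_llm_end / on_chain_end
)
print(f"Chain with callbacks result: {result}")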
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
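A minimal LCEL composition using these pieces, with RunnablePassthrough forwarding the raw input; the prompt and question are illustrative:

print("\n--- LCEL composition example ---")
lcel_chain = (
    {"question": RunnablePassthrough()}  # coerced to a RunnableParallel that fills the 'question' key
    | ChatPromptTemplate.from_template("Answer briefly: {question}")
    | ChatOpenAI(temperature=0, model="gpt-3.5-turbo")
    | StrOutputParser()
)
print(lcel_chain.invoke("What is LCEL in LangChain?"))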