graph TD
A[用户查询] --> B{"智能体 (LLM)"}
B --> C{1. 任务规划}
C --> D{2. 工具选择 & 执行}
D -- (调用) --> E[工具库: 搜索引擎]
D -- (调用) --> F[工具库: 向量数据库]
D -- (调用) --> G[工具库: API/DB]
E --> H[检索结果/实时信息]
F --> I[内部知识库片段]
G --> J[结构化数据/外部服务响应]
H & I & J --> K{3. 信息整合 & 推理}
K --> L{4. 生成 & 评估}
L --> M{评估结果: 满意?}
M -- (否) --> C
M -- (是) --> N[最终答案]
N --> A
class SearchTool:
    """Mock search tool: maps keyword patterns in the query to canned answers.

    NOTE(review): the `class SearchTool:` / `def run` header was lost in
    extraction; it is reconstructed here from the call site, which does
    `self.tools.get("SearchTool")` and then `search_tool.run(query)`.
    """

    def run(self, query: str) -> str:
        """Return a canned search result for *query*, or a fallback message.

        Branch order matters: the RAG-limitations branch is checked before
        the Agentic-RAG-advantages branch, so a query containing both
        "RAG" and "局限性" gets the limitations answer.
        """
        if "Python之父" in query:
            return "Guido van Rossum 被认为是Python语言的创始人。"
        elif "RAG" in query and "局限性" in query:
            return "RAG的局限性包括:依赖初始检索结果质量、缺乏多步推理能力、难以处理实时变化的信息、容易产生幻觉。"
        elif "Agentic RAG" in query and "优势" in query:
            return "Agentic RAG的优势在于:通过智能体实现多步规划、工具调用、自我修正,显著提高复杂推理和信息整合能力,减少幻觉。"
        else:
            return f"No specific information found for '{query}'. Please refine your query."
class LLM:
    """Simulates a simple LLM (mock): keyword-matches the prompt to a canned reply."""

    def generate(self, prompt: str) -> str:
        """Return a canned response chosen by keywords found in *prompt*.

        Prints a truncated echo of the prompt (first 200 chars) to show
        what "the LLM" was asked, then picks an answer by simple matching.
        """
        print(f"LLM generating response based on prompt: \n---{prompt[:200]}...\n---")
        # Crude mock generation: keyword lookup instead of a real model call.
        if "Guido van Rossum" in prompt and "Python" in prompt:
            return "根据搜索结果,Python语言是由Guido van Rossum创建的。"
        elif "RAG的局限性" in prompt and "Agentic RAG的优势" in prompt:
            # Original had a pointless f-prefix here (no placeholders); value unchanged.
            return "传统RAG面临的局限性主要包括依赖初始检索质量、缺乏多步推理能力,而Agentic RAG通过引入智能体实现多步规划、工具调用和自我修正,从而增强了复杂推理和信息整合能力,并减少了幻觉。简而言之,Agentic RAG解决了传统RAG的部分痛点,使其能处理更复杂的任务。"
        else:
            return f"Based on the provided information, I can generate a response about: {prompt[:50]}..."
# NOTE(review): the two functions below are written as methods (`self` first
# parameter) of an agent class whose `class` header was lost in extraction —
# they read `self.tools`, `self.llm`, `self.history`. Restore the enclosing
# class when reassembling the file.

def _decide_action(self, user_query: str, current_context: str) -> tuple[str, str]:
    """
    Agent core decision logic: decide whether the next step is SEARCH or GENERATE.

    In a real system this would be a (more complex) LLM call with prompt
    engineering to steer the decision; here it is simulated with keyword rules.

    Returns:
        ("GENERATE", "") when the accumulated context is judged sufficient, or
        ("SEARCH", <search query>) when more retrieval is needed.
    """
    # Simulated decision process: generate if the context is sufficient,
    # otherwise search. The prompt below is illustrative only — it shows what
    # would be sent to an LLM; it is intentionally not used by the mock rules.
    decision_prompt = f"User Query: {user_query}\nCurrent Context: {current_context}\n\nBased on the current context, should I 'SEARCH' for more information or 'GENERATE' a final answer? If SEARCH, provide a search query. If GENERATE, just say GENERATE."
    print("\n--- Agent Deciding Action ---")
    # Simplified stand-in for an LLM judgment call.
    if "Python之父" in user_query and "Guido van Rossum" in current_context:
        return "GENERATE", ""
    elif "RAG" in user_query and "局限性" in user_query and "Agentic RAG" in user_query and "优势" in current_context:
        # BUG FIX: original read `"优势" in user_context` — `user_context` is
        # undefined (NameError). By analogy with the branch above, the check
        # is against the accumulated retrieval context.
        return "GENERATE", ""
    elif "Python之父" in user_query and "Guido van Rossum" not in current_context:
        return "SEARCH", "Python之父是谁"
    elif "RAG" in user_query and "Agentic RAG" in user_query:
        # Asking about both RAG and Agentic RAG may require multiple steps.
        if "局限性" in user_query and not ("RAG的局限性" in current_context or "Agentic RAG的优势" in current_context):
            return "SEARCH", "RAG的局限性 Agentic RAG的优势"
        else:
            return "GENERATE", ""
    else:
        return "SEARCH", user_query  # Default: try searching first.


def run(self, user_query: str) -> str:
    """Drive the agent loop: decide, search/generate, repeat up to 3 iterations.

    Each iteration either retrieves more context via the "SearchTool" tool or
    generates the final answer with the LLM. On the last iteration, falls back
    to generating the best possible answer from whatever context was gathered.
    """
    current_context = ""
    max_iterations = 3
    for i in range(max_iterations):
        print(f"\n--- Agent Iteration {i+1} ---")
        action, query_or_none = self._decide_action(user_query, current_context)
        if action == "SEARCH":
            print(f"Agent decided to SEARCH with query: '{query_or_none}'")
            search_tool = self.tools.get("SearchTool")
            if search_tool:
                retrieved_info = search_tool.run(query_or_none)
                # Accumulate retrieved text so later decisions can see it.
                current_context += "\nRetrieved Information: " + retrieved_info
                self.history.append(f"Iteration {i+1}: Searched for '{query_or_none}', Got: {retrieved_info}")
            else:
                return "Error: SearchTool not available."
        elif action == "GENERATE":
            print("Agent decided to GENERATE final answer.")
            prompt_for_llm = f"User Query: {user_query}\n\nContext Provided:\n{current_context}\n\nBased on the context, please provide a comprehensive answer:"
            final_answer = self.llm.generate(prompt_for_llm)
            self.history.append(f"Iteration {i+1}: Generated final answer.")
            return final_answer
        # Simple termination condition; a real agent would evaluate answer quality.
        if i == max_iterations - 1:
            print("Max iterations reached. Generating best possible answer with current context.")
            prompt_for_llm = f"User Query: {user_query}\n\nContext Provided:\n{current_context}\n\nBased on the context, please provide the best possible answer:"
            return self.llm.generate(prompt_for_llm)
    return "Agent failed to generate a satisfactory answer."