Dies ist ein sehr grundlegendes Beispiel, wie ich versuche, Langchain zu verwenden, um ein LLM aufzurufen und das Tool zu finden, um < /p>
zu verwenden
import asyncio
import json
from langchain.agents import AgentExecutor, create_structured_chat_agent, Tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, ToolCall
from langchain_community.chat_models.fake import FakeMessagesListChatModel
# 1. Define a simple, predictable tool
def simple_tool_function(input: str) -> str:
"""A simple tool that returns a fixed string."""
print(f"Tool called with input: '{input}'")
return "The tool says hello back!"
tools = [
Tool(
name="simple_tool",
func=simple_tool_function,
description="A simple test tool.",
)
]
# 2. Create responses that follow the structured chat format
responses = [
# First response: Agent decides to use a tool
AIMessage(
content=json.dumps({
"action": "simple_tool",
"action_input": {"input": "hello"}
})
),
# Second response: Agent provides final answer after tool execution
AIMessage(
content=json.dumps({
"action": "Final Answer",
"action_input": "The tool call was successful. The tool said: 'The tool says hello back!'"
})
),
]
# Use the modern FakeMessagesListChatModel
llm = FakeMessagesListChatModel(responses=responses)
# 3. Create the prompt using the standard structured chat prompt format
prompt = ChatPromptTemplate.from_messages([
("system", """Respond to the human as helpfully and accurately as possible. You have access to the following tools:
{tools}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or {tool_names}
Provide only ONE action per $JSON_BLOB, as shown:
< /code>
{{
"Aktion": $ tool_name,
"action_input": $ input
}} < /p>
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
< /code>
$ json_blob < /p>
Observation: action result
... (repeat Thought/Action/Observation as needed)
Thought: I know what to respond
Action:
< /code>
{{
"Aktion": "endgültige Antwort",
"action_input": "Endgültige Antwort auf menschliche"
}} < /p>
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation"""),
("human", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
])
# 4. Create the agent and executor
try:
agent = create_structured_chat_agent(llm, tools, prompt)
agent_executor = AgentExecutor(
agent=agent,
tools=tools,
verbose=True,
handle_parsing_errors=True,
max_iterations=3
)
print("✅ Agent and Executor created successfully.")
except Exception as e:
print(f"❌ Failed to create agent: {e}")
exit()
# 5. Run the agent
async def main():
print("\n--- Invoking Agent ---")
try:
result = await agent_executor.ainvoke({"input": "call the tool"})
print("\n--- Agent Finished ---")
print(f"✅ Final Result: {result}")
except Exception as e:
print(f"❌ Agent failed during invocation.")
import traceback
traceback.print_exc()
if __name__ == "__main__":
asyncio.run(main())
< /code>
Python -Anforderungen Ich verwende < /p>
langchain==0.3.27
langchain-community==0.3.27
langchain-core==0.3.74
langchain-aws==0.2.30
langchain-openai==0.3.29
Python Version 3.9
Wie kann ich den Fehler ValueError beseitigen: Variable Agent_ScratchPad sollte eine Liste von Basismeldungen sein, vom Typ
Dies ist ein sehr grundlegendes Beispiel, wie ich versuche, Langchain zu verwenden, um ein LLM aufzurufen und das Tool zu finden, um < /p> zu verwenden[code]import asyncio import json from langchain.agents import AgentExecutor, create_structured_chat_agent, Tool from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.messages import AIMessage, ToolCall from langchain_community.chat_models.fake import FakeMessagesListChatModel
# 1. Define a simple, predictable tool def simple_tool_function(input: str) -> str: """A simple tool that returns a fixed string.""" print(f"Tool called with input: '{input}'") return "The tool says hello back!"
# 2. Create responses that follow the structured chat format responses = [ # First response: Agent decides to use a tool AIMessage( content=json.dumps({ "action": "simple_tool", "action_input": {"input": "hello"} }) ), # Second response: Agent provides final answer after tool execution AIMessage( content=json.dumps({ "action": "Final Answer", "action_input": "The tool call was successful. The tool said: 'The tool says hello back!'" }) ), ]
# Use the modern FakeMessagesListChatModel llm = FakeMessagesListChatModel(responses=responses)
# 3. Create the prompt using the standard structured chat prompt format prompt = ChatPromptTemplate.from_messages([ ("system", """Respond to the human as helpfully and accurately as possible. You have access to the following tools:
{tools}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or {tool_names}
Question: input question to answer Thought: consider previous and subsequent steps Action: < /code> $ json_blob < /p> Observation: action result ... (repeat Thought/Action/Observation as needed) Thought: I know what to respond Action: < /code> {{ "Aktion": "endgültige Antwort", "action_input": "Endgültige Antwort auf menschliche" }} < /p>
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation"""), ("human", "{input}"), MessagesPlaceholder(variable_name="agent_scratchpad"), ])
# 4. Create the agent and executor try: agent = create_structured_chat_agent(llm, tools, prompt) agent_executor = AgentExecutor( agent=agent, tools=tools, verbose=True, handle_parsing_errors=True, max_iterations=3 ) print("✅ Agent and Executor created successfully.") except Exception as e: print(f"❌ Failed to create agent: {e}") exit()
# 5. Run the agent async def main(): print("\n--- Invoking Agent ---") try: result = await agent_executor.ainvoke({"input": "call the tool"}) print("\n--- Agent Finished ---") print(f"✅ Final Result: {result}") except Exception as e: print(f"❌ Agent failed during invocation.") import traceback traceback.print_exc()
if __name__ == "__main__": asyncio.run(main()) < /code> Python -Anforderungen Ich verwende < /p> langchain==0.3.27 langchain-community==0.3.27 langchain-core==0.3.74 langchain-aws==0.2.30 langchain-openai==0.3.29 [/code] Python Version 3.9 Wie kann ich den Fehler ValueError beseitigen: Variable Agent_ScratchPad sollte eine Liste von Basismeldungen sein, vom Typ
Ich bin ein Student und muss eine Menge Dinge durch .pdf für Online -Kurse lesen und muss sie ausdrucken. Während ich im Internet surft, fand ich ein Skript, das mir helfen würde, PDFs zu erstellen...
Ich habe diesen Fehler, wenn Sie versuchen, das -Access -Token für OAuth 2.0 zu erhalten, um eine Benachrichtigung an ein anderes Telefon zu senden.
Caused by: java.lang.IllegalArgumentException:...
Ich habe diesen Fehler, wenn Sie versuchen, das -Access -Token für OAuth 2.0 zu erhalten, um eine Benachrichtigung an ein anderes Telefon zu senden.
Caused by: java.lang.IllegalArgumentException:...
Ich versuche einen Text an SQL Agent mit OLLAMA und LAMA 3.1, an eine SQLite -Datenbank verbunden. Das Verhalten, das ich erwarte, ist, dass der Agent die Liste der Tabellen anruft, eine SQL -Abfrage...