Hallo, ich verwende den vorgefertigten ReAct-Agenten mit „anthropic.claude-3-sonnet“ über ChatBedrock.
Ich versuche, den vorgefertigten ReAct-Agenten als Knoten innerhalb eines LangGraph-Graphen zu verwenden. Der Graph enthält zwei ReAct-basierte Agentenknoten. Beide sind genau identisch, nur mit unterschiedlichen Eingabeaufforderungen (Prompts).
Agent 1 läuft einwandfrei, aber Agent 2 gibt den folgenden Fehler aus.
Das Problem scheint im Dienstprogramm _merge.py innerhalb der langchain_core-Bibliothek zu liegen.
Ein ähnliches Problem mit Gemini wird in #23827 besprochen
Vielen Dank im Voraus für all die Hilfe
Unten ist der Fehler-Stacktrace:
TypeError Traceback (most recent call last)
Cell In[12], line 7
2 userInput = '''
3 Develop Business Challenges and Opportunities (BCOs) for brand based on its Strategic Imperatives (SIs).
4 '''
5 message = HumanMessage(content = userInput )
----> 7 graph.invoke(
8 input = {"messages" : [message],
9 "brand" : 'brand',
10 "primary_competitors" : ["competitor 1", "competitor 2", "competitor 3"],
11 "brand_research" : [],
12 "strategic_imperatives" : ["SI-1",
13 "SI-2",
14 "SI-3",
15 "SI-4",
16 "SI-5",
17 "SI-6"],
18 "stratagic_imperatives_research" : [],
19 "plan" : [],
20 "next_actor" : '',
21 "next_task" : '',
22 "sender" : '',
23 },
24 config = {"configurable": {"thread_id": "42"}, "recursion_limit": 30} ,
25
26
27 )
28 # ToolMessage
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/init.py:1940, in Pregel.invoke(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, **kwargs)
1938 else:
1939 chunks = []
-> 1940 for chunk in self.stream(
1941 input,
1942 config,
1943 stream_mode=stream_mode,
1944 output_keys=output_keys,
1945 interrupt_before=interrupt_before,
1946 interrupt_after=interrupt_after,
1947 debug=debug,
1948 **kwargs,
1949 ):
1950 if stream_mode == "values":
1951 latest = chunk
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/init.py:1660, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs)
1654 # Similarly to Bulk Synchronous Parallel / Pregel model
1655 # computation proceeds in steps, while there are channel updates
1656 # channel updates from step N are only visible in step N+1
1657 # channels are guaranteed to be immutable for the duration of the step,
1658 # with channel updates applied only at the transition between steps
1659 while loop.tick(input_keys=self.input_channels):
-> 1660 for _ in runner.tick(
1661 loop.tasks.values(),
1662 timeout=self.step_timeout,
1663 retry_policy=self.retry_policy,
1664 get_waiter=get_waiter,
1665 ):
1666 # emit output
1667 yield from output()
1668 # emit output
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/runner.py:167, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter)
165 t = tasks[0]
166 try:
--> 167 run_with_retry(
168 t,
169 retry_policy,
170 configurable={
171 CONFIG_KEY_SEND: partial(writer, t),
172 CONFIG_KEY_CALL: partial(call, t),
173 },
174 )
175 self.commit(t, None)
176 except Exception as exc:
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/retry.py:40, in run_with_retry(task, retry_policy, configurable)
38 task.writes.clear()
39 # run the task
---> 40 return task.proc.invoke(task.input, config)
41 except ParentCommand as exc:
42 ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]
File /opt/conda/lib/python3.11/site-packages/langgraph/utils/runnable.py:408, in RunnableSeq.invoke(self, input, config, **kwargs)
404 config = patch_config(
405 config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
406 )
407 if i == 0:
--> 408 input = step.invoke(input, config, **kwargs)
409 else:
410 input = step.invoke(input, config)
File /opt/conda/lib/python3.11/site-packages/langgraph/utils/runnable.py:184, in RunnableCallable.invoke(self, input, config, **kwargs)
182 else:
183 context.run(_set_config_context, config)
--> 184 ret = context.run(self.func, input, **kwargs)
185 if isinstance(ret, Runnable) and self.recurse:
186 return ret.invoke(input, config)
File ~/MUltiAgent_SI_to_BCO/graph/workflow.py:74, in workflow.init_graph..(state)
68 self.workflow = StateGraph(state)
70 self.workflow.add_node("brand_research_agent",lambda state: agent_node(state = state,
71 agent = self.agents["brand_research_agent"],
72 name = "brand_research_agent",))
---> 74 self.workflow.add_node("si_research_agent",lambda state: agent_node(state = state,
75 agent = self.agents["si_research_agent"],
76 name = "si_research_agent",))
78 self.workflow.add_node("bco_planning_agent",lambda state: agent_node(state = state,
79 agent = self.agents["bco_planning_agent"],
80 name = "bco_planning_agent",))
82 self.workflow.add_node("bco_formulation_agent",lambda state: agent_node(state = state,
83 agent = self.agents["bco_formulation_agent"],
84 name = "bco_formulation_agent",))
File ~/MUltiAgent_SI_to_BCO/graph/nodes.py:78, in agent_node(state, agent, name)
75 except Exception as e:
76 # Log and raise any exceptions that occur
77 logger.error(f"Error in executing {name} node: {str(e)}")
---> 78 raise e
File ~/MUltiAgent_SI_to_BCO/graph/nodes.py:39, in agent_node(state, agent, name)
36 logger.info(f"executing agent {name}")
38 # Invoke the agent with the current state
---> 39 response = agent.invoke(state)
41 # Extract the content from the response
42 content = response['output'] if isinstance(response, dict) and 'output' in response else response
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/init.py:1940, in Pregel.invoke(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, **kwargs)
1938 else:
1939 chunks = []
-> 1940 for chunk in self.stream(
1941 input,
1942 config,
1943 stream_mode=stream_mode,
1944 output_keys=output_keys,
1945 interrupt_before=interrupt_before,
1946 interrupt_after=interrupt_after,
1947 debug=debug,
1948 **kwargs,
1949 ):
1950 if stream_mode == "values":
1951 latest = chunk
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/init.py:1660, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs)
1654 # Similarly to Bulk Synchronous Parallel / Pregel model
1655 # computation proceeds in steps, while there are channel updates
1656 # channel updates from step N are only visible in step N+1
1657 # channels are guaranteed to be immutable for the duration of the step,
1658 # with channel updates applied only at the transition between steps
1659 while loop.tick(input_keys=self.input_channels):
-> 1660 for _ in runner.tick(
1661 loop.tasks.values(),
1662 timeout=self.step_timeout,
1663 retry_policy=self.retry_policy,
1664 get_waiter=get_waiter,
1665 ):
1666 # emit output
1667 yield from output()
1668 # emit output
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/runner.py:167, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter)
165 t = tasks[0]
166 try:
--> 167 run_with_retry(
168 t,
169 retry_policy,
170 configurable={
171 CONFIG_KEY_SEND: partial(writer, t),
172 CONFIG_KEY_CALL: partial(call, t),
173 },
174 )
175 self.commit(t, None)
176 except Exception as exc:
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/retry.py:40, in run_with_retry(task, retry_policy, configurable)
38 task.writes.clear()
39 # run the task
---> 40 return task.proc.invoke(task.input, config)
41 except ParentCommand as exc:
42 ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]
File /opt/conda/lib/python3.11/site-packages/langgraph/utils/runnable.py:408, in RunnableSeq.invoke(self, input, config, **kwargs)
404 config = patch_config(
405 config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
406 )
407 if i == 0:
--> 408 input = step.invoke(input, config, **kwargs)
409 else:
410 input = step.invoke(input, config)
File /opt/conda/lib/python3.11/site-packages/langgraph/utils/runnable.py:176, in RunnableCallable.invoke(self, input, config, **kwargs)
174 context = copy_context()
175 context.run(_set_config_context, child_config)
--> 176 ret = context.run(self.func, input, **kwargs)
177 except BaseException as e:
178 run_manager.on_chain_error(e)
File /opt/conda/lib/python3.11/site-packages/langgraph/prebuilt/chat_agent_executor.py:560, in create_react_agent..call_model(state, config)
558 def call_model(state: AgentState, config: RunnableConfig) -> AgentState:
559 _validate_chat_history(state["messages"])
--> 560 response = model_runnable.invoke(state, config)
561 has_tool_calls = isinstance(response, AIMessage) and response.tool_calls
562 all_tools_return_direct = (
563 all(call["name"] in should_return_direct for call in response.tool_calls)
564 if isinstance(response, AIMessage)
565 else False
566 )
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/base.py:3022, in RunnableSequence.invoke(self, input, config, **kwargs)
3020 input = context.run(step.invoke, input, config, **kwargs)
3021 else:
-> 3022 input = context.run(step.invoke, input, config)
3023 # finish the root run
3024 except BaseException as e:
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/base.py:4711, in RunnableLambda.invoke(self, input, config, **kwargs)
4697 """Invoke this Runnable synchronously.
4698
4699 Args:
(...)
4708 TypeError: If the Runnable is a coroutine function.
4709 """
4710 if hasattr(self, "func"):
-> 4711 return self._call_with_config(
4712 self._invoke,
4713 input,
4714 self._config(config, self.func),
4715 **kwargs,
4716 )
4717 else:
4718 msg = (
4719 "Cannot invoke a coroutine function synchronously."
4720 "Use ainvoke instead."
4721 )
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/base.py:1925, in Runnable._call_with_config(self, func, input, config, run_type, serialized, **kwargs)
1921 context = copy_context()
1922 context.run(_set_config_context, child_config)
1923 output = cast(
1924 Output,
-> 1925 context.run(
1926 call_func_with_variable_args, # type: ignore[arg-type]
1927 func, # type: ignore[arg-type]
1928 input, # type: ignore[arg-type]
1929 config,
1930 run_manager,
1931 **kwargs,
1932 ),
1933 )
1934 except BaseException as e:
1935 run_manager.on_chain_error(e)
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/config.py:396, in call_func_with_variable_args(func, input, config, run_manager, **kwargs)
394 if run_manager is not None and accepts_run_manager(func):
395 kwargs["run_manager"] = run_manager
--> 396 return func(input, **kwargs)
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/base.py:4565, in RunnableLambda._invoke(self, input, run_manager, config, **kwargs)
4563 output = chunk
4564 else:
-> 4565 output = call_func_with_variable_args(
4566 self.func, input, config, run_manager, **kwargs
4567 )
4568 # If the output is a Runnable, invoke it
4569 if isinstance(output, Runnable):
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/config.py:396, in call_func_with_variable_args(func, input, config, run_manager, **kwargs)
394 if run_manager is not None and accepts_run_manager(func):
395 kwargs["run_manager"] = run_manager
--> 396 return func(input, **kwargs)
File /opt/conda/lib/python3.11/site-packages/langchain_core/messages/utils.py:571, in merge_message_runs(messages, chunk_separator)
564 if (
565 isinstance(last_chunk.content, str)
566 and isinstance(curr_chunk.content, str)
567 and last_chunk.content
568 and curr_chunk.content
569 ):
570 last_chunk.content += chunk_separator
--> 571 merged.append(_chunk_to_msg(last_chunk + curr_chunk))
572 return merged
File /opt/conda/lib/python3.11/site-packages/langchain_core/messages/ai.py:395, in AIMessageChunk.add(self, other)
393 def add(self, other: Any) -> BaseMessageChunk: # type: ignore
394 if isinstance(other, AIMessageChunk):
--> 395 return add_ai_message_chunks(self, other)
396 elif isinstance(other, (list, tuple)) and all(
397 isinstance(o, AIMessageChunk) for o in other
398 ):
399 return add_ai_message_chunks(self, *other)
File /opt/conda/lib/python3.11/site-packages/langchain_core/messages/ai.py:412, in add_ai_message_chunks(left, *others)
409 raise ValueError(msg)
411 content = merge_content(left.content, *(o.content for o in others))
--> 412 additional_kwargs = merge_dicts(
413 left.additional_kwargs, *(o.additional_kwargs for o in others)
414 )
415 response_metadata = merge_dicts(
416 left.response_metadata, *(o.response_metadata for o in others)
417 )
419 # Merge tool call chunks
File /opt/conda/lib/python3.11/site-packages/langchain_core/utils/_merge.py:58, in merge_dicts(left, *others)
56 merged[right_k] += right_v
57 elif isinstance(merged[right_k], dict):
---> 58 merged[right_k] = merge_dicts(merged[right_k], right_v)
59 elif isinstance(merged[right_k], list):
60 merged[right_k] = merge_lists(merged[right_k], right_v)
File /opt/conda/lib/python3.11/site-packages/langchain_core/utils/_merge.py:68, in merge_dicts(left, *others)
63 else:
64 msg = (
65 f"Additional kwargs key {right_k} already exists in left dict and "
66 f"value has unsupported type {type(merged[right_k])}."
67 )
---> 68 raise TypeError(msg)
69 return merged
TypeError: Additional kwargs key prompt_tokens already exists in left dict and value has unsupported type <class 'int'>.
Ich habe versucht, online nach ähnlichen Problemen zu suchen, konnte aber keine Lösung finden.
Ich erwarte, dass Agent 2 genauso funktioniert, da eine andere Instanz desselben Agenten als Agent 1 einwandfrei funktioniert.
Hallo, ich verwende den vorgefertigten ReAct-Agenten mit „anthropic.claude-3-sonnet“ über ChatBedrock. Ich versuche, den vorgefertigten ReAct-Agenten als Knoten innerhalb eines LangGraph zu verwenden Graph. Das Diagramm zeigt zwei ReAct-basierte Agentenknoten. Beide sind genau identisch, nur mit unterschiedlichen Eingabeaufforderungen. Agent 1 läuft einwandfrei, aber Agent 2 gibt den folgenden Fehler aus. Das Problem scheint im Dienstprogramm _merge.py innerhalb der langchain_core-Bibliothek zu liegen. Ein ähnliches Problem mit Gemini wird in #23827 besprochen Vielen Dank im Voraus für all die Hilfe Unten ist der Fehler-Stacktrace: [code]TypeError Traceback (most recent call last) Cell In[12], line 7 2 userInput = ''' 3 Develop Business Challenges and Opportunities (BCOs) for brand based on its Strategic Imperatives (SIs). 4 ''' 5 message = HumanMessage(content = userInput ) ----> 7 graph.invoke( 8 input = {"messages" : [message], 9 "brand" : 'brand', 10 "primary_competitors" : ["competitor 1", "competitor 2", "competitor 3"], 11 "brand_research" : [], 12 "strategic_imperatives" : ["SI-1", 13 "SI-2", 14 "SI-3", 15 "SI-4", 16 "SI-5", 17 "SI-6'), 18 "stratagic_imperatives_research" : [], 19 "plan" : [], 20 "next_actor" : '', 21 "next_task" : '', 22 "sender" : '', 23 }, 24 config = {"configurable": {"thread_id": "42"}, "recursion_limit": 30} , 25 26 27 ) 28 # ToolMessage
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/init.py:1660, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs) 1654 # Similarly to Bulk Synchronous Parallel / Pregel model 1655 # computation proceeds in steps, while there are channel updates 1656 # channel updates from step N are only visible in step N+1 1657 # channels are guaranteed to be immutable for the duration of the step, 1658 # with channel updates applied only at the transition between steps 1659 while loop.tick(input_keys=self.input_channels): -> 1660 for _ in runner.tick( 1661 loop.tasks.values(), 1662 timeout=self.step_timeout, 1663 retry_policy=self.retry_policy, 1664 get_waiter=get_waiter, 1665 ): 1666 # emit output 1667 yield from output() 1668 # emit output
File ~/MUltiAgent_SI_to_BCO/graph/nodes.py:78, in agent_node(state, agent, name) 75 except Exception as e: 76 # Log and raise any exceptions that occur 77 logger.error(f"Error in executing {name} node: {str(e)}") ---> 78 raise e
File ~/MUltiAgent_SI_to_BCO/graph/nodes.py:39, in agent_node(state, agent, name) 36 logger.info(f"executing agent {name}") 38 # Invoke the agent with the current state ---> 39 response = agent.invoke(state) 41 # Extract the content from the response 42 content = response['output'] if isinstance(response, dict) and 'output' in response else response
File /opt/conda/lib/python3.11/site-packages/langgraph/pregel/init.py:1660, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs) 1654 # Similarly to Bulk Synchronous Parallel / Pregel model 1655 # computation proceeds in steps, while there are channel updates 1656 # channel updates from step N are only visible in step N+1 1657 # channels are guaranteed to be immutable for the duration of the step, 1658 # with channel updates applied only at the transition between steps 1659 while loop.tick(input_keys=self.input_channels): -> 1660 for _ in runner.tick( 1661 loop.tasks.values(), 1662 timeout=self.step_timeout, 1663 retry_policy=self.retry_policy, 1664 get_waiter=get_waiter, 1665 ): 1666 # emit output 1667 yield from output() 1668 # emit output
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/config.py:396, in call_func_with_variable_args(func, input, config, run_manager, **kwargs) 394 if run_manager is not None and accepts_run_manager(func): 395 kwargs["run_manager"] = run_manager --> 396 return func(input, **kwargs)
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/base.py:4565, in RunnableLambda._invoke(self, input, run_manager, config, **kwargs) 4563 output = chunk 4564 else: -> 4565 output = call_func_with_variable_args( 4566 self.func, input, config, run_manager, **kwargs 4567 ) 4568 # If the output is a Runnable, invoke it 4569 if isinstance(output, Runnable):
File /opt/conda/lib/python3.11/site-packages/langchain_core/runnables/config.py:396, in call_func_with_variable_args(func, input, config, run_manager, **kwargs) 394 if run_manager is not None and accepts_run_manager(func): 395 kwargs["run_manager"] = run_manager --> 396 return func(input, **kwargs)
File /opt/conda/lib/python3.11/site-packages/langchain_core/messages/utils.py:571, in merge_message_runs(messages, chunk_separator) 564 if ( 565 isinstance(last_chunk.content, str) 566 and isinstance(curr_chunk.content, str) 567 and last_chunk.content 568 and curr_chunk.content 569 ): 570 last_chunk.content += chunk_separator --> 571 merged.append(_chunk_to_msg(last_chunk + curr_chunk)) 572 return merged
File /opt/conda/lib/python3.11/site-packages/langchain_core/messages/ai.py:395, in AIMessageChunk.add(self, other) 393 def add(self, other: Any) -> BaseMessageChunk: # type: ignore 394 if isinstance(other, AIMessageChunk): --> 395 return add_ai_message_chunks(self, other) 396 elif isinstance(other, (list, tuple)) and all( 397 isinstance(o, AIMessageChunk) for o in other 398 ): 399 return add_ai_message_chunks(self, *other)
File /opt/conda/lib/python3.11/site-packages/langchain_core/messages/ai.py:412, in add_ai_message_chunks(left, *others) 409 raise ValueError(msg) 411 content = merge_content(left.content, *(o.content for o in others)) --> 412 additional_kwargs = merge_dicts( 413 left.additional_kwargs, *(o.additional_kwargs for o in others) 414 ) 415 response_metadata = merge_dicts( 416 left.response_metadata, *(o.response_metadata for o in others) 417 ) 419 # Merge tool call chunks
File /opt/conda/lib/python3.11/site-packages/langchain_core/utils/_merge.py:68, in merge_dicts(left, *others) 63 else: 64 msg = ( 65 f"Additional kwargs key {right_k} already exists in left dict and " 66 f"value has unsupported type {type(merged[right_k])}." 67 ) ---> 68 raise TypeError(msg) 69 return merged
TypeError: Additional kwargs key prompt_tokens already exists in left dict and value has unsupported type . [/code] Ich habe versucht, online nach ähnlichen Problemen zu suchen, konnte aber keine Lösung finden. Ich erwarte, dass die Bibliothek als Agent 2 funktioniert, da eine andere Instanz davon als Agent 1 einwandfrei funktioniert< /P>
Gibt es eine Problemumgehung für diesen Fehler?
Immer wenn ich versuche, eine Systemaufforderung mit einem Beispiel-JSON oder Text zu geben, der einige Schlüsselwörter hervorhebt, indem ich „Zitat“...
In meinem C++/Pybind11-Projekt habe ich eine C++-Klasse Foo, von der in Python abgeleitet (subclassed) werden kann. Wie üblich durchlief ich den gewohnten Prozess, eine Trampolinklasse PyFoo zu erstellen, und die...
Gibt es in der Methode array.map() eine Möglichkeit, auf die Schlüssel für jedes Schlüssel/Wert-Paar in jedem Index zuzugreifen bzw. diese zu iterieren?
Bitte beachten Sie, dass mir dies bekannt ist...
Heute habe ich ein Pop-up, in dem Spyder auf 6.0.1 aktualisiert werden kann. Wenn ich versuche, es zu aktualisieren, erhalte ich die folgende Fehlermeldung: