async def llm_node(self, chat_ctx: llm.ChatContext, tools: list[FunctionTool], model_settings: ModelSettings) -> AsyncIterable[llm.ChatChunk]:
    print("\n\nCHAT CONTEXT INSIDE THE LLM_NODE:", chat_ctx.items, "\n\n")
    LLM_TIMEOUT_SECONDS = 30
    # API stage: emit a tool call instead of generating text
    if self.flow_config["stages"][self.current_stage_id]["type"] == "api":
        tool_call_instruction = llm.ToolChoice(  # this is the line that raises the error below
            name="pre_api_call_tool",
            args={},
            id=str(uuid.uuid4())
        )
        # Yield a tool call chunk to the session, then stop
        chunk = llm.ChatChunk(delta=llm.ChoiceDelta(tool_calls=[tool_call_instruction]))
        yield chunk
        return
    # Pending transition to a different stage (and the current stage is not a
    # question stage): let the small LLM generate the reply for the new stage
    if (
        self.flow_state.next_stage_id
        and self.flow_state.next_stage_id != "''"
        and self.flow_state.next_stage_id != self.flow_state.current_stage_id
        and self.flow_config["stages"][self.current_stage_id]["type"] != "question"
    ):
        print("chat_ctx.items:", chat_ctx.items[-1].text_content)
        reply = await self._call_small_llm_for_question(chat_ctx.items[-1].text_content, self.effective_llm_instruction)
        # Yield a single chunk with the full reply and return
        yield llm.ChatChunk(id=str(uuid.uuid4()), delta=llm.ChoiceDelta(content=reply))
        return
    # No transition needed: fall through to the default LLM behavior
    print("no transition detected, generating a regular reply in llm_node")
    try:
        llm_response_generator = Agent.default.llm_node(self, chat_ctx, tools, model_settings).__aiter__()
        while True:
            try:
                # Guard each chunk read with a timeout
                chunk = await asyncio.wait_for(llm_response_generator.__anext__(), timeout=LLM_TIMEOUT_SECONDS)
                print("llm chunk:", chunk)
                yield chunk
            except StopAsyncIteration:
                break
    except Exception as e:
        print(f"CRITICAL ERROR starting LLM: {e}")
        # ... (rest of the existing error handling)
I have used the above code, but I am getting the error below:
2025-08-11 15:56:32,998 - ERROR livekit.agents - Error in _llm_inference_task
Traceback (most recent call last):
File "C:\HARSHITA\INTERNSHIPS\pgv\livekit-latest - Copy\.venv1\Lib\site-packages\livekit\agents\utils\log.py", line 16, in async_fn_logs
return await fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\HARSHITA\INTERNSHIPS\pgv\livekit-latest - Copy\.venv1\Lib\site-packages\opentelemetry\util\_decorator.py", line 71, in async_wrapper
return await func(*args, **kwargs) # type: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\HARSHITA\INTERNSHIPS\pgv\livekit-latest - Copy\.venv1\Lib\site-packages\livekit\agents\voice\generation.py", line 124, in _llm_inference_task
async for chunk in llm_node:
...<29 lines>...
)
File "C:\HARSHITA\INTERNSHIPS\pgv\livekit-latest - Copy\workflow-script-v2-2.py", line 1227, in llm_node
tool_call_instruction = llm.ToolChoice(
name="pre_api_call_tool",
args={},
id=str(uuid.uuid4())
)
File "C:\Python313\Lib\typing.py", line 1317, in __call__
result = self.__origin__(*args, **kwargs)
File "C:\Python313\Lib\typing.py", line 560, in __call__
raise TypeError(f"Cannot instantiate {self!r}")
TypeError: Cannot instantiate typing.Union
🎤 CAPTURED USER's last message: 'Yes,'
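
From the last frames of the traceback, my understanding is that llm.ToolChoice is a typing.Union type alias (a tool-choice option passed to the LLM), not a class that can be instantiated, which is why Python raises "Cannot instantiate typing.Union". Below is my best guess at what the chunk construction should look like instead, assuming the livekit-agents 1.x API where a streamed tool call is represented by llm.FunctionToolCall (with name, arguments as a JSON string, and call_id) and llm.ChatChunk requires an id; I have not verified these field names against my installed version:

    # sketch, not verified: build the tool call as a FunctionToolCall, not a ToolChoice
    tool_call_instruction = llm.FunctionToolCall(
        name="pre_api_call_tool",
        arguments="{}",           # assumption: arguments is a JSON-encoded string, not a dict
        call_id=str(uuid.uuid4()),
    )
    yield llm.ChatChunk(
        id=str(uuid.uuid4()),     # assumption: ChatChunk.id is required
        delta=llm.ChoiceDelta(tool_calls=[tool_call_instruction]),
    )

Is this the supported way to emit a tool call from an overridden llm_node, or is there a helper in livekit-agents I should be using instead?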