# ask-ai
b
```python
async def on_enter(self):
    # Called when the agent is added to the session.
    # Generates an initial greeting or response according to the agent's instructions.
    self.session.generate_reply()
```
If I have this code, how can I see the LLM output in the console? `generate_reply` abstracts that away by sending it to TTS.
t
Thanks for reaching out, Neel. I'm an AI assistant and I'm here to help. Give me a moment while I research this topic.
Could you provide more details about the environment or framework you are using? For example, are you working within a specific SDK or library provided by LiveKit, or is this a custom implementation? This will help in determining how to intercept or log the LLM output.
b
This is my code:
```python
import logging

from dotenv import load_dotenv
from livekit.agents import (
    Agent,
    AgentSession,
    JobContext,
    JobProcess,
    RoomInputOptions,
    RoomOutputOptions,
    WorkerOptions,
    cli,
)
from livekit.agents.llm import function_tool
from livekit.plugins import deepgram, openai, silero

load_dotenv(dotenv_path=".env.local")


class FAQAgent(Agent):
    def __init__(self) -> None:
        # Initialize the FAQAgent with instructions for the LLM
        super().__init__(
            instructions=(
                "You are a helpful FAQ assistant for a salon. Answer questions about services, hours, pricing, and policies. "
                "If you don't know something or are unsure, call the escalate_to_supervisor function. "
                "Keep responses brief and friendly."
            )
        )

    async def on_enter(self):
        # Called when the agent is added to the session.
        # Generates an initial greeting or response according to the agent's instructions.
        print("Agent entered the room")
        self.session.generate_reply()

    @function_tool
    async def escalate_to_supervisor(
        self, customer_question: str, customer_id: str = "unknown"
    ):
        """
        Called when the AI doesn't know how to answer a question - so basically
        anything besides pricing, hours, etc. This function simulates escalating
        the request to a human supervisor.
        """
        # Create help request (structure matches your simplified DB schema)
        help_request = {
            "question": customer_question,
            "status": "pending",
            "supervisor_response": None,
        }
        # Simulate texting supervisor (prints to console for now)
        print(f"📱 SUPERVISOR ALERT: Need help with '{customer_question}'")
        return "I don't have that information right now. Let me check with my supervisor and get back to you."

    # @function_tool
    # async def handle_question(self, user_question: str):
    #     # 1. Search SQLite and/or Pinecone for an answer
    #     answer, confidence = search_knowledge_base(user_question)
    #     if answer and confidence > 0.5:
    #         return answer
    #     else:
    #         # 2. Escalate if not found or not confident
    #         return await self.escalate_to_supervisor(user_question)


def prewarm(proc: JobProcess):
    """
    Preloads the Silero Voice Activity Detection (VAD) model into the process's
    userdata. This makes VAD available for all sessions without reloading it each time.
    """
    proc.userdata["vad"] = silero.VAD.load()


async def entrypoint(ctx: JobContext):
    """
    Main entrypoint for the agent worker. Connects to LiveKit, sets up the
    agent session, and starts the FAQAgent in the specified room.
    """
    await ctx.connect()
    session = AgentSession(
        vad=ctx.proc.userdata["vad"],
        llm=openai.LLM(model="gpt-4o-mini"),
        stt=deepgram.STT(model="nova-3"),
        tts=openai.TTS(voice="ash"),
    )
    await session.start(
        agent=FAQAgent(),
        room=ctx.room,
        room_input_options=RoomInputOptions(),
        room_output_options=RoomOutputOptions(transcription_enabled=True),
    )


if __name__ == "__main__":
    # Starts the agent worker using LiveKit's CLI runner, specifying the
    # entrypoint and prewarm functions.
    cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint, prewarm_fnc=prewarm))
```
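For context, the commented-out `handle_question` tool assumes a `search_knowledge_base` helper that doesn't exist yet. A hypothetical sketch of what it could look like — the `faq.db` file, the `faq` table, and the fixed confidence score are illustrative assumptions, not part of the code above:
```python
# Hypothetical sketch of the search_knowledge_base helper the commented-out
# handle_question tool assumes: a plain SQLite lookup with a naive confidence
# score. The "faq.db" file, "faq" table, and 0.9 score are assumptions.
import sqlite3
from typing import Optional, Tuple


def search_knowledge_base(user_question: str) -> Tuple[Optional[str], float]:
    conn = sqlite3.connect("faq.db")
    try:
        row = conn.execute(
            "SELECT answer FROM faq WHERE question LIKE ?",
            (f"%{user_question}%",),
        ).fetchone()
    finally:
        conn.close()
    if row is None:
        return None, 0.0
    # A real version would score semantic similarity (e.g. via Pinecone);
    # here any substring match is treated as high confidence.
    return row[0], 0.9
```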
t
Hey there! Want to peek into the LLM output using LiveKit's `AgentSession`? No worries, I've got a super straightforward way to help you track metrics and usage info. It's easier than you might think! 😊
1. First, let's create a `UsageCollector` to gather those metrics:
```python
usage_collector = metrics.UsageCollector()
```
2. Next, we'll set up a friendly event handler to collect metrics:
```python
@session.on("metrics_collected")
def _on_metrics_collected(ev: MetricsCollectedEvent):
    metrics.log_metrics(ev.metrics)
    usage_collector.collect(ev.metrics)
```
3. Now, let's create a simple function to log the usage summary:
```python
async def log_usage():
    summary = usage_collector.get_summary()
    logger.info(f"Usage: {summary}")
```
4. Lastly, we'll add this function as a shutdown callback:
```python
ctx.add_shutdown_callback(log_usage)
```
With this setup, you'll effortlessly log metrics during the session and get a neat summary of total usage when it's all done. Pretty cool, right? 🚀 Let me know if you need any help implementing this!
Sources: GitHub::livekit::agents | examples/voice_agents/basic_agent.py
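Putting the four steps together with the `entrypoint` from the code above — a minimal sketch, not a definitive implementation. It assumes `metrics` and `MetricsCollectedEvent` are importable from `livekit.agents` (as in the `basic_agent.py` example cited above); verify against your SDK version:
```python
# Sketch: the four steps wired into the entrypoint from the code above.
# Assumes metrics and MetricsCollectedEvent are exported by livekit.agents,
# as in LiveKit's basic_agent.py example.
import logging

from livekit.agents import AgentSession, JobContext, MetricsCollectedEvent, metrics
from livekit.plugins import deepgram, openai

logger = logging.getLogger("faq-agent")


async def entrypoint(ctx: JobContext):
    await ctx.connect()

    session = AgentSession(
        vad=ctx.proc.userdata["vad"],  # loaded in prewarm, as in the code above
        llm=openai.LLM(model="gpt-4o-mini"),
        stt=deepgram.STT(model="nova-3"),
        tts=openai.TTS(voice="ash"),
    )

    # Step 1: accumulate usage metrics across the session.
    usage_collector = metrics.UsageCollector()

    # Step 2: log each metrics event to the console as it arrives.
    @session.on("metrics_collected")
    def _on_metrics_collected(ev: MetricsCollectedEvent):
        metrics.log_metrics(ev.metrics)
        usage_collector.collect(ev.metrics)

    # Steps 3 and 4: log a usage summary when the job shuts down.
    async def log_usage():
        summary = usage_collector.get_summary()
        logger.info(f"Usage: {summary}")

    ctx.add_shutdown_callback(log_usage)

    await session.start(agent=FAQAgent(), room=ctx.room)  # FAQAgent as defined above
```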