# This is my code
import logging
from dotenv import load_dotenv
from livekit.agents import (
Agent,
AgentSession,
JobContext,
JobProcess,
RoomInputOptions,
RoomOutputOptions,
WorkerOptions,
cli,
)
from livekit.agents.llm import function_tool
from livekit.plugins import deepgram, openai, silero
# Load environment variables (API keys, etc.) from .env.local at import time.
# NOTE: the parameter is `dotenv_path` — the mangled `_dotenv_path_` would
# raise TypeError: unexpected keyword argument.
load_dotenv(dotenv_path=".env.local")
class FAQAgent(Agent):
    """Voice FAQ assistant for a salon, built on a LiveKit Agent.

    Answers routine questions (services, hours, pricing, policies) and
    escalates anything it cannot answer to a human supervisor via the
    ``escalate_to_supervisor`` function tool.
    """

    def __init__(self) -> None:
        # Initialize the FAQAgent with instructions for the LLM: answer
        # routine salon FAQs, escalate anything unknown, keep it brief.
        super().__init__(
            instructions=(
                "You are a helpful FAQ assistant for a salon. Answer questions about services, hours, pricing, and policies. "
                "If you don't know something or are unsure, call the escalate_to_supervisor function. "
                "Keep responses brief and friendly."
            )
        )

    async def on_enter(self):
        # Called when the agent is added to the session.
        # Generates an initial greeting or response according to the agent's
        # instructions.
        print("Agent entered the room")
        self.session.generate_reply()

    @function_tool
    async def escalate_to_supervisor(
        self,
        customer_question: str,
        customer_id: str = "unknown",
    ) -> str:
        """
        Called when the AI doesn't know how to answer a question - so basically anything besides pricing, hours, etc.
        This function simulates escalating the request to a human supervisor.
        """
        # Build the help request (structure matches the simplified DB schema).
        # `customer_id` is recorded too so the supervisor can trace the caller;
        # the record is currently unused pending real persistence.
        help_request = {
            "question": customer_question,
            "customer_id": customer_id,
            "status": "pending",
            "supervisor_response": None,
        }
        # Simulate texting the supervisor (prints to console for now).
        print(f"📱 SUPERVISOR ALERT: Need help with '{customer_question}'")
        return "I don't have that information right now. Let me check with my supervisor and get back to you."

    # TODO: add a `handle_question` tool that searches a knowledge base
    # (SQLite and/or Pinecone) and only escalates when no answer is found
    # with sufficient confidence.
def prewarm(proc: JobProcess) -> None:
    """
    Preloads the Silero Voice Activity Detection (VAD) model into the process's userdata.
    This makes VAD available for all sessions without reloading it each time.
    """
    # BUG FIX: the signature previously declared `_proc_` while the body used
    # `proc`, which raised NameError on every call.
    proc.userdata["vad"] = silero.VAD.load()
async def entrypoint(ctx: JobContext) -> None:
    """
    Main entrypoint for the agent worker. Connects to LiveKit, sets up the agent session,
    and starts the FAQAgent in the specified room.
    """
    await ctx.connect()
    # Assemble the voice pipeline: prewarmed Silero VAD (loaded in `prewarm`),
    # OpenAI LLM + TTS, Deepgram STT.
    session = AgentSession(
        vad=ctx.proc.userdata["vad"],
        llm=openai.LLM(model="gpt-4o-mini"),
        stt=deepgram.STT(model="nova-3"),
        tts=openai.TTS(voice="ash"),
    )
    # Keyword names restored from the mangled `_room_input_options_` etc.,
    # which would have raised TypeError at call time.
    await session.start(
        agent=FAQAgent(),
        room=ctx.room,
        room_input_options=RoomInputOptions(),
        room_output_options=RoomOutputOptions(transcription_enabled=True),
    )
if __name__ == "__main__":
    # Starts the agent worker using LiveKit's CLI runner, specifying the
    # entrypoint and prewarm functions. BUG FIX: the guard previously read
    # `if name == ...`, which raised NameError (`name` is undefined) —
    # the dunder `__name__` was mangled away.
    cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint, prewarm_fnc=prewarm))