Why is it only getting the input from one user, not the other users who joined the room?
# ask-ai
Why is it only getting the input from one user, and not from the other users who joined the room?

```python
import logging

from dotenv import load_dotenv
from livekit.agents import (
    Agent,
    AgentSession,
    JobContext,
    JobProcess,
    RoomInputOptions,
    RoomOutputOptions,
    RunContext,
    WorkerOptions,
    cli,
    metrics,
)
from livekit.agents.llm import function_tool
from livekit.agents.voice import MetricsCollectedEvent
from livekit.plugins import deepgram, openai, silero
from livekit.plugins.turn_detector.multilingual import MultilingualModel

import json
from collections.abc import AsyncGenerator, AsyncIterable, Coroutine
from typing import Any

from livekit import rtc

# uncomment to enable Krisp background voice/noise cancellation
# currently supported on Linux and MacOS
from livekit.plugins import noise_cancellation

logger = logging.getLogger("basic-agent")

load_dotenv()


class MyAgent(Agent):
    def __init__(self) -> None:
        super().__init__(
            instructions="You are a translate any input to English, Traditional Chinese, and Japanese."
            "When there is an input, you output the translation in JSON format."
            "{\"en\": \"\", \"tw\": \"\", \"jp\": \"\"}"
            "Do not include any other text in your response."
        )

    async def tts_node(
        self, text: AsyncIterable[str], model_settings: Any
    ) -> (
        AsyncGenerator[rtc.AudioFrame, None]
        | Coroutine[Any, Any, AsyncIterable[rtc.AudioFrame]]
        | Coroutine[Any, Any, None]
    ):
        """
        To skip the TTS
        """
        full_text = ""
        async for segment in text:
            full_text += segment
        print(f"sssyyyyy text: {full_text}")
        return
        yield

    async def on_enter(self):
        # when the agent is added to the session, it'll generate a reply
        # according to its instructions
        self.session.generate_reply()

    # all functions annotated with @function_tool will be passed to the LLM when this
    # agent is active
    @function_tool
    async def lookup_weather(
        self,
        context: RunContext,
        location: str,
        latitude: str,
        longitude: str,
    ):
        """Called when the user asks for weather related information.
        Ensure the user's location (city or region) is provided.
        When given a location, please estimate the latitude and longitude of the
        location and do not ask the user for them.

        Args:
            location: The location they are asking for
            latitude: The latitude of the location
            longitude: The longitude of the location
        """
        logger.info(f"Looking up weather for {location}")

        return "sunny with a temperature of 70 degrees."


def prewarm(proc: JobProcess):
    proc.userdata["vad"] = silero.VAD.load()


async def entrypoint(ctx: JobContext):
    # each log entry will include these fields
    ctx.log_context_fields = {
        "room": ctx.room.name,
    }
    await ctx.connect()

    session = AgentSession(
        vad=ctx.proc.userdata["vad"],
        # any combination of STT, LLM, TTS, or realtime API can be used
        llm=openai.LLM(model="gpt-4o"),
        # stt=deepgram.STT(model="nova-3", language="multi"),
        stt=openai.STT(),
        # use LiveKit's turn detection model
        turn_detection=MultilingualModel(),
    )

    # log metrics as they are emitted, and total usage after session is over
    usage_collector = metrics.UsageCollector()

    @session.on("metrics_collected")
    def _on_metrics_collected(ev: MetricsCollectedEvent):
        metrics.log_metrics(ev.metrics)
        usage_collector.collect(ev.metrics)

    async def log_usage():
        summary = usage_collector.get_summary()
        logger.info(f"Usage: {summary}")

    # shutdown callbacks are triggered when the session is over
    ctx.add_shutdown_callback(log_usage)

    # wait for a participant to join the room
    await ctx.wait_for_participant()

    await session.start(
        agent=MyAgent(),
        room=ctx.room,
        room_input_options=RoomInputOptions(
            # uncomment to enable Krisp BVC noise cancellation
            noise_cancellation=noise_cancellation.BVC(),
        ),
        room_output_options=RoomOutputOptions(
            transcription_enabled=True, sync_transcription=False
        ),
    )


if __name__ == "__main__":
    cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint, prewarm_fnc=prewarm))
```
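Not part of the original snippet, but as a diagnostic starting point: the sketch below (assuming the event names and handler signatures of the standard `livekit` rtc Python SDK; the helper name `log_room_audio_sources` is made up for illustration) logs every remote participant and every audio track the agent's room connection subscribes to. Calling it right after `await ctx.connect()` in `entrypoint` makes it easier to tell whether the room is delivering all users' audio and the limitation lies in how the session consumes it, or whether some audio tracks are never subscribed at all.

```python
import logging

from livekit import rtc
from livekit.agents import JobContext

diag_logger = logging.getLogger("room-diagnostics")


def log_room_audio_sources(ctx: JobContext) -> None:
    """Hypothetical helper: attach logging handlers to the room so it is
    visible which participants and audio tracks the agent receives."""
    room = ctx.room

    # participants already in the room when the agent connected
    # (`remote_participants` is the attribute name in recent SDK versions)
    for participant in room.remote_participants.values():
        diag_logger.info(f"already in room: {participant.identity}")

    @room.on("participant_connected")
    def _on_participant_connected(participant: rtc.RemoteParticipant):
        diag_logger.info(f"participant joined: {participant.identity}")

    @room.on("track_subscribed")
    def _on_track_subscribed(
        track: rtc.Track,
        publication: rtc.RemoteTrackPublication,
        participant: rtc.RemoteParticipant,
    ):
        # only audio tracks are relevant as STT input
        if track.kind == rtc.TrackKind.KIND_AUDIO:
            diag_logger.info(f"subscribed to audio from: {participant.identity}")
```

If every user shows up in the `track_subscribed` logs but only one of them ever reaches the STT, the room connection itself is fine and the question becomes how the `AgentSession` decides which participant's audio to consume.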
Thanks for reaching out, Daniel. I'm an AI assistant and I'm here to help. Give me a moment while I research this topic.
I'm sorry, but I don't have the information you need at the moment. If you would like a member of the LiveKit team to respond, please ask your question in one of the other channels.