wooden-dream-294
08/19/2025, 11:21 AMBVCTelephony
for best results
noise_cancellation=noise_cancellation.BVC(),
),
)
await session.generate_reply(
instructions="Greet users by saying: Hi, I'm your AI progress assistant. Do you have five minutes to complete your progress intake?"
)
if __name__ == "__main__":
agents.cli.run_app(agents.WorkerOptions(entrypoint_fnc=entrypoint))tall-airline-16756
08/19/2025, 11:21 AMtall-airline-16756
08/19/2025, 11:21 AMwooden-dream-294
08/19/2025, 11:21 AMBVCTelephony
for best results
noise_cancellation=noise_cancellation.BVC(),
),
)
await session.generate_reply(
instructions="Greet users by saying: Hi, I'm your AI progress assistant. Do you have five minutes to complete your progress intake?"
)
if __name__ == "__main__":
agents.cli.run_app(agents.WorkerOptions(entrypoint_fnc=entrypoint))tall-airline-16756
08/19/2025, 11:22 AMsession = AgentSession(
llm=openai.LLM(model="gpt-4o-mini"),
# No STT, TTS, or VAD needed for text-only
)
await session.start(
agent=Assistant(),
room=ctx.room,
room_input_options=RoomInputOptions(
text_enabled=True, # Enable text input
audio_enabled=False, # Disable audio input
video_enabled=False # Disable video input
),
room_output_options=RoomOutputOptions(
transcription_enabled=True, # Enable text output
audio_enabled=False # Disable audio output
)
)
2. Key Configuration Details:
• Input Options: Just set audio_enabled=False
and video_enabled=False
inside RoomInputOptions
.
• Output Options: Make sure audio_enabled=False
and transcription_enabled=True
are set in RoomOutputOptions
.
This way, you’ll have only text input and output active, and all audio and video features will be turned off. If you want me to walk you through anything else, just let me know—I’m here to help!
Sources: examples/other/transcription/translator.py | Text and transcriptions | LiveKit Docs | Text and transcriptions | LiveKit Docs
wooden-dream-294
08/19/2025, 12:38 PMtall-airline-16756
08/19/2025, 12:39 PMimport logging
from dotenv import load_dotenv
from livekit.agents import (
Agent,
AgentSession,
JobContext,
RoomInputOptions,
RoomOutputOptions,
WorkerOptions,
cli,
)
from livekit.plugins import openai
load_dotenv()
class MyAgent(Agent):
    """Minimal text-only assistant agent.

    Only sets the system instructions; all session wiring (LLM, room I/O)
    is configured by the caller in `entrypoint`.
    """

    def __init__(self) -> None:
        super().__init__(
            instructions="You are a helpful assistant.",
        )
async def entrypoint(ctx: JobContext):
    """Start a text-only agent session in the job's room.

    Audio is disabled on both input and output; the agent communicates via
    text input and transcription output only, so no STT/TTS/VAD plugins are
    configured on the session.
    """
    session = AgentSession(
        # Only an LLM is needed: no TTS or STT for text-only operation.
        llm=openai.LLM(model="gpt-4o-mini"),
    )
    await session.start(
        agent=MyAgent(),
        room=ctx.room,
        # Accept text input, reject audio input.
        room_input_options=RoomInputOptions(text_enabled=True, audio_enabled=False),
        # Emit text transcriptions, no synthesized audio.
        room_output_options=RoomOutputOptions(transcription_enabled=True, audio_enabled=False),
    )
# Run the agent worker when executed as a script (e.g. `python agent.py console`).
if __name__ == "__main__":
    cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint))
2. Run in Console Mode:
python agent.py console
3. Console Features:
• Just type your messages and hit Enter to chat with the agent.
• Use [Ctrl+B]
anytime to switch between Text and Audio modes.
• Press [Q]
whenever you want to exit the console session.
And that’s it! This setup makes it super easy to chat with your agent locally in text-only mode. If you run into any questions or want a hand with anything else, just give me a shout—I’m here to help!
Sources: examples/other/chat-stream-receiver.py | Voice AI quickstart | LiveKit Docs | Text and transcriptions | LiveKit Docs
wooden-dream-294
08/19/2025, 12:57 PMtall-airline-16756
08/19/2025, 12:58 PM