# for OpenAI LLM
# change key and model to your desired one
JOINLY_LLM_MODEL=gpt-4o
JOINLY_LLM_PROVIDER=openai
OPENAI_API_KEY=your-openai-api-key

# for Anthropic LLM
# change key and model to your desired one
JOINLY_LLM_MODEL=claude-3-5-haiku-latest
JOINLY_LLM_PROVIDER=anthropic
ANTHROPIC_API_KEY=your-anthropic-api-key

# for Google LLM
# change key and model to your desired one
JOINLY_MODEL_NAME=gemini-2.5-flash-lite
JOINLY_MODEL_PROVIDER=google
GOOGLE_API_KEY=your-google-api-key

# for Azure OpenAI LLM
# change key, model, and API version to your desired ones
JOINLY_LLM_MODEL=gpt-4o
JOINLY_LLM_PROVIDER=azure
AZURE_OPENAI_API_KEY=your-azure-openai-api-key
AZURE_OPENAI_ENDPOINT=https://your-azure-openai-endpoint.openai.azure.com/
OPENAI_API_VERSION=2024-12-01-preview

# for Ollama LLM (requires pulling the model first via ollama pull)
# Note: this only works when using client_example.py or another script outside of Docker;
# for direct usage with --client, additional setup is needed
# note that small models often fail to call the necessary tools correctly, so use with caution
# change to your desired model with tool-calling support
JOINLY_LLM_MODEL=smollm2:1.7b
JOINLY_LLM_PROVIDER=ollama
# for Ollama at a different host, set the following variables to your desired host and port:
# OLLAMA_HOST=127.0.0.1
# OLLAMA_PORT=11434

# Deepgram Text-to-Speech/Transcription, set args "--tts deepgram" and/or "--stt deepgram"
DEEPGRAM_API_KEY=your-deepgram-api-key

# ElevenLabs Text-to-Speech, set arg "--tts elevenlabs"
ELEVENLABS_API_KEY=your-elevenlabs-api-key

# For the example client (Tavily web search)
TAVILY_API_KEY=your-tavily-api-key
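
# --- Usage sketch (an added note, not part of the original settings above) ---
# The provider blocks above are alternatives: set only the variables for the one
# provider you want to use, since later assignments would override earlier ones.
# For the Ollama option, pull the configured model first, e.g.:
#   ollama pull smollm2:1.7b
# A quick check that a .env file in the working directory is picked up
# (this assumes the python-dotenv package is installed; adjust variable names if
# your setup uses the JOINLY_MODEL_NAME/JOINLY_MODEL_PROVIDER naming instead):
#   python -c "from dotenv import load_dotenv; import os; load_dotenv(); print(os.getenv('JOINLY_LLM_PROVIDER'), os.getenv('JOINLY_LLM_MODEL'))"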