---
$schema: ./schema/mcp-agent.config.schema.json

# NOTE(review): a top-level `anthropic: null` previously appeared here. It
# duplicated the `anthropic:` provider block defined later in this file;
# duplicate top-level keys are invalid YAML (most parsers silently keep only
# the last value, so the `null` was dead anyway). Removed.

default_search_server: filesystem

document_segmentation:
  enabled: false
  size_threshold_chars: 50000

execution_engine: asyncio

logger:
  level: info
  path_settings:
    path_pattern: logs/mcp-agent-{unique_id}.jsonl
    timestamp_format: '%Y%m%d_%H%M%S'
    unique_id: timestamp
  progress_display: false
  transports:
    - console
    - file

mcp:
  servers:
    bocha-mcp:
      args:
        - tools/bocha_search_server.py
      command: python3
      env:
        BOCHA_API_KEY: ''
        PYTHONPATH: .

    brave:
      # macOS and Linux should use this:
      args:
        - -y
        - '@modelcontextprotocol/server-brave-search'
      command: npx
      # Windows should use this instead:
      # args:
      #   # please use the correct path for your system
      #   - C:/Users/LEGION/AppData/Roaming/npm/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js
      # command: node
      env:
        BRAVE_API_KEY: ''

    filesystem:
      # macOS and Linux should use this.
      # Note: "No valid root directories" warning is harmless - connection still works
      args:
        - -y
        - '@modelcontextprotocol/server-filesystem'
        - .
        - ./deepcode_lab
      command: npx
      # Windows should use this instead:
      # args:
      #   # please use the correct path for your system
      #   - C:/Users/LEGION/AppData/Roaming/npm/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js
      #   - .
      # command: node

    code-implementation:
      args:
        - tools/code_implementation_server.py
      command: python
      description: Paper code reproduction tool server - provides file operations, code execution, search and other functions
      env:
        PYTHONPATH: .

    code-reference-indexer:
      args:
        - tools/code_reference_indexer.py
      command: python
      description: Code reference indexer server - Provides intelligent code reference search from indexed repositories
      env:
        PYTHONPATH: .

    command-executor:
      args:
        - tools/command_executor.py
      command: python
      env:
        PYTHONPATH: .
document-segmentation: args: - tools/document_segmentation_server.py command: python description: Document segmentation server - Provides intelligent document analysis and segmented reading to optimize token usage env: PYTHONPATH: . fetch: args: - mcp-server-fetch command: uvx file-downloader: args: - tools/pdf_downloader.py command: python env: PYTHONPATH: . github-downloader: args: - tools/git_command.py command: python env: PYTHONPATH: . # LLM Provider Priority (选择使用哪个LLM / Choose which LLM to use) # Options: "anthropic", "google", "openai" # If not set or provider unavailable, will fallback to first available provider llm_provider: "google" # 设置为 "google", "anthropic", 或 "openai" openai: base_max_tokens: 40000 # default_model: google/gemini-2.5-pro default_model: anthropic/claude-sonnet-4.5 # default_model: openai/gpt-oss-120b # default_model: deepseek/deepseek-v3.2-exp # default_model: moonshotai/kimi-k2-thinking reasoning_effort: low # Only for thinking models max_tokens_policy: adaptive retry_max_tokens: 32768 # Provider configurations # default_model is used by mcp_agent for planning/analysis phases # implementation_model is used by code_implementation_workflow for code generation google: default_model: "gemini-3-pro-preview" planning_model: "gemini-3-pro-preview" implementation_model: "gemini-2.5-flash" anthropic: default_model: "claude-sonnet-4.5" planning_model: "claude-sonnet-4.5" implementation_model: "claude-sonnet-3.5" openai: default_model: "o3-mini" planning_model: "o3-mini" implementation_model: "gpt-4o" planning_mode: traditional