# Octomind Configuration File
# This file contains all configurable settings for Octomind.
# All values shown here are the defaults - you can customize any of them.
#
# 💡 Tips:
# • View current config: octomind config --show
# • Validate config: octomind config --validate
# Configuration version (DO NOT MODIFY - used for automatic upgrades)
version = 1
# ═══════════════════════════════════════════════════════════════════════════════
# SYSTEM-WIDE SETTINGS
# These settings apply globally across all roles and commands
# ═══════════════════════════════════════════════════════════════════════════════
# Log level for system messages (none, info, debug)
# • none: No logging output (cleanest experience)
# • info: Show important operations and status messages
# • debug: Show detailed debugging information
log_level = "info"
# Default model for all operations (provider:model format)
# Examples: "openrouter:anthropic/claude-3.5-sonnet", "openai:gpt-4o", "deepseek:deepseek-chat"
model = "openrouter:anthropic/claude-sonnet-4"
# Default tag used when no TAG is passed to `octomind run` (and acp/server).
# Can be a role name (e.g. "developer") or a tap agent (e.g. "octomind:assistant").
# The built-in tap muvon/octomind-tap ships ready-to-use agents like octomind:assistant.
default = "assistant:general"
# Default max_tokens for all operations
max_tokens = 16384
# Custom instructions file name (relative to project root)
# This file will be automatically loaded as a user message in new sessions
# Set to empty string to disable: custom_instructions_file_name = ""
custom_instructions_file_name = "INSTRUCTIONS.md"
# Custom constraints file name (relative to project root)
# This file content will be automatically appended to EACH user request (wrapped in dedicated tags)
# Set to empty string to disable: custom_constraints_file_name = ""
custom_constraints_file_name = "CONSTRAINTS.md"
# Sandbox mode: restrict all filesystem writes to the current working directory
# When enabled, the process and all child processes (shell, MCP servers) cannot
# write outside the directory where octomind was launched.
# Supported on Linux (Landlock, kernel 5.13+) and macOS (Seatbelt).
# Can also be enabled at runtime with --sandbox CLI flag.
sandbox = false
# ═══════════════════════════════════════════════════════════════════════════════
# PERFORMANCE & LIMITS
# Configure thresholds and performance-related settings
# ═══════════════════════════════════════════════════════════════════════════════
# Warn when MCP tool responses exceed this token count (0 = disable warnings)
mcp_response_warning_threshold = 0
# Global token limit for ALL MCP tool responses (0 = unlimited)
# When exceeded, responses are automatically truncated with notice
mcp_response_tokens_threshold = 20000
# Maximum tokens per session before truncation kicks in (0 = disabled, >0 = enabled)
max_session_tokens_threshold = 200000
# Cache responses when they exceed this token count (0 = no caching)
cache_tokens_threshold = 2048
# How long to keep cached responses (in seconds)
cache_timeout_seconds = 240
# Whether to use long system cache (longer cache lifetime)
use_long_system_cache = true
# Maximum number of retries for API calls (can be overridden by --max-retries CLI flag)
max_retries = 1
# Base timeout for exponential backoff retry logic (config-only, no CLI override)
retry_timeout = 30
# Per-request HTTP timeout in seconds — hard limit on how long a single
# HTTP call to the LLM provider may take before being aborted.
# 0 = no timeout (LLM responses can take minutes for long generations).
# Applied at the reqwest RequestBuilder level; retry/backoff still applies on timeout.
request_timeout_seconds = 300
# ═══════════════════════════════════════════════════════════════════════════════
# USER INTERFACE
# Configure how Octomind displays information
# ═══════════════════════════════════════════════════════════════════════════════
# Enable markdown rendering for AI responses (makes output prettier)
enable_markdown_rendering = true
# Markdown theme for styling (default, dark, light, ocean, solarized, monokai)
# Use 'octomind config --list-themes' to see all available themes
markdown_theme = "default"
# Session spending threshold in USD (0.0 = no limit)
# When exceeded, Octomind will prompt before continuing
max_session_spending_threshold = 0.0
# Request spending threshold in USD (0.0 = no limit)
# When exceeded during a single request processing, Octomind will stop execution
# This tracks cost from user input until next user input or session end
max_request_spending_threshold = 0.0
# Capability provider overrides for tap agents
# Each key is a capability name, value is the provider to use instead of "default"
# Example: codesearch = "octocode" → uses capabilities/codesearch/octocode.toml
# Leave empty to use default providers for all capabilities
[capabilities]
# Tap model overrides for specific agents
# Each key is a tap agent tag (e.g., "developer:general"), value is the model to use
# This allows setting a preferred model for specific tap agents.
# Priority: CLI --model > taps override > role.model > config.model
# Example:
# [taps]
# "developer:general" = "ollama:glm-5"
# "octomind:assistant" = "openai:gpt-4o"
# Leave empty to use default model for all tap agents
[taps]
# ═══════════════════════════════════════════════════════════════════════════════
# API KEYS AND AUTHENTICATION
# All API keys are read from environment variables for security
# Set these environment variables before running Octomind:
# • OPENROUTER_API_KEY - for OpenRouter (https://openrouter.ai/)
# • OPENAI_API_KEY - for OpenAI (https://platform.openai.com/)
# • ANTHROPIC_API_KEY - for Anthropic (https://console.anthropic.com/)
# • DEEPSEEK_API_KEY - for DeepSeek (https://platform.deepseek.com/)
# • GOOGLE_APPLICATION_CREDENTIALS - path to Google Cloud credentials JSON
# • AWS_ACCESS_KEY_ID - for Amazon Bedrock
# • CLOUDFLARE_API_TOKEN - for Cloudflare Workers AI
# ═══════════════════════════════════════════════════════════════════════════════
# ROLE CONFIGURATIONS
# Define custom roles here to override or extend tap-provided agents.
# Roles defined here take precedence over tap manifests with the same name.
#
# The built-in tap (muvon/octomind-tap) already ships production-ready roles:
# https://github.com/muvon/octomind-tap
#
# To use a tap agent directly, just run:
# octomind run octomind:assistant
# octomind run octomind:developer
#
# Define a custom role below only when you need project-specific behaviour.
# ═══════════════════════════════════════════════════════════════════════════════
# Example: override the developer role with your own system prompt
[[roles]]
name = "assistant"
# Sampling parameters for this role (override the provider defaults)
temperature = 0.3
top_p = 0.7
top_k = 20
# Placeholders like {{CWD}} and {{ROLE}} are expanded when the session starts
system = """
You are a helpful and knowledgeable assistant.
Working directory: {{CWD}}
"""
welcome = "Hello! Ready to code. Working in {{CWD}} (Role: {{ROLE}})"
[roles.mcp]
server_refs = ["core", "filesystem", "agent"]
allowed_tools = ["core:*", "filesystem:*", "agent:*"]
# Task refiner role - lightweight query refinement
[[roles]]
name = "task_refiner"
# Lightweight model — this role only restructures text, no tool access needed
model = "openrouter:openai/gpt-4.1-mini"
temperature = 0.3
top_p = 0.7
top_k = 20
system = """
You are a simple query processor for the Octomind system. Your ONLY job is basic query refinement and initial file guessing.
Your task:
1. Take the user's request and make it slightly clearer/more structured
2. If you can guess which files might be relevant, mention them (but don't be confident if unsure)
3. Keep it simple - you're just doing basic cleanup and first-pass file guessing
Rules:
- If the request is already clear → return it unchanged or with minimal improvements
- If you don't understand → return the ORIGINAL request exactly as given
- NEVER add complex analysis, edge cases, or detailed requirements
- NEVER suggest solutions or implementation approaches
- Just do basic query structuring and maybe guess some files to look at first
- Keep your response short and focused
Example:
User: "fix the login bug"
You: "Fix the login bug. Likely files to check first: src/auth/, src/login.rs, or authentication-related modules."
{{CONTEXT}}
RESPOND ONLY WITH THE REFINED ORIGINAL TASK
If you DO NOT UNDERSTAND or LACK INFORMATION → respond with the ORIGINAL task exactly as given
CRITICAL: RESPOND WITHOUT ASKING ANY QUESTIONS — JUST TAKE THE USER REQUEST, PROPERLY REFINE IT, AND RESPOND ONLY WITH THE REFINED TASK
"""
welcome = ""
# No servers or tools: refinement is a pure text transformation
[roles.mcp]
server_refs = []
allowed_tools = []
# Task researcher role - context gathering with tool access
[[roles]]
name = "task_researcher"
model = "openrouter:google/gemini-2.5-flash-preview"
temperature = 0.3
top_p = 0.7
top_k = 20
system = """
You are a research assistant and information gatherer for development tasks. Think of yourself as a junior developer doing reconnaissance work.
Your role: Gather the most important information and starting points needed for the task, then present findings clearly.
CRITICAL: Use parallel tool execution whenever possible - call multiple tools simultaneously in the same block for maximum efficiency.
Research Process:
1. **Parallel Discovery**: Execute remember() + semantic_search() + view() simultaneously when gathering initial information
2. **Focused Examination**: Use text_editor with specific line ranges or view_signatures instead of reading entire files
3. **Smart Reading**: Only read full files when absolutely necessary - prefer signatures, key sections, or targeted ranges
Research Strategy:
- **Check past knowledge**: remember() for similar tasks and patterns
- **Find code semantically**: semantic_search() for related functions, patterns, documentation
- **Discover file structure**: view() to understand project layout
- **Examine selectively**: text_editor with ranges, or view_signatures for overview
- **Stay focused**: Get starting points and context, don't dive deep into implementations
What to gather (focused, not exhaustive):
- Past experiences and solutions from memory
- Key code entry points and interfaces via semantic search
- Relevant file locations and their general purposes
- Important function/struct signatures (not full implementations)
- Configuration touchpoints that might be relevant
Present your findings as:
- **Past Experience**: Relevant memories or previous similar work
- **Starting Points**: Key files/functions/areas to begin investigation
- **Code Context**: Important interfaces and patterns found (signatures level)
- **Refined Task**: Based on findings, clarify what needs to be done and where to start looking
IMPORTANT:
- Execute tools in parallel whenever possible for efficiency
- Read selectively - get context and starting points, not full implementations
- You're providing reconnaissance, NOT doing the actual development work
- Focus on "where to look" rather than "how to implement"
{{SYSTEM}}
{{CONTEXT}}"""
welcome = ""
# NOTE(review): the system prompt instructs the model to call remember(),
# semantic_search(), text_editor and view_signatures, but only "view" is
# allowed below — confirm the intended tool surface. Also, other roles use
# "server:tool" syntax (e.g. "filesystem:*"); verify a bare "view" matches.
[roles.mcp]
server_refs = ["filesystem"]
allowed_tools = ["view"]
# Reduce role - session history compression
[[roles]]
name = "reduce"
# Invoked via the "reduce" custom command (see the [[commands]] section below)
model = "openrouter:openai/o4-mini"
temperature = 0.3
top_p = 0.7
top_k = 20
system = """You are a Session History Reducer for Octomind. Your role is to create a CONCISE historical record that preserves CRITICAL architectural information and all file references for future sessions.
**CRITICAL PRESERVATION STRATEGY:**
Create a compressed history that captures ESSENTIAL architectural knowledge and file references that may need to be revisited.
**WHAT TO PRESERVE (MANDATORY):**
- **ALL File References**: Every file that was read, examined, or modified with specific reasons
- **Core Architecture Changes**: Any structural modifications, new patterns introduced, or system design decisions
- **Key Technical Names**: All function names, class names, struct names, constants, and identifiers discovered/used
- **Important Dependencies**: How components connect and interact with each other
- **Critical Design Decisions**: Technical choices that affect future development
- **Implementation Patterns**: Architectural patterns found or established
**ARCHITECTURAL FOCUS:**
- **System Structure**: How components fit together
- **Data Flow**: How information moves through the system
- **Key Interfaces**: Important APIs, traits, and contracts
- **Configuration Changes**: Any config modifications that affect system behavior
- **Integration Points**: How different modules/layers/components connect
**WHAT TO REMOVE:**
- Verbose explanations and lengthy reasoning
- Detailed code implementations (keep signatures/interfaces only)
- Step-by-step procedural descriptions
- Redundant information and duplicate explanations
**OUTPUT FORMAT:**
```
## Task Completed: [Brief architectural task description]
**Files Read/Modified/Examined:**
- `path/to/core/file.rs` - [WHY this file was important - architecture reason]
- `config/system.toml` - [configuration changes made]
- `src/module/interface.rs` - [interface discovery/modification]
**Core Architecture Elements:**
- **Structures**: `CoreStruct`, `SystemConfig`, `InterfaceHandler`
- **Functions/Methods**: `process_pipeline()`, `handle_request()`, `configure_system()`
- **Traits/Interfaces**: `ProcessorTrait`, `ConfigurableInterface`
- **Constants/Config**: `DEFAULT_TIMEOUT`, `MAX_CONNECTIONS`, `SYSTEM_VERSION`
**Architectural Changes/Discoveries:**
[Key structural changes, new patterns, or important system design elements discovered]
**Component Relationships:**
[How different parts connect, data flows, dependencies between modules]
**Critical Context for Future:**
[Essential information that might be needed if we work on related features or revisit these files]
```
**CRITICAL RULES:**
- NEVER omit file paths - future sessions may need to re-examine these files
- Preserve ALL architectural insights and structural understanding
- Keep component relationship information for system understanding
- Focus on information that helps understand the codebase structure
- Create a reference that prevents re-reading files unnecessarily
This architectural history will be essential for future development sessions.
{{CONTEXT}}"""
welcome = ""
# No MCP servers or tools are exposed to this role
[roles.mcp]
server_refs = []
allowed_tools = []
# ═══════════════════════════════════════════════════════════════════════════════
# MCP (MODEL CONTEXT PROTOCOL) SERVERS
# Configure external MCP servers and tools
# Built-in servers are defined here for transparency and easy customization
# ═══════════════════════════════════════════════════════════════════════════════
[mcp]
# Global tool restrictions (empty = no restrictions)
allowed_tools = []
# Built-in MCP servers (always available)
[[mcp.servers]]
name = "core"
type = "builtin"
timeout_seconds = 30
tools = []
[[mcp.servers]]
name = "agent"
type = "builtin"
timeout_seconds = 30
tools = []
# HTTP MCP server configuration:
# - Just provide a URL (localhost or remote - doesn't matter)
# - OAuth authentication is handled automatically via MCP Authorization Discovery (RFC 9728)
# - No manual OAuth configuration needed — just provide the URL
# - Use Stdin type for local processes instead
# [[mcp.servers]]
# name = "my_http_server"
# type = "http"
# url = "http://localhost:3000/mcp" # URL to connect to
# timeout_seconds = 30
# tools = []
# [[mcp.servers]]
# name = "octocode"
# type = "stdio"
# command = "octocode"
# args = ["mcp", "--path=."]
# timeout_seconds = 240
# tools = []
# ═══════════════════════════════════════════════════════════════════════════════
# WEBHOOK HOOKS
# Configure HTTP webhook listeners that pipe payloads through scripts
# and inject the output into the session as user messages.
# Activate hooks with: octomind run --hook --daemon --format jsonl
#
# Script interface:
# stdin: raw HTTP body
# stdout: message to inject into session (if exit code 0)
# stderr: error info (logged on non-zero exit)
# env: HOOK_NAME, HOOK_METHOD, HOOK_PATH, HOOK_QUERY,
# HOOK_CONTENT_TYPE, HOOK_SESSION, HOOK_HEADER_*
# ═══════════════════════════════════════════════════════════════════════════════
# Example: GitHub push webhook
# [[hooks]]
# name = "github-push"
# bind = "0.0.0.0:9876"
# script = "/path/to/process-github-push.sh"
# timeout = 30 # seconds (default: 30)
# ═══════════════════════════════════════════════════════════════════════════════
# WORKFLOWS (BRAIN-INSPIRED PLANNING SYSTEM)
# Configure multi-step AI processing workflows with validation and feedback loops
# Workflows act as PLANNERS that enhance user requests before execution
# See doc/10-workflows.md for comprehensive documentation
# ═══════════════════════════════════════════════════════════════════════════════
# Example workflow: Developer workflow with task refinement and research
# This workflow is active by default and serves as a working example
[[workflows]]
name = "developer_workflow"
description = "Two-stage workflow: refine task, then research context"
[[workflows.steps]]
name = "refine"
type = "once"
layer = "task_refiner"
[[workflows.steps]]
name = "research"
type = "once"
layer = "task_researcher"
# Example workflow: Feedback loop with validation
# [[workflows]]
# name = "validated_development"
# description = "Development with validation feedback loop"
#
# [[workflows.steps]]
# name = "validation_loop"
# type = "loop"
# max_iterations = 3
# exit_pattern = "APPROVED"
#
# [[workflows.steps.substeps]]
# name = "propose"
# type = "once"
# layer = "task_refiner"
#
# [[workflows.steps.substeps]]
# name = "validate"
# type = "once"
# layer = "task_researcher"
# ═══════════════════════════════════════════════════════════════════════════════
# LAYERS (ACP-BASED PROCESSING PIPELINE)
# Layers are workflow building blocks that execute via ACP protocol.
# Each layer references a role (or external ACP client) via the command field.
# Model, system prompt, temperature, and MCP config live in the role — not here.
# ═══════════════════════════════════════════════════════════════════════════════
# Task refiner layer - used by workflows for query refinement
[[layers]]
name = "task_refiner"
description = "Refines and clarifies user requests for better processing by subsequent layers"
command = "octomind acp task_refiner"
input_mode = "last"
# NOTE(review): output_mode = "none" means this layer's output is not written
# back into the session — confirm the workflow engine still forwards the
# refined text to the next step; otherwise this stage has no visible effect.
output_mode = "none"
output_role = "assistant"
# Task researcher layer - used by workflows for context gathering
[[layers]]
name = "task_researcher"
description = "Gathers information and context needed for development tasks through code analysis and research"
command = "octomind acp task_researcher"
input_mode = "last"
output_mode = "append"
output_role = "assistant"
# ═══════════════════════════════════════════════════════════════════════════════
# CUSTOM COMMANDS
# Define custom commands that can be triggered with /run
# NOTE: 'description' field is now REQUIRED for all commands (shown in help text)
# ═══════════════════════════════════════════════════════════════════════════════
[[commands]]
name = "reduce"
description = "Compress session history for cost optimization during ongoing work"
command = "octomind acp reduce"
input_mode = "all"
output_mode = "replace"
output_role = "assistant"
# ═══════════════════════════════════════════════════════════════════════════════
# ADVANCED CONFIGURATION
# These sections are for advanced users and custom setups
# Most users won't need to modify these
# ═══════════════════════════════════════════════════════════════════════════════
# Example custom layer (requires matching role in [[roles]]):
# [[layers]]
# name = "analysis"
# description = "Performs detailed analysis of code, systems, or requirements"
# command = "octomind acp analysis"
# input_mode = "last"
# output_mode = "append"
# output_role = "assistant"
# Example custom command (requires matching role in [[roles]]):
# [[commands]]
# name = "estimate"
# description = "Provides project estimation and time analysis for development tasks"
# command = "octomind acp estimate"
# input_mode = "last"
# output_mode = "none"
# output_role = "assistant"
# Global system prompt override (uncomment to set a global default)
# system = "You are Octomind, an intelligent AI assistant."
# ═══════════════════════════════════════════════════════════════════════════════
# AGENT CONFIGURATIONS
# Define AI agents using the same layer configuration as commands
# Each agent becomes a separate MCP tool (e.g., agent_code_reviewer, agent_debugger)
# NOTE: 'description' field is now REQUIRED for all agents (used as function description in MCP)
# The output_mode controls what the agent tool returns:
# - "none": Returns only the final layer output (cleanest for tool use)
# - "append": Returns layer output + session messages (for debugging)
# ═══════════════════════════════════════════════════════════════════════════════
# AGENTS
# Configure specialized AI agents for task delegation
# Each agent becomes a separate MCP tool (e.g., agent_context_gatherer)
# Agents now use ACP protocol directly - just specify the role to use
# ═══════════════════════════════════════════════════════════════════════════════
# Context Gatherer Agent - Gathers detailed context from files and codebase
# Uses the context_gatherer role defined in [[roles]] section
[[agents]]
name = "context_gatherer"
description = "Gather detailed context from files and codebase. Reads files, searches code patterns, and provides comprehensive information about specific areas of the codebase for development tasks."
command = "octomind acp context_gatherer"
workdir = "." # Working directory for agent execution (default: current directory)
# Example: Architect Agent - Design system architecture
# [[agents]]
# name = "architect"
# description = "Design system architecture and evaluate technical decisions. Provides high-level design guidance."
# command = "octomind acp architect"
# workdir = "."
# Example: Code Reviewer Agent - Review code for quality and best practices
# [[agents]]
# name = "code_reviewer"
# description = "Review code for quality, best practices, security issues, and performance problems."
# command = "octomind acp code_reviewer"
# workdir = "."
# ═══════════════════════════════════════════════════════════════════════════════
# PROMPT TEMPLATES
# Define reusable prompt templates for quick access via /prompt command
# Usage: /prompt <name> (e.g. /prompt review)
# ═══════════════════════════════════════════════════════════════════════════════
# Example prompt templates - customize or add your own
[[prompts]]
name = "review"
description = "Request code review with focus on best practices"
prompt = """Please review the code above focusing on:
- Code quality and best practices
- Security considerations
- Performance implications
- Maintainability and readability
- Potential bugs or edge cases
Provide specific suggestions for improvement."""
[[prompts]]
name = "explain"
description = "Ask for detailed explanation of code or concept"
prompt = "Please provide a detailed explanation of the code/concept above, including how it works, why it's designed this way, and any important considerations."
[[prompts]]
name = "optimize"
description = "Request optimization suggestions"
prompt = """Please analyze the code above and suggest optimizations for:
- Performance improvements
- Memory usage reduction
- Code simplification
- Better algorithms or data structures
Explain the trade-offs for each suggestion."""
[[prompts]]
name = "test"
description = "Request test cases and testing strategy"
prompt = """Please help create comprehensive tests for the code above:
- Unit test cases covering normal scenarios
- Edge cases and error conditions
- Integration test considerations
- Mock/stub requirements
- Testing strategy recommendations"""
[[prompts]]
name = "debug"
description = "Help with debugging and troubleshooting"
prompt = """Please help debug the issue above:
- Analyze potential root causes
- Suggest debugging approaches
- Recommend logging or monitoring points
- Provide step-by-step troubleshooting guide
- Identify common pitfalls in this scenario"""
# ═══════════════════════════════════════════════════════════════════════
# SKILL AUTO-ACTIVATION & VALIDATION
# Configure automatic skill activation and output validation.
# Skills are tap-distributed instruction packs that inject domain knowledge.
# Auto-activation uses declarative `rules:` in SKILL.md frontmatter —
# no script spawning, evaluated in-process on each user message.
# Validators run deterministic checks on the final assistant message.
# ═══════════════════════════════════════════════════════════════════════
[skills]
# Enable automatic skill activation via declarative rules in SKILL.md frontmatter.
# When enabled, skills with `domains` matching the current agent's role
# are checked on each user message. Disable to use manual /skill only.
auto_activation = true
# Enable automatic validation via `validate` scripts at end of each assistant turn.
# When enabled, active skills with a `validate` script are run automatically.
# Disable to skip validators entirely (manual validation only).
auto_validation = false
# Reserved. Rules are evaluated in-process (no script timeout needed).
# Kept for config compatibility.
activation_timeout = 3
# Timeout in seconds for `validate` scripts. 0 = unlimited.
# Can be longer — validators like `cargo test` may need time.
validation_timeout = 60
# Maximum validation retries per skill before giving up.
# Prevents infinite loops when a validator consistently fails.
# Counter resets when validation passes or skill is deactivated.
max_retries = 3
# ═══════════════════════════════════════════════════════════════════════
# PLAN-DRIVEN AUTONOMOUS COMPRESSION
# Configure automatic context compression for plan-driven sessions
# ═══════════════════════════════════════════════════════════════════════
[compression]
# Enable compression system (task → phase → project, all automatic)
hints_enabled = true
# Context pressure threshold (0.0-1.0) at which to start showing hints
hints_pressure_threshold = 0.7
# Minimum tool executions between hints to avoid spamming
hints_min_interval = 5
# Compression aggressiveness scales with absolute token count
# Each level defines: threshold (token count) and target_ratio (compression strength)
# Compression triggers when context exceeds ANY threshold, using the highest matched ratio
# Example: At 100k tokens, compress to 1/4 size (4x compression)
[[compression.pressure_levels]]
threshold = 60000
target_ratio = 2.0 # Light: 50% reduction
[[compression.pressure_levels]]
threshold = 120000
target_ratio = 4.0 # Medium: 75% reduction
[[compression.pressure_levels]]
threshold = 160000
target_ratio = 8.0 # Aggressive: 87.5% reduction
# Maximum number of critical knowledge entries retained across compressions.
# Each compression may extract a short snippet of critical knowledge (decisions,
# constraints, user preferences). Only the last N entries are kept and injected
# into every subsequent compression so the AI never loses essential context.
knowledge_retention = 10
# Decision model configuration for compression decisions and summary generation
# Use a fast, cheap model like Haiku for cost savings (10x cheaper than Sonnet)
# Cost comparison: Haiku ~$0.0003 vs Sonnet ~$0.003 per compression decision
[compression.decision]
model = "anthropic:claude-haiku-4-5"
max_tokens = 16000 # Enough for decision + summary
temperature = 0.3 # Lower temperature for consistent decisions
top_p = 1.0
top_k = 0
max_retries = 1
retry_timeout = 30
ignore_cost = false # When true, compression decision API cost is not tracked (free)
# ═══════════════════════════════════════════════════════════════════════
# CROSS-SESSION ADAPTIVE LEARNING
# Extracts generalizable lessons from conversations and injects relevant
# ones into future sessions. Separate from memory — learning is narrower
# and structured: actionable facts scored by confidence.
# Storage: learning/{project}/{role}/ — project-scoped, role-filtered.
# ═══════════════════════════════════════════════════════════════════════
[learning]
# Enable the learning system
enabled = true
# Model for extraction and retrieval LLM calls (cheap model recommended)
model = "anthropic:claude-haiku-4-5"
# Backend: "file" (default, zero deps) or "mcp" (external tool with field mapping)
backend = "file"
# Minimum user messages before intermediate learning triggers during auto-compaction
min_messages_for_intermediate = 3
# Maximum lessons to inject into the system prompt per session
max_inject = 5
# MCP backend configuration (uncomment to use instead of file backend):
# Octobrain example — field_map maps canonical learning fields to the MCP tool's actual arguments.
# Empty string = omit that field. Missing = omit.
#
# backend = "mcp"
#
# [learning.store]
# tool = "memorize"
# [learning.store.field_map]
# content = "content" # required by memorize
# title = "title" # required by memorize (short summary)
# memory_type = "memory_type" # "learning" — valid octobrain category
# importance = "importance" # 0.0-1.0
# confidence = "source" # maps high/medium/low → user_confirmed/agent_inferred
# tags = "tags" # array of strings, max 10
# role = "role"
# project = "project"
#
# [learning.retrieve]
# tool = "remember"
# [learning.retrieve.field_map]
# query = "query" # string or array of search terms
# memory_type = "memory_types" # passed as ["learning"] array to match octobrain schema
# role = "role"
# project = "project"
# limit = "limit" # max 5 for octobrain