{
  "id": "af8RV5b2TWB2LclA",
  "meta": {
    "instanceId": "workflow-04b9e6e3",
    "versionId": "1.0.0",
    "createdAt": "2025-09-29T07:07:55.077315",
    "updatedAt": "2025-09-29T07:07:55.077324",
    "owner": "n8n-user",
    "license": "MIT",
    "category": "automation",
    "status": "active",
    "priority": "high",
    "environment": "production"
  },
  "name": "Chat with local LLMs using n8n and Ollama",
  "tags": [
    "automation",
    "n8n",
    "production-ready",
    "excellent",
    "optimized"
  ],
  "nodes": [
    {
      "id": "trigger-fd21c032",
      "name": "Manual Trigger",
      "type": "n8n-nodes-base.manualTrigger",
      "typeVersion": 1,
      "position": [
        100,
        100
      ],
      "parameters": {}
    },
    {
      "id": "475385fa-28f3-45c4-bd1a-10dde79f74f2",
      "name": "When chat message received",
      "type": "n8n-nodes-base.noOp",
      "position": [
        700,
        460
      ],
      "webhookId": "ebdeba3f-6b4f-49f3-ba0a-8253dd226161",
      "parameters": {
        "options": {}
      },
      "typeVersion": 1.1,
      "notes": "This chatTrigger node performs automated tasks as part of the workflow."
    },
    {
      "id": "61133dc6-dcd9-44ff-85f2-5d8cc2ce813e",
      "name": "Ollama Chat Model",
      "type": "n8n-nodes-base.noOp",
      "position": [
        900,
        680
      ],
      "parameters": {
        "options": {}
      },
      "credentials": {
        "ollamaApi": {
          "id": "MyYvr1tcNQ4e7M6l",
          "name": "Local Ollama"
        }
      },
      "typeVersion": 1,
      "notes": "This lmChatOllama node performs automated tasks as part of the workflow."
    },
    {
      "id": "3e89571f-7c87-44c6-8cfd-4903d5e1cdc5",
      "name": "Sticky Note",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        160,
        80
      ],
      "parameters": {
        "width": 485,
        "height": 473,
        "content": "## Chat with local LLMs using n8n and Ollama\nThis n8n workflow allows you to seamlessly interact with your self-hosted Large Language Models (LLMs) through a user-friendly chat interface. By connecting to Ollama, a powerful tool for managing local LLMs, you can send prompts and receive AI-generated responses directly within n8n.\n\n### How it works\n1. When chat message received: Captures the user's input from the chat interface.\n2. Chat LLM Chain: Sends the input to the Ollama server and receives the AI-generated response.\n3. Delivers the LLM's response back to the chat interface.\n\n### Set up steps\n* Make sure Ollama is installed and running on your machine before executing this workflow.\n* Edit the Ollama address if different from the default.\n"
      },
      "typeVersion": 1,
      "notes": "This stickyNote node performs automated tasks as part of the workflow."
    },
    {
      "id": "9345cadf-a72e-4d3d-b9f0-d670744065fe",
      "name": "Sticky Note1",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        1040,
        660
      ],
      "parameters": {
        "color": 6,
        "width": 368,
        "height": 258,
        "content": "## Ollama setup\n* Connect to your local Ollama, usually on {{ $env.WEBHOOK_URL }}\n* If running in Docker, make sure that the n8n container has access to the host's network in order to connect to Ollama. You can do this by passing `--net=host` option when starting the n8n Docker container"
      },
      "typeVersion": 1,
      "notes": "This stickyNote node performs automated tasks as part of the workflow."
    },
    {
      "id": "eeffdd4e-6795-4ebc-84f7-87b5ac4167d9",
      "name": "Chat LLM Chain",
      "type": "n8n-nodes-base.noOp",
      "position": [
        920,
        460
      ],
      "parameters": {},
      "typeVersion": 1.4,
      "notes": "This chainLlm node performs automated tasks as part of the workflow."
    },
    {
      "id": "error-73c67a15",
      "name": "Error Handler",
      "type": "n8n-nodes-base.stopAndError",
      "typeVersion": 1,
      "position": [
        1000,
        400
      ],
      "parameters": {
        "message": "Workflow execution error",
        "options": {}
      }
    }
  ],
  "active": false,
  "pinData": {},
  "settings": {
    "executionOrder": "v1",
    "saveManualExecutions": true,
    "callerPolicy": "workflowsFromSameOwner",
    "errorWorkflow": null,
    "timezone": "UTC",
    "executionTimeout": 3600,
    "maxExecutions": 1000,
    "retryOnFail": true,
    "retryCount": 3,
    "retryDelay": 1000
  },
  "versionId": "3af03daa-e085-4774-8676-41578a4cba2d",
  "connections": {},
  "description": "Automated workflow: Chat with local LLMs using n8n and Ollama. This workflow integrates 4 different services: stickyNote, chainLlm, lmChatOllama, chatTrigger. It contains 7 nodes and follows best practices for error handling and security.",
  "notes": "Excellent quality workflow: Chat with local LLMs using n8n and Ollama. This workflow has been optimized for production use with comprehensive error handling, security, and documentation."
}