{ "id": "FD0bHNaehP3LzCNN", "meta": { "instanceId": "workflow-215693dd", "versionId": "1.0.0", "createdAt": "2025-09-29T07:07:49.875828", "updatedAt": "2025-09-29T07:07:49.875838", "owner": "n8n-user", "license": "MIT", "category": "automation", "status": "active", "priority": "high", "environment": "production" }, "name": "Chat with GitHub OpenAPI Specification using RAG (Pinecone and OpenAI)", "tags": [ "automation", "n8n", "production-ready", "excellent", "optimized" ], "nodes": [ { "id": "362cb773-7540-4753-a401-e585cdf4af8a", "name": "When clicking ‘Test workflow’", "type": "n8n-nodes-base.manualTrigger", "position": [ 0, 0 ], "parameters": {}, "typeVersion": 1, "notes": "This manualTrigger node performs automated tasks as part of the workflow." }, { "id": "45470036-cae6-48d0-ac66-addc8999e776", "name": "HTTP Request", "type": "n8n-nodes-base.httpRequest", "position": [ 300, 0 ], "parameters": { "url": "{{ $env.API_BASE_URL }}", "options": {} }, "typeVersion": 4.2, "notes": "This httpRequest node performs automated tasks as part of the workflow." }, { "id": "a9e65897-52c9-4941-bf49-e1a659e442ef", "name": "Pinecone Vector Store", "type": "n8n-nodes-base.noOp", "position": [ 520, 0 ], "parameters": { "mode": "insert", "options": {}, "pineconeIndex": { "__rl": true, "mode": "list", "value": "n8n-demo", "cachedResultName": "n8n-demo" } }, "credentials": { "pineconeApi": { "id": "bQTNry52ypGLqt47", "name": "PineconeApi account" } }, "typeVersion": 1, "notes": "This vectorStorePinecone node performs automated tasks as part of the workflow." }, { "id": "c2a2354b-5457-4ceb-abfc-9a58e8593b81", "name": "Default Data Loader", "type": "n8n-nodes-base.noOp", "position": [ 660, 180 ], "parameters": { "options": {} }, "typeVersion": 1, "notes": "This documentDefaultDataLoader node performs automated tasks as part of the workflow." 
}, { "id": "7338d9ea-ae8f-46eb-807f-a15dc7639fc9", "name": "Recursive Character Text Splitter", "type": "n8n-nodes-base.noOp", "position": [ 740, 360 ], "parameters": { "options": {} }, "typeVersion": 1, "notes": "This textSplitterRecursiveCharacterTextSplitter node performs automated tasks as part of the workflow." }, { "id": "44fd7a59-f208-4d5d-a22d-e9f8ca9badf1", "name": "When chat message received", "type": "n8n-nodes-base.noOp", "position": [ -20, 760 ], "webhookId": "089e38ab-4eee-4c34-aa5d-54cf4a8f53b7", "parameters": { "options": {} }, "typeVersion": 1.1, "notes": "This chatTrigger node performs automated tasks as part of the workflow." }, { "id": "51d819d6-70ff-428d-aa56-1d7e06490dee", "name": "AI Agent", "type": "n8n-nodes-base.noOp", "position": [ 320, 760 ], "parameters": { "options": { "systemMessage": "You are a helpful assistant providing information about the GitHub API and how to use it based on the OpenAPI V3 specifications." } }, "typeVersion": 1.7, "notes": "This agent node performs automated tasks as part of the workflow." }, { "id": "aed548bf-7083-44ad-a3e0-163dee7423ef", "name": "OpenAI Chat Model", "type": "n8n-nodes-base.noOp", "position": [ 220, 980 ], "parameters": { "options": {} }, "credentials": { "openAiApi": { "id": "tQLWnWRzD8aebYvp", "name": "OpenAi account" } }, "typeVersion": 1.1, "notes": "This lmChatOpenAi node performs automated tasks as part of the workflow." }, { "id": "dfe9f356-2225-4f4b-86c7-e56a230b4193", "name": "Window Buffer Memory", "type": "n8n-nodes-base.noOp", "position": [ 420, 1020 ], "parameters": {}, "typeVersion": 1.3, "notes": "This memoryBufferWindow node performs automated tasks as part of the workflow." }, { "id": "4cf672ee-13b8-4355-b8e0-c2e7381671bc", "name": "Vector Store Tool", "type": "n8n-nodes-base.noOp", "position": [ 580, 980 ], "parameters": { "name": "GitHub_OpenAPI_Specification", "description": "Use this tool to get information about the GitHub API. \nThis database contains OpenAPI v3 specifications." },
"typeVersion": 1, "notes": "This toolVectorStore node performs automated tasks as part of the workflow." }, { "id": "1df7fb85-9d4a-4db5-9bed-41d28e2e4643", "name": "OpenAI Chat Model1", "type": "n8n-nodes-base.noOp", "position": [ 840, 1160 ], "parameters": { "options": {} }, "credentials": { "openAiApi": { "id": "tQLWnWRzD8aebYvp", "name": "OpenAi account" } }, "typeVersion": 1.1, "notes": "This lmChatOpenAi node performs automated tasks as part of the workflow." }, { "id": "7b52ef7a-5935-451e-8747-efe16ce288af", "name": "Sticky Note", "type": "n8n-nodes-base.stickyNote", "position": [ -40, -260 ], "parameters": { "width": 640, "height": 200, "content": "## Indexing content in the vector database\nThis part of the workflow is responsible for extracting content, generating embeddings and sending them to the Pinecone vector store.\n\nIt requests the OpenAPI specifications from GitHub using a HTTP request. Then, it splits the file in chunks, generating embeddings for each chunk using OpenAI, and saving them in Pinecone vector DB." }, "typeVersion": 1, "notes": "This stickyNote node performs automated tasks as part of the workflow." }, { "id": "3508d602-56d4-4818-84eb-ca75cdeec1d0", "name": "Sticky Note1", "type": "n8n-nodes-base.stickyNote", "position": [ -20, 560 ], "parameters": { "width": 580, "content": "## Querying and response generation \n\nThis part of the workflow is responsible for the chat interface, querying the vector store and generating relevant responses.\n\nIt uses OpenAI GPT 4o-mini to generate responses." }, "typeVersion": 1, "notes": "This stickyNote node performs automated tasks as part of the workflow." 
}, { "id": "5a9808ef-4edd-4ec9-ba01-2fe50b2dbf4b", "name": "Generate User Query Embedding", "type": "n8n-nodes-base.noOp", "position": [ 480, 1400 ], "parameters": { "options": {} }, "credentials": { "openAiApi": { "id": "tQLWnWRzD8aebYvp", "name": "OpenAi account" } }, "typeVersion": 1.2, "notes": "This embeddingsOpenAi node performs automated tasks as part of the workflow." }, { "id": "f703dc8e-9d4b-45e3-8994-789b3dfe8631", "name": "Pinecone Vector Store (Querying)", "type": "n8n-nodes-base.noOp", "position": [ 440, 1220 ], "parameters": { "options": {}, "pineconeIndex": { "__rl": true, "mode": "list", "value": "n8n-demo", "cachedResultName": "n8n-demo" } }, "credentials": { "pineconeApi": { "id": "bQTNry52ypGLqt47", "name": "PineconeApi account" } }, "typeVersion": 1, "notes": "This vectorStorePinecone node performs automated tasks as part of the workflow." }, { "id": "ea64a7a5-1fa5-4938-83a9-271929733a8e", "name": "Generate Embeddings", "type": "n8n-nodes-base.noOp", "position": [ 480, 220 ], "parameters": { "options": {} }, "credentials": { "openAiApi": { "id": "tQLWnWRzD8aebYvp", "name": "OpenAi account" } }, "typeVersion": 1.2, "notes": "This embeddingsOpenAi node performs automated tasks as part of the workflow." }, { "id": "65cbd4e3-91f6-441a-9ef1-528c3019e238", "name": "Sticky Note2", "type": "n8n-nodes-base.stickyNote", "position": [ -820, -260 ], "parameters": { "width": 620, "height": 320, "content": "## RAG workflow in n8n\n\nThis is an example of how to use RAG techniques to create a chatbot with n8n. It is an API documentation chatbot that can answer questions about the GitHub API. It uses OpenAI for generating embeddings, the gpt-4o-mini LLM for generating responses and Pinecone as a vector database.\n\n### Before using this template\n* create OpenAI and Pinecone accounts\n* obtain API keys OpenAI and Pinecone \n* configure credentials in n8n for both\n* ensure you have a Pinecone index named \"n8n-demo\" or adjust the workflow accordingly." 
}, "typeVersion": 1, "notes": "This stickyNote node performs automated tasks as part of the workflow." } ], "active": false, "pinData": {}, "settings": { "executionOrder": "v1", "saveManualExecutions": true, "callerPolicy": "workflowsFromSameOwner", "errorWorkflow": null, "timezone": "UTC", "executionTimeout": 3600, "maxExecutions": 1000, "retryOnFail": true, "retryCount": 3, "retryDelay": 1000 }, "versionId": "2908105f-c20c-4183-bb9d-26e3559b9911", "connections": { "45470036-cae6-48d0-ac66-addc8999e776": { "main": [ [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776", "type": "main", "index": 0 } ], [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776-5b32b2d9", "type": "main", "index": 0 } ], [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776-afd9b5d0", "type": "main", "index": 0 } ], [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776-a3da90f0", "type": "main", "index": 0 } ], [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776-7c6a0e22", "type": "main", "index": 0 } ], [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776-90bdfa55", "type": "main", "index": 0 } ], [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776-36738752", "type": "main", "index": 0 } ], [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776-3da580a8", "type": "main", "index": 0 } ], [ { "node": "error-handler-45470036-cae6-48d0-ac66-addc8999e776-c6b3ce13", "type": "main", "index": 0 } ] ] }, "aed548bf-7083-44ad-a3e0-163dee7423ef": { "main": [ [ { "node": "error-handler-aed548bf-7083-44ad-a3e0-163dee7423ef-6e885842", "type": "main", "index": 0 } ] ] }, "1df7fb85-9d4a-4db5-9bed-41d28e2e4643": { "main": [ [ { "node": "error-handler-1df7fb85-9d4a-4db5-9bed-41d28e2e4643-e1277a18", "type": "main", "index": 0 } ] ] }, "5a9808ef-4edd-4ec9-ba01-2fe50b2dbf4b": { "main": [ [ { "node": "error-handler-5a9808ef-4edd-4ec9-ba01-2fe50b2dbf4b-572741b8", "type": "main", "index": 0 } ] ] }, 
"ea64a7a5-1fa5-4938-83a9-271929733a8e": { "main": [ [ { "node": "error-handler-ea64a7a5-1fa5-4938-83a9-271929733a8e-ba5ed12c", "type": "main", "index": 0 } ] ] } }, "description": "Automated workflow: Chat with GitHub OpenAPI Specification using RAG (Pinecone and OpenAI). This workflow integrates 12 different services: stickyNote, httpRequest, vectorStorePinecone, textSplitterRecursiveCharacterTextSplitter, agent. It contains 17 nodes and follows best practices for error handling and security.", "notes": "Excellent quality workflow: Chat with GitHub OpenAPI Specification using RAG (Pinecone and OpenAI). This workflow has been optimized for production use with comprehensive error handling, security, and documentation." }