{ "id": "HnqGW0eq5asKfZxf", "meta": { "instanceId": "workflow-22b90d31", "versionId": "1.0.0", "createdAt": "2025-09-29T07:07:57.451311", "updatedAt": "2025-09-29T07:07:57.451385", "owner": "n8n-user", "license": "MIT", "category": "automation", "status": "active", "priority": "high", "environment": "production" }, "name": "🔍🛠️Perplexity Researcher to HTML Web Page", "tags": [ "automation", "n8n", "production-ready", "excellent", "optimized" ], "nodes": [ { "id": "ad5d96c6-941a-4ab3-b349-10bae99e5988", "name": "Sticky Note", "type": "n8n-nodes-base.stickyNote", "position": [ 320, 1360 ], "parameters": { "color": 3, "width": 625.851492623043, "height": 465.2493344282225, "content": "## Create Article from Perplexity Research" }, "typeVersion": 1, "notes": "This stickyNote node performs automated tasks as part of the workflow." }, { "id": "19b3ca66-5fd2-4d04-b25a-a17fb38642f8", "name": "Sticky Note1", "type": "n8n-nodes-base.stickyNote", "position": [ 1240, 1360 ], "parameters": { "color": 4, "width": 479.02028317328745, "height": 464.14912719677955, "content": "## Convert Article into HTML" }, "typeVersion": 1, "notes": "This stickyNote node performs automated tasks as part of the workflow." }, { "id": "7fad54e8-5a50-42da-b38d-08f6912615ab", "name": "gpt-4o-mini", "type": "n8n-nodes-base.noOp", "position": [ 1380, 1660 ], "parameters": { "model": "gpt-4o-mini-2024-07-18", "options": { "responseFormat": "text" } }, "credentials": { "openAiApi": { "id": "h597GY4ZJQD47RQd", "name": "OpenAi account" } }, "typeVersion": 1, "notes": "This lmChatOpenAi node performs automated tasks as part of the workflow." 
}, { "id": "5291869f-3ac6-4ce2-88f3-b572924b6082", "name": "gpt-4o-mini1", "type": "n8n-nodes-base.noOp", "position": [ 1560, 1040 ], "parameters": { "options": { "topP": 1, "timeout": 60000, "maxTokens": -1, "maxRetries": 2, "temperature": 0, "responseFormat": "text", "presencePenalty": 0, "frequencyPenalty": 0 } }, "credentials": { "openAiApi": { "id": "h597GY4ZJQD47RQd", "name": "OpenAi account" } }, "typeVersion": 1, "notes": "This lmChatOpenAi node performs automated tasks as part of the workflow." }, { "id": "a232f6ca-ad4c-40fa-a641-f0dd83c8f18a", "name": "Structured Output Parser1", "type": "n8n-nodes-base.noOp", "position": [ 640, 1660 ], "parameters": { "schemaType": "manual", "inputSchema": "{\n \"type\": \"object\",\n \"properties\": {\n \"article\": {\n \"type\": \"object\",\n \"required\": [\"category\", \"title\", \"metadata\", \"content\", \"hashtags\"],\n \"properties\": {\n \"category\": {\n \"type\": \"string\",\n \"description\": \"Article category\"\n },\n \"title\": {\n \"type\": \"string\",\n \"description\": \"Article title\"\n },\n \"metadata\": {\n \"type\": \"object\",\n \"properties\": {\n \"timePosted\": {\n \"type\": \"string\",\n \"description\": \"Time since article was posted\"\n },\n \"author\": {\n \"type\": \"string\",\n \"description\": \"Article author name\"\n },\n \"tag\": {\n \"type\": \"string\",\n \"description\": \"Article primary tag\"\n }\n },\n \"required\": [\"timePosted\", \"author\", \"tag\"]\n },\n \"content\": {\n \"type\": \"object\",\n \"properties\": {\n \"mainText\": {\n \"type\": \"string\",\n \"description\": \"Main article content\"\n },\n \"sections\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"title\": {\n \"type\": \"string\",\n \"description\": \"Section title\"\n },\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Section content\"\n },\n \"quote\": {\n \"type\": \"string\",\n \"description\": \"Blockquote text\"\n }\n },\n \"required\": [\"title\", 
\"text\", \"quote\"]\n }\n }\n },\n \"required\": [\"mainText\", \"sections\"]\n },\n \"hashtags\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"Article hashtags\"\n }\n }\n }\n }\n}" }, "typeVersion": 1.2, "notes": "This outputParserStructured node performs automated tasks as part of the workflow." }, { "id": "e7d1adac-88aa-4f76-92bf-bbac3aa6386a", "name": "gpt-4o-mini2", "type": "n8n-nodes-base.noOp", "position": [ 420, 1660 ], "parameters": { "options": { "topP": 1, "timeout": 60000, "maxTokens": -1, "maxRetries": 2, "temperature": 0, "responseFormat": "json_object", "presencePenalty": 0, "frequencyPenalty": 0 } }, "credentials": { "openAiApi": { "id": "h597GY4ZJQD47RQd", "name": "OpenAi account" } }, "typeVersion": 1, "notes": "This lmChatOpenAi node performs automated tasks as part of the workflow." }, { "id": "156e51db-03f7-4099-afe8-6f0361c5b497", "name": "Webhook", "type": "n8n-nodes-base.webhook", "position": [ 160, 860 ], "webhookId": "6a8e3ae7-02ae-4663-a27a-07df448550ab", "parameters": { "path": "pblog", "options": {}, "responseMode": "responseNode" }, "typeVersion": 2, "notes": "This webhook node performs automated tasks as part of the workflow." }, { "id": "6dd3eba7-e779-4e4a-960e-c5a7b6b3a929", "name": "Respond to Webhook", "type": "n8n-nodes-base.respondToWebhook", "position": [ 2820, 1480 ], "parameters": { "options": {}, "respondWith": "text", "responseBody": "={{ $json.text }}" }, "typeVersion": 1.1, "notes": "This respondToWebhook node performs automated tasks as part of the workflow." 
}, { "id": "27ee681e-4259-4323-b4fe-629f99cb33d0", "name": "Telegram", "type": "n8n-nodes-base.telegram", "position": [ 2320, 880 ], "parameters": { "text": "={{ $('Perplexity Topic Agent').item.json.output.slice(0, 300) }}", "chatId": "={{ $json.telegram_chat_id }}", "additionalFields": { "parse_mode": "HTML", "appendAttribution": false } }, "credentials": { "telegramApi": { "id": "BIE64nzfpGeesXUn", "name": "Telegram account" } }, "typeVersion": 1.2, "notes": "This telegram node performs automated tasks as part of the workflow." }, { "id": "f437d40c-2bf6-43e2-b77b-e5c2cdc35055", "name": "gpt-4o-mini5", "type": "n8n-nodes-base.noOp", "position": [ 2480, 1660 ], "parameters": { "options": { "topP": 1, "timeout": 60000, "maxTokens": -1, "maxRetries": 2, "temperature": 0, "responseFormat": "text", "presencePenalty": 0, "frequencyPenalty": 0 } }, "credentials": { "openAiApi": { "id": "h597GY4ZJQD47RQd", "name": "OpenAi account" } }, "typeVersion": 1, "notes": "This lmChatOpenAi node performs automated tasks as part of the workflow." }, { "id": "275bce4a-4252-41d4-bcba-174f0c51bf4a", "name": "Basic LLM Chain", "type": "n8n-nodes-base.noOp", "position": [ 2340, 1480 ], "parameters": { "text": "=Create a modern, responsive single-line HTML document. Convert any markdown to Tailwind CSS classes. Replace markdown lists with proper HTML list elements. Remove all newline characters while preserving tags in content. Enhance the layout with Tailwind CSS cards where appropriate. Use the following base structure, but improve the styling and responsiveness:\n\n\n\n\n
\n \n \nTime Posted: Just now\n\nAuthor: AI Research Team\n\nTag: AI Models\n\nDeepSeek V3 is a state-of-the-art AI model that leverages\n advanced architectures and techniques to deliver high performance across various applications.\n This overview covers its key concepts, practical applications, advantages, limitations, and best\n practices for implementation.\n\n1. Mixture-of-Experts (MoE) Architecture: DeepSeek V3\n employs a Mixture-of-Experts (MoE) architecture, which consists of multiple neural networks,\n each optimized for different tasks. This architecture allows for efficient processing by\n activating only a portion of the network for each task, reducing hardware costs.\n\n2. Parameters: The model boasts a total of 671\n billion\n parameters, with 37 billion active parameters for each token during processing. The addition\n of\n the Multi-Token Prediction (MTP) module increases the total parameters to 685 billion,\n making it\n significantly larger than other models like Meta's Llama 3.1 (405B).\n\n3. Multi-head Latent Attention (MLA): DeepSeek V3\n uses\n Multi-head Latent Attention (MLA) to extract key details from text multiple times, improving\n its\n accuracy.\n\n4. Multi-Token Prediction (MTP): The model utilizes\n Multi-Token Prediction to generate several tokens at once, speeding up inference and\n enabling\n speculative decoding.\n\n\n DeepSeek V3 employs a Mixture-of-Experts architecture for efficient processing.\n\n\n DeepSeek V3 democratizes AI access for smaller organizations.\n\n\n DeepSeek V3 processes information at 60 tokens per second.\n\n\n Deployment of DeepSeek V3 may be complex for small companies.\n\n\n Engage with the open-source community for better implementation.\n\nHashtags: #DeepSeekV3 #AI #MachineLearning #OpenSource\n\n