{ "id": "2d995555-9a88-4480-bc48-960928d44208", "revision": 0, "last_node_id": 5275, "last_link_id": 14038, "nodes": [ { "id": 5232, "type": "EmptyImage", "pos": [ -2933.414241117287, 8358.807856915175 ], "size": [ 210, 130 ], "flags": {}, "order": 0, "mode": 0, "inputs": [], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 14027 ] } ], "properties": { "Node name for S&R": "EmptyImage", "cnr_id": "comfy-core", "ver": "0.5.1" }, "widgets_values": [ 1920, 1088, 1, 0 ] }, { "id": 5235, "type": "CM_FloatToInt", "pos": [ -2936.33335261475, 8791.840964228226 ], "size": [ 214.03773034527285, 58 ], "flags": {}, "order": 12, "mode": 0, "inputs": [ { "name": "a", "type": "FLOAT", "widget": { "name": "a" }, "link": 13980 } ], "outputs": [ { "name": "INT", "type": "INT", "links": [ 14029 ] } ], "title": "Frame Rate", "properties": { "Node name for S&R": "CM_FloatToInt" }, "widgets_values": [ 0 ] }, { "id": 5236, "type": "PrimitiveFloat", "pos": [ -2942.289721613317, 8659.704185822162 ], "size": [ 210, 58 ], "flags": {}, "order": 1, "mode": 0, "inputs": [], "outputs": [ { "name": "FLOAT", "type": "FLOAT", "links": [ 13980, 13982, 13983 ] } ], "title": "Frame Rate", "properties": { "Node name for S&R": "PrimitiveFloat", "cnr_id": "comfy-core", "ver": "0.5.1" }, "widgets_values": [ 24 ] }, { "id": 5220, "type": "CheckpointLoaderSimple", "pos": [ -3631.9850884317184, 8347.250691061321 ], "size": [ 407.1770500664943, 99.93503409866116 ], "flags": {}, "order": 2, "mode": 0, "inputs": [], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 13957, 13959 ] }, { "name": "CLIP", "type": "CLIP", "links": [] }, { "name": "VAE", "type": "VAE", "links": [ 14034 ] } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple", "cnr_id": "comfy-core", "ver": "0.3.56" }, "widgets_values": [ "ltx-2-19b-dev.safetensors" ] }, { "id": 5219, "type": "LTXVAudioVAELoader", "pos": [ -3637.1465575074335, 8804.875753924302 ], "size": [ 424.18031362607053, 58 ], "flags": {}, 
"order": 3, "mode": 0, "inputs": [], "outputs": [ { "name": "Audio VAE", "type": "VAE", "links": [ 14028 ] } ], "properties": { "Node name for S&R": "LTXVAudioVAELoader", "cnr_id": "comfy-core", "ver": "0.3.64" }, "widgets_values": [ "ltx-2-19b-dev.safetensors" ] }, { "id": 5075, "type": "SaveVideo", "pos": [ -1577.9547757855387, 8313.151531938616 ], "size": [ 670.0771562911698, 584.6954036515563 ], "flags": {}, "order": 22, "mode": 0, "inputs": [ { "name": "video", "type": "VIDEO", "link": 13979 } ], "outputs": [], "properties": { "cnr_id": "comfy-core", "ver": "0.5.1" }, "widgets_values": [ "video/LTX-2", "auto", "auto" ] }, { "id": 5229, "type": "LTXVConditioning", "pos": [ -2306.387910249023, 8740.929135810758 ], "size": [ 210, 94 ], "flags": {}, "order": 19, "mode": 0, "inputs": [ { "name": "positive", "type": "CONDITIONING", "link": 13967 }, { "name": "negative", "type": "CONDITIONING", "link": 13968 }, { "name": "frame_rate", "type": "FLOAT", "widget": { "name": "frame_rate" }, "link": 13983 } ], "outputs": [ { "name": "positive", "type": "CONDITIONING", "slot_index": 0, "links": [ 14031 ] }, { "name": "negative", "type": "CONDITIONING", "slot_index": 1, "links": [ 14032 ] } ], "properties": { "Node name for S&R": "LTXVConditioning", "cnr_id": "comfy-core", "ver": "0.3.28" }, "widgets_values": [ 25 ] }, { "id": 5228, "type": "CLIPTextEncode", "pos": [ -2307.3357609769005, 8688.298695601421 ], "size": [ 307.2346496582031, 47 ], "flags": { "collapsed": true }, "order": 18, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 13965 }, { "name": "text", "type": "STRING", "widget": { "name": "text" }, "link": 13966 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "slot_index": 0, "links": [ 13967 ] } ], "title": "Enhanced Prompt (Positive)", "properties": { "Node name for S&R": "CLIPTextEncode", "cnr_id": "comfy-core", "ver": "0.3.28" }, "widgets_values": [ "" ], "color": "#232", "bgcolor": "#353" }, { "id": 5221, "type": 
"LoraLoaderModelOnly", "pos": [ -2435.0393201037195, 8163.778513700478 ], "size": [ 210, 82 ], "flags": {}, "order": 17, "mode": 4, "inputs": [ { "name": "model", "type": "MODEL", "link": 13958 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 14030 ] } ], "properties": { "Node name for S&R": "LoraLoaderModelOnly", "cnr_id": "comfy-core", "ver": "0.3.68" }, "widgets_values": [ "your_camera_lora.safetensors", 1 ] }, { "id": 5222, "type": "LoraLoaderModelOnly", "pos": [ -2668.636981453724, 8162.466693154896 ], "size": [ 220.14623500416474, 90.32621827319144 ], "flags": {}, "order": 14, "mode": 4, "inputs": [ { "name": "model", "type": "MODEL", "link": 13959 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 14033 ] } ], "properties": { "Node name for S&R": "LoraLoaderModelOnly", "cnr_id": "comfy-core", "ver": "0.3.68" }, "widgets_values": [ "your_camera_lora.safetensors", 1 ] }, { "id": 5226, "type": "CLIPTextEncode", "pos": [ -2655.5275979204835, 8707.696034280034 ], "size": [ 313.9271213812535, 160.84366176012554 ], "flags": { "collapsed": false }, "order": 15, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 13963 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "slot_index": 0, "links": [ 13968 ] } ], "properties": { "Node name for S&R": "CLIPTextEncode", "cnr_id": "comfy-core", "ver": "0.3.28" }, "widgets_values": [ "blurry, low quality, still frame, frames, watermark, overlay, titles, has blurbox, has subtitles" ], "color": "#322", "bgcolor": "#533" }, { "id": 5270, "type": "LatentUpscaleModelLoader", "pos": [ -3625.8131648839753, 8493.432696308342 ], "size": [ 396.1660435338662, 90.46574270449128 ], "flags": {}, "order": 4, "mode": 0, "inputs": [], "outputs": [ { "name": "LATENT_UPSCALE_MODEL", "type": "LATENT_UPSCALE_MODEL", "links": [ 14038 ] } ], "properties": { "Node name for S&R": "LatentUpscaleModelLoader", "cnr_id": "comfy-core", "ver": "0.7.0" }, "widgets_values": [ 
"ltx-2-spatial-upscaler-x2-1.0.safetensors" ] }, { "id": 5273, "type": "MarkdownNote", "pos": [ -2943.241504237536, 8161.259137854798 ], "size": [ 240.16187366614236, 98.41687371924309 ], "flags": { "collapsed": false }, "order": 5, "mode": 0, "inputs": [], "outputs": [], "title": "Video Size", "properties": {}, "widgets_values": [ "Width & height settings must be divisible by 64. \nFrame count must be a multiple of 8, plus 1 (i.e. 8n + 1, such as 97 or 121). \n\n\nRunning with invalid parameters **will not cause errors**. Instead, the flow will silently choose the closest valid parameters. " ], "color": "#432", "bgcolor": "#653" }, { "id": 5274, "type": "MarkdownNote", "pos": [ -2679.7373441625773, 7861.17563988318 ], "size": [ 476.19110164532435, 207.4316250893462 ], "flags": { "collapsed": false }, "order": 6, "mode": 0, "inputs": [], "outputs": [], "title": "LoRA", "properties": {}, "widgets_values": [ "# Using LoRAs\n**Ctrl + B to enable**\n\nFor the camera LoRA we recommend setting strength = 1\n\nYou can use LTX [Ready-to-use camera LoRAs](https://huggingface.co/collections/Lightricks/ltx-2)\n\n\n\n\nUse a second LoRA only when its specific details are important. \n\n## Training\nWe have made available a full trainer to help you train your own LoRA. You can [find it here](https://github.com/Lightricks/LTX-2)." ], "color": "#432", "bgcolor": "#653" }, { "id": 5227, "type": "LTXVGemmaEnhancePrompt", "pos": [ -2302.473950325332, 8630.055358449259 ], "size": [ 369.82634110464005, 194 ], "flags": { "collapsed": true }, "order": 16, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 13964 }, { "name": "image", "shape": 7, "type": "IMAGE", "link": null }, { "name": "prompt", "type": "STRING", "widget": { "name": "prompt" }, "link": 13984 } ], "outputs": [ { "name": "enhanced_prompt", "type": "STRING", "links": [ 13966 ] } ], "title": "Enhancer", "properties": { "Node name for S&R": "LTXVGemmaEnhancePrompt" }, "widgets_values": [ "", "You are a Creative Assistant. 
Given a user's raw input prompt describing a scene or concept, expand it into a detailed video generation prompt with specific visuals and integrated audio to guide a text-to-video model.\n\n#### Guidelines\n- Strictly follow all aspects of the user's raw input: include every element requested (style, visuals, motions, actions, camera movement, audio).\n - If the input is vague, invent concrete details: lighting, textures, materials, scene settings, etc.\n - For characters: describe gender, clothing, hair, expressions. DO NOT invent unrequested characters.\n- Use active language: present-progressive verbs (\"is walking,\" \"speaking\"). If no action specified, describe natural movements.\n- Maintain chronological flow: use temporal connectors (\"as,\" \"then,\" \"while\").\n- Audio layer: Describe complete soundscape (background audio, ambient sounds, SFX, speech/music when requested). Integrate sounds chronologically alongside actions. Be specific (e.g., \"soft footsteps on tile\"), not vague (e.g., \"ambient sound is present\").\n- Speech (only when requested): \n - For ANY speech-related input (talking, conversation, singing, etc.), ALWAYS include exact words in quotes with voice characteristics (e.g., \"The man says in an excited voice: 'You won't believe what I just saw!'\").\n - Specify language if not English and accent if relevant.\n- Style: Include visual style at the beginning: \"Style: