[
{
"order": "a",
"md5sum": "a54c08a7b90e4029a8c2ab5b5dc936aa",
"name": "Reasoner v1",
"filename": "qwen2.5-coder-7b-instruct-q4_0.gguf",
"filesize": "4431390720",
"requires": "3.6.0",
"ramrequired": "8",
"parameters": "8 billion",
"quant": "q4_0",
"type": "qwen2",
"description": "
",
"url": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/qwen2.5-coder-7b-instruct-q4_0.gguf",
"chatTemplate": "{{- '<|im_start|>system\\n' }}\n{% if toolList|length > 0 %}You have access to the following functions:\n{% for tool in toolList %}\nUse the function '{{tool.function}}' to: '{{tool.description}}'\n{% if tool.parameters|length > 0 %}\nparameters:\n{% for info in tool.parameters %}\n {{info.name}}:\n type: {{info.type}}\n description: {{info.description}}\n required: {{info.required}}\n{% endfor %}\n{% endif %}\n# Tool Instructions\nIf you CHOOSE to call this function ONLY reply with the following format:\n'{{tool.symbolicFormat}}'\nHere is an example. If the user says, '{{tool.examplePrompt}}', then you reply\n'{{tool.exampleCall}}'\nAfter the result you might reply with, '{{tool.exampleReply}}'\n{% endfor %}\nYou MUST include both the start and end tags when you use a function.\n\nYou are a helpful AI assistant who uses the functions to break down, analyze, perform, and verify complex reasoning tasks. You SHOULD try to verify your answers using the functions where possible.\n{% endif %}\n{{- '<|im_end|>\\n' }}\n{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{% endfor %}\n{% if add_generation_prompt %}\n{{ '<|im_start|>assistant\\n' }}\n{% endif %}\n",
"systemPrompt": ""
},
{
"order": "aa",
"md5sum": "c87ad09e1e4c8f9c35a5fcef52b6f1c9",
"name": "Llama 3 8B Instruct",
"filename": "Meta-Llama-3-8B-Instruct.Q4_0.gguf",
"filesize": "4661724384",
"requires": "2.7.1",
"ramrequired": "8",
"parameters": "8 billion",
"quant": "q4_0",
"type": "LLaMA3",
"description": "",
"url": "https://gpt4all.io/models/gguf/Meta-Llama-3-8B-Instruct.Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2<|eot_id|>",
"systemPrompt": "",
"chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n {{- content }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
},
{
"order": "aa1",
"sha256sum": "5cd4ee65211770f1d99b4f6f4951780b9ef40e29314bd6542bb5bd0ad0bc29d1",
"name": "DeepSeek-R1-Distill-Qwen-7B",
"filename": "DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf",
"filesize": "4444121056",
"requires": "3.8.0",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "deepseek",
"description": "The official Qwen2.5-Math-7B distillation of DeepSeek-R1.
- License: MIT
- No restrictions on commercial use
- #reasoning
",
"url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf",
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|User|>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*', '') %}\n {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<|Assistant|>' }}\n{%- endif %}"
},
{
"order": "aa2",
"sha256sum": "906b3382f2680f4ce845459b4a122e904002b075238080307586bcffcde49eef",
"name": "DeepSeek-R1-Distill-Qwen-14B",
"filename": "DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf",
"filesize": "8544267680",
"requires": "3.8.0",
"ramrequired": "16",
"parameters": "14 billion",
"quant": "q4_0",
"type": "deepseek",
"description": "The official Qwen2.5-14B distillation of DeepSeek-R1.
- License: MIT
- No restrictions on commercial use
- #reasoning
",
"url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf",
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|User|>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*', '') %}\n {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<|Assistant|>' }}\n{%- endif %}"
},
{
"order": "aa3",
"sha256sum": "0eb93e436ac8beec18aceb958c120d282cb2cf5451b23185e7be268fe9d375cc",
"name": "DeepSeek-R1-Distill-Llama-8B",
"filename": "DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf",
"filesize": "4675894112",
"requires": "3.8.0",
"ramrequired": "8",
"parameters": "8 billion",
"quant": "q4_0",
"type": "deepseek",
"description": "The official Llama-3.1-8B distillation of DeepSeek-R1.
- License: MIT
- No restrictions on commercial use
- #reasoning
",
"url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-8B-GGUF/resolve/main/DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf",
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|User|>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*', '') %}\n {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<|Assistant|>' }}\n{%- endif %}"
},
{
"order": "aa4",
"sha256sum": "b3af887d0a015b39fab2395e4faf682c1a81a6a3fd09a43f0d4292f7d94bf4d0",
"name": "DeepSeek-R1-Distill-Qwen-1.5B",
"filename": "DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf",
"filesize": "1068807776",
"requires": "3.8.0",
"ramrequired": "3",
"parameters": "1.5 billion",
"quant": "q4_0",
"type": "deepseek",
"description": "The official Qwen2.5-Math-1.5B distillation of DeepSeek-R1.
- License: MIT
- No restrictions on commercial use
- #reasoning
",
"url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf",
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|User|>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*', '') %}\n {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<|Assistant|>' }}\n{%- endif %}"
},
{
"order": "b",
"md5sum": "27b44e8ae1817525164ddf4f8dae8af4",
"name": "Llama 3.2 3B Instruct",
"filename": "Llama-3.2-3B-Instruct-Q4_0.gguf",
"filesize": "1921909280",
"requires": "3.4.0",
"ramrequired": "4",
"parameters": "3 billion",
"quant": "q4_0",
"type": "LLaMA3",
"description": "",
"url": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
"systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
"chatTemplate": "{{- bos_token }}\n{%- set date_string = strftime_now('%d %b %Y') %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] | trim %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set system_message = '' %}\n {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
},
{
"order": "c",
"md5sum": "48ff0243978606fdba19d899b77802fc",
"name": "Llama 3.2 1B Instruct",
"filename": "Llama-3.2-1B-Instruct-Q4_0.gguf",
"filesize": "773025920",
"requires": "3.4.0",
"ramrequired": "2",
"parameters": "1 billion",
"quant": "q4_0",
"type": "LLaMA3",
"description": "",
"url": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
"systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
"chatTemplate": "{{- bos_token }}\n{%- set date_string = strftime_now('%d %b %Y') %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] | trim %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set system_message = '' %}\n {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
},
{
"order": "d",
"md5sum": "a5f6b4eabd3992da4d7fb7f020f921eb",
"name": "Nous Hermes 2 Mistral DPO",
"filename": "Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",
"filesize": "4108928000",
"requires": "2.7.1",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Mistral",
"description": "Good overall fast chat model
- Fast responses
- Chat based model
- Accepts system prompts in ChatML format
- Trained by Mistral AI
- Finetuned by Nous Research on the OpenHermes-2.5 dataset
- Licensed for commercial use
",
"url": "https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO-GGUF/resolve/main/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
"systemPrompt": "",
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
},
{
"order": "e",
"md5sum": "97463be739b50525df56d33b26b00852",
"name": "Mistral Instruct",
"filename": "mistral-7b-instruct-v0.1.Q4_0.gguf",
"filesize": "4108916384",
"requires": "2.5.0",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Mistral",
"systemPrompt": "",
"description": "Strong overall fast instruction following model
- Fast responses
- Trained by Mistral AI
- Uncensored
- Licensed for commercial use
",
"url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf",
"promptTemplate": "[INST] %1 [/INST]",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if (message['role'] == 'user') != ((loop.index0 - loop_start) % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.index0 == loop_start and loop_start == 1 %}\n {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + eos_token }}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}"
},
{
"order": "f",
"md5sum": "8a9c75bcd8a66b7693f158ec96924eeb",
"name": "Llama 3.1 8B Instruct 128k",
"filename": "Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf",
"filesize": "4661212096",
"requires": "3.1.1",
"ramrequired": "8",
"parameters": "8 billion",
"quant": "q4_0",
"type": "LLaMA3",
"description": "- For advanced users only. Not recommended for use on Windows or Linux without selecting CUDA due to speed issues.
- Fast responses
- Chat based model
- Large context size of 128k
- Accepts agentic system prompts in Llama 3.1 format
- Trained by Meta
- License: Meta Llama 3.1 Community License
",
"url": "https://huggingface.co/GPT4All-Community/Meta-Llama-3.1-8B-Instruct-128k/resolve/main/Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
"systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
"chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n {%- if loop.index0 == 0 %}\n {%- set content = bos_token + content %}\n {%- endif %}\n {{- content }}\n{%- endfor %}\n{{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}"
},
{
"order": "g",
"md5sum": "f692417a22405d80573ac10cb0cd6c6a",
"name": "Mistral OpenOrca",
"filename": "mistral-7b-openorca.gguf2.Q4_0.gguf",
"filesize": "4108928128",
"requires": "2.7.1",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Mistral",
"description": "Strong overall fast chat model
- Fast responses
- Chat based model
- Trained by Mistral AI
- Finetuned on OpenOrca dataset curated via Nomic Atlas
- Licensed for commercial use
",
"url": "https://gpt4all.io/models/gguf/mistral-7b-openorca.gguf2.Q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
"systemPrompt": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI.\n<|im_end|>\n",
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
},
{
"order": "h",
"md5sum": "c4c78adf744d6a20f05c8751e3961b84",
"name": "GPT4All Falcon",
"filename": "gpt4all-falcon-newbpe-q4_0.gguf",
"filesize": "4210994112",
"requires": "2.6.0",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Falcon",
"systemPrompt": "",
"description": "Very fast model with good quality
- Fastest responses
- Instruction based
- Trained by TII
- Finetuned by Nomic AI
- Licensed for commercial use
",
"url": "https://gpt4all.io/models/gguf/gpt4all-falcon-newbpe-q4_0.gguf",
"promptTemplate": "### Instruction:\n%1\n\n### Response:\n",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set loop_start = 1 %}\n {{- messages[0]['content'] + '\\n\\n' }}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if message['role'] == 'user' %}\n {{- '### User: ' + message['content'] + '\\n\\n' }}\n {%- elif message['role'] == 'assistant' %}\n {{- '### Assistant: ' + message['content'] + '\\n\\n' }}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '### Assistant:' }}\n{%- endif %}"
},
{
"order": "i",
"md5sum": "00c8593ba57f5240f59662367b3ed4a5",
"name": "Orca 2 (Medium)",
"filename": "orca-2-7b.Q4_0.gguf",
"filesize": "3825824192",
"requires": "2.5.2",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "LLaMA2",
"systemPrompt": "",
"description": "- Instruction based
- Trained by Microsoft
- Cannot be used commercially
",
"url": "https://gpt4all.io/models/gguf/orca-2-7b.Q4_0.gguf",
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
},
{
"order": "j",
"md5sum": "3c0d63c4689b9af7baa82469a6f51a19",
"name": "Orca 2 (Full)",
"filename": "orca-2-13b.Q4_0.gguf",
"filesize": "7365856064",
"requires": "2.5.2",
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA2",
"systemPrompt": "",
"description": "- Instruction based
- Trained by Microsoft
- Cannot be used commercially
",
"url": "https://gpt4all.io/models/gguf/orca-2-13b.Q4_0.gguf",
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
},
{
"order": "k",
"md5sum": "5aff90007499bce5c64b1c0760c0b186",
"name": "Wizard v1.2",
"filename": "wizardlm-13b-v1.2.Q4_0.gguf",
"filesize": "7365834624",
"requires": "2.5.0",
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA2",
"systemPrompt": "",
"description": "Strong overall larger model
- Instruction based
- Gives very long responses
- Finetuned with only 1k of high-quality data
- Trained by Microsoft and Peking University
- Cannot be used commercially
",
"url": "https://gpt4all.io/models/gguf/wizardlm-13b-v1.2.Q4_0.gguf",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set loop_start = 1 %}\n {{- messages[0]['content'] + ' ' }}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in loop_messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if message['role'] == 'user' %}\n {{- 'USER: ' + message['content'] }}\n {%- elif message['role'] == 'assistant' %}\n {{- 'ASSISTANT: ' + message['content'] }}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n {%- if (loop.index0 - loop_start) % 2 == 0 %}\n {{- ' ' }}\n {%- else %}\n {{- eos_token }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- 'ASSISTANT:' }}\n{%- endif %}",
"systemMessage": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."
},
{
"order": "l",
"md5sum": "31b47b4e8c1816b62684ac3ca373f9e1",
"name": "Ghost 7B v0.9.1",
"filename": "ghost-7b-v0.9.1-Q4_0.gguf",
"filesize": "4108916960",
"requires": "2.7.1",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Mistral",
"description": "Ghost 7B v0.9.1 fast, powerful and smooth for Vietnamese and English languages.",
"url": "https://huggingface.co/lamhieu/ghost-7b-v0.9.1-gguf/resolve/main/ghost-7b-v0.9.1-Q4_0.gguf",
"promptTemplate": "<|user|>\n%1\n<|assistant|>\n%2\n",
"systemPrompt": "<|system|>\nYou are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior.\n",
"chatTemplate": "{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|user|>\\n' + message['content'] + eos_token }}\n {%- elif message['role'] == 'system' %}\n {{- '<|system|>\\n' + message['content'] + eos_token }}\n {%- elif message['role'] == 'assistant' %}\n {{- '<|assistant|>\\n' + message['content'] + eos_token }}\n {%- endif %}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|assistant|>' }}\n {%- endif %}\n{%- endfor %}",
"systemMessage": "You are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior."
},
{
"order": "m",
"md5sum": "3d12810391d04d1153b692626c0c6e16",
"name": "Hermes",
"filename": "nous-hermes-llama2-13b.Q4_0.gguf",
"filesize": "7366062080",
"requires": "2.5.0",
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA2",
"systemPrompt": "",
"description": "Extremely good model
- Instruction based
- Gives long responses
- Curated with 300,000 uncensored instructions
- Trained by Nous Research
- Cannot be used commercially
",
"url": "https://gpt4all.io/models/gguf/nous-hermes-llama2-13b.Q4_0.gguf",
"promptTemplate": "### Instruction:\n%1\n\n### Response:\n",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set loop_start = 1 %}\n {{- messages[0]['content'] + '\\n\\n' }}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if message['role'] == 'user' %}\n {{- '### Instruction:\\n' + message['content'] + '\\n\\n' }}\n {%- elif message['role'] == 'assistant' %}\n {{- '### Response:\\n' + message['content'] + '\\n\\n' }}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '### Instruction:\\n' }}\n{%- endif %}"
},
{
"order": "n",
"md5sum": "40388eb2f8d16bb5d08c96fdfaac6b2c",
"name": "Snoozy",
"filename": "gpt4all-13b-snoozy-q4_0.gguf",
"filesize": "7365834624",
"requires": "2.5.0",
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
"systemPrompt": "",
"description": "Very good overall model
- Instruction based
- Based on the same dataset as Groovy
- Slower than Groovy, with higher quality responses
- Trained by Nomic AI
- Cannot be used commercially
",
"url": "https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set loop_start = 1 %}\n {{- messages[0]['content'] + '\\n\\n' }}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if message['role'] == 'user' %}\n {{- '### Instruction:\\n' + message['content'] + '\\n\\n' }}\n {%- elif message['role'] == 'assistant' %}\n {{- '### Response:\\n' + message['content'] + '\\n\\n' }}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '### Response:\\n' }}\n{%- endif %}",
"systemMessage": "Below is an instruction that describes a task. Write a response that appropriately completes the request."
},
{
"order": "o",
"md5sum": "15dcb4d7ea6de322756449c11a0b7545",
"name": "MPT Chat",
"filename": "mpt-7b-chat-newbpe-q4_0.gguf",
"filesize": "3912373472",
"requires": "2.7.1",
"removedIn": "2.7.3",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "MPT",
"description": "Good model with novel architecture
- Fast responses
- Chat based
- Trained by Mosaic ML
- Cannot be used commercially
",
"url": "https://gpt4all.io/models/gguf/mpt-7b-chat-newbpe-q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
"systemPrompt": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>\n",
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
},
{
"order": "p",
"md5sum": "ab5d8e8a2f79365ea803c1f1d0aa749d",
"name": "MPT Chat",
"filename": "mpt-7b-chat.gguf4.Q4_0.gguf",
"filesize": "3796178112",
"requires": "2.7.3",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "MPT",
"description": "Good model with novel architecture
- Fast responses
- Chat based
- Trained by Mosaic ML
- Cannot be used commercially
",
"url": "https://gpt4all.io/models/gguf/mpt-7b-chat.gguf4.Q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
"systemPrompt": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>\n",
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
},
{
"order": "q",
"md5sum": "f8347badde9bfc2efbe89124d78ddaf5",
"name": "Phi-3 Mini Instruct",
"filename": "Phi-3-mini-4k-instruct.Q4_0.gguf",
"filesize": "2176181568",
"requires": "2.7.1",
"ramrequired": "4",
"parameters": "4 billion",
"quant": "q4_0",
"type": "Phi-3",
"description": "- Very fast responses
- Chat based model
- Accepts system prompts in Phi-3 format
- Trained by Microsoft
- License: MIT
- No restrictions on commercial use
",
"url": "https://gpt4all.io/models/gguf/Phi-3-mini-4k-instruct.Q4_0.gguf",
"promptTemplate": "<|user|>\n%1<|end|>\n<|assistant|>\n%2<|end|>\n",
"systemPrompt": "",
"chatTemplate": "{{- bos_token }}\n{%- for message in messages %}\n {{- '<|' + message['role'] + '|>\\n' + message['content'] + '<|end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|assistant|>\\n' }}\n{%- else %}\n {{- eos_token }}\n{%- endif %}"
},
{
"order": "r",
"md5sum": "0e769317b90ac30d6e09486d61fefa26",
"name": "Mini Orca (Small)",
"filename": "orca-mini-3b-gguf2-q4_0.gguf",
"filesize": "1979946720",
"requires": "2.5.0",
"ramrequired": "4",
"parameters": "3 billion",
"quant": "q4_0",
"type": "OpenLLaMa",
"description": "Small version of new model with novel dataset
- Very fast responses
- Instruction based
- Explain tuned datasets
- Orca Research Paper dataset construction approaches
- Cannot be used commercially
",
"url": "https://gpt4all.io/models/gguf/orca-mini-3b-gguf2-q4_0.gguf",
"promptTemplate": "### User:\n%1\n\n### Response:\n",
"systemPrompt": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set loop_start = 1 %}\n {{- '### System:\\n' + messages[0]['content'] + '\\n\\n' }}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if message['role'] == 'user' %}\n {{- '### User:\\n' + message['content'] + '\\n\\n' }}\n {%- elif message['role'] == 'assistant' %}\n {{- '### Response:\\n' + message['content'] + '\\n\\n' }}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '### Response:\\n' }}\n{%- endif %}"
},
{
"order": "s",
"md5sum": "c232f17e09bca4b7ee0b5b1f4107c01e",
"disableGUI": "true",
"name": "Replit",
"filename": "replit-code-v1_5-3b-newbpe-q4_0.gguf",
"filesize": "1953055104",
"requires": "2.6.0",
"ramrequired": "4",
"parameters": "3 billion",
"quant": "q4_0",
"type": "Replit",
"systemPrompt": "",
"promptTemplate": "%1",
"description": "Trained on subset of the Stack
- Code completion based
- Licensed for commercial use
- WARNING: Not available for chat GUI
",
"url": "https://gpt4all.io/models/gguf/replit-code-v1_5-3b-newbpe-q4_0.gguf",
"chatTemplate": null
},
{
"order": "t",
"md5sum": "70841751ccd95526d3dcfa829e11cd4c",
"disableGUI": "true",
"name": "Starcoder",
"filename": "starcoder-newbpe-q4_0.gguf",
"filesize": "8987411904",
"requires": "2.6.0",
"ramrequired": "4",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Starcoder",
"systemPrompt": "",
"promptTemplate": "%1",
"description": "Trained on subset of the Stack
- Code completion based
- WARNING: Not available for chat GUI
",
"url": "https://gpt4all.io/models/gguf/starcoder-newbpe-q4_0.gguf",
"chatTemplate": null
},
{
"order": "u",
"md5sum": "e973dd26f0ffa6e46783feaea8f08c83",
"disableGUI": "true",
"name": "Rift coder",
"filename": "rift-coder-v0-7b-q4_0.gguf",
"filesize": "3825903776",
"requires": "2.5.0",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "LLaMA",
"systemPrompt": "",
"promptTemplate": "%1",
"description": "Trained on collection of Python and TypeScript
- Code completion based
- WARNING: Not available for chat GUI
",
"url": "https://gpt4all.io/models/gguf/rift-coder-v0-7b-q4_0.gguf",
"chatTemplate": null
},
{
"order": "v",
"md5sum": "e479e6f38b59afc51a470d1953a6bfc7",
"disableGUI": "true",
"name": "SBert",
"filename": "all-MiniLM-L6-v2-f16.gguf",
"filesize": "45887744",
"requires": "2.5.0",
"removedIn": "2.7.4",
"ramrequired": "1",
"parameters": "40 million",
"quant": "f16",
"type": "Bert",
"embeddingModel": true,
"systemPrompt": "",
"description": "LocalDocs text embeddings model
- For use with LocalDocs feature
- Used for retrieval augmented generation (RAG)",
"url": "https://gpt4all.io/models/gguf/all-MiniLM-L6-v2-f16.gguf",
"chatTemplate": null
},
{
"order": "w",
"md5sum": "dd90e2cb7f8e9316ac3796cece9883b5",
"name": "SBert",
"filename": "all-MiniLM-L6-v2.gguf2.f16.gguf",
"filesize": "45949216",
"requires": "2.7.4",
"removedIn": "3.0.0",
"ramrequired": "1",
"parameters": "40 million",
"quant": "f16",
"type": "Bert",
"embeddingModel": true,
"description": "LocalDocs text embeddings model
- For use with LocalDocs feature
- Used for retrieval augmented generation (RAG)",
"url": "https://gpt4all.io/models/gguf/all-MiniLM-L6-v2.gguf2.f16.gguf",
"chatTemplate": null
},
{
"order": "x",
"md5sum": "919de4dd6f25351bcb0223790db1932d",
"name": "EM German Mistral",
"filename": "em_german_mistral_v01.Q4_0.gguf",
"filesize": "4108916352",
"requires": "2.5.0",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Mistral",
"description": "Mistral-based model for German-language applications
- Fast responses
- Chat based model
- Trained by ellamind
- Finetuned on German instruction and chat data
- Licensed for commercial use
",
"url": "https://huggingface.co/TheBloke/em_german_mistral_v01-GGUF/resolve/main/em_german_mistral_v01.Q4_0.gguf",
"promptTemplate": "USER: %1 ASSISTANT: ",
"systemPrompt": "Du bist ein hilfreicher Assistent. ",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set loop_start = 1 %}\n {{- messages[0]['content'] }}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if not loop.first %}\n {{- ' ' }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {{- 'USER: ' + message['content'] }}\n {%- elif message['role'] == 'assistant' %}\n {{- 'ASSISTANT: ' + message['content'] }}\n {%- else %}\n {{- raise_exception('After the optional system message, conversation roles must be either user or assistant.') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {%- if messages %}\n {{- ' ' }}\n {%- endif %}\n {{- 'ASSISTANT:' }}\n{%- endif %}",
"systemMessage": "Du bist ein hilfreicher Assistent."
},
{
"order": "y",
"md5sum": "60ea031126f82db8ddbbfecc668315d2",
"disableGUI": "true",
"name": "Nomic Embed Text v1",
"filename": "nomic-embed-text-v1.f16.gguf",
"filesize": "274290560",
"requires": "2.7.4",
"ramrequired": "1",
"parameters": "137 million",
"quant": "f16",
"type": "Bert",
"embeddingModel": true,
"systemPrompt": "",
"description": "nomic-embed-text-v1",
"url": "https://gpt4all.io/models/gguf/nomic-embed-text-v1.f16.gguf",
"chatTemplate": null
},
{
"order": "z",
"md5sum": "a5401e7f7e46ed9fcaed5b60a281d547",
"disableGUI": "true",
"name": "Nomic Embed Text v1.5",
"filename": "nomic-embed-text-v1.5.f16.gguf",
"filesize": "274290560",
"requires": "2.7.4",
"ramrequired": "1",
"parameters": "137 million",
"quant": "f16",
"type": "Bert",
"embeddingModel": true,
"systemPrompt": "",
"description": "nomic-embed-text-v1.5",
"url": "https://gpt4all.io/models/gguf/nomic-embed-text-v1.5.f16.gguf",
"chatTemplate": null
},
{
"order": "zzz",
"md5sum": "a8c5a783105f87a481543d4ed7d7586d",
"name": "Qwen2-1.5B-Instruct",
"filename": "qwen2-1_5b-instruct-q4_0.gguf",
"filesize": "937532800",
"requires": "3.0",
"ramrequired": "3",
"parameters": "1.5 billion",
"quant": "q4_0",
"type": "qwen2",
"description": "- Very fast responses
- Instruction based model
- Usage of LocalDocs (RAG): Highly recommended
- Supports context length of up to 32768
- Trained and finetuned by Qwen (Alibaba Cloud)
- License: Apache 2.0
",
"url": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct-GGUF/resolve/main/qwen2-1_5b-instruct-q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
"systemPrompt": "<|im_start|>system\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.<|im_end|>\n",
"chatTemplate": "{%- for message in messages %}\n {%- if loop.first and messages[0]['role'] != 'system' %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
}
]