// NOTE(review): Client-side chat demo page for @browser-ai/web-llm.
// When the browser supports WebGPU the model runs fully in-browser via a
// WebLLM worker transport; otherwise it falls back to a server-side
// /api/chat route. This copy appears garbled: the original newlines were
// collapsed and JSX element tags look stripped throughout the render
// sections — verify against the original file in version control before
// acting on the markup below.
//
// NOTE(review): visible issues to fix once the markup is restored:
//   - useState(undefined) / useRef(null) lack type arguments
//     (useState<FileList | undefined>, useRef<HTMLInputElement | null>),
//     so `files` and `fileInputRef.current` member access cannot type-check.
//   - handleFileChange takes React.ChangeEvent without <HTMLInputElement>,
//     so `e.target.files` is untyped.
//   - copyMessageToClipboard takes `message: any`; the imported UIMessage /
//     WebLLMUIMessage types should be used instead.
//   - stray debug `console.log("here")` inside the transport useMemo.
//   - lastAssistantMessageIsCompleteWithToolCalls, UIMessage,
//     WebLLMUIMessage, ModelSelector (and possibly some icons) may be
//     unused — confirm once the stripped JSX is recovered.
"use client"; import { useChat } from "@ai-sdk/react"; import { Message, MessageAvatar, MessageContent, } from "@/components/ai-elements/message"; import { PromptInput, PromptInputButton, PromptInputModelSelect, PromptInputModelSelectContent, PromptInputModelSelectItem, PromptInputModelSelectTrigger, PromptInputModelSelectValue, PromptInputSubmit, PromptInputTextarea, PromptInputToolbar, PromptInputTools, } from "@/components/ai-elements/prompt-input"; import { Conversation, ConversationContent, ConversationScrollButton, } from "@/components/ai-elements/conversation"; import { Response } from "@/components/ai-elements/response"; import { Loader } from "@/components/ai-elements/loader"; import { Reasoning, ReasoningContent, ReasoningTrigger, } from "@/components/ai-elements/reasoning"; import { Button } from "@/components/ui/button"; import { PlusIcon, RefreshCcw, Copy, X, CheckIcon, XIcon } from "lucide-react"; import { useState, useEffect, useRef, useMemo } from "react"; import { ModeToggle } from "@/components/ui/mode-toggle"; import { doesBrowserSupportWebLLM, webLLM, WebLLMUIMessage, } from "@browser-ai/web-llm"; import { DefaultChatTransport, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, UIMessage, } from "ai"; import { toast } from "sonner"; import Image from "next/image"; import { Progress } from "@/components/ui/progress"; import { AudioFileDisplay } from "@/components/audio-file-display"; import { WebLLMChatTransport } from "@/app/web-llm/util/web-llm-chat-transport"; import { ModelSelector } from "@/components/model-selector"; import { Tool, ToolContent, ToolHeader, ToolInput, ToolOutput, } from "@/components/ai-elements/tool"; import { Confirmation, ConfirmationTitle, ConfirmationRequest, ConfirmationAccepted, ConfirmationRejected, ConfirmationActions, ConfirmationAction, } from "@/components/ai-elements/confirmation"; const MODELS = [ "Qwen3-0.6B-q4f16_1-MLC", "Qwen3-4B-q4f16_1-MLC", 
// MLC-prebuilt WebLLM model IDs; the array literal continues from the line above.
"gemma-2-2b-it-q4f16_1-MLC", "DeepSeek-R1-Distill-Qwen-7B-q4f16_1-MLC", ]; function WebLLMChat({ browserSupportsWebLLM, modelId, setModelId, }: { browserSupportsWebLLM: boolean; modelId: string; setModelId: (modelId: string) => void; }) { const [input, setInput] = useState(""); const [files, setFiles] = useState(undefined); const fileInputRef = useRef(null); const chatTransport = useMemo(() => { if (browserSupportsWebLLM) { console.log("here"); const model = webLLM(modelId, { worker: new Worker(new URL("./util/worker.ts", import.meta.url), { type: "module", }), }); return new WebLLMChatTransport(model); // Client side chat transport } return new DefaultChatTransport({ // server side (api route) api: "/api/chat", }); }, [modelId, browserSupportsWebLLM]); const { error, status, sendMessage, messages, regenerate, stop, addToolApprovalResponse, } = useChat({ transport: chatTransport, // use custom transport sendAutomaticallyWhen: lastAssistantMessageIsCompleteWithApprovalResponses, onError(error) { toast.error(error.message); }, }); const handleSubmit = (e: React.FormEvent) => { e.preventDefault(); if ((input.trim() || files) && status === "ready") { sendMessage({ text: input, files, }); setInput(""); setFiles(undefined); if (fileInputRef.current) { fileInputRef.current.value = ""; } } }; const handleFileChange = (e: React.ChangeEvent) => { if (e.target.files) { setFiles(e.target.files); } }; const removeFile = (indexToRemove: number) => { if (files) { const dt = new DataTransfer(); Array.from(files).forEach((file, index) => { if (index !== indexToRemove) { dt.items.add(file); } }); setFiles(dt.files); if (fileInputRef.current) { fileInputRef.current.files = dt.files; } } }; const copyMessageToClipboard = (message: any) => { const textContent = message.parts .filter((part: any) => part.type === "text") .map((part: any) => part.text) .join("\n"); navigator.clipboard.writeText(textContent); }; return (
// NOTE(review): the JSX from here down is garbled in this copy (element tags
// stripped). Structure inferred from the imports and surviving expressions —
// confirm against the original: a Conversation/ConversationContent wrapper
// containing an empty-state banner, the message list, loading/error states,
// then a PromptInput with textarea, file picker, model select and submit.
{messages.length === 0 && (
{browserSupportsWebLLM ? ( <>

@browser-ai/web-llm demo

Using WebLLM client-side AI model

Your device supports WebGPU

) : ( <>

Using server-side model

Your device doesn't support WebGPU

)}
{/* Message list: each UIMessage's parts are rendered in order below —
    download progress, file, reasoning, tool, and text parts. */}
)} {messages.map((m, index) => ( {/* Render parts in chronological order */} {m.parts.map((part, partIndex) => { // Handle download progress parts if (part.type === "data-modelDownloadProgress") { // Only show if message is not empty (hiding completed/cleared progress) if (!part.data.message) return null; // Don't show the entire div when actively streaming if (status === "ready") return null; return (
{part.data.message}
{part.data.status === "downloading" && part.data.progress !== undefined && ( )}
); } // Handle file parts if (part.type === "file") { if (part.mediaType?.startsWith("image/")) { return (
{part.filename
); } if (part.mediaType?.startsWith("audio/")) { return ( ); } // TODO: Handle other file types return null; } // Handle reasoning if (part.type === "reasoning") { return ( {part.text} ); } // Handle tool parts if (part.type.startsWith("tool-")) { // Type guard to ensure part is a ToolUIPart if (!("state" in part)) return null; // Handle tool states that need confirmation UI const needsConfirmation = part.state === "approval-requested" || part.state === "approval-responded" || part.state === "output-denied"; if (needsConfirmation && "approval" in part) { const toolName = part.type.replace("tool-", ""); return ( {"input" in part && part.input !== undefined && ( )} Allow {toolName} to execute with these parameters? Accepted Rejected addToolApprovalResponse({ id: part.approval!.id, approved: false, reason: "User denied tool execution", }) } variant="outline" > Reject addToolApprovalResponse({ id: part.approval!.id, approved: true, }) } variant="default" > Accept ); } // Map state values to the expected type const toolState = part.state === "streaming" || part.state === "done" ? "output-available" : part.state || "input-streaming"; // Format output as ReactNode const formatOutput = (output: unknown): React.ReactNode => { if (output === undefined || output === null) return undefined; if (typeof output === "string") return output; return (
                          {JSON.stringify(output, null, 2)}
                        
); }; return ( {"input" in part && part.input !== undefined && ( )} {("output" in part || "errorText" in part) && ( )} ); } // Handle text parts if (part.type === "text") { return {part.text}; } return null; })} {/* Loading state when tool approval was sent and we're waiting for response */} {(m.role === "assistant" || m.role === "system") && index === messages.length - 1 && status === "submitted" && m.parts.some( (part) => part.type.startsWith("tool-") && "state" in part && part.state === "approval-responded", ) && (
Thinking...
)} {/* Action buttons for assistant messages */} {(m.role === "assistant" || m.role === "system") && index === messages.length - 1 && status === "ready" && (
)}
))} {/* Loading state - only show as separate message if not after tool approval */} {status === "submitted" && !messages.some( (m, index) => index === messages.length - 1 && (m.role === "assistant" || m.role === "system") && m.parts.some( (part) => part.type.startsWith("tool-") && "state" in part && part.state === "approval-responded", ), ) && (
Thinking...
)} {/* Error state */} {error && (
An error occurred.
)}
{/* Prompt input: textarea bound to `input`, attach-file button (hidden
    input via fileInputRef), model select over MODELS, and submit; a
    file-preview strip for attached images/audio/other files follows. */}
setInput(e.target.value)} placeholder="What would you like to know? (Powered by WebLLM Worker)" minHeight={48} maxHeight={164} className="bg-accent dark:bg-card" /> fileInputRef.current?.click()}> {MODELS.map((model) => ( {model} ))} {/* File preview area - moved inside the form */} {files && files.length > 0 && (
{Array.from(files).map((file, index) => (
{file.type.startsWith("image/") ? (
{file.name}
) : file.type.startsWith("audio/") ? (
{file.name}
) : (
{file.name}
)}
))}
)}
{/* NOTE(review): end of WebLLMChat render. WebLLMChatPage (the default
    export) begins mid-next-line: it detects WebGPU support via
    doesBrowserSupportWebLLM() in an effect (null = still detecting) and
    owns the selected modelId passed down to WebLLMChat. */}
); } export default function WebLLMChatPage() { const [browserSupportsWebLLM, setBrowserSupportsWebLLM] = useState< boolean | null >(null); const [modelId, setModelId] = useState(MODELS[0]); useEffect(() => { setBrowserSupportsWebLLM(doesBrowserSupportWebLLM()); }, []); if (browserSupportsWebLLM === null) { return (
// Loading placeholder rendered while WebGPU detection is pending
// (markup stripped in this copy — presumably a Loader; confirm).
); } return ( ); }