/**
 * Simple AI SDK Streaming Test
 * Tests the core streaming functionality that users actually need
 */
import { ollama } from 'ai-sdk-ollama';
import { streamText, tool } from 'ai';
import { z } from 'zod';
import { MODELS } from './model';

/**
 * Streams a short completion and verifies that text chunks actually arrive.
 *
 * @returns {Promise<boolean>} true when at least one character was streamed.
 */
async function testBasicStreaming() {
  console.log('🌊 Basic Text Streaming Test\n');

  const { textStream } = await streamText({
    model: ollama(MODELS.LLAMA_3_2),
    prompt: 'Write a short poem about programming. Keep it under 50 words.',
    maxOutputTokens: 100,
  });

  console.log('Streaming response:');
  console.log('-'.repeat(40));

  let fullText = '';
  for await (const chunk of textStream) {
    process.stdout.write(chunk);
    fullText += chunk;
  }

  console.log('\n' + '-'.repeat(40));
  console.log(`āœ… Streaming completed! (${fullText.length} characters)`);

  return fullText.length > 0;
}

/**
 * Streams a counting prompt while writing chunks to stdout as they arrive,
 * keeping a rough word tally (split-on-space per chunk, so approximate).
 *
 * @returns {Promise<boolean>} always true — success is completing the stream.
 */
async function testStreamingWithRealTimeDisplay() {
  console.log('\n\nšŸ“ŗ Real-time Streaming Display\n');

  const { textStream } = await streamText({
    // Use the shared model constant so every test targets the same model.
    model: ollama(MODELS.LLAMA_3_2),
    prompt: 'Count from 1 to 10 with a word description for each number.',
    maxOutputTokens: 200,
  });

  console.log('Real-time counting:');
  console.log('-'.repeat(40));

  let wordCount = 0;
  for await (const chunk of textStream) {
    process.stdout.write(chunk);
    wordCount += chunk.split(' ').length;
  }

  console.log('\n' + '-'.repeat(40));
  console.log(`āœ… Real-time streaming completed! (~${wordCount} words)`);

  return true;
}

/**
 * Runs the same short prompt against each configured model, logging the
 * trimmed response (or the error) per model. Failures are caught per model
 * so one unavailable model does not abort the rest.
 *
 * @returns {Promise<boolean>} always true — partial success is acceptable.
 */
async function testStreamingWithDifferentModels() {
  console.log('\n\nšŸ¤– Multi-Model Streaming Test\n');

  const models = [
    { name: MODELS.LLAMA_3_2, prompt: 'What is TypeScript?' },
  ];

  for (const { name, prompt } of models) {
    console.log(`\nšŸ”ø Testing ${name}:`);
    try {
      const { textStream } = await streamText({
        model: ollama(name),
        prompt,
        maxOutputTokens: 50,
      });

      let response = '';
      for await (const chunk of textStream) {
        response += chunk;
      }
      console.log(`  āœ… Response: ${response.trim()}`);
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      console.log(`  āŒ Error: ${errorMessage}`);
    }
  }

  return true;
}

/**
 * Streams a prompt that should trigger a tool call and consumes the
 * `fullStream` so text deltas, tool calls, tool results, and the finish
 * event can all be observed and summarized.
 *
 * @returns {Promise<boolean>} true on a clean run, false if streaming threw.
 */
async function testStreamingWithTools() {
  console.log('\n\nšŸ”§ Streaming + Tool Calls Test\n');

  try {
    // Use the fullStream to capture tool calls alongside text deltas.
    const result = await streamText({
      model: ollama(MODELS.LLAMA_3_2),
      prompt: 'What is the weather in London? Use the weather tool.',
      tools: {
        getWeather: tool({
          description: 'Get weather for a city',
          inputSchema: z.object({
            city: z.string(),
          }),
          // Stubbed executor — returns canned data so the test is offline-safe.
          execute: async ({ city }) => {
            return { city, temperature: 15, condition: 'cloudy' };
          },
        }),
      },
    });

    console.log('Streaming response with tools:');
    console.log('-'.repeat(40));

    let textContent = '';
    let toolCallsFound = 0;

    for await (const part of result.fullStream) {
      switch (part.type) {
        case 'text-delta': {
          process.stdout.write(part.text);
          textContent += part.text;
          break;
        }
        case 'tool-call': {
          toolCallsFound++;
          console.log(
            `\nšŸ”§ [TOOL CALL] ${part.toolName} with input:`,
            part.input,
          );
          break;
        }
        case 'tool-result': {
          console.log(`šŸ”§ [TOOL RESULT]`, part.output);
          break;
        }
        case 'finish': {
          console.log(`\n\nāœ… Stream finished: ${part.finishReason}`);
          break;
        }
      }
    }

    console.log('\n' + '-'.repeat(40));
    console.log(
      `šŸ“Š Summary: ${textContent.length} chars text, ${toolCallsFound} tool calls`,
    );

    return true;
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    console.log(`āŒ Tool streaming error: ${errorMessage}`);
    return false;
  }
}

/**
 * Measures time-to-first-chunk, total time, chunk count, and character
 * throughput for a single streamed completion.
 *
 * @returns {Promise<boolean>} true when the stream finished within 10 seconds.
 */
async function testStreamingPerformance() {
  console.log('\n\n⚔ Streaming Performance Test\n');

  const startTime = Date.now();

  const { textStream } = await streamText({
    model: ollama(MODELS.LLAMA_3_2),
    prompt: 'Explain the concept of recursion in programming.',
    maxOutputTokens: 150,
  });

  console.log('Performance test - explaining recursion:');
  console.log('-'.repeat(40));

  let chunks = 0;
  let totalChars = 0;
  let firstChunkTime = 0;

  for await (const chunk of textStream) {
    if (chunks === 0) {
      firstChunkTime = Date.now() - startTime;
    }
    chunks++;
    totalChars += chunk.length;
    process.stdout.write(chunk);
  }

  const totalTime = Date.now() - startTime;
  // Guard against an empty stream: 0 chunks would make the average NaN.
  const avgChunkSize = chunks > 0 ? Math.round(totalChars / chunks) : 0;

  console.log('\n' + '-'.repeat(40));
  console.log(`⚔ Performance Results:`);
  console.log(`  Time to first chunk: ${firstChunkTime}ms`);
  console.log(`  Total time: ${totalTime}ms`);
  console.log(`  Chunks received: ${chunks}`);
  console.log(`  Characters streamed: ${totalChars}`);
  console.log(`  Average chunk size: ${avgChunkSize} chars`);

  return totalTime < 10_000; // Should complete within 10 seconds
}

/**
 * Runs every streaming test in sequence, prints a per-test scoreboard, and
 * an overall verdict. Individual test errors inside the try abort the run
 * and are reported; the process exit code is handled by the caller.
 */
async function runStreamingTests() {
  console.log('🌊 AI SDK Streaming Tests\n');
  console.log('='.repeat(60));

  const results = {
    basic: false,
    realTime: false,
    multiModel: false,
    withTools: false,
    performance: false,
  };

  try {
    results.basic = await testBasicStreaming();
    results.realTime = await testStreamingWithRealTimeDisplay();
    await testStreamingWithDifferentModels(); // Always succeeds partially
    results.multiModel = true;
    results.withTools = await testStreamingWithTools();
    results.performance = await testStreamingPerformance();

    console.log('\n' + '='.repeat(60));
    console.log('šŸ“Š Streaming Test Results:');
    console.log(`  Basic streaming: ${results.basic ? 'āœ…' : 'āŒ'}`);
    console.log(`  Real-time display: ${results.realTime ? 'āœ…' : 'āŒ'}`);
    console.log(`  Multi-model support: ${results.multiModel ? 'āœ…' : 'āŒ'}`);
    console.log(`  Tool integration: ${results.withTools ? 'āœ…' : 'āŒ'}`);
    console.log(`  Performance: ${results.performance ? 'āœ…' : 'āŒ'}`);

    const passCount = Object.values(results).filter(Boolean).length;
    // Derive the total from the results object so adding a test can't
    // silently desynchronize the "x/y" summary.
    const totalCount = Object.keys(results).length;
    console.log(`\nšŸŽÆ Overall: ${passCount}/${totalCount} tests passed`);

    if (passCount >= 4) {
      console.log('šŸŽ‰ AI SDK streaming integration is working excellently!');
    } else if (passCount >= 3) {
      console.log('āœ… AI SDK streaming integration is working well!');
    } else {
      console.log('āš ļø AI SDK streaming needs attention.');
    }
  } catch (error) {
    console.error('\nāŒ Test suite failed:', error);
  }
}

// Run the tests
runStreamingTests().catch((error) => {
  console.error('Test suite failed:', error);
  process.exit(1);
});