'use client';
import { useRef, useState, useEffect } from 'react';
import { useStore } from '@/lib/store';
import { api } from '@/lib/api';
import type { Message } from '@/lib/api';

/**
 * Chat composer: a textarea with Enter-to-send plus a push-to-talk voice
 * button that records microphone audio and inserts the transcription into
 * the draft. Reads/writes conversation state through the shared store and
 * talks to the backend via the `api` client.
 *
 * NOTE(review): the JSX returned below is truncated in this view — only the
 * section comments are visible, so the wiring of `autoResize`, `recording`,
 * `startRecording`, `stopRecording` and `handleKeyDown` to elements cannot
 * be confirmed from here.
 */
export default function MessageInput() {
  const { currentId, setCurrentId, addMessage, setIsLoading, isLoading, setConversations } = useStore();
  // Draft message text and whether the microphone is currently capturing.
  const [text, setText] = useState('');
  const [recording, setRecording] = useState(false);
  // NOTE(review): these refs are untyped — under `strict` TS, `useRef(null)`
  // infers `current: null`, so the `.style`, `.push`, and `.stop()` accesses
  // below would not typecheck. Presumably they should be
  // `useRef<HTMLTextAreaElement>(null)`, `useRef<MediaRecorder | null>(null)`,
  // and `useRef<Blob[]>([])` — confirm against the project tsconfig.
  const textareaRef = useRef(null);
  const mediaRecorderRef = useRef(null);
  const chunksRef = useRef([]);

  // Refocus the input whenever the active conversation changes.
  useEffect(() => { textareaRef.current?.focus(); }, [currentId]);

  // Grow the textarea to fit its content, capped at 160px.
  // (Reset to 'auto' first so it can also shrink when text is deleted.)
  // NOTE(review): not called anywhere in the visible code — presumably wired
  // to an onInput/onChange handler in the truncated JSX; verify.
  function autoResize() {
    const el = textareaRef.current;
    if (!el) return;
    el.style.height = 'auto';
    el.style.height = Math.min(el.scrollHeight, 160) + 'px';
  }

  // Send the current draft: create a conversation if none is active, append
  // the user message optimistically, call the backend, then append the
  // assistant reply and refresh the conversation list.
  async function send() {
    const message = text.trim();
    // Ignore empty drafts and re-entrant sends while a request is in flight.
    if (!message || isLoading) return;
    // Clear the draft and collapse the textarea immediately (optimistic UI).
    setText('');
    if (textareaRef.current) textareaRef.current.style.height = 'auto';
    let convId = currentId;
    if (!convId) {
      // NOTE(review): this await sits outside the try/catch below and runs
      // after the draft was already cleared — if newConversation() rejects,
      // the user's typed message is lost and the error surfaces nowhere.
      const conv = await api.newConversation();
      convId = conv.id;
      setCurrentId(convId);
    }
    const userMsg: Message = {
      role: 'user',
      content: message,
      sources: [],
      timestamp: new Date().toISOString(),
    };
    addMessage(userMsg);
    setIsLoading(true);
    try {
      const data = await api.sendMessage(message, convId);
      // Server is authoritative for the conversation id (may differ for a
      // freshly created conversation).
      setCurrentId(data.conversation_id);
      const assistantMsg: Message = {
        role: 'assistant',
        content: data.response,
        sources: data.sources || [],
        timestamp: new Date().toISOString(),
      };
      addMessage(assistantMsg);
      // Refresh the sidebar list so titles/ordering reflect the new exchange.
      const updated = await api.getConversations();
      setConversations(updated);
    } catch (e) {
      // NOTE(review): `e` is unused and the failure detail is discarded —
      // consider logging it for diagnosis.
      addMessage({
        role: 'assistant',
        content: 'Error — please try again.',
        sources: [],
        timestamp: new Date().toISOString(),
      });
    } finally {
      setIsLoading(false);
      textareaRef.current?.focus();
    }
  }

  // Start capturing microphone audio with MediaRecorder; on stop, release
  // the tracks, transcribe the recorded blob, and append the transcript to
  // the draft text.
  async function startRecording() {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const mr = new MediaRecorder(stream);
      chunksRef.current = [];
      mr.ondataavailable = e => chunksRef.current.push(e.data);
      mr.onstop = async () => {
        // Release the microphone before doing any async work.
        stream.getTracks().forEach(t => t.stop());
        const blob = new Blob(chunksRef.current, { type: 'audio/webm' });
        try {
          const { text: transcript } = await api.transcribe(blob);
          // Append with a space if a draft already exists; otherwise replace.
          setText(prev => prev ? prev + ' ' + transcript : transcript);
          textareaRef.current?.focus();
        } catch {
          console.error('Transcription failed');
        }
      };
      mr.start();
      mediaRecorderRef.current = mr;
      setRecording(true);
    } catch {
      // getUserMedia rejected — most commonly a permission denial, though it
      // can also mean no device; the alert assumes the former.
      alert('Microphone access denied');
    }
  }

  // Stop the active recorder (triggers the onstop handler above) and clear
  // the recording flag.
  function stopRecording() {
    mediaRecorderRef.current?.stop();
    setRecording(false);
  }

  // Enter sends; Shift+Enter inserts a newline (default textarea behavior).
  // NOTE(review): `send()` is a floating promise here — intentional
  // fire-and-forget, but `void send();` would make that explicit.
  function handleKeyDown(e: React.KeyboardEvent) {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault();
      send();
    }
  }

  return (
    {/* Voice button */} {/* Text input */}