'use client';

import { useRef, useState, useEffect } from 'react';
import { useStore } from '@/lib/store';
import { api } from '@/lib/api';
import type { Message } from '@/lib/api';

/**
 * Chat composer for the conversation UI.
 *
 * Responsibilities visible in this component:
 *  - A self-resizing textarea; Enter sends, Shift+Enter inserts a newline.
 *  - Sending a message: lazily creates a conversation if none is selected,
 *    optimistically appends the user message, calls the chat API, appends the
 *    assistant reply (or an error placeholder), then refreshes the sidebar
 *    conversation list.
 *  - Voice input: tap-to-start / tap-to-stop recording via MediaRecorder;
 *    on stop, the captured audio is sent to `api.transcribe` and the result
 *    is appended to the draft text.
 *
 * NOTE(review): the returned JSX continues past the end of this chunk; only
 * the leading comments are visible here.
 */
export default function MessageInput() {
  const { currentId, setCurrentId, addMessage, setIsLoading, isLoading, setConversations } = useStore();
  const [text, setText] = useState('');
  const [recording, setRecording] = useState(false);          // mic is actively capturing
  const [transcribing, setTranscribing] = useState(false);    // audio uploaded, awaiting transcript

  // NOTE(review): these refs are created as `useRef(null)` with no type
  // argument, so under strict TS `textareaRef.current.style` etc. would not
  // type-check (current is inferred as null). Presumably this compiles under
  // loose settings — confirm, or add <HTMLTextAreaElement>/<MediaRecorder>/
  // <Blob[]>/<MediaStream> type parameters in a code change.
  const textareaRef = useRef(null);
  const mediaRecorderRef = useRef(null);  // active MediaRecorder while recording
  const chunksRef = useRef([]);           // audio chunks accumulated by ondataavailable
  const streamRef = useRef(null);         // getUserMedia stream, stopped in onstop

  // Refocus the input whenever the selected conversation changes.
  useEffect(() => { textareaRef.current?.focus(); }, [currentId]);

  // Grow the textarea to fit its content, capped at 160px.
  function autoResize() {
    const el = textareaRef.current;
    if (!el) return;
    el.style.height = 'auto';                                  // reset so scrollHeight shrinks too
    el.style.height = Math.min(el.scrollHeight, 160) + 'px';
  }

  // Send the current draft. No-op when the draft is blank or a request is
  // already in flight.
  async function send() {
    const message = text.trim();
    if (!message || isLoading) return;
    // Clear the draft immediately so the UI feels responsive.
    setText('');
    if (textareaRef.current) textareaRef.current.style.height = 'auto';
    let convId = currentId;
    if (!convId) {
      // NOTE(review): this await sits outside the try/catch below, so a
      // rejection from newConversation() escapes send() as an unhandled
      // rejection (draft text already cleared) — consider moving it inside
      // the try or restoring the draft on failure.
      const conv = await api.newConversation();
      convId = conv.id;
      setCurrentId(convId);
    }
    // Optimistically append the user message before the server responds.
    const userMsg: Message = {
      role: 'user',
      content: message,
      sources: [],
      timestamp: new Date().toISOString(),
    };
    addMessage(userMsg);
    setIsLoading(true);
    try {
      const data = await api.sendMessage(message, convId);
      // Server is authoritative for the conversation id (may differ when the
      // conversation was just created server-side).
      setCurrentId(data.conversation_id);
      addMessage({
        role: 'assistant',
        content: data.response,
        sources: data.sources || [],
        timestamp: new Date().toISOString(),
      });
      // Refresh the sidebar list (titles/ordering may have changed).
      const updated = await api.getConversations();
      setConversations(updated);
    } catch {
      // Surface failure as an assistant-style message rather than a toast.
      addMessage({
        role: 'assistant',
        content: 'Error — please try again.',
        sources: [],
        timestamp: new Date().toISOString(),
      });
    } finally {
      setIsLoading(false);
      textareaRef.current?.focus();
    }
  }

  // Tap-to-toggle voice capture. Stopping triggers mr.onstop, which releases
  // the mic tracks and kicks off transcription.
  async function toggleRecording() {
    if (recording) {
      // Stop recording
      mediaRecorderRef.current?.stop();
      setRecording(false);
    } else {
      // Start recording
      try {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        streamRef.current = stream;
        // Pick supported mime type
        // NOTE(review): the final 'audio/ogg' fallback is assumed supported
        // without an isTypeSupported check — the MediaRecorder constructor
        // throws NotSupportedError if it isn't; confirm target browsers.
        const mimeType = MediaRecorder.isTypeSupported('audio/webm')
          ? 'audio/webm'
          : MediaRecorder.isTypeSupported('audio/mp4')
            ? 'audio/mp4'
            : 'audio/ogg';
        const mr = new MediaRecorder(stream, { mimeType });
        chunksRef.current = [];
        mr.ondataavailable = e => { if (e.data.size > 0) chunksRef.current.push(e.data); };
        mr.onstop = async () => {
          // Release the microphone as soon as recording ends.
          streamRef.current?.getTracks().forEach(t => t.stop());
          if (chunksRef.current.length === 0) return;
          setTranscribing(true);
          try {
            const blob = new Blob(chunksRef.current, { type: mimeType });
            const { text: transcript } = await api.transcribe(blob);
            if (transcript.trim()) {
              // Append to any existing draft rather than replacing it.
              setText(prev => prev ? prev + ' ' + transcript.trim() : transcript.trim());
              // Resize after React has flushed the new value into the DOM.
              setTimeout(() => autoResize(), 0);
            }
          } catch (e) {
            console.error('Transcription failed', e);
          } finally {
            setTranscribing(false);
          }
        };
        mr.start(1000); // collect data every second
        mediaRecorderRef.current = mr;
        setRecording(true);
      } catch {
        alert('Microphone access denied');
      }
    }
  }

  // Enter sends; Shift+Enter falls through to the default newline behavior.
  // NOTE(review): send() is async and invoked fire-and-forget here; its
  // rejection path (see note in send) is not handled by this caller.
  function handleKeyDown(e: React.KeyboardEvent) {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault();
      send();
    }
  }

  // Mic button palette: red background while recording, accent while
  // transcribing, neutral otherwise.
  const micColor = recording ? '#e8f5ed' : transcribing ? '#e8f5ed' : 'var(--text3)';
  const micBg = recording ? '#a32d2d' : transcribing ? 'var(--accent)' : 'var(--bg3)';

  return (
    {/* Mic button — tap to start, tap to stop */}
    {/* Text input */}