// Files
// aaronai-web/components/MessageInput.tsx
// T
//
// 229 lines
// 7.3 KiB
// TypeScript
//
'use client';
import { useRef, useState, useEffect } from 'react';
import { useStore } from '@/lib/store';
import { api } from '@/lib/api';
import type { Message } from '@/lib/api';
/**
 * Chat composer: text input with Enter-to-send, plus push-to-talk voice
 * input transcribed via the backend (`api.transcribe`).
 *
 * Fixes vs. previous revision:
 * - `send()` now wraps conversation creation in the try/catch (a failed
 *   `api.newConversation()` used to clear the input silently and leave an
 *   unhandled rejection), and sets `isLoading` *before* the first await so a
 *   rapid second Enter cannot double-send.
 * - Recording cannot be restarted while a previous take is still
 *   transcribing (would clobber `chunksRef` mid-flight); the mic button is
 *   also disabled during transcription.
 * - Buttons use `onClick` instead of `onPointerUp` so they are keyboard
 *   accessible and respect `disabled`; `touchAction: 'manipulation'`
 *   already removes the mobile tap delay.
 * - Enter during IME composition (CJK input) no longer sends.
 * - Unmounting mid-recording releases the microphone stream.
 */
export default function MessageInput() {
  const { currentId, setCurrentId, addMessage, setIsLoading, isLoading, setConversations } = useStore();
  const [text, setText] = useState('');
  const [recording, setRecording] = useState(false);
  const [transcribing, setTranscribing] = useState(false);
  const textareaRef = useRef<HTMLTextAreaElement>(null);
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const chunksRef = useRef<Blob[]>([]);
  const streamRef = useRef<MediaStream | null>(null);

  // Refocus the composer whenever the active conversation changes.
  useEffect(() => {
    textareaRef.current?.focus();
  }, [currentId]);

  // Release the microphone if we unmount mid-recording; otherwise the
  // browser's "mic in use" indicator stays lit until a full page reload.
  useEffect(() => {
    return () => {
      if (mediaRecorderRef.current?.state === 'recording') {
        mediaRecorderRef.current.stop();
      }
      streamRef.current?.getTracks().forEach(t => t.stop());
    };
  }, []);

  /** Grow the textarea with its content, capped at 160px (then it scrolls). */
  function autoResize() {
    const el = textareaRef.current;
    if (!el) return;
    el.style.height = 'auto';
    el.style.height = Math.min(el.scrollHeight, 160) + 'px';
  }

  /**
   * Send the trimmed input: lazily create a conversation on first message,
   * optimistically append the user message, then append the assistant reply
   * (or an error placeholder) and refresh the sidebar conversation list.
   */
  async function send() {
    const message = text.trim();
    if (!message || isLoading) return;
    setText('');
    if (textareaRef.current) textareaRef.current.style.height = 'auto';
    // Block re-entry before the first await — previously a second Enter
    // pressed during newConversation() could send the message twice.
    setIsLoading(true);
    try {
      let convId = currentId;
      if (!convId) {
        // Inside try: a failure here must surface an error message instead
        // of silently eating the user's input.
        const conv = await api.newConversation();
        convId = conv.id;
        setCurrentId(convId);
      }
      const userMsg: Message = {
        role: 'user',
        content: message,
        sources: [],
        timestamp: new Date().toISOString(),
      };
      addMessage(userMsg);
      const data = await api.sendMessage(message, convId);
      setCurrentId(data.conversation_id);
      addMessage({
        role: 'assistant',
        content: data.response,
        sources: data.sources || [],
        timestamp: new Date().toISOString(),
      });
      // Refresh titles/ordering in the sidebar (server may retitle the chat).
      const updated = await api.getConversations();
      setConversations(updated);
    } catch {
      addMessage({
        role: 'assistant',
        content: 'Error — please try again.',
        sources: [],
        timestamp: new Date().toISOString(),
      });
    } finally {
      setIsLoading(false);
      textareaRef.current?.focus();
    }
  }

  /**
   * Tap to start, tap again to stop. Stopping fires `onstop`, which ships
   * the collected chunks to the transcription endpoint and appends the
   * transcript to the input.
   */
  async function toggleRecording() {
    // Ignore taps while a previous take is still transcribing — starting a
    // new recording here would clobber chunksRef mid-upload.
    if (transcribing) return;
    if (recording) {
      mediaRecorderRef.current?.stop();
      setRecording(false);
      return;
    }
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      streamRef.current = stream;
      // Prefer webm (Chrome/Firefox), fall back to mp4 (Safari), then ogg.
      const mimeType = MediaRecorder.isTypeSupported('audio/webm')
        ? 'audio/webm'
        : MediaRecorder.isTypeSupported('audio/mp4')
          ? 'audio/mp4'
          : 'audio/ogg';
      const mr = new MediaRecorder(stream, { mimeType });
      chunksRef.current = [];
      mr.ondataavailable = e => {
        if (e.data.size > 0) chunksRef.current.push(e.data);
      };
      mr.onstop = async () => {
        // Always release the hardware, even if nothing was captured.
        streamRef.current?.getTracks().forEach(t => t.stop());
        streamRef.current = null;
        if (chunksRef.current.length === 0) return;
        setTranscribing(true);
        try {
          const blob = new Blob(chunksRef.current, { type: mimeType });
          const { text: transcript } = await api.transcribe(blob);
          const clean = transcript.trim();
          if (clean) {
            // Append to any text already typed rather than replacing it.
            setText(prev => (prev ? prev + ' ' + clean : clean));
            // Resize after React has flushed the new value to the DOM.
            setTimeout(() => autoResize(), 0);
          }
        } catch (e) {
          console.error('Transcription failed', e);
        } finally {
          setTranscribing(false);
        }
      };
      mr.start(1000); // flush a chunk every second so short takes aren't lost
      mediaRecorderRef.current = mr;
      setRecording(true);
    } catch {
      // getUserMedia rejects on denial or missing hardware alike.
      alert('Microphone access denied');
    }
  }

  /** Enter sends; Shift+Enter inserts a newline; Enter during IME composition is ignored. */
  function handleKeyDown(e: React.KeyboardEvent) {
    if (e.key === 'Enter' && !e.shiftKey && !e.nativeEvent.isComposing) {
      e.preventDefault();
      send();
    }
  }

  const micColor = recording ? '#e8f5ed' : transcribing ? '#e8f5ed' : 'var(--text3)';
  const micBg = recording ? '#a32d2d' : transcribing ? 'var(--accent)' : 'var(--bg3)';

  return (
    <div
      className="flex gap-2 items-end flex-shrink-0"
      style={{
        borderTop: '1px solid var(--border)',
        padding: '12px 16px',
        paddingBottom: 'max(12px, env(safe-area-inset-bottom))',
        paddingLeft: 'max(16px, env(safe-area-inset-left))',
        paddingRight: 'max(16px, env(safe-area-inset-right))',
      }}
    >
      {/* Mic button — tap to start, tap to stop; disabled while transcribing */}
      <button
        onClick={toggleRecording}
        disabled={transcribing}
        className="flex-shrink-0 rounded-lg flex items-center justify-center transition-all"
        style={{
          width: '44px',
          height: '44px',
          background: micBg,
          border: 'none',
          cursor: transcribing ? 'not-allowed' : 'pointer',
          color: micColor,
          touchAction: 'manipulation',
          flexShrink: 0,
        }}
        aria-label={recording ? 'Stop recording' : transcribing ? 'Transcribing...' : 'Start recording'}
        title={recording ? 'Tap to stop' : 'Tap to record'}
      >
        {transcribing ? (
          <svg width="18" height="18" viewBox="0 0 24 24" fill="currentColor">
            <circle cx="12" cy="12" r="3" opacity="0.6">
              <animate attributeName="opacity" values="0.6;1;0.6" dur="1s" repeatCount="indefinite"/>
            </circle>
          </svg>
        ) : recording ? (
          // Square stop icon when recording
          <svg width="14" height="14" viewBox="0 0 14 14" fill="currentColor">
            <rect width="14" height="14" rx="2"/>
          </svg>
        ) : (
          // Mic icon when idle
          <svg width="18" height="18" viewBox="0 0 24 24" fill="currentColor">
            <path d="M12 1a4 4 0 0 1 4 4v6a4 4 0 0 1-8 0V5a4 4 0 0 1 4-4zm0 2a2 2 0 0 0-2 2v6a2 2 0 0 0 4 0V5a2 2 0 0 0-2-2zm-7 8a7 7 0 0 0 14 0h2a9 9 0 0 1-8 8.94V22h-2v-2.06A9 9 0 0 1 3 11h2z"/>
          </svg>
        )}
      </button>
      {/* Text input */}
      <div
        className="flex-1 rounded-xl min-w-0 overflow-hidden"
        style={{ background: 'var(--bg2)', border: '1px solid var(--border2)' }}
      >
        <textarea
          ref={textareaRef}
          value={text}
          onChange={e => { setText(e.target.value); autoResize(); }}
          onKeyDown={handleKeyDown}
          placeholder={recording ? 'Recording... tap mic to stop' : transcribing ? 'Transcribing...' : 'Ask anything...'}
          rows={1}
          className="w-full block resize-none outline-none bg-transparent px-3 py-3 leading-relaxed min-w-0"
          style={{
            fontSize: 'var(--font-size)',
            color: 'var(--text)',
            minHeight: '44px',
            maxHeight: '160px',
            fontFamily: 'var(--font-sans)',
          }}
        />
      </div>
      {/* Send button */}
      <button
        onClick={send}
        disabled={isLoading || !text.trim()}
        className="flex-shrink-0 rounded-lg px-4 text-sm font-medium transition-opacity"
        style={{
          background: 'var(--accent)',
          color: '#e8f5ed',
          border: 'none',
          cursor: isLoading || !text.trim() ? 'not-allowed' : 'pointer',
          opacity: isLoading || !text.trim() ? 0.4 : 1,
          minHeight: '44px',
          fontFamily: 'var(--font-sans)',
          touchAction: 'manipulation',
        }}
      >
        Send
      </button>
    </div>
  );
}