// Stage 2: Upload voice note (mic + file) and Stage 3: Transcribe loading
// Rev 2: captures real File/Blob and forwards it to the parent for /api/jobs.
//
// NOTE(review): this chunk appears to have lost most of its JSX markup in
// extraction — `return (` below is followed by bare text/expression fragments,
// and the final expression references `phase`, which is not defined in this
// component (it likely belongs to a sibling Transcribe-stage component).
// All code tokens are preserved verbatim; only comments were added, and none
// were placed inside the (garbled) JSX region.
//
// Props:
//   formKind     - "sep" | anything else (treated as Salaried); only used in
//                  the visible JSX copy text
//   onBack       - not referenced in the visible code — presumably wired to a
//                  back button in the stripped JSX; verify
//   onTranscribe - callback receiving { blob, filename, durationSec, source };
//                  per the header, the parent forwards this to /api/jobs
const UploadStage = ({ formKind, onBack, onTranscribe }) => {
  // --- state ---
  const [mode, setMode] = React.useState(null); // null | "record" | "file"
  const [recording, setRecording] = React.useState(false);
  const [paused, setPaused] = React.useState(false);
  // Approximate elapsed seconds: +0.1 per 100 ms tick (see effect below).
  // setInterval drift makes this display-grade, not wall-clock exact.
  const [elapsed, setElapsed] = React.useState(0);
  const [recordedBlob, setRecordedBlob] = React.useState(null); // Blob assembled in MediaRecorder onstop
  const [recordedMime, setRecordedMime] = React.useState(null); // MIME type the recorder actually used
  const [file, setFile] = React.useState(null);                 // File chosen via picker or drag-drop
  const [dragOver, setDragOver] = React.useState(false);        // drag-over highlight; only written here, presumably read by the stripped JSX
  const [recError, setRecError] = React.useState(null);         // user-facing error string
  const mediaRecorderRef = React.useRef(null);
  const chunksRef = React.useRef([]);   // audio chunks accumulated between start and stop
  const fileInputRef = React.useRef(null); // not referenced in visible code — presumably attached to a hidden <input> in the JSX; verify

  // elapsed tick: advance the counter every 100 ms while actively recording
  // (interval is torn down whenever recording stops or pauses).
  React.useEffect(() => {
    if (!recording || paused) return;
    const t = setInterval(() => setElapsed(e => e + 0.1), 100);
    return () => clearInterval(t);
  }, [recording, paused]);

  // Request the microphone and start a MediaRecorder, preferring
  // "audio/webm;codecs=opus", then "audio/webm", else the browser default.
  // On failure, surfaces the error message via recError instead of throwing.
  const startRecording = async () => {
    setRecError(null);
    setRecordedBlob(null);
    setElapsed(0);
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const mime = MediaRecorder.isTypeSupported("audio/webm;codecs=opus")
        ? "audio/webm;codecs=opus"
        : (MediaRecorder.isTypeSupported("audio/webm") ? "audio/webm" : "");
      const rec = mime ? new MediaRecorder(stream, { mimeType: mime }) : new MediaRecorder(stream);
      chunksRef.current = [];
      rec.ondataavailable = (ev) => { if (ev.data && ev.data.size) chunksRef.current.push(ev.data); };
      // onstop fires asynchronously after stop(): assemble the final Blob and
      // release the mic tracks.
      // NOTE(review): discardRecording() clears recordedBlob and then calls
      // stop(), but this handler runs afterwards and re-sets the blob — a
      // discarded take can reappear in state. Consider a "discarded" flag.
      rec.onstop = () => {
        const blob = new Blob(chunksRef.current, { type: rec.mimeType || "audio/webm" });
        setRecordedBlob(blob);
        setRecordedMime(rec.mimeType || "audio/webm");
        stream.getTracks().forEach(t => t.stop());
      };
      mediaRecorderRef.current = rec;
      rec.start();
      setRecording(true);
      setPaused(false);
      setMode("record");
    } catch (e) {
      setRecError(e?.message || "microphone access denied");
    }
  };

  // Pause/resume are guarded by MediaRecorder.state, so stray clicks are no-ops.
  const pauseRecording = () => { const rec = mediaRecorderRef.current; if (rec && rec.state === "recording") { rec.pause(); setPaused(true); } };
  const resumeRecording = () => { const rec = mediaRecorderRef.current; if (rec && rec.state === "paused") { rec.resume(); setPaused(false); } };

  // Stop the recorder if still active; the final blob arrives via onstop above.
  const stopRecording = () => {
    if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") {
      mediaRecorderRef.current.stop();
    }
    setRecording(false);
    setPaused(false);
  };

  // Throw away the current take and return to the initial (no-mode) screen.
  // (See the NOTE on rec.onstop about the async re-set of recordedBlob.)
  const discardRecording = () => {
    stopRecording();
    setRecordedBlob(null);
    setRecordedMime(null);
    setElapsed(0);
    setMode(null);
  };

  // Validate a picked or dropped file: extension whitelist + 50 MB cap.
  // NOTE(review): nothing in the visible code ever calls setMode("file"), so
  // the uploaded-file branch of handleTranscribe below is unreachable unless
  // the stripped JSX sets mode — confirm against the full markup.
  const pickFile = (f) => {
    if (!f) return;
    const okExt = /\.(wav|mp3|webm|m4a)$/i.test(f.name);
    if (!okExt) { setRecError("Only .wav .mp3 .webm .m4a accepted"); return; }
    if (f.size > 50 * 1024 * 1024) { setRecError("File too large (max 50 MB)"); return; }
    setRecError(null);
    setFile(f);
  };
  const onFilePicked = (e) => pickFile(e.target.files && e.target.files[0]);
  const onDrop = (e) => { e.preventDefault(); setDragOver(false); pickFile(e.dataTransfer.files && e.dataTransfer.files[0]); };

  // Forward whichever audio source is ready to the parent; silently does
  // nothing when neither source is available.
  const handleTranscribe = () => {
    if (mode === "record" && recordedBlob) {
      // NOTE(review): both ternary branches are ".webm" — the condition is
      // dead. A non-webm recordedMime was presumably meant to map to a
      // different extension; confirm intent before simplifying.
      const ext = (recordedMime && recordedMime.includes("webm")) ? ".webm" : ".webm";
      const name = `recording-${Date.now()}${ext}`;
      onTranscribe({ blob: recordedBlob, filename: name, durationSec: elapsed, source: "recorded" });
      return;
    }
    if (mode === "file" && file) {
      onTranscribe({ blob: file, filename: file.name, durationSec: null, source: "uploaded" });
      return;
    }
  };

  // mm:ss formatter for the elapsed-time display (floors fractional seconds).
  const fmtTime = (s) => { const m = Math.floor(s / 60), sec = Math.floor(s % 60); return `${String(m).padStart(2,"0")}:${String(sec).padStart(2,"0")}`; };

  return (
Dictate the applicant's details in English, Hindi, or Hinglish — we'll transcribe and extract.
{phase === "transcribe" ? "Separating speech from silence and converting audio to text." : "Reading the transcript and mapping it to the " + (formKind === "sep" ? "SEP" : "Salaried") + " loan schema."}