diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 3877194..684d0be 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -316,15 +316,40 @@ fn start_recording(
     Ok("Recording started".to_string())
 }
 
-/// Stop recording and return the transcript.
+/// Stop recording (for real-time mode, transcription already happened during recording).
 #[tauri::command]
 async fn stop_recording(
     state: State<'_, AppState>,
     app_handle: tauri::AppHandle,
+) -> Result<String, String> {
+    let logs = Arc::clone(&state.logs);
+
+    emit_log(&app_handle, &logs, "[Audio] Stopping recording...");
+
+    // Stop the recording and get the duration
+    let duration = {
+        let mut audio_guard = state.audio_capture.lock();
+        if let Some(ref mut capture) = *audio_guard {
+            let samples = capture.stop_recording();
+            samples.len() as f32 / 16000.0
+        } else {
+            return Err("No active recording".to_string());
+        }
+    };
+
+    emit_log(&app_handle, &logs, &format!("[Audio] Recording stopped. Total duration: {:.1}s", duration));
+    Ok("Recording stopped".to_string())
+}
+
+/// Stop recording and transcribe all at once (batch mode).
+#[tauri::command]
+async fn stop_recording_batch(
+    state: State<'_, AppState>,
+    app_handle: tauri::AppHandle,
 ) -> Result<Vec<TranscriptSegment>, String> {
     let logs = Arc::clone(&state.logs);
 
-    emit_log(&app_handle, &logs, "[Audio] Stopping recording...");
+    emit_log(&app_handle, &logs, "[Audio] Stopping recording (batch mode)...");
 
     // Get the audio samples
     let audio_samples = {
diff --git a/src-tauri/src/ml/summarizer.rs b/src-tauri/src/ml/summarizer.rs
index ffe522e..87f9d1f 100644
--- a/src-tauri/src/ml/summarizer.rs
+++ b/src-tauri/src/ml/summarizer.rs
@@ -78,15 +78,17 @@ impl LlamaSummarizer {
 
         let prompt = format!(
             "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n\
-            You are a helpful assistant that creates concise meeting summaries. Focus on:\n\
-            - Key decisions made\n\
-            - Action items and who owns them\n\
-            - Important discussions and their outcomes\n\
-            - Next steps\n\n\
-            Keep the summary structured and easy to scan.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n\
+            You are a helpful assistant that creates structured meeting summaries. \
+            Format your response using the following template:\n\n\
+            **Summary:** A high level overview of the meeting.\n\n\
+            **Key decisions:** Any important resolutions that the meeting reached.\n\n\
+            **Action Items:**\n\
+            - list of things that must be addressed\n\n\
+            **Discussion points:**\n\
+            - a list of each topic/argument/counterpoint brought up in the meeting.\n\n\
+            Be concise but comprehensive. Focus on capturing all important information.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n\
             Please summarize this meeting transcript:\n\n\
-            {}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n\
-            Meeting Summary:\n",
+            {}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
             transcript
         );
 
diff --git a/src/App.tsx b/src/App.tsx
index 7c53bd9..6ad8ec4 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -165,7 +165,7 @@ function App() {
 
   const stopRecording = async () => {
     try {
-      // Stop the timers
+      // Stop the timers first
      if (recordingTimer.current) {
        clearInterval(recordingTimer.current);
        recordingTimer.current = null;
@@ -178,36 +178,43 @@
       setAppState("transcribing");
       setStatusMessage("Processing final audio...");
 
-      // Process any remaining audio
-      const finalChunk = await invoke("get_remaining_audio", {
-        lastOffset: audioOffset.current
-      });
-
-      if (finalChunk.length > 0) {
-        const finalSegments = await invoke("transcribe_chunk", {
-          audioData: finalChunk
+      // First, process any audio that hasn't been processed yet
+      try {
+        const finalChunk = await invoke("get_remaining_audio", {
+          lastOffset: audioOffset.current
         });
 
-        if (finalSegments.length > 0) {
-          const baseTime = totalProcessedSamples.current / 16000;
-          const adjustedSegments = finalSegments.map(seg => ({
-            ...seg,
-            start: seg.start + baseTime,
-            end: seg.end + baseTime,
-          }));
+        if (finalChunk.length > 0) {
+          console.log(`Processing final chunk of ${finalChunk.length} samples`);
+          const finalSegments = await invoke("transcribe_chunk", {
+            audioData: finalChunk
+          });
-          setTranscriptSegments(prev => [...prev, ...adjustedSegments]);
+          if (finalSegments.length > 0) {
+            const baseTime = totalProcessedSamples.current / 16000;
+            const adjustedSegments = finalSegments.map(seg => ({
+              ...seg,
+              start: seg.start + baseTime,
+              end: seg.end + baseTime,
+            }));
+
+            setTranscriptSegments(prev => [...prev, ...adjustedSegments]);
+          }
         }
+      } catch (chunkError) {
+        console.error("Error processing final chunk:", chunkError);
+        // Continue with stopping even if final chunk fails
       }
 
-      // Stop the recording
-      await invoke("stop_recording");
+      // Now stop the recording
+      await invoke("stop_recording");
 
       setAppState("ready");
       setStatusMessage("");
 
     } catch (error) {
       console.error("Failed to stop recording:", error);
       setAppState("ready");
+      setStatusMessage("");
       setErrorMessage(String(error));
     }
   };