feat: Meeting transcription app with WhisperX and Llama #1
+27 -2

```diff
@@ -316,15 +316,40 @@ fn start_recording(
     Ok("Recording started".to_string())
 }
 
-/// Stop recording and return the transcript.
+/// Stop recording (for real-time mode, transcription already happened during recording).
 #[tauri::command]
 async fn stop_recording(
     state: State<'_, AppState>,
     app_handle: tauri::AppHandle,
+) -> Result<String, String> {
+    let logs = Arc::clone(&state.logs);
+
+    emit_log(&app_handle, &logs, "[Audio] Stopping recording...");
+
+    // Stop the recording and get the duration
+    let duration = {
+        let mut audio_guard = state.audio_capture.lock();
+        if let Some(ref mut capture) = *audio_guard {
+            let samples = capture.stop_recording();
+            samples.len() as f32 / 16000.0
+        } else {
+            return Err("No active recording".to_string());
+        }
+    };
+
+    emit_log(&app_handle, &logs, &format!("[Audio] Recording stopped. Total duration: {:.1}s", duration));
+    Ok("Recording stopped".to_string())
+}
+
+/// Stop recording and transcribe all at once (batch mode).
+#[tauri::command]
+async fn stop_recording_batch(
+    state: State<'_, AppState>,
+    app_handle: tauri::AppHandle,
 ) -> Result<Vec<TranscriptSegment>, String> {
     let logs = Arc::clone(&state.logs);
 
-    emit_log(&app_handle, &logs, "[Audio] Stopping recording...");
+    emit_log(&app_handle, &logs, "[Audio] Stopping recording (batch mode)...");
 
     // Get the audio samples
     let audio_samples = {
```
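Note for reviewers: `emit_log` is called throughout this hunk but defined elsewhere in the file. A minimal sketch of the shape it plausibly has, assuming `logs` is an `Arc<Mutex<Vec<String>>>` using `parking_lot` (the diff calls `.lock()` with no `.unwrap()`) and Tauri 2's `Emitter` trait; the `"log"` event name is an assumption:

```rust
use std::sync::Arc;

use parking_lot::Mutex;
use tauri::{AppHandle, Emitter};

/// Append a message to the shared in-memory log and mirror it to the
/// frontend as an event. Hypothetical sketch; only the call shape
/// (`emit_log(&app_handle, &logs, "...")`) is taken from the diff.
fn emit_log(app_handle: &AppHandle, logs: &Arc<Mutex<Vec<String>>>, message: &str) {
    logs.lock().push(message.to_string());
    // Best effort: a dropped UI event should not fail the command.
    let _ = app_handle.emit("log", message);
}
```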
```diff
@@ -78,15 +78,17 @@ impl LlamaSummarizer {
 
         let prompt = format!(
             "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n\
-            You are a helpful assistant that creates concise meeting summaries. Focus on:\n\
-            - Key decisions made\n\
-            - Action items and who owns them\n\
-            - Important discussions and their outcomes\n\
-            - Next steps\n\n\
-            Keep the summary structured and easy to scan.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n\
+            You are a helpful assistant that creates structured meeting summaries. \
+            Format your response using the following template:\n\n\
+            **Summary:** A high level overview of the meeting.\n\n\
+            **Key decisions:** Any important resolutions that the meeting reached.\n\n\
+            **Action Items:**\n\
+            - list of things that must be addressed\n\n\
+            **Discussion points:**\n\
+            - a list of each topic/argument/counterpoint brought up in the meeting.\n\n\
+            Be concise but comprehensive. Focus on capturing all important information.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n\
             Please summarize this meeting transcript:\n\n\
-            {}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n\
-            Meeting Summary:\n",
+            {}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
             transcript
         );
 
```
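The rewritten prompt hand-rolls the Llama 3 instruct chat template. Two things changed: the output template (`**Summary:**`, `**Key decisions:**`, `**Action Items:**`, `**Discussion points:**`) now lives in the system turn, and the old `Meeting Summary:\n` seed after the assistant header is gone, so generation starts directly at the `**Summary:**` heading instead of under a redundant title. A hypothetical helper (not part of this PR) makes the three-turn structure easier to see:

```rust
/// Build a Llama 3 instruct prompt: one system turn, one user turn,
/// then an opened assistant header for the model to complete.
/// Illustrative only; the PR inlines this directly in `format!`.
fn llama3_prompt(system: &str, user: &str) -> String {
    format!(
        "<|begin_of_text|>\
         <|start_header_id|>system<|end_header_id|>\n\n{system}<|eot_id|>\
         <|start_header_id|>user<|end_header_id|>\n\n{user}<|eot_id|>\
         <|start_header_id|>assistant<|end_header_id|>\n\n"
    )
}
```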
+26 -19

```diff
@@ -165,7 +165,7 @@ function App() {
 
   const stopRecording = async () => {
     try {
-      // Stop the timers
+      // Stop the timers first
       if (recordingTimer.current) {
         clearInterval(recordingTimer.current);
         recordingTimer.current = null;
@@ -178,36 +178,43 @@
       setAppState("transcribing");
       setStatusMessage("Processing final audio...");
 
-      // Process any remaining audio
-      const finalChunk = await invoke<number[]>("get_remaining_audio", {
-        lastOffset: audioOffset.current
-      });
-
-      if (finalChunk.length > 0) {
-        const finalSegments = await invoke<TranscriptSegment[]>("transcribe_chunk", {
-          audioData: finalChunk
+      // First, process any audio that hasn't been processed yet
+      try {
+        const finalChunk = await invoke<number[]>("get_remaining_audio", {
+          lastOffset: audioOffset.current
         });
 
-        if (finalSegments.length > 0) {
-          const baseTime = totalProcessedSamples.current / 16000;
-          const adjustedSegments = finalSegments.map(seg => ({
-            ...seg,
-            start: seg.start + baseTime,
-            end: seg.end + baseTime,
-          }));
+        if (finalChunk.length > 0) {
+          console.log(`Processing final chunk of ${finalChunk.length} samples`);
+          const finalSegments = await invoke<TranscriptSegment[]>("transcribe_chunk", {
+            audioData: finalChunk
+          });
 
-          setTranscriptSegments(prev => [...prev, ...adjustedSegments]);
+          if (finalSegments.length > 0) {
+            const baseTime = totalProcessedSamples.current / 16000;
+            const adjustedSegments = finalSegments.map(seg => ({
+              ...seg,
+              start: seg.start + baseTime,
+              end: seg.end + baseTime,
+            }));
+
+            setTranscriptSegments(prev => [...prev, ...adjustedSegments]);
+          }
         }
+      } catch (chunkError) {
+        console.error("Error processing final chunk:", chunkError);
+        // Continue with stopping even if final chunk fails
       }
 
-      // Stop the recording
-      await invoke("stop_recording");
+      // Now stop the recording
+      await invoke<string>("stop_recording");
 
       setAppState("ready");
       setStatusMessage("");
     } catch (error) {
       console.error("Failed to stop recording:", error);
       setAppState("ready");
       setStatusMessage("");
       setErrorMessage(String(error));
     }
   };
```
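The final-chunk pass relies on a `get_remaining_audio` command that is not part of this diff: it should return every 16 kHz sample captured after `lastOffset` (Tauri maps the camelCase argument onto a snake_case parameter by default). A sketch of the contract the frontend assumes, with the `samples()` accessor being hypothetical:

```rust
/// Hypothetical counterpart to `invoke("get_remaining_audio", ...)`:
/// hand back the samples recorded after `last_offset` so the UI can
/// transcribe the final partial chunk before the capture is stopped.
#[tauri::command]
fn get_remaining_audio(
    state: tauri::State<'_, AppState>,
    last_offset: usize,
) -> Result<Vec<f32>, String> {
    let audio_guard = state.audio_capture.lock();
    let capture = audio_guard.as_ref().ok_or("No active recording")?;
    // `samples()` is an assumed accessor over the capture buffer.
    Ok(capture.samples().get(last_offset..).unwrap_or(&[]).to_vec())
}
```

The `baseTime` arithmetic then mirrors the backend's duration math: offsets are sample counts at 16 kHz, so `totalProcessedSamples.current / 16000` converts the already-processed prefix to seconds before shifting each segment's `start` and `end`.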