feat: template

2026-01-29 11:41:03 -08:00
parent e6c19b589e
commit a6843cb3f1
3 changed files with 63 additions and 29 deletions
+27 -2
```diff
@@ -316,15 +316,40 @@ fn start_recording(
     Ok("Recording started".to_string())
 }
 
-/// Stop recording and return the transcript.
+/// Stop recording (for real-time mode, transcription already happened during recording).
 #[tauri::command]
 async fn stop_recording(
     state: State<'_, AppState>,
     app_handle: tauri::AppHandle,
+) -> Result<String, String> {
+    let logs = Arc::clone(&state.logs);
+    emit_log(&app_handle, &logs, "[Audio] Stopping recording...");
+
+    // Stop the recording and get the duration
+    let duration = {
+        let mut audio_guard = state.audio_capture.lock();
+        if let Some(ref mut capture) = *audio_guard {
+            let samples = capture.stop_recording();
+            samples.len() as f32 / 16000.0
+        } else {
+            return Err("No active recording".to_string());
+        }
+    };
+
+    emit_log(&app_handle, &logs, &format!("[Audio] Recording stopped. Total duration: {:.1}s", duration));
+
+    Ok("Recording stopped".to_string())
+}
+
+/// Stop recording and transcribe all at once (batch mode).
+#[tauri::command]
+async fn stop_recording_batch(
+    state: State<'_, AppState>,
+    app_handle: tauri::AppHandle,
 ) -> Result<Vec<TranscriptSegment>, String> {
     let logs = Arc::clone(&state.logs);
-    emit_log(&app_handle, &logs, "[Audio] Stopping recording...");
+    emit_log(&app_handle, &logs, "[Audio] Stopping recording (batch mode)...");
 
     // Get the audio samples
     let audio_samples = {
```
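With this change, `stop_recording` becomes the real-time finalizer and the full-transcript path moves to `stop_recording_batch`. A minimal sketch of calling each command from the frontend, assuming Tauri's `invoke` and a `TranscriptSegment` interface inferred from the fields App.tsx actually uses (the `text` field is an assumption, not shown in the diff):

```ts
import { invoke } from "@tauri-apps/api/core"; // Tauri v2 path; v1 exports it from "@tauri-apps/api/tauri"

// Inferred shape: the diff only shows start/end (seconds); text is assumed.
interface TranscriptSegment {
  start: number;
  end: number;
  text: string;
}

async function stopRealtime(): Promise<string> {
  // Real-time mode: chunks were transcribed while recording,
  // so stopping only returns a status string.
  return invoke<string>("stop_recording");
}

async function stopBatch(): Promise<TranscriptSegment[]> {
  // Batch mode: one full-transcript pass happens here instead,
  // returning every segment at once.
  return invoke<TranscriptSegment[]>("stop_recording_batch");
}
```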
+10 -8
```diff
@@ -78,15 +78,17 @@ impl LlamaSummarizer {
         let prompt = format!(
             "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n\
-            You are a helpful assistant that creates concise meeting summaries. Focus on:\n\
-            - Key decisions made\n\
-            - Action items and who owns them\n\
-            - Important discussions and their outcomes\n\
-            - Next steps\n\n\
-            Keep the summary structured and easy to scan.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n\
+            You are a helpful assistant that creates structured meeting summaries. \
+            Format your response using the following template:\n\n\
+            **Summary:** A high level overview of the meeting.\n\n\
+            **Key decisions:** Any important resolutions that the meeting reached.\n\n\
+            **Action Items:**\n\
+            - list of things that must be addressed\n\n\
+            **Discussion points:**\n\
+            - a list of each topic/argument/counterpoint brought up in the meeting.\n\n\
+            Be concise but comprehensive. Focus on capturing all important information.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n\
             Please summarize this meeting transcript:\n\n\
-            {}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n\
-            Meeting Summary:\n",
+            {}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
             transcript
         );
```
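The new system prompt fixes the headings the model should emit, and the trailing "Meeting Summary:\n" primer is dropped since the template itself now carries the structure. Purely as an illustration of the requested shape (this echoes the template; it is not output from the diff or a real model run):

```ts
// Illustrative only: a summary string matching the template the prompt requests.
const exampleSummary = `**Summary:** A high level overview of the meeting.

**Key decisions:** Any important resolutions that the meeting reached.

**Action Items:**
- things that must be addressed

**Discussion points:**
- each topic/argument/counterpoint brought up`;
```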
+26 -19
```diff
@@ -165,7 +165,7 @@ function App() {
   const stopRecording = async () => {
     try {
-      // Stop the timers
+      // Stop the timers first
       if (recordingTimer.current) {
         clearInterval(recordingTimer.current);
         recordingTimer.current = null;
@@ -178,36 +178,43 @@
       setAppState("transcribing");
       setStatusMessage("Processing final audio...");
 
-      // Process any remaining audio
-      const finalChunk = await invoke<number[]>("get_remaining_audio", {
-        lastOffset: audioOffset.current
-      });
-
-      if (finalChunk.length > 0) {
-        const finalSegments = await invoke<TranscriptSegment[]>("transcribe_chunk", {
-          audioData: finalChunk
-        });
-
-        if (finalSegments.length > 0) {
-          const baseTime = totalProcessedSamples.current / 16000;
-          const adjustedSegments = finalSegments.map(seg => ({
-            ...seg,
-            start: seg.start + baseTime,
-            end: seg.end + baseTime,
-          }));
-          setTranscriptSegments(prev => [...prev, ...adjustedSegments]);
-        }
-      }
+      // First, process any audio that hasn't been processed yet
+      try {
+        const finalChunk = await invoke<number[]>("get_remaining_audio", {
+          lastOffset: audioOffset.current
+        });
+
+        if (finalChunk.length > 0) {
+          console.log(`Processing final chunk of ${finalChunk.length} samples`);
+          const finalSegments = await invoke<TranscriptSegment[]>("transcribe_chunk", {
+            audioData: finalChunk
+          });
+
+          if (finalSegments.length > 0) {
+            const baseTime = totalProcessedSamples.current / 16000;
+            const adjustedSegments = finalSegments.map(seg => ({
+              ...seg,
+              start: seg.start + baseTime,
+              end: seg.end + baseTime,
+            }));
+            setTranscriptSegments(prev => [...prev, ...adjustedSegments]);
+          }
+        }
+      } catch (chunkError) {
+        console.error("Error processing final chunk:", chunkError);
+        // Continue with stopping even if final chunk fails
+      }
 
-      // Stop the recording
-      await invoke("stop_recording");
+      // Now stop the recording
+      await invoke<string>("stop_recording");
 
       setAppState("ready");
       setStatusMessage("");
     } catch (error) {
       console.error("Failed to stop recording:", error);
       setAppState("ready");
+      setStatusMessage("");
       setErrorMessage(String(error));
     }
   };
```
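The offset arithmetic above is the heart of real-time mode: Whisper timestamps in each chunk are relative to the chunk's start, so they must be shifted by the duration already transcribed. A standalone restatement of that adjustment, reusing the `TranscriptSegment` shape sketched earlier and the 16 kHz sample rate the diff hard-codes:

```ts
// Same inferred shape as the earlier sketch.
type TranscriptSegment = { start: number; end: number; text: string };

const SAMPLE_RATE = 16_000; // Hz, matches the 16000 used throughout the diff

// Shift chunk-relative timestamps into recording-relative time.
// `processedSamples` is the number of samples transcribed before this chunk
// (totalProcessedSamples.current in App.tsx).
function adjustSegments(
  segments: TranscriptSegment[],
  processedSamples: number
): TranscriptSegment[] {
  const baseTime = processedSamples / SAMPLE_RATE; // seconds of audio before this chunk
  return segments.map(seg => ({
    ...seg,
    start: seg.start + baseTime,
    end: seg.end + baseTime,
  }));
}
```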