feat: display progress for finalising audio and summarising

2026-01-29 12:34:25 -08:00
parent a6843cb3f1
commit 43a544a886
7 changed files with 540 additions and 63 deletions
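At a glance: the Rust commands now take progress callbacks and forward them to the UI as Tauri events, which the React app listens for. A condensed, illustrative sketch of the frontend half of that wiring (the `watchProgress` helper is not part of this commit; the event names and 0..1 payload come from the diff below):

```typescript
import { listen, type UnlistenFn } from "@tauri-apps/api/event";

// Hypothetical helper: subscribe to the progress events emitted by the Rust
// side and hand percentages to the UI. Event names and the 0..1 payload
// match the backend changes in this commit.
export async function watchProgress(
  onTranscription: (pct: number) => void,
  onSummary: (pct: number) => void,
): Promise<UnlistenFn[]> {
  const unlistenTranscription = await listen<number>("transcription-progress", (event) => {
    onTranscription(event.payload * 100);
  });
  const unlistenSummary = await listen<number>("summary-progress", (event) => {
    onSummary(event.payload * 100);
  });
  return [unlistenTranscription, unlistenSummary];
}
```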
+18 -3
@@ -371,6 +371,7 @@ async fn stop_recording_batch(
// Transcribe the audio
emit_log(&app_handle, &logs, "[Transcribe] Starting transcription...");
let app_handle_clone = app_handle.clone();
let mut segments = {
let transcriber = state.transcriber.lock();
if !transcriber.is_loaded() {
@@ -378,7 +379,10 @@ async fn stop_recording_batch(
return Err("Whisper model not loaded. Please ensure the model is downloaded.".to_string()); return Err("Whisper model not loaded. Please ensure the model is downloaded.".to_string());
} }
transcriber.transcribe(&audio_samples) transcriber.transcribe_with_progress(&audio_samples, move |progress| {
// Emit progress event to frontend
let _ = app_handle_clone.emit("transcription-progress", progress);
})
.map_err(|e| format!("Transcription failed: {}", e))?
};
@@ -399,6 +403,7 @@ async fn stop_recording_batch(
async fn transcribe_chunk(
state: State<'_, AppState>,
audio_data: Vec<f32>,
app_handle: tauri::AppHandle,
) -> Result<Vec<TranscriptSegment>, String> {
let transcriber = state.transcriber.lock();
@@ -406,7 +411,13 @@ async fn transcribe_chunk(
return Err("Whisper model not loaded".to_string()); return Err("Whisper model not loaded".to_string());
} }
let segments = transcriber.transcribe(&audio_data) // Clone the app handle for the closure
let app_handle_clone = app_handle.clone();
let segments = transcriber.transcribe_with_progress(&audio_data, move |progress| {
// Emit progress event to frontend
let _ = app_handle_clone.emit("transcription-progress", progress);
})
.map_err(|e| format!("Transcription failed: {}", e))?;
Ok(segments)
@@ -460,7 +471,11 @@ async fn summarize(
return Err("LLaMA model not loaded".to_string()); return Err("LLaMA model not loaded".to_string());
} }
let summary = summarizer.summarize(&transcript) let app_handle_clone = app_handle.clone();
let summary = summarizer.summarize_with_progress(&transcript, move |progress| {
// Emit progress event to frontend
let _ = app_handle_clone.emit("summary-progress", progress);
})
.map_err(|e| format!("Summarization failed: {}", e))?; .map_err(|e| format!("Summarization failed: {}", e))?;
emit_log(&app_handle, &logs, &format!("[Summary] Generated {} character summary", summary.len())); emit_log(&app_handle, &logs, &format!("[Summary] Generated {} character summary", summary.len()));
+28 -1
@@ -71,6 +71,14 @@ impl LlamaSummarizer {
/// Generate a meeting summary from the transcript.
pub fn summarize(&self, transcript: &str) -> Result<String, SummarizerError> {
self.summarize_with_progress(transcript, |_| {})
}
/// Generate a meeting summary with progress callback.
pub fn summarize_with_progress<F>(&self, transcript: &str, mut progress_fn: F) -> Result<String, SummarizerError>
where
F: FnMut(f64),
{
let model = self
.model
.as_ref()
@@ -94,6 +102,9 @@ impl LlamaSummarizer {
debug!("Generating summary for {} character transcript", transcript.len()); debug!("Generating summary for {} character transcript", transcript.len());
// Report initial progress
progress_fn(0.0);
// Create context for inference
let ctx_params = LlamaContextParams::default()
.with_n_ctx(NonZeroU32::new(8192));
@@ -102,6 +113,9 @@ impl LlamaSummarizer {
.new_context(self.backend.as_ref().unwrap(), ctx_params)
.map_err(|e| SummarizerError::ContextError(e.to_string()))?;
// Report progress after context creation
progress_fn(0.1);
// Tokenize the prompt
let tokens = model
.str_to_token(&prompt, llama_cpp_2::model::AddBos::Always)
@@ -109,6 +123,9 @@ impl LlamaSummarizer {
debug!("Prompt tokenized to {} tokens", tokens.len()); debug!("Prompt tokenized to {} tokens", tokens.len());
// Report progress after tokenization
progress_fn(0.2);
// Create batch and add tokens
let mut batch = LlamaBatch::new(tokens.len(), 1);
@@ -122,6 +139,9 @@ impl LlamaSummarizer {
ctx.decode(&mut batch)
.map_err(|e| SummarizerError::SummarizationError(e.to_string()))?;
// Report progress after prompt decode
progress_fn(0.3);
// Generate response tokens
let mut output_tokens = Vec::new();
let max_tokens = 1024;
@@ -134,7 +154,7 @@ impl LlamaSummarizer {
LlamaSampler::dist(42),
]);
-for _ in 0..max_tokens {
for i in 0..max_tokens {
// Sample the next token using the sampler
// The sampler.sample() takes context and the index of the last token in the batch
let token = sampler.sample(&ctx, (batch.n_tokens() - 1) as i32);
@@ -146,6 +166,10 @@ impl LlamaSummarizer {
output_tokens.push(token);
// Report progress during generation (from 30% to 90%)
let generation_progress = i as f64 / max_tokens as f64;
progress_fn(0.3 + (generation_progress * 0.6));
// Prepare next batch
batch.clear();
batch.add(token, n_cur as i32, &[0], true)
@@ -164,6 +188,9 @@ impl LlamaSummarizer {
.filter_map(|t| model.token_to_str(*t, llama_cpp_2::model::Special::Tokenize).ok())
.collect::<String>();
// Report completion
progress_fn(1.0);
info!("Summary generated: {} characters", output_text.len()); info!("Summary generated: {} characters", output_text.len());
Ok(output_text.trim().to_string()) Ok(output_text.trim().to_string())
} }
+20
@@ -65,6 +65,14 @@ impl WhisperTranscriber {
/// Transcribe audio samples (expected to be 16kHz mono f32).
pub fn transcribe(&self, audio: &[f32]) -> Result<Vec<TranscriptSegment>, TranscriberError> {
self.transcribe_with_progress(audio, |_| {})
}
/// Transcribe audio samples with progress callback.
pub fn transcribe_with_progress<F>(&self, audio: &[f32], mut progress_fn: F) -> Result<Vec<TranscriptSegment>, TranscriberError>
where
F: FnMut(f64),
{
let context = self
.context
.as_ref()
@@ -74,11 +82,17 @@ impl WhisperTranscriber {
audio.len(),
audio.len() as f64 / 16000.0);
// Report initial progress
progress_fn(0.0);
// Create a state for this transcription
let mut state = context
.create_state()
.map_err(|e| TranscriberError::TranscriptionError(e.to_string()))?;
// Report progress after state creation
progress_fn(0.2);
// Set up transcription parameters
let mut params = FullParams::new(SamplingStrategy::Greedy { best_of: 1 });
@@ -96,6 +110,9 @@ impl WhisperTranscriber {
.full(params, audio)
.map_err(|e| TranscriberError::TranscriptionError(e.to_string()))?;
// Report progress after transcription
progress_fn(0.8);
// Extract segments
let num_segments = state.full_n_segments()
.map_err(|e| TranscriberError::TranscriptionError(e.to_string()))?;
@@ -125,6 +142,9 @@ impl WhisperTranscriber {
});
}
// Report completion
progress_fn(1.0);
info!("Transcription complete: {} segments", segments.len()); info!("Transcription complete: {} segments", segments.len());
Ok(segments) Ok(segments)
} }
+210
@@ -585,4 +585,214 @@ body {
background-color: #451a03;
color: #fbbf24;
}
.recording-item {
background-color: var(--surface-color);
border-color: var(--border-color);
}
.recording-item:hover {
background-color: var(--bg-color);
}
}
/* New layout styles */
.app-layout {
display: flex;
flex: 1;
gap: 2rem;
min-height: 0;
}
.recordings-list {
width: 300px;
background-color: var(--surface-color);
border-radius: 0.5rem;
padding: 1.5rem;
overflow-y: auto;
}
.recordings-list h3 {
margin-bottom: 1rem;
font-size: 1.125rem;
}
.no-recordings {
color: var(--text-secondary);
font-style: italic;
}
.recordings-items {
list-style: none;
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.recording-item {
padding: 0.75rem;
border-radius: 0.375rem;
cursor: pointer;
transition: all 0.2s;
background-color: white;
border: 1px solid var(--border-color);
}
.recording-item:hover {
background-color: var(--surface-color);
}
.recording-item.selected {
background-color: var(--primary-color);
color: white;
}
.recording-item.selected .recording-time {
color: white;
}
.recording-item.selected .recording-status {
color: rgba(255, 255, 255, 0.8);
}
.recording-time {
font-weight: 500;
font-size: 0.875rem;
}
.recording-status {
font-size: 0.75rem;
color: var(--text-secondary);
margin-top: 0.25rem;
}
.main-content {
flex: 1;
display: flex;
flex-direction: column;
gap: 2rem;
min-width: 0;
}
.recording-details {
flex: 1;
display: flex;
flex-direction: column;
gap: 1.5rem;
}
.recording-header {
display: flex;
justify-content: space-between;
align-items: center;
flex-wrap: wrap;
gap: 1rem;
}
.recording-header h2 {
font-size: 1.25rem;
margin: 0;
}
.recording-actions {
display: flex;
gap: 0.75rem;
flex-wrap: wrap;
}
.empty-state {
flex: 1;
display: flex;
align-items: center;
justify-content: center;
padding: 4rem 2rem;
text-align: center;
color: var(--text-secondary);
font-size: 1.125rem;
}
/* Summary display improvements */
.summary-text {
white-space: pre-wrap;
line-height: 1.6;
}
.summary-text strong {
display: block;
margin-top: 1rem;
margin-bottom: 0.5rem;
}
/* Progress Bar */
.progress-container {
width: 100%;
max-width: 300px;
margin: 1rem auto;
}
.progress-label {
font-size: 0.875rem;
color: var(--text-secondary);
margin-bottom: 0.5rem;
text-align: center;
}
.progress-bar {
width: 100%;
height: 8px;
background-color: var(--border-color);
border-radius: 4px;
overflow: hidden;
position: relative;
}
.progress-fill {
height: 100%;
background-color: var(--primary-color);
border-radius: 4px;
transition: width 0.3s ease;
position: relative;
overflow: hidden;
}
.progress-fill::after {
content: "";
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: linear-gradient(
90deg,
transparent 0%,
rgba(255, 255, 255, 0.2) 50%,
transparent 100%
);
animation: shimmer 1.5s infinite;
}
@keyframes shimmer {
0% {
transform: translateX(-100%);
}
100% {
transform: translateX(100%);
}
}
.progress-text {
font-size: 0.75rem;
color: var(--text-secondary);
text-align: center;
margin-top: 0.25rem;
font-variant-numeric: tabular-nums;
}
/* Transcribing indicator update */
.transcribing-indicator {
display: flex;
flex-direction: column;
align-items: center;
gap: 0.5rem;
padding: 1rem;
}
+230 -50
@@ -1,9 +1,11 @@
import { useState, useEffect, useRef } from "react";
import { invoke } from "@tauri-apps/api/core";
import { listen, type UnlistenFn } from "@tauri-apps/api/event";
import "./App.css"; import "./App.css";
import { TranscriptDisplay } from "./components/TranscriptDisplay"; import { TranscriptDisplay } from "./components/TranscriptDisplay";
import { SummaryDisplay } from "./components/SummaryDisplay"; import { SummaryDisplay } from "./components/SummaryDisplay";
import { BackendLogs } from "./components/BackendLogs"; import { BackendLogs } from "./components/BackendLogs";
import { ProgressBar } from "./components/ProgressBar";
interface TranscriptSegment {
start: number;
@@ -12,22 +14,35 @@ interface TranscriptSegment {
speaker: string;
}
interface Recording {
id: string;
timestamp: Date;
duration: number;
transcriptSegments: TranscriptSegment[];
summary: string | null;
isGeneratingSummary: boolean;
summaryProgress?: number;
}
type AppState = "checking" | "downloading_models" | "initializing" | "ready" | "recording" | "transcribing" | "error";
function App() {
-const [transcriptSegments, setTranscriptSegments] = useState<TranscriptSegment[]>([]);
-const [summary, setSummary] = useState<string | null>(null);
-const [isGeneratingSummary, setIsGeneratingSummary] = useState(false);
const [recordings, setRecordings] = useState<Recording[]>([]);
const [activeRecording, setActiveRecording] = useState<Recording | null>(null);
const [selectedRecordingId, setSelectedRecordingId] = useState<string | null>(null);
const [appState, setAppState] = useState<AppState>("checking");
const [statusMessage, setStatusMessage] = useState("Checking setup...");
const [showLogs, setShowLogs] = useState(false);
const [errorMessage, setErrorMessage] = useState<string | null>(null);
const [recordingDuration, setRecordingDuration] = useState(0);
const [transcriptionProgress, setTranscriptionProgress] = useState(0);
const initStarted = useRef(false);
const recordingTimer = useRef<number | null>(null);
const transcriptionTimer = useRef<number | null>(null);
const audioOffset = useRef(0);
const totalProcessedSamples = useRef(0);
const transcriptionProgressUnlisten = useRef<UnlistenFn | null>(null);
const summaryProgressUnlisten = useRef<UnlistenFn | null>(null);
useEffect(() => {
if (initStarted.current) return;
@@ -36,7 +51,7 @@ function App() {
initializeApp();
}, []);
-// Cleanup timers on unmount
// Cleanup timers and listeners on unmount
useEffect(() => {
return () => {
if (recordingTimer.current) {
@@ -45,9 +60,37 @@ function App() {
if (transcriptionTimer.current) {
clearInterval(transcriptionTimer.current);
}
if (transcriptionProgressUnlisten.current) {
transcriptionProgressUnlisten.current();
}
if (summaryProgressUnlisten.current) {
summaryProgressUnlisten.current();
}
};
}, []);
// Set up event listeners for progress updates
useEffect(() => {
const setupListeners = async () => {
// Listen for transcription progress
transcriptionProgressUnlisten.current = await listen<number>('transcription-progress', (event) => {
setTranscriptionProgress(event.payload * 100); // Convert 0-1 to 0-100
});
// Listen for summary progress
summaryProgressUnlisten.current = await listen<number>('summary-progress', (event) => {
const recordingId = selectedRecordingId || recordings[0]?.id;
if (recordingId) {
setRecordings(prev => prev.map(r =>
r.id === recordingId ? { ...r, summaryProgress: event.payload * 100 } : r
));
}
});
};
setupListeners();
}, [selectedRecordingId, recordings]);
const initializeApp = async () => {
try {
setAppState("checking");
@@ -122,7 +165,10 @@ function App() {
end: seg.end + baseTime,
}));
-setTranscriptSegments(prev => [...prev, ...adjustedSegments]);
setActiveRecording(prev => prev ? {
...prev,
transcriptSegments: [...prev.transcriptSegments, ...adjustedSegments]
} : null);
}
// Track total processed samples
@@ -140,11 +186,20 @@ function App() {
try {
setAppState("recording");
setRecordingDuration(0);
-setTranscriptSegments([]);
-setSummary(null);
audioOffset.current = 0;
totalProcessedSamples.current = 0;
// Create a new active recording
const newRecording: Recording = {
id: Date.now().toString(),
timestamp: new Date(),
duration: 0,
transcriptSegments: [],
summary: null,
isGeneratingSummary: false,
};
setActiveRecording(newRecording);
await invoke("start_recording"); await invoke("start_recording");
// Start timer to show recording duration // Start timer to show recording duration
@@ -177,6 +232,7 @@ function App() {
setAppState("transcribing"); setAppState("transcribing");
setStatusMessage("Processing final audio..."); setStatusMessage("Processing final audio...");
setTranscriptionProgress(0);
// First, process any audio that hasn't been processed yet
try {
@@ -186,6 +242,7 @@ function App() {
if (finalChunk.length > 0) {
console.log(`Processing final chunk of ${finalChunk.length} samples`);
// The progress will be updated via events from the backend
const finalSegments = await invoke<TranscriptSegment[]>("transcribe_chunk", {
audioData: finalChunk
});
@@ -198,7 +255,10 @@ function App() {
end: seg.end + baseTime,
}));
-setTranscriptSegments(prev => [...prev, ...adjustedSegments]);
setActiveRecording(prev => prev ? {
...prev,
transcriptSegments: [...prev.transcriptSegments, ...adjustedSegments]
} : null);
}
}
} catch (chunkError) {
@@ -209,8 +269,23 @@ function App() {
// Now stop the recording
await invoke<string>("stop_recording");
// Save the recording to history
if (activeRecording) {
const finalRecording = {
...activeRecording,
duration: recordingDuration
};
setRecordings(prev => [finalRecording, ...prev]);
setSelectedRecordingId(finalRecording.id);
setActiveRecording(null);
}
// Brief delay to show completion
await new Promise(resolve => setTimeout(resolve, 500));
setAppState("ready"); setAppState("ready");
setStatusMessage(""); setStatusMessage("");
setTranscriptionProgress(0);
} catch (error) {
console.error("Failed to stop recording:", error);
setAppState("ready");
@@ -219,28 +294,61 @@ function App() {
}
};
-const generateSummary = async () => {
-if (transcriptSegments.length === 0) return;
const generateSummary = async (recordingId: string) => {
const recording = recordings.find(r => r.id === recordingId);
if (!recording || recording.transcriptSegments.length === 0) return;
-setIsGeneratingSummary(true);
// Update the recording to show it's generating
setRecordings(prev => prev.map(r =>
r.id === recordingId ? { ...r, isGeneratingSummary: true, summaryProgress: 0 } : r
));
-const fullTranscript = transcriptSegments
const fullTranscript = recording.transcriptSegments
.map((seg) => `${seg.speaker}: ${seg.text}`)
.join("\n");
try {
// Progress will be updated via events from the backend
const summaryResult = await invoke<string>("summarize", { transcript: fullTranscript });
-setSummary(summaryResult);
// Update the recording with the summary
setRecordings(prev => prev.map(r =>
r.id === recordingId
? { ...r, summary: summaryResult, isGeneratingSummary: false, summaryProgress: 100 }
: r
));
// Clear progress after a brief delay
setTimeout(() => {
setRecordings(prev => prev.map(r =>
r.id === recordingId ? { ...r, summaryProgress: undefined } : r
));
}, 1000);
} catch (error) {
console.error("Failed to generate summary:", error);
setErrorMessage(String(error));
-} finally {
-setIsGeneratingSummary(false);
// Reset generating state on error
setRecordings(prev => prev.map(r =>
r.id === recordingId ? { ...r, isGeneratingSummary: false, summaryProgress: undefined } : r
));
}
};
-const downloadTranscript = () => {
-const content = transcriptSegments
const copyToClipboard = async (text: string) => {
try {
await navigator.clipboard.writeText(text);
// You could add a toast notification here
} catch (error) {
console.error("Failed to copy to clipboard:", error);
}
};
const downloadTranscript = (recordingId: string) => {
const recording = recordings.find(r => r.id === recordingId);
if (!recording) return;
const content = recording.transcriptSegments
.map((seg) => `[${formatTime(seg.start)}] ${seg.speaker}: ${seg.text}`)
.join("\n");
@@ -248,19 +356,20 @@ function App() {
const url = URL.createObjectURL(blob);
const a = document.createElement("a");
a.href = url;
-a.download = `meeting-transcript-${new Date().toISOString().split("T")[0]}.txt`;
a.download = `meeting-transcript-${recording.timestamp.toISOString().split("T")[0]}.txt`;
a.click();
URL.revokeObjectURL(url);
};
-const downloadSummary = () => {
-if (!summary) return;
const downloadSummary = (recordingId: string) => {
const recording = recordings.find(r => r.id === recordingId);
if (!recording || !recording.summary) return;
-const blob = new Blob([summary], { type: "text/plain" });
const blob = new Blob([recording.summary], { type: "text/plain" });
const url = URL.createObjectURL(blob);
const a = document.createElement("a");
a.href = url;
-a.download = `meeting-summary-${new Date().toISOString().split("T")[0]}.txt`;
a.download = `meeting-summary-${recording.timestamp.toISOString().split("T")[0]}.txt`;
a.click();
URL.revokeObjectURL(url);
};
@@ -277,6 +386,10 @@ function App() {
return `${mins}:${secs.toString().padStart(2, "0")}`;
};
// Get the currently selected recording or active recording
const displayedRecording = activeRecording ||
(selectedRecordingId ? recordings.find(r => r.id === selectedRecordingId) : null);
const renderSetupScreen = () => (
<div className="setup-screen">
<div className="setup-content">
@@ -347,7 +460,7 @@ function App() {
<div className="recording-indicator"> <div className="recording-indicator">
<span className="recording-dot" /> <span className="recording-dot" />
Recording: {formatDuration(recordingDuration)} Recording: {formatDuration(recordingDuration)}
{transcriptSegments.length > 0 && ( {activeRecording && activeRecording.transcriptSegments.length > 0 && (
<span className="real-time-indicator"> (Real-time transcription active)</span> <span className="real-time-indicator"> (Real-time transcription active)</span>
)} )}
</div> </div>
@@ -359,28 +472,42 @@ function App() {
{appState === "transcribing" && ( {appState === "transcribing" && (
<div className="transcribing-indicator"> <div className="transcribing-indicator">
<div className="loading-spinner small" /> <ProgressBar
<span>Transcribing audio...</span> progress={transcriptionProgress}
label="Processing final audio..."
/>
</div> </div>
)} )}
{appState === "ready" && transcriptSegments.length > 0 && (
<div className="action-buttons">
<button className="secondary-button" onClick={downloadTranscript}>
📄 Download Transcript
</button>
<button
className="primary-button"
onClick={generateSummary}
disabled={isGeneratingSummary}
>
Generate Summary
</button>
</div>
)}
</section> </section>
); );
const renderRecordingsList = () => (
<aside className="recordings-list">
<h3>Recording History</h3>
{recordings.length === 0 ? (
<p className="no-recordings">No recordings yet</p>
) : (
<ul className="recordings-items">
{recordings.map(recording => (
<li
key={recording.id}
className={`recording-item ${selectedRecordingId === recording.id ? 'selected' : ''}`}
onClick={() => setSelectedRecordingId(recording.id)}
>
<div className="recording-time">
{recording.timestamp.toLocaleTimeString()} - {formatDuration(recording.duration)}
</div>
<div className="recording-status">
{recording.summary ? '✓ Summary' : recording.isGeneratingSummary ? '⏳ Summarizing...' : ''}
</div>
</li>
))}
</ul>
)}
</aside>
);
// Show setup screen for non-ready states
if (appState === "checking" || appState === "downloading_models" || appState === "initializing" || appState === "error") {
return (
@@ -401,20 +528,73 @@ function App() {
<p>Local Meeting Transcription & Summarization</p>
</header>
-<div className="app-content">
-{renderRecordingControls()}
-<div className="content-grid">
-<TranscriptDisplay segments={transcriptSegments} />
-<SummaryDisplay
-summary={summary}
-isLoading={isGeneratingSummary}
-onDownload={downloadSummary}
-/>
<div className="app-layout">
{/* Left sidebar with recordings list */}
{renderRecordingsList()}
{/* Main content area */}
<div className="main-content">
{renderRecordingControls()}
{/* Display selected recording or active recording */}
{displayedRecording && (
<div className="recording-details">
<div className="recording-header">
<h2>Recording from {displayedRecording.timestamp.toLocaleString()}</h2>
<div className="recording-actions">
<button
className="secondary-button"
onClick={() => downloadTranscript(displayedRecording.id)}
>
📄 Download Transcript
</button>
{!displayedRecording.summary && !displayedRecording.isGeneratingSummary && (
<button
className="primary-button"
onClick={() => generateSummary(displayedRecording.id)}
>
Generate Summary
</button>
)}
{displayedRecording.summary && (
<>
<button
className="secondary-button"
onClick={() => copyToClipboard(displayedRecording.summary!)}
>
📋 Copy Summary
</button>
<button
className="secondary-button"
onClick={() => downloadSummary(displayedRecording.id)}
>
💾 Download Summary
</button>
</>
)}
</div>
</div>
<div className="content-grid">
<TranscriptDisplay segments={displayedRecording.transcriptSegments} />
<SummaryDisplay
summary={displayedRecording.summary}
isLoading={displayedRecording.isGeneratingSummary}
progress={displayedRecording.summaryProgress}
/>
</div>
</div>
)}
{!displayedRecording && recordings.length === 0 && appState === "ready" && (
<div className="empty-state">
<p>Click "Start Recording" to begin your first meeting transcription!</p>
</div>
)}
</div>
-<BackendLogs isVisible={showLogs} onToggle={() => setShowLogs(!showLogs)} />
</div>
<BackendLogs isVisible={showLogs} onToggle={() => setShowLogs(!showLogs)} />
</main>
);
}
+19
@@ -0,0 +1,19 @@
interface ProgressBarProps {
progress: number; // 0-100
label?: string;
}
export function ProgressBar({ progress, label }: ProgressBarProps) {
return (
<div className="progress-container">
{label && <div className="progress-label">{label}</div>}
<div className="progress-bar">
<div
className="progress-fill"
style={{ width: `${Math.min(100, Math.max(0, progress))}%` }}
/>
</div>
<div className="progress-text">{Math.round(progress)}%</div>
</div>
);
}
+15 -9
@@ -1,25 +1,31 @@
interface SummaryDisplayProps {
summary: string | null;
isLoading: boolean;
-onDownload: () => void;
progress?: number;
}
-export function SummaryDisplay({ summary, isLoading, onDownload }: SummaryDisplayProps) {
import { ProgressBar } from "./ProgressBar";
export function SummaryDisplay({ summary, isLoading, progress }: SummaryDisplayProps) {
return (
<div className="summary-display">
<div className="summary-header">
<h2>Meeting Summary</h2>
-{summary && (
-<button className="download-button" onClick={onDownload}>
-📥 Download
-</button>
-)}
</div>
<div className="summary-content">
{isLoading ? (
<div className="loading">
-<div className="spinner"></div>
-<p>Generating summary...</p>
{progress !== undefined ? (
<ProgressBar
progress={progress}
label="Generating summary..."
/>
) : (
<>
<div className="spinner"></div>
<p>Generating summary...</p>
</>
)}
</div>
) : summary ? (
<div className="summary-text">{summary}</div>