From f9cb13f53f4453bbe5e95bdedeb72754c3732848 Mon Sep 17 00:00:00 2001
From: Naomi Carrigan
Date: Thu, 29 Jan 2026 17:23:40 -0800
Subject: [PATCH] fix: lints

---
 package.json                      |   4 +-
 src-tauri/Cargo.lock              |   1 +
 src-tauri/Cargo.toml              |   3 +
 src-tauri/src/lib.rs              |   2 +-
 src-tauri/src/ml/summarizer.rs    |   2 +-
 src-tauri/src/ml/vad.rs           |   1 +
 src-tauri/tauri.conf.json         |   4 +-
 src/App.css                       |  22 +--
 src/App.tsx                       | 290 +++++++++++++++++-------------
 src/components/ProgressBar.tsx    |   2 +-
 src/components/SummaryDisplay.tsx |   5 +-
 11 files changed, 187 insertions(+), 149 deletions(-)

diff --git a/package.json b/package.json
index 1ed659d..612aebe 100644
--- a/package.json
+++ b/package.json
@@ -6,8 +6,8 @@
   "scripts": {
     "dev": "vite",
     "build": "tsc && vite build",
-    "lint": "eslint .",
-    "lint:fix": "eslint . --fix",
+    "lint": "eslint src",
+    "lint:fix": "eslint src --fix",
     "format": "prettier --write .",
     "format:check": "prettier --check .",
     "preview": "vite preview",
diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock
index ee047d4..08a4446 100644
--- a/src-tauri/Cargo.lock
+++ b/src-tauri/Cargo.lock
@@ -531,6 +531,7 @@ dependencies = [
  "tauri",
  "tauri-build",
  "tauri-plugin-opener",
+ "tempfile",
  "thiserror 2.0.18",
  "tokio",
  "tracing",
diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml
index dc23932..9ad66e7 100644
--- a/src-tauri/Cargo.toml
+++ b/src-tauri/Cargo.toml
@@ -67,6 +67,9 @@ windows = { version = "0.62", features = [
     "Win32_UI_Shell_PropertiesSystem",
 ] }
 
+[dev-dependencies]
+tempfile = "3.10"
+
 [patch.crates-io]
 # Fix cross-compilation: use CARGO_CFG_TARGET_OS instead of cfg!(windows)
 llama-cpp-sys-2 = { path = "../patches/llama-cpp-sys-2" }
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 4af32fc..7894597 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -487,7 +487,7 @@ fn check_ready(state: State<'_, AppState>) -> Result<bool, String> {
 
     // At minimum, we need the summarizer loaded
     // Whisper can be loaded on first use
-    let ready = summarizer.as_ref().map_or(false, |s| s.is_loaded());
+    let ready = summarizer.as_ref().is_some_and(|s| s.is_loaded());
 
     Ok(ready)
 }
diff --git a/src-tauri/src/ml/summarizer.rs b/src-tauri/src/ml/summarizer.rs
index ae23567..bf7f31a 100644
--- a/src-tauri/src/ml/summarizer.rs
+++ b/src-tauri/src/ml/summarizer.rs
@@ -158,7 +158,7 @@ impl LlamaSummarizer {
     for i in 0..max_tokens {
         // Sample the next token using the sampler
         // The sampler.sample() takes context and the index of the last token in the batch
-        let token = sampler.sample(&ctx, (batch.n_tokens() - 1) as i32);
+        let token = sampler.sample(&ctx, batch.n_tokens() - 1);
 
         // Check for end of generation
         if model.is_eog_token(token) {
diff --git a/src-tauri/src/ml/vad.rs b/src-tauri/src/ml/vad.rs
index bcd76dd..e122c2b 100644
--- a/src-tauri/src/ml/vad.rs
+++ b/src-tauri/src/ml/vad.rs
@@ -415,6 +415,7 @@ mod tests {
             start_sample: 16000,
             end_sample: 32000,
             speaker_id: 0,
+            fingerprint: None,
         };
 
         assert!((segment.start_seconds() - 1.0).abs() < 0.001);
diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index 339d89d..4670dab 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -37,9 +37,7 @@
       "installerIcon": "icons/icon.ico"
     }
   },
-  "resources": [
-    "resources/"
-  ],
+  "resources": ["resources/"],
   "icon": [
     "icons/32x32.png",
     "icons/128x128.png",
diff --git a/src/App.css b/src/App.css
index e87d435..b2ffa41 100644
--- a/src/App.css
+++ b/src/App.css
@@ -5,21 +5,21 @@
   font-weight: 400;
 
   /* Witchy Purple Rose Palette */
-  --witch-purple: #2B1B3D;
-  --witch-plum: #44275A;
-  --witch-rose: #A8577E;
-  --witch-mauve: #D4A5C7;
-  --witch-lavender: #E8D5E8;
-  --witch-black: #0A0009;
-  --witch-silver: #C0C0C0;
-  --witch-moon: #F5F5F5;
+  --witch-purple: #2b1b3d;
+  --witch-plum: #44275a;
+  --witch-rose: #a8577e;
+  --witch-mauve: #d4a5c7;
+  --witch-lavender: #e8d5e8;
+  --witch-black: #0a0009;
+  --witch-silver: #c0c0c0;
+  --witch-moon: #f5f5f5;
   --witch-shadow: rgba(10, 0, 9, 0.7);
 
   /* Theme mappings */
   --primary-color: var(--witch-rose);
   --primary-hover: var(--witch-plum);
   --secondary-color: var(--witch-mauve);
-  --danger-color: #D4658E;
+  --danger-color: #d4658e;
   --bg-color: var(--witch-black);
   --surface-color: var(--witch-purple);
   --text-color: var(--witch-moon);
@@ -156,7 +156,7 @@ body {
 }
 
 .record-button.recording:hover {
-  background-color: #B94A6D;
+  background-color: #b94a6d;
 }
 
 .recording-indicator {
@@ -514,7 +514,7 @@ body {
   flex: 1;
   overflow-y: auto;
   padding: 0.5rem 1rem;
-  font-family: 'Consolas', 'Monaco', 'Courier New', monospace;
+  font-family: "Consolas", "Monaco", "Courier New", monospace;
   font-size: 0.75rem;
   background-color: var(--witch-black);
   color: var(--witch-silver);
diff --git a/src/App.tsx b/src/App.tsx
index 2b36988..0aae93a 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -1,4 +1,4 @@
-import { useState, useEffect, useRef } from "react";
+import { useState, useEffect, useRef, useCallback } from "react";
 import { invoke } from "@tauri-apps/api/core";
 import { listen, type UnlistenFn } from "@tauri-apps/api/event";
 import { getVersion } from "@tauri-apps/api/app";
@@ -42,7 +42,14 @@ interface Recording {
   summaryProgress?: number;
 }
 
-type AppState = "checking" | "downloading_models" | "initializing" | "ready" | "recording" | "transcribing" | "error";
+type AppState =
+  | "checking"
+  | "downloading_models"
+  | "initializing"
+  | "ready"
+  | "recording"
+  | "transcribing"
+  | "error";
 
 function App() {
   const [recordings, setRecordings] = useState<Recording[]>([]);
@@ -63,17 +70,75 @@ function App() {
   const transcriptionProgressUnlisten = useRef<UnlistenFn | null>(null);
   const summaryProgressUnlisten = useRef<UnlistenFn | null>(null);
 
+  // Define initializeApp before using it
+  const initializeApp = useCallback(async () => {
+    try {
+      setAppState("checking");
+      setStatusMessage("Checking if models are present...");
+
+      const modelsPresent = await invoke<boolean>("check_models");
+
+      if (!modelsPresent) {
+        setAppState("downloading_models");
+        setStatusMessage("Downloading AI models (~2GB)...");
+        setShowLogs(true);
+
+        await invoke("download_models");
+
+        // Check again after download
+        const modelsVerified = await invoke<boolean>("check_models");
+        if (!modelsVerified) {
+          throw new Error(
+            "Download completed but model file not found. This might be a path or permissions issue."
+          );
+        }
+      }
+
+      // Initialize ML models
+      setAppState("initializing");
+      setStatusMessage("Loading AI models (this may take a moment)...");
+      setShowLogs(true);
+
+      await invoke("initialize_models");
+
+      const ready = await invoke<boolean>("check_ready");
+      if (!ready) {
+        throw new Error("Models failed to initialize. Check logs for details.");
+      }
+
+      setAppState("ready");
+      setStatusMessage("");
+      setShowLogs(false);
+
+      // Load saved recordings
+      try {
+        const savedRecordings = await invoke<StoredRecording[]>("load_recordings");
+        const loadedRecordings = savedRecordings.map(storedToFrontend);
+        setRecordings(loadedRecordings);
+        console.log(`Loaded ${loadedRecordings.length} transcripts from storage`);
+      } catch (loadError) {
+        console.error("Failed to load transcripts:", loadError);
+        // Don't fail app init if we can't load transcripts
+      }
+    } catch (error) {
+      console.error("Initialization failed:", error);
+      setAppState("error");
+      setErrorMessage(String(error));
+      setShowLogs(true);
+    }
+  }, []);
+
   useEffect(() => {
     if (initStarted.current) return;
     initStarted.current = true;
 
     // Get app version
-    getVersion().then(version => {
+    getVersion().then((version) => {
       setAppVersion(version);
     });
 
     initializeApp();
-  }, []);
+  }, [initializeApp]);
 
   // Helper function to convert stored recording to frontend format
   const storedToFrontend = (stored: StoredRecording): Recording => ({
@@ -119,17 +184,22 @@ function App() {
   useEffect(() => {
     const setupListeners = async () => {
       // Listen for transcription progress
-      transcriptionProgressUnlisten.current = await listen('transcription-progress', (event) => {
-        setTranscriptionProgress(event.payload * 100); // Convert 0-1 to 0-100
-      });
+      transcriptionProgressUnlisten.current = await listen<number>(
+        "transcription-progress",
+        (event) => {
+          setTranscriptionProgress(event.payload * 100); // Convert 0-1 to 0-100
+        }
+      );
 
       // Listen for summary progress
-      summaryProgressUnlisten.current = await listen('summary-progress', (event) => {
+      summaryProgressUnlisten.current = await listen<number>("summary-progress", (event) => {
         const recordingId = selectedRecordingId || recordings[0]?.id;
         if (recordingId) {
-          setRecordings(prev => prev.map(r =>
-            r.id === recordingId ? { ...r, summaryProgress: event.payload * 100 } : r
-          ));
+          setRecordings((prev) =>
+            prev.map((r) =>
+              r.id === recordingId ? { ...r, summaryProgress: event.payload * 100 } : r
+            )
+          );
         }
       });
     };
@@ -137,62 +207,6 @@ function App() {
 
     setupListeners();
   }, [selectedRecordingId, recordings]);
 
-  const initializeApp = async () => {
-    try {
-      setAppState("checking");
-      setStatusMessage("Checking if models are present...");
-
-      const modelsPresent = await invoke<boolean>("check_models");
-
-      if (!modelsPresent) {
-        setAppState("downloading_models");
-        setStatusMessage("Downloading AI models (~2GB)...");
-        setShowLogs(true);
-
-        await invoke("download_models");
-
-        // Check again after download
-        const modelsVerified = await invoke<boolean>("check_models");
-        if (!modelsVerified) {
-          throw new Error("Download completed but model file not found. This might be a path or permissions issue.");
-        }
-      }
-
-      // Initialize ML models
-      setAppState("initializing");
-      setStatusMessage("Loading AI models (this may take a moment)...");
-      setShowLogs(true);
-
-      await invoke("initialize_models");
-
-      // Check if models are ready
-      const ready = await invoke<boolean>("check_ready");
-      if (!ready) {
-        throw new Error("Models failed to initialize. Check logs for details.");
-      }
-
-      setAppState("ready");
-      setStatusMessage("");
-      setShowLogs(false);
-
-      // Load saved recordings
-      try {
-        const savedRecordings = await invoke<StoredRecording[]>("load_recordings");
-        const loadedRecordings = savedRecordings.map(storedToFrontend);
-        setRecordings(loadedRecordings);
-        console.log(`Loaded ${loadedRecordings.length} transcripts from storage`);
-      } catch (loadError) {
-        console.error("Failed to load transcripts:", loadError);
-        // Don't fail app init if we can't load transcripts
-      }
-    } catch (error) {
-      console.error("Initialization failed:", error);
-      setAppState("error");
-      setErrorMessage(String(error));
-      setShowLogs(true);
-    }
-  };
-
   const retrySetup = () => {
     setErrorMessage(null);
     initStarted.current = false;
@@ -203,7 +217,7 @@ function App() {
     try {
       // Get the next chunk of audio
       const [audioChunk, newOffset] = await invoke<[number[], number]>("get_audio_chunk", {
-        lastOffset: audioOffset.current
+        lastOffset: audioOffset.current,
       });
 
       // If we have enough audio (at least 5 seconds worth at 16kHz)
@@ -212,22 +226,26 @@ function App() {
       const chunkStartTime = totalProcessedSamples.current / 16000;
       const newSegments = await invoke("transcribe_chunk", {
         audioData: audioChunk,
-        chunkStartTime: chunkStartTime
+        chunkStartTime: chunkStartTime,
       });
 
       if (newSegments.length > 0) {
         // Calculate timestamps based on total processed samples
         const baseTime = totalProcessedSamples.current / 16000;
-        const adjustedSegments = newSegments.map(seg => ({
+        const adjustedSegments = newSegments.map((seg) => ({
           ...seg,
           start: seg.start + baseTime,
           end: seg.end + baseTime,
         }));
 
-        setActiveRecording(prev => prev ? {
-          ...prev,
-          transcriptSegments: [...prev.transcriptSegments, ...adjustedSegments]
-        } : null);
+        setActiveRecording((prev) =>
+          prev
+            ? {
+                ...prev,
+                transcriptSegments: [...prev.transcriptSegments, ...adjustedSegments],
+              }
+            : null
+        );
       }
 
       // Track total processed samples
@@ -264,7 +282,7 @@ function App() {
 
     // Start timer to show recording duration
     recordingTimer.current = window.setInterval(() => {
-      setRecordingDuration(d => d + 1);
+      setRecordingDuration((d) => d + 1);
     }, 1000);
 
     // Start real-time transcription timer (every 5 seconds)
@@ -297,7 +315,7 @@ function App() {
     // First, process any audio that hasn't been processed yet
     try {
      const finalChunk = await invoke<number[]>("get_remaining_audio", {
-        lastOffset: audioOffset.current
+        lastOffset: audioOffset.current,
       });
 
       if (finalChunk.length > 0) {
@@ -306,21 +324,25 @@ function App() {
         const chunkStartTime = totalProcessedSamples.current / 16000;
 
         // The progress will be updated via events from the backend
         const finalSegments = await invoke("transcribe_chunk", {
           audioData: finalChunk,
-          chunkStartTime: chunkStartTime
+          chunkStartTime: chunkStartTime,
         });
 
         if (finalSegments.length > 0) {
           const baseTime = totalProcessedSamples.current / 16000;
-          const adjustedSegments = finalSegments.map(seg => ({
+          const adjustedSegments = finalSegments.map((seg) => ({
             ...seg,
             start: seg.start + baseTime,
             end: seg.end + baseTime,
           }));
 
-          setActiveRecording(prev => prev ? {
-            ...prev,
-            transcriptSegments: [...prev.transcriptSegments, ...adjustedSegments]
-          } : null);
+          setActiveRecording((prev) =>
+            prev
+              ? {
+                  ...prev,
+                  transcriptSegments: [...prev.transcriptSegments, ...adjustedSegments],
+                }
+              : null
+          );
         }
       }
     } catch (chunkError) {
@@ -335,16 +357,16 @@ function App() {
 
     if (activeRecording) {
       const finalRecording = {
         ...activeRecording,
-        duration: recordingDuration
+        duration: recordingDuration,
       };
-      setRecordings(prev => [finalRecording, ...prev]);
+      setRecordings((prev) => [finalRecording, ...prev]);
       setSelectedRecordingId(finalRecording.id);
       setActiveRecording(null);
 
       // Save to persistent storage
       try {
         await invoke("save_recording", {
-          recording: frontendToStored(finalRecording)
+          recording: frontendToStored(finalRecording),
         });
         console.log("Transcript saved to storage");
       } catch (saveError) {
@@ -354,7 +376,7 @@ function App() {
     }
 
     // Brief delay to show completion
-    await new Promise(resolve => setTimeout(resolve, 500));
+    await new Promise((resolve) => setTimeout(resolve, 500));
 
     setAppState("ready");
     setStatusMessage("");
@@ -368,13 +390,15 @@ function App() {
   };
 
   const generateSummary = async (recordingId: string) => {
-    const recording = recordings.find(r => r.id === recordingId);
+    const recording = recordings.find((r) => r.id === recordingId);
     if (!recording || recording.transcriptSegments.length === 0) return;
 
     // Update the recording to show it's generating
-    setRecordings(prev => prev.map(r =>
-      r.id === recordingId ? { ...r, isGeneratingSummary: true, summaryProgress: 0 } : r
-    ));
+    setRecordings((prev) =>
+      prev.map((r) =>
+        r.id === recordingId ? { ...r, isGeneratingSummary: true, summaryProgress: 0 } : r
+      )
+    );
 
     const fullTranscript = recording.transcriptSegments
       .map((seg) => `${seg.speaker}: ${seg.text}`)
@@ -382,27 +406,27 @@ function App() {
 
     try {
       // Progress will be updated via events from the backend
-      const [summaryResult, titleResult] = await invoke<[string, string | null]>("summarize", { transcript: fullTranscript });
+      const [summaryResult, titleResult] = await invoke<[string, string | null]>("summarize", {
+        transcript: fullTranscript,
+      });
 
       // Update the recording with the summary and title
-      const updatedRecording = recordings.find(r => r.id === recordingId);
+      const updatedRecording = recordings.find((r) => r.id === recordingId);
       if (updatedRecording) {
         const recordingWithSummary = {
           ...updatedRecording,
           summary: summaryResult,
           title: titleResult,
           isGeneratingSummary: false,
-          summaryProgress: 100
+          summaryProgress: 100,
         };
 
-        setRecordings(prev => prev.map(r =>
-          r.id === recordingId ? recordingWithSummary : r
-        ));
+        setRecordings((prev) => prev.map((r) => (r.id === recordingId ? recordingWithSummary : r)));
 
         // Update in persistent storage
         try {
           await invoke("update_recording", {
-            recording: frontendToStored(recordingWithSummary)
+            recording: frontendToStored(recordingWithSummary),
           });
           console.log("Transcript updated with summary");
         } catch (updateError) {
@@ -411,9 +435,9 @@ function App() {
 
         // Clear progress after a brief delay
         setTimeout(() => {
-          setRecordings(prev => prev.map(r =>
-            r.id === recordingId ? { ...r, summaryProgress: undefined } : r
-          ));
+          setRecordings((prev) =>
+            prev.map((r) => (r.id === recordingId ? { ...r, summaryProgress: undefined } : r))
+          );
         }, 1000);
       }
     } catch (error) {
@@ -421,9 +445,13 @@ function App() {
       setErrorMessage(String(error));
 
       // Reset generating state on error
-      setRecordings(prev => prev.map(r =>
-        r.id === recordingId ? { ...r, isGeneratingSummary: false, summaryProgress: undefined } : r
-      ));
+      setRecordings((prev) =>
+        prev.map((r) =>
+          r.id === recordingId
+            ? { ...r, isGeneratingSummary: false, summaryProgress: undefined }
+            : r
+        )
+      );
     }
   };
 
@@ -438,7 +466,9 @@ function App() {
 
   const deleteRecording = async (recordingId: string) => {
     // Confirm deletion
-    if (!confirm("Are you sure you want to delete this transcript? This action cannot be undone.")) {
+    if (
+      !confirm("Are you sure you want to delete this transcript? This action cannot be undone.")
+    ) {
       return;
     }
 
     try {
       await invoke("delete_recording", { recordingId });
 
       // Remove from state
-      setRecordings(prev => prev.filter(r => r.id !== recordingId));
+      setRecordings((prev) => prev.filter((r) => r.id !== recordingId));
 
       // Clear selection if we deleted the selected recording
       if (selectedRecordingId === recordingId) {
@@ -462,7 +492,7 @@ function App() {
   };
 
   const downloadTranscript = (recordingId: string) => {
-    const recording = recordings.find(r => r.id === recordingId);
+    const recording = recordings.find((r) => r.id === recordingId);
     if (!recording) return;
 
     const content = recording.transcriptSegments
@@ -479,7 +509,7 @@ function App() {
   };
 
   const downloadSummary = (recordingId: string) => {
-    const recording = recordings.find(r => r.id === recordingId);
+    const recording = recordings.find((r) => r.id === recordingId);
     if (!recording || !recording.summary) return;
 
     const blob = new Blob([recording.summary], { type: "text/plain" });
@@ -504,8 +534,9 @@ function App() {
   };
 
   // Get the currently selected recording or active recording
-  const displayedRecording = activeRecording ||
-    (selectedRecordingId ? recordings.find(r => r.id === selectedRecordingId) : null);
+  const displayedRecording =
+    activeRecording ||
+    (selectedRecordingId ? recordings.find((r) => r.id === selectedRecordingId) : null);
 
   const renderSetupScreen = () => (
@@ -540,9 +571,7 @@ function App() {
           • Whisper - Speech recognition
           • VAD - Speaker detection
           •
-
-          This is a pure Rust backend - no Python required!
-
+        This is a pure Rust backend - no Python required!
 
     )}
 
-        {(appState === "checking" || appState === "initializing" || appState === "downloading_models") && (
-
-        )}
+        {(appState === "checking" ||
+          appState === "initializing" ||
+          appState === "downloading_models") && }
 
        setShowLogs(!showLogs)} />
@@ -589,13 +618,9 @@ function App() {
 
         {appState === "transcribing" && (
-
+
         )}
-
   );
 
@@ -606,17 +631,18 @@ function App() {
 
       No transcripts yet
 
     ) : (
 
-          {recordings.map(recording => (
+          {recordings.map((recording) => (
       •  setSelectedRecordingId(recording.id)} >
 
-                {recording.title || `${recording.timestamp.toLocaleTimeString()} - ${formatDuration(recording.duration)}`}
+                {recording.title ||
+                  `${recording.timestamp.toLocaleTimeString()} - ${formatDuration(recording.duration)}`}
 
       {recording.title && (
 
       )}
 
-                {recording.summary ? '✓ Summary' : recording.isGeneratingSummary ? '⏳ Summarizing...' : ''}
+                {recording.summary
+                  ? "✓ Summary"
+                  : recording.isGeneratingSummary
+                    ? "⏳ Summarizing..."
+                    : ""}
   );
-}
\ No newline at end of file
+}
diff --git a/src/components/SummaryDisplay.tsx b/src/components/SummaryDisplay.tsx
index 124f104..d0b035d 100644
--- a/src/components/SummaryDisplay.tsx
+++ b/src/components/SummaryDisplay.tsx
@@ -16,10 +16,7 @@ export function SummaryDisplay({ summary, isLoading, progress }: SummaryDisplayProps) {
       {isLoading ? (
         {progress !== undefined ? (
-
+
         ) : (
           <>