3c8a46e5a6

- Add Python backend structure with FastAPI for transcription/summarization
- Add React UI with audio recording, transcript, and summary views
- Configure Tauri to manage Python backend lifecycle
- Set up Windows cross-compilation with cargo-xwin
- Add Gitea CI workflow for lint, test, and multi-platform builds
- Configure ESLint, Prettier, and Vitest for code quality

Note: App scaffolding only - Python env and models not yet set up
90 lines · 2.8 KiB · Python
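The commit message above describes a FastAPI backend for transcription and summarization. That backend is not part of this file, but a minimal sketch of the shape such an app might take follows; the endpoint paths, payload fields, and stub bodies here are assumptions for illustration, not the repo's actual API.

from fastapi import FastAPI, File, UploadFile

app = FastAPI()


@app.post("/transcribe")
async def transcribe(audio: UploadFile = File(...)):
    # Stub: a real implementation would run WhisperX on the uploaded audio.
    data = await audio.read()
    return {"transcript": f"(stub) received {len(data)} bytes"}


@app.post("/summarize")
async def summarize(body: dict):
    # Stub: a real implementation would prompt the local Llama model.
    transcript = body.get("transcript", "")
    return {"summary": transcript[:200]}

The script below is the model downloader referenced by the commit's note that models are "not yet set up".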
"""Download required models for Chronara."""
|
|
|
|
import os
|
|
import sys
|
|
from pathlib import Path
|
|
from urllib.request import urlretrieve
|
|
|
|
# Model download URLs
|
|
MODELS = {
|
|
"llama-3.2-1B": {
|
|
"url": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_K_M.gguf",
|
|
"filename": "llama-3.2-1B-instruct-Q4_K_M.gguf",
|
|
"size": "1.2GB",
|
|
},
|
|
"llama-3.2-3B": {
|
|
"url": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q4_K_M.gguf",
|
|
"filename": "llama-3.2-3B-instruct-Q4_K_M.gguf",
|
|
"size": "2.5GB",
|
|
},
|
|
}
|
|
|
|
|
|
def download_with_progress(url: str, filepath: Path):
|
|
"""Download file with progress bar."""
|
|
|
|
def _progress(block_num, block_size, total_size):
|
|
downloaded = block_num * block_size
|
|
percent = min(downloaded * 100 / total_size, 100)
|
|
progress = int(50 * percent / 100)
|
|
sys.stdout.write(
|
|
f"\r[{'=' * progress}{' ' * (50 - progress)}] {percent:.1f}%"
|
|
)
|
|
sys.stdout.flush()
|
|
|
|
print(f"Downloading {filepath.name}...")
|
|
urlretrieve(url, filepath, reporthook=_progress)
|
|
print() # New line after progress bar
|
|
|
|
|
|
def main():
|
|
"""Download all required models."""
|
|
# Get project root
|
|
project_root = Path(__file__).parent.parent
|
|
models_dir = project_root / "models"
|
|
models_dir.mkdir(exist_ok=True)
|
|
|
|
print("🤖 Chronara Model Downloader")
|
|
print("=" * 50)
|
|
|
|
# Ask which model to download
|
|
print("\nWhich Llama model would you like to use?")
|
|
print("1. Llama 3.2 1B (1.2GB) - Faster, good for basic summaries")
|
|
print("2. Llama 3.2 3B (2.5GB) - Better quality summaries")
|
|
print("3. Both models")
|
|
|
|
choice = input("\nEnter your choice (1/2/3): ").strip()
|
|
|
|
models_to_download = []
|
|
if choice == "1":
|
|
models_to_download = ["llama-3.2-1B"]
|
|
elif choice == "2":
|
|
models_to_download = ["llama-3.2-3B"]
|
|
elif choice == "3":
|
|
models_to_download = ["llama-3.2-1B", "llama-3.2-3B"]
|
|
else:
|
|
print("Invalid choice!")
|
|
return
|
|
|
|
# Download selected models
|
|
for model_name in models_to_download:
|
|
model_info = MODELS[model_name]
|
|
filepath = models_dir / model_info["filename"]
|
|
|
|
if filepath.exists():
|
|
print(f"\n✓ {model_name} already downloaded")
|
|
continue
|
|
|
|
print(f"\n📥 Downloading {model_name} ({model_info['size']})...")
|
|
try:
|
|
download_with_progress(model_info["url"], filepath)
|
|
print(f"✓ Downloaded {model_name} successfully!")
|
|
except Exception as e:
|
|
print(f"✗ Failed to download {model_name}: {e}")
|
|
|
|
print("\n✨ Model download complete!")
|
|
print("\nNote: WhisperX models will be downloaded automatically on first run.")
|
|
|
|
|
|
if __name__ == "__main__":
|
|
main() |
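The script is run directly, e.g. python scripts/download_models.py; the scripts/ location is an assumption inferred from Path(__file__).parent.parent, which puts models/ one level above the script. The files it fetches are GGUF weights, the format used by llama.cpp. As a hedged sketch of how the summarizer backend might load one with the llama-cpp-python bindings (this file does not show which runtime the repo actually uses):

from llama_cpp import Llama

# Assumes the 1B model was chosen above; adjust the path for the 3B file.
llm = Llama(
    model_path="models/llama-3.2-1B-instruct-Q4_K_M.gguf",
    n_ctx=4096,  # context window; tune to expected transcript length
)

result = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Summarize this transcript: ..."}]
)
print(result["choices"][0]["message"]["content"])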