Files
chronara/patches/llama-cpp-sys-2/Cargo.toml
T
naomi 74c334c939
Security Scan and Upload / Security & DefectDojo Upload (pull_request) Successful in 2m25s
CI / Lint & Test (pull_request) Failing after 6m21s
CI / Build Linux (pull_request) Has been skipped
CI / Build Windows (cross-compile) (pull_request) Has been skipped
feat: we successfully have the installer working for windows!
Models are downloaded at runtime instead of build.
2026-01-28 17:15:13 -08:00

105 lines
2.6 KiB
TOML

# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
name = "llama-cpp-sys-2"
version = "0.1.132"
edition = "2021"
# Custom build script that compiles/links the vendored llama.cpp tree.
build = "build.rs"
# Native-library key: at most one crate in a dependency graph may set
# `links = "llama"`, ensuring a single copy of the native library.
links = "llama"

# Explicit allow-list of files packaged into the published crate.
include = [
    "wrapper.h",
    "wrapper_mtmd.h",
    "build.rs",
    "/src",
    # llama.cpp common sources
    "/llama.cpp/common/**/*.h",
    "/llama.cpp/common/**/*.hpp",
    "/llama.cpp/common/**/*.cpp",
    # ggml core
    "/llama.cpp/ggml/include/*.h",
    "/llama.cpp/ggml/src/*.h",
    "/llama.cpp/ggml/src/*.c",
    "/llama.cpp/ggml/src/*.cpp",
    # llama core
    "/llama.cpp/src/*.h",
    "/llama.cpp/src/*.cpp",
    "/llama.cpp/src/models/*.h",
    "/llama.cpp/src/models/*.cpp",
    # multimodal (mtmd) tool sources
    "/llama.cpp/tools/mtmd/*.h",
    "/llama.cpp/tools/mtmd/*.cpp",
    "/llama.cpp/convert_hf_to_gguf.py",
    "/llama.cpp/common/build-info.cpp.in",
    # GPU/accelerator backends
    "/llama.cpp/ggml/src/ggml-cuda.cu",
    "/llama.cpp/ggml/src/ggml-metal.m",
    "/llama.cpp/ggml/src/ggml-metal.metal",
    "/llama.cpp/include/llama.h",
    "/llama.cpp/include/llama-cpp.h",
    "/llama.cpp/ggml/src/ggml-cpu/**/*",
    "/llama.cpp/ggml/src/ggml-cuda/**/*",
    "/llama.cpp/ggml/src/ggml-metal/**/*",
    "/llama.cpp/ggml/src/ggml-vulkan/**/*",
    "/llama.cpp/ggml/src/llamafile/sgemm.h",
    "/llama.cpp/ggml/src/llamafile/sgemm.cpp",
    "/llama.cpp/pocs",
    "/llama.cpp/vendor",
    # CMake build definitions consumed by build.rs
    "/llama.cpp/CMakeLists.txt",
    "/llama.cpp/common/CMakeLists.txt",
    "/llama.cpp/ggml/CMakeLists.txt",
    "/llama.cpp/ggml/src/CMakeLists.txt",
    "/llama.cpp/src/CMakeLists.txt",
    "/llama.cpp/cmake",
    "/llama.cpp/ggml/cmake",
    "/llama.cpp/common/cmake",
]

# Disable all target auto-discovery; the only target is declared
# explicitly in [lib].
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false

description = "Low Level Bindings to llama.cpp"
readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/utilityai/llama-cpp-rs"
# Opt-in feature flags; all empty except `cuda-no-vmm`, which implies
# `cuda`. NOTE(review): names suggest backend/linkage toggles consumed by
# build.rs (CUDA, Metal, Vulkan, OpenMP, dynamic linking, system ggml,
# multimodal support) — confirm semantics in build.rs.
[features]
cuda = []
cuda-no-vmm = ["cuda"]  # depends on (and therefore enables) `cuda`
dynamic-link = []
metal = []
mtmd = []
openmp = []
shared-stdcxx = []
system-ggml = []
vulkan = []
# Explicit library target (required since auto-discovery is disabled
# via the `auto*` keys in [package]).
[lib]
name = "llama_cpp_sys_2"
path = "src/lib.rs"
# No regular (runtime) dependencies.
[dependencies]

# Build-time tooling, written as inline specs instead of the
# `[build-dependencies.<name>]` dotted-header form that Cargo's
# manifest normalization emits. Parsed structure is identical.
[build-dependencies]
bindgen = "0.72.1"
cc = { version = "1.2.49", features = ["parallel"] }
cmake = "0.1"
find_cuda_helper = "0.2.0"
glob = "0.3.3"
walkdir = "2"