1
0
forked from nikkuss/pkgs

6 Commits

6 changed files with 254 additions and 23 deletions
+11 -11
View File
@@ -21,7 +21,7 @@
}, },
"base24-schemes": { "base24-schemes": {
"cargoLock": null, "cargoLock": null,
"date": "2026-03-04", "date": "2026-04-12",
"extract": null, "extract": null,
"name": "base24-schemes", "name": "base24-schemes",
"passthru": null, "passthru": null,
@@ -31,13 +31,13 @@
"fetchSubmodules": false, "fetchSubmodules": false,
"leaveDotGit": false, "leaveDotGit": false,
"name": null, "name": null,
"rev": "13b5b0c299982bb361039601e2d72587d6846294", "rev": "3e228732e93c107e8b65b8be30c36e5b3c507d95",
"sha256": "sha256-4eu3LqB9tPqe0Vaqxd4wkZiBbthLbpb7llcoE/p5HT0=", "sha256": "sha256-G34G4hri8H2XNwF0e15MFj3sjO8fTOphNk0WjBwnVew=",
"sparseCheckout": [], "sparseCheckout": [],
"type": "git", "type": "git",
"url": "https://github.com/tinted-theming/schemes.git" "url": "https://github.com/tinted-theming/schemes.git"
}, },
"version": "13b5b0c299982bb361039601e2d72587d6846294" "version": "3e228732e93c107e8b65b8be30c36e5b3c507d95"
}, },
"claude-code-acp": { "claude-code-acp": {
"cargoLock": null, "cargoLock": null,
@@ -53,12 +53,12 @@
"name": null, "name": null,
"owner": "zed-industries", "owner": "zed-industries",
"repo": "claude-code-acp", "repo": "claude-code-acp",
"rev": "v0.24.2", "rev": "v0.26.0",
"sha256": "sha256-SRVbLcGrH5pJt6yfM0ObSso68M+yGateIVYf/kFVDhE=", "sha256": "sha256-2G8gjMCnk3W1I2+4sNsumL15ts9bLXAOMguCmwnzWSA=",
"sparseCheckout": [], "sparseCheckout": [],
"type": "github" "type": "github"
}, },
"version": "v0.24.2" "version": "v0.26.0"
}, },
"flightcore": { "flightcore": {
"cargoLock": null, "cargoLock": null,
@@ -84,7 +84,7 @@
"neovide": { "neovide": {
"cargoLock": { "cargoLock": {
"Cargo.lock": [ "Cargo.lock": [
"sha256-Qd3Ld0az0b7ixz2ObUucdJO2uWSQs7XiTVdmXJxilhk=/Cargo.lock", "sha256-JBV939TEfG44w59iUJ3KgiCrQ9aCcjmPxu2uoBD_Ank=/Cargo.lock",
{} {}
] ]
}, },
@@ -94,12 +94,12 @@
"passthru": null, "passthru": null,
"pinned": false, "pinned": false,
"src": { "src": {
"deepClone": false, "deepClone": true,
"fetchSubmodules": false, "fetchSubmodules": true,
"leaveDotGit": false, "leaveDotGit": false,
"name": null, "name": null,
"rev": "nightly", "rev": "nightly",
"sha256": "sha256-Qd3Ld0az0b7ixz2ObUucdJO2uWSQs7XiTVdmXJxilhk=", "sha256": "sha256-JBV939TEfG44w59iUJ3KgiCrQ9aCcjmPxu2uoBD/Ank=",
"sparseCheckout": [], "sparseCheckout": [],
"type": "git", "type": "git",
"url": "https://github.com/neovide/neovide.git" "url": "https://github.com/neovide/neovide.git"
+11 -11
View File
@@ -21,27 +21,27 @@
}; };
base24-schemes = { base24-schemes = {
pname = "base24-schemes"; pname = "base24-schemes";
version = "13b5b0c299982bb361039601e2d72587d6846294"; version = "3e228732e93c107e8b65b8be30c36e5b3c507d95";
src = fetchgit { src = fetchgit {
url = "https://github.com/tinted-theming/schemes.git"; url = "https://github.com/tinted-theming/schemes.git";
rev = "13b5b0c299982bb361039601e2d72587d6846294"; rev = "3e228732e93c107e8b65b8be30c36e5b3c507d95";
fetchSubmodules = false; fetchSubmodules = false;
deepClone = false; deepClone = false;
leaveDotGit = false; leaveDotGit = false;
sparseCheckout = []; sparseCheckout = [];
sha256 = "sha256-4eu3LqB9tPqe0Vaqxd4wkZiBbthLbpb7llcoE/p5HT0="; sha256 = "sha256-G34G4hri8H2XNwF0e15MFj3sjO8fTOphNk0WjBwnVew=";
}; };
date = "2026-03-04"; date = "2026-04-12";
}; };
claude-code-acp = { claude-code-acp = {
pname = "claude-code-acp"; pname = "claude-code-acp";
version = "v0.24.2"; version = "v0.26.0";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "zed-industries"; owner = "zed-industries";
repo = "claude-code-acp"; repo = "claude-code-acp";
rev = "v0.24.2"; rev = "v0.26.0";
fetchSubmodules = false; fetchSubmodules = false;
sha256 = "sha256-SRVbLcGrH5pJt6yfM0ObSso68M+yGateIVYf/kFVDhE="; sha256 = "sha256-2G8gjMCnk3W1I2+4sNsumL15ts9bLXAOMguCmwnzWSA=";
}; };
}; };
flightcore = { flightcore = {
@@ -61,14 +61,14 @@
src = fetchgit { src = fetchgit {
url = "https://github.com/neovide/neovide.git"; url = "https://github.com/neovide/neovide.git";
rev = "nightly"; rev = "nightly";
fetchSubmodules = false; fetchSubmodules = true;
deepClone = false; deepClone = true;
leaveDotGit = false; leaveDotGit = false;
sparseCheckout = []; sparseCheckout = [];
sha256 = "sha256-Qd3Ld0az0b7ixz2ObUucdJO2uWSQs7XiTVdmXJxilhk="; sha256 = "sha256-JBV939TEfG44w59iUJ3KgiCrQ9aCcjmPxu2uoBD/Ank=";
}; };
cargoLock."Cargo.lock" = { cargoLock."Cargo.lock" = {
lockFile = ./. + "/sha256-Qd3Ld0az0b7ixz2ObUucdJO2uWSQs7XiTVdmXJxilhk=/Cargo.lock"; lockFile = ./. + "/sha256-JBV939TEfG44w59iUJ3KgiCrQ9aCcjmPxu2uoBD_Ank=/Cargo.lock";
outputHashes = { outputHashes = {
}; };
}; };
@@ -1593,7 +1593,7 @@ dependencies = [
[[package]] [[package]]
name = "neovide" name = "neovide"
version = "0.16.0" version = "0.16.1"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"approx", "approx",
+2
View File
@@ -34,4 +34,6 @@ fetch.git = "https://github.com/qca/qca-swiss-army-knife.git"
[neovide] [neovide]
src.manual = "nightly" src.manual = "nightly"
fetch.git = "https://github.com/neovide/neovide.git" fetch.git = "https://github.com/neovide/neovide.git"
git.fetchSubmodules = true
git.deepClone = true
cargo_lock = ["Cargo.lock"] cargo_lock = ["Cargo.lock"]
@@ -0,0 +1,6 @@
# Variant of llama-cpp with the Qualcomm Hexagon backend switched on and the
# BLAS fallback explicitly switched off via the package's override arguments.
{ llama-cpp }:
llama-cpp.override {
  blasSupport = false;
  hexagonSupport = true;
}
+223
View File
@@ -0,0 +1,223 @@
# Nix derivation for llama.cpp (ggml-org/llama.cpp): C/C++ inference for
# LLaMA-family models.  Backend selection (CUDA, ROCm, Metal, Vulkan, OpenCL,
# BLAS, RPC, Hexagon) is driven entirely by the boolean `*Support` arguments.
{
  lib,
  pkgs,
  autoAddDriverRunpath,
  cmake,
  fetchFromGitHub,
  nix-update-script,
  stdenv,
  config,
  # Accelerator toggles; CUDA and ROCm default to the global nixpkgs config.
  cudaSupport ? config.cudaSupport,
  cudaPackages ? { },
  rocmSupport ? config.rocmSupport,
  rocmPackages ? { },
  # Prefer locally-detected GPU targets when the ROCm package set provides them.
  rocmGpuTargets ? rocmPackages.clr.localGpuTargets or rocmPackages.clr.gpuTargets,
  openclSupport ? false,
  clblast,
  # BLAS is the CPU fallback: on only when no accelerated backend is enabled.
  # (Referencing metalSupport/vulkanSupport before their declaration is valid —
  # Nix evaluates default arguments lazily.)
  blasSupport ? builtins.all (x: !x) [
    cudaSupport
    metalSupport
    openclSupport
    rocmSupport
    vulkanSupport
  ],
  blas,
  pkg-config,
  metalSupport ? stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64 && !openclSupport,
  vulkanSupport ? false,
  rpcSupport ? false,
  # Qualcomm Hexagon NPU backend; forces a cross build (see effectiveStdenv).
  hexagonSupport ? false,
  curl,
  llama-cpp, # self-reference, used only for passthru.tests.metal below
  shaderc,
  vulkan-headers,
  hexagon-sdk,
  vulkan-loader,
  ninja,
}:
let
  # It's necessary to consistently use backendStdenv when building with CUDA support,
  # otherwise we get libstdc++ errors downstream.
  # cuda imposes an upper bound on the gcc version
  # NOTE(review): re-importing nixpkgs with a hard-coded system pins the build
  # platform to x86_64-linux whenever hexagonSupport is set — confirm this is
  # intended for non-x86 builders.
  buildPkgs = import pkgs.path {
    system = "x86_64-linux"; # builder uses x86_64
  };
  # hexagon needs a x86 build env
  crossPkgs = buildPkgs.pkgsCross.aarch64-multiplatform;
  # Hexagon builds cross-compile from x86_64 to aarch64; everything else uses
  # the caller-supplied stdenv unchanged.
  effectiveStdenv = if hexagonSupport then crossPkgs.stdenv else stdenv;
  inherit (lib)
    cmakeBool
    cmakeFeature
    optionals
    optionalString
    ;
  cudaBuildInputs = with cudaPackages; [
    cuda_cccl # <nv/target>
    # A temporary hack for reducing the closure size, remove once cudaPackages
    # have stopped using lndir: https://github.com/NixOS/nixpkgs/issues/271792
    cuda_cudart
    libcublas
  ];
  rocmBuildInputs = with rocmPackages; [
    clr
    hipblas
    rocblas
  ];
  vulkanBuildInputs = [
    shaderc
    vulkan-headers
    vulkan-loader
  ];
  hexagonBuildInputs = [
    hexagon-sdk
  ];
in
effectiveStdenv.mkDerivation (finalAttrs: {
  pname = "llama-cpp";
  # Upstream tags releases as "b<build-number>"; only the number is kept here
  # (reconstituted as "b${version}" in src.tag and stripped again by the
  # updateScript's --version-regex).
  version = "8871";
  src = fetchFromGitHub {
    owner = "ggml-org";
    repo = "llama.cpp";
    tag = "b${finalAttrs.version}";
    hash = "sha256-dSMomkkG3YFwXAcYTym6Z03u8ZAWFFio8jdQJPMJ/yg=";
    # Keep .git just long enough for postFetch to record the short commit hash
    # (consumed by preConfigure), then delete it so the fixed-output source
    # stays reproducible.
    leaveDotGit = true;
    postFetch = ''
      git -C "$out" rev-parse --short HEAD > $out/COMMIT
      find "$out" -name .git -print0 | xargs -0 rm -rf
    '';
  };
  nativeBuildInputs = [
    cmake
    pkgs.clang
    ninja
    pkg-config
    # NOTE(review): blas is a library and is already added to buildInputs when
    # blasSupport is set — its presence in nativeBuildInputs looks misplaced;
    # confirm before removing.
    blas
  ]
  ++ optionals cudaSupport [
    cudaPackages.cuda_nvcc
    autoAddDriverRunpath
  ];
  # Per-backend library dependencies, selected by the corresponding toggles;
  # curl is always linked (LLAMA_CURL is forced on below).
  buildInputs =
    optionals cudaSupport cudaBuildInputs
    ++ optionals openclSupport [ clblast ]
    ++ optionals rocmSupport rocmBuildInputs
    ++ optionals blasSupport [ blas ]
    ++ optionals vulkanSupport vulkanBuildInputs
    ++ optionals hexagonSupport hexagonBuildInputs
    ++ [
      curl
    ];
  # Embed the short commit hash captured during postFetch into the build.
  preConfigure = ''
    prependToVar cmakeFlags "-DLLAMA_BUILD_COMMIT:STRING=$(cat COMMIT)"
  '';
  cmakeFlags = [
    # -march=native is non-deterministic; override with platform-specific flags if needed
    (cmakeBool "GGML_NATIVE" false)
    (cmakeBool "LLAMA_BUILD_EXAMPLES" false)
    (cmakeBool "LLAMA_BUILD_SERVER" true)
    # Only build upstream tests when doCheck is enabled on the final package.
    (cmakeBool "LLAMA_BUILD_TESTS" (finalAttrs.finalPackage.doCheck or false))
    (cmakeBool "LLAMA_CURL" true)
    (cmakeBool "BUILD_SHARED_LIBS" true)
    (cmakeBool "GGML_BLAS" blasSupport)
    (cmakeBool "GGML_CLBLAST" openclSupport)
    (cmakeBool "GGML_CUDA" cudaSupport)
    (cmakeBool "GGML_HIP" rocmSupport)
    (cmakeBool "GGML_METAL" metalSupport)
    (cmakeBool "GGML_RPC" rpcSupport)
    (cmakeBool "GGML_VULKAN" vulkanSupport)
    (cmakeBool "GGML_HEXAGON" hexagonSupport)
    (cmakeFeature "LLAMA_BUILD_NUMBER" finalAttrs.version)
  ]
  ++ optionals cudaSupport [
    (cmakeFeature "CMAKE_CUDA_ARCHITECTURES" cudaPackages.flags.cmakeCudaArchitecturesString)
  ]
  ++ optionals rocmSupport [
    (cmakeFeature "CMAKE_HIP_COMPILER" "${rocmPackages.clr.hipClangPath}/clang++")
    (cmakeFeature "CMAKE_HIP_ARCHITECTURES" (builtins.concatStringsSep ";" rocmGpuTargets))
  ]
  ++ optionals metalSupport [
    (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
    (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" true)
  ]
  ++ optionals rpcSupport [
    # This is done so we can move rpc-server out of bin because llama.cpp doesn't
    # install rpc-server in their install target.
    (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
  ]
  ++ optionals hexagonSupport [
    # (cmakeFeature "CMAKE_TOOLCHAIN_FILE" "${finalAttrs.src}/cmake/arm64-linux-clang.cmake")
    (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
    (cmakeFeature "CMAKE_CXX_FLAGS" "")
    (cmakeBool "GGML_OPENMP" false)
    (cmakeBool "GGML_LLAMAFILE" false)
    (cmakeFeature "GGML_OPENCL" "OFF")
    (cmakeFeature "PREBUILT_LIB_DIR" "linux_aarch64")
    (cmakeFeature "GGML_HEXAGON_FP32_QUANTIZE_GROUP_SIZE" "128")
    # SDK paths; the tools version is pinned here — keep in sync with the
    # hexagon-sdk package when bumping either.
    (cmakeFeature "HEXAGON_SDK_ROOT" "${hexagon-sdk}/opt")
    (cmakeFeature "HEXAGON_TOOLS_ROOT" "${hexagon-sdk}/opt/tools/HEXAGON_Tools/19.0.04")
    (cmakeFeature "LLAMA_OPENSSL" "OFF")
  ];
  # upstream plans on adding targets at the cmakelevel, remove those
  # additional steps after that
  postInstall = ''
    # Match previous binary name for this package
    ln -sf $out/bin/llama-cli $out/bin/llama
    mkdir -p $out/include
    cp $src/include/llama.h $out/include/
  ''
  + optionalString rpcSupport "cp bin/rpc-server $out/bin/llama-rpc-server";
  # the tests are failing as of 2025-08
  doCheck = false;
  passthru = {
    # Darwin-only smoke test: the same package with the Metal backend forced on.
    tests = lib.optionalAttrs stdenv.hostPlatform.isDarwin {
      metal = llama-cpp.override { metalSupport = true; };
    };
    # Track upstream "b<NNNN>" tags, stripping the "b" prefix for `version`.
    updateScript = nix-update-script {
      attrPath = "llama-cpp";
      extraArgs = [
        "--version-regex"
        "b(.*)"
      ];
    };
  };
  meta = {
    description = "Inference of Meta's LLaMA model (and others) in pure C/C++";
    homepage = "https://github.com/ggml-org/llama.cpp";
    license = lib.licenses.mit;
    mainProgram = "llama";
    maintainers = with lib.maintainers; [
      booxter
      dit7ya
      philiptaron
      xddxdd
    ];
    platforms = lib.platforms.unix;
    badPlatforms = optionals (cudaSupport || openclSupport) lib.platforms.darwin;
    # Metal requires a Darwin host; the Hexagon cross build targets aarch64.
    broken =
      (metalSupport && !effectiveStdenv.hostPlatform.isDarwin)
      || (hexagonSupport && !effectiveStdenv.hostPlatform.isAarch64);
  };
})