From be7b92e97dac09dc9c8910b82e723bf1690c6aa3 Mon Sep 17 00:00:00 2001
From: Gaetan Lepage
Date: Mon, 16 Dec 2024 23:49:55 +0100
Subject: [PATCH] ollama: 0.5.1 -> 0.5.4

Diff: https://github.com/ollama/ollama/compare/v0.5.1...v0.5.4
Changelog: https://github.com/ollama/ollama/releases/tag/v0.5.4

Co-authored-by: Mastoca <121066551+mastoca@users.noreply.github.com>
---
 pkgs/by-name/ol/ollama/package.nix | 53 +++++++++++++++++-------------
 pkgs/by-name/ol/ollama/rocm.patch  | 13 --------
 2 files changed, 31 insertions(+), 35 deletions(-)
 delete mode 100644 pkgs/by-name/ol/ollama/rocm.patch

diff --git a/pkgs/by-name/ol/ollama/package.nix b/pkgs/by-name/ol/ollama/package.nix
index 250b68069172..27fc235ee9cb 100644
--- a/pkgs/by-name/ol/ollama/package.nix
+++ b/pkgs/by-name/ol/ollama/package.nix
@@ -19,7 +19,9 @@
   cudaPackages,
   darwin,
   autoAddDriverRunpath,
+  versionCheckHook,
 
+  # passthru
   nixosTests,
   testers,
   ollama,
@@ -41,13 +43,13 @@ assert builtins.elem acceleration [
 let
   pname = "ollama";
   # don't forget to invalidate all hashes each update
-  version = "0.5.1";
+  version = "0.5.4";
 
   src = fetchFromGitHub {
     owner = "ollama";
     repo = "ollama";
-    rev = "v${version}";
-    hash = "sha256-llsK/rMK1jf2uneqgon9gqtZcbC9PuCDxoYfC7Ta6PY=";
+    tag = "v${version}";
+    hash = "sha256-JyP7A1+u9Vs6ynOKDwun1qLBsjN+CVHIv39Hh2TYa2U=";
     fetchSubmodules = true;
   };
 
@@ -169,14 +171,10 @@ goBuild {
     ++ lib.optionals enableCuda cudaLibs
     ++ lib.optionals stdenv.hostPlatform.isDarwin metalFrameworks;
 
-  patches = [
-    # ollama's build script is unable to find hipcc
-    ./rocm.patch
-  ];
-
+  # replace inaccurate version number with actual release version
   postPatch = ''
-    # replace inaccurate version number with actual release version
-    substituteInPlace version/version.go --replace-fail 0.0.0 '${version}'
+    substituteInPlace version/version.go \
+      --replace-fail 0.0.0 '${version}'
   '';
 
   overrideModAttrs = (
@@ -186,25 +184,28 @@ goBuild {
     }
   );
 
-  preBuild = ''
+  preBuild =
+    let
+      dist_cmd =
+        if cudaRequested then
+          "dist_cuda_v${cudaMajorVersion}"
+        else if rocmRequested then
+          "dist_rocm"
+        else
+          "dist";
+    in
     # build llama.cpp libraries for ollama
-    make -j $NIX_BUILD_CORES
-  '';
-
-  postInstall = lib.optionalString stdenv.hostPlatform.isLinux ''
-    # copy libggml_*.so and runners into lib
-    # https://github.com/ollama/ollama/blob/v0.4.4/llama/make/gpu.make#L90
-    mkdir -p $out/lib
-    cp -r dist/*/lib/* $out/lib/
-  '';
+    ''
+      make ${dist_cmd} -j $NIX_BUILD_CORES
+    '';
 
   postFixup =
+    # the app doesn't appear functional at the moment, so hide it
     ''
-      # the app doesn't appear functional at the moment, so hide it
       mv "$out/bin/app" "$out/bin/.ollama-app"
     ''
+    # expose runtime libraries necessary to use the gpu
     + lib.optionalString (enableRocm || enableCuda) ''
-      # expose runtime libraries necessary to use the gpu
       wrapProgram "$out/bin/ollama" ${wrapperArgs}
     '';
 
@@ -215,6 +216,14 @@
     "-X=github.com/ollama/ollama/server.mode=release"
   ];
 
+  __darwinAllowLocalNetworking = true;
+
+  nativeInstallCheckInputs = [
+    versionCheckHook
+  ];
+  versionCheckProgramArg = [ "--version" ];
+  doInstallCheck = true;
+
   passthru = {
     tests = {
diff --git a/pkgs/by-name/ol/ollama/rocm.patch b/pkgs/by-name/ol/ollama/rocm.patch
deleted file mode 100644
index c3e8defeaba4..000000000000
--- a/pkgs/by-name/ol/ollama/rocm.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/llama/make/Makefile.rocm b/llama/make/Makefile.rocm
-index 4ab176b4..cd8be223 100644
---- a/llama/make/Makefile.rocm
-+++ b/llama/make/Makefile.rocm
-@@ -15,7 +15,7 @@ ifeq ($(OS),windows)
- 	GPU_COMPILER:=$(GPU_COMPILER_WIN)
- else ifeq ($(OS),linux)
- 	GPU_LIB_DIR_LINUX := $(HIP_PATH)/lib
--	GPU_COMPILER_LINUX := $(shell X=$$(which hipcc 2>/dev/null) && echo $$X)
-+	GPU_COMPILER_LINUX := $(HIP_PATH)/bin/hipcc
- 	GPU_COMPILER:=$(GPU_COMPILER_LINUX)
- 	ROCM_TRANSITIVE_LIBS_INITIAL = $(sort $(shell ldd $(GPU_LIBS) | grep "=>" | cut -f2 -d= | cut -f2 -d' ' | grep -e rocm -e amdgpu -e libtinfo -e libnuma -e libelf))
- 	GPU_TRANSITIVE_LIBS = $(sort $(shell readlink -f $(ROCM_TRANSITIVE_LIBS_INITIAL)) $(ROCM_TRANSITIVE_LIBS_INITIAL))
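
Testing note (sketch, not part of the patch): preBuild now selects the llama.cpp
dist target from the requested backend, so the CUDA and ROCm build paths are worth
exercising alongside the default one. A minimal, untested Nix snippet for that,
assuming a nixpkgs checkout containing this change and using only the package's
existing `acceleration` argument (the one asserted at the top of package.nix); the
file name check.nix is hypothetical:

  # check.nix - hypothetical helper for local build checks.
  # Builds ollama once per acceleration backend that package.nix accepts.
  let
    pkgs = import ./. { }; # the nixpkgs checkout in the current directory
  in
  {
    default = pkgs.ollama;                                    # plain `make dist`
    cuda = pkgs.ollama.override { acceleration = "cuda"; };   # `make dist_cuda_v<major>`
    rocm = pkgs.ollama.override { acceleration = "rocm"; };   # `make dist_rocm`
  }

Each attribute can then be built with nix-build check.nix -A default, -A cuda, or
-A rocm; the CUDA variant may additionally require allowing unfree packages.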