tree-wide: cudaPackages.cudaFlags -> cudaPackages.flags

Signed-off-by: Connor Baker <ConnorBaker01@gmail.com>
Connor Baker 2025-05-12 19:30:16 +00:00
parent f395b190c3
commit 4938f79c95
18 changed files with 35 additions and 35 deletions
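The change is mechanical: every reference to the old cudaPackages.cudaFlags attribute set now goes through cudaPackages.flags instead. As a rough sketch of what a typical consumer looks like after the rename (the package below is hypothetical and not part of this commit; only the flags.cudaCapabilities and flags.dropDot attributes are taken from the diffs that follow):

{
  lib,
  stdenv,
  config,
  cudaPackages,
  cudaSupport ? config.cudaSupport,
  # Previously spelled cudaPackages.cudaFlags.cudaCapabilities.
  cudaCapabilities ? cudaPackages.flags.cudaCapabilities,
}:

stdenv.mkDerivation {
  pname = "example";
  version = "0";
  dontUnpack = true;
  cmakeFlags = lib.optionals cudaSupport [
    # dropDot maps a capability such as "8.6" to "86" for CMAKE_CUDA_ARCHITECTURES.
    (lib.cmakeFeature "CMAKE_CUDA_ARCHITECTURES" (
      lib.concatStringsSep ";" (map cudaPackages.flags.dropDot cudaCapabilities)
    ))
  ];
}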

View file

@@ -20,7 +20,7 @@
qt5,
xorg,
cudaSupport ? config.cudaSupport,
- cudaCapabilities ? cudaPackages.cudaFlags.cudaCapabilities,
+ cudaCapabilities ? cudaPackages.flags.cudaCapabilities,
cudaPackages,
}:
@@ -46,7 +46,7 @@ stdenv'.mkDerivation rec {
cmakeFlags = lib.optionals cudaSupport [
(lib.cmakeBool "CUDA_ENABLED" true)
(lib.cmakeFeature "CMAKE_CUDA_ARCHITECTURES" (
- lib.strings.concatStringsSep ";" (map cudaPackages.cudaFlags.dropDot cudaCapabilities)
+ lib.strings.concatStringsSep ";" (map cudaPackages.flags.dropDot cudaCapabilities)
))
];

View file

@@ -17,7 +17,7 @@ let
cuda_nvcc
libcublas
;
- inherit (cudaPackages.cudaFlags) cudaCapabilities dropDot isJetsonBuild;
+ inherit (cudaPackages.flags) cudaCapabilities dropDot isJetsonBuild;
in
backendStdenv.mkDerivation {
pname = "gpu-burn";

View file

@@ -17,7 +17,7 @@
cublasSupport ? config.cudaSupport,
# You can find a full list here: https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
# For example if you're on an RTX 3060 that means you're using "Ampere" and you need to pass "sm_86"
- cudaArches ? cudaPackages.cudaFlags.realArches or [ ],
+ cudaArches ? cudaPackages.flags.realArches or [ ],
clblastSupport ? stdenv.hostPlatform.isLinux,
clblast,

View file

@@ -49,7 +49,7 @@ let
(acceleration == "cuda") || (config.cudaSupport && acceleration == null);
minRequiredCudaCapability = "6.1"; # build fails with 6.0
- inherit (cudaPackages.cudaFlags) cudaCapabilities;
+ inherit (cudaPackages.flags) cudaCapabilities;
cudaCapabilityString =
if cudaCapability == null then
(builtins.head (
@@ -60,7 +60,7 @@
))
else
cudaCapability;
- cudaCapability' = lib.toInt (cudaPackages.cudaFlags.dropDot cudaCapabilityString);
+ cudaCapability' = lib.toInt (cudaPackages.flags.dropDot cudaCapabilityString);
mklSupport =
assert accelIsValid;

View file

@@ -27,7 +27,7 @@
let
minRequiredCudaCapability = "6.1"; # build fails with 6.0
- inherit (cudaPackages.cudaFlags) cudaCapabilities;
+ inherit (cudaPackages.flags) cudaCapabilities;
cudaCapabilityString =
if cudaCapability == null then
(builtins.head (
@@ -38,7 +38,7 @@ let
))
else
cudaCapability;
- cudaCapability' = lib.toInt (cudaPackages.cudaFlags.dropDot cudaCapabilityString);
+ cudaCapability' = lib.toInt (cudaPackages.flags.dropDot cudaCapabilityString);
in
rustPlatform.buildRustPackage (finalAttrs: {
pname = "moshi";

View file

@@ -16,7 +16,7 @@
rocmPackages,
rocmGpuTargets ? rocmPackages.clr.localGpuTargets or (rocmPackages.clr.gpuTargets or [ ]),
cudaPackages,
- cudaArches ? cudaPackages.cudaFlags.realArches or [ ],
+ cudaArches ? cudaPackages.flags.realArches or [ ],
autoAddDriverRunpath,
# passthru

View file

@@ -98,7 +98,7 @@ let
hash = "sha256-YJdZ0cMtUncH5Z6TtAWBH0xtAIu2UcbjnVcCM4tfg20=";
};
- isCudaJetson = cudaSupport && cudaPackages.cudaFlags.isJetsonBuild;
+ isCudaJetson = cudaSupport && cudaPackages.flags.isJetsonBuild;
in
effectiveStdenv.mkDerivation rec {
pname = "onnxruntime";

View file

@@ -66,7 +66,7 @@ stdenv.mkDerivation (finalAttrs: {
# https://github.com/PixarAnimationStudios/OpenSubdiv/blob/7d0ab5530feef693ac0a920585b5c663b80773b3/CMakeLists.txt#L602
preConfigure = lib.optionalString cudaSupport ''
cmakeFlagsArray+=(
- -DOSD_CUDA_NVCC_FLAGS="${lib.concatStringsSep " " cudaPackages.cudaFlags.gencode}"
+ -DOSD_CUDA_NVCC_FLAGS="${lib.concatStringsSep " " cudaPackages.flags.gencode}"
)
'';

View file

@@ -24,7 +24,7 @@ let
cuda_cudart
cuda_nvcc
cuda_nvml_dev
- cudaFlags
+ flags
nccl
;
@@ -101,7 +101,7 @@ effectiveStdenv.mkDerivation (finalAttrs: {
++ optionals enableAvx [ "--with-avx" ]
++ optionals enableCuda [
"--with-cuda=${cuda_nvcc}"
- "--with-nvcc-gencode=${concatStringsSep " " cudaFlags.gencode}"
+ "--with-nvcc-gencode=${concatStringsSep " " flags.gencode}"
];
postInstall = ''

View file

@@ -9,6 +9,11 @@ in
builtins.mapAttrs mkRenamed {
# A comment to prevent empty { } from collapsing into a single line
+ cudaFlags = {
+ path = "cudaPackages.flags";
+ package = final.flags;
+ };
cudaVersion = {
path = "cudaPackages.cudaMajorMinorVersion";
package = final.cudaMajorMinorVersion;
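For out-of-tree expressions that still use the old spelling, the cudaFlags entry above keeps the attribute resolvable. Assuming mkRenamed behaves like the other renamed-attribute helpers in this file (return the new package while pointing users at the new path; the exact warning behaviour is an assumption, not shown in this diff), both spellings below evaluate to the same value:

let
  pkgs = import <nixpkgs> { }; # hypothetical nixpkgs instantiation
  inherit (pkgs) cudaPackages;
in
{
  # Still resolves through the rename alias defined above.
  oldStyle = cudaPackages.cudaFlags.cudaCapabilities;
  # Preferred spelling after this commit.
  newStyle = cudaPackages.flags.cudaCapabilities;
}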

View file

@@ -18,9 +18,9 @@ let
cuda_cudart
cuda_nvcc
cudaAtLeast
- cudaFlags
cudaOlder
cudatoolkit
+ flags
;
# versions 2.26+ with CUDA 11.x error with
# fatal error: cuda/atomic: No such file or directory
@@ -87,7 +87,7 @@ backendStdenv.mkDerivation (finalAttrs: {
makeFlags =
[
"PREFIX=$(out)"
- "NVCC_GENCODE=${cudaFlags.gencodeString}"
+ "NVCC_GENCODE=${flags.gencodeString}"
]
++ lib.optionals (cudaOlder "11.4") [
"CUDA_HOME=${cudatoolkit}"
@@ -118,7 +118,7 @@ backendStdenv.mkDerivation (finalAttrs: {
platforms = platforms.linux;
# NCCL is not supported on Jetson, because it does not use NVLink or PCI-e for inter-GPU communication.
# https://forums.developer.nvidia.com/t/can-jetson-orin-support-nccl/232845/9
- badPlatforms = lib.optionals cudaFlags.isJetsonBuild [ "aarch64-linux" ];
+ badPlatforms = lib.optionals flags.isJetsonBuild [ "aarch64-linux" ];
maintainers = with maintainers; [
mdaiter
orivej

View file

@@ -274,8 +274,7 @@ let
#https://github.com/OpenMathLib/OpenBLAS/wiki/Faq/4bded95e8dc8aadc70ce65267d1093ca7bdefc4c#multi-threaded
openblas_ = blas.provider.override { singleThreaded = true; };
- inherit (cudaPackages) cudaFlags;
- inherit (cudaFlags) cmakeCudaArchitecturesString cudaCapabilities;
+ inherit (cudaPackages.flags) cmakeCudaArchitecturesString cudaCapabilities;
in

View file

@@ -52,8 +52,8 @@
let
inherit (cudaPackages)
- cudaFlags
cudaMajorMinorVersion
+ flags
nccl
;
@@ -319,7 +319,7 @@ let
build --action_env TF_CUDA_PATHS="${cuda_build_deps_joined},${cudnnMerged},${lib.getDev nccl}"
build --action_env TF_CUDA_VERSION="${cudaMajorMinorVersion}"
build --action_env TF_CUDNN_VERSION="${lib.versions.major cudaPackages.cudnn.version}"
- build:cuda --action_env TF_CUDA_COMPUTE_CAPABILITIES="${builtins.concatStringsSep "," cudaFlags.realArches}"
+ build:cuda --action_env TF_CUDA_COMPUTE_CAPABILITIES="${builtins.concatStringsSep "," flags.realArches}"
''
+
# Note that upstream conditions this on `wheel_cpu == "x86_64"`. We just

View file

@@ -53,7 +53,7 @@ let
packages = import ./binary-hashes.nix;
inherit (cudaPackages) cudatoolkit cudnn;
- isCudaJetson = cudaSupport && cudaPackages.cudaFlags.isJetsonBuild;
+ isCudaJetson = cudaSupport && cudaPackages.flags.isJetsonBuild;
in
buildPythonPackage rec {
pname = "tensorflow" + lib.optionalString cudaSupport "-gpu";

View file

@@ -70,7 +70,7 @@
config,
cudaSupport ? config.cudaSupport,
cudaPackages,
- cudaCapabilities ? cudaPackages.cudaFlags.cudaCapabilities,
+ cudaCapabilities ? cudaPackages.flags.cudaCapabilities,
mklSupport ? false,
mkl,
tensorboardSupport ? true,

View file

@@ -114,7 +114,7 @@ let
strings
trivial
;
- inherit (cudaPackages) cudaFlags cudnn nccl;
+ inherit (cudaPackages) cudnn flags nccl;
triton = throw "python3Packages.torch: use _tritonEffective instead of triton to avoid divergence";
@@ -152,10 +152,10 @@ let
# lists.subtractLists a b = b - a
# For CUDA
- supportedCudaCapabilities = lists.intersectLists cudaFlags.cudaCapabilities supportedTorchCudaCapabilities;
- unsupportedCudaCapabilities = lists.subtractLists supportedCudaCapabilities cudaFlags.cudaCapabilities;
+ supportedCudaCapabilities = lists.intersectLists flags.cudaCapabilities supportedTorchCudaCapabilities;
+ unsupportedCudaCapabilities = lists.subtractLists supportedCudaCapabilities flags.cudaCapabilities;
- isCudaJetson = cudaSupport && cudaPackages.cudaFlags.isJetsonBuild;
+ isCudaJetson = cudaSupport && cudaPackages.flags.isJetsonBuild;
# Use trivial.warnIf to print a warning if any unsupported GPU targets are specified.
gpuArchWarner =

View file

@@ -77,7 +77,7 @@ let
trivial
;
- inherit (cudaPackages) cudaFlags;
+ inherit (cudaPackages) flags;
shouldUsePkg =
pkg: if pkg != null && lib.meta.availableOn stdenv.hostPlatform pkg then pkg else null;
@@ -179,10 +179,10 @@ let
# lists.subtractLists a b = b - a
# For CUDA
- supportedCudaCapabilities = lists.intersectLists cudaFlags.cudaCapabilities supportedTorchCudaCapabilities;
- unsupportedCudaCapabilities = lists.subtractLists supportedCudaCapabilities cudaFlags.cudaCapabilities;
+ supportedCudaCapabilities = lists.intersectLists flags.cudaCapabilities supportedTorchCudaCapabilities;
+ unsupportedCudaCapabilities = lists.subtractLists supportedCudaCapabilities flags.cudaCapabilities;
- isCudaJetson = cudaSupport && cudaPackages.cudaFlags.isJetsonBuild;
+ isCudaJetson = cudaSupport && cudaPackages.flags.isJetsonBuild;
# Use trivial.warnIf to print a warning if any unsupported GPU targets are specified.
gpuArchWarner =
@@ -376,8 +376,7 @@ buildPythonPackage rec {
]
++ lib.optionals cudaSupport [
(lib.cmakeFeature "TORCH_CUDA_ARCH_LIST" "${gpuTargetString}")
- (lib.cmakeFeature "CUTLASS_NVCC_ARCHS_ENABLED" "${cudaPackages.cudaFlags.cmakeCudaArchitecturesString
- }")
+ (lib.cmakeFeature "CUTLASS_NVCC_ARCHS_ENABLED" "${cudaPackages.flags.cmakeCudaArchitecturesString}")
(lib.cmakeFeature "CUDA_TOOLKIT_ROOT_DIR" "${symlinkJoin {
name = "cuda-merged-${cudaPackages.cudaMajorMinorVersion}";
paths = builtins.concatMap getAllOutputs mergedCudaLibraries;

View file

@@ -82,9 +82,6 @@ let
__attrsFailEvaluation = true;
};
- # TODO(@connorbaker): `cudaFlags` is an alias for `flags` which should be removed in the future.
- cudaFlags = flags;
# Loose packages
# Barring packages which share a home (e.g., cudatoolkit and cudatoolkit-legacy-runfile), new packages
# should be added to ../development/cuda-modules/packages in "by-name" style, where they will be automatically