treewide: fix typos in comments (#413240)

This commit is contained in:
Pol Dellaiera 2025-06-02 18:43:07 +02:00 committed by GitHub
commit 6d9d3014ba
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
88 changed files with 110 additions and 110 deletions

View file

@ -1747,7 +1747,7 @@ rec {
/** /**
Get the first of the `outputs` provided by the package, or the default. Get the first of the `outputs` provided by the package, or the default.
This function is alligned with `_overrideFirst()` from the `multiple-outputs.sh` setup hook. This function is aligned with `_overrideFirst()` from the `multiple-outputs.sh` setup hook.
Like `getOutput`, the function is idempotent. Like `getOutput`, the function is idempotent.
# Inputs # Inputs

View file

@ -389,7 +389,7 @@ rec {
extensions = composeManyExtensions [ overlayA overlayB ]; extensions = composeManyExtensions [ overlayA overlayB ];
# Caluculate the fixed point of all composed overlays. # Calculate the fixed point of all composed overlays.
fixedpoint = lib.fix (lib.extends extensions original ); fixedpoint = lib.fix (lib.extends extensions original );
in fixedpoint in fixedpoint

View file

@ -404,7 +404,7 @@ rec {
```nix ```nix
myType = mkOptionType { myType = mkOptionType {
name = "myType"; name = "myType";
merge = mergeDefaultOption; # <- This line is redundant. It is the default aready. merge = mergeDefaultOption; # <- This line is redundant. It is the default already.
}; };
``` ```
@ -470,7 +470,7 @@ rec {
args@{ args@{
message, message,
# WARNING: the default merge function assumes that the definition is a valid (option) value. You MUST pass a merge function if the return value needs to be # WARNING: the default merge function assumes that the definition is a valid (option) value. You MUST pass a merge function if the return value needs to be
# - type checked beyond what .check does (which should be very litte; only on the value head; not attribute values, etc) # - type checked beyond what .check does (which should be very little; only on the value head; not attribute values, etc)
# - if you want attribute values to be checked, or list items # - if you want attribute values to be checked, or list items
# - if you want coercedTo-like behavior to work # - if you want coercedTo-like behavior to work
merge ? loc: defs: (head defs).value, merge ? loc: defs: (head defs).value,

View file

@ -75,7 +75,7 @@ let
if pos == null then "" else " at ${pos.file}:${toString pos.line}:${toString pos.column}"; if pos == null then "" else " at ${pos.file}:${toString pos.line}:${toString pos.column}";
# Internal functor to help for migrating functor.wrapped to functor.payload.elemType # Internal functor to help for migrating functor.wrapped to functor.payload.elemType
# Note that individual attributes can be overriden if needed. # Note that individual attributes can be overridden if needed.
elemTypeFunctor = elemTypeFunctor =
name: name:
{ elemType, ... }@payload: { elemType, ... }@payload:

View file

@ -45,7 +45,7 @@
Documentation rendered as AsciiDoc. This is useful for e.g. man pages. Documentation rendered as AsciiDoc. This is useful for e.g. man pages.
> Note: NixOS itself uses this ouput to to build the configuration.nix man page" > Note: NixOS itself uses this output to build the configuration.nix man page"
## optionsNix ## optionsNix
@ -59,7 +59,7 @@
let let
# Evaluate a NixOS configuration # Evaluate a NixOS configuration
eval = import (pkgs.path + "/nixos/lib/eval-config.nix") { eval = import (pkgs.path + "/nixos/lib/eval-config.nix") {
# Overriden explicitly here, this would include all modules from NixOS otherwise. # Overridden explicitly here, this would include all modules from NixOS otherwise.
# See: docs of eval-config.nix for more details # See: docs of eval-config.nix for more details
baseModules = []; baseModules = [];
modules = [ modules = [

View file

@ -130,7 +130,7 @@ let
virtualisation.test.nodeName = mkOption { virtualisation.test.nodeName = mkOption {
internal = true; internal = true;
default = name; default = name;
# We need to force this in specilisations, otherwise it'd be # We need to force this in specialisations, otherwise it'd be
# readOnly = true; # readOnly = true;
description = '' description = ''
The `name` in `nodes.<name>`; stable across `specialisations`. The `name` in `nodes.<name>`; stable across `specialisations`.

View file

@ -60,7 +60,7 @@ let
inherit (eval) pkgs; inherit (eval) pkgs;
excludedTestOptions = [ excludedTestOptions = [
# We cannot evluate _module.args, as it is used during the computation # We cannot evaluate _module.args, as it is used during the computation
# of the modules list. # of the modules list.
"_module.args" "_module.args"

View file

@ -273,7 +273,7 @@ in
caddy = 239; caddy = 239;
taskd = 240; taskd = 240;
# factorio = 241; # DynamicUser = true # factorio = 241; # DynamicUser = true
# emby = 242; # unusued, removed 2019-05-01 # emby = 242; # unused, removed 2019-05-01
#graylog = 243;# dynamically allocated as of 2021-09-03 #graylog = 243;# dynamically allocated as of 2021-09-03
sniproxy = 244; sniproxy = 244;
nzbget = 245; nzbget = 245;
@ -371,7 +371,7 @@ in
# system user or group of the same id in someone else's NixOS. # system user or group of the same id in someone else's NixOS.
# This could break their system and make that person upset for a whole day. # This could break their system and make that person upset for a whole day.
# #
# Sidenote: the default is defined in `shadow` module[2], and the relavent change # Sidenote: the default is defined in `shadow` module[2], and the relevant change
# was made way back in 2014[3]. # was made way back in 2014[3].
# #
# [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number) # [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number)
@ -700,7 +700,7 @@ in
# system user or group of the same id in someone else's NixOS. # system user or group of the same id in someone else's NixOS.
# This could break their system and make that person upset for a whole day. # This could break their system and make that person upset for a whole day.
# #
# Sidenote: the default is defined in `shadow` module[2], and the relavent change # Sidenote: the default is defined in `shadow` module[2], and the relevant change
# was made way back in 2014[3]. # was made way back in 2014[3].
# #
# [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number) # [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number)

View file

@ -1,4 +1,4 @@
# This profile sets up a sytem for image based appliance usage. An appliance is # This profile sets up a system for image based appliance usage. An appliance is
# installed as an image, cannot be re-built, has no Nix available, and is # installed as an image, cannot be re-built, has no Nix available, and is
# generally not meant for interactive use. Updates to such an appliance are # generally not meant for interactive use. Updates to such an appliance are
# handled by updating whole partition images via a tool like systemd-sysupdate. # handled by updating whole partition images via a tool like systemd-sysupdate.

View file

@ -45,7 +45,7 @@ in
# software rendering to implement GLX (OpenGL on Xorg). # software rendering to implement GLX (OpenGL on Xorg).
# However, just building TurboVNC with support for that is not enough # However, just building TurboVNC with support for that is not enough
# (it only takes care of the X server side part of OpenGL); # (it only takes care of the X server side part of OpenGL);
# the indiviudual applications (e.g. `glxgears`) also need to directly load # the individual applications (e.g. `glxgears`) also need to directly load
# the OpenGL libs. # the OpenGL libs.
# Thus, this creates `/run/opengl-driver` populated by Mesa so that the applications # Thus, this creates `/run/opengl-driver` populated by Mesa so that the applications
# can find the llvmpipe `swrast.so` software rendering DRI lib via `libglvnd`. # can find the llvmpipe `swrast.so` software rendering DRI lib via `libglvnd`.

View file

@ -139,7 +139,7 @@ let
[ [
(yamlFormat.generate "helm-chart-manifest-${name}.yaml" (mkHelmChartCR name value)) (yamlFormat.generate "helm-chart-manifest-${name}.yaml" (mkHelmChartCR name value))
] ]
# alternate the YAML doc seperator (---) and extraDeploy manifests to create # alternate the YAML doc separator (---) and extraDeploy manifests to create
# multi document YAMLs # multi document YAMLs
++ (lib.concatMap (x: [ ++ (lib.concatMap (x: [
yamlDocSeparator yamlDocSeparator

View file

@ -143,7 +143,7 @@ in
lib.mkDefault (json.generate "bonsai_tree.json" (filterNulls cfg.settings)); lib.mkDefault (json.generate "bonsai_tree.json" (filterNulls cfg.settings));
# bonsaid is controlled by bonsaictl, so place the latter in the environment by default. # bonsaid is controlled by bonsaictl, so place the latter in the environment by default.
# bonsaictl is typically invoked by scripts or a DE so this isn't strictly necesssary, # bonsaictl is typically invoked by scripts or a DE so this isn't strictly necessary,
# but it's helpful while administering the service generally. # but it's helpful while administering the service generally.
environment.systemPackages = [ cfg.package ]; environment.systemPackages = [ cfg.package ];

View file

@ -176,7 +176,7 @@ in
# need to be writeable, so we can't just point at the ones in the nix # need to be writeable, so we can't just point at the ones in the nix
# store. Instead we take the approach of copying them out of the store # store. Instead we take the approach of copying them out of the store
# on first run. If `bookarch` already exists, we assume the rest of the # on first run. If `bookarch` already exists, we assume the rest of the
# files do as well, and copy nothing -- otherwise we risk ovewriting # files do as well, and copy nothing -- otherwise we risk overwriting
# server state information every time the server is upgraded. # server state information every time the server is upgraded.
preStart = '' preStart = ''
if [ ! -e "${cfg.stateDir}"/bookarch ]; then if [ ! -e "${cfg.stateDir}"/bookarch ]; then

View file

@ -201,7 +201,7 @@ let
# the old service and then starts the new service after config updates. # the old service and then starts the new service after config updates.
# Since we use path-based activation[1] here, the service unit will # Since we use path-based activation[1] here, the service unit will
# immediately[2] be started by the path unit. Probably that start is # immediately[2] be started by the path unit. Probably that start is
# before config updates, whcih causes the service unit to use the old # before config updates, which causes the service unit to use the old
# config after nixos-rebuild switch. Setting stopIfChanged to false works # config after nixos-rebuild switch. Setting stopIfChanged to false works
# around this issue by restarting the service after config updates. # around this issue by restarting the service after config updates.
# [0]: https://nixos.org/manual/nixos/unstable/#sec-switching-systems # [0]: https://nixos.org/manual/nixos/unstable/#sec-switching-systems

View file

@ -107,7 +107,7 @@ in
RestrictNamespaces = true; RestrictNamespaces = true;
RestrictRealtime = true; RestrictRealtime = true;
MemoryDenyWriteExecute = true; MemoryDenyWriteExecute = true;
# Upstream Recommandation # Upstream Recommendation
LimitNOFILE = 20500; LimitNOFILE = 20500;
}; };
}; };

View file

@ -91,7 +91,7 @@ in
config = lib.mkIf cfg.enable { config = lib.mkIf cfg.enable {
environment.systemPackages = [ pkgs.below ]; environment.systemPackages = [ pkgs.below ];
# /etc/below.conf is also refered to by the `below` CLI tool, # /etc/below.conf is also referred to by the `below` CLI tool,
# so this can't be a store-only file whose path is passed to the service # so this can't be a store-only file whose path is passed to the service
environment.etc."below/below.conf".text = cfgContents; environment.etc."below/below.conf".text = cfgContents;

View file

@ -55,7 +55,7 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
}; };
# set up Security wrapper Same as inteded in deb post install # set up Security wrapper Same as intended in deb post install
security.wrappers.cato-clientd = { security.wrappers.cato-clientd = {
source = "${cfg.package}/bin/cato-clientd"; source = "${cfg.package}/bin/cato-clientd";
owner = "root"; owner = "root";

View file

@ -289,7 +289,7 @@ in
{ {
# Note: we want by default to enable OpenSSL, but it seems anything 100 and above is # Note: we want by default to enable OpenSSL, but it seems anything 100 and above is
# overriden by default value from vhost-options.nix # overridden by default value from vhost-options.nix
enableACME = mkOverride 99 true; enableACME = mkOverride 99 true;
forceSSL = mkOverride 99 true; forceSSL = mkOverride 99 true;
locations.${cfg.nginx.path} = { locations.${cfg.nginx.path} = {

View file

@ -550,7 +550,7 @@ in
User = client.user.name; User = client.user.name;
Group = client.user.group; Group = client.user.group;
# settings implied by DynamicUser=true, without actully using it, # settings implied by DynamicUser=true, without actually using it,
# see https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html#DynamicUser= # see https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html#DynamicUser=
RemoveIPC = true; RemoveIPC = true;
PrivateTmp = true; PrivateTmp = true;

View file

@ -102,7 +102,7 @@ in
# special options as its public anyway # special options as it's public anyway
# As far as I know leaking this secret is just # As far as I know leaking this secret is just
# an information leak as one can fetch some basic app # an information leak as one can fetch some basic app
# informations from the IDP # information from the IDP
# To actually do something one still needs to have login # To actually do something one still needs to have login
# data and this secret so this being public will not # data and this secret so this being public will not
# suffice for anything just decreasing security # suffice for anything just decreasing security

View file

@ -42,7 +42,7 @@ let
# values must be separated by whitespace or even commas. # values must be separated by whitespace or even commas.
# Consult either sshd_config(5) or, as last resort, the OpehSSH source for parsing # Consult either sshd_config(5) or, as last resort, the OpenSSH source for parsing
# the options at servconf.c:process_server_config_line_depth() to determine the right "mode" # the options at servconf.c:process_server_config_line_depth() to determine the right "mode"
# for each. But fortunaly this fact is documented for most of them in the manpage. # for each. But fortunately this fact is documented for most of them in the manpage.
commaSeparated = [ commaSeparated = [
"Ciphers" "Ciphers"
"KexAlgorithms" "KexAlgorithms"

View file

@ -133,7 +133,7 @@ in
services.yggdrasil.settings.Listen = services.yggdrasil.settings.Listen =
let let
# By default linux dynamically alocates ports in range 32768..60999 # By default linux dynamically allocates ports in range 32768..60999
# `sysctl net.ipv4.ip_local_port_range` # `sysctl net.ipv4.ip_local_port_range`
# See: https://xkcd.com/221/ # See: https://xkcd.com/221/
prot_port = { prot_port = {

View file

@ -295,7 +295,7 @@ let
( (
lib.mapAttrs ( lib.mapAttrs (
k: v: k: v:
# Not necesssary, but prettier rendering # Not necessary, but prettier rendering
if if
lib.elem k [ lib.elem k [
"AutomapHostsSuffixes" "AutomapHostsSuffixes"

View file

@ -348,7 +348,7 @@ in
else if else if
pgsqlLocal pgsqlLocal
# note: davis expects a non-standard postgres uri (due to the underlying doctrine library) # note: davis expects a non-standard postgres uri (due to the underlying doctrine library)
# specifically the dummy hostname which is overriden by the host query parameter # specifically the dummy hostname which is overridden by the host query parameter
then then
"postgres://${user}@localhost/${db.name}?host=/run/postgresql" "postgres://${user}@localhost/${db.name}?host=/run/postgresql"
else if mysqlLocal then else if mysqlLocal then

View file

@ -224,7 +224,7 @@ in
# Thus, disable distribution for improved simplicity and security: # Thus, disable distribution for improved simplicity and security:
# #
# When distribution is enabled, # When distribution is enabled,
# Elixir spwans the Erlang VM, which will listen by default on all # Elixir spawns the Erlang VM, which will listen by default on all
# interfaces for messages between Erlang nodes (capable of # interfaces for messages between Erlang nodes (capable of
# remote code execution); it can be protected by a cookie; see # remote code execution); it can be protected by a cookie; see
# https://erlang.org/doc/reference_manual/distributed.html#security). # https://erlang.org/doc/reference_manual/distributed.html#security).

View file

@ -132,7 +132,7 @@ in
{ {
# coming from https://github.com/windmill-labs/windmill/blob/main/init-db-as-superuser.sql # coming from https://github.com/windmill-labs/windmill/blob/main/init-db-as-superuser.sql
# modified to not grant priviledges on all tables # modified to not grant privileges on all tables
# create role windmill_user and windmill_admin only if they don't exist # create role windmill_user and windmill_admin only if they don't exist
postgresql.postStart = lib.mkIf cfg.database.createLocally ( postgresql.postStart = lib.mkIf cfg.database.createLocally (
lib.mkAfter '' lib.mkAfter ''

View file

@ -26,7 +26,7 @@ in
# Enable cloud-init by default for waagent. # Enable cloud-init by default for waagent.
# Otherwise waagent would try manage networking using ifupdown, # Otherwise waagent would try to manage networking using ifupdown,
# which is currently not availeble in nixpkgs. # which is currently not available in nixpkgs.
services.cloud-init.enable = true; services.cloud-init.enable = true;
services.cloud-init.network.enable = true; services.cloud-init.network.enable = true;
systemd.services.cloud-config.serviceConfig.Restart = "on-failure"; systemd.services.cloud-config.serviceConfig.Restart = "on-failure";

View file

@ -105,7 +105,7 @@ in
splashImage = null; splashImage = null;
# For Gen 1 VM, configurate grub output to serial_com0. # For Gen 1 VM, configurate grub output to serial_com0.
# Not needed for Gen 2 VM wbere serial_com0 does not exist, # Not needed for Gen 2 VM where serial_com0 does not exist,
# and outputing to console is enough to make Azure Serial Console working # and outputting to console is enough to make Azure Serial Console working
extraConfig = lib.mkIf (!efiSupport) '' extraConfig = lib.mkIf (!efiSupport) ''
serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1 serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
terminal_input --append serial terminal_input --append serial

View file

@ -67,7 +67,7 @@ let
convert = convert =
attrs: attrs:
pipe (recurse [ ] attrs) [ pipe (recurse [ ] attrs) [
# Filter out null values and emoty lists # Filter out null values and empty lists
(filter (kv: kv.value != null && kv.value != [ ])) (filter (kv: kv.value != null && kv.value != [ ]))
# Transform to Key=Value form, then concatenate # Transform to Key=Value form, then concatenate
(map (kv: "${kv.name}=${transform kv.value}")) (map (kv: "${kv.name}=${transform kv.value}"))

View file

@ -65,8 +65,8 @@ in
repartConfig = { repartConfig = {
Type = "esp"; Type = "esp";
Format = "vfat"; Format = "vfat";
# Minimize = "guess" seems to not work very vell for vfat # Minimize = "guess" seems to not work very well for vfat
# partitons. It's better to set a sensible default instead. The # partitions. It's better to set a sensible default instead. The
# aarch64 kernel seems to generally be a little bigger than the # aarch64 kernel seems to generally be a little bigger than the
# x86_64 kernel. To stay on the safe side, leave some more slack # x86_64 kernel. To stay on the safe side, leave some more slack
# for every platform other than x86_64. # for every platform other than x86_64.

View file

@ -94,7 +94,7 @@ in
) cfg.configuration.security.acme.certs ) cfg.configuration.security.acme.certs
) )
# A specialisation's config is nested under its configuration attribute. # A specialisation's config is nested under its configuration attribute.
# For ease of use, nest the root node's configuration simiarly. # For ease of use, nest the root node's configuration similarly.
([ { configuration = node; } ] ++ (builtins.attrValues node.specialisation)) ([ { configuration = node; } ] ++ (builtins.attrValues node.specialisation))
) )
); );

View file

@ -4,7 +4,7 @@
# this test works doing a migration and asserting ntfy-sh runs properly. first, # this test works doing a migration and asserting ntfy-sh runs properly. first,
# ntfy-sh is configured to use a static user and group. then ntfy-sh is # ntfy-sh is configured to use a static user and group. then ntfy-sh is
# started and tested. after that, ntfy-sh is shut down and a systemd drop # started and tested. after that, ntfy-sh is shut down and a systemd drop
# in configuration file is used to upate the service configuration to use # in configuration file is used to update the service configuration to use
# DynamicUser=true. then the ntfy-sh is started again and tested. # DynamicUser=true. then the ntfy-sh is started again and tested.
import ./make-test-python.nix { import ./make-test-python.nix {

View file

@ -6,7 +6,7 @@
# - downloading the file over sftp # - downloading the file over sftp
# - assert that the ACLs are respected # - assert that the ACLs are respected
# - share a file between alice and bob (using sftp) # - share a file between alice and bob (using sftp)
# - assert that eve cannot acceess the shared folder between alice and bob. # - assert that eve cannot access the shared folder between alice and bob.
# #
# Additional test coverage for the remaining protocols (i.e. ftp, http and webdav) # Additional test coverage for the remaining protocols (i.e. ftp, http and webdav)
# would be a nice to have for the future. # would be a nice to have for the future.
@ -333,7 +333,7 @@ in
testScript = testScript =
{ nodes, ... }: { nodes, ... }:
let let
# A function to generate test cases for wheter # A function to generate test cases for whether
# a specified username is expected to access the shared folder. # a specified username is expected to access the shared folder.
accessSharedFoldersSubtest = accessSharedFoldersSubtest =
{ {

View file

@ -1,4 +1,4 @@
# Tests downloading a signed update aritfact from a server to a target machine. # Tests downloading a signed update artifact from a server to a target machine.
# This test does not rely on the `systemd.timer` units provided by the # This test does not rely on the `systemd.timer` units provided by the
# `systemd-sysupdate` module but triggers the `systemd-sysupdate` service # `systemd-sysupdate` module but triggers the `systemd-sysupdate` service
# manually to make the test more robust. # manually to make the test more robust.

View file

@ -4,7 +4,7 @@
# correct time, we need to connect to an NTP server, which usually requires resolving its hostname. # correct time, we need to connect to an NTP server, which usually requires resolving its hostname.
# #
# This test does the following: # This test does the following:
# - Sets up a DNS server (tinydns) listening on the eth1 ip addess, serving .ntp and fake.ntp records. # - Sets up a DNS server (tinydns) listening on the eth1 ip address, serving .ntp and fake.ntp records.
# - Configures that DNS server as a resolver and enables DNSSEC in systemd-resolved settings. # - Configures that DNS server as a resolver and enables DNSSEC in systemd-resolved settings.
# - Configures systemd-timesyncd to use fake.ntp hostname as an NTP server. # - Configures systemd-timesyncd to use fake.ntp hostname as an NTP server.
# - Performs a regular DNS lookup, to ensure it fails due to broken DNSSEC. # - Performs a regular DNS lookup, to ensure it fails due to broken DNSSEC.

View file

@ -17,7 +17,7 @@
./update-from-overlay ./update-from-overlay
It will update both melpa and elpa packages using It will update both melpa and elpa packages using
https://github.com/nix-community/emacs-overlay. It's almost instantenous and https://github.com/nix-community/emacs-overlay. It's almost instantaneous and
formats commits for you. formats commits for you.
*/ */
@ -1355,7 +1355,7 @@ let
hyperbole = ignoreCompilationError (addPackageRequires (mkHome super.hyperbole) [ self.el-mock ]); # elisp error hyperbole = ignoreCompilationError (addPackageRequires (mkHome super.hyperbole) [ self.el-mock ]); # elisp error
# needs non-existent "browser database directory" during compilation # needs non-existent "browser database directory" during compilation
# TODO report to upsteam about missing dependency websocket # TODO report to upstream about missing dependency websocket
ibrowse = ignoreCompilationError (addPackageRequires super.ibrowse [ self.websocket ]); ibrowse = ignoreCompilationError (addPackageRequires super.ibrowse [ self.websocket ]);
# elisp error and missing optional dependencies # elisp error and missing optional dependencies
@ -1368,7 +1368,7 @@ let
indium = mkHome super.indium; indium = mkHome super.indium;
# TODO report to upsteam # TODO report to upstream
inlineR = addPackageRequires super.inlineR [ self.ess ]; inlineR = addPackageRequires super.inlineR [ self.ess ];
# https://github.com/duelinmarkers/insfactor.el/issues/7 # https://github.com/duelinmarkers/insfactor.el/issues/7
@ -1547,7 +1547,7 @@ let
org-gtd = ignoreCompilationError super.org-gtd; # elisp error org-gtd = ignoreCompilationError super.org-gtd; # elisp error
# needs newer org than the Eamcs 29.4 builtin one # needs newer org than the Emacs 29.4 builtin one
org-link-beautify = addPackageRequires super.org-link-beautify [ org-link-beautify = addPackageRequires super.org-link-beautify [
self.org self.org
self.qrencode self.qrencode

View file

@ -5709,7 +5709,7 @@ let
}; };
# TODO: add overrides overlay, so that we can have a generated.nix # TODO: add overrides overlay, so that we can have a generated.nix
# then apply extension specific modifcations to packages. # then apply extension specific modifications to packages.
# overlays will be applied left to right, overrides should come after aliases. # overlays will be applied left to right, overrides should come after aliases.
overlays = lib.optionals config.allowAliases [ overlays = lib.optionals config.allowAliases [

View file

@ -33,7 +33,7 @@
<https://github.com/Microsoft/vscode-cpptools/issues/35> <https://github.com/Microsoft/vscode-cpptools/issues/35>
Once the symbolic link temporary solution taken, everything shoud run smootly. Once the symbolic link temporary solution is taken, everything should run smoothly.
*/ */
let let

View file

@ -49,7 +49,7 @@ stdenv.mkDerivation rec {
libsForQt5.qtmacextras # can be removed when using qt6 libsForQt5.qtmacextras # can be removed when using qt6
]; ];
# custom Darwin install instructions taken from the upsteam compileOSX.sh script # custom Darwin install instructions taken from the upstream compileOSX.sh script
installPhase = lib.optionalString stdenv.hostPlatform.isDarwin '' installPhase = lib.optionalString stdenv.hostPlatform.isDarwin ''
runHook preInstall runHook preInstall

View file

@ -15,7 +15,7 @@
jq, jq,
libiconv, libiconv,
# Controls codegen parallelization for all crates. # Controls codegen parallelization for all crates.
# May be overriden on a per-crate level. # May be overridden on a per-crate level.
# See <https://doc.rust-lang.org/rustc/codegen-options/index.html#codegen-units> # See <https://doc.rust-lang.org/rustc/codegen-options/index.html#codegen-units>
defaultCodegenUnits ? 1, defaultCodegenUnits ? 1,
}: }:

View file

@ -20,7 +20,7 @@
defaultCrateOverrides ? pkgs.defaultCrateOverrides, defaultCrateOverrides ? pkgs.defaultCrateOverrides,
# The features to enable for the root_crate or the workspace_members. # The features to enable for the root_crate or the workspace_members.
rootFeatures ? [ "default" ], rootFeatures ? [ "default" ],
# If true, throw errors instead of issueing deprecation warnings. # If true, throw errors instead of issuing deprecation warnings.
strictDeprecation ? false, strictDeprecation ? false,
# Used for conditional compilation based on CPU feature detection. # Used for conditional compilation based on CPU feature detection.
targetFeatures ? [ ], targetFeatures ? [ ],
@ -4485,9 +4485,9 @@ rec {
runTests ? false, runTests ? false,
testCrateFlags ? [ ], testCrateFlags ? [ ],
testInputs ? [ ], testInputs ? [ ],
# Any command to run immediatelly before a test is executed. # Any command to run immediately before a test is executed.
testPreRun ? "", testPreRun ? "",
# Any command run immediatelly after a test is executed. # Any command run immediately after a test is executed.
testPostRun ? "", testPostRun ? "",
}: }:
lib.makeOverridable lib.makeOverridable

View file

@ -85,7 +85,7 @@ stdenv.mkDerivation (finalAttrs: {
yaml-cpp yaml-cpp
nlohmann_json nlohmann_json
# Todo: add these optional dependcies in nixpkgs. # Todo: add these optional dependencies in nixpkgs.
# sz # sz
# mgard # mgard
# catalyst # catalyst

View file

@ -45,7 +45,7 @@ stdenv.mkDerivation rec {
]; ];
patches = [ patches = [
# Allow completly unvendoring hyperhdr # Allow completely unvendoring hyperhdr
# This can be removed on the next hyperhdr release # This can be removed on the next hyperhdr release
./unvendor.patch ./unvendor.patch
]; ];

View file

@ -3,7 +3,7 @@
lib, lib,
buildPackages, buildPackages,
cmake, cmake,
# explicitely depending on openexr_2 because ilmbase doesn't exist for v3 # explicitly depending on openexr_2 because ilmbase doesn't exist for v3
openexr_2, openexr_2,
}: }:

View file

@ -75,7 +75,7 @@ stdenv.mkDerivation (finalAttrs: {
enableParallelBuilding = true; enableParallelBuilding = true;
# Do not build amd64 assembly code on Darwin, because it fails to compile # Do not build amd64 assembly code on Darwin, because it fails to compile
# with unknow directive errors # with unknown directive errors
configureFlags = configureFlags =
optional stdenv.hostPlatform.isDarwin "--enable-amd64=no" optional stdenv.hostPlatform.isDarwin "--enable-amd64=no"
++ optional (!svgSupport) "--without-svg" ++ optional (!svgSupport) "--without-svg"

View file

@ -13,7 +13,7 @@ stdenv.mkDerivation rec {
sha256 = "1l3hlw9rrc11qggbg9a2303p3bhxxx2vqkmlk8avsrbqw15r1ayr"; sha256 = "1l3hlw9rrc11qggbg9a2303p3bhxxx2vqkmlk8avsrbqw15r1ayr";
}; };
# credis build system has no install actions, provide our own. # credis build system has no install actions, provide our own.
installPhase = '' installPhase = ''
mkdir -p "$out/bin" mkdir -p "$out/bin"
mkdir -p "$out/lib" mkdir -p "$out/lib"

View file

@ -106,7 +106,7 @@ stdenv.mkDerivation (finalAttrs: {
patches = [ patches = [
./startup-config-support-nix-store.patch ./startup-config-support-nix-store.patch
# Miktex will search exectables in "GetMyPrefix(true)/bin". # Miktex will search executables in "GetMyPrefix(true)/bin".
# The path evalutate to "/usr/bin" in FHS style linux distrubution, # The path evaluates to "/usr/bin" in FHS style linux distribution,
# compared to "/nix/store/.../bin" in NixOS. # compared to "/nix/store/.../bin" in NixOS.
# As a result, miktex will fail to find e.g. 'pkexec','ksudo','gksu' # As a result, miktex will fail to find e.g. 'pkexec','ksudo','gksu'
# under /run/wrappers/bin in NixOS. # under /run/wrappers/bin in NixOS.

View file

@ -14,7 +14,7 @@
perl, perl,
# mxnet cuda support is turned off, but dependencies like opencv can still be built with cudaSupport # mxnet cuda support is turned off, but dependencies like opencv can still be built with cudaSupport
# and fail to compile without the cudatoolkit # and fail to compile without the cudatoolkit
# mxnet cuda support will not be availaible, as mxnet requires version <=11 # mxnet cuda support will not be available, as mxnet requires version <=11
cudaSupport ? config.cudaSupport, cudaSupport ? config.cudaSupport,
cudaPackages ? { }, cudaPackages ? { },
}: }:

View file

@ -30,7 +30,7 @@ rustPlatform.buildRustPackage rec {
patches = [ patches = [
# Related to https://github.com/stepchowfun/typical/pull/501 # Related to https://github.com/stepchowfun/typical/pull/501
# Commiting a slightly different patch because the upstream one doesn't apply cleanly # Committing a slightly different patch because the upstream one doesn't apply cleanly
./lifetime.patch ./lifetime.patch
]; ];

View file

@ -8,7 +8,7 @@
stdenv.mkDerivation { stdenv.mkDerivation {
pname = "usb-reset"; pname = "usb-reset";
# not tagged, but changelog has this with the date of the e9a9d6c commit # not tagged, but changelog has this with the date of the e9a9d6c commit
# and no significant change occured between bumping the version in the Makefile and that # and no significant change occurred between bumping the version in the Makefile and that
# and the changes since then (up to ff822d8) seem snap related # and the changes since then (up to ff822d8) seem snap related
version = "0.3"; version = "0.3";

View file

@ -38,7 +38,7 @@ stdenv.mkDerivation (finalAttrs: {
}; };
patches = [ patches = [
# instead of runnning git during the build process # instead of running git during the build process
# use the .COMMIT file generated in the fetcher FOD # use the .COMMIT file generated in the fetcher FOD
./git-rev-parse.patch ./git-rev-parse.patch
]; ];

View file

@ -6,7 +6,7 @@
copyDesktopItems, copyDesktopItems,
autoPatchelfHook, autoPatchelfHook,
# Upstream is officialy built with Electron 18 # Upstream is officially built with Electron 18
# (but it works with latest Electron with minor changes, see HACK below) # (but it works with latest Electron with minor changes, see HACK below)
electron, electron,
asar, asar,

View file

@ -38,7 +38,7 @@ assert !enablePlugin -> disableGdbPlugin;
# Note [Windows Exception Handling] # Note [Windows Exception Handling]
# sjlj (short jump long jump) exception handling makes no sense on x86_64, # sjlj (short jump long jump) exception handling makes no sense on x86_64,
# it's forcably slowing programs down as it produces a constant overhead. # it's forcibly slowing programs down as it produces a constant overhead.
# On x86_64 we have SEH (Structured Exception Handling) and we should use # On x86_64 we have SEH (Structured Exception Handling) and we should use
# that. On i686, we do not have SEH, and have to use sjlj with dwarf2. # that. On i686, we do not have SEH, and have to use sjlj with dwarf2.
# Hence it's now conditional on x86_32 (i686 is 32bit). # Hence it's now conditional on x86_32 (i686 is 32bit).

View file

@ -20,7 +20,7 @@ with haskellLib;
self: super: self: super:
{ {
# Hackage's accelerate is from 2020 and incomptible with our GHC. # Hackage's accelerate is from 2020 and incompatible with our GHC.
# The existing derivation also has missing dependencies # The existing derivation also has missing dependencies
# compared to the source from github. # compared to the source from github.
# https://github.com/AccelerateHS/accelerate/issues/553 # https://github.com/AccelerateHS/accelerate/issues/553
@ -791,7 +791,7 @@ self: super:
katt = dontCheck super.katt; katt = dontCheck super.katt;
language-slice = dontCheck super.language-slice; language-slice = dontCheck super.language-slice;
# Bogus lower bound on data-default-class added via Hackage revison # Bogus lower bound on data-default-class added via Hackage revision
# https://github.com/mrkkrp/req/pull/180#issuecomment-2628201485 # https://github.com/mrkkrp/req/pull/180#issuecomment-2628201485
req = overrideCabal { req = overrideCabal {
revision = null; revision = null;
@ -1963,7 +1963,7 @@ self: super:
license = lib.licenses.bsd3; license = lib.licenses.bsd3;
# ghc-bignum is not buildable if none of the three backends # ghc-bignum is not buildable if none of the three backends
# is explicitly enabled. We enable Native for now as it doesn't # is explicitly enabled. We enable Native for now as it doesn't
# depend on anything else as oppossed to GMP and FFI. # depend on anything else as opposed to GMP and FFI.
# Apply patch which fixes a compilation failure we encountered. # Apply patch which fixes a compilation failure we encountered.
# Will need to be kept until we can drop ghc-bignum entirely, # Will need to be kept until we can drop ghc-bignum entirely,
# i. e. if GHC 8.10.* and 8.8.* have been removed. # i. e. if GHC 8.10.* and 8.8.* have been removed.
@ -2529,7 +2529,7 @@ self: super:
# Missing test files https://github.com/kephas/xdg-basedir-compliant/issues/1 # Missing test files https://github.com/kephas/xdg-basedir-compliant/issues/1
xdg-basedir-compliant = dontCheck super.xdg-basedir-compliant; xdg-basedir-compliant = dontCheck super.xdg-basedir-compliant;
# Test failure after libxcrypt migration, reported upstrem at # Test failure after libxcrypt migration, reported upstream at
# https://github.com/phadej/crypt-sha512/issues/13 # https://github.com/phadej/crypt-sha512/issues/13
crypt-sha512 = dontCheck super.crypt-sha512; crypt-sha512 = dontCheck super.crypt-sha512;

View file

@ -207,7 +207,7 @@ let
# When using a baseruby, ruby always sets "libdir" to the build # When using a baseruby, ruby always sets "libdir" to the build
# directory, which nix rejects due to a reference in to /build/ in # directory, which nix rejects due to a reference in to /build/ in
# the final product. Removing this reference doesn't seem to break # the final product. Removing this reference doesn't seem to break
# anything and fixes cross compliation. # anything and fixes cross compilation.
./dont-refer-to-build-dir.patch ./dont-refer-to-build-dir.patch
]; ];

View file

@ -34,7 +34,7 @@
# all dependants in Nixpkgs # all dependants in Nixpkgs
withSmallDeps ? ffmpegVariant == "small" || withFullDeps, withSmallDeps ? ffmpegVariant == "small" || withFullDeps,
# Everything enabled; only guarded behind platform exclusivity or brokeness. # Everything enabled; only guarded behind platform exclusivity or brokenness.
# If you need to depend on ffmpeg-full because ffmpeg is missing some feature # If you need to depend on ffmpeg-full because ffmpeg is missing some feature
# your package needs, you should enable that feature in regular ffmpeg # your package needs, you should enable that feature in regular ffmpeg
# instead. # instead.

View file

@ -225,7 +225,7 @@ let
# This avoids conflicts between man pages of openssl subcommands (for # This avoids conflicts between man pages of openssl subcommands (for
# example 'ts' and 'err') man pages and their equivalent top-level # example 'ts' and 'err') man pages and their equivalent top-level
# command in other packages (respectively man-pages and moreutils). # command in other packages (respectively man-pages and moreutils).
# This is done in ubuntu and archlinux, and possiibly many other distros. # This is done in ubuntu and archlinux, and possibly many other distros.
"MANSUFFIX=ssl" "MANSUFFIX=ssl"
]; ];

View file

@ -29,7 +29,7 @@ stdenv.mkDerivation (finalAttrs: {
}; };
patches = [ patches = [
# Pathes to fix compiling on LLVM 19 from https://github.com/ukoethe/vigra/pull/592 # Patches to fix compiling on LLVM 19 from https://github.com/ukoethe/vigra/pull/592
./fix-llvm-19-1.patch ./fix-llvm-19-1.patch
./fix-llvm-19-2.patch ./fix-llvm-19-2.patch
]; ];

View file

@ -4,7 +4,7 @@
buildPythonPackage, buildPythonPackage,
pythonOlder, pythonOlder,
cffi, cffi,
# overriden as pkgs.brotli # overridden as pkgs.brotli
brotli, brotli,
setuptools, setuptools,
pytestCheckHook, pytestCheckHook,

View file

@ -60,7 +60,7 @@ buildPythonPackage rec {
# https://github.com/DataDog/datadogpy/issues/746 # https://github.com/DataDog/datadogpy/issues/746
"TestDogshell" "TestDogshell"
# Flaky: test execution time aganst magic values # Flaky: test execution time against magic values
"test_distributed" "test_distributed"
"test_timed" "test_timed"
"test_timed_in_ms" "test_timed_in_ms"

View file

@ -60,7 +60,7 @@ let
# AttributeError: jax.core.Var was removed in JAX v0.6.0. Use jax.extend.core.Var instead, and # AttributeError: jax.core.Var was removed in JAX v0.6.0. Use jax.extend.core.Var instead, and
# see https://docs.jax.dev/en/latest/jax.extend.html for details. # see https://docs.jax.dev/en/latest/jax.extend.html for details.
# Alrady on master: https://github.com/google-deepmind/dm-haiku/commit/cfe8480d253a93100bf5e2d24c40435a95399c96 # Already on master: https://github.com/google-deepmind/dm-haiku/commit/cfe8480d253a93100bf5e2d24c40435a95399c96
# TODO: remove at the next release # TODO: remove at the next release
postPatch = '' postPatch = ''
substituteInPlace haiku/_src/jaxpr_info.py \ substituteInPlace haiku/_src/jaxpr_info.py \

View file

@ -3,7 +3,7 @@
# requires the CUDA toolkit (via nvcc) to be available. # requires the CUDA toolkit (via nvcc) to be available.
# #
# This means that if you plan to use flashinfer, you will need to set the # This means that if you plan to use flashinfer, you will need to set the
# environment varaible `CUDA_HOME` to `cudatoolkit`. # environment variable `CUDA_HOME` to `cudatoolkit`.
{ {
lib, lib,
config, config,

View file

@ -178,7 +178,7 @@ buildPythonPackage rec {
}; };
# FMPy searches for sundials without the "lib"-prefix; strip it # FMPy searches for sundials without the "lib"-prefix; strip it
# and symlink the so-files into existance. # and symlink the so-files into existence.
postFixup = '' postFixup = ''
pushd $out/lib pushd $out/lib
for so in *.so; do for so in *.so; do

View file

@ -27,7 +27,7 @@ buildPythonPackage rec {
# itables has 4 different node packages, each with their own # itables has 4 different node packages, each with their own
# package-lock.json, and partially depending on each other. # package-lock.json, and partially depending on each other.
# Our fetchNpmDeps tooling in nixpkgs doesn't support this yet, so we fetch # Our fetchNpmDeps tooling in nixpkgs doesn't support this yet, so we fetch
# the source tarball from pypi, wich includes the javascript bundle already. # the source tarball from pypi, which includes the javascript bundle already.
src = fetchPypi { src = fetchPypi {
inherit pname version; inherit pname version;
hash = "sha256-S5HASUVfqTny+Vu15MYSSrEffCaJuL7UhDOc3eudVWI="; hash = "sha256-S5HASUVfqTny+Vu15MYSSrEffCaJuL7UhDOc3eudVWI=";

View file

@ -54,7 +54,7 @@ buildPythonPackage rec {
# Boto @ 1.35 has outstripped the version requirement # Boto @ 1.35 has outstripped the version requirement
"boto3" "boto3"
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
]; ];

View file

@ -44,7 +44,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
]; ];

View file

@ -32,7 +32,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
"numpy" "numpy"
]; ];

View file

@ -56,7 +56,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest langchain and -core. # Each component release requests the exact latest langchain and -core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain" "langchain"
"langchain-core" "langchain-core"
"numpy" "numpy"

View file

@ -34,7 +34,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
]; ];

View file

@ -49,7 +49,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
]; ];

View file

@ -39,7 +39,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
"numpy" "numpy"
]; ];

View file

@ -40,7 +40,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
]; ];

View file

@ -52,7 +52,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
]; ];

View file

@ -42,7 +42,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
"numpy" "numpy"
]; ];

View file

@ -34,7 +34,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
]; ];

View file

@ -60,7 +60,7 @@ buildPythonPackage rec {
pythonRelaxDeps = [ pythonRelaxDeps = [
# Each component release requests the exact latest core. # Each component release requests the exact latest core.
# That prevents us from updating individul components. # That prevents us from updating individual components.
"langchain-core" "langchain-core"
"numpy" "numpy"
"tenacity" "tenacity"

View file

@ -21,7 +21,7 @@ buildPythonPackage rec {
hash = "sha256-r9UB5H+qAJc6k2SVAiOCI2yRDLNv2zKRmfrAan+cX9I="; hash = "sha256-r9UB5H+qAJc6k2SVAiOCI2yRDLNv2zKRmfrAan+cX9I=";
}; };
# A temporary fixup to support fork mode with openmpi implemention # A temporary fixup to support fork mode with openmpi implementation
# See https://github.com/firedrakeproject/mpi-pytest/pull/17 # See https://github.com/firedrakeproject/mpi-pytest/pull/17
postPatch = lib.optionalString (mpi4py.mpi.pname == "openmpi") '' postPatch = lib.optionalString (mpi4py.mpi.pname == "openmpi") ''
substituteInPlace pytest_mpi/plugin.py \ substituteInPlace pytest_mpi/plugin.py \

View file

@ -68,7 +68,7 @@ buildPythonPackage rec {
# Project has no tests. # Project has no tests.
# In order to make pythonImportsCheck work, NUMBA_CACHE_DIR env var need to # In order to make pythonImportsCheck work, NUMBA_CACHE_DIR env var need to
# be set to a writable dir (https://github.com/numba/numba/issues/4032#issuecomment-488102702). # be set to a writable dir (https://github.com/numba/numba/issues/4032#issuecomment-488102702).
# pythonImportsCheck has no pre* hook, use checkPhase to wordaround that. # pythonImportsCheck has no pre* hook, use checkPhase to work around that.
checkPhase = '' checkPhase = ''
export NUMBA_CACHE_DIR="$(mktemp -d)" export NUMBA_CACHE_DIR="$(mktemp -d)"
''; '';

View file

@ -24,7 +24,7 @@
tomli-w, tomli-w,
werkzeug, werkzeug,
# coupled downsteam dependencies # coupled downstream dependencies
pip-tools, pip-tools,
}: }:

View file

@ -110,7 +110,7 @@ buildPythonPackage rec {
"MulticastTests.test_multiListen" "MulticastTests.test_multiListen"
]; ];
"src/twisted/trial/test/test_script.py" = [ "src/twisted/trial/test/test_script.py" = [
# Fails in LXC containers with less than all cores availaible (limits.cpu) # Fails in LXC containers with less than all cores available (limits.cpu)
"AutoJobsTests.test_cpuCount" "AutoJobsTests.test_cpuCount"
]; ];
"src/twisted/internet/test/test_unix.py" = [ "src/twisted/internet/test/test_unix.py" = [

View file

@ -37,7 +37,7 @@ buildPythonPackage rec {
[ [
# test failure reported upstream: https://github.com/adrienverge/yamllint/issues/373 # test failure reported upstream: https://github.com/adrienverge/yamllint/issues/373
"test_find_files_recursively" "test_find_files_recursively"
# Issue wih fixture # Issue with fixture
"test_codec_built_in_equivalent" "test_codec_built_in_equivalent"
] ]
++ lib.optionals stdenv.hostPlatform.isDarwin [ ++ lib.optionals stdenv.hostPlatform.isDarwin [

View file

@ -241,7 +241,7 @@ stdenv.mkDerivation rec {
# guarantee that it will always run in any nix context. # guarantee that it will always run in any nix context.
# #
# See also ./bazel_darwin_sandbox.patch in bazel_5. That patch uses # See also ./bazel_darwin_sandbox.patch in bazel_5. That patch uses
# NIX_BUILD_TOP env var to conditionnally disable sleep features inside the # NIX_BUILD_TOP env var to conditionally disable sleep features inside the
# sandbox. # sandbox.
# #
# If you want to investigate the sandbox profile path, # If you want to investigate the sandbox profile path,

View file

@ -323,11 +323,11 @@ stdenv.mkDerivation rec {
# --{,tool_}java_runtime_version=local_jdk and rely on the fact no java # --{,tool_}java_runtime_version=local_jdk and rely on the fact no java
# toolchain registered by default uses the local_jdk, making the selection # toolchain registered by default uses the local_jdk, making the selection
# unambiguous. # unambiguous.
# This toolchain has the advantage that it can use any ambiant java jdk, # This toolchain has the advantage that it can use any ambient java jdk,
# not only a given, fixed version. It allows bazel to work correctly in any # not only a given, fixed version. It allows bazel to work correctly in any
# environment where JAVA_HOME is set to the right java version, like inside # environment where JAVA_HOME is set to the right java version, like inside
# nix derivations. # nix derivations.
# However, this patch breaks bazel hermeticity, by picking the ambiant java # However, this patch breaks bazel hermeticity, by picking the ambient java
# version instead of the more hermetic remote_jdk prebuilt binaries that # version instead of the more hermetic remote_jdk prebuilt binaries that
# rules_java provide by default. It also requires the user to have a # rules_java provide by default. It also requires the user to have a
# JAVA_HOME set to the exact version required by the project. # JAVA_HOME set to the exact version required by the project.
@ -347,7 +347,7 @@ stdenv.mkDerivation rec {
# guarantee that it will always run in any nix context. # guarantee that it will always run in any nix context.
# #
# See also ./bazel_darwin_sandbox.patch in bazel_5. That patch uses # See also ./bazel_darwin_sandbox.patch in bazel_5. That patch uses
# NIX_BUILD_TOP env var to conditionnally disable sleep features inside the # NIX_BUILD_TOP env var to conditionally disable sleep features inside the
# sandbox. # sandbox.
# #
# If you want to investigate the sandbox profile path, # If you want to investigate the sandbox profile path,

View file

@ -151,8 +151,8 @@ let
]; ];
}); });
# Pinned due to home-assistant still needing 1.10.0 verison # Pinned due to home-assistant still needing 1.10.0 version
# Remove this when home-assistant upates the jellyfin-apiclient-python version # Remove this when home-assistant updates the jellyfin-apiclient-python version
jellyfin-apiclient-python = super.jellyfin-apiclient-python.overridePythonAttrs (oldAttrs: rec { jellyfin-apiclient-python = super.jellyfin-apiclient-python.overridePythonAttrs (oldAttrs: rec {
version = "1.10.0"; version = "1.10.0";
src = fetchFromGitHub { src = fetchFromGitHub {

View file

@ -120,7 +120,7 @@ let
isMarkedInsecure = attrs: (attrs.meta.knownVulnerabilities or [ ]) != [ ]; isMarkedInsecure = attrs: (attrs.meta.knownVulnerabilities or [ ]) != [ ];
# Alow granular checks to allow only some unfree packages # Allow granular checks to allow only some unfree packages
# Example: # Example:
# {pkgs, ...}: # {pkgs, ...}:
# { # {

View file

@ -625,7 +625,7 @@ rec {
"outocp" "outocp"
"pmxab" "pmxab"
# GUI scripts that accept no argument or crash without a graphics server; please test manualy # GUI scripts that accept no argument or crash without a graphics server; please test manually
"epspdftk" "epspdftk"
"texdoctk" "texdoctk"
"tlshell" "tlshell"

View file

@ -3658,7 +3658,7 @@ with pkgs;
libhandy = callPackage ../development/libraries/libhandy { }; libhandy = callPackage ../development/libraries/libhandy { };
# Needed for apps that still depend on the unstable verison of the library (not libhandy-1) # Needed for apps that still depend on the unstable version of the library (not libhandy-1)
libhandy_0 = callPackage ../development/libraries/libhandy/0.x.nix { }; libhandy_0 = callPackage ../development/libraries/libhandy/0.x.nix { };
libint = callPackage ../development/libraries/libint { }; libint = callPackage ../development/libraries/libint { };
@ -4853,7 +4853,7 @@ with pkgs;
zbar = libsForQt5.callPackage ../tools/graphics/zbar { }; zbar = libsForQt5.callPackage ../tools/graphics/zbar { };
# Nvidia support does not require any propietary libraries, so CI can build it. # Nvidia support does not require any proprietary libraries, so CI can build it.
# Note that when enabling this unconditionally, non-nvidia users will always have an empty "GPU" section. # Note that when enabling this unconditionally, non-nvidia users will always have an empty "GPU" section.
zenith-nvidia = zenith.override { zenith-nvidia = zenith.override {
nvidiaSupport = true; nvidiaSupport = true;
@ -7057,7 +7057,7 @@ with pkgs;
# host platform. # host platform.
# #
# Because this is the *next* stages choice, it's a bit non-modular to put # Because this is the *next* stages choice, it's a bit non-modular to put
# here. In theory, bootstraping is supposed to not be a chain but at tree, # here. In theory, bootstrapping is supposed to not be a chain but a tree,
# where each stage supports many "successor" stages, like multiple possible # where each stage supports many "successor" stages, like multiple possible
# futures. We don't have a better alternative, but with this downside in # futures. We don't have a better alternative, but with this downside in
# mind, please be judicious when using this attribute. E.g. for building # mind, please be judicious when using this attribute. E.g. for building
@ -8209,7 +8209,7 @@ with pkgs;
stdenv = stdenvNoLibc; stdenv = stdenvNoLibc;
}; };
# These are used when buiding compiler-rt / libgcc, prior to building libc. # These are used when building compiler-rt / libgcc, prior to building libc.
preLibcCrossHeaders = preLibcCrossHeaders =
let let
inherit (stdenv.targetPlatform) libc; inherit (stdenv.targetPlatform) libc;
@ -10575,7 +10575,7 @@ with pkgs;
nginxModules = recurseIntoAttrs (callPackage ../servers/http/nginx/modules.nix { }); nginxModules = recurseIntoAttrs (callPackage ../servers/http/nginx/modules.nix { });
# We should move to dynmaic modules and create a nginxFull package with all modules # We should move to dynamic modules and create a nginxFull package with all modules
nginxShibboleth = nginxStable.override { nginxShibboleth = nginxStable.override {
modules = [ modules = [
nginxModules.rtmp nginxModules.rtmp
@ -13703,7 +13703,7 @@ with pkgs;
jdk = jdk17; jdk = jdk17;
}; };
# perhaps there are better apps for this task? It's how I had configured my preivous system. # perhaps there are better apps for this task? It's how I had configured my previous system.
# And I don't want to rewrite all rules # And I don't want to rewrite all rules
profanity = callPackage ../applications/networking/instant-messengers/profanity ( profanity = callPackage ../applications/networking/instant-messengers/profanity (
{ {

View file

@ -328,7 +328,7 @@ let
# The complete chain of package set builders, applied from top to bottom. # The complete chain of package set builders, applied from top to bottom.
# stdenvOverlays must be last as it brings package forward from the # stdenvOverlays must be last as it brings package forward from the
# previous bootstrapping phases which have already been overlayed. # previous bootstrapping phases which have already been overlaid.
toFix = lib.foldl' (lib.flip lib.extends) (self: { }) ( toFix = lib.foldl' (lib.flip lib.extends) (self: { }) (
[ [
stdenvBootstappingAndPlatforms stdenvBootstappingAndPlatforms