treewide: Format all Nix files

Format all Nix files using the officially approved formatter,
making the CI check introduced in the previous commit succeed:

  nix-build ci -A fmt.check
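
To reformat files locally you can invoke the same formatter yourself.
A sketch, assuming the nixfmt-rfc-style package (which provided the
RFC 166 formatter at the time) and that nixfmt formats the given
paths in place:

  nix-shell -p nixfmt-rfc-style --run "nixfmt path/to/file.nix"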

This is the next step of the [implementation](https://github.com/NixOS/nixfmt/issues/153)
of the accepted [RFC 166](https://github.com/NixOS/rfcs/pull/166).

This commit will lead to merge conflicts in up to an estimated ~1100
PRs (~33% of the PRs with activity in the past 2 months). That number
should still be lower than it would have been without the previous
[partial treewide format](https://github.com/NixOS/nixpkgs/pull/322537).

Merge conflicts caused by this commit can be resolved automatically while rebasing, using the
[auto-rebase script](8616af08d9/maintainers/scripts/auto-rebase).

If you run into any problems regarding any of this, please reach out to the
[formatting team](https://nixos.org/community/teams/formatting/) by
pinging @NixOS/nix-formatting.
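
For illustration, a hypothetical before/after pair (not taken from this
commit, but mirroring the kind of changes in the diff below): the
formatter expands multi-parameter function arguments and multi-element
lists to one item per line.

Before:

  { config, lib, pkgs, ... }:
  {
    boot.kernelModules = [ "virtio_pci" "virtio_net" ];
  }

After:

  {
    config,
    lib,
    pkgs,
    ...
  }:
  {
    boot.kernelModules = [
      "virtio_pci"
      "virtio_net"
    ];
  }
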
Silvan Mosberger 2025-04-01 20:10:43 +02:00
parent 2140bf39e4
commit 374e6bcc40
1523 changed files with 986047 additions and 513621 deletions

@@ -4,7 +4,12 @@
# also to reconfigure instances. However, we can't rename it because
# existing "configuration.nix" files on EC2 instances refer to it.)
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
inherit (lib) mkDefault mkIf;
@@ -28,18 +33,22 @@ in
boot.growPartition = true;
fileSystems."/" = mkIf (!cfg.zfs.enable) (lib.mkDefault {
device = "/dev/disk/by-label/nixos";
fsType = "ext4";
autoResize = true;
});
fileSystems."/" = mkIf (!cfg.zfs.enable) (
lib.mkDefault {
device = "/dev/disk/by-label/nixos";
fsType = "ext4";
autoResize = true;
}
);
fileSystems."/boot" = mkIf (cfg.efi || cfg.zfs.enable) (lib.mkDefault {
# The ZFS image uses a partition labeled ESP whether or not we're
# booting with EFI.
device = "/dev/disk/by-label/ESP";
fsType = "vfat";
});
fileSystems."/boot" = mkIf (cfg.efi || cfg.zfs.enable) (
lib.mkDefault {
# The ZFS image uses a partition labeled ESP whether or not we're
# booting with EFI.
device = "/dev/disk/by-label/ESP";
fsType = "vfat";
}
);
services.zfs.expandOnBoot = mkIf cfg.zfs.enable "all";
@@ -50,13 +59,19 @@ in
];
boot.initrd.kernelModules = [ "xen-blkfront" ];
boot.initrd.availableKernelModules = [ "nvme" ];
boot.kernelParams = [ "console=ttyS0,115200n8" "random.trust_cpu=on" ];
boot.kernelParams = [
"console=ttyS0,115200n8"
"random.trust_cpu=on"
];
# Prevent the nouveau kernel module from being loaded, as it
# interferes with the nvidia/nvidia-uvm modules needed for CUDA.
# Also blacklist xen_fbfront to prevent a 30 second delay during
# boot.
boot.blacklistedKernelModules = [ "nouveau" "xen_fbfront" ];
boot.blacklistedKernelModules = [
"nouveau"
"xen_fbfront"
];
boot.loader.grub.device = if cfg.efi then "nodev" else "/dev/xvda";
boot.loader.grub.efiSupport = cfg.efi;
@@ -71,7 +86,7 @@ in
systemd.services.fetch-ec2-metadata = {
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = ["network-online.target"];
after = [ "network-online.target" ];
path = [ pkgs.curl ];
script = builtins.readFile ./ec2-metadata-fetcher.sh;
serviceConfig.Type = "oneshot";

@@ -1,4 +1,10 @@
{ config, pkgs, lib, modulesPath, ... }:
{
config,
pkgs,
lib,
modulesPath,
...
}:
with lib;
{
imports = [
@@ -30,164 +36,192 @@ with lib;
cfg = config.virtualisation.digitalOcean;
hostName = config.networking.hostName;
doMetadataFile = "/run/do-metadata/v1.json";
in mkMerge [{
fileSystems."/" = lib.mkDefault {
device = "/dev/disk/by-label/nixos";
autoResize = true;
fsType = "ext4";
};
boot = {
growPartition = true;
kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
initrd.kernelModules = [ "virtio_scsi" ];
kernelModules = [ "virtio_pci" "virtio_net" ];
loader.grub.devices = ["/dev/vda"];
};
services.openssh = {
enable = mkDefault true;
settings.PasswordAuthentication = mkDefault false;
};
services.do-agent.enable = mkDefault true;
networking = {
hostName = mkDefault ""; # use Digital Ocean metadata server
};
in
mkMerge [
{
fileSystems."/" = lib.mkDefault {
device = "/dev/disk/by-label/nixos";
autoResize = true;
fsType = "ext4";
};
boot = {
growPartition = true;
kernelParams = [
"console=ttyS0"
"panic=1"
"boot.panic_on_fail"
];
initrd.kernelModules = [ "virtio_scsi" ];
kernelModules = [
"virtio_pci"
"virtio_net"
];
loader.grub.devices = [ "/dev/vda" ];
};
services.openssh = {
enable = mkDefault true;
settings.PasswordAuthentication = mkDefault false;
};
services.do-agent.enable = mkDefault true;
networking = {
hostName = mkDefault ""; # use Digital Ocean metadata server
};
/* Check for and wait for the metadata server to become reachable.
* This serves as a dependency for all the other metadata services. */
systemd.services.digitalocean-metadata = {
path = [ pkgs.curl ];
description = "Get host metadata provided by Digitalocean";
script = ''
set -eu
DO_DELAY_ATTEMPTS=0
while ! curl -fsSL -o $RUNTIME_DIRECTORY/v1.json http://169.254.169.254/metadata/v1.json; do
DO_DELAY_ATTEMPTS=$((DO_DELAY_ATTEMPTS + 1))
if (( $DO_DELAY_ATTEMPTS >= $DO_DELAY_ATTEMPTS_MAX )); then
echo "giving up"
exit 1
/*
Check for and wait for the metadata server to become reachable.
This serves as a dependency for all the other metadata services.
*/
systemd.services.digitalocean-metadata = {
path = [ pkgs.curl ];
description = "Get host metadata provided by Digitalocean";
script = ''
set -eu
DO_DELAY_ATTEMPTS=0
while ! curl -fsSL -o $RUNTIME_DIRECTORY/v1.json http://169.254.169.254/metadata/v1.json; do
DO_DELAY_ATTEMPTS=$((DO_DELAY_ATTEMPTS + 1))
if (( $DO_DELAY_ATTEMPTS >= $DO_DELAY_ATTEMPTS_MAX )); then
echo "giving up"
exit 1
fi
echo "metadata unavailable, trying again in 1s..."
sleep 1
done
chmod 600 $RUNTIME_DIRECTORY/v1.json
'';
environment = {
DO_DELAY_ATTEMPTS_MAX = "10";
};
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
RuntimeDirectory = "do-metadata";
RuntimeDirectoryPreserve = "yes";
};
unitConfig = {
ConditionPathExists = "!${doMetadataFile}";
After =
[ "network-pre.target" ]
++ optional config.networking.dhcpcd.enable "dhcpcd.service"
++ optional config.systemd.network.enable "systemd-networkd.service";
};
};
/*
Fetch the root password from the digital ocean metadata.
There is no specific route for this, so we use jq to get
it from the One Big JSON metadata blob
*/
systemd.services.digitalocean-set-root-password = mkIf cfg.setRootPassword {
path = [
pkgs.shadow
pkgs.jq
];
description = "Set root password provided by Digitalocean";
wantedBy = [ "multi-user.target" ];
script = ''
set -eo pipefail
ROOT_PASSWORD=$(jq -er '.auth_key' ${doMetadataFile})
echo "root:$ROOT_PASSWORD" | chpasswd
mkdir -p /etc/do-metadata/set-root-password
'';
unitConfig = {
ConditionPathExists = "!/etc/do-metadata/set-root-password";
Before = optional config.services.openssh.enable "sshd.service";
After = [ "digitalocean-metadata.service" ];
Requires = [ "digitalocean-metadata.service" ];
};
serviceConfig = {
Type = "oneshot";
};
};
/*
Set the hostname from Digital Ocean, unless the user configured it in
the NixOS configuration. The cached metadata file isn't used here
because the hostname is a mutable part of the droplet.
*/
systemd.services.digitalocean-set-hostname = mkIf (hostName == "") {
path = [
pkgs.curl
pkgs.nettools
];
description = "Set hostname provided by Digitalocean";
wantedBy = [ "network.target" ];
script = ''
set -e
DIGITALOCEAN_HOSTNAME=$(curl -fsSL http://169.254.169.254/metadata/v1/hostname)
hostname "$DIGITALOCEAN_HOSTNAME"
if [[ ! -e /etc/hostname || -w /etc/hostname ]]; then
printf "%s\n" "$DIGITALOCEAN_HOSTNAME" > /etc/hostname
fi
echo "metadata unavailable, trying again in 1s..."
sleep 1
done
chmod 600 $RUNTIME_DIRECTORY/v1.json
'';
environment = {
DO_DELAY_ATTEMPTS_MAX = "10";
unitConfig = {
Before = [ "network.target" ];
After = [ "digitalocean-metadata.service" ];
Wants = [ "digitalocean-metadata.service" ];
};
serviceConfig = {
Type = "oneshot";
};
};
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
RuntimeDirectory = "do-metadata";
RuntimeDirectoryPreserve = "yes";
};
unitConfig = {
ConditionPathExists = "!${doMetadataFile}";
After = [ "network-pre.target" ] ++
optional config.networking.dhcpcd.enable "dhcpcd.service" ++
optional config.systemd.network.enable "systemd-networkd.service";
};
};
/* Fetch the root password from the digital ocean metadata.
* There is no specific route for this, so we use jq to get
* it from the One Big JSON metadata blob */
systemd.services.digitalocean-set-root-password = mkIf cfg.setRootPassword {
path = [ pkgs.shadow pkgs.jq ];
description = "Set root password provided by Digitalocean";
wantedBy = [ "multi-user.target" ];
script = ''
set -eo pipefail
ROOT_PASSWORD=$(jq -er '.auth_key' ${doMetadataFile})
echo "root:$ROOT_PASSWORD" | chpasswd
mkdir -p /etc/do-metadata/set-root-password
# Fetch the ssh keys for root from Digital Ocean
systemd.services.digitalocean-ssh-keys = mkIf cfg.setSshKeys {
description = "Set root ssh keys provided by Digital Ocean";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.jq ];
script = ''
set -e
mkdir -m 0700 -p /root/.ssh
jq -er '.public_keys[]' ${doMetadataFile} > /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
'';
unitConfig = {
ConditionPathExists = "!/etc/do-metadata/set-root-password";
Before = optional config.services.openssh.enable "sshd.service";
After = [ "digitalocean-metadata.service" ];
Requires = [ "digitalocean-metadata.service" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
unitConfig = {
ConditionPathExists = "!/root/.ssh/authorized_keys";
Before = optional config.services.openssh.enable "sshd.service";
After = [ "digitalocean-metadata.service" ];
Requires = [ "digitalocean-metadata.service" ];
};
};
serviceConfig = {
Type = "oneshot";
};
};
/* Set the hostname from Digital Ocean, unless the user configured it in
* the NixOS configuration. The cached metadata file isn't used here
* because the hostname is a mutable part of the droplet. */
systemd.services.digitalocean-set-hostname = mkIf (hostName == "") {
path = [ pkgs.curl pkgs.nettools ];
description = "Set hostname provided by Digitalocean";
wantedBy = [ "network.target" ];
script = ''
set -e
DIGITALOCEAN_HOSTNAME=$(curl -fsSL http://169.254.169.254/metadata/v1/hostname)
hostname "$DIGITALOCEAN_HOSTNAME"
if [[ ! -e /etc/hostname || -w /etc/hostname ]]; then
printf "%s\n" "$DIGITALOCEAN_HOSTNAME" > /etc/hostname
fi
'';
unitConfig = {
Before = [ "network.target" ];
After = [ "digitalocean-metadata.service" ];
Wants = [ "digitalocean-metadata.service" ];
};
serviceConfig = {
Type = "oneshot";
};
};
/* Fetch the ssh keys for root from Digital Ocean */
systemd.services.digitalocean-ssh-keys = mkIf cfg.setSshKeys {
description = "Set root ssh keys provided by Digital Ocean";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.jq ];
script = ''
set -e
mkdir -m 0700 -p /root/.ssh
jq -er '.public_keys[]' ${doMetadataFile} > /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
'';
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
unitConfig = {
ConditionPathExists = "!/root/.ssh/authorized_keys";
Before = optional config.services.openssh.enable "sshd.service";
After = [ "digitalocean-metadata.service" ];
Requires = [ "digitalocean-metadata.service" ];
};
};
/* Initialize the RNG by running the entropy-seed script from the
* Digital Ocean metadata
*/
systemd.services.digitalocean-entropy-seed = mkIf cfg.seedEntropy {
description = "Run the kernel RNG entropy seeding script from the Digital Ocean vendor data";
wantedBy = [ "network.target" ];
path = [ pkgs.jq pkgs.mpack ];
script = ''
set -eo pipefail
TEMPDIR=$(mktemp -d)
jq -er '.vendor_data' ${doMetadataFile} | munpack -tC $TEMPDIR
ENTROPY_SEED=$(grep -rl "DigitalOcean Entropy Seed script" $TEMPDIR)
${pkgs.runtimeShell} $ENTROPY_SEED
rm -rf $TEMPDIR
/*
Initialize the RNG by running the entropy-seed script from the
Digital Ocean metadata
*/
systemd.services.digitalocean-entropy-seed = mkIf cfg.seedEntropy {
description = "Run the kernel RNG entropy seeding script from the Digital Ocean vendor data";
wantedBy = [ "network.target" ];
path = [
pkgs.jq
pkgs.mpack
];
script = ''
set -eo pipefail
TEMPDIR=$(mktemp -d)
jq -er '.vendor_data' ${doMetadataFile} | munpack -tC $TEMPDIR
ENTROPY_SEED=$(grep -rl "DigitalOcean Entropy Seed script" $TEMPDIR)
${pkgs.runtimeShell} $ENTROPY_SEED
rm -rf $TEMPDIR
'';
unitConfig = {
Before = [ "network.target" ];
After = [ "digitalocean-metadata.service" ];
Requires = [ "digitalocean-metadata.service" ];
unitConfig = {
Before = [ "network.target" ];
After = [ "digitalocean-metadata.service" ];
Requires = [ "digitalocean-metadata.service" ];
};
serviceConfig = {
Type = "oneshot";
};
};
serviceConfig = {
Type = "oneshot";
};
};
}
}
];
meta.maintainers = with maintainers; [
arianvp
eamsden
];
meta.maintainers = with maintainers; [ arianvp eamsden ];
}

@@ -1,6 +1,12 @@
# Systemd services for docker.
{ config, lib, utils, pkgs, ... }:
{
config,
lib,
utils,
pkgs,
...
}:
with lib;
@@ -8,7 +14,7 @@ let
cfg = config.virtualisation.docker;
proxy_env = config.networking.proxy.envVars;
settingsFormat = pkgs.formats.json {};
settingsFormat = pkgs.formats.json { };
daemonSettingsFile = settingsFormat.generate "daemon.json" cfg.daemon.settings;
in
@@ -16,119 +22,132 @@ in
###### interface
options.virtualisation.docker = {
enable =
mkOption {
type = types.bool;
default = false;
description = ''
This option enables docker, a daemon that manages
linux containers. Users in the "docker" group can interact with
the daemon (e.g. to start or stop containers) using the
{command}`docker` command line tool.
'';
};
enable = mkOption {
type = types.bool;
default = false;
description = ''
This option enables docker, a daemon that manages
linux containers. Users in the "docker" group can interact with
the daemon (e.g. to start or stop containers) using the
{command}`docker` command line tool.
'';
};
listenOptions =
mkOption {
type = types.listOf types.str;
default = ["/run/docker.sock"];
description = ''
A list of unix and tcp docker should listen to. The format follows
ListenStream as described in {manpage}`systemd.socket(5)`.
'';
};
listenOptions = mkOption {
type = types.listOf types.str;
default = [ "/run/docker.sock" ];
description = ''
A list of unix and tcp docker should listen to. The format follows
ListenStream as described in {manpage}`systemd.socket(5)`.
'';
};
enableOnBoot =
mkOption {
type = types.bool;
default = true;
description = ''
When enabled dockerd is started on boot. This is required for
containers which are created with the
`--restart=always` flag to work. If this option is
disabled, docker might be started on demand by socket activation.
'';
};
enableOnBoot = mkOption {
type = types.bool;
default = true;
description = ''
When enabled dockerd is started on boot. This is required for
containers which are created with the
`--restart=always` flag to work. If this option is
disabled, docker might be started on demand by socket activation.
'';
};
daemon.settings =
mkOption {
type = types.submodule {
freeformType = settingsFormat.type;
options = {
live-restore = mkOption {
type = types.bool;
# Prior to NixOS 24.11, this was set to true by default, while upstream defaulted to false.
# Keep the option unset to follow upstream defaults
default = versionOlder config.system.stateVersion "24.11";
defaultText = literalExpression "lib.versionOlder config.system.stateVersion \"24.11\"";
description = ''
Allow dockerd to be restarted without affecting running container.
This option is incompatible with docker swarm.
'';
};
daemon.settings = mkOption {
type = types.submodule {
freeformType = settingsFormat.type;
options = {
live-restore = mkOption {
type = types.bool;
# Prior to NixOS 24.11, this was set to true by default, while upstream defaulted to false.
# Keep the option unset to follow upstream defaults
default = versionOlder config.system.stateVersion "24.11";
defaultText = literalExpression "lib.versionOlder config.system.stateVersion \"24.11\"";
description = ''
Allow dockerd to be restarted without affecting running container.
This option is incompatible with docker swarm.
'';
};
};
default = { };
example = {
ipv6 = true;
"live-restore" = true;
"fixed-cidr-v6" = "fd00::/80";
};
description = ''
Configuration for docker daemon. The attributes are serialized to JSON used as daemon.conf.
See https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
'';
};
enableNvidia =
mkOption {
type = types.bool;
default = false;
description = ''
**Deprecated**, please use hardware.nvidia-container-toolkit.enable instead.
Enable nvidia-docker wrapper, supporting NVIDIA GPUs inside docker containers.
'';
default = { };
example = {
ipv6 = true;
"live-restore" = true;
"fixed-cidr-v6" = "fd00::/80";
};
description = ''
Configuration for docker daemon. The attributes are serialized to JSON used as daemon.conf.
See https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
'';
};
storageDriver =
mkOption {
type = types.nullOr (types.enum ["aufs" "btrfs" "devicemapper" "overlay" "overlay2" "zfs"]);
default = null;
description = ''
This option determines which Docker
[storage driver](https://docs.docker.com/storage/storagedriver/select-storage-driver/)
to use.
By default it lets docker automatically choose the preferred storage
driver.
However, it is recommended to specify a storage driver explicitly, as
docker's default varies over versions.
enableNvidia = mkOption {
type = types.bool;
default = false;
description = ''
**Deprecated**, please use hardware.nvidia-container-toolkit.enable instead.
::: {.warning}
Changing the storage driver will cause any existing containers
and images to become inaccessible.
:::
'';
};
Enable nvidia-docker wrapper, supporting NVIDIA GPUs inside docker containers.
'';
};
logDriver =
mkOption {
type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs" "local"];
default = "journald";
description = ''
This option determines which Docker log driver to use.
'';
};
storageDriver = mkOption {
type = types.nullOr (
types.enum [
"aufs"
"btrfs"
"devicemapper"
"overlay"
"overlay2"
"zfs"
]
);
default = null;
description = ''
This option determines which Docker
[storage driver](https://docs.docker.com/storage/storagedriver/select-storage-driver/)
to use.
By default it lets docker automatically choose the preferred storage
driver.
However, it is recommended to specify a storage driver explicitly, as
docker's default varies over versions.
extraOptions =
mkOption {
type = types.separatedString " ";
default = "";
description = ''
The extra command-line options to pass to
{command}`docker` daemon.
'';
};
::: {.warning}
Changing the storage driver will cause any existing containers
and images to become inaccessible.
:::
'';
};
logDriver = mkOption {
type = types.enum [
"none"
"json-file"
"syslog"
"journald"
"gelf"
"fluentd"
"awslogs"
"splunk"
"etwlogs"
"gcplogs"
"local"
];
default = "journald";
description = ''
This option determines which Docker log driver to use.
'';
};
extraOptions = mkOption {
type = types.separatedString " ";
default = "";
description = ''
The extra command-line options to pass to
{command}`docker` daemon.
'';
};
autoPrune = {
enable = mkOption {
@@ -143,7 +162,7 @@ in
flags = mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
example = [ "--all" ];
description = ''
Any additional flags passed to {command}`docker system prune`.
@@ -202,20 +221,32 @@ in
};
imports = [
(mkRemovedOptionModule ["virtualisation" "docker" "socketActivation"] "This option was removed and socket activation is now always active")
(mkAliasOptionModule ["virtualisation" "docker" "liveRestore"] ["virtualisation" "docker" "daemon" "settings" "live-restore"])
(mkRemovedOptionModule [
"virtualisation"
"docker"
"socketActivation"
] "This option was removed and socket activation is now always active")
(mkAliasOptionModule
[ "virtualisation" "docker" "liveRestore" ]
[ "virtualisation" "docker" "daemon" "settings" "live-restore" ]
)
];
###### implementation
config = mkIf cfg.enable (mkMerge [{
boot.kernelModules = [ "bridge" "veth" "br_netfilter" "xt_nat" ];
config = mkIf cfg.enable (mkMerge [
{
boot.kernelModules = [
"bridge"
"veth"
"br_netfilter"
"xt_nat"
];
boot.kernel.sysctl = {
"net.ipv4.conf.all.forwarding" = mkOverride 98 true;
"net.ipv4.conf.default.forwarding" = mkOverride 98 true;
};
environment.systemPackages = [ cfg.package ]
++ optional cfg.enableNvidia pkgs.nvidia-docker;
environment.systemPackages = [ cfg.package ] ++ optional cfg.enableNvidia pkgs.nvidia-docker;
users.groups.docker.gid = config.ids.gids.docker;
systemd.packages = [ cfg.package ];
@@ -223,15 +254,20 @@ in
# (https://docs.docker.com/engine/release-notes/25.0/#new). Encourage
# moving to CDI as opposed to having deprecated runtime
# wrappers.
warnings = lib.optionals (cfg.enableNvidia && (lib.strings.versionAtLeast cfg.package.version "25")) [
''
You have set virtualisation.docker.enableNvidia. This option is deprecated, please set hardware.nvidia-container-toolkit.enable instead.
''
];
warnings =
lib.optionals (cfg.enableNvidia && (lib.strings.versionAtLeast cfg.package.version "25"))
[
''
You have set virtualisation.docker.enableNvidia. This option is deprecated, please set hardware.nvidia-container-toolkit.enable instead.
''
];
systemd.services.docker = {
wantedBy = optional cfg.enableOnBoot "multi-user.target";
after = [ "network.target" "docker.socket" ];
after = [
"network.target"
"docker.socket"
];
requires = [ "docker.socket" ];
environment = proxy_env;
serviceConfig = {
@@ -242,14 +278,17 @@ in
${cfg.package}/bin/dockerd \
--config-file=${daemonSettingsFile} \
${cfg.extraOptions}
''];
ExecReload=[
''
];
ExecReload = [
""
"${pkgs.procps}/bin/kill -s HUP $MAINPID"
];
};
path = [ pkgs.kmod ] ++ optional (cfg.storageDriver == "zfs") pkgs.zfs
path =
[ pkgs.kmod ]
++ optional (cfg.storageDriver == "zfs") pkgs.zfs
++ optional cfg.enableNvidia pkgs.nvidia-docker
++ cfg.extraPackages;
};
@@ -273,12 +312,15 @@ in
serviceConfig = {
Type = "oneshot";
ExecStart = utils.escapeSystemdExecArgs ([
(lib.getExe cfg.package)
"system"
"prune"
"-f"
] ++ cfg.autoPrune.flags);
ExecStart = utils.escapeSystemdExecArgs (
[
(lib.getExe cfg.package)
"system"
"prune"
"-f"
]
++ cfg.autoPrune.flags
);
};
startAt = optional cfg.autoPrune.enable cfg.autoPrune.dates;
@@ -294,9 +336,13 @@ in
};
assertions = [
{ assertion = cfg.enableNvidia && pkgs.stdenv.hostPlatform.isx86_64 -> config.hardware.graphics.enable32Bit or false;
{
assertion =
cfg.enableNvidia && pkgs.stdenv.hostPlatform.isx86_64
-> config.hardware.graphics.enable32Bit or false;
message = "Option enableNvidia on x86_64 requires 32-bit support libraries";
}];
}
];
virtualisation.docker.daemon.settings = {
group = "docker";

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
inherit (lib)
@@ -7,7 +12,7 @@ let
mkIf
optional
readFile
;
;
in
{
@@ -16,7 +21,6 @@ in
../profiles/qemu-guest.nix
];
fileSystems."/" = {
fsType = "ext4";
device = "/dev/disk/by-label/nixos";
@@ -24,9 +28,16 @@ };
};
boot.growPartition = true;
boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
boot.kernelParams = [
"console=ttyS0"
"panic=1"
"boot.panic_on_fail"
];
boot.initrd.kernelModules = [ "virtio_scsi" ];
boot.kernelModules = [ "virtio_pci" "virtio_net" ];
boot.kernelModules = [
"virtio_pci"
"virtio_net"
];
# Generate a GRUB menu.
boot.loader.grub.device = "/dev/sda";
@@ -81,18 +92,35 @@ in
systemd.services.google-shutdown-scripts.wantedBy = [ "multi-user.target" ];
security.sudo.extraRules = mkIf config.users.mutableUsers [
{ groups = [ "google-sudoers" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
{
groups = [ "google-sudoers" ];
commands = [
{
command = "ALL";
options = [ "NOPASSWD" ];
}
];
}
];
security.sudo-rs.extraRules = mkIf config.users.mutableUsers [
{ groups = [ "google-sudoers" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
{
groups = [ "google-sudoers" ];
commands = [
{
command = "ALL";
options = [ "NOPASSWD" ];
}
];
}
];
users.groups.google-sudoers = mkIf config.users.mutableUsers { };
boot.extraModprobeConfig = readFile "${pkgs.google-guest-configs}/etc/modprobe.d/gce-blacklist.conf";
environment.etc."sysctl.d/60-gce-network-security.conf".source = "${pkgs.google-guest-configs}/etc/sysctl.d/60-gce-network-security.conf";
environment.etc."sysctl.d/60-gce-network-security.conf".source =
"${pkgs.google-guest-configs}/etc/sysctl.d/60-gce-network-security.conf";
environment.etc."default/instance_configs.cfg".text = ''
[Accounts]

@@ -1,15 +1,23 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
cfg = config.virtualisation.hypervGuest;
in {
in
{
imports = [
(mkRemovedOptionModule [ "virtualisation" "hypervGuest" "videoMode" ]
"The video mode can now be configured via standard tools, or in Hyper-V VM settings."
)
(mkRemovedOptionModule [
"virtualisation"
"hypervGuest"
"videoMode"
] "The video mode can now be configured via standard tools, or in Hyper-V VM settings.")
];
options = {
@@ -21,7 +29,11 @@ in {
config = mkIf cfg.enable {
boot = {
initrd.kernelModules = [
"hv_balloon" "hv_netvsc" "hv_storvsc" "hv_utils" "hv_vmbus"
"hv_balloon"
"hv_netvsc"
"hv_storvsc"
"hv_utils"
"hv_vmbus"
];
initrd.availableKernelModules = [ "hyperv_keyboard" ];
@@ -34,17 +46,19 @@ in {
environment.systemPackages = [ config.boot.kernelPackages.hyperv-daemons.bin ];
# enable hotadding cpu/memory
services.udev.packages = lib.singleton (pkgs.writeTextFile {
name = "hyperv-cpu-and-memory-hotadd-udev-rules";
destination = "/etc/udev/rules.d/99-hyperv-cpu-and-memory-hotadd.rules";
text = ''
# Memory hotadd
SUBSYSTEM=="memory", ACTION=="add", DEVPATH=="/devices/system/memory/memory[0-9]*", TEST=="state", ATTR{state}="online"
services.udev.packages = lib.singleton (
pkgs.writeTextFile {
name = "hyperv-cpu-and-memory-hotadd-udev-rules";
destination = "/etc/udev/rules.d/99-hyperv-cpu-and-memory-hotadd.rules";
text = ''
# Memory hotadd
SUBSYSTEM=="memory", ACTION=="add", DEVPATH=="/devices/system/memory/memory[0-9]*", TEST=="state", ATTR{state}="online"
# CPU hotadd
SUBSYSTEM=="cpu", ACTION=="add", DEVPATH=="/devices/system/cpu/cpu[0-9]*", TEST=="online", ATTR{online}="1"
'';
});
# CPU hotadd
SUBSYSTEM=="cpu", ACTION=="add", DEVPATH=="/devices/system/cpu/cpu[0-9]*", TEST=="online", ATTR{online}="1"
'';
}
);
systemd = {
packages = [ config.boot.kernelPackages.hyperv-daemons.lib ];

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@@ -215,26 +220,34 @@ in
{
imports = [
(mkRemovedOptionModule [ "virtualisation" "libvirtd" "enableKVM" ]
"Set the option `virtualisation.libvirtd.qemu.package' instead.")
(mkRemovedOptionModule [
"virtualisation"
"libvirtd"
"enableKVM"
] "Set the option `virtualisation.libvirtd.qemu.package' instead.")
(mkRenamedOptionModule
[ "virtualisation" "libvirtd" "qemuPackage" ]
[ "virtualisation" "libvirtd" "qemu" "package" ])
[ "virtualisation" "libvirtd" "qemu" "package" ]
)
(mkRenamedOptionModule
[ "virtualisation" "libvirtd" "qemuRunAsRoot" ]
[ "virtualisation" "libvirtd" "qemu" "runAsRoot" ])
[ "virtualisation" "libvirtd" "qemu" "runAsRoot" ]
)
(mkRenamedOptionModule
[ "virtualisation" "libvirtd" "qemuVerbatimConfig" ]
[ "virtualisation" "libvirtd" "qemu" "verbatimConfig" ])
[ "virtualisation" "libvirtd" "qemu" "verbatimConfig" ]
)
(mkRenamedOptionModule
[ "virtualisation" "libvirtd" "qemuOvmf" ]
[ "virtualisation" "libvirtd" "qemu" "ovmf" "enable" ])
(mkRemovedOptionModule
[ "virtualisation" "libvirtd" "qemuOvmfPackage" ]
"If this option was set to `foo`, set the option `virtualisation.libvirtd.qemu.ovmf.packages' to `[foo.fd]` instead.")
[ "virtualisation" "libvirtd" "qemu" "ovmf" "enable" ]
)
(mkRemovedOptionModule [ "virtualisation" "libvirtd" "qemuOvmfPackage" ]
"If this option was set to `foo`, set the option `virtualisation.libvirtd.qemu.ovmf.packages' to `[foo.fd]` instead."
)
(mkRenamedOptionModule
[ "virtualisation" "libvirtd" "qemuSwtpm" ]
[ "virtualisation" "libvirtd" "qemu" "swtpm" "enable" ])
[ "virtualisation" "libvirtd" "qemu" "swtpm" "enable" ]
)
];
###### interface
@@ -273,7 +286,10 @@ in
};
onBoot = mkOption {
type = types.enum [ "start" "ignore" ];
type = types.enum [
"start"
"ignore"
];
default = "start";
description = ''
Specifies the action to be done to / on the guests when the host boots.
@@ -285,7 +301,10 @@ in
};
onShutdown = mkOption {
type = types.enum [ "shutdown" "suspend" ];
type = types.enum [
"shutdown"
"suspend"
];
default = "suspend";
description = ''
When shutting down / restarting the host what method should
@@ -368,7 +387,6 @@ in
};
};
###### implementation
config = mkIf cfg.enable {
@@ -377,8 +395,8 @@ in
{
assertion = config.virtualisation.libvirtd.qemu.ovmf.package == null;
message = ''
The option virtualisation.libvirtd.qemu.ovmf.package is superseded by virtualisation.libvirtd.qemu.ovmf.packages.
If this option was set to `foo`, set the option `virtualisation.libvirtd.qemu.ovmf.packages' to `[foo.fd]` instead.
The option virtualisation.libvirtd.qemu.ovmf.package is superseded by virtualisation.libvirtd.qemu.ovmf.packages.
If this option was set to `foo`, set the option `virtualisation.libvirtd.qemu.ovmf.packages' to `[foo.fd]` instead.
'';
}
{
@@ -389,11 +407,13 @@ in
environment = {
# this file is expected in /etc/qemu and not sysconfdir (/var/lib)
etc."qemu/bridge.conf".text = lib.concatMapStringsSep "\n"
(e:
"allow ${e}")
cfg.allowedBridges;
systemPackages = with pkgs; [ libressl.nc iptables cfg.package cfg.qemu.package ];
etc."qemu/bridge.conf".text = lib.concatMapStringsSep "\n" (e: "allow ${e}") cfg.allowedBridges;
systemPackages = with pkgs; [
libressl.nc
iptables
cfg.package
cfg.qemu.package
];
etc.ethertypes.source = "${pkgs.iptables}/etc/ethertypes";
};
@@ -449,34 +469,44 @@ in
ln -s --force ${cfg.qemu.package}/bin/qemu-pr-helper /run/${dirName}/nix-helpers/
${optionalString cfg.qemu.ovmf.enable (let
ovmfpackage = pkgs.buildEnv {
name = "qemu-ovmf";
paths = cfg.qemu.ovmf.packages;
};
in
${optionalString cfg.qemu.ovmf.enable (
let
ovmfpackage = pkgs.buildEnv {
name = "qemu-ovmf";
paths = cfg.qemu.ovmf.packages;
};
in
''
ln -s --force ${ovmfpackage}/FV/AAVMF_CODE{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/OVMF_CODE{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/AAVMF_VARS{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/OVMF_VARS{,.ms}.fd /run/${dirName}/nix-ovmf/
'')}
ln -s --force ${ovmfpackage}/FV/AAVMF_CODE{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/OVMF_CODE{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/AAVMF_VARS{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/OVMF_VARS{,.ms}.fd /run/${dirName}/nix-ovmf/
''
)}
# Symlink hooks to /var/lib/libvirt
${concatStringsSep "\n" (map (driver:
''
mkdir -p /var/lib/${dirName}/hooks/${driver}.d
rm -rf /var/lib/${dirName}/hooks/${driver}.d/*
${concatStringsSep "\n" (mapAttrsToList (name: value:
"ln -s --force ${value} /var/lib/${dirName}/hooks/${driver}.d/${name}") cfg.hooks.${driver})}
'') (attrNames cfg.hooks))}
${concatStringsSep "\n" (
map (driver: ''
mkdir -p /var/lib/${dirName}/hooks/${driver}.d
rm -rf /var/lib/${dirName}/hooks/${driver}.d/*
${concatStringsSep "\n" (
mapAttrsToList (
name: value: "ln -s --force ${value} /var/lib/${dirName}/hooks/${driver}.d/${name}"
) cfg.hooks.${driver}
)}
'') (attrNames cfg.hooks)
)}
'';
serviceConfig = {
Type = "oneshot";
RuntimeDirectoryPreserve = "yes";
LogsDirectory = subDirs [ "qemu" ];
RuntimeDirectory = subDirs [ "nix-emulators" "nix-helpers" "nix-ovmf" ];
RuntimeDirectory = subDirs [
"nix-emulators"
"nix-helpers"
"nix-ovmf"
];
StateDirectory = subDirs [ "dnsmasq" ];
};
};
@@ -484,8 +514,7 @@ in
systemd.services.libvirtd = {
wantedBy = [ "multi-user.target" ];
requires = [ "libvirtd-config.service" ];
after = [ "libvirtd-config.service" ]
++ optional vswitch.enable "ovs-vswitchd.service";
after = [ "libvirtd-config.service" ] ++ optional vswitch.enable "ovs-vswitchd.service";
environment.LIBVIRTD_ARGS = escapeShellArgs (
[
@@ -493,10 +522,15 @@ in
configFile
"--timeout"
"120" # from ${libvirt}/var/lib/sysconfig/libvirtd
] ++ cfg.extraOptions
]
++ cfg.extraOptions
);
path = [ cfg.qemu.package pkgs.netcat ] # libvirtd requires qemu-img to manage disk images
path =
[
cfg.qemu.package
pkgs.netcat
] # libvirtd requires qemu-img to manage disk images
++ optional vswitch.enable vswitch.package
++ optional cfg.qemu.swtpm.enable cfg.qemu.swtpm.package;
@@ -517,7 +551,11 @@ in
wantedBy = [ "multi-user.target" ];
requires = [ "libvirtd.service" ];
after = [ "libvirtd.service" ];
path = with pkgs; [ coreutils gawk cfg.package ];
path = with pkgs; [
coreutils
gawk
cfg.package
];
restartIfChanged = false;
environment.ON_BOOT = "${cfg.onBoot}";
@@ -554,16 +592,18 @@ in
# https://libvirt.org/daemons.html#monolithic-systemd-integration
systemd.sockets.libvirtd.wantedBy = [ "sockets.target" ];
systemd.tmpfiles.rules = let
vhostUserCollection = pkgs.buildEnv {
name = "vhost-user";
paths = cfg.qemu.vhostUserPackages;
pathsToLink = [ "/share/qemu/vhost-user" ];
};
in [
"L+ /var/lib/qemu/vhost-user - - - - ${vhostUserCollection}/share/qemu/vhost-user"
"L+ /var/lib/qemu/firmware - - - - ${cfg.qemu.package}/share/qemu/firmware"
];
systemd.tmpfiles.rules =
let
vhostUserCollection = pkgs.buildEnv {
name = "vhost-user";
paths = cfg.qemu.vhostUserPackages;
pathsToLink = [ "/share/qemu/vhost-user" ];
};
in
[
"L+ /var/lib/qemu/vhost-user - - - - ${vhostUserCollection}/share/qemu/vhost-user"
"L+ /var/lib/qemu/firmware - - - - ${cfg.qemu.package}/share/qemu/firmware"
];
security.polkit = {
enable = true;

File diff suppressed because it is too large.

@@ -1,4 +1,10 @@
{ config, lib, utils, pkgs, ... }:
{
config,
lib,
utils,
pkgs,
...
}:
let
cfg = config.virtualisation.podman;
json = pkgs.formats.json { };
@@ -6,29 +12,42 @@ let
inherit (lib) mkOption types;
# Provides a fake "docker" binary mapping to podman
dockerCompat = pkgs.runCommand "${cfg.package.pname}-docker-compat-${cfg.package.version}"
{
outputs = [ "out" "man" ];
inherit (cfg.package) meta;
preferLocalBuild = true;
} ''
mkdir -p $out/bin
ln -s ${cfg.package}/bin/podman $out/bin/docker
dockerCompat =
pkgs.runCommand "${cfg.package.pname}-docker-compat-${cfg.package.version}"
{
outputs = [
"out"
"man"
];
inherit (cfg.package) meta;
preferLocalBuild = true;
}
''
mkdir -p $out/bin
ln -s ${cfg.package}/bin/podman $out/bin/docker
mkdir -p $man/share/man/man1
for f in ${cfg.package.man}/share/man/man1/*; do
basename=$(basename $f | sed s/podman/docker/g)
ln -s $f $man/share/man/man1/$basename
done
'';
mkdir -p $man/share/man/man1
for f in ${cfg.package.man}/share/man/man1/*; do
basename=$(basename $f | sed s/podman/docker/g)
ln -s $f $man/share/man/man1/$basename
done
'';
in
{
imports = [
(lib.mkRemovedOptionModule [ "virtualisation" "podman" "defaultNetwork" "dnsname" ]
"Use virtualisation.podman.defaultNetwork.settings.dns_enabled instead.")
(lib.mkRemovedOptionModule [ "virtualisation" "podman" "defaultNetwork" "extraPlugins" ]
"Netavark isn't compatible with CNI plugins.")
(lib.mkRemovedOptionModule [
"virtualisation"
"podman"
"defaultNetwork"
"dnsname"
] "Use virtualisation.podman.defaultNetwork.settings.dns_enabled instead.")
(lib.mkRemovedOptionModule [
"virtualisation"
"podman"
"defaultNetwork"
"extraPlugins"
] "Netavark isn't compatible with CNI plugins.")
./network-socket.nix
];
@@ -38,17 +57,16 @@ in
options.virtualisation.podman = {
enable =
mkOption {
type = types.bool;
default = false;
description = ''
This option enables Podman, a daemonless container engine for
developing, managing, and running OCI Containers on your Linux System.
enable = mkOption {
type = types.bool;
default = false;
description = ''
This option enables Podman, a daemonless container engine for
developing, managing, and running OCI Containers on your Linux System.
It is a drop-in replacement for the {command}`docker` command.
'';
};
It is a drop-in replacement for the {command}`docker` command.
'';
};
dockerSocket.enable = mkOption {
type = types.bool;
@@ -108,7 +126,7 @@ in
flags = mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
example = [ "--all" ];
description = ''
Any additional flags passed to {command}`podman system prune`.
@@ -126,22 +144,38 @@ in
};
};
package = (lib.mkPackageOption pkgs "podman" {
extraDescription = ''
This package will automatically include extra packages and runtimes.
'';
}) // {
apply = pkg: pkg.override {
extraPackages = cfg.extraPackages ++ [
"/run/wrappers" # setuid shadow
config.systemd.package # To allow systemd-based container healthchecks
] ++ lib.optional (config.boot.supportedFilesystems.zfs or false) config.boot.zfs.package;
extraRuntimes = [ pkgs.runc ]
++ lib.optionals (config.virtualisation.containers.containersConf.settings.network.default_rootless_network_cmd or "" == "slirp4netns") (with pkgs; [
slirp4netns
]);
package =
(lib.mkPackageOption pkgs "podman" {
extraDescription = ''
This package will automatically include extra packages and runtimes.
'';
})
// {
apply =
pkg:
pkg.override {
extraPackages =
cfg.extraPackages
++ [
"/run/wrappers" # setuid shadow
config.systemd.package # To allow systemd-based container healthchecks
]
++ lib.optional (config.boot.supportedFilesystems.zfs or false) config.boot.zfs.package;
extraRuntimes =
[ pkgs.runc ]
++ lib.optionals
(
config.virtualisation.containers.containersConf.settings.network.default_rootless_network_cmd or ""
== "slirp4netns"
)
(
with pkgs;
[
slirp4netns
]
);
};
};
};
defaultNetwork.settings = lib.mkOption {
type = json.type;
@@ -156,17 +190,27 @@ in
config =
let
networkConfig = ({
dns_enabled = false;
driver = "bridge";
id = "0000000000000000000000000000000000000000000000000000000000000000";
internal = false;
ipam_options = { driver = "host-local"; };
ipv6_enabled = false;
name = "podman";
network_interface = "podman0";
subnets = [{ gateway = "10.88.0.1"; subnet = "10.88.0.0/16"; }];
} // cfg.defaultNetwork.settings);
networkConfig = (
{
dns_enabled = false;
driver = "bridge";
id = "0000000000000000000000000000000000000000000000000000000000000000";
internal = false;
ipam_options = {
driver = "host-local";
};
ipv6_enabled = false;
name = "podman";
network_interface = "podman0";
subnets = [
{
gateway = "10.88.0.1";
subnet = "10.88.0.0/16";
}
];
}
// cfg.defaultNetwork.settings
);
inherit (networkConfig) dns_enabled network_interface;
in
lib.mkIf cfg.enable {
@@ -176,8 +220,7 @@ in
''
];
environment.systemPackages = [ cfg.package ]
++ lib.optional cfg.dockerCompat dockerCompat;
environment.systemPackages = [ cfg.package ] ++ lib.optional cfg.dockerCompat dockerCompat;
# https://github.com/containers/podman/blob/097cc6eb6dd8e598c0e8676d21267b4edb11e144/docs/tutorials/basic_networking.md#default-network
environment.etc."containers/networks/podman.json" = lib.mkIf (cfg.defaultNetwork.settings != { }) {
@@ -204,12 +247,15 @@ in
serviceConfig = {
Type = "oneshot";
ExecStart = utils.escapeSystemdExecArgs ([
(lib.getExe cfg.package)
"system"
"prune"
"-f"
] ++ cfg.autoPrune.flags);
ExecStart = utils.escapeSystemdExecArgs (
[
(lib.getExe cfg.package)
"system"
"prune"
"-f"
]
++ cfg.autoPrune.flags
);
};
startAt = lib.optional cfg.autoPrune.enable cfg.autoPrune.dates;
@@ -223,8 +269,9 @@ in
# Podman does not support multiple sockets, as of podman 5.0.2, so we use
# a symlink. Unfortunately this does not let us use an alternate group,
# such as `docker`.
systemd.sockets.podman.socketConfig.Symlinks =
lib.mkIf cfg.dockerSocket.enable [ "/run/docker.sock" ];
systemd.sockets.podman.socketConfig.Symlinks = lib.mkIf cfg.dockerSocket.enable [
"/run/docker.sock"
];
systemd.user.services.podman.environment = config.networking.proxy.envVars;
systemd.user.sockets.podman.wantedBy = [ "sockets.target" ];
@@ -237,15 +284,18 @@ in
systemd.tmpfiles.packages = [
# The /run/podman rule interferes with our podman group, so we remove
# it and let the systemd socket logic take care of it.
(pkgs.runCommand "podman-tmpfiles-nixos" {
package = cfg.package;
preferLocalBuild = true;
} ''
mkdir -p $out/lib/tmpfiles.d/
grep -v 'D! /run/podman 0700 root root' \
<$package/lib/tmpfiles.d/podman.conf \
>$out/lib/tmpfiles.d/podman.conf
'')
(pkgs.runCommand "podman-tmpfiles-nixos"
{
package = cfg.package;
preferLocalBuild = true;
}
''
mkdir -p $out/lib/tmpfiles.d/
grep -v 'D! /run/podman 0700 root root' \
<$package/lib/tmpfiles.d/podman.conf \
>$out/lib/tmpfiles.d/podman.conf
''
)
];
users.groups.podman = { };

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.virtualisation.rosetta;
@@ -55,10 +60,9 @@ in
fsType = "virtiofs";
};
nix.settings = {
extra-platforms = [ "x86_64-linux" ];
extra-sandbox-paths = [
extra-sandbox-paths = [
"/run/binfmt"
cfg.mountPoint
];