Mirror of https://github.com/NixOS/nixpkgs.git, synced 2025-06-09 19:13:26 +03:00
incus: format
parent ed30be523a
commit 9ab59bb5fb
13 changed files with 600 additions and 482 deletions
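This commit is a formatting-only pass over the incus/LXC modules, packages, and tests: no option names, defaults, or test logic change, only layout. The commit message does not name the formatter, but the pattern throughout the diff matches nixpkgs' RFC-style nixfmt output: function argument sets are split one attribute per line, empty attrsets and lists are written `{ }` and `[ ]` with an inner space, multi-element lists and long string interpolations are broken across lines, and `key="value"` assignments gain spaces around `=`. As a rough sketch (the module below is a made-up example, not a file from this commit), the transformation looks like:

    # before (hypothetical module, for illustration only)
    { lib, pkgs, ... }:
    {
      options.demo.extraSettings = lib.mkOption {
        type = lib.types.attrs;
        default = {};
      };
      config.environment.systemPackages = with pkgs; [ curl jq git ];
    }

    # after the formatting pass
    {
      lib,
      pkgs,
      ...
    }:
    {
      # the argument set above is now one attribute per line
      options.demo.extraSettings = lib.mkOption {
        type = lib.types.attrs;
        default = { };
      };
      # multi-element lists are expanded to one element per line
      config.environment.systemPackages = with pkgs; [
        curl
        jq
        git
      ];
    }

Because only layout changes, the parsed form of each touched file (for example via `nix-instantiate --parse`) should be identical before and after.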
@@ -1,51 +1,71 @@
{ lib, config, pkgs, ... }:
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
templateSubmodule = {...}: {
|
||||
options = {
|
||||
enable = lib.mkEnableOption "this template";
|
||||
templateSubmodule =
|
||||
{ ... }:
|
||||
{
|
||||
options = {
|
||||
enable = lib.mkEnableOption "this template";
|
||||
|
||||
target = lib.mkOption {
|
||||
description = "Path in the container";
|
||||
type = lib.types.path;
|
||||
};
|
||||
template = lib.mkOption {
|
||||
description = ".tpl file for rendering the target";
|
||||
type = lib.types.path;
|
||||
};
|
||||
when = lib.mkOption {
|
||||
description = "Events which trigger a rewrite (create, copy)";
|
||||
type = lib.types.listOf (lib.types.str);
|
||||
};
|
||||
properties = lib.mkOption {
|
||||
description = "Additional properties";
|
||||
type = lib.types.attrs;
|
||||
default = {};
|
||||
target = lib.mkOption {
|
||||
description = "Path in the container";
|
||||
type = lib.types.path;
|
||||
};
|
||||
template = lib.mkOption {
|
||||
description = ".tpl file for rendering the target";
|
||||
type = lib.types.path;
|
||||
};
|
||||
when = lib.mkOption {
|
||||
description = "Events which trigger a rewrite (create, copy)";
|
||||
type = lib.types.listOf (lib.types.str);
|
||||
};
|
||||
properties = lib.mkOption {
|
||||
description = "Additional properties";
|
||||
type = lib.types.attrs;
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
toYAML = name: data: pkgs.writeText name (lib.generators.toYAML {} data);
|
||||
toYAML = name: data: pkgs.writeText name (lib.generators.toYAML { } data);
|
||||
|
||||
cfg = config.virtualisation.lxc;
|
||||
templates = if cfg.templates != {} then let
|
||||
list = lib.mapAttrsToList (name: value: { inherit name; } // value)
|
||||
(lib.filterAttrs (name: value: value.enable) cfg.templates);
|
||||
in
|
||||
{
|
||||
files = map (tpl: {
|
||||
source = tpl.template;
|
||||
target = "/templates/${tpl.name}.tpl";
|
||||
}) list;
|
||||
properties = lib.listToAttrs (map (tpl: lib.nameValuePair tpl.target {
|
||||
when = tpl.when;
|
||||
template = "${tpl.name}.tpl";
|
||||
properties = tpl.properties;
|
||||
}) list);
|
||||
}
|
||||
else { files = []; properties = {}; };
|
||||
templates =
|
||||
if cfg.templates != { } then
|
||||
let
|
||||
list = lib.mapAttrsToList (name: value: { inherit name; } // value) (
|
||||
lib.filterAttrs (name: value: value.enable) cfg.templates
|
||||
);
|
||||
in
|
||||
{
|
||||
files = map (tpl: {
|
||||
source = tpl.template;
|
||||
target = "/templates/${tpl.name}.tpl";
|
||||
}) list;
|
||||
properties = lib.listToAttrs (
|
||||
map (
|
||||
tpl:
|
||||
lib.nameValuePair tpl.target {
|
||||
when = tpl.when;
|
||||
template = "${tpl.name}.tpl";
|
||||
properties = tpl.properties;
|
||||
}
|
||||
) list
|
||||
);
|
||||
}
|
||||
else
|
||||
{
|
||||
files = [ ];
|
||||
properties = { };
|
||||
};
|
||||
|
||||
in {
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../image/file-options.nix
|
||||
];
|
||||
@@ -59,7 +79,7 @@ in {
templates = lib.mkOption {
|
||||
description = "Templates for LXD";
|
||||
type = lib.types.attrsOf (lib.types.submodule templateSubmodule);
|
||||
default = {};
|
||||
default = { };
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
# create /etc/hostname on container creation
|
||||
@@ -91,7 +111,10 @@ in {
};
|
||||
|
||||
config = {
|
||||
system.nixos.tags = [ "lxc" "metadata" ];
|
||||
system.nixos.tags = [
|
||||
"lxc"
|
||||
"metadata"
|
||||
];
|
||||
image.extension = "tar.xz";
|
||||
image.filePath = "tarball/${config.image.fileName}";
|
||||
system.build.image = config.system.build.metadata;
|
||||
@@ -100,7 +123,9 @@ in {
contents = [
|
||||
{
|
||||
source = toYAML "metadata.yaml" {
|
||||
architecture = builtins.elemAt (builtins.match "^([a-z0-9_]+).+" (toString pkgs.stdenv.hostPlatform.system)) 0;
|
||||
architecture = builtins.elemAt (builtins.match "^([a-z0-9_]+).+" (
|
||||
toString pkgs.stdenv.hostPlatform.system
|
||||
)) 0;
|
||||
creation_date = 1;
|
||||
properties = {
|
||||
description = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} ${pkgs.stdenv.hostPlatform.system}";
|
||||
@@ -1,4 +1,4 @@
{lib, ...}:
|
||||
{ lib, ... }:
|
||||
|
||||
{
|
||||
meta = {
|
||||
@@ -1,6 +1,11 @@
# LXC Configuration
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
cfg = config.virtualisation.lxc;
|
||||
@@ -12,58 +17,53 @@ in
};
|
||||
|
||||
options.virtualisation.lxc = {
|
||||
enable =
|
||||
lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
This enables Linux Containers (LXC), which provides tools
|
||||
for creating and managing system or application containers
|
||||
on Linux.
|
||||
'';
|
||||
};
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
This enables Linux Containers (LXC), which provides tools
|
||||
for creating and managing system or application containers
|
||||
on Linux.
|
||||
'';
|
||||
};
|
||||
|
||||
unprivilegedContainers = lib.mkEnableOption "support for unprivileged users to launch containers";
|
||||
|
||||
systemConfig =
|
||||
lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
This is the system-wide LXC config. See
|
||||
{manpage}`lxc.system.conf(5)`.
|
||||
'';
|
||||
};
|
||||
systemConfig = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
This is the system-wide LXC config. See
|
||||
{manpage}`lxc.system.conf(5)`.
|
||||
'';
|
||||
};
|
||||
package = lib.mkPackageOption pkgs "lxc" { };
|
||||
|
||||
defaultConfig =
|
||||
lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
Default config (default.conf) for new containers, i.e. for
|
||||
network config. See {manpage}`lxc.container.conf(5)`.
|
||||
'';
|
||||
};
|
||||
defaultConfig = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
Default config (default.conf) for new containers, i.e. for
|
||||
network config. See {manpage}`lxc.container.conf(5)`.
|
||||
'';
|
||||
};
|
||||
|
||||
usernetConfig =
|
||||
lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
This is the config file for managing unprivileged user network
|
||||
administration access in LXC. See {manpage}`lxc-usernet(5)`.
|
||||
'';
|
||||
};
|
||||
usernetConfig = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
This is the config file for managing unprivileged user network
|
||||
administration access in LXC. See {manpage}`lxc-usernet(5)`.
|
||||
'';
|
||||
};
|
||||
|
||||
bridgeConfig =
|
||||
lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
This is the config file for override lxc-net bridge default settings.
|
||||
'';
|
||||
};
|
||||
bridgeConfig = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
This is the config file for override lxc-net bridge default settings.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
###### implementation
|
||||
@@ -88,7 +88,7 @@ in
};
|
||||
|
||||
# We don't need the `lxc-user` group, unless the unprivileged containers are enabled.
|
||||
users.groups = lib.mkIf cfg.unprivilegedContainers { lxc-user = {}; };
|
||||
users.groups = lib.mkIf cfg.unprivilegedContainers { lxc-user = { }; };
|
||||
|
||||
# `lxc-user-nic` needs suid to attach to bridge for unpriv containers.
|
||||
security.wrappers = lib.mkIf cfg.unprivilegedContainers {
|
||||
@@ -108,7 +108,12 @@ in
lxc-net = {
|
||||
enable = true;
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.dnsmasq ];
|
||||
path = [
|
||||
pkgs.iproute2
|
||||
pkgs.iptables
|
||||
pkgs.getent
|
||||
pkgs.dnsmasq
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -1,29 +1,34 @@
# LXC Configuration
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
cfg = config.virtualisation.lxc.lxcfs;
|
||||
in {
|
||||
in
|
||||
{
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
|
||||
###### interface
|
||||
options.virtualisation.lxc.lxcfs = {
|
||||
enable =
|
||||
lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
This enables LXCFS, a FUSE filesystem for LXC.
|
||||
To use lxcfs in include the following configuration in your
|
||||
container configuration:
|
||||
```
|
||||
virtualisation.lxc.defaultConfig = "lxc.include = ''${pkgs.lxcfs}/share/lxc/config/common.conf.d/00-lxcfs.conf";
|
||||
```
|
||||
'';
|
||||
};
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
This enables LXCFS, a FUSE filesystem for LXC.
|
||||
To use lxcfs in include the following configuration in your
|
||||
container configuration:
|
||||
```
|
||||
virtualisation.lxc.defaultConfig = "lxc.include = ''${pkgs.lxcfs}/share/lxc/config/common.conf.d/00-lxcfs.conf";
|
||||
```
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
###### implementation
|
||||
@@ -34,11 +39,11 @@ in {
before = [ "lxc.service" ];
|
||||
restartIfChanged = false;
|
||||
serviceConfig = {
|
||||
ExecStartPre="${pkgs.coreutils}/bin/mkdir -p /var/lib/lxcfs";
|
||||
ExecStart="${pkgs.lxcfs}/bin/lxcfs /var/lib/lxcfs";
|
||||
ExecStopPost="-${pkgs.fuse}/bin/fusermount -u /var/lib/lxcfs";
|
||||
KillMode="process";
|
||||
Restart="on-failure";
|
||||
ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /var/lib/lxcfs";
|
||||
ExecStart = "${pkgs.lxcfs}/bin/lxcfs /var/lib/lxcfs";
|
||||
ExecStopPost = "-${pkgs.fuse}/bin/fusermount -u /var/lib/lxcfs";
|
||||
KillMode = "process";
|
||||
Restart = "on-failure";
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -1,140 +1,154 @@
import ../make-test-python.nix ({ pkgs, lib, extra ? {}, name ? "incus-container", incus ? pkgs.incus-lts, ... } :
|
||||
import ../make-test-python.nix (
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
extra ? { },
|
||||
name ? "incus-container",
|
||||
incus ? pkgs.incus-lts,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
releases = import ../../release.nix {
|
||||
configuration = lib.recursiveUpdate {
|
||||
let
|
||||
releases = import ../../release.nix {
|
||||
configuration = lib.recursiveUpdate {
|
||||
# Building documentation makes the test unnecessarily take a longer time:
|
||||
documentation.enable = lib.mkForce false;
|
||||
|
||||
boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
|
||||
}
|
||||
extra;
|
||||
};
|
||||
|
||||
container-image-metadata = "${releases.incusContainerMeta.${pkgs.stdenv.hostPlatform.system}}/tarball/nixos-system-${pkgs.stdenv.hostPlatform.system}.tar.xz";
|
||||
container-image-rootfs = "${releases.incusContainerImage.${pkgs.stdenv.hostPlatform.system}}/nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}.squashfs";
|
||||
in
|
||||
{
|
||||
inherit name;
|
||||
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
|
||||
nodes.machine = { ... }: {
|
||||
virtualisation = {
|
||||
# Ensure test VM has enough resources for creating and managing guests
|
||||
cores = 2;
|
||||
memorySize = 1024;
|
||||
diskSize = 4096;
|
||||
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
};
|
||||
} extra;
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
};
|
||||
|
||||
testScript = # python
|
||||
''
|
||||
def instance_is_up(_) -> bool:
|
||||
status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
|
||||
return status == 0
|
||||
container-image-metadata = "${
|
||||
releases.incusContainerMeta.${pkgs.stdenv.hostPlatform.system}
|
||||
}/tarball/nixos-system-${pkgs.stdenv.hostPlatform.system}.tar.xz";
|
||||
container-image-rootfs = "${
|
||||
releases.incusContainerImage.${pkgs.stdenv.hostPlatform.system}
|
||||
}/nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}.squashfs";
|
||||
in
|
||||
{
|
||||
inherit name;
|
||||
|
||||
def set_container(config):
|
||||
machine.succeed(f"incus config set container {config}")
|
||||
machine.succeed("incus restart container")
|
||||
with machine.nested("Waiting for instance to start and be usable"):
|
||||
retry(instance_is_up)
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
|
||||
def check_sysctl(instance):
|
||||
with subtest("systemd sysctl settings are applied"):
|
||||
machine.succeed(f"incus exec {instance} -- systemctl status systemd-sysctl")
|
||||
sysctl = machine.succeed(f"incus exec {instance} -- sysctl net.ipv4.ip_forward").strip().split(" ")[-1]
|
||||
assert "1" == sysctl, f"systemd-sysctl configuration not correctly applied, {sysctl} != 1"
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation = {
|
||||
# Ensure test VM has enough resources for creating and managing guests
|
||||
cores = 2;
|
||||
memorySize = 1024;
|
||||
diskSize = 4096;
|
||||
|
||||
machine.wait_for_unit("incus.service")
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
};
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
};
|
||||
|
||||
# no preseed should mean no service
|
||||
machine.fail("systemctl status incus-preseed.service")
|
||||
testScript = # python
|
||||
''
|
||||
def instance_is_up(_) -> bool:
|
||||
status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
|
||||
return status == 0
|
||||
|
||||
machine.succeed("incus admin init --minimal")
|
||||
|
||||
with subtest("Container image can be imported"):
|
||||
machine.succeed("incus image import ${container-image-metadata} ${container-image-rootfs} --alias nixos")
|
||||
|
||||
with subtest("Container can be launched and managed"):
|
||||
machine.succeed("incus launch nixos container")
|
||||
with machine.nested("Waiting for instance to start and be usable"):
|
||||
retry(instance_is_up)
|
||||
machine.succeed("echo true | incus exec container /run/current-system/sw/bin/bash -")
|
||||
|
||||
with subtest("Container mounts lxcfs overlays"):
|
||||
machine.succeed("incus exec container mount | grep 'lxcfs on /proc/cpuinfo type fuse.lxcfs'")
|
||||
machine.succeed("incus exec container mount | grep 'lxcfs on /proc/meminfo type fuse.lxcfs'")
|
||||
|
||||
with subtest("resource limits"):
|
||||
with subtest("Container CPU limits can be managed"):
|
||||
set_container("limits.cpu 1")
|
||||
cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
|
||||
assert cpuinfo == "1", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 1, got: {cpuinfo}"
|
||||
|
||||
set_container("limits.cpu 2")
|
||||
cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
|
||||
assert cpuinfo == "2", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 2, got: {cpuinfo}"
|
||||
|
||||
with subtest("Container memory limits can be managed"):
|
||||
set_container("limits.memory 64MB")
|
||||
meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
|
||||
meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
|
||||
assert meminfo_bytes == "62500 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '62500 kB', got: '{meminfo_bytes}'"
|
||||
|
||||
set_container("limits.memory 128MB")
|
||||
meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
|
||||
meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
|
||||
assert meminfo_bytes == "125000 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '125000 kB', got: '{meminfo_bytes}'"
|
||||
|
||||
with subtest("virtual tpm can be configured"):
|
||||
machine.succeed("incus config device add container vtpm tpm path=/dev/tpm0 pathrm=/dev/tpmrm0")
|
||||
machine.succeed("incus exec container -- test -e /dev/tpm0")
|
||||
machine.succeed("incus exec container -- test -e /dev/tpmrm0")
|
||||
machine.succeed("incus config device remove container vtpm")
|
||||
machine.fail("incus exec container -- test -e /dev/tpm0")
|
||||
|
||||
with subtest("lxc-generator"):
|
||||
with subtest("lxc-container generator configures plain container"):
|
||||
# reuse the existing container to save some time
|
||||
machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
|
||||
check_sysctl("container")
|
||||
|
||||
with subtest("lxc-container generator configures nested container"):
|
||||
machine.execute("incus delete --force container")
|
||||
machine.succeed("incus launch nixos container --config security.nesting=true")
|
||||
def set_container(config):
|
||||
machine.succeed(f"incus config set container {config}")
|
||||
machine.succeed("incus restart container")
|
||||
with machine.nested("Waiting for instance to start and be usable"):
|
||||
retry(instance_is_up)
|
||||
|
||||
machine.fail("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
|
||||
target = machine.succeed("incus exec container readlink -- -f /run/systemd/system/systemd-binfmt.service").strip()
|
||||
assert target == "/dev/null", "lxc generator did not correctly mask /run/systemd/system/systemd-binfmt.service"
|
||||
def check_sysctl(instance):
|
||||
with subtest("systemd sysctl settings are applied"):
|
||||
machine.succeed(f"incus exec {instance} -- systemctl status systemd-sysctl")
|
||||
sysctl = machine.succeed(f"incus exec {instance} -- sysctl net.ipv4.ip_forward").strip().split(" ")[-1]
|
||||
assert "1" == sysctl, f"systemd-sysctl configuration not correctly applied, {sysctl} != 1"
|
||||
|
||||
check_sysctl("container")
|
||||
machine.wait_for_unit("incus.service")
|
||||
|
||||
with subtest("lxc-container generator configures privileged container"):
|
||||
machine.execute("incus delete --force container")
|
||||
machine.succeed("incus launch nixos container --config security.privileged=true")
|
||||
# no preseed should mean no service
|
||||
machine.fail("systemctl status incus-preseed.service")
|
||||
|
||||
machine.succeed("incus admin init --minimal")
|
||||
|
||||
with subtest("Container image can be imported"):
|
||||
machine.succeed("incus image import ${container-image-metadata} ${container-image-rootfs} --alias nixos")
|
||||
|
||||
with subtest("Container can be launched and managed"):
|
||||
machine.succeed("incus launch nixos container")
|
||||
with machine.nested("Waiting for instance to start and be usable"):
|
||||
retry(instance_is_up)
|
||||
machine.succeed("echo true | incus exec container /run/current-system/sw/bin/bash -")
|
||||
|
||||
machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
|
||||
with subtest("Container mounts lxcfs overlays"):
|
||||
machine.succeed("incus exec container mount | grep 'lxcfs on /proc/cpuinfo type fuse.lxcfs'")
|
||||
machine.succeed("incus exec container mount | grep 'lxcfs on /proc/meminfo type fuse.lxcfs'")
|
||||
|
||||
check_sysctl("container")
|
||||
with subtest("resource limits"):
|
||||
with subtest("Container CPU limits can be managed"):
|
||||
set_container("limits.cpu 1")
|
||||
cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
|
||||
assert cpuinfo == "1", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 1, got: {cpuinfo}"
|
||||
|
||||
with subtest("softDaemonRestart"):
|
||||
with subtest("Instance remains running when softDaemonRestart is enabled and services is stopped"):
|
||||
pid = machine.succeed("incus info container | grep 'PID'").split(":")[1].strip()
|
||||
machine.succeed(f"ps {pid}")
|
||||
machine.succeed("systemctl stop incus")
|
||||
machine.succeed(f"ps {pid}")
|
||||
'';
|
||||
})
|
||||
set_container("limits.cpu 2")
|
||||
cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
|
||||
assert cpuinfo == "2", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 2, got: {cpuinfo}"
|
||||
|
||||
with subtest("Container memory limits can be managed"):
|
||||
set_container("limits.memory 64MB")
|
||||
meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
|
||||
meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
|
||||
assert meminfo_bytes == "62500 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '62500 kB', got: '{meminfo_bytes}'"
|
||||
|
||||
set_container("limits.memory 128MB")
|
||||
meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
|
||||
meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
|
||||
assert meminfo_bytes == "125000 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '125000 kB', got: '{meminfo_bytes}'"
|
||||
|
||||
with subtest("virtual tpm can be configured"):
|
||||
machine.succeed("incus config device add container vtpm tpm path=/dev/tpm0 pathrm=/dev/tpmrm0")
|
||||
machine.succeed("incus exec container -- test -e /dev/tpm0")
|
||||
machine.succeed("incus exec container -- test -e /dev/tpmrm0")
|
||||
machine.succeed("incus config device remove container vtpm")
|
||||
machine.fail("incus exec container -- test -e /dev/tpm0")
|
||||
|
||||
with subtest("lxc-generator"):
|
||||
with subtest("lxc-container generator configures plain container"):
|
||||
# reuse the existing container to save some time
|
||||
machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
|
||||
check_sysctl("container")
|
||||
|
||||
with subtest("lxc-container generator configures nested container"):
|
||||
machine.execute("incus delete --force container")
|
||||
machine.succeed("incus launch nixos container --config security.nesting=true")
|
||||
with machine.nested("Waiting for instance to start and be usable"):
|
||||
retry(instance_is_up)
|
||||
|
||||
machine.fail("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
|
||||
target = machine.succeed("incus exec container readlink -- -f /run/systemd/system/systemd-binfmt.service").strip()
|
||||
assert target == "/dev/null", "lxc generator did not correctly mask /run/systemd/system/systemd-binfmt.service"
|
||||
|
||||
check_sysctl("container")
|
||||
|
||||
with subtest("lxc-container generator configures privileged container"):
|
||||
machine.execute("incus delete --force container")
|
||||
machine.succeed("incus launch nixos container --config security.privileged=true")
|
||||
with machine.nested("Waiting for instance to start and be usable"):
|
||||
retry(instance_is_up)
|
||||
|
||||
machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
|
||||
|
||||
check_sysctl("container")
|
||||
|
||||
with subtest("softDaemonRestart"):
|
||||
with subtest("Instance remains running when softDaemonRestart is enabled and services is stopped"):
|
||||
pid = machine.succeed("incus info container | grep 'PID'").split(":")[1].strip()
|
||||
machine.succeed(f"ps {pid}")
|
||||
machine.succeed("systemctl stop incus")
|
||||
machine.succeed(f"ps {pid}")
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,69 +1,78 @@
import ../make-test-python.nix ({ pkgs, lib, incus ? pkgs.incus-lts, ... } :
|
||||
import ../make-test-python.nix (
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
incus ? pkgs.incus-lts,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
name = "incus-openvswitch";
|
||||
{
|
||||
name = "incus-openvswitch";
|
||||
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
|
||||
nodes.machine = { lib, ... }: {
|
||||
virtualisation = {
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
};
|
||||
|
||||
vswitch.enable = true;
|
||||
incus.preseed = {
|
||||
networks = [
|
||||
{
|
||||
name = "nixostestbr0";
|
||||
type = "bridge";
|
||||
config = {
|
||||
"bridge.driver" = "openvswitch";
|
||||
"ipv4.address" = "10.0.100.1/24";
|
||||
"ipv4.nat" = "true";
|
||||
};
|
||||
}
|
||||
];
|
||||
profiles = [
|
||||
{
|
||||
name = "nixostest_default";
|
||||
devices = {
|
||||
eth0 = {
|
||||
name = "eth0";
|
||||
network = "nixostestbr0";
|
||||
type = "nic";
|
||||
};
|
||||
root = {
|
||||
path = "/";
|
||||
pool = "default";
|
||||
size = "35GiB";
|
||||
type = "disk";
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
storage_pools = [
|
||||
{
|
||||
name = "nixostest_pool";
|
||||
driver = "dir";
|
||||
}
|
||||
];
|
||||
};
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
machine.wait_for_unit("incus.service")
|
||||
machine.wait_for_unit("incus-preseed.service")
|
||||
nodes.machine =
|
||||
{ lib, ... }:
|
||||
{
|
||||
virtualisation = {
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
};
|
||||
|
||||
with subtest("Verify openvswitch bridge"):
|
||||
machine.succeed("incus network info nixostestbr0")
|
||||
vswitch.enable = true;
|
||||
incus.preseed = {
|
||||
networks = [
|
||||
{
|
||||
name = "nixostestbr0";
|
||||
type = "bridge";
|
||||
config = {
|
||||
"bridge.driver" = "openvswitch";
|
||||
"ipv4.address" = "10.0.100.1/24";
|
||||
"ipv4.nat" = "true";
|
||||
};
|
||||
}
|
||||
];
|
||||
profiles = [
|
||||
{
|
||||
name = "nixostest_default";
|
||||
devices = {
|
||||
eth0 = {
|
||||
name = "eth0";
|
||||
network = "nixostestbr0";
|
||||
type = "nic";
|
||||
};
|
||||
root = {
|
||||
path = "/";
|
||||
pool = "default";
|
||||
size = "35GiB";
|
||||
type = "disk";
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
storage_pools = [
|
||||
{
|
||||
name = "nixostest_pool";
|
||||
driver = "dir";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
};
|
||||
|
||||
with subtest("Verify openvswitch bridge"):
|
||||
machine.succeed("ovs-vsctl br-exists nixostestbr0")
|
||||
'';
|
||||
})
|
||||
testScript = ''
|
||||
machine.wait_for_unit("incus.service")
|
||||
machine.wait_for_unit("incus-preseed.service")
|
||||
|
||||
with subtest("Verify openvswitch bridge"):
|
||||
machine.succeed("incus network info nixostestbr0")
|
||||
|
||||
with subtest("Verify openvswitch bridge"):
|
||||
machine.succeed("ovs-vsctl br-exists nixostestbr0")
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,32 +1,41 @@
import ../make-test-python.nix ({ pkgs, lib, incus ? pkgs.incus-lts, ... } :
|
||||
import ../make-test-python.nix (
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
incus ? pkgs.incus-lts,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
name = "incus-socket-activated";
|
||||
{
|
||||
name = "incus-socket-activated";
|
||||
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
|
||||
nodes.machine = { lib, ... }: {
|
||||
virtualisation = {
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
socketActivation = true;
|
||||
};
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
machine.wait_for_unit("incus.socket")
|
||||
nodes.machine =
|
||||
{ lib, ... }:
|
||||
{
|
||||
virtualisation = {
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
socketActivation = true;
|
||||
};
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
};
|
||||
|
||||
# ensure service is not running by default
|
||||
machine.fail("systemctl is-active incus.service")
|
||||
machine.fail("systemctl is-active incus-preseed.service")
|
||||
testScript = ''
|
||||
machine.wait_for_unit("incus.socket")
|
||||
|
||||
# access the socket and ensure the service starts
|
||||
machine.succeed("incus list")
|
||||
machine.wait_for_unit("incus.service")
|
||||
'';
|
||||
})
|
||||
# ensure service is not running by default
|
||||
machine.fail("systemctl is-active incus.service")
|
||||
machine.fail("systemctl is-active incus-preseed.service")
|
||||
|
||||
# access the socket and ensure the service starts
|
||||
machine.succeed("incus list")
|
||||
machine.wait_for_unit("incus.service")
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,67 +1,84 @@
import ../make-test-python.nix ({ pkgs, lib, incus ? pkgs.incus-lts, ... }: {
|
||||
name = "incus-ui";
|
||||
import ../make-test-python.nix (
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
incus ? pkgs.incus-lts,
|
||||
...
|
||||
}:
|
||||
{
|
||||
name = "incus-ui";
|
||||
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
|
||||
nodes.machine = { lib, ... }: {
|
||||
virtualisation = {
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
};
|
||||
incus.ui.enable = true;
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
|
||||
environment.systemPackages =
|
||||
let
|
||||
seleniumScript = pkgs.writers.writePython3Bin "selenium-script"
|
||||
{
|
||||
libraries = with pkgs.python3Packages; [ selenium ];
|
||||
} ''
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.firefox.options import Options
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
nodes.machine =
|
||||
{ lib, ... }:
|
||||
{
|
||||
virtualisation = {
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
};
|
||||
incus.ui.enable = true;
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
|
||||
options = Options()
|
||||
options.add_argument("--headless")
|
||||
service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501
|
||||
environment.systemPackages =
|
||||
let
|
||||
seleniumScript =
|
||||
pkgs.writers.writePython3Bin "selenium-script"
|
||||
{
|
||||
libraries = with pkgs.python3Packages; [ selenium ];
|
||||
}
|
||||
''
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.firefox.options import Options
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
|
||||
driver = webdriver.Firefox(options=options, service=service)
|
||||
driver.implicitly_wait(10)
|
||||
driver.get("https://localhost:8443/ui")
|
||||
options = Options()
|
||||
options.add_argument("--headless")
|
||||
service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501
|
||||
|
||||
wait = WebDriverWait(driver, 60)
|
||||
driver = webdriver.Firefox(options=options, service=service)
|
||||
driver.implicitly_wait(10)
|
||||
driver.get("https://localhost:8443/ui")
|
||||
|
||||
assert len(driver.find_elements(By.CLASS_NAME, "l-application")) > 0
|
||||
assert len(driver.find_elements(By.CLASS_NAME, "l-navigation__drawer")) > 0
|
||||
wait = WebDriverWait(driver, 60)
|
||||
|
||||
driver.close()
|
||||
'';
|
||||
in
|
||||
with pkgs; [ curl firefox-unwrapped geckodriver seleniumScript ];
|
||||
};
|
||||
assert len(driver.find_elements(By.CLASS_NAME, "l-application")) > 0
|
||||
assert len(driver.find_elements(By.CLASS_NAME, "l-navigation__drawer")) > 0
|
||||
|
||||
driver.close()
|
||||
'';
|
||||
in
|
||||
with pkgs;
|
||||
[
|
||||
curl
|
||||
firefox-unwrapped
|
||||
geckodriver
|
||||
seleniumScript
|
||||
];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
machine.wait_for_unit("sockets.target")
|
||||
machine.wait_for_unit("incus.service")
|
||||
machine.wait_for_file("/var/lib/incus/unix.socket")
|
||||
testScript = ''
|
||||
machine.wait_for_unit("sockets.target")
|
||||
machine.wait_for_unit("incus.service")
|
||||
machine.wait_for_file("/var/lib/incus/unix.socket")
|
||||
|
||||
# Configure incus listen address
|
||||
machine.succeed("incus config set core.https_address :8443")
|
||||
machine.succeed("systemctl restart incus")
|
||||
# Configure incus listen address
|
||||
machine.succeed("incus config set core.https_address :8443")
|
||||
machine.succeed("systemctl restart incus")
|
||||
|
||||
# Check that the INCUS_UI environment variable is populated in the systemd unit
|
||||
machine.succeed("cat /etc/systemd/system/incus.service | grep 'INCUS_UI'")
|
||||
# Check that the INCUS_UI environment variable is populated in the systemd unit
|
||||
machine.succeed("cat /etc/systemd/system/incus.service | grep 'INCUS_UI'")
|
||||
|
||||
# Ensure the endpoint returns an HTML page with 'Incus UI' in the title
|
||||
machine.succeed("curl -kLs https://localhost:8443/ui | grep '<title>Incus UI</title>'")
|
||||
# Ensure the endpoint returns an HTML page with 'Incus UI' in the title
|
||||
machine.succeed("curl -kLs https://localhost:8443/ui | grep '<title>Incus UI</title>'")
|
||||
|
||||
# Ensure the application is actually rendered by the Javascript
|
||||
machine.succeed("PYTHONUNBUFFERED=1 selenium-script")
|
||||
'';
|
||||
})
|
||||
# Ensure the application is actually rendered by the Javascript
|
||||
machine.succeed("PYTHONUNBUFFERED=1 selenium-script")
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,86 +1,95 @@
import ../make-test-python.nix ({ pkgs, lib, incus ? pkgs.incus-lts, ... }:
|
||||
import ../make-test-python.nix (
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
incus ? pkgs.incus-lts,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
releases = import ../../release.nix {
|
||||
configuration = {
|
||||
# Building documentation makes the test unnecessarily take a longer time:
|
||||
documentation.enable = lib.mkForce false;
|
||||
let
|
||||
releases = import ../../release.nix {
|
||||
configuration = {
|
||||
# Building documentation makes the test unnecessarily take a longer time:
|
||||
documentation.enable = lib.mkForce false;
|
||||
|
||||
# Our tests require `grep` & friends:
|
||||
environment.systemPackages = with pkgs; [busybox];
|
||||
};
|
||||
};
|
||||
|
||||
vm-image-metadata = releases.incusVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
|
||||
vm-image-disk = releases.incusVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
|
||||
|
||||
instance-name = "instance1";
|
||||
in
|
||||
{
|
||||
name = "incus-virtual-machine";
|
||||
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
|
||||
nodes.machine = {...}: {
|
||||
virtualisation = {
|
||||
# Ensure test VM has enough resources for creating and managing guests
|
||||
cores = 2;
|
||||
memorySize = 1024;
|
||||
diskSize = 4096;
|
||||
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
# Our tests require `grep` & friends:
|
||||
environment.systemPackages = with pkgs; [ busybox ];
|
||||
};
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
};
|
||||
|
||||
testScript = # python
|
||||
''
|
||||
def instance_is_up(_) -> bool:
|
||||
status, _ = machine.execute("incus exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
|
||||
return status == 0
|
||||
vm-image-metadata = releases.incusVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
|
||||
vm-image-disk = releases.incusVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
|
||||
|
||||
machine.wait_for_unit("incus.service")
|
||||
instance-name = "instance1";
|
||||
in
|
||||
{
|
||||
name = "incus-virtual-machine";
|
||||
|
||||
machine.succeed("incus admin init --minimal")
|
||||
meta = {
|
||||
maintainers = lib.teams.lxc.members;
|
||||
};
|
||||
|
||||
with subtest("virtual-machine image can be imported"):
|
||||
machine.succeed("incus image import ${vm-image-metadata}/*/*.tar.xz ${vm-image-disk}/nixos.qcow2 --alias nixos")
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation = {
|
||||
# Ensure test VM has enough resources for creating and managing guests
|
||||
cores = 2;
|
||||
memorySize = 1024;
|
||||
diskSize = 4096;
|
||||
|
||||
with subtest("virtual-machine can be created"):
|
||||
machine.succeed("incus create nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
|
||||
incus = {
|
||||
enable = true;
|
||||
package = incus;
|
||||
};
|
||||
};
|
||||
networking.nftables.enable = true;
|
||||
};
|
||||
|
||||
with subtest("virtual tpm can be configured"):
|
||||
machine.succeed("incus config device add ${instance-name} vtpm tpm path=/dev/tpm0")
|
||||
testScript = # python
|
||||
''
|
||||
def instance_is_up(_) -> bool:
|
||||
status, _ = machine.execute("incus exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
|
||||
return status == 0
|
||||
|
||||
with subtest("virtual-machine can be launched and become available"):
|
||||
machine.succeed("incus start ${instance-name}")
|
||||
with machine.nested("Waiting for instance to start and be usable"):
|
||||
retry(instance_is_up)
|
||||
machine.wait_for_unit("incus.service")
|
||||
|
||||
with subtest("incus-agent is started"):
|
||||
machine.succeed("incus exec ${instance-name} systemctl is-active incus-agent")
|
||||
machine.succeed("incus admin init --minimal")
|
||||
|
||||
with subtest("incus-agent has a valid path"):
|
||||
machine.succeed("incus exec ${instance-name} -- bash -c 'true'")
|
||||
with subtest("virtual-machine image can be imported"):
|
||||
machine.succeed("incus image import ${vm-image-metadata}/*/*.tar.xz ${vm-image-disk}/nixos.qcow2 --alias nixos")
|
||||
|
||||
with subtest("guest supports cpu hotplug"):
|
||||
machine.succeed("incus config set ${instance-name} limits.cpu=1")
|
||||
count = int(machine.succeed("incus exec ${instance-name} -- nproc").strip())
|
||||
assert count == 1, f"Wrong number of CPUs reported, want: 1, got: {count}"
|
||||
with subtest("virtual-machine can be created"):
|
||||
machine.succeed("incus create nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
|
||||
|
||||
machine.succeed("incus config set ${instance-name} limits.cpu=2")
|
||||
count = int(machine.succeed("incus exec ${instance-name} -- nproc").strip())
|
||||
assert count == 2, f"Wrong number of CPUs reported, want: 2, got: {count}"
|
||||
with subtest("virtual tpm can be configured"):
|
||||
machine.succeed("incus config device add ${instance-name} vtpm tpm path=/dev/tpm0")
|
||||
|
||||
with subtest("Instance remains running when softDaemonRestart is enabled and services is stopped"):
|
||||
pid = machine.succeed("incus info ${instance-name} | grep 'PID'").split(":")[1].strip()
|
||||
machine.succeed(f"ps {pid}")
|
||||
machine.succeed("systemctl stop incus")
|
||||
machine.succeed(f"ps {pid}")
|
||||
'';
|
||||
})
|
||||
with subtest("virtual-machine can be launched and become available"):
|
||||
machine.succeed("incus start ${instance-name}")
|
||||
with machine.nested("Waiting for instance to start and be usable"):
|
||||
retry(instance_is_up)
|
||||
|
||||
with subtest("incus-agent is started"):
|
||||
machine.succeed("incus exec ${instance-name} systemctl is-active incus-agent")
|
||||
|
||||
with subtest("incus-agent has a valid path"):
|
||||
machine.succeed("incus exec ${instance-name} -- bash -c 'true'")
|
||||
|
||||
with subtest("guest supports cpu hotplug"):
|
||||
machine.succeed("incus config set ${instance-name} limits.cpu=1")
|
||||
count = int(machine.succeed("incus exec ${instance-name} -- nproc").strip())
|
||||
assert count == 1, f"Wrong number of CPUs reported, want: 1, got: {count}"
|
||||
|
||||
machine.succeed("incus config set ${instance-name} limits.cpu=2")
|
||||
count = int(machine.succeed("incus exec ${instance-name} -- nproc").strip())
|
||||
assert count == 2, f"Wrong number of CPUs reported, want: 2, got: {count}"
|
||||
|
||||
with subtest("Instance remains running when softDaemonRestart is enabled and services is stopped"):
|
||||
pid = machine.succeed("incus info ${instance-name} | grep 'PID'").split(":")[1].strip()
|
||||
machine.succeed(f"ps {pid}")
|
||||
machine.succeed("systemctl stop incus")
|
||||
machine.succeed(f"ps {pid}")
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,4 +1,14 @@
{ stdenvNoCC, lib, src, version, makeWrapper, coreutils, findutils, gnugrep, systemd }:
|
||||
{
|
||||
stdenvNoCC,
|
||||
lib,
|
||||
src,
|
||||
version,
|
||||
makeWrapper,
|
||||
coreutils,
|
||||
findutils,
|
||||
gnugrep,
|
||||
systemd,
|
||||
}:
|
||||
|
||||
stdenvNoCC.mkDerivation {
|
||||
name = "distrobuilder-nixos-generator";
|
||||
@@ -14,6 +24,13 @@ stdenvNoCC.mkDerivation {
|
||||
installPhase = ''
|
||||
install -D -m 0555 distrobuilder/lxc.generator $out/lib/systemd/system-generators/lxc
|
||||
wrapProgram $out/lib/systemd/system-generators/lxc --prefix PATH : ${lib.makeBinPath [coreutils findutils gnugrep systemd]}:${systemd}/lib/systemd
|
||||
wrapProgram $out/lib/systemd/system-generators/lxc --prefix PATH : ${
|
||||
lib.makeBinPath [
|
||||
coreutils
|
||||
findutils
|
||||
gnugrep
|
||||
systemd
|
||||
]
|
||||
}:${systemd}/lib/systemd
|
||||
'';
|
||||
}
|
||||
@@ -1,34 +1,37 @@
{ lib
|
||||
, buildGoModule
|
||||
, callPackage
|
||||
, cdrkit
|
||||
, coreutils
|
||||
, debootstrap
|
||||
, fetchFromGitHub
|
||||
, gnupg
|
||||
, gnutar
|
||||
, hivex
|
||||
, makeWrapper
|
||||
, nixosTests
|
||||
, pkg-config
|
||||
, squashfsTools
|
||||
, stdenv
|
||||
, wimlib
|
||||
{
|
||||
lib,
|
||||
buildGoModule,
|
||||
callPackage,
|
||||
cdrkit,
|
||||
coreutils,
|
||||
debootstrap,
|
||||
fetchFromGitHub,
|
||||
gnupg,
|
||||
gnutar,
|
||||
hivex,
|
||||
makeWrapper,
|
||||
nixosTests,
|
||||
pkg-config,
|
||||
squashfsTools,
|
||||
stdenv,
|
||||
wimlib,
|
||||
}:
|
||||
|
||||
let
|
||||
bins = [
|
||||
coreutils
|
||||
debootstrap
|
||||
gnupg
|
||||
gnutar
|
||||
squashfsTools
|
||||
] ++ lib.optionals stdenv.hostPlatform.isx86_64 [
|
||||
# repack-windows deps
|
||||
cdrkit
|
||||
hivex
|
||||
wimlib
|
||||
];
|
||||
bins =
|
||||
[
|
||||
coreutils
|
||||
debootstrap
|
||||
gnupg
|
||||
gnutar
|
||||
squashfsTools
|
||||
]
|
||||
++ lib.optionals stdenv.hostPlatform.isx86_64 [
|
||||
# repack-windows deps
|
||||
cdrkit
|
||||
hivex
|
||||
wimlib
|
||||
];
|
||||
in
|
||||
buildGoModule rec {
|
||||
pname = "distrobuilder";
|
||||
@@ -46,7 +49,6 @@ buildGoModule rec {
|
||||
buildInputs = bins;
|
||||
|
||||
|
||||
# tests require a local keyserver (mkg20001/nixpkgs branch distrobuilder-with-tests) but gpg is currently broken in tests
|
||||
doCheck = false;
|
||||
|
||||
@@ -1,12 +1,13 @@
{ lib
|
||||
, stdenv
|
||||
, fetchFromGitHub
|
||||
, fetchYarnDeps
|
||||
, nodejs
|
||||
, fixup-yarn-lock
|
||||
, yarn
|
||||
, nixosTests
|
||||
, git
|
||||
{
|
||||
lib,
|
||||
stdenv,
|
||||
fetchFromGitHub,
|
||||
fetchYarnDeps,
|
||||
nodejs,
|
||||
fixup-yarn-lock,
|
||||
yarn,
|
||||
nixosTests,
|
||||
git,
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
@@ -53,7 +53,12 @@ stdenv.mkDerivation rec {
|
||||
postInstall = ''
|
||||
# `mount` hook requires access to the `mount` command from `util-linux` and `readlink` from `coreutils`:
|
||||
wrapProgram "$out/share/lxcfs/lxc.mount.hook" --prefix PATH : ${lib.makeBinPath [ coreutils util-linux ]}
|
||||
wrapProgram "$out/share/lxcfs/lxc.mount.hook" --prefix PATH : ${
|
||||
lib.makeBinPath [
|
||||
coreutils
|
||||
util-linux
|
||||
]
|
||||
}
|
||||
'';
|
||||
|
||||
postFixup = ''
|