mirror of https://github.com/NixOS/nixpkgs.git

Merge remote-tracking branch 'upstream/staging-next' into staging
Commit e52d633a63: 1744 changed files with 988732 additions and 514579 deletions
@@ -1,40 +1,38 @@
import ./make-test-python.nix (
  { pkgs, ... }:
  {
    name = "bcachefs";
    meta.maintainers = with pkgs.lib.maintainers; [ Madouura ];

    nodes.machine =
      { pkgs, ... }:
      {
        virtualisation.emptyDiskImages = [ 4096 ];
        networking.hostId = "deadbeef";
        boot.supportedFilesystems = [ "bcachefs" ];
        environment.systemPackages = with pkgs; [
          parted
          keyutils
        ];
      };

    testScript = ''
      machine.succeed("modprobe bcachefs")
      machine.succeed("bcachefs version")
      machine.succeed("ls /dev")

      machine.succeed(
        "mkdir /tmp/mnt",
        "udevadm settle",
        "parted --script /dev/vdb mklabel msdos",
        "parted --script /dev/vdb -- mkpart primary 1024M 50% mkpart primary 50% -1s",
        "udevadm settle",
        "echo password | bcachefs format --encrypted --metadata_replicas 2 --label vtest /dev/vdb1 /dev/vdb2",
        "echo password | bcachefs unlock -k session /dev/vdb1",
        "echo password | mount -t bcachefs /dev/vdb1:/dev/vdb2 /tmp/mnt",
        "udevadm settle",
        "bcachefs fs usage /tmp/mnt",
        "umount /tmp/mnt",
        "udevadm settle",
      )
    '';
  }
)
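Aside on the bcachefs hunk above: the format/unlock/mount sequence the test script drives can be replayed by hand on a scratch VM; a minimal sketch using the same devices and passphrase as the test:

    # format two devices into a single encrypted bcachefs with replicated metadata
    echo password | bcachefs format --encrypted --metadata_replicas 2 --label vtest /dev/vdb1 /dev/vdb2
    # load the key into the session keyring, then mount both devices as one filesystem
    echo password | bcachefs unlock -k session /dev/vdb1
    echo password | mount -t bcachefs /dev/vdb1:/dev/vdb2 /tmp/mnt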
@@ -1,52 +1,50 @@
import ./make-test-python.nix (
  { pkgs, lib, ... }:

  let
    pythonEnv = pkgs.python3.withPackages (p: [ p.beanstalkc ]);

    produce = pkgs.writeScript "produce.py" ''
      #!${pythonEnv.interpreter}
      import beanstalkc

      queue = beanstalkc.Connection(host='localhost', port=11300, parse_yaml=False);
      queue.put(b'this is a job')
      queue.put(b'this is another job')
    '';

    consume = pkgs.writeScript "consume.py" ''
      #!${pythonEnv.interpreter}
      import beanstalkc

      queue = beanstalkc.Connection(host='localhost', port=11300, parse_yaml=False);

      job = queue.reserve(timeout=0)
      print(job.body.decode('utf-8'))
      job.delete()
    '';

  in
  {
    name = "beanstalkd";
    meta.maintainers = [ lib.maintainers.aanderse ];

    nodes.machine =
      { ... }:
      {
        services.beanstalkd.enable = true;
      };

    testScript = ''
      start_all()

      machine.wait_for_unit("beanstalkd.service")

      machine.succeed("${produce}")
      assert "this is a job\n" == machine.succeed(
        "${consume}"
      )
      assert "this is another job\n" == machine.succeed(
        "${consume}"
      )
    '';
  }
)
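Aside on the beanstalkd hunk above: beanstalkc speaks the plain-text beanstalkd protocol on port 11300, so what produce.py does can be sketched at the protocol level (the nc invocation is illustrative, not part of the test):

    # put <priority> <delay> <ttr> <bytes>, then the job body; 13 = len("this is a job")
    printf 'put 0 0 120 13\r\nthis is a job\r\n' | nc localhost 11300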
@@ -1,88 +1,83 @@
{ lib, compression, ... }:
{
  name = "binary-cache-" + compression;
  meta.maintainers = with lib.maintainers; [ thomasjm ];

  nodes.machine =
    { pkgs, ... }:
    {
      imports = [ ../modules/installer/cd-dvd/channel.nix ];
      environment.systemPackages = with pkgs; [
        openssl
        python3
      ];

      # We encrypt the binary cache before putting it on the machine so Nix
      # doesn't bring any references along.
      environment.etc."binary-cache.tar.gz.encrypted".source =
        with pkgs;
        runCommand "binary-cache.tar.gz.encrypted"
          {
            allowReferences = [ ];
            nativeBuildInputs = [ openssl ];
          }
          ''
            tar -czf tmp.tar.gz -C "${
              mkBinaryCache {
                rootPaths = [ hello ];
                inherit compression;
              }
            }" .
            openssl enc -aes-256-cbc -salt -in tmp.tar.gz -out $out -k mysecretpassword
          '';

      nix.extraOptions = ''
        experimental-features = nix-command
      '';
    };

  testScript = ''
    # Decrypt the cache into /tmp/binary-cache.tar.gz
    machine.succeed("openssl enc -d -aes-256-cbc -in /etc/binary-cache.tar.gz.encrypted -out /tmp/binary-cache.tar.gz -k mysecretpassword")

    # Untar the cache into /tmp/cache
    machine.succeed("mkdir /tmp/cache")
    machine.succeed("tar -C /tmp/cache -xf /tmp/binary-cache.tar.gz")

    # Sanity test of cache structure
    status, stdout = machine.execute("ls /tmp/cache")
    cache_files = stdout.split()
    assert ("nix-cache-info" in cache_files)
    assert ("nar" in cache_files)

    # Nix store ping should work
    machine.succeed("nix store ping --store file:///tmp/cache")

    # Cache should contain a .narinfo referring to "hello"
    grepLogs = machine.succeed("grep -l 'StorePath: /nix/store/[[:alnum:]]*-hello-.*' /tmp/cache/*.narinfo")

    # Get the store path referenced by the .narinfo
    narInfoFile = grepLogs.strip()
    narInfoContents = machine.succeed("cat " + narInfoFile)
    import re
    match = re.match(r"^StorePath: (/nix/store/[a-z0-9]*-hello-.*)$", narInfoContents, re.MULTILINE)
    if not match: raise Exception("Couldn't find hello store path in cache")
    storePath = match[1]

    # Make sure the store path doesn't exist yet
    machine.succeed("[ ! -d %s ] || exit 1" % storePath)

    # Should be able to build hello using the cache
    logs = machine.succeed("nix-build -A hello '<nixpkgs>' --option require-sigs false --option trusted-substituters file:///tmp/cache --option substituters file:///tmp/cache 2>&1")
    logLines = logs.split("\n")
    if not "this path will be fetched" in logLines[0]: raise Exception("Unexpected first log line")
    def shouldBe(got, desired):
      if got != desired: raise Exception("Expected '%s' but got '%s'" % (desired, got))
    shouldBe(logLines[1], "  " + storePath)
    shouldBe(logLines[2], "copying path '%s' from 'file:///tmp/cache'..." % storePath)
    shouldBe(logLines[3], storePath)

    # Store path should exist in the store now
    machine.succeed("[ -d %s ] || exit 1" % storePath)
  '';
}
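Aside on the binary-cache hunk above: the substituter options exercised at the end of the test script work the same way interactively; a minimal sketch against the unpacked cache:

    # build "hello", substituting from the local file:// cache instead of compiling
    nix-build -A hello '<nixpkgs>' \
      --option require-sigs false \
      --option substituters file:///tmp/cache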
@@ -1,265 +1,276 @@
import ./make-test-python.nix (
  { pkgs, ... }:

  let
    passphrase = "supersecret";
    dataDir = "/ran:dom/data";
    subDir = "not_anything_here";
    excludedSubDirFile = "not_this_file_either";
    excludeFile = "not_this_file";
    keepFile = "important_file";
    keepFileData = "important_data";
    localRepo = "/root/back:up";
    # a repository on a file system which is not mounted automatically
    localRepoMount = "/noAutoMount";
    archiveName = "my_archive";
    remoteRepo = "borg@server:."; # No need to specify path
    privateKey = pkgs.writeText "id_ed25519" ''
      -----BEGIN OPENSSH PRIVATE KEY-----
      b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
      QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
      RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
      AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
      9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
      -----END OPENSSH PRIVATE KEY-----
    '';
    publicKey = ''
      ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv root@client
    '';
    privateKeyAppendOnly = pkgs.writeText "id_ed25519" ''
      -----BEGIN OPENSSH PRIVATE KEY-----
      b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
      QyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLwAAAJC9YTxxvWE8
      cQAAAAtzc2gtZWQyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLw
      AAAEAAhV7wTl5dL/lz+PF/d4PnZXuG1Id6L/mFEiGT1tZsuFpxm7PUQsZB2Ejs8Xp0YVp8
      IOW+HylIRzhweORbRCMvAAAADXJzY2h1ZXR6QGt1cnQ=
      -----END OPENSSH PRIVATE KEY-----
    '';
    publicKeyAppendOnly = ''
      ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFpxm7PUQsZB2Ejs8Xp0YVp8IOW+HylIRzhweORbRCMv root@client
    '';

  in
  {
    name = "borgbackup";
    meta = with pkgs.lib; {
      maintainers = with maintainers; [ dotlambda ];
    };

    nodes = {
      client =
        { ... }:
        {
          virtualisation.fileSystems.${localRepoMount} = {
            device = "tmpfs";
            fsType = "tmpfs";
            options = [ "noauto" ];
          };

          services.borgbackup.jobs = {

            local = {
              paths = dataDir;
              repo = localRepo;
              preHook = ''
                # Don't append a timestamp
                archiveName="${archiveName}"
              '';
              encryption = {
                mode = "repokey";
                inherit passphrase;
              };
              compression = "auto,zlib,9";
              prune.keep = {
                within = "1y";
                yearly = 5;
              };
              exclude = [ "*/${excludeFile}" ];
              extraCreateArgs = [
                "--exclude-caches"
                "--exclude-if-present"
                ".dont backup"
              ];
              postHook = "echo post";
              startAt = [ ]; # Do not run automatically
            };

            localMount = {
              paths = dataDir;
              repo = localRepoMount;
              encryption.mode = "none";
              startAt = [ ];
            };

            remote = {
              paths = dataDir;
              repo = remoteRepo;
              encryption.mode = "none";
              startAt = [ ];
              environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
            };

            remoteAppendOnly = {
              paths = dataDir;
              repo = remoteRepo;
              encryption.mode = "none";
              startAt = [ ];
              environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly";
            };

            commandSuccess = {
              dumpCommand = pkgs.writeScript "commandSuccess" ''
                echo -n test
              '';
              repo = remoteRepo;
              encryption.mode = "none";
              startAt = [ ];
              environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
            };

            commandFail = {
              dumpCommand = "${pkgs.coreutils}/bin/false";
              repo = remoteRepo;
              encryption.mode = "none";
              startAt = [ ];
              environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
            };

            sleepInhibited = {
              inhibitsSleep = true;
              # Blocks indefinitely while "backing up" so that we can try to suspend the local system while it's hung
              dumpCommand = pkgs.writeScript "sleepInhibited" ''
                cat /dev/zero
              '';
              repo = remoteRepo;
              encryption.mode = "none";
              startAt = [ ];
              environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
            };

          };
        };

      server =
        { ... }:
        {
          services.openssh = {
            enable = true;
            settings = {
              PasswordAuthentication = false;
              KbdInteractiveAuthentication = false;
            };
          };

          services.borgbackup.repos.repo1 = {
            authorizedKeys = [ publicKey ];
            path = "/data/borgbackup";
          };

          # Second repo to make sure the authorizedKeys options are merged correctly
          services.borgbackup.repos.repo2 = {
            authorizedKeysAppendOnly = [ publicKeyAppendOnly ];
            path = "/data/borgbackup";
            quota = ".5G";
          };
        };
    };

    testScript = ''
      start_all()

      client.fail('test -d "${remoteRepo}"')

      client.succeed(
        "cp ${privateKey} /root/id_ed25519"
      )
      client.succeed("chmod 0600 /root/id_ed25519")
      client.succeed(
        "cp ${privateKeyAppendOnly} /root/id_ed25519.appendOnly"
      )
      client.succeed("chmod 0600 /root/id_ed25519.appendOnly")

      client.succeed("mkdir -p ${dataDir}/${subDir}")
      client.succeed("touch ${dataDir}/${excludeFile}")
      client.succeed("touch '${dataDir}/${subDir}/.dont backup'")
      client.succeed("touch ${dataDir}/${subDir}/${excludedSubDirFile}")
      client.succeed("echo '${keepFileData}' > ${dataDir}/${keepFile}")

      with subtest("local"):
          borg = "BORG_PASSPHRASE='${passphrase}' borg"
          client.systemctl("start --wait borgbackup-job-local")
          client.fail("systemctl is-failed borgbackup-job-local")
          # Make sure exactly one archive has been created
          assert int(client.succeed("{} list '${localRepo}' | wc -l".format(borg))) > 0
          # Make sure excludeFile has been excluded
          client.fail(
              "{} list '${localRepo}::${archiveName}' | grep -qF '${excludeFile}'".format(borg)
          )
          # Make sure excludedSubDirFile has been excluded
          client.fail(
              "{} list '${localRepo}::${archiveName}' | grep -qF '${subDir}/${excludedSubDirFile}".format(borg)
          )
          # Make sure keepFile has the correct content
          client.succeed("{} extract '${localRepo}::${archiveName}'".format(borg))
          assert "${keepFileData}" in client.succeed("cat ${dataDir}/${keepFile}")
          # Make sure the same is true when using `borg mount`
          client.succeed(
              "mkdir -p /mnt/borg && {} mount '${localRepo}::${archiveName}' /mnt/borg".format(
                  borg
              )
          )
          assert "${keepFileData}" in client.succeed(
              "cat /mnt/borg/${dataDir}/${keepFile}"
          )

      with subtest("localMount"):
          # the file system for the repo should not be already mounted
          client.fail("mount | grep ${localRepoMount}")
          # ensure trying to write to the mountpoint before the fs is mounted fails
          client.succeed("chattr +i ${localRepoMount}")
          borg = "borg"
          client.systemctl("start --wait borgbackup-job-localMount")
          client.fail("systemctl is-failed borgbackup-job-localMount")
          # Make sure exactly one archive has been created
          assert int(client.succeed("{} list '${localRepoMount}' | wc -l".format(borg))) > 0

      with subtest("remote"):
          borg = "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519' borg"
          server.wait_for_unit("sshd.service")
          client.wait_for_unit("network.target")
          client.systemctl("start --wait borgbackup-job-remote")
          client.fail("systemctl is-failed borgbackup-job-remote")

          # Make sure we can't access repos other than the specified one
          client.fail("{} list borg\@server:wrong".format(borg))

          # TODO: Make sure that data is actually deleted

      with subtest("remoteAppendOnly"):
          borg = (
              "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly' borg"
          )
          server.wait_for_unit("sshd.service")
          client.wait_for_unit("network.target")
          client.systemctl("start --wait borgbackup-job-remoteAppendOnly")
          client.fail("systemctl is-failed borgbackup-job-remoteAppendOnly")

          # Make sure we can't access repos other than the specified one
          client.fail("{} list borg\@server:wrong".format(borg))

          # TODO: Make sure that data is not actually deleted

      with subtest("commandSuccess"):
          server.wait_for_unit("sshd.service")
          client.wait_for_unit("network.target")
          client.systemctl("start --wait borgbackup-job-commandSuccess")
          client.fail("systemctl is-failed borgbackup-job-commandSuccess")
          id = client.succeed("borg-job-commandSuccess list | tail -n1 | cut -d' ' -f1").strip()
          client.succeed(f"borg-job-commandSuccess extract ::{id} stdin")
          assert "test" == client.succeed("cat stdin")

      with subtest("commandFail"):
          server.wait_for_unit("sshd.service")
          client.wait_for_unit("network.target")
          client.systemctl("start --wait borgbackup-job-commandFail")
          client.succeed("systemctl is-failed borgbackup-job-commandFail")

      with subtest("sleepInhibited"):
          server.wait_for_unit("sshd.service")
          client.wait_for_unit("network.target")
          client.fail("systemd-inhibit --list | grep -q borgbackup")
          client.systemctl("start borgbackup-job-sleepInhibited")
          client.wait_until_succeeds("systemd-inhibit --list | grep -q borgbackup")
          client.systemctl("stop borgbackup-job-sleepInhibited")
    '';
  }
)
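Aside on the borgbackup hunk above: the "local" subtest can be replayed by hand inside the client VM; a minimal sketch using the repo, archive name, and passphrase from the test:

    export BORG_PASSPHRASE=supersecret
    borg list '/root/back:up'                 # archives in the repo
    borg list '/root/back:up::my_archive'     # files inside the archive
    borg extract '/root/back:up::my_archive'  # extract into the current directory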
@@ -1,4 +1,5 @@
import ./make-test-python.nix (
  { pkgs, ... }:

  let
    privateKey = ''
@@ -17,71 +18,83 @@ import ./make-test-python.nix ({ pkgs, ... }:
  {
    name = "btrbk-doas";
    meta = with pkgs.lib; {
      maintainers = with maintainers; [
        symphorien
        tu-maurice
      ];
    };

    nodes = {
      archive =
        { ... }:
        {
          security.sudo.enable = false;
          security.doas.enable = true;
          environment.systemPackages = with pkgs; [ btrfs-progs ];
          # note: this makes the privateKey world readable.
          # don't do it with real ssh keys.
          environment.etc."btrbk_key".text = privateKey;
          services.btrbk = {
            extraPackages = [ pkgs.lz4 ];
            instances = {
              remote = {
                onCalendar = "minutely";
                settings = {
                  ssh_identity = "/etc/btrbk_key";
                  ssh_user = "btrbk";
                  stream_compress = "lz4";
                  volume = {
                    "ssh://main/mnt" = {
                      target = "/mnt";
                      snapshot_dir = "btrbk/remote";
                      subvolume = "to_backup";
                    };
                  };
                };
              };
            };
          };
        };

      main =
        { ... }:
        {
          security.sudo.enable = false;
          security.doas.enable = true;
          environment.systemPackages = with pkgs; [ btrfs-progs ];
          services.openssh = {
            enable = true;
            passwordAuthentication = false;
            kbdInteractiveAuthentication = false;
          };
          services.btrbk = {
            extraPackages = [ pkgs.lz4 ];
            sshAccess = [
              {
                key = publicKey;
                roles = [
                  "source"
                  "send"
                  "info"
                  "delete"
                ];
              }
            ];
            instances = {
              local = {
                onCalendar = "minutely";
                settings = {
                  volume = {
                    "/mnt" = {
                      snapshot_dir = "btrbk/local";
                      subvolume = "to_backup";
                    };
                  };
                };
              };
            };
          };
        };
    };

    testScript = ''
@@ -111,4 +124,5 @@ import ./make-test-python.nix ({ pkgs, ... }:
      main.succeed("echo baz > /mnt/to_backup/bar")
      archive.succeed("cat /mnt/*/bar | grep bar")
    '';
  }
)
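Aside on the btrbk-doas hunk above: since onCalendar only schedules systemd units, the instances can also be triggered manually; a sketch assuming the module's usual btrbk-<instance> unit naming (the exact unit name is an assumption, not shown in this diff):

    systemctl start btrbk-local.service
    journalctl -u btrbk-local.service   # inspect what the run did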
@@ -1,4 +1,5 @@
import ./make-test-python.nix (
  { pkgs, ... }:

  let
    privateKey = ''
@@ -21,64 +22,73 @@ import ./make-test-python.nix ({ pkgs, ... }:
    };

    nodes = {
      archive =
        { ... }:
        {
          environment.systemPackages = with pkgs; [ btrfs-progs ];
          # note: this makes the privateKey world readable.
          # don't do it with real ssh keys.
          environment.etc."btrbk_key".text = privateKey;
          services.btrbk = {
            instances = {
              remote = {
                onCalendar = "minutely";
                settings = {
                  ssh_identity = "/etc/btrbk_key";
                  ssh_user = "btrbk";
                  stream_compress = "lz4";
                  volume = {
                    "ssh://main/mnt" = {
                      target = "/mnt";
                      snapshot_dir = "btrbk/remote";
                      subvolume = "to_backup";
                    };
                  };
                };
              };
            };
          };
        };

      main =
        { ... }:
        {
          environment.systemPackages = with pkgs; [ btrfs-progs ];
          services.openssh = {
            enable = true;
            settings = {
              KbdInteractiveAuthentication = false;
              PasswordAuthentication = false;
            };
          };
          services.btrbk = {
            extraPackages = [ pkgs.lz4 ];
            sshAccess = [
              {
                key = publicKey;
                roles = [
                  "source"
                  "send"
                  "info"
                  "delete"
                ];
              }
            ];
            instances = {
              local = {
                onCalendar = "minutely";
                settings = {
                  volume = {
                    "/mnt" = {
                      snapshot_dir = "btrbk/local";
                      subvolume = "to_backup";
                    };
                  };
                };
              };
            };
          };
        };
    };

    testScript = ''
@@ -108,4 +118,5 @@ import ./make-test-python.nix ({ pkgs, ... }:
      main.succeed("echo baz > /mnt/to_backup/bar")
      archive.succeed("cat /mnt/*/bar | grep bar")
    '';
  }
)
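Aside on the btrbk hunk above: each instance corresponds to one btrbk configuration, which can be exercised without creating snapshots; a sketch assuming the module renders the config to /etc/btrbk/<instance>.conf (an assumption, not shown in this diff):

    btrbk -c /etc/btrbk/local.conf dryrun   # resolve the config and show planned actions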
@@ -1,141 +1,138 @@
# Test ensures buildbot master comes up correctly and workers can connect
import ./make-test-python.nix (
  { pkgs, ... }:
  {
    name = "buildbot";

    nodes = {
      bbmaster =
        { pkgs, ... }:
        {
          services.buildbot-master = {
            enable = true;

            # NOTE: use fake repo due to no internet in hydra ci
            factorySteps = [
              "steps.Git(repourl='git://gitrepo/fakerepo.git', mode='incremental')"
              "steps.ShellCommand(command=['bash', 'fakerepo.sh'])"
            ];
            changeSource = [
              "changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollInterval=300)"
            ];
          };
          networking.firewall.allowedTCPPorts = [
            8010
            8011
            9989
          ];
          environment.systemPackages = with pkgs; [
            git
            buildbot-full
          ];
        };

      bbworker =
        { pkgs, ... }:
        {
          services.buildbot-worker = {
            enable = true;
            masterUrl = "bbmaster:9989";
          };
          environment.systemPackages = with pkgs; [
            git
            buildbot-worker
          ];
        };

      gitrepo =
        { pkgs, ... }:
        {
          services.openssh.enable = true;
          networking.firewall.allowedTCPPorts = [
            22
            9418
          ];
          environment.systemPackages = with pkgs; [ git ];
          systemd.services.git-daemon = {
            description = "Git daemon for the test";
            wantedBy = [ "multi-user.target" ];
            after = [
              "network.target"
              "sshd.service"
            ];

            serviceConfig.Restart = "always";
            path = with pkgs; [
              coreutils
              git
              openssh
            ];
            environment = {
              HOME = "/root";
            };
            preStart = ''
              git config --global user.name 'Nobody Fakeuser'
              git config --global user.email 'nobody\@fakerepo.com'
              rm -rvf /srv/repos/fakerepo.git /tmp/fakerepo
              mkdir -pv /srv/repos/fakerepo ~/.ssh
              ssh-keyscan -H gitrepo > ~/.ssh/known_hosts
              cat ~/.ssh/known_hosts

              mkdir -p /src/repos/fakerepo
              cd /srv/repos/fakerepo
              rm -rf *
              git init
              echo -e '#!/bin/sh\necho fakerepo' > fakerepo.sh
              cat fakerepo.sh
              touch .git/git-daemon-export-ok
              git add fakerepo.sh .git/git-daemon-export-ok
              git commit -m fakerepo
            '';
            script = ''
              git daemon --verbose --export-all --base-path=/srv/repos --reuseaddr
            '';
          };
        };
    };

    testScript = ''
      gitrepo.wait_for_unit("git-daemon.service")
      gitrepo.wait_for_unit("multi-user.target")

      with subtest("Repo is accessible via git daemon"):
          bbmaster.systemctl("start network-online.target")
          bbmaster.wait_for_unit("network-online.target")
          bbmaster.succeed("rm -rfv /tmp/fakerepo")
          bbmaster.succeed("git clone git://gitrepo/fakerepo /tmp/fakerepo")

      with subtest("Master service and worker successfully connect"):
          bbmaster.wait_for_unit("buildbot-master.service")
          bbmaster.wait_until_succeeds("curl --fail -s --head http://bbmaster:8010")
          bbworker.systemctl("start network-online.target")
          bbworker.wait_for_unit("network-online.target")
          bbworker.succeed("nc -z bbmaster 8010")
          bbworker.succeed("nc -z bbmaster 9989")
          bbworker.wait_for_unit("buildbot-worker.service")

      with subtest("Stop buildbot worker"):
          bbmaster.succeed("systemctl -l --no-pager status buildbot-master")
          bbmaster.succeed("systemctl stop buildbot-master")
          bbworker.fail("nc -z bbmaster 8010")
          bbworker.fail("nc -z bbmaster 9989")
          bbworker.succeed("systemctl -l --no-pager status buildbot-worker")
          bbworker.succeed("systemctl stop buildbot-worker")

      with subtest("Buildbot daemon mode works"):
          bbmaster.succeed(
              "buildbot create-master /tmp",
              "mv -fv /tmp/master.cfg.sample /tmp/master.cfg",
              "sed -i 's/8010/8011/' /tmp/master.cfg",
              "buildbot start /tmp",
              "nc -z bbmaster 8011",
          )
          bbworker.wait_until_succeeds("curl --fail -s --head http://bbmaster:8011")
          bbmaster.wait_until_succeeds("buildbot stop /tmp")
          bbworker.fail("nc -z bbmaster 8011")
    '';

    meta.maintainers = pkgs.lib.teams.buildbot.members;
  }
)
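Aside on the buildbot hunk above: the "Buildbot daemon mode works" subtest doubles as a recipe for a throwaway master on any host with buildbot installed; the same commands, runnable by hand:

    buildbot create-master /tmp
    mv -fv /tmp/master.cfg.sample /tmp/master.cfg
    sed -i 's/8010/8011/' /tmp/master.cfg   # move the web UI off the default port
    buildbot start /tmp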
@ -1,241 +1,297 @@
|
|||
import ./make-test-python.nix ({pkgs, lib, ...}:
|
||||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = {
|
||||
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
|
||||
monA = {
|
||||
name = "a";
|
||||
ip = "192.168.1.1";
|
||||
let
|
||||
cfg = {
|
||||
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
|
||||
monA = {
|
||||
name = "a";
|
||||
ip = "192.168.1.1";
|
||||
};
|
||||
osd0 = {
|
||||
name = "0";
|
||||
ip = "192.168.1.2";
|
||||
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
|
||||
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
|
||||
};
|
||||
osd1 = {
|
||||
name = "1";
|
||||
ip = "192.168.1.3";
|
||||
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
|
||||
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
|
||||
};
|
||||
osd2 = {
|
||||
name = "2";
|
||||
ip = "192.168.1.4";
|
||||
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
|
||||
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
|
||||
};
|
||||
};
|
||||
osd0 = {
|
||||
name = "0";
|
||||
ip = "192.168.1.2";
|
||||
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
|
||||
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
|
||||
};
|
||||
osd1 = {
|
||||
name = "1";
|
||||
ip = "192.168.1.3";
|
||||
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
|
||||
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
|
||||
};
|
||||
osd2 = {
|
||||
name = "2";
|
||||
ip = "192.168.1.4";
|
||||
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
|
||||
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
|
||||
};
|
||||
};
|
||||
generateCephConfig = { daemonConfig }: {
|
||||
enable = true;
|
||||
global = {
|
||||
fsid = cfg.clusterId;
|
||||
monHost = cfg.monA.ip;
|
||||
monInitialMembers = cfg.monA.name;
|
||||
};
|
||||
} // daemonConfig;
|
||||
generateCephConfig =
|
||||
{ daemonConfig }:
|
||||
{
|
||||
enable = true;
|
||||
global = {
|
||||
fsid = cfg.clusterId;
|
||||
monHost = cfg.monA.ip;
|
||||
monInitialMembers = cfg.monA.name;
|
||||
};
|
||||
}
|
||||
// daemonConfig;
|
||||
|
||||
generateHost = { pkgs, cephConfig, networkConfig, ... }: {
|
||||
virtualisation = {
|
||||
emptyDiskImages = [ 20480 ];
|
||||
vlans = [ 1 ];
|
||||
generateHost =
|
||||
{
|
||||
pkgs,
|
||||
cephConfig,
|
||||
networkConfig,
|
||||
...
|
||||
}:
|
||||
{
|
||||
virtualisation = {
|
||||
emptyDiskImages = [ 20480 ];
|
||||
vlans = [ 1 ];
|
||||
};
|
||||
|
||||
networking = networkConfig;
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
bash
|
||||
sudo
|
||||
ceph
|
||||
xfsprogs
|
||||
libressl.nc
|
||||
];
|
||||
|
||||
boot.kernelModules = [ "xfs" ];
|
||||
|
||||
services.ceph = cephConfig;
|
||||
};
|
||||
|
||||
networkMonA = {
|
||||
dhcpcd.enable = false;
|
||||
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
|
||||
{
|
||||
address = cfg.monA.ip;
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
firewall = {
|
||||
allowedTCPPorts = [
|
||||
6789
|
||||
3300
|
||||
];
|
||||
allowedTCPPortRanges = [
|
||||
{
|
||||
from = 6800;
|
||||
to = 7300;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
cephConfigMonA = generateCephConfig {
|
||||
daemonConfig = {
|
||||
mon = {
|
||||
enable = true;
|
||||
daemons = [ cfg.monA.name ];
|
||||
};
|
||||
mgr = {
|
||||
enable = true;
|
||||
daemons = [ cfg.monA.name ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networking = networkConfig;
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
bash
|
||||
sudo
|
||||
ceph
|
||||
xfsprogs
|
||||
libressl.nc
|
||||
];
|
||||
|
||||
boot.kernelModules = [ "xfs" ];
|
||||
|
||||
services.ceph = cephConfig;
|
||||
};
|
||||
|
||||
networkMonA = {
|
||||
dhcpcd.enable = false;
|
||||
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
|
||||
{ address = cfg.monA.ip; prefixLength = 24; }
|
||||
];
|
||||
firewall = {
|
||||
allowedTCPPorts = [ 6789 3300 ];
|
||||
allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
|
||||
networkOsd = osd: {
|
||||
dhcpcd.enable = false;
|
||||
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
|
||||
{
|
||||
address = osd.ip;
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
firewall = {
|
||||
allowedTCPPortRanges = [
|
||||
{
|
||||
from = 6800;
|
||||
to = 7300;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
cephConfigMonA = generateCephConfig { daemonConfig = {
|
||||
mon = {
|
||||
enable = true;
|
||||
daemons = [ cfg.monA.name ];
|
||||
|
||||
cephConfigOsd =
|
||||
osd:
|
||||
generateCephConfig {
|
||||
daemonConfig = {
|
||||
osd = {
|
||||
enable = true;
|
||||
daemons = [ osd.name ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Following deployment is based on the manual deployment described here:
|
||||
# https://docs.ceph.com/docs/master/install/manual-deployment/
|
||||
# For other ways to deploy a ceph cluster, look at the documentation at
|
||||
# https://docs.ceph.com/docs/master/
|
||||
testscript =
|
||||
{ ... }:
|
||||
''
|
||||
start_all()
|
||||
|
||||
monA.wait_for_unit("network.target")
|
||||
osd0.wait_for_unit("network.target")
|
||||
osd1.wait_for_unit("network.target")
|
||||
osd2.wait_for_unit("network.target")
|
||||
|
||||
# Bootstrap ceph-mon daemon
|
||||
monA.succeed(
|
||||
"sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
|
||||
"sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
|
||||
"sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
|
||||
"monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
|
||||
"sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
|
||||
"sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
|
||||
"sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
|
||||
"systemctl start ceph-mon-${cfg.monA.name}",
|
||||
)
|
||||
monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
|
||||
monA.succeed("ceph mon enable-msgr2")
|
||||
monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
|
||||
|
||||
# Can't check ceph status until a mon is up
|
||||
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
|
||||
|
||||
# Start the ceph-mgr daemon, it has no deps and hardly any setup
|
||||
monA.succeed(
|
||||
"ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
|
||||
"systemctl start ceph-mgr-${cfg.monA.name}",
|
||||
)
|
||||
monA.wait_for_unit("ceph-mgr-a")
|
||||
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
|
||||
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
|
||||
|
||||
# Send the admin keyring to the OSD machines
|
||||
monA.succeed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared")
|
||||
osd0.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
|
||||
osd1.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
|
||||
osd2.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
|
||||
|
||||
# Bootstrap OSDs
|
||||
osd0.succeed(
|
||||
"mkfs.xfs /dev/vdb",
|
||||
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
|
||||
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
|
||||
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
|
||||
'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
|
||||
)
|
||||
osd1.succeed(
|
||||
"mkfs.xfs /dev/vdb",
|
||||
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
|
||||
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
|
||||
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
|
||||
'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
|
||||
)
|
||||
osd2.succeed(
|
||||
"mkfs.xfs /dev/vdb",
|
||||
"mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
|
||||
"mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
|
||||
"ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
|
||||
'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
|
||||
)
|
||||
|
||||
# Initialize the OSDs with regular filestore
|
||||
osd0.succeed(
|
||||
"ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
|
||||
"chown -R ceph:ceph /var/lib/ceph/osd",
|
||||
"systemctl start ceph-osd-${cfg.osd0.name}",
|
||||
)
|
||||
osd1.succeed(
|
||||
"ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
|
||||
"chown -R ceph:ceph /var/lib/ceph/osd",
|
||||
"systemctl start ceph-osd-${cfg.osd1.name}",
|
||||
)
|
||||
osd2.succeed(
|
||||
"ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
|
||||
"chown -R ceph:ceph /var/lib/ceph/osd",
|
||||
"systemctl start ceph-osd-${cfg.osd2.name}",
|
||||
)
|
||||
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
|
||||
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
|
||||
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
|
||||
|
||||
monA.succeed(
|
||||
"ceph osd pool create multi-node-test 32 32",
|
||||
"ceph osd pool ls | grep 'multi-node-test'",
|
||||
|
||||
# We need to enable an application on the pool, otherwise it will
|
||||
# stay unhealthy in state POOL_APP_NOT_ENABLED.
|
||||
# Creating a CephFS would do this automatically, but we haven't done that here.
|
||||
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
|
||||
# We use the custom application name "nixos-test" for this.
|
||||
"ceph osd pool application enable multi-node-test nixos-test",
|
||||
|
||||
"ceph osd pool rename multi-node-test multi-node-other-test",
|
||||
"ceph osd pool ls | grep 'multi-node-other-test'",
|
||||
)
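      # 2 pools / 33 PGs below: the 32 PGs requested above plus the single PG
      # of the pool the mgr creates for itself (an assumption about mgr
      # defaults, inferred from the counts this test greps for).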
      monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
      monA.succeed("ceph osd pool set multi-node-other-test size 2")
      monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
      monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
      monA.fail(
        "ceph osd pool ls | grep 'multi-node-test'",
        "ceph osd pool delete multi-node-other-test multi-node-other-test --yes-i-really-really-mean-it",
      )

      # Shut down ceph on all machines in a very impolite way
      monA.crash()
      osd0.crash()
      osd1.crash()
      osd2.crash()

      # Start it up
      osd0.start()
      osd1.start()
      osd2.start()
      monA.start()

      # Ensure the cluster comes back up again
      monA.succeed("ceph -s | grep 'mon: 1 daemons'")
      monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
      monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
      monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
      monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
    '';
  in
  {
    name = "basic-multi-node-ceph-cluster";
    meta = with pkgs.lib.maintainers; {
      maintainers = [ lejonet ];
    };

    nodes = {
      monA = generateHost {
        pkgs = pkgs;
        cephConfig = cephConfigMonA;
        networkConfig = networkMonA;
      };
      osd0 = generateHost {
        pkgs = pkgs;
        cephConfig = cephConfigOsd cfg.osd0;
        networkConfig = networkOsd cfg.osd0;
      };
      osd1 = generateHost {
        pkgs = pkgs;
        cephConfig = cephConfigOsd cfg.osd1;
        networkConfig = networkOsd cfg.osd1;
      };
      osd2 = generateHost {
        pkgs = pkgs;
        cephConfig = cephConfigOsd cfg.osd2;
        networkConfig = networkOsd cfg.osd2;
      };
    };

    testScript = testscript;
  }
)
@@ -1,63 +1,69 @@
import ./make-test-python.nix (
  { pkgs, ... }:
  {
    name = "deluge";
    meta = with pkgs.lib.maintainers; {
      maintainers = [ flokli ];
    };

    nodes = {
      simple = {
        services.deluge = {
          enable = true;
          package = pkgs.deluge-2_x;
          web = {
            enable = true;
            openFirewall = true;
          };
        };
      };

      declarative = {
        services.deluge = {
          enable = true;
          package = pkgs.deluge-2_x;
          openFirewall = true;
          declarative = true;
          config = {
            allow_remote = true;
            download_location = "/var/lib/deluge/my-download";
            daemon_port = 58846;
            listen_ports = [
              6881
              6889
            ];
          };
          web = {
            enable = true;
            port = 3142;
          };
          authFile = pkgs.writeText "deluge-auth" ''
            localclient:a7bef72a890:10
            andrew:password:10
            user3:anotherpass:5
          '';
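          # Each auth line is "username:password:authlevel"; level 10 grants
          # full admin access (Deluge's documented auth-file format).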
        };
      };
    };

    testScript = ''
      start_all()

      simple.wait_for_unit("deluged")
      simple.wait_for_unit("delugeweb")
      simple.wait_for_open_port(8112)
      declarative.wait_for_unit("network.target")
      declarative.wait_until_succeeds("curl --fail http://simple:8112")

      declarative.wait_for_unit("deluged")
      declarative.wait_for_unit("delugeweb")
      declarative.wait_until_succeeds("curl --fail http://declarative:3142")

      # deluge-console always exits with 1. https://dev.deluge-torrent.org/ticket/3291
      declarative.succeed(
          "(deluge-console 'connect 127.0.0.1:58846 andrew password; help' || true) | grep -q 'rm.*Remove a torrent'"
      )
    '';
  }
)
nixos/tests/docling-serve.nix (Normal file, 28 lines)

@@ -0,0 +1,28 @@
{ lib, ... }:
let
  mainPort = "5001";
in
{
  name = "docling-serve";
  meta = with lib.maintainers; {
    maintainers = [ drupol ];
  };

  nodes = {
    machine =
      { ... }:
      {
        services.docling-serve = {
          enable = true;
        };
      };
  };

  testScript = ''
    machine.start()

    machine.wait_for_unit("docling-serve.service")
    machine.wait_for_open_port(${mainPort})
    machine.succeed("curl http://127.0.0.1:${mainPort}")
  '';
}
@@ -15,7 +15,6 @@ let
    installPhase = "mkdir -p $out; cp -R * $out/";
  };

  plugin-icalevents = pkgs.stdenv.mkDerivation rec {
    name = "icalevents";
    version = "2017-06-16";

@@ -32,65 +31,78 @@ let
  '';

  dwWithAcronyms = pkgs.dokuwiki.overrideAttrs (prev: {
    installPhase =
      prev.installPhase or ""
      + ''
        ln -sf ${acronymsFile} $out/share/dokuwiki/conf/acronyms.local.conf
      '';
  });

  mkNode =
    webserver:
    { ... }:
    {
      services.dokuwiki = {
        inherit webserver;

        sites = {
          "site1.local" = {
            templates = [ template-bootstrap3 ];
            settings = {
              useacl = false;
              userewrite = true;
              template = "bootstrap3";
            };
          };
          "site2.local" = {
            package = dwWithAcronyms;
            usersFile = "/var/lib/dokuwiki/site2.local/users.auth.php";
            plugins = [ plugin-icalevents ];
            settings = {
              useacl = true;
              superuser = "admin";
              title._file = titleFile;
              plugin.dummy.empty = "This is just for testing purposes";
            };
            acl = [
              {
                page = "*";
                actor = "@ALL";
                level = "read";
              }
              {
                page = "acl-test";
                actor = "@ALL";
                level = "none";
              }
            ];
            pluginsConfig = {
              authad = false;
              authldap = false;
              authmysql = false;
              authpgsql = false;
              tag = false;
              icalevents = true;
            };
          };
        };
      };

      services.caddy.virtualHosts = {
        "site1.local".hostName = "http://site1.local";
        "site2.local".hostName = "http://site2.local";
      };

      networking.firewall.allowedTCPPorts = [ 80 ];
      networking.hosts."127.0.0.1" = [
        "site1.local"
        "site2.local"
      ];
    };

  titleFile = pkgs.writeText "dokuwiki-title" "DokuWiki on site2";
in
{
  name = "dokuwiki";
  meta = with pkgs.lib; {
    maintainers = with maintainers; [
@@ -1,6 +1,7 @@
{
  system ? builtins.currentSystem,
  config ? { },
  pkgs ? import ../.. { inherit system config; },
}:

with import ../lib/testing-python.nix { inherit system pkgs; };

@@ -9,52 +10,53 @@ with pkgs.lib;
with import common/ec2.nix { inherit makeTest pkgs; };

let
  imageCfg =
    (import ../lib/eval-config.nix {
      inherit system;
      modules = [
        ../maintainers/scripts/ec2/amazon-image.nix
        ../modules/testing/test-instrumentation.nix
        ../modules/profiles/qemu-guest.nix
        {
          # Hack to make the partition resizing work in QEMU.
          boot.initrd.postDeviceCommands = mkBefore ''
            ln -s vda /dev/xvda
            ln -s vda1 /dev/xvda1
          '';

          amazonImage.format = "qcow2";

          # In a NixOS test the serial console is occupied by the "backdoor"
          # (see testing/test-instrumentation.nix) and is incompatible with
          # the configuration in virtualisation/amazon-image.nix.
          systemd.services."serial-getty@ttyS0".enable = mkForce false;

          # Needed by nixos-rebuild due to the lack of network
          # access. Determined by trial and error.
          system.extraDependencies = with pkgs; ([
            # Needed for a nixos-rebuild.
            busybox
            cloud-utils
            desktop-file-utils
            libxslt.bin
            mkinitcpio-nfs-utils
            stdenv
            stdenvNoCC
            texinfo
            unionfs-fuse
            xorg.lndir

            # These are used in the configure-from-userdata tests
            # for EC2. Httpd and valgrind are requested by the
            # configuration.
            apacheHttpd
            apacheHttpd.doc
            apacheHttpd.man
            valgrind.doc
          ]);
        }
      ];
    }).config;
  image = "${imageCfg.system.build.amazonImage}/${imageCfg.image.imageFile}";

  sshKeys = import ./ssh-keys.nix pkgs;

@@ -62,16 +64,17 @@ let
  snakeOilPrivateKeyFile = pkgs.writeText "private-key" snakeOilPrivateKey;
  snakeOilPublicKey = sshKeys.snakeOilPublicKey;

in
{
  boot-ec2-nixops = makeEc2Test {
    name = "nixops-userdata";
    meta.timeout = 600;
    inherit image;
    sshPublicKey = snakeOilPublicKey; # That's right folks! My user's key is also the host key!

    userData = ''
      SSH_HOST_ED25519_KEY_PUB:${snakeOilPublicKey}
      SSH_HOST_ED25519_KEY:${replaceStrings [ "\n" ] [ "|" ] snakeOilPrivateKey}
    '';
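    # The multi-line private key is folded onto a single line, with "|"
    # standing in for newlines, so it survives the line-oriented user-data
    # format (an assumption based on the replaceStrings call above).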
    script = ''
      machine.start()

@@ -113,7 +116,7 @@ in {
  };

  boot-ec2-config = makeEc2Test {
    name = "config-userdata";
    meta.broken = true; # amazon-init wants to download from the internet while building the system
    inherit image;
    sshPublicKey = snakeOilPublicKey;
@@ -1,44 +1,49 @@
import ./make-test-python.nix (
  { pkgs, ... }:
  {
    name = "environment";
    meta = with pkgs.lib.maintainers; {
      maintainers = [ nequissimus ];
    };

    nodes.machine =
      { pkgs, lib, ... }:
      lib.mkMerge [
        {
          boot.kernelPackages = pkgs.linuxPackages;
          environment.etc.plainFile.text = ''
            Hello World
          '';
          environment.etc."folder/with/file".text = ''
            Foo Bar!
          '';

          environment.sessionVariables = {
            TERMINFO_DIRS = "/run/current-system/sw/share/terminfo";
            NIXCON = "awesome";
            SHOULD_NOT_BE_SET = "oops";
          };
        }
        {
          environment.sessionVariables = {
            SHOULD_NOT_BE_SET = lib.mkForce null;
          };
        }
      ];
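    # Merging in { SHOULD_NOT_BE_SET = lib.mkForce null; } afterwards is the
    # module-system idiom for dropping a session variable entirely; the test
    # below checks that the variable is truly absent, not merely empty.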

    testScript = ''
      machine.succeed('[ -L "/etc/plainFile" ]')
      assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
      machine.succeed('[ -d "/etc/folder" ]')
      machine.succeed('[ -d "/etc/folder/with" ]')
      machine.succeed('[ -L "/etc/folder/with/file" ]')
      assert "Hello World" in machine.succeed('cat "/etc/plainFile"')

      assert "/run/current-system/sw/share/terminfo" in machine.succeed(
          "echo ''${TERMINFO_DIRS}"
      )
      assert "awesome" in machine.succeed("echo ''${NIXCON}")
      machine.fail("printenv SHOULD_NOT_BE_SET")
    '';
  }
)
@@ -1,166 +1,169 @@
import ../make-test-python.nix (
  { lib, ... }:
  rec {
    name = "fcitx5";
    meta.maintainers = with lib.maintainers; [ nevivurn ];

    nodes.machine =
      { pkgs, ... }:
      {
        imports = [
          ../common/user-account.nix
        ];

        environment.systemPackages = [
          # To avoid clashing with xfce4-terminal
          pkgs.alacritty
        ];

        services.displayManager.autoLogin = {
          enable = true;
          user = "alice";
        };

        services.xserver = {
          enable = true;
          displayManager.lightdm.enable = true;
          desktopManager.xfce.enable = true;
        };

        i18n.inputMethod = {
          enable = true;
          type = "fcitx5";
          fcitx5.addons = [
            pkgs.fcitx5-chinese-addons
            pkgs.fcitx5-hangul
            pkgs.fcitx5-m17n
            pkgs.fcitx5-mozc
          ];
          fcitx5.settings = {
            globalOptions = {
              "Hotkey"."EnumerateSkipFirst" = "False";
              "Hotkey/TriggerKeys"."0" = "Control+space";
              "Hotkey/EnumerateForwardKeys"."0" = "Alt+Shift_L";
              "Hotkey/EnumerateBackwardKeys"."0" = "Alt+Shift_R";
            };
            inputMethod = {
              "GroupOrder" = {
                "0" = "NixOS_test";
              };
              "Groups/0" = {
                "Default Layout" = "us";
                "DefaultIM" = "wbx";
                "Name" = "NixOS_test";
              };
              "Groups/0/Items/0" = {
                "Name" = "keyboard-us";
              };
              "Groups/0/Items/1" = {
                "Layout" = "us";
                "Name" = "wbx";
              };
              "Groups/0/Items/2" = {
                "Layout" = "us";
                "Name" = "hangul";
              };
              "Groups/0/Items/3" = {
                "Layout" = "us";
                "Name" = "m17n_sa_harvard-kyoto";
              };
              "Groups/0/Items/4" = {
                "Layout" = "us";
                "Name" = "mozc";
              };
            };
          };
        };
      };

    testScript =
      { nodes, ... }:
      let
        user = nodes.machine.users.users.alice;
        xauth = "${user.home}/.Xauthority";
      in
      ''
        start_all()

        machine.wait_for_x()
        machine.wait_for_file("${xauth}")
        machine.succeed("xauth merge ${xauth}")
        machine.sleep(5)

        machine.wait_until_succeeds("pgrep fcitx5")
        machine.succeed("su - ${user.name} -c 'kill $(pgrep fcitx5)'")
        machine.sleep(1)

        machine.succeed("su - ${user.name} -c 'alacritty >&2 &'")
        machine.wait_for_window("alice@machine")

        machine.succeed("su - ${user.name} -c 'fcitx5 >&2 &'")
        machine.sleep(10)

        ### Type on terminal
        machine.send_chars("echo ")
        machine.sleep(1)

        ### Start fcitx Unicode input
        machine.send_key("ctrl-alt-shift-u")
        machine.sleep(1)

        ### Search for smiling face
        machine.send_chars("smil")
        machine.sleep(1)

        ### Navigate to the second one
        machine.send_key("tab")
        machine.sleep(1)

        ### Choose it
        machine.send_key("\n")
        machine.sleep(1)

        ### Start fcitx language input
        machine.send_key("ctrl-spc")
        machine.sleep(1)

        ### Default wubi, enter 一下
        machine.send_chars("gggh ")
        machine.sleep(1)

        ### Switch to Hangul
        machine.send_key("alt-shift")
        machine.sleep(1)

        ### Enter 한
        machine.send_chars("gks")
        machine.sleep(1)

        ### Switch to Harvard Kyoto
        machine.send_key("alt-shift")
        machine.sleep(1)

        ### Enter क
        machine.send_chars("ka")
        machine.sleep(1)

        ### Switch to Mozc
        machine.send_key("alt-shift")
        machine.sleep(1)

        ### Enter か
        machine.send_chars("ka\n")
        machine.sleep(1)

        ### Turn off Fcitx
        machine.send_key("ctrl-spc")
        machine.sleep(1)

        ### Redirect typed characters to a file
        machine.send_chars(" > fcitx_test.out\n")
        machine.sleep(1)
        machine.screenshot("terminal_chars")
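
        # The expected file contents below are, in order: the chosen Unicode
        # smiley, wubi 一下, hangul 한, Harvard-Kyoto क, and mozc か, matching
        # the input-method steps performed above.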

        ### Verify that file contents are as expected
        file_content = machine.succeed("cat ${user.home}/fcitx_test.out")
        assert file_content == "☺一下한कか\n", f'output does not match input:\n{file_content}'
      '';
  }
)
@@ -1,6 +1,7 @@
{
  system ? builtins.currentSystem,
  pkgs ? import ../.. { inherit system; },
  ...
}:
let
  lib = pkgs.lib;

@@ -14,50 +15,50 @@ in
with import ../lib/testing-python.nix { inherit system; };
{

  postgresql = makeTest {
    inherit testScript;
    name = "ferretdb-postgresql";
    meta.maintainers = with lib.maintainers; [ julienmalka ];

    nodes.machine =
      { pkgs, ... }:
      {
        services.ferretdb = {
          enable = true;
          settings.FERRETDB_HANDLER = "pg";
        };

        systemd.services.ferretdb.serviceConfig = {
          Requires = "postgresql.service";
          After = "postgresql.service";
        };
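        # Requires pulls postgresql.service in, and After orders startup
        # behind it; systemd needs both to guarantee ferretdb starts only
        # once PostgreSQL is up.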

        services.postgresql = {
          enable = true;
          ensureDatabases = [ "ferretdb" ];
          ensureUsers = [
            {
              name = "ferretdb";
              ensureDBOwnership = true;
            }
          ];
        };

        environment.systemPackages = with pkgs; [ mongosh ];
      };
  };

  sqlite = makeTest {
    inherit testScript;
    name = "ferretdb-sqlite";
    meta.maintainers = with lib.maintainers; [ julienmalka ];

    nodes.machine =
      { pkgs, ... }:
      {
        services.ferretdb.enable = true;

        environment.systemPackages = with pkgs; [ mongosh ];
      };
  };
}
@@ -1,60 +1,65 @@
import ./make-test-python.nix (
  { lib, ... }:
  {
    name = "grub";

    meta = with lib.maintainers; {
      maintainers = [ rnhmjoj ];
    };

    nodes.machine =
      { ... }:
      {
        virtualisation.useBootLoader = true;

        boot.loader.timeout = null;
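        # A null timeout makes GRUB wait at its menu indefinitely, giving the
        # test script time to drive the menu over the serial console.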
        boot.loader.grub = {
          enable = true;
          users.alice.password = "supersecret";

          # OCR is not accurate enough
          extraConfig = "serial; terminal_output serial";
        };
      };

    testScript = ''
      def grub_login_as(user, password):
          """
          Enters user and password to log into GRUB
          """
          machine.wait_for_console_text("Enter username:")
          machine.send_chars(user + "\n")
          machine.wait_for_console_text("Enter password:")
          machine.send_chars(password + "\n")


      def grub_select_all_configurations():
          """
          Selects "All configurations" from the GRUB menu
          to trigger a login request.
          """
          machine.send_monitor_command("sendkey down")
          machine.send_monitor_command("sendkey ret")


      machine.start()

      # wait for grub screen
      machine.wait_for_console_text("GNU GRUB")

      grub_select_all_configurations()
      with subtest("Invalid credentials are rejected"):
          grub_login_as("wronguser", "wrongsecret")
          machine.wait_for_console_text("error: access denied.")

      grub_select_all_configurations()
      with subtest("Valid credentials are accepted"):
          grub_login_as("alice", "supersecret")
          machine.send_chars("\n") # press enter to boot
          machine.wait_for_console_text("Linux version")

      with subtest("Machine boots correctly"):
          machine.wait_for_unit("multi-user.target")
    '';
  }
)
@@ -1,70 +1,82 @@
{
  system ? builtins.currentSystem,
  config ? { },
  pkgs ? import ../.. { inherit system config; },
}:

with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;

let
  makeHostNameTest =
    hostName: domain: fqdnOrNull:
    let
      fqdn = hostName + (optionalString (domain != null) ".${domain}");
      getStr =
        str: # maybeString2String
        let
          res = builtins.tryEval str;
        in
        if (res.success && res.value != null) then res.value else "null";
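      # tryEval matters here: evaluating networking.fqdn throws when no
      # domain is set, and getStr maps that error to the string "null" so
      # both outcomes can be compared uniformly (see the first assert below).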
    in
    makeTest {
      name = "hostname-${fqdn}";
      meta = with pkgs.lib.maintainers; {
        maintainers = [
          primeos
          blitz
        ];
      };

      nodes.machine =
        { lib, ... }:
        {
          networking.hostName = hostName;
          networking.domain = domain;

          environment.systemPackages = with pkgs; [
            inetutils
          ];
        };

      testScript =
        { nodes, ... }:
        ''
          start_all()

          machine = ${hostName}

          machine.systemctl("start network-online.target")
          machine.wait_for_unit("network-online.target")

          # Test if NixOS computes the correct FQDN (either a FQDN or an error/null):
          assert "${getStr nodes.machine.networking.fqdn}" == "${getStr fqdnOrNull}"

          # The FQDN, domain name, and hostname detection should work as expected:
          assert "${fqdn}" == machine.succeed("hostname --fqdn").strip()
          assert "${optionalString (domain != null) domain}" == machine.succeed("dnsdomainname").strip()
          assert (
              "${hostName}"
              == machine.succeed(
                  'hostnamectl status | grep "Static hostname" | cut -d: -f2'
              ).strip()
          )

          # 127.0.0.1 and ::1 should resolve back to "localhost":
          assert (
              "localhost" == machine.succeed("getent hosts 127.0.0.1 | awk '{print $2}'").strip()
          )
          assert "localhost" == machine.succeed("getent hosts ::1 | awk '{print $2}'").strip()

          # 127.0.0.2 should resolve back to the FQDN and hostname:
          fqdn_and_host_name = "${optionalString (domain != null) "${hostName}.${domain} "}${hostName}"
          assert (
              fqdn_and_host_name
              == machine.succeed("getent hosts 127.0.0.2 | awk '{print $2,$3}'").strip()
          )

          assert "${fqdn}" == machine.succeed("getent hosts ${hostName} | awk '{print $2}'").strip()
        '';
    };

in
@@ -4,125 +4,141 @@
# 3. jenkins service not started on slave node
# 4. declarative jobs can be added and removed

import ./make-test-python.nix (
  { pkgs, ... }:
  {
    name = "jenkins";
    meta = with pkgs.lib.maintainers; {
      maintainers = [
        bjornfor
        coconnor
        domenkozar
      ];
    };

    nodes = {

      master =
        { ... }:
        {
          services.jenkins = {
            enable = true;
            jobBuilder = {
              enable = true;
              nixJobs = [
                {
                  job = {
                    name = "job-1";
                    builders = [
                      {
                        shell = ''
                          echo "Running job-1"
                        '';
                      }
                    ];
                  };
                }

                {
                  job = {
                    name = "folder-1";
                    project-type = "folder";
                  };
                }

                {
                  job = {
                    name = "folder-1/job-2";
                    builders = [
                      {
                        shell = ''
                          echo "Running job-2"
                        '';
                      }
                    ];
                  };
                }
              ];
            };
          };

          specialisation.noJenkinsJobs.configuration = {
            services.jenkins.jobBuilder.nixJobs = pkgs.lib.mkForce [ ];
          };

          # should have no effect
          services.jenkinsSlave.enable = true;

          users.users.jenkins.extraGroups = [ "users" ];

          systemd.services.jenkins.serviceConfig.TimeoutStartSec = "6min";

          # Increase disk space to prevent this issue:
          #
          # WARNING h.n.DiskSpaceMonitorDescriptor#markNodeOfflineOrOnline: Making Built-In Node offline temporarily due to the lack of disk space
          virtualisation.diskSize = 2 * 1024;
        };

      slave =
        { ... }:
        {
          services.jenkinsSlave.enable = true;

          users.users.jenkins.extraGroups = [ "users" ];
        };

    };

    testScript =
      { nodes, ... }:
      let
        configWithoutJobs = "${nodes.master.system.build.toplevel}/specialisation/noJenkinsJobs";
        jenkinsPort = nodes.master.services.jenkins.port;
        jenkinsUrl = "http://localhost:${toString jenkinsPort}";
      in
      ''
        start_all()

        master.wait_for_unit("default.target")

        assert "Authentication required" in master.succeed("curl http://localhost:8080")

        for host in master, slave:
            groups = host.succeed("sudo -u jenkins groups")
            assert "jenkins" in groups
            assert "users" in groups

        slave.fail("systemctl is-enabled jenkins.service")

        slave.succeed("java -fullversion")

        with subtest("jobs are declarative"):
            # Check that jobs are created on disk.
            master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/job-1/config.xml")
            master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/config.xml")
            master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml")

            # Verify that jenkins also sees the jobs.
            out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs")
            jobs = [x.strip() for x in out.splitlines()]
            # Seeing jobs inside folders requires the Folders plugin
            # (https://plugins.jenkins.io/cloudbees-folder/), which we don't have
            # in this vanilla jenkins install, so limit ourself to non-folder jobs.
            assert jobs == ['job-1'], f"jobs != ['job-1']: {jobs}"
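
            # Switching into the noJenkinsJobs specialisation built above
            # swaps in an empty job list, exercising declarative job removal
            # without restarting the VM.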
            master.succeed(
                "${configWithoutJobs}/bin/switch-to-configuration test >&2"
            )

            # Check that jobs are removed from disk.
            master.wait_until_fails("test -f /var/lib/jenkins/jobs/job-1/config.xml")
            master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/config.xml")
            master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml")

            # Verify that jenkins also sees the jobs as removed.
            out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs")
            jobs = [x.strip() for x in out.splitlines()]
            assert jobs == [], f"jobs != []: {jobs}"
      '';
  }
)
@@ -1,111 +1,138 @@
{
  system ? builtins.currentSystem,
  config ? { },
  pkgs ? import ../.. { inherit system config; },
}:

with pkgs.lib;

let
  makeKafkaTest =
    name:
    {
      kafkaPackage,
      mode ? "kraft",
    }:
    (import ./make-test-python.nix ({
      inherit name;
      meta = with pkgs.lib.maintainers; {
        maintainers = [ nequissimus ];
      };

      nodes =
        {
          kafka =
            { ... }:
            {
              services.apache-kafka = mkMerge [
                ({
                  enable = true;
                  package = kafkaPackage;
                  settings = {
                    "offsets.topic.replication.factor" = 1;
                    "log.dirs" = [
                      "/var/lib/kafka/logdir1"
                      "/var/lib/kafka/logdir2"
                    ];
                  };
                })
                (mkIf (mode == "zookeeper") {
                  settings = {
                    "zookeeper.session.timeout.ms" = 600000;
                    "zookeeper.connect" = [ "zookeeper1:2181" ];
                  };
                })
                (mkIf (mode == "kraft") {
                  clusterId = "ak2fIHr4S8WWarOF_ODD0g";
                  formatLogDirs = true;
                  settings = {
                    "node.id" = 1;
                    "process.roles" = [
                      "broker"
                      "controller"
                    ];
                    "listeners" = [
                      "PLAINTEXT://:9092"
                      "CONTROLLER://:9093"
                    ];
                    "listener.security.protocol.map" = [
                      "PLAINTEXT:PLAINTEXT"
                      "CONTROLLER:PLAINTEXT"
                    ];
                    "controller.quorum.voters" = [
                      "1@kafka:9093"
                    ];
                    "controller.listener.names" = [ "CONTROLLER" ];
                  };
                })
              ];
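
              # In kraft mode the broker doubles as its own single-node
              # controller quorum (1@kafka:9093), so no separate ZooKeeper
              # machine is added below.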

              networking.firewall.allowedTCPPorts = [
                9092
                9093
              ];
              # i686 tests: qemu-system-i386 can simulate max 2047MB RAM (not 2048)
              virtualisation.memorySize = 2047;
            };
        }
        // optionalAttrs (mode == "zookeeper") {
          zookeeper1 =
            { ... }:
            {
              services.zookeeper = {
                enable = true;
              };

              networking.firewall.allowedTCPPorts = [ 2181 ];
            };
        };

      testScript = ''
        start_all()

        ${optionalString (mode == "zookeeper") ''
          zookeeper1.wait_for_unit("default.target")
          zookeeper1.wait_for_unit("zookeeper.service")
          zookeeper1.wait_for_open_port(2181)
        ''}

        kafka.wait_for_unit("default.target")
        kafka.wait_for_unit("apache-kafka.service")
        kafka.wait_for_open_port(9092)

        kafka.wait_until_succeeds(
            "${kafkaPackage}/bin/kafka-topics.sh --create "
            + "--bootstrap-server localhost:9092 --partitions 1 "
            + "--replication-factor 1 --topic testtopic"
        )
        kafka.succeed(
            "echo 'test 1' | "
            + "${kafkaPackage}/bin/kafka-console-producer.sh "
            + "--bootstrap-server localhost:9092 --topic testtopic"
        )
        assert "test 1" in kafka.succeed(
            "${kafkaPackage}/bin/kafka-console-consumer.sh "
            + "--bootstrap-server localhost:9092 --topic testtopic "
            + "--from-beginning --max-messages 1"
        )
      '';
    }) { inherit system; });

in
with pkgs;
{
  kafka_3_7 = makeKafkaTest "kafka_3_7" {
    kafkaPackage = apacheKafka_3_7;
    mode = "zookeeper";
  };
  kafka_3_8 = makeKafkaTest "kafka_3_8" {
    kafkaPackage = apacheKafka_3_8;
    mode = "zookeeper";
  };
  kafka_3_9 = makeKafkaTest "kafka_3_9" {
    kafkaPackage = apacheKafka_3_9;
    mode = "zookeeper";
  };
  kafka_4_0 = makeKafkaTest "kafka_4_0" { kafkaPackage = apacheKafka_4_0; };
  kafka = makeKafkaTest "kafka" { kafkaPackage = apacheKafka; };
}
@@ -1,43 +1,50 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "keepalived";
meta.maintainers = [ lib.maintainers.raitobezarius ];
import ./make-test-python.nix (
{ pkgs, lib, ... }:
{
name = "keepalived";
meta.maintainers = [ lib.maintainers.raitobezarius ];

nodes = {
node1 = { pkgs, ... }: {
services.keepalived.enable = true;
services.keepalived.openFirewall = true;
services.keepalived.vrrpInstances.test = {
interface = "eth1";
state = "MASTER";
priority = 50;
virtualIps = [{ addr = "192.168.1.200"; }];
virtualRouterId = 1;
};
environment.systemPackages = [ pkgs.tcpdump ];
nodes = {
node1 =
{ pkgs, ... }:
{
services.keepalived.enable = true;
services.keepalived.openFirewall = true;
services.keepalived.vrrpInstances.test = {
interface = "eth1";
state = "MASTER";
priority = 50;
virtualIps = [ { addr = "192.168.1.200"; } ];
virtualRouterId = 1;
};
environment.systemPackages = [ pkgs.tcpdump ];
};
node2 =
{ pkgs, ... }:
{
services.keepalived.enable = true;
services.keepalived.openFirewall = true;
services.keepalived.vrrpInstances.test = {
interface = "eth1";
state = "MASTER";
priority = 100;
virtualIps = [ { addr = "192.168.1.200"; } ];
virtualRouterId = 1;
};
environment.systemPackages = [ pkgs.tcpdump ];
};
};
node2 = { pkgs, ... }: {
services.keepalived.enable = true;
services.keepalived.openFirewall = true;
services.keepalived.vrrpInstances.test = {
interface = "eth1";
state = "MASTER";
priority = 100;
virtualIps = [{ addr = "192.168.1.200"; }];
virtualRouterId = 1;
};
environment.systemPackages = [ pkgs.tcpdump ];
};
};

testScript = ''
# wait for boot time delay to pass
for node in [node1, node2]:
node.wait_until_succeeds(
"systemctl show -p LastTriggerUSecMonotonic keepalived-boot-delay.timer | grep -vq 'LastTriggerUSecMonotonic=0'"
)
node.wait_for_unit("keepalived")
node2.wait_until_succeeds("ip addr show dev eth1 | grep -q 192.168.1.200")
node1.fail("ip addr show dev eth1 | grep -q 192.168.1.200")
node1.succeed("ping -c1 192.168.1.200")
'';
})
testScript = ''
# wait for boot time delay to pass
for node in [node1, node2]:
node.wait_until_succeeds(
"systemctl show -p LastTriggerUSecMonotonic keepalived-boot-delay.timer | grep -vq 'LastTriggerUSecMonotonic=0'"
)
node.wait_for_unit("keepalived")
node2.wait_until_succeeds("ip addr show dev eth1 | grep -q 192.168.1.200")
node1.fail("ip addr show dev eth1 | grep -q 192.168.1.200")
node1.succeed("ping -c1 192.168.1.200")
'';
}
)

@@ -1,20 +1,23 @@
import ./make-test-python.nix ({ lib, ... }:
import ./make-test-python.nix (
{ lib, ... }:

{
name = "komga";
meta.maintainers = with lib.maintainers; [ govanify ];
{
name = "komga";
meta.maintainers = with lib.maintainers; [ govanify ];

nodes.machine =
{ pkgs, ... }:
{ services.komga = {
enable = true;
settings.server.port = 1234;
nodes.machine =
{ pkgs, ... }:
{
services.komga = {
enable = true;
settings.server.port = 1234;
};
};
};

testScript = ''
machine.wait_for_unit("komga.service")
machine.wait_for_open_port(1234)
machine.succeed("curl --fail http://localhost:1234/")
'';
})
testScript = ''
machine.wait_for_unit("komga.service")
machine.wait_for_open_port(1234)
machine.succeed("curl --fail http://localhost:1234/")
'';
}
)

@@ -1,34 +1,55 @@
{ system ? builtins.currentSystem, pkgs ? import ../../.. { inherit system; } }:
{
system ? builtins.currentSystem,
pkgs ? import ../../.. { inherit system; },
}:
with import ./base.nix { inherit system; };
let
domain = "my.zyx";

redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [{
name = "redis";
image = "redis";
args = ["--bind" "0.0.0.0"];
imagePullPolicy = "Never";
ports = [{
name = "redis-server";
containerPort = 6379;
}];
}];
});
redisPod = pkgs.writeText "redis-pod.json" (
builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [
{
name = "redis";
image = "redis";
args = [
"--bind"
"0.0.0.0"
];
imagePullPolicy = "Never";
ports = [
{
name = "redis-server";
containerPort = 6379;
}
];
}
];
}
);

redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [{port = 6379; targetPort = 6379;}];
selector = {name = "redis";};
};
});
redisService = pkgs.writeText "redis-service.json" (
builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [
{
port = 6379;
targetPort = 6379;
}
];
selector = {
name = "redis";
};
};
}
);

redisImage = pkgs.dockerTools.buildImage {
name = "redis";
@@ -36,24 +57,31 @@ let
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ pkgs.redis pkgs.bind.host ];
paths = [
pkgs.redis
pkgs.bind.host
];
};
config.Entrypoint = ["/bin/redis-server"];
config.Entrypoint = [ "/bin/redis-server" ];
};

probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "probe";
metadata.labels.name = "probe";
spec.containers = [{
name = "probe";
image = "probe";
args = [ "-f" ];
tty = true;
imagePullPolicy = "Never";
}];
});
probePod = pkgs.writeText "probe-pod.json" (
builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "probe";
metadata.labels.name = "probe";
spec.containers = [
{
name = "probe";
image = "probe";
args = [ "-f" ];
tty = true;
imagePullPolicy = "Never";
}
];
}
);

probeImage = pkgs.dockerTools.buildImage {
name = "probe";
@@ -61,18 +89,28 @@ let
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ pkgs.bind.host pkgs.busybox ];
paths = [
pkgs.bind.host
pkgs.busybox
];
};
config.Entrypoint = ["/bin/tail"];
config.Entrypoint = [ "/bin/tail" ];
};

extraConfiguration = { config, pkgs, lib, ... }: {
environment.systemPackages = [ pkgs.bind.host ];
services.dnsmasq.enable = true;
services.dnsmasq.settings.server = [
"/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
];
};
extraConfiguration =
{
config,
pkgs,
lib,
...
}:
{
environment.systemPackages = [ pkgs.bind.host ];
services.dnsmasq.enable = true;
services.dnsmasq.settings.server = [
"/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
];
};

base = {
name = "dns";
@@ -153,7 +191,8 @@ let
machine1.succeed("kubectl exec probe -- /bin/host redis.default.svc.cluster.local")
'';
};
in {
in
{
singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
}

@@ -1,22 +1,30 @@
import ./make-test-python.nix ({lib, ...}: {
name = "mailhog";
meta.maintainers = with lib.maintainers; [jojosch RTUnreal];
import ./make-test-python.nix (
{ lib, ... }:
{
name = "mailhog";
meta.maintainers = with lib.maintainers; [
jojosch
RTUnreal
];

nodes.machine = {pkgs, ...}: {
services.mailhog.enable = true;
};
nodes.machine =
{ pkgs, ... }:
{
services.mailhog.enable = true;
};

testScript = ''
start_all()
testScript = ''
start_all()

machine.wait_for_unit("mailhog.service")
machine.wait_for_open_port(1025)
machine.wait_for_open_port(8025)
# Test sendmail wrapper (this uses smtp, which tests the connection)
machine.succeed('printf "To: root@example.com\r\n\r\nthis is the body of the email" | sendmail -t -i -f sender@example.com')
res = machine.succeed(
"curl --fail http://localhost:8025/api/v2/messages"
)
assert all(msg in res for msg in ["this is the body of the email", "sender@example.com", "root@example.com"])
'';
})
machine.wait_for_unit("mailhog.service")
machine.wait_for_open_port(1025)
machine.wait_for_open_port(8025)
# Test sendmail wrapper (this uses smtp, which tests the connection)
machine.succeed('printf "To: root@example.com\r\n\r\nthis is the body of the email" | sendmail -t -i -f sender@example.com')
res = machine.succeed(
"curl --fail http://localhost:8025/api/v2/messages"
)
assert all(msg in res for msg in ["this is the body of the email", "sender@example.com", "root@example.com"])
'';
}
)

@@ -2,39 +2,47 @@ let
seed = "2151901553968352745";
rcon-pass = "foobar";
rcon-port = 43000;
in import ./make-test-python.nix ({ pkgs, ... }: {
name = "minecraft-server";
meta = with pkgs.lib.maintainers; { maintainers = [ nequissimus ]; };

nodes.server = { ... }: {
environment.systemPackages = [ pkgs.mcrcon ];

nixpkgs.config.allowUnfree = true;

services.minecraft-server = {
declarative = true;
enable = true;
eula = true;
serverProperties = {
enable-rcon = true;
level-seed = seed;
level-type = "flat";
generate-structures = false;
online-mode = false;
"rcon.password" = rcon-pass;
"rcon.port" = rcon-port;
};
in
import ./make-test-python.nix (
{ pkgs, ... }:
{
name = "minecraft-server";
meta = with pkgs.lib.maintainers; {
maintainers = [ nequissimus ];
};

virtualisation.memorySize = 2047;
};
nodes.server =
{ ... }:
{
environment.systemPackages = [ pkgs.mcrcon ];

testScript = ''
server.wait_for_unit("minecraft-server")
server.wait_for_open_port(${toString rcon-port})
assert "${seed}" in server.succeed(
"mcrcon -H localhost -P ${toString rcon-port} -p '${rcon-pass}' -c 'seed'"
)
server.succeed("systemctl stop minecraft-server")
'';
})
nixpkgs.config.allowUnfree = true;

services.minecraft-server = {
declarative = true;
enable = true;
eula = true;
serverProperties = {
enable-rcon = true;
level-seed = seed;
level-type = "flat";
generate-structures = false;
online-mode = false;
"rcon.password" = rcon-pass;
"rcon.port" = rcon-port;
};
};

virtualisation.memorySize = 2047;
};

testScript = ''
server.wait_for_unit("minecraft-server")
server.wait_for_open_port(${toString rcon-port})
assert "${seed}" in server.succeed(
"mcrcon -H localhost -P ${toString rcon-port} -p '${rcon-pass}' -c 'seed'"
)
server.succeed("systemctl stop minecraft-server")
'';
}
)

@@ -1,20 +1,20 @@
import ./make-test-python.nix ({ pkgs, ... }:
import ./make-test-python.nix (
{ pkgs, ... }:
let
tls-cert =
pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
openssl req \
-x509 -newkey rsa:4096 -sha256 -days 365 \
-nodes -out cert.pem -keyout key.pem \
-subj '/CN=minio' -addext "subjectAltName=DNS:localhost"
tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
openssl req \
-x509 -newkey rsa:4096 -sha256 -days 365 \
-nodes -out cert.pem -keyout key.pem \
-subj '/CN=minio' -addext "subjectAltName=DNS:localhost"

mkdir -p $out
cp key.pem cert.pem $out
'';
mkdir -p $out
cp key.pem cert.pem $out
'';

accessKey = "BKIKJAA5BMMU2RHO6IBB";
secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
minioPythonScript = pkgs.writeScript "minio-test.py" ''
#! ${pkgs.python3.withPackages(ps: [ ps.minio ])}/bin/python
#! ${pkgs.python3.withPackages (ps: [ ps.minio ])}/bin/python
import io
import os
import sys
@@ -53,19 +53,21 @@ import ./make-test-python.nix ({ pkgs, ... }:
};

nodes = {
machine = { pkgs, ... }: {
services.minio = {
enable = true;
inherit rootCredentialsFile;
machine =
{ pkgs, ... }:
{
services.minio = {
enable = true;
inherit rootCredentialsFile;
};
environment.systemPackages = [ pkgs.minio-client ];

# Minio requires at least 1GiB of free disk space to run.
virtualisation.diskSize = 4 * 1024;

# Minio pre-allocates 2GiB of memory, reserve some more
virtualisation.memorySize = 4096;
};
environment.systemPackages = [ pkgs.minio-client ];

# Minio requires at least 1GiB of free disk space to run.
virtualisation.diskSize = 4 * 1024;

# Minio pre-allocates 2GiB of memory, reserve some more
virtualisation.memorySize = 4096;
};
};

testScript = ''
@@ -110,4 +112,5 @@ import ./make-test-python.nix ({ pkgs, ... }:

machine.shutdown()
'';
})
}
)

@@ -1,16 +1,25 @@
# This test runs netdata and checks for data via apps.plugin

import ./make-test-python.nix ({ pkgs, ...} : {
name = "netdata";
meta = with pkgs.lib.maintainers; {
maintainers = [ cransom raitobezarius ];
};
import ./make-test-python.nix (
{ pkgs, ... }:
{
name = "netdata";
meta = with pkgs.lib.maintainers; {
maintainers = [
cransom
raitobezarius
];
};

nodes = {
netdata =
{ pkgs, ... }:
nodes = {
netdata =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [ curl jq netdata ];
environment.systemPackages = with pkgs; [
curl
jq
netdata
];
services.netdata = {
enable = true;
python.recommendedPythonPackages = true;
@@ -22,35 +31,36 @@ import ./make-test-python.nix ({ pkgs, ...} : {
};
};

testScript = ''
start_all()
testScript = ''
start_all()

netdata.wait_for_unit("netdata.service")
netdata.wait_for_unit("netdata.service")

# wait for the service to listen before sending a request
netdata.wait_for_open_port(19999)
# wait for the service to listen before sending a request
netdata.wait_for_open_port(19999)

# check if the netdata main page loads.
netdata.succeed("curl --fail http://localhost:19999/")
netdata.succeed("sleep 4")
# check if the netdata main page loads.
netdata.succeed("curl --fail http://localhost:19999/")
netdata.succeed("sleep 4")

# check if netdata can read disk ops for root owned processes.
# if > 0, successful. verifies both netdata working and
# apps.plugin has elevated capabilities.
url = "http://localhost:19999/api/v1/data?chart=user.root_disk_physical_io"
filter = '[.data[range(10)][2]] | add | . < 0'
cmd = f"curl -s {url} | jq -e '{filter}'"
netdata.wait_until_succeeds(cmd)
# check if netdata can read disk ops for root owned processes.
# if > 0, successful. verifies both netdata working and
# apps.plugin has elevated capabilities.
url = "http://localhost:19999/api/v1/data?chart=user.root_disk_physical_io"
filter = '[.data[range(10)][2]] | add | . < 0'
cmd = f"curl -s {url} | jq -e '{filter}'"
netdata.wait_until_succeeds(cmd)

# check if the control socket is available
netdata.succeed("sudo netdatacli ping")
# check if the control socket is available
netdata.succeed("sudo netdatacli ping")

# check that custom groups in apps_groups.conf are used.
# if > 0, successful. verifies that user-specified apps_group.conf
# is used.
url = "http://localhost:19999/api/v1/data?chart=app.netdata_test_cpu_utilization"
filter = '[.data[range(10)][2]] | add | . > 0'
cmd = f"curl -s {url} | jq -e '{filter}'"
netdata.wait_until_succeeds(cmd, timeout=30)
'';
})
# check that custom groups in apps_groups.conf are used.
# if > 0, successful. verifies that user-specified apps_group.conf
# is used.
url = "http://localhost:19999/api/v1/data?chart=app.netdata_test_cpu_utilization"
filter = '[.data[range(10)][2]] | add | . > 0'
cmd = f"curl -s {url} | jq -e '{filter}'"
netdata.wait_until_succeeds(cmd, timeout=30)
'';
}
)

@@ -1,6 +1,7 @@
{ system ? builtins.currentSystem
, config ? {}
, pkgs ? import ../.. { inherit system config; }
{
system ? builtins.currentSystem,
config ? { },
pkgs ? import ../.. { inherit system config; },
}:

with import ../../lib/testing-python.nix { inherit system pkgs; };
@@ -11,27 +12,29 @@ let
# so using systemd-networkd for the router vm is fine in these tests.
router = import ./router.nix { networkd = true; };
qemu-common = import ../../lib/qemu-common.nix { inherit (pkgs) lib pkgs; };
clientConfig = extraConfig: lib.recursiveUpdate {
networking.useDHCP = false;
clientConfig =
extraConfig:
lib.recursiveUpdate {
networking.useDHCP = false;

# Make sure that only NetworkManager configures the interface
networking.interfaces = lib.mkForce {
eth1 = {};
};
networking.networkmanager = {
enable = true;
# this is needed so NM doesn't generate 'Wired Connection' profiles and instead uses the default one
settings.main.no-auto-default = "*";
ensureProfiles.profiles.default = {
connection = {
id = "default";
type = "ethernet";
interface-name = "eth1";
autoconnect = true;
# Make sure that only NetworkManager configures the interface
networking.interfaces = lib.mkForce {
eth1 = { };
};
networking.networkmanager = {
enable = true;
# this is needed so NM doesn't generate 'Wired Connection' profiles and instead uses the default one
settings.main.no-auto-default = "*";
ensureProfiles.profiles.default = {
connection = {
id = "default";
type = "ethernet";
interface-name = "eth1";
autoconnect = true;
};
};
};
};
} extraConfig;
} extraConfig;
testCases = {
startup = {
name = "startup";
@@ -139,12 +142,14 @@ let
dispatcherScripts = {
name = "dispatcherScripts";
nodes.client = clientConfig {
networking.networkmanager.dispatcherScripts = [{
type = "pre-up";
source = pkgs.writeText "testHook" ''
touch /tmp/dispatcher-scripts-are-working
'';
}];
networking.networkmanager.dispatcherScripts = [
{
type = "pre-up";
source = pkgs.writeText "testHook" ''
touch /tmp/dispatcher-scripts-are-working
'';
}
];
};
testScript = ''
start_all()
@@ -154,18 +159,20 @@ let
};
envsubst = {
name = "envsubst";
nodes.client = let
# you should never write secrets into your nixos configuration, please use tools like sops-nix or agenix
secretFile = pkgs.writeText "my-secret.env" ''
MY_SECRET_IP=fd00:1234:5678:1::23/64
'';
in clientConfig {
networking.networkmanager.ensureProfiles.environmentFiles = [ secretFile ];
networking.networkmanager.ensureProfiles.profiles.default = {
ipv6.method = "manual";
ipv6.addresses = "$MY_SECRET_IP";
nodes.client =
let
# you should never write secrets into your nixos configuration, please use tools like sops-nix or agenix
secretFile = pkgs.writeText "my-secret.env" ''
MY_SECRET_IP=fd00:1234:5678:1::23/64
'';
in
clientConfig {
networking.networkmanager.ensureProfiles.environmentFiles = [ secretFile ];
networking.networkmanager.ensureProfiles.profiles.default = {
ipv6.method = "manual";
ipv6.addresses = "$MY_SECRET_IP";
};
};
};
testScript = ''
start_all()
client.wait_for_unit("NetworkManager.service")
@@ -174,10 +181,17 @@ let
'';
};
};
in lib.mapAttrs (lib.const (attrs: makeTest (attrs // {
name = "${attrs.name}-Networking-NetworkManager";
meta = {
maintainers = [ ];
};
in
lib.mapAttrs (lib.const (
attrs:
makeTest (
attrs
// {
name = "${attrs.name}-Networking-NetworkManager";
meta = {
maintainers = [ ];
};

}))) testCases
}
)
)) testCases

@@ -1,108 +1,135 @@
{ system ? builtins.currentSystem
, config ? { }
, pkgs ? import ../../.. { inherit system config; }
{
system ? builtins.currentSystem,
config ? { },
pkgs ? import ../../.. { inherit system config; },
}:

with pkgs.lib;

let
baseModule = { config, ... }: {
imports = [
{
options.test-helpers = {
rclone = mkOption { type = types.str; };
upload-sample = mkOption { type = types.str; };
check-sample = mkOption { type = types.str; };
init = mkOption { type = types.str; default = ""; };
extraTests = mkOption { type = types.either types.str (types.functionTo types.str); default = ""; };
};
options.adminuser = mkOption { type = types.str; };
options.adminpass = mkOption { type = types.str; };
}
];

adminuser = "root";
adminpass = "hunter2";

test-helpers.rclone = "${pkgs.writeShellScript "rclone" ''
set -euo pipefail
export PATH="${pkgs.rclone}/bin:$PATH"
export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${config.adminuser}"
export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
export RCLONE_CONFIG_NEXTCLOUD_USER="${config.adminuser}"
export RCLONE_CONFIG_NEXTCLOUD_PASS="$(rclone obscure ${config.adminpass})"
exec "$@"
''}";
test-helpers.upload-sample = "${pkgs.writeShellScript "rclone-upload" ''
<<<'hi' rclone rcat nextcloud:test-shared-file
''}";
test-helpers.check-sample = "${pkgs.writeShellScript "check-sample" ''
set -e
diff <(echo 'hi') <(rclone cat nextcloud:test-shared-file)
''}";

nodes = {
client = { ... }: {};
nextcloud = { lib, ... }: {
networking.firewall.allowedTCPPorts = [ 80 ];
services.nextcloud = {
enable = true;
hostName = "nextcloud";
https = false;
database.createLocally = lib.mkDefault true;
config = {
adminpassFile = "${pkgs.writeText "adminpass" config.adminpass}"; # Don't try this at home!
baseModule =
{ config, ... }:
{
imports = [
{
options.test-helpers = {
rclone = mkOption { type = types.str; };
upload-sample = mkOption { type = types.str; };
check-sample = mkOption { type = types.str; };
init = mkOption {
type = types.str;
default = "";
};
extraTests = mkOption {
type = types.either types.str (types.functionTo types.str);
default = "";
};
};
options.adminuser = mkOption { type = types.str; };
options.adminpass = mkOption { type = types.str; };
}
];

adminuser = "root";
adminpass = "hunter2";

test-helpers.rclone = "${pkgs.writeShellScript "rclone" ''
set -euo pipefail
export PATH="${pkgs.rclone}/bin:$PATH"
export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${config.adminuser}"
export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
export RCLONE_CONFIG_NEXTCLOUD_USER="${config.adminuser}"
export RCLONE_CONFIG_NEXTCLOUD_PASS="$(rclone obscure ${config.adminpass})"
exec "$@"
''}";
test-helpers.upload-sample = "${pkgs.writeShellScript "rclone-upload" ''
<<<'hi' rclone rcat nextcloud:test-shared-file
''}";
test-helpers.check-sample = "${pkgs.writeShellScript "check-sample" ''
set -e
diff <(echo 'hi') <(rclone cat nextcloud:test-shared-file)
''}";

nodes = {
client = { ... }: { };
nextcloud =
{ lib, ... }:
{
networking.firewall.allowedTCPPorts = [ 80 ];
services.nextcloud = {
enable = true;
hostName = "nextcloud";
https = false;
database.createLocally = lib.mkDefault true;
config = {
adminpassFile = "${pkgs.writeText "adminpass" config.adminpass}"; # Don't try this at home!
};
};
};
};
};

testScript =
args@{ nodes, ... }:
let
inherit (config) test-helpers;
in
mkBefore ''
nextcloud.start()
client.start()
nextcloud.wait_for_unit("multi-user.target")

${test-helpers.init}

with subtest("Ensure nextcloud-occ is working"):
nextcloud.succeed("nextcloud-occ status")
nextcloud.succeed("curl -sSf http://nextcloud/login")

with subtest("Upload/Download test"):
nextcloud.succeed(
"${test-helpers.rclone} ${test-helpers.upload-sample}"
)
client.wait_for_unit("multi-user.target")
client.succeed(
"${test-helpers.rclone} ${test-helpers.check-sample}"
)

${
if pkgs.lib.isFunction test-helpers.extraTests then
test-helpers.extraTests args
else
test-helpers.extraTests
}
'';
};

testScript = args@{ nodes, ... }: let
inherit (config) test-helpers;
in mkBefore ''
nextcloud.start()
client.start()
nextcloud.wait_for_unit("multi-user.target")

${test-helpers.init}

with subtest("Ensure nextcloud-occ is working"):
nextcloud.succeed("nextcloud-occ status")
nextcloud.succeed("curl -sSf http://nextcloud/login")

with subtest("Upload/Download test"):
nextcloud.succeed(
"${test-helpers.rclone} ${test-helpers.upload-sample}"
)
client.wait_for_unit("multi-user.target")
client.succeed(
"${test-helpers.rclone} ${test-helpers.check-sample}"
)

${if pkgs.lib.isFunction test-helpers.extraTests then test-helpers.extraTests args else test-helpers.extraTests}
'';
};

genTests = version:
genTests =
version:
let
testBase.imports = [
baseModule
{
nodes.nextcloud = { pkgs, ... }: {
services.nextcloud.package = pkgs.${"nextcloud${toString version}"};
};
nodes.nextcloud =
{ pkgs, ... }:
{
services.nextcloud.package = pkgs.${"nextcloud${toString version}"};
};
}
];

callNextcloudTest = path:
callNextcloudTest =
path:
let
name = "${removeSuffix ".nix" (baseNameOf path)}${toString version}";
in nameValuePair name (import path {
inherit system pkgs testBase;
name = "nextcloud-${name}";
});
in map callNextcloudTest [
in
nameValuePair name (
import path {
inherit system pkgs testBase;
name = "nextcloud-${name}";
}
);
in
map callNextcloudTest [
./basic.nix
./with-declarative-redis-and-secrets.nix
./with-mysql-and-memcached.nix
@@ -110,4 +137,10 @@ let
./with-objectstore.nix
];
in
listToAttrs (concatMap genTests [ 29 30 31 ])
listToAttrs (
concatMap genTests [
29
30
31
]
)

@@ -1,75 +1,88 @@
import ./make-test-python.nix ({ pkgs, lib, withNg ? false, ... }: {
name = "nixos-rebuild-install-bootloader";
import ./make-test-python.nix (
{
pkgs,
lib,
withNg ? false,
...
}:
{
name = "nixos-rebuild-install-bootloader";

nodes = {
machine = { lib, pkgs, ... }: {
imports = [
../modules/profiles/installation-device.nix
../modules/profiles/base.nix
];

nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
};

system.includeBuildDependencies = true;
system.rebuild.enableNg = withNg;

virtualisation = {
cores = 2;
memorySize = 2048;
};

virtualisation.useBootLoader = true;
};
};

testScript =
let
configFile = pkgs.writeText "configuration.nix" /* nix */ ''
{ lib, pkgs, ... }: {
nodes = {
machine =
{ lib, pkgs, ... }:
{
imports = [
./hardware-configuration.nix
<nixpkgs/nixos/modules/testing/test-instrumentation.nix>
../modules/profiles/installation-device.nix
../modules/profiles/base.nix
];

boot.loader.grub = {
enable = true;
device = "/dev/vda";
forceInstall = true;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
};

system.rebuild.enableNg = ${lib.boolToString withNg};
documentation.enable = false;
}
system.includeBuildDependencies = true;
system.rebuild.enableNg = withNg;

virtualisation = {
cores = 2;
memorySize = 2048;
};

virtualisation.useBootLoader = true;
};
};

testScript =
let
configFile =
pkgs.writeText "configuration.nix" # nix
''
{ lib, pkgs, ... }: {
imports = [
./hardware-configuration.nix
<nixpkgs/nixos/modules/testing/test-instrumentation.nix>
];

boot.loader.grub = {
enable = true;
device = "/dev/vda";
forceInstall = true;
};

system.rebuild.enableNg = ${lib.boolToString withNg};
documentation.enable = false;
}
'';

in
# python
''
machine.start()
machine.succeed("udevadm settle")
machine.wait_for_unit("multi-user.target")

machine.succeed("nixos-generate-config")
machine.copy_from_host(
"${configFile}",
"/etc/nixos/configuration.nix",
)
machine.succeed("nixos-rebuild switch")

# Need to run `nixos-rebuild` twice because the first run will install
# GRUB anyway
with subtest("Switch system again and install bootloader"):
result = machine.succeed("nixos-rebuild switch --install-bootloader 2>&1")
# install-grub2.pl messages
assert "updating GRUB 2 menu..." in result
assert "installing the GRUB 2 boot loader on /dev/vda..." in result
# GRUB message
assert "Installation finished. No error reported." in result
# at this point we've tested regression #262724, but haven't tested the bootloader itself
# TODO: figure out how to tell the test driver to start the bootloader instead of
# booting into the kernel directly.
'';

in
/* python */ ''
machine.start()
machine.succeed("udevadm settle")
machine.wait_for_unit("multi-user.target")

machine.succeed("nixos-generate-config")
machine.copy_from_host(
"${configFile}",
"/etc/nixos/configuration.nix",
)
machine.succeed("nixos-rebuild switch")

# Need to run `nixos-rebuild` twice because the first run will install
# GRUB anyway
with subtest("Switch system again and install bootloader"):
result = machine.succeed("nixos-rebuild switch --install-bootloader 2>&1")
# install-grub2.pl messages
assert "updating GRUB 2 menu..." in result
assert "installing the GRUB 2 boot loader on /dev/vda..." in result
# GRUB message
assert "Installation finished. No error reported." in result
# at this point we've tested regression #262724, but haven't tested the bootloader itself
# TODO: figure out how to tell the test driver to start the bootloader instead of
# booting into the kernel directly.
'';
})
}
)

@@ -1,4 +1,10 @@
{ hostPkgs, lib, withNg, ... }: {
{
hostPkgs,
lib,
withNg,
...
}:
{
name = "nixos-rebuild-specialisations";

# TODO: remove overlay from nixos/modules/profiles/installation-device.nix
@@ -6,82 +12,87 @@
node.pkgsReadOnly = false;

nodes = {
machine = { lib, pkgs, ... }: {
imports = [
../modules/profiles/installation-device.nix
../modules/profiles/base.nix
];
machine =
{ lib, pkgs, ... }:
{
imports = [
../modules/profiles/installation-device.nix
../modules/profiles/base.nix
];

nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
};

system.includeBuildDependencies = true;

system.extraDependencies = [
# Not part of the initial build apparently?
pkgs.grub2
];

system.rebuild.enableNg = withNg;
system.switch.enable = true;

virtualisation = {
cores = 2;
memorySize = 4096;
};
};

system.includeBuildDependencies = true;

system.extraDependencies = [
# Not part of the initial build apparently?
pkgs.grub2
];

system.rebuild.enableNg = withNg;
system.switch.enable = true;

virtualisation = {
cores = 2;
memorySize = 4096;
};
};
};

testScript =
let
configFile = hostPkgs.writeText "configuration.nix" /* nix */ ''
{ lib, pkgs, ... }: {
imports = [
./hardware-configuration.nix
<nixpkgs/nixos/modules/testing/test-instrumentation.nix>
];

boot.loader.grub = {
enable = true;
device = "/dev/vda";
forceInstall = true;
};

documentation.enable = false;

environment.systemPackages = [
(pkgs.writeShellScriptBin "parent" "")
];

system.rebuild.enableNg = ${lib.boolToString withNg};

specialisation.foo = {
inheritParentConfig = true;

configuration = { ... }: {
environment.systemPackages = [
(pkgs.writeShellScriptBin "foo" "")
configFile =
hostPkgs.writeText "configuration.nix" # nix
''
{ lib, pkgs, ... }: {
imports = [
./hardware-configuration.nix
<nixpkgs/nixos/modules/testing/test-instrumentation.nix>
];
};
};

specialisation.bar = {
inheritParentConfig = true;
boot.loader.grub = {
enable = true;
device = "/dev/vda";
forceInstall = true;
};

documentation.enable = false;

configuration = { ... }: {
environment.systemPackages = [
(pkgs.writeShellScriptBin "bar" "")
(pkgs.writeShellScriptBin "parent" "")
];
};
};
}
'';

system.rebuild.enableNg = ${lib.boolToString withNg};

specialisation.foo = {
inheritParentConfig = true;

configuration = { ... }: {
environment.systemPackages = [
(pkgs.writeShellScriptBin "foo" "")
];
};
};

specialisation.bar = {
inheritParentConfig = true;

configuration = { ... }: {
environment.systemPackages = [
(pkgs.writeShellScriptBin "bar" "")
];
};
};
}
'';

in
/* python */ ''
# python
''
machine.start()
machine.succeed("udevadm settle")
machine.wait_for_unit("multi-user.target")

@@ -1,4 +1,10 @@
{ hostPkgs, lib, withNg, ... }: {
{
hostPkgs,
lib,
withNg,
...
}:
{
name = "nixos-rebuild-target-host";

# TODO: remove overlay from nixos/modules/profiles/installation-device.nix
@@ -6,119 +12,145 @@
node.pkgsReadOnly = false;

nodes = {
deployer = { lib, pkgs, ... }: let
inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
in {
imports = [ ../modules/profiles/installation-device.nix ];
deployer =
{ lib, pkgs, ... }:
let
inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
in
{
imports = [ ../modules/profiles/installation-device.nix ];

nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
};

environment.systemPackages = [ pkgs.passh ];

system.includeBuildDependencies = true;

virtualisation = {
cores = 2;
memorySize = 2048;
};

system.build.privateKey = snakeOilPrivateKey;
system.build.publicKey = snakeOilPublicKey;
# We don't switch on `deployer`, but we need it to have the dependencies
# available, to be picked up by system.includeBuildDependencies above.
system.rebuild.enableNg = withNg;
system.switch.enable = true;
};

environment.systemPackages = [ pkgs.passh ];
target =
{ nodes, lib, ... }:
let
targetConfig = {
documentation.enable = false;
services.openssh.enable = true;

system.includeBuildDependencies = true;
users.users.root.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
users.users.alice.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
users.users.bob.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];

virtualisation = {
cores = 2;
memorySize = 2048;
};
users.users.alice.extraGroups = [ "wheel" ];
users.users.bob.extraGroups = [ "wheel" ];

system.build.privateKey = snakeOilPrivateKey;
system.build.publicKey = snakeOilPublicKey;
# We don't switch on `deployer`, but we need it to have the dependencies
# available, to be picked up by system.includeBuildDependencies above.
system.rebuild.enableNg = withNg;
system.switch.enable = true;
};
# Disable sudo for root to ensure sudo isn't called without `--use-remote-sudo`
security.sudo.extraRules = lib.mkForce [
{
groups = [ "wheel" ];
commands = [ { command = "ALL"; } ];
}
{
users = [ "alice" ];
commands = [
{
command = "ALL";
options = [ "NOPASSWD" ];
}
];
}
];

target = { nodes, lib, ... }: let
targetConfig = {
documentation.enable = false;
services.openssh.enable = true;
nix.settings.trusted-users = [ "@wheel" ];
};
in
{
imports = [ ./common/user-account.nix ];

users.users.root.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
users.users.alice.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
users.users.bob.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
config = lib.mkMerge [
targetConfig
{
system.build = {
inherit targetConfig;
};
system.switch.enable = true;

users.users.alice.extraGroups = [ "wheel" ];
users.users.bob.extraGroups = [ "wheel" ];

# Disable sudo for root to ensure sudo isn't called without `--use-remote-sudo`
security.sudo.extraRules = lib.mkForce [
{ groups = [ "wheel" ]; commands = [ { command = "ALL"; } ]; }
{ users = [ "alice" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
networking.hostName = "target";
}
];

nix.settings.trusted-users = [ "@wheel" ];
};
in {
imports = [ ./common/user-account.nix ];

config = lib.mkMerge [
targetConfig
{
system.build = {
inherit targetConfig;
};
system.switch.enable = true;

networking.hostName = "target";
}
];
};
};

testScript = { nodes, ... }:
testScript =
{ nodes, ... }:
let
sshConfig = builtins.toFile "ssh.conf" ''
UserKnownHostsFile=/dev/null
StrictHostKeyChecking=no
'';

targetConfigJSON = hostPkgs.writeText "target-configuration.json"
(builtins.toJSON nodes.target.system.build.targetConfig);
targetConfigJSON = hostPkgs.writeText "target-configuration.json" (
builtins.toJSON nodes.target.system.build.targetConfig
);

targetNetworkJSON = hostPkgs.writeText "target-network.json"
(builtins.toJSON nodes.target.system.build.networkConfig);
targetNetworkJSON = hostPkgs.writeText "target-network.json" (
builtins.toJSON nodes.target.system.build.networkConfig
);

configFile = hostname: hostPkgs.writeText "configuration.nix" /* nix */ ''
{ lib, modulesPath, ... }: {
imports = [
(modulesPath + "/virtualisation/qemu-vm.nix")
(modulesPath + "/testing/test-instrumentation.nix")
(modulesPath + "/../tests/common/user-account.nix")
(lib.modules.importJSON ./target-configuration.json)
(lib.modules.importJSON ./target-network.json)
./hardware-configuration.nix
];
configFile =
hostname:
hostPkgs.writeText "configuration.nix" # nix
''
{ lib, modulesPath, ... }: {
imports = [
(modulesPath + "/virtualisation/qemu-vm.nix")
(modulesPath + "/testing/test-instrumentation.nix")
(modulesPath + "/../tests/common/user-account.nix")
(lib.modules.importJSON ./target-configuration.json)
(lib.modules.importJSON ./target-network.json)
./hardware-configuration.nix
];

boot.loader.grub = {
enable = true;
device = "/dev/vda";
forceInstall = true;
};
boot.loader.grub = {
enable = true;
device = "/dev/vda";
forceInstall = true;
};

system.rebuild.enableNg = ${lib.boolToString withNg};
system.rebuild.enableNg = ${lib.boolToString withNg};

${lib.optionalString withNg /* nix */ ''
nixpkgs.overlays = [
(final: prev: {
# Set tmpdir inside nixos-rebuild-ng to test
# "Deploy works with very long TMPDIR"
nixos-rebuild-ng = prev.nixos-rebuild-ng.override { withTmpdir = "/tmp"; };
})
];
''}
${lib.optionalString withNg # nix
''
nixpkgs.overlays = [
(final: prev: {
# Set tmpdir inside nixos-rebuild-ng to test
# "Deploy works with very long TMPDIR"
nixos-rebuild-ng = prev.nixos-rebuild-ng.override { withTmpdir = "/tmp"; };
})
];
''
}

# this will be asserted
networking.hostName = "${hostname}";
}
'';
# this will be asserted
networking.hostName = "${hostname}";
}
'';
in
/* python */ ''
# python
''
start_all()
target.wait_for_open_port(22)

@@ -1,281 +1,359 @@
import ./make-test-python.nix ({ pkgs, ... }:
import ./make-test-python.nix (
{ pkgs, ... }:

let inherit (import ./ssh-keys.nix pkgs)
snakeOilPrivateKey snakeOilPublicKey snakeOilEd25519PrivateKey snakeOilEd25519PublicKey;
in {
name = "openssh";
meta = with pkgs.lib.maintainers; {
maintainers = [ aszlig ];
};

nodes = {

server =
{ ... }:

{
services.openssh.enable = true;
security.pam.services.sshd.limits =
[ { domain = "*"; item = "memlock"; type = "-"; value = 1024; } ];
users.users.root.openssh.authorizedKeys.keys = [
snakeOilPublicKey
];
};

server-allowed-users =
{ ... }:

{
services.openssh = { enable = true; settings.AllowUsers = [ "alice" "bob" ]; };
users.groups = { alice = { }; bob = { }; carol = { }; };
users.users = {
alice = { isNormalUser = true; group = "alice"; openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; };
bob = { isNormalUser = true; group = "bob"; openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; };
carol = { isNormalUser = true; group = "carol"; openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; };
};
};

server-lazy =
{ ... }:

{
services.openssh = { enable = true; startWhenNeeded = true; };
security.pam.services.sshd.limits =
[ { domain = "*"; item = "memlock"; type = "-"; value = 1024; } ];
users.users.root.openssh.authorizedKeys.keys = [
snakeOilPublicKey
];
};

server-lazy-socket = {
virtualisation.vlans = [ 1 2 ];
services.openssh = {
enable = true;
startWhenNeeded = true;
ports = [ 2222 ];
listenAddresses = [ { addr = "0.0.0.0"; } ];
};
users.users.root.openssh.authorizedKeys.keys = [
snakeOilPublicKey
];
let
inherit (import ./ssh-keys.nix pkgs)
snakeOilPrivateKey
snakeOilPublicKey
snakeOilEd25519PrivateKey
snakeOilEd25519PublicKey
;
in
{
name = "openssh";
meta = with pkgs.lib.maintainers; {
maintainers = [ aszlig ];
};

server-localhost-only =
{ ... }:
nodes = {

{
services.openssh = {
enable = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } ];
};
};
server =
{ ... }:

server-localhost-only-lazy =
{ ... }:

{
services.openssh = {
enable = true; startWhenNeeded = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } ];
};
};

server-match-rule =
{ ... }:

{
services.openssh = {
enable = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } { addr = "[::]"; port = 22; } ];
extraConfig = ''
# Combined test for two (predictable) Match criterias
Match LocalAddress 127.0.0.1 LocalPort 22
PermitRootLogin yes

# Separate tests for Match criterias
Match User root
PermitRootLogin yes
Match Group root
PermitRootLogin yes
Match Host nohost.example
PermitRootLogin yes
Match LocalAddress 127.0.0.1
PermitRootLogin yes
Match LocalPort 22
PermitRootLogin yes
Match RDomain nohost.example
PermitRootLogin yes
Match Address 127.0.0.1
PermitRootLogin yes
'';
};
};

server-no-openssl =
{ ... }:
{
services.openssh = {
enable = true;
package = pkgs.opensshPackages.openssh.override {
linkOpenssl = false;
};
hostKeys = [
{ type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; }
{
services.openssh.enable = true;
security.pam.services.sshd.limits = [
{
domain = "*";
item = "memlock";
type = "-";
value = 1024;
}
];
settings = {
# Since this test is against an OpenSSH-without-OpenSSL,
# we have to override NixOS's default ciphers (which require OpenSSL)
# and instead set these to null, which will mean OpenSSH uses its defaults.
# Expectedly, OpenSSH's defaults don't require OpenSSL when it's compiled
# without OpenSSL.
Ciphers = null;
KexAlgorithms = null;
Macs = null;
users.users.root.openssh.authorizedKeys.keys = [
snakeOilPublicKey
];
};

server-allowed-users =
{ ... }:

{
services.openssh = {
enable = true;
settings.AllowUsers = [
"alice"
"bob"
];
};
users.groups = {
alice = { };
bob = { };
carol = { };
};
users.users = {
alice = {
isNormalUser = true;
group = "alice";
openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
};
bob = {
isNormalUser = true;
group = "bob";
openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
};
carol = {
isNormalUser = true;
group = "carol";
openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
};
};
};
users.users.root.openssh.authorizedKeys.keys = [
snakeOilEd25519PublicKey
];
};

server-no-pam =
{ pkgs, ... }:
{
server-lazy =
{ ... }:

{
services.openssh = {
enable = true;
startWhenNeeded = true;
};
security.pam.services.sshd.limits = [
{
domain = "*";
item = "memlock";
type = "-";
value = 1024;
}
];
users.users.root.openssh.authorizedKeys.keys = [
snakeOilPublicKey
];
};

server-lazy-socket = {
virtualisation.vlans = [
1
2
];
services.openssh = {
enable = true;
package = pkgs.opensshPackages.openssh.override {
withPAM = false;
};
settings = {
UsePAM = false;
};
startWhenNeeded = true;
ports = [ 2222 ];
listenAddresses = [ { addr = "0.0.0.0"; } ];
};
users.users.root.openssh.authorizedKeys.keys = [
snakeOilPublicKey
];
};

client =
{ ... }: {
virtualisation.vlans = [ 1 2 ];
};
server-localhost-only =
{ ... }:

};
{
services.openssh = {
enable = true;
listenAddresses = [
{
addr = "127.0.0.1";
port = 22;
}
];
};
};

testScript = ''
start_all()
server-localhost-only-lazy =
{ ... }:

server.wait_for_unit("sshd", timeout=30)
server_allowed_users.wait_for_unit("sshd", timeout=30)
server_localhost_only.wait_for_unit("sshd", timeout=30)
server_match_rule.wait_for_unit("sshd", timeout=30)
server_no_openssl.wait_for_unit("sshd", timeout=30)
server_no_pam.wait_for_unit("sshd", timeout=30)
{
services.openssh = {
enable = true;
startWhenNeeded = true;
listenAddresses = [
{
addr = "127.0.0.1";
port = 22;
}
];
};
};

server_lazy.wait_for_unit("sshd.socket", timeout=30)
server_localhost_only_lazy.wait_for_unit("sshd.socket", timeout=30)
server_lazy_socket.wait_for_unit("sshd.socket", timeout=30)
server-match-rule =
{ ... }:

with subtest("manual-authkey"):
client.succeed(
'${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""'
)
public_key = client.succeed(
"${pkgs.openssh}/bin/ssh-keygen -y -f /root/.ssh/id_ed25519"
)
public_key = public_key.strip()
client.succeed("chmod 600 /root/.ssh/id_ed25519")
{
services.openssh = {
enable = true;
listenAddresses = [
{
addr = "127.0.0.1";
port = 22;
}
{
addr = "[::]";
port = 22;
}
];
extraConfig = ''
# Combined test for two (predictable) Match criterias
Match LocalAddress 127.0.0.1 LocalPort 22
PermitRootLogin yes

server.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key))
server_lazy.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key))
# Separate tests for Match criterias
Match User root
PermitRootLogin yes
Match Group root
PermitRootLogin yes
Match Host nohost.example
PermitRootLogin yes
Match LocalAddress 127.0.0.1
PermitRootLogin yes
Match LocalPort 22
PermitRootLogin yes
Match RDomain nohost.example
PermitRootLogin yes
Match Address 127.0.0.1
PermitRootLogin yes
'';
};
};

client.wait_for_unit("network.target")
client.succeed(
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'echo hello world' >&2",
timeout=30
)
client.succeed(
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'ulimit -l' | grep 1024",
timeout=30
)
server-no-openssl =
{ ... }:
{
services.openssh = {
enable = true;
package = pkgs.opensshPackages.openssh.override {
linkOpenssl = false;
};
hostKeys = [
{
type = "ed25519";
path = "/etc/ssh/ssh_host_ed25519_key";
}
];
settings = {
# Since this test is against an OpenSSH-without-OpenSSL,
# we have to override NixOS's default ciphers (which require OpenSSL)
# and instead set these to null, which will mean OpenSSH uses its defaults.
# Expectedly, OpenSSH's defaults don't require OpenSSL when it's compiled
# without OpenSSL.
Ciphers = null;
KexAlgorithms = null;
Macs = null;
};
};
users.users.root.openssh.authorizedKeys.keys = [
snakeOilEd25519PublicKey
];
};

client.succeed(
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'echo hello world' >&2",
timeout=30
)
client.succeed(
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'ulimit -l' | grep 1024,
|
||||
timeout=30
|
||||
)
|
||||
server-no-pam =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
package = pkgs.opensshPackages.openssh.override {
|
||||
withPAM = false;
|
||||
};
|
||||
settings = {
|
||||
UsePAM = false;
|
||||
};
|
||||
};
|
||||
users.users.root.openssh.authorizedKeys.keys = [
|
||||
snakeOilPublicKey
|
||||
];
|
||||
};
|
||||
|
||||
with subtest("socket activation on a non-standard port"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilPrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
# The final segment in this IP is allocated according to the alphabetical order of machines in this test.
|
||||
client.succeed(
|
||||
"ssh -p 2222 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.2.5 true",
|
||||
timeout=30
|
||||
)
|
||||
client =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [
|
||||
1
|
||||
2
|
||||
];
|
||||
};
|
||||
|
||||
with subtest("configured-authkey"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilPrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server true",
|
||||
timeout=30
|
||||
)
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-lazy true",
|
||||
timeout=30
|
||||
)
|
||||
};
|
||||
|
||||
with subtest("localhost-only"):
|
||||
server_localhost_only.succeed("ss -nlt | grep '127.0.0.1:22'")
|
||||
server_localhost_only_lazy.succeed("ss -nlt | grep '127.0.0.1:22'")
|
||||
testScript = ''
|
||||
start_all()
|
||||
|
||||
with subtest("match-rules"):
|
||||
server_match_rule.succeed("ss -nlt | grep '127.0.0.1:22'")
|
||||
server.wait_for_unit("sshd", timeout=30)
|
||||
server_allowed_users.wait_for_unit("sshd", timeout=30)
|
||||
server_localhost_only.wait_for_unit("sshd", timeout=30)
|
||||
server_match_rule.wait_for_unit("sshd", timeout=30)
|
||||
server_no_openssl.wait_for_unit("sshd", timeout=30)
|
||||
server_no_pam.wait_for_unit("sshd", timeout=30)
|
||||
|
||||
with subtest("allowed-users"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilPrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil alice@server-allowed-users true",
|
||||
timeout=30
|
||||
)
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil bob@server-allowed-users true",
|
||||
timeout=30
|
||||
)
|
||||
client.fail(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil carol@server-allowed-users true",
|
||||
timeout=30
|
||||
)
|
||||
server_lazy.wait_for_unit("sshd.socket", timeout=30)
|
||||
server_localhost_only_lazy.wait_for_unit("sshd.socket", timeout=30)
|
||||
server_lazy_socket.wait_for_unit("sshd.socket", timeout=30)
|
||||
|
||||
with subtest("no-openssl"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilEd25519PrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-no-openssl true",
|
||||
timeout=30
|
||||
)
|
||||
with subtest("manual-authkey"):
|
||||
client.succeed(
|
||||
'${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""'
|
||||
)
|
||||
public_key = client.succeed(
|
||||
"${pkgs.openssh}/bin/ssh-keygen -y -f /root/.ssh/id_ed25519"
|
||||
)
|
||||
public_key = public_key.strip()
|
||||
client.succeed("chmod 600 /root/.ssh/id_ed25519")
|
||||
|
||||
with subtest("no-pam"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilPrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-no-pam true",
|
||||
timeout=30
|
||||
)
|
||||
server.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key))
|
||||
server_lazy.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key))
|
||||
|
||||
# None of the per-connection units should have failed.
|
||||
server_lazy.fail("systemctl is-failed 'sshd@*.service'")
|
||||
'';
|
||||
})
|
||||
client.wait_for_unit("network.target")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'echo hello world' >&2",
|
||||
timeout=30
|
||||
)
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'ulimit -l' | grep 1024",
|
||||
timeout=30
|
||||
)
|
||||
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'echo hello world' >&2",
|
||||
timeout=30
|
||||
)
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'ulimit -l' | grep 1024",
|
||||
timeout=30
|
||||
)
|
||||
|
||||
with subtest("socket activation on a non-standard port"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilPrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
# The final segment in this IP is allocated according to the alphabetical order of machines in this test.
|
||||
client.succeed(
|
||||
"ssh -p 2222 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.2.5 true",
|
||||
timeout=30
|
||||
)
|
||||
|
||||
with subtest("configured-authkey"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilPrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server true",
|
||||
timeout=30
|
||||
)
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-lazy true",
|
||||
timeout=30
|
||||
)
|
||||
|
||||
with subtest("localhost-only"):
|
||||
server_localhost_only.succeed("ss -nlt | grep '127.0.0.1:22'")
|
||||
server_localhost_only_lazy.succeed("ss -nlt | grep '127.0.0.1:22'")
|
||||
|
||||
with subtest("match-rules"):
|
||||
server_match_rule.succeed("ss -nlt | grep '127.0.0.1:22'")
|
||||
|
||||
with subtest("allowed-users"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilPrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil alice@server-allowed-users true",
|
||||
timeout=30
|
||||
)
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil bob@server-allowed-users true",
|
||||
timeout=30
|
||||
)
|
||||
client.fail(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil carol@server-allowed-users true",
|
||||
timeout=30
|
||||
)
|
||||
|
||||
with subtest("no-openssl"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilEd25519PrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-no-openssl true",
|
||||
timeout=30
|
||||
)
|
||||
|
||||
with subtest("no-pam"):
|
||||
client.succeed(
|
||||
"cat ${snakeOilPrivateKey} > privkey.snakeoil"
|
||||
)
|
||||
client.succeed("chmod 600 privkey.snakeoil")
|
||||
client.succeed(
|
||||
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-no-pam true",
|
||||
timeout=30
|
||||
)
|
||||
|
||||
# None of the per-connection units should have failed.
|
||||
server_lazy.fail("systemctl is-failed 'sshd@*.service'")
|
||||
'';
|
||||
}
|
||||
)
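
The server-no-openssl hunks above carry the same override in both the old and the reflowed layout; condensed, the pattern under test is the following (a sketch assembled from the hunk itself, not an extra part of the commit):

  { pkgs, ... }:
  {
    services.openssh = {
      enable = true;
      # Build sshd without linking OpenSSL.
      package = pkgs.opensshPackages.openssh.override { linkOpenssl = false; };
      settings = {
        # null clears the NixOS default algorithm lists (which assume
        # OpenSSL) so OpenSSH falls back to its built-in defaults.
        Ciphers = null;
        KexAlgorithms = null;
        Macs = null;
      };
    };
  }
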
@@ -1,26 +1,32 @@
let
|
||||
name = "pam";
|
||||
in
|
||||
import ../make-test-python.nix ({ pkgs, ... }: {
|
||||
name = "pam-file-contents";
|
||||
import ../make-test-python.nix (
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "pam-file-contents";
|
||||
|
||||
nodes.machine = { ... }: {
|
||||
imports = [ ../../modules/profiles/minimal.nix ];
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
{
|
||||
imports = [ ../../modules/profiles/minimal.nix ];
|
||||
|
||||
security.krb5.enable = true;
|
||||
security.krb5.enable = true;
|
||||
|
||||
users = {
|
||||
mutableUsers = false;
|
||||
users = {
|
||||
user = {
|
||||
isNormalUser = true;
|
||||
users = {
|
||||
mutableUsers = false;
|
||||
users = {
|
||||
user = {
|
||||
isNormalUser = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = builtins.replaceStrings
|
||||
[ "@@pam_ccreds@@" "@@pam_krb5@@" ]
|
||||
[ pkgs.pam_ccreds.outPath pkgs.pam_krb5.outPath ]
|
||||
(builtins.readFile ./test_chfn.py);
|
||||
})
|
||||
testScript =
|
||||
builtins.replaceStrings
|
||||
[ "@@pam_ccreds@@" "@@pam_krb5@@" ]
|
||||
[ pkgs.pam_ccreds.outPath pkgs.pam_krb5.outPath ]
|
||||
(builtins.readFile ./test_chfn.py);
|
||||
}
|
||||
)
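
The whole change above only reflows one expression; the templating pattern it preserves substitutes store paths into an external Python test script (a sketch condensed from the hunk, not an addition to the commit):

  testScript =
    builtins.replaceStrings
      [ "@@pam_ccreds@@" "@@pam_krb5@@" ]
      [ pkgs.pam_ccreds.outPath pkgs.pam_krb5.outPath ]
      (builtins.readFile ./test_chfn.py);

Keeping the script in a separate file this way avoids Nix string interpolation inside the Python code; only the literal @@placeholders@@ are rewritten.
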
@@ -1,110 +1,116 @@
import ./make-test-python.nix ({ pkgs, lib, ...} :
|
||||
|
||||
{
|
||||
name = "pantheon";
|
||||
|
||||
meta.maintainers = lib.teams.pantheon.members;
|
||||
|
||||
nodes.machine = { ... }:
|
||||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
|
||||
{
|
||||
imports = [ ./common/user-account.nix ];
|
||||
name = "pantheon";
|
||||
|
||||
# Workaround ".gala-wrapped invoked oom-killer"
|
||||
virtualisation.memorySize = 2047;
|
||||
meta.maintainers = lib.teams.pantheon.members;
|
||||
|
||||
services.xserver.enable = true;
|
||||
services.xserver.desktopManager.pantheon.enable = true;
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
|
||||
# We ship pantheon.appcenter by default when this is enabled.
|
||||
services.flatpak.enable = true;
|
||||
{
|
||||
imports = [ ./common/user-account.nix ];
|
||||
|
||||
# We don't ship gnome-text-editor in Pantheon module, we add this line mainly
|
||||
# to catch eval issues related to this option.
|
||||
environment.pantheon.excludePackages = [ pkgs.gnome-text-editor ];
|
||||
# Workaround ".gala-wrapped invoked oom-killer"
|
||||
virtualisation.memorySize = 2047;
|
||||
|
||||
environment.systemPackages = [ pkgs.xdotool ];
|
||||
};
|
||||
services.xserver.enable = true;
|
||||
services.xserver.desktopManager.pantheon.enable = true;
|
||||
|
||||
enableOCR = true;
|
||||
# We ship pantheon.appcenter by default when this is enabled.
|
||||
services.flatpak.enable = true;
|
||||
|
||||
testScript = { nodes, ... }: let
|
||||
user = nodes.machine.users.users.alice;
|
||||
bob = nodes.machine.users.users.bob;
|
||||
in ''
|
||||
machine.wait_for_unit("display-manager.service")
|
||||
# We don't ship gnome-text-editor in Pantheon module, we add this line mainly
|
||||
# to catch eval issues related to this option.
|
||||
environment.pantheon.excludePackages = [ pkgs.gnome-text-editor ];
|
||||
|
||||
with subtest("Test we can see usernames in elementary-greeter"):
|
||||
machine.wait_for_text("${user.description}")
|
||||
machine.wait_until_succeeds("pgrep -f io.elementary.greeter-compositor")
|
||||
# OCR was struggling with this one.
|
||||
# machine.wait_for_text("${bob.description}")
|
||||
# Ensure the password box is focused by clicking it.
|
||||
# Workaround for https://github.com/NixOS/nixpkgs/issues/211366.
|
||||
machine.succeed("XAUTHORITY=/var/lib/lightdm/.Xauthority DISPLAY=:0 xdotool mousemove 512 505 click 1")
|
||||
machine.sleep(2)
|
||||
machine.screenshot("elementary_greeter_lightdm")
|
||||
environment.systemPackages = [ pkgs.xdotool ];
|
||||
};
|
||||
|
||||
with subtest("Login with elementary-greeter"):
|
||||
machine.send_chars("${user.password}\n")
|
||||
machine.wait_for_x()
|
||||
machine.wait_for_file("${user.home}/.Xauthority")
|
||||
machine.succeed("xauth merge ${user.home}/.Xauthority")
|
||||
machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"')
|
||||
enableOCR = true;
|
||||
|
||||
with subtest("Check that logging in has given the user ownership of devices"):
|
||||
machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
|
||||
testScript =
|
||||
{ nodes, ... }:
|
||||
let
|
||||
user = nodes.machine.users.users.alice;
|
||||
bob = nodes.machine.users.users.bob;
|
||||
in
|
||||
''
|
||||
machine.wait_for_unit("display-manager.service")
|
||||
|
||||
with subtest("Check if Pantheon components actually start"):
|
||||
for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock", "gsd-media-keys", "io.elementary.desktop.agent-polkit"]:
|
||||
machine.wait_until_succeeds(f"pgrep -f {i}")
|
||||
for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock"]:
|
||||
machine.wait_for_window(i)
|
||||
machine.wait_until_succeeds("pgrep -xf ${pkgs.pantheon.elementary-files}/libexec/io.elementary.files.xdg-desktop-portal")
|
||||
with subtest("Test we can see usernames in elementary-greeter"):
|
||||
machine.wait_for_text("${user.description}")
|
||||
machine.wait_until_succeeds("pgrep -f io.elementary.greeter-compositor")
|
||||
# OCR was struggling with this one.
|
||||
# machine.wait_for_text("${bob.description}")
|
||||
# Ensure the password box is focused by clicking it.
|
||||
# Workaround for https://github.com/NixOS/nixpkgs/issues/211366.
|
||||
machine.succeed("XAUTHORITY=/var/lib/lightdm/.Xauthority DISPLAY=:0 xdotool mousemove 512 505 click 1")
|
||||
machine.sleep(2)
|
||||
machine.screenshot("elementary_greeter_lightdm")
|
||||
|
||||
with subtest("Check if various environment variables are set"):
|
||||
cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf ${pkgs.pantheon.gala}/bin/gala)/environ"
|
||||
machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Pantheon'")
|
||||
# Hopefully from the sessionPath option.
|
||||
machine.succeed(f"{cmd} | grep 'XDG_DATA_DIRS' | grep 'gsettings-schemas/pantheon-agent-geoclue2'")
|
||||
# Hopefully from login shell.
|
||||
machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE' | grep '1'")
|
||||
with subtest("Login with elementary-greeter"):
|
||||
machine.send_chars("${user.password}\n")
|
||||
machine.wait_for_x()
|
||||
machine.wait_for_file("${user.home}/.Xauthority")
|
||||
machine.succeed("xauth merge ${user.home}/.Xauthority")
|
||||
machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"')
|
||||
|
||||
with subtest("Open elementary videos"):
|
||||
machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.videos >&2 &'")
|
||||
machine.sleep(2)
|
||||
machine.wait_for_window("io.elementary.videos")
|
||||
machine.wait_for_text("No Videos Open")
|
||||
with subtest("Check that logging in has given the user ownership of devices"):
|
||||
machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
|
||||
|
||||
with subtest("Open elementary calendar"):
|
||||
machine.wait_until_succeeds("pgrep -f evolution-calendar-factory")
|
||||
machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.calendar >&2 &'")
|
||||
machine.sleep(2)
|
||||
machine.wait_for_window("io.elementary.calendar")
|
||||
with subtest("Check if Pantheon components actually start"):
|
||||
for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock", "gsd-media-keys", "io.elementary.desktop.agent-polkit"]:
|
||||
machine.wait_until_succeeds(f"pgrep -f {i}")
|
||||
for i in ["gala", "io.elementary.wingpanel", "io.elementary.dock"]:
|
||||
machine.wait_for_window(i)
|
||||
machine.wait_until_succeeds("pgrep -xf ${pkgs.pantheon.elementary-files}/libexec/io.elementary.files.xdg-desktop-portal")
|
||||
|
||||
with subtest("Open system settings"):
|
||||
machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.settings >&2 &'")
|
||||
# Wait for all plugins to be loaded before we check if the window is still there.
|
||||
machine.sleep(5)
|
||||
machine.wait_for_window("io.elementary.settings")
|
||||
with subtest("Check if various environment variables are set"):
|
||||
cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf ${pkgs.pantheon.gala}/bin/gala)/environ"
|
||||
machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Pantheon'")
|
||||
# Hopefully from the sessionPath option.
|
||||
machine.succeed(f"{cmd} | grep 'XDG_DATA_DIRS' | grep 'gsettings-schemas/pantheon-agent-geoclue2'")
|
||||
# Hopefully from login shell.
|
||||
machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE' | grep '1'")
|
||||
|
||||
with subtest("Open elementary terminal"):
|
||||
machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal >&2 &'")
|
||||
machine.wait_for_window("io.elementary.terminal")
|
||||
with subtest("Open elementary videos"):
|
||||
machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.videos >&2 &'")
|
||||
machine.sleep(2)
|
||||
machine.wait_for_window("io.elementary.videos")
|
||||
machine.wait_for_text("No Videos Open")
|
||||
|
||||
with subtest("Trigger multitasking view"):
|
||||
cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1"
|
||||
env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"
|
||||
machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
|
||||
machine.sleep(5)
|
||||
machine.screenshot("multitasking")
|
||||
machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
|
||||
with subtest("Open elementary calendar"):
|
||||
machine.wait_until_succeeds("pgrep -f evolution-calendar-factory")
|
||||
machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.calendar >&2 &'")
|
||||
machine.sleep(2)
|
||||
machine.wait_for_window("io.elementary.calendar")
|
||||
|
||||
with subtest("Check if gala has ever coredumped"):
|
||||
machine.fail("coredumpctl --json=short | grep gala")
|
||||
# So you can see the dock in the below screenshot.
|
||||
machine.succeed("su - ${user.name} -c 'DISPLAY=:0 xdotool mousemove 450 1000 >&2 &'")
|
||||
machine.sleep(10)
|
||||
machine.screenshot("screen")
|
||||
'';
|
||||
})
|
||||
with subtest("Open system settings"):
|
||||
machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.settings >&2 &'")
|
||||
# Wait for all plugins to be loaded before we check if the window is still there.
|
||||
machine.sleep(5)
|
||||
machine.wait_for_window("io.elementary.settings")
|
||||
|
||||
with subtest("Open elementary terminal"):
|
||||
machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal >&2 &'")
|
||||
machine.wait_for_window("io.elementary.terminal")
|
||||
|
||||
with subtest("Trigger multitasking view"):
|
||||
cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1"
|
||||
env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"
|
||||
machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
|
||||
machine.sleep(5)
|
||||
machine.screenshot("multitasking")
|
||||
machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
|
||||
|
||||
with subtest("Check if gala has ever coredumped"):
|
||||
machine.fail("coredumpctl --json=short | grep gala")
|
||||
# So you can see the dock in the below screenshot.
|
||||
machine.succeed("su - ${user.name} -c 'DISPLAY=:0 xdotool mousemove 450 1000 >&2 &'")
|
||||
machine.sleep(10)
|
||||
machine.screenshot("screen")
|
||||
'';
|
||||
}
|
||||
)
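
The multitasking-view subtest above drives the compositor over D-Bus; condensed from the hunk (a sketch, not part of the commit), the invocation pattern is:

  testScript =
    { nodes, ... }:
    let
      user = nodes.machine.users.users.alice;
    in
    ''
      # Address the per-user session bus via the logged-in user's uid.
      cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1"
      env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"
      machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
    '';
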
@@ -1,109 +1,127 @@
import ./make-test-python.nix ({ lib, ... }: {
|
||||
name = "paperless";
|
||||
meta.maintainers = with lib.maintainers; [ leona SuperSandro2000 erikarvstedt ];
|
||||
import ./make-test-python.nix (
|
||||
{ lib, ... }:
|
||||
{
|
||||
name = "paperless";
|
||||
meta.maintainers = with lib.maintainers; [
|
||||
leona
|
||||
SuperSandro2000
|
||||
erikarvstedt
|
||||
];
|
||||
|
||||
nodes = let self = {
|
||||
simple = { pkgs, ... }: {
|
||||
environment.systemPackages = with pkgs; [ imagemagick jq ];
|
||||
services.paperless = {
|
||||
enable = true;
|
||||
passwordFile = builtins.toFile "password" "admin";
|
||||
nodes =
|
||||
let
|
||||
self = {
|
||||
simple =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
imagemagick
|
||||
jq
|
||||
];
|
||||
services.paperless = {
|
||||
enable = true;
|
||||
passwordFile = builtins.toFile "password" "admin";
|
||||
|
||||
exporter = {
|
||||
enable = true;
|
||||
exporter = {
|
||||
enable = true;
|
||||
|
||||
settings = {
|
||||
"no-color" = lib.mkForce false; # override a default option
|
||||
"no-thumbnail" = true; # add a new option
|
||||
};
|
||||
settings = {
|
||||
"no-color" = lib.mkForce false; # override a default option
|
||||
"no-thumbnail" = true; # add a new option
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
postgres =
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
imports = [ self.simple ];
|
||||
services.paperless.database.createLocally = true;
|
||||
services.paperless.settings = {
|
||||
PAPERLESS_OCR_LANGUAGE = "deu";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
postgres = { config, pkgs, ... }: {
|
||||
imports = [ self.simple ];
|
||||
services.paperless.database.createLocally = true;
|
||||
services.paperless.settings = {
|
||||
PAPERLESS_OCR_LANGUAGE = "deu";
|
||||
};
|
||||
};
|
||||
}; in self;
|
||||
in
|
||||
self;
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
testScript = ''
|
||||
import json
|
||||
|
||||
def test_paperless(node):
|
||||
node.wait_for_unit("paperless-consumer.service")
|
||||
def test_paperless(node):
|
||||
node.wait_for_unit("paperless-consumer.service")
|
||||
|
||||
with subtest("Add a document via the file system"):
|
||||
node.succeed(
|
||||
"convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black "
|
||||
"-annotate +5+20 'hello world 16-10-2005' /var/lib/paperless/consume/doc.png"
|
||||
)
|
||||
with subtest("Add a document via the file system"):
|
||||
node.succeed(
|
||||
"convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black "
|
||||
"-annotate +5+20 'hello world 16-10-2005' /var/lib/paperless/consume/doc.png"
|
||||
)
|
||||
|
||||
with subtest("Web interface gets ready"):
|
||||
node.wait_for_unit("paperless-web.service")
|
||||
# Wait until server accepts connections
|
||||
node.wait_until_succeeds("curl -fs localhost:28981")
|
||||
|
||||
# Required for consuming documents via the web interface
|
||||
with subtest("Task-queue gets ready"):
|
||||
node.wait_for_unit("paperless-task-queue.service")
|
||||
|
||||
with subtest("Add a png document via the web interface"):
|
||||
node.succeed(
|
||||
"convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black "
|
||||
"-annotate +5+20 'hello web 16-10-2005' /tmp/webdoc.png"
|
||||
)
|
||||
node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.png -fs localhost:28981/api/documents/post_document/")
|
||||
|
||||
with subtest("Add a txt document via the web interface"):
|
||||
node.succeed(
|
||||
"echo 'hello web 16-10-2005' > /tmp/webdoc.txt"
|
||||
)
|
||||
node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.txt -fs localhost:28981/api/documents/post_document/")
|
||||
|
||||
with subtest("Documents are consumed"):
|
||||
node.wait_until_succeeds(
|
||||
"(($(curl -u admin:admin -fs localhost:28981/api/documents/ | jq .count) == 3))"
|
||||
)
|
||||
docs = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/"))['results']
|
||||
assert "2005-10-16" in docs[0]['created']
|
||||
assert "2005-10-16" in docs[1]['created']
|
||||
assert "2005-10-16" in docs[2]['created']
|
||||
|
||||
# Detects gunicorn issues, see PR #190888
|
||||
with subtest("Document metadata can be accessed"):
|
||||
metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/1/metadata/"))
|
||||
assert "original_checksum" in metadata
|
||||
|
||||
metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/2/metadata/"))
|
||||
assert "original_checksum" in metadata
|
||||
|
||||
metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/3/metadata/"))
|
||||
assert "original_checksum" in metadata
|
||||
|
||||
with subtest("Exporter"):
|
||||
node.succeed("systemctl start --wait paperless-exporter")
|
||||
with subtest("Web interface gets ready"):
|
||||
node.wait_for_unit("paperless-web.service")
|
||||
node.wait_for_unit("paperless-consumer.service")
|
||||
node.wait_for_unit("paperless-scheduler.service")
|
||||
# Wait until server accepts connections
|
||||
node.wait_until_succeeds("curl -fs localhost:28981")
|
||||
|
||||
# Required for consuming documents via the web interface
|
||||
with subtest("Task-queue gets ready"):
|
||||
node.wait_for_unit("paperless-task-queue.service")
|
||||
|
||||
node.succeed("ls -lah /var/lib/paperless/export/manifest.json")
|
||||
with subtest("Add a png document via the web interface"):
|
||||
node.succeed(
|
||||
"convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black "
|
||||
"-annotate +5+20 'hello web 16-10-2005' /tmp/webdoc.png"
|
||||
)
|
||||
node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.png -fs localhost:28981/api/documents/post_document/")
|
||||
|
||||
timers = node.succeed("systemctl list-timers paperless-exporter")
|
||||
print(timers)
|
||||
assert "paperless-exporter.timer paperless-exporter.service" in timers, "missing timer"
|
||||
assert "1 timers listed." in timers, "incorrect number of timers"
|
||||
with subtest("Add a txt document via the web interface"):
|
||||
node.succeed(
|
||||
"echo 'hello web 16-10-2005' > /tmp/webdoc.txt"
|
||||
)
|
||||
node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.txt -fs localhost:28981/api/documents/post_document/")
|
||||
|
||||
# Double check that our attrset option override works as expected
|
||||
cmdline = node.succeed("grep 'paperless-manage' $(systemctl cat paperless-exporter | grep ExecStart | cut -f 2 -d=)")
|
||||
print(f"Exporter command line {cmdline!r}")
|
||||
assert cmdline.strip() == "paperless-manage document_exporter /var/lib/paperless/export --compare-checksums --delete --no-progress-bar --no-thumbnail", "Unexpected exporter command line"
|
||||
with subtest("Documents are consumed"):
|
||||
node.wait_until_succeeds(
|
||||
"(($(curl -u admin:admin -fs localhost:28981/api/documents/ | jq .count) == 3))"
|
||||
)
|
||||
docs = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/"))['results']
|
||||
assert "2005-10-16" in docs[0]['created']
|
||||
assert "2005-10-16" in docs[1]['created']
|
||||
assert "2005-10-16" in docs[2]['created']
|
||||
|
||||
test_paperless(simple)
|
||||
simple.send_monitor_command("quit")
|
||||
simple.wait_for_shutdown()
|
||||
test_paperless(postgres)
|
||||
'';
|
||||
})
|
||||
# Detects gunicorn issues, see PR #190888
|
||||
with subtest("Document metadata can be accessed"):
|
||||
metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/1/metadata/"))
|
||||
assert "original_checksum" in metadata
|
||||
|
||||
metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/2/metadata/"))
|
||||
assert "original_checksum" in metadata
|
||||
|
||||
metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/3/metadata/"))
|
||||
assert "original_checksum" in metadata
|
||||
|
||||
with subtest("Exporter"):
|
||||
node.succeed("systemctl start --wait paperless-exporter")
|
||||
node.wait_for_unit("paperless-web.service")
|
||||
node.wait_for_unit("paperless-consumer.service")
|
||||
node.wait_for_unit("paperless-scheduler.service")
|
||||
node.wait_for_unit("paperless-task-queue.service")
|
||||
|
||||
node.succeed("ls -lah /var/lib/paperless/export/manifest.json")
|
||||
|
||||
timers = node.succeed("systemctl list-timers paperless-exporter")
|
||||
print(timers)
|
||||
assert "paperless-exporter.timer paperless-exporter.service" in timers, "missing timer"
|
||||
assert "1 timers listed." in timers, "incorrect number of timers"
|
||||
|
||||
# Double check that our attrset option override works as expected
|
||||
cmdline = node.succeed("grep 'paperless-manage' $(systemctl cat paperless-exporter | grep ExecStart | cut -f 2 -d=)")
|
||||
print(f"Exporter command line {cmdline!r}")
|
||||
assert cmdline.strip() == "paperless-manage document_exporter /var/lib/paperless/export --compare-checksums --delete --no-progress-bar --no-thumbnail", "Unexpected exporter command line"
|
||||
|
||||
test_paperless(simple)
|
||||
simple.send_monitor_command("quit")
|
||||
simple.wait_for_shutdown()
|
||||
test_paperless(postgres)
|
||||
'';
|
||||
}
|
||||
)
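
The exporter assertions above pin down how the settings attrset maps onto the exporter command line; condensed (a sketch drawn from the hunk, not an addition to the commit):

  services.paperless.exporter.settings = {
    "no-color" = lib.mkForce false; # force-drop a flag the module enables by default
    "no-thumbnail" = true;          # contributes --no-thumbnail
  };

which the test then observes as `paperless-manage document_exporter ... --no-progress-bar --no-thumbnail` in the unit's ExecStart script.
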
@@ -1,12 +1,22 @@
{ system ? builtins.currentSystem
|
||||
, config ? { }
|
||||
, pkgs ? import ../.. { inherit system config; }
|
||||
{
|
||||
system ? builtins.currentSystem,
|
||||
config ? { },
|
||||
pkgs ? import ../.. { inherit system config; },
|
||||
}:
|
||||
|
||||
let
|
||||
inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
|
||||
inherit (pkgs.lib) concatStringsSep maintainers mapAttrs mkMerge
|
||||
removeSuffix replaceStrings singleton splitString makeBinPath;
|
||||
inherit (pkgs.lib)
|
||||
concatStringsSep
|
||||
maintainers
|
||||
mapAttrs
|
||||
mkMerge
|
||||
removeSuffix
|
||||
replaceStrings
|
||||
singleton
|
||||
splitString
|
||||
makeBinPath
|
||||
;
|
||||
|
||||
/*
|
||||
* The attrset `exporterTests` contains one attribute
|
||||
|
@@ -148,7 +158,8 @@ let
|
|||
};
|
||||
metricProvider = {
|
||||
services.bitcoind.default.enable = true;
|
||||
services.bitcoind.default.rpc.users.bitcoinrpc.passwordHMAC = "e8fe33f797e698ac258c16c8d7aadfbe$872bdb8f4d787367c26bcfd75e6c23c4f19d44a69f5d1ad329e5adf3f82710f7";
|
||||
services.bitcoind.default.rpc.users.bitcoinrpc.passwordHMAC =
|
||||
"e8fe33f797e698ac258c16c8d7aadfbe$872bdb8f4d787367c26bcfd75e6c23c4f19d44a69f5d1ad329e5adf3f82710f7";
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("prometheus-bitcoin-exporter.service")
|
||||
|
@@ -161,12 +172,14 @@ let
|
|||
blackbox = {
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
configFile = pkgs.writeText "config.yml" (builtins.toJSON {
|
||||
modules.icmp_v6 = {
|
||||
prober = "icmp";
|
||||
icmp.preferred_ip_protocol = "ip6";
|
||||
};
|
||||
});
|
||||
configFile = pkgs.writeText "config.yml" (
|
||||
builtins.toJSON {
|
||||
modules.icmp_v6 = {
|
||||
prober = "icmp";
|
||||
icmp.preferred_ip_protocol = "ip6";
|
||||
};
|
||||
}
|
||||
);
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("prometheus-blackbox-exporter.service")
|
||||
|
@@ -185,7 +198,12 @@ let
|
|||
metricProvider = {
|
||||
services.borgmatic.enable = true;
|
||||
services.borgmatic.settings.source_directories = [ "/home" ];
|
||||
services.borgmatic.settings.repositories = [ { label = "local"; path = "/var/backup"; } ];
|
||||
services.borgmatic.settings.repositories = [
|
||||
{
|
||||
label = "local";
|
||||
path = "/var/backup";
|
||||
}
|
||||
];
|
||||
services.borgmatic.settings.keep_daily = 10;
|
||||
};
|
||||
exporterTest = ''
|
||||
|
@@ -202,17 +220,20 @@ let
|
|||
enable = true;
|
||||
extraFlags = [ "--web.collectd-push-path /collectd" ];
|
||||
};
|
||||
exporterTest = let postData = replaceStrings [ "\n" ] [ "" ] ''
|
||||
[{
|
||||
"values":[23],
|
||||
"dstypes":["gauge"],
|
||||
"type":"gauge",
|
||||
"interval":1000,
|
||||
"host":"testhost",
|
||||
"plugin":"testplugin",
|
||||
"time":DATE
|
||||
}]
|
||||
''; in
|
||||
exporterTest =
|
||||
let
|
||||
postData = replaceStrings [ "\n" ] [ "" ] ''
|
||||
[{
|
||||
"values":[23],
|
||||
"dstypes":["gauge"],
|
||||
"type":"gauge",
|
||||
"interval":1000,
|
||||
"host":"testhost",
|
||||
"plugin":"testplugin",
|
||||
"time":DATE
|
||||
}]
|
||||
'';
|
||||
in
|
||||
''
|
||||
wait_for_unit("prometheus-collectd-exporter.service")
|
||||
wait_for_open_port(9103)
|
||||
|
@@ -244,8 +265,8 @@ let
|
|||
services.deluge.declarative = true;
|
||||
services.deluge.config.daemon_port = 2345;
|
||||
services.deluge.authFile = pkgs.writeText "authFile" ''
|
||||
localclient:abcdef:10
|
||||
user:weak_password:10
|
||||
localclient:abcdef:10
|
||||
user:weak_password:10
|
||||
'';
|
||||
};
|
||||
exporterTest = ''
|
||||
|
@@ -297,14 +318,18 @@ let
|
|||
listen: 127.0.0.1@53
|
||||
template:
|
||||
- id: default
|
||||
storage: ${pkgs.buildEnv {
|
||||
name = "zones";
|
||||
paths = [(pkgs.writeTextDir "example.com.zone" ''
|
||||
@ SOA ns1.example.com. noc.example.com. 2024032401 86400 7200 3600000 172800
|
||||
@ NS ns1
|
||||
ns1 A 192.168.0.1
|
||||
'')];
|
||||
}}
|
||||
storage: ${
|
||||
pkgs.buildEnv {
|
||||
name = "zones";
|
||||
paths = [
|
||||
(pkgs.writeTextDir "example.com.zone" ''
|
||||
@ SOA ns1.example.com. noc.example.com. 2024032401 86400 7200 3600000 172800
|
||||
@ NS ns1
|
||||
ns1 A 192.168.0.1
|
||||
'')
|
||||
];
|
||||
}
|
||||
}
|
||||
zonefile-load: difference
|
||||
zonefile-sync: -1
|
||||
zone:
|
||||
|
@@ -356,27 +381,31 @@ let
|
|||
'';
|
||||
};
|
||||
|
||||
exportarr-sonarr = let apikey = "eccff6a992bc2e4b88e46d064b26bb4e"; in {
|
||||
nodeName = "exportarr_sonarr";
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
url = "http://127.0.0.1:8989";
|
||||
apiKeyFile = pkgs.writeText "dummy-api-key" apikey;
|
||||
};
|
||||
metricProvider = {
|
||||
services.sonarr = {
|
||||
exportarr-sonarr =
|
||||
let
|
||||
apikey = "eccff6a992bc2e4b88e46d064b26bb4e";
|
||||
in
|
||||
{
|
||||
nodeName = "exportarr_sonarr";
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
environmentFiles = [(pkgs.writeText "sonarr-env" "SONARR__AUTH__APIKEY=${apikey}")];
|
||||
url = "http://127.0.0.1:8989";
|
||||
apiKeyFile = pkgs.writeText "dummy-api-key" apikey;
|
||||
};
|
||||
metricProvider = {
|
||||
services.sonarr = {
|
||||
enable = true;
|
||||
environmentFiles = [ (pkgs.writeText "sonarr-env" "SONARR__AUTH__APIKEY=${apikey}") ];
|
||||
};
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("sonarr.service")
|
||||
wait_for_open_port(8989)
|
||||
wait_for_unit("prometheus-exportarr-sonarr-exporter.service")
|
||||
wait_for_open_port(9708)
|
||||
succeed("curl -sSf http://localhost:9708/metrics | grep sonarr_series_total")
|
||||
'';
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("sonarr.service")
|
||||
wait_for_open_port(8989)
|
||||
wait_for_unit("prometheus-exportarr-sonarr-exporter.service")
|
||||
wait_for_open_port(9708)
|
||||
succeed("curl -sSf http://localhost:9708/metrics | grep sonarr_series_total")
|
||||
'';
|
||||
};
|
||||
|
||||
fastly = {
|
||||
exporterConfig = {
|
||||
|
@@ -409,14 +438,16 @@ let
|
|||
enable = true;
|
||||
port = 9108;
|
||||
graphitePort = 9109;
|
||||
mappingSettings.mappings = [{
|
||||
match = "test.*.*";
|
||||
name = "testing";
|
||||
labels = {
|
||||
protocol = "$1";
|
||||
author = "$2";
|
||||
};
|
||||
}];
|
||||
mappingSettings.mappings = [
|
||||
{
|
||||
match = "test.*.*";
|
||||
name = "testing";
|
||||
labels = {
|
||||
protocol = "$1";
|
||||
author = "$2";
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("prometheus-graphite-exporter.service")
|
||||
|
@@ -433,7 +464,10 @@ let
|
|||
port = 9348;
|
||||
configuration = {
|
||||
hosts = {
|
||||
default = { username = "username"; password = "password"; };
|
||||
default = {
|
||||
username = "username";
|
||||
password = "password";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@@ -504,15 +538,20 @@ let
|
|||
json = {
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
configFile = pkgs.writeText "json-exporter-conf.json" (builtins.toJSON {
|
||||
modules = {
|
||||
default = {
|
||||
metrics = [
|
||||
{ name = "json_test_metric"; path = "{ .test }"; }
|
||||
];
|
||||
configFile = pkgs.writeText "json-exporter-conf.json" (
|
||||
builtins.toJSON {
|
||||
modules = {
|
||||
default = {
|
||||
metrics = [
|
||||
{
|
||||
name = "json_test_metric";
|
||||
path = "{ .test }";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
||||
);
|
||||
};
|
||||
metricProvider = {
|
||||
systemd.services.prometheus-json-exporter.after = [ "nginx.service" ];
|
||||
|
@@ -552,17 +591,19 @@ let
|
|||
dnssec-signing: off
|
||||
zonefile-sync: -1
|
||||
zonefile-load: difference
|
||||
storage: ${pkgs.buildEnv {
|
||||
name = "foo";
|
||||
paths = [
|
||||
(pkgs.writeTextDir "test.zone" ''
|
||||
@ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
|
||||
@ NS ns1
|
||||
@ NS ns2
|
||||
ns1 A 192.168.0.1
|
||||
'')
|
||||
];
|
||||
}}
|
||||
storage: ${
|
||||
pkgs.buildEnv {
|
||||
name = "foo";
|
||||
paths = [
|
||||
(pkgs.writeTextDir "test.zone" ''
|
||||
@ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
|
||||
@ NS ns1
|
||||
@ NS ns2
|
||||
ns1 A 192.168.0.1
|
||||
'')
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
mod-stats:
|
||||
- id: custom
|
||||
|
@@ -678,14 +719,16 @@ let
|
|||
configuration = {
|
||||
monitoringInterval = "2s";
|
||||
mailCheckTimeout = "10s";
|
||||
servers = [{
|
||||
name = "testserver";
|
||||
server = "localhost";
|
||||
port = 25;
|
||||
from = "mail-exporter@localhost";
|
||||
to = "mail-exporter@localhost";
|
||||
detectionDir = "/var/spool/mail/mail-exporter/new";
|
||||
}];
|
||||
servers = [
|
||||
{
|
||||
name = "testserver";
|
||||
server = "localhost";
|
||||
port = 25;
|
||||
from = "mail-exporter@localhost";
|
||||
to = "mail-exporter@localhost";
|
||||
detectionDir = "/var/spool/mail/mail-exporter/new";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
metricProvider = {
|
||||
|
@@ -709,7 +752,7 @@ let
|
|||
isSystemUser = true;
|
||||
group = "mailexporter";
|
||||
};
|
||||
users.groups.mailexporter = {};
|
||||
users.groups.mailexporter = { };
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("postfix.service")
|
||||
|
@@ -773,7 +816,10 @@ let
|
|||
networking.networkmanager.enable = true;
|
||||
systemd.services.ModemManager = {
|
||||
enable = true;
|
||||
wantedBy = [ "NetworkManager.service" "prometheus-modemmanager-exporter.service" ];
|
||||
wantedBy = [
|
||||
"NetworkManager.service"
|
||||
"prometheus-modemmanager-exporter.service"
|
||||
];
|
||||
};
|
||||
};
|
||||
exporterTest = ''
|
||||
|
@@ -796,14 +842,16 @@ let
|
|||
metricProvider = {
|
||||
services.mosquitto = {
|
||||
enable = true;
|
||||
listeners = [{
|
||||
users.exporter = {
|
||||
acl = [ "read #" ];
|
||||
passwordFile = pkgs.writeText "mosquitto-password" "testpassword";
|
||||
};
|
||||
}];
|
||||
listeners = [
|
||||
{
|
||||
users.exporter = {
|
||||
acl = [ "read #" ];
|
||||
passwordFile = pkgs.writeText "mosquitto-password" "testpassword";
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
systemd.services.prometheus-mqtt-exporter ={
|
||||
systemd.services.prometheus-mqtt-exporter = {
|
||||
wants = [ "mosquitto.service" ];
|
||||
after = [ "mosquitto.service" ];
|
||||
};
|
||||
|
@@ -988,7 +1036,7 @@ let
|
|||
nodeName = "node_cert";
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
paths = ["/run/certs"];
|
||||
paths = [ "/run/certs" ];
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("prometheus-node-cert-exporter.service")
|
||||
|
@@ -1084,9 +1132,9 @@ let
|
|||
metricProvider = {
|
||||
users.users."php-fpm-exporter" = {
|
||||
isSystemUser = true;
|
||||
group = "php-fpm-exporter";
|
||||
group = "php-fpm-exporter";
|
||||
};
|
||||
users.groups."php-fpm-exporter" = {};
|
||||
users.groups."php-fpm-exporter" = { };
|
||||
services.phpfpm.pools."php-fpm-exporter" = {
|
||||
user = "php-fpm-exporter";
|
||||
group = "php-fpm-exporter";
|
||||
|
@ -1116,26 +1164,31 @@ let
|
|||
enable = true;
|
||||
|
||||
settings = {
|
||||
targets = [ {
|
||||
"localhost" = {
|
||||
alias = "local machine";
|
||||
env = "prod";
|
||||
type = "domain";
|
||||
};
|
||||
} {
|
||||
"127.0.0.1" = {
|
||||
alias = "local machine";
|
||||
type = "v4";
|
||||
};
|
||||
} {
|
||||
"::1" = {
|
||||
alias = "local machine";
|
||||
type = "v6";
|
||||
};
|
||||
} {
|
||||
"google.com" = {};
|
||||
} ];
|
||||
dns = {};
|
||||
targets = [
|
||||
{
|
||||
"localhost" = {
|
||||
alias = "local machine";
|
||||
env = "prod";
|
||||
type = "domain";
|
||||
};
|
||||
}
|
||||
{
|
||||
"127.0.0.1" = {
|
||||
alias = "local machine";
|
||||
type = "v4";
|
||||
};
|
||||
}
|
||||
{
|
||||
"::1" = {
|
||||
alias = "local machine";
|
||||
type = "v6";
|
||||
};
|
||||
}
|
||||
{
|
||||
"google.com" = { };
|
||||
}
|
||||
];
|
||||
dns = { };
|
||||
ping = {
|
||||
interval = "2s";
|
||||
timeout = "3s";
|
||||
|
@@ -1211,7 +1264,10 @@ let
|
|||
enable = true;
|
||||
settings.process_names = [
|
||||
# Remove nix store path from process name
|
||||
{ name = "{{.Matches.Wrapped}} {{ .Matches.Args }}"; cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ]; }
|
||||
{
|
||||
name = "{{.Matches.Wrapped}} {{ .Matches.Args }}";
|
||||
cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ];
|
||||
}
|
||||
];
|
||||
};
|
||||
exporterTest = ''
|
||||
|
@@ -1225,26 +1281,28 @@ let
|
|||
'';
|
||||
};
|
||||
|
||||
pve = let
|
||||
pveExporterEnvFile = pkgs.writeTextFile {
|
||||
name = "pve.env";
|
||||
text = ''
|
||||
PVE_USER="test_user@pam"
|
||||
PVE_PASSWORD="hunter3"
|
||||
PVE_VERIFY_SSL="false"
|
||||
pve =
|
||||
let
|
||||
pveExporterEnvFile = pkgs.writeTextFile {
|
||||
name = "pve.env";
|
||||
text = ''
|
||||
PVE_USER="test_user@pam"
|
||||
PVE_PASSWORD="hunter3"
|
||||
PVE_VERIFY_SSL="false"
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
environmentFile = pveExporterEnvFile;
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("prometheus-pve-exporter.service")
|
||||
wait_for_open_port(9221)
|
||||
wait_until_succeeds("curl localhost:9221")
|
||||
'';
|
||||
};
|
||||
in {
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
environmentFile = pveExporterEnvFile;
|
||||
};
|
||||
exporterTest = ''
|
||||
wait_for_unit("prometheus-pve-exporter.service")
|
||||
wait_for_open_port(9221)
|
||||
wait_until_succeeds("curl localhost:9221")
|
||||
'';
|
||||
};
|
||||
|
||||
py-air-control = {
|
||||
nodeName = "py_air_control";
|
||||
|
@ -1362,10 +1420,12 @@ let
|
|||
sabnzbd = {
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
servers = [{
|
||||
baseUrl = "http://localhost:8080";
|
||||
apiKeyFile = "/var/sabnzbd-apikey";
|
||||
}];
|
||||
servers = [
|
||||
{
|
||||
baseUrl = "http://localhost:8080";
|
||||
apiKeyFile = "/var/sabnzbd-apikey";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
metricProvider = {
|
||||
|
@@ -1431,7 +1491,10 @@ let
|
|||
exporterConfig = {
|
||||
enable = true;
|
||||
settings.scripts = [
|
||||
{ name = "success"; script = "sleep 1"; }
|
||||
{
|
||||
name = "success";
|
||||
script = "sleep 1";
|
||||
}
|
||||
];
|
||||
};
|
||||
exporterTest = ''
|
||||
|
@@ -1613,7 +1676,7 @@ let
|
|||
unpoller = {
|
||||
nodeName = "unpoller";
|
||||
exporterConfig.enable = true;
|
||||
exporterConfig.controllers = [{ }];
|
||||
exporterConfig.controllers = [ { } ];
|
||||
exporterTest = ''
|
||||
wait_until_succeeds(
|
||||
'journalctl -eu prometheus-unpoller-exporter.service -o cat | grep "Connection Error"'
|
||||
|
@@ -1653,7 +1716,7 @@ let
|
|||
services.v2ray = {
|
||||
enable = true;
|
||||
config = {
|
||||
stats = {};
|
||||
stats = { };
|
||||
api = {
|
||||
tag = "api";
|
||||
services = [ "StatsService" ];
|
||||
|
@@ -1668,7 +1731,9 @@ let
|
|||
listen = "127.0.0.1";
|
||||
port = 54321;
|
||||
protocol = "dokodemo-door";
|
||||
settings = { address = "127.0.0.1"; };
|
||||
settings = {
|
||||
address = "127.0.0.1";
|
||||
};
|
||||
tag = "api";
|
||||
}
|
||||
];
|
||||
|
@@ -1678,7 +1743,7 @@ let
|
|||
}
|
||||
{
|
||||
protocol = "freedom";
|
||||
settings = {};
|
||||
settings = { };
|
||||
tag = "api";
|
||||
}
|
||||
];
|
||||
|
@@ -1734,21 +1799,28 @@ let
|
|||
'';
|
||||
};
|
||||
|
||||
wireguard = let
|
||||
snakeoil = import ./wireguard/snakeoil-keys.nix;
|
||||
publicKeyWithoutNewlines = replaceStrings [ "\n" ] [ "" ] snakeoil.peer1.publicKey;
|
||||
in
|
||||
wireguard =
|
||||
let
|
||||
snakeoil = import ./wireguard/snakeoil-keys.nix;
|
||||
publicKeyWithoutNewlines = replaceStrings [ "\n" ] [ "" ] snakeoil.peer1.publicKey;
|
||||
in
|
||||
{
|
||||
exporterConfig.enable = true;
|
||||
metricProvider = {
|
||||
networking.wireguard.interfaces.wg0 = {
|
||||
ips = [ "10.23.42.1/32" "fc00::1/128" ];
|
||||
ips = [
|
||||
"10.23.42.1/32"
|
||||
"fc00::1/128"
|
||||
];
|
||||
listenPort = 23542;
|
||||
|
||||
inherit (snakeoil.peer0) privateKey;
|
||||
|
||||
peers = singleton {
|
||||
allowedIPs = [ "10.23.42.2/32" "fc00::2/128" ];
|
||||
allowedIPs = [
|
||||
"10.23.42.2/32"
|
||||
"fc00::2/128"
|
||||
];
|
||||
|
||||
inherit (snakeoil.peer1) publicKey;
|
||||
};
|
||||
|
@@ -1781,8 +1853,9 @@ let
|
|||
};
|
||||
};
|
||||
in
|
||||
mapAttrs
|
||||
(exporter: testConfig: (makeTest (
|
||||
mapAttrs (
|
||||
exporter: testConfig:
|
||||
(makeTest (
|
||||
let
|
||||
nodeName = testConfig.nodeName or exporter;
|
||||
|
||||
|
@ -1790,22 +1863,31 @@ mapAttrs
|
|||
{
|
||||
name = "prometheus-${exporter}-exporter";
|
||||
|
||||
nodes.${nodeName} = mkMerge [{
|
||||
services.prometheus.exporters.${exporter} = testConfig.exporterConfig;
|
||||
} testConfig.metricProvider or { }];
|
||||
nodes.${nodeName} = mkMerge [
|
||||
{
|
||||
services.prometheus.exporters.${exporter} = testConfig.exporterConfig;
|
||||
}
|
||||
testConfig.metricProvider or { }
|
||||
];
|
||||
|
||||
testScript = ''
|
||||
${nodeName}.start()
|
||||
${concatStringsSep "\n" (map (line:
|
||||
if builtins.any (b: b) [
|
||||
(builtins.match "^[[:space:]]*$" line != null)
|
||||
(builtins.substring 0 1 line == "#")
|
||||
(builtins.substring 0 1 line == " ")
|
||||
(builtins.substring 0 1 line == ")")
|
||||
]
|
||||
then line
|
||||
else "${nodeName}.${line}"
|
||||
) (splitString "\n" (removeSuffix "\n" testConfig.exporterTest)))}
|
||||
${concatStringsSep "\n" (
|
||||
map (
|
||||
line:
|
||||
if
|
||||
builtins.any (b: b) [
|
||||
(builtins.match "^[[:space:]]*$" line != null)
|
||||
(builtins.substring 0 1 line == "#")
|
||||
(builtins.substring 0 1 line == " ")
|
||||
(builtins.substring 0 1 line == ")")
|
||||
]
|
||||
then
|
||||
line
|
||||
else
|
||||
"${nodeName}.${line}"
|
||||
) (splitString "\n" (removeSuffix "\n" testConfig.exporterTest))
|
||||
)}
|
||||
${nodeName}.shutdown()
|
||||
'';
|
||||
|
||||
|
@@ -1813,5 +1895,5 @@ mapAttrs
|
|||
maintainers = [ willibutz ];
|
||||
};
|
||||
}
|
||||
)))
|
||||
exporterTests
|
||||
))
|
||||
) exporterTests
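
The reflowed map/filter above is the heart of how these exporter tests run; extracted into a standalone helper (hypothetical name `prefixLines`, a sketch rather than part of the commit, using the lib functions the file already inherits):

  # Prefix every top-level statement of a test snippet with the node name,
  # leaving blank lines, comments, and continuation lines untouched.
  prefixLines = nodeName: snippet:
    concatStringsSep "\n" (
      map (
        line:
        if
          builtins.match "^[[:space:]]*$" line != null
          || builtins.elem (builtins.substring 0 1 line) [ "#" " " ")" ]
        then
          line
        else
          "${nodeName}.${line}"
      ) (splitString "\n" (removeSuffix "\n" snippet));

so a snippet line `wait_for_unit("foo.service")` becomes `node_name.wait_for_unit("foo.service")` in the generated test script.
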
@@ -45,7 +45,10 @@ import ./make-test-python.nix (
name = "restic";
|
||||
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ bbigras i077 ];
|
||||
maintainers = [
|
||||
bbigras
|
||||
i077
|
||||
];
|
||||
};
|
||||
|
||||
nodes = {
|
||||
|
@@ -54,7 +57,14 @@ import ./make-test-python.nix (
|
|||
{
|
||||
services.restic.backups = {
|
||||
remotebackup = {
|
||||
inherit passwordFile paths exclude pruneOpts backupPrepareCommand backupCleanupCommand;
|
||||
inherit
|
||||
passwordFile
|
||||
paths
|
||||
exclude
|
||||
pruneOpts
|
||||
backupPrepareCommand
|
||||
backupCleanupCommand
|
||||
;
|
||||
repository = remoteRepository;
|
||||
initialize = true;
|
||||
timerConfig = null; # has no effect here, just checking that it doesn't break the service
|
||||
|
@@ -72,18 +82,33 @@ import ./make-test-python.nix (
|
|||
'';
|
||||
};
|
||||
inhibit-test = {
|
||||
inherit passwordFile paths exclude pruneOpts;
|
||||
inherit
|
||||
passwordFile
|
||||
paths
|
||||
exclude
|
||||
pruneOpts
|
||||
;
|
||||
repository = remoteInhibitTestRepository;
|
||||
initialize = true;
|
||||
inhibitsSleep = true;
|
||||
};
|
||||
remote-noinit-backup = {
|
||||
inherit passwordFile exclude pruneOpts paths;
|
||||
inherit
|
||||
passwordFile
|
||||
exclude
|
||||
pruneOpts
|
||||
paths
|
||||
;
|
||||
initialize = false;
|
||||
repository = remoteNoInitRepository;
|
||||
};
|
||||
rclonebackup = {
|
||||
inherit passwordFile paths exclude pruneOpts;
|
||||
inherit
|
||||
passwordFile
|
||||
paths
|
||||
exclude
|
||||
pruneOpts
|
||||
;
|
||||
initialize = true;
|
||||
repository = rcloneRepository;
|
||||
rcloneConfig = {
|
||||
File diff suppressed because it is too large
@@ -1,4 +1,17 @@
let generateNodeConf = { lib, pkgs, config, privk, pubk, systemdCreds, peerId, nodeId, ...}: {
|
||||
let
|
||||
generateNodeConf =
|
||||
{
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
privk,
|
||||
pubk,
|
||||
systemdCreds,
|
||||
peerId,
|
||||
nodeId,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [ common/user-account.nix ];
|
||||
systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
|
||||
networking.useNetworkd = true;
|
||||
|
@@ -14,7 +27,10 @@ let generateNodeConf = { lib, pkgs, config, privk, pubk, systemdCreds, peerId, n
|
|||
};
|
||||
netdevs = {
|
||||
"90-wg0" = {
|
||||
netdevConfig = { Kind = "wireguard"; Name = "wg0"; };
|
||||
netdevConfig = {
|
||||
Kind = "wireguard";
|
||||
Name = "wg0";
|
||||
};
|
||||
wireguardConfig = {
|
||||
# Test storing wireguard private key using systemd credentials.
|
||||
PrivateKey = lib.mkIf systemdCreds "@network.wireguard.private";
|
||||
|
@@ -27,13 +43,15 @@ let generateNodeConf = { lib, pkgs, config, privk, pubk, systemdCreds, peerId, n
|
|||
ListenPort = 51820;
|
||||
FirewallMark = 42;
|
||||
};
|
||||
wireguardPeers = [ {
|
||||
Endpoint = "192.168.1.${peerId}:51820";
|
||||
PublicKey = pubk;
|
||||
PresharedKeyFile = pkgs.writeText "psk.key" "yTL3sCOL33Wzi6yCnf9uZQl/Z8laSE+zwpqOHC4HhFU=";
|
||||
AllowedIPs = [ "10.0.0.${peerId}/32" ];
|
||||
PersistentKeepalive = 15;
|
||||
} ];
|
||||
wireguardPeers = [
|
||||
{
|
||||
Endpoint = "192.168.1.${peerId}:51820";
|
||||
PublicKey = pubk;
|
||||
PresharedKeyFile = pkgs.writeText "psk.key" "yTL3sCOL33Wzi6yCnf9uZQl/Z8laSE+zwpqOHC4HhFU=";
|
||||
AllowedIPs = [ "10.0.0.${peerId}/32" ];
|
||||
PersistentKeepalive = 15;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
networks = {
|
||||
|
@ -42,100 +60,148 @@ let generateNodeConf = { lib, pkgs, config, privk, pubk, systemdCreds, peerId, n
|
|||
linkConfig.Unmanaged = true;
|
||||
};
|
||||
"90-wg0" = {
|
||||
matchConfig = { Name = "wg0"; };
|
||||
matchConfig = {
|
||||
Name = "wg0";
|
||||
};
|
||||
address = [ "10.0.0.${nodeId}/32" ];
|
||||
routes = [
|
||||
{ Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; }
|
||||
{ Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; Table = "custom"; }
|
||||
{
|
||||
Gateway = "10.0.0.${nodeId}";
|
||||
Destination = "10.0.0.0/24";
|
||||
}
|
||||
{
|
||||
Gateway = "10.0.0.${nodeId}";
|
||||
Destination = "10.0.0.0/24";
|
||||
Table = "custom";
|
||||
}
|
||||
];
|
||||
};
|
||||
"30-eth1" = {
|
||||
matchConfig = { Name = "eth1"; };
|
||||
matchConfig = {
|
||||
Name = "eth1";
|
||||
};
|
||||
address = [
|
||||
"192.168.1.${nodeId}/24"
|
||||
"fe80::${nodeId}/64"
|
||||
];
|
||||
routingPolicyRules = [
|
||||
{ Table = 10; IncomingInterface = "eth1"; Family = "both"; }
|
||||
{ Table = 20; OutgoingInterface = "eth1"; }
|
||||
{ Table = 30; From = "192.168.1.1"; To = "192.168.1.2"; SourcePort = 666 ; DestinationPort = 667; }
|
||||
{ Table = 40; IPProtocol = "tcp"; InvertRule = true; }
|
||||
{ Table = 50; IncomingInterface = "eth1"; Family = "ipv4"; }
|
||||
{ Table = 60; FirewallMark = 4; }
|
||||
{ Table = 70; FirewallMark = "16/0x1f"; }
|
||||
{
|
||||
Table = 10;
|
||||
IncomingInterface = "eth1";
|
||||
Family = "both";
|
||||
}
|
||||
{
|
||||
Table = 20;
|
||||
OutgoingInterface = "eth1";
|
||||
}
|
||||
{
|
||||
Table = 30;
|
||||
From = "192.168.1.1";
|
||||
To = "192.168.1.2";
|
||||
SourcePort = 666;
|
||||
DestinationPort = 667;
|
||||
}
|
||||
{
|
||||
Table = 40;
|
||||
IPProtocol = "tcp";
|
||||
InvertRule = true;
|
||||
}
|
||||
{
|
||||
Table = 50;
|
||||
IncomingInterface = "eth1";
|
||||
Family = "ipv4";
|
||||
}
|
||||
{
|
||||
Table = 60;
|
||||
FirewallMark = 4;
|
||||
}
|
||||
{
|
||||
Table = 70;
|
||||
FirewallMark = "16/0x1f";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
in import ./make-test-python.nix ({pkgs, ... }: {
|
||||
name = "networkd";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ picnoir ];
|
||||
};
|
||||
nodes = {
|
||||
node1 = { pkgs, ... }@attrs:
|
||||
let localConf = {
|
||||
privk = "GDiXWlMQKb379XthwX0haAbK6hTdjblllpjGX0heP00=";
|
||||
pubk = "iRxpqj42nnY0Qz8MAQbSm7bXxXP5hkPqWYIULmvW+EE=";
|
||||
systemdCreds = false;
|
||||
nodeId = "1";
|
||||
peerId = "2";
|
||||
in
|
||||
import ./make-test-python.nix (
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "networkd";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ picnoir ];
|
||||
};
|
||||
in generateNodeConf (attrs // localConf);
|
||||
nodes = {
|
||||
node1 =
|
||||
{ pkgs, ... }@attrs:
|
||||
let
|
||||
localConf = {
|
||||
privk = "GDiXWlMQKb379XthwX0haAbK6hTdjblllpjGX0heP00=";
|
||||
pubk = "iRxpqj42nnY0Qz8MAQbSm7bXxXP5hkPqWYIULmvW+EE=";
|
||||
systemdCreds = false;
|
||||
nodeId = "1";
|
||||
peerId = "2";
|
||||
};
|
||||
in
|
||||
generateNodeConf (attrs // localConf);
|
||||
|
||||
node2 = { pkgs, ... }@attrs:
|
||||
let localConf = {
|
||||
privk = "eHxSI2jwX/P4AOI0r8YppPw0+4NZnjOxfbS5mt06K2k=";
|
||||
pubk = "27s0OvaBBdHoJYkH9osZpjpgSOVNw+RaKfboT/Sfq0g=";
|
||||
systemdCreds = true;
|
||||
nodeId = "2";
|
||||
peerId = "1";
|
||||
node2 =
|
||||
{ pkgs, ... }@attrs:
|
||||
let
|
||||
localConf = {
|
||||
privk = "eHxSI2jwX/P4AOI0r8YppPw0+4NZnjOxfbS5mt06K2k=";
|
||||
pubk = "27s0OvaBBdHoJYkH9osZpjpgSOVNw+RaKfboT/Sfq0g=";
|
||||
systemdCreds = true;
|
||||
nodeId = "2";
|
||||
peerId = "1";
|
||||
};
|
||||
in
|
||||
generateNodeConf (attrs // localConf);
|
||||
};
|
||||
in generateNodeConf (attrs // localConf);
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
node1.systemctl("start systemd-networkd-wait-online@eth1.service")
|
||||
node1.systemctl("start systemd-networkd-wait-online.service")
|
||||
node1.wait_for_unit("systemd-networkd-wait-online@eth1.service")
|
||||
node1.wait_for_unit("systemd-networkd-wait-online.service")
|
||||
node2.systemctl("start systemd-networkd-wait-online@eth1.service")
|
||||
node2.systemctl("start systemd-networkd-wait-online.service")
|
||||
node2.wait_for_unit("systemd-networkd-wait-online@eth1.service")
|
||||
node2.wait_for_unit("systemd-networkd-wait-online.service")
|
||||
testScript = ''
|
||||
start_all()
|
||||
node1.systemctl("start systemd-networkd-wait-online@eth1.service")
|
||||
node1.systemctl("start systemd-networkd-wait-online.service")
|
||||
node1.wait_for_unit("systemd-networkd-wait-online@eth1.service")
|
||||
node1.wait_for_unit("systemd-networkd-wait-online.service")
|
||||
node2.systemctl("start systemd-networkd-wait-online@eth1.service")
|
||||
node2.systemctl("start systemd-networkd-wait-online.service")
|
||||
node2.wait_for_unit("systemd-networkd-wait-online@eth1.service")
|
||||
node2.wait_for_unit("systemd-networkd-wait-online.service")
|
||||
|
||||
# ================================
|
||||
# Networkd Config
|
||||
# ================================
|
||||
node1.succeed("grep RouteTable=custom:23 /etc/systemd/networkd.conf")
|
||||
node1.succeed("sudo ip route show table custom | grep '10.0.0.0/24 via 10.0.0.1 dev wg0 proto static'")
|
||||
# ================================
|
||||
# Networkd Config
|
||||
# ================================
|
||||
node1.succeed("grep RouteTable=custom:23 /etc/systemd/networkd.conf")
|
||||
node1.succeed("sudo ip route show table custom | grep '10.0.0.0/24 via 10.0.0.1 dev wg0 proto static'")
|
||||
|
||||
# ================================
|
||||
# Wireguard
|
||||
# ================================
|
||||
node1.succeed("ping -c 5 10.0.0.2")
|
||||
node2.succeed("ping -c 5 10.0.0.1")
|
||||
# Is the fwmark set?
|
||||
node2.succeed("wg | grep -q 42")
|
||||
# ================================
|
||||
# Wireguard
|
||||
# ================================
|
||||
node1.succeed("ping -c 5 10.0.0.2")
|
||||
node2.succeed("ping -c 5 10.0.0.1")
|
||||
# Is the fwmark set?
|
||||
node2.succeed("wg | grep -q 42")
|
||||
|
||||
# ================================
|
||||
# Routing Policies
|
||||
# ================================
|
||||
# Testing all the routingPolicyRuleConfig members:
|
||||
# Table + IncomingInterface
|
||||
node1.succeed("sudo ip rule | grep 'from all iif eth1 lookup 10'")
|
||||
# OutgoingInterface
|
||||
node1.succeed("sudo ip rule | grep 'from all oif eth1 lookup 20'")
|
||||
# From + To + SourcePort + DestinationPort
|
||||
node1.succeed(
|
||||
"sudo ip rule | grep 'from 192.168.1.1 to 192.168.1.2 sport 666 dport 667 lookup 30'"
|
||||
)
|
||||
# IPProtocol + InvertRule
|
||||
node1.succeed("sudo ip rule | grep 'not from all ipproto tcp lookup 40'")
|
||||
# FirewallMark without a mask
|
||||
node1.succeed("sudo ip rule | grep 'from all fwmark 0x4 lookup 60'")
|
||||
# FirewallMark with a mask
|
||||
node1.succeed("sudo ip rule | grep 'from all fwmark 0x10/0x1f lookup 70'")
|
||||
'';
|
||||
})
|
||||
# ================================
|
||||
# Routing Policies
|
||||
# ================================
|
||||
# Testing all the routingPolicyRuleConfig members:
|
||||
# Table + IncomingInterface
|
||||
node1.succeed("sudo ip rule | grep 'from all iif eth1 lookup 10'")
|
||||
# OutgoingInterface
|
||||
node1.succeed("sudo ip rule | grep 'from all oif eth1 lookup 20'")
|
||||
# From + To + SourcePort + DestinationPort
|
||||
node1.succeed(
|
||||
"sudo ip rule | grep 'from 192.168.1.1 to 192.168.1.2 sport 666 dport 667 lookup 30'"
|
||||
)
|
||||
# IPProtocol + InvertRule
|
||||
node1.succeed("sudo ip rule | grep 'not from all ipproto tcp lookup 40'")
|
||||
# FirewallMark without a mask
|
||||
node1.succeed("sudo ip rule | grep 'from all fwmark 0x4 lookup 60'")
|
||||
# FirewallMark with a mask
|
||||
node1.succeed("sudo ip rule | grep 'from all fwmark 0x10/0x1f lookup 70'")
|
||||
'';
|
||||
}
|
||||
)
|
||||
|
|
|
@@ -1,35 +1,45 @@
# Test UniFi controller

{ system ? builtins.currentSystem
, config ? { allowUnfree = true; }
, pkgs ? import ../.. { inherit system config; }
{
system ? builtins.currentSystem,
config ? {
allowUnfree = true;
},
pkgs ? import ../.. { inherit system config; },
}:

with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;

let
makeAppTest = unifi: makeTest {
name = "unifi-controller-${unifi.version}";
meta = with pkgs.lib.maintainers; {
maintainers = [ patryk27 zhaofengli ];
};

nodes.server = {
nixpkgs.config = config;

services.unifi = {
enable = true;
unifiPackage = unifi;
openFirewall = false;
makeAppTest =
unifi:
makeTest {
name = "unifi-controller-${unifi.version}";
meta = with pkgs.lib.maintainers; {
maintainers = [
patryk27
zhaofengli
];
};
};

testScript = ''
server.wait_for_unit("unifi.service")
server.wait_until_succeeds("curl -Lk https://localhost:8443 >&2", timeout=300)
'';
};
in with pkgs; {
nodes.server = {
nixpkgs.config = config;

services.unifi = {
enable = true;
unifiPackage = unifi;
openFirewall = false;
};
};

testScript = ''
server.wait_for_unit("unifi.service")
server.wait_until_succeeds("curl -Lk https://localhost:8443 >&2", timeout=300)
'';
};
in
with pkgs;
{
unifi8 = makeAppTest unifi8;
}

@@ -1,301 +1,345 @@
{ system ? builtins.currentSystem,
config ? {},
{
system ? builtins.currentSystem,
config ? { },
pkgs ? import ../.. { inherit system config; },
debug ? false,
enableUnfree ? false,
enableKvm ? false,
use64bitGuest ? true
use64bitGuest ? true,
}:

with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;

let
testVMConfig = vmName: attrs: { config, pkgs, lib, ... }: let
guestAdditions = pkgs.linuxPackages.virtualboxGuestAdditions;
testVMConfig =
vmName: attrs:
{
config,
pkgs,
lib,
...
}:
let
guestAdditions = pkgs.linuxPackages.virtualboxGuestAdditions;

miniInit = ''
#!${pkgs.runtimeShell} -xe
export PATH="${lib.makeBinPath [ pkgs.coreutils pkgs.util-linux ]}"
miniInit = ''
#!${pkgs.runtimeShell} -xe
export PATH="${
lib.makeBinPath [
pkgs.coreutils
pkgs.util-linux
]
}"

mkdir -p /run/dbus /var
ln -s /run /var
cat > /etc/passwd <<EOF
root:x:0:0::/root:/bin/false
messagebus:x:1:1::/run/dbus:/bin/false
EOF
cat > /etc/group <<EOF
root:x:0:
messagebus:x:1:
EOF
mkdir -p /run/dbus /var
ln -s /run /var
cat > /etc/passwd <<EOF
root:x:0:0::/root:/bin/false
messagebus:x:1:1::/run/dbus:/bin/false
EOF
cat > /etc/group <<EOF
root:x:0:
messagebus:x:1:
EOF

"${pkgs.dbus}/bin/dbus-daemon" --fork \
--config-file="${pkgs.dbus}/share/dbus-1/system.conf"
"${pkgs.dbus}/bin/dbus-daemon" --fork \
--config-file="${pkgs.dbus}/share/dbus-1/system.conf"

${guestAdditions}/bin/VBoxService
${(attrs.vmScript or (const "")) pkgs}
${guestAdditions}/bin/VBoxService
${(attrs.vmScript or (const "")) pkgs}

i=0
while [ ! -e /mnt-root/shutdown ]; do
sleep 10
i=$(($i + 10))
[ $i -le 120 ] || fail
done
i=0
while [ ! -e /mnt-root/shutdown ]; do
sleep 10
i=$(($i + 10))
[ $i -le 120 ] || fail
done

rm -f /mnt-root/boot-done /mnt-root/shutdown
'';
in {
boot.kernelParams = [
"console=tty0" "console=ttyS0" "ignore_loglevel"
"boot.trace" "panic=1" "boot.panic_on_fail"
"init=${pkgs.writeScript "mini-init.sh" miniInit}"
];

fileSystems."/" = {
device = "vboxshare";
fsType = "vboxsf";
};

virtualisation.virtualbox.guest.enable = true;

boot.initrd.kernelModules = [
"af_packet" "vboxsf"
"virtio" "virtio_pci" "virtio_ring" "virtio_net" "vboxguest"
];

boot.initrd.extraUtilsCommands = ''
copy_bin_and_libs "${guestAdditions}/bin/mount.vboxsf"
copy_bin_and_libs "${pkgs.util-linux}/bin/unshare"
${(attrs.extraUtilsCommands or (const "")) pkgs}
'';

boot.initrd.postMountCommands = ''
touch /mnt-root/boot-done
hostname "${vmName}"
mkdir -p /nix/store
unshare -m ${escapeShellArg pkgs.runtimeShell} -c '
mount -t vboxsf nixstore /nix/store
exec "$stage2Init"
'
poweroff -f
'';

system.requiredKernelConfig = with config.lib.kernelConfig; [
(isYes "SERIAL_8250_CONSOLE")
(isYes "SERIAL_8250")
];

networking.usePredictableInterfaceNames = false;
};

mkLog = logfile: tag: let
rotated = map (i: "${logfile}.${toString i}") (range 1 9);
all = concatMapStringsSep " " (f: "\"${f}\"") ([logfile] ++ rotated);
logcmd = "tail -F ${all} 2> /dev/null | logger -t \"${tag}\"";
in if debug then "machine.execute(ru('${logcmd} & disown'))" else "pass";

testVM = vmName: vmScript: let
cfg = (import ../lib/eval-config.nix {
system = if use64bitGuest then "x86_64-linux" else "i686-linux";
modules = [
(testVMConfig vmName vmScript)
rm -f /mnt-root/boot-done /mnt-root/shutdown
'';
in
{
boot.kernelParams = [
"console=tty0"
"console=ttyS0"
"ignore_loglevel"
"boot.trace"
"panic=1"
"boot.panic_on_fail"
"init=${pkgs.writeScript "mini-init.sh" miniInit}"
];
}).config;
in pkgs.vmTools.runInLinuxVM (pkgs.runCommand "virtualbox-image" {
preVM = ''
mkdir -p "$out"
diskImage="$(pwd)/qimage"
${pkgs.vmTools.qemu}/bin/qemu-img create -f raw "$diskImage" 100M
'';

postVM = ''
echo "creating VirtualBox disk image..."
${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -O vdi \
"$diskImage" "$out/disk.vdi"
'';

buildInputs = [ pkgs.util-linux pkgs.perl ];
} ''
${pkgs.parted}/sbin/parted --script /dev/vda mklabel msdos
${pkgs.parted}/sbin/parted --script /dev/vda -- mkpart primary ext2 1M -1s
${pkgs.e2fsprogs}/sbin/mkfs.ext4 /dev/vda1
${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
mkdir /mnt
mount /dev/vda1 /mnt
cp "${cfg.system.build.kernel}/bzImage" /mnt/linux
cp "${cfg.system.build.initialRamdisk}/initrd" /mnt/initrd

${pkgs.grub2}/bin/grub-install --boot-directory=/mnt /dev/vda

cat > /mnt/grub/grub.cfg <<GRUB
set root=hd0,1
linux /linux ${concatStringsSep " " cfg.boot.kernelParams}
initrd /initrd
boot
GRUB
umount /mnt
'');

createVM = name: attrs: let
mkFlags = concatStringsSep " ";

sharePath = "/home/alice/vboxshare-${name}";

createFlags = mkFlags [
"--ostype ${if use64bitGuest then "Linux26_64" else "Linux26"}"
"--register"
];

vmFlags = mkFlags ([
"--uart1 0x3F8 4"
"--uartmode1 client /run/virtualbox-log-${name}.sock"
"--memory 768"
"--audio none"
] ++ (attrs.vmFlags or []));

controllerFlags = mkFlags [
"--name SATA"
"--add sata"
"--bootable on"
"--hostiocache on"
];

diskFlags = mkFlags [
"--storagectl SATA"
"--port 0"
"--device 0"
"--type hdd"
"--mtype immutable"
"--medium ${testVM name attrs}/disk.vdi"
];

sharedFlags = mkFlags [
"--name vboxshare"
"--hostpath ${sharePath}"
];

nixstoreFlags = mkFlags [
"--name nixstore"
"--hostpath /nix/store"
"--readonly"
];
in {
machine = {
systemd.sockets."vboxtestlog-${name}" = {
description = "VirtualBox Test Machine Log Socket For ${name}";
wantedBy = [ "sockets.target" ];
before = [ "multi-user.target" ];
socketConfig.ListenStream = "/run/virtualbox-log-${name}.sock";
socketConfig.Accept = true;
fileSystems."/" = {
device = "vboxshare";
fsType = "vboxsf";
};

systemd.services."vboxtestlog-${name}@" = {
description = "VirtualBox Test Machine Log For ${name}";
serviceConfig.StandardInput = "socket";
serviceConfig.StandardOutput = "journal";
serviceConfig.SyslogIdentifier = "GUEST-${name}";
serviceConfig.ExecStart = "${pkgs.coreutils}/bin/cat";
};
virtualisation.virtualbox.guest.enable = true;

boot.initrd.kernelModules = [
"af_packet"
"vboxsf"
"virtio"
"virtio_pci"
"virtio_ring"
"virtio_net"
"vboxguest"
];

boot.initrd.extraUtilsCommands = ''
copy_bin_and_libs "${guestAdditions}/bin/mount.vboxsf"
copy_bin_and_libs "${pkgs.util-linux}/bin/unshare"
${(attrs.extraUtilsCommands or (const "")) pkgs}
'';

boot.initrd.postMountCommands = ''
touch /mnt-root/boot-done
hostname "${vmName}"
mkdir -p /nix/store
unshare -m ${escapeShellArg pkgs.runtimeShell} -c '
mount -t vboxsf nixstore /nix/store
exec "$stage2Init"
'
poweroff -f
'';

system.requiredKernelConfig = with config.lib.kernelConfig; [
(isYes "SERIAL_8250_CONSOLE")
(isYes "SERIAL_8250")
];

networking.usePredictableInterfaceNames = false;
};

testSubs = ''
mkLog =
logfile: tag:
let
rotated = map (i: "${logfile}.${toString i}") (range 1 9);
all = concatMapStringsSep " " (f: "\"${f}\"") ([ logfile ] ++ rotated);
logcmd = "tail -F ${all} 2> /dev/null | logger -t \"${tag}\"";
in
if debug then "machine.execute(ru('${logcmd} & disown'))" else "pass";

testVM =
vmName: vmScript:
let
cfg =
(import ../lib/eval-config.nix {
system = if use64bitGuest then "x86_64-linux" else "i686-linux";
modules = [
(testVMConfig vmName vmScript)
];
}).config;
in
pkgs.vmTools.runInLinuxVM (
pkgs.runCommand "virtualbox-image"
{
preVM = ''
mkdir -p "$out"
diskImage="$(pwd)/qimage"
${pkgs.vmTools.qemu}/bin/qemu-img create -f raw "$diskImage" 100M
'';

postVM = ''
echo "creating VirtualBox disk image..."
${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -O vdi \
"$diskImage" "$out/disk.vdi"
'';

buildInputs = [
pkgs.util-linux
pkgs.perl
];
}
''
${pkgs.parted}/sbin/parted --script /dev/vda mklabel msdos
${pkgs.parted}/sbin/parted --script /dev/vda -- mkpart primary ext2 1M -1s
${pkgs.e2fsprogs}/sbin/mkfs.ext4 /dev/vda1
${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
mkdir /mnt
mount /dev/vda1 /mnt
cp "${cfg.system.build.kernel}/bzImage" /mnt/linux
cp "${cfg.system.build.initialRamdisk}/initrd" /mnt/initrd

${pkgs.grub2}/bin/grub-install --boot-directory=/mnt /dev/vda

cat > /mnt/grub/grub.cfg <<GRUB
set root=hd0,1
linux /linux ${concatStringsSep " " cfg.boot.kernelParams}
initrd /initrd
boot
GRUB
umount /mnt
''
);

createVM =
name: attrs:
let
mkFlags = concatStringsSep " ";

sharePath = "/home/alice/vboxshare-${name}";

createFlags = mkFlags [
"--ostype ${if use64bitGuest then "Linux26_64" else "Linux26"}"
"--register"
];

vmFlags = mkFlags (
[
"--uart1 0x3F8 4"
"--uartmode1 client /run/virtualbox-log-${name}.sock"
"--memory 768"
"--audio none"
]
++ (attrs.vmFlags or [ ])
);

controllerFlags = mkFlags [
"--name SATA"
"--add sata"
"--bootable on"
"--hostiocache on"
];

diskFlags = mkFlags [
"--storagectl SATA"
"--port 0"
"--device 0"
"--type hdd"
"--mtype immutable"
"--medium ${testVM name attrs}/disk.vdi"
];

sharedFlags = mkFlags [
"--name vboxshare"
"--hostpath ${sharePath}"
];

nixstoreFlags = mkFlags [
"--name nixstore"
"--hostpath /nix/store"
"--readonly"
];
in
{
machine = {
systemd.sockets."vboxtestlog-${name}" = {
description = "VirtualBox Test Machine Log Socket For ${name}";
wantedBy = [ "sockets.target" ];
before = [ "multi-user.target" ];
socketConfig.ListenStream = "/run/virtualbox-log-${name}.sock";
socketConfig.Accept = true;
};

systemd.services."vboxtestlog-${name}@" = {
description = "VirtualBox Test Machine Log For ${name}";
serviceConfig.StandardInput = "socket";
serviceConfig.StandardOutput = "journal";
serviceConfig.SyslogIdentifier = "GUEST-${name}";
serviceConfig.ExecStart = "${pkgs.coreutils}/bin/cat";
};
};

testSubs = ''


${name}_sharepath = "${sharePath}"
${name}_sharepath = "${sharePath}"


def check_running_${name}():
cmd = "VBoxManage list runningvms | grep -q '^\"${name}\"'"
(status, _) = machine.execute(ru(cmd))
return status == 0
def check_running_${name}():
cmd = "VBoxManage list runningvms | grep -q '^\"${name}\"'"
(status, _) = machine.execute(ru(cmd))
return status == 0


def cleanup_${name}():
if check_running_${name}():
machine.execute(ru("VBoxManage controlvm ${name} poweroff"))
machine.succeed("rm -rf ${sharePath}")
machine.succeed("mkdir -p ${sharePath}")
machine.succeed("chown alice:users ${sharePath}")
def cleanup_${name}():
if check_running_${name}():
machine.execute(ru("VBoxManage controlvm ${name} poweroff"))
machine.succeed("rm -rf ${sharePath}")
machine.succeed("mkdir -p ${sharePath}")
machine.succeed("chown alice:users ${sharePath}")


def create_vm_${name}():
cleanup_${name}()
vbm("createvm --name ${name} ${createFlags}")
vbm("modifyvm ${name} ${vmFlags}")
vbm("setextradata ${name} VBoxInternal/PDM/HaltOnReset 1")
vbm("storagectl ${name} ${controllerFlags}")
vbm("storageattach ${name} ${diskFlags}")
vbm("sharedfolder add ${name} ${sharedFlags}")
vbm("sharedfolder add ${name} ${nixstoreFlags}")
def create_vm_${name}():
cleanup_${name}()
vbm("createvm --name ${name} ${createFlags}")
vbm("modifyvm ${name} ${vmFlags}")
vbm("setextradata ${name} VBoxInternal/PDM/HaltOnReset 1")
vbm("storagectl ${name} ${controllerFlags}")
vbm("storageattach ${name} ${diskFlags}")
vbm("sharedfolder add ${name} ${sharedFlags}")
vbm("sharedfolder add ${name} ${nixstoreFlags}")

${mkLog "$HOME/VirtualBox VMs/${name}/Logs/VBox.log" "HOST-${name}"}
${mkLog "$HOME/VirtualBox VMs/${name}/Logs/VBox.log" "HOST-${name}"}


def destroy_vm_${name}():
cleanup_${name}()
vbm("unregistervm ${name} --delete")
def destroy_vm_${name}():
cleanup_${name}()
vbm("unregistervm ${name} --delete")


def wait_for_vm_boot_${name}():
machine.execute(
ru(
"set -e; i=0; "
"while ! test -e ${sharePath}/boot-done; do "
"sleep 10; i=$(($i + 10)); [ $i -le 3600 ]; "
"VBoxManage list runningvms | grep -q '^\"${name}\"'; "
"done"
)
)
def wait_for_vm_boot_${name}():
machine.execute(
ru(
"set -e; i=0; "
"while ! test -e ${sharePath}/boot-done; do "
"sleep 10; i=$(($i + 10)); [ $i -le 3600 ]; "
"VBoxManage list runningvms | grep -q '^\"${name}\"'; "
"done"
)
)


def wait_for_ip_${name}(interface):
property = f"/VirtualBox/GuestInfo/Net/{interface}/V4/IP"
getip = f"VBoxManage guestproperty get ${name} {property} | sed -n -e 's/^Value: //p'"
def wait_for_ip_${name}(interface):
property = f"/VirtualBox/GuestInfo/Net/{interface}/V4/IP"
getip = f"VBoxManage guestproperty get ${name} {property} | sed -n -e 's/^Value: //p'"

ip = machine.succeed(
ru(
"for i in $(seq 1000); do "
f'if ipaddr="$({getip})" && [ -n "$ipaddr" ]; then '
'echo "$ipaddr"; exit 0; '
"fi; "
"sleep 1; "
"done; "
"echo 'Could not get IPv4 address for ${name}!' >&2; "
"exit 1"
)
).strip()
return ip
ip = machine.succeed(
ru(
"for i in $(seq 1000); do "
f'if ipaddr="$({getip})" && [ -n "$ipaddr" ]; then '
'echo "$ipaddr"; exit 0; '
"fi; "
"sleep 1; "
"done; "
"echo 'Could not get IPv4 address for ${name}!' >&2; "
"exit 1"
)
).strip()
return ip


def wait_for_startup_${name}(nudge=lambda: None):
for _ in range(0, 130, 10):
machine.sleep(10)
if check_running_${name}():
return
nudge()
raise Exception("VirtualBox VM didn't start up within 2 minutes")
def wait_for_startup_${name}(nudge=lambda: None):
for _ in range(0, 130, 10):
machine.sleep(10)
if check_running_${name}():
return
nudge()
raise Exception("VirtualBox VM didn't start up within 2 minutes")


def wait_for_shutdown_${name}():
for _ in range(0, 130, 10):
machine.sleep(10)
if not check_running_${name}():
return
raise Exception("VirtualBox VM didn't shut down within 2 minutes")
def wait_for_shutdown_${name}():
for _ in range(0, 130, 10):
machine.sleep(10)
if not check_running_${name}():
return
raise Exception("VirtualBox VM didn't shut down within 2 minutes")


def shutdown_vm_${name}():
machine.succeed(ru("touch ${sharePath}/shutdown"))
machine.execute(
"set -e; i=0; "
"while test -e ${sharePath}/shutdown "
" -o -e ${sharePath}/boot-done; do "
"sleep 1; i=$(($i + 1)); [ $i -le 3600 ]; "
"done"
)
wait_for_shutdown_${name}()
'';
};
def shutdown_vm_${name}():
machine.succeed(ru("touch ${sharePath}/shutdown"))
machine.execute(
"set -e; i=0; "
"while test -e ${sharePath}/shutdown "
" -o -e ${sharePath}/boot-done; do "
"sleep 1; i=$(($i + 1)); [ $i -le 3600 ]; "
"done"
)
wait_for_shutdown_${name}()
'';
};

hostonlyVMFlags = [
"--nictype1 virtio"
@@ -322,7 +366,7 @@ let
'';

vboxVMs = mapAttrs createVM {
simple = {};
simple = { };

detectvirt.vmScript = sysdDetectVirt;
@@ -340,65 +384,82 @@ let
testExtensionPack.vmFlags = enableExtensionPackVMFlags;
};

mkVBoxTest = vboxHostConfig: vms: name: testScript: makeTest {
name = "virtualbox-${name}";
mkVBoxTest =
vboxHostConfig: vms: name: testScript:
makeTest {
name = "virtualbox-${name}";

nodes.machine = { lib, config, ... }: {
imports = let
mkVMConf = name: val: val.machine // { key = "${name}-config"; };
vmConfigs = mapAttrsToList mkVMConf vms;
in [ ./common/user-account.nix ./common/x11.nix ] ++ vmConfigs;
virtualisation.memorySize = 2048;
nodes.machine =
{ lib, config, ... }:
{
imports =
let
mkVMConf = name: val: val.machine // { key = "${name}-config"; };
vmConfigs = mapAttrsToList mkVMConf vms;
in
[
./common/user-account.nix
./common/x11.nix
]
++ vmConfigs;
virtualisation.memorySize = 2048;

virtualisation.qemu.options = let
# IvyBridge is reasonably ancient to be compatible with recent
# Intel/AMD hosts and sufficient for the KVM flavor.
guestCpu = if config.virtualisation.virtualbox.host.enableKvm then "IvyBridge" else "kvm64";
in ["-cpu" "${guestCpu},svm=on,vmx=on"];
virtualisation.qemu.options =
let
# IvyBridge is reasonably ancient to be compatible with recent
# Intel/AMD hosts and sufficient for the KVM flavor.
guestCpu = if config.virtualisation.virtualbox.host.enableKvm then "IvyBridge" else "kvm64";
in
[
"-cpu"
"${guestCpu},svm=on,vmx=on"
];

test-support.displayManager.auto.user = "alice";
users.users.alice.extraGroups = let
inherit (config.virtualisation.virtualbox.host) enableHardening;
in lib.mkIf enableHardening [ "vboxusers" ];
test-support.displayManager.auto.user = "alice";
users.users.alice.extraGroups =
let
inherit (config.virtualisation.virtualbox.host) enableHardening;
in
lib.mkIf enableHardening [ "vboxusers" ];

virtualisation.virtualbox.host = {
enable = true;
} // vboxHostConfig;
virtualisation.virtualbox.host = {
enable = true;
} // vboxHostConfig;

nixpkgs.config.allowUnfree = config.virtualisation.virtualbox.host.enableExtensionPack;
nixpkgs.config.allowUnfree = config.virtualisation.virtualbox.host.enableExtensionPack;
};

testScript = ''
from shlex import quote
${concatStrings (mapAttrsToList (_: getAttr "testSubs") vms)}

def ru(cmd: str) -> str:
return f"su - alice -c {quote(cmd)}"


def vbm(cmd: str) -> str:
return machine.succeed(ru(f"VBoxManage {cmd}"))


def remove_uuids(output: str) -> str:
return "\n".join(
[line for line in (output or "").splitlines() if not line.startswith("UUID:")]
)


machine.wait_for_x()

${mkLog "$HOME/.config/VirtualBox/VBoxSVC.log" "HOST-SVC"}

${testScript}
# (keep black happy)
'';

meta = with pkgs.lib.maintainers; {
maintainers = [ aszlig ];
};
};

testScript = ''
from shlex import quote
${concatStrings (mapAttrsToList (_: getAttr "testSubs") vms)}

def ru(cmd: str) -> str:
return f"su - alice -c {quote(cmd)}"


def vbm(cmd: str) -> str:
return machine.succeed(ru(f"VBoxManage {cmd}"))


def remove_uuids(output: str) -> str:
return "\n".join(
[line for line in (output or "").splitlines() if not line.startswith("UUID:")]
)


machine.wait_for_x()

${mkLog "$HOME/.config/VirtualBox/VBoxSVC.log" "HOST-SVC"}

${testScript}
# (keep black happy)
'';

meta = with pkgs.lib.maintainers; {
maintainers = [ aszlig ];
};
};

unfreeTests = mapAttrs (mkVBoxTest { enableExtensionPack = true; } vboxVMsWithExtpack) {
enable-extension-pack = ''
create_vm_testExtensionPack()
@@ -418,24 +479,28 @@ let
'';
};

kvmTests = mapAttrs (mkVBoxTest {
enableKvm = true;
kvmTests =
mapAttrs
(mkVBoxTest {
enableKvm = true;

# Once the KVM version supports these, we can enable them.
addNetworkInterface = false;
enableHardening = false;
} vboxVMs) {
kvm-headless = ''
create_vm_headless()
machine.succeed(ru("VBoxHeadless --startvm headless >&2 & disown %1"))
wait_for_startup_headless()
wait_for_vm_boot_headless()
shutdown_vm_headless()
destroy_vm_headless()
'';
};
# Once the KVM version supports these, we can enable them.
addNetworkInterface = false;
enableHardening = false;
} vboxVMs)
{
kvm-headless = ''
create_vm_headless()
machine.succeed(ru("VBoxHeadless --startvm headless >&2 & disown %1"))
wait_for_startup_headless()
wait_for_vm_boot_headless()
shutdown_vm_headless()
destroy_vm_headless()
'';
};

in mapAttrs (mkVBoxTest {} vboxVMs) {
in
mapAttrs (mkVBoxTest { } vboxVMs) {
simple-gui = ''
# Home to select Tools, down to move to the VM, enter to start it.
def send_vm_startup():

@@ -1,38 +1,43 @@
import ./make-test-python.nix ({ pkgs, ... }: {
name = "wasabibackend";
meta = with pkgs.lib.maintainers; {
maintainers = [ mmahut ];
};

nodes = {
machine = { ... }: {
services.wasabibackend = {
enable = true;
network = "testnet";
rpc = {
user = "alice";
port = 18332;
};
};
services.bitcoind."testnet" = {
enable = true;
testnet = true;
rpc.users = {
alice.passwordHMAC = "e7096bc21da60b29ecdbfcdb2c3acc62$f948e61cb587c399358ed99c6ed245a41460b4bf75125d8330c9f6fcc13d7ae7";
};
};
import ./make-test-python.nix (
{ pkgs, ... }:
{
name = "wasabibackend";
meta = with pkgs.lib.maintainers; {
maintainers = [ mmahut ];
};
};

testScript = ''
start_all()
machine.wait_for_unit("wasabibackend.service")
machine.wait_until_succeeds(
"grep 'Wasabi Backend started' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt"
)
machine.sleep(5)
machine.succeed(
"grep 'Config is successfully initialized' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt"
)
'';
})
nodes = {
machine =
{ ... }:
{
services.wasabibackend = {
enable = true;
network = "testnet";
rpc = {
user = "alice";
port = 18332;
};
};
services.bitcoind."testnet" = {
enable = true;
testnet = true;
rpc.users = {
alice.passwordHMAC = "e7096bc21da60b29ecdbfcdb2c3acc62$f948e61cb587c399358ed99c6ed245a41460b4bf75125d8330c9f6fcc13d7ae7";
};
};
};
};

testScript = ''
start_all()
machine.wait_for_unit("wasabibackend.service")
machine.wait_until_succeeds(
"grep 'Wasabi Backend started' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt"
)
machine.sleep(5)
machine.succeed(
"grep 'Config is successfully initialized' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt"
)
'';
}
)

@@ -43,6 +43,10 @@ in
server =
{ pkgs, ... }:
{
environment.systemPackages = [
pkgs.curlHTTP3
];

services.h2o = {
enable = true;
defaultHTTPListenPort = port.HTTP;
@@ -60,6 +64,9 @@ in
"${domain.TLS}" = {
tls = {
policy = "force";
quic = {
retry = "ON";
};
identity = [
{
key-file = ../../common/acme/server/acme.test.key.pem;
@@ -68,6 +75,8 @@ in
];
extraSettings = {
minimum-version = "TLSv1.3";
# when using common ACME certs, disable talking to CA
ocsp-update-interval = 0;
};
};
settings = {
@@ -97,10 +106,15 @@ in
];

networking = {
firewall.allowedTCPPorts = with port; [
HTTP
TLS
];
firewall = {
allowedTCPPorts = with port; [
HTTP
TLS
];
allowedUDPPorts = with port; [
TLS
];
};
extraHosts = ''
127.0.0.1 ${domain.HTTP}
127.0.0.1 ${domain.TLS}
@@ -108,7 +122,6 @@ in
};
};
};

testScript =
let
portStrHTTP = builtins.toString port.HTTP;
@@ -120,23 +133,26 @@ in
server.wait_for_open_port(${portStrHTTP})
server.wait_for_open_port(${portStrTLS})

http_hello_world_body = server.succeed("curl --fail-with-body 'http://${domain.HTTP}:${portStrHTTP}/hello_world.txt'")
assert "${sawatdi_chao_lok}" in http_hello_world_body
assert "${sawatdi_chao_lok}" in server.succeed("curl --fail-with-body 'http://${domain.HTTP}:${portStrHTTP}/hello_world.txt'")

tls_hello_world_head = server.succeed("curl -v --head --compressed --http2 --tlsv1.3 --fail-with-body 'https://${domain.TLS}:${portStrTLS}/hello_world.rst'").lower()
assert "http/2 200" in tls_hello_world_head
assert "server: h2o" in tls_hello_world_head
assert "content-type: text/x-rst" in tls_hello_world_head

tls_hello_world_body = server.succeed("curl -v --http2 --tlsv1.3 --compressed --fail-with-body 'https://${domain.TLS}:${portStrTLS}/hello_world.rst'")
assert "${sawatdi_chao_lok}" in tls_hello_world_body
assert "${sawatdi_chao_lok}" in server.succeed("curl -v --http2 --tlsv1.3 --compressed --fail-with-body 'https://${domain.TLS}:${portStrTLS}/hello_world.rst'")

tls_hello_world_head_redirected = server.succeed("curl -v --head --fail-with-body 'http://${domain.TLS}:${builtins.toString port.HTTP}/hello_world.rst'").lower()
assert "redirected" in tls_hello_world_head_redirected
quic_hello_world_head = server.succeed("curl -v --head --compressed --http3-only --fail-with-body 'https://${domain.TLS}:${portStrTLS}/hello_world.rst'").lower()
assert "http/3 200" in quic_hello_world_head
assert "server: h2o" in quic_hello_world_head
assert "content-type: text/x-rst" in quic_hello_world_head

assert "${sawatdi_chao_lok}" in server.succeed("curl -v --http3-only --compressed --fail-with-body 'https://${domain.TLS}:${portStrTLS}/hello_world.rst'")

assert "redirected" in server.succeed("curl -v --head --fail-with-body 'http://${domain.TLS}:${portStrHTTP}/hello_world.rst'").lower()

server.fail("curl --location --max-redirs 0 'http://${domain.TLS}:${portStrHTTP}/hello_world.rst'")

tls_hello_world_body_redirected = server.succeed("curl -v --location --fail-with-body 'http://${domain.TLS}:${portStrHTTP}/hello_world.rst'")
assert "${sawatdi_chao_lok}" in tls_hello_world_body_redirected
assert "${sawatdi_chao_lok}" in server.succeed("curl -v --location --fail-with-body 'http://${domain.TLS}:${portStrHTTP}/hello_world.rst'")
'';
}

@@ -58,10 +58,8 @@ in
server.wait_for_unit("h2o.service")
server.wait_for_open_port(${portStr})

hello_world = server.succeed("curl --fail-with-body http://${domain}:${portStr}/hello_world")
assert "${sawatdi_chao_lok}" in hello_world
assert "${sawatdi_chao_lok}" in server.succeed("curl --fail-with-body http://${domain}:${portStr}/hello_world")

file_handler = server.succeed("curl --fail-with-body http://${domain}:${portStr}/file_handler")
assert "FILE_HANDLER" in file_handler
assert "FILE_HANDLER" in server.succeed("curl --fail-with-body http://${domain}:${portStr}/file_handler")
'';
}

@@ -39,6 +39,10 @@ let
certificate-file = ../../common/acme/server/acme.test.cert.pem;
}
];
extraSettings = {
# when using common ACME certs, disable talking to CA
ocsp-update-interval = 0;
};
};
settings = {
paths."/"."file.file" = "${hello_txt recommendations}";

@@ -1,34 +1,46 @@
{ system ? builtins.currentSystem
, config ? { }
, pkgs ? import ../../.. { inherit system config; }
{
system ? builtins.currentSystem,
config ? { },
pkgs ? import ../../.. { inherit system config; },
# Test current default (LTS) and latest kernel
, kernelVersionsToTest ? [ (pkgs.lib.versions.majorMinor pkgs.linuxPackages.kernel.version) "latest" ]
kernelVersionsToTest ? [
(pkgs.lib.versions.majorMinor pkgs.linuxPackages.kernel.version)
"latest"
],
}:

with pkgs.lib;

let
tests = let callTest = p: args: import p ({ inherit system pkgs; } // args); in {
basic = callTest ./basic.nix;
amneziawg = callTest ./amneziawg.nix;
namespaces = callTest ./namespaces.nix;
networkd = callTest ./networkd.nix;
wg-quick = callTest ./wg-quick.nix;
wg-quick-nftables = args: callTest ./wg-quick.nix ({ nftables = true; } // args);
amneziawg-quick = callTest ./amneziawg-quick.nix;
generated = callTest ./generated.nix;
dynamic-refresh = callTest ./dynamic-refresh.nix;
dynamic-refresh-networkd = args: callTest ./dynamic-refresh.nix ({ useNetworkd = true; } // args);
};
tests =
let
callTest = p: args: import p ({ inherit system pkgs; } // args);
in
{
basic = callTest ./basic.nix;
amneziawg = callTest ./amneziawg.nix;
namespaces = callTest ./namespaces.nix;
networkd = callTest ./networkd.nix;
wg-quick = callTest ./wg-quick.nix;
wg-quick-nftables = args: callTest ./wg-quick.nix ({ nftables = true; } // args);
amneziawg-quick = callTest ./amneziawg-quick.nix;
generated = callTest ./generated.nix;
dynamic-refresh = callTest ./dynamic-refresh.nix;
dynamic-refresh-networkd = args: callTest ./dynamic-refresh.nix ({ useNetworkd = true; } // args);
};
in

listToAttrs (
flip concatMap kernelVersionsToTest (version:
flip concatMap kernelVersionsToTest (
version:
let
v' = replaceStrings [ "." ] [ "_" ] version;
in
flip mapAttrsToList tests (name: test:
nameValuePair "wireguard-${name}-linux-${v'}" (test { kernelPackages = pkgs."linuxPackages_${v'}"; })
flip mapAttrsToList tests (
name: test:
nameValuePair "wireguard-${name}-linux-${v'}" (test {
kernelPackages = pkgs."linuxPackages_${v'}";
})
)
)
)

@@ -75,7 +75,10 @@ rec {
};
};

networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall.allowedTCPPorts = [
80
443
];
networking.hosts."127.0.0.1" = [
"site1.local"
"site2.local"

@@ -1,6 +1,7 @@
{ system ? builtins.currentSystem,
config ? {},
pkgs ? import ../.. { inherit system config; }
{
system ? builtins.currentSystem,
config ? { },
pkgs ? import ../.. { inherit system config; },
}:

with import ../lib/testing-python.nix { inherit system pkgs; };
@@ -8,10 +9,11 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
let

makeZfsTest =
{ kernelPackages
, enableSystemdStage1 ? false
, zfsPackage
, extraTest ? ""
{
kernelPackages,
enableSystemdStage1 ? false,
zfsPackage,
extraTest ? "",
}:
makeTest {
name = zfsPackage.kernelModuleAttribute;
@@ -19,177 +21,189 @@ let
maintainers = [ elvishjerricco ];
};

nodes.machine = { config, pkgs, lib, ... }:
nodes.machine =
{
config,
pkgs,
lib,
...
}:
let
usersharePath = "/var/lib/samba/usershares";
in {
virtualisation = {
emptyDiskImages = [ 4096 4096 ];
useBootLoader = true;
useEFIBoot = true;
};
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 0;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostId = "deadbeef";
boot.kernelPackages = kernelPackages;
boot.zfs.package = zfsPackage;
boot.supportedFilesystems = [ "zfs" ];
boot.initrd.systemd.enable = enableSystemdStage1;
in
{
virtualisation = {
emptyDiskImages = [
4096
4096
];
useBootLoader = true;
useEFIBoot = true;
};
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 0;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostId = "deadbeef";
boot.kernelPackages = kernelPackages;
boot.zfs.package = zfsPackage;
boot.supportedFilesystems = [ "zfs" ];
boot.initrd.systemd.enable = enableSystemdStage1;

environment.systemPackages = [ pkgs.parted ];
environment.systemPackages = [ pkgs.parted ];

# /dev/disk/by-id doesn't get populated in the NixOS test framework
boot.zfs.devNodes = "/dev/disk/by-uuid";
# /dev/disk/by-id doesn't get populated in the NixOS test framework
boot.zfs.devNodes = "/dev/disk/by-uuid";

specialisation.samba.configuration = {
services.samba = {
enable = true;
settings.global = {
"registry shares" = true;
"usershare path" = "${usersharePath}";
"usershare allow guests" = true;
"usershare max shares" = "100";
"usershare owner only" = false;
specialisation.samba.configuration = {
services.samba = {
enable = true;
settings.global = {
"registry shares" = true;
"usershare path" = "${usersharePath}";
"usershare allow guests" = true;
"usershare max shares" = "100";
"usershare owner only" = false;
};
};
systemd.services.samba-smbd.serviceConfig.ExecStartPre =
"${pkgs.coreutils}/bin/mkdir -m +t -p ${usersharePath}";
virtualisation.fileSystems = {
"/tmp/mnt" = {
device = "rpool/root";
fsType = "zfs";
};
};
};
systemd.services.samba-smbd.serviceConfig.ExecStartPre =
"${pkgs.coreutils}/bin/mkdir -m +t -p ${usersharePath}";
virtualisation.fileSystems = {
"/tmp/mnt" = {
device = "rpool/root";

specialisation.encryption.configuration = {
boot.zfs.requestEncryptionCredentials = [ "automatic" ];
virtualisation.fileSystems."/automatic" = {
device = "automatic";
fsType = "zfs";
};
virtualisation.fileSystems."/manual" = {
device = "manual";
fsType = "zfs";
};
virtualisation.fileSystems."/manual/encrypted" = {
device = "manual/encrypted";
fsType = "zfs";
options = [ "noauto" ];
};
virtualisation.fileSystems."/manual/httpkey" = {
device = "manual/httpkey";
fsType = "zfs";
options = [ "noauto" ];
};
};
};

specialisation.encryption.configuration = {
boot.zfs.requestEncryptionCredentials = [ "automatic" ];
virtualisation.fileSystems."/automatic" = {
device = "automatic";
fsType = "zfs";
specialisation.forcepool.configuration = {
systemd.services.zfs-import-forcepool.wantedBy = lib.mkVMOverride [ "forcepool.mount" ];
systemd.targets.zfs.wantedBy = lib.mkVMOverride [ ];
boot.zfs.forceImportAll = true;
virtualisation.fileSystems."/forcepool" = {
device = "forcepool";
fsType = "zfs";
options = [ "noauto" ];
};
};
virtualisation.fileSystems."/manual" = {
device = "manual";
fsType = "zfs";
};
virtualisation.fileSystems."/manual/encrypted" = {
device = "manual/encrypted";
fsType = "zfs";
options = [ "noauto" ];
};
virtualisation.fileSystems."/manual/httpkey" = {
device = "manual/httpkey";
fsType = "zfs";
options = [ "noauto" ];
};
};

specialisation.forcepool.configuration = {
systemd.services.zfs-import-forcepool.wantedBy = lib.mkVMOverride [ "forcepool.mount" ];
systemd.targets.zfs.wantedBy = lib.mkVMOverride [];
boot.zfs.forceImportAll = true;
virtualisation.fileSystems."/forcepool" = {
device = "forcepool";
fsType = "zfs";
options = [ "noauto" ];
};
};

services.nginx = {
enable = true;
virtualHosts = {
localhost = {
locations = {
"/zfskey" = {
return = ''200 "httpkeyabc"'';
services.nginx = {
enable = true;
virtualHosts = {
localhost = {
locations = {
"/zfskey" = {
return = ''200 "httpkeyabc"'';
};
};
};
};
};
};
};

testScript = ''
machine.wait_for_unit("multi-user.target")
machine.succeed(
"zpool status",
"parted --script /dev/vdb mklabel msdos",
"parted --script /dev/vdb -- mkpart primary 1024M -1s",
"parted --script /dev/vdc mklabel msdos",
"parted --script /dev/vdc -- mkpart primary 1024M -1s",
)
testScript =
''
machine.wait_for_unit("multi-user.target")
machine.succeed(
"zpool status",
"parted --script /dev/vdb mklabel msdos",
"parted --script /dev/vdb -- mkpart primary 1024M -1s",
"parted --script /dev/vdc mklabel msdos",
"parted --script /dev/vdc -- mkpart primary 1024M -1s",
)

with subtest("sharesmb works"):
machine.succeed(
"zpool create rpool /dev/vdb1",
"zfs create -o mountpoint=legacy rpool/root",
# shared datasets cannot have legacy mountpoint
"zfs create rpool/shared_smb",
"bootctl set-default nixos-generation-1-specialisation-samba.conf",
"sync",
)
machine.crash()
machine.wait_for_unit("multi-user.target")
machine.succeed("zfs set sharesmb=on rpool/shared_smb")
machine.succeed(
"smbclient -gNL localhost | grep rpool_shared_smb",
"umount /tmp/mnt",
"zpool destroy rpool",
)
with subtest("sharesmb works"):
machine.succeed(
"zpool create rpool /dev/vdb1",
"zfs create -o mountpoint=legacy rpool/root",
# shared datasets cannot have legacy mountpoint
"zfs create rpool/shared_smb",
"bootctl set-default nixos-generation-1-specialisation-samba.conf",
"sync",
)
machine.crash()
machine.wait_for_unit("multi-user.target")
machine.succeed("zfs set sharesmb=on rpool/shared_smb")
machine.succeed(
"smbclient -gNL localhost | grep rpool_shared_smb",
"umount /tmp/mnt",
"zpool destroy rpool",
)

with subtest("encryption works"):
machine.succeed(
'echo password | zpool create -O mountpoint=legacy '
+ "-O encryption=aes-256-gcm -O keyformat=passphrase automatic /dev/vdb1",
"zpool create -O mountpoint=legacy manual /dev/vdc1",
"echo otherpass | zfs create "
+ "-o encryption=aes-256-gcm -o keyformat=passphrase manual/encrypted",
"zfs create -o encryption=aes-256-gcm -o keyformat=passphrase "
+ "-o keylocation=http://localhost/zfskey manual/httpkey",
"bootctl set-default nixos-generation-1-specialisation-encryption.conf",
"sync",
"zpool export automatic",
"zpool export manual",
)
machine.crash()
machine.start()
machine.wait_for_console_text("Starting password query on")
machine.send_console("password\n")
machine.wait_for_unit("multi-user.target")
machine.succeed(
"zfs get -Ho value keystatus manual/encrypted | grep -Fx unavailable",
"echo otherpass | zfs load-key manual/encrypted",
"systemctl start manual-encrypted.mount",
"zfs load-key manual/httpkey",
"systemctl start manual-httpkey.mount",
"umount /automatic /manual/encrypted /manual/httpkey /manual",
"zpool destroy automatic",
"zpool destroy manual",
)
with subtest("encryption works"):
machine.succeed(
'echo password | zpool create -O mountpoint=legacy '
+ "-O encryption=aes-256-gcm -O keyformat=passphrase automatic /dev/vdb1",
"zpool create -O mountpoint=legacy manual /dev/vdc1",
"echo otherpass | zfs create "
+ "-o encryption=aes-256-gcm -o keyformat=passphrase manual/encrypted",
"zfs create -o encryption=aes-256-gcm -o keyformat=passphrase "
+ "-o keylocation=http://localhost/zfskey manual/httpkey",
"bootctl set-default nixos-generation-1-specialisation-encryption.conf",
"sync",
"zpool export automatic",
"zpool export manual",
)
machine.crash()
machine.start()
machine.wait_for_console_text("Starting password query on")
machine.send_console("password\n")
machine.wait_for_unit("multi-user.target")
machine.succeed(
"zfs get -Ho value keystatus manual/encrypted | grep -Fx unavailable",
"echo otherpass | zfs load-key manual/encrypted",
"systemctl start manual-encrypted.mount",
"zfs load-key manual/httpkey",
"systemctl start manual-httpkey.mount",
"umount /automatic /manual/encrypted /manual/httpkey /manual",
"zpool destroy automatic",
"zpool destroy manual",
)

with subtest("boot.zfs.forceImportAll works"):
machine.succeed(
"rm /etc/hostid",
"zgenhostid deadcafe",
"zpool create forcepool /dev/vdb1 -O mountpoint=legacy",
"bootctl set-default nixos-generation-1-specialisation-forcepool.conf",
"rm /etc/hostid",
"sync",
)
machine.crash()
machine.wait_for_unit("multi-user.target")
machine.fail("zpool import forcepool")
machine.succeed(
"systemctl start forcepool.mount",
"mount | grep forcepool",
)
'' + extraTest;
with subtest("boot.zfs.forceImportAll works"):
machine.succeed(
"rm /etc/hostid",
"zgenhostid deadcafe",
"zpool create forcepool /dev/vdb1 -O mountpoint=legacy",
"bootctl set-default nixos-generation-1-specialisation-forcepool.conf",
"rm /etc/hostid",
"sync",
)
machine.crash()
machine.wait_for_unit("multi-user.target")
machine.fail("zpool import forcepool")
machine.succeed(
"systemctl start forcepool.mount",
"mount | grep forcepool",
)
''
+ extraTest;

};


in {
in
{

series_2_2 = makeZfsTest {
zfsPackage = pkgs.zfs_2_2;
@@ -218,22 +232,32 @@ in {
expand-partitions = makeTest {
name = "multi-disk-zfs";
nodes = {
machine = { pkgs, ... }: {
environment.systemPackages = [ pkgs.parted ];
boot.supportedFilesystems = [ "zfs" ];
networking.hostId = "00000000";
machine =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.parted ];
boot.supportedFilesystems = [ "zfs" ];
networking.hostId = "00000000";

virtualisation = {
emptyDiskImages = [ 20480 20480 20480 20480 20480 20480 ];
};
virtualisation = {
emptyDiskImages = [
20480
20480
20480
20480
20480
20480
];
};

specialisation.resize.configuration = {
services.zfs.expandOnBoot = [ "tank" ];
specialisation.resize.configuration = {
services.zfs.expandOnBoot = [ "tank" ];
};
};
};
};

testScript = { nodes, ... }:
testScript =
{ nodes, ... }:
''
start_all()
machine.wait_for_unit("default.target")