Mirror of https://github.com/NixOS/nixpkgs.git, synced 2025-06-09 19:13:26 +03:00
nixosTests: handleTest -> runTest, batch 1
Reference: https://github.com/NixOS/nixpkgs/issues/386873
Commit f34483be5e (parent 131462b962)
538 changed files with 35525 additions and 36600 deletions
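The per-file change in this batch is mechanical: each test drops its import ./make-test-python.nix ( ... ) wrapper and becomes a plain test module, and the corresponding entry in the test list is switched from handleTest to runTest. A minimal sketch of the pattern, using a hypothetical test named "example" (the registration lines are illustrative and not part of the diffs shown below):

# Before: the test wraps itself in make-test-python.nix (old handleTest style).
import ./make-test-python.nix (
  { pkgs, lib, ... }:
  {
    name = "example";
    nodes.machine = { };
    testScript = "machine.wait_for_unit('multi-user.target')";
  }
)

# After: the wrapper and the trailing ")" are removed and the body is
# de-indented one level; the file is now a plain module consumed by runTest.
{ pkgs, lib, ... }:
{
  name = "example";
  nodes.machine = { };
  testScript = "machine.wait_for_unit('multi-user.target')";
}

# Registration (hypothetical entries in nixos/tests/all-tests.nix, not shown in this diff):
#   example = handleTest ./example.nix { };   # before
#   example = runTest ./example.nix;          # after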
File diff suppressed because it is too large
@@ -1,131 +1,128 @@
{ pkgs, lib, ... }:
let
  baud = 57600;
  tty = "/dev/ttyACM0";
  port = "tnc0";
  socatPort = 1234;

  createAX25Node = nodeId: {
    boot.kernelPackages = pkgs.linuxPackages_ham;
    boot.kernelModules = [ "ax25" ];

    networking.firewall.allowedTCPPorts = [ socatPort ];

    environment.systemPackages = with pkgs; [
      libax25
      ax25-tools
      ax25-apps
      socat
    ];

    services.ax25.axports."${port}" = {
      inherit baud tty;
      enable = true;
      callsign = "NOCALL-${toString nodeId}";
      description = "mocked tnc";
    };

    services.ax25.axlisten = {
      enable = true;
    };

    # All mocks radios will connect back to socat-broker on node 1 in order to get
    # all messages that are "broadcasted over the ether"
    systemd.services.ax25-mock-hardware = {
      description = "mock AX.25 TNC and Radio";
      wantedBy = [ "default.target" ];
      before = [
        "ax25-kissattach-${port}.service"
        "axlisten.service"
      ];
      after = [ "network.target" ];
      serviceConfig = {
        Type = "exec";
        ExecStart = "${pkgs.socat}/bin/socat -d -d tcp:192.168.1.1:${toString socatPort} pty,link=${tty},b${toString baud},raw";
      };
    };
  };
in
{
  name = "ax25Simple";
  nodes = {
    node1 = lib.mkMerge [
      (createAX25Node 1)
      # mimicking radios on the same frequency
      {
        systemd.services.ax25-mock-ether = {
          description = "mock radio ether";
          wantedBy = [ "default.target" ];
          requires = [ "network.target" ];
          before = [ "ax25-mock-hardware.service" ];
          # broken needs access to "ss" or "netstat"
          path = [ pkgs.iproute2 ];
          serviceConfig = {
            Type = "exec";
            ExecStart = "${pkgs.socat}/bin/socat-broker.sh tcp4-listen:${toString socatPort}";
          };
          postStart = "${pkgs.coreutils}/bin/sleep 2";
        };
      }
    ];
    node2 = createAX25Node 2;
    node3 = createAX25Node 3;
  };
  testScript =
    { ... }:
    ''
      def wait_for_machine(m):
          m.succeed("lsmod | grep ax25")
          m.wait_for_unit("ax25-axports.target")
          m.wait_for_unit("axlisten.service")
          m.fail("journalctl -o cat -u axlisten.service | grep -i \"no AX.25 port data configured\"")

      # start the first node since the socat-broker needs to be running
      node1.start()
      node1.wait_for_unit("ax25-mock-ether.service")
      wait_for_machine(node1)

      node2.start()
      node3.start()
      wait_for_machine(node2)
      wait_for_machine(node3)

      # Node 1 -> Node 2
      node1.succeed("echo hello | ax25_call ${port} NOCALL-1 NOCALL-2")
      node2.sleep(1)
      node2.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-1 to NOCALL-2 ctl I00\" | grep hello")

      # Node 1 -> Node 3
      node1.succeed("echo hello | ax25_call ${port} NOCALL-1 NOCALL-3")
      node3.sleep(1)
      node3.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-1 to NOCALL-3 ctl I00\" | grep hello")

      # Node 2 -> Node 1
      # must sleep due to previous ax25_call lingering
      node2.sleep(5)
      node2.succeed("echo hello | ax25_call ${port} NOCALL-2 NOCALL-1")
      node1.sleep(1)
      node1.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-2 to NOCALL-1 ctl I00\" | grep hello")

      # Node 2 -> Node 3
      node2.succeed("echo hello | ax25_call ${port} NOCALL-2 NOCALL-3")
      node3.sleep(1)
      node3.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-2 to NOCALL-3 ctl I00\" | grep hello")

      # Node 3 -> Node 1
      # must sleep due to previous ax25_call lingering
      node3.sleep(5)
      node3.succeed("echo hello | ax25_call ${port} NOCALL-3 NOCALL-1")
      node1.sleep(1)
      node1.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-3 to NOCALL-1 ctl I00\" | grep hello")

      # Node 3 -> Node 2
      node3.succeed("echo hello | ax25_call ${port} NOCALL-3 NOCALL-2")
      node2.sleep(1)
      node2.succeed("journalctl -o cat -u axlisten.service | grep -A1 \"NOCALL-3 to NOCALL-2 ctl I00\" | grep hello")
    '';
}

@@ -1,64 +1,62 @@
{ pkgs, lib, ... }:
let
  user = "alice";
in
{
  name = "benchexec";

  nodes.benchexec = {
    imports = [ ./common/user-account.nix ];

    programs.benchexec = {
      enable = true;
      users = [ user ];
    };
  };

  testScript =
    { ... }:
    let
      runexec = lib.getExe' pkgs.benchexec "runexec";
      echo = builtins.toString pkgs.benchexec;
      test = lib.getExe (
        pkgs.writeShellApplication rec {
          name = "test";
          meta.mainProgram = name;
          text = "echo '${echo}'";
        }
      );
      wd = "/tmp";
      stdout = "${wd}/runexec.out";
      stderr = "${wd}/runexec.err";
    in
    ''
      start_all()
      machine.wait_for_unit("multi-user.target")
      benchexec.succeed(''''\
        systemd-run \
        --property='StandardOutput=file:${stdout}' \
        --property='StandardError=file:${stderr}' \
        --unit=runexec --wait --user --machine='${user}@' \
        --working-directory ${wd} \
        '${runexec}' \
        --debug \
        --read-only-dir / \
        --hidden-dir /home \
        '${test}' \
      '''')
      benchexec.succeed("grep -s '${echo}' ${wd}/output.log")
      benchexec.succeed("test \"$(grep -Ec '((start|wall|cpu)time|memory)=' ${stdout})\" = 4")
      benchexec.succeed("! grep -E '(WARNING|ERROR)' ${stderr}")
    '';

  interactive.nodes.benchexec.services.kmscon = {
    enable = true;
    fonts = [
      {
        name = "Fira Code";
        package = pkgs.fira-code;
      }
    ];
  };
}

@@ -1,57 +1,55 @@
{ pkgs, ... }:
{
  name = "bitcoind";
  meta = with pkgs.lib; {
    maintainers = with maintainers; [ _1000101 ];
  };

  nodes.machine =
    { ... }:
    {
      services.bitcoind."mainnet" = {
        enable = true;
        rpc = {
          port = 8332;
          users.rpc.passwordHMAC = "acc2374e5f9ba9e62a5204d3686616cf$53abdba5e67a9005be6a27ca03a93ce09e58854bc2b871523a0d239a72968033";
          users.rpc2.passwordHMAC = "1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225";
        };
      };

      environment.etc."test.blank".text = "";
      services.bitcoind."testnet" = {
        enable = true;
        configFile = "/etc/test.blank";
        testnet = true;
        rpc = {
          port = 18332;
        };
        extraCmdlineOptions = [
          "-rpcuser=rpc"
          "-rpcpassword=rpc"
          "-rpcauth=rpc2:1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225"
        ];
      };
    };

  testScript = ''
    start_all()

    machine.wait_for_unit("bitcoind-mainnet.service")
    machine.wait_for_unit("bitcoind-testnet.service")

    machine.wait_until_succeeds(
        'curl --fail --user rpc:rpc --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:8332 | grep \'"chain":"main"\' '
    )
    machine.wait_until_succeeds(
        'curl --fail --user rpc2:rpc2 --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:8332 | grep \'"chain":"main"\' '
    )
    machine.wait_until_succeeds(
        'curl --fail --user rpc:rpc --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:18332 | grep \'"chain":"test"\' '
    )
    machine.wait_until_succeeds(
        'curl --fail --user rpc2:rpc2 --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:18332 | grep \'"chain":"test"\' '
    )
  '';
}

@@ -6,199 +6,197 @@
# which only works if the first client successfully uses the UPnP-IGD
# protocol to poke a hole in the NAT.

{ pkgs, ... }:

let

  # Some random file to serve.
  file = pkgs.hello.src;

  internalRouterAddress = "192.168.3.1";
  internalClient1Address = "192.168.3.2";
  externalRouterAddress = "80.100.100.1";
  externalClient2Address = "80.100.100.2";
  externalTrackerAddress = "80.100.100.3";

  download-dir = "/var/lib/transmission/Downloads";
  transmissionConfig =
    { pkgs, ... }:
    {
      environment.systemPackages = [ pkgs.transmission_3 ];
      services.transmission = {
        enable = true;
        settings = {
          dht-enabled = false;
          message-level = 2;
          inherit download-dir;
        };
      };
    };
in

{
  name = "bittorrent";
  meta = with pkgs.lib.maintainers; {
    maintainers = [
      domenkozar
      rob
      bobvanderlinden
    ];
  };

  nodes = {
    tracker =
      { pkgs, ... }:
      {
        imports = [ transmissionConfig ];

        virtualisation.vlans = [ 1 ];
        networking.firewall.enable = false;
        networking.interfaces.eth1.ipv4.addresses = [
          {
            address = externalTrackerAddress;
            prefixLength = 24;
          }
        ];

        # We need Apache on the tracker to serve the torrents.
        services.httpd = {
          enable = true;
          virtualHosts = {
            "torrentserver.org" = {
              adminAddr = "foo@example.org";
              documentRoot = "/tmp";
            };
          };
        };
        services.opentracker.enable = true;
      };

    router =
      { pkgs, nodes, ... }:
      {
        virtualisation.vlans = [
          1
          2
        ];
        networking.nat.enable = true;
        networking.nat.internalInterfaces = [ "eth2" ];
        networking.nat.externalInterface = "eth1";
        networking.firewall.enable = true;
        networking.firewall.trustedInterfaces = [ "eth2" ];
        networking.interfaces.eth0.ipv4.addresses = [ ];
        networking.interfaces.eth1.ipv4.addresses = [
          {
            address = externalRouterAddress;
            prefixLength = 24;
          }
        ];
        networking.interfaces.eth2.ipv4.addresses = [
          {
            address = internalRouterAddress;
            prefixLength = 24;
          }
        ];
        services.miniupnpd = {
          enable = true;
          externalInterface = "eth1";
          internalIPs = [ "eth2" ];
          appendConfig = ''
            ext_ip=${externalRouterAddress}
          '';
        };
      };

    client1 =
      { pkgs, nodes, ... }:
      {
        imports = [ transmissionConfig ];
        environment.systemPackages = [ pkgs.miniupnpc ];

        virtualisation.vlans = [ 2 ];
        networking.interfaces.eth0.ipv4.addresses = [ ];
        networking.interfaces.eth1.ipv4.addresses = [
          {
            address = internalClient1Address;
            prefixLength = 24;
          }
        ];
        networking.defaultGateway = internalRouterAddress;
        networking.firewall.enable = false;
      };

    client2 =
      { pkgs, ... }:
      {
        imports = [ transmissionConfig ];

        virtualisation.vlans = [ 1 ];
        networking.interfaces.eth0.ipv4.addresses = [ ];
        networking.interfaces.eth1.ipv4.addresses = [
          {
            address = externalClient2Address;
            prefixLength = 24;
          }
        ];
        networking.firewall.enable = false;
      };
  };

  testScript =
    { nodes, ... }:
    ''
      start_all()

      # Wait for network and miniupnpd.
      router.systemctl("start network-online.target")
      router.wait_for_unit("network-online.target")
      router.wait_for_unit("miniupnpd")

      # Create the torrent.
      tracker.succeed("mkdir ${download-dir}/data")
      tracker.succeed(
          "cp ${file} ${download-dir}/data/test.tar.bz2"
      )
      tracker.succeed(
          "transmission-create ${download-dir}/data/test.tar.bz2 --private --tracker http://${externalTrackerAddress}:6969/announce --outfile /tmp/test.torrent"
      )
      tracker.succeed("chmod 644 /tmp/test.torrent")

      # Start the tracker. !!! use a less crappy tracker
      tracker.systemctl("start network-online.target")
      tracker.wait_for_unit("network-online.target")
      tracker.wait_for_unit("opentracker.service")
      tracker.wait_for_open_port(6969)

      # Start the initial seeder.
      tracker.succeed(
          "transmission-remote --add /tmp/test.torrent --no-portmap --no-dht --download-dir ${download-dir}/data"
      )

      # Now we should be able to download from the client behind the NAT.
      tracker.wait_for_unit("httpd")
      client1.systemctl("start network-online.target")
      client1.wait_for_unit("network-online.target")
      client1.succeed("transmission-remote --add http://${externalTrackerAddress}/test.torrent >&2 &")
      client1.wait_for_file("${download-dir}/test.tar.bz2")
      client1.succeed(
          "cmp ${download-dir}/test.tar.bz2 ${file}"
      )

      # Bring down the initial seeder.
      tracker.stop_job("transmission")

      # Now download from the second client. This can only succeed if
      # the first client created a NAT hole in the router.
      client2.systemctl("start network-online.target")
      client2.wait_for_unit("network-online.target")
      client2.succeed(
          "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht >&2 &"
      )
      client2.wait_for_file("${download-dir}/test.tar.bz2")
      client2.succeed(
          "cmp ${download-dir}/test.tar.bz2 ${file}"
      )
    '';
}

@@ -1,33 +1,31 @@
{ pkgs, ... }:
{
  name = "blockbook-frontend";
  meta = with pkgs.lib; {
    maintainers = with maintainers; [ _1000101 ];
  };

  nodes.machine =
    { ... }:
    {
      services.blockbook-frontend."test" = {
        enable = true;
      };
      services.bitcoind.mainnet = {
        enable = true;
        rpc = {
          port = 8030;
          users.rpc.passwordHMAC = "acc2374e5f9ba9e62a5204d3686616cf$53abdba5e67a9005be6a27ca03a93ce09e58854bc2b871523a0d239a72968033";
        };
      };
    };

  testScript = ''
    start_all()
    machine.wait_for_unit("blockbook-frontend-test.service")

    machine.wait_for_open_port(9030)

    machine.succeed("curl -sSfL http://localhost:9030 | grep 'Blockbook'")
  '';
}

@@ -1,193 +1,191 @@
{ pkgs, ... }:
{
  name = "boot-stage1";

  nodes.machine =
    {
      config,
      pkgs,
      lib,
      ...
    }:
    {
      boot.extraModulePackages =
        let
          compileKernelModule =
            name: source:
            pkgs.runCommandCC name
              rec {
                inherit source;
                kdev = config.boot.kernelPackages.kernel.dev;
                kver = config.boot.kernelPackages.kernel.modDirVersion;
                ksrc = "${kdev}/lib/modules/${kver}/build";
                hardeningDisable = [ "pic" ];
                nativeBuildInputs = kdev.moduleBuildDependencies;
              }
              ''
                echo "obj-m += $name.o" > Makefile
                echo "$source" > "$name.c"
                make -C "$ksrc" M=$(pwd) modules
                install -vD "$name.ko" "$out/lib/modules/$kver/$name.ko"
              '';

          # This spawns a kthread which just waits until it gets a signal and
          # terminates if that is the case. We want to make sure that nothing during
          # the boot process kills any kthread by accident, like what happened in
          # issue #15226.
          kcanary = compileKernelModule "kcanary" ''
            #include <linux/version.h>
            #include <linux/init.h>
            #include <linux/module.h>
            #include <linux/kernel.h>
            #include <linux/kthread.h>
            #include <linux/sched.h>
            #include <linux/signal.h>
            #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
            #include <linux/sched/signal.h>
            #endif

            MODULE_LICENSE("GPL");

            struct task_struct *canaryTask;

            static int kcanary(void *nothing)
            {
              allow_signal(SIGINT);
              allow_signal(SIGTERM);
              allow_signal(SIGKILL);
              while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout_interruptible(msecs_to_jiffies(100));
                if (signal_pending(current)) break;
              }
              return 0;
            }

            static int kcanaryInit(void)
            {
              kthread_run(&kcanary, NULL, "kcanary");
              return 0;
            }

            static void kcanaryExit(void)
            {
              kthread_stop(canaryTask);
            }

            module_init(kcanaryInit);
            module_exit(kcanaryExit);
          '';

        in
        lib.singleton kcanary;

      boot.initrd.kernelModules = [ "kcanary" ];

      boot.initrd.extraUtilsCommands =
        let
          compile =
            name: source:
            pkgs.runCommandCC name { inherit source; } ''
              mkdir -p "$out/bin"
              echo "$source" | gcc -Wall -o "$out/bin/$name" -xc -
            '';

          daemonize =
            name: source:
            compile name ''
              #include <stdio.h>
              #include <unistd.h>

              void runSource(void) {
                ${source}
              }

              int main(void) {
                if (fork() > 0) return 0;
                setsid();
                runSource();
                return 1;
              }
            '';

          mkCmdlineCanary =
            {
              name,
              cmdline ? "",
              source ? "",
            }:
            (daemonize name ''
              char *argv[] = {"${cmdline}", NULL};
              execvp("${name}-child", argv);
            '')
            // {
              child = compile "${name}-child" ''
                #include <stdio.h>
                #include <unistd.h>

                int main(void) {
                  ${source}
                  while (1) sleep(1);
                  return 1;
                }
              '';
            };

          copyCanaries = lib.concatMapStrings (canary: ''
            ${lib.optionalString (canary ? child) ''
              copy_bin_and_libs "${canary.child}/bin/${canary.child.name}"
            ''}
            copy_bin_and_libs "${canary}/bin/${canary.name}"
          '');

        in
        copyCanaries [
          # Simple canary process which just sleeps forever and should be killed by
          # stage 2.
          (daemonize "canary1" "while (1) sleep(1);")

          # We want this canary process to try mimicking a kthread using a cmdline
          # with a zero length so we can make sure that the process is properly
          # killed in stage 1.
          (mkCmdlineCanary {
            name = "canary2";
            source = ''
              FILE *f;
              f = fopen("/run/canary2.pid", "w");
              fprintf(f, "%d\n", getpid());
              fclose(f);
            '';
          })

          # This canary process mimics a storage daemon, which we do NOT want to be
          # killed before going into stage 2. For more on root storage daemons, see:
          # https://www.freedesktop.org/wiki/Software/systemd/RootStorageDaemons/
          (mkCmdlineCanary {
            name = "canary3";
            cmdline = "@canary3";
          })
        ];

      boot.initrd.postMountCommands = ''
        canary1
        canary2
        canary3
        # Make sure the pidfile of canary 2 is created so that we still can get
        # its former pid after the killing spree starts next within stage 1.
        while [ ! -s /run/canary2.pid ]; do sleep 0.1; done
      '';
    };

  testScript = ''
    machine.wait_for_unit("multi-user.target")
    machine.succeed("test -s /run/canary2.pid")
    machine.fail("pgrep -a canary1")
    machine.fail("kill -0 $(< /run/canary2.pid)")
    machine.succeed('pgrep -a -f "^@canary3$"')
    machine.succeed('pgrep -a -f "^\\[kcanary\\]$"')
  '';

  meta.maintainers = with pkgs.lib.maintainers; [ aszlig ];
}

@@ -1,73 +1,71 @@
{ pkgs, ... }:
{
  name = "boot-stage2";

  nodes.machine =
    {
      config,
      pkgs,
      lib,
      ...
    }:
    {
      virtualisation = {
        emptyDiskImages = [ 256 ];

        # Mount an ext4 as the upper layer of the Nix store.
        fileSystems = {
          "/nix/store" = lib.mkForce {
            device = "/dev/vdb"; # the above disk image
            fsType = "ext4";

            # data=journal always displays after errors=remount-ro; this is only needed because of the overlay
            # and #375257 will trigger with `errors=remount-ro` on a non-overlaid store:
            # see ordering in https://github.com/torvalds/linux/blob/v6.12/fs/ext4/super.c#L2974
            options = [
              "defaults"
              "errors=remount-ro"
              "data=journal"
            ];
          };
        };
      };

      boot = {
        initrd = {
          # Format the upper Nix store.
          postDeviceCommands = ''
            ${pkgs.e2fsprogs}/bin/mkfs.ext4 /dev/vdb
          '';

          # Overlay the RO store onto it.
          # Note that bug #375257 can be triggered without an overlay,
          # using the errors=remount-ro option (or similar) or with an overlay where any of the
          # paths ends in 'ro'. The offending mountpoint also has to be the last (top) one
          # if an option ending in 'ro' is the last in the list, so test both cases here.
          postMountCommands = ''
            mkdir -p /mnt-root/nix/store/ro /mnt-root/nix/store/rw /mnt-root/nix/store/work
            mount --bind /mnt-root/nix/.ro-store /mnt-root/nix/store/ro
            mount -t overlay overlay \
              -o lowerdir=/mnt-root/nix/store/ro,upperdir=/mnt-root/nix/store/rw,workdir=/mnt-root/nix/store/work \
              /mnt-root/nix/store
          '';

          kernelModules = [ "overlay" ];
        };

        postBootCommands = ''
          touch /etc/post-boot-ran
          mount
        '';
      };
    };

  testScript = ''
    machine.wait_for_unit("multi-user.target")
    machine.succeed("test /etc/post-boot-ran")
    machine.fail("touch /nix/store/should-not-work");
  '';

  meta.maintainers = with pkgs.lib.maintainers; [ numinit ];
}

@@ -1,276 +1,274 @@
{ pkgs, ... }:

let
  passphrase = "supersecret";
  dataDir = "/ran:dom/data";
  subDir = "not_anything_here";
  excludedSubDirFile = "not_this_file_either";
  excludeFile = "not_this_file";
  keepFile = "important_file";
  keepFileData = "important_data";
  localRepo = "/root/back:up";
  # a repository on a file system which is not mounted automatically
  localRepoMount = "/noAutoMount";
  archiveName = "my_archive";
  remoteRepo = "borg@server:."; # No need to specify path
  privateKey = pkgs.writeText "id_ed25519" ''
    -----BEGIN OPENSSH PRIVATE KEY-----
    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
    QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
    RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
    AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
    9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
    -----END OPENSSH PRIVATE KEY-----
  '';
  publicKey = ''
    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv root@client
  '';
  privateKeyAppendOnly = pkgs.writeText "id_ed25519" ''
    -----BEGIN OPENSSH PRIVATE KEY-----
    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
    QyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLwAAAJC9YTxxvWE8
    cQAAAAtzc2gtZWQyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLw
    AAAEAAhV7wTl5dL/lz+PF/d4PnZXuG1Id6L/mFEiGT1tZsuFpxm7PUQsZB2Ejs8Xp0YVp8
    IOW+HylIRzhweORbRCMvAAAADXJzY2h1ZXR6QGt1cnQ=
    -----END OPENSSH PRIVATE KEY-----
  '';
  publicKeyAppendOnly = ''
    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFpxm7PUQsZB2Ejs8Xp0YVp8IOW+HylIRzhweORbRCMv root@client
  '';

in
{
  name = "borgbackup";
  meta = with pkgs.lib; {
    maintainers = with maintainers; [ dotlambda ];
  };

  nodes = {
    client =
      { ... }:
      {
        virtualisation.fileSystems.${localRepoMount} = {
          device = "tmpfs";
          fsType = "tmpfs";
          options = [ "noauto" ];
        };

        services.borgbackup.jobs = {

          local = {
            paths = dataDir;
            repo = localRepo;
            preHook = ''
              # Don't append a timestamp
              archiveName="${archiveName}"
            '';
            encryption = {
              mode = "repokey";
              inherit passphrase;
            };
            compression = "auto,zlib,9";
            prune.keep = {
              within = "1y";
              yearly = 5;
            };
            exclude = [ "*/${excludeFile}" ];
            extraCreateArgs = [
              "--exclude-caches"
              "--exclude-if-present"
              ".dont backup"
            ];
            postHook = "echo post";
            startAt = [ ]; # Do not run automatically
          };

          localMount = {
            paths = dataDir;
            repo = localRepoMount;
            encryption.mode = "none";
            startAt = [ ];
          };

          remote = {
            paths = dataDir;
            repo = remoteRepo;
            encryption.mode = "none";
            startAt = [ ];
            environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
          };

          remoteAppendOnly = {
            paths = dataDir;
            repo = remoteRepo;
            encryption.mode = "none";
            startAt = [ ];
            environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly";
          };

          commandSuccess = {
            dumpCommand = pkgs.writeScript "commandSuccess" ''
              echo -n test
            '';
            repo = remoteRepo;
            encryption.mode = "none";
            startAt = [ ];
            environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
          };

          commandFail = {
            dumpCommand = "${pkgs.coreutils}/bin/false";
            repo = remoteRepo;
            encryption.mode = "none";
            startAt = [ ];
            environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
          };

          sleepInhibited = {
            inhibitsSleep = true;
            # Blocks indefinitely while "backing up" so that we can try to suspend the local system while it's hung
            dumpCommand = pkgs.writeScript "sleepInhibited" ''
              cat /dev/zero
            '';
            repo = remoteRepo;
            encryption.mode = "none";
            startAt = [ ];
            environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
          };
        };
      };

    server =
      { ... }:
      {
        services.openssh = {
          enable = true;
          settings = {
            PasswordAuthentication = false;
            KbdInteractiveAuthentication = false;
          };
        };

        services.borgbackup.repos.repo1 = {
          authorizedKeys = [ publicKey ];
          path = "/data/borgbackup";
        };

        # Second repo to make sure the authorizedKeys options are merged correctly
        services.borgbackup.repos.repo2 = {
          authorizedKeysAppendOnly = [ publicKeyAppendOnly ];
          path = "/data/borgbackup";
          quota = ".5G";
        };
      };
  };

  testScript = ''
    start_all()

    client.fail('test -d "${remoteRepo}"')

    client.succeed(
        "cp ${privateKey} /root/id_ed25519"
    )
    client.succeed("chmod 0600 /root/id_ed25519")
    client.succeed(
        "cp ${privateKeyAppendOnly} /root/id_ed25519.appendOnly"
    )
    client.succeed("chmod 0600 /root/id_ed25519.appendOnly")

    client.succeed("mkdir -p ${dataDir}/${subDir}")
    client.succeed("touch ${dataDir}/${excludeFile}")
    client.succeed("touch '${dataDir}/${subDir}/.dont backup'")
    client.succeed("touch ${dataDir}/${subDir}/${excludedSubDirFile}")
    client.succeed("echo '${keepFileData}' > ${dataDir}/${keepFile}")

    with subtest("local"):
        borg = "BORG_PASSPHRASE='${passphrase}' borg"
        client.systemctl("start --wait borgbackup-job-local")
        client.fail("systemctl is-failed borgbackup-job-local")
        # Make sure exactly one archive has been created
        assert int(client.succeed("{} list '${localRepo}' | wc -l".format(borg))) > 0
        # Make sure excludeFile has been excluded
        client.fail(
            "{} list '${localRepo}::${archiveName}' | grep -qF '${excludeFile}'".format(borg)
        )
        # Make sure excludedSubDirFile has been excluded
        client.fail(
            "{} list '${localRepo}::${archiveName}' | grep -qF '${subDir}/${excludedSubDirFile}".format(borg)
        )
        # Make sure keepFile has the correct content
        client.succeed("{} extract '${localRepo}::${archiveName}'".format(borg))
        assert "${keepFileData}" in client.succeed("cat ${dataDir}/${keepFile}")
        # Make sure the same is true when using `borg mount`
        client.succeed(
            "mkdir -p /mnt/borg && {} mount '${localRepo}::${archiveName}' /mnt/borg".format(
                borg
            )
        )
        assert "${keepFileData}" in client.succeed(
            "cat /mnt/borg/${dataDir}/${keepFile}"
        )

    with subtest("localMount"):
        # the file system for the repo should not be already mounted
        client.fail("mount | grep ${localRepoMount}")
        # ensure trying to write to the mountpoint before the fs is mounted fails
        client.succeed("chattr +i ${localRepoMount}")
        borg = "borg"
        client.systemctl("start --wait borgbackup-job-localMount")
        client.fail("systemctl is-failed borgbackup-job-localMount")
        # Make sure exactly one archive has been created
        assert int(client.succeed("{} list '${localRepoMount}' | wc -l".format(borg))) > 0

    with subtest("remote"):
        borg = "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519' borg"
        server.wait_for_unit("sshd.service")
        client.wait_for_unit("network.target")
        client.systemctl("start --wait borgbackup-job-remote")
        client.fail("systemctl is-failed borgbackup-job-remote")
|
||||
|
||||
# Make sure we can't access repos other than the specified one
|
||||
client.fail("{} list borg\@server:wrong".format(borg))
|
||||
with subtest("remote"):
|
||||
borg = "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519' borg"
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.systemctl("start --wait borgbackup-job-remote")
|
||||
client.fail("systemctl is-failed borgbackup-job-remote")
|
||||
|
||||
# TODO: Make sure that data is actually deleted
|
||||
# Make sure we can't access repos other than the specified one
|
||||
client.fail("{} list borg\@server:wrong".format(borg))
|
||||
|
||||
with subtest("remoteAppendOnly"):
|
||||
borg = (
|
||||
"BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly' borg"
|
||||
)
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.systemctl("start --wait borgbackup-job-remoteAppendOnly")
|
||||
client.fail("systemctl is-failed borgbackup-job-remoteAppendOnly")
|
||||
# TODO: Make sure that data is actually deleted
|
||||
|
||||
# Make sure we can't access repos other than the specified one
|
||||
client.fail("{} list borg\@server:wrong".format(borg))
|
||||
with subtest("remoteAppendOnly"):
|
||||
borg = (
|
||||
"BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly' borg"
|
||||
)
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.systemctl("start --wait borgbackup-job-remoteAppendOnly")
|
||||
client.fail("systemctl is-failed borgbackup-job-remoteAppendOnly")
|
||||
|
||||
# TODO: Make sure that data is not actually deleted
|
||||
# Make sure we can't access repos other than the specified one
|
||||
client.fail("{} list borg\@server:wrong".format(borg))
|
||||
|
||||
with subtest("commandSuccess"):
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.systemctl("start --wait borgbackup-job-commandSuccess")
|
||||
client.fail("systemctl is-failed borgbackup-job-commandSuccess")
|
||||
id = client.succeed("borg-job-commandSuccess list | tail -n1 | cut -d' ' -f1").strip()
|
||||
client.succeed(f"borg-job-commandSuccess extract ::{id} stdin")
|
||||
assert "test" == client.succeed("cat stdin")
|
||||
# TODO: Make sure that data is not actually deleted
|
||||
|
||||
with subtest("commandFail"):
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.systemctl("start --wait borgbackup-job-commandFail")
|
||||
client.succeed("systemctl is-failed borgbackup-job-commandFail")
|
||||
with subtest("commandSuccess"):
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.systemctl("start --wait borgbackup-job-commandSuccess")
|
||||
client.fail("systemctl is-failed borgbackup-job-commandSuccess")
|
||||
id = client.succeed("borg-job-commandSuccess list | tail -n1 | cut -d' ' -f1").strip()
|
||||
client.succeed(f"borg-job-commandSuccess extract ::{id} stdin")
|
||||
assert "test" == client.succeed("cat stdin")
|
||||
|
||||
with subtest("sleepInhibited"):
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.fail("systemd-inhibit --list | grep -q borgbackup")
|
||||
client.systemctl("start borgbackup-job-sleepInhibited")
|
||||
client.wait_until_succeeds("systemd-inhibit --list | grep -q borgbackup")
|
||||
client.systemctl("stop borgbackup-job-sleepInhibited")
|
||||
'';
|
||||
}
|
||||
)
|
||||
with subtest("commandFail"):
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.systemctl("start --wait borgbackup-job-commandFail")
|
||||
client.succeed("systemctl is-failed borgbackup-job-commandFail")
|
||||
|
||||
with subtest("sleepInhibited"):
|
||||
server.wait_for_unit("sshd.service")
|
||||
client.wait_for_unit("network.target")
|
||||
client.fail("systemd-inhibit --list | grep -q borgbackup")
|
||||
client.systemctl("start borgbackup-job-sleepInhibited")
|
||||
client.wait_until_succeeds("systemd-inhibit --list | grep -q borgbackup")
|
||||
client.systemctl("stop borgbackup-job-sleepInhibited")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,28 +1,26 @@
{ pkgs, ... }:
{
  name = "borgmatic";
  nodes.machine =
    { ... }:
    {
      services.borgmatic = {
        enable = true;
        settings = {
          source_directories = [ "/home" ];
          repositories = [
            {
              label = "local";
              path = "/var/backup";
            }
          ];
          keep_daily = 7;
        };
      };
    };

  testScript = ''
    machine.succeed("borgmatic rcreate -e none")
    machine.succeed("borgmatic")
  '';
}
@@ -1,25 +1,23 @@
{ lib, pkgs, ... }:
{

  name = "bpftune";

  meta = {
    maintainers = with lib.maintainers; [ nickcao ];
  };

  nodes = {
    machine =
      { pkgs, ... }:
      {
        services.bpftune.enable = true;
      };
  };

  testScript = ''
    machine.wait_for_unit("bpftune.service")
    machine.wait_for_console_text("bpftune works")
  '';

}
@@ -1,41 +1,39 @@
{ lib, ... }:
{
  name = "breitbandmessung";
  meta.maintainers = with lib.maintainers; [ b4dm4n ];

  nodes.machine =
    { pkgs, ... }:
    {
      imports = [
        ./common/user-account.nix
        ./common/x11.nix
      ];

      # increase screen size to make the whole program visible
      virtualisation.resolution = {
        x = 1280;
        y = 1024;
      };

      test-support.displayManager.auto.user = "alice";

      environment.systemPackages = with pkgs; [ breitbandmessung ];
      environment.variables.XAUTHORITY = "/home/alice/.Xauthority";

      # breitbandmessung is unfree
      nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "breitbandmessung" ];
    };

  enableOCR = true;

  testScript = ''
    machine.wait_for_x()
    machine.execute("su - alice -c breitbandmessung >&2 &")
    machine.wait_for_window("Breitbandmessung")
    machine.wait_for_text("Breitbandmessung")
    machine.wait_for_text("Datenschutz")
    machine.screenshot("breitbandmessung")
  '';
}
@@ -1,53 +1,51 @@
# integration tests for brscan5 sane driver
#

{ pkgs, ... }:
{
  name = "brscan5";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ mattchrist ];
  };

  nodes.machine =
    { pkgs, ... }:
    {
      nixpkgs.config.allowUnfree = true;
      hardware.sane = {
        enable = true;
        brscan5 = {
          enable = true;
          netDevices = {
            "a" = {
              model = "ADS-1200";
              nodename = "BRW0080927AFBCE";
            };
            "b" = {
              model = "ADS-1200";
              ip = "192.168.1.2";
            };
          };
        };
      };
    };

  testScript = ''
    import re
    # sane loads libsane-brother5.so.1 successfully, and scanimage doesn't die
    strace = machine.succeed('strace scanimage -L 2>&1').split("\n")
    regexp = 'openat\(.*libsane-brother5.so.1", O_RDONLY|O_CLOEXEC\) = \d\d*$'
    assert len([x for x in strace if re.match(regexp,x)]) > 0

    # module creates a config
    cfg = machine.succeed('cat /etc/opt/brother/scanner/brscan5/brsanenetdevice.cfg')
    assert 'DEVICE=a , "ADS-1200" , 0x4f9:0x459 , NODENAME=BRW0080927AFBCE' in cfg
    assert 'DEVICE=b , "ADS-1200" , 0x4f9:0x459 , IP-ADDRESS=192.168.1.2' in cfg

    # scanimage lists the two network scanners
    scanimage = machine.succeed("scanimage -L")
    print(scanimage)
    assert """device `brother5:net1;dev0' is a Brother b ADS-1200""" in scanimage
    assert """device `brother5:net1;dev1' is a Brother a ADS-1200""" in scanimage
  '';
}
@@ -1,128 +1,126 @@
{ pkgs, ... }:

let
  privateKey = ''
    -----BEGIN OPENSSH PRIVATE KEY-----
    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
    QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
    RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
    AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
    9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
    -----END OPENSSH PRIVATE KEY-----
  '';
  publicKey = ''
    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv
  '';
in
{
  name = "btrbk-doas";
  meta = with pkgs.lib; {
    maintainers = with maintainers; [
      symphorien
      tu-maurice
    ];
  };

  nodes = {
    archive =
      { ... }:
      {
        security.sudo.enable = false;
        security.doas.enable = true;
        environment.systemPackages = with pkgs; [ btrfs-progs ];
        # note: this makes the privateKey world readable.
        # don't do it with real ssh keys.
        environment.etc."btrbk_key".text = privateKey;
        services.btrbk = {
          extraPackages = [ pkgs.lz4 ];
          instances = {
            remote = {
              onCalendar = "minutely";
              settings = {
                ssh_identity = "/etc/btrbk_key";
                ssh_user = "btrbk";
                stream_compress = "lz4";
                volume = {
                  "ssh://main/mnt" = {
                    target = "/mnt";
                    snapshot_dir = "btrbk/remote";
                    subvolume = "to_backup";
                  };
                };
              };
            };
          };
        };
      };

    main =
      { ... }:
      {
        security.sudo.enable = false;
        security.doas.enable = true;
        environment.systemPackages = with pkgs; [ btrfs-progs ];
        services.openssh = {
          enable = true;
          passwordAuthentication = false;
          kbdInteractiveAuthentication = false;
        };
        services.btrbk = {
          extraPackages = [ pkgs.lz4 ];
          sshAccess = [
            {
              key = publicKey;
              roles = [
                "source"
                "send"
                "info"
                "delete"
              ];
            }
          ];
          instances = {
            local = {
              onCalendar = "minutely";
              settings = {
                volume = {
                  "/mnt" = {
                    snapshot_dir = "btrbk/local";
                    subvolume = "to_backup";
                  };
                };
              };
            };
          };
        };
      };
  };

  testScript = ''
    start_all()

    # create btrfs partition at /mnt
    for machine in (archive, main):
        machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1")
        machine.succeed("mkfs.btrfs /data_fs")
        machine.succeed("mkdir /mnt")
        machine.succeed("mount /data_fs /mnt")

    # what to backup and where
    main.succeed("btrfs subvolume create /mnt/to_backup")
    main.succeed("mkdir -p /mnt/btrbk/{local,remote}")

    # check that local snapshots work
    with subtest("local"):
        main.succeed("echo foo > /mnt/to_backup/bar")
        main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo")
        main.succeed("echo bar > /mnt/to_backup/bar")
        main.succeed("cat /mnt/btrbk/local/*/bar | grep foo")

    # check that btrfs send/receive works and ssh access works
    with subtest("remote"):
        archive.wait_until_succeeds("cat /mnt/*/bar | grep bar")
        main.succeed("echo baz > /mnt/to_backup/bar")
        archive.succeed("cat /mnt/*/bar | grep bar")
  '';
}
@@ -1,41 +1,39 @@
{ lib, pkgs, ... }:
{
  name = "btrbk-no-timer";
  meta.maintainers = with lib.maintainers; [ oxalica ];

  nodes.machine =
    { ... }:
    {
      environment.systemPackages = with pkgs; [ btrfs-progs ];
      services.btrbk.instances.local = {
        onCalendar = null;
        settings.volume."/mnt" = {
          snapshot_dir = "btrbk/local";
          subvolume = "to_backup";
        };
      };
    };

  testScript = ''
    start_all()

    # Create btrfs partition at /mnt
    machine.succeed("truncate --size=128M /data_fs")
    machine.succeed("mkfs.btrfs /data_fs")
    machine.succeed("mkdir /mnt")
    machine.succeed("mount /data_fs /mnt")
    machine.succeed("btrfs subvolume create /mnt/to_backup")
    machine.succeed("mkdir -p /mnt/btrbk/local")

    # The service should not have any triggering timer.
    unit = machine.get_unit_info('btrbk-local.service')
    assert "TriggeredBy" not in unit

    # Manually starting the service should still work.
    machine.succeed("echo foo > /mnt/to_backup/bar")
    machine.start_job("btrbk-local.service")
    machine.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo")
  '';
}
@@ -6,56 +6,54 @@
# order-sensitive config format.
#
# Issue: https://github.com/NixOS/nixpkgs/issues/195660
{ lib, pkgs, ... }:
{
  name = "btrbk-section-order";
  meta.maintainers = with lib.maintainers; [ oxalica ];

  nodes.machine =
    { ... }:
    {
      services.btrbk.instances.local = {
        onCalendar = null;
        settings = {
          timestamp_format = "long";
          target."ssh://global-target/".ssh_user = "root";
          volume."/btrfs" = {
            snapshot_dir = "/volume-snapshots";
            target."ssh://volume-target/".ssh_user = "root";
            subvolume."@subvolume" = {
              snapshot_dir = "/subvolume-snapshots";
              target."ssh://subvolume-target/".ssh_user = "root";
            };
          };
        };
      };
    };

  testScript = ''
    import difflib
    machine.wait_for_unit("basic.target")
    got = machine.succeed("cat /etc/btrbk/local.conf").strip()
    expect = """
    backend btrfs-progs-sudo
    stream_compress no
    timestamp_format long
    target ssh://global-target/
     ssh_user root
    volume /btrfs
     snapshot_dir /volume-snapshots
     target ssh://volume-target/
      ssh_user root
     subvolume @subvolume
      snapshot_dir /subvolume-snapshots
      target ssh://subvolume-target/
       ssh_user root
    """.strip()
    print(got)
    if got != expect:
        diff = difflib.unified_diff(expect.splitlines(keepends=True), got.splitlines(keepends=True), fromfile="expected", tofile="got")
        print("".join(diff))
    assert got == expect
  '';
}
@@ -1,122 +1,120 @@
{ pkgs, ... }:

let
  privateKey = ''
    -----BEGIN OPENSSH PRIVATE KEY-----
    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
    QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
    RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
    AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
    9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
    -----END OPENSSH PRIVATE KEY-----
  '';
  publicKey = ''
    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv
  '';
in
{
  name = "btrbk";
  meta = with pkgs.lib; {
    maintainers = with maintainers; [ symphorien ];
  };

  nodes = {
    archive =
      { ... }:
      {
        environment.systemPackages = with pkgs; [ btrfs-progs ];
        # note: this makes the privateKey world readable.
        # don't do it with real ssh keys.
        environment.etc."btrbk_key".text = privateKey;
        services.btrbk = {
          instances = {
            remote = {
              onCalendar = "minutely";
              settings = {
                ssh_identity = "/etc/btrbk_key";
                ssh_user = "btrbk";
                stream_compress = "lz4";
                volume = {
                  "ssh://main/mnt" = {
                    target = "/mnt";
                    snapshot_dir = "btrbk/remote";
                    subvolume = "to_backup";
                  };
                };
              };
            };
          };
        };
      };

    main =
      { ... }:
      {
        environment.systemPackages = with pkgs; [ btrfs-progs ];
        services.openssh = {
          enable = true;
          settings = {
            KbdInteractiveAuthentication = false;
            PasswordAuthentication = false;
          };
        };
        services.btrbk = {
          extraPackages = [ pkgs.lz4 ];
          sshAccess = [
            {
              key = publicKey;
              roles = [
                "source"
                "send"
                "info"
                "delete"
              ];
            }
          ];
          instances = {
            local = {
              onCalendar = "minutely";
              settings = {
                volume = {
                  "/mnt" = {
                    snapshot_dir = "btrbk/local";
                    subvolume = "to_backup";
                  };
                };
              };
            };
          };
        };
      };
  };

  testScript = ''
    start_all()

    # create btrfs partition at /mnt
    for machine in (archive, main):
        machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1")
        machine.succeed("mkfs.btrfs /data_fs")
        machine.succeed("mkdir /mnt")
        machine.succeed("mount /data_fs /mnt")

    # what to backup and where
    main.succeed("btrfs subvolume create /mnt/to_backup")
    main.succeed("mkdir -p /mnt/btrbk/{local,remote}")

    # check that local snapshots work
    with subtest("local"):
        main.succeed("echo foo > /mnt/to_backup/bar")
        main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo")
        main.succeed("echo bar > /mnt/to_backup/bar")
        main.succeed("cat /mnt/btrbk/local/*/bar | grep foo")

    # check that btrfs send/receive works and ssh access works
    with subtest("remote"):
        archive.wait_until_succeeds("cat /mnt/*/bar | grep bar")
        main.succeed("echo baz > /mnt/to_backup/bar")
        archive.succeed("cat /mnt/*/bar | grep bar")
  '';
}
@@ -1,104 +1,102 @@
{ pkgs, lib, ... }:
{
  name = "budgie";

  meta.maintainers = lib.teams.budgie.members;

  nodes.machine =
    { ... }:
    {
      imports = [
        ./common/user-account.nix
      ];

      services.xserver.enable = true;

      services.xserver.displayManager = {
        lightdm.enable = true;
        autoLogin = {
          enable = true;
          user = "alice";
        };
      };

      # We don't ship gnome-text-editor in Budgie module, we add this line mainly
      # to catch eval issues related to this option.
      environment.budgie.excludePackages = [ pkgs.gnome-text-editor ];

      services.xserver.desktopManager.budgie = {
        enable = true;
        extraPlugins = [
          pkgs.budgie-analogue-clock-applet
        ];
      };
    };

  testScript =
    { nodes, ... }:
    let
      user = nodes.machine.users.users.alice;
      env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0";
      su = command: "su - ${user.name} -c '${env} ${command}'";
    in
    ''
      with subtest("Wait for login"):
          # wait_for_x() checks graphical-session.target, which is expected to be
          # inactive on Budgie before Budgie manages user session with systemd.
          # https://github.com/BuddiesOfBudgie/budgie-desktop/blob/39e9f0895c978f76/src/session/budgie-desktop.in#L16
          #
          # Previously this was unconditionally touched by xsessionWrapper but was
          # changed in #233981 (we have Budgie:GNOME in XDG_CURRENT_DESKTOP).
          # machine.wait_for_x()
          machine.wait_until_succeeds('journalctl -t budgie-session-binary --grep "Entering running state"')
          machine.wait_for_file("${user.home}/.Xauthority")
          machine.succeed("xauth merge ${user.home}/.Xauthority")

      with subtest("Check that logging in has given the user ownership of devices"):
          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")

      with subtest("Check if Budgie session components actually start"):
          for i in ["budgie-daemon", "budgie-panel", "budgie-wm", "budgie-desktop-view", "gsd-media-keys"]:
              machine.wait_until_succeeds(f"pgrep -f {i}")
          # We don't check xwininfo for budgie-wm.
          # See https://github.com/NixOS/nixpkgs/pull/216737#discussion_r1155312754
          machine.wait_for_window("budgie-daemon")
          machine.wait_for_window("budgie-panel")

      with subtest("Check if various environment variables are set"):
          cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf /run/current-system/sw/bin/budgie-wm)/environ"
          machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Budgie:GNOME'")
          machine.succeed(f"{cmd} | grep 'BUDGIE_PLUGIN_DATADIR' | grep '${pkgs.budgie-desktop-with-plugins.pname}'")

      with subtest("Open run dialog"):
          machine.send_key("alt-f2")
          machine.wait_for_window("budgie-run-dialog")
          machine.sleep(2)
          machine.screenshot("run_dialog")
          machine.send_key("esc")

      with subtest("Open Budgie Control Center"):
          machine.succeed("${su "budgie-control-center >&2 &"}")
          machine.wait_for_window("Budgie Control Center")

      with subtest("Lock the screen"):
          machine.succeed("${su "budgie-screensaver-command -l >&2 &"}")
          machine.wait_until_succeeds("${su "budgie-screensaver-command -q"} | grep 'The screensaver is active'")
          machine.sleep(2)
          machine.send_chars("${user.password}", delay=0.5)
          machine.screenshot("budgie_screensaver")
          machine.send_chars("\n")
          machine.wait_until_succeeds("${su "budgie-screensaver-command -q"} | grep 'The screensaver is inactive'")
          machine.sleep(2)

      with subtest("Open GNOME terminal"):
          machine.succeed("${su "gnome-terminal"}")
          machine.wait_for_window("${user.name}@machine: ~")

      with subtest("Check if Budgie has ever coredumped"):
          machine.fail("coredumpctl --json=short | grep budgie")
          machine.sleep(10)
          machine.screenshot("screen")
    '';
}
@@ -1,33 +1,31 @@
{ lib, pkgs, ... }:

{
  name = "buildkite-agent";
  meta.maintainers = with lib.maintainers; [ flokli ];

  nodes.machine =
    { pkgs, ... }:
    {
      services.buildkite-agents = {
        one = {
          privateSshKeyPath = (import ./ssh-keys.nix pkgs).snakeOilPrivateKey;
          tokenPath = (pkgs.writeText "my-token" "5678");
        };
        two = {
          tokenPath = (pkgs.writeText "my-token" "1234");
        };
      };
    };

  testScript = ''
    start_all()
    # we can't wait on the unit to start up, as we obviously can't connect to buildkite,
    # but we can look whether files are set up correctly

    machine.wait_for_file("/var/lib/buildkite-agent-one/buildkite-agent.cfg")
    machine.wait_for_file("/var/lib/buildkite-agent-one/.ssh/id_rsa")

    machine.wait_for_file("/var/lib/buildkite-agent-two/buildkite-agent.cfg")
  '';
}
@@ -1,87 +1,85 @@
{ pkgs, lib, ... }:
{
  name = "c2FmZQ";
  meta.maintainers = with lib.maintainers; [ hmenke ];

  nodes.machine = {
    services.c2fmzq-server = {
      enable = true;
      port = 8080;
      passphraseFile = builtins.toFile "pwfile" "hunter2"; # don't do this on real deployments
      settings = {
        verbose = 3; # debug
        # make sure multiple freeform options evaluate
        allow-new-accounts = true;
        auto-approve-new-accounts = true;
        licenses = false;
      };
    };
    environment = {
      sessionVariables = {
        C2FMZQ_PASSPHRASE = "lol";
        C2FMZQ_API_SERVER = "http://localhost:8080";
      };
      systemPackages = [
        pkgs.c2fmzq
        (pkgs.writeScriptBin "c2FmZQ-client-wrapper" ''
          #!${pkgs.expect}/bin/expect -f
          spawn c2FmZQ-client {*}$argv
          expect {
            "Enter password:" { send "$env(PASSWORD)\r" }
            "Type YES to confirm:" { send "YES\r" }
            timeout { exit 1 }
            eof { exit 0 }
          }
          interact
        '')
      ];
    };
  };

  testScript =
    { nodes, ... }:
    ''
      machine.start()
      machine.wait_for_unit("c2fmzq-server.service")
      machine.wait_for_open_port(8080)

      with subtest("Create accounts for alice and bob"):
          machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 create-account alice@example.com")
          machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 create-account bob@example.com")

      with subtest("Log in as alice"):
          machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 login alice@example.com")
          msg = machine.succeed("c2FmZQ-client -v 3 status")
          assert "Logged in as alice@example.com" in msg, f"ERROR: Not logged in as alice:\n{msg}"

      with subtest("Create a new album, upload a file, and delete the uploaded file"):
          machine.succeed("c2FmZQ-client -v 3 create-album 'Rarest Memes'")
          machine.succeed("echo 'pls do not steal' > meme.txt")
          machine.succeed("c2FmZQ-client -v 3 import meme.txt 'Rarest Memes'")
          machine.succeed("c2FmZQ-client -v 3 sync")
          machine.succeed("rm meme.txt")

      with subtest("Share the album with bob"):
          machine.succeed("c2FmZQ-client-wrapper -- -v 3 share 'Rarest Memes' bob@example.com")

      with subtest("Log in as bob"):
          machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 login bob@example.com")
          msg = machine.succeed("c2FmZQ-client -v 3 status")
          assert "Logged in as bob@example.com" in msg, f"ERROR: Not logged in as bob:\n{msg}"

      with subtest("Download the shared file"):
          machine.succeed("c2FmZQ-client -v 3 download 'shared/Rarest Memes/meme.txt'")
          machine.succeed("c2FmZQ-client -v 3 export 'shared/Rarest Memes/meme.txt' .")
          msg = machine.succeed("cat meme.txt")
          assert "pls do not steal\n" == msg, f"File content is not the same:\n{msg}"

      with subtest("Test that PWA is served"):
          msg = machine.succeed("curl -sSfL http://localhost:8080")
          assert "c2FmZQ" in msg, f"Could not find 'c2FmZQ' in the output:\n{msg}"

      with subtest("A setting with false value is properly passed"):
          machine.succeed("systemctl show -p ExecStart --value c2fmzq-server.service | grep -F -- '--licenses=false'");
    '';
}
@@ -1,44 +1,42 @@
{ pkgs, ... }:

{
  name = "cage";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ matthewbauer ];
  };

  nodes.machine =
    { ... }:

    {
      imports = [ ./common/user-account.nix ];

      fonts.packages = with pkgs; [ dejavu_fonts ];

      services.cage = {
        enable = true;
        user = "alice";
        program = "${pkgs.xterm}/bin/xterm";
      };

      # Need to switch to a different GPU driver than the default one (-vga std) so that Cage can launch:
      virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
    };

  enableOCR = true;

  testScript =
    { nodes, ... }:
    let
      user = nodes.machine.config.users.users.alice;
    in
    ''
      with subtest("Wait for cage to boot up"):
          start_all()
          machine.wait_for_file("/run/user/${toString user.uid}/wayland-0.lock")
          machine.wait_until_succeeds("pgrep xterm")
          machine.wait_for_text("alice@machine")
          machine.screenshot("screen")
    '';
}
@@ -1,72 +1,70 @@
{ pkgs, lib, ... }:

let
  cagebreakConfigfile = pkgs.writeText "config" ''
    workspaces 1
    escape C-t
    bind t exec env DISPLAY=:0 ${pkgs.xterm}/bin/xterm -cm -pc
  '';
in
{
  name = "cagebreak";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ berbiche ];
  };

  nodes.machine =
    { config, ... }:
    {
      # Automatically login on tty1 as a normal user:
      imports = [ ./common/user-account.nix ];
      services.getty.autologinUser = "alice";
      programs.bash.loginShellInit = ''
        if [ "$(tty)" = "/dev/tty1" ]; then
          set -e

          mkdir -p ~/.config/cagebreak
          cp -f ${cagebreakConfigfile} ~/.config/cagebreak/config

          cagebreak
        fi
      '';

      hardware.graphics.enable = true;
      programs.xwayland.enable = true;
      security.polkit.enable = true;
      environment.systemPackages = [
        pkgs.cagebreak
        pkgs.wayland-utils
      ];

      # Need to switch to a different GPU driver than the default one (-vga std) so that Cagebreak can launch:
      virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
    };

  enableOCR = true;

  testScript =
    { nodes, ... }:
    let
      user = nodes.machine.config.users.users.alice;
      XDG_RUNTIME_DIR = "/run/user/${toString user.uid}";
    in
    ''
      start_all()
      machine.wait_for_unit("multi-user.target")
      machine.wait_for_file("${XDG_RUNTIME_DIR}/wayland-0")

      with subtest("ensure wayland works with wayinfo from wallutils"):
          print(machine.succeed("env XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR} wayland-info"))

      # TODO: Fix the XWayland test (log the cagebreak output to debug):
      # with subtest("ensure xwayland works with xterm"):
      #     machine.send_key("ctrl-t")
      #     machine.send_key("t")
      #     machine.wait_until_succeeds("pgrep xterm")
      #     machine.wait_for_text("${user.name}@machine")
      #     machine.screenshot("screen")
    '';
}
@@ -1,62 +1,60 @@
{ pkgs, ... }:
let
  certs = import ./common/acme/server/snakeoil-certs.nix;
  inherit (certs) domain;
in
{
  name = "canaille";
  meta.maintainers = with pkgs.lib.maintainers; [ erictapen ];

  nodes.server =
    { pkgs, lib, ... }:
    {
      services.canaille = {
        enable = true;
        secretKeyFile = pkgs.writeText "canaille-secret-key" ''
          this is not a secret key
        '';
        settings = {
          SERVER_NAME = domain;
        };
      };

      services.nginx.virtualHosts."${domain}" = {
        enableACME = lib.mkForce false;
        sslCertificate = certs."${domain}".cert;
        sslCertificateKey = certs."${domain}".key;
      };

      networking.hosts."::1" = [ "${domain}" ];
      networking.firewall.allowedTCPPorts = [
        80
        443
      ];

      users.users.canaille.shell = pkgs.bashInteractive;

      security.pki.certificateFiles = [ certs.ca.cert ];
    };

  nodes.client =
    { nodes, ... }:
    {
      networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ "${domain}" ];
      security.pki.certificateFiles = [ certs.ca.cert ];
    };

  testScript =
    { ... }:
    ''
      import json

      start_all()
      server.wait_for_unit("canaille.socket")
      server.wait_until_succeeds("curl -f https://${domain}")
      server.succeed("sudo -iu canaille -- canaille create user --user-name admin --password adminpass --emails admin@${domain}")
      json_str = server.succeed("sudo -iu canaille -- canaille get user")
      assert json.loads(json_str)[0]["user_name"] == "admin"
      server.succeed("sudo -iu canaille -- canaille config check")
    '';
}
@@ -1,250 +1,248 @@
{ pkgs, lib, ... }:
{
  name = "castopod";
  meta = with lib.maintainers; {
    maintainers = [ alexoundos ];
  };

  nodes.castopod =
    { nodes, ... }:
    {
      # otherwise 500 MiB file upload fails!
      virtualisation.diskSize = 512 + 3 * 512;

      networking.firewall.allowedTCPPorts = [ 80 ];
      networking.extraHosts = lib.strings.concatStringsSep "\n" (
        lib.attrsets.mapAttrsToList (
          name: _: "127.0.0.1 ${name}"
        ) nodes.castopod.services.nginx.virtualHosts
      );

      services.castopod = {
        enable = true;
        database.createLocally = true;
        localDomain = "castopod.example.com";
        maxUploadSize = "512M";
      };
    };

  nodes.client =
    {
      nodes,
      pkgs,
      lib,
      ...
    }:
    let
      domain = nodes.castopod.services.castopod.localDomain;

      getIP = node: (builtins.head node.networking.interfaces.eth1.ipv4.addresses).address;

      targetPodcastSize = 500 * 1024 * 1024;
      lameMp3Bitrate = 348300;
      lameMp3FileAdjust = -800;
      targetPodcastDuration = toString ((targetPodcastSize + lameMp3FileAdjust) / (lameMp3Bitrate / 8));

      bannerWidth = 3000;
      banner = pkgs.runCommand "gen-castopod-cover.jpg" { } ''
        ${pkgs.imagemagick}/bin/magick `
        `-background green -bordercolor white -gravity northwest xc:black `
        `-duplicate 99 `
        `-seed 1 -resize "%[fx:rand()*72+24]" `
        `-seed 0 -rotate "%[fx:rand()*360]" -border 6x6 -splice 16x36 `
        `-seed 0 -rotate "%[fx:floor(rand()*4)*90]" -resize "150x50!" `
        `+append -crop 10x1@ +repage -roll "+%[fx:(t%2)*72]+0" -append `
        `-resize ${toString bannerWidth} -quality 1 $out
      '';

      coverWidth = toString 3000;
      cover = pkgs.runCommand "gen-castopod-banner.jpg" { } ''
        ${pkgs.imagemagick}/bin/magick `
        `-background white -bordercolor white -gravity northwest xc:black `
        `-duplicate 99 `
        `-seed 1 -resize "%[fx:rand()*72+24]" `
        `-seed 0 -rotate "%[fx:rand()*360]" -border 6x6 -splice 36x36 `
        `-seed 0 -rotate "%[fx:floor(rand()*4)*90]" -resize "144x144!" `
        `+append -crop 10x1@ +repage -roll "+%[fx:(t%2)*72]+0" -append `
        `-resize ${coverWidth} -quality 1 $out
      '';
    in
    {
      networking.extraHosts = lib.strings.concatStringsSep "\n" (
        lib.attrsets.mapAttrsToList (
          name: _: "${getIP nodes.castopod} ${name}"
        ) nodes.castopod.services.nginx.virtualHosts
      );

      environment.systemPackages =
        let
          username = "admin";
          email = "admin@${domain}";
          password = "Abcd1234";
          podcastTitle = "Some Title";
          episodeTitle = "Episode Title";
          browser-test =
            pkgs.writers.writePython3Bin "browser-test"
              {
                libraries = [ pkgs.python3Packages.selenium ];
                flakeIgnore = [
                  "E124"
                  "E501"
                ];
              }
              ''
                from selenium.webdriver.common.by import By
                from selenium.webdriver import Firefox
                from selenium.webdriver.firefox.options import Options
                from selenium.webdriver.firefox.service import Service
                from selenium.webdriver.support.ui import WebDriverWait
                from selenium.webdriver.support import expected_conditions as EC
                from subprocess import STDOUT
                import logging

                selenium_logger = logging.getLogger("selenium")
                selenium_logger.setLevel(logging.DEBUG)
                selenium_logger.addHandler(logging.StreamHandler())

                options = Options()
                options.add_argument('--headless')
                service = Service(log_output=STDOUT)
                driver = Firefox(options=options, service=service)
                driver = Firefox(options=options)
                driver.implicitly_wait(30)
                driver.set_page_load_timeout(60)

                # install ##########################################################

                driver.get('http://${domain}/cp-install')
                wait = WebDriverWait(driver, 20)

                wait.until(EC.title_contains("installer"))

                driver.find_element(By.CSS_SELECTOR, '#username').send_keys(
                    '${username}'
                )
                driver.find_element(By.CSS_SELECTOR, '#email').send_keys(
                    '${email}'
                )
                driver.find_element(By.CSS_SELECTOR, '#password').send_keys(
                    '${password}'
                )
                driver.find_element(By.XPATH,
                    "//button[contains(., 'Finish install')]"
                ).click()

                wait.until(EC.title_contains("Auth"))

                driver.find_element(By.CSS_SELECTOR, '#email').send_keys(
                    '${email}'
                )
                driver.find_element(By.CSS_SELECTOR, '#password').send_keys(
                    '${password}'
                )
                driver.find_element(By.XPATH,
                    "//button[contains(., 'Login')]"
                ).click()

                wait.until(EC.title_contains("Admin dashboard"))

                # create podcast ###################################################

                driver.get('http://${domain}/admin/podcasts/new')

                wait.until(EC.title_contains("Create podcast"))

                driver.find_element(By.CSS_SELECTOR, '#cover').send_keys(
                    '${cover}'
                )
                driver.find_element(By.CSS_SELECTOR, '#banner').send_keys(
                    '${banner}'
                )
                driver.find_element(By.CSS_SELECTOR, '#title').send_keys(
                    '${podcastTitle}'
                )
                driver.find_element(By.CSS_SELECTOR, '#handle').send_keys(
                    'some_handle'
                )
                driver.find_element(By.CSS_SELECTOR, '#description').send_keys(
                    'Some description'
                )
                driver.find_element(By.CSS_SELECTOR, '#owner_name').send_keys(
                    'Owner Name'
                )
                driver.find_element(By.CSS_SELECTOR, '#owner_email').send_keys(
                    'owner@email.xyz'
                )
                driver.find_element(By.XPATH,
                    "//button[contains(., 'Create podcast')]"
                ).click()

                wait.until(EC.title_contains("${podcastTitle}"))

                driver.find_element(By.XPATH,
                    "//span[contains(., 'Add an episode')]"
                ).click()

                wait.until(EC.title_contains("Add an episode"))

                # upload podcast ###################################################

                driver.find_element(By.CSS_SELECTOR, '#audio_file').send_keys(
                    '/tmp/podcast.mp3'
                )
                driver.find_element(By.CSS_SELECTOR, '#cover').send_keys(
                    '${cover}'
                )
                driver.find_element(By.CSS_SELECTOR, '#description').send_keys(
                    'Episode description'
                )
                driver.find_element(By.CSS_SELECTOR, '#title').send_keys(
                    '${episodeTitle}'
                )
                driver.find_element(By.XPATH,
                    "//button[contains(., 'Create episode')]"
                ).click()

                wait.until(EC.title_contains("${episodeTitle}"))

                driver.close()
                driver.quit()
              '';
        in
        [
          pkgs.firefox-unwrapped
          pkgs.geckodriver
          browser-test
          (pkgs.writeShellApplication {
            name = "build-mp3";
            runtimeInputs = with pkgs; [
              sox
              lame
            ];
            text = ''
              out=/tmp/podcast.mp3
              sox -n -r 48000 -t wav - synth ${targetPodcastDuration} sine 440 `
              `| lame --noreplaygain --cbr -q 9 -b 320 - $out
              FILESIZE="$(stat -c%s $out)"
              [ "$FILESIZE" -gt 0 ]
              [ "$FILESIZE" -le "${toString targetPodcastSize}" ]
            '';
          })
        ];
    };

  testScript = ''
    start_all()
    castopod.wait_for_unit("castopod-setup.service")
    castopod.wait_for_file("/run/phpfpm/castopod.sock")
    castopod.wait_for_unit("nginx.service")
    castopod.wait_for_open_port(80)
    castopod.wait_until_succeeds("curl -sS -f http://castopod.example.com")

    client.succeed("build-mp3")

    with subtest("Create superadmin, log in, create and upload a podcast"):
        client.succeed(\
            "PYTHONUNBUFFERED=1 systemd-cat -t browser-test browser-test")
  '';
}
@@ -1,49 +1,47 @@
# This test checks charliecloud image construction and run

{ pkgs, ... }:
let

  dockerfile = pkgs.writeText "Dockerfile" ''
    FROM nix
    RUN mkdir /home /tmp
    RUN touch /etc/passwd /etc/group
    CMD ["true"]
  '';

in
{
  name = "charliecloud";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ bzizou ];
  };

  nodes = {
    host =
      { ... }:
      {
        environment.systemPackages = [ pkgs.charliecloud ];
        virtualisation.docker.enable = true;
        users.users.alice = {
          isNormalUser = true;
          extraGroups = [ "docker" ];
        };
      };
  };

  testScript = ''
    host.start()
    host.wait_for_unit("docker.service")
    host.succeed(
        'su - alice -c "docker load --input=${pkgs.dockerTools.examples.nix}"'
    )
    host.succeed(
        "cp ${dockerfile} /home/alice/Dockerfile"
    )
    host.succeed('su - alice -c "ch-build -t hello ."')
    host.succeed('su - alice -c "ch-builder2tar hello /var/tmp"')
    host.succeed('su - alice -c "ch-tar2dir /var/tmp/hello.tar.gz /var/tmp"')
    host.succeed('su - alice -c "ch-run /var/tmp/hello -- echo Running_From_Container_OK"')
  '';
}
@@ -1,84 +1,82 @@
{ pkgs, lib, ... }:
{
  name = "cinnamon-wayland";

  meta.maintainers = lib.teams.cinnamon.members;

  nodes.machine =
    { nodes, ... }:
    {
      imports = [ ./common/user-account.nix ];
      services.xserver.enable = true;
      services.xserver.desktopManager.cinnamon.enable = true;
      services.displayManager = {
        autoLogin.enable = true;
        autoLogin.user = nodes.machine.users.users.alice.name;
        defaultSession = "cinnamon-wayland";
      };

      # For the sessionPath subtest.
      services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gpaste ];
    };

  enableOCR = true;

  testScript =
    { nodes, ... }:
    let
      user = nodes.machine.users.users.alice;
      env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus";
      su = command: "su - ${user.name} -c '${env} ${command}'";

      # Call javascript in cinnamon (the shell), returns a tuple (success, output),
      # where `success` is true if the dbus call was successful and `output` is what
      # the javascript evaluates to.
      eval =
        name: su "gdbus call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval ${name}";
    in
    ''
      machine.wait_for_unit("display-manager.service")

      with subtest("Wait for wayland server"):
          machine.wait_for_file("/run/user/${toString user.uid}/wayland-0")

      with subtest("Check that logging in has given the user ownership of devices"):
          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")

      with subtest("Wait for the Cinnamon shell"):
          # Correct output should be (true, '2')
          # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187
          machine.wait_until_succeeds("${eval "Main.runState"} | grep -q 'true,..2'")

      with subtest("Check if Cinnamon components actually start"):
          for i in ["csd-media-keys", "xapp-sn-watcher", "nemo-desktop"]:
              machine.wait_until_succeeds(f"pgrep -f {i}")
          machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'")
          machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'")

      with subtest("Check if sessionPath option actually works"):
          machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste")

      with subtest("Open Cinnamon Settings"):
          machine.succeed("${su "cinnamon-settings themes >&2 &"}")
          machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'")
          machine.wait_for_text('(Style|Appearance|Color)')
          machine.sleep(2)
          machine.screenshot("cinnamon_settings")

      with subtest("Check if screensaver works"):
          # This is not supported at the moment.
          # https://trello.com/b/HHs01Pab/cinnamon-wayland
          machine.execute("${su "cinnamon-screensaver-command -l >&2 &"}")
          machine.wait_until_succeeds("journalctl -b --grep 'cinnamon-screensaver is disabled in wayland sessions'")

      with subtest("Open GNOME Terminal"):
          machine.succeed("${su "dbus-launch gnome-terminal"}")
          machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'gnome-terminal'")
          machine.sleep(2)

      with subtest("Check if Cinnamon has ever coredumped"):
          machine.fail("coredumpctl --json=short | grep -E 'cinnamon|nemo'")
    '';
}
@@ -1,104 +1,102 @@
{ pkgs, lib, ... }:
{
  name = "cinnamon";

  meta.maintainers = lib.teams.cinnamon.members;

  nodes.machine =
    { ... }:
    {
      imports = [ ./common/user-account.nix ];
      services.xserver.enable = true;
      services.xserver.desktopManager.cinnamon.enable = true;

      # We don't ship gnome-text-editor in Cinnamon module, we add this line mainly
      # to catch eval issues related to this option.
      environment.cinnamon.excludePackages = [ pkgs.gnome-text-editor ];

      # For the sessionPath subtest.
      services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gpaste ];

      # For OCR test.
      services.xserver.displayManager.lightdm.greeters.slick.extraConfig = ''
        enable-hidpi = on
      '';
    };

  enableOCR = true;

  testScript =
    { nodes, ... }:
    let
      user = nodes.machine.users.users.alice;
      env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0";
      su = command: "su - ${user.name} -c '${env} ${command}'";

      # Call javascript in cinnamon (the shell), returns a tuple (success, output),
      # where `success` is true if the dbus call was successful and `output` is what
      # the javascript evaluates to.
      eval =
        name: su "gdbus call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval ${name}";
    in
    ''
      machine.wait_for_unit("display-manager.service")

      with subtest("Test if we can see username in slick-greeter"):
          machine.wait_for_text("${user.description}")
          machine.screenshot("slick_greeter_lightdm")

      with subtest("Login with slick-greeter"):
          machine.send_chars("${user.password}\n")
          machine.wait_for_x()
          machine.wait_for_file("${user.home}/.Xauthority")
          machine.succeed("xauth merge ${user.home}/.Xauthority")

      with subtest("Check that logging in has given the user ownership of devices"):
          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")

      with subtest("Wait for the Cinnamon shell"):
          # Correct output should be (true, '2')
          # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187
          machine.wait_until_succeeds("${eval "Main.runState"} | grep -q 'true,..2'")

      with subtest("Check if Cinnamon components actually start"):
          for i in ["csd-media-keys", "cinnamon-killer-daemon", "xapp-sn-watcher", "nemo-desktop"]:
              machine.wait_until_succeeds(f"pgrep -f {i}")
          machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'")
          machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'")

      with subtest("Check if sessionPath option actually works"):
          machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste")

      with subtest("Open Cinnamon Settings"):
          machine.succeed("${su "cinnamon-settings themes >&2 &"}")
          machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'")
          machine.wait_for_text('(Style|Appearance|Color)')
          machine.sleep(2)
          machine.screenshot("cinnamon_settings")

      with subtest("Lock the screen"):
          machine.succeed("${su "cinnamon-screensaver-command -l >&2 &"}")
          machine.wait_until_succeeds("${su "cinnamon-screensaver-command -q"} | grep 'The screensaver is active'")
          machine.sleep(2)
          machine.screenshot("cinnamon_screensaver")
          machine.send_chars("${user.password}\n", delay=0.2)
          machine.wait_until_succeeds("${su "cinnamon-screensaver-command -q"} | grep 'The screensaver is inactive'")
          machine.sleep(2)

      with subtest("Open GNOME Terminal"):
          machine.succeed("${su "gnome-terminal"}")
          machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'gnome-terminal'")
          machine.sleep(2)

      with subtest("Open virtual keyboard"):
          machine.succeed("${su "dbus-send --print-reply --dest=org.Cinnamon /org/Cinnamon org.Cinnamon.ToggleKeyboard"}")
          machine.wait_for_text('(Ctrl|Alt)')
          machine.sleep(2)
          machine.screenshot("cinnamon_virtual_keyboard")

      with subtest("Check if Cinnamon has ever coredumped"):
          machine.fail("coredumpctl --json=short | grep -E 'cinnamon|nemo'")
    '';
}
@@ -18,117 +18,115 @@ let

in

{ pkgs, ... }:
{
  name = "cjdns";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ ehmry ];
  };

  nodes = {
    # Alice finds peers over over ETHInterface.
    alice =
      { ... }:
      {
        imports = [ basicConfig ];

        services.cjdns.ETHInterface.bind = "eth1";

        services.httpd.enable = true;
        services.httpd.adminAddr = "foo@example.org";
        networking.firewall.allowedTCPPorts = [ 80 ];
      };

    # Bob explicitly connects to Carol over UDPInterface.
    bob =
      { ... }:

      {
        imports = [ basicConfig ];

        networking.interfaces.eth1.ipv4.addresses = [
          {
            address = "192.168.0.2";
            prefixLength = 24;
          }
        ];

        services.cjdns = {
          UDPInterface = {
            bind = "0.0.0.0:1024";
            connectTo."192.168.0.1:1024" = {
              password = carolPassword;
              publicKey = carolPubKey;
            };
          };
        };
      };

    # Carol listens on ETHInterface and UDPInterface,
    # but knows neither Alice or Bob.
    carol =
      { ... }:
      {
        imports = [ basicConfig ];

        environment.etc."cjdns.keys".text = ''
          CJDNS_PRIVATE_KEY=${carolKey}
          CJDNS_ADMIN_PASSWORD=FOOBAR
        '';

        networking.interfaces.eth1.ipv4.addresses = [
          {
            address = "192.168.0.1";
            prefixLength = 24;
          }
        ];

        services.cjdns = {
          authorizedPasswords = [ carolPassword ];
          ETHInterface.bind = "eth1";
          UDPInterface.bind = "192.168.0.1:1024";
        };
        networking.firewall.allowedUDPPorts = [ 1024 ];
      };

  };

  testScript = ''
    import re

    start_all()

    alice.wait_for_unit("cjdns.service")
    bob.wait_for_unit("cjdns.service")
    carol.wait_for_unit("cjdns.service")


    def cjdns_ip(machine):
        res = machine.succeed("ip -o -6 addr show dev tun0")
        ip = re.split("\s+|/", res)[3]
        machine.log("has ip {}".format(ip))
        return ip


    alice_ip6 = cjdns_ip(alice)
    bob_ip6 = cjdns_ip(bob)
    carol_ip6 = cjdns_ip(carol)

    # ping a few times each to let the routing table establish itself

    alice.succeed("ping -c 4 {}".format(carol_ip6))
    bob.succeed("ping -c 4 {}".format(carol_ip6))

    carol.succeed("ping -c 4 {}".format(alice_ip6))
    carol.succeed("ping -c 4 {}".format(bob_ip6))

    alice.succeed("ping -c 4 {}".format(bob_ip6))
    bob.succeed("ping -c 4 {}".format(alice_ip6))

    alice.wait_for_unit("httpd.service")

    bob.succeed("curl --fail -g http://[{}]".format(alice_ip6))
  '';
}
@@ -1,35 +1,33 @@
{ pkgs, ... }:
{
  name = "clickhouse";
  meta.maintainers = with pkgs.lib.maintainers; [ ];

  nodes.machine = {
    services.clickhouse.enable = true;
    virtualisation.memorySize = 4096;
  };

  testScript =
    let
      # work around quote/substitution complexity by Nix, Perl, bash and SQL.
      tableDDL = pkgs.writeText "ddl.sql" "CREATE TABLE `demo` (`value` FixedString(10)) engine = MergeTree PARTITION BY value ORDER BY tuple();";
      insertQuery = pkgs.writeText "insert.sql" "INSERT INTO `demo` (`value`) VALUES ('foo');";
      selectQuery = pkgs.writeText "select.sql" "SELECT * from `demo`";
    in
    ''
      machine.start()
      machine.wait_for_unit("clickhouse.service")
      machine.wait_for_open_port(9000)

      machine.succeed(
          "cat ${tableDDL} | clickhouse-client"
      )
      machine.succeed(
          "cat ${insertQuery} | clickhouse-client"
      )
      machine.succeed(
          "cat ${selectQuery} | clickhouse-client | grep foo"
      )
    '';
}
@@ -1,21 +1,19 @@
{ pkgs, ... }:
{
  name = "cloudlog";
  meta = {
    maintainers = with pkgs.lib.maintainers; [ melling ];
  };
  nodes = {
    machine = {
      services.mysql.package = pkgs.mariadb;
      services.cloudlog.enable = true;
    };
  };
  testScript = ''
    start_all()
    machine.wait_for_unit("phpfpm-cloudlog")
    machine.wait_for_open_port(80);
    machine.wait_until_succeeds("curl -s -L --fail http://localhost | grep 'Login - Cloudlog'")
  '';
}
@@ -1,156 +1,154 @@
{ pkgs, lib, ... }:

let
  user = "alice"; # from ./common/user-account.nix
  password = "foobar"; # from ./common/user-account.nix
in
{
  name = "cockpit";
  meta = {
    maintainers = with lib.maintainers; [ lucasew ];
  };
  nodes = {
    server =
      { config, ... }:
      {
        imports = [ ./common/user-account.nix ];
        security.polkit.enable = true;
        users.users.${user} = {
          extraGroups = [ "wheel" ];
        };
        services.cockpit = {
          enable = true;
          port = 7890;
          openFirewall = true;
          allowed-origins = [
            "https://server:${toString config.services.cockpit.port}"
          ];
        };
      };
    client =
      { config, ... }:
      {
        imports = [ ./common/user-account.nix ];
        environment.systemPackages =
          let
            seleniumScript =
              pkgs.writers.writePython3Bin "selenium-script"
                {
                  libraries = with pkgs.python3Packages; [ selenium ];
                }
                ''
                  from selenium import webdriver
                  from selenium.webdriver.common.by import By
                  from selenium.webdriver.firefox.options import Options
                  from selenium.webdriver.support.ui import WebDriverWait
                  from selenium.webdriver.support import expected_conditions as EC
                  from time import sleep


                  def log(msg):
                      from sys import stderr
                      print(f"[*] {msg}", file=stderr)


                  log("Initializing")

                  options = Options()
                  options.add_argument("--headless")

                  service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501
                  driver = webdriver.Firefox(options=options, service=service)

                  driver.implicitly_wait(10)

                  log("Opening homepage")
                  driver.get("https://server:7890")


                  def wait_elem(by, query, timeout=10):
                      wait = WebDriverWait(driver, timeout)
                      wait.until(EC.presence_of_element_located((by, query)))


                  def wait_title_contains(title, timeout=10):
                      wait = WebDriverWait(driver, timeout)
                      wait.until(EC.title_contains(title))


                  def find_element(by, query):
                      return driver.find_element(by, query)


                  def set_value(elem, value):
                      script = 'arguments[0].value = arguments[1]'
                      return driver.execute_script(script, elem, value)


                  log("Waiting for the homepage to load")

                  # cockpit sets initial title as hostname
                  wait_title_contains("server")
                  wait_elem(By.CSS_SELECTOR, 'input#login-user-input')

                  log("Homepage loaded!")

                  log("Filling out username")
                  login_input = find_element(By.CSS_SELECTOR, 'input#login-user-input')
                  set_value(login_input, "${user}")

                  log("Filling out password")
                  password_input = find_element(By.CSS_SELECTOR, 'input#login-password-input')
                  set_value(password_input, "${password}")

                  log("Submitting credentials for login")
                  driver.find_element(By.CSS_SELECTOR, 'button#login-button').click()

                  # driver.implicitly_wait(1)
                  # driver.get("https://server:7890/system")

                  log("Waiting dashboard to load")
                  wait_title_contains("${user}@server")

                  log("Waiting for the frontend to initialize")
                  sleep(1)

                  log("Looking for that banner that tells about limited access")
                  container_iframe = find_element(By.CSS_SELECTOR, 'iframe.container-frame')
                  driver.switch_to.frame(container_iframe)

                  assert "Web console is running in limited access mode" in driver.page_source

                  log("Clicking the sudo button")
                  for button in driver.find_elements(By.TAG_NAME, "button"):
                      if 'admin' in button.text:
                          button.click()
                  driver.switch_to.default_content()

                  log("Checking that /nonexistent is not a thing")
                  assert '/nonexistent' not in driver.page_source
                  assert len(driver.find_elements(By.CSS_SELECTOR, '#machine-reconnect')) == 0

                  driver.close()
                '';
          in
          with pkgs;
          [
            firefox-unwrapped
            geckodriver
            seleniumScript
          ];
      };
  };

  testScript = ''
    start_all()

    server.wait_for_unit("sockets.target")
    server.wait_for_open_port(7890)

    client.succeed("curl -k https://server:7890 -o /dev/stderr")
    print(client.succeed("whoami"))
    client.succeed('PYTHONUNBUFFERED=1 selenium-script')
  '';
}

@@ -1,26 +1,24 @@
{ pkgs, lib, ... }:
{
  name = "code-server";

  nodes = {
    machine =
      { pkgs, ... }:
      {
        services.code-server = {
          enable = true;
          auth = "none";
        };
      };
  };

  testScript = ''
    start_all()
    machine.wait_for_unit("code-server.service")
    machine.wait_for_open_port(4444)
    machine.succeed("curl -k --fail http://localhost:4444", timeout=10)
  '';

  meta.maintainers = [ lib.maintainers.drupol ];
}

@@ -1,25 +1,23 @@
{ pkgs, ... }:
{
  name = "coder";
  meta.maintainers = pkgs.coder.meta.maintainers;

  nodes.machine =
    { pkgs, ... }:
    {
      services.coder = {
        enable = true;
        accessUrl = "http://localhost:3000";
      };
    };

  testScript = ''
    machine.start()
    machine.wait_for_unit("postgresql.service")
    machine.wait_for_unit("coder.service")
    machine.wait_for_open_port(3000)

    machine.succeed("curl --fail http://localhost:3000")
  '';
}

@@ -1,41 +1,39 @@
{ pkgs, ... }:
{
  name = "collectd";
  meta = { };

  nodes.machine =
    { pkgs, lib, ... }:

    {
      services.collectd = {
        enable = true;
        extraConfig = lib.mkBefore ''
          Interval 30
        '';
        plugins = {
          rrdtool = ''
            DataDir "/var/lib/collectd/rrd"
          '';
          load = "";
        };
      };
      environment.systemPackages = [ pkgs.rrdtool ];
    };

  testScript = ''
    machine.wait_for_unit("collectd.service")
    hostname = machine.succeed("hostname").strip()
    file = f"/var/lib/collectd/rrd/{hostname}/load/load.rrd"
    machine.wait_for_file(file);
    machine.succeed(f"rrdinfo {file} | logger")
    # check that this file contains a shortterm metric
    machine.succeed(f"rrdinfo {file} | grep -F 'ds[shortterm].min = '")
    # check that interval was set before the plugins
    machine.succeed(f"rrdinfo {file} | grep -F 'step = 30'")
    # check that there are frequent updates
    machine.succeed(f"cp {file} before")
    machine.wait_until_fails(f"cmp before {file}")
  '';
}

@@ -1,21 +1,19 @@
{ lib, ... }:
{
  name = "commafeed";

  nodes.server = {
    services.commafeed = {
      enable = true;
    };
  };

  testScript = ''
    server.start()
    server.wait_for_unit("commafeed.service")
    server.wait_for_open_port(8082)
    server.succeed("curl --fail --silent http://localhost:8082")
  '';

  meta.maintainers = [ lib.maintainers.raroh73 ];
}

@@ -1,85 +1,83 @@
{ pkgs, lib, ... }:
{
  name = "connman";
  meta = with lib.maintainers; {
    maintainers = [ rnhmjoj ];
  };

  # Router running radvd on VLAN 1
  nodes.router =
    { ... }:
    {
      imports = [ ../modules/profiles/minimal.nix ];

      virtualisation.vlans = [ 1 ];

      boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true;

      networking = {
        useDHCP = false;
        interfaces.eth1.ipv6.addresses = [
          {
            address = "fd12::1";
            prefixLength = 64;
          }
        ];
      };

      services.radvd = {
        enable = true;
        config = ''
          interface eth1 {
            AdvSendAdvert on;
            AdvManagedFlag on;
            AdvOtherConfigFlag on;
            prefix fd12::/64 {
              AdvAutonomous off;
            };
          };
        '';
      };
    };

  # Client running connman, connected to VLAN 1
  nodes.client =
    { ... }:
    {
      virtualisation.vlans = [ 1 ];

      # add a virtual wlan interface
      boot.kernelModules = [ "mac80211_hwsim" ];
      boot.extraModprobeConfig = ''
        options mac80211_hwsim radios=1
      '';

      # Note: the overrides are needed because the wifi is
      # disabled with mkVMOverride in qemu-vm.nix.
      services.connman.enable = lib.mkOverride 0 true;
      services.connman.networkInterfaceBlacklist = [ "eth0" ];
      networking.wireless.enable = lib.mkOverride 0 true;
      networking.wireless.interfaces = [ "wlan0" ];
    };

  testScript = ''
    start_all()

    with subtest("Router is ready"):
        router.wait_for_unit("radvd.service")

    with subtest("Daemons are running"):
        client.wait_for_unit("wpa_supplicant-wlan0.service")
        client.wait_for_unit("connman.service")
        client.wait_until_succeeds("connmanctl state | grep -q ready")

    with subtest("Wired interface is configured"):
        client.wait_until_succeeds("ip -6 route | grep -q fd12::/64")
        client.wait_until_succeeds("ping -c 1 fd12::1")

    with subtest("Can set up a wireless access point"):
        client.succeed("connmanctl enable wifi")
        client.wait_until_succeeds("connmanctl tether wifi on nixos-test reproducibility | grep -q 'Enabled'")
        client.wait_until_succeeds("iw wlan0 info | grep -q nixos-test")
  '';
}

@@ -1,43 +1,41 @@
{ ... }:
{
  name = "consul-template";

  nodes.machine =
    { ... }:
    {
      services.consul-template.instances.example.settings = {
        template = [
          {
            contents = ''
              {{ key "example" }}
            '';
            perms = "0600";
            destination = "/example";
          }
        ];
      };

      services.consul = {
        enable = true;
        extraConfig = {
          server = true;
          bootstrap_expect = 1;
          bind_addr = "127.0.0.1";
        };
      };
    };

  testScript = ''
    machine.wait_for_unit("consul.service")
    machine.wait_for_open_port(8500)

    machine.wait_for_unit("consul-template-example.service")

    machine.wait_until_succeeds('consul kv put example example')

    machine.wait_for_file("/example")
    machine.succeed('grep "example" /example')
  '';
}

@@ -1,267 +1,265 @@
{ pkgs, lib, ... }:

let
  # Settings for both servers and agents
  webUi = true;
  retry_interval = "1s";
  raft_multiplier = 1;

  defaultExtraConfig = {
    inherit retry_interval;
    performance = {
      inherit raft_multiplier;
    };
  };

  allConsensusServerHosts = [
    "192.168.1.1"
    "192.168.1.2"
    "192.168.1.3"
  ];

  allConsensusClientHosts = [
    "192.168.2.1"
    "192.168.2.2"
  ];

  firewallSettings = {
    # See https://www.consul.io/docs/install/ports.html
    allowedTCPPorts = [
      8301
      8302
      8600
      8500
      8300
    ];
    allowedUDPPorts = [
      8301
      8302
      8600
    ];
  };

  client =
    index:
    { pkgs, ... }:
    let
      ip = builtins.elemAt allConsensusClientHosts index;
    in
    {
      environment.systemPackages = [ pkgs.consul ];

      networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
        {
          address = ip;
          prefixLength = 16;
        }
      ];
      networking.firewall = firewallSettings;

      nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "consul" ];

      services.consul = {
        enable = true;
        inherit webUi;
        extraConfig = defaultExtraConfig // {
          server = false;
          retry_join = allConsensusServerHosts;
          bind_addr = ip;
        };
      };
    };

  server =
    index:
    { pkgs, ... }:
    let
      numConsensusServers = builtins.length allConsensusServerHosts;
      thisConsensusServerHost = builtins.elemAt allConsensusServerHosts index;
      ip = thisConsensusServerHost; # since we already use IPs to identify servers
    in
    {
      networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
        {
          address = ip;
          prefixLength = 16;
        }
      ];
      networking.firewall = firewallSettings;

      nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "consul" ];

      services.consul =
        assert builtins.elem thisConsensusServerHost allConsensusServerHosts;
        {
          enable = true;
          inherit webUi;
          extraConfig = defaultExtraConfig // {
            server = true;
            bootstrap_expect = numConsensusServers;
            # Tell Consul that we never intend to drop below this many servers.
            # Ensures to not permanently lose consensus after temporary loss.
            # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040
            autopilot.min_quorum = numConsensusServers;
            retry_join =
              # If there's only 1 node in the network, we allow self-join;
              # otherwise, the node must not try to join itself, and join only the other servers.
              # See https://github.com/hashicorp/consul/issues/2868
              if numConsensusServers == 1 then
                allConsensusServerHosts
              else
                builtins.filter (h: h != thisConsensusServerHost) allConsensusServerHosts;
            bind_addr = ip;
          };
        };
    };
in
{
  name = "consul";

  nodes = {
    server1 = server 0;
    server2 = server 1;
    server3 = server 2;

    client1 = client 0;
    client2 = client 1;
  };

  testScript = ''
    servers = [server1, server2, server3]
    machines = [server1, server2, server3, client1, client2]

    for m in machines:
        m.wait_for_unit("consul.service")


    def wait_for_healthy_servers():
        # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040
        # for why the `Voter` column of `list-peers` has that info.
        # TODO: The `grep true` relies on the fact that currently in
        # the output like
        #     # consul operator raft list-peers
        #     Node     ID   Address           State     Voter  RaftProtocol
        #     server3  ...  192.168.1.3:8300  leader    true   3
        #     server2  ...  192.168.1.2:8300  follower  true   3
        #     server1  ...  192.168.1.1:8300  follower  false  3
        # `Voter`is the only boolean column.
        # Change this to the more reliable way to be defined by
        # https://github.com/hashicorp/consul/issues/8118
        # once that ticket is closed.
        for m in machines:
            m.wait_until_succeeds(
                "[ $(consul operator raft list-peers | grep true | wc -l) == 3 ]"
            )


    def wait_for_all_machines_alive():
        """
        Note that Serf-"alive" does not mean "Raft"-healthy;
        see `wait_for_healthy_servers()` for that instead.
        """
        for m in machines:
            m.wait_until_succeeds("[ $(consul members | grep -o alive | wc -l) == 5 ]")


    wait_for_healthy_servers()
    # Also wait for clients to be alive.
    wait_for_all_machines_alive()

    client1.succeed("consul kv put testkey 42")
    client2.succeed("[ $(consul kv get testkey) == 42 ]")


    def rolling_restart_test(proper_rolling_procedure=True):
        """
        Tests that the cluster can tolearate failures of any single server,
        following the recommended rolling upgrade procedure from
        https://www.consul.io/docs/upgrading#standard-upgrades.

        Optionally, `proper_rolling_procedure=False` can be given
        to wait only for each server to be back `Healthy`, not `Stable`
        in the Raft consensus, see Consul setting `ServerStabilizationTime` and
        https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040.
        """

        for server in servers:
            server.block()
            server.systemctl("stop consul")

            # Make sure the stopped peer is recognized as being down
            client1.wait_until_succeeds(
                f"[ $(consul members | grep {server.name} | grep -o -E 'failed|left' | wc -l) == 1 ]"
            )

            # For each client, wait until they have connection again
            # using `kv get -recurse` before issuing commands.
            client1.wait_until_succeeds("consul kv get -recurse")
            client2.wait_until_succeeds("consul kv get -recurse")

            # Do some consul actions while one server is down.
            client1.succeed("consul kv put testkey 43")
            client2.succeed("[ $(consul kv get testkey) == 43 ]")
            client2.succeed("consul kv delete testkey")

            server.unblock()
            server.systemctl("start consul")

            if proper_rolling_procedure:
                # Wait for recovery.
                wait_for_healthy_servers()
            else:
                # NOT proper rolling upgrade procedure, see above.
                wait_for_all_machines_alive()

            # Wait for client connections.
            client1.wait_until_succeeds("consul kv get -recurse")
            client2.wait_until_succeeds("consul kv get -recurse")

            # Do some consul actions with server back up.
            client1.succeed("consul kv put testkey 44")
            client2.succeed("[ $(consul kv get testkey) == 44 ]")
            client2.succeed("consul kv delete testkey")


    def all_servers_crash_simultaneously_test():
        """
        Tests that the cluster will eventually come back after all
        servers crash simultaneously.
        """

        for server in servers:
            server.block()
            server.systemctl("stop --no-block consul")

        for server in servers:
            # --no-block is async, so ensure it has been stopped by now
            server.wait_until_fails("systemctl is-active --quiet consul")
            server.unblock()
            server.systemctl("start consul")

        # Wait for recovery.
        wait_for_healthy_servers()

        # Wait for client connections.
        client1.wait_until_succeeds("consul kv get -recurse")
        client2.wait_until_succeeds("consul kv get -recurse")

        # Do some consul actions with servers back up.
        client1.succeed("consul kv put testkey 44")
        client2.succeed("[ $(consul kv get testkey) == 44 ]")
        client2.succeed("consul kv delete testkey")


    # Run the tests.

    print("rolling_restart_test()")
    rolling_restart_test()

    print("all_servers_crash_simultaneously_test()")
    all_servers_crash_simultaneously_test()

    print("rolling_restart_test(proper_rolling_procedure=False)")
    rolling_restart_test(proper_rolling_procedure=False)
  '';
}

@@ -5,110 +5,108 @@ let
  containerIp6 = "fc00::2/7";
in

{ pkgs, lib, ... }:
{
  name = "containers-bridge";
  meta = {
    maintainers = with lib.maintainers; [
      aristid
      aszlig
      kampfschlaefer
    ];
  };

  nodes.machine =
    { pkgs, ... }:
    {
      imports = [ ../modules/installer/cd-dvd/channel.nix ];
      virtualisation.writableStore = true;

      networking.bridges = {
        br0 = {
          interfaces = [ ];
        };
      };
      networking.interfaces = {
        br0 = {
          ipv4.addresses = [
            {
              address = hostIp;
              prefixLength = 24;
            }
          ];
          ipv6.addresses = [
            {
              address = hostIp6;
              prefixLength = 7;
            }
          ];
        };
      };

      containers.webserver = {
        autoStart = true;
        privateNetwork = true;
        hostBridge = "br0";
        localAddress = containerIp;
        localAddress6 = containerIp6;
        config = {
          services.httpd.enable = true;
          services.httpd.adminAddr = "foo@example.org";
          networking.firewall.allowedTCPPorts = [ 80 ];
        };
      };

      containers.web-noip = {
        autoStart = true;
        privateNetwork = true;
        hostBridge = "br0";
        config = {
          services.httpd.enable = true;
          services.httpd.adminAddr = "foo@example.org";
          networking.firewall.allowedTCPPorts = [ 80 ];
        };
      };

      virtualisation.additionalPaths = [ pkgs.stdenv ];
    };

  testScript = ''
    machine.wait_for_unit("default.target")
    assert "webserver" in machine.succeed("nixos-container list")

    with subtest("Start the webserver container"):
        assert "up" in machine.succeed("nixos-container status webserver")

    with subtest("Bridges exist inside containers"):
        machine.succeed(
            "nixos-container run webserver -- ip link show eth0",
            "nixos-container run web-noip -- ip link show eth0",
        )

    ip = "${containerIp}".split("/")[0]
    machine.succeed(f"ping -n -c 1 {ip}")
    machine.succeed(f"curl --fail http://{ip}/ > /dev/null")

    ip6 = "${containerIp6}".split("/")[0]
    machine.succeed(f"ping -n -c 1 {ip6}")
    machine.succeed(f"curl --fail http://[{ip6}]/ > /dev/null")

    with subtest(
        "nixos-container show-ip works in case of an ipv4 address "
        + "with subnetmask in CIDR notation."
    ):
        result = machine.succeed("nixos-container show-ip webserver").rstrip()
        assert result == ip

    with subtest("Stop the container"):
        machine.succeed("nixos-container stop webserver")
        machine.fail(
            f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null",
            f"curl --fail --connect-timeout 2 http://[{ip6}]/ > /dev/null",
        )

    # Destroying a declarative container should fail.
    machine.fail("nixos-container destroy webserver")
  '';
}

@@ -1,48 +1,46 @@
{ pkgs, lib, ... }:
let

  customPkgs = pkgs.appendOverlays [
    (self: super: {
      hello = super.hello.overrideAttrs (old: {
        name = "custom-hello";
      });
    })
  ];

in
{
  name = "containers-custom-pkgs";
  meta = {
    maintainers = with lib.maintainers; [ erikarvstedt ];
  };

  nodes.machine =
    { config, ... }:
    {
      assertions =
        let
          helloName = (builtins.head config.containers.test.config.system.extraDependencies).name;
        in
        [
          {
            assertion = helloName == "custom-hello";
            message = "Unexpected value: ${helloName}";
          }
        ];

      containers.test = {
        autoStart = true;
        config =
          { pkgs, config, ... }:
          {
            nixpkgs.pkgs = customPkgs;
            system.extraDependencies = [ pkgs.hello ];
          };
      };
    };

  # This test only consists of evaluating the test machine
  testScript = "pass";
}

@@ -1,59 +1,57 @@
{ pkgs, lib, ... }:
{
  name = "containers-ephemeral";
  meta = {
    maintainers = with lib.maintainers; [ patryk27 ];
  };

  nodes.machine =
    { pkgs, ... }:
    {
      virtualisation.writableStore = true;

      containers.webserver = {
        ephemeral = true;
        privateNetwork = true;
        hostAddress = "10.231.136.1";
        localAddress = "10.231.136.2";
        config = {
          services.nginx = {
            enable = true;
            virtualHosts.localhost = {
              root = pkgs.runCommand "localhost" { } ''
                mkdir "$out"
                echo hello world > "$out/index.html"
              '';
            };
          };
          networking.firewall.allowedTCPPorts = [ 80 ];
        };
      };
    };

  testScript = ''
    assert "webserver" in machine.succeed("nixos-container list")

    machine.succeed("nixos-container start webserver")

    with subtest("Container got its own root folder"):
        machine.succeed("ls /run/nixos-containers/webserver")

    with subtest("Container persistent directory is not created"):
        machine.fail("ls /var/lib/nixos-containers/webserver")

    # Since "start" returns after the container has reached
    # multi-user.target, we should now be able to access it.
    ip = machine.succeed("nixos-container show-ip webserver").rstrip()
    machine.succeed(f"ping -n -c1 {ip}")
    machine.succeed(f"curl --fail http://{ip}/ > /dev/null")

    with subtest("Stop the container"):
        machine.succeed("nixos-container stop webserver")
        machine.fail(f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null")

    with subtest("Container's root folder was removed"):
        machine.fail("ls /run/nixos-containers/webserver")
  '';
}

@@ -1,115 +1,113 @@
{ pkgs, lib, ... }:
{
  name = "containers-extra_veth";
  meta = {
    maintainers = with lib.maintainers; [ kampfschlaefer ];
  };

  nodes.machine =
    { pkgs, ... }:
    {
      imports = [ ../modules/installer/cd-dvd/channel.nix ];
      virtualisation.writableStore = true;
      virtualisation.vlans = [ ];

      networking.useDHCP = false;
      networking.bridges = {
        br0 = {
          interfaces = [ ];
        };
        br1 = {
          interfaces = [ ];
        };
      };
      networking.interfaces = {
        br0 = {
          ipv4.addresses = [
            {
              address = "192.168.0.1";
              prefixLength = 24;
            }
          ];
          ipv6.addresses = [
            {
              address = "fc00::1";
              prefixLength = 7;
            }
          ];
        };
        br1 = {
          ipv4.addresses = [
            {
              address = "192.168.1.1";
              prefixLength = 24;
            }
          ];
        };
      };

      containers.webserver = {
        autoStart = true;
        privateNetwork = true;
        hostBridge = "br0";
        localAddress = "192.168.0.100/24";
        localAddress6 = "fc00::2/7";
        extraVeths = {
          veth1 = {
            hostBridge = "br1";
            localAddress = "192.168.1.100/24";
          };
          veth2 = {
            hostAddress = "192.168.2.1";
            localAddress = "192.168.2.100";
          };
        };
        config = {
          networking.firewall.allowedTCPPorts = [ 80 ];
        };
      };

      virtualisation.additionalPaths = [ pkgs.stdenv ];
    };

  testScript = ''
    machine.wait_for_unit("default.target")
    assert "webserver" in machine.succeed("nixos-container list")

    with subtest("Status of the webserver container is up"):
        assert "up" in machine.succeed("nixos-container status webserver")

    with subtest("Ensure that the veths are inside the container"):
        assert "state UP" in machine.succeed(
            "nixos-container run webserver -- ip link show veth1"
        )
        assert "state UP" in machine.succeed(
            "nixos-container run webserver -- ip link show veth2"
        )

    with subtest("Ensure the presence of the extra veths"):
        assert "state UP" in machine.succeed("ip link show veth1")
        assert "state UP" in machine.succeed("ip link show veth2")

    with subtest("Ensure the veth1 is part of br1 on the host"):
        assert "master br1" in machine.succeed("ip link show veth1")

    with subtest("Ping on main veth"):
        machine.succeed("ping -n -c 1 192.168.0.100")
        machine.succeed("ping -n -c 1 fc00::2")

    with subtest("Ping on the first extra veth"):
        machine.succeed("ping -n -c 1 192.168.1.100 >&2")

    with subtest("Ping on the second extra veth"):
        machine.succeed("ping -n -c 1 192.168.2.100 >&2")

    with subtest("Container can be stopped"):
        machine.succeed("nixos-container stop webserver")
        machine.fail("ping -n -c 1 192.168.1.100 >&2")
        machine.fail("ping -n -c 1 192.168.2.100 >&2")

    with subtest("Destroying a declarative container should fail"):
        machine.fail("nixos-container destroy webserver")
  '';
}

@@ -1,55 +1,53 @@
{ pkgs, lib, ... }:
{
  name = "containers-hosts";
  meta = {
    maintainers = with lib.maintainers; [ montag451 ];
  };

  nodes.machine =
    { lib, ... }:
    {
      virtualisation.vlans = [ ];

      networking.bridges.br0.interfaces = [ ];
      networking.interfaces.br0.ipv4.addresses = [
        {
          address = "10.11.0.254";
          prefixLength = 24;
        }
      ];

      # Force /etc/hosts to be the only source for host name resolution
      environment.etc."nsswitch.conf".text = lib.mkForce ''
        hosts: files
      '';

      containers.simple = {
        autoStart = true;
        privateNetwork = true;
        localAddress = "10.10.0.1";
        hostAddress = "10.10.0.254";

        config = { };
      };

      containers.netmask = {
        autoStart = true;
        privateNetwork = true;
        hostBridge = "br0";
        localAddress = "10.11.0.1/24";

        config = { };
      };
    };

  testScript = ''
    start_all()
    machine.wait_for_unit("default.target")

    with subtest("Ping the containers using the entries added in /etc/hosts"):
        for host in "simple.containers", "netmask.containers":
            machine.succeed(f"ping -n -c 1 {host}")
  '';
}
@ -1,197 +1,195 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-imperative";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [
|
||||
aristid
|
||||
aszlig
|
||||
kampfschlaefer
|
||||
];
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-imperative";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [
|
||||
aristid
|
||||
aszlig
|
||||
kampfschlaefer
|
||||
];
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [ ../modules/installer/cd-dvd/channel.nix ];
|
||||
|
||||
# XXX: Sandbox setup fails while trying to hardlink files from the host's
|
||||
# store file system into the prepared chroot directory.
|
||||
nix.settings.sandbox = false;
|
||||
nix.settings.substituters = [ ]; # don't try to access cache.nixos.org
|
||||
|
||||
virtualisation.memorySize = 2048;
|
||||
virtualisation.writableStore = true;
|
||||
# Make sure we always have all the required dependencies for creating a
|
||||
# container available within the VM, because we don't have network access.
|
||||
virtualisation.additionalPaths =
|
||||
let
|
||||
emptyContainer = import ../lib/eval-config.nix {
|
||||
modules = lib.singleton {
|
||||
nixpkgs = { inherit (config.nixpkgs) localSystem; };
|
||||
|
||||
containers.foo.config = { };
|
||||
};
|
||||
|
||||
# The system is inherited from the host above.
|
||||
# Set it to null, to remove the "legacy" entrypoint's non-hermetic default.
|
||||
system = null;
|
||||
};
|
||||
in
|
||||
with pkgs;
|
||||
[
|
||||
stdenv
|
||||
stdenvNoCC
|
||||
emptyContainer.config.containers.foo.path
|
||||
libxslt
|
||||
desktop-file-utils
|
||||
texinfo
|
||||
docbook5
|
||||
libxml2
|
||||
docbook_xsl_ns
|
||||
xorg.lndir
|
||||
documentation-highlighter
|
||||
perlPackages.ConfigIniFiles
|
||||
];
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [ ../modules/installer/cd-dvd/channel.nix ];
|
||||
|
||||
# XXX: Sandbox setup fails while trying to hardlink files from the host's
|
||||
# store file system into the prepared chroot directory.
|
||||
nix.settings.sandbox = false;
|
||||
nix.settings.substituters = [ ]; # don't try to access cache.nixos.org
|
||||
|
||||
virtualisation.memorySize = 2048;
|
||||
virtualisation.writableStore = true;
|
||||
# Make sure we always have all the required dependencies for creating a
|
||||
# container available within the VM, because we don't have network access.
|
||||
virtualisation.additionalPaths =
|
||||
let
|
||||
emptyContainer = import ../lib/eval-config.nix {
|
||||
modules = lib.singleton {
|
||||
nixpkgs = { inherit (config.nixpkgs) localSystem; };
|
||||
|
||||
containers.foo.config = { };
|
||||
};
|
||||
|
||||
# The system is inherited from the host above.
|
||||
# Set it to null, to remove the "legacy" entrypoint's non-hermetic default.
|
||||
system = null;
|
||||
};
|
||||
in
|
||||
with pkgs;
|
||||
[
|
||||
stdenv
|
||||
stdenvNoCC
|
||||
emptyContainer.config.containers.foo.path
|
||||
libxslt
|
||||
desktop-file-utils
|
||||
texinfo
|
||||
docbook5
|
||||
libxml2
|
||||
docbook_xsl_ns
|
||||
xorg.lndir
|
||||
documentation-highlighter
|
||||
perlPackages.ConfigIniFiles
|
||||
];
|
||||
};
|
||||
|
||||
testScript =
|
||||
let
|
||||
tmpfilesContainerConfig = pkgs.writeText "container-config-tmpfiles" ''
|
||||
{
|
||||
systemd.tmpfiles.rules = [ "d /foo - - - - -" ];
|
||||
systemd.services.foo = {
|
||||
serviceConfig.Type = "oneshot";
|
||||
script = "ls -al /foo";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
}
|
||||
'';
|
||||
brokenCfg = pkgs.writeText "broken.nix" ''
|
||||
{
|
||||
assertions = [
|
||||
{ assertion = false;
|
||||
message = "I never evaluate";
|
||||
}
|
||||
];
|
||||
}
|
||||
'';
|
||||
in
|
||||
''
|
||||
with subtest("Make sure we have a NixOS tree (required by ‘nixos-container create’)"):
|
||||
machine.succeed("PAGER=cat nix-env -qa -A nixos.hello >&2")
|
||||
|
||||
id1, id2 = None, None
|
||||
|
||||
with subtest("Create some containers imperatively"):
|
||||
id1 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip()
|
||||
machine.log(f"created container {id1}")
|
||||
|
||||
id2 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip()
|
||||
machine.log(f"created container {id2}")
|
||||
|
||||
assert id1 != id2
|
||||
|
||||
with subtest(f"Put the root of {id2} into a bind mount"):
|
||||
machine.succeed(
|
||||
f"mv /var/lib/nixos-containers/{id2} /id2-bindmount",
|
||||
f"mount --bind /id2-bindmount /var/lib/nixos-containers/{id1}",
|
||||
)
|
||||
|
||||
ip1 = machine.succeed(f"nixos-container show-ip {id1}").rstrip()
|
||||
ip2 = machine.succeed(f"nixos-container show-ip {id2}").rstrip()
|
||||
assert ip1 != ip2
|
||||
|
||||
with subtest(
|
||||
"Create a directory and a file we can later check if it still exists "
|
||||
+ "after destruction of the container"
|
||||
):
|
||||
machine.succeed("mkdir /nested-bindmount")
|
||||
machine.succeed("echo important data > /nested-bindmount/dummy")
|
||||
|
||||
with subtest(
|
||||
"Create a directory with a dummy file and bind-mount it into both containers."
|
||||
):
|
||||
for id in id1, id2:
|
||||
important_path = f"/var/lib/nixos-containers/{id}/very/important/data"
|
||||
machine.succeed(
|
||||
f"mkdir -p {important_path}",
|
||||
f"mount --bind /nested-bindmount {important_path}",
|
||||
)
|
||||
|
||||
with subtest("Start one of them"):
|
||||
machine.succeed(f"nixos-container start {id1}")
|
||||
|
||||
with subtest("Execute commands via the root shell"):
|
||||
assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname")
|
||||
|
||||
with subtest("Execute a nix command via the root shell. (regression test for #40355)"):
|
||||
machine.succeed(
|
||||
f"nixos-container run {id1} -- nix-instantiate -E "
|
||||
+ '\'derivation { name = "empty"; builder = "false"; system = "false"; }\' '
|
||||
)
|
||||
|
||||
with subtest("Stop and start (regression test for #4989)"):
|
||||
machine.succeed(f"nixos-container stop {id1}")
|
||||
machine.succeed(f"nixos-container start {id1}")
|
||||
|
||||
# clear serial backlog for next tests
|
||||
machine.succeed("logger eat console backlog 3ea46eb2-7f82-4f70-b810-3f00e3dd4c4d")
|
||||
machine.wait_for_console_text(
|
||||
"eat console backlog 3ea46eb2-7f82-4f70-b810-3f00e3dd4c4d"
|
||||
)
|
||||
|
||||
with subtest("Stop a container early"):
|
||||
machine.succeed(f"nixos-container stop {id1}")
|
||||
machine.succeed(f"nixos-container start {id1} >&2 &")
|
||||
machine.wait_for_console_text("Stage 2")
|
||||
machine.succeed(f"nixos-container stop {id1}")
|
||||
machine.wait_for_console_text(f"Container {id1} exited successfully")
|
||||
machine.succeed(f"nixos-container start {id1}")
|
||||
|
||||
with subtest("Stop a container without machined (regression test for #109695)"):
|
||||
machine.systemctl("stop systemd-machined")
|
||||
machine.succeed(f"nixos-container stop {id1}")
|
||||
machine.wait_for_console_text(f"Container {id1} has been shut down")
|
||||
machine.succeed(f"nixos-container start {id1}")
|
||||
|
||||
with subtest("tmpfiles are present"):
|
||||
machine.log("creating container tmpfiles")
|
||||
machine.succeed(
|
||||
"nixos-container create tmpfiles --config-file ${tmpfilesContainerConfig}"
|
||||
)
|
||||
machine.log("created, starting…")
|
||||
machine.succeed("nixos-container start tmpfiles")
|
||||
machine.log("done starting, investigating…")
|
||||
machine.succeed(
|
||||
"echo $(nixos-container run tmpfiles -- systemctl is-active foo.service) | grep -q active;"
|
||||
)
|
||||
machine.succeed("nixos-container destroy tmpfiles")
|
||||
|
||||
with subtest("Execute commands via the root shell"):
|
||||
assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname")
|
||||
|
||||
with subtest("Destroy the containers"):
|
||||
for id in id1, id2:
|
||||
machine.succeed(f"nixos-container destroy {id}")
|
||||
|
||||
with subtest("Check whether destruction of any container has killed important data"):
|
||||
machine.succeed("grep -qF 'important data' /nested-bindmount/dummy")
|
||||
|
||||
with subtest("Ensure that the container path is gone"):
|
||||
print(machine.succeed("ls -lsa /var/lib/nixos-containers"))
|
||||
machine.succeed(f"test ! -e /var/lib/nixos-containers/{id1}")
|
||||
|
||||
with subtest("Ensure that a failed container creation doesn'leave any state"):
|
||||
machine.fail(
|
||||
"nixos-container create b0rk --config-file ${brokenCfg}"
|
||||
)
|
||||
machine.succeed("test ! -e /var/lib/nixos-containers/b0rk")
'';
}
|
|
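The hunks that follow all make the same mechanical change: the import ./make-test-python.nix ( wrapper and its closing ) are dropped, so each file evaluates to a plain test module. As a hedged sketch (the attribute name and path are illustrative, not taken from this commit), such a module is then presumably referenced from the central test list roughly like this:

    # Hypothetical entry in nixos/tests/all-tests.nix after the conversion:
    # runTest imports the module directly, instead of handleTest wrapping a
    # make-test-python.nix call.
    containers-ipv4-ipv6 = runTest ./containers-ipv4-ipv6.nix;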
@@ -12,71 +12,69 @@ let
  };
in
{ pkgs, lib, ... }:
{
  name = "containers-ipv4-ipv6";
  meta = {
    maintainers = with lib.maintainers; [
      aristid
      aszlig
      kampfschlaefer
    ];
  };

  nodes.machine =
    { pkgs, ... }:
    {
      virtualisation.writableStore = true;

      containers.webserver4 = webserverFor "10.231.136.1" "10.231.136.2";
      containers.webserver6 = webserverFor "fc00::2" "fc00::1";
      virtualisation.additionalPaths = [ pkgs.stdenv ];
    };

  testScript =
    { nodes, ... }:
    ''
      import time


      def curl_host(ip):
          # put [] around ipv6 addresses for curl
          host = ip if ":" not in ip else f"[{ip}]"
          return f"curl --fail --connect-timeout 2 http://{host}/ > /dev/null"


      def get_ip(container):
          # need to distinguish because show-ip won't work for ipv6
          if container == "webserver4":
              ip = machine.succeed(f"nixos-container show-ip {container}").rstrip()
              assert ip == "${nodes.machine.config.containers.webserver4.localAddress}"
              return ip
          return "${nodes.machine.config.containers.webserver6.localAddress}"


      for container in "webserver4", "webserver6":
          assert container in machine.succeed("nixos-container list")

          with subtest(f"Start container {container}"):
              machine.succeed(f"nixos-container start {container}")
              # wait 2s for container to start and network to be up
              time.sleep(2)

          # Since "start" returns after the container has reached
          # multi-user.target, we should now be able to access it.

          ip = get_ip(container)
          with subtest(f"{container} reacts to pings and HTTP requests"):
              machine.succeed(f"ping -n -c1 {ip}")
              machine.succeed(curl_host(ip))

          with subtest(f"Stop container {container}"):
              machine.succeed(f"nixos-container stop {container}")
              machine.fail(curl_host(ip))

          # Destroying a declarative container should fail.
          machine.fail(f"nixos-container destroy {container}")
    '';
}
|
|
|
@@ -4,97 +4,95 @@ let
|
|||
containerIp2 = "192.168.1.254";
|
||||
in
|
||||
|
||||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-macvlans";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ montag451 ];
|
||||
};
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-macvlans";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ montag451 ];
|
||||
};
|
||||
|
||||
nodes = {
|
||||
nodes = {
|
||||
|
||||
machine1 =
|
||||
{ lib, ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
machine1 =
|
||||
{ lib, ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
|
||||
# To be able to ping containers from the host, it is necessary
|
||||
# to create a macvlan on the host on the VLAN 1 network.
|
||||
networking.macvlans.mv-eth1-host = {
|
||||
interface = "eth1";
|
||||
mode = "bridge";
|
||||
};
|
||||
networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ ];
|
||||
networking.interfaces.mv-eth1-host = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
# To be able to ping containers from the host, it is necessary
|
||||
# to create a macvlan on the host on the VLAN 1 network.
|
||||
networking.macvlans.mv-eth1-host = {
|
||||
interface = "eth1";
|
||||
mode = "bridge";
|
||||
};
|
||||
networking.interfaces.eth1.ipv4.addresses = lib.mkForce [ ];
|
||||
networking.interfaces.mv-eth1-host = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
containers.test1 = {
|
||||
autoStart = true;
|
||||
macvlans = [ "eth1" ];
|
||||
containers.test1 = {
|
||||
autoStart = true;
|
||||
macvlans = [ "eth1" ];
|
||||
|
||||
config = {
|
||||
networking.interfaces.mv-eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = containerIp1;
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
containers.test2 = {
|
||||
autoStart = true;
|
||||
macvlans = [ "eth1" ];
|
||||
|
||||
config = {
|
||||
networking.interfaces.mv-eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = containerIp2;
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
config = {
|
||||
networking.interfaces.mv-eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = containerIp1;
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
machine2 =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
containers.test2 = {
|
||||
autoStart = true;
|
||||
macvlans = [ "eth1" ];
|
||||
|
||||
config = {
|
||||
networking.interfaces.mv-eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = containerIp2;
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
machine2 =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine1.wait_for_unit("default.target")
|
||||
machine2.wait_for_unit("default.target")
|
||||
};
|
||||
|
||||
with subtest(
|
||||
"Ping between containers to check that macvlans are created in bridge mode"
|
||||
):
|
||||
machine1.succeed("nixos-container run test1 -- ping -n -c 1 ${containerIp2}")
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine1.wait_for_unit("default.target")
|
||||
machine2.wait_for_unit("default.target")
|
||||
|
||||
with subtest("Ping containers from the host (machine1)"):
|
||||
machine1.succeed("ping -n -c 1 ${containerIp1}")
|
||||
machine1.succeed("ping -n -c 1 ${containerIp2}")
|
||||
with subtest(
|
||||
"Ping between containers to check that macvlans are created in bridge mode"
|
||||
):
|
||||
machine1.succeed("nixos-container run test1 -- ping -n -c 1 ${containerIp2}")
|
||||
|
||||
with subtest(
|
||||
"Ping containers from the second machine to check that containers are reachable from the outside"
|
||||
):
|
||||
machine2.succeed("ping -n -c 1 ${containerIp1}")
|
||||
machine2.succeed("ping -n -c 1 ${containerIp2}")
|
||||
'';
|
||||
}
|
||||
)
|
||||
with subtest("Ping containers from the host (machine1)"):
|
||||
machine1.succeed("ping -n -c 1 ${containerIp1}")
|
||||
machine1.succeed("ping -n -c 1 ${containerIp2}")
|
||||
|
||||
with subtest(
|
||||
"Ping containers from the second machine to check that containers are reachable from the outside"
|
||||
):
|
||||
machine2.succeed("ping -n -c 1 ${containerIp1}")
|
||||
machine2.succeed("ping -n -c 1 ${containerIp2}")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,44 +1,42 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-names";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ patryk27 ];
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-names";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ patryk27 ];
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
{
|
||||
# We're using the newest kernel, so that we can test containers with long names.
|
||||
# Please see https://github.com/NixOS/nixpkgs/issues/38509 for details.
|
||||
boot.kernelPackages = pkgs.linuxPackages_latest;
|
||||
|
||||
containers =
|
||||
let
|
||||
container = subnet: {
|
||||
autoStart = true;
|
||||
privateNetwork = true;
|
||||
hostAddress = "192.168.${subnet}.1";
|
||||
localAddress = "192.168.${subnet}.2";
|
||||
config = { };
|
||||
};
|
||||
|
||||
in
|
||||
{
|
||||
first = container "1";
|
||||
second = container "2";
|
||||
really-long-name = container "3";
|
||||
really-long-long-name-2 = container "4";
|
||||
};
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
{
|
||||
# We're using the newest kernel, so that we can test containers with long names.
|
||||
# Please see https://github.com/NixOS/nixpkgs/issues/38509 for details.
|
||||
boot.kernelPackages = pkgs.linuxPackages_latest;
|
||||
testScript = ''
|
||||
machine.wait_for_unit("default.target")
|
||||
|
||||
containers =
|
||||
let
|
||||
container = subnet: {
|
||||
autoStart = true;
|
||||
privateNetwork = true;
|
||||
hostAddress = "192.168.${subnet}.1";
|
||||
localAddress = "192.168.${subnet}.2";
|
||||
config = { };
|
||||
};
|
||||
|
||||
in
|
||||
{
|
||||
first = container "1";
|
||||
second = container "2";
|
||||
really-long-name = container "3";
|
||||
really-long-long-name-2 = container "4";
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
machine.wait_for_unit("default.target")
|
||||
|
||||
machine.succeed("ip link show | grep ve-first")
|
||||
machine.succeed("ip link show | grep ve-second")
|
||||
machine.succeed("ip link show | grep ve-really-lFYWO")
|
||||
machine.succeed("ip link show | grep ve-really-l3QgY")
|
||||
'';
|
||||
}
|
||||
)
|
||||
machine.succeed("ip link show | grep ve-first")
|
||||
machine.succeed("ip link show | grep ve-second")
|
||||
machine.succeed("ip link show | grep ve-really-lFYWO")
|
||||
machine.succeed("ip link show | grep ve-really-l3QgY")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,36 +1,34 @@
# Test for NixOS' container nesting.

{ pkgs, ... }:
{
  name = "nested";

  meta = with pkgs.lib.maintainers; {
    maintainers = [ sorki ];
  };

  nodes.machine =
    { lib, ... }:
    let
      makeNested = subConf: {
        containers.nested = {
          autoStart = true;
          privateNetwork = true;
          config = subConf;
        };
      };
    in
    makeNested (makeNested { });

  testScript = ''
    machine.start()
    machine.wait_for_unit("container@nested.service")
    machine.succeed("systemd-run --pty --machine=nested -- machinectl list | grep nested")
    print(
        machine.succeed(
            "systemd-run --pty --machine=nested -- systemd-run --pty --machine=nested -- systemctl status"
        )
    )
  '';
}
|
|
@@ -1,153 +1,151 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-physical_interfaces";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ kampfschlaefer ];
|
||||
};
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-physical_interfaces";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ kampfschlaefer ];
|
||||
};
|
||||
|
||||
nodes = {
|
||||
server =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
nodes = {
|
||||
server =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
|
||||
containers.server = {
|
||||
privateNetwork = true;
|
||||
interfaces = [ "eth1" ];
|
||||
containers.server = {
|
||||
privateNetwork = true;
|
||||
interfaces = [ "eth1" ];
|
||||
|
||||
config = {
|
||||
networking.interfaces.eth1.ipv4.addresses = [
|
||||
{
|
||||
address = "10.10.0.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.firewall.enable = false;
|
||||
};
|
||||
config = {
|
||||
networking.interfaces.eth1.ipv4.addresses = [
|
||||
{
|
||||
address = "10.10.0.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.firewall.enable = false;
|
||||
};
|
||||
};
|
||||
bridged =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
};
|
||||
bridged =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
|
||||
containers.bridged = {
|
||||
privateNetwork = true;
|
||||
interfaces = [ "eth1" ];
|
||||
containers.bridged = {
|
||||
privateNetwork = true;
|
||||
interfaces = [ "eth1" ];
|
||||
|
||||
config = {
|
||||
networking.bridges.br0.interfaces = [ "eth1" ];
|
||||
networking.interfaces.br0.ipv4.addresses = [
|
||||
{
|
||||
address = "10.10.0.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.firewall.enable = false;
|
||||
};
|
||||
config = {
|
||||
networking.bridges.br0.interfaces = [ "eth1" ];
|
||||
networking.interfaces.br0.ipv4.addresses = [
|
||||
{
|
||||
address = "10.10.0.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.firewall.enable = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
bonded =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
bonded =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
|
||||
containers.bonded = {
|
||||
privateNetwork = true;
|
||||
interfaces = [ "eth1" ];
|
||||
containers.bonded = {
|
||||
privateNetwork = true;
|
||||
interfaces = [ "eth1" ];
|
||||
|
||||
config = {
|
||||
networking.bonds.bond0 = {
|
||||
interfaces = [ "eth1" ];
|
||||
driverOptions.mode = "active-backup";
|
||||
};
|
||||
networking.interfaces.bond0.ipv4.addresses = [
|
||||
{
|
||||
address = "10.10.0.3";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.firewall.enable = false;
|
||||
config = {
|
||||
networking.bonds.bond0 = {
|
||||
interfaces = [ "eth1" ];
|
||||
driverOptions.mode = "active-backup";
|
||||
};
|
||||
networking.interfaces.bond0.ipv4.addresses = [
|
||||
{
|
||||
address = "10.10.0.3";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.firewall.enable = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
bridgedbond =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
bridgedbond =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 1 ];
|
||||
|
||||
containers.bridgedbond = {
|
||||
privateNetwork = true;
|
||||
interfaces = [ "eth1" ];
|
||||
containers.bridgedbond = {
|
||||
privateNetwork = true;
|
||||
interfaces = [ "eth1" ];
|
||||
|
||||
config = {
|
||||
networking.bonds.bond0 = {
|
||||
interfaces = [ "eth1" ];
|
||||
driverOptions.mode = "active-backup";
|
||||
};
|
||||
networking.bridges.br0.interfaces = [ "bond0" ];
|
||||
networking.interfaces.br0.ipv4.addresses = [
|
||||
{
|
||||
address = "10.10.0.4";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.firewall.enable = false;
|
||||
config = {
|
||||
networking.bonds.bond0 = {
|
||||
interfaces = [ "eth1" ];
|
||||
driverOptions.mode = "active-backup";
|
||||
};
|
||||
networking.bridges.br0.interfaces = [ "bond0" ];
|
||||
networking.interfaces.br0.ipv4.addresses = [
|
||||
{
|
||||
address = "10.10.0.4";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.firewall.enable = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
testScript = ''
|
||||
start_all()
|
||||
|
||||
with subtest("Prepare server"):
|
||||
server.wait_for_unit("default.target")
|
||||
server.succeed("ip link show dev eth1 >&2")
|
||||
with subtest("Prepare server"):
|
||||
server.wait_for_unit("default.target")
|
||||
server.succeed("ip link show dev eth1 >&2")
|
||||
|
||||
with subtest("Simple physical interface is up"):
|
||||
server.succeed("nixos-container start server")
|
||||
server.wait_for_unit("container@server")
|
||||
server.succeed(
|
||||
"systemctl -M server list-dependencies network-addresses-eth1.service >&2"
|
||||
)
|
||||
with subtest("Simple physical interface is up"):
|
||||
server.succeed("nixos-container start server")
|
||||
server.wait_for_unit("container@server")
|
||||
server.succeed(
|
||||
"systemctl -M server list-dependencies network-addresses-eth1.service >&2"
|
||||
)
|
||||
|
||||
# The other tests will ping this container on its ip. Here we just check
|
||||
# that the device is present in the container.
|
||||
server.succeed("nixos-container run server -- ip a show dev eth1 >&2")
|
||||
# The other tests will ping this container on its ip. Here we just check
|
||||
# that the device is present in the container.
|
||||
server.succeed("nixos-container run server -- ip a show dev eth1 >&2")
|
||||
|
||||
with subtest("Physical device in bridge in container can ping server"):
|
||||
bridged.wait_for_unit("default.target")
|
||||
bridged.succeed("nixos-container start bridged")
|
||||
bridged.wait_for_unit("container@bridged")
|
||||
bridged.succeed(
|
||||
"systemctl -M bridged list-dependencies network-addresses-br0.service >&2",
|
||||
"systemctl -M bridged status -n 30 -l network-addresses-br0.service",
|
||||
"nixos-container run bridged -- ping -w 10 -c 1 -n 10.10.0.1",
|
||||
)
|
||||
with subtest("Physical device in bridge in container can ping server"):
|
||||
bridged.wait_for_unit("default.target")
|
||||
bridged.succeed("nixos-container start bridged")
|
||||
bridged.wait_for_unit("container@bridged")
|
||||
bridged.succeed(
|
||||
"systemctl -M bridged list-dependencies network-addresses-br0.service >&2",
|
||||
"systemctl -M bridged status -n 30 -l network-addresses-br0.service",
|
||||
"nixos-container run bridged -- ping -w 10 -c 1 -n 10.10.0.1",
|
||||
)
|
||||
|
||||
with subtest("Physical device in bond in container can ping server"):
|
||||
bonded.wait_for_unit("default.target")
|
||||
bonded.succeed("nixos-container start bonded")
|
||||
bonded.wait_for_unit("container@bonded")
|
||||
bonded.succeed(
|
||||
"systemctl -M bonded list-dependencies network-addresses-bond0 >&2",
|
||||
"systemctl -M bonded status -n 30 -l network-addresses-bond0 >&2",
|
||||
"nixos-container run bonded -- ping -w 10 -c 1 -n 10.10.0.1",
|
||||
)
|
||||
with subtest("Physical device in bond in container can ping server"):
|
||||
bonded.wait_for_unit("default.target")
|
||||
bonded.succeed("nixos-container start bonded")
|
||||
bonded.wait_for_unit("container@bonded")
|
||||
bonded.succeed(
|
||||
"systemctl -M bonded list-dependencies network-addresses-bond0 >&2",
|
||||
"systemctl -M bonded status -n 30 -l network-addresses-bond0 >&2",
|
||||
"nixos-container run bonded -- ping -w 10 -c 1 -n 10.10.0.1",
|
||||
)
|
||||
|
||||
with subtest("Physical device in bond in bridge in container can ping server"):
|
||||
bridgedbond.wait_for_unit("default.target")
|
||||
bridgedbond.succeed("nixos-container start bridgedbond")
|
||||
bridgedbond.wait_for_unit("container@bridgedbond")
|
||||
bridgedbond.succeed(
|
||||
"systemctl -M bridgedbond list-dependencies network-addresses-br0.service >&2",
|
||||
"systemctl -M bridgedbond status -n 30 -l network-addresses-br0.service",
|
||||
"nixos-container run bridgedbond -- ping -w 10 -c 1 -n 10.10.0.1",
|
||||
)
|
||||
'';
|
||||
}
|
||||
)
|
||||
with subtest("Physical device in bond in bridge in container can ping server"):
|
||||
bridgedbond.wait_for_unit("default.target")
|
||||
bridgedbond.succeed("nixos-container start bridgedbond")
|
||||
bridgedbond.wait_for_unit("container@bridgedbond")
|
||||
bridgedbond.succeed(
|
||||
"systemctl -M bridgedbond list-dependencies network-addresses-br0.service >&2",
|
||||
"systemctl -M bridgedbond status -n 30 -l network-addresses-br0.service",
|
||||
"nixos-container run bridgedbond -- ping -w 10 -c 1 -n 10.10.0.1",
|
||||
)
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -5,69 +5,67 @@ let
|
|||
containerPort = 80;
|
||||
in
|
||||
|
||||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-portforward";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [
|
||||
aristid
|
||||
aszlig
|
||||
kampfschlaefer
|
||||
ianwookim
|
||||
];
|
||||
};
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-portforward";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [
|
||||
aristid
|
||||
aszlig
|
||||
kampfschlaefer
|
||||
ianwookim
|
||||
];
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [ ../modules/installer/cd-dvd/channel.nix ];
|
||||
virtualisation.writableStore = true;
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [ ../modules/installer/cd-dvd/channel.nix ];
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
containers.webserver = {
|
||||
privateNetwork = true;
|
||||
hostAddress = hostIp;
|
||||
localAddress = containerIp;
|
||||
forwardPorts = [
|
||||
{
|
||||
protocol = "tcp";
|
||||
hostPort = hostPort;
|
||||
containerPort = containerPort;
|
||||
}
|
||||
];
|
||||
config = {
|
||||
services.httpd.enable = true;
|
||||
services.httpd.adminAddr = "foo@example.org";
|
||||
networking.firewall.allowedTCPPorts = [ 80 ];
|
||||
};
|
||||
containers.webserver = {
|
||||
privateNetwork = true;
|
||||
hostAddress = hostIp;
|
||||
localAddress = containerIp;
|
||||
forwardPorts = [
|
||||
{
|
||||
protocol = "tcp";
|
||||
hostPort = hostPort;
|
||||
containerPort = containerPort;
|
||||
}
|
||||
];
|
||||
config = {
|
||||
services.httpd.enable = true;
|
||||
services.httpd.adminAddr = "foo@example.org";
|
||||
networking.firewall.allowedTCPPorts = [ 80 ];
|
||||
};
|
||||
|
||||
virtualisation.additionalPaths = [ pkgs.stdenv ];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
container_list = machine.succeed("nixos-container list")
|
||||
assert "webserver" in container_list
|
||||
virtualisation.additionalPaths = [ pkgs.stdenv ];
|
||||
};
|
||||
|
||||
# Start the webserver container.
|
||||
machine.succeed("nixos-container start webserver")
|
||||
testScript = ''
|
||||
container_list = machine.succeed("nixos-container list")
|
||||
assert "webserver" in container_list
|
||||
|
||||
# wait two seconds for the container to start and the network to be up
|
||||
machine.sleep(2)
|
||||
# Start the webserver container.
|
||||
machine.succeed("nixos-container start webserver")
|
||||
|
||||
# Since "start" returns after the container has reached
|
||||
# multi-user.target, we should now be able to access it.
|
||||
# ip = machine.succeed("nixos-container show-ip webserver").strip()
|
||||
machine.succeed("ping -n -c1 ${hostIp}")
|
||||
machine.succeed("curl --fail http://${hostIp}:${toString hostPort}/ > /dev/null")
|
||||
# wait two seconds for the container to start and the network to be up
|
||||
machine.sleep(2)
|
||||
|
||||
# Stop the container.
|
||||
machine.succeed("nixos-container stop webserver")
|
||||
machine.fail("curl --fail --connect-timeout 2 http://${hostIp}:${toString hostPort}/ > /dev/null")
|
||||
# Since "start" returns after the container has reached
|
||||
# multi-user.target, we should now be able to access it.
|
||||
# ip = machine.succeed("nixos-container show-ip webserver").strip()
|
||||
machine.succeed("ping -n -c1 ${hostIp}")
|
||||
machine.succeed("curl --fail http://${hostIp}:${toString hostPort}/ > /dev/null")
|
||||
|
||||
# Destroying a declarative container should fail.
|
||||
machine.fail("nixos-container destroy webserver")
|
||||
'';
|
||||
# Stop the container.
|
||||
machine.succeed("nixos-container stop webserver")
|
||||
machine.fail("curl --fail --connect-timeout 2 http://${hostIp}:${toString hostPort}/ > /dev/null")
|
||||
|
||||
}
|
||||
)
|
||||
# Destroying a declarative container should fail.
|
||||
machine.fail("nixos-container destroy webserver")
|
||||
'';
|
||||
|
||||
}
|
||||
|
|
|
@@ -1,61 +1,59 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-reloadable";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ danbst ];
|
||||
};
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-reloadable";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ danbst ];
|
||||
};
|
||||
|
||||
nodes = {
|
||||
machine =
|
||||
{ lib, ... }:
|
||||
{
|
||||
containers.test1 = {
|
||||
autoStart = true;
|
||||
config.environment.etc.check.text = "client_base";
|
||||
};
|
||||
nodes = {
|
||||
machine =
|
||||
{ lib, ... }:
|
||||
{
|
||||
containers.test1 = {
|
||||
autoStart = true;
|
||||
config.environment.etc.check.text = "client_base";
|
||||
};
|
||||
|
||||
# prevent make-test-python.nix from changing the IP
|
||||
networking.interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [ ];
|
||||
# prevent make-test-python.nix to change IP
|
||||
networking.interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [ ];
|
||||
|
||||
specialisation.c1.configuration = {
|
||||
containers.test1.config = {
|
||||
environment.etc.check.text = lib.mkForce "client_c1";
|
||||
services.httpd.enable = true;
|
||||
services.httpd.adminAddr = "nixos@example.com";
|
||||
};
|
||||
};
|
||||
|
||||
specialisation.c2.configuration = {
|
||||
containers.test1.config = {
|
||||
environment.etc.check.text = lib.mkForce "client_c2";
|
||||
services.nginx.enable = true;
|
||||
};
|
||||
specialisation.c1.configuration = {
|
||||
containers.test1.config = {
|
||||
environment.etc.check.text = lib.mkForce "client_c1";
|
||||
services.httpd.enable = true;
|
||||
services.httpd.adminAddr = "nixos@example.com";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
machine.start()
|
||||
machine.wait_for_unit("default.target")
|
||||
specialisation.c2.configuration = {
|
||||
containers.test1.config = {
|
||||
environment.etc.check.text = lib.mkForce "client_c2";
|
||||
services.nginx.enable = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
assert "client_base" in machine.succeed("nixos-container run test1 cat /etc/check")
|
||||
testScript = ''
|
||||
machine.start()
|
||||
machine.wait_for_unit("default.target")
|
||||
|
||||
with subtest("httpd is available after activating config1"):
|
||||
machine.succeed(
|
||||
"/run/booted-system/specialisation/c1/bin/switch-to-configuration test >&2",
|
||||
"[[ $(nixos-container run test1 cat /etc/check) == client_c1 ]] >&2",
|
||||
"systemctl status httpd -M test1 >&2",
|
||||
)
|
||||
assert "client_base" in machine.succeed("nixos-container run test1 cat /etc/check")
|
||||
|
||||
with subtest("httpd is not available any longer after switching to config2"):
|
||||
machine.succeed(
|
||||
"/run/booted-system/specialisation/c2/bin/switch-to-configuration test >&2",
|
||||
"[[ $(nixos-container run test1 cat /etc/check) == client_c2 ]] >&2",
|
||||
"systemctl status nginx -M test1 >&2",
|
||||
)
|
||||
machine.fail("systemctl status httpd -M test1 >&2")
|
||||
'';
|
||||
with subtest("httpd is available after activating config1"):
|
||||
machine.succeed(
|
||||
"/run/booted-system/specialisation/c1/bin/switch-to-configuration test >&2",
|
||||
"[[ $(nixos-container run test1 cat /etc/check) == client_c1 ]] >&2",
|
||||
"systemctl status httpd -M test1 >&2",
|
||||
)
|
||||
|
||||
}
|
||||
)
|
||||
with subtest("httpd is not available any longer after switching to config2"):
|
||||
machine.succeed(
|
||||
"/run/booted-system/specialisation/c2/bin/switch-to-configuration test >&2",
|
||||
"[[ $(nixos-container run test1 cat /etc/check) == client_c2 ]] >&2",
|
||||
"systemctl status nginx -M test1 >&2",
|
||||
)
|
||||
machine.fail("systemctl status httpd -M test1 >&2")
|
||||
'';
|
||||
|
||||
}
|
||||
|
|
|
@@ -1,40 +1,38 @@
|
|||
import ./make-test-python.nix (
|
||||
{ lib, ... }:
|
||||
{
|
||||
name = "containers-require-bind-mounts";
|
||||
meta.maintainers = with lib.maintainers; [ kira-bruneau ];
|
||||
{ lib, ... }:
|
||||
{
|
||||
name = "containers-require-bind-mounts";
|
||||
meta.maintainers = with lib.maintainers; [ kira-bruneau ];
|
||||
|
||||
nodes.machine = {
|
||||
containers.require-bind-mounts = {
|
||||
bindMounts = {
|
||||
"/srv/data" = { };
|
||||
};
|
||||
config = { };
|
||||
};
|
||||
|
||||
virtualisation.fileSystems = {
|
||||
"/srv/data" = {
|
||||
fsType = "tmpfs";
|
||||
options = [ "noauto" ];
|
||||
};
|
||||
nodes.machine = {
|
||||
containers.require-bind-mounts = {
|
||||
bindMounts = {
|
||||
"/srv/data" = { };
|
||||
};
|
||||
config = { };
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
machine.wait_for_unit("default.target")
|
||||
virtualisation.fileSystems = {
|
||||
"/srv/data" = {
|
||||
fsType = "tmpfs";
|
||||
options = [ "noauto" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
assert "require-bind-mounts" in machine.succeed("nixos-container list")
|
||||
testScript = ''
|
||||
machine.wait_for_unit("default.target")
|
||||
|
||||
assert "require-bind-mounts" in machine.succeed("nixos-container list")
|
||||
assert "down" in machine.succeed("nixos-container status require-bind-mounts")
|
||||
assert "inactive" in machine.fail("systemctl is-active srv-data.mount")
|
||||
|
||||
with subtest("bind mount host paths must be mounted to run container"):
|
||||
machine.succeed("nixos-container start require-bind-mounts")
|
||||
assert "up" in machine.succeed("nixos-container status require-bind-mounts")
|
||||
assert "active" in machine.succeed("systemctl status srv-data.mount")
|
||||
|
||||
machine.succeed("systemctl stop srv-data.mount")
|
||||
assert "down" in machine.succeed("nixos-container status require-bind-mounts")
|
||||
assert "inactive" in machine.fail("systemctl is-active srv-data.mount")
|
||||
|
||||
with subtest("bind mount host paths must be mounted to run container"):
|
||||
machine.succeed("nixos-container start require-bind-mounts")
|
||||
assert "up" in machine.succeed("nixos-container status require-bind-mounts")
|
||||
assert "active" in machine.succeed("systemctl status srv-data.mount")
|
||||
|
||||
machine.succeed("systemctl stop srv-data.mount")
|
||||
assert "down" in machine.succeed("nixos-container status require-bind-mounts")
|
||||
assert "inactive" in machine.fail("systemctl is-active srv-data.mount")
|
||||
'';
|
||||
}
|
||||
)
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,131 +1,129 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-restart_networking";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ kampfschlaefer ];
|
||||
};
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-restart_networking";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ kampfschlaefer ];
|
||||
};
|
||||
|
||||
nodes = {
|
||||
client = {
|
||||
virtualisation.vlans = [ 1 ];
|
||||
nodes = {
|
||||
client = {
|
||||
virtualisation.vlans = [ 1 ];
|
||||
|
||||
networking.firewall.enable = false;
|
||||
networking.firewall.enable = false;
|
||||
|
||||
containers.webserver = {
|
||||
autoStart = true;
|
||||
privateNetwork = true;
|
||||
hostBridge = "br0";
|
||||
config = {
|
||||
networking.firewall.enable = false;
|
||||
networking.interfaces.eth0.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.122";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
containers.webserver = {
|
||||
autoStart = true;
|
||||
privateNetwork = true;
|
||||
hostBridge = "br0";
|
||||
config = {
|
||||
networking.firewall.enable = false;
|
||||
networking.interfaces.eth0.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.122";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
networking.bridges.br0 = {
|
||||
interfaces = [ ];
|
||||
rstp = false;
|
||||
};
|
||||
|
||||
networking.interfaces.br0.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
networking.interfaces.eth1 = {
|
||||
ipv4.addresses = lib.mkForce [ ];
|
||||
ipv6.addresses = lib.mkForce [ ];
|
||||
};
|
||||
|
||||
specialisation.eth1.configuration = {
|
||||
networking.bridges.br0.interfaces = [ "eth1" ];
|
||||
networking.interfaces = {
|
||||
eth1.ipv4.addresses = lib.mkForce [ ];
|
||||
eth1.ipv6.addresses = lib.mkForce [ ];
|
||||
br0.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
specialisation.eth1-rstp.configuration = {
|
||||
networking.bridges.br0 = {
|
||||
interfaces = [ ];
|
||||
rstp = false;
|
||||
interfaces = [ "eth1" ];
|
||||
rstp = lib.mkForce true;
|
||||
};
|
||||
|
||||
networking.interfaces.br0.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
networking.interfaces.eth1 = {
|
||||
ipv4.addresses = lib.mkForce [ ];
|
||||
ipv6.addresses = lib.mkForce [ ];
|
||||
};
|
||||
|
||||
specialisation.eth1.configuration = {
|
||||
networking.bridges.br0.interfaces = [ "eth1" ];
|
||||
networking.interfaces = {
|
||||
eth1.ipv4.addresses = lib.mkForce [ ];
|
||||
eth1.ipv6.addresses = lib.mkForce [ ];
|
||||
br0.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
specialisation.eth1-rstp.configuration = {
|
||||
networking.bridges.br0 = {
|
||||
interfaces = [ "eth1" ];
|
||||
rstp = lib.mkForce true;
|
||||
};
|
||||
|
||||
networking.interfaces = {
|
||||
eth1.ipv4.addresses = lib.mkForce [ ];
|
||||
eth1.ipv6.addresses = lib.mkForce [ ];
|
||||
br0.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
networking.interfaces = {
|
||||
eth1.ipv4.addresses = lib.mkForce [ ];
|
||||
eth1.ipv6.addresses = lib.mkForce [ ];
|
||||
br0.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.1.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
client.start()
|
||||
testScript = ''
|
||||
client.start()
|
||||
|
||||
client.wait_for_unit("default.target")
|
||||
client.wait_for_unit("default.target")
|
||||
|
||||
with subtest("Initial configuration connectivity check"):
|
||||
client.succeed("ping 192.168.1.122 -c 1 -n >&2")
|
||||
client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2")
|
||||
with subtest("Initial configuration connectivity check"):
|
||||
client.succeed("ping 192.168.1.122 -c 1 -n >&2")
|
||||
client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2")
|
||||
|
||||
client.fail("ip l show eth1 |grep 'master br0' >&2")
|
||||
client.fail("grep eth1 /run/br0.interfaces >&2")
|
||||
client.fail("ip l show eth1 |grep 'master br0' >&2")
|
||||
client.fail("grep eth1 /run/br0.interfaces >&2")
|
||||
|
||||
with subtest("Bridged configuration without STP preserves connectivity"):
|
||||
client.succeed(
|
||||
"/run/booted-system/specialisation/eth1/bin/switch-to-configuration test >&2"
|
||||
)
|
||||
with subtest("Bridged configuration without STP preserves connectivity"):
|
||||
client.succeed(
|
||||
"/run/booted-system/specialisation/eth1/bin/switch-to-configuration test >&2"
|
||||
)
|
||||
|
||||
client.succeed(
|
||||
"ping 192.168.1.122 -c 1 -n >&2",
|
||||
"nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2",
|
||||
"ip l show eth1 |grep 'master br0' >&2",
|
||||
"grep eth1 /run/br0.interfaces >&2",
|
||||
)
|
||||
client.succeed(
|
||||
"ping 192.168.1.122 -c 1 -n >&2",
|
||||
"nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2",
|
||||
"ip l show eth1 |grep 'master br0' >&2",
|
||||
"grep eth1 /run/br0.interfaces >&2",
|
||||
)
|
||||
|
||||
# activating rstp needs another service, therefore the bridge will restart and the container will lose its connectivity
|
||||
# with subtest("Bridged configuration with STP"):
|
||||
# client.succeed("/run/booted-system/specialisation/eth1-rstp/bin/switch-to-configuration test >&2")
|
||||
# client.execute("ip -4 a >&2")
|
||||
# client.execute("ip l >&2")
|
||||
#
|
||||
# client.succeed(
|
||||
# "ping 192.168.1.122 -c 1 -n >&2",
|
||||
# "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2",
|
||||
# "ip l show eth1 |grep 'master br0' >&2",
|
||||
# "grep eth1 /run/br0.interfaces >&2",
|
||||
# )
|
||||
# activating rstp needs another service, therefore the bridge will restart and the container will lose its connectivity
|
||||
# with subtest("Bridged configuration with STP"):
|
||||
# client.succeed("/run/booted-system/specialisation/eth1-rstp/bin/switch-to-configuration test >&2")
|
||||
# client.execute("ip -4 a >&2")
|
||||
# client.execute("ip l >&2")
|
||||
#
|
||||
# client.succeed(
|
||||
# "ping 192.168.1.122 -c 1 -n >&2",
|
||||
# "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2",
|
||||
# "ip l show eth1 |grep 'master br0' >&2",
|
||||
# "grep eth1 /run/br0.interfaces >&2",
|
||||
# )
|
||||
|
||||
with subtest("Reverting to initial configuration preserves connectivity"):
|
||||
client.succeed(
|
||||
"/run/booted-system/bin/switch-to-configuration test >&2"
|
||||
)
|
||||
with subtest("Reverting to initial configuration preserves connectivity"):
|
||||
client.succeed(
|
||||
"/run/booted-system/bin/switch-to-configuration test >&2"
|
||||
)
|
||||
|
||||
client.succeed("ping 192.168.1.122 -c 1 -n >&2")
|
||||
client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2")
|
||||
client.succeed("ping 192.168.1.122 -c 1 -n >&2")
|
||||
client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2")
|
||||
|
||||
client.fail("ip l show eth1 |grep 'master br0' >&2")
|
||||
client.fail("grep eth1 /run/br0.interfaces >&2")
|
||||
'';
|
||||
client.fail("ip l show eth1 |grep 'master br0' >&2")
|
||||
client.fail("grep eth1 /run/br0.interfaces >&2")
|
||||
'';
|
||||
|
||||
}
|
||||
)
|
||||
}
|
||||
|
|
|
@@ -1,93 +1,91 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-tmpfs";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ patryk27 ];
|
||||
};
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "containers-tmpfs";
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ patryk27 ];
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [ ../modules/installer/cd-dvd/channel.nix ];
|
||||
virtualisation.writableStore = true;
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [ ../modules/installer/cd-dvd/channel.nix ];
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
containers.tmpfs = {
|
||||
autoStart = true;
|
||||
tmpfs = [
|
||||
# Mount var as a tmpfs
|
||||
"/var"
|
||||
containers.tmpfs = {
|
||||
autoStart = true;
|
||||
tmpfs = [
|
||||
# Mount var as a tmpfs
|
||||
"/var"
|
||||
|
||||
# Add a nested mount inside a tmpfs
|
||||
"/var/log"
|
||||
# Add a nested mount inside a tmpfs
|
||||
"/var/log"
|
||||
|
||||
# Add a tmpfs on a path that does not exist
|
||||
"/some/random/path"
|
||||
];
|
||||
config = { };
|
||||
};
|
||||
|
||||
virtualisation.additionalPaths = [ pkgs.stdenv ];
|
||||
# Add a tmpfs on a path that does not exist
|
||||
"/some/random/path"
|
||||
];
|
||||
config = { };
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
machine.wait_for_unit("default.target")
|
||||
assert "tmpfs" in machine.succeed("nixos-container list")
|
||||
virtualisation.additionalPaths = [ pkgs.stdenv ];
|
||||
};
|
||||
|
||||
with subtest("tmpfs container is up"):
|
||||
assert "up" in machine.succeed("nixos-container status tmpfs")
|
||||
testScript = ''
|
||||
machine.wait_for_unit("default.target")
|
||||
assert "tmpfs" in machine.succeed("nixos-container list")
|
||||
|
||||
with subtest("tmpfs container is up"):
|
||||
assert "up" in machine.succeed("nixos-container status tmpfs")
|
||||
|
||||
|
||||
def tmpfs_cmd(command):
|
||||
return f"nixos-container run tmpfs -- {command} 2>/dev/null"
|
||||
def tmpfs_cmd(command):
|
||||
return f"nixos-container run tmpfs -- {command} 2>/dev/null"
|
||||
|
||||
|
||||
with subtest("/var is mounted as a tmpfs"):
|
||||
machine.succeed(tmpfs_cmd("mountpoint -q /var"))
|
||||
with subtest("/var is mounted as a tmpfs"):
|
||||
machine.succeed(tmpfs_cmd("mountpoint -q /var"))
|
||||
|
||||
with subtest("/var/log is mounted as a tmpfs"):
|
||||
assert "What: tmpfs" in machine.succeed(
|
||||
tmpfs_cmd("systemctl status var-log.mount --no-pager")
|
||||
)
|
||||
machine.succeed(tmpfs_cmd("mountpoint -q /var/log"))
|
||||
with subtest("/var/log is mounted as a tmpfs"):
|
||||
assert "What: tmpfs" in machine.succeed(
|
||||
tmpfs_cmd("systemctl status var-log.mount --no-pager")
|
||||
)
|
||||
machine.succeed(tmpfs_cmd("mountpoint -q /var/log"))
|
||||
|
||||
with subtest("/some/random/path is mounted as a tmpfs"):
|
||||
assert "What: tmpfs" in machine.succeed(
|
||||
tmpfs_cmd("systemctl status some-random-path.mount --no-pager")
|
||||
)
|
||||
machine.succeed(tmpfs_cmd("mountpoint -q /some/random/path"))
|
||||
with subtest("/some/random/path is mounted as a tmpfs"):
|
||||
assert "What: tmpfs" in machine.succeed(
|
||||
tmpfs_cmd("systemctl status some-random-path.mount --no-pager")
|
||||
)
|
||||
machine.succeed(tmpfs_cmd("mountpoint -q /some/random/path"))
|
||||
|
||||
with subtest(
|
||||
"files created in the container in a non-tmpfs directory are visible on the host."
|
||||
):
|
||||
# This establishes legitimacy for the following tests
|
||||
machine.succeed(
|
||||
tmpfs_cmd("touch /root/test.file"),
|
||||
tmpfs_cmd("ls -l /root | grep -q test.file"),
|
||||
"test -e /var/lib/nixos-containers/tmpfs/root/test.file",
|
||||
)
|
||||
with subtest(
|
||||
"files created in the container in a non-tmpfs directory are visible on the host."
|
||||
):
|
||||
# This establishes legitimacy for the following tests
|
||||
machine.succeed(
|
||||
tmpfs_cmd("touch /root/test.file"),
|
||||
tmpfs_cmd("ls -l /root | grep -q test.file"),
|
||||
"test -e /var/lib/nixos-containers/tmpfs/root/test.file",
|
||||
)
|
||||
|
||||
with subtest(
|
||||
"/some/random/path is writable and that files created there are not "
|
||||
+ "in the hosts container dir but in the tmpfs"
|
||||
):
|
||||
machine.succeed(
|
||||
tmpfs_cmd("touch /some/random/path/test.file"),
|
||||
tmpfs_cmd("test -e /some/random/path/test.file"),
|
||||
)
|
||||
machine.fail("test -e /var/lib/nixos-containers/tmpfs/some/random/path/test.file")
|
||||
with subtest(
|
||||
"/some/random/path is writable and that files created there are not "
|
||||
+ "in the hosts container dir but in the tmpfs"
|
||||
):
|
||||
machine.succeed(
|
||||
tmpfs_cmd("touch /some/random/path/test.file"),
|
||||
tmpfs_cmd("test -e /some/random/path/test.file"),
|
||||
)
|
||||
machine.fail("test -e /var/lib/nixos-containers/tmpfs/some/random/path/test.file")
|
||||
|
||||
with subtest(
|
||||
"files created in the hosts container dir in a path where a tmpfs "
|
||||
+ "file system has been mounted are not visible to the container as "
|
||||
+ "the do not exist in the tmpfs"
|
||||
):
|
||||
machine.succeed(
|
||||
"touch /var/lib/nixos-containers/tmpfs/var/test.file",
|
||||
"test -e /var/lib/nixos-containers/tmpfs/var/test.file",
|
||||
"ls -l /var/lib/nixos-containers/tmpfs/var/ | grep -q test.file 2>/dev/null",
|
||||
)
|
||||
machine.fail(tmpfs_cmd("ls -l /var | grep -q test.file"))
|
||||
'';
|
||||
}
|
||||
)
|
||||
with subtest(
|
||||
"files created in the hosts container dir in a path where a tmpfs "
|
||||
+ "file system has been mounted are not visible to the container as "
|
||||
+ "the do not exist in the tmpfs"
|
||||
):
|
||||
machine.succeed(
|
||||
"touch /var/lib/nixos-containers/tmpfs/var/test.file",
|
||||
"test -e /var/lib/nixos-containers/tmpfs/var/test.file",
|
||||
"ls -l /var/lib/nixos-containers/tmpfs/var/ | grep -q test.file 2>/dev/null",
|
||||
)
|
||||
machine.fail(tmpfs_cmd("ls -l /var | grep -q test.file"))
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,26 +1,24 @@
{ pkgs, lib, ... }:
{
  name = "containers-unified-hierarchy";
  meta = {
    maintainers = with lib.maintainers; [ farnoy ];
  };

  nodes.machine =
    { ... }:
    {
      containers = {
        test-container = {
          autoStart = true;
          config = { };
        };
      };
    };

  testScript = ''
    machine.wait_for_unit("default.target")

    machine.succeed("echo 'stat -fc %T /sys/fs/cgroup/ | grep cgroup2fs' | nixos-container root-login test-container")
  '';
}
|
|
@@ -1,28 +1,26 @@
{ lib, pkgs, ... }:

let
  port = 3333;
in
{
  name = "convos";
  meta.maintainers = with lib.maintainers; [ sgo ];

  nodes = {
    machine =
      { pkgs, ... }:
      {
        services.convos = {
          enable = true;
          listenPort = port;
        };
      };
  };

  testScript = ''
    machine.wait_for_unit("convos")
    machine.wait_for_open_port(${toString port})
    machine.succeed("curl -f http://localhost:${toString port}/")
  '';
}
|
@@ -1,38 +1,36 @@
import ./make-test-python.nix (
  { pkgs, ... }:
  {
    name = "coturn";
    nodes = {
      default = {
        services.coturn.enable = true;
      };
      secretsfile = {
        boot.postBootCommands = ''
          echo "some-very-secret-string" > /run/coturn-secret
        '';
        services.coturn = {
          enable = true;
          static-auth-secret-file = "/run/coturn-secret";
        };
      };
    };

    testScript = ''
      start_all()

      with subtest("by default works without configuration"):
          default.wait_for_unit("coturn.service")

      with subtest("works with static-auth-secret-file"):
          secretsfile.wait_for_unit("coturn.service")
          secretsfile.wait_for_open_port(3478)
          secretsfile.succeed("grep 'some-very-secret-string' /run/coturn/turnserver.cfg")
          # Forbidden IP, fails:
          secretsfile.fail("${pkgs.coturn}/bin/turnutils_uclient -W some-very-secret-string 127.0.0.1 -DgX -e 127.0.0.1 -n 1 -c -y")
          # allowed-peer-ip, should succeed:
          secretsfile.succeed("${pkgs.coturn}/bin/turnutils_uclient -W some-very-secret-string 192.168.1.2 -DgX -e 192.168.1.2 -n 1 -c -y")

      default.log(default.execute("systemd-analyze security coturn.service | grep -v '✓'")[1])
    '';
  }
)

@@ -14,51 +14,49 @@ let
  testpass = "cowabunga";
  testlogin = "${testuser}:${testpass}@";
in
import ./make-test-python.nix (
  { pkgs, lib, ... }:
  {
    name = "couchdb";
    meta.maintainers = [ ];

    nodes = {
      couchdb3 = makeNode pkgs.couchdb3 testuser testpass;
    };

    testScript =
      let
        curlJqCheck =
          login: action: path: jqexpr: result:
          pkgs.writeScript "curl-jq-check-${action}-${path}.sh" ''
            RESULT=$(curl -X ${action} http://${login}127.0.0.1:5984/${path} | jq -r '${jqexpr}')
            echo $RESULT >&2
            if [ "$RESULT" != "${result}" ]; then
              exit 1
            fi
          '';
      in
      ''
        start_all()

        couchdb3.wait_for_unit("couchdb.service")
        couchdb3.wait_until_succeeds(
            "${curlJqCheck testlogin "GET" "" ".couchdb" "Welcome"}"
        )
        couchdb3.wait_until_succeeds(
            "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "0"}"
        )
        couchdb3.succeed("${curlJqCheck testlogin "PUT" "foo" ".ok" "true"}")
        couchdb3.succeed(
            "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "1"}"
        )
        couchdb3.succeed(
            "${curlJqCheck testlogin "DELETE" "foo" ".ok" "true"}"
        )
        couchdb3.succeed(
            "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "0"}"
        )
        couchdb3.succeed(
            "${curlJqCheck testlogin "GET" "_node/couchdb@127.0.0.1" ".couchdb" "Welcome"}"
        )
      '';
  }
)

@@ -1,33 +1,31 @@
import ./make-test-python.nix (
  { lib, pkgs, ... }:

  {
    name = "crabfit";

    meta.maintainers = [ ];

    nodes = {
      machine =
        { pkgs, ... }:
        {
          services.crabfit = {
            enable = true;

            frontend.host = "http://127.0.0.1:3001";
            api.host = "127.0.0.1:3000";
          };
        };
    };

    # TODO: Add a reverse proxy and a dns entry for testing
    testScript = ''
      machine.wait_for_unit("crabfit-api")
      machine.wait_for_unit("crabfit-frontend")

      machine.wait_for_open_port(3000)
      machine.wait_for_open_port(3001)

      machine.succeed("curl -f http://localhost:3001/")
    '';
  }
)

@@ -1,59 +1,57 @@
import ./make-test-python.nix (
  { pkgs, ... }:
  let
    client =
      { pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.croc ];
      };
    pass = "PassRelay";
  in
  {
    name = "croc";
    meta = with pkgs.lib.maintainers; {
      maintainers = [
        equirosa
        SuperSandro2000
      ];
    };

    nodes = {
      relay = {
        services.croc = {
          enable = true;
          pass = pass;
          openFirewall = true;
        };
      };
      sender = client;
      receiver = client;
    };

    testScript = ''
      start_all()

      # wait until relay is up
      relay.wait_for_unit("croc")
      relay.wait_for_open_port(9009)
      relay.wait_for_open_port(9010)
      relay.wait_for_open_port(9011)
      relay.wait_for_open_port(9012)
      relay.wait_for_open_port(9013)

      # generate testfiles and send them
      sender.wait_for_unit("multi-user.target")
      sender.execute("echo Hello World > testfile01.txt")
      sender.execute("echo Hello Earth > testfile02.txt")
      sender.execute(
          "env CROC_SECRET=topSecret croc --pass ${pass} --relay relay send testfile01.txt testfile02.txt >&2 &"
      )

      # receive the testfiles and check them
      receiver.succeed(
          "env CROC_SECRET=topSecret croc --pass ${pass} --yes --relay relay"
      )
      assert "Hello World" in receiver.succeed("cat testfile01.txt")
      assert "Hello Earth" in receiver.succeed("cat testfile02.txt")
    '';
  }
)

@@ -24,182 +24,180 @@
|
|||
uses upstream for its tests.
|
||||
*/
|
||||
|
||||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
let
|
||||
# Update with domains in TestImpersonate.TEST_URLS if needed from:
|
||||
# https://github.com/lwthiker/curl-impersonate/blob/main/tests/test_impersonate.py
|
||||
domains = [
|
||||
"www.wikimedia.org"
|
||||
"www.wikipedia.org"
|
||||
"www.mozilla.org"
|
||||
"www.apache.org"
|
||||
"www.kernel.org"
|
||||
"git-scm.com"
|
||||
];
|
||||
{ pkgs, lib, ... }:
|
||||
let
|
||||
# Update with domains in TestImpersonate.TEST_URLS if needed from:
|
||||
# https://github.com/lwthiker/curl-impersonate/blob/main/tests/test_impersonate.py
|
||||
domains = [
|
||||
"www.wikimedia.org"
|
||||
"www.wikipedia.org"
|
||||
"www.mozilla.org"
|
||||
"www.apache.org"
|
||||
"www.kernel.org"
|
||||
"git-scm.com"
|
||||
];
|
||||
|
||||
tls-certs =
|
||||
let
|
||||
# Configure CA with X.509 v3 extensions that would be trusted by curl
|
||||
ca-cert-conf = pkgs.writeText "curl-impersonate-ca.cnf" ''
|
||||
basicConstraints = critical, CA:TRUE
|
||||
subjectKeyIdentifier = hash
|
||||
authorityKeyIdentifier = keyid:always, issuer:always
|
||||
keyUsage = critical, cRLSign, digitalSignature, keyCertSign
|
||||
'';
|
||||
|
||||
# Configure leaf certificate with X.509 v3 extensions that would be trusted
|
||||
# by curl and set subject-alternative names for test domains
|
||||
tls-cert-conf = pkgs.writeText "curl-impersonate-tls.cnf" ''
|
||||
basicConstraints = critical, CA:FALSE
|
||||
subjectKeyIdentifier = hash
|
||||
authorityKeyIdentifier = keyid:always, issuer:always
|
||||
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment, keyAgreement
|
||||
extendedKeyUsage = critical, serverAuth
|
||||
subjectAltName = @alt_names
|
||||
|
||||
[alt_names]
|
||||
${lib.concatStringsSep "\n" (lib.imap0 (idx: domain: "DNS.${toString idx} = ${domain}") domains)}
|
||||
'';
|
||||
in
|
||||
pkgs.runCommand "curl-impersonate-test-certs"
|
||||
{
|
||||
nativeBuildInputs = [ pkgs.openssl ];
|
||||
}
|
||||
''
|
||||
# create CA certificate and key
|
||||
openssl req -newkey rsa:4096 -keyout ca-key.pem -out ca-csr.pem -nodes -subj '/CN=curl-impersonate-ca.nixos.test'
|
||||
openssl x509 -req -sha512 -in ca-csr.pem -key ca-key.pem -out ca.pem -extfile ${ca-cert-conf} -days 36500
|
||||
openssl x509 -in ca.pem -text
|
||||
|
||||
# create server certificate and key
|
||||
openssl req -newkey rsa:4096 -keyout key.pem -out csr.pem -nodes -subj '/CN=curl-impersonate.nixos.test'
|
||||
openssl x509 -req -sha512 -in csr.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile ${tls-cert-conf} -days 36500
|
||||
openssl x509 -in cert.pem -text
|
||||
|
||||
# output CA cert and server cert and key
|
||||
mkdir -p $out
|
||||
cp key.pem cert.pem ca.pem $out
|
||||
'';
|
||||
|
||||
# Test script
|
||||
curl-impersonate-test =
|
||||
let
|
||||
# Build miniature libcurl client used by test driver
|
||||
minicurl =
|
||||
pkgs.runCommandCC "minicurl"
|
||||
{
|
||||
buildInputs = [ pkgs.curl ];
|
||||
}
|
||||
''
|
||||
mkdir -p $out/bin
|
||||
$CC -Wall -Werror -o $out/bin/minicurl ${pkgs.curl-impersonate.src}/tests/minicurl.c `curl-config --libs`
|
||||
'';
|
||||
in
|
||||
pkgs.writeShellScript "curl-impersonate-test" ''
|
||||
set -euxo pipefail
|
||||
|
||||
# Test driver requirements
|
||||
export PATH="${
|
||||
with pkgs;
|
||||
lib.makeBinPath [
|
||||
bash
|
||||
coreutils
|
||||
python3Packages.pytest
|
||||
nghttp2
|
||||
tcpdump
|
||||
]
|
||||
}"
|
||||
export PYTHONPATH="${
|
||||
with pkgs.python3Packages;
|
||||
makePythonPath [
|
||||
pyyaml
|
||||
pytest-asyncio
|
||||
dpkt
|
||||
ts1-signatures
|
||||
]
|
||||
}"
|
||||
|
||||
# Prepare test root prefix
|
||||
mkdir -p usr/{bin,lib}
|
||||
cp -rs ${pkgs.curl-impersonate}/* ${minicurl}/* usr/
|
||||
|
||||
cp -r ${pkgs.curl-impersonate.src}/tests ./
|
||||
|
||||
# Run tests
|
||||
cd tests
|
||||
pytest . --install-dir ../usr --capture-interface eth1 --exitfirst -k 'not test_http2_headers'
|
||||
tls-certs =
|
||||
let
|
||||
# Configure CA with X.509 v3 extensions that would be trusted by curl
|
||||
ca-cert-conf = pkgs.writeText "curl-impersonate-ca.cnf" ''
|
||||
basicConstraints = critical, CA:TRUE
|
||||
subjectKeyIdentifier = hash
|
||||
authorityKeyIdentifier = keyid:always, issuer:always
|
||||
keyUsage = critical, cRLSign, digitalSignature, keyCertSign
|
||||
'';
|
||||
in
|
||||
{
|
||||
name = "curl-impersonate";
|
||||
|
||||
meta = with lib.maintainers; {
|
||||
maintainers = [ ];
|
||||
};
|
||||
# Configure leaf certificate with X.509 v3 extensions that would be trusted
|
||||
# by curl and set subject-alternative names for test domains
|
||||
tls-cert-conf = pkgs.writeText "curl-impersonate-tls.cnf" ''
|
||||
basicConstraints = critical, CA:FALSE
|
||||
subjectKeyIdentifier = hash
|
||||
authorityKeyIdentifier = keyid:always, issuer:always
|
||||
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment, keyAgreement
|
||||
extendedKeyUsage = critical, serverAuth
|
||||
subjectAltName = @alt_names
|
||||
|
||||
nodes = {
|
||||
web =
|
||||
{
|
||||
nodes,
|
||||
pkgs,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
443
|
||||
];
|
||||
[alt_names]
|
||||
${lib.concatStringsSep "\n" (lib.imap0 (idx: domain: "DNS.${toString idx} = ${domain}") domains)}
|
||||
'';
|
||||
in
|
||||
pkgs.runCommand "curl-impersonate-test-certs"
|
||||
{
|
||||
nativeBuildInputs = [ pkgs.openssl ];
|
||||
}
|
||||
''
|
||||
# create CA certificate and key
|
||||
openssl req -newkey rsa:4096 -keyout ca-key.pem -out ca-csr.pem -nodes -subj '/CN=curl-impersonate-ca.nixos.test'
|
||||
openssl x509 -req -sha512 -in ca-csr.pem -key ca-key.pem -out ca.pem -extfile ${ca-cert-conf} -days 36500
|
||||
openssl x509 -in ca.pem -text
|
||||
|
||||
services = {
|
||||
nginx = {
|
||||
enable = true;
|
||||
virtualHosts."curl-impersonate.nixos.test" = {
|
||||
default = true;
|
||||
addSSL = true;
|
||||
sslCertificate = "${tls-certs}/cert.pem";
|
||||
sslCertificateKey = "${tls-certs}/key.pem";
|
||||
};
|
||||
# create server certificate and key
|
||||
openssl req -newkey rsa:4096 -keyout key.pem -out csr.pem -nodes -subj '/CN=curl-impersonate.nixos.test'
|
||||
openssl x509 -req -sha512 -in csr.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile ${tls-cert-conf} -days 36500
|
||||
openssl x509 -in cert.pem -text
|
||||
|
||||
# output CA cert and server cert and key
|
||||
mkdir -p $out
|
||||
cp key.pem cert.pem ca.pem $out
|
||||
'';
|
||||
|
||||
# Test script
|
||||
curl-impersonate-test =
|
||||
let
|
||||
# Build miniature libcurl client used by test driver
|
||||
minicurl =
|
||||
pkgs.runCommandCC "minicurl"
|
||||
{
|
||||
buildInputs = [ pkgs.curl ];
|
||||
}
|
||||
''
|
||||
mkdir -p $out/bin
|
||||
$CC -Wall -Werror -o $out/bin/minicurl ${pkgs.curl-impersonate.src}/tests/minicurl.c `curl-config --libs`
|
||||
'';
|
||||
in
|
||||
pkgs.writeShellScript "curl-impersonate-test" ''
|
||||
set -euxo pipefail
|
||||
|
||||
# Test driver requirements
|
||||
export PATH="${
|
||||
with pkgs;
|
||||
lib.makeBinPath [
|
||||
bash
|
||||
coreutils
|
||||
python3Packages.pytest
|
||||
nghttp2
|
||||
tcpdump
|
||||
]
|
||||
}"
|
||||
export PYTHONPATH="${
|
||||
with pkgs.python3Packages;
|
||||
makePythonPath [
|
||||
pyyaml
|
||||
pytest-asyncio
|
||||
dpkt
|
||||
ts1-signatures
|
||||
]
|
||||
}"
|
||||
|
||||
# Prepare test root prefix
|
||||
mkdir -p usr/{bin,lib}
|
||||
cp -rs ${pkgs.curl-impersonate}/* ${minicurl}/* usr/
|
||||
|
||||
cp -r ${pkgs.curl-impersonate.src}/tests ./
|
||||
|
||||
# Run tests
|
||||
cd tests
|
||||
pytest . --install-dir ../usr --capture-interface eth1 --exitfirst -k 'not test_http2_headers'
|
||||
'';
|
||||
in
|
||||
{
|
||||
name = "curl-impersonate";
|
||||
|
||||
meta = with lib.maintainers; {
|
||||
maintainers = [ ];
|
||||
};
|
||||
|
||||
nodes = {
|
||||
web =
|
||||
{
|
||||
nodes,
|
||||
pkgs,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
443
|
||||
];
|
||||
|
||||
services = {
|
||||
nginx = {
|
||||
enable = true;
|
||||
virtualHosts."curl-impersonate.nixos.test" = {
|
||||
default = true;
|
||||
addSSL = true;
|
||||
sslCertificate = "${tls-certs}/cert.pem";
|
||||
sslCertificateKey = "${tls-certs}/key.pem";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
curl =
|
||||
{
|
||||
nodes,
|
||||
pkgs,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
networking.extraHosts = lib.concatStringsSep "\n" (
|
||||
map (domain: "${nodes.web.networking.primaryIPAddress} ${domain}") domains
|
||||
);
|
||||
curl =
|
||||
{
|
||||
nodes,
|
||||
pkgs,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
networking.extraHosts = lib.concatStringsSep "\n" (
|
||||
map (domain: "${nodes.web.networking.primaryIPAddress} ${domain}") domains
|
||||
);
|
||||
|
||||
security.pki.certificateFiles = [ "${tls-certs}/ca.pem" ];
|
||||
};
|
||||
};
|
||||
security.pki.certificateFiles = [ "${tls-certs}/ca.pem" ];
|
||||
};
|
||||
};
|
||||
|
||||
testScript =
|
||||
{ nodes, ... }:
|
||||
''
|
||||
start_all()
|
||||
testScript =
|
||||
{ nodes, ... }:
|
||||
''
|
||||
start_all()
|
||||
|
||||
with subtest("Wait for network"):
|
||||
web.systemctl("start network-online.target")
|
||||
curl.systemctl("start network-online.target")
|
||||
web.wait_for_unit("network-online.target")
|
||||
curl.wait_for_unit("network-online.target")
|
||||
with subtest("Wait for network"):
|
||||
web.systemctl("start network-online.target")
|
||||
curl.systemctl("start network-online.target")
|
||||
web.wait_for_unit("network-online.target")
|
||||
curl.wait_for_unit("network-online.target")
|
||||
|
||||
with subtest("Wait for web server"):
|
||||
web.wait_for_unit("nginx.service")
|
||||
web.wait_for_open_port(443)
|
||||
with subtest("Wait for web server"):
|
||||
web.wait_for_unit("nginx.service")
|
||||
web.wait_for_open_port(443)
|
||||
|
||||
with subtest("Run curl-impersonate tests"):
|
||||
curl.succeed("${curl-impersonate-test}")
|
||||
'';
|
||||
}
|
||||
)
|
||||
with subtest("Run curl-impersonate tests"):
|
||||
curl.succeed("${curl-impersonate-test}")
|
||||
'';
|
||||
}
|
||||
@@ -1,38 +1,36 @@
import ./make-test-python.nix (
  { lib, pkgs, ... }:
  {

    name = "dae";

    meta = {
      maintainers = with lib.maintainers; [ oluceps ];
    };

    nodes.machine =
      { pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.curl ];
        services.nginx = {
          enable = true;
          statusPage = true;
        };
        services.dae = {
          enable = true;
          config = ''
            global { disable_waiting_network: true }
            routing{}
          '';
        };
      };

    testScript = ''
      machine.wait_for_unit("nginx.service")
      machine.wait_for_unit("dae.service")

      machine.wait_for_open_port(80)

      machine.succeed("curl --fail --max-time 10 http://localhost")
    '';

  }
)

@@ -1,125 +1,123 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "db-rest";
|
||||
meta.maintainers = with pkgs.lib.maintainers; [ marie ];
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "db-rest";
|
||||
meta.maintainers = with pkgs.lib.maintainers; [ marie ];
|
||||
|
||||
nodes = {
|
||||
database = {
|
||||
networking = {
|
||||
interfaces.eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.2.10";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
firewall.allowedTCPPorts = [ 31638 ];
|
||||
};
|
||||
|
||||
services.redis.servers.db-rest = {
|
||||
enable = true;
|
||||
bind = "0.0.0.0";
|
||||
requirePass = "choochoo";
|
||||
port = 31638;
|
||||
nodes = {
|
||||
database = {
|
||||
networking = {
|
||||
interfaces.eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.2.10";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
firewall.allowedTCPPorts = [ 31638 ];
|
||||
};
|
||||
|
||||
serverWithTcp =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment = {
|
||||
etc = {
|
||||
"db-rest/password-redis-db".text = ''
|
||||
choochoo
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
networking = {
|
||||
interfaces.eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.2.11";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
firewall.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
|
||||
services.db-rest = {
|
||||
enable = true;
|
||||
host = "0.0.0.0";
|
||||
redis = {
|
||||
enable = true;
|
||||
createLocally = false;
|
||||
host = "192.168.2.10";
|
||||
port = 31638;
|
||||
passwordFile = "/etc/db-rest/password-redis-db";
|
||||
useSSL = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
serverWithUnixSocket =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
networking = {
|
||||
interfaces.eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.2.12";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
firewall.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
|
||||
services.db-rest = {
|
||||
enable = true;
|
||||
host = "0.0.0.0";
|
||||
redis = {
|
||||
enable = true;
|
||||
createLocally = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
client = {
|
||||
environment.systemPackages = [ pkgs.jq ];
|
||||
networking = {
|
||||
interfaces.eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.2.13";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
services.redis.servers.db-rest = {
|
||||
enable = true;
|
||||
bind = "0.0.0.0";
|
||||
requirePass = "choochoo";
|
||||
port = 31638;
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
serverWithTcp =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment = {
|
||||
etc = {
|
||||
"db-rest/password-redis-db".text = ''
|
||||
choochoo
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
with subtest("db-rest redis with TCP socket"):
|
||||
database.wait_for_unit("redis-db-rest.service")
|
||||
database.wait_for_open_port(31638)
|
||||
networking = {
|
||||
interfaces.eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.2.11";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
firewall.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
|
||||
serverWithTcp.wait_for_unit("db-rest.service")
|
||||
serverWithTcp.wait_for_open_port(3000)
|
||||
services.db-rest = {
|
||||
enable = true;
|
||||
host = "0.0.0.0";
|
||||
redis = {
|
||||
enable = true;
|
||||
createLocally = false;
|
||||
host = "192.168.2.10";
|
||||
port = 31638;
|
||||
passwordFile = "/etc/db-rest/password-redis-db";
|
||||
useSSL = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
client.succeed("curl --fail --get http://192.168.2.11:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'")
|
||||
serverWithUnixSocket =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
networking = {
|
||||
interfaces.eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.2.12";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
firewall.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
|
||||
with subtest("db-rest redis with Unix socket"):
|
||||
serverWithUnixSocket.wait_for_unit("db-rest.service")
|
||||
serverWithUnixSocket.wait_for_open_port(3000)
|
||||
services.db-rest = {
|
||||
enable = true;
|
||||
host = "0.0.0.0";
|
||||
redis = {
|
||||
enable = true;
|
||||
createLocally = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
client.succeed("curl --fail --get http://192.168.2.12:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'")
|
||||
'';
|
||||
}
|
||||
)
|
||||
client = {
|
||||
environment.systemPackages = [ pkgs.jq ];
|
||||
networking = {
|
||||
interfaces.eth1 = {
|
||||
ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.2.13";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
|
||||
with subtest("db-rest redis with TCP socket"):
|
||||
database.wait_for_unit("redis-db-rest.service")
|
||||
database.wait_for_open_port(31638)
|
||||
|
||||
serverWithTcp.wait_for_unit("db-rest.service")
|
||||
serverWithTcp.wait_for_open_port(3000)
|
||||
|
||||
client.succeed("curl --fail --get http://192.168.2.11:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'")
|
||||
|
||||
with subtest("db-rest redis with Unix socket"):
|
||||
serverWithUnixSocket.wait_for_unit("db-rest.service")
|
||||
serverWithUnixSocket.wait_for_open_port(3000)
|
||||
|
||||
client.succeed("curl --fail --get http://192.168.2.12:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'")
|
||||
'';
|
||||
}
|
||||
@@ -1,44 +1,42 @@
import ./make-test-python.nix (
  { lib, ... }:
  {
    name = "dconf";

    meta.maintainers = with lib.maintainers; [
      linsui
    ];

    nodes.machine =
      {
        config,
        pkgs,
        lib,
        ...
      }:
      {
        users.extraUsers.alice = {
          isNormalUser = true;
        };
        programs.dconf = with lib.gvariant; {
          enable = true;
          profiles.user.databases = [
            {
              settings = {
                "test/not".locked = mkInt32 1;
                "test/is".locked = "locked";
              };
              locks = [
                "/test/is/locked"
              ];
            }
          ];
        };
      };

    testScript = ''
      machine.succeed("test $(dconf read -d /test/not/locked) == 1")
      machine.succeed("test $(dconf read -d /test/is/locked) == \"'locked'\"")
      machine.fail("sudo -u alice dbus-run-session -- dconf write /test/is/locked \"@s 'unlocked'\"")
      machine.succeed("sudo -u alice dbus-run-session -- dconf write /test/not/locked \"@i 2\"")
    '';
  }
)

@@ -1,28 +1,26 @@
import ./make-test-python.nix (
  { pkgs, lib, ... }:
  let
    port = 6000;
  in
  {
    name = "ddns-updater";

    meta.maintainers = with lib.maintainers; [ delliott ];

    nodes.machine =
      { pkgs, ... }:
      {
        services.ddns-updater = {
          enable = true;
          environment = {
            LISTENING_ADDRESS = ":" + (toString port);
          };
        };
      };

    testScript = ''
      machine.wait_for_unit("ddns-updater.service")
      machine.wait_for_open_port(${toString port})
      machine.succeed("curl --fail http://localhost:${toString port}/")
    '';
  }
)

@@ -1,37 +1,35 @@
import ./make-test-python.nix (
  { pkgs, lib, ... }:
  let
    httpPort = 800;
  in
  {
    name = "deconz";

    meta.maintainers = with lib.maintainers; [
      bjornfor
    ];

    nodes.machine =
      {
        config,
        pkgs,
        lib,
        ...
      }:
      {
        nixpkgs.config.allowUnfree = true;
        services.deconz = {
          enable = true;
          inherit httpPort;
          extraArgs = [
            "--dbg-err=2"
            "--dbg-info=2"
          ];
        };
      };

    testScript = ''
      machine.wait_for_unit("deconz.service")
      machine.succeed("curl -sfL http://localhost:${toString httpPort}")
    '';
  }
)

@@ -1,57 +1,55 @@
import ./make-test-python.nix (
  { pkgs, lib, ... }:
  {
    name = "deepin";

    meta.maintainers = lib.teams.deepin.members;

    nodes.machine =
      { ... }:
      {
        imports = [
          ./common/user-account.nix
        ];

        virtualisation.memorySize = 2048;

        services.xserver.enable = true;

        services.xserver.displayManager = {
          lightdm.enable = true;
          autoLogin = {
            enable = true;
            user = "alice";
          };
        };

        services.xserver.desktopManager.deepin.enable = true;
      };

    testScript =
      { nodes, ... }:
      let
        user = nodes.machine.users.users.alice;
      in
      ''
        with subtest("Wait for login"):
            machine.wait_for_x()
            machine.wait_for_file("${user.home}/.Xauthority")
            machine.succeed("xauth merge ${user.home}/.Xauthority")

        with subtest("Check that logging in has given the user ownership of devices"):
            machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")

        with subtest("Check if Deepin session components actually start"):
            machine.wait_until_succeeds("pgrep -f dde-session-daemon")
            machine.wait_for_window("dde-session-daemon")
            machine.wait_until_succeeds("pgrep -f dde-desktop")
            machine.wait_for_window("dde-desktop")

        with subtest("Open deepin-terminal"):
            machine.succeed("su - ${user.name} -c 'DISPLAY=:0 deepin-terminal >&2 &'")
            machine.wait_for_window("deepin-terminal")
            machine.sleep(20)
            machine.screenshot("screen")
      '';
  }
)

@@ -1,69 +1,67 @@
import ./make-test-python.nix (
  { pkgs, ... }:
  {
    name = "deluge";
    meta = with pkgs.lib.maintainers; {
      maintainers = [ flokli ];
    };

    nodes = {
      simple = {
        services.deluge = {
          enable = true;
          package = pkgs.deluge-2_x;
          web = {
            enable = true;
            openFirewall = true;
          };
        };
      };

      declarative = {
        services.deluge = {
          enable = true;
          package = pkgs.deluge-2_x;
          openFirewall = true;
          declarative = true;
          config = {
            allow_remote = true;
            download_location = "/var/lib/deluge/my-download";
            daemon_port = 58846;
            listen_ports = [
              6881
              6889
            ];
          };
          web = {
            enable = true;
            port = 3142;
          };
          authFile = pkgs.writeText "deluge-auth" ''
            localclient:a7bef72a890:10
            andrew:password:10
            user3:anotherpass:5
          '';
        };
      };

    };

    testScript = ''
      start_all()

      simple.wait_for_unit("deluged")
      simple.wait_for_unit("delugeweb")
      simple.wait_for_open_port(8112)
      declarative.wait_for_unit("network.target")
      declarative.wait_until_succeeds("curl --fail http://simple:8112")

      declarative.wait_for_unit("deluged")
      declarative.wait_for_unit("delugeweb")
      declarative.wait_until_succeeds("curl --fail http://declarative:3142")

      # deluge-console always exits with 1. https://dev.deluge-torrent.org/ticket/3291
      declarative.succeed(
          "(deluge-console 'connect 127.0.0.1:58846 andrew password; help' || true) | grep -q 'rm.*Remove a torrent'"
      )
    '';
  }
)

@@ -1,71 +1,69 @@
import ./make-test-python.nix (
  { pkgs, ... }:
  let
    dependencyTrackPort = 8081;
  in
  {
    name = "dependency-track";
    meta = {
      maintainers = pkgs.lib.teams.cyberus.members;
    };

    nodes = {
      server =
        { pkgs, ... }:
        {
          virtualisation = {
            cores = 2;
            diskSize = 4096;
            memorySize = 1024 * 2;
          };

          environment.systemPackages = with pkgs; [ curl ];
          systemd.services.dependency-track = {
            # source: https://github.com/DependencyTrack/dependency-track/blob/37e0ba59e8057c18a87a7a76e247a8f75677a56c/dev/scripts/data-nist-generate-dummy.sh
            preStart = ''
              set -euo pipefail

              NIST_DIR="$HOME/.dependency-track/nist"

              rm -rf "$NIST_DIR"
              mkdir -p "$NIST_DIR"

              for feed in $(seq "2024" "2002"); do
                touch "$NIST_DIR/nvdcve-1.1-$feed.json.gz"
                echo "9999999999999" > "$NIST_DIR/nvdcve-1.1-$feed.json.gz.ts"
              done
            '';
          };
          services.dependency-track = {
            enable = true;
            port = dependencyTrackPort;
            nginx.domain = "localhost";
            database.passwordFile = "${pkgs.writeText "dbPassword" ''hunter2'THE'''H''''E''}";
          };
        };
    };

    testScript =
      # python
      ''
        import json

        start_all()

        server.wait_for_unit("dependency-track.service")
        server.wait_until_succeeds(
            "journalctl -o cat -u dependency-track.service | grep 'Dependency-Track is ready'"
        )
        server.wait_for_open_port(${toString dependencyTrackPort})

        with subtest("version api returns correct version"):
            version = json.loads(
                server.succeed("curl http://localhost/api/version")
            )
            assert version["version"] == "${pkgs.dependency-track.version}"

        with subtest("nginx serves frontend"):
            server.succeed("curl http://localhost/ | grep \"<title>Dependency-Track</title>\"")
      '';
  }
)

@@ -1,43 +1,41 @@
import ./make-test-python.nix (
  { pkgs, ... }:
  let
    server-port = 3141;
  in
  {
    name = "devpi-server";
    meta = with pkgs.lib.maintainers; {
      maintainers = [ cafkafk ];
    };

    nodes = {
      devpi =
        { ... }:
        {
          services.devpi-server = {
            enable = true;
            host = "0.0.0.0";
            port = server-port;
            openFirewall = true;
            secretFile = pkgs.writeText "devpi-secret" "v263P+V3YGDYUyfYL/RBURw+tCPMDw94R/iCuBNJrDhaYrZYjpA6XPFVDDH8ViN20j77y2PHoMM/U0opNkVQ2g==";
          };
        };

      client1 =
        { ... }:
        {
          environment.systemPackages = with pkgs; [
            devpi-client
            jq
          ];
        };
    };

    testScript = ''
      start_all()
      devpi.wait_for_unit("devpi-server.service")
      devpi.wait_for_open_port(${builtins.toString server-port})

      client1.succeed("devpi getjson http://devpi:${builtins.toString server-port}")
    '';
  }
)

@@ -1,84 +1,82 @@
import ./make-test-python.nix (
  { lib, ... }:
  {
    name = "dex-oidc";
    meta.maintainers = with lib.maintainers; [ Flakebi ];

    nodes.machine =
      { pkgs, ... }:
      {
        environment.systemPackages = with pkgs; [ jq ];
        services.dex = {
          enable = true;
          settings = {
            issuer = "http://127.0.0.1:8080/dex";
            storage = {
              type = "postgres";
              config.host = "/var/run/postgresql";
            };
            web.http = "127.0.0.1:8080";
            oauth2.skipApprovalScreen = true;
            staticClients = [
              {
                id = "oidcclient";
                name = "Client";
                redirectURIs = [ "https://example.com/callback" ];
                secretFile = "/etc/dex/oidcclient";
              }
            ];
            connectors = [
              {
                type = "mockPassword";
                id = "mock";
                name = "Example";
                config = {
                  username = "admin";
                  password = "password";
                };
              }
            ];
          };
        };

        # This should not be set from nix but through other means to not leak the secret.
        environment.etc."dex/oidcclient" = {
          mode = "0400";
          user = "dex";
          text = "oidcclientsecret";
        };

        services.postgresql = {
          enable = true;
          ensureDatabases = [ "dex" ];
          ensureUsers = [
            {
              name = "dex";
              ensureDBOwnership = true;
            }
          ];
        };
      };

    testScript = ''
      with subtest("Web server gets ready"):
          machine.wait_for_unit("dex.service", timeout=120)
          # Wait until server accepts connections
          machine.wait_until_succeeds("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid'", timeout=120)

      with subtest("Login"):
          state = machine.succeed("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid' | sed -n 's/.*state=\\(.*\\)\">.*/\\1/p'").strip()
          print(f"Got state {state}")
          # Login request returns 303 with redirect_url that has code as query parameter:
          # https://example.com/callback?code=kibsamwdupuy2iwqnlbqei3u6&state=
          code = machine.succeed(f"curl -fs 'localhost:8080/dex/auth/mock/login?back=&state={state}' -d 'login=admin&password=password' -w '%{{redirect_url}}' | sed -n 's/.*code=\\(.*\\)&.*/\\1/p'")
          print(f"Got approval code {code}")
          bearer = machine.succeed(f"curl -fs localhost:8080/dex/token -u oidcclient:oidcclientsecret -d 'grant_type=authorization_code&redirect_uri=https://example.com/callback&code={code}' | jq .access_token -r").strip()
          print(f"Got access token {bearer}")

      with subtest("Get userinfo"):
          assert '"sub"' in machine.succeed(
              f"curl -fs localhost:8080/dex/userinfo --oauth2-bearer {bearer}"
          )
    '';
  }
)

@@ -1,35 +1,33 @@
import ./make-test-python.nix (
  {
    pkgs,
    latestKernel ? false,
    ...
  }:

  {
    name = "disable-installer-tools";

    nodes.machine =
      { pkgs, lib, ... }:
      {
        system.disableInstallerTools = true;
        boot.enableContainers = false;
        environment.defaultPackages = [ ];
      };

    testScript = ''
      machine.wait_for_unit("multi-user.target")
      machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")

      with subtest("nixos installer tools should not be included"):
          machine.fail("which nixos-rebuild")
          machine.fail("which nixos-install")
          machine.fail("which nixos-generate-config")
          machine.fail("which nixos-enter")
          machine.fail("which nixos-version")
          machine.fail("which nixos-build-vms")

      with subtest("perl should not be included"):
          machine.fail("which perl")
    '';
  }
)

@@ -3,209 +3,207 @@
|
|||
# 2. sending a private message to the admin user through the API
|
||||
# 3. replying to that message via email.
|
||||
|
||||
import ./make-test-python.nix (
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
package ? pkgs.discourse,
|
||||
...
|
||||
}:
|
||||
let
|
||||
certs = import ./common/acme/server/snakeoil-certs.nix;
|
||||
clientDomain = "client.fake.domain";
|
||||
discourseDomain = certs.domain;
|
||||
adminPassword = "eYAX85qmMJ5GZIHLaXGDAoszD7HSZp5d";
|
||||
secretKeyBase = "381f4ac6d8f5e49d804dae72aa9c046431d2f34c656a705c41cd52fed9b4f6f76f51549f0b55db3b8b0dded7a00d6a381ebe9a4367d2d44f5e743af6628b4d42";
|
||||
admin = {
|
||||
email = "alice@${clientDomain}";
|
||||
username = "alice";
|
||||
fullName = "Alice Admin";
|
||||
passwordFile = "${pkgs.writeText "admin-pass" adminPassword}";
|
||||
};
|
||||
in
|
||||
{
|
||||
name = "discourse";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ talyz ];
|
||||
};
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
package ? pkgs.discourse,
|
||||
...
|
||||
}:
|
||||
let
|
||||
certs = import ./common/acme/server/snakeoil-certs.nix;
|
||||
clientDomain = "client.fake.domain";
|
||||
discourseDomain = certs.domain;
|
||||
adminPassword = "eYAX85qmMJ5GZIHLaXGDAoszD7HSZp5d";
|
||||
secretKeyBase = "381f4ac6d8f5e49d804dae72aa9c046431d2f34c656a705c41cd52fed9b4f6f76f51549f0b55db3b8b0dded7a00d6a381ebe9a4367d2d44f5e743af6628b4d42";
|
||||
admin = {
|
||||
email = "alice@${clientDomain}";
|
||||
username = "alice";
|
||||
fullName = "Alice Admin";
|
||||
passwordFile = "${pkgs.writeText "admin-pass" adminPassword}";
|
||||
};
|
||||
in
|
||||
{
|
||||
name = "discourse";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ talyz ];
|
||||
};
|
||||
|
||||
nodes.discourse =
|
||||
{ nodes, ... }:
|
||||
{
|
||||
virtualisation.memorySize = 2048;
|
||||
virtualisation.cores = 4;
|
||||
virtualisation.useNixStoreImage = true;
|
||||
virtualisation.writableStore = false;
|
||||
nodes.discourse =
|
||||
{ nodes, ... }:
|
||||
{
|
||||
virtualisation.memorySize = 2048;
|
||||
virtualisation.cores = 4;
|
||||
virtualisation.useNixStoreImage = true;
|
||||
virtualisation.writableStore = false;
|
||||
|
||||
imports = [ common/user-account.nix ];
|
||||
imports = [ common/user-account.nix ];
|
||||
|
||||
security.pki.certificateFiles = [
|
||||
certs.ca.cert
|
||||
];
|
||||
security.pki.certificateFiles = [
|
||||
certs.ca.cert
|
||||
];
|
||||
|
||||
networking.extraHosts = ''
|
||||
127.0.0.1 ${discourseDomain}
|
||||
${nodes.client.networking.primaryIPAddress} ${clientDomain}
|
||||
'';
|
||||
|
||||
services.postfix = {
|
||||
enableSubmission = true;
|
||||
enableSubmissions = true;
|
||||
submissionsOptions = {
|
||||
smtpd_sasl_auth_enable = "yes";
|
||||
smtpd_client_restrictions = "permit";
|
||||
};
|
||||
};
|
||||
|
||||
environment.systemPackages = [ pkgs.jq ];
|
||||
|
||||
services.postgresql.package = pkgs.postgresql_15;
|
||||
|
||||
services.discourse = {
|
||||
enable = true;
|
||||
inherit admin package;
|
||||
hostname = discourseDomain;
|
||||
sslCertificate = "${certs.${discourseDomain}.cert}";
|
||||
sslCertificateKey = "${certs.${discourseDomain}.key}";
|
||||
secretKeyBaseFile = "${pkgs.writeText "secret-key-base" secretKeyBase}";
|
||||
enableACME = false;
|
||||
mail.outgoing.serverAddress = clientDomain;
|
||||
mail.incoming.enable = true;
|
||||
siteSettings = {
|
||||
posting = {
|
||||
min_post_length = 5;
|
||||
min_first_post_length = 5;
|
||||
min_personal_message_post_length = 5;
|
||||
};
|
||||
};
|
||||
unicornTimeout = 900;
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
25
|
||||
465
|
||||
];
|
||||
};
|
||||
|
||||
nodes.client =
|
||||
{ nodes, ... }:
|
||||
{
|
||||
imports = [ common/user-account.nix ];
|
||||
|
||||
security.pki.certificateFiles = [
|
||||
certs.ca.cert
|
||||
];
|
||||
|
||||
networking.extraHosts = ''
|
||||
127.0.0.1 ${clientDomain}
|
||||
${nodes.discourse.networking.primaryIPAddress} ${discourseDomain}
|
||||
'';
|
||||
|
||||
services.dovecot2 = {
|
||||
enable = true;
|
||||
protocols = [ "imap" ];
|
||||
};
|
||||
|
||||
services.postfix = {
|
||||
enable = true;
|
||||
origin = clientDomain;
|
||||
relayDomains = [ clientDomain ];
|
||||
config = {
|
||||
compatibility_level = "2";
|
||||
smtpd_banner = "ESMTP server";
|
||||
myhostname = clientDomain;
|
||||
mydestination = clientDomain;
|
||||
};
|
||||
};
|
||||
|
||||
environment.systemPackages =
|
||||
let
|
||||
replyToEmail = pkgs.writeScriptBin "reply-to-email" ''
|
||||
#!${pkgs.python3.interpreter}
|
||||
import imaplib
|
||||
import smtplib
|
||||
import ssl
|
||||
import email.header
|
||||
from email import message_from_bytes
|
||||
from email.message import EmailMessage
|
||||
|
||||
with imaplib.IMAP4('localhost') as imap:
|
||||
imap.login('alice', 'foobar')
|
||||
imap.select()
|
||||
status, data = imap.search(None, 'ALL')
|
||||
assert status == 'OK'
|
||||
|
||||
nums = data[0].split()
|
||||
assert len(nums) == 1
|
||||
|
||||
status, msg_data = imap.fetch(nums[0], '(RFC822)')
|
||||
assert status == 'OK'
|
||||
|
||||
msg = email.message_from_bytes(msg_data[0][1])
|
||||
subject = str(email.header.make_header(email.header.decode_header(msg['Subject'])))
|
||||
reply_to = email.header.decode_header(msg['Reply-To'])[0][0]
|
||||
message_id = email.header.decode_header(msg['Message-ID'])[0][0]
|
||||
date = email.header.decode_header(msg['Date'])[0][0]
|
||||
|
||||
ctx = ssl.create_default_context()
|
||||
with smtplib.SMTP_SSL(host='${discourseDomain}', context=ctx) as smtp:
|
||||
reply = EmailMessage()
|
||||
reply['Subject'] = 'Re: ' + subject
|
||||
reply['To'] = reply_to
|
||||
reply['From'] = 'alice@${clientDomain}'
|
||||
reply['In-Reply-To'] = message_id
|
||||
reply['References'] = message_id
|
||||
reply['Date'] = date
|
||||
reply.set_content("Test reply.")
|
||||
|
||||
smtp.send_message(reply)
|
||||
smtp.quit()
|
||||
'';
|
||||
in
|
||||
[ replyToEmail ];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 25 ];
|
||||
};
|
||||
|
||||
testScript =
|
||||
{ nodes }:
|
||||
let
|
||||
request = builtins.toJSON {
|
||||
title = "Private message";
|
||||
raw = "This is a test message.";
|
||||
target_recipients = admin.username;
|
||||
archetype = "private_message";
|
||||
};
|
||||
in
|
||||
''
|
||||
discourse.start()
|
||||
client.start()
|
||||
|
||||
discourse.wait_for_unit("discourse.service")
|
||||
discourse.wait_for_file("/run/discourse/sockets/unicorn.sock")
|
||||
discourse.wait_until_succeeds("curl -sS -f https://${discourseDomain}")
|
||||
discourse.succeed(
|
||||
"curl -sS -f https://${discourseDomain}/session/csrf -c cookie -b cookie -H 'Accept: application/json' | jq -r '\"X-CSRF-Token: \" + .csrf' > csrf_token",
|
||||
"curl -sS -f https://${discourseDomain}/session -c cookie -b cookie -H @csrf_token -H 'Accept: application/json' -d 'login=${nodes.discourse.services.discourse.admin.username}' -d \"password=${adminPassword}\" | jq -e '.user.username == \"${nodes.discourse.services.discourse.admin.username}\"'",
|
||||
"curl -sS -f https://${discourseDomain}/login -v -H 'Accept: application/json' -c cookie -b cookie 2>&1 | grep ${nodes.discourse.services.discourse.admin.username}",
|
||||
)
|
||||
|
||||
client.wait_for_unit("postfix.service")
|
||||
client.wait_for_unit("dovecot2.service")
|
||||
|
||||
discourse.succeed(
|
||||
"sudo -u discourse discourse-rake api_key:create_master[master] >api_key",
|
||||
'curl -sS -f https://${discourseDomain}/posts -X POST -H "Content-Type: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" -d \'${request}\' ',
|
||||
)
|
||||
|
||||
client.wait_until_succeeds("reply-to-email")
|
||||
|
||||
discourse.wait_until_succeeds(
|
||||
'curl -sS -f https://${discourseDomain}/topics/private-messages/system -H "Accept: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" | jq -e \'if .topic_list.topics[0].id != null then .topic_list.topics[0].id else null end\' >topic_id'
|
||||
)
|
||||
discourse.succeed(
|
||||
'curl -sS -f https://${discourseDomain}/t/$(<topic_id) -H "Accept: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" | jq -e \'if .post_stream.posts[1].cooked == "<p>Test reply.</p>" then true else null end\' '
|
||||
)
|
||||
networking.extraHosts = ''
|
||||
127.0.0.1 ${discourseDomain}
|
||||
${nodes.client.networking.primaryIPAddress} ${clientDomain}
|
||||
'';
|
||||
}
|
||||
)
|
||||
|
||||
services.postfix = {
|
||||
enableSubmission = true;
|
||||
enableSubmissions = true;
|
||||
submissionsOptions = {
|
||||
smtpd_sasl_auth_enable = "yes";
|
||||
smtpd_client_restrictions = "permit";
|
||||
};
|
||||
};
|
||||
|
||||
environment.systemPackages = [ pkgs.jq ];
|
||||
|
||||
services.postgresql.package = pkgs.postgresql_15;
|
||||
|
||||
services.discourse = {
|
||||
enable = true;
|
||||
inherit admin package;
|
||||
hostname = discourseDomain;
|
||||
sslCertificate = "${certs.${discourseDomain}.cert}";
|
||||
sslCertificateKey = "${certs.${discourseDomain}.key}";
|
||||
secretKeyBaseFile = "${pkgs.writeText "secret-key-base" secretKeyBase}";
|
||||
enableACME = false;
|
||||
mail.outgoing.serverAddress = clientDomain;
|
||||
mail.incoming.enable = true;
|
||||
siteSettings = {
|
||||
posting = {
|
||||
min_post_length = 5;
|
||||
min_first_post_length = 5;
|
||||
min_personal_message_post_length = 5;
|
||||
};
|
||||
};
|
||||
unicornTimeout = 900;
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
25
|
||||
465
|
||||
];
|
||||
};
|
||||
|
||||
nodes.client =
|
||||
{ nodes, ... }:
|
||||
{
|
||||
imports = [ common/user-account.nix ];
|
||||
|
||||
security.pki.certificateFiles = [
|
||||
certs.ca.cert
|
||||
];
|
||||
|
||||
networking.extraHosts = ''
|
||||
127.0.0.1 ${clientDomain}
|
||||
${nodes.discourse.networking.primaryIPAddress} ${discourseDomain}
|
||||
'';
|
||||
|
||||
services.dovecot2 = {
|
||||
enable = true;
|
||||
protocols = [ "imap" ];
|
||||
};
|
||||
|
||||
services.postfix = {
|
||||
enable = true;
|
||||
origin = clientDomain;
|
||||
relayDomains = [ clientDomain ];
|
||||
config = {
|
||||
compatibility_level = "2";
|
||||
smtpd_banner = "ESMTP server";
|
||||
myhostname = clientDomain;
|
||||
mydestination = clientDomain;
|
||||
};
|
||||
};
|
||||
|
||||
environment.systemPackages =
|
||||
let
|
||||
replyToEmail = pkgs.writeScriptBin "reply-to-email" ''
|
||||
#!${pkgs.python3.interpreter}
|
||||
import imaplib
|
||||
import smtplib
|
||||
import ssl
|
||||
import email.header
|
||||
from email import message_from_bytes
|
||||
from email.message import EmailMessage
|
||||
|
||||
with imaplib.IMAP4('localhost') as imap:
|
||||
imap.login('alice', 'foobar')
|
||||
imap.select()
|
||||
status, data = imap.search(None, 'ALL')
|
||||
assert status == 'OK'
|
||||
|
||||
nums = data[0].split()
|
||||
assert len(nums) == 1
|
||||
|
||||
status, msg_data = imap.fetch(nums[0], '(RFC822)')
|
||||
assert status == 'OK'
|
||||
|
||||
msg = email.message_from_bytes(msg_data[0][1])
|
||||
subject = str(email.header.make_header(email.header.decode_header(msg['Subject'])))
|
||||
reply_to = email.header.decode_header(msg['Reply-To'])[0][0]
|
||||
message_id = email.header.decode_header(msg['Message-ID'])[0][0]
|
||||
date = email.header.decode_header(msg['Date'])[0][0]
|
||||
|
||||
ctx = ssl.create_default_context()
|
||||
with smtplib.SMTP_SSL(host='${discourseDomain}', context=ctx) as smtp:
|
||||
reply = EmailMessage()
|
||||
reply['Subject'] = 'Re: ' + subject
|
||||
reply['To'] = reply_to
|
||||
reply['From'] = 'alice@${clientDomain}'
|
||||
reply['In-Reply-To'] = message_id
|
||||
reply['References'] = message_id
|
||||
reply['Date'] = date
|
||||
reply.set_content("Test reply.")
|
||||
|
||||
smtp.send_message(reply)
|
||||
smtp.quit()
|
||||
'';
|
||||
in
|
||||
[ replyToEmail ];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 25 ];
|
||||
};
|
||||
|
||||
testScript =
|
||||
{ nodes }:
|
||||
let
|
||||
request = builtins.toJSON {
|
||||
title = "Private message";
|
||||
raw = "This is a test message.";
|
||||
target_recipients = admin.username;
|
||||
archetype = "private_message";
|
||||
};
|
||||
in
|
||||
''
|
||||
discourse.start()
|
||||
client.start()
|
||||
|
||||
discourse.wait_for_unit("discourse.service")
|
||||
discourse.wait_for_file("/run/discourse/sockets/unicorn.sock")
|
||||
discourse.wait_until_succeeds("curl -sS -f https://${discourseDomain}")
|
||||
discourse.succeed(
|
||||
"curl -sS -f https://${discourseDomain}/session/csrf -c cookie -b cookie -H 'Accept: application/json' | jq -r '\"X-CSRF-Token: \" + .csrf' > csrf_token",
|
||||
"curl -sS -f https://${discourseDomain}/session -c cookie -b cookie -H @csrf_token -H 'Accept: application/json' -d 'login=${nodes.discourse.services.discourse.admin.username}' -d \"password=${adminPassword}\" | jq -e '.user.username == \"${nodes.discourse.services.discourse.admin.username}\"'",
|
||||
"curl -sS -f https://${discourseDomain}/login -v -H 'Accept: application/json' -c cookie -b cookie 2>&1 | grep ${nodes.discourse.services.discourse.admin.username}",
|
||||
)
|
||||
|
||||
client.wait_for_unit("postfix.service")
|
||||
client.wait_for_unit("dovecot2.service")
|
||||
|
||||
discourse.succeed(
|
||||
"sudo -u discourse discourse-rake api_key:create_master[master] >api_key",
|
||||
'curl -sS -f https://${discourseDomain}/posts -X POST -H "Content-Type: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" -d \'${request}\' ',
|
||||
)
|
||||
|
||||
client.wait_until_succeeds("reply-to-email")
|
||||
|
||||
discourse.wait_until_succeeds(
|
||||
'curl -sS -f https://${discourseDomain}/topics/private-messages/system -H "Accept: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" | jq -e \'if .topic_list.topics[0].id != null then .topic_list.topics[0].id else null end\' >topic_id'
|
||||
)
|
||||
discourse.succeed(
|
||||
'curl -sS -f https://${discourseDomain}/t/$(<topic_id) -H "Accept: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" | jq -e \'if .post_stream.posts[1].cooked == "<p>Test reply.</p>" then true else null end\' '
|
||||
)
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,67 +1,65 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "documize";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ ];
|
||||
};
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "documize";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ ];
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = [ pkgs.jq ];
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = [ pkgs.jq ];
|
||||
|
||||
services.documize = {
|
||||
enable = true;
|
||||
port = 3000;
|
||||
dbtype = "postgresql";
|
||||
db = "host=localhost port=5432 sslmode=disable user=documize password=documize dbname=documize";
|
||||
};
|
||||
|
||||
systemd.services.documize-server = {
|
||||
after = [ "postgresql.service" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
enable = true;
|
||||
initialScript = pkgs.writeText "psql-init" ''
|
||||
CREATE ROLE documize WITH LOGIN PASSWORD 'documize';
|
||||
CREATE DATABASE documize WITH OWNER documize;
|
||||
'';
|
||||
};
|
||||
services.documize = {
|
||||
enable = true;
|
||||
port = 3000;
|
||||
dbtype = "postgresql";
|
||||
db = "host=localhost port=5432 sslmode=disable user=documize password=documize dbname=documize";
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
systemd.services.documize-server = {
|
||||
after = [ "postgresql.service" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
};
|
||||
|
||||
machine.wait_for_unit("documize-server.service")
|
||||
machine.wait_for_open_port(3000)
|
||||
services.postgresql = {
|
||||
enable = true;
|
||||
initialScript = pkgs.writeText "psql-init" ''
|
||||
CREATE ROLE documize WITH LOGIN PASSWORD 'documize';
|
||||
CREATE DATABASE documize WITH OWNER documize;
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
dbhash = machine.succeed(
|
||||
"curl -f localhost:3000 | grep 'property=\"dbhash' | grep -Po 'content=\"\\K[^\"]*'"
|
||||
)
|
||||
testScript = ''
|
||||
start_all()
|
||||
|
||||
dbhash = dbhash.strip()
|
||||
machine.wait_for_unit("documize-server.service")
|
||||
machine.wait_for_open_port(3000)
|
||||
|
||||
machine.succeed(
|
||||
(
|
||||
"curl -X POST"
|
||||
" --data 'dbname=documize'"
|
||||
" --data 'dbhash={}'"
|
||||
" --data 'title=NixOS'"
|
||||
" --data 'message=Docs'"
|
||||
" --data 'firstname=Bob'"
|
||||
" --data 'lastname=Foobar'"
|
||||
" --data 'email=bob.foobar@nixos.org'"
|
||||
" --data 'password=verysafe'"
|
||||
" -f localhost:3000/api/setup"
|
||||
).format(dbhash)
|
||||
)
|
||||
dbhash = machine.succeed(
|
||||
"curl -f localhost:3000 | grep 'property=\"dbhash' | grep -Po 'content=\"\\K[^\"]*'"
|
||||
)
|
||||
|
||||
machine.succeed(
|
||||
'test "$(curl -f localhost:3000/api/public/meta | jq ".title" | xargs echo)" = "NixOS"'
|
||||
)
|
||||
'';
|
||||
}
|
||||
)
|
||||
dbhash = dbhash.strip()
|
||||
|
||||
machine.succeed(
|
||||
(
|
||||
"curl -X POST"
|
||||
" --data 'dbname=documize'"
|
||||
" --data 'dbhash={}'"
|
||||
" --data 'title=NixOS'"
|
||||
" --data 'message=Docs'"
|
||||
" --data 'firstname=Bob'"
|
||||
" --data 'lastname=Foobar'"
|
||||
" --data 'email=bob.foobar@nixos.org'"
|
||||
" --data 'password=verysafe'"
|
||||
" -f localhost:3000/api/setup"
|
||||
).format(dbhash)
|
||||
)
|
||||
|
||||
machine.succeed(
|
||||
'test "$(curl -f localhost:3000/api/public/meta | jq ".title" | xargs echo)" = "NixOS"'
|
||||
)
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,48 +1,46 @@
|
|||
import ./make-test-python.nix (
|
||||
{ lib, pkgs, ... }:
|
||||
{
|
||||
name = "doh-proxy-rust";
|
||||
meta.maintainers = with lib.maintainers; [ stephank ];
|
||||
{ lib, pkgs, ... }:
|
||||
{
|
||||
name = "doh-proxy-rust";
|
||||
meta.maintainers = with lib.maintainers; [ stephank ];
|
||||
|
||||
nodes = {
|
||||
machine =
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
services.bind = {
|
||||
enable = true;
|
||||
extraOptions = "empty-zones-enable no;";
|
||||
zones = lib.singleton {
|
||||
name = ".";
|
||||
master = true;
|
||||
file = pkgs.writeText "root.zone" ''
|
||||
$TTL 3600
|
||||
. IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d )
|
||||
. IN NS ns.example.org.
|
||||
ns.example.org. IN A 192.168.0.1
|
||||
'';
|
||||
};
|
||||
};
|
||||
services.doh-proxy-rust = {
|
||||
enable = true;
|
||||
flags = [
|
||||
"--server-address=127.0.0.1:53"
|
||||
];
|
||||
nodes = {
|
||||
machine =
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
services.bind = {
|
||||
enable = true;
|
||||
extraOptions = "empty-zones-enable no;";
|
||||
zones = lib.singleton {
|
||||
name = ".";
|
||||
master = true;
|
||||
file = pkgs.writeText "root.zone" ''
|
||||
$TTL 3600
|
||||
. IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d )
|
||||
. IN NS ns.example.org.
|
||||
ns.example.org. IN A 192.168.0.1
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
services.doh-proxy-rust = {
|
||||
enable = true;
|
||||
flags = [
|
||||
"--server-address=127.0.0.1:53"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript =
|
||||
{ nodes, ... }:
|
||||
''
|
||||
url = "http://localhost:3000/dns-query"
|
||||
query = "AAABAAABAAAAAAAAAm5zB2V4YW1wbGUDb3JnAAABAAE=" # IN A ns.example.org.
|
||||
bin_ip = r"$'\xC0\xA8\x00\x01'" # 192.168.0.1, as shell binary string
|
||||
testScript =
|
||||
{ nodes, ... }:
|
||||
''
|
||||
url = "http://localhost:3000/dns-query"
|
||||
query = "AAABAAABAAAAAAAAAm5zB2V4YW1wbGUDb3JnAAABAAE=" # IN A ns.example.org.
|
||||
bin_ip = r"$'\xC0\xA8\x00\x01'" # 192.168.0.1, as shell binary string
|
||||
|
||||
machine.wait_for_unit("bind.service")
|
||||
machine.wait_for_unit("doh-proxy-rust.service")
|
||||
machine.wait_for_open_port(53)
|
||||
machine.wait_for_open_port(3000)
|
||||
machine.succeed(f"curl --fail -H 'Accept: application/dns-message' '{url}?dns={query}' | grep -F {bin_ip}")
|
||||
'';
|
||||
}
|
||||
)
|
||||
machine.wait_for_unit("bind.service")
|
||||
machine.wait_for_unit("doh-proxy-rust.service")
|
||||
machine.wait_for_open_port(53)
|
||||
machine.wait_for_open_port(3000)
|
||||
machine.succeed(f"curl --fail -H 'Accept: application/dns-message' '{url}?dns={query}' | grep -F {bin_ip}")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,33 +1,31 @@
|
|||
import ./make-test-python.nix (
{ pkgs, ... }:
{
name = "domination";
meta = with pkgs.lib.maintainers; {
maintainers = [ fgaz ];
{ pkgs, ... }:
{
name = "domination";
meta = with pkgs.lib.maintainers; {
maintainers = [ fgaz ];
};

nodes.machine =
{ config, pkgs, ... }:
{
imports = [
./common/x11.nix
];

services.xserver.enable = true;
environment.systemPackages = [ pkgs.domination ];
};

nodes.machine =
{ config, pkgs, ... }:
{
imports = [
./common/x11.nix
];
enableOCR = true;

services.xserver.enable = true;
environment.systemPackages = [ pkgs.domination ];
};

enableOCR = true;

testScript = ''
machine.wait_for_x()
# Add a dummy sound card, or an error reporting popup will appear,
# covering the main window and preventing OCR
machine.execute("modprobe snd-dummy")
machine.execute("domination >&2 &")
machine.wait_for_window("Menu")
machine.wait_for_text(r"(New Game|Start Server|Load Game|Help Manual|Join Game|About|Play Online)")
machine.screenshot("screen")
'';
}
)
testScript = ''
machine.wait_for_x()
# Add a dummy sound card, or an error reporting popup will appear,
# covering the main window and preventing OCR
machine.execute("modprobe snd-dummy")
machine.execute("domination >&2 &")
machine.wait_for_window("Menu")
machine.wait_for_text(r"(New Game|Start Server|Load Game|Help Manual|Join Game|About|Play Online)")
machine.screenshot("screen")
'';
}
|
||||
|
|
|
@@ -1,24 +1,22 @@
|
|||
import ./make-test-python.nix (
{ lib, pkgs, ... }:
{
name = "drbd-driver";
meta.maintainers = with pkgs.lib.maintainers; [ birkb ];
{ lib, pkgs, ... }:
{
name = "drbd-driver";
meta.maintainers = with pkgs.lib.maintainers; [ birkb ];

nodes = {
machine =
{ config, pkgs, ... }:
{
boot = {
kernelModules = [ "drbd" ];
extraModulePackages = with config.boot.kernelPackages; [ drbd ];
kernelPackages = pkgs.linuxPackages;
};
nodes = {
machine =
{ config, pkgs, ... }:
{
boot = {
kernelModules = [ "drbd" ];
extraModulePackages = with config.boot.kernelPackages; [ drbd ];
kernelPackages = pkgs.linuxPackages;
};
};
};
};

testScript = ''
machine.start();
machine.succeed("modinfo drbd | grep --extended-regexp '^version:\s+${pkgs.linuxPackages.drbd.version}$'")
'';
}
)
testScript = ''
machine.start();
machine.succeed("modinfo drbd | grep --extended-regexp '^version:\s+${pkgs.linuxPackages.drbd.version}$'")
'';
}
|
||||
|
|
|
@@ -1,93 +1,91 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
let
|
||||
drbdPort = 7789;
|
||||
{ pkgs, lib, ... }:
|
||||
let
|
||||
drbdPort = 7789;
|
||||
|
||||
drbdConfig =
|
||||
{ nodes, ... }:
|
||||
{
|
||||
virtualisation.emptyDiskImages = [ 1 ];
|
||||
networking.firewall.allowedTCPPorts = [ drbdPort ];
|
||||
drbdConfig =
|
||||
{ nodes, ... }:
|
||||
{
|
||||
virtualisation.emptyDiskImages = [ 1 ];
|
||||
networking.firewall.allowedTCPPorts = [ drbdPort ];
|
||||
|
||||
services.drbd = {
|
||||
enable = true;
|
||||
config = ''
|
||||
global {
|
||||
usage-count yes;
|
||||
services.drbd = {
|
||||
enable = true;
|
||||
config = ''
|
||||
global {
|
||||
usage-count yes;
|
||||
}
|
||||
|
||||
common {
|
||||
net {
|
||||
protocol C;
|
||||
ping-int 1;
|
||||
}
|
||||
}
|
||||
|
||||
resource r0 {
|
||||
volume 0 {
|
||||
device /dev/drbd0;
|
||||
disk /dev/vdb;
|
||||
meta-disk internal;
|
||||
}
|
||||
|
||||
common {
|
||||
net {
|
||||
protocol C;
|
||||
ping-int 1;
|
||||
}
|
||||
on drbd1 {
|
||||
address ${nodes.drbd1.networking.primaryIPAddress}:${toString drbdPort};
|
||||
}
|
||||
|
||||
resource r0 {
|
||||
volume 0 {
|
||||
device /dev/drbd0;
|
||||
disk /dev/vdb;
|
||||
meta-disk internal;
|
||||
}
|
||||
|
||||
on drbd1 {
|
||||
address ${nodes.drbd1.networking.primaryIPAddress}:${toString drbdPort};
|
||||
}
|
||||
|
||||
on drbd2 {
|
||||
address ${nodes.drbd2.networking.primaryIPAddress}:${toString drbdPort};
|
||||
}
|
||||
on drbd2 {
|
||||
address ${nodes.drbd2.networking.primaryIPAddress}:${toString drbdPort};
|
||||
}
|
||||
'';
|
||||
};
|
||||
}
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
name = "drbd";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [
|
||||
ryantm
|
||||
astro
|
||||
birkb
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
name = "drbd";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [
|
||||
ryantm
|
||||
astro
|
||||
birkb
|
||||
];
|
||||
};
|
||||
|
||||
nodes.drbd1 = drbdConfig;
|
||||
nodes.drbd2 = drbdConfig;
|
||||
nodes.drbd1 = drbdConfig;
|
||||
nodes.drbd2 = drbdConfig;
|
||||
|
||||
testScript =
|
||||
{ nodes }:
|
||||
''
|
||||
drbd1.start()
|
||||
drbd2.start()
|
||||
testScript =
|
||||
{ nodes }:
|
||||
''
|
||||
drbd1.start()
|
||||
drbd2.start()
|
||||
|
||||
drbd1.wait_for_unit("network.target")
|
||||
drbd2.wait_for_unit("network.target")
|
||||
drbd1.wait_for_unit("network.target")
|
||||
drbd2.wait_for_unit("network.target")
|
||||
|
||||
drbd1.succeed(
|
||||
"drbdadm create-md r0",
|
||||
"drbdadm up r0",
|
||||
"drbdadm primary r0 --force",
|
||||
)
|
||||
drbd1.succeed(
|
||||
"drbdadm create-md r0",
|
||||
"drbdadm up r0",
|
||||
"drbdadm primary r0 --force",
|
||||
)
|
||||
|
||||
drbd2.succeed("drbdadm create-md r0", "drbdadm up r0")
|
||||
drbd2.succeed("drbdadm create-md r0", "drbdadm up r0")
|
||||
|
||||
drbd1.succeed(
|
||||
"mkfs.ext4 /dev/drbd0",
|
||||
"mkdir -p /mnt/drbd",
|
||||
"mount /dev/drbd0 /mnt/drbd",
|
||||
"touch /mnt/drbd/hello",
|
||||
"umount /mnt/drbd",
|
||||
"drbdadm secondary r0",
|
||||
)
|
||||
drbd1.sleep(1)
|
||||
drbd1.succeed(
|
||||
"mkfs.ext4 /dev/drbd0",
|
||||
"mkdir -p /mnt/drbd",
|
||||
"mount /dev/drbd0 /mnt/drbd",
|
||||
"touch /mnt/drbd/hello",
|
||||
"umount /mnt/drbd",
|
||||
"drbdadm secondary r0",
|
||||
)
|
||||
drbd1.sleep(1)
|
||||
|
||||
drbd2.succeed(
|
||||
"drbdadm primary r0",
|
||||
"mkdir -p /mnt/drbd",
|
||||
"mount /dev/drbd0 /mnt/drbd",
|
||||
"ls /mnt/drbd/hello",
|
||||
)
|
||||
'';
|
||||
}
|
||||
)
|
||||
drbd2.succeed(
|
||||
"drbdadm primary r0",
|
||||
"mkdir -p /mnt/drbd",
|
||||
"mount /dev/drbd0 /mnt/drbd",
|
||||
"ls /mnt/drbd/hello",
|
||||
)
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -3,75 +3,73 @@
|
|||
# client on the inside network, a server on the outside network, and a
|
||||
# router connected to both that performs Network Address Translation
|
||||
# for the client.
|
||||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
let
|
||||
routerBase = lib.mkMerge [
|
||||
{
|
||||
virtualisation.vlans = [
|
||||
2
|
||||
1
|
||||
];
|
||||
networking.nftables.enable = true;
|
||||
networking.nat.internalIPs = [ "192.168.1.0/24" ];
|
||||
networking.nat.externalInterface = "eth1";
|
||||
}
|
||||
];
|
||||
in
|
||||
{
|
||||
name = "dublin-traceroute";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ baloo ];
|
||||
{ pkgs, lib, ... }:
|
||||
let
|
||||
routerBase = lib.mkMerge [
|
||||
{
|
||||
virtualisation.vlans = [
|
||||
2
|
||||
1
|
||||
];
|
||||
networking.nftables.enable = true;
|
||||
networking.nat.internalIPs = [ "192.168.1.0/24" ];
|
||||
networking.nat.externalInterface = "eth1";
|
||||
}
|
||||
];
|
||||
in
|
||||
{
|
||||
name = "dublin-traceroute";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ baloo ];
|
||||
};
|
||||
|
||||
nodes.client =
|
||||
{ nodes, ... }:
|
||||
{
|
||||
imports = [ ./common/user-account.nix ];
|
||||
virtualisation.vlans = [ 1 ];
|
||||
|
||||
networking.defaultGateway =
|
||||
(builtins.head nodes.router.networking.interfaces.eth2.ipv4.addresses).address;
|
||||
networking.nftables.enable = true;
|
||||
|
||||
programs.dublin-traceroute.enable = true;
|
||||
};
|
||||
|
||||
nodes.client =
|
||||
{ nodes, ... }:
|
||||
{
|
||||
imports = [ ./common/user-account.nix ];
|
||||
virtualisation.vlans = [ 1 ];
|
||||
nodes.router =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [
|
||||
2
|
||||
1
|
||||
];
|
||||
networking.nftables.enable = true;
|
||||
networking.nat.internalIPs = [ "192.168.1.0/24" ];
|
||||
networking.nat.externalInterface = "eth1";
|
||||
networking.nat.enable = true;
|
||||
};
|
||||
|
||||
networking.defaultGateway =
|
||||
(builtins.head nodes.router.networking.interfaces.eth2.ipv4.addresses).address;
|
||||
networking.nftables.enable = true;
|
||||
nodes.server =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 2 ];
|
||||
networking.firewall.enable = false;
|
||||
services.httpd.enable = true;
|
||||
services.httpd.adminAddr = "foo@example.org";
|
||||
services.vsftpd.enable = true;
|
||||
services.vsftpd.anonymousUser = true;
|
||||
};
|
||||
|
||||
programs.dublin-traceroute.enable = true;
|
||||
};
|
||||
testScript = ''
|
||||
client.start()
|
||||
router.start()
|
||||
server.start()
|
||||
|
||||
nodes.router =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [
|
||||
2
|
||||
1
|
||||
];
|
||||
networking.nftables.enable = true;
|
||||
networking.nat.internalIPs = [ "192.168.1.0/24" ];
|
||||
networking.nat.externalInterface = "eth1";
|
||||
networking.nat.enable = true;
|
||||
};
|
||||
server.wait_for_unit("network.target")
|
||||
router.wait_for_unit("network.target")
|
||||
client.wait_for_unit("network.target")
|
||||
|
||||
nodes.server =
|
||||
{ ... }:
|
||||
{
|
||||
virtualisation.vlans = [ 2 ];
|
||||
networking.firewall.enable = false;
|
||||
services.httpd.enable = true;
|
||||
services.httpd.adminAddr = "foo@example.org";
|
||||
services.vsftpd.enable = true;
|
||||
services.vsftpd.anonymousUser = true;
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
client.start()
|
||||
router.start()
|
||||
server.start()
|
||||
|
||||
server.wait_for_unit("network.target")
|
||||
router.wait_for_unit("network.target")
|
||||
client.wait_for_unit("network.target")
|
||||
|
||||
# Make sure we can trace from an unprivileged user
|
||||
client.succeed("sudo -u alice dublin-traceroute server")
|
||||
'';
|
||||
}
|
||||
)
|
||||
# Make sure we can trace from an unprivileged user
|
||||
client.succeed("sudo -u alice dublin-traceroute server")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,89 +1,87 @@
|
|||
import ./make-test-python.nix (
|
||||
{ ... }:
|
||||
{
|
||||
name = "ecryptfs";
|
||||
{ ... }:
|
||||
{
|
||||
name = "ecryptfs";
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [ ./common/user-account.nix ];
|
||||
boot.kernelModules = [ "ecryptfs" ];
|
||||
security.pam.enableEcryptfs = true;
|
||||
environment.systemPackages = with pkgs; [ keyutils ];
|
||||
};
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [ ./common/user-account.nix ];
|
||||
boot.kernelModules = [ "ecryptfs" ];
|
||||
security.pam.enableEcryptfs = true;
|
||||
environment.systemPackages = with pkgs; [ keyutils ];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
def login_as_alice():
|
||||
machine.wait_until_tty_matches("1", "login: ")
|
||||
machine.send_chars("alice\n")
|
||||
machine.wait_until_tty_matches("1", "Password: ")
|
||||
machine.send_chars("foobar\n")
|
||||
machine.wait_until_tty_matches("1", "alice\@machine")
|
||||
testScript = ''
|
||||
def login_as_alice():
|
||||
machine.wait_until_tty_matches("1", "login: ")
|
||||
machine.send_chars("alice\n")
|
||||
machine.wait_until_tty_matches("1", "Password: ")
|
||||
machine.send_chars("foobar\n")
|
||||
machine.wait_until_tty_matches("1", "alice\@machine")
|
||||
|
||||
|
||||
def logout():
|
||||
machine.send_chars("logout\n")
|
||||
machine.wait_until_tty_matches("1", "login: ")
|
||||
def logout():
|
||||
machine.send_chars("logout\n")
|
||||
machine.wait_until_tty_matches("1", "login: ")
|
||||
|
||||
|
||||
machine.wait_for_unit("default.target")
|
||||
machine.wait_for_unit("default.target")
|
||||
|
||||
with subtest("Set alice up with a password and a home"):
|
||||
machine.succeed("(echo foobar; echo foobar) | passwd alice")
|
||||
machine.succeed("chown -R alice.users ~alice")
|
||||
with subtest("Set alice up with a password and a home"):
|
||||
machine.succeed("(echo foobar; echo foobar) | passwd alice")
|
||||
machine.succeed("chown -R alice.users ~alice")
|
||||
|
||||
with subtest("Migrate alice's home"):
|
||||
out = machine.succeed("echo foobar | ecryptfs-migrate-home -u alice")
|
||||
machine.log(f"ecryptfs-migrate-home said: {out}")
|
||||
with subtest("Migrate alice's home"):
|
||||
out = machine.succeed("echo foobar | ecryptfs-migrate-home -u alice")
|
||||
machine.log(f"ecryptfs-migrate-home said: {out}")
|
||||
|
||||
with subtest("Log alice in (ecryptfs passwhrase is wrapped during first login)"):
|
||||
login_as_alice()
|
||||
machine.send_chars("logout\n")
|
||||
machine.wait_until_tty_matches("1", "login: ")
|
||||
with subtest("Log alice in (ecryptfs passwhrase is wrapped during first login)"):
|
||||
login_as_alice()
|
||||
machine.send_chars("logout\n")
|
||||
machine.wait_until_tty_matches("1", "login: ")
|
||||
|
||||
# Why do I need to do this??
|
||||
machine.succeed("su alice -c ecryptfs-umount-private || true")
|
||||
machine.sleep(1)
|
||||
# Why do I need to do this??
|
||||
machine.succeed("su alice -c ecryptfs-umount-private || true")
|
||||
machine.sleep(1)
|
||||
|
||||
with subtest("check that encrypted home is not mounted"):
|
||||
machine.fail("mount | grep ecryptfs")
|
||||
with subtest("check that encrypted home is not mounted"):
|
||||
machine.fail("mount | grep ecryptfs")
|
||||
|
||||
with subtest("Show contents of the user keyring"):
|
||||
out = machine.succeed("su - alice -c 'keyctl list \@u'")
|
||||
machine.log(f"keyctl unlink said: {out}")
|
||||
with subtest("Show contents of the user keyring"):
|
||||
out = machine.succeed("su - alice -c 'keyctl list \@u'")
|
||||
machine.log(f"keyctl unlink said: {out}")
|
||||
|
||||
with subtest("Log alice again"):
|
||||
login_as_alice()
|
||||
with subtest("Log alice again"):
|
||||
login_as_alice()
|
||||
|
||||
with subtest("Create some files in encrypted home"):
|
||||
machine.succeed("su alice -c 'touch ~alice/a'")
|
||||
machine.succeed("su alice -c 'echo c > ~alice/b'")
|
||||
with subtest("Create some files in encrypted home"):
|
||||
machine.succeed("su alice -c 'touch ~alice/a'")
|
||||
machine.succeed("su alice -c 'echo c > ~alice/b'")
|
||||
|
||||
with subtest("Logout"):
|
||||
logout()
|
||||
with subtest("Logout"):
|
||||
logout()
|
||||
|
||||
# Why do I need to do this??
|
||||
machine.succeed("su alice -c ecryptfs-umount-private || true")
|
||||
machine.sleep(1)
|
||||
# Why do I need to do this??
|
||||
machine.succeed("su alice -c ecryptfs-umount-private || true")
|
||||
machine.sleep(1)
|
||||
|
||||
with subtest("Check that the filesystem is not accessible"):
|
||||
machine.fail("mount | grep ecryptfs")
|
||||
machine.succeed("su alice -c 'test \! -f ~alice/a'")
|
||||
machine.succeed("su alice -c 'test \! -f ~alice/b'")
|
||||
with subtest("Check that the filesystem is not accessible"):
|
||||
machine.fail("mount | grep ecryptfs")
|
||||
machine.succeed("su alice -c 'test \! -f ~alice/a'")
|
||||
machine.succeed("su alice -c 'test \! -f ~alice/b'")
|
||||
|
||||
with subtest("Log alice once more"):
|
||||
login_as_alice()
|
||||
with subtest("Log alice once more"):
|
||||
login_as_alice()
|
||||
|
||||
with subtest("Check that the files are there"):
|
||||
machine.sleep(1)
|
||||
machine.succeed("su alice -c 'test -f ~alice/a'")
|
||||
machine.succeed("su alice -c 'test -f ~alice/b'")
|
||||
machine.succeed('test "$(cat ~alice/b)" = "c"')
|
||||
with subtest("Check that the files are there"):
|
||||
machine.sleep(1)
|
||||
machine.succeed("su alice -c 'test -f ~alice/a'")
|
||||
machine.succeed("su alice -c 'test -f ~alice/b'")
|
||||
machine.succeed('test "$(cat ~alice/b)" = "c"')
|
||||
|
||||
with subtest("Catch https://github.com/NixOS/nixpkgs/issues/16766"):
|
||||
machine.succeed("su alice -c 'ls -lh ~alice/'")
|
||||
with subtest("Catch https://github.com/NixOS/nixpkgs/issues/16766"):
|
||||
machine.succeed("su alice -c 'ls -lh ~alice/'")
|
||||
|
||||
logout()
|
||||
'';
|
||||
}
|
||||
)
|
||||
logout()
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,71 +1,69 @@
|
|||
import ./make-test-python.nix (
|
||||
{ lib, pkgs, ... }:
|
||||
{
|
||||
name = "endlessh-go";
|
||||
meta.maintainers = with lib.maintainers; [ azahi ];
|
||||
{ lib, pkgs, ... }:
|
||||
{
|
||||
name = "endlessh-go";
|
||||
meta.maintainers = with lib.maintainers; [ azahi ];
|
||||
|
||||
nodes = {
|
||||
server =
|
||||
{ ... }:
|
||||
{
|
||||
services.endlessh-go = {
|
||||
enable = true;
|
||||
prometheus.enable = true;
|
||||
openFirewall = true;
|
||||
nodes = {
|
||||
server =
|
||||
{ ... }:
|
||||
{
|
||||
services.endlessh-go = {
|
||||
enable = true;
|
||||
prometheus.enable = true;
|
||||
openFirewall = true;
|
||||
};
|
||||
|
||||
specialisation = {
|
||||
unprivileged.configuration = {
|
||||
services.endlessh-go = {
|
||||
port = 2222;
|
||||
prometheus.port = 9229;
|
||||
};
|
||||
};
|
||||
|
||||
specialisation = {
|
||||
unprivileged.configuration = {
|
||||
services.endlessh-go = {
|
||||
port = 2222;
|
||||
prometheus.port = 9229;
|
||||
};
|
||||
};
|
||||
|
||||
privileged.configuration = {
|
||||
services.endlessh-go = {
|
||||
port = 22;
|
||||
prometheus.port = 92;
|
||||
};
|
||||
privileged.configuration = {
|
||||
services.endlessh-go = {
|
||||
port = 22;
|
||||
prometheus.port = 92;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
client =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
curl
|
||||
netcat
|
||||
];
|
||||
};
|
||||
};
|
||||
client =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
curl
|
||||
netcat
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
def activate_specialisation(name: str):
|
||||
server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2")
|
||||
testScript = ''
|
||||
def activate_specialisation(name: str):
|
||||
server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2")
|
||||
|
||||
start_all()
|
||||
start_all()
|
||||
|
||||
with subtest("Unprivileged"):
|
||||
activate_specialisation("unprivileged")
|
||||
server.wait_for_unit("endlessh-go.service")
|
||||
server.wait_for_open_port(2222)
|
||||
server.wait_for_open_port(9229)
|
||||
server.fail("curl -sSf server:9229/metrics | grep -q endlessh_client_closed_count_total")
|
||||
client.succeed("nc -dvW5 server 2222")
|
||||
server.succeed("curl -sSf server:9229/metrics | grep -q endlessh_client_closed_count_total")
|
||||
client.fail("curl -sSfm 5 server:9229/metrics")
|
||||
with subtest("Unprivileged"):
|
||||
activate_specialisation("unprivileged")
|
||||
server.wait_for_unit("endlessh-go.service")
|
||||
server.wait_for_open_port(2222)
|
||||
server.wait_for_open_port(9229)
|
||||
server.fail("curl -sSf server:9229/metrics | grep -q endlessh_client_closed_count_total")
|
||||
client.succeed("nc -dvW5 server 2222")
|
||||
server.succeed("curl -sSf server:9229/metrics | grep -q endlessh_client_closed_count_total")
|
||||
client.fail("curl -sSfm 5 server:9229/metrics")
|
||||
|
||||
with subtest("Privileged"):
|
||||
activate_specialisation("privileged")
|
||||
server.wait_for_unit("endlessh-go.service")
|
||||
server.wait_for_open_port(22)
|
||||
server.wait_for_open_port(92)
|
||||
server.fail("curl -sSf server:92/metrics | grep -q endlessh_client_closed_count_total")
|
||||
client.succeed("nc -dvW5 server 22")
|
||||
server.succeed("curl -sSf server:92/metrics | grep -q endlessh_client_closed_count_total")
|
||||
client.fail("curl -sSfm 5 server:92/metrics")
|
||||
'';
|
||||
}
|
||||
)
|
||||
with subtest("Privileged"):
|
||||
activate_specialisation("privileged")
|
||||
server.wait_for_unit("endlessh-go.service")
|
||||
server.wait_for_open_port(22)
|
||||
server.wait_for_open_port(92)
|
||||
server.fail("curl -sSf server:92/metrics | grep -q endlessh_client_closed_count_total")
|
||||
client.succeed("nc -dvW5 server 22")
|
||||
server.succeed("curl -sSf server:92/metrics | grep -q endlessh_client_closed_count_total")
|
||||
client.fail("curl -sSfm 5 server:92/metrics")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,52 +1,50 @@
|
|||
import ./make-test-python.nix (
|
||||
{ lib, pkgs, ... }:
|
||||
{
|
||||
name = "endlessh";
|
||||
meta.maintainers = with lib.maintainers; [ azahi ];
|
||||
{ lib, pkgs, ... }:
|
||||
{
|
||||
name = "endlessh";
|
||||
meta.maintainers = with lib.maintainers; [ azahi ];
|
||||
|
||||
nodes = {
|
||||
server =
|
||||
{ ... }:
|
||||
{
|
||||
services.endlessh = {
|
||||
enable = true;
|
||||
openFirewall = true;
|
||||
};
|
||||
|
||||
specialisation = {
|
||||
unprivileged.configuration.services.endlessh.port = 2222;
|
||||
|
||||
privileged.configuration.services.endlessh.port = 22;
|
||||
};
|
||||
nodes = {
|
||||
server =
|
||||
{ ... }:
|
||||
{
|
||||
services.endlessh = {
|
||||
enable = true;
|
||||
openFirewall = true;
|
||||
};
|
||||
|
||||
client =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
curl
|
||||
netcat
|
||||
];
|
||||
specialisation = {
|
||||
unprivileged.configuration.services.endlessh.port = 2222;
|
||||
|
||||
privileged.configuration.services.endlessh.port = 22;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
def activate_specialisation(name: str):
|
||||
server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2")
|
||||
client =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
curl
|
||||
netcat
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
start_all()
|
||||
testScript = ''
|
||||
def activate_specialisation(name: str):
|
||||
server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2")
|
||||
|
||||
with subtest("Unprivileged"):
|
||||
activate_specialisation("unprivileged")
|
||||
server.wait_for_unit("endlessh.service")
|
||||
server.wait_for_open_port(2222)
|
||||
client.succeed("nc -dvW5 server 2222")
|
||||
start_all()
|
||||
|
||||
with subtest("Privileged"):
|
||||
activate_specialisation("privileged")
|
||||
server.wait_for_unit("endlessh.service")
|
||||
server.wait_for_open_port(22)
|
||||
client.succeed("nc -dvW5 server 22")
|
||||
'';
|
||||
}
|
||||
)
|
||||
with subtest("Unprivileged"):
|
||||
activate_specialisation("unprivileged")
|
||||
server.wait_for_unit("endlessh.service")
|
||||
server.wait_for_open_port(2222)
|
||||
client.succeed("nc -dvW5 server 2222")
|
||||
|
||||
with subtest("Privileged"):
|
||||
activate_specialisation("privileged")
|
||||
server.wait_for_unit("endlessh.service")
|
||||
server.wait_for_open_port(22)
|
||||
client.succeed("nc -dvW5 server 22")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,45 +1,43 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "engelsystem";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ talyz ];
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "engelsystem";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ talyz ];
|
||||
};
|
||||
|
||||
nodes.engelsystem =
|
||||
{ ... }:
|
||||
{
|
||||
services.engelsystem = {
|
||||
enable = true;
|
||||
domain = "engelsystem";
|
||||
createDatabase = true;
|
||||
};
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
443
|
||||
];
|
||||
environment.systemPackages = with pkgs; [
|
||||
xmlstarlet
|
||||
libxml2
|
||||
];
|
||||
};
|
||||
|
||||
nodes.engelsystem =
|
||||
{ ... }:
|
||||
{
|
||||
services.engelsystem = {
|
||||
enable = true;
|
||||
domain = "engelsystem";
|
||||
createDatabase = true;
|
||||
};
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
443
|
||||
];
|
||||
environment.systemPackages = with pkgs; [
|
||||
xmlstarlet
|
||||
libxml2
|
||||
];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
engelsystem.start()
|
||||
engelsystem.wait_for_unit("phpfpm-engelsystem.service")
|
||||
engelsystem.wait_until_succeeds("curl engelsystem/login -sS -f")
|
||||
engelsystem.succeed(
|
||||
"curl engelsystem/login -sS -f -c cookie | xmllint -html -xmlout - >login"
|
||||
)
|
||||
engelsystem.succeed(
|
||||
"xml sel -T -t -m \"html/head/meta[@name='csrf-token']\" -v @content login >token"
|
||||
)
|
||||
engelsystem.succeed(
|
||||
"curl engelsystem/login -sS -f -b cookie -F 'login=admin' -F 'password=asdfasdf' -F '_token=<token' -L | xmllint -html -xmlout - >news"
|
||||
)
|
||||
engelsystem.succeed(
|
||||
"test 'News - Engelsystem' = \"$(xml sel -T -t -c html/head/title news)\""
|
||||
)
|
||||
'';
|
||||
}
|
||||
)
|
||||
testScript = ''
|
||||
engelsystem.start()
|
||||
engelsystem.wait_for_unit("phpfpm-engelsystem.service")
|
||||
engelsystem.wait_until_succeeds("curl engelsystem/login -sS -f")
|
||||
engelsystem.succeed(
|
||||
"curl engelsystem/login -sS -f -c cookie | xmllint -html -xmlout - >login"
|
||||
)
|
||||
engelsystem.succeed(
|
||||
"xml sel -T -t -m \"html/head/meta[@name='csrf-token']\" -v @content login >token"
|
||||
)
|
||||
engelsystem.succeed(
|
||||
"curl engelsystem/login -sS -f -b cookie -F 'login=admin' -F 'password=asdfasdf' -F '_token=<token' -L | xmllint -html -xmlout - >news"
|
||||
)
|
||||
engelsystem.succeed(
|
||||
"test 'News - Engelsystem' = \"$(xml sel -T -t -c html/head/title news)\""
|
||||
)
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,104 +1,102 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "enlightenment";
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "enlightenment";
|
||||
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ romildo ];
|
||||
timeout = 600;
|
||||
# OCR tests are flaky
|
||||
broken = true;
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ romildo ];
|
||||
timeout = 600;
|
||||
# OCR tests are flaky
|
||||
broken = true;
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
{
|
||||
imports = [ ./common/user-account.nix ];
|
||||
services.xserver.enable = true;
|
||||
services.xserver.desktopManager.enlightenment.enable = true;
|
||||
services.xserver.displayManager = {
|
||||
lightdm.enable = true;
|
||||
autoLogin = {
|
||||
enable = true;
|
||||
user = "alice";
|
||||
};
|
||||
};
|
||||
environment.systemPackages = [ pkgs.xdotool ];
|
||||
services.acpid.enable = true;
|
||||
services.connman.enable = true;
|
||||
services.connman.package = pkgs.connmanMinimal;
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
{
|
||||
imports = [ ./common/user-account.nix ];
|
||||
services.xserver.enable = true;
|
||||
services.xserver.desktopManager.enlightenment.enable = true;
|
||||
services.xserver.displayManager = {
|
||||
lightdm.enable = true;
|
||||
autoLogin = {
|
||||
enable = true;
|
||||
user = "alice";
|
||||
};
|
||||
};
|
||||
environment.systemPackages = [ pkgs.xdotool ];
|
||||
services.acpid.enable = true;
|
||||
services.connman.enable = true;
|
||||
services.connman.package = pkgs.connmanMinimal;
|
||||
};
|
||||
enableOCR = true;
|
||||
|
||||
enableOCR = true;
|
||||
testScript =
|
||||
{ nodes, ... }:
|
||||
let
|
||||
user = nodes.machine.config.users.users.alice;
|
||||
in
|
||||
''
|
||||
with subtest("Ensure x starts"):
|
||||
machine.wait_for_x()
|
||||
machine.wait_for_file("${user.home}/.Xauthority")
|
||||
machine.succeed("xauth merge ${user.home}/.Xauthority")
|
||||
|
||||
testScript =
|
||||
{ nodes, ... }:
|
||||
let
|
||||
user = nodes.machine.config.users.users.alice;
|
||||
in
|
||||
''
|
||||
with subtest("Ensure x starts"):
|
||||
machine.wait_for_x()
|
||||
machine.wait_for_file("${user.home}/.Xauthority")
|
||||
machine.succeed("xauth merge ${user.home}/.Xauthority")
|
||||
with subtest("Check that logging in has given the user ownership of devices"):
|
||||
machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
|
||||
|
||||
with subtest("Check that logging in has given the user ownership of devices"):
|
||||
machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
|
||||
with subtest("First time wizard"):
|
||||
machine.wait_for_text("Default") # Language
|
||||
machine.screenshot("wizard1")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.screenshot("wizard2")
|
||||
|
||||
with subtest("First time wizard"):
|
||||
machine.wait_for_text("Default") # Language
|
||||
machine.screenshot("wizard1")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.screenshot("wizard2")
|
||||
machine.wait_for_text("English") # Keyboard (default)
|
||||
machine.screenshot("wizard3")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("English") # Keyboard (default)
|
||||
machine.screenshot("wizard3")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("Standard") # Profile (default)
|
||||
machine.screenshot("wizard4")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("Standard") # Profile (default)
|
||||
machine.screenshot("wizard4")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("Title") # Sizing (default)
|
||||
machine.screenshot("wizard5")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("Title") # Sizing (default)
|
||||
machine.screenshot("wizard5")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("clicked") # Windows Focus
|
||||
machine.succeed("xdotool mousemove 512 370 click 1") # Click
|
||||
machine.screenshot("wizard6")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("clicked") # Windows Focus
|
||||
machine.succeed("xdotool mousemove 512 370 click 1") # Click
|
||||
machine.screenshot("wizard6")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("Connman") # Network Management (default)
|
||||
machine.screenshot("wizard7")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("Connman") # Network Management (default)
|
||||
machine.screenshot("wizard7")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("BlusZ") # Bluetooth Management (default)
|
||||
machine.screenshot("wizard8")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("BlusZ") # Bluetooth Management (default)
|
||||
machine.screenshot("wizard8")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("OpenGL") # Compositing (default)
|
||||
machine.screenshot("wizard9")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("OpenGL") # Compositing (default)
|
||||
machine.screenshot("wizard9")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("update") # Updates
|
||||
machine.succeed("xdotool mousemove 512 495 click 1") # Disable
|
||||
machine.screenshot("wizard10")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("update") # Updates
|
||||
machine.succeed("xdotool mousemove 512 495 click 1") # Disable
|
||||
machine.screenshot("wizard10")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("taskbar") # Taskbar
|
||||
machine.succeed("xdotool mousemove 480 410 click 1") # Enable
|
||||
machine.screenshot("wizard11")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
|
||||
machine.wait_for_text("taskbar") # Taskbar
|
||||
machine.succeed("xdotool mousemove 480 410 click 1") # Enable
|
||||
machine.screenshot("wizard11")
|
||||
machine.succeed("xdotool mousemove 512 740 click 1") # Next
|
||||
machine.wait_for_text("Home") # The desktop
|
||||
machine.screenshot("wizard12")
|
||||
|
||||
machine.wait_for_text("Home") # The desktop
|
||||
machine.screenshot("wizard12")
|
||||
|
||||
with subtest("Run Terminology"):
|
||||
machine.succeed("terminology >&2 &")
|
||||
machine.sleep(5)
|
||||
machine.send_chars("ls --color -alF\n")
|
||||
machine.sleep(2)
|
||||
machine.screenshot("terminology")
|
||||
'';
|
||||
}
|
||||
)
|
||||
with subtest("Run Terminology"):
|
||||
machine.succeed("terminology >&2 &")
|
||||
machine.sleep(5)
|
||||
machine.send_chars("ls --color -alF\n")
|
||||
machine.sleep(2)
|
||||
machine.screenshot("terminology")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,49 +1,47 @@
|
|||
import ./make-test-python.nix (
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "environment";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ nequissimus ];
|
||||
};
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "environment";
|
||||
meta = with pkgs.lib.maintainers; {
|
||||
maintainers = [ nequissimus ];
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, lib, ... }:
|
||||
lib.mkMerge [
|
||||
{
|
||||
boot.kernelPackages = pkgs.linuxPackages;
|
||||
environment.etc.plainFile.text = ''
|
||||
Hello World
|
||||
'';
|
||||
environment.etc."folder/with/file".text = ''
|
||||
Foo Bar!
|
||||
'';
|
||||
nodes.machine =
|
||||
{ pkgs, lib, ... }:
|
||||
lib.mkMerge [
|
||||
{
|
||||
boot.kernelPackages = pkgs.linuxPackages;
|
||||
environment.etc.plainFile.text = ''
|
||||
Hello World
|
||||
'';
|
||||
environment.etc."folder/with/file".text = ''
|
||||
Foo Bar!
|
||||
'';
|
||||
|
||||
environment.sessionVariables = {
|
||||
TERMINFO_DIRS = "/run/current-system/sw/share/terminfo";
|
||||
NIXCON = "awesome";
|
||||
SHOULD_NOT_BE_SET = "oops";
|
||||
};
|
||||
}
|
||||
{
|
||||
environment.sessionVariables = {
|
||||
SHOULD_NOT_BE_SET = lib.mkForce null;
|
||||
};
|
||||
}
|
||||
];
|
||||
environment.sessionVariables = {
|
||||
TERMINFO_DIRS = "/run/current-system/sw/share/terminfo";
|
||||
NIXCON = "awesome";
|
||||
SHOULD_NOT_BE_SET = "oops";
|
||||
};
|
||||
}
|
||||
{
|
||||
environment.sessionVariables = {
|
||||
SHOULD_NOT_BE_SET = lib.mkForce null;
|
||||
};
|
||||
}
|
||||
];
|
||||
|
||||
testScript = ''
|
||||
machine.succeed('[ -L "/etc/plainFile" ]')
|
||||
assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
|
||||
machine.succeed('[ -d "/etc/folder" ]')
|
||||
machine.succeed('[ -d "/etc/folder/with" ]')
|
||||
machine.succeed('[ -L "/etc/folder/with/file" ]')
|
||||
assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
|
||||
testScript = ''
|
||||
machine.succeed('[ -L "/etc/plainFile" ]')
|
||||
assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
|
||||
machine.succeed('[ -d "/etc/folder" ]')
|
||||
machine.succeed('[ -d "/etc/folder/with" ]')
|
||||
machine.succeed('[ -L "/etc/folder/with/file" ]')
|
||||
assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
|
||||
|
||||
assert "/run/current-system/sw/share/terminfo" in machine.succeed(
|
||||
"echo ''${TERMINFO_DIRS}"
|
||||
)
|
||||
assert "awesome" in machine.succeed("echo ''${NIXCON}")
|
||||
machine.fail("printenv SHOULD_NOT_BE_SET")
|
||||
'';
|
||||
}
|
||||
)
|
||||
assert "/run/current-system/sw/share/terminfo" in machine.succeed(
|
||||
"echo ''${TERMINFO_DIRS}"
|
||||
)
|
||||
assert "awesome" in machine.succeed("echo ''${NIXCON}")
|
||||
machine.fail("printenv SHOULD_NOT_BE_SET")
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,42 +1,40 @@
|
|||
import ./make-test-python.nix (
|
||||
{ lib, pkgs, ... }:
|
||||
let
|
||||
pythonShebang = pkgs.writeScript "python-shebang" ''
|
||||
#!/usr/bin/python
|
||||
print("OK")
|
||||
'';
|
||||
{ lib, pkgs, ... }:
|
||||
let
|
||||
pythonShebang = pkgs.writeScript "python-shebang" ''
|
||||
#!/usr/bin/python
|
||||
print("OK")
|
||||
'';
|
||||
|
||||
bashShebang = pkgs.writeScript "bash-shebang" ''
|
||||
#!/usr/bin/bash
|
||||
echo "OK"
|
||||
'';
|
||||
in
|
||||
{
|
||||
name = "envfs";
|
||||
nodes.machine.services.envfs.enable = true;
|
||||
bashShebang = pkgs.writeScript "bash-shebang" ''
|
||||
#!/usr/bin/bash
|
||||
echo "OK"
|
||||
'';
|
||||
in
|
||||
{
|
||||
name = "envfs";
|
||||
nodes.machine.services.envfs.enable = true;
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_until_succeeds("mountpoint -q /usr/bin/")
|
||||
machine.succeed(
|
||||
"PATH=${pkgs.coreutils}/bin /usr/bin/cp --version",
|
||||
# check fallback paths
|
||||
"PATH= /usr/bin/sh --version",
|
||||
"PATH= /usr/bin/env --version",
|
||||
"PATH= test -e /usr/bin/sh",
|
||||
"PATH= test -e /usr/bin/env",
|
||||
# also picks up PATH that was set after execve
|
||||
"! /usr/bin/hello",
|
||||
"PATH=${pkgs.hello}/bin /usr/bin/hello",
|
||||
)
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_until_succeeds("mountpoint -q /usr/bin/")
|
||||
machine.succeed(
|
||||
"PATH=${pkgs.coreutils}/bin /usr/bin/cp --version",
|
||||
# check fallback paths
|
||||
"PATH= /usr/bin/sh --version",
|
||||
"PATH= /usr/bin/env --version",
|
||||
"PATH= test -e /usr/bin/sh",
|
||||
"PATH= test -e /usr/bin/env",
|
||||
# also picks up PATH that was set after execve
|
||||
"! /usr/bin/hello",
|
||||
"PATH=${pkgs.hello}/bin /usr/bin/hello",
|
||||
)
|
||||
|
||||
out = machine.succeed("PATH=${pkgs.python3}/bin ${pythonShebang}")
|
||||
print(out)
|
||||
assert out == "OK\n"
|
||||
out = machine.succeed("PATH=${pkgs.python3}/bin ${pythonShebang}")
|
||||
print(out)
|
||||
assert out == "OK\n"
|
||||
|
||||
out = machine.succeed("PATH=${pkgs.bash}/bin ${bashShebang}")
|
||||
print(out)
|
||||
assert out == "OK\n"
|
||||
'';
|
||||
}
|
||||
)
|
||||
out = machine.succeed("PATH=${pkgs.bash}/bin ${bashShebang}")
|
||||
print(out)
|
||||
assert out == "OK\n"
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,23 +1,21 @@
|
|||
import ./make-test-python.nix (
{ pkgs, ... }:
{
name = "ergo";
meta = with pkgs.lib.maintainers; {
maintainers = [ mmahut ];
};
{ pkgs, ... }:
{
name = "ergo";
meta = with pkgs.lib.maintainers; {
maintainers = [ mmahut ];
};

nodes = {
machine =
{ ... }:
{
services.ergo.enable = true;
services.ergo.api.keyHash = "324dcf027dd4a30a932c441f365a25e86b173defa4b8e58948253471b81b72cf";
};
};
nodes = {
machine =
{ ... }:
{
services.ergo.enable = true;
services.ergo.api.keyHash = "324dcf027dd4a30a932c441f365a25e86b173defa4b8e58948253471b81b72cf";
};
};

testScript = ''
start_all()
machine.wait_for_unit("ergo.service")
'';
}
)
testScript = ''
start_all()
machine.wait_for_unit("ergo.service")
'';
}
|
||||
|
|
|
@@ -9,100 +9,98 @@ let
|
|||
iiDir = "/tmp/irc";
|
||||
in
|
||||
|
||||
import ./make-test-python.nix (
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "ergochat";
|
||||
nodes =
|
||||
{
|
||||
"${server}" = {
|
||||
networking.firewall.allowedTCPPorts = [ ircPort ];
|
||||
services.ergochat = {
|
||||
enable = true;
|
||||
settings.server.motd = pkgs.writeText "ergo.motd" ''
|
||||
The default MOTD doesn't contain the word "nixos" in it.
|
||||
This one does.
|
||||
'';
|
||||
};
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
name = "ergochat";
|
||||
nodes =
|
||||
{
|
||||
"${server}" = {
|
||||
networking.firewall.allowedTCPPorts = [ ircPort ];
|
||||
services.ergochat = {
|
||||
enable = true;
|
||||
settings.server.motd = pkgs.writeText "ergo.motd" ''
|
||||
The default MOTD doesn't contain the word "nixos" in it.
|
||||
This one does.
|
||||
'';
|
||||
};
|
||||
}
|
||||
// lib.listToAttrs (
|
||||
builtins.map (
|
||||
client:
|
||||
lib.nameValuePair client {
|
||||
imports = [
|
||||
./common/user-account.nix
|
||||
];
|
||||
};
|
||||
}
|
||||
// lib.listToAttrs (
|
||||
builtins.map (
|
||||
client:
|
||||
lib.nameValuePair client {
|
||||
imports = [
|
||||
./common/user-account.nix
|
||||
];
|
||||
|
||||
systemd.services.ii = {
|
||||
requires = [ "network.target" ];
|
||||
wantedBy = [ "default.target" ];
|
||||
systemd.services.ii = {
|
||||
requires = [ "network.target" ];
|
||||
wantedBy = [ "default.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecPreStartPre = "mkdir -p ${iiDir}";
|
||||
ExecStart = ''
|
||||
${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir}
|
||||
'';
|
||||
User = "alice";
|
||||
};
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecPreStartPre = "mkdir -p ${iiDir}";
|
||||
ExecStart = ''
|
||||
${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir}
|
||||
'';
|
||||
User = "alice";
|
||||
};
|
||||
}
|
||||
) clients
|
||||
);
|
||||
};
|
||||
}
|
||||
) clients
|
||||
);
|
||||
|
||||
testScript =
|
||||
let
|
||||
msg = client: "Hello, my name is ${client}";
|
||||
clientScript =
|
||||
client:
|
||||
[
|
||||
''
|
||||
${client}.wait_for_unit("network.target")
|
||||
${client}.systemctl("start ii")
|
||||
${client}.wait_for_unit("ii")
|
||||
${client}.wait_for_file("${iiDir}/${server}/out")
|
||||
''
|
||||
# look for the custom text in the MOTD.
|
||||
''
|
||||
${client}.wait_until_succeeds("grep 'nixos' ${iiDir}/${server}/out")
|
||||
''
|
||||
# wait until first PING from server arrives before joining,
|
||||
# so we don't try it too early
|
||||
''
|
||||
${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out")
|
||||
''
|
||||
# join ${channel}
|
||||
''
|
||||
${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in")
|
||||
${client}.wait_for_file("${iiDir}/${server}/#${channel}/in")
|
||||
''
|
||||
# send a greeting
|
||||
''
|
||||
${client}.succeed(
|
||||
"echo '${msg client}' > ${iiDir}/${server}/#${channel}/in"
|
||||
)
|
||||
''
|
||||
# check that all greetings arrived on all clients
|
||||
]
|
||||
++ builtins.map (other: ''
|
||||
testScript =
|
||||
let
|
||||
msg = client: "Hello, my name is ${client}";
|
||||
clientScript =
|
||||
client:
|
||||
[
|
||||
''
|
||||
${client}.wait_for_unit("network.target")
|
||||
${client}.systemctl("start ii")
|
||||
${client}.wait_for_unit("ii")
|
||||
${client}.wait_for_file("${iiDir}/${server}/out")
|
||||
''
|
||||
# look for the custom text in the MOTD.
|
||||
''
|
||||
${client}.wait_until_succeeds("grep 'nixos' ${iiDir}/${server}/out")
|
||||
''
|
||||
# wait until first PING from server arrives before joining,
|
||||
# so we don't try it too early
|
||||
''
|
||||
${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out")
|
||||
''
|
||||
# join ${channel}
|
||||
''
|
||||
${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in")
|
||||
${client}.wait_for_file("${iiDir}/${server}/#${channel}/in")
|
||||
''
|
||||
# send a greeting
|
||||
''
|
||||
${client}.succeed(
|
||||
"grep '${msg other}$' ${iiDir}/${server}/#${channel}/out"
|
||||
"echo '${msg client}' > ${iiDir}/${server}/#${channel}/in"
|
||||
)
|
||||
'') clients;
|
||||
''
|
||||
# check that all greetings arrived on all clients
|
||||
]
|
||||
++ builtins.map (other: ''
|
||||
${client}.succeed(
|
||||
"grep '${msg other}$' ${iiDir}/${server}/#${channel}/out"
|
||||
)
|
||||
'') clients;
|
||||
|
||||
# foldl', but requires a non-empty list instead of a start value
|
||||
reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list);
|
||||
in
|
||||
''
|
||||
start_all()
|
||||
${server}.systemctl("status ergochat")
|
||||
${server}.wait_for_open_port(${toString ircPort})
|
||||
# foldl', but requires a non-empty list instead of a start value
|
||||
reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list);
|
||||
in
|
||||
''
|
||||
start_all()
|
||||
${server}.systemctl("status ergochat")
|
||||
${server}.wait_for_open_port(${toString ircPort})
|
||||
|
||||
# run clientScript for all clients so that every list
|
||||
# entry is executed by every client before advancing
|
||||
# to the next one.
|
||||
''
|
||||
+ lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) (builtins.map clientScript clients));
|
||||
}
|
||||
)
|
||||
# run clientScript for all clients so that every list
|
||||
# entry is executed by every client before advancing
|
||||
# to the next one.
|
||||
''
|
||||
+ lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) (builtins.map clientScript clients));
|
||||
}
|
||||
|
|
|
@@ -1,29 +1,27 @@
-import ./make-test-python.nix (
{ pkgs, lib, ... }:
{
  name = "eris-server";
  meta.maintainers = with lib.maintainers; [ ehmry ];

  nodes.server = {
    environment.systemPackages = [
      pkgs.eris-go
      pkgs.eriscmd
    ];
    services.eris-server = {
      enable = true;
      decode = true;
      listenHttp = "[::1]:80";
      backends = [ "badger+file:///var/cache/eris.badger?get&put" ];
      mountpoint = "/eris";
    };
  };

  testScript = ''
    start_all()
    server.wait_for_unit("eris-server.service")
    server.wait_for_open_port(5683)
    server.wait_for_open_port(80)
    server.succeed("eriscmd get http://[::1] $(echo 'Hail ERIS!' | eriscmd put coap+tcp://[::1]:5683)")
  '';
}
-)
@@ -1,44 +1,42 @@
-import ./make-test-python.nix (
{ pkgs, lib, ... }:

let
  testPort = 6052;
  unixSocket = "/run/esphome/esphome.sock";
in
{
  name = "esphome";
  meta.maintainers = with lib.maintainers; [ oddlama ];

  nodes = {
    esphomeTcp =
      { ... }:
      {
        services.esphome = {
          enable = true;
          port = testPort;
          address = "0.0.0.0";
          openFirewall = true;
        };
      };

    esphomeUnix =
      { ... }:
      {
        services.esphome = {
          enable = true;
          enableUnixSocket = true;
        };
      };
  };

  testScript = ''
    esphomeTcp.wait_for_unit("esphome.service")
    esphomeTcp.wait_for_open_port(${toString testPort})
    esphomeTcp.succeed("curl --fail http://localhost:${toString testPort}/")

    esphomeUnix.wait_for_unit("esphome.service")
    esphomeUnix.wait_for_file("${unixSocket}")
    esphomeUnix.succeed("curl --fail --unix-socket ${unixSocket} http://localhost/")
  '';
}
-)
@@ -1,52 +1,50 @@
-import ./make-test-python.nix (
{ pkgs, ... }:

let
  dataDir = "/var/lib/foobar";

in
{
  name = "etebase-server";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ felschr ];
  };

  nodes.machine =
    { pkgs, ... }:
    {
      services.etebase-server = {
        inherit dataDir;
        enable = true;
        settings.global.secret_file = toString (pkgs.writeText "secret" "123456");
      };
    };

  testScript = ''
    machine.wait_for_unit("etebase-server.service")
    machine.wait_for_open_port(8001)

    with subtest("Database & src-version were created"):
        machine.wait_for_file("${dataDir}/src-version")
        assert (
            "${pkgs.etebase-server}"
            in machine.succeed("cat ${dataDir}/src-version")
        )
        machine.wait_for_file("${dataDir}/db.sqlite3")
        machine.wait_for_file("${dataDir}/static")

    with subtest("Only allow access from allowed_hosts"):
        machine.succeed("curl -sSfL http://0.0.0.0:8001/")
        machine.fail("curl -sSfL http://127.0.0.1:8001/")
        machine.fail("curl -sSfL http://localhost:8001/")

    with subtest("Run tests"):
        machine.succeed("etebase-server check")
        machine.succeed("etebase-server test")

    with subtest("Create superuser"):
        machine.succeed(
            "etebase-server createsuperuser --no-input --username admin --email root@localhost"
        )
  '';
}
-)
@@ -1,28 +1,26 @@
-import ./make-test-python.nix (
{ pkgs, ... }:
{

  name = "etesync-dav";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ _3699n ];
  };

  nodes.machine =
    { config, pkgs, ... }:
    {
      environment.systemPackages = [
        pkgs.curl
        pkgs.etesync-dav
      ];
    };

  testScript = ''
    machine.wait_for_unit("multi-user.target")
    machine.succeed("etesync-dav --version")
    machine.execute("etesync-dav >&2 &")
    machine.wait_for_open_port(37358)
    with subtest("Check that the web interface is accessible"):
        assert "Add User" in machine.succeed("curl -s http://localhost:37358/.web/add/")
  '';
}
-)
@@ -1,27 +1,25 @@
-import ./make-test-python.nix (
{ lib, pkgs, ... }:
{
  name = "fakeroute";
  meta.maintainers = with lib.maintainers; [ rnhmjoj ];

  nodes.machine =
    { ... }:
    {
      imports = [ ../modules/profiles/minimal.nix ];
      services.fakeroute.enable = true;
      services.fakeroute.route = [
        "216.102.187.130"
        "4.0.1.122"
        "198.116.142.34"
        "63.199.8.242"
      ];
      environment.systemPackages = [ pkgs.traceroute ];
    };

  testScript = ''
    start_all()
    machine.wait_for_unit("fakeroute.service")
    machine.succeed("traceroute 127.0.0.1 | grep -q 216.102.187.130")
  '';
}
-)
@@ -3,36 +3,34 @@
  config ? { },
  pkgs ? import ../.. { inherit system config; },
}:
-import ./make-test-python.nix (
{ lib, pkgs, ... }:
{
  name = "fanout";
  meta.maintainers = [ lib.maintainers.therishidesai ];

  nodes =
    let
      cfg =
        { ... }:
        {
          services.fanout = {
            enable = true;
            fanoutDevices = 2;
            bufferSize = 8192;
          };
        };
    in
    {
      machine = cfg;
    };

  testScript = ''
    start_all()

    # mDNS.
    machine.wait_for_unit("multi-user.target")

    machine.succeed("test -c /dev/fanout0")
    machine.succeed("test -c /dev/fanout1")
  '';
}
-)
@@ -1,53 +1,51 @@
-import ./make-test-python.nix (
{ pkgs, ... }:

let
  fenicsScript = pkgs.writeScript "poisson.py" ''
    #!/usr/bin/env python
    from dolfin import *

    mesh = UnitSquareMesh(4, 4)
    V = FunctionSpace(mesh, "Lagrange", 1)

    def boundary(x):
        return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS

    u0 = Constant(0.0)
    bc = DirichletBC(V, u0, boundary)

    u = TrialFunction(V)
    v = TestFunction(V)
    f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)", degree=2)
    g = Expression("sin(5*x[0])", degree=2)
    a = inner(grad(u), grad(v))*dx
    L = f*v*dx + g*v*ds

    u = Function(V)
    solve(a == L, u, bc)
    print(u)
  '';
in
{
  name = "fenics";
  meta = {
    maintainers = with pkgs.lib.maintainers; [ ];
  };

  nodes = {
    fenicsnode =
      { pkgs, ... }:
      {
        environment.systemPackages = with pkgs; [
          gcc
          (python3.withPackages (ps: with ps; [ fenics ]))
        ];
      };
  };
  testScript =
    { nodes, ... }:
    ''
      start_all()
      fenicsnode.succeed("${fenicsScript}")
    '';
}
-)
@@ -1,98 +1,96 @@
-import ./make-test-python.nix (
{ pkgs, ... }:
{
  name = "ferm";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ mic92 ];
  };

  nodes = {
    client =
      { pkgs, ... }:
      with pkgs.lib;
      {
        networking = {
          dhcpcd.enable = false;
          interfaces.eth1.ipv6.addresses = mkOverride 0 [
            {
              address = "fd00::2";
              prefixLength = 64;
            }
          ];
          interfaces.eth1.ipv4.addresses = mkOverride 0 [
            {
              address = "192.168.1.2";
              prefixLength = 24;
            }
          ];
        };
      };

    server =
      { pkgs, ... }:
      with pkgs.lib;
      {
        networking = {
          dhcpcd.enable = false;
          useNetworkd = true;
          useDHCP = false;
          interfaces.eth1.ipv6.addresses = mkOverride 0 [
            {
              address = "fd00::1";
              prefixLength = 64;
            }
          ];
          interfaces.eth1.ipv4.addresses = mkOverride 0 [
            {
              address = "192.168.1.1";
              prefixLength = 24;
            }
          ];
        };

        services = {
          ferm.enable = true;
          ferm.config = ''
            domain (ip ip6) table filter chain INPUT {
              interface lo ACCEPT;
              proto tcp dport 8080 REJECT reject-with tcp-reset;
            }
          '';
          nginx.enable = true;
          nginx.httpConfig = ''
            server {
              listen 80;
              listen [::]:80;
              listen 8080;
              listen [::]:8080;

              location /status { stub_status on; }
            }
          '';
        };
      };
  };

  testScript = ''
    start_all()

    client.systemctl("start network-online.target")
    server.systemctl("start network-online.target")
    client.wait_for_unit("network-online.target")
    server.wait_for_unit("network-online.target")
    server.wait_for_unit("ferm.service")
    server.wait_for_unit("nginx.service")
    server.wait_until_succeeds("ss -ntl | grep -q 80")

    with subtest("port 80 is allowed"):
        client.succeed("curl --fail -g http://192.168.1.1:80/status")
        client.succeed("curl --fail -g http://[fd00::1]:80/status")

    with subtest("port 8080 is not allowed"):
        server.succeed("curl --fail -g http://192.168.1.1:8080/status")
        server.succeed("curl --fail -g http://[fd00::1]:8080/status")

        client.fail("curl --fail -g http://192.168.1.1:8080/status")
        client.fail("curl --fail -g http://[fd00::1]:8080/status")
  '';
}
-)
@@ -1,148 +1,146 @@
-import ./make-test-python.nix (
{ pkgs, lib, ... }:
{
  name = "filesender";
  meta = {
    maintainers = with lib.maintainers; [ nhnn ];
    broken = pkgs.stdenv.hostPlatform.isAarch64; # selenium.common.exceptions.WebDriverException: Message: Unsupported platform/architecture combination: linux/aarch64
  };

  nodes.filesender =
    { ... }:
    let
      format = pkgs.formats.php { };
    in
    {
      networking.firewall.allowedTCPPorts = [ 80 ];

      services.filesender.enable = true;
      services.filesender.localDomain = "filesender";
      services.filesender.settings = {
        auth_sp_saml_authentication_source = "default";
        auth_sp_saml_uid_attribute = "uid";
        storage_filesystem_path = "/tmp";
        site_url = "http://filesender";
        force_ssl = false;
        admin = "";
        admin_email = "admin@localhost";
        email_reply_to = "noreply@localhost";
      };
      services.simplesamlphp.filesender = {
        settings = {
          baseurlpath = "http://filesender/saml";
          "module.enable".exampleauth = true;
        };
        authSources = {
          admin = [ "core:AdminPassword" ];
          default = format.lib.mkMixedArray [ "exampleauth:UserPass" ] {
            "user:password" = {
              uid = [ "user" ];
              cn = [ "user" ];
              mail = [ "user@nixos.org" ];
            };
          };
        };
      };
    };

  nodes.client =
    {
      pkgs,
      nodes,
      ...
    }:
    let
      filesenderIP = (builtins.head (nodes.filesender.networking.interfaces.eth1.ipv4.addresses)).address;
    in
    {
      networking.hosts.${filesenderIP} = [ "filesender" ];

      environment.systemPackages =
        let
          username = "user";
          password = "password";
          browser-test =
            pkgs.writers.writePython3Bin "browser-test"
              {
                libraries = [ pkgs.python3Packages.selenium ];
                flakeIgnore = [
                  "E124"
                  "E501"
                ];
              }
              ''
                from selenium.webdriver.common.by import By
                from selenium.webdriver import Firefox
                from selenium.webdriver.firefox.options import Options
                from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
                from selenium.webdriver.firefox.service import Service
                from selenium.webdriver.support.ui import WebDriverWait
                from selenium.webdriver.support import expected_conditions as EC
                from subprocess import STDOUT
                import string
                import random
                import logging
                import time
                selenium_logger = logging.getLogger("selenium")
                selenium_logger.setLevel(logging.DEBUG)
                selenium_logger.addHandler(logging.StreamHandler())
                profile = FirefoxProfile()
                profile.set_preference("browser.download.folderList", 2)
                profile.set_preference("browser.download.manager.showWhenStarting", False)
                profile.set_preference("browser.download.dir", "/tmp/firefox")
                profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain;text/txt")
                options = Options()
                options.profile = profile
                options.add_argument('--headless')
                service = Service(log_output=STDOUT)
                driver = Firefox(options=options)
                driver.set_window_size(1024, 768)
                driver.implicitly_wait(30)
                driver.get('http://filesender/')
                wait = WebDriverWait(driver, 20)
                wait.until(EC.title_contains("FileSender"))
                driver.find_element(By.ID, "btn_logon").click()
                wait.until(EC.title_contains("Enter your username and password"))
                driver.find_element(By.ID, 'username').send_keys(
                    '${username}'
                )
                driver.find_element(By.ID, 'password').send_keys(
                    '${password}'
                )
                driver.find_element(By.ID, "submit_button").click()
                wait.until(EC.title_contains("FileSender"))
                wait.until(EC.presence_of_element_located((By.ID, "topmenu_logoff")))
                test_string = "".join(random.choices(string.ascii_uppercase + string.digits, k=20))
                with open("/tmp/test_file.txt", "w") as file:
                    file.write(test_string)
                driver.find_element(By.ID, "files").send_keys("/tmp/test_file.txt")
                time.sleep(2)
                driver.find_element(By.CSS_SELECTOR, '.start').click()
                wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".download_link")))
                download_link = driver.find_element(By.CSS_SELECTOR, '.download_link > textarea').get_attribute('value').strip()
                driver.get(download_link)
                wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".download")))
                driver.find_element(By.CSS_SELECTOR, '.download').click()
                wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".ui-dialog-buttonset > button:nth-child(2)")))
                driver.find_element(By.CSS_SELECTOR, ".ui-dialog-buttonset > button:nth-child(2)").click()
                driver.close()
                driver.quit()
              '';
        in
        [
          pkgs.firefox-unwrapped
          pkgs.geckodriver
          browser-test
        ];
    };

  testScript = ''
    start_all()
    filesender.wait_for_file("/run/phpfpm/filesender.sock")
    filesender.wait_for_open_port(80)
    if "If you have received an invitation to access this site as a guest" not in client.wait_until_succeeds("curl -sS -f http://filesender"):
        raise Exception("filesender returned invalid html")
    client.succeed("browser-test")
  '';
}
-)
@@ -1,41 +1,39 @@
-import ./make-test-python.nix (
{ lib, ... }:

{
  name = "firefoxpwa";
  meta.maintainers = with lib.maintainers; [ camillemndn ];

  nodes.machine =
    { pkgs, ... }:
    {
      imports = [ ./common/x11.nix ];
      environment.systemPackages = with pkgs; [
        firefoxpwa
        jq
      ];

      programs.firefox = {
        enable = true;
        nativeMessagingHosts.packages = [ pkgs.firefoxpwa ];
      };

      services.jellyfin.enable = true;
    };

  enableOCR = true;

  testScript = ''
    machine.start()

    with subtest("Install a progressive web app"):
        machine.wait_for_unit("jellyfin.service")
        machine.wait_for_open_port(8096)
        machine.succeed("firefoxpwa site install http://localhost:8096/web/manifest.json >&2")

    with subtest("Launch the progressive web app"):
        machine.succeed("firefoxpwa site launch $(jq -r < ~/.local/share/firefoxpwa/config.json '.sites | keys[0]') >&2")
        machine.wait_for_window("Jellyfin")
        machine.wait_for_text("Jellyfin")
  '';
}
-)
@@ -1,95 +1,93 @@
-import ./make-test-python.nix (
{ pkgs, ... }:
{
  name = "firejail";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ sgo ];
  };

  nodes.machine =
    { ... }:
    {
      imports = [ ./common/user-account.nix ];

      programs.firejail = {
        enable = true;
        wrappedBinaries = {
          bash-jailed = "${pkgs.bash}/bin/bash";
          bash-jailed2 = {
            executable = "${pkgs.bash}/bin/bash";
            extraArgs = [ "--private=~/firejail-home" ];
          };
        };
      };

      systemd.services.setupFirejailTest = {
        wantedBy = [ "multi-user.target" ];
        before = [ "multi-user.target" ];

        environment = {
          HOME = "/home/alice";
        };

        unitConfig = {
          type = "oneshot";
          RemainAfterExit = true;
          user = "alice";
        };

        script = ''
          cd $HOME

          mkdir .password-store && echo s3cret > .password-store/secret
          mkdir my-secrets && echo s3cret > my-secrets/secret

          echo publ1c > public

          mkdir -p .config/firejail
          echo 'blacklist ''${HOME}/my-secrets' > .config/firejail/globals.local
        '';
      };
    };

  testScript = ''
    start_all()
    machine.wait_for_unit("multi-user.target")

    # Test path acl with wrapper
    machine.succeed("sudo -u alice bash-jailed -c 'cat ~/public' | grep -q publ1c")
    machine.fail(
        "sudo -u alice bash-jailed -c 'cat ~/.password-store/secret' | grep -q s3cret"
    )
    machine.fail("sudo -u alice bash-jailed -c 'cat ~/my-secrets/secret' | grep -q s3cret")

    # Test extraArgs
    machine.succeed("sudo -u alice mkdir /home/alice/firejail-home")
    machine.succeed("sudo -u alice bash-jailed2 -c 'echo test > /home/alice/foo'")
    machine.fail("sudo -u alice cat /home/alice/foo")
    machine.succeed("sudo -u alice cat /home/alice/firejail-home/foo | grep test")

    # Test path acl with firejail executable
    machine.succeed("sudo -u alice firejail -- bash -c 'cat ~/public' | grep -q publ1c")
    machine.fail(
        "sudo -u alice firejail -- bash -c 'cat ~/.password-store/secret' | grep -q s3cret"
    )
    machine.fail(
        "sudo -u alice firejail -- bash -c 'cat ~/my-secrets/secret' | grep -q s3cret"
    )

    # Disabling profiles
    machine.succeed(
        "sudo -u alice bash -c 'firejail --noprofile -- cat ~/.password-store/secret' | grep -q s3cret"
    )

    # CVE-2020-17367
    machine.fail(
        "sudo -u alice firejail --private-tmp id --output=/tmp/vuln1 && cat /tmp/vuln1"
    )

    # CVE-2020-17368
    machine.fail(
        "sudo -u alice firejail --private-tmp --output=/tmp/foo 'bash -c $(id>/tmp/vuln2;echo id)' && cat /tmp/vuln2"
    )
  '';
}
-)
@ -1,349 +1,347 @@
|
|||
import ../make-test-python.nix (
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
certs = import ../common/acme/server/snakeoil-certs.nix;
|
||||
domain = certs.domain;
|
||||
in
|
||||
{
|
||||
name = "firezone";
|
||||
meta.maintainers = with pkgs.lib.maintainers; [ oddlama ];
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
certs = import ../common/acme/server/snakeoil-certs.nix;
|
||||
domain = certs.domain;
|
||||
in
|
||||
{
|
||||
name = "firezone";
|
||||
meta.maintainers = with pkgs.lib.maintainers; [ oddlama ];
|
||||
|
||||
nodes = {
|
||||
server =
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
security.pki.certificateFiles = [ certs.ca.cert ];
|
||||
nodes = {
|
||||
server =
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
security.pki.certificateFiles = [ certs.ca.cert ];
|
||||
|
||||
networking.extraHosts = ''
|
||||
${config.networking.primaryIPAddress} ${domain}
|
||||
${config.networking.primaryIPv6Address} ${domain}
|
||||
'';
|
||||
networking.extraHosts = ''
|
||||
${config.networking.primaryIPAddress} ${domain}
|
||||
${config.networking.primaryIPv6Address} ${domain}
|
||||
'';
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
443
|
||||
];
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
443
|
||||
];
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts.${domain} = {
|
||||
sslCertificate = certs.${domain}.cert;
|
||||
sslCertificateKey = certs.${domain}.key;
|
||||
};
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts.${domain} = {
|
||||
sslCertificate = certs.${domain}.cert;
|
||||
sslCertificateKey = certs.${domain}.key;
|
||||
};
|
||||
};
|
||||
|
||||
services.firezone.server = {
|
||||
enable = true;
|
||||
enableLocalDB = true;
|
||||
nginx.enable = true;
|
||||
|
||||
# Doesn't need to work for this test, but needs to be configured
|
||||
# otherwise the server will not start.
|
||||
smtp = {
|
||||
from = "firezone@example.com";
|
||||
host = "mail.localhost";
|
||||
port = 465;
|
||||
implicitTls = true;
|
||||
username = "firezone@example.com";
|
||||
passwordFile = pkgs.writeText "tmpmailpasswd" "supermailpassword";
|
||||
};
|
||||
|
||||
services.firezone.server = {
|
||||
provision = {
|
||||
enable = true;
|
||||
enableLocalDB = true;
|
||||
nginx.enable = true;
|
||||
|
||||
# Doesn't need to work for this test, but needs to be configured
|
||||
# otherwise the server will not start.
|
||||
smtp = {
|
||||
from = "firezone@example.com";
|
||||
host = "mail.localhost";
|
||||
port = 465;
|
||||
implicitTls = true;
|
||||
username = "firezone@example.com";
|
||||
passwordFile = pkgs.writeText "tmpmailpasswd" "supermailpassword";
|
||||
};
|
||||
|
||||
provision = {
|
||||
enable = true;
|
||||
accounts.main = {
|
||||
name = "My Account";
|
||||
relayGroups.my-relays.name = "Relays";
|
||||
gatewayGroups.site.name = "Site";
|
||||
actors = {
|
||||
admin = {
|
||||
type = "account_admin_user";
|
||||
name = "Admin";
|
||||
email = "admin@example.com";
|
||||
};
|
||||
client = {
|
||||
type = "service_account";
|
||||
name = "A client";
|
||||
email = "client@example.com";
|
||||
};
|
||||
accounts.main = {
|
||||
name = "My Account";
|
||||
relayGroups.my-relays.name = "Relays";
|
||||
gatewayGroups.site.name = "Site";
|
||||
actors = {
|
||||
admin = {
|
||||
type = "account_admin_user";
|
||||
name = "Admin";
|
||||
email = "admin@example.com";
|
||||
};
|
||||
resources.res1 = {
|
||||
type = "dns";
|
||||
name = "Dns Resource";
|
||||
address = "resource.example.com";
|
||||
gatewayGroups = [ "site" ];
|
||||
filters = [
|
||||
{ protocol = "icmp"; }
|
||||
{
|
||||
protocol = "tcp";
|
||||
ports = [ 80 ];
|
||||
}
|
||||
];
|
||||
};
|
||||
resources.res2 = {
|
||||
type = "ip";
|
||||
name = "Ip Resource";
|
||||
address = "172.20.2.1";
|
||||
gatewayGroups = [ "site" ];
|
||||
};
|
||||
resources.res3 = {
|
||||
type = "cidr";
|
||||
name = "Cidr Resource";
|
||||
address = "172.20.1.0/24";
|
||||
gatewayGroups = [ "site" ];
|
||||
};
|
||||
policies.pol1 = {
|
||||
description = "Allow anyone res1 access";
|
||||
group = "everyone";
|
||||
resource = "res1";
|
||||
};
|
||||
policies.pol2 = {
|
||||
description = "Allow anyone res2 access";
|
||||
group = "everyone";
|
||||
resource = "res2";
|
||||
};
|
||||
policies.pol3 = {
|
||||
description = "Allow anyone res3 access";
|
||||
group = "everyone";
|
||||
resource = "res3";
|
||||
client = {
|
||||
type = "service_account";
|
||||
name = "A client";
|
||||
email = "client@example.com";
|
||||
};
|
||||
};
|
||||
resources.res1 = {
|
||||
type = "dns";
|
||||
name = "Dns Resource";
|
||||
address = "resource.example.com";
|
||||
gatewayGroups = [ "site" ];
|
||||
filters = [
|
||||
{ protocol = "icmp"; }
|
||||
{
|
||||
protocol = "tcp";
|
||||
ports = [ 80 ];
|
||||
}
|
||||
];
|
||||
};
|
||||
resources.res2 = {
|
||||
type = "ip";
|
||||
name = "Ip Resource";
|
||||
address = "172.20.2.1";
|
||||
gatewayGroups = [ "site" ];
|
||||
};
|
||||
resources.res3 = {
|
||||
type = "cidr";
|
||||
name = "Cidr Resource";
|
||||
address = "172.20.1.0/24";
|
||||
gatewayGroups = [ "site" ];
|
||||
};
|
||||
policies.pol1 = {
|
||||
description = "Allow anyone res1 access";
|
||||
group = "everyone";
|
||||
resource = "res1";
|
||||
};
|
||||
policies.pol2 = {
|
||||
description = "Allow anyone res2 access";
|
||||
group = "everyone";
|
||||
resource = "res2";
|
||||
};
|
||||
policies.pol3 = {
|
||||
description = "Allow anyone res3 access";
|
||||
group = "everyone";
|
||||
resource = "res3";
|
||||
};
|
||||
};
|
||||
|
||||
api.externalUrl = "https://${domain}/api/";
|
||||
web.externalUrl = "https://${domain}/";
|
||||
};
|
||||
|
||||
systemd.services.firezone-server-domain.postStart = lib.mkAfter ''
|
||||
${lib.getExe config.services.firezone.server.domain.package} rpc 'Code.eval_file("${./create-tokens.exs}")'
|
||||
'';
|
||||
api.externalUrl = "https://${domain}/api/";
|
||||
web.externalUrl = "https://${domain}/";
|
||||
};
|
||||
|
||||
relay =
|
||||
{
|
||||
nodes,
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
security.pki.certificateFiles = [ certs.ca.cert ];
|
||||
networking.extraHosts = ''
|
||||
${nodes.server.networking.primaryIPAddress} ${domain}
|
||||
${nodes.server.networking.primaryIPv6Address} ${domain}
|
||||
'';
|
||||
systemd.services.firezone-server-domain.postStart = lib.mkAfter ''
|
||||
${lib.getExe config.services.firezone.server.domain.package} rpc 'Code.eval_file("${./create-tokens.exs}")'
|
||||
'';
|
||||
};
|
||||
|
||||
services.firezone.relay = {
|
||||
enable = true;
|
||||
logLevel = "debug";
|
||||
name = "test-relay";
|
||||
apiUrl = "wss://${domain}/api/";
|
||||
tokenFile = "/tmp/shared/relay_token.txt";
|
||||
publicIpv4 = config.networking.primaryIPAddress;
|
||||
publicIpv6 = config.networking.primaryIPv6Address;
|
||||
openFirewall = true;
|
||||
relay =
|
||||
{
|
||||
nodes,
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
security.pki.certificateFiles = [ certs.ca.cert ];
|
||||
networking.extraHosts = ''
|
||||
${nodes.server.networking.primaryIPAddress} ${domain}
|
||||
${nodes.server.networking.primaryIPv6Address} ${domain}
|
||||
'';
|
||||
|
||||
services.firezone.relay = {
|
||||
enable = true;
|
||||
logLevel = "debug";
|
||||
name = "test-relay";
|
||||
apiUrl = "wss://${domain}/api/";
|
||||
tokenFile = "/tmp/shared/relay_token.txt";
|
||||
publicIpv4 = config.networking.primaryIPAddress;
|
||||
publicIpv6 = config.networking.primaryIPv6Address;
|
||||
openFirewall = true;
|
||||
};
|
||||
|
||||
# Don't auto-start so we can wait until the token was provisioned
|
||||
systemd.services.firezone-relay.wantedBy = lib.mkForce [ ];
|
||||
};
|
||||
|
||||
# A resource that is only connected to the gateway,
|
||||
# allowing us to confirm the VPN works
|
||||
resource = {
|
||||
virtualisation.vlans = [
|
||||
1
|
||||
2
|
||||
];
|
||||
|
||||
networking.interfaces.eth1.ipv4.addresses = [
|
||||
{
|
||||
address = "172.20.1.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
networking.interfaces.eth2.ipv4.addresses = [
|
||||
{
|
||||
address = "172.20.2.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
];
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts = {
|
||||
"localhost" = {
|
||||
default = true;
|
||||
locations."/".extraConfig = ''
|
||||
return 200 'greetings from the resource';
|
||||
add_header Content-Type text/plain;
|
||||
'';
|
||||
};
|
||||
|
||||
# Don't auto-start so we can wait until the token was provisioned
|
||||
systemd.services.firezone-relay.wantedBy = lib.mkForce [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# A resource that is only connected to the gateway,
|
||||
# allowing us to confirm the VPN works
|
||||
resource = {
|
||||
gateway =
|
||||
{
|
||||
nodes,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
virtualisation.vlans = [
|
||||
1
|
||||
2
|
||||
];
|
||||
|
||||
networking.interfaces.eth1.ipv4.addresses = [
|
||||
{
|
||||
address = "172.20.1.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
networking.interfaces.eth2.ipv4.addresses = [
|
||||
{
|
||||
address = "172.20.2.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80
|
||||
];
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts = {
|
||||
"localhost" = {
|
||||
default = true;
|
||||
locations."/".extraConfig = ''
|
||||
return 200 'greetings from the resource';
|
||||
add_header Content-Type text/plain;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
gateway =
|
||||
{
|
||||
nodes,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
virtualisation.vlans = [
|
||||
1
|
||||
2
|
||||
networking = {
|
||||
interfaces.eth1.ipv4.addresses = [
|
||||
{
|
||||
address = "172.20.1.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
networking = {
|
||||
interfaces.eth1.ipv4.addresses = [
|
||||
{
|
||||
address = "172.20.1.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
interfaces.eth2.ipv4.addresses = [
|
||||
{
|
||||
address = "172.20.2.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
interfaces.eth2.ipv4.addresses = [
|
||||
{
|
||||
address = "172.20.2.2";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
firewall.enable = false;
|
||||
nftables.enable = true;
|
||||
nftables.tables."filter".family = "inet";
|
||||
nftables.tables."filter".content = ''
|
||||
chain incoming {
|
||||
type filter hook input priority 0; policy accept;
|
||||
}
|
||||
|
||||
firewall.enable = false;
|
||||
nftables.enable = true;
|
||||
nftables.tables."filter".family = "inet";
|
||||
nftables.tables."filter".content = ''
|
||||
chain incoming {
|
||||
type filter hook input priority 0; policy accept;
|
||||
}
|
||||
chain postrouting {
|
||||
type nat hook postrouting priority srcnat; policy accept;
|
||||
meta protocol ip iifname "tun-firezone" oifname { "eth1", "eth2" } masquerade random
|
||||
}
|
||||
|
||||
chain postrouting {
|
||||
type nat hook postrouting priority srcnat; policy accept;
|
||||
meta protocol ip iifname "tun-firezone" oifname { "eth1", "eth2" } masquerade random
|
||||
}
|
||||
chain forward {
|
||||
type filter hook forward priority 0; policy drop;
|
||||
iifname "tun-firezone" accept
|
||||
oifname "tun-firezone" accept
|
||||
}
|
||||
|
||||
chain forward {
|
||||
type filter hook forward priority 0; policy drop;
|
||||
iifname "tun-firezone" accept
|
||||
oifname "tun-firezone" accept
|
||||
}
|
||||
|
||||
chain output {
|
||||
type filter hook output priority 0; policy accept;
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
|
||||
# boot.kernel.sysctl."net.ipv4.conf.all.src_valid_mark" = "1";
|
||||
boot.kernel.sysctl."net.ipv6.conf.default.forwarding" = "1";
|
||||
boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = "1";
|
||||
|
||||
security.pki.certificateFiles = [ certs.ca.cert ];
|
||||
networking.extraHosts = ''
|
||||
${nodes.server.networking.primaryIPAddress} ${domain}
|
||||
${nodes.server.networking.primaryIPv6Address} ${domain}
|
||||
172.20.1.1 resource.example.com
|
||||
chain output {
|
||||
type filter hook output priority 0; policy accept;
|
||||
}
|
||||
'';
|
||||
|
||||
services.firezone.gateway = {
|
||||
enable = true;
|
||||
logLevel = "debug";
|
||||
name = "test-gateway";
|
||||
apiUrl = "wss://${domain}/api/";
|
||||
tokenFile = "/tmp/shared/gateway_token.txt";
|
||||
};
|
||||
|
||||
# Don't auto-start so we can wait until the token was provisioned
|
||||
systemd.services.firezone-gateway.wantedBy = lib.mkForce [ ];
|
||||
};
|
||||
|
||||
client =
|
||||
{
|
||||
nodes,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
security.pki.certificateFiles = [ certs.ca.cert ];
|
||||
networking.useNetworkd = true;
|
||||
networking.extraHosts = ''
|
||||
${nodes.server.networking.primaryIPAddress} ${domain}
|
||||
${nodes.server.networking.primaryIPv6Address} ${domain}
|
||||
'';
|
||||
boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
|
||||
# boot.kernel.sysctl."net.ipv4.conf.all.src_valid_mark" = "1";
|
||||
boot.kernel.sysctl."net.ipv6.conf.default.forwarding" = "1";
|
||||
boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = "1";
|
||||
|
||||
services.firezone.headless-client = {
|
||||
enable = true;
|
||||
logLevel = "debug";
|
||||
name = "test-client-somebody";
|
||||
apiUrl = "wss://${domain}/api/";
|
||||
tokenFile = "/tmp/shared/client_token.txt";
|
||||
};
|
||||
security.pki.certificateFiles = [ certs.ca.cert ];
|
||||
networking.extraHosts = ''
|
||||
${nodes.server.networking.primaryIPAddress} ${domain}
|
||||
${nodes.server.networking.primaryIPv6Address} ${domain}
|
||||
172.20.1.1 resource.example.com
|
||||
'';
|
||||
|
||||
# Don't auto-start so we can wait until the token was provisioned
|
||||
systemd.services.firezone-headless-client.wantedBy = lib.mkForce [ ];
|
||||
services.firezone.gateway = {
|
||||
enable = true;
|
||||
logLevel = "debug";
|
||||
name = "test-gateway";
|
||||
apiUrl = "wss://${domain}/api/";
|
||||
tokenFile = "/tmp/shared/gateway_token.txt";
|
||||
};
|
||||
};
|
||||
|
||||
testScript =
|
||||
{ ... }:
|
||||
''
|
||||
start_all()

      with subtest("Start server"):
          server.wait_for_unit("firezone.target")
          server.wait_until_succeeds("curl -Lsf https://${domain} | grep 'Welcome to Firezone'")
          server.wait_until_succeeds("curl -Ls https://${domain}/api | grep 'Not Found'")

      # Wait for tokens and copy them to shared folder
      server.wait_for_file("/var/lib/private/firezone/relay_token.txt")
      server.wait_for_file("/var/lib/private/firezone/gateway_token.txt")
      server.wait_for_file("/var/lib/private/firezone/client_token.txt")
      server.succeed("cp /var/lib/private/firezone/*_token.txt /tmp/shared")

      with subtest("Connect relay"):
          relay.succeed("systemctl start firezone-relay")
          relay.wait_for_unit("firezone-relay.service")
          relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Connected to portal.*${domain}'", timeout=30)

      with subtest("Connect gateway"):
          gateway.succeed("systemctl start firezone-gateway")
          gateway.wait_for_unit("firezone-gateway.service")
          gateway.wait_until_succeeds("journalctl --since -2m --unit firezone-gateway.service --grep 'Connected to portal.*${domain}'", timeout=30)
          relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Created allocation.*IPv4'", timeout=30)
          relay.wait_until_succeeds("journalctl --since -2m --unit firezone-relay.service --grep 'Created allocation.*IPv6'", timeout=30)

          # Assert both relay ips are known
          gateway.wait_until_succeeds("journalctl --since -2m --unit firezone-gateway.service --grep 'Updated allocation.*relay_ip4.*Some.*relay_ip6.*Some'", timeout=30)

      with subtest("Connect headless-client"):
          client.succeed("systemctl start firezone-headless-client")
          client.wait_for_unit("firezone-headless-client.service")
          client.wait_until_succeeds("journalctl --since -2m --unit firezone-headless-client.service --grep 'Connected to portal.*${domain}'", timeout=30)
          client.wait_until_succeeds("journalctl --since -2m --unit firezone-headless-client.service --grep 'Tunnel ready'", timeout=30)

      with subtest("Check DNS based access"):
          # Check that we can access the resource through the VPN via DNS
          client.wait_until_succeeds("curl -4 -Lsf http://resource.example.com | grep 'greetings from the resource'")
          client.wait_until_succeeds("curl -6 -Lsf http://resource.example.com | grep 'greetings from the resource'")

      with subtest("Check CIDR based access"):
          # Check that we can access the resource through the VPN via CIDR
          client.wait_until_succeeds("ping -c1 -W1 172.20.1.1")

      with subtest("Check IP based access"):
          # Check that we can access the resource through the VPN via IP
          client.wait_until_succeeds("ping -c1 -W1 172.20.2.1")
    '';
}