
Merge branch 'staging-next' into staging

This commit is contained in:
Jan Tojnar 2022-12-25 01:30:47 +01:00
commit 72c37eddec
895 changed files with 23748 additions and 21115 deletions

View file

@ -98,6 +98,26 @@
<literal>fetch-ec2-metadata.service</literal>
</para>
</listitem>
<listitem>
<para>
<literal>minio</literal> removed support for its legacy
filesystem backend in
<link xlink:href="https://github.com/minio/minio/releases/tag/RELEASE.2022-10-29T06-21-33Z">RELEASE.2022-10-29T06-21-33Z</link>.
This means that if your storage was created with the old format,
minio will no longer start. Unfortunately, minio doesn't
provide an automatic migration; they only provide
<link xlink:href="https://min.io/docs/minio/windows/operations/install-deploy-manage/migrate-fs-gateway.html">instructions
on how to manually convert the node</link>. To facilitate this
migration we keep around the last version that still supports
the old filesystem backend as
<literal>minio_legacy_fs</literal>. Use it via
<literal>services.minio.package = minio_legacy_fs;</literal>
to export your data before switching to the new version. See
the corresponding
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/199318">issue</link>
for more details.
</para>
</listitem>
<listitem>
<para>
<literal>services.sourcehut.dispatch</literal> and the
@ -149,6 +169,16 @@
<literal>llvmPackages_rocm.clang-unwrapped</literal>.
</para>
</listitem>
<listitem>
<para>
The Nginx module now validates the syntax of config files at
build time. For more complex configurations (notably those
using <literal>include</literal> with out-of-store files)
you may need to disable this check by setting
<link linkend="opt-services.nginx.validateConfig">services.nginx.validateConfig</link>
to <literal>false</literal>.
</para>
</listitem>
<listitem>
<para>
The EC2 image module previously detected and automatically
@ -269,6 +299,49 @@
remote <literal>PostgreSQL</literal> database.
</para>
</listitem>
<listitem>
<para>
<literal>services.peertube</literal> now requires you to
specify the secret file
<literal>secrets.secretsFile</literal>. It can be generated by
running <literal>openssl rand -hex 32</literal>. Before
upgrading, read the release notes for PeerTube:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
<link xlink:href="https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0">Release
v5.0.0</link>
</para>
</listitem>
</itemizedlist>
<para>
And back up your data.
</para>
</listitem>
<listitem>
<para>
The module <literal>services.headscale</literal> was
refactored to be compliant with
<link xlink:href="https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md">RFC
0042</link>. To be precise, this means that the following
things have changed:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Most settings have been migrated under
<link linkend="opt-services.headscale.settings">services.headscale.settings</link>,
which is an attribute set that will be converted into
headscale's YAML config format. This means that the
configuration from
<link xlink:href="https://github.com/juanfont/headscale/blob/main/config-example.yaml">headscale's
example configuration</link> can be written directly as an
attribute set in Nix within this option.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
A new <literal>virtualisation.rosetta</literal> module was

View file

@ -35,6 +35,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services.
This breaks services which rely on metadata being present by the time stage-2 is entered. Anything which reads EC2 metadata from `/etc/ec2-metadata` should now have an `after` dependency on `fetch-ec2-metadata.service`.
- `minio` removed support for its legacy filesystem backend in [RELEASE.2022-10-29T06-21-33Z](https://github.com/minio/minio/releases/tag/RELEASE.2022-10-29T06-21-33Z). This means that if your storage was created with the old format, minio will no longer start. Unfortunately, minio doesn't provide an automatic migration; they only provide [instructions on how to manually convert the node](https://min.io/docs/minio/windows/operations/install-deploy-manage/migrate-fs-gateway.html). To facilitate this migration we keep around the last version that still supports the old filesystem backend as `minio_legacy_fs`. Use it via `services.minio.package = minio_legacy_fs;` to export your data before switching to the new version. See the corresponding [issue](https://github.com/NixOS/nixpkgs/issues/199318) for more details.
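
  A minimal sketch of pinning the legacy package while exporting your data (assuming the system configuration has `pkgs` in scope):

  ```nix
  { pkgs, ... }: {
    # Keep the last release that still supports the legacy filesystem backend
    # until the data has been exported.
    services.minio.package = pkgs.minio_legacy_fs;
  }
  ```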
- `services.sourcehut.dispatch` and the corresponding package (`sourcehut.dispatchsrht`) have been removed due to [upstream deprecation](https://sourcehut.org/blog/2022-08-01-dispatch-deprecation-plans/).
- The [services.snapserver.openFirewall](#opt-services.snapserver.openFirewall) module option default value has been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
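
  For example, a configuration that relied on the old default can restore it explicitly (a minimal sketch):

  ```nix
  services.snapserver.openFirewall = true;
  ```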
@ -45,6 +47,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `llvmPackages_rocm.llvm` will not contain `clang` or `compiler-rt`. `llvmPackages_rocm.clang` will not contain `llvm`. `llvmPackages_rocm.clangNoCompilerRt` has been removed in favor of using `llvmPackages_rocm.clang-unwrapped`.
- The Nginx module now validates the syntax of config files at build time. For more complex configurations (notably those using `include` with out-of-store files) you may need to disable this check by setting [services.nginx.validateConfig](#opt-services.nginx.validateConfig) to `false`.
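
  A minimal sketch of opting out of the new check:

  ```nix
  services.nginx.validateConfig = false;
  ```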
- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
- The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
@ -59,7 +63,7 @@ In addition to numerous new and upgraded packages, this release has the followin
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are *customizable* (in the sense of user configuration, like vimrc).
- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc).
- The module for the application firewall `opensnitch` gained the ability to configure rules, available as [services.opensnitch.rules](#opt-services.opensnitch.rules).
@ -78,6 +82,19 @@ In addition to numerous new and upgraded packages, this release has the followin
- `mastodon` now supports connection to a remote `PostgreSQL` database.
- `services.peertube` now requires you to specify the secret file `secrets.secretsFile`. It can be generated by running `openssl rand -hex 32`.
Before upgrading, read the release notes for PeerTube:
- [Release v5.0.0](https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0)
And back up your data.
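
  A minimal sketch of providing the new secret (the path below is only an example):

  ```nix
  # Generate the secret once, for example:
  #   openssl rand -hex 32 > /run/secrets/peertube
  services.peertube.secrets.secretsFile = "/run/secrets/peertube";
  ```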
- The module `services.headscale` was refactored to be compliant with [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md). To be precise, this means that the following things have changed:
- Most settings have been migrated under [services.headscale.settings](#opt-services.headscale.settings), which is an attribute set that
will be converted into headscale's YAML config format. This means that the configuration from
[headscale's example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
can be written directly as an attribute set in Nix within this option, as sketched below.
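
  A minimal sketch, using only option names that appear in the refactored module later in this commit:

  ```nix
  services.headscale = {
    enable = true;
    settings = {
      server_url = "https://myheadscale.example.com:443";
      dns_config.base_domain = "example.com";
      log.level = "info";
    };
  };
  ```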
- A new `virtualisation.rosetta` module was added to allow running `x86_64` binaries through [Rosetta](https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment) inside virtualised NixOS guests on Apple silicon. This feature works by default with the [UTM](https://docs.getutm.app/) virtualisation [package](https://search.nixos.org/packages?channel=unstable&show=utm&from=0&size=1&sort=relevance&type=packages&query=utm).
- The new option `users.motdFile` allows configuring a Message Of The Day that can be updated dynamically.

View file

@ -324,7 +324,11 @@ in rec {
scriptArgs = mkOption {
type = types.str;
default = "";
description = lib.mdDoc "Arguments passed to the main process script.";
example = "%i";
description = lib.mdDoc ''
Arguments passed to the main process script.
Can contain specifiers (`%` placeholders expanded by systemd, see {manpage}`systemd.unit(5)`).
'';
};
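# Example (a sketch, not part of the upstream text): for a template unit such
# as "my-service@.service", systemd expands %i to the instance name, so
#
#   systemd.services."my-service@".scriptArgs = "%i";
#
# passes the instance name to the main process script; see systemd.unit(5)
# for the full list of specifiers.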
preStart = mkOption {

View file

@ -220,6 +220,20 @@ class Driver:
res = driver.polling_conditions.pop()
assert res is self.condition
def wait(self, timeout: int = 900) -> None:
    def condition(last: bool) -> bool:
        if last:
            rootlog.info(f"Last chance for {self.condition.description}")
        ret = self.condition.check(force=True)
        if not ret and not last:
            rootlog.info(
                f"({self.condition.description} failure not fatal yet)"
            )
        return ret

    with rootlog.nested(f"waiting for {self.condition.description}"):
        retry(condition, timeout=timeout)
if fun_ is None:
return Poll
else:

View file

@ -1,4 +1,5 @@
from typing import Callable, Optional
from math import isfinite
import time
from .logger import rootlog
@ -14,7 +15,7 @@ class PollingCondition:
description: Optional[str]
last_called: float
entered: bool
entry_count: int
def __init__(
self,
@ -34,14 +35,21 @@ class PollingCondition:
self.description = str(description)
self.last_called = float("-inf")
self.entered = False
self.entry_count = 0
def check(self) -> bool:
if self.entered or not self.overdue:
def check(self, force: bool = False) -> bool:
if (self.entered or not self.overdue) and not force:
return True
with self, rootlog.nested(self.nested_message):
rootlog.info(f"Time since last: {time.monotonic() - self.last_called:.2f}s")
time_since_last = time.monotonic() - self.last_called
last_message = (
f"Time since last: {time_since_last:.2f}s"
if isfinite(time_since_last)
else "(not called yet)"
)
rootlog.info(last_message)
try:
res = self.condition() # type: ignore
except Exception:
@ -69,9 +77,16 @@ class PollingCondition:
def overdue(self) -> bool:
return self.last_called + self.seconds_interval < time.monotonic()
@property
def entered(self) -> bool:
# entry_count should never dip *below* zero
assert self.entry_count >= 0
return self.entry_count > 0
def __enter__(self) -> None:
self.entered = True
self.entry_count += 1
def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore
self.entered = False
assert self.entered
self.entry_count -= 1
self.last_called = time.monotonic()

View file

@ -1,3 +1,4 @@
args@
{ system
, pkgs ? import ../.. { inherit system config; }
# Use a minimal kernel?
@ -5,7 +6,7 @@
# Ignored
, config ? { }
# !!! See comment about args in lib/modules.nix
, specialArgs ? { }
, specialArgs ? throw "legacy - do not use, see error below"
# Modules to add to each VM
, extraConfigurations ? [ ]
}:
@ -13,6 +14,13 @@ let
nixos-lib = import ./default.nix { inherit (pkgs) lib; };
in
pkgs.lib.throwIf (args?specialArgs) ''
testing-python.nix: `specialArgs` is not supported anymore. If you're looking
for the public interface to the NixOS test framework, use `runTest`, and
`node.specialArgs`.
See https://nixos.org/manual/nixos/unstable/index.html#sec-calling-nixos-tests
and https://nixos.org/manual/nixos/unstable/index.html#test-opt-node.specialArgs
''
rec {
inherit pkgs;

View file

@ -33,9 +33,13 @@ with lib;
ffmpeg_4 = super.ffmpeg_4-headless;
ffmpeg_5 = super.ffmpeg_5-headless;
gobject-introspection = super.gobject-introspection.override { x11Support = false; };
gpsd = super.gpsd.override { guiSupport = false; };
imagemagick = super.imagemagick.override { libX11Support = false; libXtSupport = false; };
imagemagickBig = super.imagemagickBig.override { libX11Support = false; libXtSupport = false; };
libextractor = super.libextractor.override { gstreamerSupport = false; gtkSupport = false; };
libva = super.libva-minimal;
limesuite = super.limesuite.override { withGui = false; };
msmtp = super.msmtp.override { withKeyring = false; };
networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
networkmanager-l2tp = super.networkmanager-l2tp.override { withGnome = false; };

View file

@ -70,14 +70,12 @@ let
;
# Timeout in syslinux is in units of 1/10 of a second.
# 0 is used to disable timeouts.
# null means max timeout (35996, just under 1h in 1/10 seconds)
# 0 means disable timeout
syslinuxTimeout = if config.boot.loader.timeout == null then
0
35996
else
max (config.boot.loader.timeout * 10) 1;
max = x: y: if x > y then x else y;
config.boot.loader.timeout * 10;
# The configuration file for syslinux.

View file

@ -180,6 +180,7 @@
./programs/hamster.nix
./programs/htop.nix
./programs/iftop.nix
./programs/i3lock.nix
./programs/iotop.nix
./programs/java.nix
./programs/k3b.nix
@ -724,6 +725,7 @@
./services/monitoring/riemann.nix
./services/monitoring/scollector.nix
./services/monitoring/smartd.nix
./services/monitoring/statsd.nix
./services/monitoring/sysstat.nix
./services/monitoring/teamviewer.nix
./services/monitoring/telegraf.nix
@ -874,7 +876,6 @@
./services/networking/miredo.nix
./services/networking/mjpg-streamer.nix
./services/networking/mmsd.nix
./services/networking/mosquitto.nix
./services/networking/monero.nix
./services/networking/morty.nix
./services/networking/mosquitto.nix

View file

@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACCQVnMW/wZWqrdWrjrRPhfEFFq1KLYguagSflLhFnVQmwAAAJASuMMnErjD
JwAAAAtzc2gtZWQyNTUxOQAAACCQVnMW/wZWqrdWrjrRPhfEFFq1KLYguagSflLhFnVQmw
AAAEDIN2VWFyggtoSPXcAFy8dtG1uAig8sCuyE21eMDt2GgJBWcxb/Blaqt1auOtE+F8QU
WrUotiC5qBJ+UuEWdVCbAAAACnJvb3RAbml4b3MBAgM=
-----END OPENSSH PRIVATE KEY-----

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJBWcxb/Blaqt1auOtE+F8QUWrUotiC5qBJ+UuEWdVCb root@nixos

View file

@ -0,0 +1,140 @@
{ config, lib, pkgs, ... }:
let
keysDirectory = "/var/keys";
user = "builder";
keyType = "ed25519";
in
{ imports = [
../virtualisation/qemu-vm.nix
];
# The builder is not intended to be used interactively
documentation.enable = false;
environment.etc = {
"ssh/ssh_host_ed25519_key" = {
mode = "0600";
source = ./keys/ssh_host_ed25519_key;
};
"ssh/ssh_host_ed25519_key.pub" = {
mode = "0644";
source = ./keys/ssh_host_ed25519_key.pub;
};
};
# DNS fails for QEMU user networking (SLiRP) on macOS. See:
#
# https://github.com/utmapp/UTM/issues/2353
#
# This works around that by using a public DNS server other than the DNS
# server that QEMU provides (normally 10.0.2.3)
networking.nameservers = [ "8.8.8.8" ];
nix.settings = {
auto-optimise-store = true;
min-free = 1024 * 1024 * 1024;
max-free = 3 * 1024 * 1024 * 1024;
trusted-users = [ "root" user ];
};
services.openssh = {
enable = true;
authorizedKeysFiles = [ "${keysDirectory}/%u_${keyType}.pub" ];
};
system.build.macos-builder-installer =
let
privateKey = "/etc/nix/${user}_${keyType}";
publicKey = "${privateKey}.pub";
# This installCredentials script is written so that it's as easy as
# possible for a user to audit before confirming the `sudo`
installCredentials = hostPkgs.writeShellScript "install-credentials" ''
KEYS="''${1}"
INSTALL=${hostPkgs.coreutils}/bin/install
"''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
"''${INSTALL}" -g nixbld -m 644 "''${KEYS}/${user}_${keyType}.pub" ${publicKey}
'';
hostPkgs = config.virtualisation.host.pkgs;
script = hostPkgs.writeShellScriptBin "create-builder" ''
KEYS="''${KEYS:-./keys}"
${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
PUBLIC_KEY="''${PRIVATE_KEY}.pub"
if [ ! -e "''${PRIVATE_KEY}" ] || [ ! -e "''${PUBLIC_KEY}" ]; then
${hostPkgs.coreutils}/bin/rm --force -- "''${PRIVATE_KEY}" "''${PUBLIC_KEY}"
${hostPkgs.openssh}/bin/ssh-keygen -q -f "''${PRIVATE_KEY}" -t ${keyType} -N "" -C 'builder@localhost'
fi
if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
(set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
fi
KEYS="$(nix-store --add "$KEYS")" ${config.system.build.vm}/bin/run-nixos-vm
'';
in
script.overrideAttrs (old: {
meta = (old.meta or { }) // {
platforms = lib.platforms.darwin;
};
});
system.stateVersion = "22.05";
users.users."${user}"= {
isNormalUser = true;
};
virtualisation = {
diskSize = 20 * 1024;
memorySize = 3 * 1024;
forwardPorts = [
{ from = "host"; guest.port = 22; host.port = 22; }
];
# Disable graphics for the builder since users will likely want to run it
# non-interactively in the background.
graphics = false;
sharedDirectories.keys = {
source = "\"$KEYS\"";
target = keysDirectory;
};
# If we don't enable this option then the host will fail to delegate builds
# to the guest, because:
#
# - The host will lock the path to build
# - The host will delegate the build to the guest
# - The guest will attempt to lock the same path and fail because
# the lockfile on the host is visible on the guest
#
# Snapshotting the host's /nix/store as an image isolates the guest VM's
# /nix/store from the host's /nix/store, preventing this problem.
useNixStoreImage = true;
# Obviously the /nix/store needs to be writable on the guest in order for it
# to perform builds.
writableStore = true;
# This ensures that anything built on the guest isn't lost when the guest is
# restarted.
writableStoreUseTmpfs = false;
};
}

View file

@ -20,15 +20,41 @@ in
};
config = mkOption {
type = with types; attrsOf (attrsOf anything);
default = { };
type =
with types;
let
gitini = attrsOf (attrsOf anything);
in
either gitini (listOf gitini) // {
merge = loc: defs:
let
config = foldl'
(acc: { value, ... }@x: acc // (if isList value then {
ordered = acc.ordered ++ value;
} else {
unordered = acc.unordered ++ [ x ];
}))
{
ordered = [ ];
unordered = [ ];
}
defs;
in
[ (gitini.merge loc config.unordered) ] ++ config.ordered;
};
default = [ ];
example = {
init.defaultBranch = "main";
url."https://github.com/".insteadOf = [ "gh:" "github:" ];
};
description = lib.mdDoc ''
Configuration to write to /etc/gitconfig. See the CONFIGURATION FILE
section of git-config(1) for more information.
Configuration to write to /etc/gitconfig. A list can also be
specified to keep the configuration in order. For example, setting
`config` to `[ { foo.x = 42; } { bar.y = 42; } ]` will put the `foo`
section before the `bar` section unlike the default alphabetical
order, which can be helpful for sections such as `include` and
`includeIf`. See the CONFIGURATION FILE section of git-config(1) for
more information.
'';
};
@ -48,8 +74,8 @@ in
config = mkMerge [
(mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
environment.etc.gitconfig = mkIf (cfg.config != {}) {
text = generators.toGitINI cfg.config;
environment.etc.gitconfig = mkIf (cfg.config != [ ]) {
text = concatMapStringsSep "\n" generators.toGitINI cfg.config;
};
})
(mkIf (cfg.enable && cfg.lfs.enable) {

View file

@ -0,0 +1,58 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.programs.i3lock;
in {
###### interface
options = {
programs.i3lock = {
enable = mkEnableOption (mdDoc "i3lock");
package = mkOption {
type = types.package;
default = pkgs.i3lock;
defaultText = literalExpression "pkgs.i3lock";
example = literalExpression ''
pkgs.i3lock-color
'';
description = mdDoc ''
Specify which package to use for the i3lock program.
The i3lock package must include an i3lock file or link in its out directory in order for the u2fSupport option to work correctly.
'';
};
u2fSupport = mkOption {
type = types.bool;
default = false;
example = true;
description = mdDoc ''
Whether to enable U2F support in the i3lock program.
U2F enables authentication using a hardware device, such as a security key.
When U2F support is enabled, the setuid bit will be set on the i3lock binary and the PAM u2fAuth service will be enabled.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
security.wrappers.i3lock = mkIf cfg.u2fSupport {
setuid = true;
owner = "root";
group = "root";
source = "${cfg.package.out}/bin/i3lock";
};
security.pam.services.i3lock.u2fAuth = cfg.u2fSupport;
};
}
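# Usage sketch (not part of this module): a system configuration enabling the
# program with U2F support might look like:
#
#   programs.i3lock = {
#     enable = true;
#     u2fSupport = true;
#   };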

View file

@ -280,7 +280,7 @@ in {
${pkgs.lighthouse}/bin/lighthouse validator_client \
--network ${cfg.network} \
--beacon-nodes ${lib.concatStringsSep "," cfg.validator.beaconNodes} \
--datadir ${cfg.validator.dataDir}/${cfg.network}
--datadir ${cfg.validator.dataDir}/${cfg.network} \
${optionalString cfg.validator.metrics.enable ''--metrics --metrics-address ${cfg.validator.metrics.address} --metrics-port ${toString cfg.validator.metrics.port}''} \
${cfg.extraArgs} ${cfg.validator.extraArgs}
'';

View file

@ -89,6 +89,8 @@ in
SendSIGHUP = true;
TimeoutStopSec = "30s";
KeyringMode = "shared";
Type = "idle";
};
# Don't kill a user session when using nixos-rebuild

View file

@ -47,7 +47,7 @@ in
{
options = {
services.nitter = {
enable = mkEnableOption (lib.mdDoc "If enabled, start Nitter.");
enable = mkEnableOption (lib.mdDoc "Nitter");
package = mkOption {
default = pkgs.nitter;

View file

@ -555,7 +555,7 @@ in {
auto_assign_org_role = mkOption {
description = lib.mdDoc "Default role new users will be auto assigned.";
default = "Viewer";
type = types.enum ["Viewer" "Editor"];
type = types.enum ["Viewer" "Editor" "Admin"];
};
};

View file

@ -68,6 +68,7 @@ let
"smartctl"
"smokeping"
"sql"
"statsd"
"surfboard"
"systemd"
"tor"

View file

@ -0,0 +1,19 @@
{ config, lib, pkgs, options }:
with lib;
let
cfg = config.services.prometheus.exporters.statsd;
in
{
port = 9102;
serviceOpts = {
serviceConfig = {
ExecStart = ''
${pkgs.prometheus-statsd-exporter}/bin/statsd_exporter \
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
};
};
}
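# Usage sketch (assuming the common options generated by the exporter
# framework above, such as `enable`, `port` and `listenAddress`, apply here as
# for the other exporters):
#
#   services.prometheus.exporters.statsd.enable = true;
#
# This starts statsd_exporter listening on the default port 9102 declared above.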

View file

@ -11,8 +11,10 @@ let
format = pkgs.formats.toml {};
settings = {
database_url = dbURL;
human_logs = true;
syncstorage = {
database_url = dbURL;
};
tokenserver = {
node_type = "mysql";
database_url = dbURL;
@ -253,8 +255,7 @@ in
serviceConfig = {
User = defaultUser;
Group = defaultUser;
ExecStart = "${cfg.package}/bin/syncstorage --config ${configFile}";
Stderr = "journal";
ExecStart = "${cfg.package}/bin/syncserver --config ${configFile}";
EnvironmentFile = lib.mkIf (cfg.secrets != null) "${cfg.secrets}";
# hardening

View file

@ -1,15 +1,18 @@
{ config, lib, pkgs, ... }:
with lib;
let
{
config,
lib,
pkgs,
...
}:
with lib; let
cfg = config.services.headscale;
dataDir = "/var/lib/headscale";
runDir = "/run/headscale";
settingsFormat = pkgs.formats.yaml { };
settingsFormat = pkgs.formats.yaml {};
configFile = settingsFormat.generate "headscale.yaml" cfg.settings;
in
{
in {
options = {
services.headscale = {
enable = mkEnableOption (lib.mdDoc "headscale, Open Source coordination server for Tailscale");
@ -51,15 +54,6 @@ in
'';
};
serverUrl = mkOption {
type = types.str;
default = "http://127.0.0.1:8080";
description = lib.mdDoc ''
The url clients will connect to.
'';
example = "https://myheadscale.example.com:443";
};
address = mkOption {
type = types.str;
default = "127.0.0.1";
@ -78,337 +72,346 @@ in
example = 443;
};
privateKeyFile = mkOption {
type = types.path;
default = "${dataDir}/private.key";
description = lib.mdDoc ''
Path to private key file, generated automatically if it does not exist.
'';
};
derp = {
urls = mkOption {
type = types.listOf types.str;
default = [ "https://controlplane.tailscale.com/derpmap/default" ];
description = lib.mdDoc ''
List of urls containing DERP maps.
See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
'';
};
paths = mkOption {
type = types.listOf types.path;
default = [ ];
description = lib.mdDoc ''
List of file paths containing DERP maps.
See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
'';
};
autoUpdate = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to automatically update DERP maps on a set frequency.
'';
example = false;
};
updateFrequency = mkOption {
type = types.str;
default = "24h";
description = lib.mdDoc ''
Frequency to update DERP maps.
'';
example = "5m";
};
};
ephemeralNodeInactivityTimeout = mkOption {
type = types.str;
default = "30m";
description = lib.mdDoc ''
Time before an inactive ephemeral node is deleted.
'';
example = "5m";
};
database = {
type = mkOption {
type = types.enum [ "sqlite3" "postgres" ];
example = "postgres";
default = "sqlite3";
description = lib.mdDoc "Database engine to use.";
};
host = mkOption {
type = types.nullOr types.str;
default = null;
example = "127.0.0.1";
description = lib.mdDoc "Database host address.";
};
port = mkOption {
type = types.nullOr types.port;
default = null;
example = 3306;
description = lib.mdDoc "Database host port.";
};
name = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = lib.mdDoc "Database name.";
};
user = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = lib.mdDoc "Database user.";
};
passwordFile = mkOption {
type = types.nullOr types.path;
default = null;
example = "/run/keys/headscale-dbpassword";
description = lib.mdDoc ''
A file containing the password corresponding to
{option}`database.user`.
'';
};
path = mkOption {
type = types.nullOr types.str;
default = "${dataDir}/db.sqlite";
description = lib.mdDoc "Path to the sqlite3 database file.";
};
};
logLevel = mkOption {
type = types.str;
default = "info";
description = lib.mdDoc ''
headscale log level.
'';
example = "debug";
};
dns = {
nameservers = mkOption {
type = types.listOf types.str;
default = [ "1.1.1.1" ];
description = lib.mdDoc ''
List of nameservers to pass to Tailscale clients.
'';
};
domains = mkOption {
type = types.listOf types.str;
default = [ ];
description = lib.mdDoc ''
Search domains to inject to Tailscale clients.
'';
example = [ "mydomain.internal" ];
};
magicDns = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
Only works if there is at least a nameserver defined.
'';
example = false;
};
baseDomain = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
Defines the base domain to create the hostnames for MagicDNS.
{option}`baseDomain` must be a FQDNs, without the trailing dot.
The FQDN of the hosts will be
`hostname.namespace.base_domain` (e.g.
`myhost.mynamespace.example.com`).
'';
};
};
openIdConnect = {
issuer = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
URL to OpenID issuer.
'';
example = "https://openid.example.com";
};
clientId = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
OpenID Connect client ID.
'';
};
clientSecretFile = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to OpenID Connect client secret file.
'';
};
domainMap = mkOption {
type = types.attrsOf types.str;
default = { };
description = lib.mdDoc ''
Domain map is used to map incoming users (by their email) to
a namespace. The key can be a string, or regex.
'';
example = {
".*" = "default-namespace";
};
};
};
tls = {
letsencrypt = {
hostname = mkOption {
type = types.nullOr types.str;
default = "";
description = lib.mdDoc ''
Domain name to request a TLS certificate for.
'';
};
challengeType = mkOption {
type = types.enum [ "TLS-ALPN-01" "HTTP-01" ];
default = "HTTP-01";
description = lib.mdDoc ''
Type of ACME challenge to use, currently supported types:
`HTTP-01` or `TLS-ALPN-01`.
'';
};
httpListen = mkOption {
type = types.nullOr types.str;
default = ":http";
description = lib.mdDoc ''
When HTTP-01 challenge is chosen, letsencrypt must set up a
verification endpoint, and it will be listening on:
`:http = port 80`.
'';
};
};
certFile = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to already created certificate.
'';
};
keyFile = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to key for already created certificate.
'';
};
};
aclPolicyFile = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to a file containing ACL policies.
'';
};
settings = mkOption {
type = settingsFormat.type;
default = { };
description = lib.mdDoc ''
Overrides to {file}`config.yaml` as a Nix attribute set.
This option is ideal for overriding settings not exposed as Nix options.
Check the [example config](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
for possible options.
'';
type = types.submodule {
freeformType = settingsFormat.type;
options = {
server_url = mkOption {
type = types.str;
default = "http://127.0.0.1:8080";
description = lib.mdDoc ''
The url clients will connect to.
'';
example = "https://myheadscale.example.com:443";
};
private_key_path = mkOption {
type = types.path;
default = "${dataDir}/private.key";
description = lib.mdDoc ''
Path to private key file, generated automatically if it does not exist.
'';
};
noise.private_key_path = mkOption {
type = types.path;
default = "${dataDir}/noise_private.key";
description = lib.mdDoc ''
Path to noise private key file, generated automatically if it does not exist.
'';
};
derp = {
urls = mkOption {
type = types.listOf types.str;
default = ["https://controlplane.tailscale.com/derpmap/default"];
description = lib.mdDoc ''
List of urls containing DERP maps.
See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
'';
};
paths = mkOption {
type = types.listOf types.path;
default = [];
description = lib.mdDoc ''
List of file paths containing DERP maps.
See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
'';
};
auto_update_enable = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to automatically update DERP maps on a set frequency.
'';
example = false;
};
update_frequency = mkOption {
type = types.str;
default = "24h";
description = lib.mdDoc ''
Frequency to update DERP maps.
'';
example = "5m";
};
};
ephemeral_node_inactivity_timeout = mkOption {
type = types.str;
default = "30m";
description = lib.mdDoc ''
Time before an inactive ephemeral node is deleted.
'';
example = "5m";
};
db_type = mkOption {
type = types.enum ["sqlite3" "postgres"];
example = "postgres";
default = "sqlite3";
description = lib.mdDoc "Database engine to use.";
};
db_host = mkOption {
type = types.nullOr types.str;
default = null;
example = "127.0.0.1";
description = lib.mdDoc "Database host address.";
};
db_port = mkOption {
type = types.nullOr types.port;
default = null;
example = 3306;
description = lib.mdDoc "Database host port.";
};
db_name = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = lib.mdDoc "Database name.";
};
db_user = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = lib.mdDoc "Database user.";
};
db_password_file = mkOption {
type = types.nullOr types.path;
default = null;
example = "/run/keys/headscale-dbpassword";
description = lib.mdDoc ''
A file containing the password corresponding to
{option}`database.user`.
'';
};
db_path = mkOption {
type = types.nullOr types.str;
default = "${dataDir}/db.sqlite";
description = lib.mdDoc "Path to the sqlite3 database file.";
};
log.level = mkOption {
type = types.str;
default = "info";
description = lib.mdDoc ''
headscale log level.
'';
example = "debug";
};
log.format = mkOption {
type = types.str;
default = "text";
description = lib.mdDoc ''
headscale log format.
'';
example = "json";
};
dns_config = {
nameservers = mkOption {
type = types.listOf types.str;
default = ["1.1.1.1"];
description = lib.mdDoc ''
List of nameservers to pass to Tailscale clients.
'';
};
override_local_dns = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Whether to use [Override local DNS](https://tailscale.com/kb/1054/dns/).
'';
example = true;
};
domains = mkOption {
type = types.listOf types.str;
default = [];
description = lib.mdDoc ''
Search domains to inject to Tailscale clients.
'';
example = ["mydomain.internal"];
};
magic_dns = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
Only works if there is at least a nameserver defined.
'';
example = false;
};
base_domain = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
Defines the base domain to create the hostnames for MagicDNS.
{option}`baseDomain` must be an FQDN, without the trailing dot.
The FQDN of the hosts will be
`hostname.namespace.base_domain` (e.g.
`myhost.mynamespace.example.com`).
'';
};
};
oidc = {
issuer = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
URL to OpenID issuer.
'';
example = "https://openid.example.com";
};
client_id = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
OpenID Connect client ID.
'';
};
client_secret_file = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to OpenID Connect client secret file.
'';
};
domain_map = mkOption {
type = types.attrsOf types.str;
default = {};
description = lib.mdDoc ''
Domain map is used to map incoming users (by their email) to
a namespace. The key can be a string, or regex.
'';
example = {
".*" = "default-namespace";
};
};
};
tls_letsencrypt_hostname = mkOption {
type = types.nullOr types.str;
default = "";
description = lib.mdDoc ''
Domain name to request a TLS certificate for.
'';
};
tls_letsencrypt_challenge_type = mkOption {
type = types.enum ["TLS-ALPN-01" "HTTP-01"];
default = "HTTP-01";
description = lib.mdDoc ''
Type of ACME challenge to use, currently supported types:
`HTTP-01` or `TLS-ALPN-01`.
'';
};
tls_letsencrypt_listen = mkOption {
type = types.nullOr types.str;
default = ":http";
description = lib.mdDoc ''
When HTTP-01 challenge is chosen, letsencrypt must set up a
verification endpoint, and it will be listening on:
`:http = port 80`.
'';
};
tls_cert_path = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to already created certificate.
'';
};
tls_key_path = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to key for already created certificate.
'';
};
acl_policy_path = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to a file containing ACL policies.
'';
};
};
};
};
};
};
imports = [
# TODO address + port = listen_addr
(mkRenamedOptionModule ["services" "headscale" "serverUrl"] ["services" "headscale" "settings" "server_url"])
(mkRenamedOptionModule ["services" "headscale" "privateKeyFile"] ["services" "headscale" "settings" "private_key_path"])
(mkRenamedOptionModule ["services" "headscale" "derp" "urls"] ["services" "headscale" "settings" "derp" "urls"])
(mkRenamedOptionModule ["services" "headscale" "derp" "paths"] ["services" "headscale" "settings" "derp" "paths"])
(mkRenamedOptionModule ["services" "headscale" "derp" "autoUpdate"] ["services" "headscale" "settings" "derp" "auto_update_enable"])
(mkRenamedOptionModule ["services" "headscale" "derp" "updateFrequency"] ["services" "headscale" "settings" "derp" "update_frequency"])
(mkRenamedOptionModule ["services" "headscale" "ephemeralNodeInactivityTimeout"] ["services" "headscale" "settings" "ephemeral_node_inactivity_timeout"])
(mkRenamedOptionModule ["services" "headscale" "database" "type"] ["services" "headscale" "settings" "db_type"])
(mkRenamedOptionModule ["services" "headscale" "database" "path"] ["services" "headscale" "settings" "db_path"])
(mkRenamedOptionModule ["services" "headscale" "database" "host"] ["services" "headscale" "settings" "db_host"])
(mkRenamedOptionModule ["services" "headscale" "database" "port"] ["services" "headscale" "settings" "db_port"])
(mkRenamedOptionModule ["services" "headscale" "database" "name"] ["services" "headscale" "settings" "db_name"])
(mkRenamedOptionModule ["services" "headscale" "database" "user"] ["services" "headscale" "settings" "db_user"])
(mkRenamedOptionModule ["services" "headscale" "database" "passwordFile"] ["services" "headscale" "settings" "db_password_file"])
(mkRenamedOptionModule ["services" "headscale" "logLevel"] ["services" "headscale" "settings" "log" "level"])
(mkRenamedOptionModule ["services" "headscale" "dns" "nameservers"] ["services" "headscale" "settings" "dns_config" "nameservers"])
(mkRenamedOptionModule ["services" "headscale" "dns" "domains"] ["services" "headscale" "settings" "dns_config" "domains"])
(mkRenamedOptionModule ["services" "headscale" "dns" "magicDns"] ["services" "headscale" "settings" "dns_config" "magic_dns"])
(mkRenamedOptionModule ["services" "headscale" "dns" "baseDomain"] ["services" "headscale" "settings" "dns_config" "base_domain"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_file"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ["services" "headscale" "settings" "oidc" "domain_map"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
(mkRenamedOptionModule ["services" "headscale" "tls" "certFile"] ["services" "headscale" "settings" "tls_cert_path"])
(mkRenamedOptionModule ["services" "headscale" "tls" "keyFile"] ["services" "headscale" "settings" "tls_key_path"])
(mkRenamedOptionModule ["services" "headscale" "aclPolicyFile"] ["services" "headscale" "settings" "acl_policy_path"])
];
config = mkIf cfg.enable {
services.headscale.settings = {
server_url = mkDefault cfg.serverUrl;
listen_addr = mkDefault "${cfg.address}:${toString cfg.port}";
private_key_path = mkDefault cfg.privateKeyFile;
derp = {
urls = mkDefault cfg.derp.urls;
paths = mkDefault cfg.derp.paths;
auto_update_enable = mkDefault cfg.derp.autoUpdate;
update_frequency = mkDefault cfg.derp.updateFrequency;
};
# Turn off update checks since the origin of our package
# is nixpkgs and not Github.
disable_check_updates = true;
ephemeral_node_inactivity_timeout = mkDefault cfg.ephemeralNodeInactivityTimeout;
db_type = mkDefault cfg.database.type;
db_path = mkDefault cfg.database.path;
log_level = mkDefault cfg.logLevel;
dns_config = {
nameservers = mkDefault cfg.dns.nameservers;
domains = mkDefault cfg.dns.domains;
magic_dns = mkDefault cfg.dns.magicDns;
base_domain = mkDefault cfg.dns.baseDomain;
};
unix_socket = "${runDir}/headscale.sock";
# OpenID Connect
oidc = {
issuer = mkDefault cfg.openIdConnect.issuer;
client_id = mkDefault cfg.openIdConnect.clientId;
domain_map = mkDefault cfg.openIdConnect.domainMap;
};
tls_letsencrypt_cache_dir = "${dataDir}/.cache";
} // optionalAttrs (cfg.database.host != null) {
db_host = mkDefault cfg.database.host;
} // optionalAttrs (cfg.database.port != null) {
db_port = mkDefault cfg.database.port;
} // optionalAttrs (cfg.database.name != null) {
db_name = mkDefault cfg.database.name;
} // optionalAttrs (cfg.database.user != null) {
db_user = mkDefault cfg.database.user;
} // optionalAttrs (cfg.tls.letsencrypt.hostname != null) {
tls_letsencrypt_hostname = mkDefault cfg.tls.letsencrypt.hostname;
} // optionalAttrs (cfg.tls.letsencrypt.challengeType != null) {
tls_letsencrypt_challenge_type = mkDefault cfg.tls.letsencrypt.challengeType;
} // optionalAttrs (cfg.tls.letsencrypt.httpListen != null) {
tls_letsencrypt_listen = mkDefault cfg.tls.letsencrypt.httpListen;
} // optionalAttrs (cfg.tls.certFile != null) {
tls_cert_path = mkDefault cfg.tls.certFile;
} // optionalAttrs (cfg.tls.keyFile != null) {
tls_key_path = mkDefault cfg.tls.keyFile;
} // optionalAttrs (cfg.aclPolicyFile != null) {
acl_policy_path = mkDefault cfg.aclPolicyFile;
};
# Setup the headscale configuration in a known path in /etc to
@ -416,7 +419,7 @@ in
# for communication.
environment.etc."headscale/config.yaml".source = configFile;
users.groups.headscale = mkIf (cfg.group == "headscale") { };
users.groups.headscale = mkIf (cfg.group == "headscale") {};
users.users.headscale = mkIf (cfg.user == "headscale") {
description = "headscale user";
@ -427,70 +430,68 @@ in
systemd.services.headscale = {
description = "headscale coordination server for Tailscale";
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [ configFile ];
after = ["network-online.target"];
wantedBy = ["multi-user.target"];
restartTriggers = [configFile];
environment.GIN_MODE = "release";
script = ''
${optionalString (cfg.database.passwordFile != null) ''
export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.database.passwordFile})"
${optionalString (cfg.settings.db_password_file != null) ''
export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.settings.db_password_file})"
''}
${optionalString (cfg.openIdConnect.clientSecretFile != null) ''
export HEADSCALE_OIDC_CLIENT_SECRET="$(head -n1 ${escapeShellArg cfg.openIdConnect.clientSecretFile})"
${optionalString (cfg.settings.oidc.client_secret_file != null) ''
export HEADSCALE_OIDC_CLIENT_SECRET="$(head -n1 ${escapeShellArg cfg.settings.oidc.client_secret_file})"
''}
exec ${cfg.package}/bin/headscale serve
'';
serviceConfig =
let
capabilityBoundingSet = [ "CAP_CHOWN" ] ++ optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
in
{
Restart = "always";
Type = "simple";
User = cfg.user;
Group = cfg.group;
serviceConfig = let
capabilityBoundingSet = ["CAP_CHOWN"] ++ optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
in {
Restart = "always";
Type = "simple";
User = cfg.user;
Group = cfg.group;
# Hardening options
RuntimeDirectory = "headscale";
# Allow headscale group access so users can be added and use the CLI.
RuntimeDirectoryMode = "0750";
# Hardening options
RuntimeDirectory = "headscale";
# Allow headscale group access so users can be added and use the CLI.
RuntimeDirectoryMode = "0750";
StateDirectory = "headscale";
StateDirectoryMode = "0750";
StateDirectory = "headscale";
StateDirectoryMode = "0750";
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectHostname = true;
ProtectClock = true;
ProtectProc = "invisible";
ProcSubset = "pid";
RestrictNamespaces = true;
RemoveIPC = true;
UMask = "0077";
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectHostname = true;
ProtectClock = true;
ProtectProc = "invisible";
ProcSubset = "pid";
RestrictNamespaces = true;
RemoveIPC = true;
UMask = "0077";
CapabilityBoundingSet = capabilityBoundingSet;
AmbientCapabilities = capabilityBoundingSet;
NoNewPrivileges = true;
LockPersonality = true;
RestrictRealtime = true;
SystemCallFilter = [ "@system-service" "~@privileged" "@chown" ];
SystemCallArchitectures = "native";
RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
};
CapabilityBoundingSet = capabilityBoundingSet;
AmbientCapabilities = capabilityBoundingSet;
NoNewPrivileges = true;
LockPersonality = true;
RestrictRealtime = true;
SystemCallFilter = ["@system-service" "~@privileged" "@chown"];
SystemCallArchitectures = "native";
RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
};
};
};
meta.maintainers = with maintainers; [ kradalby ];
meta.maintainers = with maintainers; [kradalby misterio77];
}

View file

@ -35,6 +35,16 @@ with lib;
systemd.services.rpcbind = {
wantedBy = [ "multi-user.target" ];
# rpcbind performs a check for /var/run/rpcbind.lock at startup
# and will crash if /var/run isn't present. In the stock NixOS
# var.conf tmpfiles configuration file, /var/run is symlinked to
# /run, so rpcbind can enter a race condition in which /var/run
# isn't symlinked yet but tries to interact with the path, so
# controlling the order explicitly here ensures that rpcbind can
# start successfully. The `wants` instead of `requires` should
# avoid creating a strict/brittle dependency.
wants = [ "systemd-tmpfiles-setup.service" ];
after = [ "systemd-tmpfiles-setup.service" ];
};
users.users.rpc = {

View file

@ -8,7 +8,7 @@ let
homeDir = "/var/lib/tox-node";
configFile = let
src = "${pkg.src}/dpkg/config.yml";
src = "${pkg.src}/tox_node/dpkg/config.yml";
confJSON = pkgs.writeText "config.json" (
builtins.toJSON {
log-type = cfg.logType;

View file

@ -25,6 +25,22 @@ in
default = false;
description = lib.mdDoc "Whether to build the PSW package in debug mode.";
};
environment = mkOption {
type = with types; attrsOf str;
default = { };
description = mdDoc "Additional environment variables to pass to the AESM service.";
# Example environment variable for `sgx-azure-dcap-client` provider library
example = {
AZDCAP_COLLATERAL_VERSION = "v2";
AZDCAP_DEBUG_LOG_LEVEL = "INFO";
};
};
quoteProviderLibrary = mkOption {
type = with types; nullOr path;
default = null;
example = literalExpression "pkgs.sgx-azure-dcap-client";
description = lib.mdDoc "Custom quote provider library to use.";
};
settings = mkOption {
description = lib.mdDoc "AESM configuration";
default = { };
@ -83,7 +99,6 @@ in
storeAesmFolder = "${sgx-psw}/aesm";
# Hardcoded path AESM_DATA_FOLDER in psw/ae/aesm_service/source/oal/linux/aesm_util.cpp
aesmDataFolder = "/var/opt/aesmd/data";
aesmStateDirSystemd = "%S/aesmd";
in
{
description = "Intel Architectural Enclave Service Manager";
@ -98,8 +113,8 @@ in
environment = {
NAME = "aesm_service";
AESM_PATH = storeAesmFolder;
LD_LIBRARY_PATH = storeAesmFolder;
};
LD_LIBRARY_PATH = makeLibraryPath [ cfg.quoteProviderLibrary ];
} // cfg.environment;
# Make sure any of the SGX application enclave devices is available
unitConfig.AssertPathExists = [

View file

@ -1,11 +1,11 @@
{ config, pkgs, lib, ... }:
let
inherit (lib) any boolToString concatStringsSep isBool isString literalExpression mapAttrsToList mkDefault mkEnableOption mkIf mkOption optionalAttrs types;
inherit (lib) any boolToString concatStringsSep isBool isString mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption optionalAttrs types;
package = pkgs.dolibarr.override { inherit (cfg) stateDir; };
cfg = config.services.dolibarr;
vhostCfg = config.services.nginx.virtualHosts."${cfg.domain}";
vhostCfg = lib.optionalAttr (cfg.nginx != null) config.services.nginx.virtualHosts."${cfg.domain}";
mkConfigFile = filename: settings:
let
@ -38,7 +38,7 @@ let
force_install_database = cfg.database.name;
force_install_databaselogin = cfg.database.user;
force_install_mainforcehttps = vhostCfg.forceSSL;
force_install_mainforcehttps = vhostCfg.forceSSL or false;
force_install_createuser = false;
force_install_dolibarrlogin = null;
} // optionalAttrs (cfg.database.passwordFile != null) {
@ -183,7 +183,8 @@ in
};
# implementation
config = mkIf cfg.enable {
config = mkIf cfg.enable (mkMerge [
{
assertions = [
{ assertion = cfg.database.createLocally -> cfg.database.user == cfg.user;
@ -214,7 +215,7 @@ in
# Security settings
dolibarr_main_prod = true;
dolibarr_main_force_https = vhostCfg.forceSSL;
dolibarr_main_force_https = vhostCfg.forceSSL or false;
dolibarr_main_restrict_os_commands = "${pkgs.mariadb}/bin/mysqldump, ${pkgs.mariadb}/bin/mysql";
dolibarr_nocsrfcheck = false;
dolibarr_main_instance_unique_id = ''
@ -314,7 +315,9 @@ in
users.groups = optionalAttrs (cfg.group == "dolibarr") {
dolibarr = { };
};
users.users."${config.services.nginx.group}".extraGroups = [ cfg.group ];
};
}
(mkIf (cfg.nginx != null) {
users.users."${config.services.nginx.group}".extraGroups = mkIf (cfg.nginx != null) [ cfg.group ];
})
]);
}

View file

@ -161,6 +161,18 @@ in {
description = lib.mdDoc "Configure nginx as a reverse proxy for peertube.";
};
secrets = {
secretsFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/run/secrets/peertube";
description = lib.mdDoc ''
Secrets to run PeerTube.
Generate one using `openssl rand -hex 32`
'';
};
};
database = {
createLocally = lib.mkOption {
type = lib.types.bool;
@ -201,7 +213,7 @@ in {
passwordFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/run/keys/peertube/password-posgressql-db";
example = "/run/keys/peertube/password-postgresql";
description = lib.mdDoc "Password for PostgreSQL database.";
};
};
@ -282,6 +294,11 @@ in {
prevent this.
'';
}
{ assertion = cfg.secrets.secretsFile != null;
message = ''
<option>services.peertube.secrets.secretsFile</option> needs to be set.
'';
}
{ assertion = !(cfg.redis.enableUnixSocket && (cfg.redis.host != null || cfg.redis.port != null));
message = ''
<option>services.peertube.redis.createLocally</option> and redis network connection (<option>services.peertube.redis.host</option> or <option>services.peertube.redis.port</option>) enabled. Disable either of them.
@ -349,6 +366,7 @@ in {
captions = lib.mkDefault "/var/lib/peertube/storage/captions/";
cache = lib.mkDefault "/var/lib/peertube/storage/cache/";
plugins = lib.mkDefault "/var/lib/peertube/storage/plugins/";
well_known = lib.mkDefault "/var/lib/peertube/storage/well_known/";
client_overrides = lib.mkDefault "/var/lib/peertube/storage/client-overrides/";
};
import = {
@ -417,6 +435,10 @@ in {
#!/bin/sh
umask 077
cat > /var/lib/peertube/config/local.yaml <<EOF
${lib.optionalString (cfg.secrets.secretsFile != null) ''
secrets:
peertube: '$(cat ${cfg.secrets.secretsFile})'
''}
${lib.optionalString ((!cfg.database.createLocally) && (cfg.database.passwordFile != null)) ''
database:
password: '$(cat ${cfg.database.passwordFile})'
@ -443,6 +465,7 @@ in {
RestartSec = 20;
TimeoutSec = 60;
WorkingDirectory = cfg.package;
SyslogIdentifier = "peertube";
# User and group
User = cfg.user;
Group = cfg.group;
@ -548,9 +571,14 @@ in {
'';
};
locations."~ ^/plugins/[^/]+(/[^/]+)?/ws/" = {
tryFiles = "/dev/null @api_websocket";
priority = 1230;
};
locations."@api_websocket" = {
proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
priority = 1230;
priority = 1240;
extraConfig = ''
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@ -581,7 +609,7 @@ in {
'';
};
locations."~ ^/lazy-static/(avatars|banners)/" = {
locations."^~ /lazy-static/avatars/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.avatars;
priority = 1330;
@ -599,6 +627,26 @@ in {
add_header Cache-Control 'public, max-age=7200';
rewrite ^/lazy-static/avatars/(.*)$ /$1 break;
'';
};
locations."^~ /lazy-static/banners/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.avatars;
priority = 1340;
extraConfig = ''
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
add_header Access-Control-Max-Age 1728000;
add_header Cache-Control 'no-cache';
add_header Content-Type 'text/plain charset=UTF-8';
add_header Content-Length 0;
return 204;
}
${nginxCommonHeaders}
add_header Cache-Control 'public, max-age=7200';
rewrite ^/lazy-static/banners/(.*)$ /$1 break;
'';
};
@ -606,7 +654,7 @@ in {
locations."^~ /lazy-static/previews/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.previews;
priority = 1340;
priority = 1350;
extraConfig = ''
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
@ -624,10 +672,34 @@ in {
'';
};
locations."^~ /static/streaming-playlists/private/" = {
proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
priority = 1410;
extraConfig = ''
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_limit_rate 5M;
'';
};
locations."^~ /static/webseed/private/" = {
proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
priority = 1420;
extraConfig = ''
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_limit_rate 5M;
'';
};
locations."^~ /static/thumbnails/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.thumbnails;
priority = 1350;
priority = 1430;
extraConfig = ''
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
@ -648,8 +720,14 @@ in {
locations."^~ /static/redundancy/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.redundancy;
priority = 1360;
priority = 1440;
extraConfig = ''
set $peertube_limit_rate 800k;
if ($request_uri ~ -fragmented.mp4$) {
set $peertube_limit_rate 5M;
}
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
add_header Access-Control-Max-Age 1728000;
@ -662,15 +740,14 @@ in {
access_log off;
}
aio threads;
sendfile on;
sendfile_max_chunk 1M;
limit_rate $peertube_limit_rate;
limit_rate_after 5M;
set $peertube_limit_rate 800k;
set $limit_rate $peertube_limit_rate;
rewrite ^/static/redundancy/(.*)$ /$1 break;
'';
};
@ -678,8 +755,14 @@ in {
locations."^~ /static/streaming-playlists/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.streaming_playlists;
priority = 1370;
priority = 1450;
extraConfig = ''
set $peertube_limit_rate 800k;
if ($request_uri ~ -fragmented.mp4$) {
set $peertube_limit_rate 5M;
}
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
add_header Access-Control-Max-Age 1728000;
@ -697,20 +780,24 @@ in {
sendfile on;
sendfile_max_chunk 1M;
limit_rate $peertube_limit_rate;
limit_rate_after 5M;
set $peertube_limit_rate 5M;
set $limit_rate $peertube_limit_rate;
rewrite ^/static/streaming-playlists/(.*)$ /$1 break;
'';
};
locations."~ ^/static/webseed/" = {
locations."^~ /static/webseed/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.videos;
priority = 1380;
priority = 1460;
extraConfig = ''
set $peertube_limit_rate 800k;
if ($request_uri ~ -fragmented.mp4$) {
set $peertube_limit_rate 5M;
}
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
add_header Access-Control-Max-Age 1728000;
@ -728,11 +815,9 @@ in {
sendfile on;
sendfile_max_chunk 1M;
limit_rate $peertube_limit_rate;
limit_rate_after 5M;
set $peertube_limit_rate 800k;
set $limit_rate $peertube_limit_rate;
rewrite ^/static/webseed/(.*)$ /$1 break;
'';
};

View file

@ -241,7 +241,7 @@ let
configPath = if cfg.enableReload
then "/etc/nginx/nginx.conf"
else configFile;
else finalConfigFile;
execCommand = "${cfg.package}/bin/nginx -c '${configPath}'";
@ -393,6 +393,38 @@ let
);
mkCertOwnershipAssertion = import ../../../security/acme/mk-cert-ownership-assertion.nix;
snakeOilCert = pkgs.runCommand "nginx-config-validate-cert" { nativeBuildInputs = [ pkgs.openssl.bin ]; } ''
mkdir $out
openssl genrsa -des3 -passout pass:xxxxx -out server.pass.key 2048
openssl rsa -passin pass:xxxxx -in server.pass.key -out $out/server.key
openssl req -new -key $out/server.key -out server.csr \
-subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=IT Department/CN=example.com"
openssl x509 -req -days 1 -in server.csr -signkey $out/server.key -out $out/server.crt
'';
validatedConfigFile = pkgs.runCommand "validated-nginx.conf" { nativeBuildInputs = [ cfg.package ]; } ''
# nginx absolutely wants to read the certificates even when told to only validate config, so let's provide fake certs
sed ${configFile} \
-e "s|ssl_certificate .*;|ssl_certificate ${snakeOilCert}/server.crt;|g" \
-e "s|ssl_trusted_certificate .*;|ssl_trusted_certificate ${snakeOilCert}/server.crt;|g" \
-e "s|ssl_certificate_key .*;|ssl_certificate_key ${snakeOilCert}/server.key;|g" \
> conf
LD_PRELOAD=${pkgs.libredirect}/lib/libredirect.so \
NIX_REDIRECTS="/etc/resolv.conf=/dev/null" \
nginx -t -c $(readlink -f ./conf) > out 2>&1 || true
if ! grep -q "syntax is ok" out; then
echo nginx config validation failed.
echo config was ${configFile}.
echo 'in case of false positive, set `services.nginx.validateConfig` to false.'
echo nginx output:
cat out
exit 1
fi
cp ${configFile} $out
'';
finalConfigFile = if cfg.validateConfig then validatedConfigFile else configFile;
in
{
@ -491,6 +523,17 @@ in
'';
};
validateConfig = mkOption {
                # FIXME: re-enable if we can make all of the configurations work.
#default = pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform;
default = false;
defaultText = literalExpression "pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform";
type = types.bool;
description = lib.mdDoc ''
Validate the generated nginx config at build time. The check is not very robust and can be disabled in case of false positives. This is notably the case when cross-compiling or when using `include` with files outside of the store.
'';
};
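
    For illustration, a minimal sketch of how this option might be set in a host configuration when the check trips on out-of-store includes; the virtual host name and include path below are hypothetical, not part of the module:

      # Hypothetical host configuration: disable the build-time syntax check
      # because the vhost includes a file that is not in the Nix store.
      services.nginx = {
        enable = true;
        validateConfig = false;
        virtualHosts."example.org".extraConfig = ''
          include /var/lib/nginx/extra.conf;
        '';
      };
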
additionalModules = mkOption {
default = [];
type = types.listOf (types.attrsOf types.anything);
@ -1029,7 +1072,7 @@ in
};
environment.etc."nginx/nginx.conf" = mkIf cfg.enableReload {
source = configFile;
source = finalConfigFile;
};
# This service waits for all certificates to be available
@ -1048,7 +1091,7 @@ in
# certs are updated _after_ config has been reloaded.
before = sslTargets;
after = sslServices;
restartTriggers = optionals (cfg.enableReload) [ configFile ];
restartTriggers = optionals (cfg.enableReload) [ finalConfigFile ];
# Block reloading if not all certs exist yet.
# Happens when config changes add new vhosts/certs.
unitConfig.ConditionPathExists = optionals (sslServices != []) (map (certName: certs.${certName}.directory + "/fullchain.pem") dependentCertNames);

View file

@ -105,7 +105,7 @@ in
services.dbus.packages = with pkgs.cinnamon; [
cinnamon-common
cinnamon-screensaver
nemo
nemo-with-extensions
xapp
];
services.cinnamon.apps.enable = mkDefault true;
@ -154,7 +154,7 @@ in
polkit_gnome
# packages
nemo
nemo-with-extensions
cinnamon-control-center
cinnamon-settings-daemon
libgnomekbd

View file

@ -585,6 +585,8 @@ in
hardware.bluetooth.enable = true;
hardware.pulseaudio.enable = true;
networking.networkmanager.enable = true;
# Required for autorotate
hardware.sensor.iio.enable = lib.mkDefault true;
# Recommendations can be found here:
# - https://invent.kde.org/plasma-mobile/plasma-phone-settings/-/tree/master/etc/xdg

View file

@ -1,4 +1,5 @@
#V1: {
system: string
init: string
initrd?: string
initrdSecrets?: string

View file

@ -19,13 +19,15 @@ let
(builtins.toJSON
{
v1 = {
system = config.boot.kernelPackages.stdenv.hostPlatform.system;
kernel = "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}";
kernelParams = config.boot.kernelParams;
initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
label = "NixOS ${config.system.nixos.codeName} ${config.system.nixos.label} (Linux ${config.boot.kernelPackages.kernel.modDirVersion})";
inherit (cfg) extensions;
} // lib.optionalAttrs config.boot.initrd.enable {
initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
};
});
@ -54,7 +56,7 @@ let
specialisationInjector =
let
specialisationLoader = (lib.mapAttrsToList
(childName: childToplevel: lib.escapeShellArgs [ "--slurpfile" childName "${childToplevel}/bootspec/${filename}" ])
(childName: childToplevel: lib.escapeShellArgs [ "--slurpfile" childName "${childToplevel}/${filename}" ])
children);
in
lib.escapeShellArgs [
@ -66,7 +68,7 @@ let
''
mkdir -p $out/bootspec
${toplevelInjector} | ${specialisationInjector} > $out/bootspec/${filename}
${toplevelInjector} | ${specialisationInjector} > $out/${filename}
'';
validator = pkgs.writeCueValidator ./bootspec.cue {
@ -80,7 +82,7 @@ in
enable = lib.mkEnableOption (lib.mdDoc "Enable generation of RFC-0125 bootspec in $system/bootspec, e.g. /run/current-system/bootspec");
extensions = lib.mkOption {
type = lib.types.attrs;
type = lib.types.attrsOf lib.types.attrs; # <namespace>: { ...namespace-specific fields }
default = { };
description = lib.mdDoc ''
User-defined data that extends the bootspec document.

View file

@ -81,7 +81,7 @@ let
${optionalString (!config.boot.isContainer && config.boot.bootspec.enable) ''
${config.boot.bootspec.writer}
${config.boot.bootspec.validator} "$out/bootspec/${config.boot.bootspec.filename}"
${config.boot.bootspec.validator} "$out/${config.boot.bootspec.filename}"
''}
${config.system.extraSystemBuilderCmds}

View file

@ -205,8 +205,9 @@ let
# Copy ld manually since it isn't detected correctly
cp -pv ${pkgs.stdenv.cc.libc.out}/lib/ld*.so.? $out/lib
# Copy all of the needed libraries
find $out/bin $out/lib -type f | while read BIN; do
# Copy all of the needed libraries in a consistent order so
# duplicates are resolved the same way.
find $out/bin $out/lib -type f | sort | while read BIN; do
echo "Copying libs for executable $BIN"
for LIB in $(${findLibs}/bin/find-libs $BIN); do
TGT="$out/lib/$(basename $LIB)"

View file

@ -503,6 +503,10 @@ in
assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
}
{
assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
}
];
boot = {

View file

@ -109,6 +109,37 @@ in
'';
};
autoPrune = {
enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Whether to periodically prune Podman resources. If enabled, a
systemd timer will run `podman system prune -f`
as specified by the `dates` option.
'';
};
flags = mkOption {
type = types.listOf types.str;
default = [];
example = [ "--all" ];
description = lib.mdDoc ''
Any additional flags passed to {command}`podman system prune`.
'';
};
dates = mkOption {
default = "weekly";
type = types.str;
description = lib.mdDoc ''
Specification (in the format described by
{manpage}`systemd.time(7)`) of the time at
which the prune will occur.
'';
};
};
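
    As a usage sketch, the new options could be combined roughly as follows in a system configuration; the dates and flags values here are illustrative choices, not defaults:

      # Hypothetical configuration: prune unused Podman data daily,
      # including unused images via --all.
      virtualisation.podman = {
        enable = true;
        autoPrune = {
          enable = true;
          dates = "daily";
          flags = [ "--all" ];
        };
      };
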
package = lib.mkOption {
type = types.package;
default = podmanPackage;
@ -151,6 +182,23 @@ in
ExecStart = [ "" "${cfg.package}/bin/podman $LOGGING system service" ];
};
systemd.services.podman-prune = {
description = "Prune podman resources";
restartIfChanged = false;
unitConfig.X-StopOnRemoval = false;
serviceConfig.Type = "oneshot";
script = ''
${cfg.package}/bin/podman system prune -f ${toString cfg.autoPrune.flags}
'';
startAt = lib.optional cfg.autoPrune.enable cfg.autoPrune.dates;
after = [ "podman.service" ];
requires = [ "podman.service" ];
};
systemd.sockets.podman.wantedBy = [ "sockets.target" ];
systemd.sockets.podman.socketConfig.SocketGroup = "podman";

View file

@ -144,7 +144,11 @@
in {
name = "acme";
meta.maintainers = lib.teams.acme.members;
meta = {
maintainers = lib.teams.acme.members;
# Hard timeout in seconds. Average run time is about 7 minutes.
timeout = 1800;
};
nodes = {
# The fake ACME server which will respond to client requests
@ -357,6 +361,30 @@ in {
import time
TOTAL_RETRIES = 20
class BackoffTracker(object):
delay = 1
increment = 1
def handle_fail(self, retries, message) -> int:
assert retries < TOTAL_RETRIES, message
print(f"Retrying in {self.delay}s, {retries + 1}/{TOTAL_RETRIES}")
time.sleep(self.delay)
# Only increment after the first try
if retries == 0:
self.delay += self.increment
self.increment *= 2
return retries + 1
backoff = BackoffTracker()
def switch_to(node, name):
# On first switch, this will create a symlink to the current system so that we can
# quickly switch between derivations
@ -404,9 +432,7 @@ in {
assert False
def check_connection(node, domain, retries=3):
assert retries >= 0, f"Failed to connect to https://{domain}"
def check_connection(node, domain, retries=0):
result = node.succeed(
"openssl s_client -brief -verify 2 -CAfile /tmp/ca.crt"
f" -servername {domain} -connect {domain}:443 < /dev/null 2>&1"
@ -414,13 +440,11 @@ in {
for line in result.lower().split("\n"):
if "verification" in line and "error" in line:
time.sleep(3)
return check_connection(node, domain, retries - 1)
retries = backoff.handle_fail(retries, f"Failed to connect to https://{domain}")
return check_connection(node, domain, retries)
def check_connection_key_bits(node, domain, bits, retries=3):
assert retries >= 0, f"Did not find expected number of bits ({bits}) in key"
def check_connection_key_bits(node, domain, bits, retries=0):
result = node.succeed(
"openssl s_client -CAfile /tmp/ca.crt"
f" -servername {domain} -connect {domain}:443 < /dev/null"
@ -429,13 +453,11 @@ in {
print("Key type:", result)
if bits not in result:
time.sleep(3)
return check_connection_key_bits(node, domain, bits, retries - 1)
retries = backoff.handle_fail(retries, f"Did not find expected number of bits ({bits}) in key")
return check_connection_key_bits(node, domain, bits, retries)
def check_stapling(node, domain, retries=3):
assert retries >= 0, "OCSP Stapling check failed"
def check_stapling(node, domain, retries=0):
# Pebble doesn't provide a full OCSP responder, so just check the URL
result = node.succeed(
"openssl s_client -CAfile /tmp/ca.crt"
@ -445,21 +467,19 @@ in {
print("OCSP Responder URL:", result)
if "${caDomain}:4002" not in result.lower():
time.sleep(3)
return check_stapling(node, domain, retries - 1)
retries = backoff.handle_fail(retries, "OCSP Stapling check failed")
return check_stapling(node, domain, retries)
def download_ca_certs(node, retries=5):
assert retries >= 0, "Failed to connect to pebble to download root CA certs"
def download_ca_certs(node, retries=0):
exit_code, _ = node.execute("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
exit_code_2, _ = node.execute(
"curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt"
)
if exit_code + exit_code_2 > 0:
time.sleep(3)
return download_ca_certs(node, retries - 1)
retries = backoff.handle_fail(retries, "Failed to connect to pebble to download root CA certs")
return download_ca_certs(node, retries)
start_all()

View file

@ -1,7 +1,7 @@
{ pkgs, lib, ... }: {
name = "aesmd";
meta = {
maintainers = with lib.maintainers; [ veehaitch ];
maintainers = with lib.maintainers; [ trundle veehaitch ];
};
nodes.machine = { lib, ... }: {
@ -25,38 +25,78 @@
# We don't have a real SGX machine in NixOS tests
systemd.services.aesmd.unitConfig.AssertPathExists = lib.mkForce [ ];
specialisation = {
withQuoteProvider.configuration = { ... }: {
services.aesmd = {
quoteProviderLibrary = pkgs.sgx-azure-dcap-client;
environment = {
AZDCAP_DEBUG_LOG_LEVEL = "INFO";
};
};
};
};
};
testScript = ''
with subtest("aesmd.service starts"):
machine.wait_for_unit("aesmd.service")
status, main_pid = machine.systemctl("show --property MainPID --value aesmd.service")
assert status == 0, "Could not get MainPID of aesmd.service"
main_pid = main_pid.strip()
testScript = { nodes, ... }:
let
specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
in
''
def get_aesmd_pid():
status, main_pid = machine.systemctl("show --property MainPID --value aesmd.service")
assert status == 0, "Could not get MainPID of aesmd.service"
return main_pid.strip()
with subtest("aesmd.service runtime directory permissions"):
runtime_dir = "/run/aesmd";
res = machine.succeed(f"stat -c '%a %U %G' {runtime_dir}").strip()
assert "750 aesmd sgx" == res, f"{runtime_dir} does not have the expected permissions: {res}"
with subtest("aesmd.service starts"):
machine.wait_for_unit("aesmd.service")
with subtest("aesm.socket available on host"):
socket_path = "/var/run/aesmd/aesm.socket"
machine.wait_until_succeeds(f"test -S {socket_path}")
machine.succeed(f"test 777 -eq $(stat -c '%a' {socket_path})")
for op in [ "-r", "-w", "-x" ]:
machine.succeed(f"sudo -u sgxtest test {op} {socket_path}")
machine.fail(f"sudo -u nosgxtest test {op} {socket_path}")
main_pid = get_aesmd_pid()
with subtest("Copies white_list_cert_to_be_verify.bin"):
whitelist_path = "/var/opt/aesmd/data/white_list_cert_to_be_verify.bin"
whitelist_perms = machine.succeed(
f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/stat -c '%a' {whitelist_path}"
).strip()
assert "644" == whitelist_perms, f"white_list_cert_to_be_verify.bin has permissions {whitelist_perms}"
with subtest("aesmd.service runtime directory permissions"):
runtime_dir = "/run/aesmd";
res = machine.succeed(f"stat -c '%a %U %G' {runtime_dir}").strip()
assert "750 aesmd sgx" == res, f"{runtime_dir} does not have the expected permissions: {res}"
with subtest("Writes and binds aesm.conf in service namespace"):
aesmd_config = machine.succeed(f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/cat /etc/aesmd.conf")
with subtest("aesm.socket available on host"):
socket_path = "/var/run/aesmd/aesm.socket"
machine.wait_until_succeeds(f"test -S {socket_path}")
machine.succeed(f"test 777 -eq $(stat -c '%a' {socket_path})")
for op in [ "-r", "-w", "-x" ]:
machine.succeed(f"sudo -u sgxtest test {op} {socket_path}")
machine.fail(f"sudo -u nosgxtest test {op} {socket_path}")
assert aesmd_config == "whitelist url = http://nixos.org\nproxy type = direct\ndefault quoting type = ecdsa_256\n", "aesmd.conf differs"
'';
with subtest("Copies white_list_cert_to_be_verify.bin"):
whitelist_path = "/var/opt/aesmd/data/white_list_cert_to_be_verify.bin"
whitelist_perms = machine.succeed(
f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/stat -c '%a' {whitelist_path}"
).strip()
assert "644" == whitelist_perms, f"white_list_cert_to_be_verify.bin has permissions {whitelist_perms}"
with subtest("Writes and binds aesm.conf in service namespace"):
aesmd_config = machine.succeed(f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/cat /etc/aesmd.conf")
assert aesmd_config == "whitelist url = http://nixos.org\nproxy type = direct\ndefault quoting type = ecdsa_256\n", "aesmd.conf differs"
with subtest("aesmd.service without quote provider library has correct LD_LIBRARY_PATH"):
status, environment = machine.systemctl("show --property Environment --value aesmd.service")
assert status == 0, "Could not get Environment of aesmd.service"
env_by_name = dict(entry.split("=", 1) for entry in environment.split())
assert not env_by_name["LD_LIBRARY_PATH"], "LD_LIBRARY_PATH is not empty"
with subtest("aesmd.service with quote provider library starts"):
machine.succeed('${specialisations}/withQuoteProvider/bin/switch-to-configuration test')
machine.wait_for_unit("aesmd.service")
main_pid = get_aesmd_pid()
with subtest("aesmd.service with quote provider library has correct LD_LIBRARY_PATH"):
ld_library_path = machine.succeed(f"xargs -0 -L1 -a /proc/{main_pid}/environ | grep LD_LIBRARY_PATH")
assert ld_library_path.startswith("LD_LIBRARY_PATH=${pkgs.sgx-azure-dcap-client}/lib:"), \
"LD_LIBRARY_PATH is not set to the configured quote provider library"
with subtest("aesmd.service with quote provider library has set AZDCAP_DEBUG_LOG_LEVEL"):
azdcp_debug_log_level = machine.succeed(f"xargs -0 -L1 -a /proc/{main_pid}/environ | grep AZDCAP_DEBUG_LOG_LEVEL")
assert azdcp_debug_log_level == "AZDCAP_DEBUG_LOG_LEVEL=INFO\n", "AZDCAP_DEBUG_LOG_LEVEL is not set to INFO"
'';
}

View file

@ -69,7 +69,7 @@ in {
_3proxy = runTest ./3proxy.nix;
acme = runTest ./acme.nix;
adguardhome = runTest ./adguardhome.nix;
aesmd = runTest ./aesmd.nix;
aesmd = runTestOn ["x86_64-linux"] ./aesmd.nix;
agate = runTest ./web-servers/agate.nix;
agda = handleTest ./agda.nix {};
airsonic = handleTest ./airsonic.nix {};
@ -96,6 +96,7 @@ in {
blockbook-frontend = handleTest ./blockbook-frontend.nix {};
blocky = handleTest ./blocky.nix {};
boot = handleTestOn ["x86_64-linux" "aarch64-linux"] ./boot.nix {};
bootspec = handleTestOn ["x86_64-linux"] ./bootspec.nix {};
boot-stage1 = handleTest ./boot-stage1.nix {};
borgbackup = handleTest ./borgbackup.nix {};
botamusique = handleTest ./botamusique.nix {};
@ -256,6 +257,7 @@ in {
haste-server = handleTest ./haste-server.nix {};
haproxy = handleTest ./haproxy.nix {};
hardened = handleTest ./hardened.nix {};
headscale = handleTest ./headscale.nix {};
healthchecks = handleTest ./web-apps/healthchecks.nix {};
hbase2 = handleTest ./hbase.nix { package=pkgs.hbase2; };
hbase_2_4 = handleTest ./hbase.nix { package=pkgs.hbase_2_4; };

View file

@ -43,7 +43,7 @@ in
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/bootspec/boot.json")
machine.succeed("test -e /run/current-system/boot.json")
'';
};
@ -65,7 +65,7 @@ in
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/bootspec/boot.json")
machine.succeed("test -e /run/current-system/boot.json")
'';
};
@ -86,7 +86,33 @@ in
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/boot.json")
'';
};
  # Check that the initrd creates corresponding entries in the bootspec.
initrd = makeTest {
name = "bootspec-with-initrd";
meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
nodes.machine = {
imports = [ standard ];
environment.systemPackages = [ pkgs.jq ];
# It's probably the case, but we want to make it explicit here.
boot.initrd.enable = true;
};
testScript = ''
import json
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/bootspec/boot.json")
bootspec = json.loads(machine.succeed("jq -r '.v1' /run/current-system/bootspec/boot.json"))
      assert all(key in bootspec for key in ('initrd', 'initrdSecrets')), "Bootspec should contain initrd and initrdSecrets fields when initrd is enabled"
'';
};
@ -107,11 +133,11 @@ in
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/bootspec/boot.json")
machine.succeed("test -e /run/current-system/specialisation/something/bootspec/boot.json")
machine.succeed("test -e /run/current-system/boot.json")
machine.succeed("test -e /run/current-system/specialisation/something/boot.json")
sp_in_parent = json.loads(machine.succeed("jq -r '.v1.specialisation.something' /run/current-system/bootspec/boot.json"))
sp_in_fs = json.loads(machine.succeed("cat /run/current-system/specialisation/something/bootspec/boot.json"))
sp_in_parent = json.loads(machine.succeed("jq -r '.v1.specialisation.something' /run/current-system/boot.json"))
sp_in_fs = json.loads(machine.succeed("cat /run/current-system/specialisation/something/boot.json"))
assert sp_in_parent == sp_in_fs['v1'], "Bootspecs of the same specialisation are different!"
'';
@ -135,7 +161,7 @@ in
machine.wait_for_unit("multi-user.target")
current_os_release = machine.succeed("cat /etc/os-release")
bootspec_os_release = machine.succeed("cat $(jq -r '.v1.extensions.osRelease' /run/current-system/bootspec/boot.json)")
bootspec_os_release = machine.succeed("cat $(jq -r '.v1.extensions.osRelease' /run/current-system/boot.json)")
assert current_os_release == bootspec_os_release, "Filename referenced by extension has unexpected contents"
'';

17
nixos/tests/headscale.nix Normal file
View file

@ -0,0 +1,17 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "headscale";
meta.maintainers = with lib.maintainers; [ misterio77 ];
nodes.machine = { ... }: {
services.headscale.enable = true;
environment.systemPackages = [ pkgs.headscale ];
};
testScript = ''
machine.wait_for_unit("headscale")
machine.wait_for_open_port(8080)
    # Test basic functionality
machine.succeed("headscale namespaces create test")
machine.succeed("headscale preauthkeys -n test create")
'';
})

View file

@ -64,7 +64,6 @@ let
# wait for reader to be ready
machine.wait_for_file("${readyFile}")
machine.sleep(1)
# send all keys
for key in inputs:
@ -78,9 +77,18 @@ let
with open("${pkgs.writeText "tests.json" (builtins.toJSON tests)}") as json_file:
tests = json.load(json_file)
# These environments used to run in the opposite order, causing the
# following error at openvt startup.
#
# openvt: Couldn't deallocate console 1
#
# This error did not appear in successful runs.
    # I don't know the exact cause, but it seems that openvt and X are
# fighting over the virtual terminal. This does not appear to be a problem
# when the X test runs first.
keymap_environments = {
"VT Keymap": "openvt -sw --",
"Xorg Keymap": "DISPLAY=:0 xterm -title testterm -class testterm -fullscreen -e",
"VT Keymap": "openvt -sw --",
}
machine.wait_for_x()

View file

@ -61,7 +61,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
specialisation.reloadWithErrorsSystem.configuration = {
services.nginx.package = pkgs.nginxMainline;
services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
services.nginx.virtualHosts."hello".extraConfig = "access_log /does/not/exist.log;";
};
};
};

View file

@ -1,7 +1,7 @@
import ./make-test-python.nix ({pkgs, lib, ...}:
let
# A filesystem image with a (presumably) bootable debian
debianImage = pkgs.vmTools.diskImageFuns.debian9i386 {
debianImage = pkgs.vmTools.diskImageFuns.debian11i386 {
# os-prober cannot detect systems installed on disks without a partition table
# so we create the disk ourselves
createRootFS = with pkgs; ''

View file

@ -1172,6 +1172,25 @@ let
'';
};
statsd = {
exporterConfig = {
enable = true;
};
exporterTest = ''
wait_for_unit("prometheus-statsd-exporter.service")
wait_for_open_port(9102)
succeed("curl http://localhost:9102/metrics | grep 'statsd_exporter_build_info{'")
succeed(
"echo 'test.udp:1|c' > /dev/udp/localhost/9125",
"curl http://localhost:9102/metrics | grep 'test_udp 1'",
)
succeed(
"echo 'test.tcp:1|c' > /dev/tcp/localhost/9125",
"curl http://localhost:9102/metrics | grep 'test_tcp 1'",
)
'';
};
surfboard = {
exporterConfig = {
enable = true;

View file

@ -49,8 +49,8 @@ let
start_all()
machine.wait_for_unit('graphical.target')
machine.wait_until_succeeds('pgrep -x codium')
codium_running.wait()
with codium_running:
# Wait until vscodium is visible. "File" is in the menu bar.
machine.wait_for_text('Get Started')

View file

@ -41,6 +41,9 @@ import ../make-test-python.nix ({pkgs, ...}:
server = { pkgs, ... }: {
environment = {
etc = {
"peertube/secrets-peertube".text = ''
063d9c60d519597acef26003d5ecc32729083965d09181ef3949200cbe5f09ee
'';
"peertube/password-posgressql-db".text = ''
0gUN0C1mgST6czvjZ8T9
'';
@ -67,6 +70,10 @@ import ../make-test-python.nix ({pkgs, ...}:
localDomain = "peertube.local";
enableWebHttps = false;
secrets = {
secretsFile = "/etc/peertube/secrets-peertube";
};
database = {
host = "192.168.2.10";
name = "peertube_local";