0
0
Fork 0
mirror of https://github.com/NixOS/nixpkgs.git synced 2025-07-13 21:50:33 +03:00

Merge remote-tracking branch 'upstream/staging-next' into staging

This commit is contained in:
Silvan Mosberger 2025-04-02 18:21:52 +02:00
commit e52d633a63
1744 changed files with 988732 additions and 514579 deletions

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.gonic;
settingsFormat = pkgs.formats.keyValue {
@ -42,7 +47,9 @@ in
ExecStart =
let
# these values are null by default but should not appear in the final config
filteredSettings = lib.filterAttrs (n: v: !((n == "tls-cert" || n == "tls-key") && v == null)) cfg.settings;
filteredSettings = lib.filterAttrs (
n: v: !((n == "tls-cert" || n == "tls-key") && v == null)
) cfg.settings;
in
"${pkgs.gonic}/bin/gonic -config-path ${settingsFormat.generate "gonic" filteredSettings}";
DynamicUser = true;
@ -56,16 +63,22 @@ in
cfg.settings.playlists-path
cfg.settings.podcast-path
];
BindReadOnlyPaths = [
# gonic can access scrobbling services
"-/etc/resolv.conf"
"${config.security.pki.caBundle}:/etc/ssl/certs/ca-certificates.crt"
builtins.storeDir
] ++ cfg.settings.music-path
++ lib.optional (cfg.settings.tls-cert != null) cfg.settings.tls-cert
++ lib.optional (cfg.settings.tls-key != null) cfg.settings.tls-key;
BindReadOnlyPaths =
[
# gonic can access scrobbling services
"-/etc/resolv.conf"
"${config.security.pki.caBundle}:/etc/ssl/certs/ca-certificates.crt"
builtins.storeDir
]
++ cfg.settings.music-path
++ lib.optional (cfg.settings.tls-cert != null) cfg.settings.tls-cert
++ lib.optional (cfg.settings.tls-key != null) cfg.settings.tls-key;
CapabilityBoundingSet = "";
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
PrivateDevices = true;
PrivateUsers = true;
@ -76,7 +89,10 @@ in
ProtectKernelModules = true;
ProtectKernelTunables = true;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged" ];
SystemCallFilter = [
"@system-service"
"~@privileged"
];
RestrictRealtime = true;
LockPersonality = true;
MemoryDenyWriteExecute = true;

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
name = "mpd";
@ -7,13 +12,17 @@ let
gid = config.ids.gids.mpd;
cfg = config.services.mpd;
credentialsPlaceholder = (creds:
credentialsPlaceholder = (
creds:
let
placeholders = (lib.imap0
(i: c: ''password "{{password-${toString i}}}@${lib.concatStringsSep "," c.permissions}"'')
creds);
placeholders = (
lib.imap0 (
i: c: ''password "{{password-${toString i}}}@${lib.concatStringsSep "," c.permissions}"''
) creds
);
in
lib.concatStringsSep "\n" placeholders);
lib.concatStringsSep "\n" placeholders
);
mpdConf = pkgs.writeText "mpd.conf" ''
# This file was automatically generated by NixOS. Edit mpd's configuration
@ -28,8 +37,10 @@ let
state_file "${cfg.dataDir}/state"
sticker_file "${cfg.dataDir}/sticker.sql"
${lib.optionalString (cfg.network.listenAddress != "any") ''bind_to_address "${cfg.network.listenAddress}"''}
${lib.optionalString (cfg.network.port != 6600) ''port "${toString cfg.network.port}"''}
${lib.optionalString (
cfg.network.listenAddress != "any"
) ''bind_to_address "${cfg.network.listenAddress}"''}
${lib.optionalString (cfg.network.port != 6600) ''port "${toString cfg.network.port}"''}
${lib.optionalString (cfg.fluidsynth) ''
decoder {
plugin "fluidsynth"
@ -37,12 +48,13 @@ let
}
''}
${lib.optionalString (cfg.credentials != []) (credentialsPlaceholder cfg.credentials)}
${lib.optionalString (cfg.credentials != [ ]) (credentialsPlaceholder cfg.credentials)}
${cfg.extraConfig}
'';
in {
in
{
###### interface
@ -160,33 +172,53 @@ in {
};
credentials = lib.mkOption {
type = lib.types.listOf (lib.types.submodule {
options = {
passwordFile = lib.mkOption {
type = lib.types.path;
description = ''
Path to file containing the password.
'';
type = lib.types.listOf (
lib.types.submodule {
options = {
passwordFile = lib.mkOption {
type = lib.types.path;
description = ''
Path to file containing the password.
'';
};
permissions =
let
perms = [
"read"
"add"
"control"
"admin"
];
in
lib.mkOption {
type = lib.types.listOf (lib.types.enum perms);
default = [ "read" ];
description = ''
List of permissions that are granted with this password.
Permissions can be "${lib.concatStringsSep "\", \"" perms}".
'';
};
};
permissions = let
perms = ["read" "add" "control" "admin"];
in lib.mkOption {
type = lib.types.listOf (lib.types.enum perms);
default = [ "read" ];
description = ''
List of permissions that are granted with this password.
Permissions can be "${lib.concatStringsSep "\", \"" perms}".
'';
};
};
});
}
);
description = ''
Credentials and permissions for accessing the mpd server.
'';
default = [];
default = [ ];
example = [
{passwordFile = "/var/lib/secrets/mpd_readonly_password"; permissions = [ "read" ];}
{passwordFile = "/var/lib/secrets/mpd_admin_password"; permissions = ["read" "add" "control" "admin"];}
{
passwordFile = "/var/lib/secrets/mpd_readonly_password";
permissions = [ "read" ];
}
{
passwordFile = "/var/lib/secrets/mpd_admin_password";
permissions = [
"read"
"add"
"control"
"admin"
];
}
];
};
@ -201,7 +233,6 @@ in {
};
###### implementation
config = lib.mkIf cfg.enable {
@ -212,10 +243,15 @@ in {
systemd.sockets.mpd = lib.mkIf cfg.startWhenNeeded {
wantedBy = [ "sockets.target" ];
listenStreams = [
"" # Note: this is needed to override the upstream unit
(if pkgs.lib.hasPrefix "/" cfg.network.listenAddress
then cfg.network.listenAddress
else "${lib.optionalString (cfg.network.listenAddress != "any") "${cfg.network.listenAddress}:"}${toString cfg.network.port}")
"" # Note: this is needed to override the upstream unit
(
if pkgs.lib.hasPrefix "/" cfg.network.listenAddress then
cfg.network.listenAddress
else
"${
lib.optionalString (cfg.network.listenAddress != "any") "${cfg.network.listenAddress}:"
}${toString cfg.network.port}"
)
];
};
@ -226,23 +262,36 @@ in {
''
set -euo pipefail
install -m 600 ${mpdConf} /run/mpd/mpd.conf
'' + lib.optionalString (cfg.credentials != [])
(lib.concatStringsSep "\n"
(lib.imap0
(i: c: ''${pkgs.replace-secret}/bin/replace-secret '{{password-${toString i}}}' '${c.passwordFile}' /run/mpd/mpd.conf'')
cfg.credentials));
''
+ lib.optionalString (cfg.credentials != [ ]) (
lib.concatStringsSep "\n" (
lib.imap0 (
i: c:
''${pkgs.replace-secret}/bin/replace-secret '{{password-${toString i}}}' '${c.passwordFile}' /run/mpd/mpd.conf''
) cfg.credentials
)
);
serviceConfig =
{
User = "${cfg.user}";
# Note: the first "" overrides the ExecStart from the upstream unit
ExecStart = [ "" "${pkgs.mpd}/bin/mpd --systemd /run/mpd/mpd.conf" ];
RuntimeDirectory = "mpd";
StateDirectory = []
++ lib.optionals (cfg.dataDir == "/var/lib/${name}") [ name ]
++ lib.optionals (cfg.playlistDirectory == "/var/lib/${name}/playlists") [ name "${name}/playlists" ]
++ lib.optionals (cfg.musicDirectory == "/var/lib/${name}/music") [ name "${name}/music" ];
};
serviceConfig = {
User = "${cfg.user}";
# Note: the first "" overrides the ExecStart from the upstream unit
ExecStart = [
""
"${pkgs.mpd}/bin/mpd --systemd /run/mpd/mpd.conf"
];
RuntimeDirectory = "mpd";
StateDirectory =
[ ]
++ lib.optionals (cfg.dataDir == "/var/lib/${name}") [ name ]
++ lib.optionals (cfg.playlistDirectory == "/var/lib/${name}/playlists") [
name
"${name}/playlists"
]
++ lib.optionals (cfg.musicDirectory == "/var/lib/${name}/music") [
name
"${name}/music"
];
};
};
users.users = lib.optionalAttrs (cfg.user == name) {

File diff suppressed because it is too large Load diff

View file

@ -1,9 +1,23 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.sanoid;
datasetSettingsType = with lib.types;
(attrsOf (nullOr (oneOf [ str int bool (listOf str) ]))) // {
datasetSettingsType =
with lib.types;
(attrsOf (
nullOr (oneOf [
str
int
bool
(listOf str)
])
))
// {
description = "dataset/template options";
};
@ -48,10 +62,13 @@ let
datasetOptions = rec {
use_template = lib.mkOption {
description = "Names of the templates to use for this dataset.";
type = lib.types.listOf (lib.types.str // {
check = (lib.types.enum (lib.attrNames cfg.templates)).check;
description = "configured template name";
});
type = lib.types.listOf (
lib.types.str
// {
check = (lib.types.enum (lib.attrNames cfg.templates)).check;
description = "configured template name";
}
);
default = [ ];
};
useTemplate = use_template;
@ -63,7 +80,12 @@ let
recursively in an atomic way without the possibility to
override settings for child datasets.
'';
type = with lib.types; oneOf [ bool (enum [ "zfs" ]) ];
type =
with lib.types;
oneOf [
bool
(enum [ "zfs" ])
];
default = false;
};
@ -80,26 +102,32 @@ let
# Function to build "zfs allow" and "zfs unallow" commands for the
# filesystems we've delegated permissions to.
buildAllowCommand = zfsAction: permissions: dataset: lib.escapeShellArgs [
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
"-+/run/booted-system/sw/bin/zfs"
zfsAction
"sanoid"
(lib.concatStringsSep "," permissions)
dataset
];
buildAllowCommand =
zfsAction: permissions: dataset:
lib.escapeShellArgs [
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
"-+/run/booted-system/sw/bin/zfs"
zfsAction
"sanoid"
(lib.concatStringsSep "," permissions)
dataset
];
configFile =
let
mkValueString = v:
if lib.isList v then lib.concatStringsSep "," v
else lib.generators.mkValueStringDefault { } v;
mkValueString =
v: if lib.isList v then lib.concatStringsSep "," v else lib.generators.mkValueStringDefault { } v;
mkKeyValue = k: v:
if v == null then ""
else if k == "processChildrenOnly" then ""
else if k == "useTemplate" then ""
else lib.generators.mkKeyValueDefault { inherit mkValueString; } "=" k v;
mkKeyValue =
k: v:
if v == null then
""
else if k == "processChildrenOnly" then
""
else if k == "useTemplate" then
""
else
lib.generators.mkKeyValueDefault { inherit mkValueString; } "=" k v;
in
lib.generators.toINI { inherit mkKeyValue; } cfg.settings;
@ -111,7 +139,7 @@ in
options.services.sanoid = {
enable = lib.mkEnableOption "Sanoid ZFS snapshotting service";
package = lib.mkPackageOption pkgs "sanoid" {};
package = lib.mkPackageOption pkgs "sanoid" { };
interval = lib.mkOption {
type = lib.types.str;
@ -126,21 +154,32 @@ in
};
datasets = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ config, options, ... }: {
freeformType = datasetSettingsType;
options = commonOptions // datasetOptions;
config.use_template = lib.modules.mkAliasAndWrapDefsWithPriority lib.id (options.useTemplate or { });
config.process_children_only = lib.modules.mkAliasAndWrapDefsWithPriority lib.id (options.processChildrenOnly or { });
}));
type = lib.types.attrsOf (
lib.types.submodule (
{ config, options, ... }:
{
freeformType = datasetSettingsType;
options = commonOptions // datasetOptions;
config.use_template = lib.modules.mkAliasAndWrapDefsWithPriority lib.id (
options.useTemplate or { }
);
config.process_children_only = lib.modules.mkAliasAndWrapDefsWithPriority lib.id (
options.processChildrenOnly or { }
);
}
)
);
default = { };
description = "Datasets to snapshot.";
};
templates = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule {
freeformType = datasetSettingsType;
options = commonOptions;
});
type = lib.types.attrsOf (
lib.types.submodule {
freeformType = datasetSettingsType;
options = commonOptions;
}
);
default = { };
description = "Templates for datasets.";
};
@ -157,7 +196,11 @@ in
extraArgs = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "--verbose" "--readonly" "--debug" ];
example = [
"--verbose"
"--readonly"
"--debug"
];
description = ''
Extra arguments to pass to sanoid. See
<https://github.com/jimsalterjrs/sanoid/#sanoid-command-line-options>
@ -177,14 +220,29 @@ in
systemd.services.sanoid = {
description = "Sanoid snapshot service";
serviceConfig = {
ExecStartPre = (map (buildAllowCommand "allow" [ "snapshot" "mount" "destroy" ]) datasets);
ExecStopPost = (map (buildAllowCommand "unallow" [ "snapshot" "mount" "destroy" ]) datasets);
ExecStart = lib.escapeShellArgs ([
"${cfg.package}/bin/sanoid"
"--cron"
"--configdir"
(pkgs.writeTextDir "sanoid.conf" configFile)
] ++ cfg.extraArgs);
ExecStartPre = (
map (buildAllowCommand "allow" [
"snapshot"
"mount"
"destroy"
]) datasets
);
ExecStopPost = (
map (buildAllowCommand "unallow" [
"snapshot"
"mount"
"destroy"
]) datasets
);
ExecStart = lib.escapeShellArgs (
[
"${cfg.package}/bin/sanoid"
"--cron"
"--configdir"
(pkgs.writeTextDir "sanoid.conf" configFile)
]
++ cfg.extraArgs
);
User = "sanoid";
Group = "sanoid";
DynamicUser = true;

View file

@ -1,54 +1,69 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.syncoid;
# Extract local dataset names (so no datasets containing "@")
localDatasetName = d: lib.optionals (d != null) (
let m = builtins.match "([^/@]+[^@]*)" d; in
lib.optionals (m != null) m
);
localDatasetName =
d:
lib.optionals (d != null) (
let
m = builtins.match "([^/@]+[^@]*)" d;
in
lib.optionals (m != null) m
);
# Escape as required by: https://www.freedesktop.org/software/systemd/man/systemd.unit.html
escapeUnitName = name:
lib.concatMapStrings (s: if lib.isList s then "-" else s)
(builtins.split "[^a-zA-Z0-9_.\\-]+" name);
escapeUnitName =
name:
lib.concatMapStrings (s: if lib.isList s then "-" else s) (
builtins.split "[^a-zA-Z0-9_.\\-]+" name
);
# Function to build "zfs allow" commands for the filesystems we've delegated
# permissions to. It also checks if the target dataset exists before
# delegating permissions, if it doesn't exist we delegate it to the parent
# dataset (if it exists). This should solve the case of provisioning new
# datasets.
buildAllowCommand = permissions: dataset: (
"-+${pkgs.writeShellScript "zfs-allow-${dataset}" ''
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
buildAllowCommand =
permissions: dataset:
(
"-+${pkgs.writeShellScript "zfs-allow-${dataset}" ''
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
# Run a ZFS list on the dataset to check if it exists
if ${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"list"
dataset
]} 2> /dev/null; then
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"allow"
cfg.user
(lib.concatStringsSep "," permissions)
dataset
]}
${lib.optionalString ((builtins.dirOf dataset) != ".") ''
else
# Run a ZFS list on the dataset to check if it exists
if ${
lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"list"
dataset
]
} 2> /dev/null; then
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"allow"
cfg.user
(lib.concatStringsSep "," permissions)
# Remove the last part of the path
(builtins.dirOf dataset)
dataset
]}
''}
fi
''}"
);
${lib.optionalString ((builtins.dirOf dataset) != ".") ''
else
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"allow"
cfg.user
(lib.concatStringsSep "," permissions)
# Remove the last part of the path
(builtins.dirOf dataset)
]}
''}
fi
''}"
);
# Function to build "zfs unallow" commands for the filesystems we've
# delegated permissions to. Here we unallow both the target but also
@ -56,26 +71,30 @@ let
# knowing if the allow command did execute on the parent dataset or
# not in the pre-hook. We can't run the same if in the post hook
# since the dataset should have been created at this point.
buildUnallowCommand = permissions: dataset: (
"-+${pkgs.writeShellScript "zfs-unallow-${dataset}" ''
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"unallow"
cfg.user
(lib.concatStringsSep "," permissions)
dataset
]}
${lib.optionalString ((builtins.dirOf dataset) != ".") (lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"unallow"
cfg.user
(lib.concatStringsSep "," permissions)
# Remove the last part of the path
(builtins.dirOf dataset)
])}
''}"
);
buildUnallowCommand =
permissions: dataset:
(
"-+${pkgs.writeShellScript "zfs-unallow-${dataset}" ''
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"unallow"
cfg.user
(lib.concatStringsSep "," permissions)
dataset
]}
${lib.optionalString ((builtins.dirOf dataset) != ".") (
lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"unallow"
cfg.user
(lib.concatStringsSep "," permissions)
# Remove the last part of the path
(builtins.dirOf dataset)
]
)}
''}"
);
in
{
@ -84,7 +103,7 @@ in
options.services.syncoid = {
enable = lib.mkEnableOption "Syncoid ZFS synchronization service";
package = lib.mkPackageOption pkgs "sanoid" {};
package = lib.mkPackageOption pkgs "sanoid" { };
interval = lib.mkOption {
type = lib.types.str;
@ -131,7 +150,14 @@ in
localSourceAllow = lib.mkOption {
type = lib.types.listOf lib.types.str;
# Permissions snapshot and destroy are in case --no-sync-snap is not used
default = [ "bookmark" "hold" "send" "snapshot" "destroy" "mount" ];
default = [
"bookmark"
"hold"
"send"
"snapshot"
"destroy"
"mount"
];
description = ''
Permissions granted for the {option}`services.syncoid.user` user
for local source datasets. See
@ -142,8 +168,21 @@ in
localTargetAllow = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ "change-key" "compression" "create" "mount" "mountpoint" "receive" "rollback" ];
example = [ "create" "mount" "receive" "rollback" ];
default = [
"change-key"
"compression"
"create"
"mount"
"mountpoint"
"receive"
"rollback"
];
example = [
"create"
"mount"
"receive"
"rollback"
];
description = ''
Permissions granted for the {option}`services.syncoid.user` user
for local target datasets. See
@ -176,111 +215,116 @@ in
};
commands = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
options = {
source = lib.mkOption {
type = lib.types.str;
example = "pool/dataset";
description = ''
Source ZFS dataset. Can be either local or remote. Defaults to
the attribute name.
'';
};
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
source = lib.mkOption {
type = lib.types.str;
example = "pool/dataset";
description = ''
Source ZFS dataset. Can be either local or remote. Defaults to
the attribute name.
'';
};
target = lib.mkOption {
type = lib.types.str;
example = "user@server:pool/dataset";
description = ''
Target ZFS dataset. Can be either local
(«pool/dataset») or remote
(«user@server:pool/dataset»).
'';
};
target = lib.mkOption {
type = lib.types.str;
example = "user@server:pool/dataset";
description = ''
Target ZFS dataset. Can be either local
(«pool/dataset») or remote
(«user@server:pool/dataset»).
'';
};
recursive = lib.mkEnableOption ''the transfer of child datasets'';
recursive = lib.mkEnableOption ''the transfer of child datasets'';
sshKey = lib.mkOption {
type = with lib.types; nullOr (coercedTo path toString str);
description = ''
SSH private key file to use to login to the remote system.
Defaults to {option}`services.syncoid.sshKey` option.
'';
};
sshKey = lib.mkOption {
type = with lib.types; nullOr (coercedTo path toString str);
description = ''
SSH private key file to use to login to the remote system.
Defaults to {option}`services.syncoid.sshKey` option.
'';
};
localSourceAllow = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
Permissions granted for the {option}`services.syncoid.user` user
for local source datasets. See
<https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html>
for available permissions.
Defaults to {option}`services.syncoid.localSourceAllow` option.
'';
};
localSourceAllow = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
Permissions granted for the {option}`services.syncoid.user` user
for local source datasets. See
<https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html>
for available permissions.
Defaults to {option}`services.syncoid.localSourceAllow` option.
'';
};
localTargetAllow = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
Permissions granted for the {option}`services.syncoid.user` user
for local target datasets. See
<https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html>
for available permissions.
Make sure to include the `change-key` permission if you send raw encrypted datasets,
the `compression` permission if you send raw compressed datasets, and so on.
For remote target datasets you'll have to set your remote user permissions by yourself.
'';
};
localTargetAllow = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
Permissions granted for the {option}`services.syncoid.user` user
for local target datasets. See
<https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html>
for available permissions.
Make sure to include the `change-key` permission if you send raw encrypted datasets,
the `compression` permission if you send raw compressed datasets, and so on.
For remote target datasets you'll have to set your remote user permissions by yourself.
'';
};
sendOptions = lib.mkOption {
type = lib.types.separatedString " ";
default = "";
example = "Lc e";
description = ''
Advanced options to pass to zfs send. Options are specified
without their leading dashes and separated by spaces.
'';
};
sendOptions = lib.mkOption {
type = lib.types.separatedString " ";
default = "";
example = "Lc e";
description = ''
Advanced options to pass to zfs send. Options are specified
without their leading dashes and separated by spaces.
'';
};
recvOptions = lib.mkOption {
type = lib.types.separatedString " ";
default = "";
example = "ux recordsize o compression=lz4";
description = ''
Advanced options to pass to zfs recv. Options are specified
without their leading dashes and separated by spaces.
'';
};
recvOptions = lib.mkOption {
type = lib.types.separatedString " ";
default = "";
example = "ux recordsize o compression=lz4";
description = ''
Advanced options to pass to zfs recv. Options are specified
without their leading dashes and separated by spaces.
'';
};
useCommonArgs = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to add the configured common arguments to this command.
'';
};
useCommonArgs = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to add the configured common arguments to this command.
'';
};
service = lib.mkOption {
type = lib.types.attrs;
default = { };
description = ''
Systemd configuration specific to this syncoid service.
'';
};
service = lib.mkOption {
type = lib.types.attrs;
default = { };
description = ''
Systemd configuration specific to this syncoid service.
'';
};
extraArgs = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "--sshport 2222" ];
description = "Extra syncoid arguments for this command.";
};
};
config = {
source = lib.mkDefault name;
sshKey = lib.mkDefault cfg.sshKey;
localSourceAllow = lib.mkDefault cfg.localSourceAllow;
localTargetAllow = lib.mkDefault cfg.localTargetAllow;
};
}));
extraArgs = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "--sshport 2222" ];
description = "Extra syncoid arguments for this command.";
};
};
config = {
source = lib.mkDefault name;
sshKey = lib.mkDefault cfg.sshKey;
localSourceAllow = lib.mkDefault cfg.localSourceAllow;
localTargetAllow = lib.mkDefault cfg.localTargetAllow;
};
}
)
);
default = { };
example = lib.literalExpression ''
{
@ -310,9 +354,10 @@ in
};
};
systemd.services = lib.mapAttrs'
(name: c:
lib.nameValuePair "syncoid-${escapeUnitName name}" (lib.mkMerge [
systemd.services = lib.mapAttrs' (
name: c:
lib.nameValuePair "syncoid-${escapeUnitName name}" (
lib.mkMerge [
{
description = "Syncoid ZFS synchronization from ${c.source} to ${c.target}";
after = [ "zfs.target" ];
@ -321,25 +366,30 @@ in
path = [ "/run/booted-system/sw/bin/" ];
serviceConfig = {
ExecStartPre =
(map (buildAllowCommand c.localSourceAllow) (localDatasetName c.source)) ++
(map (buildAllowCommand c.localTargetAllow) (localDatasetName c.target));
(map (buildAllowCommand c.localSourceAllow) (localDatasetName c.source))
++ (map (buildAllowCommand c.localTargetAllow) (localDatasetName c.target));
ExecStopPost =
(map (buildUnallowCommand c.localSourceAllow) (localDatasetName c.source)) ++
(map (buildUnallowCommand c.localTargetAllow) (localDatasetName c.target));
ExecStart = lib.escapeShellArgs ([ "${cfg.package}/bin/syncoid" ]
(map (buildUnallowCommand c.localSourceAllow) (localDatasetName c.source))
++ (map (buildUnallowCommand c.localTargetAllow) (localDatasetName c.target));
ExecStart = lib.escapeShellArgs (
[ "${cfg.package}/bin/syncoid" ]
++ lib.optionals c.useCommonArgs cfg.commonArgs
++ lib.optional c.recursive "-r"
++ lib.optionals (c.sshKey != null) [ "--sshkey" c.sshKey ]
++ lib.optionals (c.sshKey != null) [
"--sshkey"
c.sshKey
]
++ c.extraArgs
++ [
"--sendoptions"
c.sendOptions
"--recvoptions"
c.recvOptions
"--no-privilege-elevation"
c.source
c.target
]);
"--sendoptions"
c.sendOptions
"--recvoptions"
c.recvOptions
"--no-privilege-elevation"
c.source
c.target
]
);
User = cfg.user;
Group = cfg.group;
StateDirectory = [ "syncoid" ];
@ -372,14 +422,23 @@ in
ProtectKernelTunables = true;
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RootDirectory = "/run/syncoid/${escapeUnitName name}";
RootDirectoryStartOnly = true;
BindPaths = [ "/dev/zfs" ];
BindReadOnlyPaths = [ builtins.storeDir "/etc" "/run" "/bin/sh" ];
BindReadOnlyPaths = [
builtins.storeDir
"/etc"
"/run"
"/bin/sh"
];
# Avoid useless mounting of RootDirectory= in the own RootDirectory= of ExecStart='s mount namespace.
InaccessiblePaths = [ "-+/run/syncoid/${escapeUnitName name}" ];
MountAPIVFS = true;
@ -409,9 +468,13 @@ in
}
cfg.service
c.service
]))
cfg.commands;
]
)
) cfg.commands;
};
meta.maintainers = with lib.maintainers; [ julm lopsided98 ];
meta.maintainers = with lib.maintainers; [
julm
lopsided98
];
}

View file

@ -1,4 +1,10 @@
{ config, options, pkgs, lib, ... }:
{
config,
options,
pkgs,
lib,
...
}:
let
version = "1.10.1";
cfg = config.services.kubernetes.addons.dns;
@ -7,7 +13,8 @@ let
health = 10054;
metrics = 10055;
};
in {
in
{
options.services.kubernetes.addons.dns = {
enable = lib.mkEnableOption "kubernetes dns addon";
@ -15,11 +22,11 @@ in {
description = "Dns addon clusterIP";
# this default is also what kubernetes uses
default = (
lib.concatStringsSep "." (
lib.take 3 (lib.splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange
default =
(lib.concatStringsSep "." (
lib.take 3 (lib.splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange)
))
) + ".254";
+ ".254";
defaultText = lib.literalMD ''
The `x.y.z.254` IP of
`config.${options.services.kubernetes.apiserver.serviceClusterIpRange}`.
@ -48,7 +55,10 @@ in {
See: <https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/addon-manager/README.md>.
'';
default = "Reconcile";
type = lib.types.enum [ "Reconcile" "EnsureExists" ];
type = lib.types.enum [
"Reconcile"
"EnsureExists"
];
};
coredns = lib.mkOption {
@ -106,8 +116,9 @@ in {
};
config = lib.mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages =
lib.singleton (pkgs.dockerTools.pullImage cfg.coredns);
services.kubernetes.kubelet.seedDockerImages = lib.singleton (
pkgs.dockerTools.pullImage cfg.coredns
);
services.kubernetes.addonManager.bootstrapAddons = {
coredns-cr = {
@ -125,8 +136,16 @@ in {
rules = [
{
apiGroups = [ "" ];
resources = [ "endpoints" "services" "pods" "namespaces" ];
verbs = [ "list" "watch" ];
resources = [
"endpoints"
"services"
"pods"
"namespaces"
];
verbs = [
"list"
"watch"
];
}
{
apiGroups = [ "" ];
@ -136,7 +155,10 @@ in {
{
apiGroups = [ "discovery.k8s.io" ];
resources = [ "endpointslices" ];
verbs = [ "list" "watch" ];
verbs = [
"list"
"watch"
];
}
];
};
@ -219,10 +241,14 @@ in {
spec = {
replicas = cfg.replicas;
selector = {
matchLabels = { k8s-app = "kube-dns"; };
matchLabels = {
k8s-app = "kube-dns";
};
};
strategy = {
rollingUpdate = { maxUnavailable = 1; };
rollingUpdate = {
maxUnavailable = 1;
};
type = "RollingUpdate";
};
template = {
@ -234,7 +260,10 @@ in {
spec = {
containers = [
{
args = [ "-conf" "/etc/coredns/Corefile" ];
args = [
"-conf"
"/etc/coredns/Corefile"
];
image = with cfg.coredns; "${imageName}:${finalImageTag}";
imagePullPolicy = "Never";
livenessProbe = {
@ -358,7 +387,9 @@ in {
protocol = "TCP";
}
];
selector = { k8s-app = "kube-dns"; };
selector = {
k8s-app = "kube-dns";
};
};
};
};

View file

@ -1,4 +1,10 @@
{ config, lib, options, pkgs, ... }:
{
config,
lib,
options,
pkgs,
...
}:
let
top = config.services.kubernetes;
otop = options.services.kubernetes;
@ -6,22 +12,40 @@ let
isRBACEnabled = lib.elem "RBAC" cfg.authorizationMode;
apiserverServiceIP = (lib.concatStringsSep "." (
lib.take 3 (lib.splitString "." cfg.serviceClusterIpRange
)) + ".1");
apiserverServiceIP = (
lib.concatStringsSep "." (lib.take 3 (lib.splitString "." cfg.serviceClusterIpRange)) + ".1"
);
in
{
imports = [
(lib.mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
(lib.mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
(lib.mkRenamedOptionModule
[ "services" "kubernetes" "apiserver" "admissionControl" ]
[ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ]
)
(lib.mkRenamedOptionModule
[ "services" "kubernetes" "apiserver" "address" ]
[ "services" "kubernetes" "apiserver" "bindAddress" ]
)
(lib.mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "insecureBindAddress" ] "")
(lib.mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "insecurePort" ] "")
(lib.mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
(lib.mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
(lib.mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
(lib.mkRenamedOptionModule [ "services" "kubernetes" "etcd" "certFile" ] [ "services" "kubernetes" "apiserver" "etcd" "certFile" ])
(lib.mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
(lib.mkRenamedOptionModule
[ "services" "kubernetes" "etcd" "servers" ]
[ "services" "kubernetes" "apiserver" "etcd" "servers" ]
)
(lib.mkRenamedOptionModule
[ "services" "kubernetes" "etcd" "keyFile" ]
[ "services" "kubernetes" "apiserver" "etcd" "keyFile" ]
)
(lib.mkRenamedOptionModule
[ "services" "kubernetes" "etcd" "certFile" ]
[ "services" "kubernetes" "apiserver" "etcd" "certFile" ]
)
(lib.mkRenamedOptionModule
[ "services" "kubernetes" "etcd" "caFile" ]
[ "services" "kubernetes" "apiserver" "etcd" "caFile" ]
)
];
###### interface
@ -48,8 +72,18 @@ in
Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
<https://kubernetes.io/docs/reference/access-authn-authz/authorization/>
'';
default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AlwaysAllow
type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
default = [
"RBAC"
"Node"
]; # Enabling RBAC by default, although kubernetes default is AlwaysAllow
type = listOf (enum [
"AlwaysAllow"
"AlwaysDeny"
"ABAC"
"Webhook"
"RBAC"
"Node"
]);
};
authorizationPolicy = lib.mkOption {
@ -57,7 +91,7 @@ in
Kubernetes apiserver authorization policy file. See
<https://kubernetes.io/docs/reference/access-authn-authz/authorization/>
'';
default = [];
default = [ ];
type = listOf attrs;
};
@ -92,7 +126,7 @@ in
Kubernetes admission control plugins to disable. See
<https://kubernetes.io/docs/admin/admission-controllers/>
'';
default = [];
default = [ ];
type = listOf str;
};
@ -104,14 +138,24 @@ in
<https://kubernetes.io/docs/admin/admission-controllers/>
'';
default = [
"NamespaceLifecycle" "LimitRanger" "ServiceAccount"
"ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
"NamespaceLifecycle"
"LimitRanger"
"ServiceAccount"
"ResourceQuota"
"DefaultStorageClass"
"DefaultTolerationSeconds"
"NodeRestriction"
];
example = [
"NamespaceLifecycle" "NamespaceExists" "LimitRanger"
"SecurityContextDeny" "ServiceAccount" "ResourceQuota"
"PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
"NamespaceLifecycle"
"NamespaceExists"
"LimitRanger"
"SecurityContextDeny"
"ServiceAccount"
"ResourceQuota"
"PodSecurityPolicy"
"NodeRestriction"
"DefaultStorageClass"
];
type = listOf str;
};
@ -119,7 +163,7 @@ in
etcd = {
servers = lib.mkOption {
description = "List of etcd servers.";
default = ["http://127.0.0.1:2379"];
default = [ "http://127.0.0.1:2379" ];
type = types.listOf types.str;
};
@ -151,7 +195,7 @@ in
extraSANs = lib.mkOption {
description = "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
default = [];
default = [ ];
type = listOf str;
};
@ -214,7 +258,10 @@ in
Kubernetes apiserver storage backend.
'';
default = "etcd3";
type = enum ["etcd2" "etcd3"];
type = enum [
"etcd2"
"etcd3"
];
};
securePort = lib.mkOption {
@ -309,135 +356,143 @@ in
};
###### implementation
config = lib.mkMerge [
(lib.mkIf cfg.enable {
systemd.services.kube-apiserver = {
description = "Kubernetes APIServer Service";
wantedBy = [ "kubernetes.target" ];
after = [ "network.target" ];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = ''
${top.package}/bin/kube-apiserver \
--allow-privileged=${lib.boolToString cfg.allowPrivileged} \
--authorization-mode=${lib.concatStringsSep "," cfg.authorizationMode} \
${lib.optionalString (lib.elem "ABAC" cfg.authorizationMode)
"--authorization-policy-file=${
pkgs.writeText "kube-auth-policy.jsonl"
(lib.concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
}"
} \
${lib.optionalString (lib.elem "Webhook" cfg.authorizationMode)
"--authorization-webhook-config-file=${cfg.webhookConfig}"
} \
--bind-address=${cfg.bindAddress} \
${lib.optionalString (cfg.advertiseAddress != null)
"--advertise-address=${cfg.advertiseAddress}"} \
${lib.optionalString (cfg.clientCaFile != null)
"--client-ca-file=${cfg.clientCaFile}"} \
--disable-admission-plugins=${lib.concatStringsSep "," cfg.disableAdmissionPlugins} \
--enable-admission-plugins=${lib.concatStringsSep "," cfg.enableAdmissionPlugins} \
--etcd-servers=${lib.concatStringsSep "," cfg.etcd.servers} \
${lib.optionalString (cfg.etcd.caFile != null)
"--etcd-cafile=${cfg.etcd.caFile}"} \
${lib.optionalString (cfg.etcd.certFile != null)
"--etcd-certfile=${cfg.etcd.certFile}"} \
${lib.optionalString (cfg.etcd.keyFile != null)
"--etcd-keyfile=${cfg.etcd.keyFile}"} \
${lib.optionalString (cfg.featureGates != {})
"--feature-gates=${(lib.concatStringsSep "," (builtins.attrValues (lib.mapAttrs (n: v: "${n}=${lib.trivial.boolToString v}") cfg.featureGates)))}"} \
${lib.optionalString (cfg.basicAuthFile != null)
"--basic-auth-file=${cfg.basicAuthFile}"} \
${lib.optionalString (cfg.kubeletClientCaFile != null)
"--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
${lib.optionalString (cfg.kubeletClientCertFile != null)
"--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
${lib.optionalString (cfg.kubeletClientKeyFile != null)
"--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
${lib.optionalString (cfg.preferredAddressTypes != null)
"--kubelet-preferred-address-types=${cfg.preferredAddressTypes}"} \
${lib.optionalString (cfg.proxyClientCertFile != null)
"--proxy-client-cert-file=${cfg.proxyClientCertFile}"} \
${lib.optionalString (cfg.proxyClientKeyFile != null)
"--proxy-client-key-file=${cfg.proxyClientKeyFile}"} \
${lib.optionalString (cfg.runtimeConfig != "")
"--runtime-config=${cfg.runtimeConfig}"} \
--secure-port=${toString cfg.securePort} \
--api-audiences=${toString cfg.apiAudiences} \
--service-account-issuer=${toString cfg.serviceAccountIssuer} \
--service-account-signing-key-file=${cfg.serviceAccountSigningKeyFile} \
--service-account-key-file=${cfg.serviceAccountKeyFile} \
--service-cluster-ip-range=${cfg.serviceClusterIpRange} \
--storage-backend=${cfg.storageBackend} \
${lib.optionalString (cfg.tlsCertFile != null)
"--tls-cert-file=${cfg.tlsCertFile}"} \
${lib.optionalString (cfg.tlsKeyFile != null)
"--tls-private-key-file=${cfg.tlsKeyFile}"} \
${lib.optionalString (cfg.tokenAuthFile != null)
"--token-auth-file=${cfg.tokenAuthFile}"} \
${lib.optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
${cfg.extraOpts}
'';
WorkingDirectory = top.dataDir;
User = "kubernetes";
Group = "kubernetes";
AmbientCapabilities = "cap_net_bind_service";
Restart = "on-failure";
RestartSec = 5;
};
unitConfig = {
StartLimitIntervalSec = 0;
};
systemd.services.kube-apiserver = {
description = "Kubernetes APIServer Service";
wantedBy = [ "kubernetes.target" ];
after = [ "network.target" ];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = ''
${top.package}/bin/kube-apiserver \
--allow-privileged=${lib.boolToString cfg.allowPrivileged} \
--authorization-mode=${lib.concatStringsSep "," cfg.authorizationMode} \
${lib.optionalString (lib.elem "ABAC" cfg.authorizationMode) "--authorization-policy-file=${pkgs.writeText "kube-auth-policy.jsonl" (lib.concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)}"} \
${lib.optionalString (lib.elem "Webhook" cfg.authorizationMode) "--authorization-webhook-config-file=${cfg.webhookConfig}"} \
--bind-address=${cfg.bindAddress} \
${lib.optionalString (cfg.advertiseAddress != null) "--advertise-address=${cfg.advertiseAddress}"} \
${lib.optionalString (cfg.clientCaFile != null) "--client-ca-file=${cfg.clientCaFile}"} \
--disable-admission-plugins=${lib.concatStringsSep "," cfg.disableAdmissionPlugins} \
--enable-admission-plugins=${lib.concatStringsSep "," cfg.enableAdmissionPlugins} \
--etcd-servers=${lib.concatStringsSep "," cfg.etcd.servers} \
${lib.optionalString (cfg.etcd.caFile != null) "--etcd-cafile=${cfg.etcd.caFile}"} \
${lib.optionalString (cfg.etcd.certFile != null) "--etcd-certfile=${cfg.etcd.certFile}"} \
${lib.optionalString (cfg.etcd.keyFile != null) "--etcd-keyfile=${cfg.etcd.keyFile}"} \
${
lib.optionalString (cfg.featureGates != { })
"--feature-gates=${
(lib.concatStringsSep "," (
builtins.attrValues (lib.mapAttrs (n: v: "${n}=${lib.trivial.boolToString v}") cfg.featureGates)
))
}"
} \
${lib.optionalString (cfg.basicAuthFile != null) "--basic-auth-file=${cfg.basicAuthFile}"} \
${
lib.optionalString (
cfg.kubeletClientCaFile != null
) "--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"
} \
${
lib.optionalString (
cfg.kubeletClientCertFile != null
) "--kubelet-client-certificate=${cfg.kubeletClientCertFile}"
} \
${
lib.optionalString (
cfg.kubeletClientKeyFile != null
) "--kubelet-client-key=${cfg.kubeletClientKeyFile}"
} \
${
lib.optionalString (
cfg.preferredAddressTypes != null
) "--kubelet-preferred-address-types=${cfg.preferredAddressTypes}"
} \
${
lib.optionalString (
cfg.proxyClientCertFile != null
) "--proxy-client-cert-file=${cfg.proxyClientCertFile}"
} \
${
lib.optionalString (
cfg.proxyClientKeyFile != null
) "--proxy-client-key-file=${cfg.proxyClientKeyFile}"
} \
${lib.optionalString (cfg.runtimeConfig != "") "--runtime-config=${cfg.runtimeConfig}"} \
--secure-port=${toString cfg.securePort} \
--api-audiences=${toString cfg.apiAudiences} \
--service-account-issuer=${toString cfg.serviceAccountIssuer} \
--service-account-signing-key-file=${cfg.serviceAccountSigningKeyFile} \
--service-account-key-file=${cfg.serviceAccountKeyFile} \
--service-cluster-ip-range=${cfg.serviceClusterIpRange} \
--storage-backend=${cfg.storageBackend} \
${lib.optionalString (cfg.tlsCertFile != null) "--tls-cert-file=${cfg.tlsCertFile}"} \
${lib.optionalString (cfg.tlsKeyFile != null) "--tls-private-key-file=${cfg.tlsKeyFile}"} \
${lib.optionalString (cfg.tokenAuthFile != null) "--token-auth-file=${cfg.tokenAuthFile}"} \
${lib.optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
${cfg.extraOpts}
'';
WorkingDirectory = top.dataDir;
User = "kubernetes";
Group = "kubernetes";
AmbientCapabilities = "cap_net_bind_service";
Restart = "on-failure";
RestartSec = 5;
};
services.etcd = {
clientCertAuth = lib.mkDefault true;
peerClientCertAuth = lib.mkDefault true;
listenClientUrls = lib.mkDefault ["https://0.0.0.0:2379"];
listenPeerUrls = lib.mkDefault ["https://0.0.0.0:2380"];
advertiseClientUrls = lib.mkDefault ["https://${top.masterAddress}:2379"];
initialCluster = lib.mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
name = lib.mkDefault top.masterAddress;
initialAdvertisePeerUrls = lib.mkDefault ["https://${top.masterAddress}:2380"];
unitConfig = {
StartLimitIntervalSec = 0;
};
};
services.kubernetes.addonManager.bootstrapAddons = lib.mkIf isRBACEnabled {
services.etcd = {
clientCertAuth = lib.mkDefault true;
peerClientCertAuth = lib.mkDefault true;
listenClientUrls = lib.mkDefault [ "https://0.0.0.0:2379" ];
listenPeerUrls = lib.mkDefault [ "https://0.0.0.0:2380" ];
advertiseClientUrls = lib.mkDefault [ "https://${top.masterAddress}:2379" ];
initialCluster = lib.mkDefault [ "${top.masterAddress}=https://${top.masterAddress}:2380" ];
name = lib.mkDefault top.masterAddress;
initialAdvertisePeerUrls = lib.mkDefault [ "https://${top.masterAddress}:2380" ];
};
apiserver-kubelet-api-admin-crb = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "ClusterRoleBinding";
metadata = {
name = "system:kube-apiserver:kubelet-api-admin";
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "system:kubelet-api-admin";
};
subjects = [{
services.kubernetes.addonManager.bootstrapAddons = lib.mkIf isRBACEnabled {
apiserver-kubelet-api-admin-crb = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "ClusterRoleBinding";
metadata = {
name = "system:kube-apiserver:kubelet-api-admin";
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "system:kubelet-api-admin";
};
subjects = [
{
kind = "User";
name = "system:kube-apiserver";
}];
};
}
];
};
};
services.kubernetes.pki.certs = with top.lib; {
apiServer = mkCert {
name = "kube-apiserver";
CN = "kubernetes";
hosts = [
"kubernetes.default.svc"
"kubernetes.default.svc.${top.addons.dns.clusterDomain}"
cfg.advertiseAddress
top.masterAddress
apiserverServiceIP
"127.0.0.1"
] ++ cfg.extraSANs;
"kubernetes.default.svc"
"kubernetes.default.svc.${top.addons.dns.clusterDomain}"
cfg.advertiseAddress
top.masterAddress
apiserverServiceIP
"127.0.0.1"
] ++ cfg.extraSANs;
action = "systemctl restart kube-apiserver.service";
};
apiserverProxyClient = mkCert {
@ -467,11 +522,11 @@ in
name = "etcd";
CN = top.masterAddress;
hosts = [
"etcd.local"
"etcd.${top.addons.dns.clusterDomain}"
top.masterAddress
cfg.advertiseAddress
];
"etcd.local"
"etcd.${top.addons.dns.clusterDomain}"
top.masterAddress
cfg.advertiseAddress
];
privateKeyOwner = "etcd";
action = "systemctl restart etcd.service";
};

View file

@ -1,4 +1,10 @@
{ config, lib, options, pkgs, ... }:
{
config,
lib,
options,
pkgs,
...
}:
with lib;
@ -9,7 +15,10 @@ let
in
{
imports = [
(mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
(mkRenamedOptionModule
[ "services" "kubernetes" "proxy" "address" ]
[ "services" "kubernetes" "proxy" "bindAddress" ]
)
];
###### interface
@ -62,16 +71,24 @@ in
description = "Kubernetes Proxy Service";
wantedBy = [ "kubernetes.target" ];
after = [ "kube-apiserver.service" ];
path = with pkgs; [ iptables conntrack-tools ];
path = with pkgs; [
iptables
conntrack-tools
];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = ''
${top.package}/bin/kube-proxy \
--bind-address=${cfg.bindAddress} \
${optionalString (top.clusterCidr!=null)
"--cluster-cidr=${top.clusterCidr}"} \
${optionalString (cfg.featureGates != {})
"--feature-gates=${concatStringsSep "," (builtins.attrValues (mapAttrs (n: v: "${n}=${trivial.boolToString v}") cfg.featureGates))}"} \
${optionalString (top.clusterCidr != null) "--cluster-cidr=${top.clusterCidr}"} \
${
optionalString (cfg.featureGates != { })
"--feature-gates=${
concatStringsSep "," (
builtins.attrValues (mapAttrs (n: v: "${n}=${trivial.boolToString v}") cfg.featureGates)
)
}"
} \
--hostname-override=${cfg.hostname} \
--kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \

View file

@ -1,4 +1,10 @@
{ config, lib, options, pkgs, ... }:
{
config,
lib,
options,
pkgs,
...
}:
let
cfg = config.services.slurm;
@ -7,51 +13,51 @@ let
defaultUser = "slurm";
configFile = pkgs.writeTextDir "slurm.conf"
''
ClusterName=${cfg.clusterName}
StateSaveLocation=${cfg.stateSaveLocation}
SlurmUser=${cfg.user}
${lib.optionalString (cfg.controlMachine != null) "controlMachine=${cfg.controlMachine}"}
${lib.optionalString (cfg.controlAddr != null) "controlAddr=${cfg.controlAddr}"}
${toString (map (x: "NodeName=${x}\n") cfg.nodeName)}
${toString (map (x: "PartitionName=${x}\n") cfg.partitionName)}
PlugStackConfig=${plugStackConfig}/plugstack.conf
ProctrackType=${cfg.procTrackType}
${cfg.extraConfig}
'';
configFile = pkgs.writeTextDir "slurm.conf" ''
ClusterName=${cfg.clusterName}
StateSaveLocation=${cfg.stateSaveLocation}
SlurmUser=${cfg.user}
${lib.optionalString (cfg.controlMachine != null) "controlMachine=${cfg.controlMachine}"}
${lib.optionalString (cfg.controlAddr != null) "controlAddr=${cfg.controlAddr}"}
${toString (map (x: "NodeName=${x}\n") cfg.nodeName)}
${toString (map (x: "PartitionName=${x}\n") cfg.partitionName)}
PlugStackConfig=${plugStackConfig}/plugstack.conf
ProctrackType=${cfg.procTrackType}
${cfg.extraConfig}
'';
plugStackConfig = pkgs.writeTextDir "plugstack.conf"
''
${lib.optionalString cfg.enableSrunX11 "optional ${pkgs.slurm-spank-x11}/lib/x11.so"}
${cfg.extraPlugstackConfig}
'';
plugStackConfig = pkgs.writeTextDir "plugstack.conf" ''
${lib.optionalString cfg.enableSrunX11 "optional ${pkgs.slurm-spank-x11}/lib/x11.so"}
${cfg.extraPlugstackConfig}
'';
cgroupConfig = pkgs.writeTextDir "cgroup.conf"
''
${cfg.extraCgroupConfig}
'';
cgroupConfig = pkgs.writeTextDir "cgroup.conf" ''
${cfg.extraCgroupConfig}
'';
mpiConf = pkgs.writeTextDir "mpi.conf"
''
PMIxCliTmpDirBase=${cfg.mpi.PmixCliTmpDirBase}
${cfg.mpi.extraMpiConfig}
'';
mpiConf = pkgs.writeTextDir "mpi.conf" ''
PMIxCliTmpDirBase=${cfg.mpi.PmixCliTmpDirBase}
${cfg.mpi.extraMpiConfig}
'';
slurmdbdConf = pkgs.writeText "slurmdbd.conf"
''
DbdHost=${cfg.dbdserver.dbdHost}
SlurmUser=${cfg.user}
StorageType=accounting_storage/mysql
StorageUser=${cfg.dbdserver.storageUser}
${cfg.dbdserver.extraConfig}
'';
slurmdbdConf = pkgs.writeText "slurmdbd.conf" ''
DbdHost=${cfg.dbdserver.dbdHost}
SlurmUser=${cfg.user}
StorageType=accounting_storage/mysql
StorageUser=${cfg.dbdserver.storageUser}
${cfg.dbdserver.extraConfig}
'';
# slurm expects some additional config files to be
# in the same directory as slurm.conf
etcSlurm = pkgs.symlinkJoin {
name = "etc-slurm";
paths = [ configFile cgroupConfig plugStackConfig mpiConf ] ++ cfg.extraConfigPaths;
paths = [
configFile
cgroupConfig
plugStackConfig
mpiConf
] ++ cfg.extraConfigPaths;
};
in
@ -134,11 +140,13 @@ in
'';
};
package = lib.mkPackageOption pkgs "slurm" {
example = "slurm-full";
} // {
default = pkgs.slurm.override { enableX11 = ! cfg.enableSrunX11; };
};
package =
lib.mkPackageOption pkgs "slurm" {
example = "slurm-full";
}
// {
default = pkgs.slurm.override { enableX11 = !cfg.enableSrunX11; };
};
controlMachine = lib.mkOption {
type = lib.types.nullOr lib.types.str;
@ -173,7 +181,7 @@ in
nodeName = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
default = [ ];
example = lib.literalExpression ''[ "linux[1-32] CPUs=1 State=UNKNOWN" ];'';
description = ''
Name that SLURM uses to refer to a node (or base partition for BlueGene
@ -184,7 +192,7 @@ in
partitionName = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
default = [ ];
example = lib.literalExpression ''[ "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP" ];'';
description = ''
Name by which the partition may be referenced. Note that now you have
@ -285,7 +293,7 @@ in
extraConfigPaths = lib.mkOption {
type = with lib.types; listOf path;
default = [];
default = [ ];
description = ''
Slurm expects config files for plugins in the same path
as `slurm.conf`. Add extra nix store
@ -353,107 +361,132 @@ in
'';
};
in lib.mkIf ( cfg.enableStools ||
cfg.client.enable ||
cfg.server.enable ||
cfg.dbdserver.enable ) {
in
lib.mkIf (cfg.enableStools || cfg.client.enable || cfg.server.enable || cfg.dbdserver.enable) {
environment.systemPackages = [ wrappedSlurm ];
environment.systemPackages = [ wrappedSlurm ];
services.munge.enable = lib.mkDefault true;
services.munge.enable = lib.mkDefault true;
# use a static uid as default to ensure it is the same on all nodes
users.users.slurm = lib.mkIf (cfg.user == defaultUser) {
name = defaultUser;
group = "slurm";
uid = config.ids.uids.slurm;
};
# use a static uid as default to ensure it is the same on all nodes
users.users.slurm = lib.mkIf (cfg.user == defaultUser) {
name = defaultUser;
group = "slurm";
uid = config.ids.uids.slurm;
};
users.groups.slurm.gid = config.ids.uids.slurm;
users.groups.slurm.gid = config.ids.uids.slurm;
systemd.services.slurmd = lib.mkIf (cfg.client.enable) {
path = with pkgs; [ wrappedSlurm coreutils ]
++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
systemd.services.slurmd = lib.mkIf (cfg.client.enable) {
path =
with pkgs;
[
wrappedSlurm
coreutils
]
++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
wantedBy = [ "multi-user.target" ];
after = [
"systemd-tmpfiles-clean.service"
"munge.service"
"network-online.target"
"remote-fs.target"
wantedBy = [ "multi-user.target" ];
after = [
"systemd-tmpfiles-clean.service"
"munge.service"
"network-online.target"
"remote-fs.target"
];
wants = [ "network-online.target" ];
serviceConfig = {
Type = "forking";
KillMode = "process";
ExecStart = "${wrappedSlurm}/bin/slurmd";
PIDFile = "/run/slurmd.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
LimitMEMLOCK = "infinity";
Delegate = "Yes";
};
};
systemd.tmpfiles.rules = lib.optionals cfg.client.enable [
"d /var/spool/slurmd 755 root root -"
"d ${cfg.mpi.PmixCliTmpDirBase} 755 root root -"
];
wants = [ "network-online.target" ];
serviceConfig = {
Type = "forking";
KillMode = "process";
ExecStart = "${wrappedSlurm}/bin/slurmd";
PIDFile = "/run/slurmd.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
LimitMEMLOCK = "infinity";
Delegate="Yes";
};
};
services.openssh.settings.X11Forwarding = lib.mkIf cfg.client.enable (lib.mkDefault true);
systemd.tmpfiles.rules = lib.optionals cfg.client.enable [
"d /var/spool/slurmd 755 root root -"
"d ${cfg.mpi.PmixCliTmpDirBase} 755 root root -"
];
systemd.services.slurmctld = lib.mkIf (cfg.server.enable) {
path =
with pkgs;
[
wrappedSlurm
munge
coreutils
]
++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
services.openssh.settings.X11Forwarding = lib.mkIf cfg.client.enable (lib.mkDefault true);
wantedBy = [ "multi-user.target" ];
after = [
"network.target"
"munged.service"
];
requires = [ "munged.service" ];
systemd.services.slurmctld = lib.mkIf (cfg.server.enable) {
path = with pkgs; [ wrappedSlurm munge coreutils ]
++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
serviceConfig = {
Type = "forking";
ExecStart = "${wrappedSlurm}/bin/slurmctld";
PIDFile = "/run/slurmctld.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
};
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "munged.service" ];
requires = [ "munged.service" ];
serviceConfig = {
Type = "forking";
ExecStart = "${wrappedSlurm}/bin/slurmctld";
PIDFile = "/run/slurmctld.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
preStart = ''
mkdir -p ${cfg.stateSaveLocation}
chown -R ${cfg.user}:slurm ${cfg.stateSaveLocation}
'';
};
preStart = ''
mkdir -p ${cfg.stateSaveLocation}
chown -R ${cfg.user}:slurm ${cfg.stateSaveLocation}
'';
systemd.services.slurmdbd =
let
# slurm strips the last component off the path
configPath = "$RUNTIME_DIRECTORY/slurmdbd.conf";
in
lib.mkIf (cfg.dbdserver.enable) {
path = with pkgs; [
wrappedSlurm
munge
coreutils
];
wantedBy = [ "multi-user.target" ];
after = [
"network.target"
"munged.service"
"mysql.service"
];
requires = [
"munged.service"
"mysql.service"
];
preStart = ''
install -m 600 -o ${cfg.user} -T ${slurmdbdConf} ${configPath}
${lib.optionalString (cfg.dbdserver.storagePassFile != null) ''
echo "StoragePass=$(cat ${cfg.dbdserver.storagePassFile})" \
>> ${configPath}
''}
'';
script = ''
export SLURM_CONF=${configPath}
exec ${cfg.package}/bin/slurmdbd -D
'';
serviceConfig = {
RuntimeDirectory = "slurmdbd";
Type = "simple";
PIDFile = "/run/slurmdbd.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
};
};
};
systemd.services.slurmdbd = let
# slurm strips the last component off the path
configPath = "$RUNTIME_DIRECTORY/slurmdbd.conf";
in lib.mkIf (cfg.dbdserver.enable) {
path = with pkgs; [ wrappedSlurm munge coreutils ];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "munged.service" "mysql.service" ];
requires = [ "munged.service" "mysql.service" ];
preStart = ''
install -m 600 -o ${cfg.user} -T ${slurmdbdConf} ${configPath}
${lib.optionalString (cfg.dbdserver.storagePassFile != null) ''
echo "StoragePass=$(cat ${cfg.dbdserver.storagePassFile})" \
>> ${configPath}
''}
'';
script = ''
export SLURM_CONF=${configPath}
exec ${cfg.package}/bin/slurmdbd -D
'';
serviceConfig = {
RuntimeDirectory = "slurmdbd";
Type = "simple";
PIDFile = "/run/slurmdbd.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
};
};
};
}

View file

@ -1,142 +1,166 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.buildkite-agents;
hooksDir = hooks:
hooksDir =
hooks:
let
mkHookEntry = name: text: ''
ln --symbolic ${pkgs.writeShellApplication { inherit name text; }}/bin/${name} $out/${name}
'';
in
pkgs.runCommand "buildkite-agent-hooks" {
preferLocalBuild = true;
} ''
mkdir $out
${lib.concatStringsSep "\n" (lib.mapAttrsToList mkHookEntry hooks)}
'';
pkgs.runCommand "buildkite-agent-hooks"
{
preferLocalBuild = true;
}
''
mkdir $out
${lib.concatStringsSep "\n" (lib.mapAttrsToList mkHookEntry hooks)}
'';
buildkiteOptions = { name ? "", config, ... }: {
options = {
enable = lib.mkOption {
default = true;
type = lib.types.bool;
description = "Whether to enable this buildkite agent";
};
buildkiteOptions =
{
name ? "",
config,
...
}:
{
options = {
enable = lib.mkOption {
default = true;
type = lib.types.bool;
description = "Whether to enable this buildkite agent";
};
package = lib.mkOption {
default = pkgs.buildkite-agent;
defaultText = lib.literalExpression "pkgs.buildkite-agent";
description = "Which buildkite-agent derivation to use";
type = lib.types.package;
};
package = lib.mkOption {
default = pkgs.buildkite-agent;
defaultText = lib.literalExpression "pkgs.buildkite-agent";
description = "Which buildkite-agent derivation to use";
type = lib.types.package;
};
dataDir = lib.mkOption {
default = "/var/lib/buildkite-agent-${name}";
description = "The workdir for the agent";
type = lib.types.str;
};
dataDir = lib.mkOption {
default = "/var/lib/buildkite-agent-${name}";
description = "The workdir for the agent";
type = lib.types.str;
};
extraGroups = lib.mkOption {
default = [ "keys" ];
description = "Groups the user for this buildkite agent should belong to";
type = lib.types.listOf lib.types.str;
};
extraGroups = lib.mkOption {
default = [ "keys" ];
description = "Groups the user for this buildkite agent should belong to";
type = lib.types.listOf lib.types.str;
};
runtimePackages = lib.mkOption {
default = [ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ];
defaultText = lib.literalExpression "[ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ]";
description = "Add programs to the buildkite-agent environment";
type = lib.types.listOf lib.types.package;
};
runtimePackages = lib.mkOption {
default = [
pkgs.bash
pkgs.gnutar
pkgs.gzip
pkgs.git
pkgs.nix
];
defaultText = lib.literalExpression "[ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ]";
description = "Add programs to the buildkite-agent environment";
type = lib.types.listOf lib.types.package;
};
tokenPath = lib.mkOption {
type = lib.types.path;
description = ''
The token from your Buildkite "Agents" page.
tokenPath = lib.mkOption {
type = lib.types.path;
description = ''
The token from your Buildkite "Agents" page.
A run-time path to the token file, which is supposed to be provisioned
outside of Nix store.
'';
};
A run-time path to the token file, which is supposed to be provisioned
outside of Nix store.
'';
};
name = lib.mkOption {
type = lib.types.str;
default = "%hostname-${name}-%n";
description = ''
The name of the agent as seen in the buildkite dashboard.
'';
};
name = lib.mkOption {
type = lib.types.str;
default = "%hostname-${name}-%n";
description = ''
The name of the agent as seen in the buildkite dashboard.
'';
};
tags = lib.mkOption {
type = lib.types.attrsOf (lib.types.either lib.types.str (lib.types.listOf lib.types.str));
default = { };
example = { queue = "default"; docker = "true"; ruby2 = "true"; };
description = ''
Tags for the agent.
'';
};
tags = lib.mkOption {
type = lib.types.attrsOf (lib.types.either lib.types.str (lib.types.listOf lib.types.str));
default = { };
example = {
queue = "default";
docker = "true";
ruby2 = "true";
};
description = ''
Tags for the agent.
'';
};
extraConfig = lib.mkOption {
type = lib.types.lines;
default = "";
example = "debug=true";
description = ''
Extra lines to be added verbatim to the configuration file.
'';
};
extraConfig = lib.mkOption {
type = lib.types.lines;
default = "";
example = "debug=true";
description = ''
Extra lines to be added verbatim to the configuration file.
'';
};
privateSshKeyPath = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
## maximum care is taken so that secrets (ssh keys and the CI token)
## don't end up in the Nix store.
apply = final: if final == null then null else toString final;
privateSshKeyPath = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
## maximum care is taken so that secrets (ssh keys and the CI token)
## don't end up in the Nix store.
apply = final: if final == null then null else toString final;
description = ''
OpenSSH private key
description = ''
OpenSSH private key
A run-time path to the key file, which is supposed to be provisioned
outside of Nix store.
'';
};
A run-time path to the key file, which is supposed to be provisioned
outside of Nix store.
'';
};
hooks = lib.mkOption {
type = lib.types.attrsOf lib.types.lines;
default = { };
example = lib.literalExpression ''
{
environment = '''
export SECRET_VAR=`head -1 /run/keys/secret`
''';
}'';
description = ''
"Agent" hooks to install.
See <https://buildkite.com/docs/agent/v3/hooks> for possible options.
'';
};
hooks = lib.mkOption {
type = lib.types.attrsOf lib.types.lines;
default = { };
example = lib.literalExpression ''
{
environment = '''
export SECRET_VAR=`head -1 /run/keys/secret`
''';
}'';
description = ''
"Agent" hooks to install.
See <https://buildkite.com/docs/agent/v3/hooks> for possible options.
'';
};
hooksPath = lib.mkOption {
type = lib.types.path;
default = hooksDir config.hooks;
defaultText = lib.literalMD "generated from {option}`services.buildkite-agents.<name>.hooks`";
description = ''
Path to the directory storing the hooks.
Consider using {option}`services.buildkite-agents.<name>.hooks.<name>`
instead.
'';
};
hooksPath = lib.mkOption {
type = lib.types.path;
default = hooksDir config.hooks;
defaultText = lib.literalMD "generated from {option}`services.buildkite-agents.<name>.hooks`";
description = ''
Path to the directory storing the hooks.
Consider using {option}`services.buildkite-agents.<name>.hooks.<name>`
instead.
'';
};
shell = lib.mkOption {
type = lib.types.str;
default = "${pkgs.bash}/bin/bash -e -c";
defaultText = lib.literalExpression ''"''${pkgs.bash}/bin/bash -e -c"'';
description = ''
Command that buildkite-agent 3 will execute when it spawns a shell.
'';
shell = lib.mkOption {
type = lib.types.str;
default = "${pkgs.bash}/bin/bash -e -c";
defaultText = lib.literalExpression ''"''${pkgs.bash}/bin/bash -e -c"'';
description = ''
Command that buildkite-agent 3 will execute when it spawns a shell.
'';
};
};
};
};
enabledAgents = lib.filterAttrs (n: v: v.enable) cfg;
mapAgents = function: lib.mkMerge (lib.mapAttrsToList function enabledAgents);
in
@ -152,76 +176,92 @@ in
'';
};
config.users.users = mapAgents (name: cfg: {
"buildkite-agent-${name}" = {
name = "buildkite-agent-${name}";
home = cfg.dataDir;
createHome = true;
description = "Buildkite agent user";
extraGroups = cfg.extraGroups;
isSystemUser = true;
group = "buildkite-agent-${name}";
};
});
config.users.groups = mapAgents (name: cfg: {
"buildkite-agent-${name}" = { };
});
config.systemd.services = mapAgents (name: cfg: {
"buildkite-agent-${name}" = {
description = "Buildkite Agent";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = cfg.runtimePackages ++ [ cfg.package pkgs.coreutils ];
environment = config.networking.proxy.envVars // {
HOME = cfg.dataDir;
NIX_REMOTE = "daemon";
config.users.users = mapAgents (
name: cfg: {
"buildkite-agent-${name}" = {
name = "buildkite-agent-${name}";
home = cfg.dataDir;
createHome = true;
description = "Buildkite agent user";
extraGroups = cfg.extraGroups;
isSystemUser = true;
group = "buildkite-agent-${name}";
};
}
);
config.users.groups = mapAgents (
name: cfg: {
"buildkite-agent-${name}" = { };
}
);
## NB: maximum care is taken so that secrets (ssh keys and the CI token)
## don't end up in the Nix store.
preStart =
let
sshDir = "${cfg.dataDir}/.ssh";
tagStr = name: value:
if lib.isList value
then lib.concatStringsSep "," (builtins.map (v: "${name}=${v}") value)
else "${name}=${value}";
tagsStr = lib.concatStringsSep "," (lib.mapAttrsToList tagStr cfg.tags);
in
lib.optionalString (cfg.privateSshKeyPath != null) ''
mkdir -m 0700 -p "${sshDir}"
install -m600 "${toString cfg.privateSshKeyPath}" "${sshDir}/id_rsa"
'' + ''
cat > "${cfg.dataDir}/buildkite-agent.cfg" <<EOF
token="$(cat ${toString cfg.tokenPath})"
name="${cfg.name}"
shell="${cfg.shell}"
tags="${tagsStr}"
build-path="${cfg.dataDir}/builds"
hooks-path="${cfg.hooksPath}"
${cfg.extraConfig}
EOF
config.systemd.services = mapAgents (
name: cfg: {
"buildkite-agent-${name}" = {
description = "Buildkite Agent";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = cfg.runtimePackages ++ [
cfg.package
pkgs.coreutils
];
environment = config.networking.proxy.envVars // {
HOME = cfg.dataDir;
NIX_REMOTE = "daemon";
};
## NB: maximum care is taken so that secrets (ssh keys and the CI token)
## don't end up in the Nix store.
preStart =
let
sshDir = "${cfg.dataDir}/.ssh";
tagStr =
name: value:
if lib.isList value then
lib.concatStringsSep "," (builtins.map (v: "${name}=${v}") value)
else
"${name}=${value}";
tagsStr = lib.concatStringsSep "," (lib.mapAttrsToList tagStr cfg.tags);
in
lib.optionalString (cfg.privateSshKeyPath != null) ''
mkdir -m 0700 -p "${sshDir}"
install -m600 "${toString cfg.privateSshKeyPath}" "${sshDir}/id_rsa"
''
+ ''
cat > "${cfg.dataDir}/buildkite-agent.cfg" <<EOF
token="$(cat ${toString cfg.tokenPath})"
name="${cfg.name}"
shell="${cfg.shell}"
tags="${tagsStr}"
build-path="${cfg.dataDir}/builds"
hooks-path="${cfg.hooksPath}"
${cfg.extraConfig}
EOF
'';
serviceConfig = {
ExecStart = "${cfg.package}/bin/buildkite-agent start --config ${cfg.dataDir}/buildkite-agent.cfg";
User = "buildkite-agent-${name}";
RestartSec = 5;
Restart = "on-failure";
TimeoutSec = 10;
# set a long timeout to give buildkite-agent a chance to finish current builds
TimeoutStopSec = "2 min";
KillMode = "mixed";
};
};
}
);
config.assertions = mapAgents (
name: cfg: [
{
assertion = cfg.hooksPath != hooksDir cfg.hooks -> cfg.hooks == { };
message = ''
Options `services.buildkite-agents.${name}.hooksPath' and
`services.buildkite-agents.${name}.hooks.<name>' are mutually exclusive.
'';
serviceConfig = {
ExecStart = "${cfg.package}/bin/buildkite-agent start --config ${cfg.dataDir}/buildkite-agent.cfg";
User = "buildkite-agent-${name}";
RestartSec = 5;
Restart = "on-failure";
TimeoutSec = 10;
# set a long timeout to give buildkite-agent a chance to finish current builds
TimeoutStopSec = "2 min";
KillMode = "mixed";
};
};
});
config.assertions = mapAgents (name: cfg: [{
assertion = cfg.hooksPath != hooksDir cfg.hooks -> cfg.hooks == { };
message = ''
Options `services.buildkite-agents.${name}.hooksPath' and
`services.buildkite-agents.${name}.hooks.<name>' are mutually exclusive.
'';
}]);
}
]
);
}

View file

@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.hydra;
@ -7,61 +12,71 @@ let
hydraConf = pkgs.writeScript "hydra.conf" cfg.extraConfig;
hydraEnv =
{ HYDRA_DBI = cfg.dbi;
HYDRA_CONFIG = "${baseDir}/hydra.conf";
HYDRA_DATA = "${baseDir}";
};
hydraEnv = {
HYDRA_DBI = cfg.dbi;
HYDRA_CONFIG = "${baseDir}/hydra.conf";
HYDRA_DATA = "${baseDir}";
};
env =
{ NIX_REMOTE = "daemon";
{
NIX_REMOTE = "daemon";
PGPASSFILE = "${baseDir}/pgpass";
NIX_REMOTE_SYSTEMS = lib.concatStringsSep ":" cfg.buildMachinesFiles;
} // lib.optionalAttrs (cfg.smtpHost != null) {
}
// lib.optionalAttrs (cfg.smtpHost != null) {
EMAIL_SENDER_TRANSPORT = "SMTP";
EMAIL_SENDER_TRANSPORT_host = cfg.smtpHost;
} // hydraEnv // cfg.extraEnv;
}
// hydraEnv
// cfg.extraEnv;
serverEnv = env //
{ HYDRA_TRACKER = cfg.tracker;
serverEnv =
env
// {
HYDRA_TRACKER = cfg.tracker;
XDG_CACHE_HOME = "${baseDir}/www/.cache";
COLUMNS = "80";
PGPASSFILE = "${baseDir}/pgpass-www"; # grrr
} // (lib.optionalAttrs cfg.debugServer { DBIC_TRACE = "1"; });
}
// (lib.optionalAttrs cfg.debugServer { DBIC_TRACE = "1"; });
localDB = "dbi:Pg:dbname=hydra;user=hydra;";
haveLocalDB = cfg.dbi == localDB;
hydra-package =
let
makeWrapperArgs = lib.concatStringsSep " " (lib.mapAttrsToList (key: value: "--set-default \"${key}\" \"${value}\"") hydraEnv);
in pkgs.buildEnv rec {
name = "hydra-env";
nativeBuildInputs = [ pkgs.makeWrapper ];
paths = [ cfg.package ];
let
makeWrapperArgs = lib.concatStringsSep " " (
lib.mapAttrsToList (key: value: "--set-default \"${key}\" \"${value}\"") hydraEnv
);
in
pkgs.buildEnv rec {
name = "hydra-env";
nativeBuildInputs = [ pkgs.makeWrapper ];
paths = [ cfg.package ];
postBuild = ''
if [ -L "$out/bin" ]; then
unlink "$out/bin"
fi
mkdir -p "$out/bin"
for path in ${lib.concatStringsSep " " paths}; do
if [ -d "$path/bin" ]; then
cd "$path/bin"
for prg in *; do
if [ -f "$prg" ]; then
rm -f "$out/bin/$prg"
if [ -x "$prg" ]; then
makeWrapper "$path/bin/$prg" "$out/bin/$prg" ${makeWrapperArgs}
fi
fi
done
postBuild = ''
if [ -L "$out/bin" ]; then
unlink "$out/bin"
fi
done
'';
};
mkdir -p "$out/bin"
for path in ${lib.concatStringsSep " " paths}; do
if [ -d "$path/bin" ]; then
cd "$path/bin"
for prg in *; do
if [ -f "$prg" ]; then
rm -f "$out/bin/$prg"
if [ -x "$prg" ]; then
makeWrapper "$path/bin/$prg" "$out/bin/$prg" ${makeWrapperArgs}
fi
fi
done
fi
done
'';
};
in
@ -199,7 +214,7 @@ in
extraEnv = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = {};
default = { };
description = "Extra environment variables for Hydra.";
};
@ -211,9 +226,12 @@ in
buildMachinesFiles = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = lib.optional (config.nix.buildMachines != []) "/etc/nix/machines";
default = lib.optional (config.nix.buildMachines != [ ]) "/etc/nix/machines";
defaultText = lib.literalExpression ''lib.optional (config.nix.buildMachines != []) "/etc/nix/machines"'';
example = [ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ];
example = [
"/etc/nix/machines"
"/var/lib/hydra/provisioner/machines"
];
description = "List of files containing build machines.";
};
@ -234,7 +252,6 @@ in
};
###### implementation
config = lib.mkIf cfg.enable {
@ -253,42 +270,41 @@ in
gid = config.ids.gids.hydra;
};
users.users.hydra =
{ description = "Hydra";
group = "hydra";
# We don't enable `createHome` here because the creation of the home directory is handled by the hydra-init service below.
home = baseDir;
useDefaultShell = true;
uid = config.ids.uids.hydra;
};
users.users.hydra = {
description = "Hydra";
group = "hydra";
# We don't enable `createHome` here because the creation of the home directory is handled by the hydra-init service below.
home = baseDir;
useDefaultShell = true;
uid = config.ids.uids.hydra;
};
users.users.hydra-queue-runner =
{ description = "Hydra queue runner";
group = "hydra";
useDefaultShell = true;
home = "${baseDir}/queue-runner"; # really only to keep SSH happy
uid = config.ids.uids.hydra-queue-runner;
};
users.users.hydra-queue-runner = {
description = "Hydra queue runner";
group = "hydra";
useDefaultShell = true;
home = "${baseDir}/queue-runner"; # really only to keep SSH happy
uid = config.ids.uids.hydra-queue-runner;
};
users.users.hydra-www =
{ description = "Hydra web server";
group = "hydra";
useDefaultShell = true;
uid = config.ids.uids.hydra-www;
};
users.users.hydra-www = {
description = "Hydra web server";
group = "hydra";
useDefaultShell = true;
uid = config.ids.uids.hydra-www;
};
services.hydra.extraConfig =
''
using_frontend_proxy = 1
base_uri = ${cfg.hydraURL}
notification_sender = ${cfg.notificationSender}
max_servers = ${toString cfg.maxServers}
${lib.optionalString (cfg.logo != null) ''
hydra_logo = ${cfg.logo}
''}
gc_roots_dir = ${cfg.gcRootsDir}
use-substitutes = ${if cfg.useSubstitutes then "1" else "0"}
'';
services.hydra.extraConfig = ''
using_frontend_proxy = 1
base_uri = ${cfg.hydraURL}
notification_sender = ${cfg.notificationSender}
max_servers = ${toString cfg.maxServers}
${lib.optionalString (cfg.logo != null) ''
hydra_logo = ${cfg.logo}
''}
gc_roots_dir = ${cfg.gcRootsDir}
use-substitutes = ${if cfg.useSubstitutes then "1" else "0"}
'';
environment.systemPackages = [ hydra-package ];
@ -301,247 +317,264 @@ in
trusted-users = [ "hydra-queue-runner" ];
}
(lib.mkIf (lib.versionOlder (lib.getVersion config.nix.package.out) "2.4pre")
{
# The default (`true') slows Nix down a lot since the build farm
# has so many GC roots.
gc-check-reachability = false;
}
)
(lib.mkIf (lib.versionOlder (lib.getVersion config.nix.package.out) "2.4pre") {
# The default (`true') slows Nix down a lot since the build farm
# has so many GC roots.
gc-check-reachability = false;
})
];
systemd.slices.system-hydra = {
description = "Hydra CI Server Slice";
documentation = [ "file://${cfg.package}/share/doc/hydra/index.html" "https://nixos.org/hydra/manual/" ];
documentation = [
"file://${cfg.package}/share/doc/hydra/index.html"
"https://nixos.org/hydra/manual/"
];
};
systemd.services.hydra-init =
{ wantedBy = [ "multi-user.target" ];
requires = lib.optional haveLocalDB "postgresql.service";
after = lib.optional haveLocalDB "postgresql.service";
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
};
path = [ pkgs.util-linux ];
preStart = ''
mkdir -p ${baseDir}
chown hydra:hydra ${baseDir}
chmod 0750 ${baseDir}
systemd.services.hydra-init = {
wantedBy = [ "multi-user.target" ];
requires = lib.optional haveLocalDB "postgresql.service";
after = lib.optional haveLocalDB "postgresql.service";
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
};
path = [ pkgs.util-linux ];
preStart = ''
mkdir -p ${baseDir}
chown hydra:hydra ${baseDir}
chmod 0750 ${baseDir}
ln -sf ${hydraConf} ${baseDir}/hydra.conf
ln -sf ${hydraConf} ${baseDir}/hydra.conf
mkdir -m 0700 ${baseDir}/www || true
chown hydra-www:hydra ${baseDir}/www
mkdir -m 0700 ${baseDir}/www || true
chown hydra-www:hydra ${baseDir}/www
mkdir -m 0700 ${baseDir}/queue-runner || true
mkdir -m 0750 ${baseDir}/build-logs || true
mkdir -m 0750 ${baseDir}/runcommand-logs || true
chown hydra-queue-runner:hydra \
${baseDir}/queue-runner \
${baseDir}/build-logs \
${baseDir}/runcommand-logs
mkdir -m 0700 ${baseDir}/queue-runner || true
mkdir -m 0750 ${baseDir}/build-logs || true
mkdir -m 0750 ${baseDir}/runcommand-logs || true
chown hydra-queue-runner:hydra \
${baseDir}/queue-runner \
${baseDir}/build-logs \
${baseDir}/runcommand-logs
${lib.optionalString haveLocalDB ''
if ! [ -e ${baseDir}/.db-created ]; then
runuser -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createuser hydra
runuser -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createdb -- -O hydra hydra
touch ${baseDir}/.db-created
fi
echo "create extension if not exists pg_trgm" | runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/psql hydra
''}
${lib.optionalString haveLocalDB ''
if ! [ -e ${baseDir}/.db-created ]; then
runuser -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createuser hydra
runuser -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createdb -- -O hydra hydra
touch ${baseDir}/.db-created
fi
echo "create extension if not exists pg_trgm" | runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/psql hydra
''}
if [ ! -e ${cfg.gcRootsDir} ]; then
if [ ! -e ${cfg.gcRootsDir} ]; then
# Move legacy roots directory.
if [ -e /nix/var/nix/gcroots/per-user/hydra/hydra-roots ]; then
mv /nix/var/nix/gcroots/per-user/hydra/hydra-roots ${cfg.gcRootsDir}
fi
mkdir -p ${cfg.gcRootsDir}
# Move legacy roots directory.
if [ -e /nix/var/nix/gcroots/per-user/hydra/hydra-roots ]; then
mv /nix/var/nix/gcroots/per-user/hydra/hydra-roots ${cfg.gcRootsDir}
fi
# Move legacy hydra-www roots.
if [ -e /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots ]; then
find /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots/ -type f -print0 \
| xargs -0 -r mv -f -t ${cfg.gcRootsDir}/
rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots
fi
mkdir -p ${cfg.gcRootsDir}
fi
chown hydra:hydra ${cfg.gcRootsDir}
chmod 2775 ${cfg.gcRootsDir}
'';
serviceConfig.ExecStart = "${hydra-package}/bin/hydra-init";
serviceConfig.PermissionsStartOnly = true;
serviceConfig.User = "hydra";
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
serviceConfig.Slice = "system-hydra.slice";
# Move legacy hydra-www roots.
if [ -e /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots ]; then
find /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots/ -type f -print0 \
| xargs -0 -r mv -f -t ${cfg.gcRootsDir}/
rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots
fi
chown hydra:hydra ${cfg.gcRootsDir}
chmod 2775 ${cfg.gcRootsDir}
'';
serviceConfig.ExecStart = "${hydra-package}/bin/hydra-init";
serviceConfig.PermissionsStartOnly = true;
serviceConfig.User = "hydra";
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
serviceConfig.Slice = "system-hydra.slice";
};
systemd.services.hydra-server = {
wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
environment = serverEnv // {
HYDRA_DBI = "${serverEnv.HYDRA_DBI};application_name=hydra-server";
};
systemd.services.hydra-server =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
environment = serverEnv // {
HYDRA_DBI = "${serverEnv.HYDRA_DBI};application_name=hydra-server";
};
restartTriggers = [ hydraConf ];
serviceConfig =
{ ExecStart =
"@${hydra-package}/bin/hydra-server hydra-server -f -h '${cfg.listenHost}' "
+ "-p ${toString cfg.port} --min_spare_servers ${toString cfg.minSpareServers} --max_spare_servers ${toString cfg.maxSpareServers} "
+ "--max_servers ${toString cfg.maxServers} --max_requests 100 ${lib.optionalString cfg.debugServer "-d"}";
User = "hydra-www";
PermissionsStartOnly = true;
Restart = "always";
Slice = "system-hydra.slice";
};
restartTriggers = [ hydraConf ];
serviceConfig = {
ExecStart =
"@${hydra-package}/bin/hydra-server hydra-server -f -h '${cfg.listenHost}' "
+ "-p ${toString cfg.port} --min_spare_servers ${toString cfg.minSpareServers} --max_spare_servers ${toString cfg.maxSpareServers} "
+ "--max_servers ${toString cfg.maxServers} --max_requests 100 ${lib.optionalString cfg.debugServer "-d"}";
User = "hydra-www";
PermissionsStartOnly = true;
Restart = "always";
Slice = "system-hydra.slice";
};
};
systemd.services.hydra-queue-runner =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" "network.target" ];
path = [ hydra-package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
restartTriggers = [ hydraConf ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
IN_SYSTEMD = "1"; # to get log severity levels
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-queue-runner";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-queue-runner hydra-queue-runner -v";
ExecStopPost = "${hydra-package}/bin/hydra-queue-runner --unlock";
User = "hydra-queue-runner";
Restart = "always";
Slice = "system-hydra.slice";
# Ensure we can get core dumps.
LimitCORE = "infinity";
WorkingDirectory = "${baseDir}/queue-runner";
};
systemd.services.hydra-queue-runner = {
wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [
"hydra-init.service"
"network.target"
];
path = [
hydra-package
pkgs.nettools
pkgs.openssh
pkgs.bzip2
config.nix.package
];
restartTriggers = [ hydraConf ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
IN_SYSTEMD = "1"; # to get log severity levels
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-queue-runner";
};
serviceConfig = {
ExecStart = "@${hydra-package}/bin/hydra-queue-runner hydra-queue-runner -v";
ExecStopPost = "${hydra-package}/bin/hydra-queue-runner --unlock";
User = "hydra-queue-runner";
Restart = "always";
Slice = "system-hydra.slice";
systemd.services.hydra-evaluator =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
wants = [ "network-online.target" ];
after = [ "hydra-init.service" "network.target" "network-online.target" ];
path = with pkgs; [ hydra-package nettools jq ];
restartTriggers = [ hydraConf ];
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-evaluator hydra-evaluator";
User = "hydra";
Restart = "always";
WorkingDirectory = baseDir;
Slice = "system-hydra.slice";
};
# Ensure we can get core dumps.
LimitCORE = "infinity";
WorkingDirectory = "${baseDir}/queue-runner";
};
};
systemd.services.hydra-update-gc-roots =
{ requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-update-gc-roots";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-update-gc-roots hydra-update-gc-roots";
User = "hydra";
Slice = "system-hydra.slice";
};
startAt = "2,14:15";
systemd.services.hydra-evaluator = {
wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
wants = [ "network-online.target" ];
after = [
"hydra-init.service"
"network.target"
"network-online.target"
];
path = with pkgs; [
hydra-package
nettools
jq
];
restartTriggers = [ hydraConf ];
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
};
serviceConfig = {
ExecStart = "@${hydra-package}/bin/hydra-evaluator hydra-evaluator";
User = "hydra";
Restart = "always";
WorkingDirectory = baseDir;
Slice = "system-hydra.slice";
};
};
systemd.services.hydra-send-stats =
{ wantedBy = [ "multi-user.target" ];
after = [ "hydra-init.service" ];
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-send-stats";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-send-stats hydra-send-stats";
User = "hydra";
Slice = "system-hydra.slice";
};
systemd.services.hydra-update-gc-roots = {
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-update-gc-roots";
};
serviceConfig = {
ExecStart = "@${hydra-package}/bin/hydra-update-gc-roots hydra-update-gc-roots";
User = "hydra";
Slice = "system-hydra.slice";
};
startAt = "2,14:15";
};
systemd.services.hydra-notify =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
restartTriggers = [ hydraConf ];
path = [ pkgs.zstd ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner";
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-notify hydra-notify";
# FIXME: run this under a less privileged user?
User = "hydra-queue-runner";
Restart = "always";
RestartSec = 5;
Slice = "system-hydra.slice";
};
systemd.services.hydra-send-stats = {
wantedBy = [ "multi-user.target" ];
after = [ "hydra-init.service" ];
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-send-stats";
};
serviceConfig = {
ExecStart = "@${hydra-package}/bin/hydra-send-stats hydra-send-stats";
User = "hydra";
Slice = "system-hydra.slice";
};
};
systemd.services.hydra-notify = {
wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
restartTriggers = [ hydraConf ];
path = [ pkgs.zstd ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner";
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
};
serviceConfig = {
ExecStart = "@${hydra-package}/bin/hydra-notify hydra-notify";
# FIXME: run this under a less privileged user?
User = "hydra-queue-runner";
Restart = "always";
RestartSec = 5;
Slice = "system-hydra.slice";
};
};
# If there is less than a certain amount of free disk space, stop
# the queue/evaluator to prevent builds from failing or aborting.
systemd.services.hydra-check-space =
{ script =
''
if [ $(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store))) -lt $((${toString cfg.minimumDiskFree} * 1024**3)) ]; then
echo "stopping Hydra queue runner due to lack of free space..."
systemctl stop hydra-queue-runner
fi
if [ $(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store))) -lt $((${toString cfg.minimumDiskFreeEvaluator} * 1024**3)) ]; then
echo "stopping Hydra evaluator due to lack of free space..."
systemctl stop hydra-evaluator
fi
'';
startAt = "*:0/5";
serviceConfig.Slice = "system-hydra.slice";
};
systemd.services.hydra-check-space = {
script = ''
if [ $(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store))) -lt $((${toString cfg.minimumDiskFree} * 1024**3)) ]; then
echo "stopping Hydra queue runner due to lack of free space..."
systemctl stop hydra-queue-runner
fi
if [ $(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store))) -lt $((${toString cfg.minimumDiskFreeEvaluator} * 1024**3)) ]; then
echo "stopping Hydra evaluator due to lack of free space..."
systemctl stop hydra-evaluator
fi
'';
startAt = "*:0/5";
serviceConfig.Slice = "system-hydra.slice";
};
# Periodically compress build logs. The queue runner compresses
# logs automatically after a step finishes, but this doesn't work
# if the queue runner is stopped prematurely.
systemd.services.hydra-compress-logs =
{ path = [ pkgs.bzip2 pkgs.zstd ];
script =
''
set -eou pipefail
compression=$(sed -nr 's/compress_build_logs_compression = ()/\1/p' ${baseDir}/hydra.conf)
if [[ $compression == "" || $compression == bzip2 ]]; then
compressionCmd=(bzip2)
elif [[ $compression == zstd ]]; then
compressionCmd=(zstd --rm)
fi
find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c -print0 | xargs -0 -r "''${compressionCmd[@]}" --force --quiet
'';
startAt = "Sun 01:45";
serviceConfig.Slice = "system-hydra.slice";
};
systemd.services.hydra-compress-logs = {
path = [
pkgs.bzip2
pkgs.zstd
];
script = ''
set -eou pipefail
compression=$(sed -nr 's/compress_build_logs_compression = ()/\1/p' ${baseDir}/hydra.conf)
if [[ $compression == "" || $compression == bzip2 ]]; then
compressionCmd=(bzip2)
elif [[ $compression == zstd ]]; then
compressionCmd=(zstd --rm)
fi
find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c -print0 | xargs -0 -r "''${compressionCmd[@]}" --force --quiet
'';
startAt = "Sun 01:45";
serviceConfig.Slice = "system-hydra.slice";
};
services.postgresql.enable = lib.mkIf haveLocalDB true;
services.postgresql.identMap = lib.optionalString haveLocalDB
''
hydra-users hydra hydra
hydra-users hydra-queue-runner hydra
hydra-users hydra-www hydra
hydra-users root hydra
# The postgres user is used to create the pg_trgm extension for the hydra database
hydra-users postgres postgres
'';
services.postgresql.identMap = lib.optionalString haveLocalDB ''
hydra-users hydra hydra
hydra-users hydra-queue-runner hydra
hydra-users hydra-www hydra
hydra-users root hydra
# The postgres user is used to create the pg_trgm extension for the hydra database
hydra-users postgres postgres
'';
services.postgresql.authentication = lib.optionalString haveLocalDB
''
local hydra all ident map=hydra-users
'';
services.postgresql.authentication = lib.optionalString haveLocalDB ''
local hydra all ident map=hydra-users
'';
};

View file

@ -37,10 +37,13 @@ let
# 2. the module configuration
# 3. the extraConfigFiles from the module options
# 4. the locally writable config file, which couchdb itself writes to
configFiles = [
"${cfg.package}/etc/default.ini"
optionsConfigFile
] ++ cfg.extraConfigFiles ++ [ cfg.configFile ];
configFiles =
[
"${cfg.package}/etc/default.ini"
optionsConfigFile
]
++ cfg.extraConfigFiles
++ [ cfg.configFile ];
executable = "${cfg.package}/bin/couchdb";
in
{

View file

@ -1,10 +1,18 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.ferretdb;
in
{
meta.maintainers = with lib.maintainers; [ julienmalka camillemndn ];
meta.maintainers = with lib.maintainers; [
julienmalka
camillemndn
];
options = {
services.ferretdb = {
@ -22,8 +30,11 @@ in
type = lib.types.submodule {
freeformType = with lib.types; attrsOf str;
options = {
FERRETDB_HANDLER = lib.mkOption {
type = lib.types.enum [ "sqlite" "pg" ];
FERRETDB_HANDLER = lib.mkOption {
type = lib.types.enum [
"sqlite"
"pg"
];
default = "sqlite";
description = "Backend handler";
};
@ -41,7 +52,10 @@ in
};
FERRETDB_TELEMETRY = lib.mkOption {
type = lib.types.enum [ "enable" "disable" ];
type = lib.types.enum [
"enable"
"disable"
];
default = "disable";
description = ''
Enable or disable basic telemetry.
@ -64,38 +78,37 @@ in
};
};
config = lib.mkIf cfg.enable
{
services.ferretdb.settings = { };
config = lib.mkIf cfg.enable {
services.ferretdb.settings = { };
systemd.services.ferretdb = {
description = "FerretDB";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = cfg.settings;
serviceConfig = {
Type = "simple";
StateDirectory = "ferretdb";
WorkingDirectory = "/var/lib/ferretdb";
ExecStart = "${cfg.package}/bin/ferretdb";
Restart = "on-failure";
ProtectHome = true;
ProtectSystem = "strict";
PrivateTmp = true;
PrivateDevices = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
NoNewPrivileges = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RemoveIPC = true;
PrivateMounts = true;
DynamicUser = true;
};
systemd.services.ferretdb = {
description = "FerretDB";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = cfg.settings;
serviceConfig = {
Type = "simple";
StateDirectory = "ferretdb";
WorkingDirectory = "/var/lib/ferretdb";
ExecStart = "${cfg.package}/bin/ferretdb";
Restart = "on-failure";
ProtectHome = true;
ProtectSystem = "strict";
PrivateTmp = true;
PrivateDevices = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
NoNewPrivileges = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RemoveIPC = true;
PrivateMounts = true;
DynamicUser = true;
};
};
};
}

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.influxdb;
@ -55,24 +60,32 @@ let
https-enabled = false;
};
graphite = [{
enabled = false;
}];
graphite = [
{
enabled = false;
}
];
udp = [{
enabled = false;
}];
udp = [
{
enabled = false;
}
];
collectd = [{
enabled = false;
typesdb = "${pkgs.collectd-data}/share/collectd/types.db";
database = "collectd_db";
bind-address = ":25826";
}];
collectd = [
{
enabled = false;
typesdb = "${pkgs.collectd-data}/share/collectd/types.db";
database = "collectd_db";
bind-address = ":25826";
}
];
opentsdb = [{
enabled = false;
}];
opentsdb = [
{
enabled = false;
}
];
continuous_queries = {
enabled = true;
@ -93,7 +106,7 @@ let
};
} cfg.extraConfig;
configFile = (pkgs.formats.toml {}).generate "config.toml" configOptions;
configFile = (pkgs.formats.toml { }).generate "config.toml" configOptions;
in
{
@ -130,14 +143,13 @@ in
};
extraConfig = lib.mkOption {
default = {};
default = { };
description = "Extra configuration options for influxdb";
type = lib.types.attrs;
};
};
};
###### implementation
config = lib.mkIf config.services.influxdb.enable {
@ -159,7 +171,9 @@ in
postStart =
let
scheme = if configOptions.http.https-enabled then "-k https" else "http";
bindAddr = (ba: if lib.hasPrefix ":" ba then "127.0.0.1${ba}" else "${ba}")(toString configOptions.http.bind-address);
bindAddr = (ba: if lib.hasPrefix ":" ba then "127.0.0.1${ba}" else "${ba}") (
toString configOptions.http.bind-address
);
in
lib.mkBefore ''
until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${scheme}://${bindAddr}/ping; do

View file

@ -1,49 +1,137 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.redis;
mkValueString = value:
if value == true then "yes"
else if value == false then "no"
else lib.generators.mkValueStringDefault { } value;
mkValueString =
value:
if value == true then
"yes"
else if value == false then
"no"
else
lib.generators.mkValueStringDefault { } value;
redisConfig = settings: pkgs.writeText "redis.conf" (lib.generators.toKeyValue {
listsAsDuplicateKeys = true;
mkKeyValue = lib.generators.mkKeyValueDefault { inherit mkValueString; } " ";
} settings);
redisConfig =
settings:
pkgs.writeText "redis.conf" (
lib.generators.toKeyValue {
listsAsDuplicateKeys = true;
mkKeyValue = lib.generators.mkKeyValueDefault { inherit mkValueString; } " ";
} settings
);
redisName = name: "redis" + lib.optionalString (name != "") ("-"+name);
redisName = name: "redis" + lib.optionalString (name != "") ("-" + name);
enabledServers = lib.filterAttrs (name: conf: conf.enable) config.services.redis.servers;
in {
in
{
imports = [
(lib.mkRemovedOptionModule [ "services" "redis" "user" ] "The redis module now is hardcoded to the redis user.")
(lib.mkRemovedOptionModule [ "services" "redis" "dbpath" ] "The redis module now uses /var/lib/redis as data directory.")
(lib.mkRemovedOptionModule [ "services" "redis" "dbFilename" ] "The redis module now uses /var/lib/redis/dump.rdb as database dump location.")
(lib.mkRemovedOptionModule [ "services" "redis" "appendOnlyFilename" ] "This option was never used.")
(lib.mkRemovedOptionModule [
"services"
"redis"
"user"
] "The redis module now is hardcoded to the redis user.")
(lib.mkRemovedOptionModule [
"services"
"redis"
"dbpath"
] "The redis module now uses /var/lib/redis as data directory.")
(lib.mkRemovedOptionModule [
"services"
"redis"
"dbFilename"
] "The redis module now uses /var/lib/redis/dump.rdb as database dump location.")
(lib.mkRemovedOptionModule [
"services"
"redis"
"appendOnlyFilename"
] "This option was never used.")
(lib.mkRemovedOptionModule [ "services" "redis" "pidFile" ] "This option was removed.")
(lib.mkRemovedOptionModule [ "services" "redis" "extraConfig" ] "Use services.redis.servers.*.settings instead.")
(lib.mkRenamedOptionModule [ "services" "redis" "enable"] [ "services" "redis" "servers" "" "enable" ])
(lib.mkRenamedOptionModule [ "services" "redis" "port"] [ "services" "redis" "servers" "" "port" ])
(lib.mkRenamedOptionModule [ "services" "redis" "openFirewall"] [ "services" "redis" "servers" "" "openFirewall" ])
(lib.mkRenamedOptionModule [ "services" "redis" "bind"] [ "services" "redis" "servers" "" "bind" ])
(lib.mkRenamedOptionModule [ "services" "redis" "unixSocket"] [ "services" "redis" "servers" "" "unixSocket" ])
(lib.mkRenamedOptionModule [ "services" "redis" "unixSocketPerm"] [ "services" "redis" "servers" "" "unixSocketPerm" ])
(lib.mkRenamedOptionModule [ "services" "redis" "logLevel"] [ "services" "redis" "servers" "" "logLevel" ])
(lib.mkRenamedOptionModule [ "services" "redis" "logfile"] [ "services" "redis" "servers" "" "logfile" ])
(lib.mkRenamedOptionModule [ "services" "redis" "syslog"] [ "services" "redis" "servers" "" "syslog" ])
(lib.mkRenamedOptionModule [ "services" "redis" "databases"] [ "services" "redis" "servers" "" "databases" ])
(lib.mkRenamedOptionModule [ "services" "redis" "maxclients"] [ "services" "redis" "servers" "" "maxclients" ])
(lib.mkRenamedOptionModule [ "services" "redis" "save"] [ "services" "redis" "servers" "" "save" ])
(lib.mkRenamedOptionModule [ "services" "redis" "slaveOf"] [ "services" "redis" "servers" "" "slaveOf" ])
(lib.mkRenamedOptionModule [ "services" "redis" "masterAuth"] [ "services" "redis" "servers" "" "masterAuth" ])
(lib.mkRenamedOptionModule [ "services" "redis" "requirePass"] [ "services" "redis" "servers" "" "requirePass" ])
(lib.mkRenamedOptionModule [ "services" "redis" "requirePassFile"] [ "services" "redis" "servers" "" "requirePassFile" ])
(lib.mkRenamedOptionModule [ "services" "redis" "appendOnly"] [ "services" "redis" "servers" "" "appendOnly" ])
(lib.mkRenamedOptionModule [ "services" "redis" "appendFsync"] [ "services" "redis" "servers" "" "appendFsync" ])
(lib.mkRenamedOptionModule [ "services" "redis" "slowLogLogSlowerThan"] [ "services" "redis" "servers" "" "slowLogLogSlowerThan" ])
(lib.mkRenamedOptionModule [ "services" "redis" "slowLogMaxLen"] [ "services" "redis" "servers" "" "slowLogMaxLen" ])
(lib.mkRenamedOptionModule [ "services" "redis" "settings"] [ "services" "redis" "servers" "" "settings" ])
(lib.mkRemovedOptionModule [
"services"
"redis"
"extraConfig"
] "Use services.redis.servers.*.settings instead.")
(lib.mkRenamedOptionModule
[ "services" "redis" "enable" ]
[ "services" "redis" "servers" "" "enable" ]
)
(lib.mkRenamedOptionModule [ "services" "redis" "port" ] [ "services" "redis" "servers" "" "port" ])
(lib.mkRenamedOptionModule
[ "services" "redis" "openFirewall" ]
[ "services" "redis" "servers" "" "openFirewall" ]
)
(lib.mkRenamedOptionModule [ "services" "redis" "bind" ] [ "services" "redis" "servers" "" "bind" ])
(lib.mkRenamedOptionModule
[ "services" "redis" "unixSocket" ]
[ "services" "redis" "servers" "" "unixSocket" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "unixSocketPerm" ]
[ "services" "redis" "servers" "" "unixSocketPerm" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "logLevel" ]
[ "services" "redis" "servers" "" "logLevel" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "logfile" ]
[ "services" "redis" "servers" "" "logfile" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "syslog" ]
[ "services" "redis" "servers" "" "syslog" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "databases" ]
[ "services" "redis" "servers" "" "databases" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "maxclients" ]
[ "services" "redis" "servers" "" "maxclients" ]
)
(lib.mkRenamedOptionModule [ "services" "redis" "save" ] [ "services" "redis" "servers" "" "save" ])
(lib.mkRenamedOptionModule
[ "services" "redis" "slaveOf" ]
[ "services" "redis" "servers" "" "slaveOf" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "masterAuth" ]
[ "services" "redis" "servers" "" "masterAuth" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "requirePass" ]
[ "services" "redis" "servers" "" "requirePass" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "requirePassFile" ]
[ "services" "redis" "servers" "" "requirePassFile" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "appendOnly" ]
[ "services" "redis" "servers" "" "appendOnly" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "appendFsync" ]
[ "services" "redis" "servers" "" "appendFsync" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "slowLogLogSlowerThan" ]
[ "services" "redis" "servers" "" "slowLogLogSlowerThan" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "slowLogMaxLen" ]
[ "services" "redis" "servers" "" "slowLogMaxLen" ]
)
(lib.mkRenamedOptionModule
[ "services" "redis" "settings" ]
[ "services" "redis" "servers" "" "settings" ]
)
];
###### interface
@ -53,365 +141,435 @@ in {
services.redis = {
package = lib.mkPackageOption pkgs "redis" { };
vmOverCommit = lib.mkEnableOption ''
set `vm.overcommit_memory` sysctl to 1
(Suggested for Background Saving: <https://redis.io/docs/get-started/faq/>)
'' // { default = true; };
vmOverCommit =
lib.mkEnableOption ''
set `vm.overcommit_memory` sysctl to 1
(Suggested for Background Saving: <https://redis.io/docs/get-started/faq/>)
''
// {
default = true;
};
servers = lib.mkOption {
type = with lib.types; attrsOf (submodule ({ config, name, ... }: {
options = {
enable = lib.mkEnableOption "Redis server";
user = lib.mkOption {
type = types.str;
default = redisName name;
defaultText = lib.literalExpression ''
if name == "" then "redis" else "redis-''${name}"
'';
description = ''
User account under which this instance of redis-server runs.
::: {.note}
If left as the default value this user will automatically be
created on system activation, otherwise you are responsible for
ensuring the user exists before the redis service starts.
'';
};
group = lib.mkOption {
type = types.str;
default = config.user;
defaultText = lib.literalExpression "config.user";
description = ''
Group account under which this instance of redis-server runs.
::: {.note}
If left as the default value this group will automatically be
created on system activation, otherwise you are responsible for
ensuring the group exists before the redis service starts.
'';
};
port = lib.mkOption {
type = types.port;
default = if name == "" then 6379 else 0;
defaultText = lib.literalExpression ''if name == "" then 6379 else 0'';
description = ''
The TCP port to accept connections.
If port 0 is specified Redis will not listen on a TCP socket.
'';
};
openFirewall = lib.mkOption {
type = types.bool;
default = false;
description = ''
Whether to open ports in the firewall for the server.
'';
};
extraParams = lib.mkOption {
type = with types; listOf str;
default = [];
description = "Extra parameters to append to redis-server invocation";
example = [ "--sentinel" ];
};
bind = lib.mkOption {
type = with types; nullOr str;
default = "127.0.0.1";
description = ''
The IP interface to bind to.
`null` means "all interfaces".
'';
example = "192.0.2.1";
};
unixSocket = lib.mkOption {
type = with types; nullOr path;
default = "/run/${redisName name}/redis.sock";
defaultText = lib.literalExpression ''
if name == "" then "/run/redis/redis.sock" else "/run/redis-''${name}/redis.sock"
'';
description = "The path to the socket to bind to.";
};
unixSocketPerm = lib.mkOption {
type = types.int;
default = 660;
description = "Change permissions for the socket";
example = 600;
};
logLevel = lib.mkOption {
type = types.str;
default = "notice"; # debug, verbose, notice, warning
example = "debug";
description = "Specify the server verbosity level, options: debug, verbose, notice, warning.";
};
logfile = lib.mkOption {
type = types.str;
default = "/dev/null";
description = "Specify the log file name. Also 'stdout' can be used to force Redis to log on the standard output.";
example = "/var/log/redis.log";
};
syslog = lib.mkOption {
type = types.bool;
default = true;
description = "Enable logging to the system logger.";
};
databases = lib.mkOption {
type = types.int;
default = 16;
description = "Set the number of databases.";
};
maxclients = lib.mkOption {
type = types.int;
default = 10000;
description = "Set the max number of connected clients at the same time.";
};
save = lib.mkOption {
type = with types; listOf (listOf int);
default = [ [900 1] [300 10] [60 10000] ];
description = ''
The schedule in which data is persisted to disk, represented as a list of lists where the first element represent the amount of seconds and the second the number of changes.
If set to the empty list (`[]`) then RDB persistence will be disabled (useful if you are using AOF or don't want any persistence).
'';
};
slaveOf = lib.mkOption {
type = with types; nullOr (submodule ({ ... }: {
type =
with lib.types;
attrsOf (
submodule (
{ config, name, ... }:
{
options = {
ip = lib.mkOption {
type = str;
description = "IP of the Redis master";
example = "192.168.1.100";
enable = lib.mkEnableOption "Redis server";
user = lib.mkOption {
type = types.str;
default = redisName name;
defaultText = lib.literalExpression ''
if name == "" then "redis" else "redis-''${name}"
'';
description = ''
User account under which this instance of redis-server runs.
::: {.note}
If left as the default value this user will automatically be
created on system activation, otherwise you are responsible for
ensuring the user exists before the redis service starts.
'';
};
group = lib.mkOption {
type = types.str;
default = config.user;
defaultText = lib.literalExpression "config.user";
description = ''
Group account under which this instance of redis-server runs.
::: {.note}
If left as the default value this group will automatically be
created on system activation, otherwise you are responsible for
ensuring the group exists before the redis service starts.
'';
};
port = lib.mkOption {
type = port;
description = "port of the Redis master";
default = 6379;
type = types.port;
default = if name == "" then 6379 else 0;
defaultText = lib.literalExpression ''if name == "" then 6379 else 0'';
description = ''
The TCP port to accept connections.
If port 0 is specified Redis will not listen on a TCP socket.
'';
};
openFirewall = lib.mkOption {
type = types.bool;
default = false;
description = ''
Whether to open ports in the firewall for the server.
'';
};
extraParams = lib.mkOption {
type = with types; listOf str;
default = [ ];
description = "Extra parameters to append to redis-server invocation";
example = [ "--sentinel" ];
};
bind = lib.mkOption {
type = with types; nullOr str;
default = "127.0.0.1";
description = ''
The IP interface to bind to.
`null` means "all interfaces".
'';
example = "192.0.2.1";
};
unixSocket = lib.mkOption {
type = with types; nullOr path;
default = "/run/${redisName name}/redis.sock";
defaultText = lib.literalExpression ''
if name == "" then "/run/redis/redis.sock" else "/run/redis-''${name}/redis.sock"
'';
description = "The path to the socket to bind to.";
};
unixSocketPerm = lib.mkOption {
type = types.int;
default = 660;
description = "Change permissions for the socket";
example = 600;
};
logLevel = lib.mkOption {
type = types.str;
default = "notice"; # debug, verbose, notice, warning
example = "debug";
description = "Specify the server verbosity level, options: debug, verbose, notice, warning.";
};
logfile = lib.mkOption {
type = types.str;
default = "/dev/null";
description = "Specify the log file name. Also 'stdout' can be used to force Redis to log on the standard output.";
example = "/var/log/redis.log";
};
syslog = lib.mkOption {
type = types.bool;
default = true;
description = "Enable logging to the system logger.";
};
databases = lib.mkOption {
type = types.int;
default = 16;
description = "Set the number of databases.";
};
maxclients = lib.mkOption {
type = types.int;
default = 10000;
description = "Set the max number of connected clients at the same time.";
};
save = lib.mkOption {
type = with types; listOf (listOf int);
default = [
[
900
1
]
[
300
10
]
[
60
10000
]
];
description = ''
The schedule in which data is persisted to disk, represented as a list of lists where the first element represent the amount of seconds and the second the number of changes.
If set to the empty list (`[]`) then RDB persistence will be disabled (useful if you are using AOF or don't want any persistence).
'';
};
slaveOf = lib.mkOption {
type =
with types;
nullOr (
submodule (
{ ... }:
{
options = {
ip = lib.mkOption {
type = str;
description = "IP of the Redis master";
example = "192.168.1.100";
};
port = lib.mkOption {
type = port;
description = "port of the Redis master";
default = 6379;
};
};
}
)
);
default = null;
description = "IP and port to which this redis instance acts as a slave.";
example = {
ip = "192.168.1.100";
port = 6379;
};
};
masterAuth = lib.mkOption {
type = with types; nullOr str;
default = null;
description = ''
If the master is password protected (using the requirePass configuration)
it is possible to tell the slave to authenticate before starting the replication synchronization
process, otherwise the master will refuse the slave request.
(STORED PLAIN TEXT, WORLD-READABLE IN NIX STORE)'';
};
requirePass = lib.mkOption {
type = with types; nullOr str;
default = null;
description = ''
Password for database (STORED PLAIN TEXT, WORLD-READABLE IN NIX STORE).
Use requirePassFile to store it outside of the nix store in a dedicated file.
'';
example = "letmein!";
};
requirePassFile = lib.mkOption {
type = with types; nullOr path;
default = null;
description = "File with password for the database.";
example = "/run/keys/redis-password";
};
appendOnly = lib.mkOption {
type = types.bool;
default = false;
description = "By default data is only periodically persisted to disk, enable this option to use an append-only file for improved persistence.";
};
appendFsync = lib.mkOption {
type = types.str;
default = "everysec"; # no, always, everysec
description = "How often to fsync the append-only log, options: no, always, everysec.";
};
slowLogLogSlowerThan = lib.mkOption {
type = types.int;
default = 10000;
description = "Log queries whose execution take longer than X in milliseconds.";
example = 1000;
};
slowLogMaxLen = lib.mkOption {
type = types.int;
default = 128;
description = "Maximum number of items to keep in slow log.";
};
settings = lib.mkOption {
# TODO: this should be converted to freeformType
type =
with types;
attrsOf (oneOf [
bool
int
str
(listOf str)
]);
default = { };
description = ''
Redis configuration. Refer to
<https://redis.io/topics/config>
for details on supported values.
'';
example = lib.literalExpression ''
{
loadmodule = [ "/path/to/my_module.so" "/path/to/other_module.so" ];
}
'';
};
};
}));
default = null;
description = "IP and port to which this redis instance acts as a slave.";
example = { ip = "192.168.1.100"; port = 6379; };
};
masterAuth = lib.mkOption {
type = with types; nullOr str;
default = null;
description = ''
If the master is password protected (using the requirePass configuration)
it is possible to tell the slave to authenticate before starting the replication synchronization
process, otherwise the master will refuse the slave request.
(STORED PLAIN TEXT, WORLD-READABLE IN NIX STORE)'';
};
requirePass = lib.mkOption {
type = with types; nullOr str;
default = null;
description = ''
Password for database (STORED PLAIN TEXT, WORLD-READABLE IN NIX STORE).
Use requirePassFile to store it outside of the nix store in a dedicated file.
'';
example = "letmein!";
};
requirePassFile = lib.mkOption {
type = with types; nullOr path;
default = null;
description = "File with password for the database.";
example = "/run/keys/redis-password";
};
appendOnly = lib.mkOption {
type = types.bool;
default = false;
description = "By default data is only periodically persisted to disk, enable this option to use an append-only file for improved persistence.";
};
appendFsync = lib.mkOption {
type = types.str;
default = "everysec"; # no, always, everysec
description = "How often to fsync the append-only log, options: no, always, everysec.";
};
slowLogLogSlowerThan = lib.mkOption {
type = types.int;
default = 10000;
description = "Log queries whose execution take longer than X in milliseconds.";
example = 1000;
};
slowLogMaxLen = lib.mkOption {
type = types.int;
default = 128;
description = "Maximum number of items to keep in slow log.";
};
settings = lib.mkOption {
# TODO: this should be converted to freeformType
type = with types; attrsOf (oneOf [ bool int str (listOf str) ]);
default = {};
description = ''
Redis configuration. Refer to
<https://redis.io/topics/config>
for details on supported values.
'';
example = lib.literalExpression ''
{
loadmodule = [ "/path/to/my_module.so" "/path/to/other_module.so" ];
}
'';
};
};
config.settings = lib.mkMerge [
{
inherit (config) port logfile databases maxclients appendOnly;
daemonize = false;
supervised = "systemd";
loglevel = config.logLevel;
syslog-enabled = config.syslog;
save = if config.save == []
then ''""'' # Disable saving with `save = ""`
else map
(d: "${toString (builtins.elemAt d 0)} ${toString (builtins.elemAt d 1)}")
config.save;
dbfilename = "dump.rdb";
dir = "/var/lib/${redisName name}";
appendfsync = config.appendFsync;
slowlog-log-slower-than = config.slowLogLogSlowerThan;
slowlog-max-len = config.slowLogMaxLen;
}
(lib.mkIf (config.bind != null) { inherit (config) bind; })
(lib.mkIf (config.unixSocket != null) {
unixsocket = config.unixSocket;
unixsocketperm = toString config.unixSocketPerm;
})
(lib.mkIf (config.slaveOf != null) { slaveof = "${config.slaveOf.ip} ${toString config.slaveOf.port}"; })
(lib.mkIf (config.masterAuth != null) { masterauth = config.masterAuth; })
(lib.mkIf (config.requirePass != null) { requirepass = config.requirePass; })
];
}));
config.settings = lib.mkMerge [
{
inherit (config)
port
logfile
databases
maxclients
appendOnly
;
daemonize = false;
supervised = "systemd";
loglevel = config.logLevel;
syslog-enabled = config.syslog;
save =
if config.save == [ ] then
''""'' # Disable saving with `save = ""`
else
map (d: "${toString (builtins.elemAt d 0)} ${toString (builtins.elemAt d 1)}") config.save;
dbfilename = "dump.rdb";
dir = "/var/lib/${redisName name}";
appendfsync = config.appendFsync;
slowlog-log-slower-than = config.slowLogLogSlowerThan;
slowlog-max-len = config.slowLogMaxLen;
}
(lib.mkIf (config.bind != null) { inherit (config) bind; })
(lib.mkIf (config.unixSocket != null) {
unixsocket = config.unixSocket;
unixsocketperm = toString config.unixSocketPerm;
})
(lib.mkIf (config.slaveOf != null) {
slaveof = "${config.slaveOf.ip} ${toString config.slaveOf.port}";
})
(lib.mkIf (config.masterAuth != null) { masterauth = config.masterAuth; })
(lib.mkIf (config.requirePass != null) { requirepass = config.requirePass; })
];
}
)
);
description = "Configuration of multiple `redis-server` instances.";
default = {};
default = { };
};
};
};
###### implementation
config = lib.mkIf (enabledServers != {}) {
config = lib.mkIf (enabledServers != { }) {
assertions = lib.attrValues (lib.mapAttrs (name: conf: {
assertion = conf.requirePass != null -> conf.requirePassFile == null;
message = ''
You can only set one services.redis.servers.${name}.requirePass
or services.redis.servers.${name}.requirePassFile
'';
}) enabledServers);
assertions = lib.attrValues (
lib.mapAttrs (name: conf: {
assertion = conf.requirePass != null -> conf.requirePassFile == null;
message = ''
You can only set one services.redis.servers.${name}.requirePass
or services.redis.servers.${name}.requirePassFile
'';
}) enabledServers
);
boot.kernel.sysctl = lib.mkIf cfg.vmOverCommit {
"vm.overcommit_memory" = "1";
};
networking.firewall.allowedTCPPorts = lib.concatMap (conf:
lib.optional conf.openFirewall conf.port
networking.firewall.allowedTCPPorts = lib.concatMap (
conf: lib.optional conf.openFirewall conf.port
) (lib.attrValues enabledServers);
environment.systemPackages = [ cfg.package ];
users.users = lib.mapAttrs' (name: conf: lib.nameValuePair (redisName name) {
description = "System user for the redis-server instance ${name}";
isSystemUser = true;
group = redisName name;
}) enabledServers;
users.groups = lib.mapAttrs' (name: conf: lib.nameValuePair (redisName name) {
}) enabledServers;
users.users = lib.mapAttrs' (
name: conf:
lib.nameValuePair (redisName name) {
description = "System user for the redis-server instance ${name}";
isSystemUser = true;
group = redisName name;
}
) enabledServers;
users.groups = lib.mapAttrs' (
name: conf:
lib.nameValuePair (redisName name) {
}
) enabledServers;
systemd.services = lib.mapAttrs' (name: conf: lib.nameValuePair (redisName name) {
description = "Redis Server - ${redisName name}";
systemd.services = lib.mapAttrs' (
name: conf:
lib.nameValuePair (redisName name) {
description = "Redis Server - ${redisName name}";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${cfg.package}/bin/${cfg.package.serverBin or "redis-server"} /var/lib/${redisName name}/redis.conf ${lib.escapeShellArgs conf.extraParams}";
ExecStartPre = "+"+pkgs.writeShellScript "${redisName name}-prep-conf" (let
redisConfVar = "/var/lib/${redisName name}/redis.conf";
redisConfRun = "/run/${redisName name}/nixos.conf";
redisConfStore = redisConfig conf.settings;
in ''
touch "${redisConfVar}" "${redisConfRun}"
chown '${conf.user}':'${conf.group}' "${redisConfVar}" "${redisConfRun}"
chmod 0600 "${redisConfVar}" "${redisConfRun}"
if [ ! -s ${redisConfVar} ]; then
echo 'include "${redisConfRun}"' > "${redisConfVar}"
fi
echo 'include "${redisConfStore}"' > "${redisConfRun}"
${lib.optionalString (conf.requirePassFile != null) ''
{
echo -n "requirepass "
cat ${lib.escapeShellArg conf.requirePassFile}
} >> "${redisConfRun}"
''}
'');
Type = "notify";
# User and group
User = conf.user;
Group = conf.group;
# Runtime directory and mode
RuntimeDirectory = redisName name;
RuntimeDirectoryMode = "0750";
# State directory and mode
StateDirectory = redisName name;
StateDirectoryMode = "0700";
# Access write directories
UMask = "0077";
# Capabilities
CapabilityBoundingSet = "";
# Security
NoNewPrivileges = true;
# Process Properties
LimitNOFILE = lib.mkDefault "${toString (conf.maxclients + 32)}";
# Sandboxing
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
PrivateUsers = true;
ProtectClock = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
RestrictNamespaces = true;
LockPersonality = true;
# we need to disable MemoryDenyWriteExecute for keydb
MemoryDenyWriteExecute = cfg.package.pname != "keydb";
RestrictRealtime = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = "~@cpu-emulation @debug @keyring @memlock @mount @obsolete @privileged @resources @setuid";
};
}) enabledServers;
serviceConfig = {
ExecStart = "${cfg.package}/bin/${
cfg.package.serverBin or "redis-server"
} /var/lib/${redisName name}/redis.conf ${lib.escapeShellArgs conf.extraParams}";
ExecStartPre =
"+"
+ pkgs.writeShellScript "${redisName name}-prep-conf" (
let
redisConfVar = "/var/lib/${redisName name}/redis.conf";
redisConfRun = "/run/${redisName name}/nixos.conf";
redisConfStore = redisConfig conf.settings;
in
''
touch "${redisConfVar}" "${redisConfRun}"
chown '${conf.user}':'${conf.group}' "${redisConfVar}" "${redisConfRun}"
chmod 0600 "${redisConfVar}" "${redisConfRun}"
if [ ! -s ${redisConfVar} ]; then
echo 'include "${redisConfRun}"' > "${redisConfVar}"
fi
echo 'include "${redisConfStore}"' > "${redisConfRun}"
${lib.optionalString (conf.requirePassFile != null) ''
{
echo -n "requirepass "
cat ${lib.escapeShellArg conf.requirePassFile}
} >> "${redisConfRun}"
''}
''
);
Type = "notify";
# User and group
User = conf.user;
Group = conf.group;
# Runtime directory and mode
RuntimeDirectory = redisName name;
RuntimeDirectoryMode = "0750";
# State directory and mode
StateDirectory = redisName name;
StateDirectoryMode = "0700";
# Access write directories
UMask = "0077";
# Capabilities
CapabilityBoundingSet = "";
# Security
NoNewPrivileges = true;
# Process Properties
LimitNOFILE = lib.mkDefault "${toString (conf.maxclients + 32)}";
# Sandboxing
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
PrivateUsers = true;
ProtectClock = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
];
RestrictNamespaces = true;
LockPersonality = true;
# we need to disable MemoryDenyWriteExecute for keydb
MemoryDenyWriteExecute = cfg.package.pname != "keydb";
RestrictRealtime = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = "~@cpu-emulation @debug @keyring @memlock @mount @obsolete @privileged @resources @setuid";
};
}
) enabledServers;
};
}

View file

@ -1,5 +1,10 @@
# Bamf
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
{
meta = with lib; {
maintainers = with lib.maintainers; [ ];

View file

@ -1,5 +1,10 @@
# rygel service.
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let

View file

@ -1,5 +1,10 @@
# Accounts-SSO gSignOn daemon
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
package = pkgs.gsignond.override { plugins = config.services.gsignond.plugins; };
in
@ -24,7 +29,7 @@ in
plugins = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = [];
default = [ ];
description = ''
What plugins to use with the gSignOn daemon.
'';

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
inherit (builtins) concatMap;
@ -8,7 +13,13 @@ let
inherit (lib.modules) mkIf;
inherit (lib.options) literalExpression mkOption;
inherit (lib.strings) concatStringsSep makeSearchPath;
inherit (lib.types) bool listOf attrsOf package lines;
inherit (lib.types)
bool
listOf
attrsOf
package
lines
;
inherit (lib.path) subpath;
pwCfg = config.services.pipewire;
@ -17,24 +28,29 @@ let
json = pkgs.formats.json { };
configSectionsToConfFile = path: value:
pkgs.writeTextDir
path
(concatStringsSep "\n" (
mapAttrsToList
(section: content: "${section} = " + (builtins.toJSON content))
value
));
configSectionsToConfFile =
path: value:
pkgs.writeTextDir path (
concatStringsSep "\n" (
mapAttrsToList (section: content: "${section} = " + (builtins.toJSON content)) value
)
);
mapConfigToFiles = config:
mapAttrsToList
(name: value: configSectionsToConfFile "share/wireplumber/wireplumber.conf.d/${name}.conf" value)
config;
mapConfigToFiles =
config:
mapAttrsToList (
name: value: configSectionsToConfFile "share/wireplumber/wireplumber.conf.d/${name}.conf" value
) config;
mapScriptsToFiles = scripts:
mapAttrsToList
(relativePath: value: pkgs.writeTextDir (subpath.join ["share/wireplumber/scripts" relativePath]) value)
scripts;
mapScriptsToFiles =
scripts:
mapAttrsToList (
relativePath: value:
pkgs.writeTextDir (subpath.join [
"share/wireplumber/scripts"
relativePath
]) value
) scripts;
in
{
meta.maintainers = [ maintainers.k900 ];
@ -62,34 +78,34 @@ in
type = attrsOf (attrsOf json.type);
default = { };
example = literalExpression ''
{
"log-level-debug" = {
"context.properties" = {
# Output Debug log messages as opposed to only the default level (Notice)
"log.level" = "D";
{
"log-level-debug" = {
"context.properties" = {
# Output Debug log messages as opposed to only the default level (Notice)
"log.level" = "D";
};
};
};
"wh-1000xm3-ldac-hq" = {
"monitor.bluez.rules" = [
{
matches = [
{
# Match any bluetooth device with ids equal to that of a WH-1000XM3
"device.name" = "~bluez_card.*";
"device.product.id" = "0x0cd3";
"device.vendor.id" = "usb:054c";
}
];
actions = {
update-props = {
# Set quality to high quality instead of the default of auto
"bluez5.a2dp.ldac.quality" = "hq";
"wh-1000xm3-ldac-hq" = {
"monitor.bluez.rules" = [
{
matches = [
{
# Match any bluetooth device with ids equal to that of a WH-1000XM3
"device.name" = "~bluez_card.*";
"device.product.id" = "0x0cd3";
"device.vendor.id" = "usb:054c";
}
];
actions = {
update-props = {
# Set quality to high quality instead of the default of auto
"bluez5.a2dp.ldac.quality" = "hq";
};
};
};
}
];
};
}
}
];
};
}
'';
description = ''
Additional configuration for the WirePlumber daemon when run in
@ -169,16 +185,16 @@ in
type = listOf package;
default = [ ];
example = literalExpression ''
[
(pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/10-bluez.conf" '''
monitor.bluez.properties = {
bluez5.roles = [ a2dp_sink a2dp_source bap_sink bap_source hsp_hs hsp_ag hfp_hf hfp_ag ]
bluez5.codecs = [ sbc sbc_xq aac ]
bluez5.enable-sbc-xq = true
bluez5.hfphsp-backend = "native"
}
''')
]
[
(pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/10-bluez.conf" '''
monitor.bluez.properties = {
bluez5.roles = [ a2dp_sink a2dp_source bap_sink bap_source hsp_hs hsp_ag hfp_hf hfp_ag ]
bluez5.codecs = [ sbc sbc_xq aac ]
bluez5.enable-sbc-xq = true
bluez5.hfphsp-backend = "native"
}
''')
]
'';
description = ''
List of packages that provide WirePlumber configuration, in the form of
@ -231,8 +247,12 @@ in
pathsToLink = [ "/share/wireplumber/scripts" ];
};
configPackages = cfg.configPackages
++ [ extraConfigPkg extraScriptsPkg ]
configPackages =
cfg.configPackages
++ [
extraConfigPkg
extraScriptsPkg
]
++ optional (!pwUsedForAudio) pwNotForAudioConfigPkg;
configs = pkgs.buildEnv {
@ -241,14 +261,9 @@ in
pathsToLink = [ "/share/wireplumber" ];
};
requiredLv2Packages = flatten
(
concatMap
(p:
attrByPath [ "passthru" "requiredLv2Packages" ] [ ] p
)
configPackages
);
requiredLv2Packages = flatten (
concatMap (p: attrByPath [ "passthru" "requiredLv2Packages" ] [ ] p) configPackages
);
lv2Plugins = pkgs.buildEnv {
name = "wireplumber-lv2-plugins";
@ -280,12 +295,18 @@ in
# Make WirePlumber find our config/script files and lv2 plugins required by those
# (but also the configs/scripts shipped with WirePlumber)
XDG_DATA_DIRS = makeSearchPath "share" [ configs cfg.package ];
XDG_DATA_DIRS = makeSearchPath "share" [
configs
cfg.package
];
LV2_PATH = "${lv2Plugins}/lib/lv2";
};
systemd.user.services.wireplumber.environment = mkIf (!pwCfg.systemWide) {
XDG_DATA_DIRS = makeSearchPath "share" [ configs cfg.package ];
XDG_DATA_DIRS = makeSearchPath "share" [
configs
cfg.package
];
LV2_PATH = "${lv2Plugins}/lib/lv2";
};
};

View file

@ -1,148 +1,151 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.athens;
athensConfig = lib.flip lib.recursiveUpdate cfg.extraConfig (
{
GoBinary = "${cfg.goBinary}/bin/go";
GoEnv = cfg.goEnv;
GoBinaryEnvVars = lib.mapAttrsToList (k: v: "${k}=${v}") cfg.goBinaryEnvVars;
GoGetWorkers = cfg.goGetWorkers;
GoGetDir = cfg.goGetDir;
ProtocolWorkers = cfg.protocolWorkers;
LogLevel = cfg.logLevel;
CloudRuntime = cfg.cloudRuntime;
EnablePprof = cfg.enablePprof;
PprofPort = ":${toString cfg.pprofPort}";
FilterFile = cfg.filterFile;
RobotsFile = cfg.robotsFile;
Timeout = cfg.timeout;
StorageType = cfg.storageType;
TLSCertFile = cfg.tlsCertFile;
TLSKeyFile = cfg.tlsKeyFile;
Port = ":${toString cfg.port}";
UnixSocket = cfg.unixSocket;
GlobalEndpoint = cfg.globalEndpoint;
BasicAuthUser = cfg.basicAuthUser;
BasicAuthPass = cfg.basicAuthPass;
ForceSSL = cfg.forceSSL;
ValidatorHook = cfg.validatorHook;
PathPrefix = cfg.pathPrefix;
NETRCPath = cfg.netrcPath;
GithubToken = cfg.githubToken;
HGRCPath = cfg.hgrcPath;
TraceExporter = cfg.traceExporter;
StatsExporter = cfg.statsExporter;
SumDBs = cfg.sumDBs;
NoSumPatterns = cfg.noSumPatterns;
DownloadMode = cfg.downloadMode;
NetworkMode = cfg.networkMode;
DownloadURL = cfg.downloadURL;
SingleFlightType = cfg.singleFlightType;
IndexType = cfg.indexType;
ShutdownTimeout = cfg.shutdownTimeout;
SingleFlight = {
Etcd = {
Endpoints = builtins.concatStringsSep "," cfg.singleFlight.etcd.endpoints;
};
Redis = {
Endpoint = cfg.singleFlight.redis.endpoint;
Password = cfg.singleFlight.redis.password;
LockConfig = {
TTL = cfg.singleFlight.redis.lockConfig.ttl;
Timeout = cfg.singleFlight.redis.lockConfig.timeout;
MaxRetries = cfg.singleFlight.redis.lockConfig.maxRetries;
};
};
RedisSentinel = {
Endpoints = cfg.singleFlight.redisSentinel.endpoints;
MasterName = cfg.singleFlight.redisSentinel.masterName;
SentinelPassword = cfg.singleFlight.redisSentinel.sentinelPassword;
LockConfig = {
TTL = cfg.singleFlight.redisSentinel.lockConfig.ttl;
Timeout = cfg.singleFlight.redisSentinel.lockConfig.timeout;
MaxRetries = cfg.singleFlight.redisSentinel.lockConfig.maxRetries;
};
athensConfig = lib.flip lib.recursiveUpdate cfg.extraConfig ({
GoBinary = "${cfg.goBinary}/bin/go";
GoEnv = cfg.goEnv;
GoBinaryEnvVars = lib.mapAttrsToList (k: v: "${k}=${v}") cfg.goBinaryEnvVars;
GoGetWorkers = cfg.goGetWorkers;
GoGetDir = cfg.goGetDir;
ProtocolWorkers = cfg.protocolWorkers;
LogLevel = cfg.logLevel;
CloudRuntime = cfg.cloudRuntime;
EnablePprof = cfg.enablePprof;
PprofPort = ":${toString cfg.pprofPort}";
FilterFile = cfg.filterFile;
RobotsFile = cfg.robotsFile;
Timeout = cfg.timeout;
StorageType = cfg.storageType;
TLSCertFile = cfg.tlsCertFile;
TLSKeyFile = cfg.tlsKeyFile;
Port = ":${toString cfg.port}";
UnixSocket = cfg.unixSocket;
GlobalEndpoint = cfg.globalEndpoint;
BasicAuthUser = cfg.basicAuthUser;
BasicAuthPass = cfg.basicAuthPass;
ForceSSL = cfg.forceSSL;
ValidatorHook = cfg.validatorHook;
PathPrefix = cfg.pathPrefix;
NETRCPath = cfg.netrcPath;
GithubToken = cfg.githubToken;
HGRCPath = cfg.hgrcPath;
TraceExporter = cfg.traceExporter;
StatsExporter = cfg.statsExporter;
SumDBs = cfg.sumDBs;
NoSumPatterns = cfg.noSumPatterns;
DownloadMode = cfg.downloadMode;
NetworkMode = cfg.networkMode;
DownloadURL = cfg.downloadURL;
SingleFlightType = cfg.singleFlightType;
IndexType = cfg.indexType;
ShutdownTimeout = cfg.shutdownTimeout;
SingleFlight = {
Etcd = {
Endpoints = builtins.concatStringsSep "," cfg.singleFlight.etcd.endpoints;
};
Redis = {
Endpoint = cfg.singleFlight.redis.endpoint;
Password = cfg.singleFlight.redis.password;
LockConfig = {
TTL = cfg.singleFlight.redis.lockConfig.ttl;
Timeout = cfg.singleFlight.redis.lockConfig.timeout;
MaxRetries = cfg.singleFlight.redis.lockConfig.maxRetries;
};
};
Storage = {
CDN = {
Endpoint = cfg.storage.cdn.endpoint;
};
Disk = {
RootPath = cfg.storage.disk.rootPath;
};
GCP = {
ProjectID = cfg.storage.gcp.projectID;
Bucket = cfg.storage.gcp.bucket;
JSONKey = cfg.storage.gcp.jsonKey;
};
Minio = {
Endpoint = cfg.storage.minio.endpoint;
Key = cfg.storage.minio.key;
Secret = cfg.storage.minio.secret;
EnableSSL = cfg.storage.minio.enableSSL;
Bucket = cfg.storage.minio.bucket;
region = cfg.storage.minio.region;
};
Mongo = {
URL = cfg.storage.mongo.url;
DefaultDBName = cfg.storage.mongo.defaultDBName;
CertPath = cfg.storage.mongo.certPath;
Insecure = cfg.storage.mongo.insecure;
};
S3 = {
Region = cfg.storage.s3.region;
Key = cfg.storage.s3.key;
Secret = cfg.storage.s3.secret;
Token = cfg.storage.s3.token;
Bucket = cfg.storage.s3.bucket;
ForcePathStyle = cfg.storage.s3.forcePathStyle;
UseDefaultConfiguration = cfg.storage.s3.useDefaultConfiguration;
CredentialsEndpoint = cfg.storage.s3.credentialsEndpoint;
AwsContainerCredentialsRelativeURI = cfg.storage.s3.awsContainerCredentialsRelativeURI;
Endpoint = cfg.storage.s3.endpoint;
};
AzureBlob = {
AccountName = cfg.storage.azureblob.accountName;
AccountKey = cfg.storage.azureblob.accountKey;
ContainerName = cfg.storage.azureblob.containerName;
};
External = {
URL = cfg.storage.external.url;
RedisSentinel = {
Endpoints = cfg.singleFlight.redisSentinel.endpoints;
MasterName = cfg.singleFlight.redisSentinel.masterName;
SentinelPassword = cfg.singleFlight.redisSentinel.sentinelPassword;
LockConfig = {
TTL = cfg.singleFlight.redisSentinel.lockConfig.ttl;
Timeout = cfg.singleFlight.redisSentinel.lockConfig.timeout;
MaxRetries = cfg.singleFlight.redisSentinel.lockConfig.maxRetries;
};
};
Index = {
MySQL = {
Protocol = cfg.index.mysql.protocol;
Host = cfg.index.mysql.host;
Port = cfg.index.mysql.port;
User = cfg.index.mysql.user;
Password = cfg.index.mysql.password;
Database = cfg.index.mysql.database;
Params = {
parseTime = cfg.index.mysql.params.parseTime;
timeout = cfg.index.mysql.params.timeout;
};
};
Postgres = {
Host = cfg.index.postgres.host;
Port = cfg.index.postgres.port;
User = cfg.index.postgres.user;
Password = cfg.index.postgres.password;
Database = cfg.index.postgres.database;
Params = {
connect_timeout = cfg.index.postgres.params.connect_timeout;
sslmode = cfg.index.postgres.params.sslmode;
};
};
Storage = {
CDN = {
Endpoint = cfg.storage.cdn.endpoint;
};
Disk = {
RootPath = cfg.storage.disk.rootPath;
};
GCP = {
ProjectID = cfg.storage.gcp.projectID;
Bucket = cfg.storage.gcp.bucket;
JSONKey = cfg.storage.gcp.jsonKey;
};
Minio = {
Endpoint = cfg.storage.minio.endpoint;
Key = cfg.storage.minio.key;
Secret = cfg.storage.minio.secret;
EnableSSL = cfg.storage.minio.enableSSL;
Bucket = cfg.storage.minio.bucket;
region = cfg.storage.minio.region;
};
Mongo = {
URL = cfg.storage.mongo.url;
DefaultDBName = cfg.storage.mongo.defaultDBName;
CertPath = cfg.storage.mongo.certPath;
Insecure = cfg.storage.mongo.insecure;
};
S3 = {
Region = cfg.storage.s3.region;
Key = cfg.storage.s3.key;
Secret = cfg.storage.s3.secret;
Token = cfg.storage.s3.token;
Bucket = cfg.storage.s3.bucket;
ForcePathStyle = cfg.storage.s3.forcePathStyle;
UseDefaultConfiguration = cfg.storage.s3.useDefaultConfiguration;
CredentialsEndpoint = cfg.storage.s3.credentialsEndpoint;
AwsContainerCredentialsRelativeURI = cfg.storage.s3.awsContainerCredentialsRelativeURI;
Endpoint = cfg.storage.s3.endpoint;
};
AzureBlob = {
AccountName = cfg.storage.azureblob.accountName;
AccountKey = cfg.storage.azureblob.accountKey;
ContainerName = cfg.storage.azureblob.containerName;
};
External = {
URL = cfg.storage.external.url;
};
};
Index = {
MySQL = {
Protocol = cfg.index.mysql.protocol;
Host = cfg.index.mysql.host;
Port = cfg.index.mysql.port;
User = cfg.index.mysql.user;
Password = cfg.index.mysql.password;
Database = cfg.index.mysql.database;
Params = {
parseTime = cfg.index.mysql.params.parseTime;
timeout = cfg.index.mysql.params.timeout;
};
};
}
);
Postgres = {
Host = cfg.index.postgres.host;
Port = cfg.index.postgres.port;
User = cfg.index.postgres.user;
Password = cfg.index.postgres.password;
Database = cfg.index.postgres.database;
Params = {
connect_timeout = cfg.index.postgres.params.connect_timeout;
sslmode = cfg.index.postgres.params.sslmode;
};
};
};
});
configFile = lib.pipe athensConfig [
(lib.filterAttrsRecursive (_k: v: v != null))
((pkgs.formats.toml {}).generate "config.toml")
((pkgs.formats.toml { }).generate "config.toml")
];
in
{
@ -177,7 +180,10 @@ in
};
goEnv = lib.mkOption {
type = lib.types.enum [ "development" "production" ];
type = lib.types.enum [
"development"
"production"
];
description = "Specifies the type of environment to run. One of 'development' or 'production'.";
default = "development";
example = "production";
@ -220,7 +226,17 @@ in
};
logLevel = lib.mkOption {
type = lib.types.nullOr (lib.types.enum [ "panic" "fatal" "error" "warning" "info" "debug" "trace" ]);
type = lib.types.nullOr (
lib.types.enum [
"panic"
"fatal"
"error"
"warning"
"info"
"debug"
"trace"
]
);
description = ''
Log level for Athens.
Supports all logrus log levels (https://github.com/Sirupsen/logrus#level-logging)".
@ -230,7 +246,10 @@ in
};
cloudRuntime = lib.mkOption {
type = lib.types.enum [ "GCP" "none" ];
type = lib.types.enum [
"GCP"
"none"
];
description = ''
Specifies the Cloud Provider on which the Proxy/registry is running.
'';
@ -279,7 +298,16 @@ in
};
storageType = lib.mkOption {
type = lib.types.enum [ "memory" "disk" "mongo" "gcp" "minio" "s3" "azureblob" "external" ];
type = lib.types.enum [
"memory"
"disk"
"mongo"
"gcp"
"minio"
"s3"
"azureblob"
"external"
];
description = "Specifies the type of storage backend to use.";
default = "disk";
};
@ -401,7 +429,12 @@ in
};
traceExporter = lib.mkOption {
type = lib.types.nullOr (lib.types.enum [ "jaeger" "datadog" ]);
type = lib.types.nullOr (
lib.types.enum [
"jaeger"
"datadog"
]
);
description = ''
Trace exporter to use.
'';
@ -442,7 +475,16 @@ in
};
downloadMode = lib.mkOption {
type = lib.types.oneOf [ (lib.types.enum [ "sync" "async" "redirect" "async_redirect" "none" ]) (lib.types.strMatching "^file:.*$|^custom:.*$") ];
type = lib.types.oneOf [
(lib.types.enum [
"sync"
"async"
"redirect"
"async_redirect"
"none"
])
(lib.types.strMatching "^file:.*$|^custom:.*$")
];
description = ''
Defines how Athens behaves when a module@version
is not found in storage. There are 7 options:
@ -466,7 +508,11 @@ in
};
networkMode = lib.mkOption {
type = lib.types.enum [ "strict" "offline" "fallback" ];
type = lib.types.enum [
"strict"
"offline"
"fallback"
];
description = ''
Configures how Athens will return the results
of the /list endpoint as it can be assembled from both its own
@ -492,7 +538,14 @@ in
};
singleFlightType = lib.mkOption {
type = lib.types.enum [ "memory" "etcd" "redis" "redis-sentinel" "gcp" "azureblob" ];
type = lib.types.enum [
"memory"
"etcd"
"redis"
"redis-sentinel"
"gcp"
"azureblob"
];
description = ''
Determines what mechanism Athens uses to manage concurrency flowing into the Athens backend.
'';
@ -500,7 +553,12 @@ in
};
indexType = lib.mkOption {
type = lib.types.enum [ "none" "memory" "mysql" "postgres" ];
type = lib.types.enum [
"none"
"memory"
"mysql"
"postgres"
];
description = ''
Type of index backend Athens will use.
'';
@ -913,8 +971,12 @@ in
ProtectHome = "read-only";
ProtectSystem = "full";
ReadWritePaths = lib.mkIf (cfg.storage.disk.rootPath != null && (! lib.hasPrefix "/var/lib/" cfg.storage.disk.rootPath)) [ cfg.storage.disk.rootPath ];
StateDirectory = lib.mkIf (lib.hasPrefix "/var/lib/" cfg.storage.disk.rootPath) [ (lib.removePrefix "/var/lib/" cfg.storage.disk.rootPath) ];
ReadWritePaths = lib.mkIf (
cfg.storage.disk.rootPath != null && (!lib.hasPrefix "/var/lib/" cfg.storage.disk.rootPath)
) [ cfg.storage.disk.rootPath ];
StateDirectory = lib.mkIf (lib.hasPrefix "/var/lib/" cfg.storage.disk.rootPath) [
(lib.removePrefix "/var/lib/" cfg.storage.disk.rootPath)
];
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
@ -923,7 +985,8 @@ in
};
networking.firewall = {
allowedTCPPorts = lib.optionals (cfg.unixSocket == null) [ cfg.port ]
allowedTCPPorts =
lib.optionals (cfg.unixSocket == null) [ cfg.port ]
++ lib.optionals cfg.enablePprof [ cfg.pprofPort ];
};
};

View file

@ -1,13 +1,18 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.jupyterhub;
kernels = (pkgs.jupyter-kernel.create {
definitions = if cfg.kernels != null
then cfg.kernels
else pkgs.jupyter-kernel.default;
});
kernels = (
pkgs.jupyter-kernel.create {
definitions = if cfg.kernels != null then cfg.kernels else pkgs.jupyter-kernel.default;
}
);
jupyterhubConfig = pkgs.writeText "jupyterhub_config.py" ''
c.JupyterHub.bind_url = "http://${cfg.host}:${toString cfg.port}"
@ -23,7 +28,8 @@ let
${cfg.extraConfig}
'';
in {
in
{
meta.maintainers = with lib.maintainers; [ costrouc ];
options.services.jupyterhub = {
@ -71,10 +77,12 @@ in {
jupyterhubEnv = lib.mkOption {
type = lib.types.package;
default = pkgs.python3.withPackages (p: with p; [
jupyterhub
jupyterhub-systemdspawner
]);
default = pkgs.python3.withPackages (
p: with p; [
jupyterhub
jupyterhub-systemdspawner
]
);
defaultText = lib.literalExpression ''
pkgs.python3.withPackages (p: with p; [
jupyterhub
@ -93,10 +101,12 @@ in {
jupyterlabEnv = lib.mkOption {
type = lib.types.package;
default = pkgs.python3.withPackages (p: with p; [
jupyterhub
jupyterlab
]);
default = pkgs.python3.withPackages (
p: with p; [
jupyterhub
jupyterlab
]
);
defaultText = lib.literalExpression ''
pkgs.python3.withPackages (p: with p; [
jupyterhub
@ -115,9 +125,15 @@ in {
};
kernels = lib.mkOption {
type = lib.types.nullOr (lib.types.attrsOf(lib.types.submodule (import ../jupyter/kernel-options.nix {
inherit lib pkgs;
})));
type = lib.types.nullOr (
lib.types.attrsOf (
lib.types.submodule (
import ../jupyter/kernel-options.nix {
inherit lib pkgs;
}
)
)
);
default = null;
example = lib.literalExpression ''
@ -179,7 +195,7 @@ in {
};
config = lib.mkMerge [
(lib.mkIf cfg.enable {
(lib.mkIf cfg.enable {
systemd.services.jupyterhub = {
description = "Jupyterhub development server";

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.greetd;
tty = "tty${toString cfg.vt}";
@ -79,12 +84,14 @@ in
Wants = [
"systemd-user-sessions.service"
];
After = [
"systemd-user-sessions.service"
"getty@${tty}.service"
] ++ lib.optionals (!cfg.greeterManagesPlymouth) [
"plymouth-quit-wait.service"
];
After =
[
"systemd-user-sessions.service"
"getty@${tty}.service"
]
++ lib.optionals (!cfg.greeterManagesPlymouth) [
"plymouth-quit-wait.service"
];
Conflicts = [
"getty@${tty}.service"
];

View file

@ -87,6 +87,7 @@
config.services.taler.enable && (config.services.taler.settings.taler ? CURRENCY)
) config.services.taler.settings.taler.CURRENCY;
services.libeufin.bank.settings.libeufin-bankdb-postgres.CONFIG = lib.mkIf config.services.libeufin.bank.createLocalDatabase "postgresql:///libeufin-bank";
services.libeufin.bank.settings.libeufin-bankdb-postgres.CONFIG =
lib.mkIf config.services.libeufin.bank.createLocalDatabase "postgresql:///libeufin-bank";
};
}

View file

@ -1,31 +1,45 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.archisteamfarm;
format = pkgs.formats.json { };
configFile = format.generate "ASF.json" (cfg.settings // {
# we disable it because ASF cannot update itself anyways
# and nixos takes care of restarting the service
# is in theory not needed as this is already the default for default builds
UpdateChannel = 0;
Headless = true;
} // lib.optionalAttrs (cfg.ipcPasswordFile != null) {
IPCPassword = "#ipcPassword#";
});
configFile = format.generate "ASF.json" (
cfg.settings
// {
# we disable it because ASF cannot update itself anyways
# and nixos takes care of restarting the service
# is in theory not needed as this is already the default for default builds
UpdateChannel = 0;
Headless = true;
}
// lib.optionalAttrs (cfg.ipcPasswordFile != null) {
IPCPassword = "#ipcPassword#";
}
);
ipc-config = format.generate "IPC.config" cfg.ipcSettings;
mkBot = n: c:
format.generate "${n}.json" (c.settings // {
SteamLogin = if c.username == "" then n else c.username;
Enabled = c.enabled;
} // lib.optionalAttrs (c.passwordFile != null) {
SteamPassword = c.passwordFile;
# sets the password format to file (https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Security#file)
PasswordFormat = 4;
});
mkBot =
n: c:
format.generate "${n}.json" (
c.settings
// {
SteamLogin = if c.username == "" then n else c.username;
Enabled = c.enabled;
}
// lib.optionalAttrs (c.passwordFile != null) {
SteamPassword = c.passwordFile;
# sets the password format to file (https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Security#file)
PasswordFormat = 4;
}
);
in
{
options.services.archisteamfarm = {
@ -120,35 +134,37 @@ in
};
bots = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule {
options = {
username = lib.mkOption {
type = lib.types.str;
description = "Name of the user to log in. Default is attribute name.";
default = "";
type = lib.types.attrsOf (
lib.types.submodule {
options = {
username = lib.mkOption {
type = lib.types.str;
description = "Name of the user to log in. Default is attribute name.";
default = "";
};
passwordFile = lib.mkOption {
type = with lib.types; nullOr path;
default = null;
description = ''
Path to a file containing the password. The file must be readable by the `archisteamfarm` user/group.
Omit or set to null to provide the password a different way, such as through the web-ui.
'';
};
enabled = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to enable the bot on startup.";
};
settings = lib.mkOption {
type = lib.types.attrs;
description = ''
Additional settings that are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#bot-config).
'';
default = { };
};
};
passwordFile = lib.mkOption {
type = with lib.types; nullOr path;
default = null;
description = ''
Path to a file containing the password. The file must be readable by the `archisteamfarm` user/group.
Omit or set to null to provide the password a different way, such as through the web-ui.
'';
};
enabled = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to enable the bot on startup.";
};
settings = lib.mkOption {
type = lib.types.attrs;
description = ''
Additional settings that are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#bot-config).
'';
default = { };
};
};
});
}
);
description = ''
Bots name and configuration.
'';
@ -156,7 +172,9 @@ in
exampleBot = {
username = "alice";
passwordFile = "/var/lib/archisteamfarm/secrets/password";
settings = { SteamParentalCode = "1234"; };
settings = {
SteamParentalCode = "1234";
};
};
};
default = { };
@ -221,24 +239,33 @@ in
RestrictSUIDSGID = true;
SecureBits = "noroot-locked";
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged" "mincore" ];
SystemCallFilter = [
"@system-service"
"~@privileged"
"mincore"
];
UMask = "0077";
}
];
preStart =
let
createBotsScript = pkgs.runCommand "ASF-bots" {
preferLocalBuild = true;
} ''
mkdir -p $out
# clean potential removed bots
rm -rf $out/*.json
for i in ${lib.concatStringsSep " " (map (x: "${lib.getName x},${x}") (lib.mapAttrsToList mkBot cfg.bots))}; do IFS=",";
set -- $i
ln -fs $2 $out/$1
done
'';
createBotsScript =
pkgs.runCommand "ASF-bots"
{
preferLocalBuild = true;
}
''
mkdir -p $out
# clean potential removed bots
rm -rf $out/*.json
for i in ${
lib.concatStringsSep " " (map (x: "${lib.getName x},${x}") (lib.mapAttrsToList mkBot cfg.bots))
}; do IFS=",";
set -- $i
ln -fs $2 $out/$1
done
'';
replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
in
''
@ -250,11 +277,11 @@ in
${replaceSecretBin} '#ipcPassword#' '${cfg.ipcPasswordFile}' config/ASF.json
''}
${lib.optionalString (cfg.ipcSettings != {}) ''
${lib.optionalString (cfg.ipcSettings != { }) ''
ln -fs ${ipc-config} config/IPC.config
''}
${lib.optionalString (cfg.bots != {}) ''
${lib.optionalString (cfg.bots != { }) ''
ln -fs ${createBotsScript}/* config/
''}

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.factorio;
name = "Factorio";
@ -36,9 +41,11 @@ let
} // cfg.extraSettings;
serverSettingsString = builtins.toJSON (lib.filterAttrsRecursive (n: v: v != null) serverSettings);
serverSettingsFile = pkgs.writeText "server-settings.json" serverSettingsString;
playerListOption = name: list:
lib.optionalString (list != [])
"--${name}=${pkgs.writeText "${name}.json" (builtins.toJSON list)}";
playerListOption =
name: list:
lib.optionalString (
list != [ ]
) "--${name}=${pkgs.writeText "${name}.json" (builtins.toJSON list)}";
modDir = pkgs.factorio-utils.mkModDirDrv cfg.mods cfg.mods-dat;
in
{
@ -67,8 +74,11 @@ in
# --use-server-whitelist) so we can't implement that behaviour, so we
# might as well match theirs.
type = lib.types.listOf lib.types.str;
default = [];
example = [ "Rseding91" "Oxyd" ];
default = [ ];
example = [
"Rseding91"
"Oxyd"
];
description = ''
If non-empty, only these player names are allowed to connect. The game
will not be able to save any changes made in-game with the /whitelist
@ -87,7 +97,7 @@ in
admins = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
default = [ ];
example = [ "username" ];
description = ''
List of player names which will be admin.
@ -167,7 +177,7 @@ in
};
mods = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = [];
default = [ ];
description = ''
Mods the server should install and activate.
@ -202,8 +212,10 @@ in
};
extraSettings = lib.mkOption {
type = lib.types.attrs;
default = {};
example = { max_players = 64; };
default = { };
example = {
max_players = 64;
};
description = ''
Extra game configuration that will go into server-settings.json
'';
@ -288,9 +300,9 @@ in
config = lib.mkIf cfg.enable {
systemd.services.factorio = {
description = "Factorio headless server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
description = "Factorio headless server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
preStart =
(toString [
@ -299,11 +311,13 @@ in
"${cfg.package}/bin/factorio"
"--config=${cfg.configFile}"
"--create=${mkSavePath cfg.saveName}"
(lib.optionalString (cfg.mods != []) "--mod-directory=${modDir}")
(lib.optionalString (cfg.mods != [ ]) "--mod-directory=${modDir}")
])
+ (lib.optionalString (cfg.extraSettingsFile != null) ("\necho ${lib.strings.escapeShellArg serverSettingsString}"
+ (lib.optionalString (cfg.extraSettingsFile != null) (
"\necho ${lib.strings.escapeShellArg serverSettingsString}"
+ " \"$(cat ${cfg.extraSettingsFile})\" | ${lib.getExe pkgs.jq} -s add"
+ " > ${stateDir}/server-settings.json"));
+ " > ${stateDir}/server-settings.json"
));
serviceConfig = {
Restart = "always";
@ -318,15 +332,13 @@ in
"--bind=${cfg.bind}"
(lib.optionalString (!cfg.loadLatestSave) "--start-server=${mkSavePath cfg.saveName}")
"--server-settings=${
if (cfg.extraSettingsFile != null)
then "${stateDir}/server-settings.json"
else serverSettingsFile
if (cfg.extraSettingsFile != null) then "${stateDir}/server-settings.json" else serverSettingsFile
}"
(lib.optionalString cfg.loadLatestSave "--start-server-load-latest")
(lib.optionalString (cfg.mods != []) "--mod-directory=${modDir}")
(lib.optionalString (cfg.mods != [ ]) "--mod-directory=${modDir}")
(playerListOption "server-adminlist" cfg.admins)
(playerListOption "server-whitelist" cfg.allowedPlayers)
(lib.optionalString (cfg.allowedPlayers != []) "--use-server-whitelist")
(lib.optionalString (cfg.allowedPlayers != [ ]) "--use-server-whitelist")
];
# Sandboxing
@ -338,7 +350,12 @@ in
ProtectControlGroups = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
"AF_NETLINK"
];
RestrictRealtime = true;
RestrictNamespaces = true;
MemoryDenyWriteExecute = true;

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.minecraft-server;
@ -8,16 +13,25 @@ let
eula=true
'';
whitelistFile = pkgs.writeText "whitelist.json"
(builtins.toJSON
(lib.mapAttrsToList (n: v: { name = n; uuid = v; }) cfg.whitelist));
whitelistFile = pkgs.writeText "whitelist.json" (
builtins.toJSON (
lib.mapAttrsToList (n: v: {
name = n;
uuid = v;
}) cfg.whitelist
)
);
cfgToString = v: if builtins.isBool v then lib.boolToString v else toString v;
serverPropertiesFile = pkgs.writeText "server.properties" (''
# server.properties managed by NixOS configuration
'' + lib.concatStringsSep "\n" (lib.mapAttrsToList
(n: v: "${n}=${cfgToString v}") cfg.serverProperties));
serverPropertiesFile = pkgs.writeText "server.properties" (
''
# server.properties managed by NixOS configuration
''
+ lib.concatStringsSep "\n" (
lib.mapAttrsToList (n: v: "${n}=${cfgToString v}") cfg.serverProperties
)
);
stopScript = pkgs.writeShellScript "minecraft-server-stop" ''
echo stop > ${config.systemd.sockets.minecraft-server.socketConfig.ListenFIFO}
@ -36,15 +50,20 @@ let
serverPort = cfg.serverProperties.server-port or defaultServerPort;
rconPort = if cfg.serverProperties.enable-rcon or false
then cfg.serverProperties."rcon.port" or 25575
else null;
rconPort =
if cfg.serverProperties.enable-rcon or false then
cfg.serverProperties."rcon.port" or 25575
else
null;
queryPort = if cfg.serverProperties.enable-query or false
then cfg.serverProperties."query.port" or 25565
else null;
queryPort =
if cfg.serverProperties.enable-query or false then
cfg.serverProperties."query.port" or 25565
else
null;
in {
in
{
options = {
services.minecraft-server = {
@ -98,13 +117,16 @@ in {
};
whitelist = lib.mkOption {
type = let
minecraftUUID = lib.types.strMatching
"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" // {
description = "Minecraft UUID";
};
in lib.types.attrsOf minecraftUUID;
default = {};
type =
let
minecraftUUID =
lib.types.strMatching "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
// {
description = "Minecraft UUID";
};
in
lib.types.attrsOf minecraftUUID;
default = { };
description = ''
Whitelisted players, only has an effect when
{option}`services.minecraft-server.declarative` is
@ -124,8 +146,14 @@ in {
};
serverProperties = lib.mkOption {
type = with lib.types; attrsOf (oneOf [ bool int str ]);
default = {};
type =
with lib.types;
attrsOf (oneOf [
bool
int
str
]);
default = { };
example = lib.literalExpression ''
{
server-port = 43000;
@ -155,7 +183,8 @@ in {
type = lib.types.separatedString " ";
default = "-Xmx2048M -Xms2048M";
# Example options from https://minecraft.gamepedia.com/Tutorials/Server_startup_script
example = "-Xms4092M -Xmx4092M -XX:+UseG1GC -XX:+CMSIncrementalPacing "
example =
"-Xms4092M -Xmx4092M -XX:+UseG1GC -XX:+CMSIncrementalPacing "
+ "-XX:+CMSClassUnloadingEnabled -XX:ParallelGCThreads=2 "
+ "-XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=10";
description = "JVM options for the Minecraft server.";
@ -166,13 +195,13 @@ in {
config = lib.mkIf cfg.enable {
users.users.minecraft = {
description = "Minecraft server service user";
home = cfg.dataDir;
createHome = true;
isSystemUser = true;
group = "minecraft";
description = "Minecraft server service user";
home = cfg.dataDir;
createHome = true;
isSystemUser = true;
group = "minecraft";
};
users.groups.minecraft = {};
users.groups.minecraft = { };
systemd.sockets.minecraft-server = {
bindsTo = [ "minecraft-server.service" ];
@ -187,10 +216,13 @@ in {
};
systemd.services.minecraft-server = {
description = "Minecraft Server Service";
wantedBy = [ "multi-user.target" ];
requires = [ "minecraft-server.socket" ];
after = [ "network.target" "minecraft-server.socket" ];
description = "Minecraft Server Service";
wantedBy = [ "multi-user.target" ];
requires = [ "minecraft-server.socket" ];
after = [
"network.target"
"minecraft-server.socket"
];
serviceConfig = {
ExecStart = "${cfg.package}/bin/minecraft-server ${cfg.jvmOpts}";
@ -218,7 +250,10 @@ in {
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
@ -226,49 +261,64 @@ in {
UMask = "0077";
};
preStart = ''
ln -sf ${eulaFile} eula.txt
'' + (if cfg.declarative then ''
preStart =
''
ln -sf ${eulaFile} eula.txt
''
+ (
if cfg.declarative then
''
if [ -e .declarative ]; then
if [ -e .declarative ]; then
# Was declarative before, no need to back up anything
ln -sf ${whitelistFile} whitelist.json
cp -f ${serverPropertiesFile} server.properties
# Was declarative before, no need to back up anything
ln -sf ${whitelistFile} whitelist.json
cp -f ${serverPropertiesFile} server.properties
else
else
# Declarative for the first time, backup stateful files
ln -sb --suffix=.stateful ${whitelistFile} whitelist.json
cp -b --suffix=.stateful ${serverPropertiesFile} server.properties
# Declarative for the first time, backup stateful files
ln -sb --suffix=.stateful ${whitelistFile} whitelist.json
cp -b --suffix=.stateful ${serverPropertiesFile} server.properties
# server.properties must have write permissions, because every time
# the server starts it first parses the file and then regenerates it..
chmod +w server.properties
echo "Autogenerated file that signifies that this server configuration is managed declaratively by NixOS" \
> .declarative
# server.properties must have write permissions, because every time
# the server starts it first parses the file and then regenerates it..
chmod +w server.properties
echo "Autogenerated file that signifies that this server configuration is managed declaratively by NixOS" \
> .declarative
fi
'' else ''
if [ -e .declarative ]; then
rm .declarative
fi
'');
fi
''
else
''
if [ -e .declarative ]; then
rm .declarative
fi
''
);
};
networking.firewall = lib.mkIf cfg.openFirewall (if cfg.declarative then {
allowedUDPPorts = [ serverPort ];
allowedTCPPorts = [ serverPort ]
++ lib.optional (queryPort != null) queryPort
++ lib.optional (rconPort != null) rconPort;
} else {
allowedUDPPorts = [ defaultServerPort ];
allowedTCPPorts = [ defaultServerPort ];
});
networking.firewall = lib.mkIf cfg.openFirewall (
if cfg.declarative then
{
allowedUDPPorts = [ serverPort ];
allowedTCPPorts =
[ serverPort ]
++ lib.optional (queryPort != null) queryPort
++ lib.optional (rconPort != null) rconPort;
}
else
{
allowedUDPPorts = [ defaultServerPort ];
allowedTCPPorts = [ defaultServerPort ];
}
);
assertions = [
{ assertion = cfg.eula;
message = "You must agree to Mojangs EULA to run minecraft-server."
{
assertion = cfg.eula;
message =
"You must agree to Mojangs EULA to run minecraft-server."
+ " Read https://account.mojang.com/documents/minecraft_eula and"
+ " set `services.minecraft-server.eula` to `true` if you agree.";
}

View file

@ -1,8 +1,14 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.hardware.amdgpu;
in {
in
{
options.hardware.amdgpu = {
legacySupport.enable = lib.mkEnableOption ''
using `amdgpu` kernel driver instead of `radeon` for Southern Islands

View file

@ -1,138 +1,191 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
{
imports = [
(lib.mkRenamedOptionModule
[ "virtualisation" "containers" "cdi" "dynamic" "nvidia" "enable" ]
[ "hardware" "nvidia-container-toolkit" "enable" ])
[ "hardware" "nvidia-container-toolkit" "enable" ]
)
];
options = let
mountType = {
options = {
hostPath = lib.mkOption {
type = lib.types.str;
description = "Host path.";
};
containerPath = lib.mkOption {
type = lib.types.str;
description = "Container path.";
};
mountOptions = lib.mkOption {
default = [ "ro" "nosuid" "nodev" "bind" ];
type = lib.types.listOf lib.types.str;
description = "Mount options.";
options =
let
mountType = {
options = {
hostPath = lib.mkOption {
type = lib.types.str;
description = "Host path.";
};
containerPath = lib.mkOption {
type = lib.types.str;
description = "Container path.";
};
mountOptions = lib.mkOption {
default = [
"ro"
"nosuid"
"nodev"
"bind"
];
type = lib.types.listOf lib.types.str;
description = "Mount options.";
};
};
};
in
{
hardware.nvidia-container-toolkit = {
enable = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Enable dynamic CDI configuration for Nvidia devices by running
nvidia-container-toolkit on boot.
'';
};
mounts = lib.mkOption {
type = lib.types.listOf (lib.types.submodule mountType);
default = [ ];
description = "Mounts to be added to every container under the Nvidia CDI profile.";
};
mount-nvidia-executables = lib.mkOption {
default = true;
type = lib.types.bool;
description = ''
Mount executables nvidia-smi, nvidia-cuda-mps-control, nvidia-cuda-mps-server,
nvidia-debugdump, nvidia-powerd and nvidia-ctk on containers.
'';
};
device-name-strategy = lib.mkOption {
default = "index";
type = lib.types.enum [
"index"
"uuid"
"type-index"
];
description = ''
Specify the strategy for generating device names,
passed to `nvidia-ctk cdi generate`. This will affect how
you reference the device using `nvidia.com/gpu=` in
the container runtime.
'';
};
mount-nvidia-docker-1-directories = lib.mkOption {
default = true;
type = lib.types.bool;
description = ''
Mount nvidia-docker-1 directories on containers: /usr/local/nvidia/lib and
/usr/local/nvidia/lib64.
'';
};
package = lib.mkPackageOption pkgs "nvidia-container-toolkit" { };
};
};
in {
hardware.nvidia-container-toolkit = {
enable = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Enable dynamic CDI configuration for Nvidia devices by running
nvidia-container-toolkit on boot.
'';
};
mounts = lib.mkOption {
type = lib.types.listOf (lib.types.submodule mountType);
default = [];
description = "Mounts to be added to every container under the Nvidia CDI profile.";
};
mount-nvidia-executables = lib.mkOption {
default = true;
type = lib.types.bool;
description = ''
Mount executables nvidia-smi, nvidia-cuda-mps-control, nvidia-cuda-mps-server,
nvidia-debugdump, nvidia-powerd and nvidia-ctk on containers.
'';
};
device-name-strategy = lib.mkOption {
default = "index";
type = lib.types.enum [ "index" "uuid" "type-index" ];
description = ''
Specify the strategy for generating device names,
passed to `nvidia-ctk cdi generate`. This will affect how
you reference the device using `nvidia.com/gpu=` in
the container runtime.
'';
};
mount-nvidia-docker-1-directories = lib.mkOption {
default = true;
type = lib.types.bool;
description = ''
Mount nvidia-docker-1 directories on containers: /usr/local/nvidia/lib and
/usr/local/nvidia/lib64.
'';
};
package = lib.mkPackageOption pkgs "nvidia-container-toolkit" { };
};
};
config = lib.mkIf config.hardware.nvidia-container-toolkit.enable {
assertions = [
{ assertion = config.hardware.nvidia.datacenter.enable || lib.elem "nvidia" config.services.xserver.videoDrivers;
{
assertion =
config.hardware.nvidia.datacenter.enable || lib.elem "nvidia" config.services.xserver.videoDrivers;
message = ''`nvidia-container-toolkit` requires nvidia datacenter or desktop drivers: set `hardware.nvidia.datacenter.enable` or add "nvidia" to `services.xserver.videoDrivers`'';
}];
}
];
virtualisation.docker = {
daemon.settings = lib.mkIf
(lib.versionAtLeast config.virtualisation.docker.package.version "25") {
features.cdi = true;
};
daemon.settings = lib.mkIf (lib.versionAtLeast config.virtualisation.docker.package.version "25") {
features.cdi = true;
};
rootless.daemon.settings = lib.mkIf
(config.virtualisation.docker.rootless.enable &&
(lib.versionAtLeast config.virtualisation.docker.package.version "25")) {
features.cdi = true;
};
rootless.daemon.settings =
lib.mkIf
(
config.virtualisation.docker.rootless.enable
&& (lib.versionAtLeast config.virtualisation.docker.package.version "25")
)
{
features.cdi = true;
};
};
hardware = {
graphics.enable = lib.mkIf (!config.hardware.nvidia.datacenter.enable) true;
nvidia-container-toolkit.mounts = let
nvidia-driver = config.hardware.nvidia.package;
in (lib.mkMerge [
[{ hostPath = pkgs.addDriverRunpath.driverLink;
containerPath = pkgs.addDriverRunpath.driverLink; }
{ hostPath = "${lib.getLib nvidia-driver}/etc";
containerPath = "${lib.getLib nvidia-driver}/etc"; }
{ hostPath = "${lib.getLib nvidia-driver}/share";
containerPath = "${lib.getLib nvidia-driver}/share"; }
{ hostPath = "${lib.getLib pkgs.glibc}/lib";
containerPath = "${lib.getLib pkgs.glibc}/lib"; }
{ hostPath = "${lib.getLib pkgs.glibc}/lib64";
containerPath = "${lib.getLib pkgs.glibc}/lib64"; }]
(lib.mkIf config.hardware.nvidia-container-toolkit.mount-nvidia-executables
[{ hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-control";
containerPath = "/usr/bin/nvidia-cuda-mps-control"; }
{ hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-server";
containerPath = "/usr/bin/nvidia-cuda-mps-server"; }
{ hostPath = lib.getExe' nvidia-driver "nvidia-debugdump";
containerPath = "/usr/bin/nvidia-debugdump"; }
{ hostPath = lib.getExe' nvidia-driver "nvidia-powerd";
containerPath = "/usr/bin/nvidia-powerd"; }
{ hostPath = lib.getExe' nvidia-driver "nvidia-smi";
containerPath = "/usr/bin/nvidia-smi"; }])
# nvidia-docker 1.0 uses /usr/local/nvidia/lib{,64}
# e.g.
# - https://gitlab.com/nvidia/container-images/cuda/-/blob/e3ff10eab3a1424fe394899df0e0f8ca5a410f0f/dist/12.3.1/ubi9/base/Dockerfile#L44
# - https://github.com/NVIDIA/nvidia-docker/blob/01d2c9436620d7dde4672e414698afe6da4a282f/src/nvidia/volumes.go#L104-L173
(lib.mkIf config.hardware.nvidia-container-toolkit.mount-nvidia-docker-1-directories
[{ hostPath = "${lib.getLib nvidia-driver}/lib";
containerPath = "/usr/local/nvidia/lib"; }
{ hostPath = "${lib.getLib nvidia-driver}/lib";
containerPath = "/usr/local/nvidia/lib64"; }])
]);
nvidia-container-toolkit.mounts =
let
nvidia-driver = config.hardware.nvidia.package;
in
(lib.mkMerge [
[
{
hostPath = pkgs.addDriverRunpath.driverLink;
containerPath = pkgs.addDriverRunpath.driverLink;
}
{
hostPath = "${lib.getLib nvidia-driver}/etc";
containerPath = "${lib.getLib nvidia-driver}/etc";
}
{
hostPath = "${lib.getLib nvidia-driver}/share";
containerPath = "${lib.getLib nvidia-driver}/share";
}
{
hostPath = "${lib.getLib pkgs.glibc}/lib";
containerPath = "${lib.getLib pkgs.glibc}/lib";
}
{
hostPath = "${lib.getLib pkgs.glibc}/lib64";
containerPath = "${lib.getLib pkgs.glibc}/lib64";
}
]
(lib.mkIf config.hardware.nvidia-container-toolkit.mount-nvidia-executables [
{
hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-control";
containerPath = "/usr/bin/nvidia-cuda-mps-control";
}
{
hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-server";
containerPath = "/usr/bin/nvidia-cuda-mps-server";
}
{
hostPath = lib.getExe' nvidia-driver "nvidia-debugdump";
containerPath = "/usr/bin/nvidia-debugdump";
}
{
hostPath = lib.getExe' nvidia-driver "nvidia-powerd";
containerPath = "/usr/bin/nvidia-powerd";
}
{
hostPath = lib.getExe' nvidia-driver "nvidia-smi";
containerPath = "/usr/bin/nvidia-smi";
}
])
# nvidia-docker 1.0 uses /usr/local/nvidia/lib{,64}
# e.g.
# - https://gitlab.com/nvidia/container-images/cuda/-/blob/e3ff10eab3a1424fe394899df0e0f8ca5a410f0f/dist/12.3.1/ubi9/base/Dockerfile#L44
# - https://github.com/NVIDIA/nvidia-docker/blob/01d2c9436620d7dde4672e414698afe6da4a282f/src/nvidia/volumes.go#L104-L173
(lib.mkIf config.hardware.nvidia-container-toolkit.mount-nvidia-docker-1-directories [
{
hostPath = "${lib.getLib nvidia-driver}/lib";
containerPath = "/usr/local/nvidia/lib";
}
{
hostPath = "${lib.getLib nvidia-driver}/lib";
containerPath = "/usr/local/nvidia/lib64";
}
])
]);
};
systemd.services.nvidia-container-toolkit-cdi-generator = {

View file

@ -28,15 +28,13 @@ let
/* eslint-disable no-unused-vars */
module.exports = {
afterConfig(config) {
${
builtins.concatStringsSep "" (
leafs (
lib.mapAttrsRecursive (path: val: ''
${builtins.concatStringsSep "." path} = ${builtins.toJSON val};
'') { config = settings; }
)
${builtins.concatStringsSep "" (
leafs (
lib.mapAttrsRecursive (path: val: ''
${builtins.concatStringsSep "." path} = ${builtins.toJSON val};
'') { config = settings; }
)
}
)}
${cfg.extraConfig}
},

View file

@ -1,15 +1,20 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.tlp;
enableRDW = config.networking.networkmanager.enable;
# TODO: Use this for having proper parameters in the future
mkTlpConfig = tlpConfig: lib.generators.toKeyValue {
mkKeyValue = lib.generators.mkKeyValueDefault {
mkValueString = val:
if lib.isList val then "\"" + (toString val) + "\""
else toString val;
} "=";
} tlpConfig;
mkTlpConfig =
tlpConfig:
lib.generators.toKeyValue {
mkKeyValue = lib.generators.mkKeyValueDefault {
mkValueString = val: if lib.isList val then "\"" + (toString val) + "\"" else toString val;
} "=";
} tlpConfig;
in
{
###### interface
@ -21,8 +26,17 @@ in
description = "Whether to enable the TLP power management daemon.";
};
settings = lib.mkOption {type = with lib.types; attrsOf (oneOf [bool int float str (listOf str)]);
default = {};
settings = lib.mkOption {
type =
with lib.types;
attrsOf (oneOf [
bool
int
float
str
(listOf str)
]);
default = { };
example = {
SATA_LINKPWR_ON_BAT = "med_power_with_dipm";
USB_BLACKLIST_PHONE = 1;
@ -58,35 +72,40 @@ in
Using config.services.tlp.extraConfig is deprecated and will become unsupported in a future release. Use config.services.tlp.settings instead.
'';
assertions = [{
assertion = cfg.enable -> config.powerManagement.scsiLinkPolicy == null;
message = ''
`services.tlp.enable` and `config.powerManagement.scsiLinkPolicy` cannot be set both.
Set `services.tlp.settings.SATA_LINKPWR_ON_AC` and `services.tlp.settings.SATA_LINKPWR_ON_BAT` instead.
'';
}];
assertions = [
{
assertion = cfg.enable -> config.powerManagement.scsiLinkPolicy == null;
message = ''
`services.tlp.enable` and `config.powerManagement.scsiLinkPolicy` cannot be set both.
Set `services.tlp.settings.SATA_LINKPWR_ON_AC` and `services.tlp.settings.SATA_LINKPWR_ON_BAT` instead.
'';
}
];
environment.etc = {
"tlp.conf".text = (mkTlpConfig cfg.settings) + cfg.extraConfig;
} // lib.optionalAttrs enableRDW {
"NetworkManager/dispatcher.d/99tlp-rdw-nm".source =
"${cfg.package}/lib/NetworkManager/dispatcher.d/99tlp-rdw-nm";
};
environment.etc =
{
"tlp.conf".text = (mkTlpConfig cfg.settings) + cfg.extraConfig;
}
// lib.optionalAttrs enableRDW {
"NetworkManager/dispatcher.d/99tlp-rdw-nm".source =
"${cfg.package}/lib/NetworkManager/dispatcher.d/99tlp-rdw-nm";
};
environment.systemPackages = [ cfg.package ];
services.tlp.settings = let
cfg = config.powerManagement;
maybeDefault = val: lib.mkIf (val != null) (lib.mkDefault val);
in {
CPU_SCALING_GOVERNOR_ON_AC = maybeDefault cfg.cpuFreqGovernor;
CPU_SCALING_GOVERNOR_ON_BAT = maybeDefault cfg.cpuFreqGovernor;
CPU_SCALING_MIN_FREQ_ON_AC = maybeDefault cfg.cpufreq.min;
CPU_SCALING_MAX_FREQ_ON_AC = maybeDefault cfg.cpufreq.max;
CPU_SCALING_MIN_FREQ_ON_BAT = maybeDefault cfg.cpufreq.min;
CPU_SCALING_MAX_FREQ_ON_BAT = maybeDefault cfg.cpufreq.max;
};
services.tlp.settings =
let
cfg = config.powerManagement;
maybeDefault = val: lib.mkIf (val != null) (lib.mkDefault val);
in
{
CPU_SCALING_GOVERNOR_ON_AC = maybeDefault cfg.cpuFreqGovernor;
CPU_SCALING_GOVERNOR_ON_BAT = maybeDefault cfg.cpuFreqGovernor;
CPU_SCALING_MIN_FREQ_ON_AC = maybeDefault cfg.cpufreq.min;
CPU_SCALING_MAX_FREQ_ON_AC = maybeDefault cfg.cpufreq.max;
CPU_SCALING_MIN_FREQ_ON_BAT = maybeDefault cfg.cpufreq.min;
CPU_SCALING_MAX_FREQ_ON_BAT = maybeDefault cfg.cpufreq.max;
};
services.udev.packages = [ cfg.package ];

View file

@ -1,18 +1,22 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
udev = config.systemd.package;
cfg = config.services.udev;
initrdUdevRules = pkgs.runCommand "initrd-udev-rules" {} ''
initrdUdevRules = pkgs.runCommand "initrd-udev-rules" { } ''
mkdir -p $out/etc/udev/rules.d
for f in 60-cdrom_id 60-persistent-storage 75-net-description 80-drivers 80-net-setup-link; do
ln -s ${config.boot.initrd.systemd.package}/lib/udev/rules.d/$f.rules $out/etc/udev/rules.d
done
'';
extraUdevRules = pkgs.writeTextFile {
name = "extra-udev-rules";
text = cfg.extraRules;
@ -36,143 +40,165 @@ let
'';
# Perform substitutions in all udev rules files.
udevRulesFor = { name, udevPackages, udevPath, udev, systemd, binPackages, initrdBin ? null }: pkgs.runCommand name
{ preferLocalBuild = true;
allowSubstitutes = false;
packages = lib.unique (map toString udevPackages);
}
''
mkdir -p $out
shopt -s nullglob
set +o pipefail
udevRulesFor =
{
name,
udevPackages,
udevPath,
udev,
systemd,
binPackages,
initrdBin ? null,
}:
pkgs.runCommand name
{
preferLocalBuild = true;
allowSubstitutes = false;
packages = lib.unique (map toString udevPackages);
}
''
mkdir -p $out
shopt -s nullglob
set +o pipefail
# Set a reasonable $PATH for programs called by udev rules.
echo 'ENV{PATH}="${udevPath}/bin:${udevPath}/sbin"' > $out/00-path.rules
# Set a reasonable $PATH for programs called by udev rules.
echo 'ENV{PATH}="${udevPath}/bin:${udevPath}/sbin"' > $out/00-path.rules
# Add the udev rules from other packages.
for i in $packages; do
echo "Adding rules for package $i"
for j in $i/{etc,lib}/udev/rules.d/*; do
echo "Copying $j to $out/$(basename $j)"
cat $j > $out/$(basename $j)
done
done
# Fix some paths in the standard udev rules. Hacky.
for i in $out/*.rules; do
substituteInPlace $i \
--replace-quiet \"/sbin/modprobe \"${pkgs.kmod}/bin/modprobe \
--replace-quiet \"/sbin/mdadm \"${pkgs.mdadm}/sbin/mdadm \
--replace-quiet \"/sbin/blkid \"${pkgs.util-linux}/sbin/blkid \
--replace-quiet \"/bin/mount \"${pkgs.util-linux}/bin/mount \
--replace-quiet /usr/bin/readlink ${pkgs.coreutils}/bin/readlink \
--replace-quiet /usr/bin/cat ${pkgs.coreutils}/bin/cat \
--replace-quiet /usr/bin/basename ${pkgs.coreutils}/bin/basename 2>/dev/null
${lib.optionalString (initrdBin != null) ''
substituteInPlace $i --replace-quiet '/run/current-system/systemd' "${lib.removeSuffix "/bin" initrdBin}"
''}
done
echo -n "Checking that all programs called by relative paths in udev rules exist in ${udev}/lib/udev... "
import_progs=$(grep 'IMPORT{program}="[^/$]' $out/* |
sed -e 's/.*IMPORT{program}="\([^ "]*\)[ "].*/\1/' | uniq)
run_progs=$(grep -v '^[[:space:]]*#' $out/* | grep 'RUN+="[^/$]' |
sed -e 's/.*RUN+="\([^ "]*\)[ "].*/\1/' | uniq)
for i in $import_progs $run_progs; do
if [[ ! -x ${udev}/lib/udev/$i && ! $i =~ socket:.* ]]; then
echo "FAIL"
echo "$i is called in udev rules but not installed by udev"
exit 1
fi
done
echo "OK"
echo -n "Checking that all programs called by absolute paths in udev rules exist... "
import_progs=$(grep 'IMPORT{program}="/' $out/* |
sed -e 's/.*IMPORT{program}="\([^ "]*\)[ "].*/\1/' | uniq)
run_progs=$(grep -v '^[[:space:]]*#' $out/* | grep 'RUN+="/' |
sed -e 's/.*RUN+="\([^ "]*\)[ "].*/\1/' | uniq)
for i in $import_progs $run_progs; do
# if the path refers to /run/current-system/systemd, replace with config.systemd.package
if [[ $i == /run/current-system/systemd* ]]; then
i="${systemd}/''${i#/run/current-system/systemd/}"
fi
if [[ ! -x $i ]]; then
echo "FAIL"
echo "$i is called in udev rules but is not executable or does not exist"
exit 1
fi
done
echo "OK"
filesToFixup="$(for i in "$out"/*; do
# list all files referring to (/usr)/bin paths, but allow references to /bin/sh.
grep -P -l '\B(?!\/bin\/sh\b)(\/usr)?\/bin(?:\/.*)?' "$i" || :
done)"
if [ -n "$filesToFixup" ]; then
echo "Consider fixing the following udev rules:"
echo "$filesToFixup" | while read localFile; do
remoteFile="origin unknown"
for i in ${toString binPackages}; do
for j in "$i"/*/udev/rules.d/*; do
[ -e "$out/$(basename "$j")" ] || continue
[ "$(basename "$j")" = "$(basename "$localFile")" ] || continue
remoteFile="originally from $j"
break 2
done
# Add the udev rules from other packages.
for i in $packages; do
echo "Adding rules for package $i"
for j in $i/{etc,lib}/udev/rules.d/*; do
echo "Copying $j to $out/$(basename $j)"
cat $j > $out/$(basename $j)
done
refs="$(
grep -o '\B\(/usr\)\?/s\?bin/[^ "]\+' "$localFile" \
| sed -e ':r;N;''${s/\n/ and /;br};s/\n/, /g;br'
)"
echo "$localFile ($remoteFile) contains references to $refs."
done
exit 1
fi
# If auto-configuration is disabled, then remove
# udev's 80-drivers.rules file, which contains rules for
# automatically calling modprobe.
${lib.optionalString (!config.boot.hardwareScan) ''
ln -s /dev/null $out/80-drivers.rules
''}
'';
hwdbBin = pkgs.runCommand "hwdb.bin"
{ preferLocalBuild = true;
allowSubstitutes = false;
packages = lib.unique (map toString ([udev] ++ cfg.packages));
}
''
mkdir -p etc/udev/hwdb.d
for i in $packages; do
echo "Adding hwdb files for package $i"
for j in $i/{etc,lib}/udev/hwdb.d/*; do
ln -s $j etc/udev/hwdb.d/$(basename $j)
# Fix some paths in the standard udev rules. Hacky.
for i in $out/*.rules; do
substituteInPlace $i \
--replace-quiet \"/sbin/modprobe \"${pkgs.kmod}/bin/modprobe \
--replace-quiet \"/sbin/mdadm \"${pkgs.mdadm}/sbin/mdadm \
--replace-quiet \"/sbin/blkid \"${pkgs.util-linux}/sbin/blkid \
--replace-quiet \"/bin/mount \"${pkgs.util-linux}/bin/mount \
--replace-quiet /usr/bin/readlink ${pkgs.coreutils}/bin/readlink \
--replace-quiet /usr/bin/cat ${pkgs.coreutils}/bin/cat \
--replace-quiet /usr/bin/basename ${pkgs.coreutils}/bin/basename 2>/dev/null
${lib.optionalString (initrdBin != null) ''
substituteInPlace $i --replace-quiet '/run/current-system/systemd' "${lib.removeSuffix "/bin" initrdBin}"
''}
done
done
echo "Generating hwdb database..."
# hwdb --update doesn't return error code even on errors!
res="$(${pkgs.buildPackages.systemd}/bin/systemd-hwdb --root=$(pwd) update 2>&1)"
echo "$res"
[ -z "$(echo "$res" | egrep '^Error')" ]
mv etc/udev/hwdb.bin $out
'';
echo -n "Checking that all programs called by relative paths in udev rules exist in ${udev}/lib/udev... "
import_progs=$(grep 'IMPORT{program}="[^/$]' $out/* |
sed -e 's/.*IMPORT{program}="\([^ "]*\)[ "].*/\1/' | uniq)
run_progs=$(grep -v '^[[:space:]]*#' $out/* | grep 'RUN+="[^/$]' |
sed -e 's/.*RUN+="\([^ "]*\)[ "].*/\1/' | uniq)
for i in $import_progs $run_progs; do
if [[ ! -x ${udev}/lib/udev/$i && ! $i =~ socket:.* ]]; then
echo "FAIL"
echo "$i is called in udev rules but not installed by udev"
exit 1
fi
done
echo "OK"
compressFirmware = firmware:
if config.hardware.firmwareCompression == "none" || (firmware.compressFirmware or true) == false then firmware
else if config.hardware.firmwareCompression == "zstd" then pkgs.compressFirmwareZstd firmware
else pkgs.compressFirmwareXz firmware;
echo -n "Checking that all programs called by absolute paths in udev rules exist... "
import_progs=$(grep 'IMPORT{program}="/' $out/* |
sed -e 's/.*IMPORT{program}="\([^ "]*\)[ "].*/\1/' | uniq)
run_progs=$(grep -v '^[[:space:]]*#' $out/* | grep 'RUN+="/' |
sed -e 's/.*RUN+="\([^ "]*\)[ "].*/\1/' | uniq)
for i in $import_progs $run_progs; do
# if the path refers to /run/current-system/systemd, replace with config.systemd.package
if [[ $i == /run/current-system/systemd* ]]; then
i="${systemd}/''${i#/run/current-system/systemd/}"
fi
if [[ ! -x $i ]]; then
echo "FAIL"
echo "$i is called in udev rules but is not executable or does not exist"
exit 1
fi
done
echo "OK"
filesToFixup="$(for i in "$out"/*; do
# list all files referring to (/usr)/bin paths, but allow references to /bin/sh.
grep -P -l '\B(?!\/bin\/sh\b)(\/usr)?\/bin(?:\/.*)?' "$i" || :
done)"
if [ -n "$filesToFixup" ]; then
echo "Consider fixing the following udev rules:"
echo "$filesToFixup" | while read localFile; do
remoteFile="origin unknown"
for i in ${toString binPackages}; do
for j in "$i"/*/udev/rules.d/*; do
[ -e "$out/$(basename "$j")" ] || continue
[ "$(basename "$j")" = "$(basename "$localFile")" ] || continue
remoteFile="originally from $j"
break 2
done
done
refs="$(
grep -o '\B\(/usr\)\?/s\?bin/[^ "]\+' "$localFile" \
| sed -e ':r;N;''${s/\n/ and /;br};s/\n/, /g;br'
)"
echo "$localFile ($remoteFile) contains references to $refs."
done
exit 1
fi
# If auto-configuration is disabled, then remove
# udev's 80-drivers.rules file, which contains rules for
# automatically calling modprobe.
${lib.optionalString (!config.boot.hardwareScan) ''
ln -s /dev/null $out/80-drivers.rules
''}
'';
hwdbBin =
pkgs.runCommand "hwdb.bin"
{
preferLocalBuild = true;
allowSubstitutes = false;
packages = lib.unique (map toString ([ udev ] ++ cfg.packages));
}
''
mkdir -p etc/udev/hwdb.d
for i in $packages; do
echo "Adding hwdb files for package $i"
for j in $i/{etc,lib}/udev/hwdb.d/*; do
ln -s $j etc/udev/hwdb.d/$(basename $j)
done
done
echo "Generating hwdb database..."
# hwdb --update doesn't return error code even on errors!
res="$(${pkgs.buildPackages.systemd}/bin/systemd-hwdb --root=$(pwd) update 2>&1)"
echo "$res"
[ -z "$(echo "$res" | egrep '^Error')" ]
mv etc/udev/hwdb.bin $out
'';
compressFirmware =
firmware:
if
config.hardware.firmwareCompression == "none" || (firmware.compressFirmware or true) == false
then
firmware
else if config.hardware.firmwareCompression == "zstd" then
pkgs.compressFirmwareZstd firmware
else
pkgs.compressFirmwareXz firmware;
# Udev has a 512-character limit for ENV{PATH}, so create a symlink
# tree to work around this.
udevPath = pkgs.buildEnv {
name = "udev-path";
paths = cfg.path;
pathsToLink = [ "/bin" "/sbin" ];
pathsToLink = [
"/bin"
"/sbin"
];
ignoreCollisions = true;
};
@ -201,7 +227,7 @@ in
packages = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [];
default = [ ];
description = ''
List of packages containing {command}`udev` rules.
All files found in
@ -214,7 +240,7 @@ in
path = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [];
default = [ ];
description = ''
Packages added to the {env}`PATH` environment variable when
executing programs from Udev rules.
@ -256,7 +282,7 @@ in
hardware.firmware = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = [];
default = [ ];
description = ''
List of packages containing firmware files. Such files
will be loaded automatically if the kernel asks for them
@ -266,19 +292,29 @@ in
precedence. Note that you must rebuild your system if you add
files to any of these directories.
'';
apply = list: pkgs.buildEnv {
name = "firmware";
paths = map compressFirmware list;
pathsToLink = [ "/lib/firmware" ];
ignoreCollisions = true;
};
apply =
list:
pkgs.buildEnv {
name = "firmware";
paths = map compressFirmware list;
pathsToLink = [ "/lib/firmware" ];
ignoreCollisions = true;
};
};
hardware.firmwareCompression = lib.mkOption {
type = lib.types.enum [ "xz" "zstd" "none" ];
default = if config.boot.kernelPackages.kernelAtLeast "5.19" then "zstd"
else if config.boot.kernelPackages.kernelAtLeast "5.3" then "xz"
else "none";
type = lib.types.enum [
"xz"
"zstd"
"none"
];
default =
if config.boot.kernelPackages.kernelAtLeast "5.19" then
"zstd"
else if config.boot.kernelPackages.kernelAtLeast "5.3" then
"xz"
else
"none";
defaultText = "auto";
description = ''
Whether to compress firmware files.
@ -309,7 +345,7 @@ in
packages = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [];
default = [ ];
description = ''
*This will only be used when systemd is used in stage 1.*
@ -323,7 +359,7 @@ in
binPackages = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [];
default = [ ];
description = ''
*This will only be used when systemd is used in stage 1.*
@ -351,21 +387,22 @@ in
};
###### implementation
config = lib.mkIf cfg.enable {
assertions = [
{
assertion = config.hardware.firmwareCompression == "zstd" -> config.boot.kernelPackages.kernelAtLeast "5.19";
assertion =
config.hardware.firmwareCompression == "zstd" -> config.boot.kernelPackages.kernelAtLeast "5.19";
message = ''
The firmware compression method is set to zstd, but the kernel version is too old.
The kernel version must be at least 5.3 to use zstd compression.
'';
}
{
assertion = config.hardware.firmwareCompression == "xz" -> config.boot.kernelPackages.kernelAtLeast "5.3";
assertion =
config.hardware.firmwareCompression == "xz" -> config.boot.kernelPackages.kernelAtLeast "5.3";
message = ''
The firmware compression method is set to xz, but the kernel version is too old.
The kernel version must be at least 5.3 to use xz compression.
@ -375,18 +412,28 @@ in
services.udev.extraRules = nixosRules;
services.udev.packages = [ extraUdevRules extraHwdbFile ];
services.udev.packages = [
extraUdevRules
extraHwdbFile
];
services.udev.path = [ pkgs.coreutils pkgs.gnused pkgs.gnugrep pkgs.util-linux udev ];
services.udev.path = [
pkgs.coreutils
pkgs.gnused
pkgs.gnugrep
pkgs.util-linux
udev
];
boot.kernelParams = lib.mkIf (!config.networking.usePredictableInterfaceNames) [ "net.ifnames=0" ];
boot.initrd.extraUdevRulesCommands = lib.mkIf (!config.boot.initrd.systemd.enable && config.boot.initrd.services.udev.rules != "")
''
cat <<'EOF' > $out/99-local.rules
${config.boot.initrd.services.udev.rules}
EOF
'';
boot.initrd.extraUdevRulesCommands =
lib.mkIf (!config.boot.initrd.systemd.enable && config.boot.initrd.services.udev.rules != "")
''
cat <<'EOF' > $out/99-local.rules
${config.boot.initrd.services.udev.rules}
EOF
'';
boot.initrd.services.udev.rules = nixosInitrdRules;
@ -415,32 +462,39 @@ in
udevPath = config.boot.initrd.systemd.contents."/bin".source;
udev = config.boot.initrd.systemd.package;
systemd = config.boot.initrd.systemd.package;
binPackages = config.boot.initrd.services.udev.binPackages ++ [ config.boot.initrd.systemd.contents."/bin".source ];
binPackages = config.boot.initrd.services.udev.binPackages ++ [
config.boot.initrd.systemd.contents."/bin".source
];
};
};
# Insert initrd rules
boot.initrd.services.udev.packages = [
initrdUdevRules
(lib.mkIf (config.boot.initrd.services.udev.rules != "") (pkgs.writeTextFile {
name = "initrd-udev-rules";
destination = "/etc/udev/rules.d/99-local.rules";
text = config.boot.initrd.services.udev.rules;
}))
(lib.mkIf (config.boot.initrd.services.udev.rules != "") (
pkgs.writeTextFile {
name = "initrd-udev-rules";
destination = "/etc/udev/rules.d/99-local.rules";
text = config.boot.initrd.services.udev.rules;
}
))
];
environment.etc = {
"udev/rules.d".source = udevRulesFor {
name = "udev-rules";
udevPackages = cfg.packages;
systemd = config.systemd.package;
binPackages = cfg.packages;
inherit udevPath udev;
environment.etc =
{
"udev/rules.d".source = udevRulesFor {
name = "udev-rules";
udevPackages = cfg.packages;
systemd = config.systemd.package;
binPackages = cfg.packages;
inherit udevPath udev;
};
"udev/hwdb.bin".source = hwdbBin;
}
// lib.optionalAttrs config.boot.modprobeConfig.enable {
# We don't place this into `extraModprobeConfig` so that stage-1 ramdisk doesn't bloat.
"modprobe.d/firmware.conf".text =
"options firmware_class path=${config.hardware.firmware}/lib/firmware";
};
"udev/hwdb.bin".source = hwdbBin;
} // lib.optionalAttrs config.boot.modprobeConfig.enable {
# We don't place this into `extraModprobeConfig` so that stage-1 ramdisk doesn't bloat.
"modprobe.d/firmware.conf".text = "options firmware_class path=${config.hardware.firmware}/lib/firmware";
};
system.requiredKernelConfig = with config.lib.kernelConfig; [
(isEnabled "UNIX")
@ -468,6 +522,9 @@ in
};
imports = [
(lib.mkRenamedOptionModule [ "services" "udev" "initrdRules" ] [ "boot" "initrd" "services" "udev" "rules" ])
(lib.mkRenamedOptionModule
[ "services" "udev" "initrdRules" ]
[ "boot" "initrd" "services" "udev" "rules" ]
)
];
}

View file

@ -1,5 +1,10 @@
# Upower daemon.
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.upower;
@ -242,7 +247,7 @@ in
systemd.packages = [ cfg.package ];
environment.etc."UPower/UPower.conf".text = lib.generators.toINI {} {
environment.etc."UPower/UPower.conf".text = lib.generators.toINI { } {
UPower = {
EnableWattsUpPro = cfg.enableWattsUpPro;
NoPollBatteries = cfg.noPollBatteries;

File diff suppressed because it is too large Load diff

View file

@ -1,48 +1,89 @@
{ config, lib, pkgs, utils, ... }:
{
config,
lib,
pkgs,
utils,
...
}:
let
cfg = config.services.logrotate;
generateLine = n: v:
if builtins.elem n [ "files" "priority" "enable" "global" ] || v == null then null
else if builtins.elem n [ "frequency" ] then "${v}\n"
else if builtins.elem n [ "firstaction" "lastaction" "prerotate" "postrotate" "preremove" ]
then "${n}\n ${v}\n endscript\n"
else if lib.isInt v then "${n} ${toString v}\n"
else if v == true then "${n}\n"
else if v == false then "no${n}\n"
else "${n} ${v}\n";
generateSection = indent: settings: lib.concatStringsSep (lib.fixedWidthString indent " " "") (
lib.filter (x: x != null) (lib.mapAttrsToList generateLine settings)
);
generateLine =
n: v:
if
builtins.elem n [
"files"
"priority"
"enable"
"global"
]
|| v == null
then
null
else if builtins.elem n [ "frequency" ] then
"${v}\n"
else if
builtins.elem n [
"firstaction"
"lastaction"
"prerotate"
"postrotate"
"preremove"
]
then
"${n}\n ${v}\n endscript\n"
else if lib.isInt v then
"${n} ${toString v}\n"
else if v == true then
"${n}\n"
else if v == false then
"no${n}\n"
else
"${n} ${v}\n";
generateSection =
indent: settings:
lib.concatStringsSep (lib.fixedWidthString indent " " "") (
lib.filter (x: x != null) (lib.mapAttrsToList generateLine settings)
);
# generateSection includes a final newline hence weird closing brace
mkConf = settings:
if settings.global or false then generateSection 0 settings
else ''
${lib.concatMapStringsSep "\n" (files: ''"${files}"'') (lib.toList settings.files)} {
${generateSection 2 settings}}
'';
mkConf =
settings:
if settings.global or false then
generateSection 0 settings
else
''
${lib.concatMapStringsSep "\n" (files: ''"${files}"'') (lib.toList settings.files)} {
${generateSection 2 settings}}
'';
settings = lib.sortProperties (lib.attrValues (lib.filterAttrs (_: settings: settings.enable) (
lib.foldAttrs lib.recursiveUpdate { } [
{
header = {
enable = true;
missingok = true;
notifempty = true;
frequency = "weekly";
rotate = 4;
};
}
cfg.settings
{ header = { global = true; priority = 100; }; }
]
)));
settings = lib.sortProperties (
lib.attrValues (
lib.filterAttrs (_: settings: settings.enable) (
lib.foldAttrs lib.recursiveUpdate { } [
{
header = {
enable = true;
missingok = true;
notifempty = true;
frequency = "weekly";
rotate = 4;
};
}
cfg.settings
{
header = {
global = true;
priority = 100;
};
}
]
)
)
);
configFile = pkgs.writeTextFile {
name = "logrotate.conf";
text = lib.concatStringsSep "\n" (
map mkConf settings
);
text = lib.concatStringsSep "\n" (map mkConf settings);
checkPhase = lib.optionalString cfg.checkConfig ''
# logrotate --debug also checks that users specified in config
# file exist, but we only have sandboxed users here so brown these
@ -79,15 +120,27 @@ let
'';
};
mailOption =
lib.optionalString (lib.foldr (n: a: a || (n.mail or false) != false) false (lib.attrValues cfg.settings))
"--mail=${pkgs.mailutils}/bin/mail";
mailOption = lib.optionalString (lib.foldr (n: a: a || (n.mail or false) != false) false (
lib.attrValues cfg.settings
)) "--mail=${pkgs.mailutils}/bin/mail";
in
{
imports = [
(lib.mkRemovedOptionModule [ "services" "logrotate" "config" ] "Modify services.logrotate.settings.header instead")
(lib.mkRemovedOptionModule [ "services" "logrotate" "extraConfig" ] "Modify services.logrotate.settings.header instead")
(lib.mkRemovedOptionModule [ "services" "logrotate" "paths" ] "Add attributes to services.logrotate.settings instead")
(lib.mkRemovedOptionModule [
"services"
"logrotate"
"config"
] "Modify services.logrotate.settings.header instead")
(lib.mkRemovedOptionModule [
"services"
"logrotate"
"extraConfig"
] "Modify services.logrotate.settings.header instead")
(lib.mkRemovedOptionModule [
"services"
"logrotate"
"paths"
] "Add attributes to services.logrotate.settings instead")
];
options = {
@ -136,57 +189,70 @@ in
''';
};
};
'';
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
freeformType = with lib.types; attrsOf (nullOr (oneOf [ int bool str ]));
'';
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
freeformType =
with lib.types;
attrsOf (
nullOr (oneOf [
int
bool
str
])
);
options = {
enable = lib.mkEnableOption "setting individual kill switch" // {
default = true;
};
options = {
enable = lib.mkEnableOption "setting individual kill switch" // {
default = true;
};
global = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether this setting is a global option or not: set to have these
settings apply to all files settings with a higher priority.
'';
};
files = lib.mkOption {
type = with lib.types; either str (listOf str);
default = name;
defaultText = ''
The attrset name if not specified
'';
description = ''
Single or list of files for which rules are defined.
The files are quoted with double-quotes in logrotate configuration,
so globs and spaces are supported.
Note this setting is ignored if globals is true.
'';
};
global = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether this setting is a global option or not: set to have these
settings apply to all files settings with a higher priority.
'';
};
files = lib.mkOption {
type = with lib.types; either str (listOf str);
default = name;
defaultText = ''
The attrset name if not specified
'';
description = ''
Single or list of files for which rules are defined.
The files are quoted with double-quotes in logrotate configuration,
so globs and spaces are supported.
Note this setting is ignored if globals is true.
'';
};
frequency = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
How often to rotate the logs. Defaults to previously set global setting,
which itself defaults to weekly.
'';
};
frequency = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
How often to rotate the logs. Defaults to previously set global setting,
which itself defaults to weekly.
'';
};
priority = lib.mkOption {
type = lib.types.int;
default = 1000;
description = ''
Order of this logrotate block in relation to the others. The semantics are
the same as with `lib.mkOrder`. Smaller values are inserted first.
'';
};
};
priority = lib.mkOption {
type = lib.types.int;
default = 1000;
description = ''
Order of this logrotate block in relation to the others. The semantics are
the same as with `lib.mkOrder`. Smaller values are inserted first.
'';
};
};
}));
}
)
);
};
configFile = lib.mkOption {
@ -233,7 +299,7 @@ in
extraArgs = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
default = [ ];
description = "Additional command line arguments to pass on logrotate invocation";
};
};
@ -248,54 +314,56 @@ in
];
startAt = "hourly";
serviceConfig = {
Type = "oneshot";
ExecStart = "${lib.getExe pkgs.logrotate} ${utils.escapeSystemdExecArgs cfg.extraArgs} ${mailOption} ${cfg.configFile}";
serviceConfig =
{
Type = "oneshot";
ExecStart = "${lib.getExe pkgs.logrotate} ${utils.escapeSystemdExecArgs cfg.extraArgs} ${mailOption} ${cfg.configFile}";
# performance
Nice = 19;
IOSchedulingClass = "best-effort";
IOSchedulingPriority = 7;
# performance
Nice = 19;
IOSchedulingClass = "best-effort";
IOSchedulingPriority = 7;
# hardening
CapabilityBoundingSet = [
"CAP_CHOWN"
"CAP_DAC_OVERRIDE"
"CAP_FOWNER"
"CAP_KILL"
"CAP_SETUID"
"CAP_SETGID"
];
DevicePolicy = "closed";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "full";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = false; # can create sgid directories
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged @resources"
"@chown @setuid"
];
UMask = "0027";
} // lib.optionalAttrs (!cfg.allowNetworking) {
PrivateNetwork = true; # e.g. mail delivery
RestrictAddressFamilies = [ "AF_UNIX" ];
};
# hardening
CapabilityBoundingSet = [
"CAP_CHOWN"
"CAP_DAC_OVERRIDE"
"CAP_FOWNER"
"CAP_KILL"
"CAP_SETUID"
"CAP_SETGID"
];
DevicePolicy = "closed";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "full";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = false; # can create sgid directories
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged @resources"
"@chown @setuid"
];
UMask = "0027";
}
// lib.optionalAttrs (!cfg.allowNetworking) {
PrivateNetwork = true; # e.g. mail delivery
RestrictAddressFamilies = [ "AF_UNIX" ];
};
};
systemd.services.logrotate-checkconf = {
description = "Logrotate configuration check";

View file

@ -1,23 +1,32 @@
{ config, lib, pkgs, ... }: with lib;
{
config,
lib,
pkgs,
...
}:
with lib;
let
cfg = config.services.promtail;
format = pkgs.formats.json {};
prettyJSON = conf: with lib; pipe conf [
(flip removeAttrs [ "_module" ])
(format.generate "promtail-config.json")
];
format = pkgs.formats.json { };
prettyJSON =
conf:
with lib;
pipe conf [
(flip removeAttrs [ "_module" ])
(format.generate "promtail-config.json")
];
allowSystemdJournal = cfg.configuration ? scrape_configs && lib.any (v: v ? journal) cfg.configuration.scrape_configs;
allowSystemdJournal =
cfg.configuration ? scrape_configs && lib.any (v: v ? journal) cfg.configuration.scrape_configs;
allowPositionsFile = !lib.hasPrefix "/var/cache/promtail" positionsFile;
positionsFile = cfg.configuration.positions.filename;
configFile = if cfg.configFile != null
then cfg.configFile
else prettyJSON cfg.configuration;
configFile = if cfg.configFile != null then cfg.configFile else prettyJSON cfg.configuration;
in {
in
{
options.services.promtail = with types; {
enable = mkEnableOption "the Promtail ingresser";
@ -40,7 +49,7 @@ in {
extraFlags = mkOption {
type = listOf str;
default = [];
default = [ ];
example = [ "--server.http-listen-port=3101" ];
description = ''
Specify a list of additional command line flags,
@ -61,47 +70,50 @@ in {
${lib.getExe pkgs.promtail} -config.file=${configFile} -check-syntax
'';
serviceConfig = {
Restart = "on-failure";
TimeoutStopSec = 10;
serviceConfig =
{
Restart = "on-failure";
TimeoutStopSec = 10;
ExecStart = "${pkgs.promtail}/bin/promtail -config.file=${configFile} ${escapeShellArgs cfg.extraFlags}";
ExecStart = "${pkgs.promtail}/bin/promtail -config.file=${configFile} ${escapeShellArgs cfg.extraFlags}";
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
CacheDirectory = "promtail";
ReadWritePaths = lib.optional allowPositionsFile (builtins.dirOf positionsFile);
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
CacheDirectory = "promtail";
ReadWritePaths = lib.optional allowPositionsFile (builtins.dirOf positionsFile);
User = "promtail";
Group = "promtail";
User = "promtail";
Group = "promtail";
CapabilityBoundingSet = "";
NoNewPrivileges = true;
CapabilityBoundingSet = "";
NoNewPrivileges = true;
ProtectKernelModules = true;
SystemCallArchitectures = "native";
ProtectKernelLogs = true;
ProtectClock = true;
ProtectKernelModules = true;
SystemCallArchitectures = "native";
ProtectKernelLogs = true;
ProtectClock = true;
LockPersonality = true;
ProtectHostname = true;
RestrictRealtime = true;
MemoryDenyWriteExecute = true;
PrivateUsers = true;
LockPersonality = true;
ProtectHostname = true;
RestrictRealtime = true;
MemoryDenyWriteExecute = true;
PrivateUsers = true;
SupplementaryGroups = lib.optional (allowSystemdJournal) "systemd-journal";
} // (optionalAttrs (!pkgs.stdenv.hostPlatform.isAarch64) { # FIXME: figure out why this breaks on aarch64
SystemCallFilter = "@system-service";
});
SupplementaryGroups = lib.optional (allowSystemdJournal) "systemd-journal";
}
// (optionalAttrs (!pkgs.stdenv.hostPlatform.isAarch64) {
# FIXME: figure out why this breaks on aarch64
SystemCallFilter = "@system-service";
});
};
users.groups.promtail = {};
users.groups.promtail = { };
users.users.promtail = {
description = "Promtail service user";
isSystemUser = true;

View file

@ -103,5 +103,5 @@ in
};
};
meta.maintainers = with lib.maintainers; [RTUnreal];
meta.maintainers = with lib.maintainers; [ RTUnreal ];
}

View file

@ -1,40 +1,57 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.mailman;
inherit (pkgs.mailmanPackages.buildEnvs { withHyperkitty = cfg.hyperkitty.enable; withLDAP = cfg.ldap.enable; })
mailmanEnv webEnv;
inherit
(pkgs.mailmanPackages.buildEnvs {
withHyperkitty = cfg.hyperkitty.enable;
withLDAP = cfg.ldap.enable;
})
mailmanEnv
webEnv
;
withPostgresql = config.services.postgresql.enable;
# This deliberately doesn't use recursiveUpdate so users can
# override the defaults.
webSettings = {
DEFAULT_FROM_EMAIL = cfg.siteOwner;
SERVER_EMAIL = cfg.siteOwner;
ALLOWED_HOSTS = [ "localhost" "127.0.0.1" ] ++ cfg.webHosts;
COMPRESS_OFFLINE = true;
STATIC_ROOT = "/var/lib/mailman-web-static";
MEDIA_ROOT = "/var/lib/mailman-web/media";
LOGGING = {
version = 1;
disable_existing_loggers = true;
handlers.console.class = "logging.StreamHandler";
loggers.django = {
handlers = [ "console" ];
level = "INFO";
webSettings =
{
DEFAULT_FROM_EMAIL = cfg.siteOwner;
SERVER_EMAIL = cfg.siteOwner;
ALLOWED_HOSTS = [
"localhost"
"127.0.0.1"
] ++ cfg.webHosts;
COMPRESS_OFFLINE = true;
STATIC_ROOT = "/var/lib/mailman-web-static";
MEDIA_ROOT = "/var/lib/mailman-web/media";
LOGGING = {
version = 1;
disable_existing_loggers = true;
handlers.console.class = "logging.StreamHandler";
loggers.django = {
handlers = [ "console" ];
level = "INFO";
};
};
};
HAYSTACK_CONNECTIONS.default = {
ENGINE = "haystack.backends.whoosh_backend.WhooshEngine";
PATH = "/var/lib/mailman-web/fulltext-index";
};
} // lib.optionalAttrs cfg.enablePostfix {
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend";
EMAIL_HOST = "127.0.0.1";
EMAIL_PORT = 25;
} // cfg.webSettings;
HAYSTACK_CONNECTIONS.default = {
ENGINE = "haystack.backends.whoosh_backend.WhooshEngine";
PATH = "/var/lib/mailman-web/fulltext-index";
};
}
// lib.optionalAttrs cfg.enablePostfix {
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend";
EMAIL_HOST = "127.0.0.1";
EMAIL_PORT = 25;
}
// cfg.webSettings;
webSettingsJSON = pkgs.writeText "settings.json" (builtins.toJSON webSettings);
@ -45,9 +62,11 @@ let
transport_file_type: hash
'';
mailmanCfg = lib.generators.toINI {} (lib.recursiveUpdate cfg.settings {
webservice.admin_pass = "#NIXOS_MAILMAN_REST_API_PASS_SECRET#";
});
mailmanCfg = lib.generators.toINI { } (
lib.recursiveUpdate cfg.settings {
webservice.admin_pass = "#NIXOS_MAILMAN_REST_API_PASS_SECRET#";
}
);
mailmanCfgFile = pkgs.writeText "mailman-raw.cfg" mailmanCfg;
@ -64,13 +83,16 @@ let
api_key: @API_KEY@
'';
in {
in
{
###### interface
imports = [
(lib.mkRenamedOptionModule [ "services" "mailman" "hyperkittyBaseUrl" ]
[ "services" "mailman" "hyperkitty" "baseUrl" ])
(lib.mkRenamedOptionModule
[ "services" "mailman" "hyperkittyBaseUrl" ]
[ "services" "mailman" "hyperkitty" "baseUrl" ]
)
(lib.mkRemovedOptionModule [ "services" "mailman" "hyperkittyApiKey" ] ''
The Hyperkitty API key is now generated on first run, and not
@ -146,9 +168,17 @@ in {
groupSearch = {
type = lib.mkOption {
type = lib.types.enum [
"posixGroup" "groupOfNames" "memberDNGroup" "nestedMemberDNGroup" "nestedGroupOfNames"
"groupOfUniqueNames" "nestedGroupOfUniqueNames" "activeDirectoryGroup" "nestedActiveDirectoryGroup"
"organizationalRoleGroup" "nestedOrganizationalRoleGroup"
"posixGroup"
"groupOfNames"
"memberDNGroup"
"nestedMemberDNGroup"
"nestedGroupOfNames"
"groupOfUniqueNames"
"nestedGroupOfUniqueNames"
"activeDirectoryGroup"
"nestedActiveDirectoryGroup"
"organizationalRoleGroup"
"nestedOrganizationalRoleGroup"
];
default = "posixGroup";
apply = v: "${lib.toUpper (lib.substring 0 1 v)}${lib.substring 1 (lib.stringLength v) v}Type";
@ -229,7 +259,7 @@ in {
webHosts = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
default = [ ];
description = ''
The list of hostnames and/or IP addresses from which the Mailman Web
UI will accept requests. By default, "localhost" and "127.0.0.1" are
@ -249,7 +279,7 @@ in {
webSettings = lib.mkOption {
type = lib.types.attrs;
default = {};
default = { };
description = ''
Overrides for the default mailman-web Django settings.
'';
@ -268,8 +298,10 @@ in {
uwsgiSettings = lib.mkOption {
default = { };
example = { uwsgi.buffer-size = 8192; };
inherit (pkgs.formats.json {}) type;
example = {
uwsgi.buffer-size = 8192;
};
inherit (pkgs.formats.json { }) type;
description = ''
Extra configuration to merge into uwsgi config.
'';
@ -288,7 +320,7 @@ in {
settings = lib.mkOption {
description = "Settings for mailman.cfg";
type = lib.types.attrsOf (lib.types.attrsOf lib.types.str);
default = {};
default = { };
};
hyperkitty = {
@ -311,72 +343,105 @@ in {
config = lib.mkIf cfg.enable {
services.mailman.settings = {
mailman.site_owner = lib.mkDefault cfg.siteOwner;
mailman.layout = "fhs";
services.mailman.settings =
{
mailman.site_owner = lib.mkDefault cfg.siteOwner;
mailman.layout = "fhs";
"paths.fhs" = {
bin_dir = "${pkgs.mailmanPackages.mailman}/bin";
var_dir = "/var/lib/mailman";
queue_dir = "$var_dir/queue";
template_dir = "$var_dir/templates";
log_dir = "/var/log/mailman";
lock_dir = "/run/mailman/lock";
etc_dir = "/etc";
pid_file = "/run/mailman/master.pid";
};
"paths.fhs" = {
bin_dir = "${pkgs.mailmanPackages.mailman}/bin";
var_dir = "/var/lib/mailman";
queue_dir = "$var_dir/queue";
template_dir = "$var_dir/templates";
log_dir = "/var/log/mailman";
lock_dir = "/run/mailman/lock";
etc_dir = "/etc";
pid_file = "/run/mailman/master.pid";
};
mta.configuration = lib.mkDefault (if cfg.enablePostfix then "${postfixMtaConfig}" else throw "When Mailman Postfix integration is disabled, set `services.mailman.settings.mta.configuration` to the path of the config file required to integrate with your MTA.");
mta.configuration = lib.mkDefault (
if cfg.enablePostfix then
"${postfixMtaConfig}"
else
throw "When Mailman Postfix integration is disabled, set `services.mailman.settings.mta.configuration` to the path of the config file required to integrate with your MTA."
);
"archiver.hyperkitty" = lib.mkIf cfg.hyperkitty.enable {
class = "mailman_hyperkitty.Archiver";
enable = "yes";
configuration = "/var/lib/mailman/mailman-hyperkitty.cfg";
};
} // (let
loggerNames = ["root" "archiver" "bounce" "config" "database" "debug" "error" "fromusenet" "http" "locks" "mischief" "plugins" "runner" "smtp"];
loggerSectionNames = map (n: "logging.${n}") loggerNames;
in lib.genAttrs loggerSectionNames(name: { handler = "stderr"; })
);
assertions = let
inherit (config.services) postfix;
requirePostfixHash = optionPath: dataFile:
"archiver.hyperkitty" = lib.mkIf cfg.hyperkitty.enable {
class = "mailman_hyperkitty.Archiver";
enable = "yes";
configuration = "/var/lib/mailman/mailman-hyperkitty.cfg";
};
}
// (
let
expected = "hash:/var/lib/mailman/data/${dataFile}";
value = lib.attrByPath optionPath [] postfix;
loggerNames = [
"root"
"archiver"
"bounce"
"config"
"database"
"debug"
"error"
"fromusenet"
"http"
"locks"
"mischief"
"plugins"
"runner"
"smtp"
];
loggerSectionNames = map (n: "logging.${n}") loggerNames;
in
{ assertion = postfix.enable -> lib.isList value && lib.elem expected value;
lib.genAttrs loggerSectionNames (name: {
handler = "stderr";
})
);
assertions =
let
inherit (config.services) postfix;
requirePostfixHash =
optionPath: dataFile:
let
expected = "hash:/var/lib/mailman/data/${dataFile}";
value = lib.attrByPath optionPath [ ] postfix;
in
{
assertion = postfix.enable -> lib.isList value && lib.elem expected value;
message = ''
services.postfix.${lib.concatStringsSep "." optionPath} must contain
"${expected}".
See <https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html>.
'';
};
in [
{ assertion = cfg.webHosts != [];
message = ''
services.mailman.serve.enable requires there to be at least one entry
in services.mailman.webHosts.
'';
}
] ++ (lib.optionals cfg.enablePostfix [
{ assertion = postfix.enable;
message = ''
Mailman's default NixOS configuration requires Postfix to be enabled.
in
[
{
assertion = cfg.webHosts != [ ];
message = ''
services.mailman.serve.enable requires there to be at least one entry
in services.mailman.webHosts.
'';
}
]
++ (lib.optionals cfg.enablePostfix [
{
assertion = postfix.enable;
message = ''
Mailman's default NixOS configuration requires Postfix to be enabled.
If you want to use another MTA, set services.mailman.enablePostfix
to false and configure settings in services.mailman.settings.mta.
If you want to use another MTA, set services.mailman.enablePostfix
to false and configure settings in services.mailman.settings.mta.
Refer to <https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html>
for more info.
'';
}
(requirePostfixHash [ "config" "relay_domains" ] "postfix_domains")
(requirePostfixHash [ "config" "transport_maps" ] "postfix_lmtp")
(requirePostfixHash [ "config" "local_recipient_maps" ] "postfix_lmtp")
]);
Refer to <https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html>
for more info.
'';
}
(requirePostfixHash [ "config" "relay_domains" ] "postfix_domains")
(requirePostfixHash [ "config" "transport_maps" ] "postfix_lmtp")
(requirePostfixHash [ "config" "local_recipient_maps" ] "postfix_lmtp")
]);
users.users.mailman = {
description = "GNU Mailman";
@ -388,7 +453,7 @@ in {
isSystemUser = true;
group = "mailman";
};
users.groups.mailman = {};
users.groups.mailman = { };
environment.etc."mailman3/settings.py".text = ''
import os
@ -427,9 +492,13 @@ in {
AUTH_LDAP_GROUP_SEARCH = LDAPSearch("${cfg.ldap.groupSearch.ou}",
ldap.SCOPE_SUBTREE, "${cfg.ldap.groupSearch.query}")
AUTH_LDAP_USER_ATTR_MAP = {
${lib.concatStrings (lib.flip lib.mapAttrsToList cfg.ldap.attrMap (key: value: ''
"${key}": "${value}",
''))}
${lib.concatStrings (
lib.flip lib.mapAttrsToList cfg.ldap.attrMap (
key: value: ''
"${key}": "${value}",
''
)
)}
}
${lib.optionalString (cfg.ldap.superUserGroup != null) ''
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
@ -443,7 +512,7 @@ in {
''}
'';
services.nginx = lib.mkIf (cfg.serve.enable && cfg.webHosts != []) {
services.nginx = lib.mkIf (cfg.serve.enable && cfg.webHosts != [ ]) {
enable = lib.mkDefault true;
virtualHosts = lib.genAttrs cfg.webHosts (webHost: {
locations = {
@ -454,215 +523,263 @@ in {
proxyTimeout = lib.mkDefault "120s";
};
environment.systemPackages = [ (pkgs.buildEnv {
name = "mailman-tools";
# We don't want to pollute the system PATH with a python
# interpreter etc. so let's pick only the stuff we actually
# want from {web,mailman}Env
pathsToLink = ["/bin"];
paths = [ mailmanEnv webEnv ];
# Only mailman-related stuff is installed, the rest is removed
# in `postBuild`.
ignoreCollisions = true;
postBuild = ''
find $out/bin/ -mindepth 1 -not -name "mailman*" -delete
'' + lib.optionalString config.security.sudo.enable ''
mv $out/bin/mailman $out/bin/.mailman-wrapped
echo '#!${pkgs.runtimeShell}
sudo=exec
if [[ "$USER" != mailman ]]; then
sudo="exec /run/wrappers/bin/sudo -u mailman"
fi
$sudo ${placeholder "out"}/bin/.mailman-wrapped "$@"
' > $out/bin/mailman
chmod +x $out/bin/mailman
'';
}) ];
environment.systemPackages = [
(pkgs.buildEnv {
name = "mailman-tools";
# We don't want to pollute the system PATH with a python
# interpreter etc. so let's pick only the stuff we actually
# want from {web,mailman}Env
pathsToLink = [ "/bin" ];
paths = [
mailmanEnv
webEnv
];
# Only mailman-related stuff is installed, the rest is removed
# in `postBuild`.
ignoreCollisions = true;
postBuild =
''
find $out/bin/ -mindepth 1 -not -name "mailman*" -delete
''
+ lib.optionalString config.security.sudo.enable ''
mv $out/bin/mailman $out/bin/.mailman-wrapped
echo '#!${pkgs.runtimeShell}
sudo=exec
if [[ "$USER" != mailman ]]; then
sudo="exec /run/wrappers/bin/sudo -u mailman"
fi
$sudo ${placeholder "out"}/bin/.mailman-wrapped "$@"
' > $out/bin/mailman
chmod +x $out/bin/mailman
'';
})
];
services.postfix = lib.mkIf cfg.enablePostfix {
recipientDelimiter = "+"; # bake recipient addresses in mail envelopes via VERP
recipientDelimiter = "+"; # bake recipient addresses in mail envelopes via VERP
config = {
owner_request_special = "no"; # Mailman handles -owner addresses on its own
owner_request_special = "no"; # Mailman handles -owner addresses on its own
};
};
systemd.sockets.mailman-uwsgi = lib.mkIf cfg.serve.enable {
wantedBy = ["sockets.target"];
before = ["nginx.service"];
wantedBy = [ "sockets.target" ];
before = [ "nginx.service" ];
socketConfig.ListenStream = "/run/mailman-web.socket";
};
systemd.services = {
mailman = {
description = "GNU Mailman Master Process";
before = lib.optional cfg.enablePostfix "postfix.service";
after = [ "network.target" ]
++ lib.optional cfg.enablePostfix "postfix-setup.service"
++ lib.optional withPostgresql "postgresql.service";
restartTriggers = [ mailmanCfgFile ];
requires = lib.optional withPostgresql "postgresql.service";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${mailmanEnv}/bin/mailman start";
ExecStop = "${mailmanEnv}/bin/mailman stop";
User = "mailman";
Group = "mailman";
Type = "forking";
RuntimeDirectory = "mailman";
LogsDirectory = "mailman";
PIDFile = "/run/mailman/master.pid";
Restart = "on-failure";
TimeoutStartSec = 180;
TimeoutStopSec = 180;
systemd.services =
{
mailman = {
description = "GNU Mailman Master Process";
before = lib.optional cfg.enablePostfix "postfix.service";
after =
[ "network.target" ]
++ lib.optional cfg.enablePostfix "postfix-setup.service"
++ lib.optional withPostgresql "postgresql.service";
restartTriggers = [ mailmanCfgFile ];
requires = lib.optional withPostgresql "postgresql.service";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${mailmanEnv}/bin/mailman start";
ExecStop = "${mailmanEnv}/bin/mailman stop";
User = "mailman";
Group = "mailman";
Type = "forking";
RuntimeDirectory = "mailman";
LogsDirectory = "mailman";
PIDFile = "/run/mailman/master.pid";
Restart = "on-failure";
TimeoutStartSec = 180;
TimeoutStopSec = 180;
};
};
};
mailman-settings = {
description = "Generate settings files (including secrets) for Mailman";
before = [ "mailman.service" "mailman-web-setup.service" "mailman-uwsgi.service" "hyperkitty.service" ];
requiredBy = [ "mailman.service" "mailman-web-setup.service" "mailman-uwsgi.service" "hyperkitty.service" ];
path = with pkgs; [ jq ];
after = lib.optional withPostgresql "postgresql.service";
requires = lib.optional withPostgresql "postgresql.service";
serviceConfig.RemainAfterExit = true;
serviceConfig.Type = "oneshot";
script = ''
install -m0750 -o mailman -g mailman ${mailmanCfgFile} /etc/mailman.cfg
${if cfg.restApiPassFile == null then ''
sed -i "s/#NIXOS_MAILMAN_REST_API_PASS_SECRET#/$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)/g" \
/etc/mailman.cfg
'' else ''
${pkgs.replace-secret}/bin/replace-secret \
'#NIXOS_MAILMAN_REST_API_PASS_SECRET#' \
${cfg.restApiPassFile} \
/etc/mailman.cfg
''}
mailman-settings = {
description = "Generate settings files (including secrets) for Mailman";
before = [
"mailman.service"
"mailman-web-setup.service"
"mailman-uwsgi.service"
"hyperkitty.service"
];
requiredBy = [
"mailman.service"
"mailman-web-setup.service"
"mailman-uwsgi.service"
"hyperkitty.service"
];
path = with pkgs; [ jq ];
after = lib.optional withPostgresql "postgresql.service";
requires = lib.optional withPostgresql "postgresql.service";
serviceConfig.RemainAfterExit = true;
serviceConfig.Type = "oneshot";
script = ''
install -m0750 -o mailman -g mailman ${mailmanCfgFile} /etc/mailman.cfg
${
if cfg.restApiPassFile == null then
''
sed -i "s/#NIXOS_MAILMAN_REST_API_PASS_SECRET#/$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)/g" \
/etc/mailman.cfg
''
else
''
${pkgs.replace-secret}/bin/replace-secret \
'#NIXOS_MAILMAN_REST_API_PASS_SECRET#' \
${cfg.restApiPassFile} \
/etc/mailman.cfg
''
}
mailmanDir=/var/lib/mailman
mailmanWebDir=/var/lib/mailman-web
mailmanDir=/var/lib/mailman
mailmanWebDir=/var/lib/mailman-web
mailmanCfg=$mailmanDir/mailman-hyperkitty.cfg
mailmanWebCfg=$mailmanWebDir/settings_local.json
mailmanCfg=$mailmanDir/mailman-hyperkitty.cfg
mailmanWebCfg=$mailmanWebDir/settings_local.json
install -m 0775 -o mailman -g mailman -d /var/lib/mailman-web-static
install -m 0770 -o mailman -g mailman -d $mailmanDir
install -m 0770 -o ${cfg.webUser} -g mailman -d $mailmanWebDir
install -m 0775 -o mailman -g mailman -d /var/lib/mailman-web-static
install -m 0770 -o mailman -g mailman -d $mailmanDir
install -m 0770 -o ${cfg.webUser} -g mailman -d $mailmanWebDir
if [ ! -e $mailmanWebCfg ]; then
hyperkittyApiKey=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)
secretKey=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)
if [ ! -e $mailmanWebCfg ]; then
hyperkittyApiKey=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)
secretKey=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)
install -m 0440 -o root -g mailman \
<(jq -n '.MAILMAN_ARCHIVER_KEY=$archiver_key | .SECRET_KEY=$secret_key' \
--arg archiver_key "$hyperkittyApiKey" \
--arg secret_key "$secretKey") \
"$mailmanWebCfg"
fi
install -m 0440 -o root -g mailman \
<(jq -n '.MAILMAN_ARCHIVER_KEY=$archiver_key | .SECRET_KEY=$secret_key' \
--arg archiver_key "$hyperkittyApiKey" \
--arg secret_key "$secretKey") \
"$mailmanWebCfg"
fi
hyperkittyApiKey="$(jq -r .MAILMAN_ARCHIVER_KEY "$mailmanWebCfg")"
mailmanCfgTmp=$(mktemp)
sed "s/@API_KEY@/$hyperkittyApiKey/g" ${mailmanHyperkittyCfg} >"$mailmanCfgTmp"
chown mailman:mailman "$mailmanCfgTmp"
mv "$mailmanCfgTmp" "$mailmanCfg"
'';
};
mailman-web-setup = {
description = "Prepare mailman-web files and database";
before = [ "mailman-uwsgi.service" ];
requiredBy = [ "mailman-uwsgi.service" ];
restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
script = ''
[[ -e "${webSettings.STATIC_ROOT}" ]] && find "${webSettings.STATIC_ROOT}/" -mindepth 1 -delete
${webEnv}/bin/mailman-web migrate
${webEnv}/bin/mailman-web collectstatic
${webEnv}/bin/mailman-web compress
'';
serviceConfig = {
User = cfg.webUser;
Group = "mailman";
Type = "oneshot";
WorkingDirectory = "/var/lib/mailman-web";
hyperkittyApiKey="$(jq -r .MAILMAN_ARCHIVER_KEY "$mailmanWebCfg")"
mailmanCfgTmp=$(mktemp)
sed "s/@API_KEY@/$hyperkittyApiKey/g" ${mailmanHyperkittyCfg} >"$mailmanCfgTmp"
chown mailman:mailman "$mailmanCfgTmp"
mv "$mailmanCfgTmp" "$mailmanCfg"
'';
};
};
mailman-uwsgi = lib.mkIf cfg.serve.enable (let
uwsgiConfig = lib.recursiveUpdate {
uwsgi = {
type = "normal";
plugins = ["python3"];
home = webEnv;
http = "127.0.0.1:18507";
buffer-size = 8192;
mailman-web-setup = {
description = "Prepare mailman-web files and database";
before = [ "mailman-uwsgi.service" ];
requiredBy = [ "mailman-uwsgi.service" ];
restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
script = ''
[[ -e "${webSettings.STATIC_ROOT}" ]] && find "${webSettings.STATIC_ROOT}/" -mindepth 1 -delete
${webEnv}/bin/mailman-web migrate
${webEnv}/bin/mailman-web collectstatic
${webEnv}/bin/mailman-web compress
'';
serviceConfig = {
User = cfg.webUser;
Group = "mailman";
Type = "oneshot";
WorkingDirectory = "/var/lib/mailman-web";
};
};
mailman-uwsgi = lib.mkIf cfg.serve.enable (
let
uwsgiConfig = lib.recursiveUpdate {
uwsgi =
{
type = "normal";
plugins = [ "python3" ];
home = webEnv;
http = "127.0.0.1:18507";
buffer-size = 8192;
}
// (
if cfg.serve.virtualRoot == "/" then
{ module = "mailman_web.wsgi:application"; }
else
{
mount = "${cfg.serve.virtualRoot}=mailman_web.wsgi:application";
manage-script-name = true;
}
);
} cfg.serve.uwsgiSettings;
uwsgiConfigFile = pkgs.writeText "uwsgi-mailman.json" (builtins.toJSON uwsgiConfig);
in
{
wantedBy = [ "multi-user.target" ];
after = lib.optional withPostgresql "postgresql.service";
requires = [
"mailman-uwsgi.socket"
"mailman-web-setup.service"
] ++ lib.optional withPostgresql "postgresql.service";
restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
# Since the mailman-web settings.py obstinately creates a logs
# dir in the cwd, change to the (writable) runtime directory before
# starting uwsgi.
ExecStart = "${pkgs.coreutils}/bin/env -C $RUNTIME_DIRECTORY ${
pkgs.uwsgi.override {
plugins = [ "python3" ];
python3 = webEnv.python;
}
}/bin/uwsgi --json ${uwsgiConfigFile}";
User = cfg.webUser;
Group = "mailman";
RuntimeDirectory = "mailman-uwsgi";
Restart = "on-failure";
};
}
// (if cfg.serve.virtualRoot == "/"
then { module = "mailman_web.wsgi:application"; }
else {
mount = "${cfg.serve.virtualRoot}=mailman_web.wsgi:application";
manage-script-name = true;
});
} cfg.serve.uwsgiSettings;
uwsgiConfigFile = pkgs.writeText "uwsgi-mailman.json" (builtins.toJSON uwsgiConfig);
in {
wantedBy = ["multi-user.target"];
after = lib.optional withPostgresql "postgresql.service";
requires = ["mailman-uwsgi.socket" "mailman-web-setup.service"]
++ lib.optional withPostgresql "postgresql.service";
restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
# Since the mailman-web settings.py obstinately creates a logs
# dir in the cwd, change to the (writable) runtime directory before
# starting uwsgi.
ExecStart = "${pkgs.coreutils}/bin/env -C $RUNTIME_DIRECTORY ${pkgs.uwsgi.override { plugins = ["python3"]; python3 = webEnv.python; }}/bin/uwsgi --json ${uwsgiConfigFile}";
User = cfg.webUser;
Group = "mailman";
RuntimeDirectory = "mailman-uwsgi";
Restart = "on-failure";
};
});
);
mailman-daily = {
description = "Trigger daily Mailman events";
startAt = "daily";
restartTriggers = [ mailmanCfgFile ];
serviceConfig = {
ExecStart = "${mailmanEnv}/bin/mailman digests --send";
User = "mailman";
Group = "mailman";
mailman-daily = {
description = "Trigger daily Mailman events";
startAt = "daily";
restartTriggers = [ mailmanCfgFile ];
serviceConfig = {
ExecStart = "${mailmanEnv}/bin/mailman digests --send";
User = "mailman";
Group = "mailman";
};
};
};
hyperkitty = lib.mkIf cfg.hyperkitty.enable {
description = "GNU Hyperkitty QCluster Process";
after = [ "network.target" ];
restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
wantedBy = [ "mailman.service" "multi-user.target" ];
serviceConfig = {
ExecStart = "${webEnv}/bin/mailman-web qcluster";
User = cfg.webUser;
Group = "mailman";
WorkingDirectory = "/var/lib/mailman-web";
Restart = "on-failure";
hyperkitty = lib.mkIf cfg.hyperkitty.enable {
description = "GNU Hyperkitty QCluster Process";
after = [ "network.target" ];
restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
wantedBy = [
"mailman.service"
"multi-user.target"
];
serviceConfig = {
ExecStart = "${webEnv}/bin/mailman-web qcluster";
User = cfg.webUser;
Group = "mailman";
WorkingDirectory = "/var/lib/mailman-web";
Restart = "on-failure";
};
};
};
} // lib.flip lib.mapAttrs' {
"minutely" = "minutely";
"quarter_hourly" = "*:00/15";
"hourly" = "hourly";
"daily" = "daily";
"weekly" = "weekly";
"yearly" = "yearly";
} (name: startAt:
lib.nameValuePair "hyperkitty-${name}" (lib.mkIf cfg.hyperkitty.enable {
description = "Trigger ${name} Hyperkitty events";
inherit startAt;
restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
ExecStart = "${webEnv}/bin/mailman-web runjobs ${name}";
User = cfg.webUser;
Group = "mailman";
WorkingDirectory = "/var/lib/mailman-web";
};
}));
}
// lib.flip lib.mapAttrs'
{
"minutely" = "minutely";
"quarter_hourly" = "*:00/15";
"hourly" = "hourly";
"daily" = "daily";
"weekly" = "weekly";
"yearly" = "yearly";
}
(
name: startAt:
lib.nameValuePair "hyperkitty-${name}" (
lib.mkIf cfg.hyperkitty.enable {
description = "Trigger ${name} Hyperkitty events";
inherit startAt;
restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
serviceConfig = {
ExecStart = "${webEnv}/bin/mailman-web runjobs ${name}";
User = cfg.webUser;
Group = "mailman";
WorkingDirectory = "/var/lib/mailman-web";
};
}
)
);
};
meta = {

View file

@ -1,79 +1,86 @@
{ lib
, config
, pkgs
, ...
{
lib,
config,
pkgs,
...
}:
let
inherit (lib) mkEnableOption mkOption mkIf types;
inherit (lib)
mkEnableOption
mkOption
mkIf
types
;
format = pkgs.formats.toml { };
cfg = config.services.hebbot;
settingsFile = format.generate "config.toml" cfg.settings;
mkTemplateOption = templateName: mkOption {
type = types.path;
description = ''
A path to the Markdown file for the ${templateName}.
'';
};
in
{
meta.maintainers = [ lib.maintainers.raitobezarius ];
options.services.hebbot = {
enable = mkEnableOption "hebbot";
package = lib.mkPackageOption pkgs "hebbot" {};
botPasswordFile = mkOption {
type = types.path;
description = ''
A path to the password file for your bot.
Consider using a path that does not end up in your Nix store
as it would be world readable.
'';
};
templates = {
project = mkTemplateOption "project template";
report = mkTemplateOption "report template";
section = mkTemplateOption "section template";
};
settings = mkOption {
type = format.type;
default = { };
description = ''
Configuration for Hebbot, see, for examples:
- <https://github.com/matrix-org/twim-config/blob/master/config.toml>
- <https://gitlab.gnome.org/Teams/Websites/thisweek.gnome.org/-/blob/main/hebbot/config.toml>
'';
};
mkTemplateOption =
templateName:
mkOption {
type = types.path;
description = ''
A path to the Markdown file for the ${templateName}.
'';
};
in
{
meta.maintainers = [ lib.maintainers.raitobezarius ];
options.services.hebbot = {
enable = mkEnableOption "hebbot";
package = lib.mkPackageOption pkgs "hebbot" { };
botPasswordFile = mkOption {
type = types.path;
description = ''
A path to the password file for your bot.
config = mkIf cfg.enable {
systemd.services.hebbot = {
description = "hebbot - a TWIM-style Matrix bot written in Rust";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
Consider using a path that does not end up in your Nix store
as it would be world readable.
'';
};
templates = {
project = mkTemplateOption "project template";
report = mkTemplateOption "report template";
section = mkTemplateOption "section template";
};
settings = mkOption {
type = format.type;
default = { };
description = ''
Configuration for Hebbot, see, for examples:
preStart = ''
ln -sf ${cfg.templates.project} ./project_template.md
ln -sf ${cfg.templates.report} ./report_template.md
ln -sf ${cfg.templates.section} ./section_template.md
ln -sf ${settingsFile} ./config.toml
'';
- <https://github.com/matrix-org/twim-config/blob/master/config.toml>
- <https://gitlab.gnome.org/Teams/Websites/thisweek.gnome.org/-/blob/main/hebbot/config.toml>
'';
};
};
script = ''
export BOT_PASSWORD="$(cat $CREDENTIALS_DIRECTORY/bot-password-file)"
${lib.getExe cfg.package}
'';
config = mkIf cfg.enable {
systemd.services.hebbot = {
description = "hebbot - a TWIM-style Matrix bot written in Rust";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
DynamicUser = true;
Restart = "on-failure";
LoadCredential = "bot-password-file:${cfg.botPasswordFile}";
RestartSec = "10s";
StateDirectory = "hebbot";
WorkingDirectory = "/var/lib/hebbot";
preStart = ''
ln -sf ${cfg.templates.project} ./project_template.md
ln -sf ${cfg.templates.report} ./report_template.md
ln -sf ${cfg.templates.section} ./section_template.md
ln -sf ${settingsFile} ./config.toml
'';
script = ''
export BOT_PASSWORD="$(cat $CREDENTIALS_DIRECTORY/bot-password-file)"
${lib.getExe cfg.package}
'';
serviceConfig = {
DynamicUser = true;
Restart = "on-failure";
LoadCredential = "bot-password-file:${cfg.botPasswordFile}";
RestartSec = "10s";
StateDirectory = "hebbot";
WorkingDirectory = "/var/lib/hebbot";
};
};
};
}

View file

@ -3,13 +3,14 @@
config,
pkgs,
...
}: let
}:
let
cfg = config.services.mautrix-whatsapp;
dataDir = "/var/lib/mautrix-whatsapp";
registrationFile = "${dataDir}/whatsapp-registration.yaml";
settingsFile = "${dataDir}/config.json";
settingsFileUnsubstituted = settingsFormat.generate "mautrix-whatsapp-config-unsubstituted.json" cfg.settings;
settingsFormat = pkgs.formats.json {};
settingsFormat = pkgs.formats.json { };
appservicePort = 29318;
mkDefaults = lib.mapAttrsRecursive (n: v: lib.mkDefault v);
@ -29,8 +30,8 @@
bridge = {
username_template = "whatsapp_{{.}}";
displayname_template = "{{if .BusinessName}}{{.BusinessName}}{{else if .PushName}}{{.PushName}}{{else}}{{.JID}}{{end}} (WA)";
double_puppet_server_map = {};
login_shared_secret_map = {};
double_puppet_server_map = { };
login_shared_secret_map = { };
command_prefix = "!wa";
permissions."*" = "relay";
relay.enabled = true;
@ -45,7 +46,8 @@
};
};
in {
in
{
options.services.mautrix-whatsapp = {
enable = lib.mkEnableOption "mautrix-whatsapp, a puppeting/relaybot bridge between Matrix and WhatsApp";
@ -129,7 +131,7 @@ in {
description = "Mautrix-WhatsApp bridge user";
};
users.groups.mautrix-whatsapp = {};
users.groups.mautrix-whatsapp = { };
services.matrix-synapse = lib.mkIf cfg.registerToSynapse {
settings.app_service_config_files = [ registrationFile ];
@ -138,18 +140,20 @@ in {
serviceConfig.SupplementaryGroups = [ "mautrix-whatsapp" ];
};
services.mautrix-whatsapp.settings = lib.mkMerge (map mkDefaults [
defaultConfig
# Note: this is defined here to avoid the docs depending on `config`
{ homeserver.domain = config.services.matrix-synapse.settings.server_name; }
]);
services.mautrix-whatsapp.settings = lib.mkMerge (
map mkDefaults [
defaultConfig
# Note: this is defined here to avoid the docs depending on `config`
{ homeserver.domain = config.services.matrix-synapse.settings.server_name; }
]
);
systemd.services.mautrix-whatsapp = {
description = "Mautrix-WhatsApp Service - A WhatsApp bridge for Matrix";
wantedBy = ["multi-user.target"];
wants = ["network-online.target"] ++ cfg.serviceDependencies;
after = ["network-online.target"] ++ cfg.serviceDependencies;
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
after = [ "network-online.target" ] ++ cfg.serviceDependencies;
preStart = ''
# substitute the settings file by environment variables
@ -216,12 +220,12 @@ in {
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallErrorNumber = "EPERM";
SystemCallFilter = ["@system-service"];
SystemCallFilter = [ "@system-service" ];
Type = "simple";
UMask = 0027;
UMask = 27;
};
restartTriggers = [settingsFileUnsubstituted];
restartTriggers = [ settingsFileUnsubstituted ];
};
};
meta.maintainers = with lib.maintainers; [frederictobiasc];
meta.maintainers = with lib.maintainers; [ frederictobiasc ];
}

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.mjolnir;
@ -23,8 +28,15 @@ let
};
moduleConfigFile = pkgs.writeText "module-config.yaml" (
lib.generators.toYAML { } (lib.filterAttrs (_: v: v != null)
(lib.fold lib.recursiveUpdate { } [ yamlConfig cfg.settings ])));
lib.generators.toYAML { } (
lib.filterAttrs (_: v: v != null) (
lib.fold lib.recursiveUpdate { } [
yamlConfig
cfg.settings
]
)
)
);
# these config files will be merged one after the other to build the final config
configFiles = [
@ -36,7 +48,9 @@ let
# replace all secret strings using replace-secret
generateConfig = pkgs.writeShellScript "mjolnir-generate-config" (
let
yqEvalStr = lib.concatImapStringsSep " * " (pos: _: "select(fileIndex == ${toString (pos - 1)})") configFiles;
yqEvalStr = lib.concatImapStringsSep " * " (
pos: _: "select(fileIndex == ${toString (pos - 1)})"
) configFiles;
yqEvalArgs = lib.concatStringsSep " " configFiles;
in
''
@ -190,15 +204,20 @@ in
# which breaks older configs using pantalaimon or access tokens
services.mjolnir.settings.encryption.use = lib.mkDefault false;
services.pantalaimon-headless.instances."mjolnir" = lib.mkIf cfg.pantalaimon.enable
{
services.pantalaimon-headless.instances."mjolnir" =
lib.mkIf cfg.pantalaimon.enable {
homeserver = cfg.homeserverUrl;
} // cfg.pantalaimon.options;
}
// cfg.pantalaimon.options;
systemd.services.mjolnir = {
description = "mjolnir - a moderation tool for Matrix";
wants = [ "network-online.target" ] ++ lib.optionals (cfg.pantalaimon.enable) [ "pantalaimon-mjolnir.service" ];
after = [ "network-online.target" ] ++ lib.optionals (cfg.pantalaimon.enable) [ "pantalaimon-mjolnir.service" ];
wants = [
"network-online.target"
] ++ lib.optionals (cfg.pantalaimon.enable) [ "pantalaimon-mjolnir.service" ];
after = [
"network-online.target"
] ++ lib.optionals (cfg.pantalaimon.enable) [ "pantalaimon-mjolnir.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
@ -215,15 +234,16 @@ in
User = "mjolnir";
Restart = "on-failure";
/* TODO: wait for #102397 to be resolved. Then load secrets from $CREDENTIALS_DIRECTORY+"/NAME"
DynamicUser = true;
LoadCredential = [] ++
lib.optionals (cfg.accessTokenFile != null) [
"access_token:${cfg.accessTokenFile}"
] ++
lib.optionals (cfg.pantalaimon.passwordFile != null) [
"pantalaimon_password:${cfg.pantalaimon.passwordFile}"
];
/*
TODO: wait for #102397 to be resolved. Then load secrets from $CREDENTIALS_DIRECTORY+"/NAME"
DynamicUser = true;
LoadCredential = [] ++
lib.optionals (cfg.accessTokenFile != null) [
"access_token:${cfg.accessTokenFile}"
] ++
lib.optionals (cfg.pantalaimon.passwordFile != null) [
"pantalaimon_password:${cfg.pantalaimon.passwordFile}"
];
*/
};
};

View file

@ -4,44 +4,39 @@
pkgs,
...
}:
with lib; let
with lib;
let
cfg = config.services.anki-sync-server;
name = "anki-sync-server";
specEscape = replaceStrings ["%"] ["%%"];
usersWithIndexes =
lists.imap1 (i: user: {
i = i;
user = user;
})
cfg.users;
specEscape = replaceStrings [ "%" ] [ "%%" ];
usersWithIndexes = lists.imap1 (i: user: {
i = i;
user = user;
}) cfg.users;
usersWithIndexesFile = filter (x: x.user.passwordFile != null) usersWithIndexes;
usersWithIndexesNoFile = filter (x: x.user.passwordFile == null && x.user.password != null) usersWithIndexes;
usersWithIndexesNoFile = filter (
x: x.user.passwordFile == null && x.user.password != null
) usersWithIndexes;
anki-sync-server-run = pkgs.writeShellScript "anki-sync-server-run" ''
# When services.anki-sync-server.users.passwordFile is set,
# each password file is passed as a systemd credential, which is mounted in
# a file system exposed to the service. Here we read the passwords from
# the credential files to pass them as environment variables to the Anki
# sync server.
${
concatMapStringsSep
"\n"
(x: ''
read -r pass < "''${CREDENTIALS_DIRECTORY}/"${escapeShellArg x.user.username}
export SYNC_USER${toString x.i}=${escapeShellArg x.user.username}:"$pass"
'')
usersWithIndexesFile
}
${concatMapStringsSep "\n" (x: ''
read -r pass < "''${CREDENTIALS_DIRECTORY}/"${escapeShellArg x.user.username}
export SYNC_USER${toString x.i}=${escapeShellArg x.user.username}:"$pass"
'') usersWithIndexesFile}
# For users where services.anki-sync-server.users.password isn't set,
# export passwords in environment variables in plaintext.
${
concatMapStringsSep
"\n"
(x: ''export SYNC_USER${toString x.i}=${escapeShellArg x.user.username}:${escapeShellArg x.user.password}'')
usersWithIndexesNoFile
}
${concatMapStringsSep "\n" (
x:
''export SYNC_USER${toString x.i}=${escapeShellArg x.user.username}:${escapeShellArg x.user.password}''
) usersWithIndexesNoFile}
exec ${lib.getExe cfg.package}
'';
in {
in
{
options.services.anki-sync-server = {
enable = mkEnableOption "anki-sync-server";
@ -68,7 +63,6 @@ in {
description = "Base directory where user(s) synchronized data will be stored.";
};
openFirewall = mkOption {
default = false;
type = types.bool;
@ -76,7 +70,8 @@ in {
};
users = mkOption {
type = with types;
type =
with types;
listOf (submodule {
options = {
username = mkOption {
@ -116,13 +111,13 @@ in {
message = "At least one username-password pair must be set.";
}
];
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port];
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
systemd.services.anki-sync-server = {
description = "anki-sync-server: Anki sync server built into Anki";
after = ["network.target"];
wantedBy = ["multi-user.target"];
path = [cfg.package];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = [ cfg.package ];
environment = {
SYNC_BASE = cfg.baseDirectory;
SYNC_HOST = specEscape cfg.address;
@ -135,16 +130,15 @@ in {
StateDirectory = name;
ExecStart = anki-sync-server-run;
Restart = "always";
LoadCredential =
map
(x: "${specEscape x.user.username}:${specEscape (toString x.user.passwordFile)}")
usersWithIndexesFile;
LoadCredential = map (
x: "${specEscape x.user.username}:${specEscape (toString x.user.passwordFile)}"
) usersWithIndexesFile;
};
};
};
meta = {
maintainers = with maintainers; [telotortium];
maintainers = with maintainers; [ telotortium ];
doc = ./anki-sync-server.md;
};
}

View file

@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.bazarr;
in
@ -72,7 +77,7 @@ in
};
users.groups = lib.mkIf (cfg.group == "bazarr") {
bazarr = {};
bazarr = { };
};
};
}

View file

@ -1,9 +1,23 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.beesd;
logLevels = { emerg = 0; alert = 1; crit = 2; err = 3; warning = 4; notice = 5; info = 6; debug = 7; };
logLevels = {
emerg = 0;
alert = 1;
crit = 2;
err = 3;
warning = 4;
notice = 5;
info = 6;
debug = 7;
};
fsOptions = with lib.types; {
options.spec = lib.mkOption {
@ -84,8 +98,9 @@ in
};
};
config = {
systemd.services = lib.mapAttrs'
(name: fs: lib.nameValuePair "beesd@${name}" {
systemd.services = lib.mapAttrs' (
name: fs:
lib.nameValuePair "beesd@${name}" {
description = "Block-level BTRFS deduplication for %i";
after = [ "sysinit.target" ];
@ -120,7 +135,7 @@ in
};
unitConfig.RequiresMountsFor = lib.mkIf (lib.hasPrefix "/" fs.spec) fs.spec;
wantedBy = [ "multi-user.target" ];
})
cfg.filesystems;
}
) cfg.filesystems;
};
}

View file

@ -0,0 +1,115 @@
{
config,
lib,
pkgs,
...
}:
let
inherit (lib) types;
cfg = config.services.docling-serve;
in
{
options = {
services.docling-serve = {
enable = lib.mkEnableOption "Docling Serve server";
package = lib.mkPackageOption pkgs "docling-serve" { };
host = lib.mkOption {
type = types.str;
default = "127.0.0.1";
example = "0.0.0.0";
description = ''
The host address which the Docling Serve server HTTP interface listens to.
'';
};
port = lib.mkOption {
type = types.port;
default = 5001;
example = 11111;
description = ''
Which port the Docling Serve server listens to.
'';
};
environment = lib.mkOption {
type = types.attrsOf types.str;
default = {
DOCLING_SERVE_ENABLE_UI = "True";
};
example = ''
{
DOCLING_SERVE_ENABLE_UI = "False";
}
'';
description = ''
Extra environment variables for Docling Serve.
For more details see <https://github.com/docling-project/docling-serve/blob/main/docs/configuration.md>
'';
};
environmentFile = lib.mkOption {
description = ''
Environment file to be passed to the systemd service.
Useful for passing secrets to the service to prevent them from being
world-readable in the Nix store.
'';
type = lib.types.nullOr lib.types.path;
default = null;
example = "/var/lib/secrets/doclingServeSecrets";
};
openFirewall = lib.mkOption {
type = types.bool;
default = false;
description = ''
Whether to open the firewall for Docling Serve.
This adds `services.Docling Serve.port` to `networking.firewall.allowedTCPPorts`.
'';
};
};
};
config = lib.mkIf cfg.enable {
systemd.services.docling-serve = {
description = "Running Docling as an API service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
environment = cfg.environment;
serviceConfig = {
ExecStart = "${lib.getExe cfg.package} run --host \"${cfg.host}\" --port ${toString cfg.port}";
EnvironmentFile = lib.optional (cfg.environmentFile != null) cfg.environmentFile;
PrivateTmp = true;
DynamicUser = true;
DevicePolicy = "closed";
LockPersonality = true;
PrivateUsers = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictNamespaces = true;
RestrictRealtime = true;
SystemCallArchitectures = "native";
UMask = "0077";
CapabilityBoundingSet = "";
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
];
ProtectClock = true;
ProtectProc = "invisible";
};
};
networking.firewall = lib.mkIf cfg.openFirewall { allowedTCPPorts = [ cfg.port ]; };
};
meta.maintainers = with lib.maintainers; [ drupol ];
}

View file

@ -1,10 +1,16 @@
{ lib, pkgs, config, ... }:
{
lib,
pkgs,
config,
...
}:
let
cfg = config.services.domoticz;
pkgDesc = "Domoticz home automation";
in {
in
{
options = {

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.dwm-status;
@ -26,7 +31,16 @@ in
};
order = lib.mkOption {
type = lib.types.listOf (lib.types.enum [ "audio" "backlight" "battery" "cpu_load" "network" "time" ]);
type = lib.types.listOf (
lib.types.enum [
"audio"
"backlight"
"battery"
"cpu_load"
"network"
"time"
]
);
description = ''
List of enabled features in order.
'';
@ -44,7 +58,6 @@ in
};
###### implementation
config = lib.mkIf cfg.enable {

View file

@ -1,4 +1,10 @@
{ config, lib, options, pkgs, ... }:
{
config,
lib,
options,
pkgs,
...
}:
with lib;
@ -17,7 +23,7 @@ let
RUN_MODE = prod
WORK_PATH = ${cfg.stateDir}
${generators.toINI {} cfg.settings}
${generators.toINI { } cfg.settings}
${optionalString (cfg.extraConfig != null) cfg.extraConfig}
'';
@ -25,22 +31,56 @@ in
{
imports = [
(mkRenamedOptionModule [ "services" "gitea" "cookieSecure" ] [ "services" "gitea" "settings" "session" "COOKIE_SECURE" ])
(mkRenamedOptionModule [ "services" "gitea" "disableRegistration" ] [ "services" "gitea" "settings" "service" "DISABLE_REGISTRATION" ])
(mkRenamedOptionModule [ "services" "gitea" "domain" ] [ "services" "gitea" "settings" "server" "DOMAIN" ])
(mkRenamedOptionModule [ "services" "gitea" "httpAddress" ] [ "services" "gitea" "settings" "server" "HTTP_ADDR" ])
(mkRenamedOptionModule [ "services" "gitea" "httpPort" ] [ "services" "gitea" "settings" "server" "HTTP_PORT" ])
(mkRenamedOptionModule [ "services" "gitea" "log" "level" ] [ "services" "gitea" "settings" "log" "LEVEL" ])
(mkRenamedOptionModule [ "services" "gitea" "log" "rootPath" ] [ "services" "gitea" "settings" "log" "ROOT_PATH" ])
(mkRenamedOptionModule [ "services" "gitea" "rootUrl" ] [ "services" "gitea" "settings" "server" "ROOT_URL" ])
(mkRenamedOptionModule [ "services" "gitea" "ssh" "clonePort" ] [ "services" "gitea" "settings" "server" "SSH_PORT" ])
(mkRenamedOptionModule [ "services" "gitea" "staticRootPath" ] [ "services" "gitea" "settings" "server" "STATIC_ROOT_PATH" ])
(mkRenamedOptionModule
[ "services" "gitea" "cookieSecure" ]
[ "services" "gitea" "settings" "session" "COOKIE_SECURE" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "disableRegistration" ]
[ "services" "gitea" "settings" "service" "DISABLE_REGISTRATION" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "domain" ]
[ "services" "gitea" "settings" "server" "DOMAIN" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "httpAddress" ]
[ "services" "gitea" "settings" "server" "HTTP_ADDR" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "httpPort" ]
[ "services" "gitea" "settings" "server" "HTTP_PORT" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "log" "level" ]
[ "services" "gitea" "settings" "log" "LEVEL" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "log" "rootPath" ]
[ "services" "gitea" "settings" "log" "ROOT_PATH" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "rootUrl" ]
[ "services" "gitea" "settings" "server" "ROOT_URL" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "ssh" "clonePort" ]
[ "services" "gitea" "settings" "server" "SSH_PORT" ]
)
(mkRenamedOptionModule
[ "services" "gitea" "staticRootPath" ]
[ "services" "gitea" "settings" "server" "STATIC_ROOT_PATH" ]
)
(mkChangedOptionModule [ "services" "gitea" "enableUnixSocket" ] [ "services" "gitea" "settings" "server" "PROTOCOL" ] (
config: if config.services.gitea.enableUnixSocket then "http+unix" else "http"
))
(mkChangedOptionModule
[ "services" "gitea" "enableUnixSocket" ]
[ "services" "gitea" "settings" "server" "PROTOCOL" ]
(config: if config.services.gitea.enableUnixSocket then "http+unix" else "http")
)
(mkRemovedOptionModule [ "services" "gitea" "ssh" "enable" ] "services.gitea.ssh.enable has been migrated into freeform setting services.gitea.settings.server.DISABLE_SSH. Keep in mind that the setting is inverted")
(mkRemovedOptionModule [ "services" "gitea" "ssh" "enable" ]
"services.gitea.ssh.enable has been migrated into freeform setting services.gitea.settings.server.DISABLE_SSH. Keep in mind that the setting is inverted"
)
];
options = {
@ -86,7 +126,11 @@ in
database = {
type = mkOption {
type = types.enum [ "sqlite3" "mysql" "postgres" ];
type = types.enum [
"sqlite3"
"mysql"
"postgres"
];
example = "mysql";
default = "sqlite3";
description = "Database engine to use.";
@ -143,7 +187,13 @@ in
socket = mkOption {
type = types.nullOr types.path;
default = if (cfg.database.createDatabase && usePostgresql) then "/run/postgresql" else if (cfg.database.createDatabase && useMysql) then "/run/mysqld/mysqld.sock" else null;
default =
if (cfg.database.createDatabase && usePostgresql) then
"/run/postgresql"
else if (cfg.database.createDatabase && useMysql) then
"/run/mysqld/mysqld.sock"
else
null;
defaultText = literalExpression "null";
example = "/run/mysqld/mysqld.sock";
description = "Path to the unix socket file to use for authentication.";
@ -194,7 +244,13 @@ in
};
type = mkOption {
type = types.enum [ "image" "recaptcha" "hcaptcha" "mcaptcha" "cfturnstile" ];
type = types.enum [
"image"
"recaptcha"
"hcaptcha"
"mcaptcha"
"cfturnstile"
];
default = "image";
example = "recaptcha";
description = "The type of CAPTCHA to use for Gitea.";
@ -245,7 +301,18 @@ in
};
type = mkOption {
type = types.enum [ "zip" "rar" "tar" "sz" "tar.gz" "tar.xz" "tar.bz2" "tar.br" "tar.lz4" "tar.zst" ];
type = types.enum [
"zip"
"rar"
"tar"
"sz"
"tar.gz"
"tar.xz"
"tar.bz2"
"tar.br"
"tar.lz4"
"tar.zst"
];
default = "zip";
description = "Archive format used to store the dump file.";
};
@ -308,7 +375,7 @@ in
};
settings = mkOption {
default = {};
default = { };
description = ''
Gitea configuration. Refer to <https://docs.gitea.io/en-us/config-cheat-sheet/>
for details on supported values.
@ -343,21 +410,35 @@ in
};
LEVEL = mkOption {
default = "Info";
type = types.enum [ "Trace" "Debug" "Info" "Warn" "Error" "Critical" ];
type = types.enum [
"Trace"
"Debug"
"Info"
"Warn"
"Error"
"Critical"
];
description = "General log level.";
};
};
server = {
PROTOCOL = mkOption {
type = types.enum [ "http" "https" "fcgi" "http+unix" "fcgi+unix" ];
type = types.enum [
"http"
"https"
"fcgi"
"http+unix"
"fcgi+unix"
];
default = "http";
description = ''Listen protocol. `+unix` means "over unix", not "in addition to."'';
};
HTTP_ADDR = mkOption {
type = types.either types.str types.path;
default = if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/gitea/gitea.sock" else "0.0.0.0";
default =
if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/gitea/gitea.sock" else "0.0.0.0";
defaultText = literalExpression ''if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/gitea/gitea.sock" else "0.0.0.0"'';
description = "Listen address. Must be a path when using a unix socket.";
};
@ -445,10 +526,12 @@ in
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.database.createDatabase -> useSqlite || cfg.database.user == cfg.user;
{
assertion = cfg.database.createDatabase -> useSqlite || cfg.database.user == cfg.user;
message = "services.gitea.database.user must match services.gitea.user if the database is to be automatically provisioned";
}
{ assertion = cfg.database.createDatabase && usePostgresql -> cfg.database.user == cfg.database.name;
{
assertion = cfg.database.createDatabase && usePostgresql -> cfg.database.user == cfg.database.name;
message = ''
When creating a database via NixOS, the db user and db name must be equal!
If you already have an existing DB+user and this assertion is new, you can safely set
@ -457,115 +540,133 @@ in
'';
}
{
assertion = cfg.captcha.enable -> cfg.captcha.type != "image" -> (cfg.captcha.secretFile != null && cfg.captcha.siteKey != null);
assertion =
cfg.captcha.enable
-> cfg.captcha.type != "image"
-> (cfg.captcha.secretFile != null && cfg.captcha.siteKey != null);
message = ''
Using a CAPTCHA service that is not `image` requires providing a CAPTCHA secret through
the `captcha.secretFile` option and a CAPTCHA site key through the `captcha.siteKey` option.
'';
}
{
assertion = cfg.captcha.url != null -> (builtins.elem cfg.captcha.type ["mcaptcha" "recaptcha"]);
assertion =
cfg.captcha.url != null
-> (builtins.elem cfg.captcha.type [
"mcaptcha"
"recaptcha"
]);
message = ''
`captcha.url` is only relevant when `captcha.type` is `mcaptcha` or `recaptcha`.
'';
}
];
services.gitea.settings = let
captchaPrefix = optionalString cfg.captcha.enable ({
image = "IMAGE";
recaptcha = "RECAPTCHA";
hcaptcha = "HCAPTCHA";
mcaptcha = "MCAPTCHA";
cfturnstile = "CF_TURNSTILE";
}."${cfg.captcha.type}");
in {
"cron.update_checker".ENABLED = lib.mkDefault false;
services.gitea.settings =
let
captchaPrefix = optionalString cfg.captcha.enable (
{
image = "IMAGE";
recaptcha = "RECAPTCHA";
hcaptcha = "HCAPTCHA";
mcaptcha = "MCAPTCHA";
cfturnstile = "CF_TURNSTILE";
}
."${cfg.captcha.type}"
);
in
{
"cron.update_checker".ENABLED = lib.mkDefault false;
database = mkMerge [
{
DB_TYPE = cfg.database.type;
}
(mkIf (useMysql || usePostgresql) {
HOST = if cfg.database.socket != null then cfg.database.socket else cfg.database.host + ":" + toString cfg.database.port;
NAME = cfg.database.name;
USER = cfg.database.user;
PASSWD = "#dbpass#";
})
(mkIf useSqlite {
PATH = cfg.database.path;
})
(mkIf usePostgresql {
SSL_MODE = "disable";
})
];
database = mkMerge [
{
DB_TYPE = cfg.database.type;
}
(mkIf (useMysql || usePostgresql) {
HOST =
if cfg.database.socket != null then
cfg.database.socket
else
cfg.database.host + ":" + toString cfg.database.port;
NAME = cfg.database.name;
USER = cfg.database.user;
PASSWD = "#dbpass#";
})
(mkIf useSqlite {
PATH = cfg.database.path;
})
(mkIf usePostgresql {
SSL_MODE = "disable";
})
];
repository = {
ROOT = cfg.repositoryRoot;
repository = {
ROOT = cfg.repositoryRoot;
};
server = mkIf cfg.lfs.enable {
LFS_START_SERVER = true;
LFS_JWT_SECRET = "#lfsjwtsecret#";
};
camo = mkIf (cfg.camoHmacKeyFile != null) {
HMAC_KEY = "#hmackey#";
};
session = {
COOKIE_NAME = lib.mkDefault "session";
};
security = {
SECRET_KEY = "#secretkey#";
INTERNAL_TOKEN = "#internaltoken#";
INSTALL_LOCK = true;
};
service = mkIf cfg.captcha.enable (mkMerge [
{
ENABLE_CAPTCHA = true;
CAPTCHA_TYPE = cfg.captcha.type;
REQUIRE_CAPTCHA_FOR_LOGIN = cfg.captcha.requireForLogin;
REQUIRE_EXTERNAL_REGISTRATION_CAPTCHA = cfg.captcha.requireForExternalRegistration;
}
(mkIf (cfg.captcha.secretFile != null) {
"${captchaPrefix}_SECRET" = "#captchasecret#";
})
(mkIf (cfg.captcha.siteKey != null) {
"${captchaPrefix}_SITEKEY" = cfg.captcha.siteKey;
})
(mkIf (cfg.captcha.url != null) {
"${captchaPrefix}_URL" = cfg.captcha.url;
})
]);
mailer = mkIf (cfg.mailerPasswordFile != null) {
PASSWD = "#mailerpass#";
};
metrics = mkIf (cfg.metricsTokenFile != null) {
TOKEN = "#metricstoken#";
};
oauth2 = {
JWT_SECRET = "#oauth2jwtsecret#";
};
lfs = mkIf cfg.lfs.enable {
PATH = cfg.lfs.contentDir;
};
packages.CHUNKED_UPLOAD_PATH = "${cfg.stateDir}/tmp/package-upload";
};
server = mkIf cfg.lfs.enable {
LFS_START_SERVER = true;
LFS_JWT_SECRET = "#lfsjwtsecret#";
};
camo = mkIf (cfg.camoHmacKeyFile != null) {
HMAC_KEY = "#hmackey#";
};
session = {
COOKIE_NAME = lib.mkDefault "session";
};
security = {
SECRET_KEY = "#secretkey#";
INTERNAL_TOKEN = "#internaltoken#";
INSTALL_LOCK = true;
};
service = mkIf cfg.captcha.enable (mkMerge [
{
ENABLE_CAPTCHA = true;
CAPTCHA_TYPE = cfg.captcha.type;
REQUIRE_CAPTCHA_FOR_LOGIN = cfg.captcha.requireForLogin;
REQUIRE_EXTERNAL_REGISTRATION_CAPTCHA = cfg.captcha.requireForExternalRegistration;
}
(mkIf (cfg.captcha.secretFile != null) {
"${captchaPrefix}_SECRET" = "#captchasecret#";
})
(mkIf (cfg.captcha.siteKey != null) {
"${captchaPrefix}_SITEKEY" = cfg.captcha.siteKey;
})
(mkIf (cfg.captcha.url != null) {
"${captchaPrefix}_URL" = cfg.captcha.url;
})
]);
mailer = mkIf (cfg.mailerPasswordFile != null) {
PASSWD = "#mailerpass#";
};
metrics = mkIf (cfg.metricsTokenFile != null) {
TOKEN = "#metricstoken#";
};
oauth2 = {
JWT_SECRET = "#oauth2jwtsecret#";
};
lfs = mkIf cfg.lfs.enable {
PATH = cfg.lfs.contentDir;
};
packages.CHUNKED_UPLOAD_PATH = "${cfg.stateDir}/tmp/package-upload";
};
services.postgresql = optionalAttrs (usePostgresql && cfg.database.createDatabase) {
enable = mkDefault true;
ensureDatabases = [ cfg.database.name ];
ensureUsers = [
{ name = cfg.database.user;
{
name = cfg.database.user;
ensureDBOwnership = true;
}
];
@ -577,46 +678,60 @@ in
ensureDatabases = [ cfg.database.name ];
ensureUsers = [
{ name = cfg.database.user;
ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
{
name = cfg.database.user;
ensurePermissions = {
"${cfg.database.name}.*" = "ALL PRIVILEGES";
};
}
];
};
systemd.tmpfiles.rules = [
"d '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
systemd.tmpfiles.rules =
[
"d '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
# If we have a folder or symlink with gitea locales, remove it
# And symlink the current gitea locales in place
"L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
# If we have a folder or symlink with gitea locales, remove it
# And symlink the current gitea locales in place
"L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
] ++ lib.optionals cfg.lfs.enable [
"d '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
];
]
++ lib.optionals cfg.lfs.enable [
"d '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
];
systemd.services.gitea = {
description = "gitea";
after = [ "network.target" ] ++ optional usePostgresql "postgresql.service" ++ optional useMysql "mysql.service";
requires = optional (cfg.database.createDatabase && usePostgresql) "postgresql.service" ++ optional (cfg.database.createDatabase && useMysql) "mysql.service";
after =
[ "network.target" ]
++ optional usePostgresql "postgresql.service"
++ optional useMysql "mysql.service";
requires =
optional (cfg.database.createDatabase && usePostgresql) "postgresql.service"
++ optional (cfg.database.createDatabase && useMysql) "mysql.service";
wantedBy = [ "multi-user.target" ];
path = [ cfg.package pkgs.git pkgs.gnupg ];
path = [
cfg.package
pkgs.git
pkgs.gnupg
];
# In older versions the secret naming for JWT was kind of confusing.
# The file jwt_secret hold the value for LFS_JWT_SECRET and JWT_SECRET
@ -625,85 +740,87 @@ in
# values for JWT_SECRET and the file jwt_secret gets renamed to
# lfs_jwt_secret.
# We have to consider this to stay compatible with older installations.
preStart = let
runConfig = "${cfg.customDir}/conf/app.ini";
secretKey = "${cfg.customDir}/conf/secret_key";
oauth2JwtSecret = "${cfg.customDir}/conf/oauth2_jwt_secret";
oldLfsJwtSecret = "${cfg.customDir}/conf/jwt_secret"; # old file for LFS_JWT_SECRET
lfsJwtSecret = "${cfg.customDir}/conf/lfs_jwt_secret"; # new file for LFS_JWT_SECRET
internalToken = "${cfg.customDir}/conf/internal_token";
replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
in ''
# copy custom configuration and generate random secrets if needed
${optionalString (!cfg.useWizard) ''
function gitea_setup {
cp -f '${configFile}' '${runConfig}'
preStart =
let
runConfig = "${cfg.customDir}/conf/app.ini";
secretKey = "${cfg.customDir}/conf/secret_key";
oauth2JwtSecret = "${cfg.customDir}/conf/oauth2_jwt_secret";
oldLfsJwtSecret = "${cfg.customDir}/conf/jwt_secret"; # old file for LFS_JWT_SECRET
lfsJwtSecret = "${cfg.customDir}/conf/lfs_jwt_secret"; # new file for LFS_JWT_SECRET
internalToken = "${cfg.customDir}/conf/internal_token";
replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
in
''
# copy custom configuration and generate random secrets if needed
${optionalString (!cfg.useWizard) ''
function gitea_setup {
cp -f '${configFile}' '${runConfig}'
if [ ! -s '${secretKey}' ]; then
${exe} generate secret SECRET_KEY > '${secretKey}'
fi
if [ ! -s '${secretKey}' ]; then
${exe} generate secret SECRET_KEY > '${secretKey}'
fi
# Migrate LFS_JWT_SECRET filename
if [[ -s '${oldLfsJwtSecret}' && ! -s '${lfsJwtSecret}' ]]; then
mv '${oldLfsJwtSecret}' '${lfsJwtSecret}'
fi
# Migrate LFS_JWT_SECRET filename
if [[ -s '${oldLfsJwtSecret}' && ! -s '${lfsJwtSecret}' ]]; then
mv '${oldLfsJwtSecret}' '${lfsJwtSecret}'
fi
if [ ! -s '${oauth2JwtSecret}' ]; then
${exe} generate secret JWT_SECRET > '${oauth2JwtSecret}'
fi
if [ ! -s '${oauth2JwtSecret}' ]; then
${exe} generate secret JWT_SECRET > '${oauth2JwtSecret}'
fi
${lib.optionalString cfg.lfs.enable ''
if [ ! -s '${lfsJwtSecret}' ]; then
${exe} generate secret LFS_JWT_SECRET > '${lfsJwtSecret}'
fi
''}
${lib.optionalString cfg.lfs.enable ''
if [ ! -s '${lfsJwtSecret}' ]; then
${exe} generate secret LFS_JWT_SECRET > '${lfsJwtSecret}'
fi
''}
if [ ! -s '${internalToken}' ]; then
${exe} generate secret INTERNAL_TOKEN > '${internalToken}'
fi
if [ ! -s '${internalToken}' ]; then
${exe} generate secret INTERNAL_TOKEN > '${internalToken}'
fi
chmod u+w '${runConfig}'
${replaceSecretBin} '#secretkey#' '${secretKey}' '${runConfig}'
${replaceSecretBin} '#dbpass#' '${cfg.database.passwordFile}' '${runConfig}'
${replaceSecretBin} '#oauth2jwtsecret#' '${oauth2JwtSecret}' '${runConfig}'
${replaceSecretBin} '#internaltoken#' '${internalToken}' '${runConfig}'
chmod u+w '${runConfig}'
${replaceSecretBin} '#secretkey#' '${secretKey}' '${runConfig}'
${replaceSecretBin} '#dbpass#' '${cfg.database.passwordFile}' '${runConfig}'
${replaceSecretBin} '#oauth2jwtsecret#' '${oauth2JwtSecret}' '${runConfig}'
${replaceSecretBin} '#internaltoken#' '${internalToken}' '${runConfig}'
${lib.optionalString cfg.lfs.enable ''
${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
''}
${lib.optionalString cfg.lfs.enable ''
${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
''}
${lib.optionalString (cfg.camoHmacKeyFile != null) ''
${replaceSecretBin} '#hmackey#' '${cfg.camoHmacKeyFile}' '${runConfig}'
''}
${lib.optionalString (cfg.camoHmacKeyFile != null) ''
${replaceSecretBin} '#hmackey#' '${cfg.camoHmacKeyFile}' '${runConfig}'
''}
${lib.optionalString (cfg.mailerPasswordFile != null) ''
${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
''}
${lib.optionalString (cfg.mailerPasswordFile != null) ''
${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
''}
${lib.optionalString (cfg.metricsTokenFile != null) ''
${replaceSecretBin} '#metricstoken#' '${cfg.metricsTokenFile}' '${runConfig}'
''}
${lib.optionalString (cfg.metricsTokenFile != null) ''
${replaceSecretBin} '#metricstoken#' '${cfg.metricsTokenFile}' '${runConfig}'
''}
${lib.optionalString (cfg.captcha.secretFile != null) ''
${replaceSecretBin} '#captchasecret#' '${cfg.captcha.secretFile}' '${runConfig}'
''}
chmod u-w '${runConfig}'
}
(umask 027; gitea_setup)
''}
${lib.optionalString (cfg.captcha.secretFile != null) ''
${replaceSecretBin} '#captchasecret#' '${cfg.captcha.secretFile}' '${runConfig}'
''}
chmod u-w '${runConfig}'
}
(umask 027; gitea_setup)
''}
# run migrations/init the database
${exe} migrate
# run migrations/init the database
${exe} migrate
# update all hooks' binary paths
${exe} admin regenerate hooks
# update all hooks' binary paths
${exe} admin regenerate hooks
# update command option in authorized_keys
if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
then
${exe} admin regenerate keys
fi
'';
# update command option in authorized_keys
if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
then
${exe} admin regenerate keys
fi
'';
serviceConfig = {
Type = "simple";
@ -719,7 +836,13 @@ in
ProcSubset = "pid";
ProtectProc = "invisible";
# Access write directories
ReadWritePaths = [ cfg.customDir cfg.dump.backupDir cfg.repositoryRoot cfg.stateDir cfg.lfs.contentDir ];
ReadWritePaths = [
cfg.customDir
cfg.dump.backupDir
cfg.repositoryRoot
cfg.stateDir
cfg.lfs.contentDir
];
UMask = "0027";
# Capabilities
CapabilityBoundingSet = "";
@ -737,7 +860,11 @@ in
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
LockPersonality = true;
MemoryDenyWriteExecute = true;
@ -747,7 +874,10 @@ in
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @setuid" "setrlimit" ];
SystemCallFilter = [
"~@cpu-emulation @debug @keyring @mount @obsolete @privileged @setuid"
"setrlimit"
];
};
environment = {
@ -769,45 +899,51 @@ in
};
users.groups = mkIf (cfg.group == "gitea") {
gitea = {};
gitea = { };
};
warnings =
optional (cfg.database.password != "") "config.services.gitea.database.password will be stored as plaintext in the Nix store. Use database.passwordFile instead." ++
optional (cfg.extraConfig != null) ''
optional (cfg.database.password != "")
"config.services.gitea.database.password will be stored as plaintext in the Nix store. Use database.passwordFile instead."
++ optional (cfg.extraConfig != null) ''
services.gitea.`extraConfig` is deprecated, please use services.gitea.`settings`.
'' ++
optional (lib.getName cfg.package == "forgejo") ''
''
++ optional (lib.getName cfg.package == "forgejo") ''
Running forgejo via services.gitea.package is no longer supported.
Please use services.forgejo instead.
See https://nixos.org/manual/nixos/unstable/#module-forgejo for migration instructions.
'';
# Create database passwordFile default when password is configured.
services.gitea.database.passwordFile =
mkDefault (toString (pkgs.writeTextFile {
name = "gitea-database-password";
text = cfg.database.password;
}));
services.gitea.database.passwordFile = mkDefault (
toString (
pkgs.writeTextFile {
name = "gitea-database-password";
text = cfg.database.password;
}
)
);
systemd.services.gitea-dump = mkIf cfg.dump.enable {
description = "gitea dump";
after = [ "gitea.service" ];
path = [ cfg.package ];
description = "gitea dump";
after = [ "gitea.service" ];
path = [ cfg.package ];
environment = {
USER = cfg.user;
HOME = cfg.stateDir;
GITEA_WORK_DIR = cfg.stateDir;
GITEA_CUSTOM = cfg.customDir;
};
environment = {
USER = cfg.user;
HOME = cfg.stateDir;
GITEA_WORK_DIR = cfg.stateDir;
GITEA_CUSTOM = cfg.customDir;
};
serviceConfig = {
Type = "oneshot";
User = cfg.user;
ExecStart = "${exe} dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
WorkingDirectory = cfg.dump.backupDir;
};
serviceConfig = {
Type = "oneshot";
User = cfg.user;
ExecStart =
"${exe} dump --type ${cfg.dump.type}"
+ optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
WorkingDirectory = cfg.dump.backupDir;
};
};
systemd.timers.gitea-dump = mkIf cfg.dump.enable {
@ -817,5 +953,9 @@ in
timerConfig.OnCalendar = cfg.dump.interval;
};
};
meta.maintainers = with lib.maintainers; [ ma27 techknowlogick SuperSandro2000 ];
meta.maintainers = with lib.maintainers; [
ma27
techknowlogick
SuperSandro2000
];
}

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.jackett;

View file

@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.jellyseerr;
in

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.metabase;
@ -8,7 +13,8 @@ let
dataDir = "/var/lib/metabase";
in {
in
{
options = {
@ -81,16 +87,18 @@ in {
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
environment = {
MB_PLUGINS_DIR = "${dataDir}/plugins";
MB_DB_FILE = "${dataDir}/metabase.db";
MB_JETTY_HOST = cfg.listen.ip;
MB_JETTY_PORT = toString cfg.listen.port;
} // optionalAttrs (cfg.ssl.enable) {
MB_JETTY_SSL = true;
MB_JETTY_SSL_PORT = toString cfg.ssl.port;
MB_JETTY_SSL_KEYSTORE = cfg.ssl.keystore;
};
environment =
{
MB_PLUGINS_DIR = "${dataDir}/plugins";
MB_DB_FILE = "${dataDir}/metabase.db";
MB_JETTY_HOST = cfg.listen.ip;
MB_JETTY_PORT = toString cfg.listen.port;
}
// optionalAttrs (cfg.ssl.enable) {
MB_JETTY_SSL = true;
MB_JETTY_SSL_PORT = toString cfg.ssl.port;
MB_JETTY_SSL_KEYSTORE = cfg.ssl.keystore;
};
serviceConfig = {
DynamicUser = true;
StateDirectory = baseNameOf dataDir;

View file

@ -14,7 +14,7 @@ in
};
dates = lib.mkOption {
default = ["03:45"];
default = [ "03:45" ];
type = with lib.types; listOf str;
description = ''
Specification (in the format described by

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.ntfy-sh;

View file

@ -1,4 +1,10 @@
{ config, options, pkgs, lib, ... }:
{
config,
options,
pkgs,
lib,
...
}:
let
cfg = config.services.paperless;
@ -9,28 +15,38 @@ let
enableRedis = !(cfg.settings ? PAPERLESS_REDIS);
redisServer = config.services.redis.servers.paperless;
env = {
PAPERLESS_DATA_DIR = cfg.dataDir;
PAPERLESS_MEDIA_ROOT = cfg.mediaDir;
PAPERLESS_CONSUMPTION_DIR = cfg.consumptionDir;
PAPERLESS_THUMBNAIL_FONT_NAME = defaultFont;
GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port}";
} // lib.optionalAttrs (config.time.timeZone != null) {
PAPERLESS_TIME_ZONE = config.time.timeZone;
} // lib.optionalAttrs enableRedis {
PAPERLESS_REDIS = "unix://${redisServer.unixSocket}";
} // lib.optionalAttrs (cfg.settings.PAPERLESS_ENABLE_NLTK or true) {
PAPERLESS_NLTK_DIR = pkgs.symlinkJoin {
name = "paperless_ngx_nltk_data";
paths = cfg.package.nltkData;
};
} // lib.optionalAttrs (cfg.openMPThreadingWorkaround) {
OMP_NUM_THREADS = "1";
} // (lib.mapAttrs (_: s:
if (lib.isAttrs s || lib.isList s) then builtins.toJSON s
else if lib.isBool s then lib.boolToString s
else toString s
) cfg.settings);
env =
{
PAPERLESS_DATA_DIR = cfg.dataDir;
PAPERLESS_MEDIA_ROOT = cfg.mediaDir;
PAPERLESS_CONSUMPTION_DIR = cfg.consumptionDir;
PAPERLESS_THUMBNAIL_FONT_NAME = defaultFont;
GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port}";
}
// lib.optionalAttrs (config.time.timeZone != null) {
PAPERLESS_TIME_ZONE = config.time.timeZone;
}
// lib.optionalAttrs enableRedis {
PAPERLESS_REDIS = "unix://${redisServer.unixSocket}";
}
// lib.optionalAttrs (cfg.settings.PAPERLESS_ENABLE_NLTK or true) {
PAPERLESS_NLTK_DIR = pkgs.symlinkJoin {
name = "paperless_ngx_nltk_data";
paths = cfg.package.nltkData;
};
}
// lib.optionalAttrs (cfg.openMPThreadingWorkaround) {
OMP_NUM_THREADS = "1";
}
// (lib.mapAttrs (
_: s:
if (lib.isAttrs s || lib.isList s) then
builtins.toJSON s
else if lib.isBool s then
lib.boolToString s
else
toString s
) cfg.settings);
manage = pkgs.writeShellScriptBin "paperless-manage" ''
set -o allexport # Export the following env vars
@ -82,22 +98,38 @@ let
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProcSubset = "pid";
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SupplementaryGroups = lib.optional enableRedis redisServer.user;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged @setuid @keyring" ];
SystemCallFilter = [
"@system-service"
"~@privileged @setuid @keyring"
];
UMask = "0066";
};
in
{
meta.maintainers = with lib.maintainers; [ leona SuperSandro2000 erikarvstedt atemu theuni ];
meta.maintainers = with lib.maintainers; [
leona
SuperSandro2000
erikarvstedt
atemu
theuni
];
imports = [
(lib.mkRenamedOptionModule [ "services" "paperless-ng" ] [ "services" "paperless" ])
(lib.mkRenamedOptionModule [ "services" "paperless" "extraConfig" ] [ "services" "paperless" "settings" ])
(lib.mkRenamedOptionModule
[ "services" "paperless" "extraConfig" ]
[ "services" "paperless" "settings" ]
)
];
options.services.paperless = {
@ -176,9 +208,27 @@ in
settings = lib.mkOption {
type = lib.types.submodule {
freeformType = with lib.types; attrsOf (let
typeList = [ bool float int str path package ];
in oneOf (typeList ++ [ (listOf (oneOf typeList)) (attrsOf (oneOf typeList)) ]));
freeformType =
with lib.types;
attrsOf (
let
typeList = [
bool
float
int
str
path
package
];
in
oneOf (
typeList
++ [
(listOf (oneOf typeList))
(attrsOf (oneOf typeList))
]
)
);
};
default = { };
description = ''
@ -191,7 +241,10 @@ in
'';
example = {
PAPERLESS_OCR_LANGUAGE = "deu+eng";
PAPERLESS_CONSUMER_IGNORE_PATTERN = [ ".DS_STORE/*" "desktop.ini" ];
PAPERLESS_CONSUMER_IGNORE_PATTERN = [
".DS_STORE/*"
"desktop.ini"
];
PAPERLESS_OCR_USER_ARGS = {
optimize = 1;
pdfa_image_compression = "lossless";
@ -206,33 +259,43 @@ in
};
package = lib.mkPackageOption pkgs "paperless-ngx" { } // {
apply = pkg: pkg.override {
tesseract5 = pkg.tesseract5.override {
# always enable detection modules
# tesseract fails to build when eng is not present
enableLanguages = if cfg.settings ? PAPERLESS_OCR_LANGUAGE then
lib.lists.unique (
[ "equ" "osd" "eng" ]
++ lib.splitString "+" cfg.settings.PAPERLESS_OCR_LANGUAGE
)
else null;
apply =
pkg:
pkg.override {
tesseract5 = pkg.tesseract5.override {
# always enable detection modules
# tesseract fails to build when eng is not present
enableLanguages =
if cfg.settings ? PAPERLESS_OCR_LANGUAGE then
lib.lists.unique (
[
"equ"
"osd"
"eng"
]
++ lib.splitString "+" cfg.settings.PAPERLESS_OCR_LANGUAGE
)
else
null;
};
};
};
};
openMPThreadingWorkaround = lib.mkEnableOption ''
a workaround for document classifier timeouts.
openMPThreadingWorkaround =
lib.mkEnableOption ''
a workaround for document classifier timeouts.
Paperless uses OpenBLAS via scikit-learn for document classification.
Paperless uses OpenBLAS via scikit-learn for document classification.
The default is to use threading for OpenMP but this would cause the
document classifier to spin on one core seemingly indefinitely if there
are large amounts of classes per classification; causing it to
effectively never complete due to running into timeouts.
The default is to use threading for OpenMP but this would cause the
document classifier to spin on one core seemingly indefinitely if there
are large amounts of classes per classification; causing it to
effectively never complete due to running into timeouts.
This sets `OMP_NUM_THREADS` to `1` in order to mitigate the issue. See
https://github.com/NixOS/nixpkgs/issues/240591 for more information
'' // lib.mkOption { default = true; };
This sets `OMP_NUM_THREADS` to `1` in order to mitigate the issue. See
https://github.com/NixOS/nixpkgs/issues/240591 for more information
''
// lib.mkOption { default = true; };
environmentFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
@ -296,231 +359,257 @@ in
};
};
config = lib.mkIf cfg.enable (lib.mkMerge [ {
environment.systemPackages = [ manage ];
config = lib.mkIf cfg.enable (
lib.mkMerge [
{
environment.systemPackages = [ manage ];
services.redis.servers.paperless.enable = lib.mkIf enableRedis true;
services.redis.servers.paperless.enable = lib.mkIf enableRedis true;
services.postgresql = lib.mkIf cfg.database.createLocally {
enable = true;
ensureDatabases = [ "paperless" ];
ensureUsers = [{
name = config.services.paperless.user;
ensureDBOwnership = true;
}];
};
services.postgresql = lib.mkIf cfg.database.createLocally {
enable = true;
ensureDatabases = [ "paperless" ];
ensureUsers = [
{
name = config.services.paperless.user;
ensureDBOwnership = true;
}
];
};
services.paperless.settings = lib.mkIf cfg.database.createLocally {
PAPERLESS_DBENGINE = "postgresql";
PAPERLESS_DBHOST = "/run/postgresql";
PAPERLESS_DBNAME = "paperless";
PAPERLESS_DBUSER = "paperless";
};
services.paperless.settings = lib.mkIf cfg.database.createLocally {
PAPERLESS_DBENGINE = "postgresql";
PAPERLESS_DBHOST = "/run/postgresql";
PAPERLESS_DBNAME = "paperless";
PAPERLESS_DBUSER = "paperless";
};
systemd.slices.system-paperless = {
description = "Paperless Document Management System Slice";
documentation = [ "https://docs.paperless-ngx.com" ];
};
systemd.slices.system-paperless = {
description = "Paperless Document Management System Slice";
documentation = [ "https://docs.paperless-ngx.com" ];
};
systemd.tmpfiles.settings."10-paperless" = let
defaultRule = {
inherit (cfg) user;
inherit (config.users.users.${cfg.user}) group;
};
in {
"${cfg.dataDir}".d = defaultRule;
"${cfg.mediaDir}".d = defaultRule;
"${cfg.consumptionDir}".d = if cfg.consumptionDirIsPublic then { mode = "777"; } else defaultRule;
};
systemd.tmpfiles.settings."10-paperless" =
let
defaultRule = {
inherit (cfg) user;
inherit (config.users.users.${cfg.user}) group;
};
in
{
"${cfg.dataDir}".d = defaultRule;
"${cfg.mediaDir}".d = defaultRule;
"${cfg.consumptionDir}".d = if cfg.consumptionDirIsPublic then { mode = "777"; } else defaultRule;
};
systemd.services.paperless-scheduler = {
description = "Paperless Celery Beat";
wantedBy = [ "multi-user.target" ];
wants = [ "paperless-consumer.service" "paperless-web.service" "paperless-task-queue.service" ];
serviceConfig = defaultServiceConfig // {
User = cfg.user;
ExecStart = "${cfg.package}/bin/celery --app paperless beat --loglevel INFO";
Restart = "on-failure";
LoadCredential = lib.optionalString (cfg.passwordFile != null) "PAPERLESS_ADMIN_PASSWORD:${cfg.passwordFile}";
PrivateNetwork = cfg.database.createLocally; # defaultServiceConfig enables this by default, needs to be disabled for remote DBs
};
environment = env;
systemd.services.paperless-scheduler = {
description = "Paperless Celery Beat";
wantedBy = [ "multi-user.target" ];
wants = [
"paperless-consumer.service"
"paperless-web.service"
"paperless-task-queue.service"
];
serviceConfig = defaultServiceConfig // {
User = cfg.user;
ExecStart = "${cfg.package}/bin/celery --app paperless beat --loglevel INFO";
Restart = "on-failure";
LoadCredential = lib.optionalString (
cfg.passwordFile != null
) "PAPERLESS_ADMIN_PASSWORD:${cfg.passwordFile}";
PrivateNetwork = cfg.database.createLocally; # defaultServiceConfig enables this by default, needs to be disabled for remote DBs
};
environment = env;
preStart = ''
# remove old papaerless-manage symlink
# TODO: drop with NixOS 25.11
[[ -L '${cfg.dataDir}/paperless-manage' ]] && rm '${cfg.dataDir}/paperless-manage'
preStart =
''
# remove old papaerless-manage symlink
# TODO: drop with NixOS 25.11
[[ -L '${cfg.dataDir}/paperless-manage' ]] && rm '${cfg.dataDir}/paperless-manage'
# Auto-migrate on first run or if the package has changed
versionFile="${cfg.dataDir}/src-version"
version=$(cat "$versionFile" 2>/dev/null || echo 0)
# Auto-migrate on first run or if the package has changed
versionFile="${cfg.dataDir}/src-version"
version=$(cat "$versionFile" 2>/dev/null || echo 0)
if [[ $version != ${cfg.package.version} ]]; then
${cfg.package}/bin/paperless-ngx migrate
if [[ $version != ${cfg.package.version} ]]; then
${cfg.package}/bin/paperless-ngx migrate
# Parse old version string format for backwards compatibility
version=$(echo "$version" | grep -ohP '[^-]+$')
# Parse old version string format for backwards compatibility
version=$(echo "$version" | grep -ohP '[^-]+$')
versionLessThan() {
target=$1
[[ $({ echo "$version"; echo "$target"; } | sort -V | head -1) != "$target" ]]
}
versionLessThan() {
target=$1
[[ $({ echo "$version"; echo "$target"; } | sort -V | head -1) != "$target" ]]
}
if versionLessThan 1.12.0; then
# Reindex documents as mentioned in https://github.com/paperless-ngx/paperless-ngx/releases/tag/v1.12.1
echo "Reindexing documents, to allow searching old comments. Required after the 1.12.x upgrade."
${cfg.package}/bin/paperless-ngx document_index reindex
fi
if versionLessThan 1.12.0; then
# Reindex documents as mentioned in https://github.com/paperless-ngx/paperless-ngx/releases/tag/v1.12.1
echo "Reindexing documents, to allow searching old comments. Required after the 1.12.x upgrade."
${cfg.package}/bin/paperless-ngx document_index reindex
fi
echo ${cfg.package.version} > "$versionFile"
fi
''
+ lib.optionalString (cfg.passwordFile != null) ''
export PAPERLESS_ADMIN_USER="''${PAPERLESS_ADMIN_USER:-admin}"
PAPERLESS_ADMIN_PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/PAPERLESS_ADMIN_PASSWORD")
export PAPERLESS_ADMIN_PASSWORD
superuserState="$PAPERLESS_ADMIN_USER:$PAPERLESS_ADMIN_PASSWORD"
superuserStateFile="${cfg.dataDir}/superuser-state"
echo ${cfg.package.version} > "$versionFile"
fi
''
+ lib.optionalString (cfg.passwordFile != null) ''
export PAPERLESS_ADMIN_USER="''${PAPERLESS_ADMIN_USER:-admin}"
PAPERLESS_ADMIN_PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/PAPERLESS_ADMIN_PASSWORD")
export PAPERLESS_ADMIN_PASSWORD
superuserState="$PAPERLESS_ADMIN_USER:$PAPERLESS_ADMIN_PASSWORD"
superuserStateFile="${cfg.dataDir}/superuser-state"
if [[ $(cat "$superuserStateFile" 2>/dev/null) != "$superuserState" ]]; then
${cfg.package}/bin/paperless-ngx manage_superuser
echo "$superuserState" > "$superuserStateFile"
fi
'';
requires = lib.optional cfg.database.createLocally "postgresql.service";
after = lib.optional enableRedis "redis-paperless.service"
++ lib.optional cfg.database.createLocally "postgresql.service";
};
if [[ $(cat "$superuserStateFile" 2>/dev/null) != "$superuserState" ]]; then
${cfg.package}/bin/paperless-ngx manage_superuser
echo "$superuserState" > "$superuserStateFile"
fi
'';
requires = lib.optional cfg.database.createLocally "postgresql.service";
after =
lib.optional enableRedis "redis-paperless.service"
++ lib.optional cfg.database.createLocally "postgresql.service";
};
systemd.services.paperless-task-queue = {
description = "Paperless Celery Workers";
requires = lib.optional cfg.database.createLocally "postgresql.service";
after = [ "paperless-scheduler.service" ]
++ lib.optional cfg.database.createLocally "postgresql.service";
serviceConfig = defaultServiceConfig // {
User = cfg.user;
ExecStart = "${cfg.package}/bin/celery --app paperless worker --loglevel INFO";
Restart = "on-failure";
# The `mbind` syscall is needed for running the classifier.
SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "mbind" ];
# Needs to talk to mail server for automated import rules
PrivateNetwork = false;
};
environment = env;
};
systemd.services.paperless-task-queue = {
description = "Paperless Celery Workers";
requires = lib.optional cfg.database.createLocally "postgresql.service";
after = [
"paperless-scheduler.service"
] ++ lib.optional cfg.database.createLocally "postgresql.service";
serviceConfig = defaultServiceConfig // {
User = cfg.user;
ExecStart = "${cfg.package}/bin/celery --app paperless worker --loglevel INFO";
Restart = "on-failure";
# The `mbind` syscall is needed for running the classifier.
SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "mbind" ];
# Needs to talk to mail server for automated import rules
PrivateNetwork = false;
};
environment = env;
};
systemd.services.paperless-consumer = {
description = "Paperless document consumer";
# Bind to `paperless-scheduler` so that the consumer never runs
# during migrations
bindsTo = [ "paperless-scheduler.service" ];
requires = lib.optional cfg.database.createLocally "postgresql.service";
after = [ "paperless-scheduler.service" ]
++ lib.optional cfg.database.createLocally "postgresql.service";
serviceConfig = defaultServiceConfig // {
User = cfg.user;
ExecStart = "${cfg.package}/bin/paperless-ngx document_consumer";
Restart = "on-failure";
PrivateNetwork = cfg.database.createLocally; # defaultServiceConfig enables this by default, needs to be disabled for remote DBs
};
environment = env;
# Allow the consumer to access the private /tmp directory of the server.
# This is required to support consuming files via a local folder.
unitConfig.JoinsNamespaceOf = "paperless-task-queue.service";
};
systemd.services.paperless-consumer = {
description = "Paperless document consumer";
# Bind to `paperless-scheduler` so that the consumer never runs
# during migrations
bindsTo = [ "paperless-scheduler.service" ];
requires = lib.optional cfg.database.createLocally "postgresql.service";
after = [
"paperless-scheduler.service"
] ++ lib.optional cfg.database.createLocally "postgresql.service";
serviceConfig = defaultServiceConfig // {
User = cfg.user;
ExecStart = "${cfg.package}/bin/paperless-ngx document_consumer";
Restart = "on-failure";
PrivateNetwork = cfg.database.createLocally; # defaultServiceConfig enables this by default, needs to be disabled for remote DBs
};
environment = env;
# Allow the consumer to access the private /tmp directory of the server.
# This is required to support consuming files via a local folder.
unitConfig.JoinsNamespaceOf = "paperless-task-queue.service";
};
systemd.services.paperless-web = {
description = "Paperless web server";
# Bind to `paperless-scheduler` so that the web server never runs
# during migrations
bindsTo = [ "paperless-scheduler.service" ];
requires = lib.optional cfg.database.createLocally "postgresql.service";
after = [ "paperless-scheduler.service" ]
++ lib.optional cfg.database.createLocally "postgresql.service";
# Setup PAPERLESS_SECRET_KEY.
# If this environment variable is left unset, paperless-ngx defaults
# to a well-known value, which is insecure.
script = let
secretKeyFile = "${cfg.dataDir}/nixos-paperless-secret-key";
in ''
if [[ ! -f '${secretKeyFile}' ]]; then
(
umask 0377
tr -dc A-Za-z0-9 < /dev/urandom | head -c64 | ${pkgs.moreutils}/bin/sponge '${secretKeyFile}'
)
fi
PAPERLESS_SECRET_KEY="$(cat '${secretKeyFile}')"
export PAPERLESS_SECRET_KEY
if [[ ! $PAPERLESS_SECRET_KEY ]]; then
echo "PAPERLESS_SECRET_KEY is empty, refusing to start."
exit 1
fi
exec ${cfg.package.python.pkgs.gunicorn}/bin/gunicorn \
-c ${cfg.package}/lib/paperless-ngx/gunicorn.conf.py paperless.asgi:application
'';
serviceConfig = defaultServiceConfig // {
User = cfg.user;
Restart = "on-failure";
systemd.services.paperless-web = {
description = "Paperless web server";
# Bind to `paperless-scheduler` so that the web server never runs
# during migrations
bindsTo = [ "paperless-scheduler.service" ];
requires = lib.optional cfg.database.createLocally "postgresql.service";
after = [
"paperless-scheduler.service"
] ++ lib.optional cfg.database.createLocally "postgresql.service";
# Setup PAPERLESS_SECRET_KEY.
# If this environment variable is left unset, paperless-ngx defaults
# to a well-known value, which is insecure.
script =
let
secretKeyFile = "${cfg.dataDir}/nixos-paperless-secret-key";
in
''
if [[ ! -f '${secretKeyFile}' ]]; then
(
umask 0377
tr -dc A-Za-z0-9 < /dev/urandom | head -c64 | ${pkgs.moreutils}/bin/sponge '${secretKeyFile}'
)
fi
PAPERLESS_SECRET_KEY="$(cat '${secretKeyFile}')"
export PAPERLESS_SECRET_KEY
if [[ ! $PAPERLESS_SECRET_KEY ]]; then
echo "PAPERLESS_SECRET_KEY is empty, refusing to start."
exit 1
fi
exec ${cfg.package.python.pkgs.gunicorn}/bin/gunicorn \
-c ${cfg.package}/lib/paperless-ngx/gunicorn.conf.py paperless.asgi:application
'';
serviceConfig = defaultServiceConfig // {
User = cfg.user;
Restart = "on-failure";
LimitNOFILE = 65536;
# gunicorn needs setuid, liblapack needs mbind
SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "@setuid mbind" ];
# Needs to serve web page
PrivateNetwork = false;
};
environment = env // {
PYTHONPATH = "${cfg.package.python.pkgs.makePythonPath cfg.package.propagatedBuildInputs}:${cfg.package}/lib/paperless-ngx/src";
};
# Allow the web interface to access the private /tmp directory of the server.
# This is required to support uploading files via the web interface.
unitConfig.JoinsNamespaceOf = "paperless-task-queue.service";
};
LimitNOFILE = 65536;
# gunicorn needs setuid, liblapack needs mbind
SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "@setuid mbind" ];
# Needs to serve web page
PrivateNetwork = false;
};
environment = env // {
PYTHONPATH = "${cfg.package.python.pkgs.makePythonPath cfg.package.propagatedBuildInputs}:${cfg.package}/lib/paperless-ngx/src";
};
# Allow the web interface to access the private /tmp directory of the server.
# This is required to support uploading files via the web interface.
unitConfig.JoinsNamespaceOf = "paperless-task-queue.service";
};
users = lib.optionalAttrs (cfg.user == defaultUser) {
users.${defaultUser} = {
group = defaultUser;
uid = config.ids.uids.paperless;
home = cfg.dataDir;
};
users = lib.optionalAttrs (cfg.user == defaultUser) {
users.${defaultUser} = {
group = defaultUser;
uid = config.ids.uids.paperless;
home = cfg.dataDir;
};
groups.${defaultUser} = {
gid = config.ids.gids.paperless;
};
};
}
groups.${defaultUser} = {
gid = config.ids.gids.paperless;
};
};
}
(lib.mkIf cfg.exporter.enable {
systemd.tmpfiles.rules = [
"d '${cfg.exporter.directory}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
];
(lib.mkIf cfg.exporter.enable {
systemd.tmpfiles.rules = [
"d '${cfg.exporter.directory}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
];
services.paperless.exporter.settings = options.services.paperless.exporter.settings.default;
services.paperless.exporter.settings = options.services.paperless.exporter.settings.default;
systemd.services.paperless-exporter = {
startAt = lib.defaultTo [] cfg.exporter.onCalendar;
serviceConfig = {
User = cfg.user;
WorkingDirectory = cfg.dataDir;
};
unitConfig = let
services = [
"paperless-consumer.service"
"paperless-scheduler.service"
"paperless-task-queue.service"
"paperless-web.service" ];
in {
# Shut down the paperless services while the exporter runs
Conflicts = services;
After = services;
# Bring them back up afterwards, regardless of pass/fail
OnFailure = services;
OnSuccess = services;
};
enableStrictShellChecks = true;
path = [ manage ];
script = ''
paperless-manage document_exporter ${cfg.exporter.directory} ${lib.cli.toGNUCommandLineShell {} cfg.exporter.settings}
'';
};
})]);
systemd.services.paperless-exporter = {
startAt = lib.defaultTo [ ] cfg.exporter.onCalendar;
serviceConfig = {
User = cfg.user;
WorkingDirectory = cfg.dataDir;
};
unitConfig =
let
services = [
"paperless-consumer.service"
"paperless-scheduler.service"
"paperless-task-queue.service"
"paperless-web.service"
];
in
{
# Shut down the paperless services while the exporter runs
Conflicts = services;
After = services;
# Bring them back up afterwards, regardless of pass/fail
OnFailure = services;
OnSuccess = services;
};
enableStrictShellChecks = true;
path = [ manage ];
script = ''
paperless-manage document_exporter ${cfg.exporter.directory} ${
lib.cli.toGNUCommandLineShell { } cfg.exporter.settings
}
'';
};
})
]
);
}

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.radicle;
@ -44,7 +49,12 @@ let
{
BindReadOnlyPaths = [
"${cfg.configFile}:${env.RAD_HOME}/config.json"
"${if lib.types.path.check cfg.publicKey then cfg.publicKey else pkgs.writeText "radicle.pub" cfg.publicKey}:${env.RAD_HOME}/keys/radicle.pub"
"${
if lib.types.path.check cfg.publicKey then
cfg.publicKey
else
pkgs.writeText "radicle.pub" cfg.publicKey
}:${env.RAD_HOME}/keys/radicle.pub"
"${config.security.pki.caBundle}:/etc/ssl/certs/ca-certificates.crt"
];
KillMode = "process";
@ -75,7 +85,11 @@ let
ProtectProc = "invisible";
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
@ -162,21 +176,27 @@ in
preferLocalBuild = true;
# None of the usual phases are run here because runCommandWith uses buildCommand,
# so just append to buildCommand what would usually be a checkPhase.
buildCommand = previousAttrs.buildCommand + lib.optionalString cfg.checkConfig ''
ln -s $out config.json
install -D -m 644 /dev/stdin keys/radicle.pub <<<"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBgFMhajUng+Rjj/sCFXI9PzG8BQjru2n7JgUVF1Kbv5 snakeoil"
export RAD_HOME=$PWD
${lib.getExe' pkgs.buildPackages.radicle-node "rad"} config >/dev/null || {
cat -n config.json
echo "Invalid config.json according to rad."
echo "Please double-check your services.radicle.settings (producing the config.json above),"
echo "some settings may be missing or have the wrong type."
exit 1
} >&2
'';
buildCommand =
previousAttrs.buildCommand
+ lib.optionalString cfg.checkConfig ''
ln -s $out config.json
install -D -m 644 /dev/stdin keys/radicle.pub <<<"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBgFMhajUng+Rjj/sCFXI9PzG8BQjru2n7JgUVF1Kbv5 snakeoil"
export RAD_HOME=$PWD
${lib.getExe' pkgs.buildPackages.radicle-node "rad"} config >/dev/null || {
cat -n config.json
echo "Invalid config.json according to rad."
echo "Please double-check your services.radicle.settings (producing the config.json above),"
echo "some settings may be missing or have the wrong type."
exit 1
} >&2
'';
});
};
checkConfig = lib.mkEnableOption "checking the {file}`config.json` file resulting from {option}`services.radicle.settings`" // { default = true; };
checkConfig =
lib.mkEnableOption "checking the {file}`config.json` file resulting from {option}`services.radicle.settings`"
// {
default = true;
};
settings = lib.mkOption {
description = ''
See https://app.radicle.xyz/nodes/seed.radicle.garden/rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5/tree/radicle/src/node/config.rs#L275
@ -209,14 +229,16 @@ in
};
nginx = lib.mkOption {
# Type of a single virtual host, or null.
type = lib.types.nullOr (lib.types.submodule (
lib.recursiveUpdate (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {
options.serverName = {
default = "radicle-${config.networking.hostName}.${config.networking.domain}";
defaultText = "radicle-\${config.networking.hostName}.\${config.networking.domain}";
};
}
));
type = lib.types.nullOr (
lib.types.submodule (
lib.recursiveUpdate (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {
options.serverName = {
default = "radicle-${config.networking.hostName}.${config.networking.domain}";
defaultText = "radicle-\${config.networking.hostName}.\${config.networking.domain}";
};
}
)
);
default = null;
example = lib.literalExpression ''
{
@ -245,110 +267,121 @@ in
};
};
config = lib.mkIf cfg.enable (lib.mkMerge [
{
systemd.services.radicle-node = lib.mkMerge [
(commonServiceConfig "radicle-node")
{
description = "Radicle Node";
documentation = [ "man:radicle-node(1)" ];
serviceConfig = {
ExecStart = "${lib.getExe' cfg.package "radicle-node"} --force --listen ${cfg.node.listenAddress}:${toString cfg.node.listenPort} ${lib.escapeShellArgs cfg.node.extraArgs}";
Restart = lib.mkDefault "on-failure";
RestartSec = "30";
SocketBindAllow = [ "tcp:${toString cfg.node.listenPort}" ];
SystemCallFilter = lib.mkAfter [
# Needed by git upload-pack which calls alarm() and setitimer() when providing a rad clone
"@timer"
];
};
confinement.packages = [
cfg.package
];
}
# Give only access to the private key to radicle-node.
{
serviceConfig =
let keyCred = builtins.split ":" "${cfg.privateKeyFile}"; in
if lib.length keyCred > 1
then {
LoadCredentialEncrypted = [ cfg.privateKeyFile ];
# Note that neither %d nor ${CREDENTIALS_DIRECTORY} works in BindReadOnlyPaths=
BindReadOnlyPaths = [ "/run/credentials/radicle-node.service/${lib.head keyCred}:${env.RAD_HOME}/keys/radicle" ];
}
else {
LoadCredential = [ "radicle:${cfg.privateKeyFile}" ];
BindReadOnlyPaths = [ "/run/credentials/radicle-node.service/radicle:${env.RAD_HOME}/keys/radicle" ];
};
}
];
environment.systemPackages = [
rad-system
];
networking.firewall = lib.mkIf cfg.node.openFirewall {
allowedTCPPorts = [ cfg.node.listenPort ];
};
users = {
users.radicle = {
description = "Radicle";
group = "radicle";
home = env.HOME;
isSystemUser = true;
};
groups.radicle = {
};
};
}
(lib.mkIf cfg.httpd.enable (lib.mkMerge [
config = lib.mkIf cfg.enable (
lib.mkMerge [
{
systemd.services.radicle-httpd = lib.mkMerge [
(commonServiceConfig "radicle-httpd")
systemd.services.radicle-node = lib.mkMerge [
(commonServiceConfig "radicle-node")
{
description = "Radicle HTTP gateway to radicle-node";
documentation = [ "man:radicle-httpd(1)" ];
description = "Radicle Node";
documentation = [ "man:radicle-node(1)" ];
serviceConfig = {
ExecStart = "${lib.getExe' cfg.httpd.package "radicle-httpd"} --listen ${cfg.httpd.listenAddress}:${toString cfg.httpd.listenPort} ${lib.escapeShellArgs cfg.httpd.extraArgs}";
ExecStart = "${lib.getExe' cfg.package "radicle-node"} --force --listen ${cfg.node.listenAddress}:${toString cfg.node.listenPort} ${lib.escapeShellArgs cfg.node.extraArgs}";
Restart = lib.mkDefault "on-failure";
RestartSec = "10";
SocketBindAllow = [ "tcp:${toString cfg.httpd.listenPort}" ];
RestartSec = "30";
SocketBindAllow = [ "tcp:${toString cfg.node.listenPort}" ];
SystemCallFilter = lib.mkAfter [
# Needed by git upload-pack which calls alarm() and setitimer() when providing a git clone
# Needed by git upload-pack which calls alarm() and setitimer() when providing a rad clone
"@timer"
];
};
confinement.packages = [
cfg.httpd.package
];
confinement.packages = [
cfg.package
];
}
# Give only access to the private key to radicle-node.
{
serviceConfig =
let
keyCred = builtins.split ":" "${cfg.privateKeyFile}";
in
if lib.length keyCred > 1 then
{
LoadCredentialEncrypted = [ cfg.privateKeyFile ];
# Note that neither %d nor ${CREDENTIALS_DIRECTORY} works in BindReadOnlyPaths=
BindReadOnlyPaths = [
"/run/credentials/radicle-node.service/${lib.head keyCred}:${env.RAD_HOME}/keys/radicle"
];
}
else
{
LoadCredential = [ "radicle:${cfg.privateKeyFile}" ];
BindReadOnlyPaths = [
"/run/credentials/radicle-node.service/radicle:${env.RAD_HOME}/keys/radicle"
];
};
}
];
environment.systemPackages = [
rad-system
];
networking.firewall = lib.mkIf cfg.node.openFirewall {
allowedTCPPorts = [ cfg.node.listenPort ];
};
users = {
users.radicle = {
description = "Radicle";
group = "radicle";
home = env.HOME;
isSystemUser = true;
};
groups.radicle = {
};
};
}
(lib.mkIf (cfg.httpd.nginx != null) {
services.nginx.virtualHosts.${cfg.httpd.nginx.serverName} = lib.mkMerge [
cfg.httpd.nginx
(lib.mkIf cfg.httpd.enable (
lib.mkMerge [
{
forceSSL = lib.mkDefault true;
enableACME = lib.mkDefault true;
locations."/" = {
proxyPass = "http://${cfg.httpd.listenAddress}:${toString cfg.httpd.listenPort}";
recommendedProxySettings = true;
};
systemd.services.radicle-httpd = lib.mkMerge [
(commonServiceConfig "radicle-httpd")
{
description = "Radicle HTTP gateway to radicle-node";
documentation = [ "man:radicle-httpd(1)" ];
serviceConfig = {
ExecStart = "${lib.getExe' cfg.httpd.package "radicle-httpd"} --listen ${cfg.httpd.listenAddress}:${toString cfg.httpd.listenPort} ${lib.escapeShellArgs cfg.httpd.extraArgs}";
Restart = lib.mkDefault "on-failure";
RestartSec = "10";
SocketBindAllow = [ "tcp:${toString cfg.httpd.listenPort}" ];
SystemCallFilter = lib.mkAfter [
# Needed by git upload-pack which calls alarm() and setitimer() when providing a git clone
"@timer"
];
};
confinement.packages = [
cfg.httpd.package
];
}
];
}
];
services.radicle.settings = {
node.alias = lib.mkDefault cfg.httpd.nginx.serverName;
node.externalAddresses = lib.mkDefault [
"${cfg.httpd.nginx.serverName}:${toString cfg.node.listenPort}"
];
};
})
]))
]);
(lib.mkIf (cfg.httpd.nginx != null) {
services.nginx.virtualHosts.${cfg.httpd.nginx.serverName} = lib.mkMerge [
cfg.httpd.nginx
{
forceSSL = lib.mkDefault true;
enableACME = lib.mkDefault true;
locations."/" = {
proxyPass = "http://${cfg.httpd.listenAddress}:${toString cfg.httpd.listenPort}";
recommendedProxySettings = true;
};
}
];
services.radicle.settings = {
node.alias = lib.mkDefault cfg.httpd.nginx.serverName;
node.externalAddresses = lib.mkDefault [
"${cfg.httpd.nginx.serverName}:${toString cfg.node.listenPort}"
];
};
})
]
))
]
);
meta.maintainers = with lib.maintainers; [
julm

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
{
options.services.sdrplayApi = {
enable = lib.mkOption {

View file

@ -1,7 +1,13 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.tzupdate;
in {
in
{
options.services.tzupdate = {
enable = lib.mkOption {
type = lib.types.bool;

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.apcupsd;
@ -40,40 +45,53 @@ let
chmod a+x "$out/${eventname}"
'';
eventToShellCmds = event: if builtins.hasAttr event cfg.hooks then (shellCmdsForEventScript event (builtins.getAttr event cfg.hooks)) else "";
eventToShellCmds =
event:
if builtins.hasAttr event cfg.hooks then
(shellCmdsForEventScript event (builtins.getAttr event cfg.hooks))
else
"";
scriptDir = pkgs.runCommand "apcupsd-scriptdir" { preferLocalBuild = true; } (''
mkdir "$out"
# Copy SCRIPTDIR from apcupsd package
cp -r ${pkgs.apcupsd}/etc/apcupsd/* "$out"/
# Make the files writeable (nix will unset the write bits afterwards)
chmod u+w "$out"/*
# Remove the sample event notification scripts, because they don't work
# anyways (they try to send mail to "root" with the "mail" command)
(cd "$out" && rm changeme commok commfailure onbattery offbattery)
# Remove the sample apcupsd.conf file (we're generating our own)
rm "$out/apcupsd.conf"
# Set the SCRIPTDIR= line in apccontrol to the dir we're creating now
sed -i -e "s|^SCRIPTDIR=.*|SCRIPTDIR=$out|" "$out/apccontrol"
'' + lib.concatStringsSep "\n" (map eventToShellCmds eventList)
scriptDir = pkgs.runCommand "apcupsd-scriptdir" { preferLocalBuild = true; } (
''
mkdir "$out"
# Copy SCRIPTDIR from apcupsd package
cp -r ${pkgs.apcupsd}/etc/apcupsd/* "$out"/
# Make the files writeable (nix will unset the write bits afterwards)
chmod u+w "$out"/*
# Remove the sample event notification scripts, because they don't work
# anyways (they try to send mail to "root" with the "mail" command)
(cd "$out" && rm changeme commok commfailure onbattery offbattery)
# Remove the sample apcupsd.conf file (we're generating our own)
rm "$out/apcupsd.conf"
# Set the SCRIPTDIR= line in apccontrol to the dir we're creating now
sed -i -e "s|^SCRIPTDIR=.*|SCRIPTDIR=$out|" "$out/apccontrol"
''
+ lib.concatStringsSep "\n" (map eventToShellCmds eventList)
);
# Ensure the CLI uses our generated configFile
wrappedBinaries = pkgs.runCommand "apcupsd-wrapped-binaries" {
preferLocalBuild = true;
nativeBuildInputs = [ pkgs.makeWrapper ];
} ''
for p in "${lib.getBin pkgs.apcupsd}/bin/"*; do
bname=$(basename "$p")
makeWrapper "$p" "$out/bin/$bname" --add-flags "-f ${configFile}"
done
'';
wrappedBinaries =
pkgs.runCommand "apcupsd-wrapped-binaries"
{
preferLocalBuild = true;
nativeBuildInputs = [ pkgs.makeWrapper ];
}
''
for p in "${lib.getBin pkgs.apcupsd}/bin/"*; do
bname=$(basename "$p")
makeWrapper "$p" "$out/bin/$bname" --add-flags "-f ${configFile}"
done
'';
apcupsdWrapped = pkgs.symlinkJoin {
name = "apcupsd-wrapped";
# Put wrappers first so they "win"
paths = [ wrappedBinaries pkgs.apcupsd ];
paths = [
wrappedBinaries
pkgs.apcupsd
];
};
in
@ -115,7 +133,7 @@ in
};
hooks = lib.mkOption {
default = {};
default = { };
example = {
doshutdown = "# shell commands to notify that the computer is shutting down";
};
@ -136,19 +154,24 @@ in
};
###### implementation
config = lib.mkIf cfg.enable {
assertions = [ {
assertion = let hooknames = builtins.attrNames cfg.hooks; in lib.all (x: lib.elem x eventList) hooknames;
message = ''
One (or more) attribute names in services.apcupsd.hooks are invalid.
Current attribute names: ${toString (builtins.attrNames cfg.hooks)}
Valid attribute names : ${toString eventList}
'';
} ];
assertions = [
{
assertion =
let
hooknames = builtins.attrNames cfg.hooks;
in
lib.all (x: lib.elem x eventList) hooknames;
message = ''
One (or more) attribute names in services.apcupsd.hooks are invalid.
Current attribute names: ${toString (builtins.attrNames cfg.hooks)}
Valid attribute names : ${toString eventList}
'';
}
];
# Give users access to the "apcaccess" tool
environment.systemPackages = [ apcupsdWrapped ];

View file

@ -1,10 +1,23 @@
{ pkgs, config, lib, ... }:
{
pkgs,
config,
lib,
...
}:
let
cfg = config.services.cockpit;
inherit (lib) types mkEnableOption mkOption mkIf literalMD mkPackageOption;
settingsFormat = pkgs.formats.ini {};
in {
inherit (lib)
types
mkEnableOption
mkOption
mkIf
literalMD
mkPackageOption
;
settingsFormat = pkgs.formats.ini { };
in
{
options = {
services.cockpit = {
enable = mkEnableOption "Cockpit";
@ -16,7 +29,7 @@ in {
settings = lib.mkOption {
type = settingsFormat.type;
default = {};
default = { };
description = ''
Settings for cockpit that will be saved in /etc/cockpit/cockpit.conf.
@ -49,15 +62,19 @@ in {
# generate cockpit settings
environment.etc."cockpit/cockpit.conf".source = settingsFormat.generate "cockpit.conf" cfg.settings;
security.pam.services.cockpit = {};
security.pam.services.cockpit = { };
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
systemd.packages = [ cfg.package ];
systemd.sockets.cockpit.wantedBy = [ "multi-user.target" ];
systemd.sockets.cockpit.listenStreams = [ "" (toString cfg.port) ];
systemd.sockets.cockpit.listenStreams = [
""
(toString cfg.port)
];
systemd.tmpfiles.rules = [ # From $out/lib/tmpfiles.d/cockpit-tmpfiles.conf
systemd.tmpfiles.rules = [
# From $out/lib/tmpfiles.d/cockpit-tmpfiles.conf
"C /run/cockpit/inactive.motd 0640 root root - ${cfg.package}/share/cockpit/motd/inactive.motd"
"f /run/cockpit/active.motd 0640 root root -"
"L+ /run/cockpit/motd - - - - inactive.motd"

View file

@ -1,35 +1,47 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.datadog-agent;
ddConf = {
skip_ssl_validation = false;
confd_path = "/etc/datadog-agent/conf.d";
additional_checksd = "/etc/datadog-agent/checks.d";
use_dogstatsd = true;
}
// lib.optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; }
// lib.optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; }
// lib.optionalAttrs (cfg.ddUrl != null) { dd_url = cfg.ddUrl; }
// lib.optionalAttrs (cfg.site != null) { site = cfg.site; }
// lib.optionalAttrs (cfg.tags != null ) { tags = lib.concatStringsSep ", " cfg.tags; }
// lib.optionalAttrs (cfg.enableLiveProcessCollection) {
process_config = {
dd_agent_bin = "${datadogPkg}/bin/agent";
process_collection.enabled = "true";
container_collection.enabled = "true";
};
}
// lib.optionalAttrs (cfg.enableTraceAgent) { apm_config = { enabled = true; }; }
// cfg.extraConfig;
ddConf =
{
skip_ssl_validation = false;
confd_path = "/etc/datadog-agent/conf.d";
additional_checksd = "/etc/datadog-agent/checks.d";
use_dogstatsd = true;
}
// lib.optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; }
// lib.optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; }
// lib.optionalAttrs (cfg.ddUrl != null) { dd_url = cfg.ddUrl; }
// lib.optionalAttrs (cfg.site != null) { site = cfg.site; }
// lib.optionalAttrs (cfg.tags != null) { tags = lib.concatStringsSep ", " cfg.tags; }
// lib.optionalAttrs (cfg.enableLiveProcessCollection) {
process_config = {
dd_agent_bin = "${datadogPkg}/bin/agent";
process_collection.enabled = "true";
container_collection.enabled = "true";
};
}
// lib.optionalAttrs (cfg.enableTraceAgent) {
apm_config = {
enabled = true;
};
}
// cfg.extraConfig;
# Generate Datadog configuration files for each configured checks.
# This works because check configurations have predictable paths,
# and because JSON is a valid subset of YAML.
makeCheckConfigs = entries: lib.mapAttrs' (name: conf: {
name = "datadog-agent/conf.d/${name}.d/conf.yaml";
value.source = pkgs.writeText "${name}-check-conf.yaml" (builtins.toJSON conf);
}) entries;
makeCheckConfigs =
entries:
lib.mapAttrs' (name: conf: {
name = "datadog-agent/conf.d/${name}.d/conf.yaml";
value.source = pkgs.writeText "${name}-check-conf.yaml" (builtins.toJSON conf);
}) entries;
defaultChecks = {
disk = cfg.diskCheck;
@ -38,11 +50,15 @@ let
# Assemble all check configurations and the top-level agent
# configuration.
etcfiles = with pkgs; with builtins;
{ "datadog-agent/datadog.yaml" = {
source = writeText "datadog.yaml" (toJSON ddConf);
};
} // makeCheckConfigs (cfg.checks // defaultChecks);
etcfiles =
with pkgs;
with builtins;
{
"datadog-agent/datadog.yaml" = {
source = writeText "datadog.yaml" (toJSON ddConf);
};
}
// makeCheckConfigs (cfg.checks // defaultChecks);
# Apply the configured extraIntegrations to the provided agent
# package. See the documentation of `dd-agent/integrations-core.nix`
@ -50,7 +66,8 @@ let
datadogPkg = cfg.package.override {
pythonPackages = pkgs.datadog-integrations-core cfg.extraIntegrations;
};
in {
in
{
options.services.datadog-agent = {
enable = lib.mkEnableOption "Datadog-agent v7 monitoring service";
@ -95,7 +112,10 @@ in {
tags = lib.mkOption {
description = "The tags to mark this Datadog agent";
example = [ "test" "service" ];
example = [
"test"
"service"
];
default = null;
type = lib.types.nullOr (lib.types.listOf lib.types.str);
};
@ -110,12 +130,19 @@ in {
logLevel = lib.mkOption {
description = "Logging verbosity.";
default = null;
type = lib.types.nullOr (lib.types.enum ["DEBUG" "INFO" "WARN" "ERROR"]);
type = lib.types.nullOr (
lib.types.enum [
"DEBUG"
"INFO"
"WARN"
"ERROR"
]
);
};
extraIntegrations = lib.mkOption {
default = {};
type = lib.types.attrs;
default = { };
type = lib.types.attrs;
description = ''
Extra integrations from the Datadog core-integrations
@ -137,13 +164,13 @@ in {
};
extraConfig = lib.mkOption {
default = {};
default = { };
type = lib.types.attrs;
description = ''
Extra configuration options that will be merged into the
main config file {file}`datadog.yaml`.
'';
};
};
enableLiveProcessCollection = lib.mkOption {
description = ''
@ -205,7 +232,7 @@ in {
};
};
default = {};
default = { };
# sic! The structure of the values is up to the check, so we can
# not usefully constrain the type further.
@ -216,7 +243,7 @@ in {
description = "Disk check config";
type = lib.types.attrs;
default = {
init_config = {};
init_config = { };
instances = [ { use_mount = "false"; } ];
};
};
@ -225,15 +252,27 @@ in {
description = "Network check config";
type = lib.types.attrs;
default = {
init_config = {};
init_config = { };
# Network check only supports one configured instance
instances = [ { collect_connection_state = false;
excluded_interfaces = [ "lo" "lo0" ]; } ];
instances = [
{
collect_connection_state = false;
excluded_interfaces = [
"lo"
"lo0"
];
}
];
};
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute2 ];
environment.systemPackages = [
datadogPkg
pkgs.sysstat
pkgs.procps
pkgs.iproute2
];
users.users.datadog = {
description = "Datadog Agent User";
@ -245,58 +284,73 @@ in {
users.groups.datadog.gid = config.ids.gids.datadog;
systemd.services = let
makeService = attrs: lib.recursiveUpdate {
path = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute2 ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
User = "datadog";
Group = "datadog";
Restart = "always";
RestartSec = 2;
systemd.services =
let
makeService =
attrs:
lib.recursiveUpdate {
path = [
datadogPkg
pkgs.sysstat
pkgs.procps
pkgs.iproute2
];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
User = "datadog";
Group = "datadog";
Restart = "always";
RestartSec = 2;
};
restartTriggers = [ datadogPkg ] ++ map (x: x.source) (lib.attrValues etcfiles);
} attrs;
in
{
datadog-agent = makeService {
description = "Datadog agent monitor";
preStart = ''
chown -R datadog: /etc/datadog-agent
rm -f /etc/datadog-agent/auth_token
'';
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
exec ${datadogPkg}/bin/agent run -c /etc/datadog-agent/datadog.yaml
'';
serviceConfig.PermissionsStartOnly = true;
};
restartTriggers = [ datadogPkg ] ++ map (x: x.source) (lib.attrValues etcfiles);
} attrs;
in {
datadog-agent = makeService {
description = "Datadog agent monitor";
preStart = ''
chown -R datadog: /etc/datadog-agent
rm -f /etc/datadog-agent/auth_token
'';
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
exec ${datadogPkg}/bin/agent run -c /etc/datadog-agent/datadog.yaml
'';
serviceConfig.PermissionsStartOnly = true;
dd-jmxfetch = lib.mkIf (lib.hasAttr "jmx" cfg.checks) (makeService {
description = "Datadog JMX Fetcher";
path = [
datadogPkg
pkgs.python
pkgs.sysstat
pkgs.procps
pkgs.jdk
];
serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch";
});
datadog-process-agent = lib.mkIf cfg.enableLiveProcessCollection (makeService {
description = "Datadog Live Process Agent";
path = [ ];
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
${cfg.processAgentPackage}/bin/process-agent --config /etc/datadog-agent/datadog.yaml
'';
});
datadog-trace-agent = lib.mkIf cfg.enableTraceAgent (makeService {
description = "Datadog Trace Agent";
path = [ ];
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
${datadogPkg}/bin/trace-agent --config /etc/datadog-agent/datadog.yaml
'';
});
};
dd-jmxfetch = lib.mkIf (lib.hasAttr "jmx" cfg.checks) (makeService {
description = "Datadog JMX Fetcher";
path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ];
serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch";
});
datadog-process-agent = lib.mkIf cfg.enableLiveProcessCollection (makeService {
description = "Datadog Live Process Agent";
path = [ ];
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
${cfg.processAgentPackage}/bin/process-agent --config /etc/datadog-agent/datadog.yaml
'';
});
datadog-trace-agent = lib.mkIf cfg.enableTraceAgent (makeService {
description = "Datadog Trace Agent";
path = [ ];
script = ''
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
${datadogPkg}/bin/trace-agent --config /etc/datadog-agent/datadog.yaml
'';
});
};
environment.etc = etcfiles;
};
}

View file

@ -1,5 +1,10 @@
# Fusion Inventory daemon.
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.fusionInventory;
@ -11,7 +16,8 @@ let
${cfg.extraConfig}
'';
in {
in
{
###### interface
@ -38,7 +44,6 @@ in {
};
};
###### implementation
config = lib.mkIf cfg.enable {

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.librenms;
@ -17,15 +22,17 @@ let
memory_limit = ${toString cfg.settings.php_memory_limit}M
date.timezone = "${config.time.timeZone}"
'';
phpIni = pkgs.runCommand "php.ini"
{
inherit (package) phpPackage;
inherit phpOptions;
preferLocalBuild = true;
passAsFile = [ "phpOptions" ];
} ''
cat $phpPackage/etc/php.ini $phpOptionsPath > $out
'';
phpIni =
pkgs.runCommand "php.ini"
{
inherit (package) phpPackage;
inherit phpOptions;
preferLocalBuild = true;
passAsFile = [ "phpOptions" ];
}
''
cat $phpPackage/etc/php.ini $phpOptionsPath > $out
'';
artisanWrapper = pkgs.writeShellScriptBin "librenms-artisan" ''
cd ${package}
@ -45,8 +52,6 @@ let
$sudo ${package}/lnms "$@"
'';
configFile = pkgs.writeText "config.php" ''
<?php
$new_config = json_decode(file_get_contents("${cfg.dataDir}/config.json"), true);
@ -208,7 +213,13 @@ in
};
poolConfig = mkOption {
type = with types; attrsOf (oneOf [ str int bool ]);
type =
with types;
attrsOf (oneOf [
str
int
bool
]);
default = {
"pm" = "dynamic";
"pm.max_children" = 32;
@ -225,9 +236,7 @@ in
nginx = mkOption {
type = types.submodule (
recursiveUpdate
(import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
{ }
recursiveUpdate (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) { }
);
default = { };
example = literalExpression ''
@ -390,42 +399,48 @@ in
users.groups.${cfg.group} = { };
services.librenms.settings = {
# basic configs
"user" = cfg.user;
"own_hostname" = cfg.hostname;
"base_url" = lib.mkDefault "/";
"auth_mechanism" = lib.mkDefault "mysql";
services.librenms.settings =
{
# basic configs
"user" = cfg.user;
"own_hostname" = cfg.hostname;
"base_url" = lib.mkDefault "/";
"auth_mechanism" = lib.mkDefault "mysql";
# disable auto update function (won't work with NixOS)
"update" = false;
# disable auto update function (won't work with NixOS)
"update" = false;
# enable fast ping by default
"ping_rrd_step" = 60;
# enable fast ping by default
"ping_rrd_step" = 60;
# set default memory limit to 1G
"php_memory_limit" = lib.mkDefault 1024;
# set default memory limit to 1G
"php_memory_limit" = lib.mkDefault 1024;
# one minute polling
"rrd.step" = if cfg.enableOneMinutePolling then 60 else 300;
"rrd.heartbeat" = if cfg.enableOneMinutePolling then 120 else 600;
} // (lib.optionalAttrs cfg.distributedPoller.enable {
"distributed_poller" = true;
"distributed_poller_name" = lib.mkIf (cfg.distributedPoller.name != null) cfg.distributedPoller.name;
"distributed_poller_group" = cfg.distributedPoller.group;
"distributed_billing" = cfg.distributedPoller.distributedBilling;
"distributed_poller_memcached_host" = cfg.distributedPoller.memcachedHost;
"distributed_poller_memcached_port" = cfg.distributedPoller.memcachedPort;
"rrdcached" = "${cfg.distributedPoller.rrdcachedHost}:${toString cfg.distributedPoller.rrdcachedPort}";
}) // (lib.optionalAttrs cfg.useDistributedPollers {
"distributed_poller" = true;
# still enable a local poller with distributed polling
"distributed_poller_group" = lib.mkDefault "0";
"distributed_billing" = lib.mkDefault true;
"distributed_poller_memcached_host" = "localhost";
"distributed_poller_memcached_port" = 11211;
"rrdcached" = "localhost:42217";
});
# one minute polling
"rrd.step" = if cfg.enableOneMinutePolling then 60 else 300;
"rrd.heartbeat" = if cfg.enableOneMinutePolling then 120 else 600;
}
// (lib.optionalAttrs cfg.distributedPoller.enable {
"distributed_poller" = true;
"distributed_poller_name" = lib.mkIf (
cfg.distributedPoller.name != null
) cfg.distributedPoller.name;
"distributed_poller_group" = cfg.distributedPoller.group;
"distributed_billing" = cfg.distributedPoller.distributedBilling;
"distributed_poller_memcached_host" = cfg.distributedPoller.memcachedHost;
"distributed_poller_memcached_port" = cfg.distributedPoller.memcachedPort;
"rrdcached" =
"${cfg.distributedPoller.rrdcachedHost}:${toString cfg.distributedPoller.rrdcachedPort}";
})
// (lib.optionalAttrs cfg.useDistributedPollers {
"distributed_poller" = true;
# still enable a local poller with distributed polling
"distributed_poller_group" = lib.mkDefault "0";
"distributed_billing" = lib.mkDefault true;
"distributed_poller_memcached_host" = "localhost";
"distributed_poller_memcached_port" = 11211;
"rrdcached" = "localhost:42217";
});
services.memcached = lib.mkIf cfg.useDistributedPollers {
enable = true;
@ -451,12 +466,14 @@ in
services.mysql = lib.mkIf cfg.database.createLocally {
enable = true;
package = lib.mkDefault pkgs.mariadb;
settings.mysqld = {
innodb_file_per_table = 1;
lower_case_table_names = 0;
} // (lib.optionalAttrs cfg.useDistributedPollers {
bind-address = "0.0.0.0";
});
settings.mysqld =
{
innodb_file_per_table = 1;
lower_case_table_names = 0;
}
// (lib.optionalAttrs cfg.useDistributedPollers {
bind-address = "0.0.0.0";
});
ensureDatabases = [ cfg.database.database ];
ensureUsers = [
{
@ -466,10 +483,12 @@ in
};
}
];
initialScript = lib.mkIf cfg.useDistributedPollers (pkgs.writeText "mysql-librenms-init" ''
CREATE USER IF NOT EXISTS '${cfg.database.username}'@'%';
GRANT ALL PRIVILEGES ON ${cfg.database.database}.* TO '${cfg.database.username}'@'%';
'');
initialScript = lib.mkIf cfg.useDistributedPollers (
pkgs.writeText "mysql-librenms-init" ''
CREATE USER IF NOT EXISTS '${cfg.database.username}'@'%';
GRANT ALL PRIVILEGES ON ${cfg.database.database}.* TO '${cfg.database.username}'@'%';
''
);
};
services.nginx = lib.mkIf (!cfg.distributedPoller.enable) {
@ -526,11 +545,20 @@ in
systemd.services.librenms-setup = {
description = "Preparation tasks for LibreNMS";
before = [ "phpfpm-librenms.service" ];
after = [ "systemd-tmpfiles-setup.service" "network.target" ]
++ (lib.optional (cfg.database.host == "localhost") "mysql.service");
after = [
"systemd-tmpfiles-setup.service"
"network.target"
] ++ (lib.optional (cfg.database.host == "localhost") "mysql.service");
wantedBy = [ "multi-user.target" ];
restartTriggers = [ package configFile ];
path = [ pkgs.mariadb pkgs.unixtools.whereis pkgs.gnused ];
restartTriggers = [
package
configFile
];
path = [
pkgs.mariadb
pkgs.unixtools.whereis
pkgs.gnused
];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
@ -559,81 +587,83 @@ in
}"
];
};
script = ''
set -euo pipefail
# config setup
ln -sf ${configFile} ${cfg.dataDir}/config.php
${pkgs.envsubst}/bin/envsubst -i ${configJson} -o ${cfg.dataDir}/config.json
export PHPRC=${phpIni}
if [[ ! -s ${cfg.dataDir}/.env ]]; then
# init .env file
echo "APP_KEY=" > ${cfg.dataDir}/.env
${artisanWrapper}/bin/librenms-artisan key:generate --ansi
${artisanWrapper}/bin/librenms-artisan webpush:vapid
echo "" >> ${cfg.dataDir}/.env
echo -n "NODE_ID=" >> ${cfg.dataDir}/.env
${package.phpPackage}/bin/php -r "echo uniqid();" >> ${cfg.dataDir}/.env
echo "" >> ${cfg.dataDir}/.env
else
# .env file already exists --> only update database and cache config
${pkgs.gnused}/bin/sed -i /^DB_/d ${cfg.dataDir}/.env
${pkgs.gnused}/bin/sed -i /^CACHE_DRIVER/d ${cfg.dataDir}/.env
fi
${lib.optionalString (cfg.useDistributedPollers || cfg.distributedPoller.enable) ''
echo "CACHE_DRIVER=memcached" >> ${cfg.dataDir}/.env
''}
echo "DB_DATABASE=${cfg.database.database}" >> ${cfg.dataDir}/.env
''
+ (
if ! isNull cfg.database.socket
then ''
# use socket connection
echo "DB_SOCKET=${cfg.database.socket}" >> ${cfg.dataDir}/.env
echo "DB_PASSWORD=null" >> ${cfg.dataDir}/.env
script =
''
else ''
# use TCP connection
echo "DB_HOST=${cfg.database.host}" >> ${cfg.dataDir}/.env
echo "DB_PORT=${toString cfg.database.port}" >> ${cfg.dataDir}/.env
echo "DB_USERNAME=${cfg.database.username}" >> ${cfg.dataDir}/.env
echo -n "DB_PASSWORD=" >> ${cfg.dataDir}/.env
cat ${cfg.database.passwordFile} >> ${cfg.dataDir}/.env
set -euo pipefail
# config setup
ln -sf ${configFile} ${cfg.dataDir}/config.php
${pkgs.envsubst}/bin/envsubst -i ${configJson} -o ${cfg.dataDir}/config.json
export PHPRC=${phpIni}
if [[ ! -s ${cfg.dataDir}/.env ]]; then
# init .env file
echo "APP_KEY=" > ${cfg.dataDir}/.env
${artisanWrapper}/bin/librenms-artisan key:generate --ansi
${artisanWrapper}/bin/librenms-artisan webpush:vapid
echo "" >> ${cfg.dataDir}/.env
echo -n "NODE_ID=" >> ${cfg.dataDir}/.env
${package.phpPackage}/bin/php -r "echo uniqid();" >> ${cfg.dataDir}/.env
echo "" >> ${cfg.dataDir}/.env
else
# .env file already exists --> only update database and cache config
${pkgs.gnused}/bin/sed -i /^DB_/d ${cfg.dataDir}/.env
${pkgs.gnused}/bin/sed -i /^CACHE_DRIVER/d ${cfg.dataDir}/.env
fi
${lib.optionalString (cfg.useDistributedPollers || cfg.distributedPoller.enable) ''
echo "CACHE_DRIVER=memcached" >> ${cfg.dataDir}/.env
''}
echo "DB_DATABASE=${cfg.database.database}" >> ${cfg.dataDir}/.env
''
)
+ ''
# clear cache if package has changed (cache may contain cached paths
# to the old package)
OLD_PACKAGE=$(cat ${cfg.dataDir}/package)
if [[ $OLD_PACKAGE != "${package}" ]]; then
rm -r ${cfg.dataDir}/cache/*
fi
+ (
if !isNull cfg.database.socket then
''
# use socket connection
echo "DB_SOCKET=${cfg.database.socket}" >> ${cfg.dataDir}/.env
echo "DB_PASSWORD=null" >> ${cfg.dataDir}/.env
''
else
''
# use TCP connection
echo "DB_HOST=${cfg.database.host}" >> ${cfg.dataDir}/.env
echo "DB_PORT=${toString cfg.database.port}" >> ${cfg.dataDir}/.env
echo "DB_USERNAME=${cfg.database.username}" >> ${cfg.dataDir}/.env
echo -n "DB_PASSWORD=" >> ${cfg.dataDir}/.env
cat ${cfg.database.passwordFile} >> ${cfg.dataDir}/.env
''
)
+ ''
# clear cache if package has changed (cache may contain cached paths
# to the old package)
OLD_PACKAGE=$(cat ${cfg.dataDir}/package)
if [[ $OLD_PACKAGE != "${package}" ]]; then
rm -r ${cfg.dataDir}/cache/*
fi
# convert rrd files when the oneMinutePolling option is changed
OLD_ENABLED=$(cat ${cfg.dataDir}/one_minute_enabled)
if [[ $OLD_ENABLED != "${lib.boolToString cfg.enableOneMinutePolling}" ]]; then
${package}/scripts/rrdstep.php -h all
echo "${lib.boolToString cfg.enableOneMinutePolling}" > ${cfg.dataDir}/one_minute_enabled
fi
# convert rrd files when the oneMinutePolling option is changed
OLD_ENABLED=$(cat ${cfg.dataDir}/one_minute_enabled)
if [[ $OLD_ENABLED != "${lib.boolToString cfg.enableOneMinutePolling}" ]]; then
${package}/scripts/rrdstep.php -h all
echo "${lib.boolToString cfg.enableOneMinutePolling}" > ${cfg.dataDir}/one_minute_enabled
fi
# migrate db if package version has changed
# not necessary for every package change
OLD_VERSION=$(cat ${cfg.dataDir}/version)
if [[ $OLD_VERSION != "${package.version}" ]]; then
${artisanWrapper}/bin/librenms-artisan migrate --force --no-interaction
echo "${package.version}" > ${cfg.dataDir}/version
fi
# migrate db if package version has changed
# not necessary for every package change
OLD_VERSION=$(cat ${cfg.dataDir}/version)
if [[ $OLD_VERSION != "${package.version}" ]]; then
${artisanWrapper}/bin/librenms-artisan migrate --force --no-interaction
echo "${package.version}" > ${cfg.dataDir}/version
fi
# regenerate cache if package has changed
if [[ $OLD_PACKAGE != "${package}" ]]; then
${artisanWrapper}/bin/librenms-artisan view:clear
${artisanWrapper}/bin/librenms-artisan optimize:clear
${artisanWrapper}/bin/librenms-artisan view:cache
${artisanWrapper}/bin/librenms-artisan optimize
echo "${package}" > ${cfg.dataDir}/package
fi
'';
# regenerate cache if package has changed
if [[ $OLD_PACKAGE != "${package}" ]]; then
${artisanWrapper}/bin/librenms-artisan view:clear
${artisanWrapper}/bin/librenms-artisan optimize:clear
${artisanWrapper}/bin/librenms-artisan view:cache
${artisanWrapper}/bin/librenms-artisan optimize
echo "${package}" > ${cfg.dataDir}/package
fi
'';
};
programs.mtr.enable = true;
@ -663,7 +693,9 @@ in
"33 */6 * * * ${cfg.user} ${env} ${package}/cronic ${package}/discovery-wrapper.py 1"
"*/5 * * * * ${cfg.user} ${env} ${package}/discovery.php -h new >> /dev/null 2>&1"
"${if cfg.enableOneMinutePolling then "*" else "*/5"} * * * * ${cfg.user} ${env} ${package}/cronic ${package}/poller-wrapper.py ${toString cfg.pollerThreads}"
"${
if cfg.enableOneMinutePolling then "*" else "*/5"
} * * * * ${cfg.user} ${env} ${package}/cronic ${package}/poller-wrapper.py ${toString cfg.pollerThreads}"
"* * * * * ${cfg.user} ${env} ${package}/alerts.php >> /dev/null 2>&1"
"*/5 * * * * ${cfg.user} ${env} ${package}/check-services.php >> /dev/null 2>&1"
@ -676,7 +708,8 @@ in
"19 0 * * * ${cfg.user} ${env} ${package}/daily.sh notifications >> /dev/null 2>&1"
"19 0 * * * ${cfg.user} ${env} ${package}/daily.sh peeringdb >> /dev/null 2>&1"
"19 0 * * * ${cfg.user} ${env} ${package}/daily.sh mac_oui >> /dev/null 2>&1"
] ++ lib.optionals cfg.enableLocalBilling [
]
++ lib.optionals cfg.enableLocalBilling [
"*/5 * * * * ${cfg.user} ${env} ${package}/poll-billing.php >> /dev/null 2>&1"
"01 * * * * ${cfg.user} ${env} ${package}/billing-calculate.php >> /dev/null 2>&1"
];
@ -691,31 +724,36 @@ in
};
};
environment.systemPackages = [ artisanWrapper lnmsWrapper ];
systemd.tmpfiles.rules = [
"d ${cfg.logDir} 0750 ${cfg.user} ${cfg.group} - -"
"f ${cfg.logDir}/librenms.log 0640 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir} 0750 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/.env 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/version 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/package 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/one_minute_enabled 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/config.json 0600 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/app 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/debugbar 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/cache 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/sessions 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/views 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/logs 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/rrd 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/cache 0700 ${cfg.user} ${cfg.group} - -"
] ++ lib.optionals cfg.useDistributedPollers [
"d ${cfg.dataDir}/rrdcached-journal 0700 ${cfg.user} ${cfg.group} - -"
environment.systemPackages = [
artisanWrapper
lnmsWrapper
];
systemd.tmpfiles.rules =
[
"d ${cfg.logDir} 0750 ${cfg.user} ${cfg.group} - -"
"f ${cfg.logDir}/librenms.log 0640 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir} 0750 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/.env 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/version 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/package 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/one_minute_enabled 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/config.json 0600 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/app 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/debugbar 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/cache 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/sessions 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/views 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/logs 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/rrd 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/cache 0700 ${cfg.user} ${cfg.group} - -"
]
++ lib.optionals cfg.useDistributedPollers [
"d ${cfg.dataDir}/rrdcached-journal 0700 ${cfg.user} ${cfg.group} - -"
];
};
meta.maintainers = with lib.maintainers; [ netali ] ++ lib.teams.wdz.members;

View file

@ -1,5 +1,10 @@
# Nagios system/network monitoring daemon.
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.nagios;
@ -10,75 +15,78 @@ let
nagiosObjectDefs = cfg.objectDefs;
nagiosObjectDefsDir = pkgs.runCommand "nagios-objects" {
inherit nagiosObjectDefs;
preferLocalBuild = true;
} "mkdir -p $out; ln -s $nagiosObjectDefs $out/";
inherit nagiosObjectDefs;
preferLocalBuild = true;
} "mkdir -p $out; ln -s $nagiosObjectDefs $out/";
nagiosCfgFile = let
default = {
log_file="${nagiosLogDir}/current";
log_archive_path="${nagiosLogDir}/archive";
status_file="${nagiosState}/status.dat";
object_cache_file="${nagiosState}/objects.cache";
temp_file="${nagiosState}/nagios.tmp";
lock_file="/run/nagios.lock";
state_retention_file="${nagiosState}/retention.dat";
query_socket="${nagiosState}/nagios.qh";
check_result_path="${nagiosState}";
command_file="${nagiosState}/nagios.cmd";
cfg_dir="${nagiosObjectDefsDir}";
nagios_user="nagios";
nagios_group="nagios";
illegal_macro_output_chars="`~$&|'\"<>";
retain_state_information="1";
};
lines = lib.mapAttrsToList (key: value: "${key}=${value}") (default // cfg.extraConfig);
content = lib.concatStringsSep "\n" lines;
file = pkgs.writeText "nagios.cfg" content;
validated = pkgs.runCommand "nagios-checked.cfg" {preferLocalBuild=true;} ''
cp ${file} nagios.cfg
# nagios checks the existence of /var/lib/nagios, but
# it does not exist in the build sandbox, so we fake it
mkdir lib
lib=$(readlink -f lib)
sed -i s@=${nagiosState}@=$lib@ nagios.cfg
${pkgs.nagios}/bin/nagios -v nagios.cfg && cp ${file} $out
'';
defaultCfgFile = if cfg.validateConfig then validated else file;
in
if cfg.mainConfigFile == null then defaultCfgFile else cfg.mainConfigFile;
nagiosCfgFile =
let
default = {
log_file = "${nagiosLogDir}/current";
log_archive_path = "${nagiosLogDir}/archive";
status_file = "${nagiosState}/status.dat";
object_cache_file = "${nagiosState}/objects.cache";
temp_file = "${nagiosState}/nagios.tmp";
lock_file = "/run/nagios.lock";
state_retention_file = "${nagiosState}/retention.dat";
query_socket = "${nagiosState}/nagios.qh";
check_result_path = "${nagiosState}";
command_file = "${nagiosState}/nagios.cmd";
cfg_dir = "${nagiosObjectDefsDir}";
nagios_user = "nagios";
nagios_group = "nagios";
illegal_macro_output_chars = "`~$&|'\"<>";
retain_state_information = "1";
};
lines = lib.mapAttrsToList (key: value: "${key}=${value}") (default // cfg.extraConfig);
content = lib.concatStringsSep "\n" lines;
file = pkgs.writeText "nagios.cfg" content;
validated = pkgs.runCommand "nagios-checked.cfg" { preferLocalBuild = true; } ''
cp ${file} nagios.cfg
# nagios checks the existence of /var/lib/nagios, but
# it does not exist in the build sandbox, so we fake it
mkdir lib
lib=$(readlink -f lib)
sed -i s@=${nagiosState}@=$lib@ nagios.cfg
${pkgs.nagios}/bin/nagios -v nagios.cfg && cp ${file} $out
'';
defaultCfgFile = if cfg.validateConfig then validated else file;
in
if cfg.mainConfigFile == null then defaultCfgFile else cfg.mainConfigFile;
# Plain configuration for the Nagios web-interface with no
# authentication.
nagiosCGICfgFile = pkgs.writeText "nagios.cgi.conf"
''
main_config_file=${cfg.mainConfigFile}
use_authentication=0
url_html_path=${urlPath}
'';
nagiosCGICfgFile = pkgs.writeText "nagios.cgi.conf" ''
main_config_file=${cfg.mainConfigFile}
use_authentication=0
url_html_path=${urlPath}
'';
extraHttpdConfig =
''
ScriptAlias ${urlPath}/cgi-bin ${pkgs.nagios}/sbin
extraHttpdConfig = ''
ScriptAlias ${urlPath}/cgi-bin ${pkgs.nagios}/sbin
<Directory "${pkgs.nagios}/sbin">
Options ExecCGI
Require all granted
SetEnv NAGIOS_CGI_CONFIG ${cfg.cgiConfigFile}
</Directory>
<Directory "${pkgs.nagios}/sbin">
Options ExecCGI
Require all granted
SetEnv NAGIOS_CGI_CONFIG ${cfg.cgiConfigFile}
</Directory>
Alias ${urlPath} ${pkgs.nagios}/share
Alias ${urlPath} ${pkgs.nagios}/share
<Directory "${pkgs.nagios}/share">
Options None
Require all granted
</Directory>
'';
<Directory "${pkgs.nagios}/share">
Options None
Require all granted
</Directory>
'';
in
{
imports = [
(lib.mkRemovedOptionModule [ "services" "nagios" "urlPath" ] "The urlPath option has been removed as it is hard coded to /nagios in the nagios package.")
(lib.mkRemovedOptionModule [
"services"
"nagios"
"urlPath"
] "The urlPath option has been removed as it is hard coded to /nagios in the nagios package.")
];
meta.maintainers = with lib.maintainers; [ symphorien ];
@ -99,7 +107,11 @@ in
plugins = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = with pkgs; [ monitoring-plugins msmtp mailutils ];
default = with pkgs; [
monitoring-plugins
msmtp
mailutils
];
defaultText = lib.literalExpression "[pkgs.monitoring-plugins pkgs.msmtp pkgs.mailutils]";
description = ''
Packages to be added to the Nagios {env}`PATH`.
@ -121,7 +133,7 @@ in
debug_level = "-1";
debug_file = "/var/log/nagios/debug.log";
};
default = {};
default = { };
description = "Configuration to add to /etc/nagios.cfg";
};
@ -169,13 +181,12 @@ in
};
};
config = lib.mkIf cfg.enable {
users.users.nagios = {
description = "Nagios user";
uid = config.ids.uids.nagios;
home = nagiosState;
group = "nagios";
uid = config.ids.uids.nagios;
home = nagiosState;
group = "nagios";
};
users.groups.nagios = { };
@ -187,9 +198,9 @@ in
environment.systemPackages = [ pkgs.nagios ];
systemd.services.nagios = {
description = "Nagios monitoring daemon";
path = [ pkgs.nagios ] ++ cfg.plugins;
path = [ pkgs.nagios ] ++ cfg.plugins;
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
after = [ "network.target" ];
restartTriggers = [ nagiosCfgFile ];
serviceConfig = {
@ -204,7 +215,10 @@ in
};
services.httpd.virtualHosts = lib.optionalAttrs cfg.enableWebInterface {
${cfg.virtualHost.hostName} = lib.mkMerge [ cfg.virtualHost { extraConfig = extraHttpdConfig; } ];
${cfg.virtualHost.hostName} = lib.mkMerge [
cfg.virtualHost
{ extraConfig = extraHttpdConfig; }
];
};
};
}

View file

@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.netdata;
@ -22,10 +27,12 @@ let
configDirectory = pkgs.runCommand "netdata-config-d" { } ''
mkdir $out
${lib.concatStringsSep "\n" (lib.mapAttrsToList (path: file: ''
${lib.concatStringsSep "\n" (
lib.mapAttrsToList (path: file: ''
mkdir -p "$out/$(dirname ${path})"
${if path == "apps_groups.conf" then "cp" else "ln -s"} "${file}" "$out/${path}"
'') cfg.configDir)}
'') cfg.configDir
)}
'';
localConfig = {
@ -38,16 +45,23 @@ let
"web files group" = "root";
};
"plugin:cgroups" = {
"script to get cgroup network interfaces" = "${wrappedPlugins}/libexec/netdata/plugins.d/cgroup-network";
"script to get cgroup network interfaces" =
"${wrappedPlugins}/libexec/netdata/plugins.d/cgroup-network";
"use unified cgroups" = "yes";
};
};
mkConfig = lib.generators.toINI {} (lib.recursiveUpdate localConfig cfg.config);
configFile = pkgs.writeText "netdata.conf" (if cfg.configText != null then cfg.configText else mkConfig);
mkConfig = lib.generators.toINI { } (lib.recursiveUpdate localConfig cfg.config);
configFile = pkgs.writeText "netdata.conf" (
if cfg.configText != null then cfg.configText else mkConfig
);
defaultUser = "netdata";
isThereAnyWireGuardTunnels = config.networking.wireguard.enable || lib.any (c: lib.hasAttrByPath [ "netdevConfig" "Kind" ] c && c.netdevConfig.Kind == "wireguard") (builtins.attrValues config.systemd.network.netdevs);
isThereAnyWireGuardTunnels =
config.networking.wireguard.enable
|| lib.any (
c: lib.hasAttrByPath [ "netdevConfig" "Kind" ] c && c.netdevConfig.Kind == "wireguard"
) (builtins.attrValues config.systemd.network.netdevs);
extraNdsudoPathsEnv = pkgs.buildEnv {
name = "netdata-ndsudo-env";
@ -55,7 +69,8 @@ let
pathsToLink = [ "/bin" ];
};
in {
in
{
options = {
services.netdata = {
enable = lib.mkEnableOption "netdata";
@ -104,7 +119,7 @@ in {
};
extraPackages = lib.mkOption {
type = lib.types.functionTo (lib.types.listOf lib.types.package);
default = ps: [];
default = ps: [ ];
defaultText = lib.literalExpression "ps: []";
example = lib.literalExpression ''
ps: [
@ -161,7 +176,7 @@ in {
config = lib.mkOption {
type = lib.types.attrsOf lib.types.attrs;
default = {};
default = { };
description = "netdata.conf configuration as nix attributes. cannot be combined with configText.";
example = lib.literalExpression ''
global = {
@ -174,7 +189,7 @@ in {
configDir = lib.mkOption {
type = lib.types.attrsOf lib.types.path;
default = {};
default = { };
description = ''
Complete netdata config directory except netdata.conf.
The default configuration is merged with changes
@ -229,11 +244,12 @@ in {
};
config = lib.mkIf cfg.enable {
assertions =
[ { assertion = cfg.config != {} -> cfg.configText == null ;
message = "Cannot specify both config and configText";
}
];
assertions = [
{
assertion = cfg.config != { } -> cfg.configText == null;
message = "Cannot specify both config and configText";
}
];
# Includes a set of recommended Python plugins in exchange of imperfect disk consumption.
services.netdata.python.extraPackages = lib.mkIf cfg.python.recommendedPythonPackages (ps: [
@ -245,11 +261,12 @@ in {
ps.netdata-pandas
]);
services.netdata.configDir.".opt-out-from-anonymous-statistics" = lib.mkIf (!cfg.enableAnalyticsReporting) (pkgs.writeText ".opt-out-from-anonymous-statistics" "");
services.netdata.configDir.".opt-out-from-anonymous-statistics" = lib.mkIf (
!cfg.enableAnalyticsReporting
) (pkgs.writeText ".opt-out-from-anonymous-statistics" "");
environment.etc."netdata/netdata.conf".source = configFile;
environment.etc."netdata/conf.d".source = configDirectory;
systemd.tmpfiles.settings = lib.mkIf cfg.package.withNdsudo {
"95-netdata-ndsudo" = {
"/var/lib/netdata/ndsudo" = {
@ -276,11 +293,15 @@ in {
systemd.services.netdata = {
description = "Real time performance monitoring";
after = [ "network.target" "suid-sgid-wrappers.service" ];
after = [
"network.target"
"suid-sgid-wrappers.service"
];
# No wrapper means no "useful" netdata.
requires = [ "suid-sgid-wrappers.service" ];
wantedBy = [ "multi-user.target" ];
path = (with pkgs; [
path =
(with pkgs; [
curl
gawk
iproute2
@ -292,181 +313,203 @@ in {
apcupsd # for charts.d
# TODO: firehol # for FireQoS -- this requires more NixOS module support.
util-linux # provides logger command; required for syslog health alarms
])
])
++ lib.optional cfg.python.enable (pkgs.python3.withPackages cfg.python.extraPackages)
++ lib.optional config.virtualisation.libvirtd.enable config.virtualisation.libvirtd.package
++ lib.optional config.virtualisation.docker.enable config.virtualisation.docker.package
++ lib.optionals config.virtualisation.podman.enable [ pkgs.jq config.virtualisation.podman.package ]
++ lib.optionals config.virtualisation.podman.enable [
pkgs.jq
config.virtualisation.podman.package
]
++ lib.optional config.boot.zfs.enabled config.boot.zfs.package;
environment = {
PYTHONPATH = "${cfg.package}/libexec/netdata/python.d/python_modules";
NETDATA_PIPENAME = "/run/netdata/ipc";
} // lib.optionalAttrs (!cfg.enableAnalyticsReporting) {
DO_NOT_TRACK = "1";
};
environment =
{
PYTHONPATH = "${cfg.package}/libexec/netdata/python.d/python_modules";
NETDATA_PIPENAME = "/run/netdata/ipc";
}
// lib.optionalAttrs (!cfg.enableAnalyticsReporting) {
DO_NOT_TRACK = "1";
};
restartTriggers = [
config.environment.etc."netdata/netdata.conf".source
config.environment.etc."netdata/conf.d".source
];
serviceConfig = {
ExecStart = "${cfg.package}/bin/netdata -P /run/netdata/netdata.pid -D -c /etc/netdata/netdata.conf";
ExecReload = "${pkgs.util-linux}/bin/kill -s HUP -s USR1 -s USR2 $MAINPID";
ExecStartPost = pkgs.writeShellScript "wait-for-netdata-up" ''
while [ "$(${cfg.package}/bin/netdatacli ping)" != pong ]; do sleep 0.5; done
'';
serviceConfig =
{
ExecStart = "${cfg.package}/bin/netdata -P /run/netdata/netdata.pid -D -c /etc/netdata/netdata.conf";
ExecReload = "${pkgs.util-linux}/bin/kill -s HUP -s USR1 -s USR2 $MAINPID";
ExecStartPost = pkgs.writeShellScript "wait-for-netdata-up" ''
while [ "$(${cfg.package}/bin/netdatacli ping)" != pong ]; do sleep 0.5; done
'';
TimeoutStopSec = cfg.deadlineBeforeStopSec;
Restart = "on-failure";
# User and group
User = cfg.user;
Group = cfg.group;
# Performance
LimitNOFILE = "30000";
# Runtime directory and mode
RuntimeDirectory = "netdata";
RuntimeDirectoryMode = "0750";
# State directory and mode
StateDirectory = "netdata";
StateDirectoryMode = "0750";
# Cache directory and mode
CacheDirectory = "netdata";
CacheDirectoryMode = "0750";
# Logs directory and mode
LogsDirectory = "netdata";
LogsDirectoryMode = "0750";
# Configuration directory and mode
ConfigurationDirectory = "netdata";
ConfigurationDirectoryMode = "0755";
# AmbientCapabilities
AmbientCapabilities = lib.optional isThereAnyWireGuardTunnels "CAP_NET_ADMIN";
# Capabilities
CapabilityBoundingSet = [
"CAP_DAC_OVERRIDE" # is required for freeipmi and slabinfo plugins
"CAP_DAC_READ_SEARCH" # is required for apps and systemd-journal plugin
"CAP_FOWNER" # is required for freeipmi plugin
"CAP_SETPCAP" # is required for apps, perf and slabinfo plugins
"CAP_SYS_ADMIN" # is required for perf plugin
"CAP_SYS_PTRACE" # is required for apps plugin
"CAP_SYS_RESOURCE" # is required for ebpf plugin
"CAP_NET_RAW" # is required for fping app
"CAP_SYS_CHROOT" # is required for cgroups plugin
"CAP_SETUID" # is required for cgroups and cgroups-network plugins
"CAP_SYSLOG" # is required for systemd-journal plugin
] ++ lib.optional isThereAnyWireGuardTunnels "CAP_NET_ADMIN";
# Sandboxing
ProtectSystem = "full";
ProtectHome = "read-only";
PrivateTmp = true;
ProtectControlGroups = true;
PrivateMounts = true;
} // (lib.optionalAttrs (cfg.claimTokenFile != null) {
LoadCredential = [
"netdata_claim_token:${cfg.claimTokenFile}"
];
TimeoutStopSec = cfg.deadlineBeforeStopSec;
Restart = "on-failure";
# User and group
User = cfg.user;
Group = cfg.group;
# Performance
LimitNOFILE = "30000";
# Runtime directory and mode
RuntimeDirectory = "netdata";
RuntimeDirectoryMode = "0750";
# State directory and mode
StateDirectory = "netdata";
StateDirectoryMode = "0750";
# Cache directory and mode
CacheDirectory = "netdata";
CacheDirectoryMode = "0750";
# Logs directory and mode
LogsDirectory = "netdata";
LogsDirectoryMode = "0750";
# Configuration directory and mode
ConfigurationDirectory = "netdata";
ConfigurationDirectoryMode = "0755";
# AmbientCapabilities
AmbientCapabilities = lib.optional isThereAnyWireGuardTunnels "CAP_NET_ADMIN";
# Capabilities
CapabilityBoundingSet = [
"CAP_DAC_OVERRIDE" # is required for freeipmi and slabinfo plugins
"CAP_DAC_READ_SEARCH" # is required for apps and systemd-journal plugin
"CAP_FOWNER" # is required for freeipmi plugin
"CAP_SETPCAP" # is required for apps, perf and slabinfo plugins
"CAP_SYS_ADMIN" # is required for perf plugin
"CAP_SYS_PTRACE" # is required for apps plugin
"CAP_SYS_RESOURCE" # is required for ebpf plugin
"CAP_NET_RAW" # is required for fping app
"CAP_SYS_CHROOT" # is required for cgroups plugin
"CAP_SETUID" # is required for cgroups and cgroups-network plugins
"CAP_SYSLOG" # is required for systemd-journal plugin
] ++ lib.optional isThereAnyWireGuardTunnels "CAP_NET_ADMIN";
# Sandboxing
ProtectSystem = "full";
ProtectHome = "read-only";
PrivateTmp = true;
ProtectControlGroups = true;
PrivateMounts = true;
}
// (lib.optionalAttrs (cfg.claimTokenFile != null) {
LoadCredential = [
"netdata_claim_token:${cfg.claimTokenFile}"
];
ExecStartPre = pkgs.writeShellScript "netdata-claim" ''
set -euo pipefail
ExecStartPre = pkgs.writeShellScript "netdata-claim" ''
set -euo pipefail
if [[ -f /var/lib/netdata/cloud.d/claimed_id ]]; then
# Already registered
exit
fi
if [[ -f /var/lib/netdata/cloud.d/claimed_id ]]; then
# Already registered
exit
fi
exec ${cfg.package}/bin/netdata-claim.sh \
-token="$(< "$CREDENTIALS_DIRECTORY/netdata_claim_token")" \
-url=https://app.netdata.cloud \
-daemon-not-running
'';
});
exec ${cfg.package}/bin/netdata-claim.sh \
-token="$(< "$CREDENTIALS_DIRECTORY/netdata_claim_token")" \
-url=https://app.netdata.cloud \
-daemon-not-running
'';
});
};
systemd.enableCgroupAccounting = true;
security.wrappers = {
"apps.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/apps.plugin.org";
capabilities = "cap_dac_read_search,cap_sys_ptrace+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
security.wrappers =
{
"apps.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/apps.plugin.org";
capabilities = "cap_dac_read_search,cap_sys_ptrace+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"debugfs.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/debugfs.plugin.org";
capabilities = "cap_dac_read_search+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"debugfs.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/debugfs.plugin.org";
capabilities = "cap_dac_read_search+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"cgroup-network" = {
source = "${cfg.package}/libexec/netdata/plugins.d/cgroup-network.org";
capabilities = "cap_setuid+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"cgroup-network" = {
source = "${cfg.package}/libexec/netdata/plugins.d/cgroup-network.org";
capabilities = "cap_setuid+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"perf.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/perf.plugin.org";
capabilities = "cap_sys_admin+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"perf.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/perf.plugin.org";
capabilities = "cap_sys_admin+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"systemd-journal.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/systemd-journal.plugin.org";
capabilities = "cap_dac_read_search,cap_syslog+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"systemd-journal.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/systemd-journal.plugin.org";
capabilities = "cap_dac_read_search,cap_syslog+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"slabinfo.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/slabinfo.plugin.org";
capabilities = "cap_dac_override+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
"slabinfo.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/slabinfo.plugin.org";
capabilities = "cap_dac_override+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
} // lib.optionalAttrs (cfg.package.withIpmi) {
"freeipmi.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/freeipmi.plugin.org";
capabilities = "cap_dac_override,cap_fowner+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
}
// lib.optionalAttrs (cfg.package.withIpmi) {
"freeipmi.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/freeipmi.plugin.org";
capabilities = "cap_dac_override,cap_fowner+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
}
// lib.optionalAttrs (cfg.package.withNetworkViewer) {
"network-viewer.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/network-viewer.plugin.org";
capabilities = "cap_sys_admin,cap_dac_read_search,cap_sys_ptrace+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
}
// lib.optionalAttrs (cfg.package.withNdsudo) {
"ndsudo" = {
source = "${cfg.package}/libexec/netdata/plugins.d/ndsudo.org";
setuid = true;
owner = "root";
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
};
} // lib.optionalAttrs (cfg.package.withNetworkViewer) {
"network-viewer.plugin" = {
source = "${cfg.package}/libexec/netdata/plugins.d/network-viewer.plugin.org";
capabilities = "cap_sys_admin,cap_dac_read_search,cap_sys_ptrace+ep";
owner = cfg.user;
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
} // lib.optionalAttrs (cfg.package.withNdsudo) {
"ndsudo" = {
source = "${cfg.package}/libexec/netdata/plugins.d/ndsudo.org";
setuid = true;
owner = "root";
group = cfg.group;
permissions = "u+rx,g+x,o-rwx";
};
};
security.pam.loginLimits = [
{ domain = "netdata"; type = "soft"; item = "nofile"; value = "10000"; }
{ domain = "netdata"; type = "hard"; item = "nofile"; value = "30000"; }
{
domain = "netdata";
type = "soft";
item = "nofile";
value = "10000";
}
{
domain = "netdata";
type = "hard";
item = "nofile";
value = "30000";
}
];
users.users = lib.optionalAttrs (cfg.user == defaultUser) {
${defaultUser} = {
group = defaultUser;
isSystemUser = true;
extraGroups = lib.optional config.virtualisation.docker.enable "docker"
extraGroups =
lib.optional config.virtualisation.docker.enable "docker"
++ lib.optional config.virtualisation.podman.enable "podman";
};
};

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.prometheus.alertmanagerIrcRelay;
@ -13,7 +18,7 @@ in
extraFlags = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
default = [ ];
description = "Extra command line options to pass to alertmanager-irc-relay.";
};
@ -79,7 +84,10 @@ in
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
RestrictRealtime = true;
RestrictSUIDSGID = true;

View file

@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
with lib;
@ -6,8 +11,7 @@ let
yaml = pkgs.formats.yaml { };
cfg = config.services.prometheus;
checkConfigEnabled =
(lib.isBool cfg.checkConfig && cfg.checkConfig)
|| cfg.checkConfig == "syntax-only";
(lib.isBool cfg.checkConfig && cfg.checkConfig) || cfg.checkConfig == "syntax-only";
workingDir = "/var/lib/" + cfg.stateDir;
@ -19,7 +23,16 @@ let
'';
reload = pkgs.writeShellScriptBin "reload-prometheus" ''
PATH="${makeBinPath (with pkgs; [ systemd coreutils gnugrep ])}"
PATH="${
makeBinPath (
with pkgs;
[
systemd
coreutils
gnugrep
]
)
}"
cursor=$(journalctl --show-cursor -n0 | grep -oP "cursor: \K.*")
kill -HUP $MAINPID
journalctl -u prometheus.service --after-cursor="$cursor" -f \
@ -27,15 +40,20 @@ let
'';
# a wrapper that verifies that the configuration is valid
promtoolCheck = what: name: file:
promtoolCheck =
what: name: file:
if checkConfigEnabled then
pkgs.runCommand "${name}-${replaceStrings [" "] [""] what}-checked" {
preferLocalBuild = true;
nativeBuildInputs = [ cfg.package.cli ];
} ''
ln -s ${file} $out
promtool ${what} $out
'' else file;
pkgs.runCommand "${name}-${replaceStrings [ " " ] [ "" ] what}-checked"
{
preferLocalBuild = true;
nativeBuildInputs = [ cfg.package.cli ];
}
''
ln -s ${file} $out
promtool ${what} $out
''
else
file;
generatedPrometheusYml = yaml.generate "prometheus.yml" promConfig;
@ -45,9 +63,14 @@ let
scrape_configs = filterValidPrometheus cfg.scrapeConfigs;
remote_write = filterValidPrometheus cfg.remoteWrite;
remote_read = filterValidPrometheus cfg.remoteRead;
rule_files = optionals (!(cfg.enableAgentMode)) (map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
(pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
]));
rule_files = optionals (!(cfg.enableAgentMode)) (
map (promtoolCheck "check rules" "rules") (
cfg.ruleFiles
++ [
(pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
]
)
);
alerting = {
inherit (cfg) alertmanagers;
};
@ -58,106 +81,128 @@ let
yml =
if cfg.configText != null then
pkgs.writeText "prometheus.yml" cfg.configText
else generatedPrometheusYml;
else
generatedPrometheusYml;
in
promtoolCheck "check config ${lib.optionalString (cfg.checkConfig == "syntax-only") "--syntax-only"}" "prometheus.yml" yml;
promtoolCheck "check config ${
lib.optionalString (cfg.checkConfig == "syntax-only") "--syntax-only"
}" "prometheus.yml" yml;
cmdlineArgs = cfg.extraFlags ++ [
"--config.file=${
if cfg.enableReload
then "/etc/prometheus/prometheus.yaml"
else prometheusYml
}"
"--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
] ++ (
if (cfg.enableAgentMode) then [
"--enable-feature=agent"
] else [
"--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity }"
"--storage.tsdb.path=${workingDir}/data/"
])
cmdlineArgs =
cfg.extraFlags
++ [
"--config.file=${if cfg.enableReload then "/etc/prometheus/prometheus.yaml" else prometheusYml}"
"--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
]
++ (
if (cfg.enableAgentMode) then
[
"--enable-feature=agent"
]
else
[
"--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
"--storage.tsdb.path=${workingDir}/data/"
]
)
++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
++ optional (cfg.retentionTime != null) "--storage.tsdb.retention.time=${cfg.retentionTime}"
++ optional (cfg.webConfigFile != null) "--web.config.file=${cfg.webConfigFile}";
filterValidPrometheus = filterAttrsListRecursive (n: v: !(n == "_module" || v == null));
filterAttrsListRecursive = pred: x:
filterAttrsListRecursive =
pred: x:
if isAttrs x then
listToAttrs
(
concatMap
(name:
let v = x.${name}; in
if pred name v then [
(nameValuePair name (filterAttrsListRecursive pred v))
] else [ ]
)
(attrNames x)
)
listToAttrs (
concatMap (
name:
let
v = x.${name};
in
if pred name v then
[
(nameValuePair name (filterAttrsListRecursive pred v))
]
else
[ ]
) (attrNames x)
)
else if isList x then
map (filterAttrsListRecursive pred) x
else x;
else
x;
#
# Config types: helper functions
#
mkDefOpt = type: defaultStr: description: mkOpt type (description + ''
mkDefOpt =
type: defaultStr: description:
mkOpt type (
description
+ ''
Defaults to ````${defaultStr}```` in prometheus
when set to `null`.
'');
Defaults to ````${defaultStr}```` in prometheus
when set to `null`.
''
);
mkOpt = type: description: mkOption {
type = types.nullOr type;
default = null;
description = description;
};
mkOpt =
type: description:
mkOption {
type = types.nullOr type;
default = null;
description = description;
};
mkSdConfigModule = extraOptions: types.submodule {
options = {
basic_auth = mkOpt promTypes.basic_auth ''
Optional HTTP basic authentication information.
'';
mkSdConfigModule =
extraOptions:
types.submodule {
options = {
basic_auth = mkOpt promTypes.basic_auth ''
Optional HTTP basic authentication information.
'';
authorization = mkOpt
(types.submodule {
options = {
type = mkDefOpt types.str "Bearer" ''
Sets the authentication type.
authorization =
mkOpt
(types.submodule {
options = {
type = mkDefOpt types.str "Bearer" ''
Sets the authentication type.
'';
credentials = mkOpt types.str ''
Sets the credentials. It is mutually exclusive with `credentials_file`.
'';
credentials_file = mkOpt types.str ''
Sets the credentials to the credentials read from the configured file.
It is mutually exclusive with `credentials`.
'';
};
})
''
Optional `Authorization` header configuration.
'';
credentials = mkOpt types.str ''
Sets the credentials. It is mutually exclusive with `credentials_file`.
'';
oauth2 = mkOpt promtypes.oauth2 ''
Optional OAuth 2.0 configuration.
Cannot be used at the same time as basic_auth or authorization.
'';
credentials_file = mkOpt types.str ''
Sets the credentials to the credentials read from the configured file.
It is mutually exclusive with `credentials`.
'';
};
}) ''
Optional `Authorization` header configuration.
'';
proxy_url = mkOpt types.str ''
Optional proxy URL.
'';
oauth2 = mkOpt promtypes.oauth2 ''
Optional OAuth 2.0 configuration.
Cannot be used at the same time as basic_auth or authorization.
'';
follow_redirects = mkDefOpt types.bool "true" ''
Configure whether HTTP requests follow HTTP 3xx redirects.
'';
proxy_url = mkOpt types.str ''
Optional proxy URL.
'';
follow_redirects = mkDefOpt types.bool "true" ''
Configure whether HTTP requests follow HTTP 3xx redirects.
'';
tls_config = mkOpt promTypes.tls_config ''
TLS configuration.
'';
} // extraOptions;
};
tls_config = mkOpt promTypes.tls_config ''
TLS configuration.
'';
} // extraOptions;
};
#
# Config types: general
@ -345,9 +390,16 @@ let
by the target will be ignored.
'';
scheme = mkDefOpt (types.enum [ "http" "https" ]) "http" ''
The URL scheme with which to fetch metrics from targets.
'';
scheme =
mkDefOpt
(types.enum [
"http"
"https"
])
"http"
''
The URL scheme with which to fetch metrics from targets.
'';
params = mkOpt (types.attrsOf (types.listOf types.str)) ''
Optional HTTP URL parameters.
@ -544,10 +596,17 @@ let
The Azure environment.
'';
authentication_method = mkDefOpt (types.enum [ "OAuth" "ManagedIdentity" ]) "OAuth" ''
The authentication method, either OAuth or ManagedIdentity.
See <https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview>
'';
authentication_method =
mkDefOpt
(types.enum [
"OAuth"
"ManagedIdentity"
])
"OAuth"
''
The authentication method, either OAuth or ManagedIdentity.
See <https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview>
'';
subscription_id = mkOption {
type = types.str;
@ -653,46 +712,55 @@ let
'';
};
mkDockerSdConfigModule = extraOptions: mkSdConfigModule ({
host = mkOption {
type = types.str;
description = ''
Address of the Docker daemon.
'';
};
port = mkDefOpt types.port "80" ''
The port to scrape metrics from, when `role` is nodes, and for discovered
tasks and services that don't have published ports.
'';
filters = mkOpt
(types.listOf (types.submodule {
options = {
name = mkOption {
type = types.str;
description = ''
Name of the filter. The available filters are listed in the upstream documentation:
Services: <https://docs.docker.com/engine/api/v1.40/#operation/ServiceList>
Tasks: <https://docs.docker.com/engine/api/v1.40/#operation/TaskList>
Nodes: <https://docs.docker.com/engine/api/v1.40/#operation/NodeList>
'';
};
values = mkOption {
type = types.str;
description = ''
Value for the filter.
'';
};
mkDockerSdConfigModule =
extraOptions:
mkSdConfigModule (
{
host = mkOption {
type = types.str;
description = ''
Address of the Docker daemon.
'';
};
})) ''
Optional filters to limit the discovery process to a subset of available resources.
'';
refresh_interval = mkDefOpt types.str "60s" ''
The time after which the containers are refreshed.
'';
} // extraOptions);
port = mkDefOpt types.port "80" ''
The port to scrape metrics from, when `role` is nodes, and for discovered
tasks and services that don't have published ports.
'';
filters =
mkOpt
(types.listOf (
types.submodule {
options = {
name = mkOption {
type = types.str;
description = ''
Name of the filter. The available filters are listed in the upstream documentation:
Services: <https://docs.docker.com/engine/api/v1.40/#operation/ServiceList>
Tasks: <https://docs.docker.com/engine/api/v1.40/#operation/TaskList>
Nodes: <https://docs.docker.com/engine/api/v1.40/#operation/NodeList>
'';
};
values = mkOption {
type = types.str;
description = ''
Value for the filter.
'';
};
};
}
))
''
Optional filters to limit the discovery process to a subset of available resources.
'';
refresh_interval = mkDefOpt types.str "60s" ''
The time after which the containers are refreshed.
'';
}
// extraOptions
);
promTypes.docker_sd_config = mkDockerSdConfigModule {
host_networking_host = mkDefOpt types.str "localhost" ''
@ -702,7 +770,11 @@ let
promTypes.dockerswarm_sd_config = mkDockerSdConfigModule {
role = mkOption {
type = types.enum [ "services" "tasks" "nodes" ];
type = types.enum [
"services"
"tasks"
"nodes"
];
description = ''
Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`.
'';
@ -718,9 +790,19 @@ let
'';
};
type = mkDefOpt (types.enum [ "SRV" "A" "AAAA" "MX" "NS" ]) "SRV" ''
The type of DNS query to perform.
'';
type =
mkDefOpt
(types.enum [
"SRV"
"A"
"AAAA"
"MX"
"NS"
])
"SRV"
''
The type of DNS query to perform.
'';
port = mkOpt types.port ''
The port number used if the query type is not SRV.
@ -772,28 +854,32 @@ let
rule.
'';
filters = mkOpt
(types.listOf (types.submodule {
options = {
name = mkOption {
type = types.str;
description = ''
See [this list](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
for the available filters.
'';
};
filters =
mkOpt
(types.listOf (
types.submodule {
options = {
name = mkOption {
type = types.str;
description = ''
See [this list](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
for the available filters.
'';
};
values = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Value of the filter.
'';
};
};
})) ''
Filters can be used optionally to filter the instance list by other criteria.
'';
values = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Value of the filter.
'';
};
};
}
))
''
Filters can be used optionally to filter the instance list by other criteria.
'';
};
};
@ -868,7 +954,10 @@ let
promTypes.hetzner_sd_config = mkSdConfigModule {
role = mkOption {
type = types.enum [ "robot" "hcloud" ];
type = types.enum [
"robot"
"hcloud"
];
description = ''
The Hetzner role of entities that should be discovered.
One of `robot` or `hcloud`.
@ -924,7 +1013,13 @@ let
'';
role = mkOption {
type = types.enum [ "endpoints" "service" "pod" "node" "ingress" ];
type = types.enum [
"endpoints"
"service"
"pod"
"node"
"ingress"
];
description = ''
The Kubernetes role of entities that should be discovered.
One of endpoints, service, pod, node, or ingress.
@ -936,22 +1031,22 @@ let
Note that api_server and kube_config are mutually exclusive.
'';
namespaces = mkOpt
(
types.submodule {
namespaces =
mkOpt
(types.submodule {
options = {
names = mkOpt (types.listOf types.str) ''
Namespace name.
'';
};
}
) ''
Optional namespace discovery. If omitted, all namespaces are used.
'';
})
''
Optional namespace discovery. If omitted, all namespaces are used.
'';
selectors = mkOpt
(
types.listOf (
selectors =
mkOpt
(types.listOf (
types.submodule {
options = {
role = mkOption {
@ -970,21 +1065,21 @@ let
'';
};
}
)
) ''
Optional label and field selectors to limit the discovery process to a subset of available resources.
See <https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/>
and <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/> to learn more about the possible
filters that can be used. Endpoints role supports pod, service and endpoints selectors, other roles
only support selectors matching the role itself (e.g. node role can only contain node selectors).
))
''
Optional label and field selectors to limit the discovery process to a subset of available resources.
See <https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/>
and <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/> to learn more about the possible
filters that can be used. Endpoints role supports pod, service and endpoints selectors, other roles
only support selectors matching the role itself (e.g. node role can only contain node selectors).
Note: When making decision about using field/label selector make sure that this
is the best approach - it will prevent Prometheus from reusing single list/watch
for all scrape configs. This might result in a bigger load on the Kubernetes API,
because per each selector combination there will be additional LIST/WATCH. On the other hand,
if you just want to monitor small subset of pods in large cluster it's recommended to use selectors.
Decision, if selectors should be used or not depends on the particular situation.
'';
Note: When making decision about using field/label selector make sure that this
is the best approach - it will prevent Prometheus from reusing single list/watch
for all scrape configs. This might result in a bigger load on the Kubernetes API,
because per each selector combination there will be additional LIST/WATCH. On the other hand,
if you just want to monitor small subset of pods in large cluster it's recommended to use selectors.
Decision, if selectors should be used or not depends on the particular situation.
'';
};
promTypes.kuma_sd_config = mkSdConfigModule {
@ -1188,9 +1283,17 @@ let
instead be specified in the relabeling rule.
'';
availability = mkDefOpt (types.enum [ "public" "admin" "internal" ]) "public" ''
The availability of the endpoint to connect to. Must be one of public, admin or internal.
'';
availability =
mkDefOpt
(types.enum [
"public"
"admin"
"internal"
])
"public"
''
The availability of the endpoint to connect to. Must be one of public, admin or internal.
'';
tls_config = mkOpt promTypes.tls_config ''
TLS configuration.
@ -1260,7 +1363,10 @@ let
};
role = mkOption {
type = types.enum [ "instance" "baremetal" ];
type = types.enum [
"instance"
"baremetal"
];
description = ''
Role of the targets to retrieve. Must be `instance` or `baremetal`.
'';
@ -1316,11 +1422,18 @@ let
'';
};
role = mkDefOpt (types.enum [ "container" "cn" ]) "container" ''
The type of targets to discover, can be set to:
- "container" to discover virtual machines (SmartOS zones, lx/KVM/bhyve branded zones) running on Triton
- "cn" to discover compute nodes (servers/global zones) making up the Triton infrastructure
'';
role =
mkDefOpt
(types.enum [
"container"
"cn"
])
"container"
''
The type of targets to discover, can be set to:
- "container" to discover virtual machines (SmartOS zones, lx/KVM/bhyve branded zones) running on Triton
- "cn" to discover compute nodes (servers/global zones) making up the Triton infrastructure
'';
dns_suffix = mkOption {
type = types.str;
@ -1448,9 +1561,22 @@ let
'';
action =
mkDefOpt (types.enum [ "replace" "lowercase" "uppercase" "keep" "drop" "hashmod" "labelmap" "labeldrop" "labelkeep" ]) "replace" ''
Action to perform based on regex matching.
'';
mkDefOpt
(types.enum [
"replace"
"lowercase"
"uppercase"
"keep"
"drop"
"hashmod"
"labelmap"
"labeldrop"
"labelkeep"
])
"replace"
''
Action to perform based on regex matching.
'';
};
};
@ -1502,52 +1628,56 @@ let
Configures the remote write request's TLS settings.
'';
proxy_url = mkOpt types.str "Optional Proxy URL.";
queue_config = mkOpt
(types.submodule {
options = {
capacity = mkOpt types.int ''
Number of samples to buffer per shard before we block reading of more
samples from the WAL. It is recommended to have enough capacity in each
shard to buffer several requests to keep throughput up while processing
occasional slow remote requests.
'';
max_shards = mkOpt types.int ''
Maximum number of shards, i.e. amount of concurrency.
'';
min_shards = mkOpt types.int ''
Minimum number of shards, i.e. amount of concurrency.
'';
max_samples_per_send = mkOpt types.int ''
Maximum number of samples per send.
'';
batch_send_deadline = mkOpt types.str ''
Maximum time a sample will wait in buffer.
'';
min_backoff = mkOpt types.str ''
Initial retry delay. Gets doubled for every retry.
'';
max_backoff = mkOpt types.str ''
Maximum retry delay.
'';
};
}) ''
Configures the queue used to write to remote storage.
'';
metadata_config = mkOpt
(types.submodule {
options = {
send = mkOpt types.bool ''
Whether metric metadata is sent to remote storage or not.
'';
send_interval = mkOpt types.str ''
How frequently metric metadata is sent to remote storage.
'';
};
}) ''
Configures the sending of series metadata to remote storage.
Metadata configuration is subject to change at any point
or be removed in future releases.
'';
queue_config =
mkOpt
(types.submodule {
options = {
capacity = mkOpt types.int ''
Number of samples to buffer per shard before we block reading of more
samples from the WAL. It is recommended to have enough capacity in each
shard to buffer several requests to keep throughput up while processing
occasional slow remote requests.
'';
max_shards = mkOpt types.int ''
Maximum number of shards, i.e. amount of concurrency.
'';
min_shards = mkOpt types.int ''
Minimum number of shards, i.e. amount of concurrency.
'';
max_samples_per_send = mkOpt types.int ''
Maximum number of samples per send.
'';
batch_send_deadline = mkOpt types.str ''
Maximum time a sample will wait in buffer.
'';
min_backoff = mkOpt types.str ''
Initial retry delay. Gets doubled for every retry.
'';
max_backoff = mkOpt types.str ''
Maximum retry delay.
'';
};
})
''
Configures the queue used to write to remote storage.
'';
metadata_config =
mkOpt
(types.submodule {
options = {
send = mkOpt types.bool ''
Whether metric metadata is sent to remote storage or not.
'';
send_interval = mkOpt types.str ''
How frequently metric metadata is sent to remote storage.
'';
};
})
''
Configures the sending of series metadata to remote storage.
Metadata configuration is subject to change at any point
or be removed in future releases.
'';
};
};
@ -1606,9 +1736,13 @@ in
imports = [
(mkRenamedOptionModule [ "services" "prometheus2" ] [ "services" "prometheus" ])
(mkRemovedOptionModule [ "services" "prometheus" "environmentFile" ]
"It has been removed since it was causing issues (https://github.com/NixOS/nixpkgs/issues/126083) and Prometheus now has native support for secret files, i.e. `basic_auth.password_file` and `authorization.credentials_file`.")
(mkRemovedOptionModule [ "services" "prometheus" "alertmanagerTimeout" ]
"Deprecated upstream and no longer had any effect")
"It has been removed since it was causing issues (https://github.com/NixOS/nixpkgs/issues/126083) and Prometheus now has native support for secret files, i.e. `basic_auth.password_file` and `authorization.credentials_file`."
)
(mkRemovedOptionModule [
"services"
"prometheus"
"alertmanagerTimeout"
] "Deprecated upstream and no longer had any effect")
];
options.services.prometheus = {
@ -1833,9 +1967,9 @@ in
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${cfg.package}/bin/prometheus" +
optionalString (length cmdlineArgs != 0) (" \\\n " +
concatStringsSep " \\\n " cmdlineArgs);
ExecStart =
"${cfg.package}/bin/prometheus"
+ optionalString (length cmdlineArgs != 0) (" \\\n " + concatStringsSep " \\\n " cmdlineArgs);
ExecReload = mkIf cfg.enableReload "+${reload}/bin/reload-prometheus";
User = "prometheus";
Restart = "always";
@ -1863,12 +1997,19 @@ in
ProtectProc = "invisible";
ProtectSystem = "full";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged" ];
SystemCallFilter = [
"@system-service"
"~@privileged"
];
};
};
# prometheus-config-reload will activate after prometheus. However, what we

View file

@ -1,9 +1,34 @@
{ config, pkgs, lib, options, utils, ... }:
{
config,
pkgs,
lib,
options,
utils,
...
}:
let
inherit (lib) concatStrings foldl foldl' genAttrs literalExpression maintainers
mapAttrs mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
optional types mkOptionDefault flip attrNames xor;
inherit (lib)
concatStrings
foldl
foldl'
genAttrs
literalExpression
maintainers
mapAttrs
mapAttrsToList
mkDefault
mkEnableOption
mkIf
mkMerge
mkOption
optional
types
mkOptionDefault
flip
attrNames
xor
;
cfg = config.services.prometheus.exporters;
@ -20,234 +45,289 @@ let
# systemd service must be provided by specifying either
# `serviceOpts.script` or `serviceOpts.serviceConfig.ExecStart`
exporterOpts = (genAttrs [
"apcupsd"
"artifactory"
"bind"
"bird"
"bitcoin"
"blackbox"
"borgmatic"
"buildkite-agent"
"chrony"
"collectd"
"deluge"
"dmarc"
"dnsmasq"
"dnssec"
"domain"
"dovecot"
"fastly"
"flow"
"fritz"
"fritzbox"
"frr"
"graphite"
"idrac"
"imap-mailstat"
"influxdb"
"ipmi"
"jitsi"
"json"
"junos-czerwonk"
"kea"
"keylight"
"klipper"
"knot"
"libvirt"
"lnd"
"mail"
"mikrotik"
"modemmanager"
"mongodb"
"mqtt"
"mysqld"
"nats"
"nextcloud"
"nginx"
"nginxlog"
"node"
"node-cert"
"nut"
"nvidia-gpu"
"pgbouncer"
"php-fpm"
"pihole"
"ping"
"postfix"
"postgres"
"process"
"pve"
"py-air-control"
"rasdaemon"
"redis"
"restic"
"rspamd"
"rtl_433"
"sabnzbd"
"scaphandre"
"script"
"shelly"
"smartctl"
"smokeping"
"snmp"
"sql"
"statsd"
"surfboard"
"systemd"
"unbound"
"unpoller"
"v2ray"
"varnish"
"wireguard"
"zfs"
]
(name:
import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options utils; }
)) // (mapAttrs
(name: params:
import (./. + "/exporters/${params.name}.nix") { inherit config lib pkgs options utils; type = params.type ; })
exporterOpts =
(genAttrs
[
"apcupsd"
"artifactory"
"bind"
"bird"
"bitcoin"
"blackbox"
"borgmatic"
"buildkite-agent"
"chrony"
"collectd"
"deluge"
"dmarc"
"dnsmasq"
"dnssec"
"domain"
"dovecot"
"fastly"
"flow"
"fritz"
"fritzbox"
"frr"
"graphite"
"idrac"
"imap-mailstat"
"influxdb"
"ipmi"
"jitsi"
"json"
"junos-czerwonk"
"kea"
"keylight"
"klipper"
"knot"
"libvirt"
"lnd"
"mail"
"mikrotik"
"modemmanager"
"mongodb"
"mqtt"
"mysqld"
"nats"
"nextcloud"
"nginx"
"nginxlog"
"node"
"node-cert"
"nut"
"nvidia-gpu"
"pgbouncer"
"php-fpm"
"pihole"
"ping"
"postfix"
"postgres"
"process"
"pve"
"py-air-control"
"rasdaemon"
"redis"
"restic"
"rspamd"
"rtl_433"
"sabnzbd"
"scaphandre"
"script"
"shelly"
"smartctl"
"smokeping"
"snmp"
"sql"
"statsd"
"surfboard"
"systemd"
"unbound"
"unpoller"
"v2ray"
"varnish"
"wireguard"
"zfs"
]
(
name:
import (./. + "/exporters/${name}.nix") {
inherit
config
lib
pkgs
options
utils
;
}
)
)
// (mapAttrs
(
name: params:
import (./. + "/exporters/${params.name}.nix") {
inherit
config
lib
pkgs
options
utils
;
type = params.type;
}
)
{
exportarr-bazarr = {
name = "exportarr";
type = "bazarr";
};
exportarr-lidarr = {
name = "exportarr";
type = "lidarr";
};
exportarr-prowlarr = {
name = "exportarr";
type = "prowlarr";
};
exportarr-radarr = {
name = "exportarr";
type = "radarr";
};
exportarr-readarr = {
name = "exportarr";
type = "readarr";
};
exportarr-sonarr = {
name = "exportarr";
type = "sonarr";
};
}
);
mkExporterOpts = (
{ name, port }:
{
exportarr-bazarr = {
name = "exportarr";
type = "bazarr";
enable = mkEnableOption "the prometheus ${name} exporter";
port = mkOption {
type = types.port;
default = port;
description = ''
Port to listen on.
'';
};
exportarr-lidarr = {
name = "exportarr";
type = "lidarr";
listenAddress = mkOption {
type = types.str;
default = "0.0.0.0";
description = ''
Address to listen on.
'';
};
exportarr-prowlarr = {
name = "exportarr";
type = "prowlarr";
extraFlags = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Extra commandline options to pass to the ${name} exporter.
'';
};
exportarr-radarr = {
name = "exportarr";
type = "radarr";
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Open port in firewall for incoming connections.
'';
};
exportarr-readarr = {
name = "exportarr";
type = "readarr";
firewallFilter = mkOption {
type = types.nullOr types.str;
default = null;
example = literalExpression ''
"-i eth0 -p tcp -m tcp --dport ${toString port}"
'';
description = ''
Specify a filter for iptables to use when
{option}`services.prometheus.exporters.${name}.openFirewall`
is true. It is used as `ip46tables -I nixos-fw firewallFilter -j nixos-fw-accept`.
'';
};
exportarr-sonarr = {
name = "exportarr";
type = "sonarr";
firewallRules = mkOption {
type = types.nullOr types.lines;
default = null;
example = literalExpression ''
iifname "eth0" tcp dport ${toString port} counter accept
'';
description = ''
Specify rules for nftables to add to the input chain
when {option}`services.prometheus.exporters.${name}.openFirewall` is true.
'';
};
user = mkOption {
type = types.str;
default = "${name}-exporter";
description = ''
User name under which the ${name} exporter shall be run.
'';
};
group = mkOption {
type = types.str;
default = "${name}-exporter";
description = ''
Group under which the ${name} exporter shall be run.
'';
};
}
);
mkExporterOpts = ({ name, port }: {
enable = mkEnableOption "the prometheus ${name} exporter";
port = mkOption {
type = types.port;
default = port;
description = ''
Port to listen on.
'';
mkSubModule =
{
name,
port,
extraOpts,
imports,
}:
{
${name} = mkOption {
type = types.submodule [
{
inherit imports;
options = (
mkExporterOpts {
inherit name port;
}
// extraOpts
);
}
(
{ config, ... }:
mkIf config.openFirewall {
firewallFilter = mkDefault "-p tcp -m tcp --dport ${toString config.port}";
firewallRules = mkDefault ''tcp dport ${toString config.port} accept comment "${name}-exporter"'';
}
)
];
internal = true;
default = { };
};
};
listenAddress = mkOption {
type = types.str;
default = "0.0.0.0";
description = ''
Address to listen on.
'';
};
extraFlags = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Extra commandline options to pass to the ${name} exporter.
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Open port in firewall for incoming connections.
'';
};
firewallFilter = mkOption {
type = types.nullOr types.str;
default = null;
example = literalExpression ''
"-i eth0 -p tcp -m tcp --dport ${toString port}"
'';
description = ''
Specify a filter for iptables to use when
{option}`services.prometheus.exporters.${name}.openFirewall`
is true. It is used as `ip46tables -I nixos-fw firewallFilter -j nixos-fw-accept`.
'';
};
firewallRules = mkOption {
type = types.nullOr types.lines;
default = null;
example = literalExpression ''
iifname "eth0" tcp dport ${toString port} counter accept
'';
description = ''
Specify rules for nftables to add to the input chain
when {option}`services.prometheus.exporters.${name}.openFirewall` is true.
'';
};
user = mkOption {
type = types.str;
default = "${name}-exporter";
description = ''
User name under which the ${name} exporter shall be run.
'';
};
group = mkOption {
type = types.str;
default = "${name}-exporter";
description = ''
Group under which the ${name} exporter shall be run.
'';
};
});
mkSubModule = { name, port, extraOpts, imports }: {
${name} = mkOption {
type = types.submodule [{
inherit imports;
options = (mkExporterOpts {
inherit name port;
} // extraOpts);
} ({ config, ... }: mkIf config.openFirewall {
firewallFilter = mkDefault "-p tcp -m tcp --dport ${toString config.port}";
firewallRules = mkDefault ''tcp dport ${toString config.port} accept comment "${name}-exporter"'';
})];
internal = true;
default = {};
};
};
mkSubModules = (foldl' (a: b: a//b) {}
(mapAttrsToList (name: opts: mkSubModule {
inherit name;
inherit (opts) port;
extraOpts = opts.extraOpts or {};
imports = opts.imports or [];
}) exporterOpts)
mkSubModules = (
foldl' (a: b: a // b) { } (
mapAttrsToList (
name: opts:
mkSubModule {
inherit name;
inherit (opts) port;
extraOpts = opts.extraOpts or { };
imports = opts.imports or [ ];
}
) exporterOpts
)
);
mkExporterConf = { name, conf, serviceOpts }:
mkExporterConf =
{
name,
conf,
serviceOpts,
}:
let
enableDynamicUser = serviceOpts.serviceConfig.DynamicUser or true;
nftables = config.networking.nftables.enable;
in
mkIf conf.enable {
warnings = conf.warnings or [];
assertions = conf.assertions or [];
users.users."${name}-exporter" = (mkIf (conf.user == "${name}-exporter" && !enableDynamicUser) {
description = "Prometheus ${name} exporter service user";
isSystemUser = true;
inherit (conf) group;
});
warnings = conf.warnings or [ ];
assertions = conf.assertions or [ ];
users.users."${name}-exporter" = (
mkIf (conf.user == "${name}-exporter" && !enableDynamicUser) {
description = "Prometheus ${name} exporter service user";
isSystemUser = true;
inherit (conf) group;
}
);
users.groups = mkMerge [
(mkIf (conf.group == "${name}-exporter" && !enableDynamicUser) {
"${name}-exporter" = {};
"${name}-exporter" = { };
})
(mkIf (name == "smartctl") {
"smartctl-exporter-access" = {};
"smartctl-exporter-access" = { };
})
];
services.udev.extraRules = mkIf (name == "smartctl") ''
@ -258,39 +338,45 @@ let
"-m comment --comment ${name}-exporter -j nixos-fw-accept"
]);
networking.firewall.extraInputRules = mkIf (conf.openFirewall && nftables) conf.firewallRules;
systemd.services."prometheus-${name}-exporter" = mkMerge ([{
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig.Restart = mkDefault "always";
serviceConfig.PrivateTmp = mkDefault true;
serviceConfig.WorkingDirectory = mkDefault /tmp;
serviceConfig.DynamicUser = mkDefault enableDynamicUser;
serviceConfig.User = mkDefault conf.user;
serviceConfig.Group = conf.group;
# Hardening
serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
serviceConfig.DeviceAllow = [ "" ];
serviceConfig.LockPersonality = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.PrivateDevices = mkDefault true;
serviceConfig.ProtectClock = mkDefault true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectSystem = mkDefault "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictRealtime = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.UMask = "0077";
} serviceOpts ]);
};
systemd.services."prometheus-${name}-exporter" = mkMerge ([
{
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig.Restart = mkDefault "always";
serviceConfig.PrivateTmp = mkDefault true;
serviceConfig.WorkingDirectory = mkDefault /tmp;
serviceConfig.DynamicUser = mkDefault enableDynamicUser;
serviceConfig.User = mkDefault conf.user;
serviceConfig.Group = conf.group;
# Hardening
serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
serviceConfig.DeviceAllow = [ "" ];
serviceConfig.LockPersonality = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.PrivateDevices = mkDefault true;
serviceConfig.ProtectClock = mkDefault true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectSystem = mkDefault "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictRealtime = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.UMask = "0077";
}
serviceOpts
]);
};
in
{
@ -310,7 +396,7 @@ in
];
};
description = "Prometheus exporter configuration";
default = {};
default = { };
example = literalExpression ''
{
node = {
@ -322,147 +408,180 @@ in
'';
};
config = mkMerge ([{
assertions = [ {
assertion = cfg.ipmi.enable -> (cfg.ipmi.configFile != null) -> (
!(lib.hasPrefix "/tmp/" cfg.ipmi.configFile)
);
message = ''
Config file specified in `services.prometheus.exporters.ipmi.configFile' must
not reside within /tmp - it won't be visible to the systemd service.
'';
} {
assertion = cfg.ipmi.enable -> (cfg.ipmi.webConfigFile != null) -> (
!(lib.hasPrefix "/tmp/" cfg.ipmi.webConfigFile)
);
message = ''
Config file specified in `services.prometheus.exporters.ipmi.webConfigFile' must
not reside within /tmp - it won't be visible to the systemd service.
'';
} {
assertion =
cfg.restic.enable -> ((cfg.restic.repository == null) != (cfg.restic.repositoryFile == null));
message = ''
Please specify either 'services.prometheus.exporters.restic.repository'
or 'services.prometheus.exporters.restic.repositoryFile'.
'';
} {
assertion = cfg.snmp.enable -> (
(cfg.snmp.configurationPath == null) != (cfg.snmp.configuration == null)
);
message = ''
Please ensure you have either `services.prometheus.exporters.snmp.configuration'
or `services.prometheus.exporters.snmp.configurationPath' set!
'';
} {
assertion = cfg.mikrotik.enable -> (
(cfg.mikrotik.configFile == null) != (cfg.mikrotik.configuration == null)
);
message = ''
Please specify either `services.prometheus.exporters.mikrotik.configuration'
or `services.prometheus.exporters.mikrotik.configFile'.
'';
} {
assertion = cfg.mail.enable -> (
(cfg.mail.configFile == null) != (cfg.mail.configuration == null)
);
message = ''
Please specify either 'services.prometheus.exporters.mail.configuration'
or 'services.prometheus.exporters.mail.configFile'.
'';
} {
assertion = cfg.mysqld.runAsLocalSuperUser -> config.services.mysql.enable;
message = ''
The exporter is configured to run as 'services.mysql.user', but
'services.mysql.enable' is set to false.
'';
} {
assertion = cfg.nextcloud.enable -> (
(cfg.nextcloud.passwordFile == null) != (cfg.nextcloud.tokenFile == null)
);
message = ''
Please specify either 'services.prometheus.exporters.nextcloud.passwordFile' or
'services.prometheus.exporters.nextcloud.tokenFile'
'';
} {
assertion = cfg.sql.enable -> (
(cfg.sql.configFile == null) != (cfg.sql.configuration == null)
);
message = ''
Please specify either 'services.prometheus.exporters.sql.configuration' or
'services.prometheus.exporters.sql.configFile'
'';
} {
assertion = cfg.scaphandre.enable -> (pkgs.stdenv.targetPlatform.isx86_64 == true);
message = ''
Scaphandre only support x86_64 architectures.
'';
} {
assertion = cfg.scaphandre.enable -> ((lib.kernel.whenHelpers pkgs.linux.version).whenOlder "5.11" true).condition == false;
message = ''
Scaphandre requires a kernel version newer than '5.11', '${pkgs.linux.version}' given.
'';
} {
assertion = cfg.scaphandre.enable -> (builtins.elem "intel_rapl_common" config.boot.kernelModules);
message = ''
Scaphandre needs 'intel_rapl_common' kernel module to be enabled. Please add it in 'boot.kernelModules'.
'';
} {
assertion = cfg.idrac.enable -> (
(cfg.idrac.configurationPath == null) != (cfg.idrac.configuration == null)
);
message = ''
Please ensure you have either `services.prometheus.exporters.idrac.configuration'
or `services.prometheus.exporters.idrac.configurationPath' set!
'';
} {
assertion = cfg.deluge.enable -> (
(cfg.deluge.delugePassword == null) != (cfg.deluge.delugePasswordFile == null)
);
message = ''
Please ensure you have either `services.prometheus.exporters.deluge.delugePassword'
or `services.prometheus.exporters.deluge.delugePasswordFile' set!
'';
} {
assertion = cfg.pgbouncer.enable -> (
xor (cfg.pgbouncer.connectionEnvFile == null) (cfg.pgbouncer.connectionString == null)
);
message = ''
Options `services.prometheus.exporters.pgbouncer.connectionEnvFile` and
`services.prometheus.exporters.pgbouncer.connectionString` are mutually exclusive!
'';
}] ++ (flip map (attrNames exporterOpts) (exporter: {
assertion = cfg.${exporter}.firewallFilter != null -> cfg.${exporter}.openFirewall;
message = ''
The `firewallFilter'-option of exporter ${exporter} doesn't have any effect unless
`openFirewall' is set to `true'!
'';
})) ++ config.services.prometheus.exporters.assertions;
warnings = [
(mkIf (config.services.prometheus.exporters.idrac.enable && config.services.prometheus.exporters.idrac.configurationPath != null) ''
Configuration file in `services.prometheus.exporters.idrac.configurationPath` may override
`services.prometheus.exporters.idrac.listenAddress` and/or `services.prometheus.exporters.idrac.port`.
Consider using `services.prometheus.exporters.idrac.configuration` instead.
''
)
] ++ config.services.prometheus.exporters.warnings;
}] ++ [(mkIf config.services.prometheus.exporters.rtl_433.enable {
hardware.rtl-sdr.enable = mkDefault true;
})] ++ [(mkIf config.services.postfix.enable {
services.prometheus.exporters.postfix.group = mkDefault config.services.postfix.setgidGroup;
})] ++ [(mkIf config.services.prometheus.exporters.deluge.enable {
system.activationScripts = {
deluge-exported.text = ''
mkdir -p /etc/deluge-exporter
echo "DELUGE_PASSWORD=$(cat ${config.services.prometheus.exporters.deluge.delugePasswordFile})" > /etc/deluge-exporter/password
'';
};
})] ++ (mapAttrsToList (name: conf:
mkExporterConf {
inherit name;
inherit (conf) serviceOpts;
conf = cfg.${name};
}) exporterOpts)
config = mkMerge (
[
{
assertions =
[
{
assertion =
cfg.ipmi.enable -> (cfg.ipmi.configFile != null) -> (!(lib.hasPrefix "/tmp/" cfg.ipmi.configFile));
message = ''
Config file specified in `services.prometheus.exporters.ipmi.configFile' must
not reside within /tmp - it won't be visible to the systemd service.
'';
}
{
assertion =
cfg.ipmi.enable
-> (cfg.ipmi.webConfigFile != null)
-> (!(lib.hasPrefix "/tmp/" cfg.ipmi.webConfigFile));
message = ''
Config file specified in `services.prometheus.exporters.ipmi.webConfigFile' must
not reside within /tmp - it won't be visible to the systemd service.
'';
}
{
assertion =
cfg.restic.enable -> ((cfg.restic.repository == null) != (cfg.restic.repositoryFile == null));
message = ''
Please specify either 'services.prometheus.exporters.restic.repository'
or 'services.prometheus.exporters.restic.repositoryFile'.
'';
}
{
assertion =
cfg.snmp.enable -> ((cfg.snmp.configurationPath == null) != (cfg.snmp.configuration == null));
message = ''
Please ensure you have either `services.prometheus.exporters.snmp.configuration'
or `services.prometheus.exporters.snmp.configurationPath' set!
'';
}
{
assertion =
cfg.mikrotik.enable -> ((cfg.mikrotik.configFile == null) != (cfg.mikrotik.configuration == null));
message = ''
Please specify either `services.prometheus.exporters.mikrotik.configuration'
or `services.prometheus.exporters.mikrotik.configFile'.
'';
}
{
assertion = cfg.mail.enable -> ((cfg.mail.configFile == null) != (cfg.mail.configuration == null));
message = ''
Please specify either 'services.prometheus.exporters.mail.configuration'
or 'services.prometheus.exporters.mail.configFile'.
'';
}
{
assertion = cfg.mysqld.runAsLocalSuperUser -> config.services.mysql.enable;
message = ''
The exporter is configured to run as 'services.mysql.user', but
'services.mysql.enable' is set to false.
'';
}
{
assertion =
cfg.nextcloud.enable -> ((cfg.nextcloud.passwordFile == null) != (cfg.nextcloud.tokenFile == null));
message = ''
Please specify either 'services.prometheus.exporters.nextcloud.passwordFile' or
'services.prometheus.exporters.nextcloud.tokenFile'
'';
}
{
assertion = cfg.sql.enable -> ((cfg.sql.configFile == null) != (cfg.sql.configuration == null));
message = ''
Please specify either 'services.prometheus.exporters.sql.configuration' or
'services.prometheus.exporters.sql.configFile'
'';
}
{
assertion = cfg.scaphandre.enable -> (pkgs.stdenv.targetPlatform.isx86_64 == true);
message = ''
Scaphandre only support x86_64 architectures.
'';
}
{
assertion =
cfg.scaphandre.enable
-> ((lib.kernel.whenHelpers pkgs.linux.version).whenOlder "5.11" true).condition == false;
message = ''
Scaphandre requires a kernel version newer than '5.11', '${pkgs.linux.version}' given.
'';
}
{
assertion = cfg.scaphandre.enable -> (builtins.elem "intel_rapl_common" config.boot.kernelModules);
message = ''
Scaphandre needs 'intel_rapl_common' kernel module to be enabled. Please add it in 'boot.kernelModules'.
'';
}
{
assertion =
cfg.idrac.enable -> ((cfg.idrac.configurationPath == null) != (cfg.idrac.configuration == null));
message = ''
Please ensure you have either `services.prometheus.exporters.idrac.configuration'
or `services.prometheus.exporters.idrac.configurationPath' set!
'';
}
{
assertion =
cfg.deluge.enable
-> ((cfg.deluge.delugePassword == null) != (cfg.deluge.delugePasswordFile == null));
message = ''
Please ensure you have either `services.prometheus.exporters.deluge.delugePassword'
or `services.prometheus.exporters.deluge.delugePasswordFile' set!
'';
}
{
assertion =
cfg.pgbouncer.enable
-> (xor (cfg.pgbouncer.connectionEnvFile == null) (cfg.pgbouncer.connectionString == null));
message = ''
Options `services.prometheus.exporters.pgbouncer.connectionEnvFile` and
`services.prometheus.exporters.pgbouncer.connectionString` are mutually exclusive!
'';
}
]
++ (flip map (attrNames exporterOpts) (exporter: {
assertion = cfg.${exporter}.firewallFilter != null -> cfg.${exporter}.openFirewall;
message = ''
The `firewallFilter'-option of exporter ${exporter} doesn't have any effect unless
`openFirewall' is set to `true'!
'';
}))
++ config.services.prometheus.exporters.assertions;
warnings = [
(mkIf
(
config.services.prometheus.exporters.idrac.enable
&& config.services.prometheus.exporters.idrac.configurationPath != null
)
''
Configuration file in `services.prometheus.exporters.idrac.configurationPath` may override
`services.prometheus.exporters.idrac.listenAddress` and/or `services.prometheus.exporters.idrac.port`.
Consider using `services.prometheus.exporters.idrac.configuration` instead.
''
)
] ++ config.services.prometheus.exporters.warnings;
}
]
++ [
(mkIf config.services.prometheus.exporters.rtl_433.enable {
hardware.rtl-sdr.enable = mkDefault true;
})
]
++ [
(mkIf config.services.postfix.enable {
services.prometheus.exporters.postfix.group = mkDefault config.services.postfix.setgidGroup;
})
]
++ [
(mkIf config.services.prometheus.exporters.deluge.enable {
system.activationScripts = {
deluge-exported.text = ''
mkdir -p /etc/deluge-exporter
echo "DELUGE_PASSWORD=$(cat ${config.services.prometheus.exporters.deluge.delugePasswordFile})" > /etc/deluge-exporter/password
'';
};
})
]
++ (mapAttrsToList (
name: conf:
mkExporterConf {
inherit name;
inherit (conf) serviceOpts;
conf = cfg.${name};
}
) exporterOpts)
);
meta = {

View file

@ -1,4 +1,10 @@
{ config, lib, pkgs, options, ... }:
{
config,
lib,
pkgs,
options,
...
}:
let
logPrefix = "services.prometheus.exporters.snmp";
@ -13,23 +19,30 @@ let
# This ensures that we can deal with string paths, path types and
# store-path strings with context.
coerceConfigFile = file:
coerceConfigFile =
file:
if (builtins.isPath file) || (lib.isStorePath file) then
file
else
(lib.warn ''
${logPrefix}: configuration file "${file}" is being copied to the nix-store.
If you would like to avoid that, please set enableConfigCheck to false.
'' /. + file);
(
lib.warn ''
${logPrefix}: configuration file "${file}" is being copied to the nix-store.
If you would like to avoid that, please set enableConfigCheck to false.
'' /.
+ file
);
checkConfig = file:
pkgs.runCommand "checked-snmp-exporter-config.yml" {
preferLocalBuild = true;
nativeBuildInputs = [ pkgs.buildPackages.prometheus-snmp-exporter ];
} ''
ln -s ${coerceConfigFile file} $out
snmp_exporter --dry-run --config.file $out
'';
checkConfig =
file:
pkgs.runCommand "checked-snmp-exporter-config.yml"
{
preferLocalBuild = true;
nativeBuildInputs = [ pkgs.buildPackages.prometheus-snmp-exporter ];
}
''
ln -s ${coerceConfigFile file} $out
snmp_exporter --dry-run --config.file $out
'';
in
{
port = 9116;
@ -68,7 +81,10 @@ in
};
logFormat = mkOption {
type = types.enum ["logfmt" "json"];
type = types.enum [
"logfmt"
"json"
];
default = "logfmt";
description = ''
Output format of log messages.
@ -76,7 +92,12 @@ in
};
logLevel = mkOption {
type = types.enum ["debug" "info" "warn" "error"];
type = types.enum [
"debug"
"info"
"warn"
"error"
];
default = "info";
description = ''
Only log messages with the given severity or above.
@ -110,27 +131,27 @@ in
'';
};
};
serviceOpts = let
uncheckedConfigFile = if cfg.configurationPath != null
then cfg.configurationPath
else "${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
configFile = if cfg.enableConfigCheck then
checkConfig uncheckedConfigFile
else
uncheckedConfigFile;
in {
serviceConfig = {
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
ExecStart = ''
${pkgs.prometheus-snmp-exporter}/bin/snmp_exporter \
--config.file=${escapeShellArg configFile} \
${lib.optionalString (cfg.environmentFile != null)
"--config.expand-environment-variables"} \
--log.format=${escapeShellArg cfg.logFormat} \
--log.level=${cfg.logLevel} \
--web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
serviceOpts =
let
uncheckedConfigFile =
if cfg.configurationPath != null then
cfg.configurationPath
else
"${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
configFile = if cfg.enableConfigCheck then checkConfig uncheckedConfigFile else uncheckedConfigFile;
in
{
serviceConfig = {
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
ExecStart = ''
${pkgs.prometheus-snmp-exporter}/bin/snmp_exporter \
--config.file=${escapeShellArg configFile} \
${lib.optionalString (cfg.environmentFile != null) "--config.expand-environment-variables"} \
--log.format=${escapeShellArg cfg.logFormat} \
--log.level=${cfg.logLevel} \
--web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
};
};
};
}

View file

@ -1,10 +1,28 @@
{ config, lib, pkgs, utils, ... }:
{
config,
lib,
pkgs,
utils,
...
}:
let
inherit (lib) maintainers;
inherit (lib.meta) getExe;
inherit (lib.modules) mkIf mkMerge;
inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
inherit (lib.types) bool enum nullOr port str submodule;
inherit (lib.options)
literalExpression
mkEnableOption
mkOption
mkPackageOption
;
inherit (lib.types)
bool
enum
nullOr
port
str
submodule
;
inherit (utils) genJqSecretsReplacementSnippet;
cfg = config.services.scrutiny;
@ -70,7 +88,10 @@ in
};
options.log.level = mkOption {
type = enum [ "INFO" "DEBUG" ];
type = enum [
"INFO"
"DEBUG"
];
default = "INFO";
description = "Log level for Scrutiny.";
};
@ -93,7 +114,8 @@ in
description = "The port of the InfluxDB instance.";
};
options.web.influxdb.tls.insecure_skip_verify = mkEnableOption "skipping TLS verification when connecting to InfluxDB";
options.web.influxdb.tls.insecure_skip_verify =
mkEnableOption "skipping TLS verification when connecting to InfluxDB";
options.web.influxdb.token = mkOption {
type = nullOr str;
@ -160,7 +182,10 @@ in
};
options.log.level = mkOption {
type = enum [ "INFO" "DEBUG" ];
type = enum [
"INFO"
"DEBUG"
];
default = "INFO";
description = "Log level for Scrutiny collector.";
};

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
# TODO: This is not secure, have a look at the file docs/security.txt inside
# the project sources.
let
@ -12,165 +17,186 @@ let
nutFormat = {
type = with lib.types; let
type =
with lib.types;
let
singleAtom = nullOr (oneOf [
bool
int
float
str
]) // {
description = "atom (null, bool, int, float or string)";
};
singleAtom =
nullOr (oneOf [
bool
int
float
str
])
// {
description = "atom (null, bool, int, float or string)";
};
in attrsOf (oneOf [
in
attrsOf (oneOf [
singleAtom
(listOf (nonEmptyListOf singleAtom))
]);
generate = name: value:
generate =
name: value:
let
normalizedValue =
lib.mapAttrs (key: val:
if lib.isList val
then lib.forEach val (elem: if lib.isList elem then elem else [elem])
else
if val == null
then []
else [[val]]
) value;
normalizedValue = lib.mapAttrs (
key: val:
if lib.isList val then
lib.forEach val (elem: if lib.isList elem then elem else [ elem ])
else if val == null then
[ ]
else
[ [ val ] ]
) value;
mkValueString = lib.concatMapStringsSep " " (v:
let str = lib.generators.mkValueStringDefault {} v;
mkValueString = lib.concatMapStringsSep " " (
v:
let
str = lib.generators.mkValueStringDefault { } v;
in
# Quote the value if it has spaces and isn't already quoted.
if (lib.hasInfix " " str) && !(lib.hasPrefix "\"" str && lib.hasSuffix "\"" str)
then "\"${str}\""
else str
# Quote the value if it has spaces and isn't already quoted.
if (lib.hasInfix " " str) && !(lib.hasPrefix "\"" str && lib.hasSuffix "\"" str) then
"\"${str}\""
else
str
);
in pkgs.writeText name (lib.generators.toKeyValue {
mkKeyValue = lib.generators.mkKeyValueDefault { inherit mkValueString; } " ";
listsAsDuplicateKeys = true;
} normalizedValue);
in
pkgs.writeText name (
lib.generators.toKeyValue {
mkKeyValue = lib.generators.mkKeyValueDefault { inherit mkValueString; } " ";
listsAsDuplicateKeys = true;
} normalizedValue
);
};
installSecrets = source: target: secrets:
installSecrets =
source: target: secrets:
pkgs.writeShellScript "installSecrets.sh" ''
install -m0600 -D ${source} "${target}"
${lib.concatLines (lib.forEach secrets (name: ''
${pkgs.replace-secret}/bin/replace-secret \
'@${name}@' \
"$CREDENTIALS_DIRECTORY/${name}" \
"${target}"
''))}
${lib.concatLines (
lib.forEach secrets (name: ''
${pkgs.replace-secret}/bin/replace-secret \
'@${name}@' \
"$CREDENTIALS_DIRECTORY/${name}" \
"${target}"
'')
)}
chmod u-w "${target}"
'';
upsmonConf = nutFormat.generate "upsmon.conf" cfg.upsmon.settings;
upsdUsers = pkgs.writeText "upsd.users" (let
# This looks like INI, but it's not quite because the
# 'upsmon' option lacks a '='. See: man upsd.users
userConfig = name: user: lib.concatStringsSep "\n " (lib.concatLists [
[
"[${name}]"
"password = \"@upsdusers_password_${name}@\""
]
(lib.optional (user.upsmon != null) "upsmon ${user.upsmon}")
(lib.forEach user.actions (action: "actions = ${action}"))
(lib.forEach user.instcmds (instcmd: "instcmds = ${instcmd}"))
]);
in lib.concatStringsSep "\n\n" (lib.mapAttrsToList userConfig cfg.users));
upsdUsers = pkgs.writeText "upsd.users" (
let
# This looks like INI, but it's not quite because the
# 'upsmon' option lacks a '='. See: man upsd.users
userConfig =
name: user:
lib.concatStringsSep "\n " (
lib.concatLists [
[
"[${name}]"
"password = \"@upsdusers_password_${name}@\""
]
(lib.optional (user.upsmon != null) "upsmon ${user.upsmon}")
(lib.forEach user.actions (action: "actions = ${action}"))
(lib.forEach user.instcmds (instcmd: "instcmds = ${instcmd}"))
]
);
in
lib.concatStringsSep "\n\n" (lib.mapAttrsToList userConfig cfg.users)
);
upsOptions =
{ name, config, ... }:
{
options = {
# This can be inferred from the UPS model by looking at
# /nix/store/nut/share/driver.list
driver = lib.mkOption {
type = lib.types.str;
description = ''
Specify the program to run to talk to this UPS. apcsmart,
bestups, and sec are some examples.
'';
};
port = lib.mkOption {
type = lib.types.str;
description = ''
The serial port to which your UPS is connected. /dev/ttyS0 is
usually the first port on Linux boxes, for example.
'';
};
shutdownOrder = lib.mkOption {
default = 0;
type = lib.types.int;
description = ''
When you have multiple UPSes on your system, you usually need to
turn them off in a certain order. upsdrvctl shuts down all the
0s, then the 1s, 2s, and so on. To exclude a UPS from the
shutdown sequence, set this to -1.
'';
};
maxStartDelay = lib.mkOption {
default = null;
type = lib.types.uniq (lib.types.nullOr lib.types.int);
description = ''
This can be set as a global variable above your first UPS
definition and it can also be set in a UPS section. This value
controls how long upsdrvctl will wait for the driver to finish
starting. This keeps your system from getting stuck due to a
broken driver or UPS.
'';
};
description = lib.mkOption {
default = "";
type = lib.types.str;
description = ''
Description of the UPS.
'';
};
directives = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
List of configuration directives for this UPS.
'';
};
summary = lib.mkOption {
default = "";
type = lib.types.lines;
description = ''
Lines which would be added inside ups.conf for handling this UPS.
'';
};
upsOptions = {name, config, ...}:
{
options = {
# This can be inferred from the UPS model by looking at
# /nix/store/nut/share/driver.list
driver = lib.mkOption {
type = lib.types.str;
description = ''
Specify the program to run to talk to this UPS. apcsmart,
bestups, and sec are some examples.
'';
};
port = lib.mkOption {
type = lib.types.str;
description = ''
The serial port to which your UPS is connected. /dev/ttyS0 is
usually the first port on Linux boxes, for example.
'';
};
config = {
directives = lib.mkOrder 10 (
[
"driver = ${config.driver}"
"port = ${config.port}"
''desc = "${config.description}"''
"sdorder = ${toString config.shutdownOrder}"
]
++ (lib.optional (config.maxStartDelay != null) "maxstartdelay = ${toString config.maxStartDelay}")
);
shutdownOrder = lib.mkOption {
default = 0;
type = lib.types.int;
description = ''
When you have multiple UPSes on your system, you usually need to
turn them off in a certain order. upsdrvctl shuts down all the
0s, then the 1s, 2s, and so on. To exclude a UPS from the
shutdown sequence, set this to -1.
'';
summary = lib.concatStringsSep "\n " ([ "[${name}]" ] ++ config.directives);
};
maxStartDelay = lib.mkOption {
default = null;
type = lib.types.uniq (lib.types.nullOr lib.types.int);
description = ''
This can be set as a global variable above your first UPS
definition and it can also be set in a UPS section. This value
controls how long upsdrvctl will wait for the driver to finish
starting. This keeps your system from getting stuck due to a
broken driver or UPS.
'';
};
description = lib.mkOption {
default = "";
type = lib.types.str;
description = ''
Description of the UPS.
'';
};
directives = lib.mkOption {
default = [];
type = lib.types.listOf lib.types.str;
description = ''
List of configuration directives for this UPS.
'';
};
summary = lib.mkOption {
default = "";
type = lib.types.lines;
description = ''
Lines which would be added inside ups.conf for handling this UPS.
'';
};
};
config = {
directives = lib.mkOrder 10 ([
"driver = ${config.driver}"
"port = ${config.port}"
''desc = "${config.description}"''
"sdorder = ${toString config.shutdownOrder}"
] ++ (lib.optional (config.maxStartDelay != null)
"maxstartdelay = ${toString config.maxStartDelay}")
);
summary =
lib.concatStringsSep "\n "
(["[${name}]"] ++ config.directives);
};
};
listenOptions = {
options = {
address = lib.mkOption {
@ -202,7 +228,7 @@ let
listen = lib.mkOption {
type = with lib.types; listOf (submodule listenOptions);
default = [];
default = [ ];
example = [
{
address = "192.168.50.1";
@ -228,65 +254,71 @@ let
};
config = {
enable = lib.mkDefault (lib.elem cfg.mode [ "standalone" "netserver" ]);
enable = lib.mkDefault (
lib.elem cfg.mode [
"standalone"
"netserver"
]
);
};
};
monitorOptions =
{ name, config, ... }:
{
options = {
system = lib.mkOption {
type = lib.types.str;
default = name;
description = ''
Identifier of the UPS to monitor, in this form: `<upsname>[@<hostname>[:<port>]]`
See `upsmon.conf` for details.
'';
};
monitorOptions = { name, config, ... }: {
options = {
system = lib.mkOption {
type = lib.types.str;
default = name;
description = ''
Identifier of the UPS to monitor, in this form: `<upsname>[@<hostname>[:<port>]]`
See `upsmon.conf` for details.
'';
powerValue = lib.mkOption {
type = lib.types.int;
default = 1;
description = ''
Number of power supplies that the UPS feeds on this system.
See `upsmon.conf` for details.
'';
};
user = lib.mkOption {
type = lib.types.str;
description = ''
Username from `upsd.users` for accessing this UPS.
See `upsmon.conf` for details.
'';
};
passwordFile = lib.mkOption {
type = lib.types.str;
defaultText = lib.literalMD "power.ups.users.\${user}.passwordFile";
description = ''
The full path to a file containing the password from
`upsd.users` for accessing this UPS. The password file
is read on service start.
See `upsmon.conf` for details.
'';
};
type = lib.mkOption {
type = lib.types.str;
default = "master";
description = ''
The relationship with `upsd`.
See `upsmon.conf` for details.
'';
};
};
powerValue = lib.mkOption {
type = lib.types.int;
default = 1;
description = ''
Number of power supplies that the UPS feeds on this system.
See `upsmon.conf` for details.
'';
};
user = lib.mkOption {
type = lib.types.str;
description = ''
Username from `upsd.users` for accessing this UPS.
See `upsmon.conf` for details.
'';
};
passwordFile = lib.mkOption {
type = lib.types.str;
defaultText = lib.literalMD "power.ups.users.\${user}.passwordFile";
description = ''
The full path to a file containing the password from
`upsd.users` for accessing this UPS. The password file
is read on service start.
See `upsmon.conf` for details.
'';
};
type = lib.mkOption {
type = lib.types.str;
default = "master";
description = ''
The relationship with `upsd`.
See `upsmon.conf` for details.
'';
config = {
passwordFile = lib.mkDefault cfg.users.${config.user}.passwordFile;
};
};
config = {
passwordFile = lib.mkDefault cfg.users.${config.user}.passwordFile;
};
};
upsmonOptions = {
options = {
enable = lib.mkOption {
@ -297,7 +329,7 @@ let
monitor = lib.mkOption {
type = with lib.types; attrsOf (submodule monitorOptions);
default = {};
default = { };
description = ''
Set of UPS to monitor. See `man upsmon.conf` for details.
'';
@ -305,7 +337,7 @@ let
settings = lib.mkOption {
type = nutFormat.type;
default = {};
default = { };
defaultText = lib.literalMD ''
{
MINSUPPLIES = 1;
@ -330,10 +362,24 @@ let
};
config = {
enable = lib.mkDefault (lib.elem cfg.mode [ "standalone" "netserver" "netclient" ]);
enable = lib.mkDefault (
lib.elem cfg.mode [
"standalone"
"netserver"
"netclient"
]
);
settings = {
MINSUPPLIES = lib.mkDefault 1;
MONITOR = lib.flip lib.mapAttrsToList cfg.upsmon.monitor (name: monitor: with monitor; [ system powerValue user "\"@upsmon_password_${name}@\"" type ]);
MONITOR = lib.flip lib.mapAttrsToList cfg.upsmon.monitor (
name: monitor: with monitor; [
system
powerValue
user
"\"@upsmon_password_${name}@\""
type
]
);
NOTIFYCMD = lib.mkDefault "${pkgs.nut}/bin/upssched";
POWERDOWNFLAG = lib.mkDefault "/run/killpower";
RUN_AS_USER = "root"; # TODO: replace 'root' by another username.
@ -354,7 +400,7 @@ let
actions = lib.mkOption {
type = with lib.types; listOf str;
default = [];
default = [ ];
description = ''
Allow the user to do certain things with upsd.
See `man upsd.users` for details.
@ -363,7 +409,7 @@ let
instcmds = lib.mkOption {
type = with lib.types; listOf str;
default = [];
default = [ ];
description = ''
Let the user initiate specific instant commands. Use "ALL" to grant all commands automatically. For the full list of what your UPS supports, use "upscmd -l".
See `man upsd.users` for details.
@ -371,7 +417,12 @@ let
};
upsmon = lib.mkOption {
type = with lib.types; nullOr (enum [ "primary" "secondary" ]);
type =
with lib.types;
nullOr (enum [
"primary"
"secondary"
]);
default = null;
description = ''
Add the necessary actions for a upsmon process to work.
@ -383,7 +434,6 @@ let
in
{
options = {
# powerManagement.powerDownCommands
@ -396,7 +446,12 @@ in
mode = lib.mkOption {
default = "standalone";
type = lib.types.enum [ "none" "standalone" "netserver" "netclient" ];
type = lib.types.enum [
"none"
"standalone"
"netserver"
"netclient"
];
description = ''
The MODE determines which part of the NUT is to be started, and
which configuration files must be modified.
@ -450,7 +505,7 @@ in
};
upsmon = lib.mkOption {
default = {};
default = { };
description = ''
Options for the `upsmon.conf` configuration file.
'';
@ -458,7 +513,7 @@ in
};
upsd = lib.mkOption {
default = {};
default = { };
description = ''
Options for the `upsd.conf` configuration file.
'';
@ -466,7 +521,7 @@ in
};
ups = lib.mkOption {
default = {};
default = { };
# see nut/etc/ups.conf.sample
description = ''
This is where you configure all the UPSes that this system will be
@ -477,7 +532,7 @@ in
};
users = lib.mkOption {
default = {};
default = { };
description = ''
Users that can access upsd. See `man upsd.users`.
'';
@ -490,15 +545,20 @@ in
config = lib.mkIf cfg.enable {
assertions = [
(let
totalPowerValue = lib.foldl' lib.add 0 (map (monitor: monitor.powerValue) (lib.attrValues cfg.upsmon.monitor));
minSupplies = cfg.upsmon.settings.MINSUPPLIES;
in lib.mkIf cfg.upsmon.enable {
assertion = totalPowerValue >= minSupplies;
message = ''
`power.ups.upsmon`: Total configured power value (${toString totalPowerValue}) must be at least MINSUPPLIES (${toString minSupplies}).
'';
})
(
let
totalPowerValue = lib.foldl' lib.add 0 (
map (monitor: monitor.powerValue) (lib.attrValues cfg.upsmon.monitor)
);
minSupplies = cfg.upsmon.settings.MINSUPPLIES;
in
lib.mkIf cfg.upsmon.enable {
assertion = totalPowerValue >= minSupplies;
message = ''
`power.ups.upsmon`: Total configured power value (${toString totalPowerValue}) must be at least MINSUPPLIES (${toString minSupplies}).
'';
}
)
];
# For interactive use.
@ -507,9 +567,10 @@ in
networking.firewall = lib.mkIf cfg.openFirewall {
allowedTCPPorts =
if cfg.upsd.listen == []
then [ defaultPort ]
else lib.unique (lib.forEach cfg.upsd.listen (listen: listen.port));
if cfg.upsd.listen == [ ] then
[ defaultPort ]
else
lib.unique (lib.forEach cfg.upsd.listen (listen: listen.port));
};
systemd.slices.system-ups = {
@ -517,47 +578,58 @@ in
documentation = [ "https://networkupstools.org/" ];
};
systemd.services.upsmon = let
secrets = lib.mapAttrsToList (name: monitor: "upsmon_password_${name}") cfg.upsmon.monitor;
createUpsmonConf = installSecrets upsmonConf "/run/nut/upsmon.conf" secrets;
in {
enable = cfg.upsmon.enable;
description = "Uninterruptible Power Supplies (Monitor)";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
ExecStartPre = "${createUpsmonConf}";
ExecStart = "${pkgs.nut}/sbin/upsmon";
ExecReload = "${pkgs.nut}/sbin/upsmon -c reload";
LoadCredential = lib.mapAttrsToList (name: monitor: "upsmon_password_${name}:${monitor.passwordFile}") cfg.upsmon.monitor;
Slice = "system-ups.slice";
systemd.services.upsmon =
let
secrets = lib.mapAttrsToList (name: monitor: "upsmon_password_${name}") cfg.upsmon.monitor;
createUpsmonConf = installSecrets upsmonConf "/run/nut/upsmon.conf" secrets;
in
{
enable = cfg.upsmon.enable;
description = "Uninterruptible Power Supplies (Monitor)";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
ExecStartPre = "${createUpsmonConf}";
ExecStart = "${pkgs.nut}/sbin/upsmon";
ExecReload = "${pkgs.nut}/sbin/upsmon -c reload";
LoadCredential = lib.mapAttrsToList (
name: monitor: "upsmon_password_${name}:${monitor.passwordFile}"
) cfg.upsmon.monitor;
Slice = "system-ups.slice";
};
environment = envVars;
};
environment = envVars;
};
systemd.services.upsd = let
secrets = lib.mapAttrsToList (name: user: "upsdusers_password_${name}") cfg.users;
createUpsdUsers = installSecrets upsdUsers "/run/nut/upsd.users" secrets;
in {
enable = cfg.upsd.enable;
description = "Uninterruptible Power Supplies (Daemon)";
after = [ "network.target" "upsmon.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
ExecStartPre = "${createUpsdUsers}";
# TODO: replace 'root' by another username.
ExecStart = "${pkgs.nut}/sbin/upsd -u root";
ExecReload = "${pkgs.nut}/sbin/upsd -c reload";
LoadCredential = lib.mapAttrsToList (name: user: "upsdusers_password_${name}:${user.passwordFile}") cfg.users;
Slice = "system-ups.slice";
systemd.services.upsd =
let
secrets = lib.mapAttrsToList (name: user: "upsdusers_password_${name}") cfg.users;
createUpsdUsers = installSecrets upsdUsers "/run/nut/upsd.users" secrets;
in
{
enable = cfg.upsd.enable;
description = "Uninterruptible Power Supplies (Daemon)";
after = [
"network.target"
"upsmon.service"
];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
ExecStartPre = "${createUpsdUsers}";
# TODO: replace 'root' by another username.
ExecStart = "${pkgs.nut}/sbin/upsd -u root";
ExecReload = "${pkgs.nut}/sbin/upsd -c reload";
LoadCredential = lib.mapAttrsToList (
name: user: "upsdusers_password_${name}:${user.passwordFile}"
) cfg.users;
Slice = "system-ups.slice";
};
environment = envVars;
restartTriggers = [
config.environment.etc."nut/upsd.conf".source
];
};
environment = envVars;
restartTriggers = [
config.environment.etc."nut/upsd.conf".source
];
};
systemd.services.upsdrv = {
enable = cfg.upsd.enable;
@ -596,21 +668,20 @@ in
};
environment.etc = {
"nut/nut.conf".source = pkgs.writeText "nut.conf"
''
MODE = ${cfg.mode}
'';
"nut/ups.conf".source = pkgs.writeText "ups.conf"
''
maxstartdelay = ${toString cfg.maxStartDelay}
"nut/nut.conf".source = pkgs.writeText "nut.conf" ''
MODE = ${cfg.mode}
'';
"nut/ups.conf".source = pkgs.writeText "ups.conf" ''
maxstartdelay = ${toString cfg.maxStartDelay}
${lib.concatStringsSep "\n\n" (lib.forEach (lib.attrValues cfg.ups) (ups: ups.summary))}
'';
"nut/upsd.conf".source = pkgs.writeText "upsd.conf"
''
${lib.concatStringsSep "\n" (lib.forEach cfg.upsd.listen (listen: "LISTEN ${listen.address} ${toString listen.port}"))}
${cfg.upsd.extraConfig}
'';
${lib.concatStringsSep "\n\n" (lib.forEach (lib.attrValues cfg.ups) (ups: ups.summary))}
'';
"nut/upsd.conf".source = pkgs.writeText "upsd.conf" ''
${lib.concatStringsSep "\n" (
lib.forEach cfg.upsd.listen (listen: "LISTEN ${listen.address} ${toString listen.port}")
)}
${cfg.upsd.extraConfig}
'';
"nut/upssched.conf".source = cfg.schedulerRules;
"nut/upsd.users".source = "/run/nut/upsd.users";
"nut/upsmon.conf".source = "/run/nut/upsmon.conf";
@ -625,18 +696,18 @@ in
services.udev.packages = [ pkgs.nut ];
/*
users.users.nut =
{ uid = 84;
home = "/var/lib/nut";
createHome = true;
group = "nut";
description = "UPnP A/V Media Server user";
};
/*
users.users.nut =
{ uid = 84;
home = "/var/lib/nut";
createHome = true;
group = "nut";
description = "UPnP A/V Media Server user";
};
users.groups."nut" =
{ gid = 84; };
*/
users.groups."nut" =
{ gid = 84; };
*/
};
}

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.ceph;
@ -6,64 +11,94 @@ let
expandCamelCase = lib.replaceStrings lib.upperChars (map (s: " ${s}") lib.lowerChars);
expandCamelCaseAttrs = lib.mapAttrs' (name: value: lib.nameValuePair (expandCamelCase name) value);
makeServices = daemonType: daemonIds:
lib.mkMerge (map (daemonId:
{ "ceph-${daemonType}-${daemonId}" = makeService daemonType daemonId cfg.global.clusterName cfg.${daemonType}.package; })
daemonIds);
makeServices =
daemonType: daemonIds:
lib.mkMerge (
map (daemonId: {
"ceph-${daemonType}-${daemonId}" =
makeService daemonType daemonId cfg.global.clusterName
cfg.${daemonType}.package;
}) daemonIds
);
makeService = daemonType: daemonId: clusterName: ceph:
makeService =
daemonType: daemonId: clusterName: ceph:
let
stateDirectory = "ceph/${if daemonType == "rgw" then "radosgw" else daemonType}/${clusterName}-${daemonId}"; in {
enable = true;
description = "Ceph ${builtins.replaceStrings lib.lowerChars lib.upperChars daemonType} daemon ${daemonId}";
after = [ "network-online.target" "time-sync.target" ] ++ lib.optional (daemonType == "osd") "ceph-mon.target";
wants = [ "network-online.target" "time-sync.target" ];
partOf = [ "ceph-${daemonType}.target" ];
wantedBy = [ "ceph-${daemonType}.target" ];
stateDirectory = "ceph/${
if daemonType == "rgw" then "radosgw" else daemonType
}/${clusterName}-${daemonId}";
in
{
enable = true;
description = "Ceph ${
builtins.replaceStrings lib.lowerChars lib.upperChars daemonType
} daemon ${daemonId}";
after = [
"network-online.target"
"time-sync.target"
] ++ lib.optional (daemonType == "osd") "ceph-mon.target";
wants = [
"network-online.target"
"time-sync.target"
];
partOf = [ "ceph-${daemonType}.target" ];
wantedBy = [ "ceph-${daemonType}.target" ];
path = [ pkgs.getopt ];
path = [ pkgs.getopt ];
# Don't start services that are not yet initialized
unitConfig.ConditionPathExists = "/var/lib/${stateDirectory}/keyring";
startLimitBurst =
if daemonType == "osd" then 30 else if lib.elem daemonType ["mgr" "mds"] then 3 else 5;
startLimitIntervalSec = 60 * 30; # 30 mins
# Don't start services that are not yet initialized
unitConfig.ConditionPathExists = "/var/lib/${stateDirectory}/keyring";
startLimitBurst =
if daemonType == "osd" then
30
else if
lib.elem daemonType [
"mgr"
"mds"
]
then
3
else
5;
startLimitIntervalSec = 60 * 30; # 30 mins
serviceConfig = {
LimitNOFILE = 1048576;
LimitNPROC = 1048576;
Environment = "CLUSTER=${clusterName}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
PrivateDevices = "yes";
PrivateTmp = "true";
ProtectHome = "true";
ProtectSystem = "full";
Restart = "on-failure";
StateDirectory = stateDirectory;
User = "ceph";
Group = if daemonType == "osd" then "disk" else "ceph";
ExecStart = ''
${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} \
-f --cluster ${clusterName} --id ${daemonId}'';
} // lib.optionalAttrs (daemonType == "osd") {
ExecStartPre = "${ceph.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${daemonId} --cluster ${clusterName}";
RestartSec = "20s";
PrivateDevices = "no"; # osd needs disk access
} // lib.optionalAttrs ( daemonType == "mon") {
RestartSec = "10";
serviceConfig =
{
LimitNOFILE = 1048576;
LimitNPROC = 1048576;
Environment = "CLUSTER=${clusterName}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
PrivateDevices = "yes";
PrivateTmp = "true";
ProtectHome = "true";
ProtectSystem = "full";
Restart = "on-failure";
StateDirectory = stateDirectory;
User = "ceph";
Group = if daemonType == "osd" then "disk" else "ceph";
ExecStart = ''
${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} \
-f --cluster ${clusterName} --id ${daemonId}'';
}
// lib.optionalAttrs (daemonType == "osd") {
ExecStartPre = "${ceph.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${daemonId} --cluster ${clusterName}";
RestartSec = "20s";
PrivateDevices = "no"; # osd needs disk access
}
// lib.optionalAttrs (daemonType == "mon") {
RestartSec = "10";
};
};
makeTarget = daemonType: {
"ceph-${daemonType}" = {
description = "Ceph target allowing to start/stop all ceph-${daemonType} services at once";
partOf = [ "ceph.target" ];
wantedBy = [ "ceph.target" ];
before = [ "ceph.target" ];
unitConfig.StopWhenUnneeded = true;
};
};
makeTarget = daemonType:
{
"ceph-${daemonType}" = {
description = "Ceph target allowing to start/stop all ceph-${daemonType} services at once";
partOf = [ "ceph.target" ];
wantedBy = [ "ceph.target" ];
before = [ "ceph.target" ];
unitConfig.StopWhenUnneeded = true;
};
};
in
{
options.services.ceph = {
@ -131,7 +166,10 @@ in
};
authClusterRequired = lib.mkOption {
type = lib.types.enum [ "cephx" "none" ];
type = lib.types.enum [
"cephx"
"none"
];
default = "cephx";
description = ''
Enables requiring daemons to authenticate with eachother in the cluster.
@ -139,7 +177,10 @@ in
};
authServiceRequired = lib.mkOption {
type = lib.types.enum [ "cephx" "none" ];
type = lib.types.enum [
"cephx"
"none"
];
default = "cephx";
description = ''
Enables requiring clients to authenticate with the cluster to access services in the cluster (e.g. radosgw, mds or osd).
@ -147,7 +188,10 @@ in
};
authClientRequired = lib.mkOption {
type = lib.types.enum [ "cephx" "none" ];
type = lib.types.enum [
"cephx"
"none"
];
default = "cephx";
description = ''
Enables requiring the cluster to authenticate itself to the client.
@ -188,7 +232,7 @@ in
extraConfig = lib.mkOption {
type = with lib.types; attrsOf str;
default = {};
default = { };
example = {
"ms bind ipv6" = "true";
};
@ -201,8 +245,11 @@ in
enable = lib.mkEnableOption "Ceph MGR daemon";
daemons = lib.mkOption {
type = with lib.types; listOf str;
default = [];
example = [ "name1" "name2" ];
default = [ ];
example = [
"name1"
"name2"
];
description = ''
A list of names for manager daemons that should have a service created. The names correspond
to the id part in ceph i.e. [ "name1" ] would result in mgr.name1
@ -211,7 +258,7 @@ in
package = lib.mkPackageOption pkgs "ceph" { };
extraConfig = lib.mkOption {
type = with lib.types; attrsOf str;
default = {};
default = { };
description = ''
Extra configuration to add to the global section for manager daemons.
'';
@ -222,8 +269,11 @@ in
enable = lib.mkEnableOption "Ceph MON daemon";
daemons = lib.mkOption {
type = with lib.types; listOf str;
default = [];
example = [ "name1" "name2" ];
default = [ ];
example = [
"name1"
"name2"
];
description = ''
A list of monitor daemons that should have a service created. The names correspond
to the id part in ceph i.e. [ "name1" ] would result in mon.name1
@ -232,7 +282,7 @@ in
package = lib.mkPackageOption pkgs "ceph" { };
extraConfig = lib.mkOption {
type = with lib.types; attrsOf str;
default = {};
default = { };
description = ''
Extra configuration to add to the monitor section.
'';
@ -243,8 +293,11 @@ in
enable = lib.mkEnableOption "Ceph OSD daemon";
daemons = lib.mkOption {
type = with lib.types; listOf str;
default = [];
example = [ "name1" "name2" ];
default = [ ];
example = [
"name1"
"name2"
];
description = ''
A list of OSD daemons that should have a service created. The names correspond
to the id part in ceph i.e. [ "name1" ] would result in osd.name1
@ -271,8 +324,11 @@ in
enable = lib.mkEnableOption "Ceph MDS daemon";
daemons = lib.mkOption {
type = with lib.types; listOf str;
default = [];
example = [ "name1" "name2" ];
default = [ ];
example = [
"name1"
"name2"
];
description = ''
A list of metadata service daemons that should have a service created. The names correspond
to the id part in ceph i.e. [ "name1" ] would result in mds.name1
@ -281,7 +337,7 @@ in
package = lib.mkPackageOption pkgs "ceph" { };
extraConfig = lib.mkOption {
type = with lib.types; attrsOf str;
default = {};
default = { };
description = ''
Extra configuration to add to the MDS section.
'';
@ -293,8 +349,11 @@ in
package = lib.mkPackageOption pkgs "ceph" { };
daemons = lib.mkOption {
type = with lib.types; listOf str;
default = [];
example = [ "name1" "name2" ];
default = [ ];
example = [
"name1"
"name2"
];
description = ''
A list of rados gateway daemons that should have a service created. The names correspond
to the id part in ceph i.e. [ "name1" ] would result in client.name1, radosgw daemons
@ -308,7 +367,7 @@ in
enable = lib.mkEnableOption "Ceph client configuration";
extraConfig = lib.mkOption {
type = with lib.types; attrsOf (attrsOf str);
default = {};
default = { };
example = lib.literalExpression ''
{
# This would create a section for a radosgw daemon named node0 and related
@ -326,39 +385,50 @@ in
config = lib.mkIf config.services.ceph.enable {
assertions = [
{ assertion = cfg.global.fsid != "";
{
assertion = cfg.global.fsid != "";
message = "fsid has to be set to a valid uuid for the cluster to function";
}
{ assertion = cfg.mon.enable -> cfg.mon.daemons != [];
{
assertion = cfg.mon.enable -> cfg.mon.daemons != [ ];
message = "have to set id of atleast one MON if you're going to enable Monitor";
}
{ assertion = cfg.mds.enable -> cfg.mds.daemons != [];
{
assertion = cfg.mds.enable -> cfg.mds.daemons != [ ];
message = "have to set id of atleast one MDS if you're going to enable Metadata Service";
}
{ assertion = cfg.osd.enable -> cfg.osd.daemons != [];
{
assertion = cfg.osd.enable -> cfg.osd.daemons != [ ];
message = "have to set id of atleast one OSD if you're going to enable OSD";
}
{ assertion = cfg.mgr.enable -> cfg.mgr.daemons != [];
{
assertion = cfg.mgr.enable -> cfg.mgr.daemons != [ ];
message = "have to set id of atleast one MGR if you're going to enable MGR";
}
];
warnings = lib.optional (cfg.global.monInitialMembers == null)
"Not setting up a list of members in monInitialMembers requires that you set the host variable for each mon daemon or else the cluster won't function";
warnings =
lib.optional (cfg.global.monInitialMembers == null)
"Not setting up a list of members in monInitialMembers requires that you set the host variable for each mon daemon or else the cluster won't function";
environment.etc."ceph/ceph.conf".text = let
# Merge the extraConfig set for mgr daemons, as mgr don't have their own section
globalSection = expandCamelCaseAttrs (cfg.global // cfg.extraConfig // lib.optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig);
# Remove all name-value pairs with null values from the attribute set to avoid making empty sections in the ceph.conf
globalSection' = lib.filterAttrs (name: value: value != null) globalSection;
totalConfig = {
global = globalSection';
} // lib.optionalAttrs (cfg.mon.enable && cfg.mon.extraConfig != {}) { mon = cfg.mon.extraConfig; }
// lib.optionalAttrs (cfg.mds.enable && cfg.mds.extraConfig != {}) { mds = cfg.mds.extraConfig; }
// lib.optionalAttrs (cfg.osd.enable && cfg.osd.extraConfig != {}) { osd = cfg.osd.extraConfig; }
// lib.optionalAttrs (cfg.client.enable && cfg.client.extraConfig != {}) cfg.client.extraConfig;
environment.etc."ceph/ceph.conf".text =
let
# Merge the extraConfig set for mgr daemons, as mgr don't have their own section
globalSection = expandCamelCaseAttrs (
cfg.global // cfg.extraConfig // lib.optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig
);
# Remove all name-value pairs with null values from the attribute set to avoid making empty sections in the ceph.conf
globalSection' = lib.filterAttrs (name: value: value != null) globalSection;
totalConfig =
{
global = globalSection';
}
// lib.optionalAttrs (cfg.mon.enable && cfg.mon.extraConfig != { }) { mon = cfg.mon.extraConfig; }
// lib.optionalAttrs (cfg.mds.enable && cfg.mds.extraConfig != { }) { mds = cfg.mds.extraConfig; }
// lib.optionalAttrs (cfg.osd.enable && cfg.osd.extraConfig != { }) { osd = cfg.osd.extraConfig; }
// lib.optionalAttrs (cfg.client.enable && cfg.client.extraConfig != { }) cfg.client.extraConfig;
in
lib.generators.toINI {} totalConfig;
lib.generators.toINI { } totalConfig;
users.users.ceph = {
uid = config.ids.uids.ceph;
@ -371,43 +441,54 @@ in
gid = config.ids.gids.ceph;
};
systemd.services = let
services = []
++ lib.optional cfg.mon.enable (makeServices "mon" cfg.mon.daemons)
++ lib.optional cfg.mds.enable (makeServices "mds" cfg.mds.daemons)
++ lib.optional cfg.osd.enable (makeServices "osd" cfg.osd.daemons)
++ lib.optional cfg.rgw.enable (makeServices "rgw" cfg.rgw.daemons)
++ lib.optional cfg.mgr.enable (makeServices "mgr" cfg.mgr.daemons);
systemd.services =
let
services =
[ ]
++ lib.optional cfg.mon.enable (makeServices "mon" cfg.mon.daemons)
++ lib.optional cfg.mds.enable (makeServices "mds" cfg.mds.daemons)
++ lib.optional cfg.osd.enable (makeServices "osd" cfg.osd.daemons)
++ lib.optional cfg.rgw.enable (makeServices "rgw" cfg.rgw.daemons)
++ lib.optional cfg.mgr.enable (makeServices "mgr" cfg.mgr.daemons);
in
lib.mkMerge services;
lib.mkMerge services;
systemd.targets = let
targets = [
{ ceph = {
description = "Ceph target allowing to start/stop all ceph service instances at once";
wantedBy = [ "multi-user.target" ];
unitConfig.StopWhenUnneeded = true;
}; } ]
++ lib.optional cfg.mon.enable (makeTarget "mon")
++ lib.optional cfg.mds.enable (makeTarget "mds")
++ lib.optional cfg.osd.enable (makeTarget "osd")
++ lib.optional cfg.rgw.enable (makeTarget "rgw")
++ lib.optional cfg.mgr.enable (makeTarget "mgr");
systemd.targets =
let
targets =
[
{
ceph = {
description = "Ceph target allowing to start/stop all ceph service instances at once";
wantedBy = [ "multi-user.target" ];
unitConfig.StopWhenUnneeded = true;
};
}
]
++ lib.optional cfg.mon.enable (makeTarget "mon")
++ lib.optional cfg.mds.enable (makeTarget "mds")
++ lib.optional cfg.osd.enable (makeTarget "osd")
++ lib.optional cfg.rgw.enable (makeTarget "rgw")
++ lib.optional cfg.mgr.enable (makeTarget "mgr");
in
lib.mkMerge targets;
lib.mkMerge targets;
systemd.tmpfiles.settings."10-ceph" = let
defaultConfig = {
user = "ceph";
group = "ceph";
systemd.tmpfiles.settings."10-ceph" =
let
defaultConfig = {
user = "ceph";
group = "ceph";
};
in
{
"/etc/ceph".d = defaultConfig;
"/run/ceph".d = defaultConfig // {
mode = "0770";
};
"/var/lib/ceph".d = defaultConfig;
"/var/lib/ceph/mgr".d = lib.mkIf (cfg.mgr.enable) defaultConfig;
"/var/lib/ceph/mon".d = lib.mkIf (cfg.mon.enable) defaultConfig;
"/var/lib/ceph/osd".d = lib.mkIf (cfg.osd.enable) defaultConfig;
};
in {
"/etc/ceph".d = defaultConfig;
"/run/ceph".d = defaultConfig // { mode = "0770"; };
"/var/lib/ceph".d = defaultConfig;
"/var/lib/ceph/mgr".d = lib.mkIf (cfg.mgr.enable) defaultConfig;
"/var/lib/ceph/mon".d = lib.mkIf (cfg.mon.enable) defaultConfig;
"/var/lib/ceph/osd".d = lib.mkIf (cfg.osd.enable) defaultConfig;
};
};
}

View file

@ -1,4 +1,10 @@
{ config, lib, options, pkgs, ... }:
{
config,
lib,
options,
pkgs,
...
}:
let
cfg = config.services.amule;
opt = options.services.amule;
@ -44,22 +50,25 @@ in
};
###### implementation
config = lib.mkIf cfg.enable {
users.users = lib.mkIf (cfg.user == null) [
{ name = "amule";
{
name = "amule";
description = "AMule daemon";
group = "amule";
uid = config.ids.uids.amule;
} ];
}
];
users.groups = lib.mkIf (cfg.user == null) [
{ name = "amule";
{
name = "amule";
gid = config.ids.gids.amule;
} ];
}
];
systemd.services.amuled = {
description = "AMule daemon";

View file

@ -1,44 +1,60 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.avahi;
yesNo = yes: if yes then "yes" else "no";
avahiDaemonConf = with cfg; pkgs.writeText "avahi-daemon.conf" ''
[server]
${# Users can set `networking.hostName' to the empty string, when getting
# a host name from DHCP. In that case, let Avahi take whatever the
# current host name is; setting `host-name' to the empty string in
# `avahi-daemon.conf' would be invalid.
lib.optionalString (hostName != "") "host-name=${hostName}"}
browse-domains=${lib.concatStringsSep ", " browseDomains}
use-ipv4=${yesNo ipv4}
use-ipv6=${yesNo ipv6}
${lib.optionalString (allowInterfaces!=null) "allow-interfaces=${lib.concatStringsSep "," allowInterfaces}"}
${lib.optionalString (denyInterfaces!=null) "deny-interfaces=${lib.concatStringsSep "," denyInterfaces}"}
${lib.optionalString (domainName!=null) "domain-name=${domainName}"}
allow-point-to-point=${yesNo allowPointToPoint}
${lib.optionalString (cacheEntriesMax!=null) "cache-entries-max=${toString cacheEntriesMax}"}
avahiDaemonConf =
with cfg;
pkgs.writeText "avahi-daemon.conf" ''
[server]
${
# Users can set `networking.hostName' to the empty string, when getting
# a host name from DHCP. In that case, let Avahi take whatever the
# current host name is; setting `host-name' to the empty string in
# `avahi-daemon.conf' would be invalid.
lib.optionalString (hostName != "") "host-name=${hostName}"
}
browse-domains=${lib.concatStringsSep ", " browseDomains}
use-ipv4=${yesNo ipv4}
use-ipv6=${yesNo ipv6}
${lib.optionalString (
allowInterfaces != null
) "allow-interfaces=${lib.concatStringsSep "," allowInterfaces}"}
${lib.optionalString (
denyInterfaces != null
) "deny-interfaces=${lib.concatStringsSep "," denyInterfaces}"}
${lib.optionalString (domainName != null) "domain-name=${domainName}"}
allow-point-to-point=${yesNo allowPointToPoint}
${lib.optionalString (cacheEntriesMax != null) "cache-entries-max=${toString cacheEntriesMax}"}
[wide-area]
enable-wide-area=${yesNo wideArea}
[wide-area]
enable-wide-area=${yesNo wideArea}
[publish]
disable-publishing=${yesNo (!publish.enable)}
disable-user-service-publishing=${yesNo (!publish.userServices)}
publish-addresses=${yesNo (publish.userServices || publish.addresses)}
publish-hinfo=${yesNo publish.hinfo}
publish-workstation=${yesNo publish.workstation}
publish-domain=${yesNo publish.domain}
[publish]
disable-publishing=${yesNo (!publish.enable)}
disable-user-service-publishing=${yesNo (!publish.userServices)}
publish-addresses=${yesNo (publish.userServices || publish.addresses)}
publish-hinfo=${yesNo publish.hinfo}
publish-workstation=${yesNo publish.workstation}
publish-domain=${yesNo publish.domain}
[reflector]
enable-reflector=${yesNo reflector}
${extraConfig}
'';
[reflector]
enable-reflector=${yesNo reflector}
${extraConfig}
'';
in
{
imports = [
(lib.mkRenamedOptionModule [ "services" "avahi" "interfaces" ] [ "services" "avahi" "allowInterfaces" ])
(lib.mkRenamedOptionModule
[ "services" "avahi" "interfaces" ]
[ "services" "avahi" "allowInterfaces" ]
)
(lib.mkRenamedOptionModule [ "services" "avahi" "nssmdns" ] [ "services" "avahi" "nssmdns4" ])
];
@ -77,7 +93,10 @@ in
browseDomains = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "0pointer.de" "zeroconf.org" ];
example = [
"0pointer.de"
"zeroconf.org"
];
description = ''
List of non-local DNS domains to be browsed.
'';
@ -270,28 +289,35 @@ in
users.groups.avahi = { };
system.nssModules = lib.optional (cfg.nssmdns4 || cfg.nssmdns6) pkgs.nssmdns;
system.nssDatabases.hosts = let
mdns = if (cfg.nssmdns4 && cfg.nssmdns6) then
"mdns"
else if (!cfg.nssmdns4 && cfg.nssmdns6) then
"mdns6"
else if (cfg.nssmdns4 && !cfg.nssmdns6) then
"mdns4"
else
"";
in lib.optionals (cfg.nssmdns4 || cfg.nssmdns6) (lib.mkMerge [
(lib.mkBefore [ "${mdns}_minimal [NOTFOUND=return]" ]) # before resolve
(lib.mkAfter [ "${mdns}" ]) # after dns
]);
system.nssDatabases.hosts =
let
mdns =
if (cfg.nssmdns4 && cfg.nssmdns6) then
"mdns"
else if (!cfg.nssmdns4 && cfg.nssmdns6) then
"mdns6"
else if (cfg.nssmdns4 && !cfg.nssmdns6) then
"mdns4"
else
"";
in
lib.optionals (cfg.nssmdns4 || cfg.nssmdns6) (
lib.mkMerge [
(lib.mkBefore [ "${mdns}_minimal [NOTFOUND=return]" ]) # before resolve
(lib.mkAfter [ "${mdns}" ]) # after dns
]
);
environment.systemPackages = [ cfg.package ];
environment.etc = (lib.mapAttrs'
(n: v: lib.nameValuePair
"avahi/services/${n}.service"
{ ${if lib.types.path.check v then "source" else "text"} = v; }
)
cfg.extraServiceFiles);
environment.etc = (
lib.mapAttrs' (
n: v:
lib.nameValuePair "avahi/services/${n}.service" {
${if lib.types.path.check v then "source" else "text"} = v;
}
) cfg.extraServiceFiles
);
systemd.sockets.avahi-daemon = {
description = "Avahi mDNS/DNS-SD Stack Activation Socket";
@ -316,7 +342,10 @@ in
# return a sensible value.
environment.LD_LIBRARY_PATH = config.system.nssModules.path;
path = [ pkgs.coreutils cfg.package ];
path = [
pkgs.coreutils
cfg.package
];
serviceConfig = {
NotifyAccess = "main";

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.bind;
@ -7,97 +12,114 @@ let
bindUser = "named";
bindZoneCoerce = list: builtins.listToAttrs (lib.forEach list (zone: { name = zone.name; value = zone; }));
bindZoneCoerce =
list:
builtins.listToAttrs (
lib.forEach list (zone: {
name = zone.name;
value = zone;
})
);
bindZoneOptions = { name, config, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "Name of the zone.";
};
master = lib.mkOption {
description = "Master=false means slave server";
type = lib.types.bool;
};
file = lib.mkOption {
type = lib.types.either lib.types.str lib.types.path;
description = "Zone file resource records contain columns of data, separated by whitespace, that define the record.";
};
masters = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "List of servers for inclusion in stub and secondary zones.";
};
slaves = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Addresses who may request zone transfers.";
default = [ ];
};
allowQuery = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
List of address ranges allowed to query this zone. Instead of the address(es), this may instead
contain the single string "any".
'';
default = [ "any" ];
};
extraConfig = lib.mkOption {
type = lib.types.lines;
description = "Extra zone config to be appended at the end of the zone section.";
default = "";
bindZoneOptions =
{ name, config, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "Name of the zone.";
};
master = lib.mkOption {
description = "Master=false means slave server";
type = lib.types.bool;
};
file = lib.mkOption {
type = lib.types.either lib.types.str lib.types.path;
description = "Zone file resource records contain columns of data, separated by whitespace, that define the record.";
};
masters = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "List of servers for inclusion in stub and secondary zones.";
};
slaves = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Addresses who may request zone transfers.";
default = [ ];
};
allowQuery = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
List of address ranges allowed to query this zone. Instead of the address(es), this may instead
contain the single string "any".
'';
default = [ "any" ];
};
extraConfig = lib.mkOption {
type = lib.types.lines;
description = "Extra zone config to be appended at the end of the zone section.";
default = "";
};
};
};
};
confFile = pkgs.writeText "named.conf"
''
include "/etc/bind/rndc.key";
controls {
inet 127.0.0.1 allow {localhost;} keys {"rndc-key";};
};
confFile = pkgs.writeText "named.conf" ''
include "/etc/bind/rndc.key";
controls {
inet 127.0.0.1 allow {localhost;} keys {"rndc-key";};
};
acl cachenetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.cacheNetworks} };
acl badnetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.blockedNetworks} };
acl cachenetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.cacheNetworks} };
acl badnetworks { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.blockedNetworks} };
options {
listen-on { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOn} };
listen-on-v6 { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOnIpv6} };
allow-query-cache { cachenetworks; };
blackhole { badnetworks; };
forward ${cfg.forward};
forwarders { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
directory "${cfg.directory}";
pid-file "/run/named/named.pid";
${cfg.extraOptions}
};
options {
listen-on { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOn} };
listen-on-v6 { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.listenOnIpv6} };
allow-query-cache { cachenetworks; };
blackhole { badnetworks; };
forward ${cfg.forward};
forwarders { ${lib.concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
directory "${cfg.directory}";
pid-file "/run/named/named.pid";
${cfg.extraOptions}
};
${cfg.extraConfig}
${cfg.extraConfig}
${ lib.concatMapStrings
({ name, file, master ? true, slaves ? [], masters ? [], allowQuery ? [], extraConfig ? "" }:
''
zone "${name}" {
type ${if master then "master" else "slave"};
file "${file}";
${ if master then
''
allow-transfer {
${lib.concatMapStrings (ip: "${ip};\n") slaves}
};
''
else
''
masters {
${lib.concatMapStrings (ip: "${ip};\n") masters}
};
''
}
allow-query { ${lib.concatMapStrings (ip: "${ip}; ") allowQuery}};
${extraConfig}
};
'')
(lib.attrValues cfg.zones) }
'';
${lib.concatMapStrings (
{
name,
file,
master ? true,
slaves ? [ ],
masters ? [ ],
allowQuery ? [ ],
extraConfig ? "",
}:
''
zone "${name}" {
type ${if master then "master" else "slave"};
file "${file}";
${
if master then
''
allow-transfer {
${lib.concatMapStrings (ip: "${ip};\n") slaves}
};
''
else
''
masters {
${lib.concatMapStrings (ip: "${ip};\n") masters}
};
''
}
allow-query { ${lib.concatMapStrings (ip: "${ip}; ") allowQuery}};
${extraConfig}
};
''
) (lib.attrValues cfg.zones)}
'';
in
@ -111,11 +133,13 @@ in
enable = lib.mkEnableOption "BIND domain name server";
package = lib.mkPackageOption pkgs "bind" { };
cacheNetworks = lib.mkOption {
default = [ "127.0.0.0/24" "::1/128" ];
default = [
"127.0.0.0/24"
"::1/128"
];
type = lib.types.listOf lib.types.str;
description = ''
What networks are allowed to use us as a resolver. Note
@ -155,7 +179,10 @@ in
forward = lib.mkOption {
default = "first";
type = lib.types.enum ["first" "only"];
type = lib.types.enum [
"first"
"only"
];
description = ''
Whether to forward 'first' (try forwarding but lookup directly if forwarding fails) or 'only'.
'';
@ -185,7 +212,9 @@ in
zones = lib.mkOption {
default = [ ];
type = with lib.types; coercedTo (listOf attrs) bindZoneCoerce (attrsOf (lib.types.submodule bindZoneOptions));
type =
with lib.types;
coercedTo (listOf attrs) bindZoneCoerce (attrsOf (lib.types.submodule bindZoneOptions));
description = ''
List of zones we claim authority over.
'';
@ -231,20 +260,18 @@ in
};
###### implementation
config = lib.mkIf cfg.enable {
networking.resolvconf.useLocalResolver = lib.mkDefault true;
users.users.${bindUser} =
{
group = bindUser;
description = "BIND daemon user";
isSystemUser = true;
};
users.groups.${bindUser} = {};
users.users.${bindUser} = {
group = bindUser;
description = "BIND daemon user";
isSystemUser = true;
};
users.groups.${bindUser} = { };
systemd.tmpfiles.settings."bind" = lib.mkIf (cfg.directory != "/run/named") {
${cfg.directory} = {
@ -276,7 +303,9 @@ in
RuntimeDirectoryPreserve = "yes";
ConfigurationDirectory = "bind";
ReadWritePaths = [
(lib.mapAttrsToList (name: config: if (lib.hasPrefix "/" config.file) then ("-${dirOf config.file}") else "") cfg.zones)
(lib.mapAttrsToList (
name: config: if (lib.hasPrefix "/" config.file) then ("-${dirOf config.file}") else ""
) cfg.zones)
cfg.directory
];
CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";

View file

@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.ddclient;
boolToStr = bool: if bool then "yes" else "no";
@ -14,11 +19,16 @@ let
${lib.optionalString (cfg.use == "" && cfg.usev4 != "") "usev4=${cfg.usev4}"}
${lib.optionalString (cfg.use == "" && cfg.usev6 != "") "usev6=${cfg.usev6}"}
login=${cfg.username}
password=${if cfg.protocol == "nsupdate" then "/run/${RuntimeDirectory}/ddclient.key" else "@password_placeholder@"}
password=${
if cfg.protocol == "nsupdate" then
"/run/${RuntimeDirectory}/ddclient.key"
else
"@password_placeholder@"
}
protocol=${cfg.protocol}
${lib.optionalString (cfg.script != "") "script=${cfg.script}"}
${lib.optionalString (cfg.server != "") "server=${cfg.server}"}
${lib.optionalString (cfg.zone != "") "zone=${cfg.zone}"}
${lib.optionalString (cfg.zone != "") "zone=${cfg.zone}"}
ssl=${boolToStr cfg.ssl}
wildcard=YES
quiet=${boolToStr cfg.quiet}
@ -30,24 +40,38 @@ let
preStart = ''
install --mode=600 --owner=$USER ${configFile} /run/${RuntimeDirectory}/ddclient.conf
${lib.optionalString (cfg.configFile == null) (if (cfg.protocol == "nsupdate") then ''
install --mode=600 --owner=$USER ${cfg.passwordFile} /run/${RuntimeDirectory}/ddclient.key
'' else if (cfg.passwordFile != null) then ''
"${pkgs.replace-secret}/bin/replace-secret" "@password_placeholder@" "${cfg.passwordFile}" "/run/${RuntimeDirectory}/ddclient.conf"
'' else ''
sed -i '/^password=@password_placeholder@$/d' /run/${RuntimeDirectory}/ddclient.conf
'')}
${lib.optionalString (cfg.configFile == null) (
if (cfg.protocol == "nsupdate") then
''
install --mode=600 --owner=$USER ${cfg.passwordFile} /run/${RuntimeDirectory}/ddclient.key
''
else if (cfg.passwordFile != null) then
''
"${pkgs.replace-secret}/bin/replace-secret" "@password_placeholder@" "${cfg.passwordFile}" "/run/${RuntimeDirectory}/ddclient.conf"
''
else
''
sed -i '/^password=@password_placeholder@$/d' /run/${RuntimeDirectory}/ddclient.conf
''
)}
'';
in
{
imports = [
(lib.mkChangedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ]
(config:
let value = lib.getAttrFromPath [ "services" "ddclient" "domain" ] config;
in lib.optional (value != "") value))
(lib.mkChangedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ] (
config:
let
value = lib.getAttrFromPath [ "services" "ddclient" "domain" ] config;
in
lib.optional (value != "") value
))
(lib.mkRemovedOptionModule [ "services" "ddclient" "homeDir" ] "")
(lib.mkRemovedOptionModule [ "services" "ddclient" "password" ] "Use services.ddclient.passwordFile instead.")
(lib.mkRemovedOptionModule [
"services"
"ddclient"
"password"
] "Use services.ddclient.passwordFile instead.")
(lib.mkRemovedOptionModule [ "services" "ddclient" "ipv6" ] "")
];
@ -84,7 +108,9 @@ in
username = lib.mkOption {
# For `nsupdate` username contains the path to the nsupdate executable
default = lib.optionalString (config.services.ddclient.protocol == "nsupdate") "${pkgs.bind.dnsutils}/bin/nsupdate";
default = lib.optionalString (
config.services.ddclient.protocol == "nsupdate"
) "${pkgs.bind.dnsutils}/bin/nsupdate";
defaultText = "";
type = str;
description = ''
@ -211,18 +237,21 @@ in
};
};
###### implementation
config = lib.mkIf config.services.ddclient.enable {
warnings = lib.optional (cfg.use != "") "Setting `use` is deprecated, ddclient now supports `usev4` and `usev6` for separate IPv4/IPv6 configuration.";
warnings =
lib.optional (cfg.use != "")
"Setting `use` is deprecated, ddclient now supports `usev4` and `usev6` for separate IPv4/IPv6 configuration.";
systemd.services.ddclient = {
description = "Dynamic DNS Client";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
restartTriggers = lib.optional (cfg.configFile != null) cfg.configFile;
path = lib.optional (lib.hasPrefix "if," cfg.use || lib.hasPrefix "ifv4," cfg.usev4 || lib.hasPrefix "ifv6," cfg.usev6) pkgs.iproute2;
path = lib.optional (
lib.hasPrefix "if," cfg.use || lib.hasPrefix "ifv4," cfg.usev4 || lib.hasPrefix "ifv6," cfg.usev6
) pkgs.iproute2;
serviceConfig = {
DynamicUser = true;

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
dhcpcd = if !config.boot.isContainer then pkgs.dhcpcd else pkgs.dhcpcd.override { udev = null; };
@ -7,86 +12,109 @@ let
interfaces = lib.attrValues config.networking.interfaces;
enableDHCP = config.networking.dhcpcd.enable &&
(config.networking.useDHCP || lib.any (i: i.useDHCP == true) interfaces);
enableDHCP =
config.networking.dhcpcd.enable
&& (config.networking.useDHCP || lib.any (i: i.useDHCP == true) interfaces);
useResolvConf = config.networking.resolvconf.enable;
# Don't start dhcpcd on explicitly configured interfaces or on
# interfaces that are part of a bridge, bond or sit device.
ignoredInterfaces =
map (i: i.name) (lib.filter (i: if i.useDHCP != null then !i.useDHCP else i.ipv4.addresses != [ ]) interfaces)
map (i: i.name) (
lib.filter (i: if i.useDHCP != null then !i.useDHCP else i.ipv4.addresses != [ ]) interfaces
)
++ lib.mapAttrsToList (i: _: i) config.networking.sits
++ lib.concatLists (lib.attrValues (lib.mapAttrs (n: v: v.interfaces) config.networking.bridges))
++ lib.flatten (lib.concatMap (i: lib.attrNames (lib.filterAttrs (_: config: config.type != "internal") i.interfaces)) (lib.attrValues config.networking.vswitches))
++ lib.flatten (
lib.concatMap (
i: lib.attrNames (lib.filterAttrs (_: config: config.type != "internal") i.interfaces)
) (lib.attrValues config.networking.vswitches)
)
++ lib.concatLists (lib.attrValues (lib.mapAttrs (n: v: v.interfaces) config.networking.bonds))
++ config.networking.dhcpcd.denyInterfaces;
arrayAppendOrNull = a1: a2: if a1 == null && a2 == null then null
else if a1 == null then a2 else if a2 == null then a1
else a1 ++ a2;
arrayAppendOrNull =
a1: a2:
if a1 == null && a2 == null then
null
else if a1 == null then
a2
else if a2 == null then
a1
else
a1 ++ a2;
# If dhcp is disabled but explicit interfaces are enabled,
# we need to provide dhcp just for those interfaces.
allowInterfaces = arrayAppendOrNull cfg.allowInterfaces
(if !config.networking.useDHCP && enableDHCP then
map (i: i.name) (lib.filter (i: i.useDHCP == true) interfaces) else null);
allowInterfaces = arrayAppendOrNull cfg.allowInterfaces (
if !config.networking.useDHCP && enableDHCP then
map (i: i.name) (lib.filter (i: i.useDHCP == true) interfaces)
else
null
);
staticIPv6Addresses = map (i: i.name) (lib.filter (i: i.ipv6.addresses != [ ]) interfaces);
noIPv6rs = lib.concatStringsSep "\n" (map (name: ''
interface ${name}
noipv6rs
'') staticIPv6Addresses);
noIPv6rs = lib.concatStringsSep "\n" (
map (name: ''
interface ${name}
noipv6rs
'') staticIPv6Addresses
);
# Config file adapted from the one that ships with dhcpcd.
dhcpcdConf = pkgs.writeText "dhcpcd.conf"
''
# Inform the DHCP server of our hostname for DDNS.
hostname
dhcpcdConf = pkgs.writeText "dhcpcd.conf" ''
# Inform the DHCP server of our hostname for DDNS.
hostname
# A list of options to request from the DHCP server.
option domain_name_servers, domain_name, domain_search, host_name
option classless_static_routes, ntp_servers, interface_mtu
# A list of options to request from the DHCP server.
option domain_name_servers, domain_name, domain_search, host_name
option classless_static_routes, ntp_servers, interface_mtu
# A ServerID is required by RFC2131.
# Commented out because of many non-compliant DHCP servers in the wild :(
#require dhcp_server_identifier
# A ServerID is required by RFC2131.
# Commented out because of many non-compliant DHCP servers in the wild :(
#require dhcp_server_identifier
# A hook script is provided to lookup the hostname if not set by
# the DHCP server, but it should not be run by default.
nohook lookup-hostname
# A hook script is provided to lookup the hostname if not set by
# the DHCP server, but it should not be run by default.
nohook lookup-hostname
# Ignore peth* devices; on Xen, they're renamed physical
# Ethernet cards used for bridging. Likewise for vif* and tap*
# (Xen) and virbr* and vnet* (libvirt).
denyinterfaces ${toString ignoredInterfaces} lo peth* vif* tap* tun* virbr* vnet* vboxnet* sit*
# Ignore peth* devices; on Xen, they're renamed physical
# Ethernet cards used for bridging. Likewise for vif* and tap*
# (Xen) and virbr* and vnet* (libvirt).
denyinterfaces ${toString ignoredInterfaces} lo peth* vif* tap* tun* virbr* vnet* vboxnet* sit*
# Use the list of allowed interfaces if specified
${lib.optionalString (allowInterfaces != null) "allowinterfaces ${toString allowInterfaces}"}
# Use the list of allowed interfaces if specified
${lib.optionalString (allowInterfaces != null) "allowinterfaces ${toString allowInterfaces}"}
# Immediately fork to background if specified, otherwise wait for IP address to be assigned
${{
# Immediately fork to background if specified, otherwise wait for IP address to be assigned
${
{
background = "background";
any = "waitip";
ipv4 = "waitip 4";
ipv6 = "waitip 6";
both = "waitip 4\nwaitip 6";
if-carrier-up = "";
}.${cfg.wait}}
}
.${cfg.wait}
}
${lib.optionalString (config.networking.enableIPv6 == false) ''
# Don't solicit or accept IPv6 Router Advertisements and DHCPv6 if disabled IPv6
noipv6
''}
${lib.optionalString (config.networking.enableIPv6 == false) ''
# Don't solicit or accept IPv6 Router Advertisements and DHCPv6 if disabled IPv6
noipv6
''}
${lib.optionalString (config.networking.enableIPv6 && cfg.IPv6rs == null && staticIPv6Addresses != [ ]) noIPv6rs}
${lib.optionalString (config.networking.enableIPv6 && cfg.IPv6rs == false) ''
noipv6rs
''}
${lib.optionalString (
config.networking.enableIPv6 && cfg.IPv6rs == null && staticIPv6Addresses != [ ]
) noIPv6rs}
${lib.optionalString (config.networking.enableIPv6 && cfg.IPv6rs == false) ''
noipv6rs
''}
${cfg.extraConfig}
'';
${cfg.extraConfig}
'';
in
@ -109,22 +137,22 @@ in
type = lib.types.bool;
default = false;
description = ''
Whenever to leave interfaces configured on dhcpcd daemon
shutdown. Set to true if you have your root or store mounted
over the network or this machine accepts SSH connections
through DHCP interfaces and clients should be notified when
it shuts down.
Whenever to leave interfaces configured on dhcpcd daemon
shutdown. Set to true if you have your root or store mounted
over the network or this machine accepts SSH connections
through DHCP interfaces and clients should be notified when
it shuts down.
'';
};
networking.dhcpcd.denyInterfaces = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
default = [ ];
description = ''
Disable the DHCP client for any interface whose name matches
any of the shell glob patterns in this list. The purpose of
this option is to blacklist virtual interfaces such as those
created by Xen, libvirt, LXC, etc.
Disable the DHCP client for any interface whose name matches
any of the shell glob patterns in this list. The purpose of
this option is to blacklist virtual interfaces such as those
created by Xen, libvirt, LXC, etc.
'';
};
@ -132,10 +160,10 @@ in
type = lib.types.nullOr (lib.types.listOf lib.types.str);
default = null;
description = ''
Enable the DHCP client for any interface whose name matches
any of the shell glob patterns in this list. Any interface not
explicitly matched by this pattern will be denied. This pattern only
applies when non-null.
Enable the DHCP client for any interface whose name matches
any of the shell glob patterns in this list. Any interface not
explicitly matched by this pattern will be denied. This pattern only
applies when non-null.
'';
};
@ -143,7 +171,7 @@ in
type = lib.types.lines;
default = "";
description = ''
Literal string to append to the config file generated for dhcpcd.
Literal string to append to the config file generated for dhcpcd.
'';
};
@ -162,26 +190,33 @@ in
default = "";
example = "if [[ $reason =~ BOUND ]]; then echo $interface: Routers are $new_routers - were $old_routers; fi";
description = ''
Shell code that will be run after all other hooks. See
`man dhcpcd-run-hooks` for details on what is possible.
Shell code that will be run after all other hooks. See
`man dhcpcd-run-hooks` for details on what is possible.
::: {.note}
To use sudo or similar tools in your script you may have to set:
::: {.note}
To use sudo or similar tools in your script you may have to set:
systemd.services.dhcpcd.serviceConfig.NoNewPrivileges = false;
systemd.services.dhcpcd.serviceConfig.NoNewPrivileges = false;
In addition, as most of the filesystem is inaccessible to dhcpcd
by default, you may want to define some exceptions, e.g.
In addition, as most of the filesystem is inaccessible to dhcpcd
by default, you may want to define some exceptions, e.g.
systemd.services.dhcpcd.serviceConfig.ReadOnlyPaths = [
"/run/user/1000/bus" # to send desktop notifications
];
:::
systemd.services.dhcpcd.serviceConfig.ReadOnlyPaths = [
"/run/user/1000/bus" # to send desktop notifications
];
:::
'';
};
networking.dhcpcd.wait = lib.mkOption {
type = lib.types.enum [ "background" "any" "ipv4" "ipv6" "both" "if-carrier-up" ];
type = lib.types.enum [
"background"
"any"
"ipv4"
"ipv6"
"both"
"if-carrier-up"
];
default = "any";
description = ''
This option specifies when the dhcpcd service will fork to background.
@ -197,22 +232,27 @@ in
};
###### implementation
config = lib.mkIf enableDHCP {
systemd.services.dhcpcd = let
cfgN = config.networking;
hasDefaultGatewaySet = (cfgN.defaultGateway != null && cfgN.defaultGateway.address != "")
&& (!cfgN.enableIPv6 || (cfgN.defaultGateway6 != null && cfgN.defaultGateway6.address != ""));
in
{ description = "DHCP Client";
systemd.services.dhcpcd =
let
cfgN = config.networking;
hasDefaultGatewaySet =
(cfgN.defaultGateway != null && cfgN.defaultGateway.address != "")
&& (!cfgN.enableIPv6 || (cfgN.defaultGateway6 != null && cfgN.defaultGateway6.address != ""));
in
{
description = "DHCP Client";
documentation = [ "man:dhcpcd(8)" ];
wantedBy = [ "multi-user.target" ] ++ lib.optional (!hasDefaultGatewaySet) "network-online.target";
wants = [ "network.target" "resolvconf.service" ];
wants = [
"network.target"
"resolvconf.service"
];
after = [ "resolvconf.service" ];
before = [ "network-online.target" ];
@ -223,67 +263,93 @@ in
# dhcpcd. So do a "systemctl restart" instead.
stopIfChanged = false;
path = [ dhcpcd pkgs.nettools config.networking.resolvconf.package ];
path = [
dhcpcd
pkgs.nettools
config.networking.resolvconf.package
];
unitConfig.ConditionCapability = "CAP_NET_ADMIN";
serviceConfig =
{ Type = "forking";
PIDFile = "/run/dhcpcd/pid";
SupplementaryGroups = lib.optional useResolvConf "resolvconf";
User = "dhcpcd";
Group = "dhcpcd";
StateDirectory = "dhcpcd";
RuntimeDirectory = "dhcpcd";
serviceConfig = {
Type = "forking";
PIDFile = "/run/dhcpcd/pid";
SupplementaryGroups = lib.optional useResolvConf "resolvconf";
User = "dhcpcd";
Group = "dhcpcd";
StateDirectory = "dhcpcd";
RuntimeDirectory = "dhcpcd";
ExecStartPre = "+${pkgs.writeShellScript "migrate-dhcpcd" ''
# migrate from old database directory
if test -f /var/db/dhcpcd/duid; then
echo 'migrating DHCP leases from /var/db/dhcpcd to /var/lib/dhcpcd ...'
mv /var/db/dhcpcd/* -t /var/lib/dhcpcd
chown dhcpcd:dhcpcd /var/lib/dhcpcd/*
rmdir /var/db/dhcpcd || true
echo done
fi
''}";
ExecStartPre = "+${pkgs.writeShellScript "migrate-dhcpcd" ''
# migrate from old database directory
if test -f /var/db/dhcpcd/duid; then
echo 'migrating DHCP leases from /var/db/dhcpcd to /var/lib/dhcpcd ...'
mv /var/db/dhcpcd/* -t /var/lib/dhcpcd
chown dhcpcd:dhcpcd /var/lib/dhcpcd/*
rmdir /var/db/dhcpcd || true
echo done
fi
''}";
ExecStart = "@${dhcpcd}/sbin/dhcpcd dhcpcd --quiet ${lib.optionalString cfg.persistent "--persistent"} --config ${dhcpcdConf}";
ExecReload = "${dhcpcd}/sbin/dhcpcd --rebind";
Restart = "always";
AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_RAW" "CAP_NET_BIND_SERVICE" ];
CapabilityBoundingSet = [ "CAP_NET_ADMIN" "CAP_NET_RAW" "CAP_NET_BIND_SERVICE" ];
ReadWritePaths = [ "/proc/sys/net/ipv4" ]
++ lib.optional cfgN.enableIPv6 "/proc/sys/net/ipv6"
++ lib.optionals useResolvConf ([ "/run/resolvconf" ] ++ config.networking.resolvconf.subscriberFiles);
DeviceAllow = "";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = lib.mkDefault true; # may be disabled for sudo in runHook
PrivateDevices = true;
PrivateMounts = true;
PrivateTmp = true;
PrivateUsers = false;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = "tmpfs"; # allow exceptions to be added to ReadOnlyPaths, etc.
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" "AF_PACKET" ];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallFilter = [
"@system-service"
"~@aio" "~@keyring" "~@memlock" "~@mount" "~@privileged" "~@resources"
];
SystemCallArchitectures = "native";
UMask = "0027";
};
ExecStart = "@${dhcpcd}/sbin/dhcpcd dhcpcd --quiet ${lib.optionalString cfg.persistent "--persistent"} --config ${dhcpcdConf}";
ExecReload = "${dhcpcd}/sbin/dhcpcd --rebind";
Restart = "always";
AmbientCapabilities = [
"CAP_NET_ADMIN"
"CAP_NET_RAW"
"CAP_NET_BIND_SERVICE"
];
CapabilityBoundingSet = [
"CAP_NET_ADMIN"
"CAP_NET_RAW"
"CAP_NET_BIND_SERVICE"
];
ReadWritePaths =
[ "/proc/sys/net/ipv4" ]
++ lib.optional cfgN.enableIPv6 "/proc/sys/net/ipv6"
++ lib.optionals useResolvConf (
[ "/run/resolvconf" ] ++ config.networking.resolvconf.subscriberFiles
);
DeviceAllow = "";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = lib.mkDefault true; # may be disabled for sudo in runHook
PrivateDevices = true;
PrivateMounts = true;
PrivateTmp = true;
PrivateUsers = false;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = "tmpfs"; # allow exceptions to be added to ReadOnlyPaths, etc.
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
"AF_NETLINK"
"AF_PACKET"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallFilter = [
"@system-service"
"~@aio"
"~@keyring"
"~@memlock"
"~@mount"
"~@privileged"
"~@resources"
];
SystemCallArchitectures = "native";
UMask = "0027";
};
};
# Note: the service could run with `DynamicUser`, however that makes
@ -294,17 +360,16 @@ in
isSystemUser = true;
group = "dhcpcd";
};
users.groups.dhcpcd = {};
users.groups.dhcpcd = { };
environment.systemPackages = [ dhcpcd ];
environment.etc."dhcpcd.exit-hook".text = cfg.runHook;
powerManagement.resumeCommands = lib.mkIf config.systemd.services.dhcpcd.enable
''
# Tell dhcpcd to rebind its interfaces if it's running.
/run/current-system/systemd/bin/systemctl reload dhcpcd.service
'';
powerManagement.resumeCommands = lib.mkIf config.systemd.services.dhcpcd.enable ''
# Tell dhcpcd to rebind its interfaces if it's running.
/run/current-system/systemd/bin/systemctl reload dhcpcd.service
'';
security.polkit.extraConfig = lib.mkIf config.services.resolved.enable ''
polkit.addRule(function(action, subject) {

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.dnsmasq;
dnsmasq = cfg.package;
@ -9,11 +14,12 @@ let
# lib.mkForce)
formatKeyValue =
name: value:
if value == true
then name
else if value == false
then "# setting `${name}` explicitly set to false"
else lib.generators.mkKeyValueDefault { } "=" name value;
if value == true then
name
else if value == false then
"# setting `${name}` explicitly set to false"
else
lib.generators.mkKeyValueDefault { } "=" name value;
settingsFormat = pkgs.formats.keyValue {
mkKeyValue = formatKeyValue;
@ -27,8 +33,15 @@ in
{
imports = [
(lib.mkRenamedOptionModule [ "services" "dnsmasq" "servers" ] [ "services" "dnsmasq" "settings" "server" ])
(lib.mkRemovedOptionModule [ "services" "dnsmasq" "extraConfig" ] "This option has been replaced by `services.dnsmasq.settings`")
(lib.mkRenamedOptionModule
[ "services" "dnsmasq" "servers" ]
[ "services" "dnsmasq" "settings" "server" ]
)
(lib.mkRemovedOptionModule [
"services"
"dnsmasq"
"extraConfig"
] "This option has been replaced by `services.dnsmasq.settings`")
];
###### interface
@ -45,7 +58,7 @@ in
'';
};
package = lib.mkPackageOption pkgs "dnsmasq" {};
package = lib.mkPackageOption pkgs "dnsmasq" { };
resolveLocalQueries = lib.mkOption {
type = lib.types.bool;
@ -72,7 +85,10 @@ in
options.server = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "8.8.8.8" "8.8.4.4" ];
example = [
"8.8.8.8"
"8.8.4.4"
];
description = ''
The DNS servers which dnsmasq should query.
'';
@ -103,7 +119,6 @@ in
};
###### implementation
config = lib.mkIf cfg.enable {
@ -114,8 +129,7 @@ in
resolv-file = lib.mkDefault (lib.optional cfg.resolveLocalQueries "/etc/dnsmasq-resolv.conf");
};
networking.nameservers =
lib.optional cfg.resolveLocalQueries "127.0.0.1";
networking.nameservers = lib.optional cfg.resolveLocalQueries "127.0.0.1";
services.dbus.packages = [ dnsmasq ];
@ -124,7 +138,7 @@ in
group = "dnsmasq";
description = "Dnsmasq daemon user";
};
users.groups.dnsmasq = {};
users.groups.dnsmasq = { };
networking.resolvconf = lib.mkIf cfg.resolveLocalQueries {
useLocalResolver = lib.mkDefault true;
@ -141,28 +155,31 @@ in
};
systemd.services.dnsmasq = {
description = "Dnsmasq Daemon";
after = [ "network.target" "systemd-resolved.service" ];
wantedBy = [ "multi-user.target" ];
path = [ dnsmasq ];
preStart = ''
mkdir -m 755 -p ${stateDir}
touch ${stateDir}/dnsmasq.leases
chown -R dnsmasq ${stateDir}
${lib.optionalString cfg.resolveLocalQueries "touch /etc/dnsmasq-{conf,resolv}.conf"}
dnsmasq --test
'';
serviceConfig = {
Type = "dbus";
BusName = "uk.org.thekelleys.dnsmasq";
ExecStart = "${dnsmasq}/bin/dnsmasq -k --enable-dbus --user=dnsmasq -C ${dnsmasqConf}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
PrivateTmp = true;
ProtectSystem = true;
ProtectHome = true;
Restart = if cfg.alwaysKeepRunning then "always" else "on-failure";
};
restartTriggers = [ config.environment.etc.hosts.source ];
description = "Dnsmasq Daemon";
after = [
"network.target"
"systemd-resolved.service"
];
wantedBy = [ "multi-user.target" ];
path = [ dnsmasq ];
preStart = ''
mkdir -m 755 -p ${stateDir}
touch ${stateDir}/dnsmasq.leases
chown -R dnsmasq ${stateDir}
${lib.optionalString cfg.resolveLocalQueries "touch /etc/dnsmasq-{conf,resolv}.conf"}
dnsmasq --test
'';
serviceConfig = {
Type = "dbus";
BusName = "uk.org.thekelleys.dnsmasq";
ExecStart = "${dnsmasq}/bin/dnsmasq -k --enable-dbus --user=dnsmasq -C ${dnsmasqConf}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
PrivateTmp = true;
ProtectSystem = true;
ProtectHome = true;
Restart = if cfg.alwaysKeepRunning then "always" else "on-failure";
};
restartTriggers = [ config.environment.etc.hosts.source ];
};
};

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.ejabberd;
@ -8,11 +13,14 @@ let
${cfg.ctlConfig}
'';
ectl = ''${cfg.package}/bin/ejabberdctl ${lib.optionalString (cfg.configFile != null) "--config ${cfg.configFile}"} --ctl-config "${ctlcfg}" --spool "${cfg.spoolDir}" --logs "${cfg.logsDir}"'';
ectl = ''${cfg.package}/bin/ejabberdctl ${
lib.optionalString (cfg.configFile != null) "--config ${cfg.configFile}"
} --ctl-config "${ctlcfg}" --spool "${cfg.spoolDir}" --logs "${cfg.logsDir}"'';
dumps = lib.escapeShellArgs cfg.loadDumps;
in {
in
{
###### interface
@ -66,7 +74,7 @@ in {
loadDumps = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [];
default = [ ];
description = "Configuration dumps that should be loaded on the first startup";
example = lib.literalExpression "[ ./myejabberd.dump ]";
};
@ -80,7 +88,6 @@ in {
};
###### implementation
config = lib.mkIf cfg.enable {
@ -103,7 +110,10 @@ in {
description = "ejabberd server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = [ pkgs.findutils pkgs.coreutils ] ++ lib.optional cfg.imagemagick pkgs.imagemagick;
path = [
pkgs.findutils
pkgs.coreutils
] ++ lib.optional cfg.imagemagick pkgs.imagemagick;
serviceConfig = {
User = cfg.user;
@ -148,7 +158,7 @@ in {
"d '${cfg.spoolDir}' 0700 ${cfg.user} ${cfg.group} -"
];
security.pam.services.ejabberd = {};
security.pam.services.ejabberd = { };
};

View file

@ -1,43 +1,55 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.hickory-dns;
toml = pkgs.formats.toml { };
zoneType = lib.types.submodule ({ config, ... }: {
freeformType = toml.type;
options = with lib; {
zone = mkOption {
type = types.str;
description = ''
Zone name, like "example.com", "localhost", or "0.0.127.in-addr.arpa".
'';
};
zone_type = mkOption {
type = types.enum [ "Primary" "Secondary" "External" ];
default = "Primary";
description = ''
One of:
- "Primary" (the master, authority for the zone).
- "Secondary" (the slave, replicated from the primary).
- "External" (a cached zone that queries other nameservers).
zoneType = lib.types.submodule (
{ config, ... }:
{
freeformType = toml.type;
options = with lib; {
zone = mkOption {
type = types.str;
description = ''
Zone name, like "example.com", "localhost", or "0.0.127.in-addr.arpa".
'';
};
zone_type = mkOption {
type = types.enum [
"Primary"
"Secondary"
"External"
];
default = "Primary";
description = ''
One of:
- "Primary" (the master, authority for the zone).
- "Secondary" (the slave, replicated from the primary).
- "External" (a cached zone that queries other nameservers).
For more details about these zone types, consult the documentation for BIND,
though note that hickory-dns supports only a subset of BIND's zone types:
<https://bind9.readthedocs.io/en/v9_18_4/reference.html#type>
'';
For more details about these zone types, consult the documentation for BIND,
though note that hickory-dns supports only a subset of BIND's zone types:
<https://bind9.readthedocs.io/en/v9_18_4/reference.html#type>
'';
};
file = mkOption {
type = types.nullOr (types.either types.path types.str);
default = if config.zone_type != "External" then "${config.zone}.zone" else null;
defaultText = literalExpression ''if config.zone_type != "External" then "''${config.zone}.zone" else null'';
description = ''
Path to the .zone file.
If not fully-qualified, this path will be interpreted relative to the `directory` option.
If omitted, defaults to the value of the `zone` option suffixed with ".zone" when `zone_type` isn't External; otherwise, defaults to `null`.
'';
};
};
file = mkOption {
type = types.nullOr (types.either types.path types.str);
default = if config.zone_type != "External" then "${config.zone}.zone" else null;
defaultText = literalExpression ''if config.zone_type != "External" then "''${config.zone}.zone" else null'';
description = ''
Path to the .zone file.
If not fully-qualified, this path will be interpreted relative to the `directory` option.
If omitted, defaults to the value of the `zone` option suffixed with ".zone" when `zone_type` isn't External; otherwise, defaults to `null`.
'';
};
};
});
}
);
in
{
meta.maintainers = with lib.maintainers; [ colinsane ];
@ -45,7 +57,10 @@ in
imports = with lib; [
(mkRenamedOptionModule [ "services" "trust-dns" "enable" ] [ "services" "hickory-dns" "enable" ])
(mkRenamedOptionModule [ "services" "trust-dns" "package" ] [ "services" "hickory-dns" "package" ])
(mkRenamedOptionModule [ "services" "trust-dns" "settings" ] [ "services" "hickory-dns" "settings" ])
(mkRenamedOptionModule
[ "services" "trust-dns" "settings" ]
[ "services" "hickory-dns" "settings" ]
)
(mkRenamedOptionModule [ "services" "trust-dns" "quiet" ] [ "services" "hickory-dns" "quiet" ])
(mkRenamedOptionModule [ "services" "trust-dns" "debug" ] [ "services" "hickory-dns" "debug" ])
];
@ -81,7 +96,13 @@ in
configFile = mkOption {
type = types.path;
default = toml.generate "hickory-dns.toml" (
lib.mapAttrs (_: v: if builtins.isList v then map (v: if builtins.isAttrs v then lib.filterAttrs (_: v: v != null) v else v) v else v) (lib.filterAttrsRecursive (_: v: v != null) cfg.settings)
lib.mapAttrs (
_: v:
if builtins.isList v then
map (v: if builtins.isAttrs v then lib.filterAttrs (_: v: v != null) v else v) v
else
v
) (lib.filterAttrsRecursive (_: v: v != null) cfg.settings)
);
defaultText = lib.literalExpression ''
let toml = pkgs.formats.toml { }; in toml.generate "hickory-dns.toml" cfg.settings
@ -135,7 +156,7 @@ in
};
zones = mkOption {
description = "List of zones to serve.";
default = [];
default = [ ];
type = types.listOf (types.coercedTo types.str (zone: { inherit zone; }) zoneType);
};
};
@ -150,12 +171,13 @@ in
unitConfig.Documentation = "https://hickory-dns.org/";
serviceConfig = {
ExecStart =
let
flags = (lib.optional cfg.debug "--debug") ++ (lib.optional cfg.quiet "--quiet");
flagsStr = builtins.concatStringsSep " " flags;
in ''
${lib.getExe cfg.package} --config ${cfg.configFile} ${flagsStr}
'';
let
flags = (lib.optional cfg.debug "--debug") ++ (lib.optional cfg.quiet "--quiet");
flagsStr = builtins.concatStringsSep " " flags;
in
''
${lib.getExe cfg.package} --config ${cfg.configFile} ${flagsStr}
'';
Type = "simple";
Restart = "on-failure";
RestartSec = "10s";
@ -186,7 +208,11 @@ in
RestrictNamespaces = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
SystemCallFilter = [
"@system-service"
"~@privileged"
"~@resources"
];
};
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -18,18 +23,20 @@ let
${cfg.extraConfig}
'';
snmpGlobalDefs = with cfg.snmp; optionalString enable (
optionalString (socket != null) "snmp_socket ${socket}\n"
+ optionalString enableKeepalived "enable_snmp_keepalived\n"
+ optionalString enableChecker "enable_snmp_checker\n"
+ optionalString enableRfc "enable_snmp_rfc\n"
+ optionalString enableRfcV2 "enable_snmp_rfcv2\n"
+ optionalString enableRfcV3 "enable_snmp_rfcv3\n"
+ optionalString enableTraps "enable_traps"
);
snmpGlobalDefs =
with cfg.snmp;
optionalString enable (
optionalString (socket != null) "snmp_socket ${socket}\n"
+ optionalString enableKeepalived "enable_snmp_keepalived\n"
+ optionalString enableChecker "enable_snmp_checker\n"
+ optionalString enableRfc "enable_snmp_rfc\n"
+ optionalString enableRfcV2 "enable_snmp_rfcv2\n"
+ optionalString enableRfcV3 "enable_snmp_rfcv3\n"
+ optionalString enableTraps "enable_traps"
);
vrrpScriptStr = concatStringsSep "\n" (map (s:
''
vrrpScriptStr = concatStringsSep "\n" (
map (s: ''
vrrp_script ${s.name} {
script "${s.script}"
interval ${toString s.interval}
@ -41,11 +48,11 @@ let
${s.extraConfig}
}
''
) vrrpScripts);
'') vrrpScripts
);
vrrpInstancesStr = concatStringsSep "\n" (map (i:
''
vrrpInstancesStr = concatStringsSep "\n" (
map (i: ''
vrrp_instance ${i.name} {
interface ${i.interface}
state ${i.state}
@ -83,10 +90,12 @@ let
${i.extraConfig}
}
''
) vrrpInstances);
'') vrrpInstances
);
virtualIpLine = ip: ip.addr
virtualIpLine =
ip:
ip.addr
+ optionalString (notNullOrEmpty ip.brd) " brd ${ip.brd}"
+ optionalString (notNullOrEmpty ip.dev) " dev ${ip.dev}"
+ optionalString (notNullOrEmpty ip.scope) " scope ${ip.scope}"
@ -94,39 +103,52 @@ let
notNullOrEmpty = s: !(s == null || s == "");
vrrpScripts = mapAttrsToList (name: config:
vrrpScripts = mapAttrsToList (
name: config:
{
inherit name;
} // config
}
// config
) cfg.vrrpScripts;
vrrpInstances = mapAttrsToList (iName: iConfig:
vrrpInstances = mapAttrsToList (
iName: iConfig:
{
name = iName;
} // iConfig
}
// iConfig
) cfg.vrrpInstances;
vrrpInstanceAssertions = i: [
{ assertion = i.interface != "";
message = "services.keepalived.vrrpInstances.${i.name}.interface option cannot be empty.";
}
{ assertion = i.virtualRouterId >= 0 && i.virtualRouterId <= 255;
message = "services.keepalived.vrrpInstances.${i.name}.virtualRouterId must be an integer between 0..255.";
}
{ assertion = i.priority >= 0 && i.priority <= 255;
message = "services.keepalived.vrrpInstances.${i.name}.priority must be an integer between 0..255.";
}
{ assertion = i.vmacInterface == null || i.useVmac;
message = "services.keepalived.vrrpInstances.${i.name}.vmacInterface has no effect when services.keepalived.vrrpInstances.${i.name}.useVmac is not set.";
}
{ assertion = !i.vmacXmitBase || i.useVmac;
message = "services.keepalived.vrrpInstances.${i.name}.vmacXmitBase has no effect when services.keepalived.vrrpInstances.${i.name}.useVmac is not set.";
}
] ++ flatten (map (virtualIpAssertions i.name) i.virtualIps)
vrrpInstanceAssertions =
i:
[
{
assertion = i.interface != "";
message = "services.keepalived.vrrpInstances.${i.name}.interface option cannot be empty.";
}
{
assertion = i.virtualRouterId >= 0 && i.virtualRouterId <= 255;
message = "services.keepalived.vrrpInstances.${i.name}.virtualRouterId must be an integer between 0..255.";
}
{
assertion = i.priority >= 0 && i.priority <= 255;
message = "services.keepalived.vrrpInstances.${i.name}.priority must be an integer between 0..255.";
}
{
assertion = i.vmacInterface == null || i.useVmac;
message = "services.keepalived.vrrpInstances.${i.name}.vmacInterface has no effect when services.keepalived.vrrpInstances.${i.name}.useVmac is not set.";
}
{
assertion = !i.vmacXmitBase || i.useVmac;
message = "services.keepalived.vrrpInstances.${i.name}.vmacXmitBase has no effect when services.keepalived.vrrpInstances.${i.name}.useVmac is not set.";
}
]
++ flatten (map (virtualIpAssertions i.name) i.virtualIps)
++ flatten (map (vrrpScriptAssertion i.name) i.trackScripts);
virtualIpAssertions = vrrpName: ip: [
{ assertion = ip.addr != "";
{
assertion = ip.addr != "";
message = "The 'addr' option for an services.keepalived.vrrpInstances.${vrrpName}.virtualIps entry cannot be empty.";
}
];
@ -243,18 +265,26 @@ in
};
vrrpScripts = mkOption {
type = types.attrsOf (types.submodule (import ./vrrp-script-options.nix {
inherit lib;
}));
default = {};
type = types.attrsOf (
types.submodule (
import ./vrrp-script-options.nix {
inherit lib;
}
)
);
default = { };
description = "Declarative vrrp script config";
};
vrrpInstances = mkOption {
type = types.attrsOf (types.submodule (import ./vrrp-instance-options.nix {
inherit lib;
}));
default = {};
type = types.attrsOf (
types.submodule (
import ./vrrp-instance-options.nix {
inherit lib;
}
)
);
default = { };
description = "Declarative vhost config";
};
@ -310,7 +340,10 @@ in
systemd.timers.keepalived-boot-delay = {
description = "Keepalive Daemon delay to avoid instant transition to MASTER state";
after = [ "network.target" "network-online.target" ];
after = [
"network.target"
"network-online.target"
];
requires = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
timerConfig = {
@ -319,31 +352,39 @@ in
};
};
systemd.services.keepalived = let
finalConfigFile = if cfg.secretFile == null then keepalivedConf else "/run/keepalived/keepalived.conf";
in {
description = "Keepalive Daemon (LVS and VRRP)";
after = [ "network.target" "network-online.target" ];
wants = [ "network-online.target" ];
serviceConfig = {
Type = "forking";
PIDFile = pidFile;
KillMode = "process";
RuntimeDirectory = "keepalived";
EnvironmentFile = lib.optional (cfg.secretFile != null) cfg.secretFile;
ExecStartPre = lib.optional (cfg.secretFile != null)
(pkgs.writeShellScript "keepalived-pre-start" ''
umask 077
${pkgs.envsubst}/bin/envsubst -i "${keepalivedConf}" > ${finalConfigFile}
'');
ExecStart = "${lib.getExe cfg.package}"
+ " -f ${finalConfigFile}"
+ " -p ${pidFile}"
+ optionalString cfg.snmp.enable " --snmp";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
Restart = "always";
RestartSec = "1s";
systemd.services.keepalived =
let
finalConfigFile =
if cfg.secretFile == null then keepalivedConf else "/run/keepalived/keepalived.conf";
in
{
description = "Keepalive Daemon (LVS and VRRP)";
after = [
"network.target"
"network-online.target"
];
wants = [ "network-online.target" ];
serviceConfig = {
Type = "forking";
PIDFile = pidFile;
KillMode = "process";
RuntimeDirectory = "keepalived";
EnvironmentFile = lib.optional (cfg.secretFile != null) cfg.secretFile;
ExecStartPre = lib.optional (cfg.secretFile != null) (
pkgs.writeShellScript "keepalived-pre-start" ''
umask 077
${pkgs.envsubst}/bin/envsubst -i "${keepalivedConf}" > ${finalConfigFile}
''
);
ExecStart =
"${lib.getExe cfg.package}"
+ " -f ${finalConfigFile}"
+ " -p ${pidFile}"
+ optionalString cfg.snmp.enable " --snmp";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
Restart = "always";
RestartSec = "1s";
};
};
};
};
}

View file

@ -1,25 +1,34 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.minidlna;
format = pkgs.formats.keyValue { listsAsDuplicateKeys = true; };
cfgfile = format.generate "minidlna.conf" cfg.settings;
in {
in
{
options.services.minidlna.enable = lib.mkEnableOption "MiniDLNA, a simple DLNA server. Consider adding `openFirewall = true` into your config";
options.services.minidlna.openFirewall = lib.mkEnableOption "opening HTTP (TCP) and SSDP (UDP) ports in the firewall";
options.services.minidlna.package = lib.mkPackageOption pkgs "minidlna" {};
options.services.minidlna.package = lib.mkPackageOption pkgs "minidlna" { };
options.services.minidlna.settings = lib.mkOption {
default = {};
default = { };
description = "Configuration for {manpage}`minidlna.conf(5)`.";
type = lib.types.submodule {
freeformType = format.type;
options.media_dir = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
example = [ "/data/media" "V,/home/alice/video" ];
default = [ ];
example = [
"/data/media"
"V,/home/alice/video"
];
description = ''
Directories to be scanned for media files.
The `A,` `V,` `P,` prefixes restrict a directory to audio, video or image files.
@ -67,22 +76,34 @@ in {
description = "Defines the type of messages that should be logged and down to which level of importance.";
};
options.enable_subtitles = lib.mkOption {
type = lib.types.enum [ "yes" "no" ];
type = lib.types.enum [
"yes"
"no"
];
default = "yes";
description = "Enable subtitle support on unknown clients.";
};
options.inotify = lib.mkOption {
type = lib.types.enum [ "yes" "no" ];
type = lib.types.enum [
"yes"
"no"
];
default = "no";
description = "Whether to enable inotify monitoring to automatically discover new files.";
};
options.enable_tivo = lib.mkOption {
type = lib.types.enum [ "yes" "no" ];
type = lib.types.enum [
"yes"
"no"
];
default = "no";
description = "Support for streaming .jpg and .mp3 files to a TiVo supporting HMO.";
};
options.wide_links = lib.mkOption {
type = lib.types.enum [ "yes" "no" ];
type = lib.types.enum [
"yes"
"no"
];
default = "no";
description = "Set this to yes to allow symlinks that point outside user-defined `media_dir`.";
};

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.mullvad-vpn;
in
@ -46,15 +51,17 @@ with lib;
systemd.services.mullvad-daemon = {
description = "Mullvad VPN daemon";
wantedBy = [ "multi-user.target" ];
wants = [ "network.target" "network-online.target" ];
wants = [
"network.target"
"network-online.target"
];
after = [
"network-online.target"
"NetworkManager.service"
"systemd-resolved.service"
];
# See https://github.com/NixOS/nixpkgs/issues/262681
path = lib.optional config.networking.resolvconf.enable
config.networking.resolvconf.package;
path = lib.optional config.networking.resolvconf.enable config.networking.resolvconf.package;
startLimitBurst = 5;
startLimitIntervalSec = 20;
serviceConfig = {
@ -65,5 +72,8 @@ with lib;
};
};
meta.maintainers = with maintainers; [ arcuru ymarkus ];
meta.maintainers = with maintainers; [
arcuru
ymarkus
];
}

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -307,7 +312,11 @@ in
};
dbus = mkOption {
type = types.enum [ null "session" "system" ];
type = types.enum [
null
"session"
"system"
];
default = null;
description = "Enable D-Bus remote control. Set to the bus you want Murmur to connect to.";
};
@ -316,14 +325,14 @@ in
config = mkIf cfg.enable {
users.users.murmur = mkIf (cfg.user == "murmur") {
description = "Murmur Service user";
home = cfg.stateDir;
createHome = true;
uid = config.ids.uids.murmur;
group = cfg.group;
description = "Murmur Service user";
home = cfg.stateDir;
createHome = true;
uid = config.ids.uids.murmur;
group = cfg.group;
};
users.groups.murmur = mkIf (cfg.group == "murmur") {
gid = config.ids.gids.murmur;
gid = config.ids.gids.murmur;
};
networking.firewall = mkIf cfg.openFirewall {
@ -333,9 +342,9 @@ in
systemd.services.murmur = {
description = "Murmur Chat Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
preStart = ''
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
preStart = ''
${pkgs.envsubst}/bin/envsubst \
-o /run/murmur/murmurd.ini \
-i ${configFile}
@ -375,63 +384,72 @@ in
RestrictRealtime = true;
SystemCallArchitectures = "native";
SystemCallFilter = "@system-service";
UMask = 027;
UMask = 27;
};
};
# currently not included in upstream package, addition requested at
# https://github.com/mumble-voip/mumble/issues/6078
services.dbus.packages = mkIf (cfg.dbus == "system") [(pkgs.writeTextFile {
name = "murmur-dbus-policy";
text = ''
<!DOCTYPE busconfig PUBLIC
"-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
<busconfig>
<policy user="${cfg.user}">
<allow own="net.sourceforge.mumble.murmur"/>
</policy>
services.dbus.packages = mkIf (cfg.dbus == "system") [
(pkgs.writeTextFile {
name = "murmur-dbus-policy";
text = ''
<!DOCTYPE busconfig PUBLIC
"-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
<busconfig>
<policy user="${cfg.user}">
<allow own="net.sourceforge.mumble.murmur"/>
</policy>
<policy context="default">
<allow send_destination="net.sourceforge.mumble.murmur"/>
<allow receive_sender="net.sourceforge.mumble.murmur"/>
</policy>
</busconfig>
'';
destination = "/share/dbus-1/system.d/murmur.conf";
})];
<policy context="default">
<allow send_destination="net.sourceforge.mumble.murmur"/>
<allow receive_sender="net.sourceforge.mumble.murmur"/>
</policy>
</busconfig>
'';
destination = "/share/dbus-1/system.d/murmur.conf";
})
];
security.apparmor.policies."bin.mumble-server".profile = ''
include <tunables/global>
security.apparmor.policies."bin.mumble-server".profile =
''
include <tunables/global>
${cfg.package}/bin/{mumble-server,.mumble-server-wrapped} {
include <abstractions/base>
include <abstractions/nameservice>
include <abstractions/ssl_certs>
include "${pkgs.apparmorRulesFromClosure { name = "mumble-server"; } cfg.package}"
pix ${cfg.package}/bin/.mumble-server-wrapped,
${cfg.package}/bin/{mumble-server,.mumble-server-wrapped} {
include <abstractions/base>
include <abstractions/nameservice>
include <abstractions/ssl_certs>
include "${pkgs.apparmorRulesFromClosure { name = "mumble-server"; } cfg.package}"
pix ${cfg.package}/bin/.mumble-server-wrapped,
r ${config.environment.etc."os-release".source},
r ${config.environment.etc."lsb-release".source},
owner rwk ${cfg.stateDir}/murmur.sqlite,
owner rw ${cfg.stateDir}/murmur.sqlite-journal,
owner r ${cfg.stateDir}/,
r /run/murmur/murmurd.pid,
r /run/murmur/murmurd.ini,
r ${configFile},
'' + optionalString (cfg.logFile != null) ''
r ${config.environment.etc."os-release".source},
r ${config.environment.etc."lsb-release".source},
owner rwk ${cfg.stateDir}/murmur.sqlite,
owner rw ${cfg.stateDir}/murmur.sqlite-journal,
owner r ${cfg.stateDir}/,
r /run/murmur/murmurd.pid,
r /run/murmur/murmurd.ini,
r ${configFile},
''
+ optionalString (cfg.logFile != null) ''
rw ${cfg.logFile},
'' + optionalString (cfg.sslCert != "") ''
''
+ optionalString (cfg.sslCert != "") ''
r ${cfg.sslCert},
'' + optionalString (cfg.sslKey != "") ''
''
+ optionalString (cfg.sslKey != "") ''
r ${cfg.sslKey},
'' + optionalString (cfg.sslCa != "") ''
''
+ optionalString (cfg.sslCa != "") ''
r ${cfg.sslCa},
'' + optionalString (cfg.dbus != null) ''
''
+ optionalString (cfg.dbus != null) ''
dbus bus=${cfg.dbus}
'' + ''
}
'';
''
+ ''
}
'';
};
meta.maintainers = with lib.maintainers; [ felixsinger ];

View file

@ -1,22 +1,28 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfgs = config.services;
cfg = cfgs.ncdns;
cfg = cfgs.ncdns;
dataDir = "/var/lib/ncdns";
dataDir = "/var/lib/ncdns";
format = pkgs.formats.toml {};
format = pkgs.formats.toml { };
defaultFiles = {
public = "${dataDir}/bit.key";
public = "${dataDir}/bit.key";
private = "${dataDir}/bit.private";
zonePublic = "${dataDir}/bit-zone.key";
zonePublic = "${dataDir}/bit-zone.key";
zonePrivate = "${dataDir}/bit-zone.private";
};
# if all keys are the default value
needsKeygen = lib.all lib.id (lib.flip lib.mapAttrsToList cfg.dnssec.keys
(n: v: v == lib.getAttr n defaultFiles));
needsKeygen = lib.all lib.id (
lib.flip lib.mapAttrsToList cfg.dnssec.keys (n: v: v == lib.getAttr n defaultFiles)
);
mkDefaultAttrs = lib.mapAttrs (_n: v: lib.mkDefault v);
@ -176,7 +182,6 @@ in
};
###### implementation
config = lib.mkIf cfg.enable {
@ -184,9 +189,10 @@ in
services.pdns-recursor = lib.mkIf cfgs.pdns-recursor.resolveNamecoin {
forwardZonesRecurse.bit = "${cfg.address}:${toString cfg.port}";
luaConfig =
if cfg.dnssec.enable
then ''readTrustAnchorsFromFile("${cfg.dnssec.keys.public}")''
else ''addNTA("bit", "namecoin DNSSEC disabled")'';
if cfg.dnssec.enable then
''readTrustAnchorsFromFile("${cfg.dnssec.keys.public}")''
else
''addNTA("bit", "namecoin DNSSEC disabled")'';
};
# Avoid pdns-recursor not finding the DNSSEC keys
@ -197,9 +203,9 @@ in
services.ncdns.settings = mkDefaultAttrs {
ncdns =
{ # Namecoin RPC
namecoinrpcaddress =
"${cfgs.namecoind.rpc.address}:${toString cfgs.namecoind.rpc.port}";
{
# Namecoin RPC
namecoinrpcaddress = "${cfgs.namecoind.rpc.address}:${toString cfgs.namecoind.rpc.port}";
namecoinrpcusername = cfgs.namecoind.rpc.user;
namecoinrpcpassword = cfgs.namecoind.rpc.password;
@ -211,17 +217,17 @@ in
# Other
bind = "${cfg.address}:${toString cfg.port}";
}
// lib.optionalAttrs cfg.dnssec.enable
{ # DNSSEC
publickey = "../.." + cfg.dnssec.keys.public;
// lib.optionalAttrs cfg.dnssec.enable {
# DNSSEC
publickey = "../.." + cfg.dnssec.keys.public;
privatekey = "../.." + cfg.dnssec.keys.private;
zonepublickey = "../.." + cfg.dnssec.keys.zonePublic;
zonepublickey = "../.." + cfg.dnssec.keys.zonePublic;
zoneprivatekey = "../.." + cfg.dnssec.keys.zonePrivate;
};
# Daemon
service.daemon = true;
xlog.journal = true;
# Daemon
service.daemon = true;
xlog.journal = true;
};
users.users.ncdns = {
@ -229,11 +235,11 @@ in
group = "ncdns";
description = "ncdns daemon user";
};
users.groups.ncdns = {};
users.groups.ncdns = { };
systemd.services.ncdns = {
description = "ncdns daemon";
after = [ "namecoind.service" ];
after = [ "namecoind.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -8,19 +13,25 @@ let
render = s: f: concatStringsSep "\n" (mapAttrsToList f s);
prefer = a: b: if a != null then a else b;
ndppdConf = prefer cfg.configFile (pkgs.writeText "ndppd.conf" ''
route-ttl ${toString cfg.routeTTL}
${render cfg.proxies (proxyInterfaceName: proxy: ''
proxy ${prefer proxy.interface proxyInterfaceName} {
router ${boolToString proxy.router}
timeout ${toString proxy.timeout}
ttl ${toString proxy.ttl}
${render proxy.rules (ruleNetworkName: rule: ''
rule ${prefer rule.network ruleNetworkName} {
${rule.method}${optionalString (rule.method == "iface") " ${rule.interface}"}
}'')}
}'')}
'');
ndppdConf = prefer cfg.configFile (
pkgs.writeText "ndppd.conf" ''
route-ttl ${toString cfg.routeTTL}
${render cfg.proxies (
proxyInterfaceName: proxy: ''
proxy ${prefer proxy.interface proxyInterfaceName} {
router ${boolToString proxy.router}
timeout ${toString proxy.timeout}
ttl ${toString proxy.ttl}
${render proxy.rules (
ruleNetworkName: rule: ''
rule ${prefer rule.network ruleNetworkName} {
${rule.method}${optionalString (rule.method == "iface") " ${rule.interface}"}
}''
)}
}''
)}
''
);
proxy = types.submodule {
options = {
@ -63,7 +74,7 @@ let
is provided, /128 is assumed. You may have several rule sections, and the
addresses may or may not overlap.
'';
default = {};
default = { };
};
};
};
@ -81,7 +92,11 @@ let
default = null;
};
method = mkOption {
type = types.enum [ "static" "iface" "auto" ];
type = types.enum [
"static"
"iface"
"auto"
];
description = ''
static: Immediately answer any Neighbor Solicitation Messages
(if they match the IP rule).
@ -101,7 +116,8 @@ let
};
};
in {
in
{
options.services.ndppd = {
enable = mkEnableOption "daemon that proxies NDP (Neighbor Discovery Protocol) messages between interfaces";
interface = mkOption {
@ -141,7 +157,7 @@ in {
This sets up a listener, that will listen for any Neighbor Solicitation
messages, and respond to them according to a set of rules.
'';
default = {};
default = { };
example = literalExpression ''
{
eth0.rules."1111::/64" = {};
@ -151,18 +167,23 @@ in {
};
config = mkIf cfg.enable {
warnings = mkIf (cfg.interface != null && cfg.network != null) [ ''
The options services.ndppd.interface and services.ndppd.network will probably be removed soon,
please use services.ndppd.proxies.<interface>.rules.<network> instead.
'' ];
warnings = mkIf (cfg.interface != null && cfg.network != null) [
''
The options services.ndppd.interface and services.ndppd.network will probably be removed soon,
please use services.ndppd.proxies.<interface>.rules.<network> instead.
''
];
services.ndppd.proxies = mkIf (cfg.interface != null && cfg.network != null) {
${cfg.interface}.rules.${cfg.network} = {};
${cfg.interface}.rules.${cfg.network} = { };
};
systemd.services.ndppd = {
description = "NDP Proxy Daemon";
documentation = [ "man:ndppd(1)" "man:ndppd.conf(5)" ];
documentation = [
"man:ndppd(1)"
"man:ndppd.conf(5)"
];
after = [ "network-pre.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {

View file

@ -16,7 +16,7 @@ in
{
meta = {
maintainers = with lib.maintainers; [patrickdag];
maintainers = with lib.maintainers; [ patrickdag ];
doc = ./server.md;
};
@ -66,9 +66,10 @@ in
URI = "turn:${turnDomain}:${builtins.toString turnPort}";
Username = "netbird";
Password =
if (cfg.coturn.password != null)
then cfg.coturn.password
else {_secret = cfg.coturn.passwordFile;};
if (cfg.coturn.password != null) then
cfg.coturn.password
else
{ _secret = cfg.coturn.passwordFile; };
}
];
};

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -12,12 +17,13 @@ let
rtcFile = "${stateDir}/chrony.rtc";
configFile = pkgs.writeText "chrony.conf" ''
${concatMapStringsSep "\n" (server: "server " + server + " " + cfg.serverOption + optionalString (cfg.enableNTS) " nts") cfg.servers}
${concatMapStringsSep "\n" (
server: "server " + server + " " + cfg.serverOption + optionalString (cfg.enableNTS) " nts"
) cfg.servers}
${optionalString
(cfg.initstepslew.enabled && (cfg.servers != []))
"initstepslew ${toString cfg.initstepslew.threshold} ${concatStringsSep " " cfg.servers}"
}
${optionalString (
cfg.initstepslew.enabled && (cfg.servers != [ ])
) "initstepslew ${toString cfg.initstepslew.threshold} ${concatStringsSep " " cfg.servers}"}
driftfile ${driftFile}
keyfile ${keyFile}
@ -31,7 +37,13 @@ let
'';
chronyFlags =
[ "-n" "-u" "chrony" "-f" "${configFile}" ]
[
"-n"
"-u"
"chrony"
"-f"
"${configFile}"
]
++ optional cfg.enableMemoryLocking "-m"
++ cfg.extraFlags;
in
@ -60,7 +72,10 @@ in
serverOption = mkOption {
default = "iburst";
type = types.enum [ "iburst" "offline" ];
type = types.enum [
"iburst"
"offline"
];
description = ''
Set option for server directives.
@ -74,7 +89,9 @@ in
enableMemoryLocking = mkOption {
type = types.bool;
default = config.environment.memoryAllocator.provider != "graphene-hardened" && config.environment.memoryAllocator.provider != "graphene-hardened-light";
default =
config.environment.memoryAllocator.provider != "graphene-hardened"
&& config.environment.memoryAllocator.provider != "graphene-hardened-light";
defaultText = ''config.environment.memoryAllocator.provider != "graphene-hardened" && config.environment.memoryAllocator.provider != "graphene-hardened-light"'';
description = ''
Whether to add the `-m` flag to lock memory.
@ -164,19 +181,21 @@ in
};
config = mkIf cfg.enable {
meta.maintainers = with lib.maintainers; [ thoughtpolice vifino ];
meta.maintainers = with lib.maintainers; [
thoughtpolice
vifino
];
environment.systemPackages = [ chronyPkg ];
users.groups.chrony.gid = config.ids.gids.chrony;
users.users.chrony =
{
uid = config.ids.uids.chrony;
group = "chrony";
description = "chrony daemon user";
home = stateDir;
};
users.users.chrony = {
uid = config.ids.uids.chrony;
group = "chrony";
description = "chrony daemon user";
home = stateDir;
};
services.timesyncd.enable = mkForce false;
@ -185,75 +204,112 @@ in
enable = lib.mkForce false;
};
systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service"; };
systemd.services.systemd-timedated.environment = {
SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service";
};
systemd.tmpfiles.rules = [
"d ${stateDir} 0750 chrony chrony - -"
"f ${driftFile} 0640 chrony chrony - -"
"f ${keyFile} 0640 chrony chrony - -"
] ++ lib.optionals cfg.enableRTCTrimming [
"f ${rtcFile} 0640 chrony chrony - -"
];
systemd.tmpfiles.rules =
[
"d ${stateDir} 0750 chrony chrony - -"
"f ${driftFile} 0640 chrony chrony - -"
"f ${keyFile} 0640 chrony chrony - -"
]
++ lib.optionals cfg.enableRTCTrimming [
"f ${rtcFile} 0640 chrony chrony - -"
];
systemd.services.chronyd =
{
description = "chrony NTP daemon";
systemd.services.chronyd = {
description = "chrony NTP daemon";
wantedBy = [ "multi-user.target" ];
wants = [ "time-sync.target" ];
before = [ "time-sync.target" ];
after = [ "network.target" "nss-lookup.target" ];
conflicts = [ "ntpd.service" "systemd-timesyncd.service" ];
wantedBy = [ "multi-user.target" ];
wants = [ "time-sync.target" ];
before = [ "time-sync.target" ];
after = [
"network.target"
"nss-lookup.target"
];
conflicts = [
"ntpd.service"
"systemd-timesyncd.service"
];
path = [ chronyPkg ];
path = [ chronyPkg ];
unitConfig.ConditionCapability = "CAP_SYS_TIME";
serviceConfig = {
Type = "simple";
ExecStart = "${chronyPkg}/bin/chronyd ${builtins.toString chronyFlags}";
unitConfig.ConditionCapability = "CAP_SYS_TIME";
serviceConfig = {
Type = "simple";
ExecStart = "${chronyPkg}/bin/chronyd ${builtins.toString chronyFlags}";
# Proc filesystem
ProcSubset = "pid";
ProtectProc = "invisible";
# Access write directories
ReadWritePaths = [ "${stateDir}" ];
UMask = "0027";
# Capabilities
CapabilityBoundingSet = [ "CAP_CHOWN" "CAP_DAC_OVERRIDE" "CAP_NET_BIND_SERVICE" "CAP_SETGID" "CAP_SETUID" "CAP_SYS_RESOURCE" "CAP_SYS_TIME" ];
# Device Access
DeviceAllow = [ "char-pps rw" "char-ptp rw" "char-rtc rw" ];
DevicePolicy = "closed";
# Security
NoNewPrivileges = true;
# Sandboxing
ProtectSystem = "full";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = false;
PrivateUsers = false;
ProtectHostname = true;
ProtectClock = false;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
RestrictNamespaces = true;
LockPersonality = true;
MemoryDenyWriteExecute = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RemoveIPC = true;
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources" "@clock" "@setuid" "capset" "@chown" ];
};
# Proc filesystem
ProcSubset = "pid";
ProtectProc = "invisible";
# Access write directories
ReadWritePaths = [ "${stateDir}" ];
UMask = "0027";
# Capabilities
CapabilityBoundingSet = [
"CAP_CHOWN"
"CAP_DAC_OVERRIDE"
"CAP_NET_BIND_SERVICE"
"CAP_SETGID"
"CAP_SETUID"
"CAP_SYS_RESOURCE"
"CAP_SYS_TIME"
];
# Device Access
DeviceAllow = [
"char-pps rw"
"char-ptp rw"
"char-rtc rw"
];
DevicePolicy = "closed";
# Security
NoNewPrivileges = true;
# Sandboxing
ProtectSystem = "full";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = false;
PrivateUsers = false;
ProtectHostname = true;
ProtectClock = false;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
LockPersonality = true;
MemoryDenyWriteExecute = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RemoveIPC = true;
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = [
"~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources"
"@clock"
"@setuid"
"capset"
"@chown"
];
};
};
assertions = [
{
assertion = !(cfg.enableRTCTrimming && builtins.any (line: (builtins.match "^ *rtcsync" line) != null) (lib.strings.splitString "\n" cfg.extraConfig));
assertion =
!(
cfg.enableRTCTrimming
&& builtins.any (line: (builtins.match "^ *rtcsync" line) != null) (
lib.strings.splitString "\n" cfg.extraConfig
)
);
message = ''
The chrony module now configures `rtcfile` and `rtcautotrim` for you.
These options conflict with `rtcsync` and cause chrony to crash.

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -8,7 +13,8 @@ let
inherit (pkgs) openvpn;
makeOpenVPNJob = cfg: name:
makeOpenVPNJob =
cfg: name:
let
path = makeBinPath (getAttr "openvpn-${name}" config.systemd.services).path;
@ -28,32 +34,32 @@ let
done
${cfg.up}
${optionalString cfg.updateResolvConf
"${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"}
${optionalString cfg.updateResolvConf "${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"}
'';
downScript = ''
export PATH=${path}
${optionalString cfg.updateResolvConf
"${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"}
${optionalString cfg.updateResolvConf "${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"}
${cfg.down}
'';
configFile = pkgs.writeText "openvpn-config-${name}"
''
errors-to-stderr
${optionalString (cfg.up != "" || cfg.down != "" || cfg.updateResolvConf) "script-security 2"}
${cfg.config}
${optionalString (cfg.up != "" || cfg.updateResolvConf)
"up ${pkgs.writeShellScript "openvpn-${name}-up" upScript}"}
${optionalString (cfg.down != "" || cfg.updateResolvConf)
"down ${pkgs.writeShellScript "openvpn-${name}-down" downScript}"}
${optionalString (cfg.authUserPass != null)
"auth-user-pass ${pkgs.writeText "openvpn-credentials-${name}" ''
${cfg.authUserPass.username}
${cfg.authUserPass.password}
''}"}
'';
configFile = pkgs.writeText "openvpn-config-${name}" ''
errors-to-stderr
${optionalString (cfg.up != "" || cfg.down != "" || cfg.updateResolvConf) "script-security 2"}
${cfg.config}
${optionalString (
cfg.up != "" || cfg.updateResolvConf
) "up ${pkgs.writeShellScript "openvpn-${name}-up" upScript}"}
${optionalString (
cfg.down != "" || cfg.updateResolvConf
) "down ${pkgs.writeShellScript "openvpn-${name}-down" downScript}"}
${optionalString (cfg.authUserPass != null)
"auth-user-pass ${pkgs.writeText "openvpn-credentials-${name}" ''
${cfg.authUserPass.username}
${cfg.authUserPass.password}
''}"
}
'';
in
{
@ -62,7 +68,11 @@ let
wantedBy = optional cfg.autoStart "multi-user.target";
after = [ "network.target" ];
path = [ pkgs.iptables pkgs.iproute2 pkgs.nettools ];
path = [
pkgs.iptables
pkgs.iproute2
pkgs.nettools
];
serviceConfig.ExecStart = "@${openvpn}/sbin/openvpn openvpn --suppress-timestamps --config ${configFile}";
serviceConfig.Restart = "always";
@ -73,9 +83,11 @@ let
openvpn-restart = {
wantedBy = [ "sleep.target" ];
path = [ pkgs.procps ];
script = let
unitNames = map (n: "openvpn-${n}.service") (builtins.attrNames cfg.servers);
in "systemctl try-restart ${lib.escapeShellArgs unitNames}";
script =
let
unitNames = map (n: "openvpn-${n}.service") (builtins.attrNames cfg.servers);
in
"systemctl try-restart ${lib.escapeShellArgs unitNames}";
description = "Sends a signal to OpenVPN process to trigger a restart after return from sleep";
};
};
@ -134,80 +146,84 @@ in
attribute name.
'';
type = with types; attrsOf (submodule {
type =
with types;
attrsOf (submodule {
options = {
options = {
config = mkOption {
type = types.lines;
description = ''
Configuration of this OpenVPN instance. See
{manpage}`openvpn(8)`
for details.
config = mkOption {
type = types.lines;
description = ''
Configuration of this OpenVPN instance. See
{manpage}`openvpn(8)`
for details.
To import an external config file, use the following definition:
`config = "config /path/to/config.ovpn"`
'';
To import an external config file, use the following definition:
`config = "config /path/to/config.ovpn"`
'';
};
up = mkOption {
default = "";
type = types.lines;
description = ''
Shell commands executed when the instance is starting.
'';
};
down = mkOption {
default = "";
type = types.lines;
description = ''
Shell commands executed when the instance is shutting down.
'';
};
autoStart = mkOption {
default = true;
type = types.bool;
description = "Whether this OpenVPN instance should be started automatically.";
};
updateResolvConf = mkOption {
default = false;
type = types.bool;
description = ''
Use the script from the update-resolv-conf package to automatically
update resolv.conf with the DNS information provided by openvpn. The
script will be run after the "up" commands and before the "down" commands.
'';
};
authUserPass = mkOption {
default = null;
description = ''
This option can be used to store the username / password credentials
with the "auth-user-pass" authentication method.
WARNING: Using this option will put the credentials WORLD-READABLE in the Nix store!
'';
type = types.nullOr (
types.submodule {
options = {
username = mkOption {
description = "The username to store inside the credentials file.";
type = types.str;
};
password = mkOption {
description = "The password to store inside the credentials file.";
type = types.str;
};
};
}
);
};
};
up = mkOption {
default = "";
type = types.lines;
description = ''
Shell commands executed when the instance is starting.
'';
};
down = mkOption {
default = "";
type = types.lines;
description = ''
Shell commands executed when the instance is shutting down.
'';
};
autoStart = mkOption {
default = true;
type = types.bool;
description = "Whether this OpenVPN instance should be started automatically.";
};
updateResolvConf = mkOption {
default = false;
type = types.bool;
description = ''
Use the script from the update-resolv-conf package to automatically
update resolv.conf with the DNS information provided by openvpn. The
script will be run after the "up" commands and before the "down" commands.
'';
};
authUserPass = mkOption {
default = null;
description = ''
This option can be used to store the username / password credentials
with the "auth-user-pass" authentication method.
WARNING: Using this option will put the credentials WORLD-READABLE in the Nix store!
'';
type = types.nullOr (types.submodule {
options = {
username = mkOption {
description = "The username to store inside the credentials file.";
type = types.str;
};
password = mkOption {
description = "The password to store inside the credentials file.";
type = types.str;
};
};
});
};
};
});
});
};
@ -219,12 +235,16 @@ in
};
###### implementation
config = mkIf (cfg.servers != { }) {
systemd.services = (listToAttrs (mapAttrsToList (name: value: nameValuePair "openvpn-${name}" (makeOpenVPNJob value name)) cfg.servers))
systemd.services =
(listToAttrs (
mapAttrsToList (
name: value: nameValuePair "openvpn-${name}" (makeOpenVPNJob value name)
) cfg.servers
))
// restartService;
environment.systemPackages = [ openvpn ];

View file

@ -1,32 +1,39 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
cfg = config.services.prosody;
sslOpts = { ... }: {
sslOpts =
{ ... }:
{
options = {
options = {
key = mkOption {
type = types.path;
description = "Path to the key file.";
};
# TODO: rename to certificate to match the prosody config
cert = mkOption {
type = types.path;
description = "Path to the certificate file.";
};
extraOptions = mkOption {
type = types.attrs;
default = { };
description = "Extra SSL configuration options.";
};
key = mkOption {
type = types.path;
description = "Path to the key file.";
};
# TODO: rename to certificate to match the prosody config
cert = mkOption {
type = types.path;
description = "Path to the certificate file.";
};
extraOptions = mkOption {
type = types.attrs;
default = {};
description = "Extra SSL configuration options.";
};
};
};
discoOpts = {
options = {
@ -259,231 +266,265 @@ let
};
};
toLua = x:
if builtins.isString x then ''"${x}"''
else if builtins.isBool x then boolToString x
else if builtins.isInt x then toString x
else if builtins.isList x then "{ ${lib.concatMapStringsSep ", " toLua x} }"
else throw "Invalid Lua value";
toLua =
x:
if builtins.isString x then
''"${x}"''
else if builtins.isBool x then
boolToString x
else if builtins.isInt x then
toString x
else if builtins.isList x then
"{ ${lib.concatMapStringsSep ", " toLua x} }"
else
throw "Invalid Lua value";
settingsToLua = prefix: settings: generators.toKeyValue {
listsAsDuplicateKeys = false;
mkKeyValue = k: generators.mkKeyValueDefault {
mkValueString = toLua;
} " = " (prefix + k);
} (filterAttrs (k: v: v != null) settings);
settingsToLua =
prefix: settings:
generators.toKeyValue {
listsAsDuplicateKeys = false;
mkKeyValue =
k:
generators.mkKeyValueDefault {
mkValueString = toLua;
} " = " (prefix + k);
} (filterAttrs (k: v: v != null) settings);
createSSLOptsStr = o: ''
ssl = {
cafile = "/etc/ssl/certs/ca-bundle.crt";
key = "${o.key}";
certificate = "${o.cert}";
${concatStringsSep "\n" (mapAttrsToList (name: value: "${name} = ${toLua value};") o.extraOptions)}
${concatStringsSep "\n" (
mapAttrsToList (name: value: "${name} = ${toLua value};") o.extraOptions
)}
};
'';
mucOpts = { ... }: {
options = {
domain = mkOption {
type = types.str;
description = "Domain name of the MUC";
};
name = mkOption {
type = types.str;
description = "The name to return in service discovery responses for the MUC service itself";
default = "Prosody Chatrooms";
};
restrictRoomCreation = mkOption {
type = types.enum [ true false "admin" "local" ];
default = false;
description = "Restrict room creation to server admins";
};
maxHistoryMessages = mkOption {
type = types.int;
default = 20;
description = "Specifies a limit on what each room can be configured to keep";
};
roomLocking = mkOption {
type = types.bool;
default = true;
description = ''
Enables room locking, which means that a room must be
configured before it can be used. Locked rooms are invisible
and cannot be entered by anyone but the creator
'';
};
roomLockTimeout = mkOption {
type = types.int;
default = 300;
description = ''
Timeout after which the room is destroyed or unlocked if not
configured, in seconds
'';
};
tombstones = mkOption {
type = types.bool;
default = true;
description = ''
When a room is destroyed, it leaves behind a tombstone which
prevents the room being entered or recreated. It also allows
anyone who was not in the room at the time it was destroyed
to learn about it, and to update their bookmarks. Tombstones
prevents the case where someone could recreate a previously
semi-anonymous room in order to learn the real JIDs of those
who often join there.
'';
};
tombstoneExpiry = mkOption {
type = types.int;
default = 2678400;
description = ''
This settings controls how long a tombstone is considered
valid. It defaults to 31 days. After this time, the room in
question can be created again.
'';
};
allowners_muc = mkOption {
type = types.bool;
default = false;
description = ''
Add module allowners, any user in chat is able to
kick other. Usefull in jitsi-meet to kick ghosts.
'';
};
vcard_muc = mkOption {
type = types.bool;
default = true;
description = "Adds the ability to set vCard for Multi User Chat rooms";
};
mucOpts =
{ ... }:
{
options = {
domain = mkOption {
type = types.str;
description = "Domain name of the MUC";
};
name = mkOption {
type = types.str;
description = "The name to return in service discovery responses for the MUC service itself";
default = "Prosody Chatrooms";
};
restrictRoomCreation = mkOption {
type = types.enum [
true
false
"admin"
"local"
];
default = false;
description = "Restrict room creation to server admins";
};
maxHistoryMessages = mkOption {
type = types.int;
default = 20;
description = "Specifies a limit on what each room can be configured to keep";
};
roomLocking = mkOption {
type = types.bool;
default = true;
description = ''
Enables room locking, which means that a room must be
configured before it can be used. Locked rooms are invisible
and cannot be entered by anyone but the creator
'';
};
roomLockTimeout = mkOption {
type = types.int;
default = 300;
description = ''
Timeout after which the room is destroyed or unlocked if not
configured, in seconds
'';
};
tombstones = mkOption {
type = types.bool;
default = true;
description = ''
When a room is destroyed, it leaves behind a tombstone which
prevents the room being entered or recreated. It also allows
anyone who was not in the room at the time it was destroyed
to learn about it, and to update their bookmarks. Tombstones
prevents the case where someone could recreate a previously
semi-anonymous room in order to learn the real JIDs of those
who often join there.
'';
};
tombstoneExpiry = mkOption {
type = types.int;
default = 2678400;
description = ''
This settings controls how long a tombstone is considered
valid. It defaults to 31 days. After this time, the room in
question can be created again.
'';
};
allowners_muc = mkOption {
type = types.bool;
default = false;
description = ''
Add module allowners, any user in chat is able to
kick other. Usefull in jitsi-meet to kick ghosts.
'';
};
vcard_muc = mkOption {
type = types.bool;
default = true;
description = "Adds the ability to set vCard for Multi User Chat rooms";
};
# Extra parameters. Defaulting to prosody default values.
# Adding them explicitly to make them visible from the options
# documentation.
#
# See https://prosody.im/doc/modules/mod_muc for more details.
roomDefaultPublic = mkOption {
type = types.bool;
default = true;
description = "If set, the MUC rooms will be public by default.";
};
roomDefaultMembersOnly = mkOption {
type = types.bool;
default = false;
description = "If set, the MUC rooms will only be accessible to the members by default.";
};
roomDefaultModerated = mkOption {
type = types.bool;
default = false;
description = "If set, the MUC rooms will be moderated by default.";
};
roomDefaultPublicJids = mkOption {
type = types.bool;
default = false;
description = "If set, the MUC rooms will display the public JIDs by default.";
};
roomDefaultChangeSubject = mkOption {
type = types.bool;
default = false;
description = "If set, the rooms will display the public JIDs by default.";
};
roomDefaultHistoryLength = mkOption {
type = types.int;
default = 20;
description = "Number of history message sent to participants by default.";
};
roomDefaultLanguage = mkOption {
type = types.str;
default = "en";
description = "Default room language.";
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Additional MUC specific configuration";
# Extra parameters. Defaulting to prosody default values.
# Adding them explicitly to make them visible from the options
# documentation.
#
# See https://prosody.im/doc/modules/mod_muc for more details.
roomDefaultPublic = mkOption {
type = types.bool;
default = true;
description = "If set, the MUC rooms will be public by default.";
};
roomDefaultMembersOnly = mkOption {
type = types.bool;
default = false;
description = "If set, the MUC rooms will only be accessible to the members by default.";
};
roomDefaultModerated = mkOption {
type = types.bool;
default = false;
description = "If set, the MUC rooms will be moderated by default.";
};
roomDefaultPublicJids = mkOption {
type = types.bool;
default = false;
description = "If set, the MUC rooms will display the public JIDs by default.";
};
roomDefaultChangeSubject = mkOption {
type = types.bool;
default = false;
description = "If set, the rooms will display the public JIDs by default.";
};
roomDefaultHistoryLength = mkOption {
type = types.int;
default = 20;
description = "Number of history message sent to participants by default.";
};
roomDefaultLanguage = mkOption {
type = types.str;
default = "en";
description = "Default room language.";
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Additional MUC specific configuration";
};
};
};
};
uploadHttpOpts = { ... }: {
options = {
domain = mkOption {
type = types.nullOr types.str;
description = "Domain name for the http-upload service";
};
uploadFileSizeLimit = mkOption {
type = types.str;
default = "50 * 1024 * 1024";
description = "Maximum file size, in bytes. Defaults to 50MB.";
};
uploadExpireAfter = mkOption {
type = types.str;
default = "60 * 60 * 24 * 7";
description = "Max age of a file before it gets deleted, in seconds.";
};
userQuota = mkOption {
type = types.nullOr types.int;
default = null;
example = 1234;
description = ''
Maximum size of all uploaded files per user, in bytes. There
will be no quota if this option is set to null.
'';
};
httpUploadPath = mkOption {
type = types.str;
description = ''
Directory where the uploaded files will be stored when the http_upload module is used.
By default, uploaded files are put in a sub-directory of the default Prosody storage path (usually /var/lib/prosody).
'';
default = "/var/lib/prosody";
uploadHttpOpts =
{ ... }:
{
options = {
domain = mkOption {
type = types.nullOr types.str;
description = "Domain name for the http-upload service";
};
uploadFileSizeLimit = mkOption {
type = types.str;
default = "50 * 1024 * 1024";
description = "Maximum file size, in bytes. Defaults to 50MB.";
};
uploadExpireAfter = mkOption {
type = types.str;
default = "60 * 60 * 24 * 7";
description = "Max age of a file before it gets deleted, in seconds.";
};
userQuota = mkOption {
type = types.nullOr types.int;
default = null;
example = 1234;
description = ''
Maximum size of all uploaded files per user, in bytes. There
will be no quota if this option is set to null.
'';
};
httpUploadPath = mkOption {
type = types.str;
description = ''
Directory where the uploaded files will be stored when the http_upload module is used.
By default, uploaded files are put in a sub-directory of the default Prosody storage path (usually /var/lib/prosody).
'';
default = "/var/lib/prosody";
};
};
};
};
httpFileShareOpts = { ... }: {
freeformType = with types;
let atom = oneOf [ int bool str (listOf atom) ]; in
attrsOf (nullOr atom) // {
description = "int, bool, string or list of them";
httpFileShareOpts =
{ ... }:
{
freeformType =
with types;
let
atom = oneOf [
int
bool
str
(listOf atom)
];
in
attrsOf (nullOr atom)
// {
description = "int, bool, string or list of them";
};
options.domain = mkOption {
type = with types; nullOr str;
description = "Domain name for a http_file_share service.";
};
options.domain = mkOption {
type = with types; nullOr str;
description = "Domain name for a http_file_share service.";
};
};
vHostOpts = { ... }: {
vHostOpts =
{ ... }:
{
options = {
options = {
# TODO: require attribute
domain = mkOption {
type = types.str;
description = "Domain name";
};
# TODO: require attribute
domain = mkOption {
type = types.str;
description = "Domain name";
};
enabled = mkOption {
type = types.bool;
default = false;
description = "Whether to enable the virtual host";
};
enabled = mkOption {
type = types.bool;
default = false;
description = "Whether to enable the virtual host";
};
ssl = mkOption {
type = types.nullOr (types.submodule sslOpts);
default = null;
description = "Paths to SSL files";
};
ssl = mkOption {
type = types.nullOr (types.submodule sslOpts);
default = null;
description = "Paths to SSL files";
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Additional virtual host specific configuration";
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Additional virtual host specific configuration";
};
};
};
in
{
@ -543,7 +584,7 @@ in
disco_items = mkOption {
type = types.listOf (types.submodule discoOpts);
default = [];
default = [ ];
description = "List of discoverable items you want to advertise.";
};
@ -590,7 +631,10 @@ in
httpInterfaces = mkOption {
type = types.listOf types.str;
default = [ "*" "::" ];
default = [
"*"
"::"
];
description = "Interfaces on which the HTTP server will listen on.";
};
@ -602,7 +646,10 @@ in
httpsInterfaces = mkOption {
type = types.listOf types.str;
default = [ "*" "::" ];
default = [
"*"
"::"
];
description = "Interfaces on which the HTTPS server will listen on.";
};
@ -638,7 +685,7 @@ in
s2sInsecureDomains = mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
example = [ "insecure.example.com" ];
description = ''
Some servers have invalid or self-signed certificates. You can list
@ -650,7 +697,7 @@ in
s2sSecureDomains = mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
example = [ "jabber.org" ];
description = ''
Even if you leave s2s_secure_auth disabled, you can still require valid
@ -658,18 +705,17 @@ in
'';
};
modules = moduleOpts;
extraModules = mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
description = "Enable custom modules";
};
extraPluginPaths = mkOption {
type = types.listOf types.path;
default = [];
default = [ ];
description = "Additional path in which to look find plugins/modules";
};
@ -698,9 +744,11 @@ in
muc = mkOption {
type = types.listOf (types.submodule mucOpts);
default = [ ];
example = [ {
domain = "conference.my-xmpp-example-host.org";
} ];
example = [
{
domain = "conference.my-xmpp-example-host.org";
}
];
description = "Multi User Chat (MUC) configuration";
};
@ -734,13 +782,21 @@ in
admins = mkOption {
type = types.listOf types.str;
default = [];
example = [ "admin1@example.com" "admin2@example.com" ];
default = [ ];
example = [
"admin1@example.com"
"admin2@example.com"
];
description = "List of administrators of the current host";
};
authentication = mkOption {
type = types.enum [ "internal_plain" "internal_hashed" "cyrus" "anonymous" ];
type = types.enum [
"internal_plain"
"internal_hashed"
"cyrus"
"anonymous"
];
default = "internal_hashed";
example = "internal_plain";
description = "Authentication mechanism used for logins.";
@ -766,13 +822,13 @@ in
};
};
###### implementation
config = mkIf cfg.enable {
assertions = let
genericErrMsg = ''
assertions =
let
genericErrMsg = ''
Having a server not XEP-0423-compliant might make your XMPP
experience terrible. See the NixOS manual for further
@ -780,127 +836,151 @@ in
If you know what you're doing, you can disable this warning by
setting config.services.prosody.xmppComplianceSuite to false.
'';
errors = [
{ assertion = (builtins.length cfg.muc > 0) || !cfg.xmppComplianceSuite;
message = ''
You need to setup at least a MUC domain to comply with
XEP-0423.
'' + genericErrMsg;}
{ assertion = cfg.uploadHttp != null || cfg.httpFileShare != null || !cfg.xmppComplianceSuite;
message = ''
You need to setup the http_upload or http_file_share modules through config.services.prosody.uploadHttp
or config.services.prosody.httpFileShare to comply with XEP-0423.
'' + genericErrMsg;}
];
in errors;
'';
errors = [
{
assertion = (builtins.length cfg.muc > 0) || !cfg.xmppComplianceSuite;
message =
''
You need to setup at least a MUC domain to comply with
XEP-0423.
''
+ genericErrMsg;
}
{
assertion = cfg.uploadHttp != null || cfg.httpFileShare != null || !cfg.xmppComplianceSuite;
message =
''
You need to setup the http_upload or http_file_share modules through config.services.prosody.uploadHttp
or config.services.prosody.httpFileShare to comply with XEP-0423.
''
+ genericErrMsg;
}
];
in
errors;
environment.systemPackages = [ cfg.package ];
environment.etc."prosody/prosody.cfg.lua".text =
let
httpDiscoItems = optional (cfg.uploadHttp != null) {
url = cfg.uploadHttp.domain; description = "HTTP upload endpoint";
} ++ optional (cfg.httpFileShare != null) {
url = cfg.httpFileShare.domain; description = "HTTP file share endpoint";
};
mucDiscoItems = builtins.foldl'
(acc: muc: [{ url = muc.domain; description = "${muc.domain} MUC endpoint";}] ++ acc)
[]
cfg.muc;
httpDiscoItems =
optional (cfg.uploadHttp != null) {
url = cfg.uploadHttp.domain;
description = "HTTP upload endpoint";
}
++ optional (cfg.httpFileShare != null) {
url = cfg.httpFileShare.domain;
description = "HTTP file share endpoint";
};
mucDiscoItems = builtins.foldl' (
acc: muc:
[
{
url = muc.domain;
description = "${muc.domain} MUC endpoint";
}
]
++ acc
) [ ] cfg.muc;
discoItems = cfg.disco_items ++ httpDiscoItems ++ mucDiscoItems;
in ''
in
''
pidfile = "/run/prosody/prosody.pid"
pidfile = "/run/prosody/prosody.pid"
log = ${cfg.log}
log = ${cfg.log}
data_path = "${cfg.dataDir}"
plugin_paths = {
${lib.concatStringsSep ", " (map (n: "\"${n}\"") cfg.extraPluginPaths) }
}
data_path = "${cfg.dataDir}"
plugin_paths = {
${lib.concatStringsSep ", " (map (n: "\"${n}\"") cfg.extraPluginPaths)}
}
${ optionalString (cfg.ssl != null) (createSSLOptsStr cfg.ssl) }
${optionalString (cfg.ssl != null) (createSSLOptsStr cfg.ssl)}
admins = ${toLua cfg.admins}
admins = ${toLua cfg.admins}
modules_enabled = {
modules_enabled = {
${ lib.concatStringsSep "\n " (lib.mapAttrsToList
(name: val: optionalString val "${toLua name};")
cfg.modules) }
${ lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.package.communityModules)}
${ lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.extraModules)}
};
${lib.concatStringsSep "\n " (
lib.mapAttrsToList (name: val: optionalString val "${toLua name};") cfg.modules
)}
${lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.package.communityModules)}
${lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.extraModules)}
};
disco_items = {
${ lib.concatStringsSep "\n" (builtins.map (x: ''{ "${x.url}", "${x.description}"};'') discoItems)}
};
disco_items = {
${lib.concatStringsSep "\n" (builtins.map (x: ''{ "${x.url}", "${x.description}"};'') discoItems)}
};
allow_registration = ${toLua cfg.allowRegistration}
allow_registration = ${toLua cfg.allowRegistration}
c2s_require_encryption = ${toLua cfg.c2sRequireEncryption}
c2s_require_encryption = ${toLua cfg.c2sRequireEncryption}
s2s_require_encryption = ${toLua cfg.s2sRequireEncryption}
s2s_require_encryption = ${toLua cfg.s2sRequireEncryption}
s2s_secure_auth = ${toLua cfg.s2sSecureAuth}
s2s_secure_auth = ${toLua cfg.s2sSecureAuth}
s2s_insecure_domains = ${toLua cfg.s2sInsecureDomains}
s2s_insecure_domains = ${toLua cfg.s2sInsecureDomains}
s2s_secure_domains = ${toLua cfg.s2sSecureDomains}
s2s_secure_domains = ${toLua cfg.s2sSecureDomains}
authentication = ${toLua cfg.authentication}
authentication = ${toLua cfg.authentication}
http_interfaces = ${toLua cfg.httpInterfaces}
http_interfaces = ${toLua cfg.httpInterfaces}
https_interfaces = ${toLua cfg.httpsInterfaces}
https_interfaces = ${toLua cfg.httpsInterfaces}
http_ports = ${toLua cfg.httpPorts}
http_ports = ${toLua cfg.httpPorts}
https_ports = ${toLua cfg.httpsPorts}
https_ports = ${toLua cfg.httpsPorts}
${ cfg.extraConfig }
${cfg.extraConfig}
${lib.concatMapStrings (muc: ''
Component ${toLua muc.domain} "muc"
modules_enabled = { "muc_mam"; ${optionalString muc.vcard_muc ''"vcard_muc";'' } ${optionalString muc.allowners_muc ''"muc_allowners";'' } }
name = ${toLua muc.name}
restrict_room_creation = ${toLua muc.restrictRoomCreation}
max_history_messages = ${toLua muc.maxHistoryMessages}
muc_room_locking = ${toLua muc.roomLocking}
muc_room_lock_timeout = ${toLua muc.roomLockTimeout}
muc_tombstones = ${toLua muc.tombstones}
muc_tombstone_expiry = ${toLua muc.tombstoneExpiry}
muc_room_default_public = ${toLua muc.roomDefaultPublic}
muc_room_default_members_only = ${toLua muc.roomDefaultMembersOnly}
muc_room_default_moderated = ${toLua muc.roomDefaultModerated}
muc_room_default_public_jids = ${toLua muc.roomDefaultPublicJids}
muc_room_default_change_subject = ${toLua muc.roomDefaultChangeSubject}
muc_room_default_history_length = ${toLua muc.roomDefaultHistoryLength}
muc_room_default_language = ${toLua muc.roomDefaultLanguage}
${ muc.extraConfig }
${lib.concatMapStrings (muc: ''
Component ${toLua muc.domain} "muc"
modules_enabled = { "muc_mam"; ${optionalString muc.vcard_muc ''"vcard_muc";''} ${optionalString muc.allowners_muc ''"muc_allowners";''} }
name = ${toLua muc.name}
restrict_room_creation = ${toLua muc.restrictRoomCreation}
max_history_messages = ${toLua muc.maxHistoryMessages}
muc_room_locking = ${toLua muc.roomLocking}
muc_room_lock_timeout = ${toLua muc.roomLockTimeout}
muc_tombstones = ${toLua muc.tombstones}
muc_tombstone_expiry = ${toLua muc.tombstoneExpiry}
muc_room_default_public = ${toLua muc.roomDefaultPublic}
muc_room_default_members_only = ${toLua muc.roomDefaultMembersOnly}
muc_room_default_moderated = ${toLua muc.roomDefaultModerated}
muc_room_default_public_jids = ${toLua muc.roomDefaultPublicJids}
muc_room_default_change_subject = ${toLua muc.roomDefaultChangeSubject}
muc_room_default_history_length = ${toLua muc.roomDefaultHistoryLength}
muc_room_default_language = ${toLua muc.roomDefaultLanguage}
${muc.extraConfig}
'') cfg.muc}
${ lib.optionalString (cfg.uploadHttp != null) ''
Component ${toLua cfg.uploadHttp.domain} "http_upload"
http_upload_file_size_limit = ${cfg.uploadHttp.uploadFileSizeLimit}
http_upload_expire_after = ${cfg.uploadHttp.uploadExpireAfter}
${lib.optionalString (cfg.uploadHttp.userQuota != null) "http_upload_quota = ${toLua cfg.uploadHttp.userQuota}"}
http_upload_path = ${toLua cfg.uploadHttp.httpUploadPath}
''}
${lib.optionalString (cfg.uploadHttp != null) ''
Component ${toLua cfg.uploadHttp.domain} "http_upload"
http_upload_file_size_limit = ${cfg.uploadHttp.uploadFileSizeLimit}
http_upload_expire_after = ${cfg.uploadHttp.uploadExpireAfter}
${lib.optionalString (
cfg.uploadHttp.userQuota != null
) "http_upload_quota = ${toLua cfg.uploadHttp.userQuota}"}
http_upload_path = ${toLua cfg.uploadHttp.httpUploadPath}
''}
${lib.optionalString (cfg.httpFileShare != null) ''
Component ${toLua cfg.httpFileShare.domain} "http_file_share"
${settingsToLua " http_file_share_" (cfg.httpFileShare // { domain = null; })}
''}
${lib.optionalString (cfg.httpFileShare != null) ''
Component ${toLua cfg.httpFileShare.domain} "http_file_share"
${settingsToLua " http_file_share_" (cfg.httpFileShare // { domain = null; })}
''}
${ lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v: ''
VirtualHost "${v.domain}"
enabled = ${boolToString v.enabled};
${ optionalString (v.ssl != null) (createSSLOptsStr v.ssl) }
${ v.extraConfig }
'') cfg.virtualHosts) }
'';
${lib.concatStringsSep "\n" (
lib.mapAttrsToList (n: v: ''
VirtualHost "${v.domain}"
enabled = ${boolToString v.enabled};
${optionalString (v.ssl != null) (createSSLOptsStr v.ssl)}
${v.extraConfig}
'') cfg.virtualHosts
)}
'';
users.users.prosody = mkIf (cfg.user == "prosody") {
uid = config.ids.uids.prosody;

View file

@ -1,6 +1,11 @@
# Module for the IPv6 Router Advertisement Daemon.
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -22,13 +27,13 @@ in
type = types.bool;
default = false;
description = ''
Whether to enable the Router Advertisement Daemon
({command}`radvd`), which provides link-local
advertisements of IPv6 router addresses and prefixes using
the Neighbor Discovery Protocol (NDP). This enables
stateless address autoconfiguration in IPv6 clients on the
network.
'';
Whether to enable the Router Advertisement Daemon
({command}`radvd`), which provides link-local
advertisements of IPv6 router addresses and prefixes using
the Neighbor Discovery Protocol (NDP). This enables
stateless address autoconfiguration in IPv6 clients on the
network.
'';
};
package = mkPackageOption pkgs "radvd" { };
@ -38,50 +43,47 @@ in
default = 0;
example = 5;
description = ''
The debugging level is an integer in the range from 1 to 5,
from quiet to very verbose. A debugging level of 0 completely
turns off debugging.
'';
The debugging level is an integer in the range from 1 to 5,
from quiet to very verbose. A debugging level of 0 completely
turns off debugging.
'';
};
config = mkOption {
type = types.lines;
example =
''
interface eth0 {
AdvSendAdvert on;
prefix 2001:db8:1234:5678::/64 { };
};
'';
example = ''
interface eth0 {
AdvSendAdvert on;
prefix 2001:db8:1234:5678::/64 { };
};
'';
description = ''
The contents of the radvd configuration file.
'';
The contents of the radvd configuration file.
'';
};
};
###### implementation
config = mkIf cfg.enable {
users.users.radvd =
{
isSystemUser = true;
group = "radvd";
description = "Router Advertisement Daemon User";
};
users.groups.radvd = {};
users.users.radvd = {
isSystemUser = true;
group = "radvd";
description = "Router Advertisement Daemon User";
};
users.groups.radvd = { };
systemd.services.radvd =
{ description = "IPv6 Router Advertisement Daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig =
{ ExecStart = "@${cfg.package}/bin/radvd radvd -n -u radvd -d ${toString cfg.debugLevel} -C ${confFile}";
Restart = "always";
};
systemd.services.radvd = {
description = "IPv6 Router Advertisement Daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "@${cfg.package}/bin/radvd radvd -n -u radvd -d ${toString cfg.debugLevel} -C ${confFile}";
Restart = "always";
};
};
};

View file

@ -1,7 +1,12 @@
# Module for rdnssd, a daemon that configures DNS servers in
# /etc/resolv/conf from IPv6 RDNSS advertisements.
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
@ -21,24 +26,25 @@ in
default = false;
#default = config.networking.enableIPv6;
description = ''
Whether to enable the RDNSS daemon
({command}`rdnssd`), which configures DNS servers in
{file}`/etc/resolv.conf` from RDNSS
advertisements sent by IPv6 routers.
'';
Whether to enable the RDNSS daemon
({command}`rdnssd`), which configures DNS servers in
{file}`/etc/resolv.conf` from RDNSS
advertisements sent by IPv6 routers.
'';
};
};
###### implementation
config = mkIf config.services.rdnssd.enable {
assertions = [{
assertion = config.networking.resolvconf.enable;
message = "rdnssd needs resolvconf to work (probably something sets up a static resolv.conf)";
}];
assertions = [
{
assertion = config.networking.resolvconf.enable;
message = "rdnssd needs resolvconf to work (probably something sets up a static resolv.conf)";
}
];
systemd.services.rdnssd = {
description = "RDNSS daemon";
@ -74,7 +80,7 @@ in
isSystemUser = true;
group = "rdnssd";
};
users.groups.rdnssd = {};
users.groups.rdnssd = { };
};

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
@ -30,138 +35,157 @@ in
type = types.str;
default = "stderr";
description = ''
Where to send logs.
Where to send logs.
Possible values are:
- stderr
- file:/path/to/file
- syslog:FACILITY where FACILITY is any of "daemon", "local0",
etc.
'';
Possible values are:
- stderr
- file:/path/to/file
- syslog:FACILITY where FACILITY is any of "daemon", "local0",
etc.
'';
};
chroot = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Chroot under which to run redsocks. Log file is opened before
chroot, but if logging to syslog /etc/localtime may be required.
'';
Chroot under which to run redsocks. Log file is opened before
chroot, but if logging to syslog /etc/localtime may be required.
'';
};
redsocks = mkOption {
description = ''
Local port to proxy associations to be performed.
Local port to proxy associations to be performed.
The example shows how to configure a proxy to handle port 80 as HTTP
relay, and all other ports as HTTP connect.
'';
The example shows how to configure a proxy to handle port 80 as HTTP
relay, and all other ports as HTTP connect.
'';
example = [
{ port = 23456; proxy = "1.2.3.4:8080"; type = "http-relay";
{
port = 23456;
proxy = "1.2.3.4:8080";
type = "http-relay";
redirectCondition = "--dport 80";
doNotRedirect = [ "-d 1.2.0.0/16" ];
}
{ port = 23457; proxy = "1.2.3.4:8080"; type = "http-connect";
{
port = 23457;
proxy = "1.2.3.4:8080";
type = "http-connect";
redirectCondition = true;
doNotRedirect = [ "-d 1.2.0.0/16" ];
}
];
type = types.listOf (types.submodule { options = {
ip = mkOption {
type = types.str;
default = "127.0.0.1";
description = ''
IP on which redsocks should listen. Defaults to 127.0.0.1 for
security reasons.
'';
};
type = types.listOf (
types.submodule {
options = {
ip = mkOption {
type = types.str;
default = "127.0.0.1";
description = ''
IP on which redsocks should listen. Defaults to 127.0.0.1 for
security reasons.
'';
};
port = mkOption {
type = types.port;
default = 12345;
description = "Port on which redsocks should listen.";
};
port = mkOption {
type = types.port;
default = 12345;
description = "Port on which redsocks should listen.";
};
proxy = mkOption {
type = types.str;
description = ''
Proxy through which redsocks should forward incoming traffic.
Example: "example.org:8080"
'';
};
proxy = mkOption {
type = types.str;
description = ''
Proxy through which redsocks should forward incoming traffic.
Example: "example.org:8080"
'';
};
type = mkOption {
type = types.enum [ "socks4" "socks5" "http-connect" "http-relay" ];
description = "Type of proxy.";
};
type = mkOption {
type = types.enum [
"socks4"
"socks5"
"http-connect"
"http-relay"
];
description = "Type of proxy.";
};
login = mkOption {
type = with types; nullOr str;
default = null;
description = "Login to send to proxy.";
};
login = mkOption {
type = with types; nullOr str;
default = null;
description = "Login to send to proxy.";
};
password = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Password to send to proxy. WARNING, this will end up
world-readable in the store! Awaiting
https://github.com/NixOS/nix/issues/8 to be able to fix.
'';
};
password = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Password to send to proxy. WARNING, this will end up
world-readable in the store! Awaiting
https://github.com/NixOS/nix/issues/8 to be able to fix.
'';
};
disclose_src = mkOption {
type = types.enum [ "false" "X-Forwarded-For" "Forwarded_ip"
"Forwarded_ipport" ];
default = "false";
description = ''
Way to disclose client IP to the proxy.
- "false": do not disclose
disclose_src = mkOption {
type = types.enum [
"false"
"X-Forwarded-For"
"Forwarded_ip"
"Forwarded_ipport"
];
default = "false";
description = ''
Way to disclose client IP to the proxy.
- "false": do not disclose
http-connect supports the following ways:
- "X-Forwarded-For": add header "X-Forwarded-For: IP"
- "Forwarded_ip": add header "Forwarded: for=IP" (see RFC7239)
- "Forwarded_ipport": add header 'Forwarded: for="IP:port"'
'';
};
http-connect supports the following ways:
- "X-Forwarded-For": add header "X-Forwarded-For: IP"
- "Forwarded_ip": add header "Forwarded: for=IP" (see RFC7239)
- "Forwarded_ipport": add header 'Forwarded: for="IP:port"'
'';
};
redirectInternetOnly = mkOption {
type = types.bool;
default = true;
description = "Exclude all non-globally-routable IPs from redsocks";
};
redirectInternetOnly = mkOption {
type = types.bool;
default = true;
description = "Exclude all non-globally-routable IPs from redsocks";
};
doNotRedirect = mkOption {
type = with types; listOf str;
default = [];
description = ''
Iptables filters that if matched will get the packet off of
redsocks.
'';
example = [ "-d 1.2.3.4" ];
};
doNotRedirect = mkOption {
type = with types; listOf str;
default = [ ];
description = ''
Iptables filters that if matched will get the packet off of
redsocks.
'';
example = [ "-d 1.2.3.4" ];
};
redirectCondition = mkOption {
type = with types; either bool str;
default = false;
description = ''
Conditions to make outbound packets go through this redsocks
instance.
redirectCondition = mkOption {
type = with types; either bool str;
default = false;
description = ''
Conditions to make outbound packets go through this redsocks
instance.
If set to false, no packet will be forwarded. If set to true,
all packets will be forwarded (except packets excluded by
redirectInternetOnly).
If set to false, no packet will be forwarded. If set to true,
all packets will be forwarded (except packets excluded by
redirectInternetOnly).
If set to a string, this is an iptables filter that will be
matched against packets before getting them into redsocks. For
example, setting it to "--dport 80" will only send
packets to port 80 to redsocks. Note "-p tcp" is always
implicitly added, as udp can only be proxied through redudp or
the like.
'';
};
};});
If set to a string, this is an iptables filter that will be
matched against packets before getting them into redsocks. For
example, setting it to "--dport 80" will only send
packets to port 80 to redsocks. Note "-p tcp" is always
implicitly added, as udp can only be proxied through redudp or
the like.
'';
};
};
}
);
};
# TODO: Add support for redudp and dnstc
@ -169,29 +193,33 @@ in
};
##### implementation
config = let
redsocks_blocks = concatMapStrings (block:
let proxy = splitString ":" block.proxy; in
''
redsocks {
local_ip = ${block.ip};
local_port = ${toString block.port};
config =
let
redsocks_blocks = concatMapStrings (
block:
let
proxy = splitString ":" block.proxy;
in
''
redsocks {
local_ip = ${block.ip};
local_port = ${toString block.port};
ip = ${elemAt proxy 0};
port = ${elemAt proxy 1};
type = ${block.type};
ip = ${elemAt proxy 0};
port = ${elemAt proxy 1};
type = ${block.type};
${optionalString (block.login != null) "login = \"${block.login}\";"}
${optionalString (block.password != null) "password = \"${block.password}\";"}
${optionalString (block.login != null) "login = \"${block.login}\";"}
${optionalString (block.password != null) "password = \"${block.password}\";"}
disclose_src = ${block.disclose_src};
}
'') cfg.redsocks;
configfile = pkgs.writeText "redsocks.conf"
''
disclose_src = ${block.disclose_src};
}
''
) cfg.redsocks;
configfile = pkgs.writeText "redsocks.conf" ''
base {
log_debug = ${if cfg.log_debug then "on" else "off" };
log_info = ${if cfg.log_info then "on" else "off" };
log_debug = ${if cfg.log_debug then "on" else "off"};
log_info = ${if cfg.log_info then "on" else "off"};
log = ${cfg.log};
daemon = off;
@ -204,26 +232,27 @@ in
${redsocks_blocks}
'';
internetOnly = [ # TODO: add ipv6-equivalent
"-d 0.0.0.0/8"
"-d 10.0.0.0/8"
"-d 127.0.0.0/8"
"-d 169.254.0.0/16"
"-d 172.16.0.0/12"
"-d 192.168.0.0/16"
"-d 224.168.0.0/4"
"-d 240.168.0.0/4"
];
redCond = block:
optionalString (isString block.redirectCondition) block.redirectCondition;
iptables = concatImapStrings (idx: block:
let chain = "REDSOCKS${toString idx}"; doNotRedirect =
concatMapStringsSep "\n"
(f: "ip46tables -t nat -A ${chain} ${f} -j RETURN 2>/dev/null || true")
(block.doNotRedirect ++ (optionals block.redirectInternetOnly internetOnly));
in
optionalString (block.redirectCondition != false)
''
internetOnly = [
# TODO: add ipv6-equivalent
"-d 0.0.0.0/8"
"-d 10.0.0.0/8"
"-d 127.0.0.0/8"
"-d 169.254.0.0/16"
"-d 172.16.0.0/12"
"-d 192.168.0.0/16"
"-d 224.168.0.0/4"
"-d 240.168.0.0/4"
];
redCond = block: optionalString (isString block.redirectCondition) block.redirectCondition;
iptables = concatImapStrings (
idx: block:
let
chain = "REDSOCKS${toString idx}";
doNotRedirect = concatMapStringsSep "\n" (
f: "ip46tables -t nat -A ${chain} ${f} -j RETURN 2>/dev/null || true"
) (block.doNotRedirect ++ (optionals block.redirectInternetOnly internetOnly));
in
optionalString (block.redirectCondition != false) ''
ip46tables -t nat -F ${chain} 2>/dev/null || true
ip46tables -t nat -N ${chain} 2>/dev/null || true
${doNotRedirect}
@ -233,10 +262,10 @@ in
# iptables-restore
ip46tables -t nat -A OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true
''
) cfg.redsocks;
in
) cfg.redsocks;
in
mkIf cfg.enable {
users.groups.redsocks = {};
users.groups.redsocks = { };
users.users.redsocks = {
description = "Redsocks daemon";
group = "redsocks";
@ -252,12 +281,15 @@ in
networking.firewall.extraCommands = iptables;
networking.firewall.extraStopCommands =
concatImapStringsSep "\n" (idx: block:
let chain = "REDSOCKS${toString idx}"; in
optionalString (block.redirectCondition != false)
"ip46tables -t nat -D OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true"
) cfg.redsocks;
networking.firewall.extraStopCommands = concatImapStringsSep "\n" (
idx: block:
let
chain = "REDSOCKS${toString idx}";
in
optionalString (
block.redirectCondition != false
) "ip46tables -t nat -D OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true"
) cfg.redsocks;
};
meta.maintainers = with lib.maintainers; [ ekleog ];

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -17,54 +22,69 @@ let
known_hosts = entry.knownHosts;
}) cfg.sharedFolders;
configFile = pkgs.writeText "config.json" (builtins.toJSON ({
device_name = cfg.deviceName;
storage_path = cfg.storagePath;
listening_port = cfg.listeningPort;
use_gui = false;
check_for_updates = cfg.checkForUpdates;
use_upnp = cfg.useUpnp;
download_limit = cfg.downloadLimit;
upload_limit = cfg.uploadLimit;
lan_encrypt_data = cfg.encryptLAN;
} // optionalAttrs (cfg.directoryRoot != "") { directory_root = cfg.directoryRoot; }
// optionalAttrs cfg.enableWebUI {
webui = { listen = "${cfg.httpListenAddr}:${toString cfg.httpListenPort}"; } //
(optionalAttrs (cfg.httpLogin != "") { login = cfg.httpLogin; }) //
(optionalAttrs (cfg.httpPass != "") { password = cfg.httpPass; }) //
(optionalAttrs (cfg.apiKey != "") { api_key = cfg.apiKey; });
} // optionalAttrs (sharedFoldersRecord != []) {
shared_folders = sharedFoldersRecord;
}));
configFile = pkgs.writeText "config.json" (
builtins.toJSON (
{
device_name = cfg.deviceName;
storage_path = cfg.storagePath;
listening_port = cfg.listeningPort;
use_gui = false;
check_for_updates = cfg.checkForUpdates;
use_upnp = cfg.useUpnp;
download_limit = cfg.downloadLimit;
upload_limit = cfg.uploadLimit;
lan_encrypt_data = cfg.encryptLAN;
}
// optionalAttrs (cfg.directoryRoot != "") { directory_root = cfg.directoryRoot; }
// optionalAttrs cfg.enableWebUI {
webui =
{
listen = "${cfg.httpListenAddr}:${toString cfg.httpListenPort}";
}
// (optionalAttrs (cfg.httpLogin != "") { login = cfg.httpLogin; })
// (optionalAttrs (cfg.httpPass != "") { password = cfg.httpPass; })
// (optionalAttrs (cfg.apiKey != "") { api_key = cfg.apiKey; });
}
// optionalAttrs (sharedFoldersRecord != [ ]) {
shared_folders = sharedFoldersRecord;
}
)
);
sharedFoldersSecretFiles = map (entry: {
dir = entry.directory;
secretFile = if builtins.hasAttr "secret" entry then
toString (pkgs.writeTextFile {
name = "secret-file";
text = entry.secret;
})
else
entry.secretFile;
secretFile =
if builtins.hasAttr "secret" entry then
toString (
pkgs.writeTextFile {
name = "secret-file";
text = entry.secret;
}
)
else
entry.secretFile;
}) cfg.sharedFolders;
runConfigPath = "/run/rslsync/config.json";
createConfig = pkgs.writeShellScriptBin "create-resilio-config" (
if cfg.sharedFolders != [ ] then ''
${pkgs.jq}/bin/jq \
'.shared_folders |= map(.secret = $ARGS.named[.dir])' \
${
lib.concatMapStringsSep " \\\n "
(entry: ''--arg '${entry.dir}' "$(cat '${entry.secretFile}')"'')
sharedFoldersSecretFiles
} \
<${configFile} \
>${runConfigPath}
'' else ''
# no secrets, passing through config
cp ${configFile} ${runConfigPath};
''
if cfg.sharedFolders != [ ] then
''
${pkgs.jq}/bin/jq \
'.shared_folders |= map(.secret = $ARGS.named[.dir])' \
${
lib.concatMapStringsSep " \\\n " (
entry: ''--arg '${entry.dir}' "$(cat '${entry.secretFile}')"''
) sharedFoldersSecretFiles
} \
<${configFile} \
>${runConfigPath}
''
else
''
# no secrets, passing through config
cp ${configFile} ${runConfigPath};
''
);
in
@ -185,7 +205,7 @@ in
Enable Web UI for administration. Bound to the specified
`httpListenAddress` and
`httpListenPort`.
'';
'';
};
storagePath = mkOption {
@ -212,22 +232,23 @@ in
};
sharedFolders = mkOption {
default = [];
default = [ ];
type = types.listOf (types.attrsOf types.anything);
example =
[ { secretFile = "/run/resilio-secret";
directory = "/home/user/sync_test";
useRelayServer = true;
useTracker = true;
useDHT = false;
searchLAN = true;
useSyncTrash = true;
knownHosts = [
"192.168.1.2:4444"
"192.168.1.3:4444"
];
}
];
example = [
{
secretFile = "/run/resilio-secret";
directory = "/home/user/sync_test";
useRelayServer = true;
useTracker = true;
useDHT = false;
searchLAN = true;
useSyncTrash = true;
knownHosts = [
"192.168.1.2:4444"
"192.168.1.3:4444"
];
}
];
description = ''
Shared folder list. If enabled, web UI must be
disabled. Secrets can be generated using `rslsync --generate-secret`.
@ -252,36 +273,39 @@ in
};
config = mkIf cfg.enable {
assertions =
[ { assertion = cfg.deviceName != "";
message = "Device name cannot be empty.";
}
{ assertion = cfg.enableWebUI -> cfg.sharedFolders == [];
message = "If using shared folders, the web UI cannot be enabled.";
}
{ assertion = cfg.apiKey != "" -> cfg.enableWebUI;
message = "If you're using an API key, you must enable the web server.";
}
];
assertions = [
{
assertion = cfg.deviceName != "";
message = "Device name cannot be empty.";
}
{
assertion = cfg.enableWebUI -> cfg.sharedFolders == [ ];
message = "If using shared folders, the web UI cannot be enabled.";
}
{
assertion = cfg.apiKey != "" -> cfg.enableWebUI;
message = "If you're using an API key, you must enable the web server.";
}
];
users.users.rslsync = {
description = "Resilio Sync Service user";
home = cfg.storagePath;
createHome = true;
uid = config.ids.uids.rslsync;
group = "rslsync";
description = "Resilio Sync Service user";
home = cfg.storagePath;
createHome = true;
uid = config.ids.uids.rslsync;
group = "rslsync";
};
users.groups.rslsync.gid = config.ids.gids.rslsync;
systemd.services.resilio = with pkgs; {
description = "Resilio Sync Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
Restart = "on-abort";
UMask = "0002";
User = "rslsync";
Restart = "on-abort";
UMask = "0002";
User = "rslsync";
RuntimeDirectory = "rslsync";
ExecStartPre = "${createConfig}/bin/create-resilio-config";
ExecStart = ''

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -12,7 +17,7 @@ in
extraFlags = mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
description = ''Extra flags passed to the {command}`robustirc-bridge` command. See [RobustIRC Documentation](https://robustirc.net/docs/adminguide.html#_bridge) or {manpage}`robustirc-bridge(1)` for details.'';
example = [
"-network robustirc.net"

View file

@ -1,7 +1,8 @@
{ config
, lib
, pkgs
, ...
{
config,
lib,
pkgs,
...
}:
with lib;

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -25,7 +30,6 @@ with lib;
};
###### implementation
config = mkIf config.services.rpcbind.enable {

View file

@ -1,11 +1,17 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
cfg = config.networking.rxe;
in {
in
{
###### interface
options = {
@ -32,21 +38,24 @@ in {
description = "RoCE interfaces";
wantedBy = [ "multi-user.target" ];
after = [ "systemd-modules-load.service" "network-online.target" ];
wants = [ "network-pre.target" "network-online.target" ];
after = [
"systemd-modules-load.service"
"network-online.target"
];
wants = [
"network-pre.target"
"network-online.target"
];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = map ( x:
"${pkgs.iproute2}/bin/rdma link add rxe_${x} type rxe netdev ${x}"
) cfg.interfaces;
ExecStart = map (
x: "${pkgs.iproute2}/bin/rdma link add rxe_${x} type rxe netdev ${x}"
) cfg.interfaces;
ExecStop = map ( x:
"${pkgs.iproute2}/bin/rdma link delete rxe_${x}"
) cfg.interfaces;
ExecStop = map (x: "${pkgs.iproute2}/bin/rdma link delete rxe_${x}") cfg.interfaces;
};
};
};
}

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -47,7 +52,6 @@ in
};
};
###### implementation
config = mkIf cfg.enable {
@ -64,17 +68,17 @@ in
};
systemd.services.sabnzbd = {
description = "sabnzbd server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
Type = "forking";
GuessMainPID = "no";
User = cfg.user;
Group = cfg.group;
StateDirectory = "sabnzbd";
ExecStart = "${lib.getBin cfg.package}/bin/sabnzbd -d -f ${cfg.configFile}";
};
description = "sabnzbd server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
Type = "forking";
GuessMainPID = "no";
User = cfg.user;
Group = cfg.group;
StateDirectory = "sabnzbd";
ExecStart = "${lib.getBin cfg.package}/bin/sabnzbd -d -f ${cfg.configFile}";
};
};
networking.firewall = mkIf cfg.openFirewall {

Some files were not shown because too many files have changed in this diff Show more