Mirror of https://github.com/NixOS/nixpkgs.git (synced 2025-07-12 05:16:25 +03:00)
treewide: Format all Nix files
Format all Nix files using the officially approved formatter,
making the CI check introduced in the previous commit succeed:
nix-build ci -A fmt.check
This is the next step of the [implementation](https://github.com/NixOS/nixfmt/issues/153)
of the accepted [RFC 166](https://github.com/NixOS/rfcs/pull/166).
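For contributors, a minimal sketch of how to check and fix formatting locally. The check command is the one quoted above; the `nixfmt` invocation and the file path are illustrative assumptions, since the exact entry point may differ in your checkout:

```sh
# Run the same formatting check that CI runs (command from this commit message).
nix-build ci -A fmt.check

# If it fails, reformat the offending files with the RFC 166 formatter;
# nixfmt rewrites the files it is given in place.
# "path/to/changed-file.nix" is a placeholder.
nixfmt path/to/changed-file.nix
```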
This commit will cause merge conflicts for a number of PRs,
estimated at up to ~1100 (~33%) of the PRs with activity in the past 2
months, but that number should be lower than it would have been without the
previous [partial treewide format](https://github.com/NixOS/nixpkgs/pull/322537).
Merge conflicts caused by this commit can now be resolved automatically while
rebasing, using the [auto-rebase script](8616af08d9/maintainers/scripts/auto-rebase).
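If you would rather resolve such conflicts by hand than with the script, a rough sketch of one manual workflow (the `upstream` remote name and the file path are placeholders, not part of this commit):

```sh
# Rebase your branch onto the freshly formatted master.
git fetch upstream master
git rebase upstream/master

# For each conflicted .nix file: resolve the conflict as usual, re-run the
# formatter so the file matches the new treewide style, then continue.
nixfmt path/to/conflicted-file.nix
git add path/to/conflicted-file.nix
git rebase --continue
```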
If you run into any problems regarding any of this, please reach out to the
[formatting team](https://nixos.org/community/teams/formatting/) by
pinging @NixOS/nix-formatting.
Parent: 2140bf39e4
Commit: 374e6bcc40
1523 changed files with 986047 additions and 513621 deletions
@@ -1,142 +1,166 @@  (NixOS buildkite-agents module; cfg = config.services.buildkite-agents)

Every change in this hunk is a mechanical re-wrap; nothing changes semantically. The module argument set

    { config, lib, pkgs, ... }:

becomes

    {
      config,
      lib,
      pkgs,
      ...
    }:

The same one-element-per-line treatment is applied to the `hooksDir = hooks:` binding, the arguments of `pkgs.runCommand "buildkite-agent-hooks"` (the `preferLocalBuild = true;` attrset and the hook-linking script each move onto their own lines), the `buildkiteOptions = { name ? "", config, ... }:` argument pattern, the `runtimePackages` default `[ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ]`, and the `tags` example `{ queue = "default"; docker = "true"; ruby2 = "true"; }`. The option declarations themselves (enable, package, dataDir, extraGroups, runtimePackages, tokenPath, name, tags, extraConfig, privateSshKeyPath, hooks, hooksPath, shell) and the trailing `enabledAgents` / `mapAgents` helpers are unchanged.
@@ -152,76 +176,92 @@ in  (buildkite-agents module, continued)

The `mapAgents` calls for `config.users.users`, `config.users.groups`, `config.systemd.services`, and `config.assertions` are re-wrapped so the `name: cfg:` lambda starts on its own line inside the parentheses, with their bodies re-indented but otherwise unchanged. Within the per-agent systemd service, `path = cfg.runtimePackages ++ [ cfg.package pkgs.coreutils ];` gets one list element per line; in `preStart`, the `tagStr = name: value: if lib.isList value then ... else ...;` helper is re-wrapped with `if`/`then`/`else` on separate lines, and the `'' + ''` concatenation of the SSH-key snippet and the buildkite-agent.cfg here-doc is split so that `+ ''` begins a new line. The user and group definitions, the generated configuration file contents, the serviceConfig attributes, and the assertion that `hooksPath` and `hooks.<name>` are mutually exclusive are unchanged.
@@ -1,4 +1,9 @@  (NixOS Hydra module; cfg = config.services.hydra)

    { config, pkgs, lib, ... }:

becomes

    {
      config,
      pkgs,
      lib,
      ...
    }:

followed by the unchanged `let` and `cfg = config.services.hydra;`.
@@ -7,61 +12,71 @@ let

Only brace and operator placement changes in the `let` bindings. `hydraEnv` becomes `hydraEnv = { ... };` with one attribute per line; in `env` and `serverEnv`, each attrset and each `//` update (`// lib.optionalAttrs (cfg.smtpHost != null) { ... }`, `// hydraEnv`, `// cfg.extraEnv`, `// (lib.optionalAttrs cfg.debugServer { DBIC_TRACE = "1"; })`) starts its own line. In `hydra-package`, the long `makeWrapperArgs = lib.concatStringsSep " " (lib.mapAttrsToList ... hydraEnv);` expression is wrapped across lines, and `in` moves onto its own line before `pkgs.buildEnv rec {`. The `hydraConf` binding, the attribute values (HYDRA_DBI, HYDRA_CONFIG, HYDRA_DATA, NIX_REMOTE, PGPASSFILE, NIX_REMOTE_SYSTEMS, EMAIL_SENDER_TRANSPORT, EMAIL_SENDER_TRANSPORT_host, HYDRA_TRACKER, XDG_CACHE_HOME, COLUMNS), `localDB`, `haveLocalDB`, and the `postBuild` wrapper script are unchanged.
@@ -199,7 +214,7 @@ in

In the `extraEnv` option, `default = {};` becomes `default = { };`; its type and description are unchanged.
@@ -211,9 +226,12 @@ in

In the `buildMachinesFiles` option, the empty list in `default = lib.optional (config.nix.buildMachines != []) "/etc/nix/machines";` gains its inner space (`!= [ ]`), and the example `[ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ]` is split to one element per line; `defaultText` and the description are unchanged.
@@ -234,7 +252,6 @@ in

One blank line is dropped around the `###### implementation` comment, just before `config = lib.mkIf cfg.enable {`; nothing else changes.
@@ -253,42 +270,41 @@ in

The `users.users.hydra`, `users.users.hydra-queue-runner`, and `users.users.hydra-www` definitions move from the old layout (`users.users.hydra =` on one line, `{ description = "Hydra";` opening the attrset on the next) to `users.users.hydra = { ... };` with the opening brace on the binding line; their contents (description, group, home, useDefaultShell, uid, and the createHome comment) are unchanged. Likewise `services.hydra.extraConfig =` and its `''` are joined into `services.hydra.extraConfig = ''`, with the configuration text (using_frontend_proxy, base_uri, notification_sender, max_servers, the optional hydra_logo, gc_roots_dir, use-substitutes) unchanged. The leading `gid = config.ids.gids.hydra;` line and `environment.systemPackages = [ hydra-package ];` are unchanged context.
@@ -301,247 +317,264 @@ in

The rest of the Hydra module gets the same mechanical treatment:

- The `lib.mkIf (lib.versionOlder (lib.getVersion config.nix.package.out) "2.4pre") { gc-check-reachability = false; }` entry keeps its comment but has the attrset brace pulled up onto the `mkIf` line.
- Lists are split to one element per line: `systemd.slices.system-hydra.documentation`, the `after` and `path` lists of `hydra-queue-runner` (`[ "hydra-init.service" "network.target" ]` and `[ hydra-package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ]`), the `after` and `path` lists of `hydra-evaluator` (including `with pkgs; [ hydra-package nettools jq ]`), and `path = [ pkgs.bzip2 pkgs.zstd ];` in `hydra-compress-logs`.
- The service definitions (`hydra-init`, `hydra-server`, `hydra-queue-runner`, `hydra-evaluator`, `hydra-update-gc-roots`, `hydra-send-stats`, `hydra-notify`, `hydra-check-space`, `hydra-compress-logs`) and their nested `serviceConfig` attrsets move the opening brace onto the binding line, and `script =` followed by `''` is joined into `script = ''`.
- `services.postgresql.identMap = lib.optionalString haveLocalDB` and `services.postgresql.authentication = lib.optionalString haveLocalDB` are each joined with their following `''` onto one line.

The service bodies themselves (environment variables, the hydra-init preStart script, ExecStart lines, `startAt = "2,14:15";`, `startAt = "*:0/5";`, `startAt = "Sun 01:45";`, the disk-space check and log-compression scripts, and the ident-map and authentication text) are unchanged, as is `services.postgresql.enable = lib.mkIf haveLocalDB true;`.