Mirror of https://github.com/NixOS/nixpkgs.git, synced 2025-06-09 19:13:26 +03:00

Merge 73557c22ec into haskell-updates
This commit is contained in: commit e756cc1b5e

3126 changed files with 42257 additions and 25416 deletions
.github/labeler-no-sync.yml (7 changes)

@@ -22,6 +22,13 @@
              - doc/**/*
              - nixos/doc/**/*

+"backport release-24.11":
+  - any:
+      - changed-files:
+          - any-glob-to-any-file:
+              - .github/workflows/*
+              - ci/**/*.*
+
"backport release-25.05":
  - any:
      - changed-files:
.github/workflows/check-cherry-picks.yml (19 changes)

@@ -115,16 +115,17 @@ jobs:
            repo: context.repo.repo,
            pull_number: context.payload.pull_request.number
          })).filter(review =>
-           review.user.login == 'github-actions[bot]' &&
-           review.state == 'CHANGES_REQUESTED'
+           review.user.login == 'github-actions[bot]'
          ).map(async (review) => {
-           await github.rest.pulls.dismissReview({
-             owner: context.repo.owner,
-             repo: context.repo.repo,
-             pull_number: context.payload.pull_request.number,
-             review_id: review.id,
-             message: 'All cherry-picks are good now, thank you!'
-           })
+           if (review.state == 'CHANGES_REQUESTED') {
+             await github.rest.pulls.dismissReview({
+               owner: context.repo.owner,
+               repo: context.repo.repo,
+               pull_number: context.payload.pull_request.number,
+               review_id: review.id,
+               message: 'All cherry-picks are good now, thank you!'
+             })
+           }
            await github.graphql(`mutation($node_id:ID!) {
              minimizeComment(input: {
                classifier: RESOLVED,
.github/workflows/check-format.yml (2 changes)

@@ -25,7 +25,7 @@ jobs:
        with:
          merged-as-untrusted: true

-     - uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
+     - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
        with:
          extra_nix_config: sandbox = true
8
.github/workflows/check-shell.yml
vendored
8
.github/workflows/check-shell.yml
vendored
|
@ -42,7 +42,13 @@ jobs:
|
|||
with:
|
||||
merged-as-untrusted: true
|
||||
|
||||
- uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
- uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
|
||||
- uses: cachix/cachix-action@0fc020193b5a1fa3ac4575aa3a7d3aa6a35435ad # v16
|
||||
with:
|
||||
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
|
||||
name: nixpkgs-ci
|
||||
authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
|
||||
|
||||
- name: Build shell
|
||||
run: nix-build untrusted/ci -A shell
|
||||
|
|
16
.github/workflows/codeowners-v2.yml
vendored
16
.github/workflows/codeowners-v2.yml
vendored
|
@ -45,7 +45,6 @@ jobs:
|
|||
check:
|
||||
name: Check
|
||||
runs-on: ubuntu-24.04-arm
|
||||
if: github.repository_owner == 'NixOS'
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
|
@ -56,7 +55,7 @@ jobs:
|
|||
merged-as-untrusted: true
|
||||
target-as-trusted: true
|
||||
|
||||
- uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
- uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
|
||||
- uses: cachix/cachix-action@0fc020193b5a1fa3ac4575aa3a7d3aa6a35435ad # v16
|
||||
with:
|
||||
|
@ -68,7 +67,7 @@ jobs:
|
|||
run: nix-build trusted/ci -A codeownersValidator
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
if: vars.OWNER_RO_APP_ID
|
||||
if: github.event_name == 'pull_request_target' && vars.OWNER_RO_APP_ID
|
||||
id: app-token
|
||||
with:
|
||||
app-id: ${{ vars.OWNER_RO_APP_ID }}
|
||||
|
@ -91,9 +90,8 @@ jobs:
|
|||
request:
|
||||
name: Request
|
||||
runs-on: ubuntu-24.04-arm
|
||||
if: github.repository_owner == 'NixOS'
|
||||
steps:
|
||||
- uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
- uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
|
||||
# Important: Because we use pull_request_target, this checks out the base branch of the PR, not the PR head.
|
||||
# This is intentional, because we need to request the review of owners as declared in the base branch.
|
||||
|
@ -101,8 +99,11 @@ jobs:
|
|||
with:
|
||||
path: trusted
|
||||
|
||||
- name: Build review request package
|
||||
run: nix-build trusted/ci -A requestReviews
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
if: vars.OWNER_APP_ID
|
||||
if: github.event_name == 'pull_request_target' && vars.OWNER_APP_ID
|
||||
id: app-token
|
||||
with:
|
||||
app-id: ${{ vars.OWNER_APP_ID }}
|
||||
|
@ -111,9 +112,6 @@ jobs:
|
|||
permission-members: read
|
||||
permission-pull-requests: write
|
||||
|
||||
- name: Build review request package
|
||||
run: nix-build trusted/ci -A requestReviews
|
||||
|
||||
- name: Request reviews
|
||||
if: steps.app-token.outputs.token
|
||||
env:
|
||||
|
|
30
.github/workflows/dismissed-review.yml
vendored
30
.github/workflows/dismissed-review.yml
vendored
|
@ -1,30 +0,0 @@
|
|||
name: Dismissed Review
|
||||
|
||||
on:
|
||||
pull_request_review:
|
||||
types: [dismissed]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
# The check-cherry-picks workflow creates review comments,
|
||||
# that should sometimes be manually dismissed.
|
||||
# When a CI-generated review is dismissed, this job automatically
|
||||
# minimizes it, to prevent it from cluttering the PR.
|
||||
minimize:
|
||||
name: Minimize as resolved
|
||||
if: github.event.review.user.login == 'github-actions[bot]'
|
||||
runs-on: ubuntu-24.04-arm
|
||||
steps:
|
||||
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
with:
|
||||
script: |
|
||||
await github.graphql(`mutation($node_id:ID!) {
|
||||
minimizeComment(input: {
|
||||
classifier: RESOLVED,
|
||||
subjectId: $node_id
|
||||
})
|
||||
{ clientMutationId }
|
||||
}`, { node_id: context.payload.review.node_id })
|
||||
|
2
.github/workflows/eval-aliases.yml
vendored
2
.github/workflows/eval-aliases.yml
vendored
|
@ -26,7 +26,7 @@ jobs:
|
|||
merged-as-untrusted: true
|
||||
|
||||
- name: Install Nix
|
||||
uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
with:
|
||||
extra_nix_config: sandbox = true
|
||||
|
||||
|
|
6
.github/workflows/eval.yml
vendored
6
.github/workflows/eval.yml
vendored
|
@ -4,7 +4,7 @@ on:
|
|||
pull_request:
|
||||
paths:
|
||||
- .github/workflows/eval.yml
|
||||
- .github/workflows/reviews.yml # needs eval results from the same event type
|
||||
- .github/workflows/reviewers.yml # needs eval results from the same event type
|
||||
pull_request_target:
|
||||
push:
|
||||
# Keep this synced with ci/request-reviews/dev-branches.txt
|
||||
|
@ -68,7 +68,7 @@ jobs:
|
|||
path: untrusted
|
||||
|
||||
- name: Install Nix
|
||||
uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
with:
|
||||
extra_nix_config: sandbox = true
|
||||
|
||||
|
@ -180,7 +180,7 @@ jobs:
|
|||
path: trusted
|
||||
|
||||
- name: Install Nix
|
||||
uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
with:
|
||||
extra_nix_config: sandbox = true
|
||||
|
||||
|
|
8
.github/workflows/lib-tests.yml
vendored
8
.github/workflows/lib-tests.yml
vendored
|
@ -28,10 +28,16 @@ jobs:
|
|||
with:
|
||||
merged-as-untrusted: true
|
||||
|
||||
- uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
- uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
with:
|
||||
extra_nix_config: sandbox = true
|
||||
|
||||
- uses: cachix/cachix-action@0fc020193b5a1fa3ac4575aa3a7d3aa6a35435ad # v16
|
||||
with:
|
||||
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
|
||||
name: nixpkgs-ci
|
||||
authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
|
||||
|
||||
- name: Building Nixpkgs lib-tests
|
||||
run: |
|
||||
nix-build untrusted/ci -A lib-tests
|
||||
|
|
2
.github/workflows/manual-nixos-v2.yml
vendored
2
.github/workflows/manual-nixos-v2.yml
vendored
|
@ -45,7 +45,7 @@ jobs:
|
|||
with:
|
||||
merged-as-untrusted: true
|
||||
|
||||
- uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
- uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
with:
|
||||
extra_nix_config: sandbox = true
|
||||
|
||||
|
|
2
.github/workflows/manual-nixpkgs-v2.yml
vendored
2
.github/workflows/manual-nixpkgs-v2.yml
vendored
|
@ -29,7 +29,7 @@ jobs:
|
|||
with:
|
||||
merged-as-untrusted: true
|
||||
|
||||
- uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
- uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
with:
|
||||
extra_nix_config: sandbox = true
|
||||
|
||||
|
|
2
.github/workflows/nix-parse-v2.yml
vendored
2
.github/workflows/nix-parse-v2.yml
vendored
|
@ -26,7 +26,7 @@ jobs:
|
|||
with:
|
||||
merged-as-untrusted: true
|
||||
|
||||
- uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
- uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
with:
|
||||
extra_nix_config: sandbox = true
|
||||
nix_path: nixpkgs=channel:nixpkgs-unstable
|
||||
|
|
2
.github/workflows/nixpkgs-vet.yml
vendored
2
.github/workflows/nixpkgs-vet.yml
vendored
|
@ -36,7 +36,7 @@ jobs:
|
|||
merged-as-untrusted: true
|
||||
target-as-trusted: true
|
||||
|
||||
- uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
- uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
|
||||
- name: Running nixpkgs-vet
|
||||
env:
|
||||
|
|
4
.github/workflows/reviewers.yml
vendored
4
.github/workflows/reviewers.yml
vendored
|
@ -34,7 +34,7 @@ jobs:
|
|||
sparse-checkout: ci
|
||||
|
||||
- name: Install Nix
|
||||
uses: cachix/install-nix-action@526118121621777ccd86f79b04685a9319637641 # v31
|
||||
uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31
|
||||
with:
|
||||
extra_nix_config: sandbox = true
|
||||
|
||||
|
@ -44,7 +44,7 @@ jobs:
|
|||
# See ./codeowners-v2.yml, reuse the same App because we need the same permissions
|
||||
# Can't use the token received from permissions above, because it can't get enough permissions
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
if: vars.OWNER_APP_ID
|
||||
if: github.event_name == 'pull_request_target' && vars.OWNER_APP_ID
|
||||
id: app-token
|
||||
with:
|
||||
app-id: ${{ vars.OWNER_APP_ID }}
|
||||
|
|
10
ci/OWNERS
10
ci/OWNERS
|
@ -15,12 +15,9 @@
|
|||
|
||||
# CI
|
||||
/.github/*_TEMPLATE* @SigmaSquadron
|
||||
/.github/actions @NixOS/Security @Mic92 @zowoq @infinisil @azuwis @wolfgangwalther
|
||||
/.github/workflows @NixOS/Security @Mic92 @zowoq @infinisil @azuwis @wolfgangwalther
|
||||
/.github/workflows/check-format.yml @infinisil @wolfgangwalther
|
||||
/.github/workflows/codeowners-v2.yml @infinisil @wolfgangwalther
|
||||
/.github/workflows/nixpkgs-vet.yml @infinisil @philiptaron @wolfgangwalther
|
||||
/ci @infinisil @philiptaron @NixOS/Security @wolfgangwalther
|
||||
/.github/actions @NixOS/Security @Mic92 @zowoq @infinisil @azuwis @wolfgangwalther @philiptaron
|
||||
/.github/workflows @NixOS/Security @Mic92 @zowoq @infinisil @azuwis @wolfgangwalther @philiptaron
|
||||
/ci @NixOS/Security @Mic92 @zowoq @infinisil @azuwis @wolfgangwalther @philiptaron
|
||||
/ci/OWNERS @infinisil @philiptaron
|
||||
|
||||
# Development support
|
||||
|
@ -230,6 +227,7 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @NixOS/nix-team @raitobeza
|
|||
/nixos/tests/snapcast.nix @mweinelt
|
||||
|
||||
# Browsers
|
||||
/pkgs/applications/networking/browsers/librewolf @squalus @DominicWrege @fpletz @LordGrimmauld
|
||||
/pkgs/applications/networking/browsers/firefox @mweinelt
|
||||
/pkgs/applications/networking/browsers/chromium @emilylange @networkException
|
||||
/nixos/tests/chromium.nix @emilylange @networkException
|
||||
|
|
ci/README.md (24 changes)

@@ -7,9 +7,9 @@ This is in contrast with [`maintainers/scripts`](../maintainers/scripts) which i

CI may need certain packages from Nixpkgs.
In order to ensure that the needed packages are generally available without building,
-[`pinned-nixpkgs.json`](./pinned-nixpkgs.json) contains a pinned Nixpkgs version tested by Hydra.
+[`pinned.json`](./pinned.json) contains a pinned Nixpkgs version tested by Hydra.

-Run [`update-pinned-nixpkgs.sh`](./update-pinned-nixpkgs.sh) to update it.
+Run [`update-pinned.sh`](./update-pinned.sh) to update it.

## `ci/nixpkgs-vet.sh BASE_BRANCH [REPOSITORY]`

@@ -20,23 +20,3 @@ Arguments:

- `BASE_BRANCH`: The base branch to use, e.g. master or release-24.05
- `REPOSITORY`: The repository from which to fetch the base branch. Defaults to <https://github.com/NixOS/nixpkgs.git>.

-## `ci/nixpkgs-vet`
-
-This directory contains scripts and files used and related to [`nixpkgs-vet`](https://github.com/NixOS/nixpkgs-vet/), which the CI uses to implement `pkgs/by-name` checks, along with many other Nixpkgs architecture rules.
-See also the [CI GitHub Action](../.github/workflows/nixpkgs-vet.yml).
-
-## `ci/nixpkgs-vet/update-pinned-tool.sh`
-
-Updates the pinned [`nixpkgs-vet` tool](https://github.com/NixOS/nixpkgs-vet) in [`ci/nixpkgs-vet/pinned-version.txt`](./nixpkgs-vet/pinned-version.txt) to the latest [release](https://github.com/NixOS/nixpkgs-vet/releases).
-
-Each release contains a pre-built `x86_64-linux` version of the tool which is used by CI.
-
-This script currently needs to be called manually when the CI tooling needs to be updated.
-
-Why not just build the tooling right from the PRs Nixpkgs version?
-
-- Because it allows CI to check all PRs, even if they would break the CI tooling.
-- Because it makes the CI check very fast, since no Nix builds need to be done, even for mass rebuilds.
-- Because it improves security, since we don't have to build potentially untrusted code from PRs.
-  The tool only needs a very minimal Nix evaluation at runtime, which can work with [readonly-mode](https://nixos.org/manual/nix/stable/command-ref/opt-common.html#opt-readonly-mode) and [restrict-eval](https://nixos.org/manual/nix/stable/command-ref/conf-file.html#conf-restrict-eval).
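Not asserted by the README itself, but as orientation: a minimal sketch of how a pin from the new `pinned.json` can be consumed from Nix, mirroring what `ci/default.nix` does elsewhere in this commit (the `url`/`hash` attribute names come from that lock file):

```nix
let
  # pinned.json is the npins lock file that ci/update-pinned.sh maintains.
  pinned = (builtins.fromJSON (builtins.readFile ./pinned.json)).pins;

  # Fetch the pinned Nixpkgs; url and hash are taken straight from the lock file.
  nixpkgsSrc = fetchTarball {
    inherit (pinned.nixpkgs) url;
    sha256 = pinned.nixpkgs.hash;
  };
in
import nixpkgsSrc { }
```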
|
||||
|
|
|
@ -20,7 +20,7 @@ buildGoModule {
|
|||
})
|
||||
# Undoes part of the above PR: We don't want to require write access
|
||||
# to the repository, that's only needed for GitHub's native CODEOWNERS.
|
||||
# Furthermore, it removes an unneccessary check from the code
|
||||
# Furthermore, it removes an unnecessary check from the code
|
||||
# that breaks tokens generated for GitHub Apps.
|
||||
./permissions.patch
|
||||
# Allows setting a custom CODEOWNERS path using the OWNERS_FILE env var
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
let
|
||||
pinnedNixpkgs = builtins.fromJSON (builtins.readFile ./pinned-nixpkgs.json);
|
||||
pinned = (builtins.fromJSON (builtins.readFile ./pinned.json)).pins;
|
||||
in
|
||||
{
|
||||
system ? builtins.currentSystem,
|
||||
|
@ -10,8 +10,8 @@ let
|
|||
nixpkgs' =
|
||||
if nixpkgs == null then
|
||||
fetchTarball {
|
||||
url = "https://github.com/NixOS/nixpkgs/archive/${pinnedNixpkgs.rev}.tar.gz";
|
||||
sha256 = pinnedNixpkgs.sha256;
|
||||
inherit (pinned.nixpkgs) url;
|
||||
sha256 = pinned.nixpkgs.hash;
|
||||
}
|
||||
else
|
||||
nixpkgs;
|
||||
|
@ -25,9 +25,8 @@ let
|
|||
fmt =
|
||||
let
|
||||
treefmtNixSrc = fetchTarball {
|
||||
# Master at 2025-02-12
|
||||
url = "https://github.com/numtide/treefmt-nix/archive/4f09b473c936d41582dd744e19f34ec27592c5fd.tar.gz";
|
||||
sha256 = "051vh6raskrxw5k6jncm8zbk9fhbzgm1gxpq9gm5xw1b6wgbgcna";
|
||||
inherit (pinned.treefmt-nix) url;
|
||||
sha256 = pinned.treefmt-nix.hash;
|
||||
};
|
||||
treefmtEval = (import treefmtNixSrc).evalModule pkgs {
|
||||
# Important: The auto-rebase script uses `git filter-branch --tree-filter`,
|
||||
|
|
|
@ -25,6 +25,8 @@ runCommand "nixpkgs-vet"
|
|||
env.NIXPKGS_VET_NIX_PACKAGE = nix;
|
||||
}
|
||||
''
|
||||
export NIX_STATE_DIR=$(mktemp -d)
|
||||
|
||||
nixpkgs-vet --base ${filtered base} ${filtered head}
|
||||
|
||||
touch $out
|
||||
|
|
|
@ -61,9 +61,6 @@ trace "Done"
|
|||
trace -n "Merging base branch into the HEAD commit in $tmp/merged.. "
|
||||
git -C "$tmp/merged" merge -q --no-edit "$baseSha"
|
||||
trace -e "\e[34m$(git -C "$tmp/merged" rev-parse HEAD)\e[0m"
|
||||
trace -n "Reading pinned nixpkgs-vet version from pinned-version.txt.. "
|
||||
toolVersion=$(<"$tmp/merged/ci/nixpkgs-vet/pinned-version.txt")
|
||||
trace -e "\e[34m$toolVersion\e[0m"
|
||||
|
||||
trace "Running nixpkgs-vet.."
|
||||
nix-build ci -A nixpkgs-vet --argstr base "$tmp/base" --argstr head "$tmp/merged"
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
0.1.4
|
|
@ -1,22 +0,0 @@
|
|||
#!/usr/bin/env nix-shell
|
||||
#!nix-shell -i bash -p jq curl
|
||||
|
||||
set -o pipefail -o errexit -o nounset
|
||||
|
||||
trace() { echo >&2 "$@"; }
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
|
||||
repository=NixOS/nixpkgs-vet
|
||||
pin_file=$SCRIPT_DIR/pinned-version.txt
|
||||
|
||||
trace -n "Fetching latest release of $repository.. "
|
||||
latestRelease=$(curl -sSfL \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
https://api.github.com/repos/"$repository"/releases/latest)
|
||||
latestVersion=$(jq .tag_name -r <<< "$latestRelease")
|
||||
trace "$latestVersion"
|
||||
|
||||
trace "Updating $pin_file"
|
||||
echo "$latestVersion" > "$pin_file"
|
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"rev": "3d1f29646e4b57ed468d60f9d286cde23a8d1707",
|
||||
"sha256": "1wzvc9h9a6l9wyhzh892xb5x88kxmbzxb1k8s7fizyyw2q4nqw07"
|
||||
}
|
31
ci/pinned.json
Normal file
31
ci/pinned.json
Normal file
|
@ -0,0 +1,31 @@
|
|||
{
|
||||
"pins": {
|
||||
"nixpkgs": {
|
||||
"type": "Git",
|
||||
"repository": {
|
||||
"type": "GitHub",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs"
|
||||
},
|
||||
"branch": "nixpkgs-unstable",
|
||||
"submodules": false,
|
||||
"revision": "8ca7ec685bbee55d6dcb326abe23945c0806c39e",
|
||||
"url": "https://github.com/NixOS/nixpkgs/archive/8ca7ec685bbee55d6dcb326abe23945c0806c39e.tar.gz",
|
||||
"hash": "1hkxm871m66mjsc4acdki32qqnpgk3n6vi3zrzns2bwlwp6ivcjx"
|
||||
},
|
||||
"treefmt-nix": {
|
||||
"type": "Git",
|
||||
"repository": {
|
||||
"type": "GitHub",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix"
|
||||
},
|
||||
"branch": "main",
|
||||
"submodules": false,
|
||||
"revision": "1f3f7b784643d488ba4bf315638b2b0a4c5fb007",
|
||||
"url": "https://github.com/numtide/treefmt-nix/archive/1f3f7b784643d488ba4bf315638b2b0a4c5fb007.tar.gz",
|
||||
"hash": "13qisjalw9qvd6lkd9g8225r46j5wdjrp3zw6jrs81q2vxwdz37m"
|
||||
}
|
||||
},
|
||||
"version": 5
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
#!/usr/bin/env nix-shell
|
||||
#!nix-shell -i bash -p jq
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# https://stackoverflow.com/a/246128
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
|
||||
repo=https://github.com/nixos/nixpkgs
|
||||
branch=nixpkgs-unstable
|
||||
file=$SCRIPT_DIR/pinned-nixpkgs.json
|
||||
|
||||
defaultRev=$(git ls-remote "$repo" refs/heads/"$branch" | cut -f1)
|
||||
rev=${1:-$defaultRev}
|
||||
sha256=$(nix-prefetch-url --unpack "$repo/archive/$rev.tar.gz" --name source)
|
||||
|
||||
jq -n --arg rev "$rev" --arg sha256 "$sha256" '$ARGS.named' | tee /dev/stderr > $file
|
8
ci/update-pinned.sh
Executable file
8
ci/update-pinned.sh
Executable file
|
@ -0,0 +1,8 @@
|
|||
#!/usr/bin/env nix-shell
|
||||
#!nix-shell -i bash -p npins
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
npins --lock-file pinned.json update
|
|
@@ -40,6 +40,26 @@ If the `moduleNames` argument is omitted, `hasPkgConfigModules` will use `meta.p

:::

## `hasCmakeConfigModules` {#tester-hasCmakeConfigModules}

Checks whether a package exposes a given list of `*config.cmake` modules.
Note that the `moduleNames` used in CMake's `find_package` are case-sensitive.

:::{.example #ex-hascmakeconfigmodules}

# Check that `*config.cmake` modules are exposed using explicit module names

```nix
{
  passthru.tests.cmake-config = testers.hasCmakeConfigModules {
    package = finalAttrs.finalPackage;
    moduleNames = [ "Foo" ];
  };
}
```

:::

## `lycheeLinkCheck` {#tester-lycheeLinkCheck}

Check a packaged static site's links with the [`lychee` package](https://search.nixos.org/packages?show=lychee&type=packages&query=lychee).
@@ -195,6 +195,21 @@ Specifies the contents of the `go.sum` file and triggers rebuilds when it change

Defaults to `null`

## Versioned toolchains and builders {#ssec-go-toolchain-versions}

Besides `buildGoModule`, there are also versioned builders available that pin a specific Go version, like `buildGo124Module` for Go 1.24.
Similarly, versioned toolchains are available, like `go_1_24` for Go 1.24.
Both the builder and the toolchain of a given version will be removed as soon as that Go version reaches end of life.

As toolchain updates in nixpkgs cause mass rebuilds and must go through the staging cycle, it can take a while until a new Go minor version is available to consumers of nixpkgs.
If you want quicker access to the latest minor release, use the `go_latest` toolchain and the `buildGoLatestModule` builder.
To learn more about the Go maintenance and upgrade procedure in nixpkgs, check out the [Go toolchain/builder upgrade policy](https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/go/README.md#go-toolchainbuilder-upgrade-policy).

::: {.warning}
The use of `go_latest` and `buildGoLatestModule` is restricted within nixpkgs.
The [Go toolchain/builder upgrade policy](https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/go/README.md#go-toolchainbuilder-upgrade-policy) must be followed.
:::
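As a sketch of the builders described above (the package name, repository, and hashes are placeholders, and, per the warning, `buildGoModule` remains the default choice inside nixpkgs):

```nix
{
  lib,
  buildGoLatestModule,
  fetchFromGitHub,
}:

buildGoLatestModule rec {
  pname = "example-tool";
  version = "0.1.0";

  # Placeholder source; substitute the real repository and hashes.
  src = fetchFromGitHub {
    owner = "example";
    repo = "example-tool";
    rev = "v${version}";
    hash = lib.fakeHash;
  };

  # Hash of the vendored Go dependencies.
  vendorHash = lib.fakeHash;
}
```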

## Overriding `goModules` {#buildGoModule-goModules-override}

Overriding `<pkg>.goModules` by calling `goModules.overrideAttrs` is unsupported. Still, it is possible to override the `vendorHash` (`goModules`'s `outputHash`) and the `pre`/`post` hooks for both the build and patch phases of the primary and `goModules` derivation.
@@ -161,6 +161,27 @@ completely incompatible with packages from `haskellPackages`.

<!-- TODO(@maralorn) Link to package set generation docs in the contributors guide below. -->

### GHC Deprecation Policy {#ghc-deprecation-policy}

We remove GHC versions according to the following policy:

#### Major GHC versions {#major-ghc-deprecation}

We keep the following GHC major versions:

1. The current Stackage LTS as the default and all later major versions.
2. The two latest major versions older than our default.
3. The currently recommended GHCup version and all later major versions.

Older GHC versions might be kept longer if there are in-tree consumers. We will coordinate with the maintainers of those dependencies to find a way forward.

#### Minor GHC versions {#minor-ghc-deprecation}

Every major version has a default minor version. The default minor version will be updated as soon as viable without breakage.

Older minor versions for a supported major version will only be kept if they are the last supported version of a major Stackage LTS release.

<!-- Policy introduced here: https://discourse.nixos.org/t/nixpkgs-ghc-deprecation-policy-user-feedback-necessary/64153 -->

## `haskellPackages.mkDerivation` {#haskell-mkderivation}

Every haskell package set has its own haskell-aware `mkDerivation` which is used
@@ -1259,6 +1259,13 @@ as many tests should be enabled as possible. Failing tests can still be
a good indication that the package is not in a valid state.
:::

::: {.note}
We only want to test the functionality of a package. In particular, we are not
interested in coverage, formatting, and type checking. If pytest fails with
`unrecognized arguments: --cov`, add `pytest-cov-stub` to `nativeCheckInputs`
rather than `pytest-cov`.
:::
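A minimal sketch of a package following that note (name, source, and hashes are placeholders):

```nix
{
  lib,
  buildPythonPackage,
  fetchPypi,
  setuptools,
  pytestCheckHook,
  pytest-cov-stub,
}:

buildPythonPackage rec {
  pname = "example";
  version = "1.0";
  pyproject = true;

  # Placeholder source; substitute the real project and hash.
  src = fetchPypi {
    inherit pname version;
    hash = lib.fakeHash;
  };

  build-system = [ setuptools ];

  nativeCheckInputs = [
    pytestCheckHook
    # Accepts pytest-cov's `--cov` flags without doing coverage,
    # instead of pulling in pytest-cov itself.
    pytest-cov-stub
  ];
}
```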

#### Using pytest {#using-pytest}

Pytest is the most common test runner for python repositories. A trivial
@@ -605,8 +605,8 @@ In some projects, the Rust crate is not in the main Python source
directory. In such cases, the `cargoRoot` attribute can be used to
specify the crate's directory relative to `sourceRoot`. In the
following example, the crate is in `src/rust`, as specified in the
-`cargoRoot` attribute. Note that we also need to specify the correct
-path for `fetchCargoVendor`.
+`cargoRoot` attribute. Note that we also need to pass in `cargoRoot`
+to `fetchCargoVendor`.

```nix
{

@@ -627,8 +627,12 @@ buildPythonPackage rec {
  };

  cargoDeps = rustPlatform.fetchCargoVendor {
-    inherit pname version src;
-    sourceRoot = "${pname}-${version}/${cargoRoot}";
+    inherit
+      pname
+      version
+      src
+      cargoRoot
+      ;
    hash = "sha256-ctUt8maCjnGddKPf+Ii++wKsAXA1h+JM6zKQNXXwJqQ=";
  };
@ -40,9 +40,18 @@
|
|||
"ex-testEqualArrayOrMap-test-function-add-cowbell": [
|
||||
"index.html#ex-testEqualArrayOrMap-test-function-add-cowbell"
|
||||
],
|
||||
"ghc-deprecation-policy": [
|
||||
"index.html#ghc-deprecation-policy"
|
||||
],
|
||||
"inkscape-plugins": [
|
||||
"index.html#inkscape-plugins"
|
||||
],
|
||||
"major-ghc-deprecation": [
|
||||
"index.html#major-ghc-deprecation"
|
||||
],
|
||||
"minor-ghc-deprecation": [
|
||||
"index.html#minor-ghc-deprecation"
|
||||
],
|
||||
"neovim": [
|
||||
"index.html#neovim"
|
||||
],
|
||||
|
@ -1700,6 +1709,12 @@
|
|||
"ex-haspkgconfigmodules-explicitmodules": [
|
||||
"index.html#ex-haspkgconfigmodules-explicitmodules"
|
||||
],
|
||||
"tester-hasCmakeConfigModules": [
|
||||
"index.html#tester-hasCmakeConfigModules"
|
||||
],
|
||||
"ex-hascmakeconfigmodules": [
|
||||
"index.html#ex-hascmakeconfigmodules"
|
||||
],
|
||||
"tester-lycheeLinkCheck": [
|
||||
"index.html#tester-lycheeLinkCheck"
|
||||
],
|
||||
|
@ -3012,6 +3027,9 @@
|
|||
"ex-buildGoModule": [
|
||||
"index.html#ex-buildGoModule"
|
||||
],
|
||||
"ssec-go-toolchain-versions" : [
|
||||
"index.html#ssec-go-toolchain-versions"
|
||||
],
|
||||
"buildGoModule-goModules-override": [
|
||||
"index.html#buildGoModule-goModules-override"
|
||||
],
|
||||
|
|
|
@@ -28,6 +28,10 @@

- Applications linked against different Mesa versions than installed on the system should now work correctly going forward (however, applications against older Mesa, e.g. from Nixpkgs releases before 25.05, remain broken)
- Packages that used to depend on Mesa for libgbm or libdri should use `libgbm` or `dri-pkgconfig-stub` as inputs, respectively

- GNU Taler has been updated to version 1.0.
  This marks a significant milestone as the GNU Taler payment system is now available in Swiss Francs for individuals and businesses in Switzerland.
  For more details, see the [upstream release notes](https://www.taler.net/en/news/2025-01.html).

- OpenSSH has been updated from 9.9p2 to 10.0p2, dropping support for DSA keys and adding a new `ssh-auth` binary to handle user authentication in a different address space from unauthenticated sessions. See the [full changelog](https://www.openwall.com/lists/oss-security/2025/04/09/1) for more details.

- Emacs has been updated to 30.1.

@@ -226,6 +230,11 @@

- `buildGoModule` now supports a `goSum` attribute (`null` by default) to optionally provide a path to `go.sum` and correctly enable rebuilds when the file changes.

- The newly added aliases `go_latest` and `buildGoLatestModule` can be used to prevent packages like `gopls` from breaking whenever the default toolchain minor version is lagging behind.
  They can also be used _outside of nixpkgs_ to get fast access to new Go minor versions without having to wait for a staging cycle that will update the default builder/toolchain.

- A [policy documenting the details of Go toolchain and builder upgrades](https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/go/README.md#go-toolchainbuilder-upgrade-policy) in nixpkgs, as well as rules related to using non-default builders like `buildGo1xxModule` and `buildGoLatestModule`, has been added in-tree.

- top-level `playwright` now refers to the GitHub Microsoft/playwright package
  instead of the python tester launcher. You can still refer to the python
  launcher via `python3Packages.toPythonApplication python3Packages.playwright`
@@ -13,7 +13,10 @@

<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->

- The `boot.readOnlyNixStore` option has been removed. Control over bind mount options on `/nix/store` is now offered by the `boot.nixStoreMountOpts` option.
- The `offrss` package was removed due to lack of upstream maintenance since 2012. It's recommended for users to migrate to another RSS reader.

- The `base16-builder` node package has been removed due to lack of upstream maintenance.
- The `gentium` package now provides `Gentium-*.ttf` files, and not `GentiumPlus-*.ttf` files like before. The font identifiers `Gentium Plus*` are available in the `gentium-plus` package, and if you want to use the more recently updated package `gentium` [by sil](https://software.sil.org/gentium/), you should update your configuration files to use the `Gentium` font identifier.

## Other Notable Changes {#sec-nixpkgs-release-25.11-notable-changes}

@@ -21,6 +24,8 @@

- New hardening flags, `strictflexarrays1` and `strictflexarrays3`, were made available, corresponding to the gcc/clang options `-fstrict-flex-arrays=1` and `-fstrict-flex-arrays=3` respectively.

- Added a `rewriteURL` attribute to the nixpkgs `config`, to allow for rewriting the URLs downloaded by `fetchurl`.

## Nixpkgs Library {#sec-nixpkgs-release-25.11-lib}

<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@@ -599,6 +599,8 @@ Additional file types can be supported by setting the `unpackCmd` variable (see

The list of source files or directories to be unpacked or copied. One of these must be set. Note that if you use `srcs`, you should also set `sourceRoot` or `setSourceRoot`.

These should ideally actually be sources and licensed under a FLOSS license. If you have to use a binary upstream release or package non-free software, make sure you correctly mark your derivation as such in the [`sourceProvenance`](#var-meta-sourceProvenance) and [`license`](#sec-meta-license) fields of the [`meta`](#chap-meta) section.
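As an illustrative sketch (the URLs and hashes below are placeholders, not a real package), combining `srcs` with `sourceRoot` as described above looks roughly like this:

```nix
{
  lib,
  stdenv,
  fetchurl,
}:

stdenv.mkDerivation rec {
  pname = "example";
  version = "1.0";

  # Two source archives; unpackPhase unpacks both of them.
  srcs = [
    (fetchurl {
      url = "https://example.org/example-${version}.tar.gz";
      hash = lib.fakeHash;
    })
    (fetchurl {
      url = "https://example.org/example-docs-${version}.tar.gz";
      hash = lib.fakeHash;
    })
  ];

  # With several sources, tell the generic builder which unpacked directory to enter.
  sourceRoot = "example-${version}";
}
```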

##### `sourceRoot` {#var-stdenv-sourceRoot}

After unpacking all of `src` and `srcs`, if neither of `sourceRoot` and `setSourceRoot` are set, `unpackPhase` of the generic builder checks that the unpacking produced a single directory and moves the current working directory into it.
@ -1747,7 +1747,7 @@ rec {
|
|||
|
||||
/**
|
||||
Get the first of the `outputs` provided by the package, or the default.
|
||||
This function is alligned with `_overrideFirst()` from the `multiple-outputs.sh` setup hook.
|
||||
This function is aligned with `_overrideFirst()` from the `multiple-outputs.sh` setup hook.
|
||||
Like `getOutput`, the function is idempotent.
|
||||
|
||||
# Inputs
|
||||
|
|
|
@ -389,7 +389,7 @@ rec {
|
|||
|
||||
extensions = composeManyExtensions [ overlayA overlayB ];
|
||||
|
||||
# Caluculate the fixed point of all composed overlays.
|
||||
# Calculate the fixed point of all composed overlays.
|
||||
fixedpoint = lib.fix (lib.extends extensions original );
|
||||
|
||||
in fixedpoint
|
||||
|
|
|
@ -404,7 +404,7 @@ rec {
|
|||
```nix
|
||||
myType = mkOptionType {
|
||||
name = "myType";
|
||||
merge = mergeDefaultOption; # <- This line is redundant. It is the default aready.
|
||||
merge = mergeDefaultOption; # <- This line is redundant. It is the default already.
|
||||
};
|
||||
```
|
||||
|
||||
|
@ -470,7 +470,7 @@ rec {
|
|||
args@{
|
||||
message,
|
||||
# WARNING: the default merge function assumes that the definition is a valid (option) value. You MUST pass a merge function if the return value needs to be
|
||||
# - type checked beyond what .check does (which should be very litte; only on the value head; not attribute values, etc)
|
||||
# - type checked beyond what .check does (which should be very little; only on the value head; not attribute values, etc)
|
||||
# - if you want attribute values to be checked, or list items
|
||||
# - if you want coercedTo-like behavior to work
|
||||
merge ? loc: defs: (head defs).value,
|
||||
|
|
|
@ -54,7 +54,7 @@ let
|
|||
|
||||
missingGithubIds = lib.concatLists (lib.mapAttrsToList checkMaintainer lib.maintainers);
|
||||
|
||||
success = pkgs.runCommand "checked-maintainers-success" { } ">$out";
|
||||
success = pkgs.runCommand "checked-maintainers-success" { } "mkdir $out";
|
||||
|
||||
failure =
|
||||
pkgs.runCommand "checked-maintainers-failure"
|
||||
|
|
|
@ -28,5 +28,14 @@ let
|
|||
in
|
||||
pkgsBB.symlinkJoin {
|
||||
name = "nixpkgs-lib-tests";
|
||||
paths = map testWithNix nixVersions;
|
||||
paths = map testWithNix nixVersions ++ [
|
||||
(import ./maintainers.nix {
|
||||
inherit pkgs;
|
||||
lib = import ../.;
|
||||
})
|
||||
(import ./teams.nix {
|
||||
inherit pkgs;
|
||||
lib = import ../.;
|
||||
})
|
||||
];
|
||||
}
|
||||
|
|
|
@ -19,14 +19,6 @@ pkgs.runCommand "nixpkgs-lib-tests-nix-${nix.version}"
|
|||
buildInputs = [
|
||||
(import ./check-eval.nix)
|
||||
(import ./fetchers.nix)
|
||||
(import ./maintainers.nix {
|
||||
inherit pkgs;
|
||||
lib = import ../.;
|
||||
})
|
||||
(import ./teams.nix {
|
||||
inherit pkgs;
|
||||
lib = import ../.;
|
||||
})
|
||||
(import ../path/tests {
|
||||
inherit pkgs;
|
||||
})
|
||||
|
|
|
@ -75,7 +75,7 @@ let
|
|||
if pos == null then "" else " at ${pos.file}:${toString pos.line}:${toString pos.column}";
|
||||
|
||||
# Internal functor to help for migrating functor.wrapped to functor.payload.elemType
|
||||
# Note that individual attributes can be overriden if needed.
|
||||
# Note that individual attributes can be overridden if needed.
|
||||
elemTypeFunctor =
|
||||
name:
|
||||
{ elemType, ... }@payload:
|
||||
|
@ -1455,8 +1455,14 @@ let
|
|||
nestedTypes.coercedType = coercedType;
|
||||
nestedTypes.finalType = finalType;
|
||||
};
|
||||
/**
|
||||
Augment the given type with an additional type check function.
|
||||
|
||||
# Augment the given type with an additional type check function.
|
||||
:::{.warning}
|
||||
This function has some broken behavior see: [#396021](https://github.com/NixOS/nixpkgs/issues/396021)
|
||||
Fixing is not trivial, we appreciate any help!
|
||||
:::
|
||||
*/
|
||||
addCheck = elemType: check: elemType // { check = x: elemType.check x && check x; };
|
||||
|
||||
};
|
||||
|
|
|
@ -265,6 +265,7 @@
|
|||
};
|
||||
_6543 = {
|
||||
email = "6543@obermui.de";
|
||||
matrix = "@marddl:obermui.de";
|
||||
github = "6543";
|
||||
githubId = 24977596;
|
||||
name = "6543";
|
||||
|
@ -2538,6 +2539,13 @@
|
|||
githubId = 59499799;
|
||||
keys = [ { fingerprint = "A0FF 4F26 6B80 0B86 726D EA5B 3C23 C7BD 9945 2036"; } ];
|
||||
};
|
||||
averyvigolo = {
|
||||
email = "nixpkgs@averyv.me";
|
||||
github = "averyvigolo";
|
||||
githubId = 26379999;
|
||||
name = "Avery Vigolo";
|
||||
keys = [ { fingerprint = "9848 B216 BCBE 29BB 1C6A E0D5 7A4D F5A8 CDBD 49C7"; } ];
|
||||
};
|
||||
avh4 = {
|
||||
email = "gruen0aermel@gmail.com";
|
||||
github = "avh4";
|
||||
|
@ -2644,6 +2652,17 @@
|
|||
githubId = 9315;
|
||||
name = "Zhong Jianxin";
|
||||
};
|
||||
b-fein = {
|
||||
github = "b-fein";
|
||||
githubId = 64250573;
|
||||
name = "Benedikt Fein";
|
||||
};
|
||||
b-m-f = {
|
||||
email = "maximilian@sodawa.com";
|
||||
github = "b-m-f";
|
||||
githubId = 2843450;
|
||||
name = "Maximilian Ehlers";
|
||||
};
|
||||
b-rodrigues = {
|
||||
email = "bruno@brodrigues.co";
|
||||
github = "b-rodrigues";
|
||||
|
@ -2882,17 +2901,11 @@
|
|||
name = "Brandon Elam Barker";
|
||||
};
|
||||
bbenne10 = {
|
||||
email = "Bryan.Bennett@protonmail.com";
|
||||
email = "Bryan.Bennett+nixpkgs@proton.me";
|
||||
matrix = "@bryan.bennett:matrix.org";
|
||||
github = "bbenne10";
|
||||
githubId = 687376;
|
||||
name = "Bryan Bennett";
|
||||
keys = [
|
||||
{
|
||||
# compare with https://keybase.io/bbenne10
|
||||
fingerprint = "41EA 00B4 00F9 6970 1CB2 D3AF EF90 E3E9 8B8F 5C0B";
|
||||
}
|
||||
];
|
||||
};
|
||||
bbenno = {
|
||||
email = "nix@bbenno.com";
|
||||
|
@ -4745,12 +4758,6 @@
|
|||
githubId = 848609;
|
||||
name = "Michael Bishop";
|
||||
};
|
||||
clevor = {
|
||||
email = "myclevorname@gmail.com";
|
||||
github = "myclevorname";
|
||||
githubId = 140354451;
|
||||
name = "Samuel Connelly";
|
||||
};
|
||||
clkamp = {
|
||||
email = "c@lkamp.de";
|
||||
github = "clkamp";
|
||||
|
@ -7910,6 +7917,17 @@
|
|||
githubId = 11909469;
|
||||
name = "Fabian Geiselhart";
|
||||
};
|
||||
f4z3r = {
|
||||
email = "f4z3r-github@pm.me";
|
||||
name = "Jakob Beckmann";
|
||||
github = "f4z3r";
|
||||
githubId = 32326425;
|
||||
keys = [
|
||||
{
|
||||
fingerprint = "358A 6251 E2ED EDC1 9717 14A7 96A8 BA6E C871 2183";
|
||||
}
|
||||
];
|
||||
};
|
||||
fab = {
|
||||
email = "mail@fabian-affolter.ch";
|
||||
matrix = "@fabaff:matrix.org";
|
||||
|
@ -15214,6 +15232,12 @@
|
|||
github = "mariuskimmina";
|
||||
githubId = 38843153;
|
||||
};
|
||||
markasoftware = {
|
||||
name = "Mark Polyakov";
|
||||
email = "mark@markasoftware.com";
|
||||
github = "markasoftware";
|
||||
githubId = 6380084;
|
||||
};
|
||||
markbeep = {
|
||||
email = "mrkswrn@gmail.com";
|
||||
github = "markbeep";
|
||||
|
@ -15567,6 +15591,12 @@
|
|||
name = "Max Niederman";
|
||||
keys = [ { fingerprint = "1DE4 424D BF77 1192 5DC4 CF5E 9AED 8814 81D8 444E"; } ];
|
||||
};
|
||||
max06 = {
|
||||
email = "max06.net@outlook.com";
|
||||
github = "max06";
|
||||
githubId = 7556827;
|
||||
name = "Flo";
|
||||
};
|
||||
maxbrunet = {
|
||||
email = "max@brnt.mx";
|
||||
github = "maxbrunet";
|
||||
|
@ -16147,7 +16177,7 @@
|
|||
email = "mightyiampresence@gmail.com";
|
||||
github = "mightyiam";
|
||||
githubId = 635591;
|
||||
name = "Shahar Dawn Or";
|
||||
name = "Shahar \"Dawn\" Or";
|
||||
};
|
||||
mihaimaruseac = {
|
||||
email = "mihaimaruseac@gmail.com";
|
||||
|
@ -17127,6 +17157,12 @@
|
|||
githubId = 1131571;
|
||||
name = "naelstrof";
|
||||
};
|
||||
naggie = {
|
||||
name = "Cal Bryant";
|
||||
email = "callan.bryant@gmail.com";
|
||||
github = "naggie";
|
||||
githubId = 208440;
|
||||
};
|
||||
nagisa = {
|
||||
name = "Simonas Kazlauskas";
|
||||
email = "nixpkgs@kazlauskas.me";
|
||||
|
@ -17695,6 +17731,12 @@
|
|||
github = "nikstur";
|
||||
githubId = 61635709;
|
||||
};
|
||||
nilathedragon = {
|
||||
email = "nilathedragon@pm.me";
|
||||
name = "Nila The Dragon";
|
||||
github = "nilathedragon";
|
||||
githubId = 43315617;
|
||||
};
|
||||
nilp0inter = {
|
||||
email = "robertomartinezp@gmail.com";
|
||||
github = "nilp0inter";
|
||||
|
@ -17870,6 +17912,12 @@
|
|||
githubId = 810877;
|
||||
name = "Tom Doggett";
|
||||
};
|
||||
noiioiu = {
|
||||
github = "noiioiu";
|
||||
githubId = 151288161;
|
||||
name = "noiioiu";
|
||||
keys = [ { fingerprint = "99CC 06D6 1456 3689 CE75 58F3 BF51 F00D 0748 2A89"; } ];
|
||||
};
|
||||
noisersup = {
|
||||
email = "patryk@kwiatek.xyz";
|
||||
github = "noisersup";
|
||||
|
@ -18099,8 +18147,10 @@
|
|||
github = "numinit";
|
||||
githubId = 369111;
|
||||
keys = [
|
||||
# >=2025
|
||||
# >=2025, stays in one place
|
||||
{ fingerprint = "FD28 F9C9 81C5 D78E 56E8 8311 5C3E B94D 198F 1491"; }
|
||||
# >=2025, travels with me
|
||||
{ fingerprint = "C48F 475F 30A9 B192 3213 D5D5 C6E2 4809 77B2 F2F4"; }
|
||||
# <=2024
|
||||
{ fingerprint = "190B DA97 F616 DE35 6899 ED17 F819 F1AF 2FC1 C1FF"; }
|
||||
];
|
||||
|
@ -18666,6 +18716,12 @@
|
|||
github = "Oughie";
|
||||
githubId = 123173954;
|
||||
};
|
||||
OulipianSummer = {
|
||||
name = "Andrew Benbow";
|
||||
github = "OulipianSummer";
|
||||
githubId = 47955980;
|
||||
email = "abmurrow@duck.com";
|
||||
};
|
||||
outfoxxed = {
|
||||
name = "outfoxxed";
|
||||
email = "nixpkgs@outfoxxed.me";
|
||||
|
@ -19472,6 +19528,12 @@
|
|||
githubId = 7536431;
|
||||
name = "Jonas Fierlings";
|
||||
};
|
||||
pilz = {
|
||||
name = "Pilz";
|
||||
email = "nix@pilz.foo";
|
||||
github = "pilz0";
|
||||
githubId = 48645439;
|
||||
};
|
||||
pimeys = {
|
||||
email = "julius@nauk.io";
|
||||
github = "pimeys";
|
||||
|
@ -26599,13 +26661,6 @@
|
|||
githubId = 5185341;
|
||||
name = "Will Cohen";
|
||||
};
|
||||
williamvds = {
|
||||
email = "nixpkgs@williamvds.me";
|
||||
github = "williamvds";
|
||||
githubId = 26379999;
|
||||
name = "William Vigolo";
|
||||
keys = [ { fingerprint = "9848 B216 BCBE 29BB 1C6A E0D5 7A4D F5A8 CDBD 49C7"; } ];
|
||||
};
|
||||
willibutz = {
|
||||
email = "willibutz@posteo.de";
|
||||
github = "WilliButz";
|
||||
|
@ -27213,6 +27268,11 @@
|
|||
githubId = 4113027;
|
||||
name = "Jesper Geertsen Jonsson";
|
||||
};
|
||||
yethal = {
|
||||
github = "yethal";
|
||||
githubId = 26117918;
|
||||
name = "Yethal";
|
||||
};
|
||||
yinfeng = {
|
||||
email = "lin.yinfeng@outlook.com";
|
||||
github = "linyinfeng";
|
||||
|
|
|
@ -143,6 +143,7 @@ rtp.nvim,,,,,,mrcjkb
|
|||
rustaceanvim,,,,,,mrcjkb
|
||||
say,,,,,,
|
||||
serpent,,,,,,lockejan
|
||||
sofa,,,,,,f4z3r
|
||||
sqlite,,,,,,
|
||||
std._debug,,,,,,
|
||||
std.normalize,,,,,,
|
||||
|
|
|
|
@ -913,6 +913,7 @@ with lib.maintainers;
|
|||
ngi = {
|
||||
members = [
|
||||
eljamm
|
||||
ethancedwards8
|
||||
fricklerhandwerk
|
||||
wegank
|
||||
];
|
||||
|
|
|
@ -56,6 +56,12 @@
|
|||
"module-services-opencloud-basic-usage": [
|
||||
"index.html#module-services-opencloud-basic-usage"
|
||||
],
|
||||
"module-services-networking-pihole-ftl-configuration-inherit-dnsmasq": [
|
||||
"index.html#module-services-networking-pihole-ftl-configuration-inherit-dnsmasq"
|
||||
],
|
||||
"module-services-networking-pihole-ftl-configuration-multiple-interfaces": [
|
||||
"index.html#module-services-networking-pihole-ftl-configuration-multiple-interfaces"
|
||||
],
|
||||
"module-services-strfry": [
|
||||
"index.html#module-services-strfry"
|
||||
],
|
||||
|
@ -743,6 +749,15 @@
|
|||
"module-services-davis-basic-usage": [
|
||||
"index.html#module-services-davis-basic-usage"
|
||||
],
|
||||
"module-services-draupnir": [
|
||||
"index.html#module-services-draupnir"
|
||||
],
|
||||
"module-services-draupnir-setup": [
|
||||
"index.html#module-services-draupnir-setup"
|
||||
],
|
||||
"module-services-draupnir-setup-ems": [
|
||||
"index.html#module-services-draupnir-setup-ems"
|
||||
],
|
||||
"module-services-castopod": [
|
||||
"index.html#module-services-castopod"
|
||||
],
|
||||
|
@ -1448,6 +1463,21 @@
|
|||
"module-services-input-methods-kime": [
|
||||
"index.html#module-services-input-methods-kime"
|
||||
],
|
||||
"module-services-networking-pihole-ftl": [
|
||||
"index.html#module-services-networking-pihole-ftl"
|
||||
],
|
||||
"module-services-networking-pihole-ftl-administration": [
|
||||
"index.html#module-services-networking-pihole-ftl-administration"
|
||||
],
|
||||
"module-services-networking-pihole-ftl-configuration": [
|
||||
"index.html#module-services-networking-pihole-ftl-configuration"
|
||||
],
|
||||
"module-services-web-apps-pihole-web": [
|
||||
"index.html#module-services-web-apps-pihole-web"
|
||||
],
|
||||
"module-services-web-apps-pihole-web-configuration": [
|
||||
"index.html#module-services-web-apps-pihole-web-configuration"
|
||||
],
|
||||
"ch-profiles": [
|
||||
"index.html#ch-profiles"
|
||||
],
|
||||
|
|
|
@@ -13,13 +13,21 @@

- [gtklock](https://github.com/jovanlanik/gtklock), a GTK-based lockscreen for Wayland. Available as [programs.gtklock](#opt-programs.gtklock.enable).
- [Chrysalis](https://github.com/keyboardio/Chrysalis), a graphical configurator for Kaleidoscope-powered keyboards. Available as [programs.chrysalis](#opt-programs.chrysalis.enable).

- [Pi-hole](https://pi-hole.net/), a DNS sinkhole for advertisements based on Dnsmasq. Available as [services.pihole-ftl](#opt-services.pihole-ftl.enable), and [services.pihole-web](#opt-services.pihole-web.enable) for the web GUI and API.

- [FileBrowser](https://filebrowser.org/), a web application for managing and sharing files. Available as [services.filebrowser](#opt-services.filebrowser.enable).

- [LACT](https://github.com/ilya-zlobintsev/LACT), a GPU monitoring and configuration tool, can now be enabled through [services.lact.enable](#opt-services.lact.enable).
  Note that for LACT to work properly on AMD GPU systems, you need to enable [hardware.amdgpu.overdrive.enable](#opt-hardware.amdgpu.overdrive.enable).

- [Broadcast Box](https://github.com/Glimesh/broadcast-box), a WebRTC broadcast server. Available as [services.broadcast-box](options.html#opt-services.broadcast-box.enable).

- [Draupnir](https://github.com/the-draupnir-project/draupnir), a Matrix moderation bot. Available as [services.draupnir](#opt-services.draupnir.enable).

- [SuiteNumérique Docs](https://github.com/suitenumerique/docs), a collaborative note taking, wiki and documentation web platform and alternative to Notion or Outline. Available as [services.lasuite-docs](#opt-services.lasuite-docs.enable).

- [dwl](https://codeberg.org/dwl/dwl), a compact, hackable compositor for Wayland based on wlroots. Available as [programs.dwl](#opt-programs.dwl.enable).

## Backward Incompatibilities {#sec-release-25.11-incompatibilities}

<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->

@@ -36,5 +44,7 @@

- `services.clamsmtp` is unmaintained and was removed from Nixpkgs.

- `services.dnscrypt-proxy2` gains a `package` option to specify the dnscrypt-proxy package to use.

- `amdgpu` kernel driver overdrive mode can now be enabled by setting [hardware.amdgpu.overdrive.enable](#opt-hardware.amdgpu.overdrive.enable) and customized through [hardware.amdgpu.overdrive.ppfeaturemask](#opt-hardware.amdgpu.overdrive.ppfeaturemask).
  This allows for fine-grained control over the GPU's performance and may be required by overclocking software like Corectrl and Lact. These new options replace old options such as {option}`programs.corectrl.gpuOverclock.enable` and {option}`programs.tuxclocker.enableAMD`.
|
|
@ -45,7 +45,7 @@
|
|||
|
||||
Documentation rendered as AsciiDoc. This is useful for e.g. man pages.
|
||||
|
||||
> Note: NixOS itself uses this ouput to to build the configuration.nix man page"
|
||||
> Note: NixOS itself uses this output to to build the configuration.nix man page"
|
||||
|
||||
## optionsNix
|
||||
|
||||
|
@ -59,7 +59,7 @@
|
|||
let
|
||||
# Evaluate a NixOS configuration
|
||||
eval = import (pkgs.path + "/nixos/lib/eval-config.nix") {
|
||||
# Overriden explicitly here, this would include all modules from NixOS otherwise.
|
||||
# Overridden explicitly here, this would include all modules from NixOS otherwise.
|
||||
# See: docs of eval-config.nix for more details
|
||||
baseModules = [];
|
||||
modules = [
|
||||
|
|
|
@ -15,7 +15,6 @@ let
|
|||
inherit (lib)
|
||||
any
|
||||
concatMap
|
||||
filterOverrides
|
||||
isList
|
||||
literalExpression
|
||||
mergeEqualOption
|
||||
|
@ -56,13 +55,10 @@ rec {
|
|||
name = "systemd option";
|
||||
merge =
|
||||
loc: defs:
|
||||
let
|
||||
defs' = filterOverrides defs;
|
||||
in
|
||||
if any (def: isList def.value) defs' then
|
||||
concatMap (def: toList def.value) defs'
|
||||
if any (def: isList def.value) defs then
|
||||
concatMap (def: toList def.value) defs
|
||||
else
|
||||
mergeEqualOption loc defs';
|
||||
mergeEqualOption loc defs;
|
||||
};
|
||||
|
||||
sharedOptions = {
|
||||
|
|
|
@ -3,15 +3,15 @@
|
|||
let
|
||||
inherit (lib)
|
||||
attrNames
|
||||
concatMap
|
||||
concatMapAttrs
|
||||
concatMapStrings
|
||||
flip
|
||||
forEach
|
||||
head
|
||||
listToAttrs
|
||||
mkDefault
|
||||
mkOption
|
||||
nameValuePair
|
||||
optionalAttrs
|
||||
optionalString
|
||||
range
|
||||
toLower
|
||||
|
@ -91,23 +91,22 @@ let
|
|||
# interfaces, use the IP address corresponding to
|
||||
# the first interface (i.e. the first network in its
|
||||
# virtualisation.vlans option).
|
||||
networking.extraHosts = flip concatMapStrings (attrNames nodes) (
|
||||
m':
|
||||
networking.hosts = concatMapAttrs (
|
||||
name: config:
|
||||
let
|
||||
config = nodes.${m'};
|
||||
hostnames =
|
||||
optionalString (
|
||||
config.networking.domain != null
|
||||
) "${config.networking.hostName}.${config.networking.domain} "
|
||||
+ "${config.networking.hostName}\n";
|
||||
in
|
||||
optionalString (
|
||||
config.networking.primaryIPAddress != ""
|
||||
) "${config.networking.primaryIPAddress} ${hostnames}"
|
||||
+ optionalString (config.networking.primaryIPv6Address != "") (
|
||||
"${config.networking.primaryIPv6Address} ${hostnames}"
|
||||
)
|
||||
);
|
||||
optionalAttrs (config.networking.primaryIPAddress != "") {
|
||||
"${config.networking.primaryIPAddress}" = [ hostnames ];
|
||||
}
|
||||
// optionalAttrs (config.networking.primaryIPv6Address != "") {
|
||||
"${config.networking.primaryIPv6Address}" = [ hostnames ];
|
||||
}
|
||||
) nodes;
|
||||
|
||||
virtualisation.qemu.options = qemuOptions;
|
||||
boot.initrd.services.udev.rules = concatMapStrings (x: x + "\n") udevRules;
|
||||
|
@ -130,7 +129,7 @@ let
|
|||
virtualisation.test.nodeName = mkOption {
|
||||
internal = true;
|
||||
default = name;
|
||||
# We need to force this in specilisations, otherwise it'd be
|
||||
# We need to force this in specialisations, otherwise it'd be
|
||||
# readOnly = true;
|
||||
description = ''
|
||||
The `name` in `nodes.<name>`; stable across `specialisations`.
|
||||
|
|
|
@ -60,7 +60,7 @@ let
|
|||
inherit (eval) pkgs;
|
||||
|
||||
excludedTestOptions = [
|
||||
# We cannot evluate _module.args, as it is used during the computation
|
||||
# We cannot evaluate _module.args, as it is used during the computation
|
||||
# of the modules list.
|
||||
"_module.args"
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ let
|
|||
checkType = x: lib.isBool x || lib.isString x || lib.isInt x || x == null;
|
||||
in
|
||||
checkType val || (val._type or "" == "override" && checkType val.content);
|
||||
merge = loc: defs: lib.mergeOneOption loc (lib.filterOverrides defs);
|
||||
merge = loc: defs: lib.mergeOneOption loc defs;
|
||||
};
|
||||
|
||||
in
|
||||
|
@ -22,9 +22,7 @@ in
|
|||
type =
|
||||
let
|
||||
highestValueType = lib.types.ints.unsigned // {
|
||||
merge =
|
||||
loc: defs:
|
||||
lib.foldl (a: b: if b.value == null then null else lib.max a b.value) 0 (lib.filterOverrides defs);
|
||||
merge = loc: defs: lib.foldl (a: b: if b.value == null then null else lib.max a b.value) 0 defs;
|
||||
};
|
||||
in
|
||||
lib.types.submodule {
|
||||
|
|
|
@ -49,6 +49,7 @@ if (-e "/etc/nixos-generate-config.conf") {
|
|||
$rootDir = File::Spec->rel2abs($rootDir); # resolve absolute path
|
||||
}
|
||||
$kernel = $cfg->val("Defaults", "Kernel") // $kernel;
|
||||
$flake = $cfg->val("Defaults", "Flake") // $flake;
|
||||
}
|
||||
|
||||
for (my $n = 0; $n < scalar @ARGV; $n++) {
|
||||
|
|
|
@ -273,7 +273,7 @@ in
|
|||
caddy = 239;
|
||||
taskd = 240;
|
||||
# factorio = 241; # DynamicUser = true
|
||||
# emby = 242; # unusued, removed 2019-05-01
|
||||
# emby = 242; # unused, removed 2019-05-01
|
||||
#graylog = 243;# dynamically allocated as of 2021-09-03
|
||||
sniproxy = 244;
|
||||
nzbget = 245;
|
||||
|
@ -371,7 +371,7 @@ in
|
|||
# system user or group of the same id in someone else's NixOS.
|
||||
# This could break their system and make that person upset for a whole day.
|
||||
#
|
||||
# Sidenote: the default is defined in `shadow` module[2], and the relavent change
|
||||
# Sidenote: the default is defined in `shadow` module[2], and the relevant change
|
||||
# was made way back in 2014[3].
|
||||
#
|
||||
# [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number)
|
||||
|
@ -700,7 +700,7 @@ in
|
|||
# system user or group of the same id in someone else's NixOS.
|
||||
# This could break their system and make that person upset for a whole day.
|
||||
#
|
||||
# Sidenote: the default is defined in `shadow` module[2], and the relavent change
|
||||
# Sidenote: the default is defined in `shadow` module[2], and the relevant change
|
||||
# was made way back in 2014[3].
|
||||
#
|
||||
# [1]: https://man7.org/linux/man-pages/man5/login.defs.5.html#:~:text=SYS_UID_MAX%20(number)%2C%20SYS_UID_MIN%20(number)
|
||||
|
|
|
@ -332,6 +332,7 @@
|
|||
./programs/vivid.nix
|
||||
./programs/wavemon.nix
|
||||
./programs/wayland/cardboard.nix
|
||||
./programs/wayland/dwl.nix
|
||||
./programs/wayland/gtklock.nix
|
||||
./programs/wayland/hyprland.nix
|
||||
./programs/wayland/hyprlock.nix
|
||||
|
@ -757,6 +758,7 @@
|
|||
./services/matrix/conduit.nix
|
||||
./services/matrix/continuwuity.nix
|
||||
./services/matrix/dendrite.nix
|
||||
./services/matrix/draupnir.nix
|
||||
./services/matrix/hebbot.nix
|
||||
./services/matrix/hookshot.nix
|
||||
./services/matrix/lk-jwt-service.nix
|
||||
|
@ -1268,6 +1270,7 @@
|
|||
./services/networking/pdnsd.nix
|
||||
./services/networking/peroxide.nix
|
||||
./services/networking/picosnitch.nix
|
||||
./services/networking/pihole-ftl.nix
|
||||
./services/networking/pixiecore.nix
|
||||
./services/networking/pleroma.nix
|
||||
./services/networking/powerdns.nix
|
||||
|
@ -1490,6 +1493,7 @@
|
|||
./services/ttys/getty.nix
|
||||
./services/ttys/gpm.nix
|
||||
./services/ttys/kmscon.nix
|
||||
./services/video/broadcast-box.nix
|
||||
./services/video/epgstation/default.nix
|
||||
./services/video/frigate.nix
|
||||
./services/video/go2rtc/default.nix
|
||||
|
@ -1533,6 +1537,7 @@
|
|||
./services/web-apps/documize.nix
|
||||
./services/web-apps/dokuwiki.nix
|
||||
./services/web-apps/dolibarr.nix
|
||||
./services/web-apps/drupal.nix
|
||||
./services/web-apps/echoip.nix
|
||||
./services/web-apps/eintopf.nix
|
||||
./services/web-apps/engelsystem.nix
|
||||
|
@ -1628,6 +1633,7 @@
|
|||
./services/web-apps/photoprism.nix
|
||||
./services/web-apps/phylactery.nix
|
||||
./services/web-apps/pict-rs.nix
|
||||
./services/web-apps/pihole-web.nix
|
||||
./services/web-apps/pingvin-share.nix
|
||||
./services/web-apps/pixelfed.nix
|
||||
./services/web-apps/plantuml-server.nix
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# This profile sets up a sytem for image based appliance usage. An appliance is
|
||||
# This profile sets up a system for image based appliance usage. An appliance is
|
||||
# installed as an image, cannot be re-built, has no Nix available, and is
|
||||
# generally not meant for interactive use. Updates to such an appliance are
|
||||
# handled by updating whole partition images via a tool like systemd-sysupdate.
|
||||
|
|
|
@ -34,6 +34,12 @@ in
|
|||
capabilities = "cap_sys_admin+ep";
|
||||
source = "${package}/bin/gsr-kms-server";
|
||||
};
|
||||
security.wrappers."gpu-screen-recorder" = {
|
||||
owner = "root";
|
||||
group = "root";
|
||||
capabilities = "cap_sys_nice+ep";
|
||||
source = "${package}/bin/gpu-screen-recorder";
|
||||
};
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ timschumi ];
|
||||
|
|
|
@ -90,7 +90,7 @@ in
|
|||
}
|
||||
'';
|
||||
example.require-ipfs.paths = [ "/ipfs" ];
|
||||
example.require-ipfs.onFeatures = [ "ifps" ];
|
||||
example.require-ipfs.onFeatures = [ "ipfs" ];
|
||||
};
|
||||
extraWrapperArgs = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
|
|
|
@ -45,7 +45,7 @@ in
|
|||
# software rendering to implement GLX (OpenGL on Xorg).
|
||||
# However, just building TurboVNC with support for that is not enough
|
||||
# (it only takes care of the X server side part of OpenGL);
|
||||
# the indiviudual applications (e.g. `glxgears`) also need to directly load
|
||||
# the individual applications (e.g. `glxgears`) also need to directly load
|
||||
# the OpenGL libs.
|
||||
# Thus, this creates `/run/opengl-driver` populated by Mesa so that the applications
|
||||
# can find the llvmpipe `swrast.so` software rendering DRI lib via `libglvnd`.
|
||||
|
|
|
@ -59,5 +59,5 @@ in
|
|||
];
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ blackheaven ];
|
||||
meta.maintainers = with lib.maintainers; [ gdifolco ];
|
||||
}
|
||||
|
|
104
nixos/modules/programs/wayland/dwl.nix
Normal file
|
@ -0,0 +1,104 @@
|
|||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
cfg = config.programs.dwl;
|
||||
in
|
||||
{
|
||||
options.programs.dwl = {
|
||||
enable = lib.mkEnableOption ''
|
||||
Dwl is a compact, hackable compositor for Wayland based on wlroots.
|
||||
You can manually launch Dwl by executing "exec dwl" on a TTY.
|
||||
'';
|
||||
|
||||
package = lib.mkPackageOption pkgs "dwl" {
|
||||
example = ''
|
||||
# Let's apply the bar patch from:
|
||||
# https://codeberg.org/dwl/dwl-patches/src/branch/main/patches/bar
|
||||
(pkgs.dwl.override {
|
||||
configH = ./dwl-config.h;
|
||||
}).overrideAttrs (oldAttrs: {
|
||||
buildInputs =
|
||||
oldAttrs.buildInputs or []
|
||||
++ [
|
||||
pkgs.libdrm
|
||||
pkgs.fcft
|
||||
];
|
||||
patches = oldAttrs.patches or [] ++ [
|
||||
./bar-0.7.patch
|
||||
];
|
||||
});
|
||||
'';
|
||||
};
|
||||
|
||||
extraSessionCommands = lib.mkOption {
|
||||
default = "";
|
||||
type = lib.types.lines;
|
||||
description = ''
|
||||
Shell commands executed just before dwl is started.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
# Create systemd target for dwl session
|
||||
systemd.user.targets.dwl-session = {
|
||||
description = "dwl compositor session";
|
||||
documentation = [ "man:systemd.special(7)" ];
|
||||
bindsTo = [ "graphical-session.target" ];
|
||||
wants = [ "graphical-session-pre.target" ];
|
||||
after = [ "graphical-session-pre.target" ];
|
||||
};
|
||||
|
||||
# Create wrapper script for dwl
|
||||
environment.etc."xdg/dwl-session" = {
|
||||
text = ''
|
||||
#!${pkgs.runtimeShell}
|
||||
# Import environment variables
|
||||
${cfg.extraSessionCommands}
|
||||
# Setup systemd user environment
|
||||
systemctl --user import-environment DISPLAY WAYLAND_DISPLAY
|
||||
systemctl --user start dwl-session.target
|
||||
# Start dwl
|
||||
exec ${lib.getExe cfg.package}
|
||||
'';
|
||||
mode = "0755"; # Make it executable
|
||||
};
|
||||
|
||||
# Create desktop entry for display managers
|
||||
services.displayManager.sessionPackages =
|
||||
let
|
||||
dwlDesktopFile = pkgs.writeTextFile {
|
||||
name = "dwl-desktop-entry";
|
||||
destination = "/share/wayland-sessions/dwl.desktop";
|
||||
text = ''
|
||||
[Desktop Entry]
|
||||
Name=dwl
|
||||
Comment=Dynamic window manager for Wayland
|
||||
Exec=/etc/xdg/dwl-session
|
||||
Type=Application
|
||||
'';
|
||||
};
|
||||
|
||||
dwlSession = pkgs.symlinkJoin {
|
||||
name = "dwl-session";
|
||||
paths = [ dwlDesktopFile ];
|
||||
passthru.providedSessions = [ "dwl" ];
|
||||
};
|
||||
in
|
||||
[ dwlSession ];
|
||||
|
||||
# Configure XDG portal for dwl (minimal configuration)
|
||||
xdg.portal.config.dwl.default = lib.mkDefault [
|
||||
"wlr"
|
||||
"gtk"
|
||||
];
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ gurjaka ];
|
||||
}
|
|
@ -76,7 +76,7 @@ in
|
|||
example = lib.literalExpression ''
|
||||
{
|
||||
foo = ./foo;
|
||||
bar = pkgs.bar;
|
||||
inherit (pkgs.yaziPlugins) bar;
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
@ -97,7 +97,7 @@ in
|
|||
example = lib.literalExpression ''
|
||||
{
|
||||
foo = ./foo;
|
||||
bar = pkgs.bar;
|
||||
inherit (pkgs.yaziPlugins) bar;
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
|
|
@ -25,7 +25,7 @@ let
|
|||
in
|
||||
{
|
||||
pg_dump_command =
|
||||
if d.name == "all" then
|
||||
if d.name == "all" && (!(d ? format) || isNull d.format) then
|
||||
"${as_user}${postgresql}/bin/pg_dumpall"
|
||||
else
|
||||
"${as_user}${postgresql}/bin/pg_dump";
|
||||
|
|
|
@ -139,7 +139,7 @@ let
|
|||
[
|
||||
(yamlFormat.generate "helm-chart-manifest-${name}.yaml" (mkHelmChartCR name value))
|
||||
]
|
||||
# alternate the YAML doc seperator (---) and extraDeploy manifests to create
|
||||
# alternate the YAML doc separator (---) and extraDeploy manifests to create
|
||||
# multi document YAMLs
|
||||
++ (lib.concatMap (x: [
|
||||
yamlDocSeparator
|
||||
|
|
|
@ -365,9 +365,12 @@ in
|
|||
keyFile = mkDefault key;
|
||||
trustedCaFile = mkDefault caCert;
|
||||
};
|
||||
networking.extraHosts = mkIf (config.services.etcd.enable) ''
|
||||
127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
|
||||
'';
|
||||
networking.hosts = mkIf (config.services.etcd.enable) {
|
||||
"127.0.0.1" = [
|
||||
"etcd.${top.addons.dns.clusterDomain}"
|
||||
"etcd.local"
|
||||
];
|
||||
};
|
||||
|
||||
services.flannel = with cfg.certs.flannelClient; {
|
||||
kubeconfig = top.lib.mkKubeConfig "flannel" {
|
||||
|
|
|
@ -11,13 +11,7 @@ in
|
|||
{
|
||||
options = {
|
||||
services.jenkins = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable the jenkins continuous integration server.
|
||||
'';
|
||||
};
|
||||
enable = lib.mkEnableOption "Jenkins, a continuous integration server";
|
||||
|
||||
user = lib.mkOption {
|
||||
default = "jenkins";
|
||||
|
@ -89,11 +83,13 @@ in
|
|||
|
||||
package = lib.mkPackageOption pkgs "jenkins" { };
|
||||
|
||||
javaPackage = lib.mkPackageOption pkgs "jdk21" { };
|
||||
|
||||
packages = lib.mkOption {
|
||||
default = [
|
||||
pkgs.stdenv
|
||||
pkgs.git
|
||||
pkgs.jdk17
|
||||
pkgs.jdk21
|
||||
config.programs.ssh.package
|
||||
pkgs.nix
|
||||
];
|
||||
|
@ -239,7 +235,7 @@ in
|
|||
|
||||
# For reference: https://wiki.jenkins.io/display/JENKINS/JenkinsLinuxStartupScript
|
||||
script = ''
|
||||
${pkgs.jdk17}/bin/java ${lib.concatStringsSep " " cfg.extraJavaOptions} -jar ${cfg.package}/webapps/jenkins.war --httpListenAddress=${cfg.listenAddress} \
|
||||
${cfg.javaPackage}/bin/java ${lib.concatStringsSep " " cfg.extraJavaOptions} -jar ${cfg.package}/webapps/jenkins.war --httpListenAddress=${cfg.listenAddress} \
|
||||
--httpPort=${toString cfg.port} \
|
||||
--prefix=${cfg.prefix} \
|
||||
-Djava.awt.headless=true \
|
||||
|
|
|
@ -143,7 +143,7 @@ in
|
|||
lib.mkDefault (json.generate "bonsai_tree.json" (filterNulls cfg.settings));
|
||||
|
||||
# bonsaid is controlled by bonsaictl, so place the latter in the environment by default.
|
||||
# bonsaictl is typically invoked by scripts or a DE so this isn't strictly necesssary,
|
||||
# bonsaictl is typically invoked by scripts or a DE so this isn't strictly necessary,
|
||||
# but it's helpful while administering the service generally.
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
|
|
|
@ -35,6 +35,7 @@ libeufinComponent:
|
|||
cfg.settings."libeufin-${libeufinComponent}db-postgres".CONFIG;
|
||||
|
||||
bankPort = cfg.settings."${if isNexus then "nexus-httpd" else "libeufin-bank"}".PORT;
|
||||
bankHost = lib.elemAt (lib.splitString "/" cfg.settings.libeufin-bank.BASE_URL) 2;
|
||||
in
|
||||
lib.mkIf cfg.enable {
|
||||
services.libeufin.settings = cfg.settings;
|
||||
|
@ -82,7 +83,7 @@ libeufinComponent:
|
|||
args = lib.cli.toGNUCommandLineShell { } {
|
||||
c = configFile;
|
||||
inherit (account) username password name;
|
||||
payto_uri = "payto://x-taler-bank/bank:${toString bankPort}/${account.username}?receiver-name=${account.name}";
|
||||
payto_uri = "payto://x-taler-bank/${bankHost}/${account.username}?receiver-name=${account.name}";
|
||||
exchange = lib.toLower account.username == "exchange";
|
||||
};
|
||||
in
|
||||
|
|
|
@ -51,7 +51,7 @@ in
|
|||
(lib.genAttrs (map (n: "taler-${talerComponent}-${n}") services) (name: {
|
||||
serviceConfig = {
|
||||
DynamicUser = true;
|
||||
User = name;
|
||||
User = dbName;
|
||||
Group = groupName;
|
||||
ExecStart = toString [
|
||||
(lib.getExe' cfg.package name)
|
||||
|
@ -85,6 +85,7 @@ in
|
|||
Type = "oneshot";
|
||||
DynamicUser = true;
|
||||
User = dbName;
|
||||
Group = groupName;
|
||||
Restart = "on-failure";
|
||||
RestartSec = "5s";
|
||||
};
|
||||
|
@ -116,7 +117,7 @@ in
|
|||
services.postgresql = {
|
||||
enable = true;
|
||||
ensureDatabases = [ dbName ];
|
||||
ensureUsers = map (service: { name = "taler-${talerComponent}-${service}"; }) servicesDB ++ [
|
||||
ensureUsers = [
|
||||
{
|
||||
name = dbName;
|
||||
ensureDBOwnership = true;
|
||||
|
|
|
@ -25,6 +25,8 @@ let
|
|||
"secmod-eddsa"
|
||||
"secmod-rsa"
|
||||
];
|
||||
|
||||
configFile = config.environment.etc."taler/taler.conf".source;
|
||||
in
|
||||
|
||||
{
|
||||
|
@ -44,11 +46,19 @@ in
|
|||
options = {
|
||||
# TODO: do we want this to be a sub-attribute or only define the exchange set of options here
|
||||
exchange = {
|
||||
AML_THRESHOLD = lib.mkOption {
|
||||
CURRENCY = lib.mkOption {
|
||||
type = lib.types.nonEmptyStr;
|
||||
description = ''
|
||||
The currency which the exchange will operate with. This cannot be changed later.
|
||||
'';
|
||||
};
|
||||
CURRENCY_ROUND_UNIT = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "${cfgTaler.settings.taler.CURRENCY}:1000000";
|
||||
defaultText = "1000000 in {option}`CURRENCY`";
|
||||
description = "Monthly transaction volume until an account is considered suspicious and flagged for AML review.";
|
||||
default = "${cfg.settings.exchange.CURRENCY}:0.01";
|
||||
defaultText = "0.01 in {option}`CURRENCY`";
|
||||
description = ''
|
||||
Smallest amount in this currency that can be transferred using the underlying RTGS. For example: "EUR:0.01" or "JPY:1"
|
||||
'';
|
||||
};
|
||||
DB = lib.mkOption {
|
||||
type = lib.types.enum [ "postgres" ];
|
||||
|
@ -131,24 +141,8 @@ in
|
|||
after = [ "taler-exchange-httpd.service" ];
|
||||
};
|
||||
|
||||
# Taken from https://docs.taler.net/taler-exchange-manual.html#exchange-database-setup
|
||||
# TODO: Why does aggregator need DELETE?
|
||||
systemd.services."taler-${talerComponent}-dbinit".script =
|
||||
let
|
||||
deletePerm = name: lib.optionalString (name == "aggregator") ",DELETE";
|
||||
dbScript = pkgs.writers.writeText "taler-exchange-db-permissions.sql" (
|
||||
lib.pipe servicesDB [
|
||||
(map (name: ''
|
||||
GRANT SELECT,INSERT,UPDATE${deletePerm name} ON ALL TABLES IN SCHEMA exchange TO "taler-exchange-${name}";
|
||||
GRANT USAGE ON SCHEMA exchange TO "taler-exchange-${name}";
|
||||
''))
|
||||
lib.concatStrings
|
||||
]
|
||||
);
|
||||
in
|
||||
''
|
||||
${lib.getExe' cfg.package "taler-exchange-dbinit"}
|
||||
psql -U taler-exchange-httpd -f ${dbScript}
|
||||
'';
|
||||
systemd.services."taler-${talerComponent}-dbinit".script = ''
|
||||
${lib.getExe' cfg.package "taler-exchange-dbinit"} -c ${configFile}
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
|
|
@ -17,8 +17,10 @@ let
|
|||
"webhook"
|
||||
"wirewatch"
|
||||
"depositcheck"
|
||||
"exchange"
|
||||
"exchangekeyupdate"
|
||||
];
|
||||
|
||||
configFile = config.environment.etc."taler/taler.conf".source;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
|
@ -88,21 +90,8 @@ in
|
|||
path = [ cfg.package ];
|
||||
};
|
||||
|
||||
systemd.services."taler-${talerComponent}-dbinit".script =
|
||||
let
|
||||
# NOTE: not documented, but is necessary
|
||||
dbScript = pkgs.writers.writeText "taler-merchant-db-permissions.sql" (
|
||||
lib.concatStrings (
|
||||
map (name: ''
|
||||
GRANT SELECT,INSERT,UPDATE,DELETE ON ALL TABLES IN SCHEMA merchant TO "taler-merchant-${name}";
|
||||
GRANT USAGE ON SCHEMA merchant TO "taler-merchant-${name}";
|
||||
'') servicesDB
|
||||
)
|
||||
);
|
||||
in
|
||||
''
|
||||
${lib.getExe' cfg.package "taler-merchant-dbinit"}
|
||||
psql -U taler-${talerComponent}-httpd -f ${dbScript}
|
||||
'';
|
||||
systemd.services."taler-${talerComponent}-dbinit".script = ''
|
||||
${lib.getExe' cfg.package "taler-merchant-dbinit"} -c ${configFile}
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
|
|
@ -176,7 +176,7 @@ in
|
|||
# need to be writeable, so we can't just point at the ones in the nix
|
||||
# store. Instead we take the approach of copying them out of the store
|
||||
# on first run. If `bookarch` already exists, we assume the rest of the
|
||||
# files do as well, and copy nothing -- otherwise we risk ovewriting
|
||||
# files do as well, and copy nothing -- otherwise we risk overwriting
|
||||
# server state information every time the server is upgraded.
|
||||
preStart = ''
|
||||
if [ ! -e "${cfg.stateDir}"/bookarch ]; then
|
||||
|
|
|
@ -201,7 +201,7 @@ let
|
|||
# the old service and then starts the new service after config updates.
|
||||
# Since we use path-based activation[1] here, the service unit will
|
||||
# immediately[2] be started by the path unit. Probably that start is
|
||||
# before config updates, whcih causes the service unit to use the old
|
||||
# before config updates, which causes the service unit to use the old
|
||||
# config after nixos-rebuild switch. Setting stopIfChanged to false works
|
||||
# around this issue by restarting the service after config updates.
|
||||
# [0]: https://nixos.org/manual/nixos/unstable/#sec-switching-systems
|
||||
|
|
|
@ -23,8 +23,8 @@ let
|
|||
;
|
||||
|
||||
finalPackage = cfg.package.overridePythonAttrs (oldAttrs: {
|
||||
propagatedBuildInputs =
|
||||
oldAttrs.propagatedBuildInputs
|
||||
dependencies =
|
||||
oldAttrs.dependencies
|
||||
# for audio enhancements like auto-gain, noise suppression
|
||||
++ cfg.package.optional-dependencies.webrtc
|
||||
# vad is currently optional, because it is broken on aarch64-linux
|
||||
|
|
|
@ -13,7 +13,7 @@ let
|
|||
|
||||
haveAliases = cfg.postmasterAlias != "" || cfg.rootAlias != "" || cfg.extraAliases != "";
|
||||
haveCanonical = cfg.canonical != "";
|
||||
haveTransport = cfg.transport != "" || (cfg.enableSlowDomains && cfg.slowDomains != [ ]);
|
||||
haveTransport = cfg.transport != "";
|
||||
haveVirtual = cfg.virtual != "";
|
||||
haveLocalRecipients = cfg.localRecipients != null;
|
||||
|
||||
|
@ -319,20 +319,13 @@ let
|
|||
aliasesFile = pkgs.writeText "postfix-aliases" aliases;
|
||||
canonicalFile = pkgs.writeText "postfix-canonical" cfg.canonical;
|
||||
virtualFile = pkgs.writeText "postfix-virtual" cfg.virtual;
|
||||
transportFile = pkgs.writeText "postfix-transport" (
|
||||
lib.optionalString (cfg.enableSlowDomains && cfg.slowDomains != [ ]) (
|
||||
lib.concatMapStrings (domain: ''
|
||||
${domain} slow:
|
||||
'') cfg.slowDomains
|
||||
)
|
||||
+ cfg.transport
|
||||
);
|
||||
localRecipientMapFile = pkgs.writeText "postfix-local-recipient-map" (
|
||||
lib.concatMapStrings (x: x + " ACCEPT\n") cfg.localRecipients
|
||||
);
|
||||
checkClientAccessFile = pkgs.writeText "postfix-check-client-access" cfg.dnsBlacklistOverrides;
|
||||
mainCfFile = pkgs.writeText "postfix-main.cf" mainCf;
|
||||
masterCfFile = pkgs.writeText "postfix-master.cf" masterCfContent;
|
||||
transportFile = pkgs.writeText "postfix-transport" cfg.transport;
|
||||
headerChecksFile = pkgs.writeText "postfix-header-checks" headerChecks;
|
||||
|
||||
in
|
||||
|
@ -557,32 +550,6 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
enableSlowDomains = lib.mkEnableOption "slow domains feature for rate limiting specific domains";
|
||||
|
||||
slowDomains = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
default = [ ];
|
||||
example = [
|
||||
"orange.fr"
|
||||
"gmail.com"
|
||||
];
|
||||
description = "List of domains to be rate-limited using the slow transport.";
|
||||
};
|
||||
|
||||
slowDomainsConfig = {
|
||||
defaultDestinationRateDelay = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "5s";
|
||||
description = "Default rate delay for destinations.";
|
||||
};
|
||||
|
||||
defaultDestinationConcurrencyLimit = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 3;
|
||||
description = "Concurrency limit for slow destinations.";
|
||||
};
|
||||
};
|
||||
|
||||
aliasMapType = lib.mkOption {
|
||||
type =
|
||||
with lib.types;
|
||||
|
@ -1018,10 +985,7 @@ in
|
|||
smtpd_tls_key_file = cfg.sslKey;
|
||||
|
||||
smtpd_tls_security_level = lib.mkDefault "may";
|
||||
}
|
||||
// lib.optionalAttrs cfg.enableSlowDomains {
|
||||
default_destination_rate_delay = cfg.slowDomainsConfig.defaultDestinationRateDelay;
|
||||
default_destination_concurrency_limit = cfg.slowDomainsConfig.defaultDestinationConcurrencyLimit;
|
||||
|
||||
};
|
||||
|
||||
services.postfix.masterConfig =
|
||||
|
@ -1113,14 +1077,6 @@ in
|
|||
lib.concatLists (lib.mapAttrsToList mkKeyVal cfg.submissionOptions);
|
||||
};
|
||||
}
|
||||
// lib.optionalAttrs cfg.enableSlowDomains {
|
||||
slow = {
|
||||
command = "smtp";
|
||||
type = "unix";
|
||||
private = true;
|
||||
maxproc = 2;
|
||||
};
|
||||
}
|
||||
// lib.optionalAttrs cfg.enableSmtp {
|
||||
smtp_inet = {
|
||||
name = "smtp";
|
||||
|
@ -1172,7 +1128,7 @@ in
|
|||
(lib.mkIf haveCanonical {
|
||||
services.postfix.mapFiles.canonical = canonicalFile;
|
||||
})
|
||||
(lib.mkIf (haveTransport || (cfg.enableSlowDomains && cfg.slowDomains != [ ])) {
|
||||
(lib.mkIf haveTransport {
|
||||
services.postfix.mapFiles.transport = transportFile;
|
||||
})
|
||||
(lib.mkIf haveVirtual {
|
||||
|
|
62
nixos/modules/services/matrix/draupnir.md
Normal file
|
@ -0,0 +1,62 @@
|
|||
# Draupnir (Matrix Moderation Bot) {#module-services-draupnir}
|
||||
|
||||
This chapter will show you how to set up your own, self-hosted
|
||||
[Draupnir](https://github.com/the-draupnir-project/Draupnir) instance.
|
||||
|
||||
As an all-in-one moderation tool, it can protect your server from
|
||||
malicious invites, spam messages, and whatever else you don't want.
|
||||
In addition to server-level protection, Draupnir is great for communities
|
||||
wanting to protect their rooms without having to use their personal
|
||||
accounts for moderation.
|
||||
|
||||
The bot by default includes support for bans, redactions, anti-spam,
|
||||
server ACLs, room directory changes, room alias transfers, account
|
||||
deactivation, room shutdown, and more. (This depends on homeserver configuration and implementation.)
|
||||
|
||||
See the [README](https://github.com/the-draupnir-project/draupnir#readme)
|
||||
page and the [Moderator's guide](https://the-draupnir-project.github.io/draupnir-documentation/moderator/setting-up-and-configuring)
|
||||
for additional instructions on how to set up and use Draupnir.
|
||||
|
||||
For [additional settings](#opt-services.draupnir.settings)
|
||||
see [the default configuration](https://github.com/the-draupnir-project/Draupnir/blob/main/config/default.yaml).
|
||||
|
||||
## Draupnir Setup {#module-services-draupnir-setup}
|
||||
|
||||
First create a new unencrypted, private room which will be used as the management room for Draupnir.
|
||||
This is the room in which moderators will interact with Draupnir and where it will log possible errors and debugging information.
|
||||
You'll need to set this room ID or alias in [services.draupnir.settings.managementRoom](#opt-services.draupnir.settings.managementRoom).
|
||||
|
||||
Next, create a new user for Draupnir on your homeserver, if one does not already exist.
|
||||
|
||||
The Draupnir Matrix user expects to be free of any rate limiting.
|
||||
See [Synapse #6286](https://github.com/matrix-org/synapse/issues/6286)
|
||||
for an example on how to achieve this.
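On a Synapse homeserver, one way to do this is the admin API's per-user rate-limit override. This is only a sketch; the server name, bot user ID, and admin access token below are placeholders:

```
curl -X POST \
  -H "Authorization: Bearer <synapse admin access token>" \
  -d '{"messages_per_second": 0, "burst_count": 0}' \
  "https://matrix.example.org/_synapse/admin/v1/users/@draupnir:example.org/override_ratelimit"
```

Setting both values to `0` disables rate limiting for that user entirely.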
|
||||
|
||||
If you want Draupnir to be able to deactivate users, move room aliases, shut down rooms, etc.
|
||||
you'll need to make the Draupnir user a Matrix server admin.
|
||||
|
||||
Now invite the Draupnir user to the management room.
|
||||
Draupnir will automatically try to join this room on startup.
|
||||
|
||||
```nix
|
||||
{
|
||||
services.draupnir = {
|
||||
enable = true;
|
||||
|
||||
settings = {
|
||||
homeserverUrl = "https://matrix.org";
|
||||
managementRoom = "!yyy:example.org";
|
||||
};
|
||||
|
||||
secrets = {
|
||||
accessToken = "/path/to/secret/containing/access-token";
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Element Matrix Services (EMS) {#module-services-draupnir-setup-ems}
|
||||
|
||||
If you are using a managed ["Element Matrix Services (EMS)"](https://ems.element.io/)
|
||||
server, you will need to consent to the terms and conditions. Upon startup, an error
|
||||
log entry with a URL to the consent page will be generated.
|
257
nixos/modules/services/matrix/draupnir.nix
Normal file
|
@ -0,0 +1,257 @@
|
|||
{
|
||||
config,
|
||||
options,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
cfg = config.services.draupnir;
|
||||
opt = options.services.draupnir;
|
||||
|
||||
format = pkgs.formats.yaml { };
|
||||
configFile = format.generate "draupnir.yaml" cfg.settings;
|
||||
|
||||
inherit (lib)
|
||||
literalExpression
|
||||
mkEnableOption
|
||||
mkOption
|
||||
mkPackageOption
|
||||
mkRemovedOptionModule
|
||||
mkRenamedOptionModule
|
||||
types
|
||||
;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
# Renamed and removed options for those migrating from the Mjolnir module
|
||||
(mkRenamedOptionModule
|
||||
[ "services" "draupnir" "dataPath" ]
|
||||
[ "services" "draupnir" "settings" "dataPath" ]
|
||||
)
|
||||
(mkRenamedOptionModule
|
||||
[ "services" "draupnir" "homeserverUrl" ]
|
||||
[ "services" "draupnir" "settings" "homeserverUrl" ]
|
||||
)
|
||||
(mkRenamedOptionModule
|
||||
[ "services" "draupnir" "managementRoom" ]
|
||||
[ "services" "draupnir" "settings" "managementRoom" ]
|
||||
)
|
||||
(mkRenamedOptionModule
|
||||
[ "services" "draupnir" "accessTokenFile" ]
|
||||
[ "services" "draupnir" "secrets" "accessToken" ]
|
||||
)
|
||||
(mkRemovedOptionModule [ "services" "draupnir" "pantalaimon" ] ''
|
||||
`services.draupnir.pantalaimon.*` has been removed because it depends on the deprecated and vulnerable
|
||||
libolm library for end-to-end encryption and upstream support for Pantalaimon in Draupnir is limited.
|
||||
See <https://the-draupnir-project.github.io/draupnir-documentation/bot/encryption> for details.
|
||||
If you nonetheless require E2EE via Pantalaimon, you can configure `services.pantalaimon-headless.instances`
|
||||
yourself and use that with `services.draupnir.settings.pantalaimon` and `services.draupnir.secrets.pantalaimon.password`.
|
||||
'')
|
||||
];
|
||||
|
||||
options.services.draupnir = {
|
||||
enable = mkEnableOption "Draupnir, a moderation bot for Matrix";
|
||||
|
||||
package = mkPackageOption pkgs "draupnir" { };
|
||||
|
||||
settings = mkOption {
|
||||
example = literalExpression ''
|
||||
{
|
||||
homeserverUrl = "https://matrix.org";
|
||||
managementRoom = "#moderators:example.org";
|
||||
|
||||
autojoinOnlyIfManager = true;
|
||||
automaticallyRedactForReasons = [ "spam" "advertising" ];
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
Free-form settings written to Draupnir's configuration file.
|
||||
See [Draupnir's default configuration](https://github.com/the-draupnir-project/Draupnir/blob/main/config/default.yaml) for available settings.
|
||||
'';
|
||||
default = { };
|
||||
type = types.submodule {
|
||||
freeformType = format.type;
|
||||
options = {
|
||||
homeserverUrl = mkOption {
|
||||
type = types.str;
|
||||
example = "https://matrix.org";
|
||||
description = ''
|
||||
Base URL of the Matrix homeserver that provides the Client-Server API.
|
||||
|
||||
::: {.note}
|
||||
When using Pantalaimon, set this to the Pantalaimon URL and
|
||||
{option}`${opt.settings}.rawHomeserverUrl` to the public URL.
|
||||
:::
|
||||
'';
|
||||
};
|
||||
|
||||
rawHomeserverUrl = mkOption {
|
||||
type = types.str;
|
||||
example = "https://matrix.org";
|
||||
default = cfg.settings.homeserverUrl;
|
||||
defaultText = literalExpression "config.${opt.settings}.homeserverUrl";
|
||||
description = ''
|
||||
Public base URL of the Matrix homeserver that provides the Client-Server API when using Draupnir's
|
||||
[Report forwarding feature](https://the-draupnir-project.github.io/draupnir-documentation/bot/homeserver-administration#report-forwarding).
|
||||
|
||||
::: {.warning}
|
||||
When using Pantalaimon, do not set this to the Pantalaimon URL!
|
||||
:::
|
||||
'';
|
||||
};
|
||||
|
||||
managementRoom = mkOption {
|
||||
type = types.str;
|
||||
example = "#moderators:example.org";
|
||||
description = ''
|
||||
The room ID or alias where moderators can use the bot's functionality.
|
||||
|
||||
The bot has no access controls, so anyone in this room can use the bot - secure this room!
|
||||
Do not enable end-to-end encryption for this room, unless set up with Pantalaimon.
|
||||
|
||||
::: {.warning}
|
||||
When using a room alias, make sure the alias used is on the local homeserver!
|
||||
This prevents an issue where the control room becomes undefined when the alias can't be resolved.
|
||||
:::
|
||||
'';
|
||||
};
|
||||
|
||||
dataPath = mkOption {
|
||||
type = types.path;
|
||||
readOnly = true;
|
||||
default = "/var/lib/draupnir";
|
||||
description = ''
|
||||
The path Draupnir will store its state/data in.
|
||||
|
||||
::: {.warning}
|
||||
This option is read-only.
|
||||
:::
|
||||
|
||||
::: {.note}
|
||||
If you want to customize where this data is stored, use a bind mount.
|
||||
:::
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
secrets = {
|
||||
accessToken = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
File containing the access token for Draupnir's Matrix account
|
||||
to be used in place of {option}`${opt.settings}.accessToken`.
|
||||
'';
|
||||
};
|
||||
|
||||
pantalaimon.password = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
File containing the password for Draupnir's Matrix account when used in
|
||||
conjunction with Pantalaimon to be used in place of
|
||||
{option}`${opt.settings}.pantalaimon.password`.
|
||||
|
||||
::: {.warning}
|
||||
Take note that upstream has limited Pantalaimon and E2EE support:
|
||||
<https://the-draupnir-project.github.io/draupnir-documentation/bot/encryption> and
|
||||
<https://the-draupnir-project.github.io/draupnir-documentation/shared/dogfood#e2ee-support>.
|
||||
:::
|
||||
'';
|
||||
};
|
||||
|
||||
web.synapseHTTPAntispam.authorization = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
File containing the secret token when using the Synapse HTTP Antispam module
|
||||
to be used in place of
|
||||
{option}`${opt.settings}.web.synapseHTTPAntispam.authorization`.
|
||||
|
||||
See <https://the-draupnir-project.github.io/draupnir-documentation/bot/synapse-http-antispam> for details.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
assertions = [
|
||||
{
|
||||
# Removed option for those migrating from the Mjolnir module - mkRemovedOptionModule does *not* work with submodules.
|
||||
assertion = !(cfg.settings ? protectedRooms);
|
||||
message = "Unset ${opt.settings}.protectedRooms, as it is unsupported on Draupnir. Add these rooms via `!draupnir rooms add` instead.";
|
||||
}
|
||||
];
|
||||
|
||||
systemd.services.draupnir = {
|
||||
description = "Draupnir - a moderation bot for Matrix";
|
||||
wants = [
|
||||
"network-online.target"
|
||||
"matrix-synapse.service"
|
||||
"conduit.service"
|
||||
"dendrite.service"
|
||||
];
|
||||
after = [
|
||||
"network-online.target"
|
||||
"matrix-synapse.service"
|
||||
"conduit.service"
|
||||
"dendrite.service"
|
||||
];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
startLimitIntervalSec = 0;
|
||||
serviceConfig = {
|
||||
ExecStart = toString (
|
||||
[
|
||||
(lib.getExe cfg.package)
|
||||
"--draupnir-config"
|
||||
configFile
|
||||
]
|
||||
++ lib.optionals (cfg.secrets.accessToken != null) [
|
||||
"--access-token-path"
|
||||
"%d/access_token"
|
||||
]
|
||||
++ lib.optionals (cfg.secrets.pantalaimon.password != null) [
|
||||
"--pantalaimon-password-path"
|
||||
"%d/pantalaimon_password"
|
||||
]
|
||||
++ lib.optionals (cfg.secrets.web.synapseHTTPAntispam.authorization != null) [
|
||||
"--http-antispam-authorization-path"
|
||||
"%d/http_antispam_authorization"
|
||||
]
|
||||
);
|
||||
|
||||
WorkingDirectory = "/var/lib/draupnir";
|
||||
StateDirectory = "draupnir";
|
||||
StateDirectoryMode = "0700";
|
||||
ProtectHome = true;
|
||||
PrivateDevices = true;
|
||||
Restart = "on-failure";
|
||||
RestartSec = "5s";
|
||||
DynamicUser = true;
|
||||
LoadCredential =
|
||||
lib.optionals (cfg.secrets.accessToken != null) [
|
||||
"access_token:${cfg.secrets.accessToken}"
|
||||
]
|
||||
++ lib.optionals (cfg.secrets.pantalaimon.password != null) [
|
||||
"pantalaimon_password:${cfg.secrets.pantalaimon.password}"
|
||||
]
|
||||
++ lib.optionals (cfg.secrets.web.synapseHTTPAntispam.authorization != null) [
|
||||
"http_antispam_authorization:${cfg.secrets.web.synapseHTTPAntispam.authorization}"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
meta = {
|
||||
doc = ./draupnir.md;
|
||||
maintainers = with lib.maintainers; [
|
||||
RorySys
|
||||
emilylange
|
||||
];
|
||||
};
|
||||
}
|
|
@ -124,12 +124,13 @@ let
|
|||
genLogConfigFile =
|
||||
logName:
|
||||
format.generate "synapse-log-${logName}.yaml" (
|
||||
cfg.log
|
||||
// optionalAttrs (cfg.log ? handlers.journal) {
|
||||
handlers.journal = cfg.log.handlers.journal // {
|
||||
SYSLOG_IDENTIFIER = logName;
|
||||
};
|
||||
}
|
||||
attrsets.recursiveUpdate cfg.log (
|
||||
optionalAttrs (cfg.log ? handlers.journal) {
|
||||
handlers.journal = cfg.log.handlers.journal // {
|
||||
SYSLOG_IDENTIFIER = logName;
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
|
||||
toIntBase8 =
|
||||
|
|
|
@ -79,8 +79,13 @@ in
|
|||
)
|
||||
|
||||
(mkRemovedOptionModule [ "services" "gitea" "ssh" "enable" ]
|
||||
"services.gitea.ssh.enable has been migrated into freeform setting services.gitea.settings.server.DISABLE_SSH. Keep in mind that the setting is inverted"
|
||||
"It has been migrated into freeform setting services.gitea.settings.server.DISABLE_SSH. Keep in mind that the setting is inverted."
|
||||
)
|
||||
(mkRemovedOptionModule [
|
||||
"services"
|
||||
"gitea"
|
||||
"useWizard"
|
||||
] "Has been removed because it was broken and lacked automated testing.")
|
||||
];
|
||||
|
||||
options = {
|
||||
|
@ -93,12 +98,6 @@ in
|
|||
|
||||
package = mkPackageOption pkgs "gitea" { };
|
||||
|
||||
useWizard = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = "Do not generate a configuration and use gitea' installation wizard instead. The first registered user will be administrator.";
|
||||
};
|
||||
|
||||
stateDir = mkOption {
|
||||
default = "/var/lib/gitea";
|
||||
type = types.str;
|
||||
|
@ -495,9 +494,7 @@ in
|
|||
This can be disabled by using this option.
|
||||
|
||||
*Note:* please keep in mind that this should be added after the initial
|
||||
deploy unless [](#opt-services.gitea.useWizard)
|
||||
is `true` as the first registered user will be the administrator if
|
||||
no install wizard is used.
|
||||
deploy as the first registered user will be the administrator.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
@ -752,62 +749,60 @@ in
|
|||
in
|
||||
''
|
||||
# copy custom configuration and generate random secrets if needed
|
||||
${optionalString (!cfg.useWizard) ''
|
||||
function gitea_setup {
|
||||
cp -f '${configFile}' '${runConfig}'
|
||||
function gitea_setup {
|
||||
cp -f '${configFile}' '${runConfig}'
|
||||
|
||||
if [ ! -s '${secretKey}' ]; then
|
||||
${exe} generate secret SECRET_KEY > '${secretKey}'
|
||||
if [ ! -s '${secretKey}' ]; then
|
||||
${exe} generate secret SECRET_KEY > '${secretKey}'
|
||||
fi
|
||||
|
||||
# Migrate LFS_JWT_SECRET filename
|
||||
if [[ -s '${oldLfsJwtSecret}' && ! -s '${lfsJwtSecret}' ]]; then
|
||||
mv '${oldLfsJwtSecret}' '${lfsJwtSecret}'
|
||||
fi
|
||||
|
||||
if [ ! -s '${oauth2JwtSecret}' ]; then
|
||||
${exe} generate secret JWT_SECRET > '${oauth2JwtSecret}'
|
||||
fi
|
||||
|
||||
${lib.optionalString cfg.lfs.enable ''
|
||||
if [ ! -s '${lfsJwtSecret}' ]; then
|
||||
${exe} generate secret LFS_JWT_SECRET > '${lfsJwtSecret}'
|
||||
fi
|
||||
''}
|
||||
|
||||
# Migrate LFS_JWT_SECRET filename
|
||||
if [[ -s '${oldLfsJwtSecret}' && ! -s '${lfsJwtSecret}' ]]; then
|
||||
mv '${oldLfsJwtSecret}' '${lfsJwtSecret}'
|
||||
fi
|
||||
if [ ! -s '${internalToken}' ]; then
|
||||
${exe} generate secret INTERNAL_TOKEN > '${internalToken}'
|
||||
fi
|
||||
|
||||
if [ ! -s '${oauth2JwtSecret}' ]; then
|
||||
${exe} generate secret JWT_SECRET > '${oauth2JwtSecret}'
|
||||
fi
|
||||
chmod u+w '${runConfig}'
|
||||
${replaceSecretBin} '#secretkey#' '${secretKey}' '${runConfig}'
|
||||
${replaceSecretBin} '#dbpass#' '${cfg.database.passwordFile}' '${runConfig}'
|
||||
${replaceSecretBin} '#oauth2jwtsecret#' '${oauth2JwtSecret}' '${runConfig}'
|
||||
${replaceSecretBin} '#internaltoken#' '${internalToken}' '${runConfig}'
|
||||
|
||||
${lib.optionalString cfg.lfs.enable ''
|
||||
if [ ! -s '${lfsJwtSecret}' ]; then
|
||||
${exe} generate secret LFS_JWT_SECRET > '${lfsJwtSecret}'
|
||||
fi
|
||||
''}
|
||||
${lib.optionalString cfg.lfs.enable ''
|
||||
${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
|
||||
''}
|
||||
|
||||
if [ ! -s '${internalToken}' ]; then
|
||||
${exe} generate secret INTERNAL_TOKEN > '${internalToken}'
|
||||
fi
|
||||
${lib.optionalString (cfg.camoHmacKeyFile != null) ''
|
||||
${replaceSecretBin} '#hmackey#' '${cfg.camoHmacKeyFile}' '${runConfig}'
|
||||
''}
|
||||
|
||||
chmod u+w '${runConfig}'
|
||||
${replaceSecretBin} '#secretkey#' '${secretKey}' '${runConfig}'
|
||||
${replaceSecretBin} '#dbpass#' '${cfg.database.passwordFile}' '${runConfig}'
|
||||
${replaceSecretBin} '#oauth2jwtsecret#' '${oauth2JwtSecret}' '${runConfig}'
|
||||
${replaceSecretBin} '#internaltoken#' '${internalToken}' '${runConfig}'
|
||||
${lib.optionalString (cfg.mailerPasswordFile != null) ''
|
||||
${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
|
||||
''}
|
||||
|
||||
${lib.optionalString cfg.lfs.enable ''
|
||||
${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
|
||||
''}
|
||||
${lib.optionalString (cfg.metricsTokenFile != null) ''
|
||||
${replaceSecretBin} '#metricstoken#' '${cfg.metricsTokenFile}' '${runConfig}'
|
||||
''}
|
||||
|
||||
${lib.optionalString (cfg.camoHmacKeyFile != null) ''
|
||||
${replaceSecretBin} '#hmackey#' '${cfg.camoHmacKeyFile}' '${runConfig}'
|
||||
''}
|
||||
|
||||
${lib.optionalString (cfg.mailerPasswordFile != null) ''
|
||||
${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
|
||||
''}
|
||||
|
||||
${lib.optionalString (cfg.metricsTokenFile != null) ''
|
||||
${replaceSecretBin} '#metricstoken#' '${cfg.metricsTokenFile}' '${runConfig}'
|
||||
''}
|
||||
|
||||
${lib.optionalString (cfg.captcha.secretFile != null) ''
|
||||
${replaceSecretBin} '#captchasecret#' '${cfg.captcha.secretFile}' '${runConfig}'
|
||||
''}
|
||||
chmod u-w '${runConfig}'
|
||||
}
|
||||
(umask 027; gitea_setup)
|
||||
''}
|
||||
${lib.optionalString (cfg.captcha.secretFile != null) ''
|
||||
${replaceSecretBin} '#captchasecret#' '${cfg.captcha.secretFile}' '${runConfig}'
|
||||
''}
|
||||
chmod u-w '${runConfig}'
|
||||
}
|
||||
(umask 027; gitea_setup)
|
||||
|
||||
# run migrations/init the database
|
||||
${exe} migrate
|
||||
|
|
|
@ -45,11 +45,6 @@ in
|
|||
nixos-icons # needed for gnome and pantheon about dialog, nixos-manual and maybe more
|
||||
xdg-utils
|
||||
];
|
||||
# needed for some display managers to locate desktop manager sessions
|
||||
pathsToLink = [
|
||||
"/share/xsessions"
|
||||
"/share/wayland-sessions"
|
||||
];
|
||||
};
|
||||
|
||||
fonts.enableDefaultPackages = lib.mkDefault true;
|
||||
|
|
|
@ -107,7 +107,7 @@ in
|
|||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
# Upstream Recommandation
|
||||
# Upstream Recommendation
|
||||
LimitNOFILE = 20500;
|
||||
};
|
||||
};
|
||||
|
|
|
@ -48,6 +48,15 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
environmentFile = lib.mkOption {
|
||||
type = with lib.types; nullOr path;
|
||||
default = null;
|
||||
example = "/run/secrets/alloy.env";
|
||||
description = ''
|
||||
EnvironmentFile as defined in {manpage}`systemd.exec(5)`.
|
||||
'';
|
||||
};
|
||||
|
||||
extraFlags = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
default = [ ];
|
||||
|
@ -84,6 +93,7 @@ in
|
|||
StateDirectory = "alloy";
|
||||
WorkingDirectory = "%S/alloy";
|
||||
Type = "simple";
|
||||
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -91,7 +91,7 @@ in
|
|||
|
||||
config = lib.mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.below ];
|
||||
# /etc/below.conf is also refered to by the `below` CLI tool,
|
||||
# /etc/below.conf is also referred to by the `below` CLI tool,
|
||||
# so this can't be a store-only file whose path is passed to the service
|
||||
environment.etc."below/below.conf".text = cfgContents;
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ in
|
|||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
|
||||
# set up Security wrapper Same as inteded in deb post install
|
||||
# set up Security wrapper Same as intended in deb post install
|
||||
security.wrappers.cato-clientd = {
|
||||
source = "${cfg.package}/bin/cato-clientd";
|
||||
owner = "root";
|
||||
|
|
|
@ -4,22 +4,25 @@
|
|||
pkgs,
|
||||
...
|
||||
}:
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.dnscrypt-proxy2;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
options.services.dnscrypt-proxy2 = {
|
||||
enable = mkEnableOption "dnscrypt-proxy2";
|
||||
enable = lib.mkEnableOption "dnscrypt-proxy2";
|
||||
|
||||
settings = mkOption {
|
||||
package = lib.mkPackageOption pkgs "dnscrypt-proxy" { };
|
||||
|
||||
settings = lib.mkOption {
|
||||
description = ''
|
||||
Attrset that is converted and passed as TOML config file.
|
||||
For available params, see: <https://github.com/DNSCrypt/dnscrypt-proxy/blob/${pkgs.dnscrypt-proxy.version}/dnscrypt-proxy/example-dnscrypt-proxy.toml>
|
||||
'';
|
||||
example = literalExpression ''
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
sources.public-resolvers = {
|
||||
urls = [ "https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md" ];
|
||||
|
@ -29,27 +32,27 @@ in
|
|||
};
|
||||
}
|
||||
'';
|
||||
type = types.attrs;
|
||||
type = lib.types.attrs;
|
||||
default = { };
|
||||
};
|
||||
|
||||
upstreamDefaults = mkOption {
|
||||
upstreamDefaults = lib.mkOption {
|
||||
description = ''
|
||||
Whether to base the config declared in {option}`services.dnscrypt-proxy2.settings` on the upstream example config (<https://github.com/DNSCrypt/dnscrypt-proxy/blob/master/dnscrypt-proxy/example-dnscrypt-proxy.toml>)
|
||||
|
||||
Disable this if you want to declare your dnscrypt config from scratch.
|
||||
'';
|
||||
type = types.bool;
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
};
|
||||
|
||||
configFile = mkOption {
|
||||
configFile = lib.mkOption {
|
||||
description = ''
|
||||
Path to TOML config file. See: <https://github.com/DNSCrypt/dnscrypt-proxy/blob/master/dnscrypt-proxy/example-dnscrypt-proxy.toml>
|
||||
If this option is set, it will override any configuration done in options.services.dnscrypt-proxy2.settings.
|
||||
'';
|
||||
example = "/etc/dnscrypt-proxy/dnscrypt-proxy.toml";
|
||||
type = types.path;
|
||||
type = lib.types.path;
|
||||
default =
|
||||
pkgs.runCommand "dnscrypt-proxy.toml"
|
||||
{
|
||||
|
@ -70,11 +73,11 @@ in
|
|||
}
|
||||
${pkgs.buildPackages.remarshal}/bin/json2toml < config.json > $out
|
||||
'';
|
||||
defaultText = literalMD "TOML file generated from {option}`services.dnscrypt-proxy2.settings`";
|
||||
defaultText = lib.literalMD "TOML file generated from {option}`services.dnscrypt-proxy2.settings`";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
config = lib.mkIf cfg.enable {
|
||||
|
||||
networking.nameservers = lib.mkDefault [ "127.0.0.1" ];
|
||||
|
||||
|
@ -94,7 +97,7 @@ in
|
|||
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
|
||||
CacheDirectory = "dnscrypt-proxy";
|
||||
DynamicUser = true;
|
||||
ExecStart = "${pkgs.dnscrypt-proxy}/bin/dnscrypt-proxy -config ${cfg.configFile}";
|
||||
ExecStart = "${lib.getExe cfg.package} -config ${cfg.configFile}";
|
||||
LockPersonality = true;
|
||||
LogsDirectory = "dnscrypt-proxy";
|
||||
MemoryDenyWriteExecute = true;
|
||||
|
|
|
@ -115,6 +115,12 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
configFile = lib.mkOption {
|
||||
type = lib.types.package;
|
||||
default = dnsmasqConf;
|
||||
internal = true;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
@ -172,7 +178,7 @@ in
|
|||
serviceConfig = {
|
||||
Type = "dbus";
|
||||
BusName = "uk.org.thekelleys.dnsmasq";
|
||||
ExecStart = "${dnsmasq}/bin/dnsmasq -k --enable-dbus --user=dnsmasq -C ${dnsmasqConf}";
|
||||
ExecStart = "${dnsmasq}/bin/dnsmasq -k --enable-dbus --user=dnsmasq -C ${cfg.configFile}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
PrivateTmp = true;
|
||||
ProtectSystem = true;
|
||||
|
|
31
nixos/modules/services/networking/dsnet.md
Normal file
|
@ -0,0 +1,31 @@
|
|||
# dsnet {#module-services-dsnet}
|
||||
|
||||
dsnet is a CLI tool to manage a centralised wireguard server. It allows easy
|
||||
generation of client configuration, handling key generation, IP allocation etc.
|
||||
|
||||
It keeps its own configuration at `/etc/dsnetconfig.json`, which is more of a
|
||||
database. It contains key material too.
|
||||
|
||||
The way this module works is to patch this database with whatever is configured
|
||||
in the nix service instantiation. This happens automatically when required.
|
||||
|
||||
This way it is possible to decide what to let dsnet manage and what parts you
|
||||
want to keep declaratively.
|
||||
|
||||
```nix
|
||||
services.dsnet = {
|
||||
enable = true;
|
||||
settings = {
|
||||
ExternalHostname = "vpn.example.com";
|
||||
Network = "10.171.90.0/24";
|
||||
Network6 = "";
|
||||
IP = "10.171.90.1";
|
||||
IP6 = "";
|
||||
DNS = "10.171.90.1";
|
||||
Networks = [ "0.0.0.0/0" ];
|
||||
  };
};
|
||||
|
||||
```
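Peers are then managed imperatively with the dsnet CLI rather than through this module; a rough sketch of adding and removing a peer (the peer name and output path are placeholders):

```
# generate a new peer and capture its WireGuard (wg-quick) configuration
dsnet add laptop > laptop.conf

# revoke the peer again later
dsnet remove laptop
```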
|
||||
|
||||
|
||||
See <https://github.com/naggie/dsnet> for more information.
|
184
nixos/modules/services/networking/dsnet.nix
Normal file
|
@ -0,0 +1,184 @@
|
|||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
cfg = config.services.dsnet;
|
||||
settingsFormat = pkgs.formats.json { };
|
||||
patchFile = settingsFormat.generate "dsnet-patch.json" cfg.settings;
|
||||
in
|
||||
{
|
||||
options.services.dsnet = {
|
||||
enable = lib.mkEnableOption "dsnet, a centralised Wireguard VPN manager";
|
||||
|
||||
package = lib.mkPackageOption pkgs "dsnet" { };
|
||||
|
||||
settings = lib.mkOption {
|
||||
type = lib.types.submodule {
|
||||
|
||||
freeformType = settingsFormat.type;
|
||||
|
||||
options = {
|
||||
ExternalHostname = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
example = "vpn.example.com";
|
||||
description = ''
|
||||
The hostname that clients should use to connect to this server.
|
||||
This is used to generate the client configuration files.
|
||||
|
||||
This is preferred over ExternalIP, as it allows for IPv4 and
|
||||
IPv6, as well as enabling the ability to change IP.
|
||||
'';
|
||||
};
|
||||
|
||||
ExternalIP = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
example = "192.0.2.1";
|
||||
description = ''
|
||||
The external IP address of the server. This is used to generate
|
||||
the client configuration files for when an ExternalHostname is not set.
|
||||
|
||||
Leaving this empty will cause dsnet to use the IP address of
|
||||
what looks like the WAN interface.
|
||||
'';
|
||||
};
|
||||
|
||||
ExternalIP6 = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
example = "2001:db8::1";
|
||||
description = ''
|
||||
The external IPv6 address of the server. This is used to generate
|
||||
the client configuration files for when an ExternalHostname is
|
||||
not set. Used in preference to ExternalIP.
|
||||
|
||||
Leaving this empty will cause dsnet to use the IP address of
|
||||
what looks like the WAN interface.
|
||||
'';
|
||||
};
|
||||
|
||||
Network = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
example = "172.18.0.0/24";
|
||||
description = ''
|
||||
The IPv4 network that the server will use to allocate IPs on the network.
|
||||
Leave this empty to let dsnet choose a network.
|
||||
'';
|
||||
};
|
||||
|
||||
Network6 = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
example = "2001:db8::1/64";
|
||||
description = ''
|
||||
The IPv6 network that the server will use to allocate IPs on the
|
||||
network.
|
||||
Leave this empty to let dsnet choose a network.
|
||||
'';
|
||||
};
|
||||
|
||||
IP = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
example = "172.18.0.1";
|
||||
description = ''
|
||||
The IPv4 address that the server will use on the network.
|
||||
Leave this empty to let dsnet choose an address.
|
||||
'';
|
||||
};
|
||||
|
||||
IP6 = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
example = "2001:db8::1";
|
||||
description = ''
|
||||
The IPv6 address that the server will use on the network.
|
||||
Leave this empty to let dsnet choose an address.
|
||||
'';
|
||||
};
|
||||
|
||||
Networks = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.listOf lib.types.str);
|
||||
default = null;
|
||||
example = [
|
||||
"0.0.0.0/0"
|
||||
"192.168.0.0/24"
|
||||
];
|
||||
description = ''
|
||||
The CIDR networks that should route through this server. Clients
|
||||
will be configured to route traffic for these networks through
|
||||
the server peer.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
default = { };
|
||||
description = ''
|
||||
The settings to use for dsnet. This will be converted to a JSON
|
||||
object that will be passed to dsnet as a patch, using the patch
|
||||
command when the service is started. See the dsnet documentation for
|
||||
more information on the additional options.
|
||||
|
||||
Note that the resulting /etc/dsnetconfig.json is more of a database
|
||||
than it is a configuration file. It is therefore recommended that
|
||||
system specific values are configured here, rather than the full
|
||||
configuration including peers.
|
||||
|
||||
Peers may be managed via the dsnet add/remove commands, negating the
|
||||
need to manage key material and cumbersome configuration with nix. If
|
||||
you want peer configuration in nix, you may as well use the regular
|
||||
wireguard module.
|
||||
'';
|
||||
example = {
|
||||
ExternalHostname = "vpn.example.com";
|
||||
ExternalIP = "127.0.0.1";
|
||||
ExternalIP6 = "";
|
||||
ListenPort = 51820;
|
||||
Network = "10.3.148.0/22";
|
||||
Network6 = "";
|
||||
IP = "10.3.148.1";
|
||||
IP6 = "";
|
||||
DNS = "8.8.8.8";
|
||||
Networks = [ "0.0.0.0/0" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
systemd.services.dsnet = {
|
||||
description = "dsnet VPN Management";
|
||||
after = [ "network-online.target" ];
|
||||
wants = [ "network-online.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
preStart = ''
|
||||
test ! -f /etc/dsnetconfig.json && ${lib.getExe cfg.package} init
|
||||
${lib.getExe cfg.package} patch < ${patchFile}
|
||||
'';
|
||||
serviceConfig = {
|
||||
ExecStart = "${lib.getExe cfg.package} up";
|
||||
ExecStop = "${lib.getExe cfg.package} down";
|
||||
Type = "oneshot";
|
||||
# consider the service to be active after process exits, so it can be
|
||||
# reloaded
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
|
||||
reload = ''
|
||||
${lib.getExe cfg.package} patch < ${patchFile}
|
||||
${lib.getExe cfg.package} sync < ${patchFile}
|
||||
'';
|
||||
|
||||
# reload _instead_ of restarting on change
|
||||
reloadIfChanged = true;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -289,7 +289,7 @@ in
|
|||
|
||||
{
|
||||
# Note: we want by default to enable OpenSSL, but it seems anything 100 and above is
|
||||
# overriden by default value from vhost-options.nix
|
||||
# overridden by default value from vhost-options.nix
|
||||
enableACME = mkOverride 99 true;
|
||||
forceSSL = mkOverride 99 true;
|
||||
locations.${cfg.nginx.path} = {
|
||||
|
|
|
@ -32,6 +32,10 @@ let
|
|||
restricted-rpc=1
|
||||
''}
|
||||
|
||||
${lib.optionalString (banlist != null) ''
|
||||
ban-list=${banlist}
|
||||
''}
|
||||
|
||||
limit-rate-up=${toString limits.upload}
|
||||
limit-rate-down=${toString limits.download}
|
||||
max-concurrency=${toString limits.threads}
|
||||
|
@ -64,6 +68,23 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
banlist = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
Path to a text file containing IPs to block.
|
||||
Useful to prevent DDoS/deanonymization attacks.
|
||||
|
||||
https://github.com/monero-project/meta/issues/1124
|
||||
'';
|
||||
example = lib.literalExpression ''
|
||||
builtins.fetchurl {
|
||||
url = "https://raw.githubusercontent.com/rblaine95/monero-banlist/c6eb9413ddc777e7072d822f49923df0b2a94d88/block.txt";
|
||||
hash = "";
|
||||
};
|
||||
'';
|
||||
};
|
||||
|
||||
mining.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
|
@ -225,7 +246,7 @@ in
|
|||
serviceConfig = {
|
||||
User = "monero";
|
||||
Group = "monero";
|
||||
ExecStart = "${pkgs.monero-cli}/bin/monerod --config-file=${configFile} --non-interactive";
|
||||
ExecStart = "${lib.getExe' pkgs.monero-cli "monerod"} --config-file=${configFile} --non-interactive";
|
||||
Restart = "always";
|
||||
SuccessExitStatus = [
|
||||
0
|
||||
|
|
|
@ -5,8 +5,6 @@
|
|||
...
|
||||
}:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.murmur;
|
||||
forking = cfg.logFile != null;
|
||||
|
@ -18,64 +16,49 @@ let
|
|||
autobanTimeframe=${toString cfg.autobanTimeframe}
|
||||
autobanTime=${toString cfg.autobanTime}
|
||||
|
||||
logfile=${optionalString (cfg.logFile != null) cfg.logFile}
|
||||
${optionalString forking "pidfile=/run/murmur/murmurd.pid"}
|
||||
logfile=${lib.optionalString (cfg.logFile != null) cfg.logFile}
|
||||
${lib.optionalString forking "pidfile=/run/murmur/murmurd.pid"}
|
||||
|
||||
welcometext="${cfg.welcometext}"
|
||||
port=${toString cfg.port}
|
||||
|
||||
${optionalString (cfg.hostName != "") "host=${cfg.hostName}"}
|
||||
${optionalString (cfg.password != "") "serverpassword=${cfg.password}"}
|
||||
${lib.optionalString (cfg.hostName != "") "host=${cfg.hostName}"}
|
||||
${lib.optionalString (cfg.password != "") "serverpassword=${cfg.password}"}
|
||||
|
||||
bandwidth=${toString cfg.bandwidth}
|
||||
users=${toString cfg.users}
|
||||
|
||||
textmessagelength=${toString cfg.textMsgLength}
|
||||
imagemessagelength=${toString cfg.imgMsgLength}
|
||||
allowhtml=${boolToString cfg.allowHtml}
|
||||
allowhtml=${lib.boolToString cfg.allowHtml}
|
||||
logdays=${toString cfg.logDays}
|
||||
bonjour=${boolToString cfg.bonjour}
|
||||
sendversion=${boolToString cfg.sendVersion}
|
||||
bonjour=${lib.boolToString cfg.bonjour}
|
||||
sendversion=${lib.boolToString cfg.sendVersion}
|
||||
|
||||
${optionalString (cfg.registerName != "") "registerName=${cfg.registerName}"}
|
||||
${optionalString (cfg.registerPassword != "") "registerPassword=${cfg.registerPassword}"}
|
||||
${optionalString (cfg.registerUrl != "") "registerUrl=${cfg.registerUrl}"}
|
||||
${optionalString (cfg.registerHostname != "") "registerHostname=${cfg.registerHostname}"}
|
||||
${lib.optionalString (cfg.registerName != "") "registerName=${cfg.registerName}"}
|
||||
${lib.optionalString (cfg.registerPassword != "") "registerPassword=${cfg.registerPassword}"}
|
||||
${lib.optionalString (cfg.registerUrl != "") "registerUrl=${cfg.registerUrl}"}
|
||||
${lib.optionalString (cfg.registerHostname != "") "registerHostname=${cfg.registerHostname}"}
|
||||
|
||||
certrequired=${boolToString cfg.clientCertRequired}
|
||||
${optionalString (cfg.sslCert != "") "sslCert=${cfg.sslCert}"}
|
||||
${optionalString (cfg.sslKey != "") "sslKey=${cfg.sslKey}"}
|
||||
${optionalString (cfg.sslCa != "") "sslCA=${cfg.sslCa}"}
|
||||
certrequired=${lib.boolToString cfg.clientCertRequired}
|
||||
${lib.optionalString (cfg.sslCert != "") "sslCert=${cfg.sslCert}"}
|
||||
${lib.optionalString (cfg.sslKey != "") "sslKey=${cfg.sslKey}"}
|
||||
${lib.optionalString (cfg.sslCa != "") "sslCA=${cfg.sslCa}"}
|
||||
|
||||
${optionalString (cfg.dbus != null) "dbus=${cfg.dbus}"}
|
||||
${lib.optionalString (cfg.dbus != null) "dbus=${cfg.dbus}"}
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
(mkRenamedOptionModule [ "services" "murmur" "welcome" ] [ "services" "murmur" "welcometext" ])
|
||||
(mkRemovedOptionModule [ "services" "murmur" "pidfile" ] "Hardcoded to /run/murmur/murmurd.pid now")
|
||||
];
|
||||
|
||||
options = {
|
||||
services.murmur = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "If enabled, start the Murmur Mumble server.";
|
||||
};
|
||||
enable = lib.mkEnableOption "Mumble server";
|
||||
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Open ports in the firewall for the Murmur Mumble server.
|
||||
'';
|
||||
};
|
||||
openFirewall = lib.mkEnableOption "opening ports in the firewall for the Mumble server";
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "murmur";
|
||||
description = ''
|
||||
The name of an existing user to use to run the service.
|
||||
|
@ -83,8 +66,8 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "murmur";
|
||||
description = ''
|
||||
The name of an existing group to use to run the service.
|
||||
|
@ -92,16 +75,16 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
stateDir = mkOption {
|
||||
type = types.path;
|
||||
stateDir = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
default = "/var/lib/murmur";
|
||||
description = ''
|
||||
Directory to store data for the server.
|
||||
'';
|
||||
};
|
||||
|
||||
autobanAttempts = mkOption {
|
||||
type = types.int;
|
||||
autobanAttempts = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 10;
|
||||
description = ''
|
||||
Number of attempts a client is allowed to make in
|
||||
|
@ -110,8 +93,8 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
autobanTimeframe = mkOption {
|
||||
type = types.int;
|
||||
autobanTimeframe = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 120;
|
||||
description = ''
|
||||
Timeframe in which a client can connect without being banned
|
||||
|
@ -119,47 +102,47 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
autobanTime = mkOption {
|
||||
type = types.int;
|
||||
autobanTime = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 300;
|
||||
description = "The amount of time an IP ban lasts (in seconds).";
|
||||
};
|
||||
|
||||
logFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
logFile = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
example = "/var/log/murmur/murmurd.log";
|
||||
description = "Path to the log file for Murmur daemon. Empty means log to journald.";
|
||||
};
|
||||
|
||||
welcometext = mkOption {
|
||||
type = types.str;
|
||||
welcometext = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "Welcome message for connected clients.";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 64738;
|
||||
description = "Ports to bind to (UDP and TCP).";
|
||||
};
|
||||
|
||||
hostName = mkOption {
|
||||
type = types.str;
|
||||
hostName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "Host to bind to. Defaults binding on all addresses.";
|
||||
};
|
||||
|
||||
package = mkPackageOption pkgs "murmur" { };
|
||||
package = lib.mkPackageOption pkgs "murmur" { };
|
||||
|
||||
password = mkOption {
|
||||
type = types.str;
|
||||
password = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "Required password to join server, if specified.";
|
||||
};
|
||||
|
||||
bandwidth = mkOption {
|
||||
type = types.int;
|
||||
bandwidth = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 72000;
|
||||
description = ''
|
||||
Maximum bandwidth (in bits per second) that clients may send
|
||||
|
@ -167,26 +150,26 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
users = mkOption {
|
||||
type = types.int;
|
||||
users = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 100;
|
||||
description = "Maximum number of concurrent clients allowed.";
|
||||
};
|
||||
|
||||
textMsgLength = mkOption {
|
||||
type = types.int;
|
||||
textMsgLength = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 5000;
|
||||
description = "Max length of text messages. Set 0 for no limit.";
|
||||
};
|
||||
|
||||
imgMsgLength = mkOption {
|
||||
type = types.int;
|
||||
imgMsgLength = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 131072;
|
||||
description = "Max length of image messages. Set 0 for no limit.";
|
||||
};
|
||||
|
||||
allowHtml = mkOption {
|
||||
type = types.bool;
|
||||
allowHtml = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Allow HTML in client messages, comments, and channel
|
||||
|
@ -194,8 +177,8 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
logDays = mkOption {
|
||||
type = types.int;
|
||||
logDays = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 31;
|
||||
description = ''
|
||||
How long to store RPC logs for in the database. Set 0 to
|
||||
|
@ -203,23 +186,16 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
bonjour = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enable Bonjour auto-discovery, which allows clients over
|
||||
your LAN to automatically discover Murmur servers.
|
||||
'';
|
||||
};
|
||||
bonjour = lib.mkEnableOption "Bonjour auto-discovery, which allows clients over your LAN to automatically discover Mumble servers";
|
||||
|
||||
sendVersion = mkOption {
|
||||
type = types.bool;
|
||||
sendVersion = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = "Send Murmur version in UDP response.";
|
||||
};
|
||||
|
||||
registerName = mkOption {
|
||||
type = types.str;
|
||||
registerName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Public server registration name, and also the name of the
|
||||
|
@ -228,8 +204,8 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
registerPassword = mkOption {
|
||||
type = types.str;
|
||||
registerPassword = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Public server registry password, used to authenticate your
|
||||
|
@ -238,14 +214,14 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
registerUrl = mkOption {
|
||||
type = types.str;
|
||||
registerUrl = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "URL website for your server.";
|
||||
};
|
||||
|
||||
registerHostname = mkOption {
|
||||
type = types.str;
|
||||
registerHostname = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
DNS hostname where your server can be reached. This is only
|
||||
|
@ -255,40 +231,36 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
clientCertRequired = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Require clients to authenticate via certificates.";
|
||||
};
|
||||
clientCertRequired = lib.mkEnableOption "requiring clients to authenticate via certificates";
|
||||
|
||||
sslCert = mkOption {
|
||||
type = types.str;
|
||||
sslCert = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "Path to your SSL certificate.";
|
||||
};
|
||||
|
||||
sslKey = mkOption {
|
||||
type = types.str;
|
||||
sslKey = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "Path to your SSL key.";
|
||||
};
|
||||
|
||||
sslCa = mkOption {
|
||||
type = types.str;
|
||||
sslCa = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "Path to your SSL CA certificate.";
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.lines;
|
||||
extraConfig = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
description = "Extra configuration to put into murmur.ini.";
|
||||
};
|
||||
|
||||
environmentFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
environmentFile = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
example = literalExpression ''"''${config.services.murmur.stateDir}/murmurd.env"'';
|
||||
example = lib.literalExpression ''"''${config.services.murmur.stateDir}/murmurd.env"'';
|
||||
description = ''
|
||||
Environment file as defined in {manpage}`systemd.exec(5)`.
|
||||
|
||||
|
@ -311,8 +283,8 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
dbus = mkOption {
|
||||
type = types.enum [
|
||||
dbus = lib.mkOption {
|
||||
type = lib.types.enum [
|
||||
null
|
||||
"session"
|
||||
"system"
|
||||
|
@ -323,19 +295,19 @@ in
|
|||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
users.users.murmur = mkIf (cfg.user == "murmur") {
|
||||
config = lib.mkIf cfg.enable {
|
||||
users.users.murmur = lib.mkIf (cfg.user == "murmur") {
|
||||
description = "Murmur Service user";
|
||||
home = cfg.stateDir;
|
||||
createHome = true;
|
||||
uid = config.ids.uids.murmur;
|
||||
group = cfg.group;
|
||||
};
|
||||
users.groups.murmur = mkIf (cfg.group == "murmur") {
|
||||
users.groups.murmur = lib.mkIf (cfg.group == "murmur") {
|
||||
gid = config.ids.gids.murmur;
|
||||
};
|
||||
|
||||
networking.firewall = mkIf cfg.openFirewall {
|
||||
networking.firewall = lib.mkIf cfg.openFirewall {
|
||||
allowedTCPPorts = [ cfg.port ];
|
||||
allowedUDPPorts = [ cfg.port ];
|
||||
};
|
||||
|
@ -353,8 +325,8 @@ in
|
|||
serviceConfig = {
|
||||
# murmurd doesn't fork when logging to the console.
|
||||
Type = if forking then "forking" else "simple";
|
||||
PIDFile = mkIf forking "/run/murmur/murmurd.pid";
|
||||
EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
|
||||
PIDFile = lib.mkIf forking "/run/murmur/murmurd.pid";
|
||||
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
|
||||
ExecStart = "${cfg.package}/bin/mumble-server -ini /run/murmur/murmurd.ini";
|
||||
Restart = "always";
|
||||
RuntimeDirectory = "murmur";
|
||||
|
@ -390,7 +362,7 @@ in
|
|||
|
||||
# currently not included in upstream package, addition requested at
|
||||
# https://github.com/mumble-voip/mumble/issues/6078
|
||||
services.dbus.packages = mkIf (cfg.dbus == "system") [
|
||||
services.dbus.packages = lib.mkIf (cfg.dbus == "system") [
|
||||
(pkgs.writeTextFile {
|
||||
name = "murmur-dbus-policy";
|
||||
text = ''
|
||||
|
@ -432,19 +404,19 @@ in
|
|||
r /run/murmur/murmurd.ini,
|
||||
r ${configFile},
|
||||
''
|
||||
+ optionalString (cfg.logFile != null) ''
|
||||
+ lib.optionalString (cfg.logFile != null) ''
|
||||
rw ${cfg.logFile},
|
||||
''
|
||||
+ optionalString (cfg.sslCert != "") ''
|
||||
+ lib.optionalString (cfg.sslCert != "") ''
|
||||
r ${cfg.sslCert},
|
||||
''
|
||||
+ optionalString (cfg.sslKey != "") ''
|
||||
+ lib.optionalString (cfg.sslKey != "") ''
|
||||
r ${cfg.sslKey},
|
||||
''
|
||||
+ optionalString (cfg.sslCa != "") ''
|
||||
+ lib.optionalString (cfg.sslCa != "") ''
|
||||
r ${cfg.sslCa},
|
||||
''
|
||||
+ optionalString (cfg.dbus != null) ''
|
||||
+ lib.optionalString (cfg.dbus != null) ''
|
||||
dbus bus=${cfg.dbus}
|
||||
''
|
||||
+ ''
|
||||
|
|
|
@ -550,7 +550,7 @@ in
|
|||
User = client.user.name;
|
||||
Group = client.user.group;
|
||||
|
||||
# settings implied by DynamicUser=true, without actully using it,
|
||||
# settings implied by DynamicUser=true, without actually using it,
|
||||
# see https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html#DynamicUser=
|
||||
RemoveIPC = true;
|
||||
PrivateTmp = true;
|
||||
|
|
|
@ -102,7 +102,7 @@ in
|
|||
# special options as its public anyway
|
||||
# As far as I know leaking this secret is just
|
||||
# an information leak as one can fetch some basic app
|
||||
# informations from the IDP
|
||||
# information from the IDP
|
||||
# To actually do something one still needs to have login
|
||||
# data and this secret so this being public will not
|
||||
# suffice for anything just decreasing security
|
||||
|
|
|
@ -287,7 +287,10 @@ in
|
|||
|
||||
systemd.packages = [ pkgs.pdns-recursor ];
|
||||
|
||||
systemd.services.pdns-recursor.wantedBy = [ "multi-user.target" ];
|
||||
systemd.services.pdns-recursor = {
|
||||
restartTriggers = [ config.environment.etc."/pdns-recursor/recursor.yml".source ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
|
||||
users.users.pdns-recursor = {
|
||||
isSystemUser = true;
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
{
|
||||
cfg,
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
}:
|
||||
|
||||
let
|
||||
pihole = pkgs.pihole;
|
||||
makePayload =
|
||||
list:
|
||||
builtins.toJSON {
|
||||
inherit (list) type enabled;
|
||||
address = list.url;
|
||||
comment = list.description;
|
||||
};
|
||||
payloads = map makePayload cfg.lists;
|
||||
in
|
||||
''
|
||||
# Can't use -u (unset) because api.sh uses API_URL before it is set
|
||||
set -eo pipefail
|
||||
pihole="${lib.getExe pihole}"
|
||||
jq="${lib.getExe pkgs.jq}"
|
||||
|
||||
# If the database doesn't exist, it needs to be created with gravity.sh
|
||||
if [ ! -f '${cfg.stateDirectory}'/gravity.db ]; then
|
||||
$pihole -g
|
||||
# Send SIGRTMIN to FTL, which makes it reload the database, opening the newly created one
|
||||
${pkgs.procps}/bin/kill -s SIGRTMIN $(systemctl show --property MainPID --value ${config.systemd.services.pihole-ftl.name})
|
||||
fi
|
||||
|
||||
source ${pihole}/usr/share/pihole/advanced/Scripts/api.sh
|
||||
source ${pihole}/usr/share/pihole/advanced/Scripts/utils.sh
|
||||
|
||||
any_failed=0
|
||||
|
||||
addList() {
|
||||
local payload="$1"
|
||||
|
||||
echo "Adding list: $payload"
|
||||
local result=$(PostFTLData "lists" "$payload")
|
||||
|
||||
local error="$($jq '.error' <<< "$result")"
|
||||
if [[ "$error" != "null" ]]; then
|
||||
echo "Error: $error"
|
||||
any_failed=1
|
||||
return
|
||||
fi
|
||||
|
||||
id="$($jq '.lists.[].id?' <<< "$result")"
|
||||
if [[ "$id" == "null" ]]; then
|
||||
any_failed=1
|
||||
error="$($jq '.processed.errors.[].error' <<< "$result")"
|
||||
echo "Error: $error"
|
||||
return
|
||||
fi
|
||||
|
||||
echo "Added list ID $id: $result"
|
||||
}
|
||||
|
||||
for i in 1 2 3; do
|
||||
(TestAPIAvailability) && break
|
||||
echo "Retrying API shortly..."
|
||||
${pkgs.coreutils}/bin/sleep .5s
|
||||
done;
|
||||
|
||||
LoginAPI
|
||||
|
||||
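# Register each configured list via the FTL web API: one addList call per
# JSON payload, with each payload shell-escaped before interpolation.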
${builtins.concatStringsSep "\n" (
|
||||
map (
|
||||
payload:
|
||||
lib.pipe payload [
|
||||
lib.strings.escapeShellArg
|
||||
(payload: "addList ${payload}")
|
||||
]
|
||||
) payloads
|
||||
)}
|
||||
|
||||
# Run gravity.sh to load any new lists
|
||||
$pihole -g
|
||||
exit $any_failed
|
||||
''
|
128
nixos/modules/services/networking/pihole-ftl.md
Normal file
|
@ -0,0 +1,128 @@
|
|||
# pihole-FTL {#module-services-networking-pihole-ftl}
|
||||
|
||||
*Upstream documentation*: <https://docs.pi-hole.net/ftldns/>
|
||||
|
||||
pihole-FTL is a fork of [Dnsmasq](index.html#module-services-networking-dnsmasq),
|
||||
providing some additional features, including an API for analysis and
|
||||
statistics.
|
||||
|
||||
Note that pihole-FTL and Dnsmasq cannot be enabled at
|
||||
the same time.
|
||||
|
||||
## Configuration {#module-services-networking-pihole-ftl-configuration}
|
||||
|
||||
pihole-FTL can be configured with [{option}`services.pihole-ftl.settings`](options.html#opt-services.pihole-ftl.settings), which controls the content of `pihole.toml`.
|
||||
|
||||
The template pihole.toml is provided in `pihole-ftl.passthru.settingsTemplate`,
|
||||
which describes all settings.
|
||||
|
||||
Example configuration:
|
||||
|
||||
```nix
|
||||
{
|
||||
services.pihole-ftl = {
|
||||
enable = true;
|
||||
openFirewallDHCP = true;
|
||||
queryLogDeleter.enable = true;
|
||||
lists = [
|
||||
{
|
||||
url = "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts";
|
||||
# Alternatively, use the file from nixpkgs. Note its contents won't be
|
||||
# automatically updated by Pi-hole, as it would with an online URL.
|
||||
# url = "file://${pkgs.stevenblack-blocklist}/hosts";
|
||||
description = "Steven Black's unified adlist";
|
||||
}
|
||||
];
|
||||
settings = {
|
||||
dns = {
|
||||
domainNeeded = true;
|
||||
expandHosts = true;
|
||||
interface = "br-lan";
|
||||
listeningMode = "BIND";
|
||||
upstreams = [ "127.0.0.1#5053" ];
|
||||
};
|
||||
dhcp = {
|
||||
active = true;
|
||||
router = "192.168.10.1";
|
||||
start = "192.168.10.2";
|
||||
end = "192.168.10.254";
|
||||
leaseTime = "1d";
|
||||
ipv6 = true;
|
||||
multiDNS = true;
|
||||
hosts = [
|
||||
# Static address for the current host
|
||||
"aa:bb:cc:dd:ee:ff,192.168.10.1,${config.networking.hostName},infinite"
|
||||
];
|
||||
rapidCommit = true;
|
||||
};
|
||||
misc.dnsmasq_lines = [
|
||||
# This DHCP server is the only one on the network
|
||||
"dhcp-authoritative"
|
||||
# Source: https://data.iana.org/root-anchors/root-anchors.xml
|
||||
"trust-anchor=.,38696,8,2,683D2D0ACB8C9B712A1948B27F741219298D0A450D612C483AF444A4C0FB2B16"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Inheriting configuration from Dnsmasq {#module-services-networking-pihole-ftl-configuration-inherit-dnsmasq}
|
||||
|
||||
If [{option}`services.pihole-ftl.useDnsmasqConfig`](options.html#opt-services.pihole-ftl.useDnsmasqConfig) is enabled, the configuration [options of the Dnsmasq
|
||||
module](index.html#module-services-networking-dnsmasq) will be automatically
|
||||
used by pihole-FTL. Note that this may cause duplicate option errors
|
||||
depending on pihole-FTL settings.
|
||||
|
||||
See the [Dnsmasq
|
||||
example](index.html#module-services-networking-dnsmasq-configuration-home) for
|
||||
an exemplar Dnsmasq configuration. Make sure to set
|
||||
[{option}`services.dnsmasq.enable`](options.html#opt-services.dnsmasq.enable) to false and
|
||||
[{option}`services.pihole-ftl.enable`](options.html#opt-services.pihole-ftl.enable) to true instead:
|
||||
|
||||
```nix
|
||||
{
|
||||
services.pihole-ftl = {
|
||||
enable = true;
|
||||
useDnsmasqConfig = true;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Serving on multiple interfaces {#module-services-networking-pihole-ftl-configuration-multiple-interfaces}
|
||||
|
||||
Pi-hole's configuration only supports specifying a single interface. If you want
|
||||
to configure additional interfaces with different configuration, use
|
||||
`misc.dnsmasq_lines` to append extra Dnsmasq options.
|
||||
|
||||
```nix
|
||||
{
|
||||
services.pihole-ftl = {
|
||||
settings.misc.dnsmasq_lines = [
|
||||
# Specify the secondary interface
|
||||
"interface=enp1s0"
|
||||
# A different device is the router on this network, e.g. the one
|
||||
# provided by your ISP
|
||||
"dhcp-option=enp1s0,option:router,192.168.0.1"
|
||||
# Specify the IPv4 ranges to allocate, with a 1-day lease time
|
||||
"dhcp-range=enp1s0,192.168.0.10,192.168.0.253,1d"
|
||||
# Enable IPv6
|
||||
"dhcp-range=::f,::ff,constructor:enp1s0,ra-names,ra-stateless"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Administration {#module-services-networking-pihole-ftl-administration}
|
||||
|
||||
*pihole command documentation*: <https://docs.pi-hole.net/main/pihole-command>
|
||||
|
||||
Enabling pihole-FTL provides the `pihole` command, which can be used to control
|
||||
the daemon and some configuration.
|
||||
|
||||
Note that in NixOS the script has been patched to remove the reinstallation,
|
||||
update, and Dnsmasq configuration commands. In NixOS, Pi-hole's configuration is
|
||||
immutable and must be done with NixOS options.
|
||||
|
||||
For more convenient administration and monitoring, see [Pi-hole
|
||||
Dashboard](#module-services-web-apps-pihole-web).
|
483
nixos/modules/services/networking/pihole-ftl.nix
Normal file
|
@ -0,0 +1,483 @@
|
|||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
with {
|
||||
inherit (lib)
|
||||
elemAt
|
||||
getExe
|
||||
hasAttrByPath
|
||||
mkEnableOption
|
||||
mkIf
|
||||
mkOption
|
||||
strings
|
||||
types
|
||||
;
|
||||
};
|
||||
|
||||
let
|
||||
mkDefaults = lib.mapAttrsRecursive (n: v: lib.mkDefault v);
|
||||
|
||||
cfg = config.services.pihole-ftl;
|
||||
|
||||
piholeScript = pkgs.writeScriptBin "pihole" ''
|
||||
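# Run pihole as the service user: exec directly if we are already that user,
# otherwise re-exec through sudo.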
sudo=exec
|
||||
if [[ "$USER" != '${cfg.user}' ]]; then
|
||||
sudo='exec /run/wrappers/bin/sudo -u ${cfg.user}'
|
||||
fi
|
||||
$sudo ${getExe cfg.piholePackage} "$@"
|
||||
'';
|
||||
|
||||
settingsFormat = pkgs.formats.toml { };
|
||||
settingsFile = settingsFormat.generate "pihole.toml" cfg.settings;
|
||||
in
|
||||
{
|
||||
options.services.pihole-ftl = {
|
||||
enable = mkEnableOption "Pi-hole FTL";
|
||||
|
||||
package = lib.mkPackageOption pkgs "pihole-ftl" { };
|
||||
piholePackage = lib.mkPackageOption pkgs "pihole" { };
|
||||
|
||||
privacyLevel = mkOption {
|
||||
type = types.numbers.between 0 3;
|
||||
description = ''
|
||||
Level of detail in generated statistics. 0 enables full statistics, 3
|
||||
shows only anonymous statistics.
|
||||
|
||||
See [the documentation](https://docs.pi-hole.net/ftldns/privacylevels).
|
||||
|
||||
Also see services.dnsmasq.settings.log-queries to completely disable
|
||||
query logging.
|
||||
'';
|
||||
default = 0;
|
||||
example = "3";
|
||||
};
|
||||
|
||||
openFirewallDHCP = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Open ports in the firewall for pihole-FTL's DHCP server.";
|
||||
};
|
||||
|
||||
openFirewallWebserver = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Open ports in the firewall for pihole-FTL's webserver, as configured in `settings.webserver.port`.
|
||||
'';
|
||||
};
|
||||
|
||||
configDirectory = mkOption {
|
||||
type = types.path;
|
||||
default = "/etc/pihole";
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
description = ''
|
||||
Path for pihole configuration.
|
||||
pihole does not currently support any path other than /etc/pihole.
|
||||
'';
|
||||
};
|
||||
|
||||
stateDirectory = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/lib/pihole";
|
||||
description = ''
|
||||
Path for pihole state files.
|
||||
'';
|
||||
};
|
||||
|
||||
logDirectory = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/log/pihole";
|
||||
description = "Path for Pi-hole log files";
|
||||
};
|
||||
|
||||
settings = mkOption {
|
||||
type = settingsFormat.type;
|
||||
description = ''
|
||||
Configuration options for pihole.toml.
|
||||
See the upstream [documentation](https://docs.pi-hole.net/ftldns/configfile).
|
||||
'';
|
||||
};
|
||||
|
||||
useDnsmasqConfig = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Import options defined in [](#opt-services.dnsmasq.settings) via
|
||||
misc.dnsmasq_lines in Pi-hole's config.
|
||||
'';
|
||||
};
|
||||
|
||||
pihole = mkOption {
|
||||
type = types.package;
|
||||
default = piholeScript;
|
||||
internal = true;
|
||||
description = "Pi-hole admin script";
|
||||
};
|
||||
|
||||
lists =
|
||||
let
|
||||
adlistType = types.submodule {
|
||||
options = {
|
||||
url = mkOption {
|
||||
type = types.str;
|
||||
description = "URL of the domain list";
|
||||
};
|
||||
type = mkOption {
|
||||
type = types.enum [
|
||||
"allow"
|
||||
"block"
|
||||
];
|
||||
default = "block";
|
||||
description = "Whether domains on this list should be explicitly allowed, or blocked";
|
||||
};
|
||||
enabled = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Whether this list is enabled";
|
||||
};
|
||||
description = mkOption {
|
||||
type = types.str;
|
||||
description = "Description of the list";
|
||||
default = "";
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
mkOption {
|
||||
type = with types; listOf adlistType;
|
||||
description = "Deny (or allow) domain lists to use";
|
||||
default = [ ];
|
||||
example = [
|
||||
{
|
||||
url = "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "pihole";
|
||||
description = "User to run the service as.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "pihole";
|
||||
description = "Group to run the service as.";
|
||||
};
|
||||
|
||||
queryLogDeleter = {
|
||||
enable = mkEnableOption ("Pi-hole FTL DNS query log deleter");
|
||||
|
||||
age = mkOption {
|
||||
type = types.int;
|
||||
default = 90;
|
||||
description = ''
|
||||
Delete DNS query logs older than this many days, if
|
||||
[](#opt-services.pihole-ftl.queryLogDeleter.enable) is on.
|
||||
'';
|
||||
};
|
||||
|
||||
interval = mkOption {
|
||||
type = types.str;
|
||||
default = "weekly";
|
||||
description = ''
|
||||
How often the query log deleter is run. See systemd.time(7) for more
|
||||
information about the format.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = !config.services.dnsmasq.enable;
|
||||
message = "pihole-ftl conflicts with dnsmasq. Please disable one of them.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion =
|
||||
builtins.length cfg.lists == 0
|
||||
|| (
|
||||
(hasAttrByPath [ "webserver" "port" ] cfg.settings)
|
||||
&& !builtins.elem cfg.settings.webserver.port [
|
||||
""
|
||||
null
|
||||
]
|
||||
);
|
||||
message = ''
|
||||
The Pi-hole webserver must be enabled for lists set in services.pihole-ftl.lists to be automatically loaded on startup via the web API.
|
||||
services.pihole-ftl.settings.webserver.port must be defined, e.g. by enabling services.pihole-web.enable and defining services.pihole-web.port.
|
||||
'';
|
||||
}
|
||||
|
||||
{
|
||||
assertion =
|
||||
builtins.length cfg.lists == 0
|
||||
|| !(hasAttrByPath [ "webserver" "api" "cli_pw" ] cfg.settings)
|
||||
|| cfg.settings.webserver.api.cli_pw == true;
|
||||
message = ''
|
||||
services.pihole-ftl.settings.webserver.api.cli_pw must be true for lists set in services.pihole-ftl.lists to be automatically loaded on startup.
|
||||
This enables an ephemeral password used by the pihole command.
|
||||
'';
|
||||
}
|
||||
];
|
||||
|
||||
services.pihole-ftl.settings = lib.mkMerge [
|
||||
# Defaults
|
||||
(mkDefaults {
|
||||
misc.readOnly = true; # Prevent config changes via API or CLI by default
|
||||
webserver.port = ""; # Disable the webserver by default
|
||||
misc.privacyLevel = cfg.privacyLevel;
|
||||
})
|
||||
|
||||
# Move state files to cfg.stateDirectory
|
||||
{
|
||||
# TODO: Pi-hole currently hardcodes dhcp-leasefile in its
|
||||
# generated dnsmasq.conf, and we can't override it
|
||||
misc.dnsmasq_lines = [
|
||||
# "dhcp-leasefile=${cfg.stateDirectory}/dhcp.leases"
|
||||
# "hostsdir=${cfg.stateDirectory}/hosts"
|
||||
];
|
||||
|
||||
files = {
|
||||
database = "${cfg.stateDirectory}/pihole-FTL.db";
|
||||
gravity = "${cfg.stateDirectory}/gravity.db";
|
||||
macvendor = "${cfg.stateDirectory}/gravity.db";
|
||||
log.ftl = "${cfg.logDirectory}/FTL.log";
|
||||
log.dnsmasq = "${cfg.logDirectory}/pihole.log";
|
||||
log.webserver = "${cfg.logDirectory}/webserver.log";
|
||||
};
|
||||
|
||||
webserver.tls = "${cfg.stateDirectory}/tls.pem";
|
||||
}
|
||||
|
||||
(lib.optionalAttrs cfg.useDnsmasqConfig {
|
||||
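# Reuse the dnsmasq module's rendered configuration: read the generated
# file, split it into lines, and drop empty lines before passing them on.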
misc.dnsmasq_lines = lib.pipe config.services.dnsmasq.configFile [
|
||||
builtins.readFile
|
||||
(lib.strings.splitString "\n")
|
||||
(builtins.filter (s: s != ""))
|
||||
];
|
||||
})
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${cfg.configDirectory} 0700 ${cfg.user} ${cfg.group} - -"
|
||||
"d ${cfg.stateDirectory} 0700 ${cfg.user} ${cfg.group} - -"
|
||||
"d ${cfg.logDirectory} 0700 ${cfg.user} ${cfg.group} - -"
|
||||
];
|
||||
|
||||
systemd.services = {
|
||||
pihole-ftl =
|
||||
let
|
||||
setupService = config.systemd.services.pihole-ftl-setup.name;
|
||||
in
|
||||
{
|
||||
description = "Pi-hole FTL";
|
||||
|
||||
after = [ "network.target" ];
|
||||
before = [ setupService ];
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
wants = [ setupService ];
|
||||
|
||||
environment = {
|
||||
# Currently unused, but allows the service to be reloaded
|
||||
# automatically when the config is changed.
|
||||
PIHOLE_CONFIG = settingsFile;
|
||||
|
||||
# pihole is executed by the /actions/gravity API endpoint
|
||||
PATH = lib.mkForce (
|
||||
lib.makeBinPath [
|
||||
cfg.piholePackage
|
||||
]
|
||||
);
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
AmbientCapabilities = [
|
||||
"CAP_NET_BIND_SERVICE"
|
||||
"CAP_NET_RAW"
|
||||
"CAP_NET_ADMIN"
|
||||
"CAP_SYS_NICE"
|
||||
"CAP_IPC_LOCK"
|
||||
"CAP_CHOWN"
|
||||
"CAP_SYS_TIME"
|
||||
];
|
||||
ExecStart = "${getExe cfg.package} no-daemon";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 1;
|
||||
# Hardening
|
||||
NoNewPrivileges = true;
|
||||
PrivateTmp = true;
|
||||
PrivateDevices = true;
|
||||
DevicePolicy = "closed";
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = "read-only";
|
||||
ProtectControlGroups = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ReadWritePaths = [
|
||||
cfg.configDirectory
|
||||
cfg.stateDirectory
|
||||
cfg.logDirectory
|
||||
];
|
||||
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
LockPersonality = true;
|
||||
};
|
||||
};
|
||||
|
||||
pihole-ftl-setup = {
|
||||
description = "Pi-hole FTL setup";
|
||||
# Wait for network so lists can be downloaded
|
||||
after = [ "network-online.target" ];
|
||||
requires = [ "network-online.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
|
||||
# Hardening
|
||||
NoNewPrivileges = true;
|
||||
PrivateTmp = true;
|
||||
PrivateDevices = true;
|
||||
DevicePolicy = "closed";
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = "read-only";
|
||||
ProtectControlGroups = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ReadWritePaths = [
|
||||
cfg.configDirectory
|
||||
cfg.stateDirectory
|
||||
cfg.logDirectory
|
||||
];
|
||||
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
LockPersonality = true;
|
||||
};
|
||||
script = import ./pihole-ftl-setup-script.nix {
|
||||
inherit
|
||||
cfg
|
||||
config
|
||||
lib
|
||||
pkgs
|
||||
;
|
||||
};
|
||||
};
|
||||
|
||||
pihole-ftl-log-deleter = mkIf cfg.queryLogDeleter.enable {
|
||||
description = "Pi-hole FTL DNS query log deleter";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
# Hardening
|
||||
NoNewPrivileges = true;
|
||||
PrivateTmp = true;
|
||||
PrivateDevices = true;
|
||||
DevicePolicy = "closed";
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = "read-only";
|
||||
ProtectControlGroups = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ReadWritePaths = [ cfg.stateDirectory ];
|
||||
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
LockPersonality = true;
|
||||
};
|
||||
script =
|
||||
let
|
||||
days = toString cfg.queryLogDeleter.age;
|
||||
database = "${cfg.stateDirectory}/pihole-FTL.db";
|
||||
in
|
||||
''
|
||||
set -euo pipefail
|
||||
|
||||
echo "Deleting query logs older than ${days} days"
|
||||
${getExe cfg.package} sqlite3 "${database}" "DELETE FROM query_storage WHERE timestamp <= CAST(strftime('%s', date('now', '-${days} day')) AS INT); select changes() from query_storage limit 1"
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
systemd.timers.pihole-ftl-log-deleter = mkIf cfg.queryLogDeleter.enable {
|
||||
description = "Pi-hole FTL DNS query log deleter";
|
||||
before = [
|
||||
config.systemd.services.pihole-ftl.name
|
||||
config.systemd.services.pihole-ftl-setup.name
|
||||
];
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = cfg.queryLogDeleter.interval;
|
||||
Unit = "pihole-ftl-log-deleter.service";
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall = lib.mkMerge [
|
||||
(mkIf cfg.openFirewallDHCP {
|
||||
allowedUDPPorts = [ 53 ];
|
||||
allowedTCPPorts = [ 53 ];
|
||||
})
|
||||
|
||||
(mkIf cfg.openFirewallWebserver {
|
||||
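# webserver.port is a comma-separated string whose entries may end in
# letter flags; keep the numeric prefix of each entry and convert it to an
# integer for the firewall rule list.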
allowedTCPPorts = lib.pipe cfg.settings.webserver.port [
|
||||
(lib.splitString ",")
|
||||
(map (
|
||||
port:
|
||||
lib.pipe port [
|
||||
(builtins.split "[[:alpha:]]+")
|
||||
builtins.head
|
||||
lib.toInt
|
||||
]
|
||||
))
|
||||
];
|
||||
})
|
||||
];
|
||||
|
||||
users.users.${cfg.user} = {
|
||||
group = cfg.group;
|
||||
isSystemUser = true;
|
||||
};
|
||||
|
||||
users.groups.${cfg.group} = { };
|
||||
|
||||
environment.etc."pihole/pihole.toml" = {
|
||||
source = settingsFile;
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
mode = "400";
|
||||
};
|
||||
|
||||
environment.systemPackages = [ cfg.pihole ];
|
||||
|
||||
services.logrotate.settings.pihole-ftl = {
|
||||
enable = true;
|
||||
files = [ "${cfg.logDirectory}/FTL.log" ];
|
||||
};
|
||||
};
|
||||
|
||||
meta = {
|
||||
doc = ./pihole-ftl.md;
|
||||
maintainers = with lib.maintainers; [ averyvigolo ];
|
||||
};
|
||||
}
|
|
@ -42,7 +42,7 @@ let
|
|||
# values must be separated by whitespace or even commas.
|
||||
# Consult either sshd_config(5) or, as last resort, the OpenSSH source for parsing
|
||||
# the options at servconf.c:process_server_config_line_depth() to determine the right "mode"
|
||||
# for each. But fortunaly this fact is documented for most of them in the manpage.
|
||||
# for each. But fortunately this fact is documented for most of them in the manpage.
|
||||
commaSeparated = [
|
||||
"Ciphers"
|
||||
"KexAlgorithms"
|
||||
|
|
|
@ -68,6 +68,7 @@ in
|
|||
systemd.services.tailscale-nginx-auth = {
|
||||
description = "Tailscale NGINX Authentication service";
|
||||
requires = [ "tailscale-nginx-auth.socket" ];
|
||||
after = [ "tailscaled.service" ];
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = getExe cfg.package;
|
||||
|
@ -107,6 +108,8 @@ in
|
|||
"~@privileged"
|
||||
"~@setuid"
|
||||
];
|
||||
|
||||
Restart = "on-failure";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -133,7 +133,7 @@ in
|
|||
|
||||
services.yggdrasil.settings.Listen =
|
||||
let
|
||||
# By default linux dynamically alocates ports in range 32768..60999
|
||||
# By default linux dynamically allocates ports in range 32768..60999
|
||||
# `sysctl net.ipv4.ip_local_port_range`
|
||||
# See: https://xkcd.com/221/
|
||||
prot_port = {
|
||||
|
|
Some files were not shown because too many files have changed in this diff.