Mirror of https://github.com/NixOS/nixpkgs.git

Merge branch 'master' into shellconfig
commit ed44cfb141
3048 changed files with 94716 additions and 58161 deletions
101  .github/workflows/check-by-name.yml (vendored)
@@ -17,10 +17,28 @@ jobs:
    # as specified in nixos/release-combined.nix
    runs-on: ubuntu-latest
    steps:
      - name: Resolving the merge commit
        run: |
          if result=$(git ls-remote --exit-code ${{ github.event.pull_request.base.repo.clone_url }} refs/pull/${{ github.event.pull_request.number }}/merge); then
            mergedSha=$(cut -f1 <<< "$result")
            echo "The PR appears to not have any conflicts, checking the merge commit $mergedSha"
          else
            echo "The PR may have a merge conflict"
            exit 1
          fi
          echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"
      - uses: actions/checkout@v4
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge
          ref: ${{ env.mergedSha }}
          # Fetches the merge commit and its parents
          fetch-depth: 2
      - name: Determining PR git hashes
        run: |
          # For pull_request_target this is the same as $GITHUB_SHA
          echo "baseSha=$(git rev-parse HEAD^1)" >> "$GITHUB_ENV"

          echo "headSha=$(git rev-parse HEAD^2)" >> "$GITHUB_ENV"
      - uses: cachix/install-nix-action@v23
      - name: Determining channel to use for dependencies
        run: |

@@ -51,4 +69,83 @@ jobs:
          # Passing --max-jobs 0 makes sure that we won't build anything
          nix-build "$nixpkgs" -A tests.nixpkgs-check-by-name --max-jobs 0
      - name: Running nixpkgs-check-by-name
        run: result/bin/nixpkgs-check-by-name .
        run: |
          echo "Checking whether the check succeeds on the base branch $GITHUB_BASE_REF"
          git checkout -q "$baseSha"
          if baseOutput=$(result/bin/nixpkgs-check-by-name . 2>&1); then
            baseSuccess=1
          else
            baseSuccess=
          fi
          printf "%s\n" "$baseOutput"

          echo "Checking whether the check would succeed after merging this pull request"
          git checkout -q "$mergedSha"
          if mergedOutput=$(result/bin/nixpkgs-check-by-name . 2>&1); then
            mergedSuccess=1
            exitCode=0
          else
            mergedSuccess=
            exitCode=1
          fi
          printf "%s\n" "$mergedOutput"

          resultToEmoji() {
            if [[ -n "$1" ]]; then
              echo ":heavy_check_mark:"
            else
              echo ":x:"
            fi
          }

          # Print a markdown summary in GitHub actions
          {
            echo "| Nixpkgs version | Check result |"
            echo "| --- | --- |"
            echo "| Latest base commit | $(resultToEmoji "$baseSuccess") |"
            echo "| After merging this PR | $(resultToEmoji "$mergedSuccess") |"
            echo ""

            if [[ -n "$baseSuccess" ]]; then
              if [[ -n "$mergedSuccess" ]]; then
                echo "The check succeeds on both the base branch and after merging this PR"
              else
                echo "The check succeeds on the base branch, but would fail after merging this PR:"
                echo "\`\`\`"
                echo "$mergedOutput"
                echo "\`\`\`"
                echo ""
              fi
            else
              if [[ -n "$mergedSuccess" ]]; then
                echo "The check fails on the base branch, but this PR fixes it, nicely done!"
              else
                echo "The check fails on both the base branch and after merging this PR, unknown if only this PRs changes would satisfy the check, the base branch needs to be fixed first."
                echo ""
                echo "Failure on the base branch:"
                echo "\`\`\`"
                echo "$baseOutput"
                echo "\`\`\`"
                echo ""
                echo "Failure after merging this PR:"
                echo "\`\`\`"
                echo "$mergedOutput"
                echo "\`\`\`"
                echo ""
              fi
            fi

            echo "### Details"
            echo "- nixpkgs-check-by-name tool:"
            echo "  - Channel: $channel"
            echo "  - Nixpkgs commit: [$rev](https://github.com/${GITHUB_REPOSITORY}/commit/$rev)"
            echo "  - Store path: \`$(realpath result)\`"
            echo "- Tested Nixpkgs:"
            echo "  - Base branch: $GITHUB_BASE_REF"
            echo "  - Latest base branch commit: [$baseSha](https://github.com/${GITHUB_REPOSITORY}/commit/$baseSha)"
            echo "  - Latest PR commit: [$headSha](https://github.com/${GITHUB_REPOSITORY}/commit/$headSha)"
            echo "  - Merge commit: [$mergedSha](https://github.com/${GITHUB_REPOSITORY}/commit/$mergedSha)"
          } >> "$GITHUB_STEP_SUMMARY"

          exit "$exitCode"
1  .gitignore (vendored)
@@ -5,6 +5,7 @@
.\#*
\#*\#
.idea/
.nixos-test-history
.vscode/
outputs/
result-*
@@ -3,7 +3,7 @@

This hook can be used to setup a check phase that
requires running a MPI application. It detects the
used present MPI implementaion type and exports
used present MPI implementation type and exports
the necessary environment variables to use
`mpirun` and `mpiexec` in a Nix sandbox.
@@ -161,7 +161,7 @@ in buildDotnetModule rec {
They can be installed either as a global tool for the entire system, or as a local tool specific to project.

The local installation is the easiest and works on NixOS in the same way as on other Linux distributions.
[See dotnet documention](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools#install-a-local-tool) to learn more.
[See dotnet documentation](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools#install-a-local-tool) to learn more.

[The global installation method](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools#install-a-global-tool)
should also work most of the time. You have to remember to update the `PATH`

@@ -221,7 +221,7 @@ Sadly we currently don’t have tooling for this. For this you might be
interested in the alternative [haskell.nix] framework, which, be warned, is
completely incompatible with packages from `haskellPackages`.

<!-- TODO(@maralorn) Link to package set generation docs in the contributers guide below. -->
<!-- TODO(@maralorn) Link to package set generation docs in the contributors guide below. -->

## `haskellPackages.mkDerivation` {#haskell-mkderivation}

@@ -1029,7 +1029,7 @@ ugly, and we may want to deprecate them at some point. -->
`disableCabalFlag flag drv`
: Makes sure that the Cabal flag `flag` is disabled in Cabal's configure step.

`appendBuildflags list drv`
`appendBuildFlags list drv`
: Adds the strings in `list` to the `buildFlags` argument for `drv`.

<!-- TODO(@sternenseemann): removeConfigureFlag -->

@@ -1192,7 +1192,7 @@ with GHC), it is recommended to use overlays for Nixpkgs to change them.
Since the interrelated parts, i.e. the package set and GHC, are connected
via the Nixpkgs fixpoint, we need to modify them both in a way that preserves
their connection (or else we'd have to wire it up again manually). This is
achieved by changing GHC and the package set in seperate overlays to prevent
achieved by changing GHC and the package set in separate overlays to prevent
the package set from pulling in GHC from `prev`.

The result is two overlays like the ones shown below. Adjustable parts are
@@ -161,6 +161,8 @@ git config --global url."https://github.com/".insteadOf git://github.com/

`buildNpmPackage` allows you to package npm-based projects in Nixpkgs without the use of an auto-generated dependencies file (as used in [node2nix](#javascript-node2nix)). It works by utilizing npm's cache functionality -- creating a reproducible cache that contains the dependencies of a project, and pointing npm to it.

Here's an example:

```nix
{ lib, buildNpmPackage, fetchFromGitHub }:

@@ -191,6 +193,8 @@ buildNpmPackage rec {
}
```

In the default `installPhase` set by `buildNpmPackage`, it uses `npm pack --json --dry-run` to decide what files to install in `$out/lib/node_modules/$name/`, where `$name` is the `name` string defined in the package's `package.json`. Additionally, the `bin` and `man` keys in the source's `package.json` are used to decide what binaries and manpages are supposed to be installed. If these are not defined, `npm pack` may miss some files, and no binaries will be produced.

#### Arguments {#javascript-buildNpmPackage-arguments}

* `npmDepsHash`: The output hash of the dependencies for this project. Can be calculated in advance with [`prefetch-npm-deps`](#javascript-buildNpmPackage-prefetch-npm-deps).
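The manual's example is cut off by the hunk boundary above; purely for orientation, a minimal `buildNpmPackage` call typically has this shape (the package name, repository, and hashes below are placeholders, not from this commit):

```nix
{ lib, buildNpmPackage, fetchFromGitHub }:

buildNpmPackage rec {
  pname = "my-tool";           # hypothetical package
  version = "1.0.0";

  src = fetchFromGitHub {
    owner = "someone";         # hypothetical repository
    repo = pname;
    rev = "v${version}";
    hash = lib.fakeHash;       # replace with the real source hash
  };

  # Hash of the npm dependency cache, computed with prefetch-npm-deps
  npmDepsHash = lib.fakeHash;  # replace after running prefetch-npm-deps

  meta.description = "A hypothetical npm-based package";
}
```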
@@ -204,10 +208,11 @@ buildNpmPackage rec {
* `npmBuildFlags`: Flags to pass to `npm run ${npmBuildScript}`.
* `npmPackFlags`: Flags to pass to `npm pack`.
* `npmPruneFlags`: Flags to pass to `npm prune`. Defaults to the value of `npmInstallFlags`.
* `makeWrapperArgs`: Flags to pass to `makeWrapper`, added to executable calling the generated `.js` with `node` as an interpreter. These scripts are defined in `package.json`.

#### prefetch-npm-deps {#javascript-buildNpmPackage-prefetch-npm-deps}

`prefetch-npm-deps` can calculate the hash of the dependencies of an npm project ahead of time.
`prefetch-npm-deps` is a Nixpkgs package that calculates the hash of the dependencies of an npm project ahead of time.

```console
$ ls

@@ -217,6 +222,15 @@ $ prefetch-npm-deps package-lock.json
sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
```

#### fetchNpmDeps {#javascript-buildNpmPackage-fetchNpmDeps}

`fetchNpmDeps` is a Nix function that requires the following mandatory arguments:

- `src`: A directory / tarball with `package-lock.json` file
- `hash`: The output hash of the node dependencies defined in `package-lock.json`.

It returns a derivation with all `package-lock.json` dependencies downloaded into `$out/`, usable as an npm cache.
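Building on the description above, a minimal `fetchNpmDeps` invocation might look like this (a sketch; the `src` and `hash` values are placeholders):

```nix
{ lib, fetchNpmDeps, fetchFromGitHub }:

fetchNpmDeps {
  # Any source containing a package-lock.json works here
  src = fetchFromGitHub {
    owner = "someone";   # hypothetical repository
    repo = "my-tool";
    rev = "v1.0.0";
    hash = lib.fakeHash;
  };
  # Output hash of the locked dependencies, e.g. from prefetch-npm-deps
  hash = lib.fakeHash;
}
```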
### corepack {#javascript-corepack}

This package puts the corepack wrappers for pnpm and yarn in your PATH, and they will honor the `packageManager` setting in the `package.json`.
@@ -200,7 +200,7 @@ Internally, the helper operates in three stages:
composer repository on the filesystem containing dependencies specified in
`composer.json`. This process uses the function
`php.mkComposerRepository` which in turn uses the
`php.composerHooks.composerRepositoryHook` hook. Internaly this function uses
`php.composerHooks.composerRepositoryHook` hook. Internally this function uses
a custom
[Composer plugin](https://github.com/nix-community/composer-local-repo-plugin) to
generate the repository.

@@ -817,7 +817,7 @@ $ cargo test
## Using community maintained Rust toolchains {#using-community-maintained-rust-toolchains}

::: {.note}
Note: The following projects cannot be used within nixpkgs since [IFD](#ssec-import-from-derivation) is disallowed.
The following projects cannot be used within Nixpkgs since [Import From Derivation](https://nixos.org/manual/nix/unstable/language/import-from-derivation) (IFD) is disallowed in Nixpkgs.
To package things that require Rust nightly, `RUSTC_BOOTSTRAP = true;` can sometimes be used as a hack.
:::

@@ -134,7 +134,7 @@ If one of your favourite plugins isn't packaged, you can package it yourself:
{ config, pkgs, ... }:

let
  easygrep = pkgs.vimUtils.buildVimPluginFrom2Nix {
  easygrep = pkgs.vimUtils.buildVimPlugin {
    name = "vim-easygrep";
    src = pkgs.fetchFromGitHub {
      owner = "dkprice";
@@ -212,9 +212,9 @@ Note: this is not possible anymore for Neovim.

## Adding new plugins to nixpkgs {#adding-new-plugins-to-nixpkgs}

Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`./update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names).
Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`nix-shell -p vimPluginsUpdater --run vim-plugins-updater`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/updater.nix). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names).

After running `./update.py`, if nvim-treesitter received an update, also run [`nvim-treesitter/update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py) to update the tree sitter grammars for `nvim-treesitter`.
After running the updater, if nvim-treesitter received an update, also run [`nvim-treesitter/update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py) to update the tree sitter grammars for `nvim-treesitter`.

Some plugins require overrides in order to function properly. Overrides are placed in [overrides.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/overrides.nix). Overrides are most often required when a plugin requires some dependencies, or extra steps are required during the build process. For example `deoplete-fish` requires both `deoplete-nvim` and `vim-fish`, and so the following override was added (sketched below, since the hunk is cut off here):
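The hunk ends before showing that override; for illustration, it plausibly has roughly this shape in `overrides.nix` (a sketch, not the verbatim expression from this commit):

```nix
deoplete-fish = super.deoplete-fish.overrideAttrs (old: {
  # Make the plugin's runtime dependencies available alongside it
  dependencies = with super; [ deoplete-nvim vim-fish ];
});
```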
@@ -241,7 +241,8 @@ GITHUB_API_TOKEN=my_token ./pkgs/applications/editors/vim/plugins/update.py
Alternatively, set the number of processes to a lower count to avoid rate-limiting.

```sh
./pkgs/applications/editors/vim/plugins/update.py --proc 1
nix-shell -p vimPluginsUpdater --run 'vim-plugins-updater --proc 1'
```

## How to maintain an out-of-tree overlay of vim plugins ? {#vim-out-of-tree-overlays}

@@ -250,7 +251,7 @@ You can use the updater script to generate basic packages out of a custom vim
plugin list:

```
pkgs/applications/editors/vim/plugins/update.py -i vim-plugin-names -o generated.nix --no-commit
nix-shell -p vimPluginsUpdater --run vim-plugins-updater -i vim-plugin-names -o generated.nix --no-commit
```

with the contents of `vim-plugin-names` being for example:

@@ -264,7 +265,7 @@ You can then reference the generated vim plugins via:

```nix
myVimPlugins = pkgs.vimPlugins.extend (
  (pkgs.callPackage generated.nix {})
  (pkgs.callPackage ./generated.nix {})
);
```
@@ -991,13 +991,56 @@ Hook executed at the end of the fixup phase.

If set to `true`, the standard environment will enable debug information in C/C++ builds. After installation, the debug information will be separated from the executables and stored in the output named `debug`. (This output is enabled automatically; you don’t need to set the `outputs` attribute explicitly.) To be precise, the debug information is stored in `debug/lib/debug/.build-id/XX/YYYY…`, where \<XXYYYY…\> is the \<build ID\> of the binary — a SHA-1 hash of the contents of the binary. Debuggers like GDB use the build ID to look up the separated debug information.

For example, with GDB, you can add
:::{.example #ex-gdb-debug-symbols-socat}

```
set debug-file-directory ~/.nix-profile/lib/debug
# Enable debug symbols for use with GDB

To make GDB find debug information for the `socat` package and its dependencies, you can use the following `shell.nix`:

```nix
let
  pkgs = import ./. {
    config = {};
    overlays = [
      (final: prev: {
        ncurses = prev.ncurses.overrideAttrs { separateDebugInfo = true; };
        readline = prev.readline.overrideAttrs { separateDebugInfo = true; };
      })
    ];
  };

  myDebugInfoDirs = pkgs.symlinkJoin {
    name = "myDebugInfoDirs";
    paths = with pkgs; [
      glibc.debug
      ncurses.debug
      openssl.debug
      readline.debug
    ];
  };
in
pkgs.mkShell {

  NIX_DEBUG_INFO_DIRS = "${pkgs.lib.getLib myDebugInfoDirs}/lib/debug";

  packages = [
    pkgs.gdb
    pkgs.socat
  ];

  shellHook = ''
    ${pkgs.lib.getBin pkgs.gdb}/bin/gdb ${pkgs.lib.getBin pkgs.socat}/bin/socat
  '';
}
```

to `~/.gdbinit`. GDB will then be able to find debug information installed via `nix-env -i`.
This setup works as follows:
- Add [`overlays`](#chap-overlays) to the package set, since debug symbols are disabled for `ncurses` and `readline` by default.
- Create a derivation to combine all required debug symbols under one path with [`symlinkJoin`](#trivial-builder-symlinkJoin).
- Set the environment variable `NIX_DEBUG_INFO_DIRS` in the shell. Nixpkgs patches `gdb` to use it for looking up debug symbols.
- Run `gdb` on the `socat` binary on shell startup in the [`shellHook`](#sec-pkgs-mkShell). Here we use [`lib.getBin`](#function-library-lib.attrsets.getBin) to ensure that the correct derivation output is selected rather than the default one.

:::

### The installCheck phase {#ssec-installCheck-phase}
@@ -1,6 +1,7 @@
# Using Nixpkgs {#part-using}

```{=include=} chapters
using/platform-support.chapter.md
using/configuration.chapter.md
using/overlays.chapter.md
using/overrides.chapter.md
18  doc/using/platform-support.chapter.md (new file)
@@ -0,0 +1,18 @@
# Platform Support {#chap-platform-support}

Packages receive varying degrees of support, both in terms of maintainer attention and available computation resources for continuous integration (CI).

Below is the list of the best supported platforms:

- `x86_64-linux`: Highest level of support.
- `aarch64-linux`: Well supported, with most packages building successfully in CI.
- `aarch64-darwin`: Receives better support than `x86_64-darwin`.
- `x86_64-darwin`: Receives some support.

There are many other platforms with varying levels of support.
The provisional platform list in [Appendix A] of [RFC046], while not up to date, can be used as guidance.

A more formal definition of the platform support tiers is provided in [RFC046], but has not been fully implemented yet.

[RFC046]: https://github.com/NixOS/rfcs/blob/master/rfcs/0046-platform-support-tiers.md
[Appendix A]: https://github.com/NixOS/rfcs/blob/master/rfcs/0046-platform-support-tiers.md#appendix-a-non-normative-description-of-platforms-in-november-2019
@@ -338,7 +338,7 @@ rec {
  );

  /*
    Like builtins.foldl' but for attribute sets.
    Like [`lib.lists.foldl'`](#function-library-lib.lists.foldl-prime) but for attribute sets.
    Iterates over every name-value pair in the given attribute set.
    The result of the callback function is often called `acc` for accumulator. It is passed between callbacks from left to right and the final `acc` is the return value of `foldlAttrs`.
@@ -372,9 +372,9 @@ rec {
    123

    foldlAttrs
      (_: _: v: v)
      (throw "initial accumulator not needed")
      { z = 3; a = 2; };
      (acc: _: _: acc)
      3
      { z = throw "value not needed"; a = throw "value not needed"; };
    ->
    3
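For context on the example change above: `foldlAttrs` calls the callback with the accumulator, the attribute name, and the attribute value, visiting attributes in alphabetical order; the new example in the hunk shows that values are only forced if the callback uses them. A minimal self-contained illustration (a sketch, assuming nixpkgs' `lib` is in scope):

```nix
# nix repl sketch
lib.foldlAttrs
  (acc: name: value: acc ++ [ "${name}=${toString value}" ])
  [ ]
  { b = 2; a = 1; }
# => [ "a=1" "b=2" ]
```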
@@ -542,6 +542,36 @@ rec {
    attrs:
    map (name: f name attrs.${name}) (attrNames attrs);

  /*
    Deconstruct an attrset to a list of name-value pairs as expected by [`builtins.listToAttrs`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-listToAttrs).
    Each element of the resulting list is an attribute set with these attributes:
    - `name` (string): The name of the attribute
    - `value` (any): The value of the attribute

    The following is always true:
    ```nix
    builtins.listToAttrs (attrsToList attrs) == attrs
    ```

    :::{.warning}
    The opposite is not always true. In general expect that
    ```nix
    attrsToList (builtins.listToAttrs list) != list
    ```

    This is because the `listToAttrs` removes duplicate names and doesn't preserve the order of the list.
    :::

    Example:
      attrsToList { foo = 1; bar = "asdf"; }
      => [ { name = "bar"; value = "asdf"; } { name = "foo"; value = 1; } ]

    Type:
      attrsToList :: AttrSet -> [ { name :: String; value :: Any; } ]

  */
  attrsToList = mapAttrsToList nameValuePair;
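To make the warning in the doc comment above concrete, here is a small sketch of how `listToAttrs` collapses duplicates (the first occurrence of a name wins), so the round trip through `attrsToList` cannot restore the original list:

```nix
# nix repl sketch
builtins.listToAttrs [
  { name = "a"; value = 1; }
  { name = "a"; value = 2; }  # duplicate name, dropped by listToAttrs
]
# => { a = 1; }
# attrsToList of that result is [ { name = "a"; value = 1; } ], not the original list
```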

  /* Like `mapAttrs`, except that it recursively applies itself to
     the *leaf* attributes of a potentially-nested attribute set:
@@ -69,8 +69,8 @@ rec {
     "<pkg>.overrideDerivation" to learn about `overrideDerivation` and caveats
     related to its use.
  */
  makeOverridable = f: origArgs:
    let
  makeOverridable = f: lib.setFunctionArgs
    (origArgs: let
      result = f origArgs;

      # Creates a functor with the same arguments as f
@@ -95,7 +95,8 @@ rec {
        lib.setFunctionArgs result (lib.functionArgs result) // {
          override = overrideArgs;
        }
      else result;
      else result)
    (lib.functionArgs f);

  /* Call the package function in the file `fn` with the required
@@ -81,8 +81,8 @@ let
    inherit (self.attrsets) attrByPath hasAttrByPath setAttrByPath
      getAttrFromPath attrVals attrValues getAttrs catAttrs filterAttrs
      filterAttrsRecursive foldlAttrs foldAttrs collect nameValuePair mapAttrs
      mapAttrs' mapAttrsToList concatMapAttrs mapAttrsRecursive mapAttrsRecursiveCond
      genAttrs isDerivation toDerivation optionalAttrs
      mapAttrs' mapAttrsToList attrsToList concatMapAttrs mapAttrsRecursive
      mapAttrsRecursiveCond genAttrs isDerivation toDerivation optionalAttrs
      zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil
      recursiveUpdate matchAttrs overrideExisting showAttrPath getOutput getBin
      getLib getDev getMan chooseDevOutputs zipWithNames zip

@@ -106,6 +106,7 @@ let
      upperChars toLower toUpper addContextFrom splitString
      removePrefix removeSuffix versionOlder versionAtLeast
      getName getVersion
      cmakeOptionType cmakeBool cmakeFeature
      mesonOption mesonBool mesonEnable
      nameFromURL enableFeature enableFeatureAs withFeature
      withFeatureAs fixedWidthString fixedWidthNumber
@@ -1,5 +1,10 @@
# File set library

This is the internal contributor documentation.
The user documentation is [in the Nixpkgs manual](https://nixos.org/manual/nixpkgs/unstable/#sec-fileset).

## Goals

The main goal of the file set library is to be able to select local files that should be added to the Nix store.
It should have the following properties:
- Easy:
@@ -41,12 +46,20 @@ An attribute set with these values:
- `_type` (constant string `"fileset"`):
  Tag to indicate this value is a file set.

- `_internalVersion` (constant `2`, the current version):
- `_internalVersion` (constant `3`, the current version):
  Version of the representation.

- `_internalIsEmptyWithoutBase` (bool):
  Whether this file set is the empty file set without a base path.
  If `true`, `_internalBase*` and `_internalTree` are not set.
  This is the only way to represent an empty file set without needing a base path.

  Such a value can be used as the identity element for `union` and the return value of `unions []` and co.

- `_internalBase` (path):
  Any files outside of this path cannot influence the set of files.
  This is always a directory.
  This is always a directory and should be as long as possible.
  This is used by `lib.fileset.toSource` to check that all files are under the `root` argument

- `_internalBaseRoot` (path):
  The filesystem root of `_internalBase`, same as `(lib.path.splitRoot _internalBase).root`.
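Putting the fields above together, a non-empty v3 file set value has roughly this shape; the following is a sketch assembled from the field descriptions, not code from this commit:

```nix
{
  _type = "fileset";
  _internalVersion = 3;
  _internalIsEmptyWithoutBase = false;
  _internalBase = ./some/dir;            # always a directory
  _internalBaseRoot = /.;                # (lib.path.splitRoot _internalBase).root
  _internalBaseComponents = [ "some" "dir" ];
  _internalTree = { };                   # filesetTree: attrset of subtrees, "directory", or null
}
```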
@@ -111,9 +124,57 @@ Arguments:
- (+) This can be removed later, if we discover it's too restrictive
- (-) It leads to errors when a sensible result could sometimes be returned, such as in the above example.

### Empty file set without a base

There is a special representation for an empty file set without a base path.
This is used for return values that should be empty but when there's no base path that would make sense.

Arguments:
- Alternative: This could also be represented using `_internalBase = /.` and `_internalTree = null`.
  - (+) Removes the need for a special representation.
  - (-) Due to [influence tracking](#influence-tracking),
    `union empty ./.` would have `/.` as the base path,
    which would then prevent `toSource { root = ./.; fileset = union empty ./.; }` from working,
    which is not as one would expect.
  - (-) With the assumption that there can be multiple filesystem roots (as established with the [path library](../path/README.md)),
    this would have to cause an error with `union empty pathWithAnotherFilesystemRoot`,
    which is not as one would expect.
- Alternative: Do not have such a value and error when it would be needed as a return value
  - (+) Removes the need for a special representation.
  - (-) Leaves us with no identity element for `union` and no reasonable return value for `unions []`.
    From a set theory perspective, which has a well-known notion of empty sets, this is unintuitive.

### No intersection for lists

While there is `intersection a b`, there is no function `intersections [ a b c ]`.

Arguments:
- (+) There is no known use case for such a function, it can be added later if a use case arises
- (+) There is no suitable return value for `intersections [ ]`, see also "Nullary intersections" [here](https://en.wikipedia.org/w/index.php?title=List_of_set_identities_and_relations&oldid=1177174035#Definitions)
  - (-) Could throw an error for that case
  - (-) Create a special value to represent "all the files" and return that
    - (+) Such a value could then not be used with `fileFilter` unless the internal representation is changed considerably
  - (-) Could return the empty file set
    - (+) This would be wrong in set theory
- (-) Inconsistent with `union` and `unions`

### Intersection base path

The base path of the result of an `intersection` is the longest base path of the arguments.
E.g. the base path of `intersection ./foo ./foo/bar` is `./foo/bar`.
Meanwhile `intersection ./foo ./bar` returns the empty file set without a base path.

Arguments:
- Alternative: Use the common prefix of all base paths as the resulting base path
  - (-) This is unnecessarily strict, because the purpose of the base path is to track the directory under which files _could_ be in the file set. It should be as long as possible.
    All files contained in `intersection ./foo ./foo/bar` will be under `./foo/bar` (never just under `./foo`), and `intersection ./foo ./bar` will never contain any files (never under `./.`).
    This would lead to `toSource` having to unexpectedly throw errors for cases such as `toSource { root = ./foo; fileset = intersect ./foo base; }`, where `base` may be `./bar` or `./.`.
  - (-) There is no benefit to the user, since base path is not directly exposed in the interface

### Empty directories

File sets can only represent a _set_ of local files, directories on their own are not representable.
File sets can only represent a _set_ of local files.
Directories on their own are not representable.

Arguments:
- (+) There does not seem to be a sensible set of combinators when directories can be represented on their own.
@@ -129,7 +190,7 @@ Arguments:

- `./.` represents all files in `./.` _and_ the directory itself, but not its subdirectories, meaning that at least `./.` will be preserved even if it's empty.

  In that case, `intersect ./. ./foo` should only include files and no directories themselves, since `./.` includes only `./.` as a directory, and same for `./foo`, so there's no overlap in directories.
  In that case, `intersection ./. ./foo` should only include files and no directories themselves, since `./.` includes only `./.` as a directory, and same for `./foo`, so there's no overlap in directories.
  But intuitively this operation should result in the same as `./foo` – everything else is just confusing.
- (+) This matches how Git only supports files, so developers should already be used to it.
- (-) Empty directories (even if they contain nested directories) are neither representable nor preserved when coercing from paths.
@@ -144,7 +205,7 @@ File sets do not support Nix store paths in strings such as `"/nix/store/...-sou

Arguments:
- (+) Such paths are usually produced by derivations, which means `toSource` would either:
  - Require IFD if `builtins.path` is used as the underlying primitive
  - Require [Import From Derivation](https://nixos.org/manual/nix/unstable/language/import-from-derivation) (IFD) if `builtins.path` is used as the underlying primitive
  - Require importing the entire `root` into the store such that derivations can be used to do the filtering
- (+) The convenient path coercion like `union ./foo ./bar` wouldn't work for absolute paths, requiring more verbose alternate interfaces:
  - `let root = "/nix/store/...-source"; in union "${root}/foo" "${root}/bar"`
@@ -180,6 +241,5 @@ Here's a list of places in the library that need to be updated in the future:
- > The file set library is currently somewhat limited but is being expanded to include more functions over time.

  in [the manual](../../doc/functions/fileset.section.md)
- Once a tracing function exists, `__noEval` in [internal.nix](./internal.nix) should mention it
- If/Once a function to convert `lib.sources` values into file sets exists, the `_coerce` and `toSource` functions should be updated to mention that function in the error when such a value is passed
- If/Once a function exists that can optionally include a path depending on whether it exists, the error message for the path not existing in `_coerce` should mention the new function
@@ -6,16 +6,20 @@ let
    _coerceMany
    _toSourceFilter
    _unionMany
    _printFileset
    _intersection
    ;

  inherit (builtins)
    isList
    isPath
    pathExists
    seq
    typeOf
    ;

  inherit (lib.lists)
    elemAt
    imap0
    ;
@@ -156,7 +160,7 @@ If a directory does not recursively contain any file, it is omitted from the sto
        lib.fileset.toSource: `root` is of type ${typeOf root}, but it should be a path instead.''
    # Currently all Nix paths have the same filesystem root, but this could change in the future.
    # See also ../path/README.md
    else if rootFilesystemRoot != filesetFilesystemRoot then
    else if ! fileset._internalIsEmptyWithoutBase && rootFilesystemRoot != filesetFilesystemRoot then
      throw ''
        lib.fileset.toSource: Filesystem roots are not the same for `fileset` and `root` ("${toString root}"):
            `root`: root "${toString rootFilesystemRoot}"
@@ -170,7 +174,7 @@ If a directory does not recursively contain any file, it is omitted from the sto
        lib.fileset.toSource: `root` (${toString root}) is a file, but it should be a directory instead. Potential solutions:
            - If you want to import the file into the store _without_ a containing directory, use string interpolation or `builtins.path` instead of this function.
            - If you want to import the file into the store _with_ a containing directory, set `root` to the containing directory, such as ${toString (dirOf root)}, and set `fileset` to the file path.''
    else if ! hasPrefix root fileset._internalBase then
    else if ! fileset._internalIsEmptyWithoutBase && ! hasPrefix root fileset._internalBase then
      throw ''
        lib.fileset.toSource: `fileset` could contain files in ${toString fileset._internalBase}, which is not under the `root` (${toString root}). Potential solutions:
            - Set `root` to ${toString fileset._internalBase} or any directory higher up. This changes the layout of the resulting store path.
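For orientation, a well-formed call that satisfies the checks above might look like this (paths are illustrative):

```nix
# root must be a directory, and every file in fileset must live under it
lib.fileset.toSource {
  root = ./.;
  fileset = lib.fileset.unions [ ./src ./Makefile ];
}
```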
@@ -258,15 +262,11 @@ If a directory does not recursively contain any file, it is omitted from the sto
  */
  unions =
    # A list of file sets.
    # Must contain at least 1 element.
    # The elements can also be paths,
    # which get [implicitly coerced to file sets](#sec-fileset-path-coercion).
    filesets:
    if ! isList filesets then
      throw "lib.fileset.unions: Expected argument to be a list, but got a ${typeOf filesets}."
    else if filesets == [ ] then
      # TODO: This could be supported, but requires an extra internal representation for the empty file set, which would be special for not having a base path.
      throw "lib.fileset.unions: Expected argument to be a list with at least one element, but it contains no elements."
    else
      pipe filesets [
        # Annotate the elements with context, used by _coerceMany for better errors
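The deleted throw above means `unions [ ]` is now expected to return the new empty file set instead of erroring, giving `union` an identity element. A sketch of what that enables (assuming this change):

```nix
# The empty union selects no files at all
lib.fileset.toSource {
  root = ./.;
  fileset = lib.fileset.unions [ ];  # empty file set without a base path
}
```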
@@ -278,4 +278,132 @@ If a directory does not recursively contain any file, it is omitted from the sto
        _unionMany
      ];

  /*
    The file set containing all files that are in both of two given file sets.
    See also [Intersection (set theory)](https://en.wikipedia.org/wiki/Intersection_(set_theory)).

    The given file sets are evaluated as lazily as possible,
    with the first argument being evaluated first if needed.

    Type:
      intersection :: FileSet -> FileSet -> FileSet

    Example:
      # Limit the selected files to the ones in ./., so only ./src and ./Makefile
      intersection ./. (unions [ ../LICENSE ./src ./Makefile ])
  */
  intersection =
    # The first file set.
    # This argument can also be a path,
    # which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
    fileset1:
    # The second file set.
    # This argument can also be a path,
    # which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
    fileset2:
    let
      filesets = _coerceMany "lib.fileset.intersection" [
        {
          context = "first argument";
          value = fileset1;
        }
        {
          context = "second argument";
          value = fileset2;
        }
      ];
    in
    _intersection
      (elemAt filesets 0)
      (elemAt filesets 1);

  /*
    Incrementally evaluate and trace a file set in a pretty way.
    This function is only intended for debugging purposes.
    The exact tracing format is unspecified and may change.

    This function takes a final argument to return.
    In comparison, [`traceVal`](#function-library-lib.fileset.traceVal) returns
    the given file set argument.

    This variant is useful for tracing file sets in the Nix repl.

    Type:
      trace :: FileSet -> Any -> Any

    Example:
      trace (unions [ ./Makefile ./src ./tests/run.sh ]) null
      =>
      trace: /home/user/src/myProject
      trace: - Makefile (regular)
      trace: - src (all files in directory)
      trace: - tests
      trace:   - run.sh (regular)
      null
  */
  trace =
    /*
      The file set to trace.

      This argument can also be a path,
      which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
    */
    fileset:
    let
      # "fileset" would be a better name, but that would clash with the argument name,
      # and we cannot change that because of https://github.com/nix-community/nixdoc/issues/76
      actualFileset = _coerce "lib.fileset.trace: argument" fileset;
    in
    seq
      (_printFileset actualFileset)
      (x: x);

  /*
    Incrementally evaluate and trace a file set in a pretty way.
    This function is only intended for debugging purposes.
    The exact tracing format is unspecified and may change.

    This function returns the given file set.
    In comparison, [`trace`](#function-library-lib.fileset.trace) takes another argument to return.

    This variant is useful for tracing file sets passed as arguments to other functions.

    Type:
      traceVal :: FileSet -> FileSet

    Example:
      toSource {
        root = ./.;
        fileset = traceVal (unions [
          ./Makefile
          ./src
          ./tests/run.sh
        ]);
      }
      =>
      trace: /home/user/src/myProject
      trace: - Makefile (regular)
      trace: - src (all files in directory)
      trace: - tests
      trace:   - run.sh (regular)
      "/nix/store/...-source"
  */
  traceVal =
    /*
      The file set to trace and return.

      This argument can also be a path,
      which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
    */
    fileset:
    let
      # "fileset" would be a better name, but that would clash with the argument name,
      # and we cannot change that because of https://github.com/nix-community/nixdoc/issues/76
      actualFileset = _coerce "lib.fileset.traceVal: argument" fileset;
    in
    seq
      (_printFileset actualFileset)
      # We could also return the original fileset argument here,
      # but that would then duplicate work for consumers of the fileset, because then they have to coerce it again
      actualFileset;
}
@@ -7,11 +7,14 @@ let
    isString
    pathExists
    readDir
    typeOf
    seq
    split
    trace
    typeOf
    ;

  inherit (lib.attrsets)
    attrNames
    attrValues
    mapAttrs
    setAttrByPath
|
|||
drop
|
||||
elemAt
|
||||
filter
|
||||
findFirst
|
||||
findFirstIndex
|
||||
foldl'
|
||||
head
|
||||
|
@@ -64,7 +68,7 @@ rec {
  # - Increment this version
  # - Add an additional migration function below
  # - Update the description of the internal representation in ./README.md
  _currentVersion = 2;
  _currentVersion = 3;

  # Migrations between versions. The 0th element converts from v0 to v1, and so on
  migrations = [
@@ -89,8 +93,38 @@ rec {
        _internalVersion = 2;
      }
    )

    # Convert v2 into v3: filesetTree's now have a representation for an empty file set without a base path
    (
      filesetV2:
      filesetV2 // {
        # All v1 file sets are not the new empty file set
        _internalIsEmptyWithoutBase = false;
        _internalVersion = 3;
      }
    )
  ];
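The list above only defines the per-version steps; for illustration, applying them to a value from an older version could look roughly like this (a sketch; `migrateUp` and `oldFileset` are hypothetical names, not from this commit):

```nix
let
  # Apply every migration from the value's version up to _currentVersion
  migrateUp = fileset:
    builtins.foldl' (fs: migration: migration fs) fileset
      (lib.lists.sublist
        fileset._internalVersion
        (_currentVersion - fileset._internalVersion)
        migrations);
in
migrateUp oldFileset  # e.g. a v2 value becomes a v3 value
```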

  _noEvalMessage = ''
    lib.fileset: Directly evaluating a file set is not supported.
      To turn it into a usable source, use `lib.fileset.toSource`.
      To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'';

  # The empty file set without a base path
  _emptyWithoutBase = {
    _type = "fileset";

    _internalVersion = _currentVersion;

    # The one and only!
    _internalIsEmptyWithoutBase = true;

    # Due to alphabetical ordering, this is evaluated last,
    # which makes the nix repl output nicer than if it would be ordered first.
    # It also allows evaluating it strictly up to this error, which could be useful
    _noEval = throw _noEvalMessage;
  };

  # Create a fileset, see ./README.md#fileset
  # Type: path -> filesetTree -> fileset
  _create = base: tree:
@@ -103,14 +137,17 @@ rec {
      _type = "fileset";

      _internalVersion = _currentVersion;

      _internalIsEmptyWithoutBase = false;
      _internalBase = base;
      _internalBaseRoot = parts.root;
      _internalBaseComponents = components parts.subpath;
      _internalTree = tree;

      # Double __ to make it be evaluated and ordered first
      __noEval = throw ''
        lib.fileset: Directly evaluating a file set is not supported. Use `lib.fileset.toSource` to turn it into a usable source instead.'';
      # Due to alphabetical ordering, this is evaluated last,
      # which makes the nix repl output nicer than if it would be ordered first.
      # It also allows evaluating it strictly up to this error, which could be useful
      _noEval = throw _noEvalMessage;
    };

  # Coerce a value to a fileset, erroring when the value cannot be coerced.
@@ -155,14 +192,20 @@ rec {
        _coerce "${functionContext}: ${context}" value
      ) list;

      firstBaseRoot = (head filesets)._internalBaseRoot;
      # Find the first value with a base, there may be none!
      firstWithBase = findFirst (fileset: ! fileset._internalIsEmptyWithoutBase) null filesets;
      # This value is only accessed if first != null
      firstBaseRoot = firstWithBase._internalBaseRoot;

      # Finds the first element with a filesystem root different than the first element, if any
      differentIndex = findFirstIndex (fileset:
        firstBaseRoot != fileset._internalBaseRoot
        # The empty value without a base doesn't have a base path
        ! fileset._internalIsEmptyWithoutBase
        && firstBaseRoot != fileset._internalBaseRoot
      ) null filesets;
    in
    if differentIndex != null then
    # Only evaluates `differentIndex` if there are any elements with a base
    if firstWithBase != null && differentIndex != null then
      throw ''
        ${functionContext}: Filesystem roots are not the same:
            ${(head list).context}: root "${toString firstBaseRoot}"
|
|||
// value;
|
||||
|
||||
/*
|
||||
Simplify a filesetTree recursively:
|
||||
- Replace all directories that have no files with `null`
|
||||
A normalisation of a filesetTree suitable filtering with `builtins.path`:
|
||||
- Replace all directories that have no files with `null`.
|
||||
This removes directories that would be empty
|
||||
- Replace all directories with all files with `"directory"`
|
||||
- Replace all directories with all files with `"directory"`.
|
||||
This speeds up the source filter function
|
||||
|
||||
Note that this function is strict, it evaluates the entire tree
|
||||
|
||||
Type: Path -> filesetTree -> filesetTree
|
||||
*/
|
||||
_simplifyTree = path: tree:
|
||||
_normaliseTreeFilter = path: tree:
|
||||
if tree == "directory" || isAttrs tree then
|
||||
let
|
||||
entries = _directoryEntries path tree;
|
||||
simpleSubtrees = mapAttrs (name: _simplifyTree (path + "/${name}")) entries;
|
||||
subtreeValues = attrValues simpleSubtrees;
|
||||
normalisedSubtrees = mapAttrs (name: _normaliseTreeFilter (path + "/${name}")) entries;
|
||||
subtreeValues = attrValues normalisedSubtrees;
|
||||
in
|
||||
# This triggers either when all files in a directory are filtered out
|
||||
# Or when the directory doesn't contain any files at all
|
||||
|
@@ -228,10 +271,112 @@ rec {
      else if all isString subtreeValues then
        "directory"
      else
        simpleSubtrees
        normalisedSubtrees
    else
      tree;

  /*
    A minimal normalisation of a filesetTree, intended for pretty-printing:
    - If all children of a path are recursively included or empty directories, the path itself is also recursively included
    - If all children of a path are fully excluded or empty directories, the path itself is an empty directory
    - Other empty directories are represented with the special "emptyDir" string
      While these could be replaced with `null`, that would take another mapAttrs

    Note that this function is partially lazy.

    Type: Path -> filesetTree -> filesetTree (with "emptyDir"'s)
  */
  _normaliseTreeMinimal = path: tree:
    if tree == "directory" || isAttrs tree then
      let
        entries = _directoryEntries path tree;
        normalisedSubtrees = mapAttrs (name: _normaliseTreeMinimal (path + "/${name}")) entries;
        subtreeValues = attrValues normalisedSubtrees;
      in
      # If there are no entries, or all entries are empty directories, return "emptyDir".
      # After this branch we know that there's at least one file
      if all (value: value == "emptyDir") subtreeValues then
        "emptyDir"

      # If all subtrees are fully included or empty directories
      # (both of which are coincidentally represented as strings), return "directory".
      # This takes advantage of the fact that empty directories can be represented as included directories.
      # Note that the tree == "directory" check allows avoiding recursion
      else if tree == "directory" || all (value: isString value) subtreeValues then
        "directory"

      # If all subtrees are fully excluded or empty directories, return null.
      # This takes advantage of the fact that empty directories can be represented as excluded directories
      else if all (value: isNull value || value == "emptyDir") subtreeValues then
        null

      # Mix of included and excluded entries
      else
        normalisedSubtrees
    else
      tree;

  # Trace a filesetTree in a pretty way when the resulting value is evaluated.
  # This can handle both normal filesetTree's, and ones returned from _normaliseTreeMinimal
  # Type: Path -> filesetTree (with "emptyDir"'s) -> Null
  _printMinimalTree = base: tree:
    let
      treeSuffix = tree:
        if isAttrs tree then
          ""
        else if tree == "directory" then
          " (all files in directory)"
        else
          # This does "leak" the file type strings of the internal representation,
          # but this is the main reason these file type strings even are in the representation!
          # TODO: Consider removing that information from the internal representation for performance.
          # The file types can still be printed by querying them only during tracing
          " (${tree})";

      # Only for attribute set trees
      traceTreeAttrs = prevLine: indent: tree:
        foldl' (prevLine: name:
          let
            subtree = tree.${name};

            # Evaluating this prints the line for this subtree
            thisLine =
              trace "${indent}- ${name}${treeSuffix subtree}" prevLine;
          in
          if subtree == null || subtree == "emptyDir" then
            # Don't print anything at all if this subtree is empty
            prevLine
          else if isAttrs subtree then
            # A directory with explicit entries
            # Do print this node, but also recurse
            traceTreeAttrs thisLine "${indent}  " subtree
          else
            # Either a file, or a recursively included directory
            # Do print this node but no further recursion needed
            thisLine
        ) prevLine (attrNames tree);

      # Evaluating this will print the first line
      firstLine =
        if tree == null || tree == "emptyDir" then
          trace "(empty)" null
        else
          trace "${toString base}${treeSuffix tree}" null;
    in
    if isAttrs tree then
      traceTreeAttrs firstLine "" tree
    else
      firstLine;

  # Pretty-print a file set in a pretty way when the resulting value is evaluated
  # Type: fileset -> Null
  _printFileset = fileset:
    if fileset._internalIsEmptyWithoutBase then
      trace "(empty)" null
    else
      _printMinimalTree fileset._internalBase
        (_normaliseTreeMinimal fileset._internalBase fileset._internalTree);

  # Turn a fileset into a source filter function suitable for `builtins.path`
  # Only directories recursively containing at least one file are recursed into
  # Type: Path -> fileset -> (String -> String -> Bool)
@@ -239,7 +384,7 @@ rec {
    let
      # Simplify the tree, necessary to make sure all empty directories are null
      # which has the effect that they aren't included in the result
      tree = _simplifyTree fileset._internalBase fileset._internalTree;
      tree = _normaliseTreeFilter fileset._internalBase fileset._internalTree;

      # The base path as a string with a single trailing slash
      baseString =
@@ -311,17 +456,59 @@ rec {
    # Special case because the code below assumes that the _internalBase is always included in the result
    # which shouldn't be done when we have no files at all in the base
    # This also forces the tree before returning the filter, leads to earlier error messages
    if tree == null then
    if fileset._internalIsEmptyWithoutBase || tree == null then
      empty
    else
      nonEmpty;

  # Transforms the filesetTree of a file set to a shorter base path, e.g.
  # _shortenTreeBase [ "foo" ] (_create /foo/bar null)
  # => { bar = null; }
  _shortenTreeBase = targetBaseComponents: fileset:
    let
      recurse = index:
        # If we haven't reached the required depth yet
        if index < length fileset._internalBaseComponents then
          # Create an attribute set and recurse as the value, this can be lazily evaluated this way
          { ${elemAt fileset._internalBaseComponents index} = recurse (index + 1); }
        else
          # Otherwise we reached the appropriate depth, here's the original tree
          fileset._internalTree;
    in
    recurse (length targetBaseComponents);

  # Transforms the filesetTree of a file set to a longer base path, e.g.
  # _lengthenTreeBase [ "foo" "bar" ] (_create /foo { bar.baz = "regular"; })
  # => { baz = "regular"; }
  _lengthenTreeBase = targetBaseComponents: fileset:
    let
      recurse = index: tree:
        # If the filesetTree is an attribute set and we haven't reached the required depth yet
        if isAttrs tree && index < length targetBaseComponents then
          # Recurse with the tree under the right component (which might not exist)
          recurse (index + 1) (tree.${elemAt targetBaseComponents index} or null)
        else
          # For all values here we can just return the tree itself:
          # tree == null -> the result is also null, everything is excluded
          # tree == "directory" -> the result is also "directory",
          #   because the base path is always a directory and everything is included
          # isAttrs tree -> the result is `tree`
          #   because we don't need to recurse any more since `index == length longestBaseComponents`
          tree;
    in
    recurse (length fileset._internalBaseComponents) fileset._internalTree;

  # Computes the union of a list of filesets.
  # The filesets must already be coerced and validated to be in the same filesystem root
  # Type: [ Fileset ] -> Fileset
  _unionMany = filesets:
    let
      first = head filesets;
      # All filesets that have a base, aka not the ones that are the empty value without a base
      filesetsWithBase = filter (fileset: ! fileset._internalIsEmptyWithoutBase) filesets;

      # The first fileset that has a base.
      # This value is only accessed if there are any.
      firstWithBase = head filesetsWithBase;

      # To be able to union filesetTree's together, they need to have the same base path.
      # Base paths can be unioned by taking their common prefix,
@@ -332,14 +519,14 @@ rec {
      # so this cannot cause a stack overflow due to a build-up of unevaluated thunks.
      commonBaseComponents = foldl'
        (components: el: commonPrefix components el._internalBaseComponents)
        first._internalBaseComponents
        firstWithBase._internalBaseComponents
        # We could also not do the `tail` here to avoid a list allocation,
        # but then we'd have to pay for a potentially expensive
        # but unnecessary `commonPrefix` call
        (tail filesets);
        (tail filesetsWithBase);

      # The common base path assembled from a filesystem root and the common components
      commonBase = append first._internalBaseRoot (join commonBaseComponents);
      commonBase = append firstWithBase._internalBaseRoot (join commonBaseComponents);

      # A list of filesetTree's that all have the same base path
      # This is achieved by nesting the trees into the components they have over the common base path
@@ -347,17 +534,17 @@ rec {
      # So the tree under `/foo/bar` gets nested under `{ bar = ...; ... }`,
      # while the tree under `/foo/baz` gets nested under `{ baz = ...; ... }`
      # Therefore allowing combined operations over them.
      trees = map (fileset:
        setAttrByPath
          (drop (length commonBaseComponents) fileset._internalBaseComponents)
          fileset._internalTree
      ) filesets;
      trees = map (_shortenTreeBase commonBaseComponents) filesetsWithBase;

      # Folds all trees together into a single one using _unionTree
      # We do not use a fold here because it would cause a thunk build-up
      # which could cause a stack overflow for a large number of trees
      resultTree = _unionTrees trees;
    in
    # If there are no values with a base, we have no files
    if filesetsWithBase == [ ] then
      _emptyWithoutBase
    else
      _create commonBase resultTree;
@ -379,4 +566,76 @@ rec {
|
|||
# The non-null elements have to be attribute sets representing partial trees
|
||||
# We need to recurse into those
|
||||
zipAttrsWith (name: _unionTrees) withoutNull;
|
||||
|
||||
# Computes the intersection of a list of filesets.
|
||||
# The filesets must already be coerced and validated to be in the same filesystem root
|
||||
# Type: Fileset -> Fileset -> Fileset
|
||||
_intersection = fileset1: fileset2:
|
||||
let
|
||||
# The common base components prefix, e.g.
|
||||
# (/foo/bar, /foo/bar/baz) -> /foo/bar
|
||||
# (/foo/bar, /foo/baz) -> /foo
|
||||
commonBaseComponentsLength =
|
||||
# TODO: Have a `lib.lists.commonPrefixLength` function such that we don't need the list allocation from commonPrefix here
|
||||
length (
|
||||
commonPrefix
|
||||
fileset1._internalBaseComponents
|
||||
fileset2._internalBaseComponents
|
||||
);

# To be able to intersect filesetTree's together, they need to have the same base path.
# Base paths can be intersected by taking the longest one (if any)

# The fileset with the longest base, if any, e.g.
# (/foo/bar, /foo/bar/baz) -> /foo/bar/baz
# (/foo/bar, /foo/baz) -> null
longestBaseFileset =
if commonBaseComponentsLength == length fileset1._internalBaseComponents then
# The common prefix is the same as the first path, so the second path is equal or longer
fileset2
else if commonBaseComponentsLength == length fileset2._internalBaseComponents then
# The common prefix is the same as the second path, so the first path is longer
fileset1
else
# The common prefix is neither the first nor the second path
# This means there's no overlap between the two sets
null;

# Whether the result should be the empty value without a base
resultIsEmptyWithoutBase =
# If either fileset is the empty fileset without a base, the intersection is too
fileset1._internalIsEmptyWithoutBase
|| fileset2._internalIsEmptyWithoutBase
# If there is no overlap between the base paths
|| longestBaseFileset == null;

# Lengthen each fileset's tree to the longest base prefix
tree1 = _lengthenTreeBase longestBaseFileset._internalBaseComponents fileset1;
tree2 = _lengthenTreeBase longestBaseFileset._internalBaseComponents fileset2;

# With two filesetTree's with the same base, we can compute their intersection
resultTree = _intersectTree tree1 tree2;
in
if resultIsEmptyWithoutBase then
_emptyWithoutBase
else
_create longestBaseFileset._internalBase resultTree;

# The intersection of two filesetTree's with the same base path
# The second element is only evaluated as much as necessary.
# Type: filesetTree -> filesetTree -> filesetTree
_intersectTree = lhs: rhs:
if isAttrs lhs && isAttrs rhs then
# Both sides are attribute sets, we can recurse for the attributes existing on both sides
mapAttrs
(name: _intersectTree lhs.${name})
(builtins.intersectAttrs lhs rhs)
else if lhs == null || isString rhs then
# If the lhs is null, the result should also be null
# And if the rhs is the identity element
# (a string, aka it includes everything), then it's also the lhs
lhs
else
# In all other cases it's the rhs
rhs;
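
# Illustrative sketch (hypothetical trees, not part of the library): with the
# representation above,
#   _intersectTree { a = "regular"; b = null; } { a = "regular"; b = "regular"; }
# evaluates to { a = "regular"; b = null; }: only paths included on both sides remain.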
}

@ -57,18 +57,35 @@ with lib.fileset;'
expectEqual() {
local actualExpr=$1
local expectedExpr=$2
if ! actualResult=$(nix-instantiate --eval --strict --show-trace \
if actualResult=$(nix-instantiate --eval --strict --show-trace 2>"$tmp"/actualStderr \
--expr "$prefixExpression ($actualExpr)"); then
die "$actualExpr failed to evaluate, but it was expected to succeed"
actualExitCode=$?
else
actualExitCode=$?
fi
if ! expectedResult=$(nix-instantiate --eval --strict --show-trace \
actualStderr=$(< "$tmp"/actualStderr)

if expectedResult=$(nix-instantiate --eval --strict --show-trace 2>"$tmp"/expectedStderr \
--expr "$prefixExpression ($expectedExpr)"); then
die "$expectedExpr failed to evaluate, but it was expected to succeed"
expectedExitCode=$?
else
expectedExitCode=$?
fi
expectedStderr=$(< "$tmp"/expectedStderr)

if [[ "$actualExitCode" != "$expectedExitCode" ]]; then
echo "$actualStderr" >&2
echo "$actualResult" >&2
die "$actualExpr should have exited with $expectedExitCode, but it exited with $actualExitCode"
fi

if [[ "$actualResult" != "$expectedResult" ]]; then
die "$actualExpr should have evaluated to $expectedExpr:\n$expectedResult\n\nbut it evaluated to\n$actualResult"
fi

if [[ "$actualStderr" != "$expectedStderr" ]]; then
die "$actualExpr should have had this on stderr:\n$expectedStderr\n\nbut it was\n$actualStderr"
fi
}
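
# A hypothetical call (a sketch, not from the suite itself) would look like:
#   expectEqual 'union ./a ./b' 'unions [ ./a ./b ]'
# and passes only if both expressions agree on result, exit code and stderr.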

# Check that a nix expression evaluates successfully to a store path and returns it (without quotes).
@ -84,14 +101,14 @@ expectStorePath() {
crudeUnquoteJSON <<< "$result"
}

# Check that a nix expression fails to evaluate (strictly, coercing to json, read-write-mode).
# Check that a nix expression fails to evaluate (strictly, read-write-mode).
# And check the received stderr against a regex
# The expression has `lib.fileset` in scope.
# Usage: expectFailure NIX REGEX
expectFailure() {
local expr=$1
local expectedErrorRegex=$2
if result=$(nix-instantiate --eval --strict --json --read-write-mode --show-trace 2>"$tmp/stderr" \
if result=$(nix-instantiate --eval --strict --read-write-mode --show-trace 2>"$tmp/stderr" \
--expr "$prefixExpression $expr"); then
die "$expr evaluated successfully to $result, but it was expected to fail"
fi
@ -101,16 +118,112 @@ expectFailure() {
fi
}

# We conditionally use inotifywait in checkFileset.
# Check that the traces of a Nix expression are as expected when evaluated.
# The expression has `lib.fileset` in scope.
# Usage: expectTrace NIX STR
expectTrace() {
local expr=$1
local expectedTrace=$2

nix-instantiate --eval --show-trace >/dev/null 2>"$tmp"/stderrTrace \
--expr "$prefixExpression trace ($expr)" || true

actualTrace=$(sed -n 's/^trace: //p' "$tmp/stderrTrace")

nix-instantiate --eval --show-trace >/dev/null 2>"$tmp"/stderrTraceVal \
--expr "$prefixExpression traceVal ($expr)" || true

actualTraceVal=$(sed -n 's/^trace: //p' "$tmp/stderrTraceVal")

# Test that traceVal returns the same trace as trace
if [[ "$actualTrace" != "$actualTraceVal" ]]; then
cat "$tmp"/stderrTrace >&2
die "$expr traced this for lib.fileset.trace:\n\n$actualTrace\n\nand something different for lib.fileset.traceVal:\n\n$actualTraceVal"
fi

if [[ "$actualTrace" != "$expectedTrace" ]]; then
cat "$tmp"/stderrTrace >&2
die "$expr should have traced this:\n\n$expectedTrace\n\nbut this was actually traced:\n\n$actualTrace"
fi
}
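
# A minimal sketch of how it gets used (cf. the tracing tests further down):
#   expectTrace './.' '(empty)'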

# We conditionally use inotifywait in withFileMonitor.
# Check early whether it's available
# TODO: Darwin support, though not crucial since we have Linux CI
if type inotifywait 2>/dev/null >/dev/null; then
canMonitorFiles=1
canMonitor=1
else
echo "Warning: Not checking that excluded files don't get accessed since inotifywait is not available" >&2
canMonitorFiles=
echo "Warning: Cannot check for paths not getting read since the inotifywait command (from the inotify-tools package) is not available" >&2
canMonitor=
fi

# Run a function while monitoring that it doesn't read certain paths
# Usage: withFileMonitor FUNNAME PATH...
# - FUNNAME should be a bash function that:
#   - Performs some operation that should not read some paths
#   - Deletes the paths it shouldn't read without triggering any open events
# - PATH... are the paths that should not get read
#
# This function outputs the same as FUNNAME
withFileMonitor() {
local funName=$1
shift

# If we can't monitor files or have none to monitor, just run the function directly
if [[ -z "$canMonitor" ]] || (( "$#" == 0 )); then
"$funName"
else

# Use a subshell to start the coprocess in and use a trap to kill it when exiting the subshell
(
# Assigned by coproc, makes shellcheck happy
local watcher watcher_PID

# Start inotifywait in the background to monitor all excluded paths
coproc watcher {
# inotifywait outputs a string on stderr when ready
# Redirect it to stdout so we can access it from the coproc's stdout fd
# exec so that the coprocess is inotify itself, making the kill below work correctly
# See below why we listen to both open and delete_self events
exec inotifywait --format='%e %w' --event open,delete_self --monitor "$@" 2>&1
}

# This will trigger when this subshell exits, no matter if successful or not
# After exiting the subshell, the parent shell will continue executing
trap 'kill "${watcher_PID}"' exit

# Synchronously wait until inotifywait is ready
while read -r -u "${watcher[0]}" line && [[ "$line" != "Watches established." ]]; do
:
done

# Call the function that should not read the given paths and delete them afterwards
"$funName"

# Get the first event
read -r -u "${watcher[0]}" event file

# With funName potentially reading files first before deleting them,
# there's only these two possible event timelines:
# - open*, ..., open*, delete_self, ..., delete_self: If some excluded paths were read
# - delete_self, ..., delete_self: If no excluded paths were read
# So by looking at the first event we can figure out which one it is!
# This also means we don't have to wait to collect all events.
case "$event" in
OPEN*)
die "$funName opened excluded file $file when it shouldn't have"
;;
DELETE_SELF)
# Expected events
;;
*)
die "During $funName, unexpected event type '$event' on file $file that should be excluded"
;;
esac
)
fi
}
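
# A hypothetical usage sketch (not from the suite): assert that `b` is never
# opened while running a function that reads `a` and then deletes `b`:
#   fun() {
#     cat a
#     rm -r b
#   }
#   withFileMonitor fun b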

# Check whether a file set includes/excludes declared paths as expected, usage:
#
# tree=(
@ -120,7 +233,7 @@ fi
# )
# checkFileset './a' # Pass the fileset as the argument
declare -A tree
checkFileset() (
checkFileset() {
# New subshell so that we can have a separate trap handler, see `trap` below
local fileset=$1
@ -168,54 +281,21 @@ checkFileset() (
touch "${filesToCreate[@]}"
fi

# Start inotifywait in the background to monitor all excluded files (if any)
if [[ -n "$canMonitorFiles" ]] && (( "${#excludedFiles[@]}" != 0 )); then
coproc watcher {
# inotifywait outputs a string on stderr when ready
# Redirect it to stdout so we can access it from the coproc's stdout fd
# exec so that the coprocess is inotify itself, making the kill below work correctly
# See below why we listen to both open and delete_self events
exec inotifywait --format='%e %w' --event open,delete_self --monitor "${excludedFiles[@]}" 2>&1
}
# This will trigger when this subshell exits, no matter if successful or not
# After exiting the subshell, the parent shell will continue executing
# shellcheck disable=SC2154
trap 'kill "${watcher_PID}"' exit

# Synchronously wait until inotifywait is ready
while read -r -u "${watcher[0]}" line && [[ "$line" != "Watches established." ]]; do
:
done
fi

# Call toSource with the fileset, triggering open events for all files that are added to the store
expression="toSource { root = ./.; fileset = $fileset; }"
storePath=$(expectStorePath "$expression")

# Remove all files immediately after, triggering delete_self events for all of them
rm -rf -- *

# Only check for the inotify events if we actually started inotify earlier
if [[ -v watcher ]]; then
# Get the first event
read -r -u "${watcher[0]}" event file

# There's only these two possible event timelines:
# - open, ..., open, delete_self, ..., delete_self: If some excluded files were read
# - delete_self, ..., delete_self: If no excluded files were read
# So by looking at the first event we can figure out which one it is!
case "$event" in
OPEN)
die "$expression opened excluded file $file when it shouldn't have"
;;
DELETE_SELF)
# Expected events
;;
*)
die "Unexpected event type '$event' on file $file that should be excluded"
;;
esac
# We don't have lambda's in bash unfortunately,
# so we just define a function instead and then pass its name
# shellcheck disable=SC2317
run() {
# Call toSource with the fileset, triggering open events for all files that are added to the store
expectStorePath "$expression"
if (( ${#excludedFiles[@]} != 0 )); then
rm "${excludedFiles[@]}"
fi
}

# Runs the function while checking that the given excluded files aren't read
storePath=$(withFileMonitor run "${excludedFiles[@]}")

# For each path that should be included, make sure it does occur in the resulting store path
for p in "${included[@]}"; do
@ -230,7 +310,9 @@ checkFileset() (
die "$expression included path $p when it shouldn't have"
fi
done
)

rm -rf -- *
}


#### Error messages #####
@ -281,25 +363,32 @@ expectFailure 'toSource { root = ./.; fileset = "/some/path"; }' 'lib.fileset.to
expectFailure 'toSource { root = ./.; fileset = ./a; }' 'lib.fileset.toSource: `fileset` \('"$work"'/a\) does not exist.'

# File sets cannot be evaluated directly
expectFailure 'union ./. ./.' 'lib.fileset: Directly evaluating a file set is not supported. Use `lib.fileset.toSource` to turn it into a usable source instead.'
expectFailure 'union ./. ./.' 'lib.fileset: Directly evaluating a file set is not supported.
\s*To turn it into a usable source, use `lib.fileset.toSource`.
\s*To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'
expectFailure '_emptyWithoutBase' 'lib.fileset: Directly evaluating a file set is not supported.
\s*To turn it into a usable source, use `lib.fileset.toSource`.
\s*To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'

# Past versions of the internal representation are supported
expectEqual '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 0; _internalBase = ./.; }' \
'{ _internalBase = ./.; _internalBaseComponents = path.subpath.components (path.splitRoot ./.).subpath; _internalBaseRoot = /.; _internalVersion = 2; _type = "fileset"; }'
'{ _internalBase = ./.; _internalBaseComponents = path.subpath.components (path.splitRoot ./.).subpath; _internalBaseRoot = /.; _internalIsEmptyWithoutBase = false; _internalVersion = 3; _type = "fileset"; }'
expectEqual '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 1; }' \
'{ _type = "fileset"; _internalVersion = 2; }'
'{ _type = "fileset"; _internalIsEmptyWithoutBase = false; _internalVersion = 3; }'
expectEqual '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 2; }' \
'{ _type = "fileset"; _internalIsEmptyWithoutBase = false; _internalVersion = 3; }'

# Future versions of the internal representation are unsupported
expectFailure '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 3; }' '<tests>: value is a file set created from a future version of the file set library with a different internal representation:
\s*- Internal version of the file set: 3
\s*- Internal version of the library: 2
expectFailure '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 4; }' '<tests>: value is a file set created from a future version of the file set library with a different internal representation:
\s*- Internal version of the file set: 4
\s*- Internal version of the library: 3
\s*Make sure to update your Nixpkgs to have a newer version of `lib.fileset`.'

# _create followed by _coerce should give the inputs back without any validation
expectEqual '{
inherit (_coerce "<test>" (_create ./. "directory"))
_internalVersion _internalBase _internalTree;
}' '{ _internalBase = ./.; _internalTree = "directory"; _internalVersion = 2; }'
}' '{ _internalBase = ./.; _internalTree = "directory"; _internalVersion = 3; }'

#### Resulting store path ####
@ -311,6 +400,12 @@ tree=(
)
checkFileset './.'

# The empty value without a base should also result in an empty result
tree=(
[a]=0
)
checkFileset '_emptyWithoutBase'

# Directories recursively containing no files are not included
tree=(
[e/]=0
@ -406,15 +501,32 @@ expectFailure 'toSource { root = ./.; fileset = union ./. ./b; }' 'lib.fileset.u
expectFailure 'toSource { root = ./.; fileset = unions [ ./a ./. ]; }' 'lib.fileset.unions: element 0 \('"$work"'/a\) does not exist.'
expectFailure 'toSource { root = ./.; fileset = unions [ ./. ./b ]; }' 'lib.fileset.unions: element 1 \('"$work"'/b\) does not exist.'

# unions needs a list with at least 1 element
# unions needs a list
expectFailure 'toSource { root = ./.; fileset = unions null; }' 'lib.fileset.unions: Expected argument to be a list, but got a null.'
expectFailure 'toSource { root = ./.; fileset = unions [ ]; }' 'lib.fileset.unions: Expected argument to be a list with at least one element, but it contains no elements.'

# The tree of later arguments should not be evaluated if a former argument already includes all files
tree=()
checkFileset 'union ./. (_create ./. (abort "This should not be used!"))'
checkFileset 'unions [ ./. (_create ./. (abort "This should not be used!")) ]'

# unions doesn't include any files for an empty list or only empty values without a base
tree=(
[x]=0
[y/z]=0
)
checkFileset 'unions [ ]'
checkFileset 'unions [ _emptyWithoutBase ]'
checkFileset 'unions [ _emptyWithoutBase _emptyWithoutBase ]'
checkFileset 'union _emptyWithoutBase _emptyWithoutBase'

# The empty value without a base is the left and right identity of union
tree=(
[x]=1
[y/z]=0
)
checkFileset 'union ./x _emptyWithoutBase'
checkFileset 'union _emptyWithoutBase ./x'

# union doesn't include files that weren't specified
tree=(
[x]=1
@ -467,12 +579,249 @@ for i in $(seq 1000); do
tree[$i/a]=1
tree[$i/b]=0
done
(
# Locally limit the maximum stack size to 100 * 1024 bytes
# If unions was implemented recursively, this would stack overflow
ulimit -s 100
# This is actually really hard to test:
# A lot of files would be needed to cause a stack overflow.
# And while we could limit the maximum stack size using `ulimit -s`,
# that turns out to not be very deterministic: https://github.com/NixOS/nixpkgs/pull/256417#discussion_r1339396686.
# Meanwhile, the test infra here is not the fastest, creating 10000 files would be too slow.
# So, just using 1000 files for now.
checkFileset 'unions (mapAttrsToList (name: _: ./. + "/${name}/a") (builtins.readDir ./.))'


## lib.fileset.intersection

# Different filesystem roots in root and fileset are not supported
mkdir -p {foo,bar}/mock-root
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./.; fileset = intersection ./foo/mock-root ./bar/mock-root; }
' 'lib.fileset.intersection: Filesystem roots are not the same:
\s*first argument: root "'"$work"'/foo/mock-root"
\s*second argument: root "'"$work"'/bar/mock-root"
\s*Different roots are not supported.'
rm -rf -- *

# Coercion errors show the correct context
expectFailure 'toSource { root = ./.; fileset = intersection ./a ./.; }' 'lib.fileset.intersection: first argument \('"$work"'/a\) does not exist.'
expectFailure 'toSource { root = ./.; fileset = intersection ./. ./b; }' 'lib.fileset.intersection: second argument \('"$work"'/b\) does not exist.'

# The tree of later arguments should not be evaluated if a former argument already excludes all files
tree=(
[a]=0
)
checkFileset 'intersection _emptyWithoutBase (_create ./. (abort "This should not be used!"))'
# We don't have any combinators that can explicitly remove files yet, so we need to rely on internal functions to test this for now
checkFileset 'intersection (_create ./. { a = null; }) (_create ./. { a = abort "This should not be used!"; })'

# If either side is empty, the result is empty
tree=(
[a]=0
)
checkFileset 'intersection _emptyWithoutBase _emptyWithoutBase'
checkFileset 'intersection _emptyWithoutBase (_create ./. null)'
checkFileset 'intersection (_create ./. null) _emptyWithoutBase'
checkFileset 'intersection (_create ./. null) (_create ./. null)'

# If the intersection base paths are not overlapping, the result is empty and has no base path
mkdir a b c
touch {a,b,c}/x
expectEqual 'toSource { root = ./c; fileset = intersection ./a ./b; }' 'toSource { root = ./c; fileset = _emptyWithoutBase; }'
rm -rf -- *

# If the intersection exists, the resulting base path is the longest of them
mkdir a
touch x a/b
expectEqual 'toSource { root = ./a; fileset = intersection ./a ./.; }' 'toSource { root = ./a; fileset = ./a; }'
expectEqual 'toSource { root = ./a; fileset = intersection ./. ./a; }' 'toSource { root = ./a; fileset = ./a; }'
rm -rf -- *

# Also finds the intersection with null'd filesetTree's
tree=(
[a]=0
[b]=1
[c]=0
)
checkFileset 'intersection (_create ./. { a = "regular"; b = "regular"; c = null; }) (_create ./. { a = null; b = "regular"; c = "regular"; })'

# Actually computes the intersection between files
tree=(
[a]=0
[b]=0
[c]=1
[d]=1
[e]=0
[f]=0
)
checkFileset 'intersection (unions [ ./a ./b ./c ./d ]) (unions [ ./c ./d ./e ./f ])'

tree=(
[a/x]=0
[a/y]=0
[b/x]=1
[b/y]=1
[c/x]=0
[c/y]=0
)
checkFileset 'intersection ./b ./.'
checkFileset 'intersection ./b (unions [ ./a/x ./a/y ./b/x ./b/y ./c/x ./c/y ])'

# Complicated case
tree=(
[a/x]=0
[a/b/i]=1
[c/d/x]=0
[c/d/f]=1
[c/x]=0
[c/e/i]=1
[c/e/j]=1
)
checkFileset 'intersection (unions [ ./a/b ./c/d ./c/e ]) (unions [ ./a ./c/d/f ./c/e ])'


## Tracing

# The second trace argument is returned
expectEqual 'trace ./. "some value"' 'builtins.trace "(empty)" "some value"'

# The fileset traceVal argument is returned
expectEqual 'traceVal ./.' 'builtins.trace "(empty)" (_create ./. "directory")'

# The tracing happens before the final argument is needed
expectEqual 'trace ./.' 'builtins.trace "(empty)" (x: x)'

# Tracing an empty directory shows it as such
expectTrace './.' '(empty)'

# This also works if there are directories, but all recursively without files
mkdir -p a/b/c
expectTrace './.' '(empty)'
rm -rf -- *

# The empty file set without a base also prints as empty
expectTrace '_emptyWithoutBase' '(empty)'
expectTrace 'unions [ ]' '(empty)'
mkdir foo bar
touch {foo,bar}/x
expectTrace 'intersection ./foo ./bar' '(empty)'
rm -rf -- *

# If a directory is fully included, print it as such
touch a
expectTrace './.' "$work"' (all files in directory)'
rm -rf -- *

# If a directory is not fully included, recurse
mkdir a b
touch a/{x,y} b/{x,y}
expectTrace 'union ./a/x ./b' "$work"'
- a
- x (regular)
- b (all files in directory)'
rm -rf -- *

# If an included path is a file, print its type
touch a x
ln -s a b
mkfifo c
expectTrace 'unions [ ./a ./b ./c ]' "$work"'
- a (regular)
- b (symlink)
- c (unknown)'
rm -rf -- *

# Do not print directories without any files recursively
mkdir -p a/b/c
touch b x
expectTrace 'unions [ ./a ./b ]' "$work"'
- b (regular)'
rm -rf -- *

# If all children are either fully included or empty directories,
# the parent should be printed as fully included
touch a
mkdir b
expectTrace 'union ./a ./b' "$work"' (all files in directory)'
rm -rf -- *

mkdir -p x/b x/c
touch x/a
touch a
# If all children are either fully excluded or empty directories,
# the parent should be shown (or rather not shown) as fully excluded
expectTrace 'unions [ ./a ./x/b ./x/c ]' "$work"'
- a (regular)'
rm -rf -- *

# Completely filtered out directories also print as empty
touch a
expectTrace '_create ./. {}' '(empty)'
rm -rf -- *

# A general test to make sure the resulting format makes sense
# Such as indentation and ordering
mkdir -p bar/{qux,someDir}
touch bar/{baz,qux,someDir/a} foo
touch bar/qux/x
ln -s x bar/qux/a
mkfifo bar/qux/b
expectTrace 'unions [
./bar/baz
./bar/qux/a
./bar/qux/b
./bar/someDir/a
./foo
]' "$work"'
- bar
- baz (regular)
- qux
- a (symlink)
- b (unknown)
- someDir (all files in directory)
- foo (regular)'
rm -rf -- *

# For recursively included directories,
# `(all files in directory)` should only be used if there's at least one file (otherwise it would be `(empty)`)
# and this should be determined without doing a full search
#
# a is intentionally ordered first here in order to allow triggering the short-circuit behavior
# We then check that b is not read
# In a more realistic scenario, some directories might need to be recursed into,
# but a file would be quickly found to trigger the short-circuit.
touch a
mkdir b
# We don't have lambda's in bash unfortunately,
# so we just define a function instead and then pass its name
# shellcheck disable=SC2317
run() {
# This shouldn't read b/
expectTrace './.' "$work"' (all files in directory)'
# Remove all files immediately after, triggering delete_self events for all of them
rmdir b
}
# Runs the function while checking that b isn't read
withFileMonitor run b
rm -rf -- *

# Partially included directories trace entries as they are evaluated
touch a b c
expectTrace '_create ./. { a = null; b = "regular"; c = throw "b"; }' "$work"'
- b (regular)'

# Except entries that need to be evaluated to even figure out if it's only partially included:
# Here the directory could be fully excluded or included just from seeing a and b,
# so c needs to be evaluated before anything can be traced
expectTrace '_create ./. { a = null; b = null; c = throw "c"; }' ''
expectTrace '_create ./. { a = "regular"; b = "regular"; c = throw "c"; }' ''
rm -rf -- *

# We can trace large directories (10000 here) without any problems
filesToCreate=({0..9}{0..9}{0..9}{0..9})
expectedTrace=$work$'\n'$(printf -- '- %s (regular)\n' "${filesToCreate[@]}")
# We need an excluded file so it doesn't print as `(all files in directory)`
touch 0 "${filesToCreate[@]}"
expectTrace 'unions (mapAttrsToList (n: _: ./. + "/${n}") (removeAttrs (builtins.readDir ./.) [ "0" ]))' "$expectedTrace"
rm -rf -- *

# TODO: Once we have combinators and a property testing library, derive property tests from https://en.wikipedia.org/wiki/Algebra_of_sets

@ -1,26 +1,76 @@
{ lib, ... }:
rec {
/*
Compute the fixed point of the given function `f`, which is usually an
attribute set that expects its final, non-recursive representation as an
argument:
`fix f` computes the fixed point of the given function `f`. In other words, the return value is `x` in `x = f x`.

```
f = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; }
`f` must be a lazy function.
This means that `x` must be a value that can be partially evaluated,
such as an attribute set, a list, or a function.
This way, `f` can use one part of `x` to compute another part.

**Relation to syntactic recursion**

This section explains `fix` by refactoring from syntactic recursion to a call of `fix` instead.

For context, Nix lets you define attributes in terms of other attributes syntactically using the [`rec { }` syntax](https://nixos.org/manual/nix/stable/language/constructs.html#recursive-sets).

```nix
nix-repl> rec {
foo = "foo";
bar = "bar";
foobar = foo + bar;
}
{ bar = "bar"; foo = "foo"; foobar = "foobar"; }
```

Nix evaluates this recursion until all references to `self` have been
resolved. At that point, the final result is returned and `f x = x` holds:
This is convenient when constructing a value to pass to a function for example,
but an equivalent effect can be achieved with the `let` binding syntax:

```nix
nix-repl> let self = {
foo = "foo";
bar = "bar";
foobar = self.foo + self.bar;
}; in self
{ bar = "bar"; foo = "foo"; foobar = "foobar"; }
```

But in general you can get more reuse out of `let` bindings by refactoring them to a function.

```nix
nix-repl> f = self: {
foo = "foo";
bar = "bar";
foobar = self.foo + self.bar;
}
```

This is where `fix` comes in: it contains the syntactic recursion that's not in `f` anymore.

```nix
nix-repl> fix = f:
let self = f self; in self;
```

By applying `fix` we get the final result.

```nix
nix-repl> fix f
{ bar = "bar"; foo = "foo"; foobar = "foobar"; }
```

Such a refactored `f` using `fix` is not useful by itself.
See [`extends`](#function-library-lib.fixedPoints.extends) for an example use case.
There `self` is also often called `final`.
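
For a taste of that use case, a sketch (reusing `f` from above and `extends` from this file; output abbreviated):

```nix
nix-repl> fix (extends (final: prev: { foobar = prev.foobar + "!"; }) f)
{ bar = "bar"; foo = "foo"; foobar = "foobar!"; }
```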

Type: fix :: (a -> a) -> a

See https://en.wikipedia.org/wiki/Fixed-point_combinator for further
details.
Example:
fix (self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; })
=> { bar = "bar"; foo = "foo"; foobar = "foobar"; }

fix (self: [ 1 2 (elemAt self 0 + elemAt self 1) ])
=> [ 1 2 3 ]
*/
fix = f: let x = f x; in x;
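
# Illustrative sketch (not part of this file): evaluation stays lazy, so
#   fix (self: { n = 0; m = self.n + 1; })
# evaluates to { n = 0; m = 1; } without infinite recursion, because `m`
# only forces the `n` attribute of the result, not the whole fixed point.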
@ -30,6 +30,14 @@ in mkLicense lset) ({
fullName = "Abstyles License";
};

acsl14 = {
fullName = "Anti-Capitalist Software License v1.4";
url = "https://anticapitalist.software/";
/* restrictions on corporations apply for both use and redistribution */
free = false;
redistributable = false;
};

afl20 = {
spdxId = "AFL-2.0";
fullName = "Academic Free License v2.0";
@ -413,9 +421,9 @@ in mkLicense lset) ({
fullName = "Eiffel Forum License v2.0";
};

elastic = {
fullName = "ELASTIC LICENSE";
url = "https://github.com/elastic/elasticsearch/blob/master/licenses/ELASTIC-LICENSE.txt";
elastic20 = {
fullName = "Elastic License 2.0";
url = "https://github.com/elastic/elasticsearch/blob/main/licenses/ELASTIC-LICENSE-2.0.txt";
free = false;
};

@ -481,6 +489,11 @@ in mkLicense lset) ({
free = false;
};

fraunhofer-fdk = {
fullName = "Fraunhofer FDK AAC Codec Library";
spdxId = "FDK-AAC";
};

free = {
fullName = "Unspecified free software license";
};
@ -615,6 +628,12 @@ in mkLicense lset) ({
free = false;
};

inria-zelus = {
fullName = "INRIA Non-Commercial License Agreement for the Zélus compiler";
url = "https://github.com/INRIA/zelus/raw/829f2b97cba93b0543a9ca0272269e6b8fdad356/LICENSE";
free = false;
};

ipa = {
spdxId = "IPA";
fullName = "IPA Font License";
@ -840,6 +859,14 @@ in mkLicense lset) ({
fullName = "University of Illinois/NCSA Open Source License";
};

ncul1 = {
spdxId = "NCUL1";
fullName = "Netdata Cloud UI License v1.0";
free = false;
redistributable = true; # Only if used in Netdata products.
url = "https://raw.githubusercontent.com/netdata/netdata/master/web/gui/v2/LICENSE.md";
};

nlpl = {
spdxId = "NLPL";
fullName = "No Limit Public License";

@ -86,15 +86,63 @@ rec {
else op (foldl' (n - 1)) (elemAt list n);
in foldl' (length list - 1);

/* Strict version of `foldl`.
/*
Reduce a list by applying a binary operator from left to right,
starting with an initial accumulator.

The difference is that evaluation is forced upon access. Usually used
with small whole results (in contrast with lazily-generated list or large
lists where only a part is consumed.)
Before each application of the operator, the accumulator value is evaluated.
This behavior makes this function stricter than [`foldl`](#function-library-lib.lists.foldl).

Type: foldl' :: (b -> a -> b) -> b -> [a] -> b
Unlike [`builtins.foldl'`](https://nixos.org/manual/nix/unstable/language/builtins.html#builtins-foldl'),
the initial accumulator argument is evaluated before the first iteration.

A call like

```nix
foldl' op acc₀ [ x₀ x₁ x₂ ... xₙ₋₁ xₙ ]
```

is (denotationally) equivalent to the following,
but with the added benefit that `foldl'` itself will never overflow the stack.

```nix
let
acc₁ = builtins.seq acc₀ (op acc₀ x₀ );
acc₂ = builtins.seq acc₁ (op acc₁ x₁ );
acc₃ = builtins.seq acc₂ (op acc₂ x₂ );
...
accₙ = builtins.seq accₙ₋₁ (op accₙ₋₁ xₙ₋₁);
accₙ₊₁ = builtins.seq accₙ (op accₙ xₙ );
in
accₙ₊₁

# Or ignoring builtins.seq
op (op (... (op (op (op acc₀ x₀) x₁) x₂) ...) xₙ₋₁) xₙ
```

Type: foldl' :: (acc -> x -> acc) -> acc -> [x] -> acc

Example:
foldl' (acc: x: acc + x) 0 [1 2 3]
=> 6
*/
foldl' = builtins.foldl' or foldl;
foldl' =
/* The binary operation to run, where the two arguments are:

1. `acc`: The current accumulator value: Either the initial one for the first iteration, or the result of the previous iteration
2. `x`: The corresponding list element for this iteration
*/
op:
# The initial accumulator value
acc:
# The list to fold
list:

# The builtin `foldl'` is a bit lazier than one might expect.
# See https://github.com/NixOS/nix/pull/7158.
# In particular, the initial accumulator value is not forced before the first iteration starts.
builtins.seq acc
(builtins.foldl' op acc list);
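
# Illustrative difference (a sketch, not a test): with an empty list,
#   foldl' (acc: el: el) (throw "oops") [ ]
# now throws immediately because the initial accumulator is forced,
# whereas plain builtins.foldl' would only throw once the result is used.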

/* Map with index starting from 0

@ -741,6 +741,64 @@ rec {
name = head (splitString sep filename);
in assert name != filename; name;

/* Create a "-D<feature>:<type>=<value>" string that can be passed to typical
CMake invocations.

Type: cmakeOptionType :: string -> string -> string -> string

@param feature The feature to be set
@param type The type of the feature to be set, as described in
https://cmake.org/cmake/help/latest/command/set.html
the possible values (case insensitive) are:
BOOL FILEPATH PATH STRING INTERNAL
@param value The desired value

Example:
cmakeOptionType "string" "ENGINE" "sdl2"
=> "-DENGINE:STRING=sdl2"
*/
cmakeOptionType = type: feature: value:
assert (lib.elem (lib.toUpper type)
[ "BOOL" "FILEPATH" "PATH" "STRING" "INTERNAL" ]);
assert (lib.isString feature);
assert (lib.isString value);
"-D${feature}:${lib.toUpper type}=${value}";

/* Create a -D<condition>={TRUE,FALSE} string that can be passed to typical
CMake invocations.

Type: cmakeBool :: string -> bool -> string

@param condition The condition to be made true or false
@param flag The controlling flag of the condition

Example:
cmakeBool "ENABLE_STATIC_LIBS" false
=> "-DENABLE_STATIC_LIBS:BOOL=FALSE"
*/
cmakeBool = condition: flag:
assert (lib.isString condition);
assert (lib.isBool flag);
cmakeOptionType "bool" condition (lib.toUpper (lib.boolToString flag));

/* Create a -D<feature>:STRING=<value> string that can be passed to typical
CMake invocations.
This is the most typical usage, so it deserves a special case.

Type: cmakeFeature :: string -> string -> string

@param feature The feature to be set
@param value The desired value

Example:
cmakeFeature "MODULES" "badblock"
=> "-DMODULES:STRING=badblock"
*/
cmakeFeature = feature: value:
assert (lib.isString feature);
assert (lib.isString value);
cmakeOptionType "string" feature value;
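
# Taken together these helpers are typically used to build `cmakeFlags`;
# a sketch (option names assumed):
#   cmakeFlags = [
#     (lib.cmakeBool "BUILD_TESTING" false)
#     (lib.cmakeFeature "CMAKE_BUILD_TYPE" "Release")
#   ];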

/* Create a -D<feature>=<value> string that can be passed to typical Meson
invocations.

|
@ -796,7 +854,7 @@ rec {
|
|||
assert (lib.isBool flag);
|
||||
mesonOption feature (if flag then "enabled" else "disabled");
|
||||
|
||||
/* Create an --{enable,disable}-<feat> string that can be passed to
|
||||
/* Create an --{enable,disable}-<feature> string that can be passed to
|
||||
standard GNU Autoconf scripts.
|
||||
|
||||
Example:
|
||||
|
@ -805,11 +863,12 @@ rec {
enableFeature false "shared"
=> "--disable-shared"
*/
enableFeature = enable: feat:
assert isString feat; # e.g. passing openssl instead of "openssl"
"--${if enable then "enable" else "disable"}-${feat}";
enableFeature = flag: feature:
assert lib.isBool flag;
assert lib.isString feature; # e.g. passing openssl instead of "openssl"
"--${if flag then "enable" else "disable"}-${feature}";

/* Create an --{enable-<feat>=<value>,disable-<feat>} string that can be passed to
/* Create an --{enable-<feature>=<value>,disable-<feature>} string that can be passed to
standard GNU Autoconf scripts.

Example:
@ -818,9 +877,10 @@ rec {
enableFeatureAs false "shared" (throw "ignored")
=> "--disable-shared"
*/
enableFeatureAs = enable: feat: value: enableFeature enable feat + optionalString enable "=${value}";
enableFeatureAs = flag: feature: value:
enableFeature flag feature + optionalString flag "=${value}";

/* Create an --{with,without}-<feat> string that can be passed to
/* Create an --{with,without}-<feature> string that can be passed to
standard GNU Autoconf scripts.

Example:
@ -829,11 +889,11 @@ rec {
withFeature false "shared"
=> "--without-shared"
*/
withFeature = with_: feat:
assert isString feat; # e.g. passing openssl instead of "openssl"
"--${if with_ then "with" else "without"}-${feat}";
withFeature = flag: feature:
assert isString feature; # e.g. passing openssl instead of "openssl"
"--${if flag then "with" else "without"}-${feature}";

/* Create an --{with-<feat>=<value>,without-<feat>} string that can be passed to
/* Create an --{with-<feature>=<value>,without-<feature>} string that can be passed to
standard GNU Autoconf scripts.

Example:
@ -842,7 +902,8 @@ rec {
withFeatureAs false "shared" (throw "ignored")
=> "--without-shared"
*/
withFeatureAs = with_: feat: value: withFeature with_ feat + optionalString with_ "=${value}";
withFeatureAs = flag: feature: value:
withFeature flag feature + optionalString flag "=${value}";
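
# Sketch of how these typically appear in a derivation (names assumed):
#   configureFlags = [
#     (lib.enableFeature doCheck "tests")
#     (lib.withFeature true "ssl")
#   ];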

/* Create a fixed width string with additional prefix to match
required width.

@ -178,6 +178,12 @@ rec {
else if final.isLoongArch64 then "loongarch"
else final.parsed.cpu.name;

# https://source.denx.de/u-boot/u-boot/-/blob/9bfb567e5f1bfe7de8eb41f8c6d00f49d2b9a426/common/image.c#L81-106
ubootArch =
if final.isx86_32 then "x86" # not i386
else if final.isMips64 then "mips64" # uboot *does* distinguish between mips32/mips64
else final.linuxArch; # other cases appear to agree with linuxArch

qemuArch =
if final.isAarch32 then "arm"
else if final.isS390 && !final.isS390x then null

@ -20,6 +20,10 @@ let
expr = (builtins.tryEval (builtins.seq expr "didn't throw"));
expected = { success = false; value = false; };
};
testingEval = expr: {
expr = (builtins.tryEval expr).success;
expected = true;
};
testingDeepThrow = expr: testingThrow (builtins.deepSeq expr expr);

testSanitizeDerivationName = { name, expected }:
@ -39,6 +43,18 @@ in

runTests {

# CUSTOMIZATION

testFunctionArgsMakeOverridable = {
expr = functionArgs (makeOverridable ({ a, b, c ? null}: {}));
expected = { a = false; b = false; c = true; };
};

testFunctionArgsMakeOverridableOverride = {
expr = functionArgs (makeOverridable ({ a, b, c ? null }: {}) { a = 1; b = 2; }).override;
expected = { a = false; b = false; c = true; };
};

# TRIVIAL

testId = {
@ -505,6 +521,38 @@ runTests {
};
};

testFoldl'Empty = {
expr = foldl' (acc: el: abort "operation not called") 0 [ ];
expected = 0;
};

testFoldl'IntegerAdding = {
expr = foldl' (acc: el: acc + el) 0 [ 1 2 3 ];
expected = 6;
};

# The accumulator isn't forced deeply
testFoldl'NonDeep = {
expr = take 3 (foldl'
(acc: el: [ el ] ++ acc)
[ (abort "unevaluated list entry") ]
[ 1 2 3 ]);
expected = [ 3 2 1 ];
};

# Compared to builtins.foldl', lib.foldl' evaluates the first accumulator strictly too
testFoldl'StrictInitial = {
expr = (builtins.tryEval (foldl' (acc: el: el) (throw "hello") [])).success;
expected = false;
};

# Make sure we don't get a stack overflow for large lists
# This number of elements would notably cause a stack overflow if it was implemented without the `foldl'` builtin
testFoldl'Large = {
expr = foldl' (acc: el: acc + el) 0 (range 0 100000);
expected = 5000050000;
};

testTake = testAllTrue [
([] == (take 0 [ 1 2 3 ]))
([1] == (take 1 [ 1 2 3 ]))
@ -708,7 +756,7 @@ runTests {
# should just return the initial value
emptySet = foldlAttrs (throw "function not needed") 123 { };
# should just evaluate to the last value
accNotNeeded = foldlAttrs (_acc: _name: v: v) (throw "accumulator not needed") { z = 3; a = 2; };
valuesNotNeeded = foldlAttrs (acc: _name: _v: acc) 3 { z = throw "value z not needed"; a = throw "value a not needed"; };
# the accumulator doesn't have to be an attrset, it can be as trivial as just a number or string
trivialAcc = foldlAttrs (acc: _name: v: acc * 10 + v) 1 { z = 1; a = 2; };
};
@ -718,7 +766,7 @@ runTests {
names = [ "bar" "foo" ];
};
emptySet = 123;
accNotNeeded = 3;
valuesNotNeeded = 3;
trivialAcc = 121;
};
};
@ -784,6 +832,26 @@ runTests {
expected = { a = 1; b = 2; };
};

testListAttrsReverse = let
exampleAttrs = {foo=1; bar="asdf"; baz = [1 3 3 7]; fnord=null;};
exampleSingletonList = [{name="foo"; value=1;}];
in {
expr = {
isReverseToListToAttrs = builtins.listToAttrs (attrsToList exampleAttrs) == exampleAttrs;
isReverseToAttrsToList = attrsToList (builtins.listToAttrs exampleSingletonList) == exampleSingletonList;
testDuplicatePruningBehaviour = attrsToList (builtins.listToAttrs [{name="a"; value=2;} {name="a"; value=1;}]);
};
expected = {
isReverseToAttrsToList = true;
isReverseToListToAttrs = true;
testDuplicatePruningBehaviour = [{name="a"; value=2;}];
};
};

testAttrsToListsCanDealWithFunctions = testingEval (
attrsToList { someFunc= a: a + 1;}
);

# GENERATORS
# these tests assume attributes are converted to lists
# in alphabetical order

@ -91,6 +91,9 @@ checkConfigOutput '^true$' config.result ./test-mergeAttrDefinitionsWithPrio.nix
# is the option.
checkConfigOutput '^true$' config.result ./module-argument-default.nix

# gvariant
checkConfigOutput '^true$' config.assertion ./gvariant.nix

# types.pathInStore
checkConfigOutput '".*/store/0lz9p8xhf89kb1c1kk6jxrzskaiygnlh-bash-5.2-p15.drv"' config.pathInStore.ok1 ./types.nix
checkConfigOutput '".*/store/0fb3ykw9r5hpayd05sr0cizwadzq1d8q-bash-5.2-p15"' config.pathInStore.ok2 ./types.nix

@ -1,81 +1,52 @@
{ config, lib, ... }:

let inherit (lib) concatStringsSep mapAttrsToList mkMerge mkOption types gvariant;
in {
options.examples = mkOption { type = types.attrsOf gvariant; };
{
options = {
examples = lib.mkOption { type = lib.types.attrs; };
assertion = lib.mkOption { type = lib.types.bool; };
};

config = {
examples = with gvariant;
mkMerge [
{ bool = true; }
{ bool = true; }

{ float = 3.14; }

{ int32 = mkInt32 (- 42); }
{ int32 = mkInt32 (- 42); }

{ uint32 = mkUint32 42; }
{ uint32 = mkUint32 42; }

{ int16 = mkInt16 (-42); }
{ int16 = mkInt16 (-42); }

{ uint16 = mkUint16 42; }
{ uint16 = mkUint16 42; }

{ int64 = mkInt64 (-42); }
{ int64 = mkInt64 (-42); }

{ uint64 = mkUint64 42; }
{ uint64 = mkUint64 42; }

{ array1 = [ "one" ]; }
{ array1 = mkArray [ "two" ]; }
{ array2 = mkArray [ (mkInt32 1) ]; }
{ array2 = mkArray [ (mkUint32 2) ]; }

{ emptyArray1 = [ ]; }
{ emptyArray2 = mkEmptyArray type.uint32; }

{ string = "foo"; }
{ string = "foo"; }
{
examples = with lib.gvariant; {
bool = true;
float = 3.14;
int32 = mkInt32 (- 42);
uint32 = mkUint32 42;
int16 = mkInt16 (-42);
uint16 = mkUint16 42;
int64 = mkInt64 (-42);
uint64 = mkUint64 42;
array1 = [ "one" ];
array2 = mkArray [ (mkInt32 1) ];
array3 = mkArray [ (mkUint32 2) ];
emptyArray = mkEmptyArray type.uint32;
string = "foo";
escapedString = ''
'\
'';
}
tuple = mkTuple [ (mkInt32 1) [ "foo" ] ];
maybe1 = mkNothing type.string;
maybe2 = mkJust (mkUint32 4);
variant = mkVariant "foo";
dictionaryEntry = mkDictionaryEntry (mkInt32 1) [ "foo" ];
};

{ tuple = mkTuple [ (mkInt32 1) [ "foo" ] ]; }

{ maybe1 = mkNothing type.string; }
{ maybe2 = mkJust (mkUint32 4); }

{ variant1 = mkVariant "foo"; }
{ variant2 = mkVariant 42; }

{ dictionaryEntry = mkDictionaryEntry (mkInt32 1) [ "foo" ]; }
];

assertions = [
{
assertion = (
assertion =
let
mkLine = n: v: "${n} = ${toString (gvariant.mkValue v)}";
result = concatStringsSep "\n" (mapAttrsToList mkLine config.examples);
mkLine = n: v: "${n} = ${toString (lib.gvariant.mkValue v)}";
result = lib.concatStringsSep "\n" (lib.mapAttrsToList mkLine config.examples);
in
result + "\n"
) == ''
array1 = @as ['one','two']
array2 = @au [1,2]
(result + "\n") == ''
array1 = @as ['one']
array2 = @ai [1]
array3 = @au [@u 2]
bool = true
dictionaryEntry = @{ias} {1,@as ['foo']}
emptyArray1 = @as []
emptyArray2 = @au []
emptyArray = @au []
escapedString = '\'\\\n'
float = 3.140000
int = -42
int16 = @n -42
int32 = -42
int64 = @x -42
maybe1 = @ms nothing
maybe2 = just @u 4
@ -84,10 +55,7 @@ in {
uint16 = @q 42
uint32 = @u 42
uint64 = @t 42
variant1 = @v <'foo'>
variant2 = @v <42>
variant = <'foo'>
'';
}
];
};
}

@ -19,7 +19,7 @@
where (an example entry is sketched after this list):

- `handle` is the handle you are going to use in nixpkgs expressions,
- `name` is your, preferably real, name,
- `name` is a name that people would know and recognize you by,
- `email` is your maintainer email address,
- `matrix` is your Matrix user ID,
- `github` is your GitHub handle (as it appears in the URL of your profile page, `https://github.com/<userhandle>`),
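
A hypothetical entry (illustrative only, not a real maintainer) might look like:

```nix
handle = {
  email = "handle@example.com";
  github = "handle";
  githubId = 1234567;
  name = "Your Name";
};
```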
|
||||
|
@ -793,6 +793,12 @@
githubId = 5053729;
name = "Alias Gram";
};
alias-dev = {
email = "alias-dev@protonmail.com";
github = "alias-dev";
githubId = 30437811;
name = "Alex Andrews";
};
alibabzo = {
email = "alistair.bill@gmail.com";
github = "alistairbill";
@ -829,6 +835,12 @@
githubId = 5892756;
name = "Alec Snyder";
};
allusive = {
email = "jasper@allusive.dev";
name = "Allusive";
github = "allusive-dev";
githubId = 99632976;
};
almac = {
email = "alma.cemerlic@gmail.com";
github = "a1mac";
@ -895,6 +907,12 @@
githubId = 160476;
name = "Amanjeev Sethi";
};
amanse = {
email = "amansetiarjp@gmail.com";
github = "amanse";
githubId = 13214574;
name = "Aman Setia";
};
amar1729 = {
email = "amar.paul16@gmail.com";
github = "Amar1729";
@ -1768,6 +1786,15 @@
githubId = 1222;
name = "Aaron VonderHaar";
};
aviallon = {
email = "antoine-nixos@lesviallon.fr";
github = "aviallon";
githubId = 7479436;
name = "Antoine Viallon";
keys = [{
fingerprint = "4AC4 A28D 7208 FC6F 2B51 5EA9 D126 B13A B555 E16F";
}];
};
avitex = {
email = "theavitex@gmail.com";
github = "avitex";
@ -3668,18 +3695,18 @@
githubId = 398996;
name = "Christopher Singley";
};
cstrahan = {
email = "charles@cstrahan.com";
github = "cstrahan";
githubId = 143982;
name = "Charles Strahan";
};
cswank = {
email = "craigswank@gmail.com";
github = "cswank";
githubId = 490965;
name = "Craig Swank";
};
ctron = {
email = "ctron@dentrassi.de";
github = "ctron";
githubId = 202474;
name = "Jens Reimann";
};
cust0dian = {
email = "serg@effectful.software";
github = "cust0dian";
@ -3947,7 +3974,7 @@
};
davidarmstronglewis = {
email = "davidlewis@mac.com";
github = "davidarmstronglewis";
github = "oceanlewis";
githubId = 6754950;
name = "David Armstrong Lewis";
};
@ -4851,6 +4878,12 @@
githubId = 50854;
name = "edef";
};
edeneast = {
email = "edenofest@gmail.com";
github = "edeneast";
githubId = 2746374;
name = "edeneast";
};
ederoyd46 = {
email = "matt@ederoyd.co.uk";
github = "ederoyd46";
@ -5821,10 +5854,14 @@
githubId = 1618343;
};
foo-dogsquared = {
email = "foo.dogsquared@gmail.com";
email = "foodogsquared@foodogsquared.one";
github = "foo-dogsquared";
githubId = 34962634;
matrix = "@foodogsquared:matrix.org";
name = "Gabriel Arazas";
keys = [{
fingerprint = "DDD7 D0BD 602E 564B AA04 FC35 1431 0D91 4115 2B92";
}];
};
fooker = {
email = "fooker@lab.sh";
@ -5982,6 +6019,15 @@
githubId = 134872;
name = "Sergei Lukianov";
};
fryuni = {
name = "Luiz Ferraz";
email = "luiz@lferraz.com";
github = "Fryuni";
githubId = 11063910;
keys = [{
fingerprint = "2109 4B0E 560B 031E F539 62C8 2B56 8731 DB24 47EC";
}];
};
fsagbuya = {
email = "fa@m-labs.ph";
github = "fsagbuya";
@ -6047,6 +6093,15 @@
githubId = 12715461;
name = "Anders Bo Rasmussen";
};
fwam = {
name = "Legion Orsetti";
email = "fwam@queereen.dev";
github = "fwam";
githubId = 113541944;
keys = [{
fingerprint = "3822 20B8 57ED 0602 3786 8A7A 18E1 AE22 D704 B4FC";
}];
};
fwc = {
github = "fwc";
githubId = 29337229;
@ -6415,6 +6470,10 @@
githubId = 1447245;
name = "Robin Gloster";
};
gm6k = {
email = "nix@quidecco.pl";
name = "Isidor Zeuner";
};
gmemstr = {
email = "git@gmem.ca";
github = "gmemstr";
@ -6762,6 +6821,12 @@
githubId = 33523827;
name = "Harrison Thorne";
};
haruki7049 = {
email = "tontonkirikiri@gmail.com";
github = "haruki7049";
githubId = 64677724;
name = "haruki7049";
};
harvidsen = {
email = "harvidsen@gmail.com";
github = "harvidsen";
@ -8006,6 +8071,12 @@
githubId = 854319;
name = "Matt McHenry";
};
jerrysm64 = {
email = "jerry.starke@icloud.com";
github = "jerrysm64";
githubId = 42114389;
name = "Jerry Starke";
};
jeschli = {
email = "jeschli@gmail.com";
github = "0mbi";
@ -8109,6 +8180,12 @@
githubId = 6445082;
name = "Joseph Lukasik";
};
jgoux = {
email = "hi@jgoux.dev";
github = "jgoux";
githubId = 1443499;
name = "Julien Goux";
};
jhh = {
email = "jeff@j3ff.io";
github = "jhh";
@ -8509,6 +8586,12 @@
github = "jorsn";
githubId = 4646725;
};
joscha = {
name = "Joscha Loos";
email = "j.loos@posteo.net";
github = "jooooscha";
githubId = 57965027;
};
josephst = {
name = "Joseph Stahl";
email = "hello@josephstahl.com";
@ -8551,6 +8634,12 @@
githubId = 1918771;
name = "Joe Doyle";
};
jpentland = {
email = "joe.pentland@gmail.com";
github = "jpentland";
githubId = 1135582;
name = "Joe Pentland";
};
jperras = {
email = "joel@nerderati.com";
github = "jperras";
@ -10329,12 +10418,6 @@
|
|||
githubId = 84395723;
|
||||
name = "Lukas Wurzinger";
|
||||
};
|
||||
lukeadams = {
|
||||
email = "luke.adams@belljar.io";
|
||||
github = "lukeadams";
|
||||
githubId = 3508077;
|
||||
name = "Luke Adams";
|
||||
};
|
||||
lukebfox = {
|
||||
email = "lbentley-fox1@sheffield.ac.uk";
|
||||
github = "lukebfox";
|
||||
|
@ -10958,12 +11041,6 @@
|
|||
githubId = 4708337;
|
||||
name = "Marcelo A. de L. Santos";
|
||||
};
|
||||
maxhille = {
|
||||
email = "mh@lambdasoup.com";
|
||||
github = "maxhille";
|
||||
githubId = 693447;
|
||||
name = "Max Hille";
|
||||
};
|
||||
maximsmol = {
|
||||
email = "maximsmol@gmail.com";
|
||||
github = "maximsmol";
|
||||
|
@ -11539,6 +11616,15 @@
|
|||
githubId = 1776903;
|
||||
name = "Andrew Abbott";
|
||||
};
|
||||
mirrorwitch = {
|
||||
email = "mirrorwitch@transmom.love";
|
||||
github = "mirrorwitch";
|
||||
githubId = 146672255;
|
||||
name = "mirrorwitch";
|
||||
keys = [{
|
||||
fingerprint = "C3E7 F8C4 9CBC 9320 D360 B117 8516 D0FA 7D8F 58FC";
|
||||
}];
|
||||
};
|
||||
Misaka13514 = {
|
||||
name = "Misaka13514";
|
||||
email = "Misaka13514@gmail.com";
|
||||
|
@ -11582,6 +11668,13 @@
|
|||
githubId = 1001112;
|
||||
name = "Marcin Janczyk";
|
||||
};
|
||||
mjm = {
|
||||
email = "matt@mattmoriarity.com";
|
||||
github = "mjm";
|
||||
githubId = 1181;
|
||||
matrix = "@mjm:beeper.com";
|
||||
name = "Matt Moriarity";
|
||||
};
|
||||
mjp = {
|
||||
email = "mike@mythik.co.uk";
|
||||
github = "MikePlayle";
|
||||
|
@ -12082,6 +12175,11 @@
|
|||
githubId = 59313755;
|
||||
name = "Maxim Karasev";
|
||||
};
|
||||
mxmlnkn = {
|
||||
github = "mxmlnkn";
|
||||
githubId = 6842824;
|
||||
name = "Maximilian Knespel";
|
||||
};
|
||||
myaats = {
|
||||
email = "mats@mats.sh";
|
||||
github = "Myaats";
|
||||
|
@ -12753,6 +12851,12 @@
|
|||
githubId = 9939720;
|
||||
name = "Philippe Nguyen";
|
||||
};
|
||||
npulidomateo = {
|
||||
matrix = "@npulidomateo:matrix.org";
|
||||
github = "npulidomateo";
|
||||
githubId = 13149442;
|
||||
name = "Nico Pulido-Mateo";
|
||||
};
|
||||
nrdxp = {
|
||||
email = "tim.deh@pm.me";
|
||||
matrix = "@timdeh:matrix.org";
|
||||
|
@ -12910,6 +13014,13 @@
|
|||
fingerprint = "939E F8A5 CED8 7F50 5BB5 B2D0 24BC 2738 5F70 234F";
|
||||
}];
|
||||
};
|
||||
octodi = {
|
||||
name = "octodi";
|
||||
email = "octodi@proton.me";
|
||||
matrix = "@octodi:matrix.org";
|
||||
github = "octodi";
|
||||
githubId = 127038896;
|
||||
};
|
||||
oddlama = {
|
||||
email = "oddlama@oddlama.org";
|
||||
github = "oddlama";
|
||||
|
@ -12937,6 +13048,11 @@
|
|||
githubId = 585547;
|
||||
name = "Jaka Hudoklin";
|
||||
};
|
||||
offsetcyan = {
|
||||
github = "offsetcyan";
|
||||
githubId = 49906709;
|
||||
name = "Dakota";
|
||||
};
|
||||
oida = {
|
||||
email = "oida@posteo.de";
|
||||
github = "oida";
|
||||
|
@ -13914,7 +14030,7 @@
|
|||
name = "Pedro Pombeiro";
|
||||
};
|
||||
pongo1231 = {
|
||||
email = "pongo1999712@gmail.com";
|
||||
email = "pongo12310@gmail.com";
|
||||
github = "pongo1231";
|
||||
githubId = 4201956;
|
||||
name = "pongo1231";
|
||||
|
@ -14049,6 +14165,12 @@
|
|||
githubId = 406946;
|
||||
name = "Valentin Lorentz";
|
||||
};
|
||||
prominentretail = {
|
||||
email = "me@jakepark.me";
|
||||
github = "ProminentRetail";
|
||||
githubId = 94048404;
|
||||
name = "Jake Park";
|
||||
};
|
||||
proofconstruction = {
|
||||
email = "source@proof.construction";
|
||||
github = "proofconstruction";
|
||||
|
@ -14692,6 +14814,12 @@
|
|||
githubId = 42619;
|
||||
name = "Wei-Ming Yang";
|
||||
};
|
||||
rickvanprim = {
|
||||
email = "me@rickvanprim.com";
|
||||
github = "rickvanprim";
|
||||
githubId = 13792812;
|
||||
name = "James Leitch";
|
||||
};
|
||||
rickynils = {
|
||||
email = "rickynils@gmail.com";
|
||||
github = "rickynils";
|
||||
|
@ -15012,15 +15140,6 @@
|
|||
}];
|
||||
name = "Rahul Butani";
|
||||
};
|
||||
rs0vere = {
|
||||
email = "rs0vere@proton.me";
|
||||
github = "rs0vere";
|
||||
githubId = 140035635;
|
||||
keys = [{
|
||||
fingerprint = "C6D8 B5C2 FA79 901B DCCF 95E1 FEC4 5C5A ED00 C58D";
|
||||
}];
|
||||
name = "Red Star Over Earth";
|
||||
};
|
||||
rski = {
|
||||
name = "rski";
|
||||
email = "rom.skiad+nix@gmail.com";
|
||||
|
@ -15274,6 +15393,12 @@
|
|||
githubId = 171470;
|
||||
name = "Sam Hug";
|
||||
};
|
||||
SamirTalwar = {
|
||||
email = "lazy.git@functional.computer";
|
||||
github = "abstracte";
|
||||
githubId = 47852;
|
||||
name = "Samir Talwar";
|
||||
};
|
||||
samlich = {
|
||||
email = "nixos@samli.ch";
|
||||
github = "samlich";
|
||||
|
@ -15314,6 +15439,12 @@
|
|||
githubId = 107703;
|
||||
name = "Samuel Rivas";
|
||||
};
|
||||
samueltardieu = {
|
||||
email = "nixpkgs@sam.rfc1149.net";
|
||||
github = "samueltardieu";
|
||||
githubId = 44656;
|
||||
name = "Samuel Tardieu";
|
||||
};
|
||||
samw = {
|
||||
email = "sam@wlcx.cc";
|
||||
github = "wlcx";
|
||||
|
@ -15418,6 +15549,12 @@
|
|||
githubId = 3958212;
|
||||
name = "Tom Sorlie";
|
||||
};
|
||||
schinmai-akamai = {
|
||||
email = "schinmai@akamai.com";
|
||||
github = "schinmai-akamai";
|
||||
githubId = 70169773;
|
||||
name = "Tarun Chinmai Sekar";
|
||||
};
|
||||
schmitthenner = {
|
||||
email = "development@schmitthenner.eu";
|
||||
github = "fkz";
|
||||
|
@ -16001,6 +16138,12 @@
|
|||
fingerprint = "B234 EFD4 2B42 FE81 EE4D 7627 F72C 4A88 7F9A 24CA";
|
||||
}];
|
||||
};
|
||||
sironheart = {
|
||||
email = "git@beisenherz.dev";
|
||||
github = "Sironheart";
|
||||
githubId = 13799656;
|
||||
name = "Steffen Beisenherz";
|
||||
};
|
||||
sirseruju = {
|
||||
email = "sir.seruju@yandex.ru";
|
||||
github = "SirSeruju";
|
||||
|
@ -16234,6 +16377,16 @@
|
|||
githubId = 53029739;
|
||||
name = "Joshua Ortiz";
|
||||
};
|
||||
Sorixelle = {
|
||||
email = "ruby+nixpkgs@srxl.me";
|
||||
matrix = "@ruby:isincredibly.gay";
|
||||
name = "Ruby Iris Juric";
|
||||
github = "Sorixelle";
|
||||
githubId = 38685302;
|
||||
keys = [{
|
||||
fingerprint = "2D76 76C7 A28E 16FC 75C7 268D 1B55 6ED8 4B0E 303A";
|
||||
}];
|
||||
};
|
||||
sorki = {
|
||||
email = "srk@48.io";
|
||||
github = "sorki";
|
||||
|
@ -17657,12 +17810,6 @@
|
|||
githubId = 10110;
|
||||
name = "Travis B. Hartwell";
|
||||
};
|
||||
travisdavis-ops = {
|
||||
email = "travisdavismedia@gmail.com";
|
||||
github = "TravisDavis-ops";
|
||||
githubId = 52011418;
|
||||
name = "Travis Davis";
|
||||
};
|
||||
traxys = {
|
||||
email = "quentin+dev@familleboyer.net";
|
||||
github = "traxys";
|
||||
|
@ -17687,6 +17834,13 @@
|
|||
githubId = 25440339;
|
||||
name = "Tom Repetti";
|
||||
};
|
||||
trevdev = {
|
||||
email = "trev@trevdev.ca";
|
||||
matrix = "@trevdev:matrix.org";
|
||||
github = "trev-dev";
|
||||
githubId = 28788713;
|
||||
name = "Trevor Richards";
|
||||
};
|
||||
trevorj = {
|
||||
email = "nix@trevor.joynson.io";
|
||||
github = "akatrevorjay";
|
||||
|
@ -18452,7 +18606,7 @@
|
|||
githubId = 60148;
|
||||
};
|
||||
water-sucks = {
|
||||
email = "varun@cvte.org";
|
||||
email = "varun@snare.dev";
|
||||
name = "Varun Narravula";
|
||||
github = "water-sucks";
|
||||
githubId = 68445574;
|
||||
|
@ -18539,6 +18693,12 @@
|
|||
fingerprint = "F844 80B2 0CA9 D6CC C7F5 2479 A776 D2AD 099E 8BC0";
|
||||
}];
|
||||
};
|
||||
wexder = {
|
||||
email = "wexder19@gmail.com";
|
||||
github = "wexder";
|
||||
githubId = 24979302;
|
||||
name = "Vladimír Zahradník";
|
||||
};
|
||||
wheelsandmetal = {
|
||||
email = "jakob@schmutz.co.uk";
|
||||
github = "wheelsandmetal";
|
||||
|
@ -18566,6 +18726,12 @@
|
|||
githubId = 7121530;
|
||||
name = "Wolf Honoré";
|
||||
};
|
||||
wietsedv = {
|
||||
email = "wietsedv@proton.me";
|
||||
github = "wietsedv";
|
||||
githubId = 13139101;
|
||||
name = "Wietse de Vries";
|
||||
};
|
||||
wigust = {
|
||||
name = "Oleg Pykhalov";
|
||||
email = "go.wigust@gmail.com";
|
||||
|
@ -18974,7 +19140,7 @@
|
|||
];
|
||||
};
|
||||
yayayayaka = {
|
||||
email = "nixpkgs@uwu.is";
|
||||
email = "github@uwu.is";
|
||||
matrix = "@yaya:uwu.is";
|
||||
github = "yayayayaka";
|
||||
githubId = 73759599;
|
||||
|
@ -19088,6 +19254,13 @@
|
|||
github = "YorikSar";
|
||||
githubId = 428074;
|
||||
};
|
||||
YoshiRulz = {
|
||||
name = "YoshiRulz";
|
||||
email = "OSSYoshiRulz+Nixpkgs@gmail.com";
|
||||
matrix = "@YoshiRulz:matrix.org";
|
||||
github = "YoshiRulz";
|
||||
githubId = 13409956;
|
||||
};
|
||||
yrashk = {
|
||||
email = "yrashk@gmail.com";
|
||||
github = "yrashk";
|
||||
|
|
|
@@ -12,5 +12,5 @@ import ../../pkgs/top-level/release.nix
  scrubJobs = false;
  # No need to evaluate on i686.
  supportedSystems = [ "x86_64-linux" ];
  limitedSupportedSystems = [];
  bootstrapConfigs = [];
}

@@ -13,12 +13,15 @@ STDOUT->autoflush(1);

my $ua = LWP::UserAgent->new();

if (!defined $ENV{GH_TOKEN}) {
    die "Set GH_TOKEN before running this script";
}

keys %$maintainers_json; # reset the internal iterator so a prior each() doesn't affect the loop
while(my($k, $v) = each %$maintainers_json) {
    my $current_user = %$v{'github'};
    if (!defined $current_user) {
        print "$k has no github handle\n";
        next;
    }
    my $github_id = %$v{'githubId'};
    if (!defined $github_id) {
@@ -37,13 +40,16 @@ while(my($k, $v) = each %$maintainers_json) {
        sleep($ratelimit_reset - time() + 5);
    }
    if ($resp->code != 200) {
        print $current_user . " likely deleted their github account\n";
        print "$k likely deleted their github account\n";
        next;
    }
    my $resp_json = from_json($resp->content);
    my $api_user = %$resp_json{"login"};
    if (lc($current_user) ne lc($api_user)) {
        print $current_user . " is now known on github as " . $api_user . ". Editing maintainer-list.nix…\n";
    if (!defined $current_user) {
        print "$k is known on github as $api_user.\n";
    }
    elsif (lc($current_user) ne lc($api_user)) {
        print "$k is now known on github as $api_user. Editing maintainer-list.nix…\n";
        my $file = path($maintainers_list_nix);
        my $data = $file->slurp_utf8;
        $data =~ s/github = "$current_user";$/github = "$api_user";/m;

@@ -72,6 +72,7 @@ lualogging,,,,,,
luaossl,,,,,5.1,
luaposix,,,,34.1.1-1,,vyp lblasc
luarepl,,,,,,
luarocks-build-rust-mlua,,,,,,mrcjkb
luasec,,,,,,flosse
luasocket,,,,,,
luasql-sqlite3,,,,,,vyp
@@ -89,8 +90,10 @@ lyaml,,,,,,lblasc
magick,,,,,,donovanglover
markdown,,,,,,
mediator_lua,,,,,,
middleclass,,,,,,
mpack,,,,,,
moonscript,https://github.com/leafo/moonscript.git,dev-1,,,,arobyn
nui-nvim,,,,,,mrcjkb
nvim-client,https://github.com/neovim/lua-client.git,,,,,
nvim-cmp,https://github.com/hrsh7th/nvim-cmp,,,,,
penlight,https://github.com/lunarmodules/Penlight.git,,,,,alerque
@@ -108,5 +111,7 @@ teal-language-server,,,http://luarocks.org/dev,,,
telescope.nvim,,,,,5.1,
telescope-manix,,,,,,
tl,,,,,,mephistophiles
toml,,,,,,mrcjkb
toml-edit,,,,,5.1,mrcjkb
vstruct,https://github.com/ToxicFrog/vstruct.git,,,,,
vusted,,,,,,figsoda

@@ -321,8 +321,14 @@ def load_plugins_from_csv(
    return plugins


def run_nix_expr(expr):
    with CleanEnvironment() as nix_path:
def run_nix_expr(expr, nixpkgs: str):
    '''
    :param expr nix expression to fetch current plugins
    :param nixpkgs Path towards a nixpkgs checkout
    '''
    # local_pkgs = str(Path(__file__).parent.parent.parent)
    with CleanEnvironment(nixpkgs) as nix_path:
        cmd = [
            "nix",
            "eval",
@@ -396,9 +402,9 @@ class Editor:
        """CSV spec"""
        print("the update member function should be overriden in subclasses")

    def get_current_plugins(self) -> List[Plugin]:
    def get_current_plugins(self, nixpkgs) -> List[Plugin]:
        """To fill the cache"""
        data = run_nix_expr(self.get_plugins)
        data = run_nix_expr(self.get_plugins, nixpkgs)
        plugins = []
        for name, attr in data.items():
            p = Plugin(name, attr["rev"], attr["submodules"], attr["sha256"])
@@ -414,7 +420,7 @@ class Editor:
        raise NotImplementedError()

    def get_update(self, input_file: str, outfile: str, config: FetchConfig):
        cache: Cache = Cache(self.get_current_plugins(), self.cache_file)
        cache: Cache = Cache(self.get_current_plugins(self.nixpkgs), self.cache_file)
        _prefetch = functools.partial(prefetch, cache=cache)

        def update() -> dict:
@@ -453,6 +459,12 @@ class Editor:
                By default from {self.default_in} to {self.default_out}"""
            ),
        )
        common.add_argument(
            "--nixpkgs",
            type=str,
            default=os.getcwd(),
            help="Path to the nixpkgs checkout to update",
        )
        common.add_argument(
            "--input-names",
            "-i",
@@ -541,22 +553,27 @@ class Editor:
        command = args.command or "update"
        log.setLevel(LOG_LEVELS[args.debug])
        log.info("Chose to run command: %s", command)
        self.nixpkgs = args.nixpkgs

        if not args.no_commit:
            self.nixpkgs_repo = git.Repo(self.root, search_parent_directories=True)
            self.nixpkgs_repo = git.Repo(args.nixpkgs, search_parent_directories=True)

        getattr(self, command)(args)


class CleanEnvironment(object):
    def __init__(self, nixpkgs):
        self.local_pkgs = nixpkgs

    def __enter__(self) -> str:
        self.old_environ = os.environ.copy()
        """
        local_pkgs = str(Path(__file__).parent.parent.parent)
        """
        self.old_environ = os.environ.copy()
        self.empty_config = NamedTemporaryFile()
        self.empty_config.write(b"{}")
        self.empty_config.flush()
        os.environ["NIXPKGS_CONFIG"] = self.empty_config.name
        return f"localpkgs={local_pkgs}"
        # os.environ["NIXPKGS_CONFIG"] = self.empty_config.name
        return f"localpkgs={self.local_pkgs}"

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        os.environ.update(self.old_environ)
@@ -758,7 +775,8 @@ def commit(repo: git.Repo, message: str, files: List[Path]) -> None:


def update_plugins(editor: Editor, args):
    """The main entry function of this module. All input arguments are grouped in the `Editor`."""
    """The main entry function of this module.
    All input arguments are grouped in the `Editor`."""

    log.info("Start updating plugins")
    fetch_config = FetchConfig(args.proc, args.github_token)

@@ -2,11 +2,11 @@
#!nix-shell update-luarocks-shell.nix -i python3

# format:
# $ nix run nixpkgs.python3Packages.black -c black update.py
# $ nix run nixpkgs#python3Packages.black -- update.py
# type-check:
# $ nix run nixpkgs.python3Packages.mypy -c mypy update.py
# $ nix run nixpkgs#python3Packages.mypy -- update.py
# linted:
# $ nix run nixpkgs.python3Packages.flake8 -c flake8 --ignore E501,E265,E402 update.py
# $ nix run nixpkgs#python3Packages.flake8 -- --ignore E501,E265,E402 update.py

import inspect
import os
@@ -40,36 +40,40 @@ nixpkgs$ ./maintainers/scripts/update-luarocks-packages

You can customize the generated packages in pkgs/development/lua-modules/overrides.nix
*/
""".format(GENERATED_NIXFILE=GENERATED_NIXFILE)
""".format(
    GENERATED_NIXFILE=GENERATED_NIXFILE
)

FOOTER = """
}
/* GENERATED - do not edit this file */
"""


@dataclass
class LuaPlugin:
    name: str
    '''Name of the plugin, as seen on luarocks.org'''
    """Name of the plugin, as seen on luarocks.org"""
    src: str
    '''address to the git repository'''
    """address to the git repository"""
    ref: Optional[str]
    '''git reference (branch name/tag)'''
    """git reference (branch name/tag)"""
    version: Optional[str]
    '''Set it to pin a package '''
    """Set it to pin a package """
    server: Optional[str]
    '''luarocks.org registers packages under different manifests.
    """luarocks.org registers packages under different manifests.
    Its value can be 'http://luarocks.org/dev'
    '''
    """
    luaversion: Optional[str]
    '''Attribute of the lua interpreter if a package is available only for a specific lua version'''
    """Attribute of the lua interpreter if a package is available only for a specific lua version"""
    maintainers: Optional[str]
    ''' Optional string listing maintainers separated by spaces'''
    """ Optional string listing maintainers separated by spaces"""

    @property
    def normalized_name(self) -> str:
        return self.name.replace(".", "-")


# rename Editor to LangUpdate/ EcosystemUpdater
class LuaEditor(pluginupdate.Editor):
    def get_current_plugins(self):
@@ -80,8 +84,10 @@ class LuaEditor(pluginupdate.Editor):
        csvfilename = input_file
        log.info("Loading package descriptions from %s", csvfilename)

        with open(csvfilename, newline='') as csvfile:
            reader = csv.DictReader(csvfile,)
        with open(csvfilename, newline="") as csvfile:
            reader = csv.DictReader(
                csvfile,
            )
            for row in reader:
                # name,server,version,luaversion,maintainers
                plugin = LuaPlugin(**row)
@@ -91,12 +97,7 @@ class LuaEditor(pluginupdate.Editor):
    def update(self, args):
        update_plugins(self, args)

    def generate_nix(
        self,
        results: List[Tuple[LuaPlugin, str]],
        outfilename: str
    ):
    def generate_nix(self, results: List[Tuple[LuaPlugin, str]], outfilename: str):
        with tempfile.NamedTemporaryFile("w+") as f:
            f.write(HEADER)
            header2 = textwrap.dedent(
@@ -105,9 +106,10 @@ class LuaEditor(pluginupdate.Editor):
            { self, stdenv, lib, fetchurl, fetchgit, callPackage, ... } @ args:
            final: prev:
            {
            """)
            """
            )
            f.write(header2)
            for (plugin, nix_expr) in results:
            for plugin, nix_expr in results:
                f.write(f"{plugin.normalized_name} = {nix_expr}")
            f.write(FOOTER)
            f.flush()
@@ -156,14 +158,15 @@ class LuaEditor(pluginupdate.Editor):
        # luaPackages.append(plugin)
        pass


def generate_pkg_nix(plug: LuaPlugin):
    '''
    """
    Generate nix expression for a luarocks package
    Our cache key associates "p.name-p.version" to its rockspec
    '''
    """
    log.debug("Generating nix expression for %s", plug.name)
    custom_env = os.environ.copy()
    custom_env['LUAROCKS_CONFIG'] = LUAROCKS_CONFIG
    custom_env["LUAROCKS_CONFIG"] = LUAROCKS_CONFIG

    # we add --dev else luarocks wont find all the "scm" (=dev) versions of the
    # packages
@@ -176,7 +179,10 @@ def generate_pkg_nix(plug: LuaPlugin):
    # if plug.server == "src":
    if plug.src != "":
    if plug.src is None:
        msg = "src must be set when 'version' is set to \"src\" for package %s" % plug.name
        msg = (
            "src must be set when 'version' is set to \"src\" for package %s"
            % plug.name
        )
        log.error(msg)
        raise RuntimeError(msg)
    log.debug("Updating from source %s", plug.src)
@@ -185,7 +191,6 @@ def generate_pkg_nix(plug: LuaPlugin):
    else:
        cmd.append(plug.name)
        if plug.version and plug.version != "src":

            cmd.append(plug.version)

    if plug.server != "src" and plug.server:
@@ -194,23 +199,26 @@ def generate_pkg_nix(plug: LuaPlugin):
    if plug.luaversion:
        cmd.append(f"--lua-version={plug.luaversion}")

    log.debug("running %s", ' '.join(cmd))
    log.debug("running %s", " ".join(cmd))

    output = subprocess.check_output(cmd, env=custom_env, text=True)
    output = "callPackage(" + output.strip() + ") {};\n\n"
    return (plug, output)

def main():

    editor = LuaEditor("lua", ROOT, '',
def main():
    editor = LuaEditor(
        "lua",
        ROOT,
        "",
        default_in=ROOT.joinpath(PKG_LIST),
        default_out = ROOT.joinpath(GENERATED_NIXFILE)
        default_out=ROOT.joinpath(GENERATED_NIXFILE),
    )

    editor.run()

if __name__ == "__main__":

if __name__ == "__main__":
    main()

# vim: set ft=python noet fdm=manual fenc=utf-8 ff=unix sts=0 sw=4 ts=4 :

@@ -682,6 +682,18 @@ with lib.maintainers; {
    shortName = "Numtide team";
  };

  ocaml = {
    members = [
      alizter
    ];
    githubTeams = [
      "ocaml"
    ];
    scope = "Maintain the OCaml compiler and package set.";
    shortName = "OCaml";
    enableFeatureFreezePing = true;
  };

  openstack = {
    members = [
      SuperSandro2000

@@ -1,11 +1,7 @@
# Customising Packages {#sec-customising-packages}

Some packages in Nixpkgs have options to enable or disable optional
functionality or change other aspects of the package. For instance, the
Firefox wrapper package (which provides Firefox with a set of plugins
such as the Adobe Flash player) has an option to enable the Google Talk
plugin. It can be set in `configuration.nix` as follows:
`nixpkgs.config.firefox.enableGoogleTalkPlugin = true;`
functionality or change other aspects of the package.

::: {.warning}
Unfortunately, Nixpkgs currently lacks a way to query available
@@ -13,7 +9,7 @@ configuration options.
:::

::: {.note}
Alternatively, many packages come with extensions one might add.
For example, many packages come with extensions one might add.
Examples include:
- [`passExtensions.pass-otp`](https://search.nixos.org/packages/query=passExtensions.pass-otp)
- [`python310Packages.requests`](https://search.nixos.org/packages/query=python310Packages.requests)
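
For `pass`, a minimal sketch of how such an extension might be wired into a
system configuration (`withExtensions` is the conventional nixpkgs wrapper;
the surrounding module structure is assumed for illustration):

```nix
{ pkgs, ... }:
{
  environment.systemPackages = [
    # Build pass together with the pass-otp extension; the extension set
    # is passed to the wrapper as a function argument.
    (pkgs.pass.withExtensions (exts: [ exts.pass-otp ]))
  ];
}
```
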
@@ -26,7 +26,7 @@ directory which is scanned by the ICL loader for ICD files. For example:

```ShellSession
$ export \
  OCL_ICD_VENDORS=`nix-build '<nixpkgs>' --no-out-link -A rocm-opencl-icd`/etc/OpenCL/vendors/
  OCL_ICD_VENDORS=`nix-build '<nixpkgs>' --no-out-link -A rocmPackages.clr.icd`/etc/OpenCL/vendors/
```

The second mechanism is to add the OpenCL driver package to
@@ -50,13 +50,13 @@ Platform Vendor Advanced Micro Devices, Inc.

Modern AMD [Graphics Core
Next](https://en.wikipedia.org/wiki/Graphics_Core_Next) (GCN) GPUs are
supported through the rocm-opencl-icd package. Adding this package to
supported through the rocmPackages.clr.icd package. Adding this package to
[](#opt-hardware.opengl.extraPackages)
enables OpenCL support:

```nix
hardware.opengl.extraPackages = [
  rocm-opencl-icd
  rocmPackages.clr.icd
];
```

@@ -45,8 +45,8 @@ services.xserver.displayManager.gdm.enable = true;
You can set the keyboard layout (and optionally the layout variant):

```nix
services.xserver.layout = "de";
services.xserver.xkbVariant = "neo";
services.xserver.xkb.layout = "de";
services.xserver.xkb.variant = "neo";
```

The X server is started automatically at boot time. If you don't want
@@ -266,7 +266,7 @@ Once the configuration is applied, and you did a logout/login cycle, the
layout should be ready to use. You can try it by e.g. running
`setxkbmap us-greek` and then type `<alt>+a` (it may not get applied in
your terminal straight away). To change the default, the usual
`services.xserver.layout` option can still be used.
`services.xserver.xkb.layout` option can still be used.

A layout can have several other components besides `xkb_symbols`, for
example we will define new keycodes for some multimedia key and bind

@@ -90,7 +90,7 @@ lib.mkOption {
```
:::

### `mkPackageOption`, `mkPackageOptionMD` {#sec-option-declarations-util-mkPackageOption}
### `mkPackageOption` {#sec-option-declarations-util-mkPackageOption}

Usage:

@@ -121,15 +123,13 @@
valid attribute path in pkgs (if name is a list).

If you wish to explicitly provide no default, pass `null` as `default`.

During the transition to CommonMark documentation `mkPackageOption` creates an option with a DocBook description attribute, once the transition is completed it will create a CommonMark description instead. `mkPackageOptionMD` always creates an option with a CommonMark description attribute and will be removed some time after the transition is completed.

[]{#ex-options-declarations-util-mkPackageOption}
Examples:

::: {#ex-options-declarations-util-mkPackageOption-hello .example}
### Simple `mkPackageOption` usage
```nix
lib.mkPackageOptionMD pkgs "hello" { }
lib.mkPackageOption pkgs "hello" { }
# is like
lib.mkOption {
  type = lib.types.package;
@@ -143,7 +141,7 @@ lib.mkOption {
::: {#ex-options-declarations-util-mkPackageOption-ghc .example}
### `mkPackageOption` with explicit default and example
```nix
lib.mkPackageOptionMD pkgs "GHC" {
lib.mkPackageOption pkgs "GHC" {
  default = [ "ghc" ];
  example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])";
}

@@ -528,7 +528,7 @@ The only required parameter is `name`.

: A string representation of the type function name.

`definition`
`description`

: Description of the type used in documentation. Give information of
  the type and any of its arguments.

@@ -353,7 +353,7 @@ When upgrading from a previous release, please be aware of the following incompatibilities

Another benefit of the refactoring is that we can now issue reloads via either `pkill -HUP unbound` and `systemctl reload unbound` to reload the running configuration without taking the daemon offline. A prerequisite of this was that unbound configuration is available on a well known path on the file system. We are using the path `/etc/unbound/unbound.conf` as that is the default in the CLI tooling which in turn enables us to use `unbound-control` without passing a custom configuration location.

The module has also been reworked to be [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) compliant. As such, `sevices.unbound.extraConfig` has been removed and replaced by [services.unbound.settings](options.html#opt-services.unbound.settings). `services.unbound.interfaces` has been renamed to `services.unbound.settings.server.interface`.
The module has also been reworked to be [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) compliant. As such, `services.unbound.extraConfig` has been removed and replaced by [services.unbound.settings](options.html#opt-services.unbound.settings). `services.unbound.interfaces` has been renamed to `services.unbound.settings.server.interface`.

`services.unbound.forwardAddresses` and `services.unbound.allowedAccess` have also been changed to use the new settings interface. You can follow the instructions when executing `nixos-rebuild` to upgrade your configuration to use the new interface.
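
As a migration aid, a hedged sketch of a pre-21.05-style setup expressed through the new settings interface (the `forward-zone` stanza mirrors plain unbound.conf syntax; the concrete addresses are illustrative assumptions, not recommendations):

```nix
{
  services.unbound = {
    enable = true;
    settings = {
      server = {
        # formerly services.unbound.interfaces
        interface = [ "127.0.0.1" "::1" ];
      };
      # formerly services.unbound.forwardAddresses
      forward-zone = [
        {
          name = ".";
          forward-addr = [ "9.9.9.9" ];
        }
      ];
    };
  };
}
```
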
@@ -130,7 +130,7 @@ In addition to numerous new and upgraded packages, this release includes the following
don't lose access to their files.

In any other case, it's safe to use OpenSSL 3 for PHP's OpenSSL extension. This can be done by setting
[](#opt-services.nextcloud.enableBrokenCiphersForSSE) to `false`.
`services.nextcloud.enableBrokenCiphersForSSE` to `false`.

- The `coq` package and versioned variants starting at `coq_8_14` no
  longer include CoqIDE, which is now available through

@@ -87,7 +87,7 @@ In addition to numerous new and updated packages, this release has the following

- [gmediarender](https://github.com/hzeller/gmrender-resurrect), a simple, headless UPnP/DLNA renderer. Available as [services.gmediarender](options.html#opt-services.gmediarender.enable).

- [go2rtc](https://github.com/AlexxIT/go2rtc), a camera streaming appliation with support for RTSP, WebRTC, HomeKit, FFMPEG, RTMP and other protocols. Available as [services.go2rtc](options.html#opt-services.go2rtc.enable).
- [go2rtc](https://github.com/AlexxIT/go2rtc), a camera streaming application with support for RTSP, WebRTC, HomeKit, FFMPEG, RTMP and other protocols. Available as [services.go2rtc](options.html#opt-services.go2rtc.enable).

- [goeland](https://github.com/slurdge/goeland), an alternative to rss2email written in Golang with many filters. Available as [services.goeland](#opt-services.goeland.enable).

@@ -203,7 +203,7 @@ In addition to numerous new and updated packages, this release has the following

- `graylog` has been updated to version 5, which can not be updated directly from the previously packaged version 3.3. If you had installed the previously packaged version 3.3, please follow the [upgrade path](https://go2docs.graylog.org/5-0/upgrading_graylog/upgrade_path.htm) from 3.3 to 4.0 to 4.3 to 5.0.

- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implemenation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.
- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implementation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.

- `nushell` has been updated to at least version 0.77.0, which includes potential breaking changes in aliases. The old aliases are now available as `old-alias` but it is recommended you migrate to the new format. See [Reworked aliases](https://www.nushell.sh/blog/2023-03-14-nushell_0_77.html#reworked-aliases-breaking-changes-kubouch).

@@ -555,7 +555,7 @@ In addition to numerous new and updated packages, this release has the following

- `buildDunePackage` now defaults to `strictDeps = true` which means that any library should go into `buildInputs` or `checkInputs`. Any executable that is run on the building machine should go into `nativeBuildInputs` or `nativeCheckInputs` respectively. Example of executables are `ocaml`, `findlib` and `menhir`. PPXs are libraries which are built by dune and should therefore not go into `nativeBuildInputs`.

- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implemenation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.
- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implementation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.

- Top-level `buildPlatform`, `hostPlatform`, `targetPlatform` have been deprecated, use `stdenv.X` instead.

@@ -26,6 +26,9 @@

  [`sudo-rs`]: https://github.com/memorysafety/sudo-rs/

- All [ROCm](https://rocm.docs.amd.com/en/latest/) packages have been updated to 5.7.0.
  - [ROCm](https://rocm.docs.amd.com/en/latest/) package attribute sets are versioned: `rocmPackages` -> `rocmPackages_5`.

- If the user has a custom shell enabled via `users.users.${USERNAME}.shell = ${CUSTOMSHELL}`, the
  assertion will require them to also set `programs.${CUSTOMSHELL}.enable =
  true`. This is generally safe behavior, but for anyone needing to opt out from
@@ -92,8 +95,19 @@

- [tuxedo-rs](https://github.com/AaronErhardt/tuxedo-rs), Rust utilities for interacting with hardware from TUXEDO Computers.

- [audiobookshelf](https://github.com/advplyr/audiobookshelf/), a self-hosted audiobook and podcast server. Available as [services.audiobookshelf](#opt-services.audiobookshelf.enable).

- [ZITADEL](https://zitadel.com), a turnkey identity and access management platform. Available as [services.zitadel](#opt-services.zitadel.enable).

- [netclient](https://github.com/gravitl/netclient), an automated WireGuard® Management Client. Available as [services.netclient](#opt-services.netclient.enable).

- [trunk-ng](https://github.com/ctron/trunk), a fork of `trunk`: build, bundle & ship your Rust WASM application to the web.

## Backward Incompatibilities {#sec-release-23.11-incompatibilities}

- `network-online.target` has been fixed to no longer time out for systems with `networking.useDHCP = true` and `networking.useNetworkd = true`.
  Workarounds for this can be removed.

- The `boot.loader.raspberryPi` options have been marked deprecated, with intent for removal for NixOS 24.11. They had a limited use-case, and do not work like people expect. They required either very old installs ([before mid-2019](https://github.com/NixOS/nixpkgs/pull/62462)) or customized builds out of scope of the standard and generic AArch64 support. That option set never supported the Raspberry Pi 4 family of devices.

- `python3.pkgs.sequoia` was removed in favor of `python3.pkgs.pysequoia`. The latter package is based on upstream's dedicated repository for sequoia's Python bindings, where the Python bindings from [gitlab:sequoia-pgp/sequoia](https://gitlab.com/sequoia-pgp/sequoia) were removed long ago.

@@ -112,7 +126,7 @@

- `pass` now does not contain `password-store.el`. Users should get `password-store.el` from Emacs lisp package set `emacs.pkgs.password-store`.

- `services.knot` now supports `.settings` from RFC42. The change is not 100% compatible with the previous `.extraConfig`.
- `services.knot` now supports `.settings` from RFC42. The previous `.extraConfig` still works the same, but it displays a warning now.

- `mu` now does not install `mu4e` files by default. Users should get `mu4e` from Emacs lisp package set `emacs.pkgs.mu4e`.

@@ -124,6 +138,9 @@

- The `services.ananicy.extraRules` option now has the type of `listOf attrs` instead of `string`.

- `buildVimPluginFrom2Nix` has been renamed to `buildVimPlugin`, which
  now skips `configurePhase` and `buildPhase`.

- JACK tools (`jack_*` except `jack_control`) have moved from the `jack2` package to `jack-example-tools`.

- The `matrix-synapse` package & module have undergone some significant internal changes, for most setups no intervention is needed, though:

@@ -133,12 +150,25 @@
  - A list of all extras (and the extras enabled by default) can be found at the [option's reference for `services.matrix-synapse.extras`](#opt-services.matrix-synapse.extras).
  - In some cases (e.g. for running synapse workers) it was necessary to re-use the `PYTHONPATH` of `matrix-synapse.service`'s environment to have all plugins available. This isn't necessary anymore, instead `config.services.matrix-synapse.package` can be used as it points to the wrapper with properly configured `extras` and also all plugins defined via [`services.matrix-synapse.plugins`](#opt-services.matrix-synapse.plugins) available. This is also the reason for why the option is read-only now, it's supposed to be set by the module only.

- `netbox` was updated to 3.6. NixOS' `services.netbox.package` still defaults to 3.5 if `stateVersion` is earlier than 23.11. Please review upstream's breaking changes [for 3.6.0](https://github.com/netbox-community/netbox/releases/tag/v3.6.0) and upgrade NetBox by changing `services.netbox.package`. Database migrations will be run automatically.

- `etcd` has been updated to 3.5, you will want to read the [3.3 to 3.4](https://etcd.io/docs/v3.5/upgrades/upgrade_3_4/) and [3.4 to 3.5](https://etcd.io/docs/v3.5/upgrades/upgrade_3_5/) upgrade guides.

- `gitlab` installations created or updated between versions \[15.11.0, 15.11.2] have an incorrect database schema. This will become a problem when upgrading to `gitlab` >=16.2.0. A workaround for affected users can be found in the [GitLab docs](https://docs.gitlab.com/ee/update/versions/gitlab_16_changes.html#undefined-column-error-upgrading-to-162-or-later).

- `consul` has been updated to `1.16.0`. See the [release note](https://github.com/hashicorp/consul/releases/tag/v1.16.0) for more details. Once a new Consul version has started and upgraded its data directory, it generally cannot be downgraded to the previous version.

- `llvmPackages_rocm` has been moved to `rocmPackages.llvm`.

- `hip`, `rocm-opencl-runtime`, `rocm-opencl-icd`, and `rocclr` have been combined into `rocmPackages.clr`.

- `clang-ocl`, `clr`, `composable_kernel`, `hipblas`, `hipcc`, `hip-common`, `hipcub`,
  `hipfft`, `hipfort`, `hipify`, `hipsolver`, `hipsparse`, `migraphx`, `miopen`, `miopengemm`,
  `rccl`, `rdc`, `rocalution`, `rocblas`, `rocdbgapi`, `rocfft`, `rocgdb`, `rocm-cmake`,
  `rocm-comgr`, `rocm-core`, `rocm-device-libs`, `rocminfo`, `rocmlir`, `rocm-runtime`,
  `rocm-smi`, `rocm-thunk`, `rocprim`, `rocprofiler`, `rocrand`, `rocr-debug-agent`,
  `rocsolver`, `rocsparse`, `rocthrust`, `roctracer`, `rocwmma`, and `tensile` have been moved to `rocmPackages`.

- `himalaya` has been updated to `0.8.0`, which drops the native TLS support (in favor of Rustls) and adds OAuth 2.0 support. See the [release note](https://github.com/soywod/himalaya/releases/tag/v0.8.0) for more details.

- `nix-prefetch-git` now ignores global and user git config, to improve reproducibility.

@@ -165,6 +195,10 @@

- PHP now defaults to PHP 8.2, updated from 8.1.

- GraalVM has been updated to the latest version, and this brings significant changes. Upstream doesn't release multiple versions targeting different JVMs anymore, so now we only have one GraalVM derivation (`graalvm-ce`). While at first glance the version may seem a downgrade (22.3.1 -> 21.0.0), the major version is now following the JVM it targets (so this latest version targets JVM 21). Also some products like `llvm-installable-svm` and `native-image-svm` were incorporated into the main GraalVM derivation, so they're included by default.

- GraalPy (`graalCEPackages.graalpy`), TruffleRuby (`graalCEPackages.truffleruby`), GraalJS (`graalCEPackages.graaljs`) and GraalNodeJS (`graalCEPackages.graalnodejs`) are now independent from the main GraalVM derivation.

- The ISC DHCP package and corresponding module have been removed, because they are end of life upstream. See https://www.isc.org/blogs/isc-dhcp-eol/ for details and switch to a different DHCP implementation like kea or dnsmasq.

- `prometheus-unbound-exporter` has been replaced by the Let's Encrypt maintained version, since the previous version was archived. This requires some changes to the module configuration, most notably `controlInterface` needs migration.

@@ -172,6 +206,8 @@

- `odoo` now defaults to 16, updated from 15.

- `varnish` was upgraded from 7.2.x to 7.4.x, see https://varnish-cache.org/docs/7.3/whats-new/upgrading-7.3.html and https://varnish-cache.org/docs/7.4/whats-new/upgrading-7.4.html for upgrade notes. The current LTS version is still offered as `varnish60`.

- `util-linux` is now supported on Darwin and is no longer an alias to `unixtools`. Use the `unixtools.util-linux` package for access to the Apple variants of the utilities.

- `services.keyd` changed its API. Now you can create multiple configuration files.

@@ -186,6 +222,8 @@

- `fileSystems.<name>.autoResize` now uses `systemd-growfs` to resize the file system online in stage 2. This means that `f2fs` and `ext2` can no longer be auto resized, while `xfs` and `btrfs` now can be.

- `nixos-rebuild {switch,boot,test,dry-activate}` now runs the system activation inside `systemd-run`, creating an ephemeral systemd service and protecting the system switch against issues like network disconnections during remote (e.g. SSH) sessions. This has the side effect of running the switch in an isolated environment, which could possibly break post-switch scripts that depend on things like environment variables being set. If you want to opt out from this behavior for now, you may set the `NIXOS_SWITCH_USE_DIRTY_ENV` environment variable before running `nixos-rebuild`. However, keep in mind that this option will be removed in the future.

- The `services.vaultwarden.config` option default value was changed to make Vaultwarden only listen on localhost, following the [secure defaults for most NixOS services](https://github.com/NixOS/nixpkgs/issues/100192).

- `services.lemmy.settings.federation` was removed in 0.17.0 and no longer has any effect. To enable federation, the hostname must be set in the configuration file and then federation must be enabled in the admin web UI. See the [release notes](https://github.com/LemmyNet/lemmy/blob/c32585b03429f0f76d1e4ff738786321a0a9df98/RELEASES.md#upgrade-instructions) for more details.

@@ -196,6 +234,10 @@

- `spamassassin` no longer supports the `Hashcash` module. The module needs to be removed from the `loadplugin` list if it was copied over from the default `initPreConf` option.

- `nano` was removed from `environment.defaultPackages`. To not leave systems without an editor, `programs.nano.enable` is now enabled by default.

- `programs.nano.nanorc` and `programs.nano.syntaxHighlight` no longer have an effect unless `programs.nano.enable` is set to true, which is the default.

- `services.outline.sequelizeArguments` has been removed, as `outline` no longer executes database migrations via the `sequelize` cli.

- The binary of the package `cloud-sql-proxy` has changed from `cloud_sql_proxy` to `cloud-sql-proxy`.

@@ -210,7 +252,7 @@

- The `aseprite-unfree` package has been upgraded from 1.2.16.3 to 1.2.40. The free version of aseprite has been dropped because it is EOL and the package attribute now points to the unfree version. A maintained fork of the last free version of Aseprite, named 'LibreSprite', is available in the `libresprite` package.

- The default `kops` version is now 1.27.0 and support for 1.24 and older has been dropped.
- The default `kops` version is now 1.28.0 and support for 1.25 and older has been dropped.

- `pharo` has been updated to latest stable (PharoVM 10.0.5), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64bit Spur. The 32bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severely outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.

@@ -236,12 +278,23 @@
  order, or relying on `mkBefore` and `mkAfter`, but may impact users calling
  `mkOrder n` with n ≤ 400.

- X keyboard extension (XKB) options have been reorganized into a single attribute set, `services.xserver.xkb`. Specifically, `services.xserver.layout` is now `services.xserver.xkb.layout`, `services.xserver.xkbModel` is now `services.xserver.xkb.model`, `services.xserver.xkbOptions` is now `services.xserver.xkb.options`, `services.xserver.xkbVariant` is now `services.xserver.xkb.variant`, and `services.xserver.xkbDir` is now `services.xserver.xkb.dir`. A before/after sketch follows this entry.
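
  A minimal before/after sketch of the rename (the layout values are illustrative):

  ```nix
  {
    # 23.05 and earlier:
    # services.xserver.layout = "us";
    # services.xserver.xkbOptions = "caps:escape";

    # 23.11:
    services.xserver.xkb = {
      layout = "us";
      options = "caps:escape";
    };
  }
  ```
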
- `networking.networkmanager.firewallBackend` was removed as NixOS is now using iptables-nftables-compat even when using iptables, therefore NetworkManager now uses the nftables backend unconditionally.

- [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime) now always evaluates the initial accumulator argument first.
  If you depend on the lazier behavior, consider using [`lib.lists.foldl`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl) or [`builtins.foldl'`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-foldl') instead.

- [`lib.attrsets.foldlAttrs`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.attrsets.foldlAttrs) now always evaluates the initial accumulator argument first.

- `rome` was removed because it is no longer maintained and is succeeded by `biome`.

- The `services.mtr-exporter.target` has been removed in favor of `services.mtr-exporter.jobs` which allows specifying multiple targets.

- Setting `nixpkgs.config` options while providing an external `pkgs` instance will now raise an error instead of silently ignoring the options. NixOS modules no longer set `nixpkgs.config` to accommodate this. This specifically affects `services.locate`, `services.xserver.displayManager.lightdm.greeters.tiny` and `programs.firefox` NixOS modules. No manual intervention should be required in most cases, however, configurations relying on those modules affecting packages outside the system environment should switch to explicit overlays.

- `services.borgmatic.settings.location` and `services.borgmatic.configurations.<name>.location` are deprecated, please move your options out of sections to the global scope.

## Other Notable Changes {#sec-release-23.11-notable-changes}

- The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration.

@@ -276,6 +329,8 @@

- The `fonts.fonts` and `fonts.enableDefaultFonts` options have been renamed to `fonts.packages` and `fonts.enableDefaultPackages` respectively.

- `pkgs.openvpn3` now optionally supports systemd-resolved. `programs.openvpn3` will automatically enable systemd-resolved support if `config.services.resolved.enable` is enabled.

- `services.fail2ban.jails` can now be configured with attribute sets defining settings and filters instead of lines. The stringed options `daemonConfig` and `extraSettings` have respectively been replaced by `daemonSettings` and `jails.DEFAULT.settings` which use attribute sets. A sketch follows this entry.
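
  An illustrative attribute-set jail, assuming the shape described above (the values are examples only):

  ```nix
  {
    services.fail2ban = {
      enable = true;
      jails.DEFAULT.settings = {
        bantime = "10m";
        maxretry = 5;
      };
    };
  }
  ```
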
- The application firewall `opensnitch` now uses the process monitor method eBPF as default as recommended by upstream. The method can be changed with the setting [services.opensnitch.settings.ProcMonitorMethod](#opt-services.opensnitch.settings.ProcMonitorMethod).

@@ -288,11 +343,11 @@ The module update takes care of the new config syntax and the data itself (user

- `services.nginx` gained a `defaultListen` option at server-level with support for PROXY protocol listeners, also `proxyProtocol` is now exposed in `services.nginx.virtualHosts.<name>.listen` option. It is now possible to run PROXY listeners and non-PROXY listeners at a server-level, see [#213510](https://github.com/NixOS/nixpkgs/pull/213510/) for more details; a sketch follows this entry.
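
  A hedged sketch of a per-vhost PROXY protocol listener, assuming the listener shape described above (address and port are illustrative):

  ```nix
  {
    services.nginx.virtualHosts."example.org".listen = [
      {
        addr = "0.0.0.0";
        port = 444;
        ssl = true;
        proxyProtocol = true;
      }
    ];
  }
  ```
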
- `services.restic.backups` now adds wrapper scripts to your system path, which set the same environment variables as the service, so restic operations can easly be run from the command line. This behavior can be disabled by setting `createWrapper` to `false`, per backup configuration.
- `services.restic.backups` now adds wrapper scripts to your system path, which set the same environment variables as the service, so restic operations can easily be run from the command line. This behavior can be disabled by setting `createWrapper` to `false`, per backup configuration.

- `services.prometheus.exporters` has a new exporter to monitor electrical power consumption based on PowercapRAPL sensor called [Scaphandre](https://github.com/hubblo-org/scaphandre), see [#239803](https://github.com/NixOS/nixpkgs/pull/239803) for more details.

- The MariaDB C client library was upgraded from 3.2.x to 3.3.x. It is recomended to review the [upstream release notes](https://mariadb.com/kb/en/mariadb-connector-c-33-release-notes/).
- The MariaDB C client library was upgraded from 3.2.x to 3.3.x. It is recommended to review the [upstream release notes](https://mariadb.com/kb/en/mariadb-connector-c-33-release-notes/).

- The module `services.calibre-server` has new options to configure the `host`, `port`, `auth.enable`, `auth.mode` and `auth.userDb` path, see [#216497](https://github.com/NixOS/nixpkgs/pull/216497/) for more details; a sketch follows this entry.
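
  For example, under the option names listed above (the concrete values, auth mode, and database path are assumptions for illustration):

  ```nix
  {
    services.calibre-server = {
      enable = true;
      host = "127.0.0.1";
      port = 8080;
      auth = {
        enable = true;
        mode = "basic";
        userDb = "/var/lib/calibre-server/users.sqlite";
      };
    };
  }
  ```
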
@@ -307,6 +362,8 @@ The module update takes care of the new config syntax and the data itself (user

- `programs.gnupg.agent.pinentryFlavor` is now set in `/etc/gnupg/gpg-agent.conf`, and will no longer take precedence over a `pinentry-program` set in `~/.gnupg/gpg-agent.conf`.

- `programs.gnupg` now has the option `agent.settings` to set verbatim config values in `/etc/gnupg/gpg-agent.conf`, as sketched below.
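
  A minimal sketch (the `default-cache-ttl` value is an illustrative gpg-agent setting, not a recommendation):

  ```nix
  {
    programs.gnupg.agent = {
      enable = true;
      settings = {
        default-cache-ttl = 600;
      };
    };
  }
  ```
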
- `dockerTools.buildImage`, `dockerTools.buildLayeredImage` and `dockerTools.streamLayeredImage` now use `lib.makeOverridable` to allow `dockerTools`-based images to be customized more efficiently at the nix-level.

- `services.influxdb2` now supports doing an automatic initial setup and provisioning of users, organizations, buckets and authentication tokens, see [#249502](https://github.com/NixOS/nixpkgs/pull/249502) for more details.

@@ -339,6 +396,25 @@ The module update takes care of the new config syntax and the data itself (user
  - `keepTerminfo` controls whether `TERMINFO` and `TERMINFO_DIRS` are preserved
    for `root` and the `wheel` group.

- CoreDNS can now be built with external plugins by overriding `externalPlugins` and `vendorHash` arguments like this:

  ```nix
  services.coredns = {
    enable = true;
    package = pkgs.coredns.override {
      externalPlugins = [
        { name = "fanout"; repo = "github.com/networkservicemesh/fanout"; version = "v1.9.1"; }
      ];
      vendorHash = "<SRI hash>";
    };
  };
  ```

  To get the necessary SRI hash, set `vendorHash = "";`. The build will fail and produce the correct `vendorHash` in the error message.

  If you use this feature, updates to CoreDNS may require updating `vendorHash` by following these steps again.

- `fusuma` now enables the following plugins: [appmatcher](https://github.com/iberianpig/fusuma-plugin-appmatcher), [keypress](https://github.com/iberianpig/fusuma-plugin-keypress), [sendkey](https://github.com/iberianpig/fusuma-plugin-sendkey), [tap](https://github.com/iberianpig/fusuma-plugin-tap) and [wmctrl](https://github.com/iberianpig/fusuma-plugin-wmctrl).

## Nixpkgs internals {#sec-release-23.11-nixpkgs-internals}

@@ -358,7 +434,7 @@ The module update takes care of the new config syntax and the data itself (user

- The `qemu-vm.nix` module by default now identifies block devices via
  persistent names available in `/dev/disk/by-*`. Because the rootDevice is
  identfied by its filesystem label, it needs to be formatted before the VM is
  identified by its filesystem label, it needs to be formatted before the VM is
  started. The functionality of automatically formatting the rootDevice in the
  initrd is removed from the QEMU module. However, for tests that depend on
  this functionality, a test utility for the scripted initrd is added.

@@ -370,4 +446,14 @@ The module update takes care of the new config syntax and the data itself (user

- `python3.pkgs.flitBuildHook` has been removed. Use `flit-core` and `format = "pyproject"` instead.

- The `extend` function of `llvmPackages` has been removed due to it coming from the `tools` attrset, thus only extending the `tool` attrset. A possible replacement is to construct the set from `libraries` and `tools`, or patch nixpkgs.

- The `qemu-vm.nix` module now supports disabling overriding `fileSystems` with
  `virtualisation.fileSystems`. This enables the user to boot VMs from
  "external" disk images not created by the qemu-vm module. You can stop the
  qemu-vm module from overriding `fileSystems` by setting
  `virtualisation.fileSystems = lib.mkForce { };`.

- The `electron` packages now place their application files in `$out/libexec/electron` instead of `$out/lib/electron`. Packages using electron-builder will fail to build and need to be adjusted by changing `lib` to `libexec`.

- `teleport` has been upgraded from major version 12 to major version 14. Please see upstream [upgrade instructions](https://goteleport.com/docs/management/operations/upgrading/) and release notes for versions [13](https://goteleport.com/docs/changelog/#1300-050823) and [14](https://goteleport.com/docs/changelog/#1400-092023). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 13.x version by setting `services.teleport.package = pkgs.teleport_13`. Afterwards, this option can be removed to upgrade to the default version (14).

@@ -34,9 +34,6 @@ evalConfigArgs@
  in lib.optional (e != "") (import e)
}:

let pkgs_ = pkgs;
in

let
  inherit (lib) optional;

@@ -58,8 +55,9 @@ let
          nixpkgs.system = lib.mkDefault system;
        })
        ++
        (optional (pkgs_ != null) {
          _module.args.pkgs = lib.mkForce pkgs_;
        (optional (pkgs != null) {
          # This should be default priority, so it conflicts with any user-defined pkgs.
          nixpkgs.pkgs = pkgs;
        })
      );
  };

@@ -109,10 +107,10 @@ let

  nixosWithUserModules = noUserModules.extendModules { modules = allUserModules; };

  withExtraArgs = nixosSystem: nixosSystem // {
  withExtraAttrs = configuration: configuration // {
    inherit extraArgs;
    inherit (nixosSystem._module.args) pkgs;
    extendModules = args: withExtraArgs (nixosSystem.extendModules args);
    inherit (configuration._module.args) pkgs;
    extendModules = args: withExtraAttrs (configuration.extendModules args);
  };
in
withWarnings (withExtraArgs nixosWithUserModules)
withWarnings (withExtraAttrs nixosWithUserModules)

@@ -80,6 +80,10 @@ in rec {
    optional (attr ? ${name} && !elem attr.${name} values)
      "Systemd ${group} field `${name}' cannot have value `${toString attr.${name}}'.";

  assertValuesSomeOfOr = name: values: default: group: attr:
    optional (attr ? ${name} && !(all (x: elem x values) (splitString " " attr.${name}) || attr.${name} == default))
      "Systemd ${group} field `${name}' cannot have value `${toString attr.${name}}'.";
|
||||
|
||||
assertHasField = name: group: attr:
|
||||
optional (!(attr ? ${name}))
|
||||
"Systemd ${group} field `${name}' must exist.";
|
||||
|
|
|
@ -4,19 +4,20 @@
|
|||
, qemu_pkg ? qemu_test
|
||||
, coreutils
|
||||
, imagemagick_light
|
||||
, libtiff
|
||||
, netpbm
|
||||
, qemu_test
|
||||
, socat
|
||||
, ruff
|
||||
, tesseract4
|
||||
, vde2
|
||||
, extraPythonPackages ? (_ : [])
|
||||
}:
|
||||
|
||||
python3Packages.buildPythonApplication rec {
|
||||
python3Packages.buildPythonApplication {
|
||||
pname = "nixos-test-driver";
|
||||
version = "1.1";
|
||||
src = ./.;
|
||||
format = "pyproject";
|
||||
|
||||
propagatedBuildInputs = [
|
||||
coreutils
|
||||
|
@ -31,14 +32,13 @@ python3Packages.buildPythonApplication rec {
|
|||
++ extraPythonPackages python3Packages;
|
||||
|
||||
doCheck = true;
|
||||
nativeCheckInputs = with python3Packages; [ mypy pylint black ];
|
||||
nativeCheckInputs = with python3Packages; [ mypy ruff black ];
|
||||
checkPhase = ''
|
||||
mypy --disallow-untyped-defs \
|
||||
--no-implicit-optional \
|
||||
--pretty \
|
||||
--no-color-output \
|
||||
--ignore-missing-imports ${src}/test_driver
|
||||
pylint --errors-only --enable=unused-import ${src}/test_driver
|
||||
black --check --diff ${src}/test_driver
|
||||
echo -e "\x1b[32m## run mypy\x1b[0m"
|
||||
mypy test_driver extract-docstrings.py
|
||||
echo -e "\x1b[32m## run ruff\x1b[0m"
|
||||
ruff .
|
||||
echo -e "\x1b[32m## run black\x1b[0m"
|
||||
black --check --diff .
|
||||
'';
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
import ast
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
"""
|
||||
This program takes all the Machine class methods and prints its methods in
|
||||
|
@ -40,10 +41,13 @@ some_function(param1, param2)
|
|||
|
||||
"""
|
||||
|
||||
assert len(sys.argv) == 2
|
||||
|
||||
with open(sys.argv[1], "r") as f:
|
||||
module = ast.parse(f.read())
|
||||
def main() -> None:
|
||||
if len(sys.argv) != 2:
|
||||
print(f"Usage: {sys.argv[0]} <path-to-test-driver>")
|
||||
sys.exit(1)
|
||||
|
||||
module = ast.parse(Path(sys.argv[1]).read_text())
|
||||
|
||||
class_definitions = (node for node in module.body if isinstance(node, ast.ClassDef))
|
||||
|
||||
|
@ -55,12 +59,16 @@ function_definitions = [
|
|||
]
|
||||
function_definitions.sort(key=lambda x: x.name)
|
||||
|
||||
for f in function_definitions:
|
||||
docstr = ast.get_docstring(f)
|
||||
for function in function_definitions:
|
||||
docstr = ast.get_docstring(function)
|
||||
if docstr is not None:
|
||||
args = ", ".join((a.arg for a in f.args.args[1:]))
|
||||
args = ", ".join(a.arg for a in function.args.args[1:])
|
||||
args = f"({args})"
|
||||
|
||||
docstr = "\n".join((f" {l}" for l in docstr.strip().splitlines()))
|
||||
docstr = "\n".join(f" {line}" for line in docstr.strip().splitlines())
|
||||
|
||||
print(f"{f.name}{args}\n\n:{docstr[1:]}\n")
|
||||
print(f"{function.name}{args}\n\n:{docstr[1:]}\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
44
nixos/lib/test-driver/pyproject.toml
Normal file
44
nixos/lib/test-driver/pyproject.toml
Normal file
|
@ -0,0 +1,44 @@
|
|||
[build-system]
|
||||
requires = ["setuptools"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "nixos-test-driver"
|
||||
version = "0.0.0"
|
||||
|
||||
[project.scripts]
|
||||
nixos-test-driver = "test_driver:main"
|
||||
generate-driver-symbols = "test_driver:generate_driver_symbols"
|
||||
|
||||
[tool.setuptools.packages]
|
||||
find = {}
|
||||
|
||||
[tool.setuptools.package-data]
|
||||
test_driver = ["py.typed"]
|
||||
|
||||
[tool.ruff]
|
||||
line-length = 88
|
||||
|
||||
select = ["E", "F", "I", "U", "N"]
|
||||
ignore = ["E501"]
|
||||
|
||||
# xxx: we can import https://pypi.org/project/types-colorama/ here
|
||||
[[tool.mypy.overrides]]
|
||||
module = "colorama.*"
|
||||
ignore_missing_imports = true
|
||||
|
||||
[[tool.mypy.overrides]]
|
||||
module = "ptpython.*"
|
||||
ignore_missing_imports = true
|
||||
|
||||
[tool.black]
|
||||
line-length = 88
|
||||
target-version = ['py39']
|
||||
include = '\.pyi?$'
|
||||
|
||||
[tool.mypy]
|
||||
python_version = "3.10"
|
||||
warn_redundant_casts = true
|
||||
disallow_untyped_calls = true
|
||||
disallow_untyped_defs = true
|
||||
no_implicit_optional = true
|
|
@ -1,14 +0,0 @@
|
|||
from setuptools import setup, find_packages
|
||||
|
||||
setup(
|
||||
name="nixos-test-driver",
|
||||
version='1.1',
|
||||
packages=find_packages(),
|
||||
package_data={"test_driver": ["py.typed"]},
|
||||
entry_points={
|
||||
"console_scripts": [
|
||||
"nixos-test-driver=test_driver:main",
|
||||
"generate-driver-symbols=test_driver:generate_driver_symbols"
|
||||
]
|
||||
},
|
||||
)
|
2
nixos/lib/test-driver/shell.nix
Normal file
2
nixos/lib/test-driver/shell.nix
Normal file
|
@ -0,0 +1,2 @@
|
|||
with import ../../.. {};
|
||||
pkgs.callPackage ./default.nix {}
|
|
@ -1,11 +1,12 @@
|
|||
from pathlib import Path
|
||||
import argparse
|
||||
import ptpython.repl
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import ptpython.repl
|
||||
|
||||
from test_driver.logger import rootlog
|
||||
from test_driver.driver import Driver
|
||||
from test_driver.logger import rootlog
|
||||
|
||||
|
||||
class EnvDefault(argparse.Action):
|
||||
|
@ -25,9 +26,7 @@ class EnvDefault(argparse.Action):
|
|||
)
|
||||
if required and default:
|
||||
required = False
|
||||
super(EnvDefault, self).__init__(
|
||||
default=default, required=required, nargs=nargs, **kwargs
|
||||
)
|
||||
super().__init__(default=default, required=required, nargs=nargs, **kwargs)
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None): # type: ignore
|
||||
setattr(namespace, self.dest, values)
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Iterator, List, Union, Optional, Callable, ContextManager
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, ContextManager, Dict, Iterator, List, Optional, Union
|
||||
|
||||
from test_driver.logger import rootlog
|
||||
from test_driver.machine import Machine, NixStartScript, retry
|
||||
from test_driver.vlan import VLan
|
||||
from test_driver.polling_condition import PollingCondition
|
||||
from test_driver.vlan import VLan
|
||||
|
||||
|
||||
def get_tmp_dir() -> Path:
|
||||
|
|
|
@ -1,13 +1,17 @@
|
|||
from colorama import Style, Fore
|
||||
from contextlib import contextmanager
|
||||
from typing import Any, Dict, Iterator
|
||||
from queue import Queue, Empty
|
||||
from xml.sax.saxutils import XMLGenerator
|
||||
# mypy: disable-error-code="no-untyped-call"
|
||||
# drop the above line when mypy is upgraded to include
|
||||
# https://github.com/python/typeshed/commit/49b717ca52bf0781a538b04c0d76a5513f7119b8
|
||||
import codecs
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import unicodedata
|
||||
from contextlib import contextmanager
|
||||
from queue import Empty, Queue
|
||||
from typing import Any, Dict, Iterator
|
||||
from xml.sax.saxutils import XMLGenerator
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
|
||||
class Logger:
|
||||
|
|
|
@ -1,7 +1,3 @@
|
|||
from contextlib import _GeneratorContextManager, nullcontext
|
||||
from pathlib import Path
|
||||
from queue import Queue
|
||||
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
|
||||
import base64
|
||||
import io
|
||||
import os
|
||||
|
@ -16,6 +12,10 @@ import sys
|
|||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from contextlib import _GeneratorContextManager, nullcontext
|
||||
from pathlib import Path
|
||||
from queue import Queue
|
||||
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
|
||||
|
||||
from test_driver.logger import rootlog
|
||||
|
||||
|
@ -236,14 +236,14 @@ class LegacyStartCommand(StartCommand):
|
|||
|
||||
def __init__(
|
||||
self,
|
||||
netBackendArgs: Optional[str] = None,
|
||||
netFrontendArgs: Optional[str] = None,
|
||||
netBackendArgs: Optional[str] = None, # noqa: N803
|
||||
netFrontendArgs: Optional[str] = None, # noqa: N803
|
||||
hda: Optional[Tuple[Path, str]] = None,
|
||||
cdrom: Optional[str] = None,
|
||||
usb: Optional[str] = None,
|
||||
bios: Optional[str] = None,
|
||||
qemuBinary: Optional[str] = None,
|
||||
qemuFlags: Optional[str] = None,
|
||||
qemuBinary: Optional[str] = None, # noqa: N803
|
||||
qemuFlags: Optional[str] = None, # noqa: N803
|
||||
):
|
||||
if qemuBinary is not None:
|
||||
self._cmd = qemuBinary
|
||||
|
@ -599,7 +599,7 @@ class Machine:
|
|||
return (-1, output.decode())
|
||||
|
||||
# Get the return code
|
||||
self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
|
||||
self.shell.send(b"echo ${PIPESTATUS[0]}\n")
|
||||
rc = int(self._next_newline_closed_block_from_shell().strip())
|
||||
|
||||
return (rc, output.decode(errors="replace"))
|
||||
|
@ -843,6 +843,9 @@ class Machine:
|
|||
|
||||
while True:
|
||||
chunk = self.shell.recv(1024)
|
||||
# No need to print empty strings, it means we are waiting.
|
||||
if len(chunk) == 0:
|
||||
continue
|
||||
self.log(f"Guest shell says: {chunk!r}")
|
||||
# NOTE: for this to work, nothing must be printed after this line!
|
||||
if b"Spawning backdoor root shell..." in chunk:
|
||||
|
@ -1129,7 +1132,7 @@ class Machine:
|
|||
return
|
||||
|
||||
assert self.shell
|
||||
self.shell.send("poweroff\n".encode())
|
||||
self.shell.send(b"poweroff\n")
|
||||
self.wait_for_shutdown()
|
||||
|
||||
def crash(self) -> None:
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
from typing import Callable, Optional
|
||||
from math import isfinite
|
||||
import time
|
||||
from math import isfinite
|
||||
from typing import Callable, Optional
|
||||
|
||||
from .logger import rootlog
|
||||
|
||||
|
||||
class PollingConditionFailed(Exception):
|
||||
class PollingConditionError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
|
@ -60,7 +60,7 @@ class PollingCondition:
|
|||
|
||||
def maybe_raise(self) -> None:
|
||||
if not self.check():
|
||||
raise PollingConditionFailed(self.status_message(False))
|
||||
raise PollingConditionError(self.status_message(False))
|
||||
|
||||
def status_message(self, status: bool) -> str:
|
||||
return f"Polling condition {'succeeded' if status else 'failed'}: {self.description}"
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
from pathlib import Path
|
||||
import io
|
||||
import os
|
||||
import pty
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
from test_driver.logger import rootlog
|
||||
|
||||
|
|
|
@ -28,15 +28,17 @@ let
|
|||
{
|
||||
virtualisation.qemu.package = testModuleArgs.config.qemu.package;
|
||||
})
|
||||
(optionalAttrs (!config.node.pkgsReadOnly) {
|
||||
({ options, ... }: {
|
||||
key = "nodes.nix-pkgs";
|
||||
config = {
|
||||
config = optionalAttrs (!config.node.pkgsReadOnly) (
|
||||
mkIf (!options.nixpkgs.pkgs.isDefined) {
|
||||
# Ensure we do not use aliases. Ideally this is only set
|
||||
# when the test framework is used by Nixpkgs NixOS tests.
|
||||
nixpkgs.config.allowAliases = false;
|
||||
# TODO: switch to nixpkgs.hostPlatform and make sure containers-imperative test still evaluates.
|
||||
nixpkgs.system = hostPkgs.stdenv.hostPlatform.system;
|
||||
};
|
||||
}
|
||||
);
|
||||
})
|
||||
testModuleArgs.config.extraBaseModules
|
||||
];
|
||||
|
|
|
@ -177,6 +177,7 @@ rec {
|
|||
genJqSecretsReplacementSnippet' = attr: set: output:
|
||||
let
|
||||
secrets = recursiveGetAttrWithJqPrefix set attr;
|
||||
stringOrDefault = str: def: if str == "" then def else str;
|
||||
in ''
|
||||
if [[ -h '${output}' ]]; then
|
||||
rm '${output}'
|
||||
|
@ -195,10 +196,12 @@ rec {
|
|||
(attrNames secrets))
|
||||
+ "\n"
|
||||
+ "${pkgs.jq}/bin/jq >'${output}' "
|
||||
+ lib.escapeShellArg (concatStringsSep
|
||||
+ lib.escapeShellArg (stringOrDefault
|
||||
(concatStringsSep
|
||||
" | "
|
||||
(imap1 (index: name: ''${name} = $ENV.secret${toString index}'')
|
||||
(attrNames secrets)))
|
||||
".")
|
||||
+ ''
|
||||
<<'EOF'
|
||||
${builtins.toJSON set}
|
||||
|
|
|
@ -127,8 +127,8 @@ in
|
|||
${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT)
|
||||
"-I${config.environment.sessionVariables.XKB_CONFIG_ROOT}"
|
||||
} \
|
||||
-model '${xkbModel}' -layout '${layout}' \
|
||||
-option '${xkbOptions}' -variant '${xkbVariant}' > "$out"
|
||||
-model '${xkb.model}' -layout '${xkb.layout}' \
|
||||
-option '${xkb.options}' -variant '${xkb.variant}' > "$out"
|
||||
'');
|
||||
}
|
||||
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
options = {
|
||||
gnu = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
When enabled, GNU software is chosen by default whenever a there is
|
||||
a choice between GNU and non-GNU software (e.g., GNU lsh
|
||||
vs. OpenSSH).
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.gnu {
|
||||
|
||||
environment.systemPackages = with pkgs;
|
||||
# TODO: Adjust `requiredPackages' from `system-path.nix'.
|
||||
# TODO: Add Inetutils once it has the new `ifconfig'.
|
||||
[ parted
|
||||
#fdisk # XXX: GNU fdisk currently fails to build and it's redundant
|
||||
# with the `parted' command.
|
||||
nano zile
|
||||
texinfo # for the stand-alone Info reader
|
||||
]
|
||||
++ lib.optional (!stdenv.isAarch32) grub2;
|
||||
|
||||
|
||||
# GNU GRUB, where available.
|
||||
boot.loader.grub.enable = !pkgs.stdenv.isAarch32;
|
||||
|
||||
# GNU lsh.
|
||||
services.openssh.enable = false;
|
||||
services.lshd.enable = true;
|
||||
programs.ssh.startAgent = false;
|
||||
services.xserver.startGnuPGAgent = true;
|
||||
|
||||
# TODO: GNU dico.
|
||||
# TODO: GNU Inetutils' inetd.
|
||||
# TODO: GNU Pies.
|
||||
};
|
||||
}
|
|
@ -42,8 +42,7 @@ let
|
|||
];
|
||||
|
||||
defaultPackageNames =
|
||||
[ "nano"
|
||||
"perl"
|
||||
[ "perl"
|
||||
"rsync"
|
||||
"strace"
|
||||
];
|
||||
|
|
|
@ -341,6 +341,20 @@ let
|
|||
administrator before being able to use the system again.
|
||||
'';
|
||||
};
|
||||
|
||||
linger = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Whether to enable lingering for this user. If true, systemd user
|
||||
units will start at boot, rather than starting at login and stopping
|
||||
at logout. This is the declarative equivalent of running
|
||||
`loginctl enable-linger` for this user.
|
||||
|
||||
If false, user units will not be started until the user logs in, and
|
||||
may be stopped on logout depending on the settings in `logind.conf`.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkMerge
|
||||
|
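A hedged usage sketch for the new option (the user name `alice` is illustrative):

```nix
{
  # start alice's systemd user units at boot instead of at first login
  users.users.alice.linger = true;
}
```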
@@ -460,6 +474,8 @@ let
    gidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) cfg.groups) "gid";
    sdInitrdUidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) config.boot.initrd.systemd.users) "uid";
    sdInitrdGidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) config.boot.initrd.systemd.groups) "gid";
    groupNames = lib.mapAttrsToList (n: g: g.name) cfg.groups;
    usersWithoutExistingGroup = lib.filterAttrs (n: u: !lib.elem u.group groupNames) cfg.users;

    spec = pkgs.writeText "users-groups.json" (builtins.toJSON {
      inherit (cfg) mutableUsers;

@@ -672,6 +688,20 @@ in {
      '';
    };

    system.activationScripts.update-lingering = let
      lingerDir = "/var/lib/systemd/linger";
      lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
      lingeringUsersFile = builtins.toFile "lingering-users"
        (concatStrings (map (s: "${s}\n")
          (sort (a: b: a < b) lingeringUsers))); # this sorting is important for `comm` to work correctly
    in stringAfter [ "users" ] ''
      if [ -e ${lingerDir} ] ; then
        cd ${lingerDir}
        ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
        ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl enable-linger
      fi
    '';

    # Warn about user accounts with deprecated password hashing schemes
    system.activationScripts.hashes = {
      deps = [ "users" ];

@@ -711,7 +741,8 @@ in {

    environment.profiles = [
      "$HOME/.nix-profile"
      "\${XDG_STATE_HOME:-$HOME/.local/state}/nix/profile"
      "\${XDG_STATE_HOME}/nix/profile"
      "$HOME/.local/state/nix/profile"
      "/etc/profiles/per-user/$USER"
    ];

@@ -761,6 +792,18 @@ in {
      { assertion = !cfg.enforceIdUniqueness || (sdInitrdUidsAreUnique && sdInitrdGidsAreUnique);
        message = "systemd initrd UIDs and GIDs must be unique!";
      }
      { assertion = usersWithoutExistingGroup == {};
        message =
          let
            errUsers = lib.attrNames usersWithoutExistingGroup;
            missingGroups = lib.unique (lib.mapAttrsToList (n: u: u.group) usersWithoutExistingGroup);
            mkConfigHint = group: "users.groups.${group} = {};";
          in ''
            The following users have a primary group that is undefined: ${lib.concatStringsSep " " errUsers}
            Hint: Add this to your NixOS configuration:
              ${lib.concatStringsSep "\n  " (map mkConfigHint missingGroups)}
          '';
      }
      { # If mutableUsers is false, to prevent users creating a
        # configuration that locks them out of the system, ensure that
        # there is at least one "privileged" account that has a

@@ -69,7 +69,7 @@ in {
    })
    (mkIf cfg.enableAllFirmware {
      assertions = [{
        assertion = !cfg.enableAllFirmware || config.nixpkgs.config.allowUnfree;
        assertion = !cfg.enableAllFirmware || pkgs.config.allowUnfree;
        message = ''
          the list of hardware.enableAllFirmware contains non-redistributable licensed firmware files.
          This requires nixpkgs.config.allowUnfree to be true.

@@ -66,36 +66,32 @@ let
  };

  filterDTBs = src: if cfg.filter == null
    then "${src}/dtbs"
    then src
    else
      pkgs.runCommand "dtbs-filtered" {} ''
        mkdir -p $out
        cd ${src}/dtbs
        cd ${src}
        find . -type f -name '${cfg.filter}' -print0 \
          | xargs -0 cp -v --no-preserve=mode --target-directory $out --parents
      '';

  filteredDTBs = filterDTBs cfg.kernelPackage;

  # Compile single Device Tree overlay source
  # file (.dts) into its compiled variant (.dtbo)
  compileDTS = name: f: pkgs.callPackage({ stdenv, dtc }: stdenv.mkDerivation {
    name = "${name}-dtbo";

    nativeBuildInputs = [ dtc ];

    buildCommand = ''
      $CC -E -nostdinc -I${getDev cfg.kernelPackage}/lib/modules/${cfg.kernelPackage.modDirVersion}/source/scripts/dtc/include-prefixes -undef -D__DTS__ -x assembler-with-cpp ${f} | \
        dtc -I dts -O dtb -@ -o $out
    '';
  }) {};
  filteredDTBs = filterDTBs cfg.dtbSource;

  # Fill in `dtboFile` for each overlay if not set already.
  # Existence of one of these is guarded by assertion below
  withDTBOs = xs: flip map xs (o: o // { dtboFile =
    let
      includePaths = ["${getDev cfg.kernelPackage}/lib/modules/${cfg.kernelPackage.modDirVersion}/source/scripts/dtc/include-prefixes"] ++ cfg.dtboBuildExtraIncludePaths;
      extraPreprocessorFlags = cfg.dtboBuildExtraPreprocessorFlags;
    in
    if o.dtboFile == null then
      if o.dtsFile != null then compileDTS o.name o.dtsFile
      else compileDTS o.name (pkgs.writeText "dts" o.dtsText)
      let
        dtsFile = if o.dtsFile == null then (pkgs.writeText "dts" o.dtsText) else o.dtsFile;
      in
      pkgs.deviceTree.compileDTS {
        name = "${o.name}-dtbo";
        inherit includePaths extraPreprocessorFlags dtsFile;
      }
    else o.dtboFile; } );

in

@@ -121,7 +117,39 @@ in
        example = literalExpression "pkgs.linux_latest";
        type = types.path;
        description = lib.mdDoc ''
          Kernel package containing the base device-tree (.dtb) to boot. Uses
          Kernel package where device tree include directory is from. Also used as default source of dtb package to apply overlays to
        '';
      };

      dtboBuildExtraPreprocessorFlags = mkOption {
        default = [];
        example = literalExpression "[ \"-DMY_DTB_DEFINE\" ]";
        type = types.listOf types.str;
        description = lib.mdDoc ''
          Additional flags to pass to the preprocessor during dtbo compilations
        '';
      };

      dtboBuildExtraIncludePaths = mkOption {
        default = [];
        example = literalExpression ''
          [
            ./my_custom_include_dir_1
            ./custom_include_dir_2
          ]
        '';
        type = types.listOf types.path;
        description = lib.mdDoc ''
          Additional include paths that will be passed to the preprocessor when creating the final .dts to compile into .dtbo
        '';
      };

      dtbSource = mkOption {
        default = "${cfg.kernelPackage}/dtbs";
        defaultText = literalExpression "\${cfg.kernelPackage}/dtbs";
        type = types.path;
        description = lib.mdDoc ''
          Path to dtb directory that overlays and other processing will be applied to. Uses
          device trees bundled with the Linux kernel by default.
        '';
      };

@@ -20,9 +20,6 @@ in
{

  config = mkIf enabled {

    nixpkgs.config.xorg.abiCompat = "1.20";

    services.xserver.drivers = singleton
      { name = "amdgpu"; modules = [ package ]; display = true; };


@@ -163,15 +163,15 @@ in
  # console = {
  #   font = "Lat2-Terminus16";
  #   keyMap = "us";
  #   useXkbConfig = true; # use xkbOptions in tty.
  #   useXkbConfig = true; # use xkb.options in tty.
  # };

  $xserverConfig

  $desktopConfiguration
  # Configure keymap in X11
  # services.xserver.layout = "us";
  # services.xserver.xkbOptions = "eurosign:e,caps:escape";
  # services.xserver.xkb.layout = "us";
  # services.xserver.xkb.options = "eurosign:e,caps:escape";

  # Enable CUPS to print documents.
  # services.printing.enable = true;

@@ -4,14 +4,15 @@ with lib;

let
  cfg = config.services.locate;
  isMLocate = hasPrefix "mlocate" cfg.locate.name;
  isPLocate = hasPrefix "plocate" cfg.locate.name;
  isMorPLocate = (isMLocate || isPLocate);
  isFindutils = hasPrefix "findutils" cfg.locate.name;
  isMLocate = hasPrefix "mlocate" cfg.package.name;
  isPLocate = hasPrefix "plocate" cfg.package.name;
  isMorPLocate = isMLocate || isPLocate;
  isFindutils = hasPrefix "findutils" cfg.package.name;
in
{
  imports = [
    (mkRenamedOptionModule [ "services" "locate" "period" ] [ "services" "locate" "interval" ])
    (mkRenamedOptionModule [ "services" "locate" "locate" ] [ "services" "locate" "package" ])
    (mkRemovedOptionModule [ "services" "locate" "includeStore" ] "Use services.locate.prunePaths")
  ];

@@ -25,10 +26,10 @@ in
      '';
    };

    locate = mkOption {
    package = mkOption {
      type = package;
      default = pkgs.findutils.locate;
      defaultText = literalExpression "pkgs.findutils";
      defaultText = literalExpression "pkgs.findutils.locate";
      example = literalExpression "pkgs.mlocate";
      description = lib.mdDoc ''
        The locate implementation to use

@@ -216,25 +217,23 @@ in
        setgid = true;
        setuid = false;
      };
      mlocate = (mkIf isMLocate {
      mlocate = mkIf isMLocate {
        group = "mlocate";
        source = "${cfg.locate}/bin/locate";
      });
      plocate = (mkIf isPLocate {
        source = "${cfg.package}/bin/locate";
      };
      plocate = mkIf isPLocate {
        group = "plocate";
        source = "${cfg.locate}/bin/plocate";
      });
        source = "${cfg.package}/bin/plocate";
      };
    in
    mkIf isMorPLocate {
      locate = mkMerge [ common mlocate plocate ];
      plocate = (mkIf isPLocate (mkMerge [ common plocate ]));
      plocate = mkIf isPLocate (mkMerge [ common plocate ]);
    };

    nixpkgs.config = { locate.dbfile = cfg.output; };
    environment.systemPackages = [ cfg.package ];

    environment.systemPackages = [ cfg.locate ];

    environment.variables = mkIf (!isMorPLocate) { LOCATE_PATH = cfg.output; };
    environment.variables.LOCATE_PATH = cfg.output;

    environment.etc = {
      # write /etc/updatedb.conf for manual calls to `updatedb`

@@ -270,13 +269,13 @@ in
          args = concatLists (map toFlags [ "pruneFS" "pruneNames" "prunePaths" ]);
        in
        ''
          exec ${cfg.locate}/bin/updatedb \
          exec ${cfg.package}/bin/updatedb \
            --output ${toString cfg.output} ${concatStringsSep " " args} \
            --prune-bind-mounts ${if cfg.pruneBindMounts then "yes" else "no"} \
            ${concatStringsSep " " cfg.extraFlags}
        ''
        else ''
          exec ${cfg.locate}/bin/updatedb \
          exec ${cfg.package}/bin/updatedb \
            ${optionalString (cfg.localuser != null && !isMorPLocate) "--localuser=${cfg.localuser}"} \
            --output=${toString cfg.output} ${concatStringsSep " " cfg.extraFlags}
        '';
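Given the `locate` to `package` rename above, a configuration that previously set `services.locate.locate` would now read roughly like this sketch (the old name is still rewritten by `mkRenamedOptionModule`):

```nix
{ pkgs, ... }:
{
  services.locate = {
    enable = true;
    # formerly services.locate.locate
    package = pkgs.mlocate;
  };
}
```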
@@ -176,16 +176,12 @@ in
      '';
      type = types.listOf overlayType;
      description = lib.mdDoc ''
        List of overlays to use with the Nix Packages collection.
        (For details, see the Nixpkgs documentation.) It allows
        you to override packages globally. Each function in the list
        takes as an argument the *original* Nixpkgs.
        The first argument should be used for finding dependencies, and
        the second should be used for overriding recipes.
        List of overlays to apply to Nixpkgs.
        This option allows modifying the Nixpkgs package set accessed through the `pkgs` module argument.

        If `nixpkgs.pkgs` is set, overlays specified here
        will be applied after the overlays that were already present
        in `nixpkgs.pkgs`.
        For details, see the [Overlays chapter in the Nixpkgs manual](https://nixos.org/manual/nixpkgs/stable/#chap-overlays).

        If the {option}`nixpkgs.pkgs` option is set, overlays specified using `nixpkgs.overlays` will be applied after the overlays that were already included in `nixpkgs.pkgs`.
      '';
    };
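For reference, a minimal overlay as described by this option might look like the following sketch (the `final`/`prev` argument names are conventional; `myHello` is illustrative):

```nix
{
  nixpkgs.overlays = [
    (final: prev: {
      # expose an extra attribute without touching the rest of the package set
      myHello = prev.hello;
    })
  ];
}
```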
@@ -383,6 +379,16 @@ in
          the legacy definitions.
        '';
      }
      {
        assertion = opt.pkgs.isDefined -> cfg.config == {};
        message = ''
          Your system configures nixpkgs with an externally created instance.
          `nixpkgs.config` options should be passed when creating the instance instead.

          Current value:
          ${lib.generators.toPretty { multiline = true; } opt.config}
        '';
      }
    ];
  };

@@ -6,7 +6,6 @@
  ./config/fonts/fontdir.nix
  ./config/fonts/ghostscript.nix
  ./config/fonts/packages.nix
  ./config/gnu.nix
  ./config/gtk/gtk-icon-cache.nix
  ./config/i18n.nix
  ./config/iproute2.nix

@@ -346,6 +345,7 @@
  ./services/audio/squeezelite.nix
  ./services/audio/tts.nix
  ./services/audio/wyoming/faster-whisper.nix
  ./services/audio/wyoming/openwakeword.nix
  ./services/audio/wyoming/piper.nix
  ./services/audio/ympd.nix
  ./services/backup/automysqlbackup.nix

@@ -984,6 +984,7 @@
  ./services/networking/ndppd.nix
  ./services/networking/nebula.nix
  ./services/networking/netbird.nix
  ./services/networking/netclient.nix
  ./services/networking/networkd-dispatcher.nix
  ./services/networking/networkmanager.nix
  ./services/networking/nextdns.nix

@@ -1081,6 +1082,7 @@
  ./services/networking/thelounge.nix
  ./services/networking/tinc.nix
  ./services/networking/tinydns.nix
  ./services/networking/tinyproxy.nix
  ./services/networking/tmate-ssh-server.nix
  ./services/networking/tox-bootstrapd.nix
  ./services/networking/tox-node.nix

@@ -1211,6 +1213,7 @@
  ./services/web-apps/atlassian/confluence.nix
  ./services/web-apps/atlassian/crowd.nix
  ./services/web-apps/atlassian/jira.nix
  ./services/web-apps/audiobookshelf.nix
  ./services/web-apps/bookstack.nix
  ./services/web-apps/calibre-web.nix
  ./services/web-apps/coder.nix

@@ -1284,6 +1287,7 @@
  ./services/web-apps/powerdns-admin.nix
  ./services/web-apps/prosody-filer.nix
  ./services/web-apps/restya-board.nix
  ./services/web-apps/rimgo.nix
  ./services/web-apps/sftpgo.nix
  ./services/web-apps/rss-bridge.nix
  ./services/web-apps/selfoss.nix

@@ -1300,6 +1304,7 @@
  ./services/web-apps/writefreely.nix
  ./services/web-apps/youtrack.nix
  ./services/web-apps/zabbix.nix
  ./services/web-apps/zitadel.nix
  ./services/web-servers/agate.nix
  ./services/web-servers/apache-httpd/default.nix
  ./services/web-servers/caddy/default.nix

@@ -102,8 +102,6 @@ with lib;
    jq # for closureInfo
    # For boot.initrd.systemd
    makeInitrdNGTool
    systemdStage1
    systemdStage1Network
  ];

  boot.swraid.enable = true;

@@ -24,7 +24,7 @@ in {
  security.wrappers.bandwhich = {
    owner = "root";
    group = "root";
    capabilities = "cap_net_raw,cap_net_admin+ep";
    capabilities = "cap_sys_ptrace,cap_dac_read_search,cap_net_raw,cap_net_admin+ep";
    source = "${pkgs.bandwhich}/bin/bandwhich";
  };
};

@@ -27,6 +27,6 @@ with lib;
    "opt/brave/native-messaging-hosts/${appId}".source = source "hosts/chromium";
    "opt/brave/policies/managed/${appId}".source = source "policies/chromium";
  };
  nixpkgs.config.firefox.enableBrowserpass = true;
  programs.firefox.wrapperConfig.enableBrowserpass = true;
};
}

@@ -22,7 +22,6 @@ in
    # be specified here; do so in the default value of programs.less.envVariables instead
    PAGER = mkDefault "less";
    EDITOR = mkDefault "nano";
    XDG_CONFIG_DIRS = [ "/etc/xdg" ]; # needs to be before profile-relative paths to allow changes through environment.etc
  };

  # since we set PAGER to this above, make sure it's installed

@@ -33,6 +32,11 @@ in
    "/run/current-system/sw"
  ];

  environment.sessionVariables =
    {
      XDG_CONFIG_DIRS = [ "/etc/xdg" ]; # needs to be before profile-relative paths to allow changes through environment.etc
    };

  # TODO: move most of these elsewhere
  environment.profileRelativeSessionVariables =
    { PATH = [ "/bin" ];

@@ -36,6 +36,12 @@ in
      ];
    };

    wrapperConfig = mkOption {
      type = types.attrs;
      default = {};
      description = mdDoc "Arguments to pass to Firefox wrapper";
    };

    policies = mkOption {
      type = policyFormat.type;
      default = { };

@@ -227,16 +233,22 @@ in
      ] ++ optionals nmh.passff [
        passff-host
      ];
    })
  ];
    cfg = let
      # copy-pasted from the wrapper; TODO: figure out fix
      applicationName = cfg.package.binaryName or (lib.getName cfg.package);

  nixpkgs.config.firefox = {
      nixpkgsConfig = pkgs.config.${applicationName} or {};
      optionConfig = cfg.wrapperConfig;
      nmhConfig = {
        enableBrowserpass = nmh.browserpass;
        enableBukubrow = nmh.bukubrow;
        enableTridactylNative = nmh.tridactyl;
        enableUgetIntegrator = nmh.ugetIntegrator;
        enableFXCastBridge = nmh.fxCast;
      };
    in nixpkgsConfig // optionConfig // nmhConfig;
    })
  ];

  environment.etc =
    let

@@ -6,6 +6,10 @@ let

  cfg = config.programs.gnupg;

  agentSettingsFormat = pkgs.formats.keyValue {
    mkKeyValue = lib.generators.mkKeyValueDefault { } " ";
  };

  xserverCfg = config.services.xserver;

  defaultPinentryFlavor =

@@ -82,6 +86,18 @@ in
      '';
    };

    agent.settings = mkOption {
      type = agentSettingsFormat.type;
      default = { };
      example = {
        default-cache-ttl = 600;
      };
      description = lib.mdDoc ''
        Configuration for /etc/gnupg/gpg-agent.conf.
        See {manpage}`gpg-agent(1)` for supported options.
      '';
    };

    dirmngr.enable = mkOption {
      type = types.bool;
      default = false;

@@ -92,17 +108,20 @@ in
  };

  config = mkIf cfg.agent.enable {
    environment.etc."gnupg/gpg-agent.conf".text =
      lib.optionalString (cfg.agent.pinentryFlavor != null) ''
        pinentry-program ${pkgs.pinentry.${cfg.agent.pinentryFlavor}}/bin/pinentry
      '';
    programs.gnupg.agent.settings = {
      pinentry-program = lib.mkIf (cfg.agent.pinentryFlavor != null)
        "${pkgs.pinentry.${cfg.agent.pinentryFlavor}}/bin/pinentry";
    };

    environment.etc."gnupg/gpg-agent.conf".source =
      agentSettingsFormat.generate "gpg-agent.conf" cfg.agent.settings;

    # This overrides the systemd user unit shipped with the gnupg package
    systemd.user.services.gpg-agent = {
      unitConfig = {
        Description = "GnuPG cryptographic agent and passphrase cache";
        Documentation = "man:gpg-agent(1)";
        Requires = [ "gpg-agent.socket" ];
        Requires = [ "sockets.target" ];
      };
      serviceConfig = {
        ExecStart = "${cfg.package}/bin/gpg-agent --supervised";
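A hedged usage sketch for the new `agent.settings` option, based on the example value above:

```nix
{
  programs.gnupg.agent = {
    enable = true;
    # rendered into /etc/gnupg/gpg-agent.conf as "default-cache-ttl 600"
    settings.default-cache-ttl = 600;
  };
}
```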
@@ -2,14 +2,16 @@

let
  cfg = config.programs.nano;
  LF = "\n";
in

{
  ###### interface

  options = {
    programs.nano = {
      enable = lib.mkEnableOption (lib.mdDoc "nano") // {
        default = true;
      };

      package = lib.mkPackageOptionMD pkgs "nano" { };

      nanorc = lib.mkOption {
        type = lib.types.lines;

@@ -24,28 +26,22 @@ in
          set tabsize 2
        '';
      };

      syntaxHighlight = lib.mkOption {
        type = lib.types.bool;
        default = true;
        default = false;
        description = lib.mdDoc "Whether to enable syntax highlight for various languages.";
      };
    };
  };

  ###### implementation

  config = lib.mkIf (cfg.nanorc != "" || cfg.syntaxHighlight) {
    environment.etc.nanorc.text = lib.concatStringsSep LF (
      ( lib.optionals cfg.syntaxHighlight [
        "# The line below is added because value of programs.nano.syntaxHighlight is set to true"
        ''include "${pkgs.nano}/share/nano/*.nanorc"''
        ""
      ])
      ++ ( lib.optionals (cfg.nanorc != "") [
        "# The lines below have been set from value of programs.nano.nanorc"
        cfg.nanorc
      ])
    );
  config = lib.mkIf cfg.enable {
    environment = {
      etc.nanorc.text = (lib.optionalString cfg.syntaxHighlight ''
        # load syntax highlighting files
        include "${cfg.package}/share/nano/*.nanorc"
      '') + cfg.nanorc;
      systemPackages = [ cfg.package ];
    };
  };

}
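After this rewrite, a typical configuration would look roughly like the following sketch (note that `syntaxHighlight` now defaults to false):

```nix
{
  programs.nano = {
    enable = true;
    syntaxHighlight = true;
    nanorc = ''
      set nowrap
      set tabsize 2
    '';
  };
}
```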
@@ -8,11 +8,23 @@ in
{
  options.programs.openvpn3 = {
    enable = mkEnableOption (lib.mdDoc "the openvpn3 client");
    package = mkOption {
      type = types.package;
      default = pkgs.openvpn3.override {
        enableSystemdResolved = config.services.resolved.enable;
      };
      defaultText = literalExpression ''pkgs.openvpn3.override {
        enableSystemdResolved = config.services.resolved.enable;
      }'';
      description = lib.mdDoc ''
        Which package to use for `openvpn3`.
      '';
    };
  };

  config = mkIf cfg.enable {
    services.dbus.packages = with pkgs; [
      openvpn3
    services.dbus.packages = [
      cfg.package
    ];

    users.users.openvpn = {

@@ -25,8 +37,8 @@ in
      gid = config.ids.gids.openvpn;
    };

    environment.systemPackages = with pkgs; [
      openvpn3
    environment.systemPackages = [
      cfg.package
    ];
  };

20
nixos/modules/programs/projecteur.nix
Normal file

@@ -0,0 +1,20 @@
{ config, lib, pkgs, ... }:

let
  cfg = config.programs.projecteur;
in
{
  options.programs.projecteur = {
    enable = lib.mkEnableOption (lib.mdDoc "projecteur");
    package = lib.mkPackageOptionMD pkgs "projecteur" { };
  };

  config = lib.mkIf cfg.enable {
    environment.systemPackages = [ cfg.package ];
    services.udev.packages = [ cfg.package ];
  };

  meta = {
    maintainers = with lib.maintainers; [ benneti drupol ];
  };
}

@@ -66,10 +66,10 @@ in
  };

  systemd.tmpfiles.rules = let
    user = config.services.greetd.settings.default_session.user;
    group = config.users.users.${config.services.greetd.settings.default_session.user}.group;
  in [
    "d /var/log/regreet 0755 greeter ${user} - -"
    "d /var/cache/regreet 0755 greeter ${user} - -"
    "d /var/log/regreet 0755 greeter ${group} - -"
    "d /var/cache/regreet 0755 greeter ${group} - -"
  ];
};
}

@@ -5,6 +5,23 @@ with lib;
let
  cfg = config.programs.rust-motd;
  format = pkgs.formats.toml { };

  # Order the sections in the TOML according to the order of sections
  # in `cfg.order`.
  motdConf = pkgs.runCommand "motd.conf"
    {
      __structuredAttrs = true;
      inherit (cfg) order settings;
      nativeBuildInputs = [ pkgs.remarshal pkgs.jq ];
    }
    ''
      cat "$NIX_ATTRS_JSON_FILE" \
        | jq '.settings as $settings
              | .order
              | map({ key: ., value: $settings."\(.)" })
              | from_entries' -r \
        | json2toml /dev/stdin "$out"
    '';
in {
  options.programs.rust-motd = {
    enable = mkEnableOption (lib.mdDoc "rust-motd");

@@ -27,10 +44,43 @@ in {
        For possible formats, please refer to {manpage}`systemd.time(7)`.
      '';
    };
    settings = mkOption {
      type = types.submodule {
        freeformType = format.type;
    order = mkOption {
      type = types.listOf types.str;
      default = attrNames cfg.settings;
      defaultText = literalExpression "attrNames cfg.settings";
      description = mdDoc ''
        The order of the sections in [](#opt-programs.rust-motd.settings).
        By default they are ordered alphabetically.

        Context: since attribute sets in Nix are always
        ordered alphabetically internally this means that

        ```nix
        {
          uptime = { /* ... */ };
          banner = { /* ... */ };
        }
        ```

        will still have `banner` displayed before `uptime`.

        To work around that, this option can be used to define the order of all keys,
        i.e.

        ```nix
        {
          order = [
            "uptime"
            "banner"
          ];
        }
        ```

        makes sure that `uptime` is placed before `banner` in the motd.
      '';
    };
    settings = mkOption {
      type = types.attrsOf format.type;
      description = mdDoc ''
        Settings on what to generate. Please read the
        [upstream documentation](https://github.com/rust-motd/rust-motd/blob/main/README.md#configuration)

@@ -45,14 +95,21 @@ in {
          `programs.rust-motd` is incompatible with `users.motd`!
        '';
      }
      { assertion = sort (a: b: a < b) cfg.order == attrNames cfg.settings;
        message = ''
          Please ensure that every section from `programs.rust-motd.settings` is present in
          `programs.rust-motd.order`.
        '';
      }
    ];
    systemd.services.rust-motd = {
      path = with pkgs; [ bash ];
      documentation = [ "https://github.com/rust-motd/rust-motd/blob/v${pkgs.rust-motd.version}/README.md" ];
      description = "motd generator";
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        ExecStart = "${pkgs.writeShellScript "update-motd" ''
          ${pkgs.rust-motd}/bin/rust-motd ${format.generate "motd.conf" cfg.settings} > motd
          ${pkgs.rust-motd}/bin/rust-motd ${motdConf} > motd
        ''}";
        CapabilityBoundingSet = [ "" ];
        LockPersonality = true;
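Tying the new `order` option to `settings`, a configuration could be sketched as follows (section contents elided):

```nix
{
  programs.rust-motd = {
    enable = true;
    settings = {
      banner = { };
      uptime = { };
    };
    # render uptime before banner, overriding the alphabetical default
    order = [ "uptime" "banner" ];
  };
}
```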
@@ -42,6 +42,11 @@ in {
      <https://github.com/swaywm/sway/wiki> and
      "man 5 sway" for more information'');

    enableRealtime = mkEnableOption (lib.mdDoc ''
      add CAP_SYS_NICE capability on `sway` binary for realtime scheduling
      privileges. This may improve latency and reduce stuttering, specially in
      high load scenarios'') // { default = true; };

    package = mkOption {
      type = with types; nullOr package;
      default = defaultSwayPackage;

@@ -149,6 +154,14 @@ in {
          "sway/config".source = mkOptionDefault "${cfg.package}/etc/sway/config";
        };
      };
      security.wrappers = mkIf (cfg.enableRealtime && cfg.package != null) {
        sway = {
          owner = "root";
          group = "root";
          source = "${cfg.package}/bin/sway";
          capabilities = "cap_sys_nice+ep";
        };
      };
      # To make a Sway session available if a display manager like SDDM is enabled:
      services.xserver.displayManager.sessionPackages = optionals (cfg.package != null) [ cfg.package ]; }
    (import ./wayland-session.nix { inherit lib pkgs; })

@@ -5,8 +5,29 @@ let

  parentWrapperDir = dirOf wrapperDir;

  securityWrapper = sourceProg : pkgs.callPackage ./wrapper.nix {
  # This is security-sensitive code, and glibc vulns happen from time to time.
  # musl is security-focused and generally more minimal, so it's a better choice here.
  # The dynamic linker is still a fairly complex piece of code, and the wrappers are
  # quite small, so linking it statically is more appropriate.
  securityWrapper = sourceProg : pkgs.pkgsStatic.callPackage ./wrapper.nix {
    inherit sourceProg;

    # glibc definitions of insecure environment variables
    #
    # We extract the single header file we need into its own derivation,
    # so that we don't have to pull full glibc sources to build wrappers.
    #
    # They're taken from pkgs.glibc so that we don't have to keep as close
    # an eye on glibc changes. Not every relevant variable is in this header,
    # so we maintain a slightly stricter list in wrapper.c itself as well.
    unsecvars = lib.overrideDerivation (pkgs.srcOnly pkgs.glibc)
      ({ name, ... }: {
        name = "${name}-unsecvars";
        installPhase = ''
          mkdir $out
          cp sysdeps/generic/unsecvars.h $out
        '';
      });
  };

  fileModeType =

@@ -17,14 +17,15 @@
#include <syscall.h>
#include <byteswap.h>

// imported from glibc
#include "unsecvars.h"

#ifndef SOURCE_PROG
#error SOURCE_PROG should be defined via preprocessor commandline
#endif

// aborts when false, printing the failed expression
#define ASSERT(expr) ((expr) ? (void) 0 : assert_failure(#expr))
// aborts when returns non-zero, printing the failed expression and errno
#define MUSTSUCCEED(expr) ((expr) ? print_errno_and_die(#expr) : (void) 0)

extern char **environ;

@@ -45,12 +46,6 @@ static noreturn void assert_failure(const char *assertion) {
  abort();
}

static noreturn void print_errno_and_die(const char *assertion) {
  fprintf(stderr, "Call `%s` in NixOS's wrapper.c failed: %s\n", assertion, strerror(errno));
  fflush(stderr);
  abort();
}

int get_last_cap(unsigned *last_cap) {
  FILE* file = fopen("/proc/sys/kernel/cap_last_cap", "r");
  if (file == NULL) {

@@ -151,9 +146,55 @@ static int make_caps_ambient(const char *self_path) {
  return 0;
}

// These are environment variable aliases for glibc tunables.
// This list shouldn't grow further, since this is a legacy mechanism.
// Any future tunables are expected to only be accessible through GLIBC_TUNABLES.
//
// They are not included in the glibc-provided UNSECURE_ENVVARS list,
// since any SUID executable ignores them. This wrapper also serves
// executables that are merely granted ambient capabilities, rather than
// being SUID, and hence don't run in secure mode. We'd like them to
// defend those in depth as well, so we clear these explicitly.
//
// Except for MALLOC_CHECK_ (which is marked SXID_ERASE), these are all
// marked SXID_IGNORE (ignored in secure mode), so even the glibc version
// of this wrapper would leave them intact.
#define UNSECURE_ENVVARS_TUNABLES \
  "MALLOC_CHECK_\0" \
  "MALLOC_TOP_PAD_\0" \
  "MALLOC_PERTURB_\0" \
  "MALLOC_MMAP_THRESHOLD_\0" \
  "MALLOC_TRIM_THRESHOLD_\0" \
  "MALLOC_MMAP_MAX_\0" \
  "MALLOC_ARENA_MAX\0" \
  "MALLOC_ARENA_TEST\0"

int main(int argc, char **argv) {
  ASSERT(argc >= 1);

  int debug = getenv(wrapper_debug) != NULL;

  // Drop insecure environment variables explicitly
  //
  // glibc does this automatically in SUID binaries, but we'd like to cover this:
  //
  // a) before it gets to glibc
  // b) in binaries that are only granted ambient capabilities by the wrapper,
  //    but don't run with an altered effective UID/GID, nor directly gain
  //    capabilities themselves, and thus don't run in secure mode.
  //
  // We're using musl, which doesn't drop environment variables in secure mode,
  // and we'd also like glibc-specific variables to be covered.
  //
  // If we don't explicitly unset them, it's quite easy to just set LD_PRELOAD,
  // have it passed through to the wrapped program, and gain privileges.
  for (char *unsec = UNSECURE_ENVVARS_TUNABLES UNSECURE_ENVVARS; *unsec; unsec = strchr(unsec, 0) + 1) {
    if (debug) {
      fprintf(stderr, "unsetting %s\n", unsec);
    }
    unsetenv(unsec);
  }

  // Read the capabilities set on the wrapper and raise them in to
  // the ambient set so the program we're wrapping receives the
  // capabilities too!

@@ -1,11 +1,10 @@
{ stdenv, linuxHeaders, sourceProg, debug ? false }:
{ stdenv, unsecvars, linuxHeaders, sourceProg, debug ? false }:
# For testing:
# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
stdenv.mkDerivation {
  name = "security-wrapper";
  buildInputs = [ linuxHeaders ];
  dontUnpack = true;
  hardeningEnable = [ "pie" ];
  CFLAGS = [
    ''-DSOURCE_PROG="${sourceProg}"''
  ] ++ (if debug then [

@@ -16,6 +15,6 @@ stdenv.mkDerivation {
  dontStrip = debug;
  installPhase = ''
    mkdir -p $out/bin
    $CC $CFLAGS ${./wrapper.c} -o $out/bin/security-wrapper
    $CC $CFLAGS ${./wrapper.c} -I${unsecvars} -o $out/bin/security-wrapper
  '';
}

@@ -57,6 +57,7 @@ in
        ReadWritePaths = "";
        BindReadOnlyPaths = [
          # gonic can access scrobbling services
          "-/etc/resolv.conf"
          "-/etc/ssl/certs/ca-certificates.crt"
          builtins.storeDir
          cfg.settings.podcast-path

157
nixos/modules/services/audio/wyoming/openwakeword.nix
Normal file

@@ -0,0 +1,157 @@
{ config
, lib
, pkgs
, ...
}:

let
  cfg = config.services.wyoming.openwakeword;

  inherit (lib)
    concatMapStringsSep
    escapeShellArgs
    mkOption
    mdDoc
    mkEnableOption
    mkIf
    mkPackageOptionMD
    types
    ;

  inherit (builtins)
    toString
    ;

  models = [
    # wyoming_openwakeword/models/*.tflite
    "alexa"
    "hey_jarvis"
    "hey_mycroft"
    "hey_rhasspy"
    "ok_nabu"
  ];

in

{
  meta.buildDocsInSandbox = false;

  options.services.wyoming.openwakeword = with types; {
    enable = mkEnableOption (mdDoc "Wyoming openWakeWord server");

    package = mkPackageOptionMD pkgs "wyoming-openwakeword" { };

    uri = mkOption {
      type = strMatching "^(tcp|unix)://.*$";
      default = "tcp://0.0.0.0:10400";
      example = "tcp://192.0.2.1:5000";
      description = mdDoc ''
        URI to bind the wyoming server to.
      '';
    };

    models = mkOption {
      type = listOf (enum models);
      default = models;
      description = mdDoc ''
        List of wake word models that should be made available.
      '';
    };

    preloadModels = mkOption {
      type = listOf (enum models);
      default = [
        "ok_nabu"
      ];
      description = mdDoc ''
        List of wake word models to preload after startup.
      '';
    };

    threshold = mkOption {
      type = float;
      default = 0.5;
      description = mdDoc ''
        Activation threshold (0-1), where higher means fewer activations.

        See trigger level for the relationship between activations and
        wake word detections.
      '';
      apply = toString;
    };

    triggerLevel = mkOption {
      type = int;
      default = 1;
      description = mdDoc ''
        Number of activations before a detection is registered.

        A higher trigger level means fewer detections.
      '';
      apply = toString;
    };

    extraArgs = mkOption {
      type = listOf str;
      default = [ ];
      description = mdDoc ''
        Extra arguments to pass to the server commandline.
      '';
      apply = escapeShellArgs;
    };
  };

  config = mkIf cfg.enable {
    systemd.services."wyoming-openwakeword" = {
      description = "Wyoming openWakeWord server";
      after = [
        "network-online.target"
      ];
      wantedBy = [
        "multi-user.target"
      ];
      serviceConfig = {
        DynamicUser = true;
        User = "wyoming-openwakeword";
        # https://github.com/home-assistant/addons/blob/master/openwakeword/rootfs/etc/s6-overlay/s6-rc.d/openwakeword/run
        ExecStart = ''
          ${cfg.package}/bin/wyoming-openwakeword \
            --uri ${cfg.uri} \
            ${concatMapStringsSep " " (model: "--model ${model}") cfg.models} \
            ${concatMapStringsSep " " (model: "--preload-model ${model}") cfg.preloadModels} \
            --threshold ${cfg.threshold} \
            --trigger-level ${cfg.triggerLevel} ${cfg.extraArgs}
        '';
        CapabilityBoundingSet = "";
        DeviceAllow = "";
        DevicePolicy = "closed";
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        PrivateDevices = true;
        PrivateUsers = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectControlGroups = true;
        ProtectProc = "invisible";
        ProcSubset = "pid";
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_UNIX"
        ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RuntimeDirectory = "wyoming-openwakeword";
        SystemCallArchitectures = "native";
        SystemCallFilter = [
          "@system-service"
          "~@privileged"
        ];
        UMask = "0077";
      };
    };
  };
}
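A hedged example of enabling the new module, using only options defined above:

```nix
{
  services.wyoming.openwakeword = {
    enable = true;
    uri = "tcp://0.0.0.0:10400";
    preloadModels = [ "hey_jarvis" ];
    # fewer spurious activations than the 0.5 default
    threshold = 0.7;
  };
}
```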
@@ -84,8 +84,8 @@ let
    backupScript = mkBackupScript backupJobName cfg;
  in nameValuePair backupJobName {
    description = "BorgBackup job ${name}";
    path = with pkgs; [
      borgbackup openssh
    path = [
      config.services.borgbackup.package pkgs.openssh
    ];
    script = "exec " + optionalString cfg.inhibitsSleep ''\
      ${pkgs.systemd}/bin/systemd-inhibit \

@@ -137,7 +137,7 @@ let
    '');

  mkBorgWrapper = name: cfg: mkWrapperDrv {
    original = "${pkgs.borgbackup}/bin/borg";
    original = getExe config.services.borgbackup.package;
    name = "borg-job-${name}";
    set = { BORG_REPO = cfg.repo; } // (mkPassEnv cfg) // cfg.environment;
  };

@@ -231,6 +231,8 @@ in {

  ###### interface

  options.services.borgbackup.package = mkPackageOptionMD pkgs "borgbackup" { };

  options.services.borgbackup.jobs = mkOption {
    description = lib.mdDoc ''
      Deduplicating backups using BorgBackup.

@@ -769,6 +771,7 @@ in {

    users = mkMerge (mapAttrsToList mkUsersConfig repos);

    environment.systemPackages = with pkgs; [ borgbackup ] ++ (mapAttrsToList mkBorgWrapper jobs);
    environment.systemPackages =
      [ config.services.borgbackup.package ] ++ (mapAttrsToList mkBorgWrapper jobs);
  });
}

@@ -6,32 +6,50 @@ let
  cfg = config.services.borgmatic;
  settingsFormat = pkgs.formats.yaml { };

  repository = with types; submodule {
    options = {
      path = mkOption {
        type = str;
        description = mdDoc ''
          Path to the repository
        '';
      };
      label = mkOption {
        type = str;
        description = mdDoc ''
          Label to the repository
        '';
      };
    };
  };
  cfgType = with types; submodule {
    freeformType = settingsFormat.type;
    options.location = {
    options = {
      source_directories = mkOption {
        type = listOf str;
        type = nullOr (listOf str);
        default = null;
        description = mdDoc ''
          List of source directories to backup (required). Globs and
          tildes are expanded.
          List of source directories and files to backup. Globs and tildes are
          expanded. Do not backslash spaces in path names.
        '';
        example = [ "/home" "/etc" "/var/log/syslog*" ];
        example = [ "/home" "/etc" "/var/log/syslog*" "/home/user/path with spaces" ];
      };
      repositories = mkOption {
        type = listOf str;
        type = nullOr (listOf repository);
        default = null;
        description = mdDoc ''
          Paths to local or remote repositories (required). Tildes are
          expanded. Multiple repositories are backed up to in
          sequence. Borg placeholders can be used. See the output of
          "borg help placeholders" for details. See ssh_command for
          SSH options like identity file or port. If systemd service
          is used, then add local repository paths in the systemd
          service file to the ReadWritePaths list.
          A required list of local or remote repositories with paths and
          optional labels (which can be used with the --repository flag to
          select a repository). Tildes are expanded. Multiple repositories are
          backed up to in sequence. Borg placeholders can be used. See the
          output of "borg help placeholders" for details. See ssh_command for
          SSH options like identity file or port. If systemd service is used,
          then add local repository paths in the systemd service file to the
          ReadWritePaths list.
        '';
        example = [
          "ssh://user@backupserver/./sourcehostname.borg"
          "ssh://user@backupserver/./{fqdn}"
          "/var/local/backups/local.borg"
          { path="ssh://user@backupserver/./sourcehostname.borg"; label="backupserver"; }
          { path="/mnt/backup"; label="local"; }
        ];
      };
    };
@ -62,6 +80,13 @@ in
|
|||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
warnings = []
|
||||
++ optional (cfg.settings != null && cfg.settings.location != null)
|
||||
"`services.borgmatic.settings.location` is deprecated, please move your options out of sections to the global scope"
|
||||
++ optional (catAttrs "location" (attrValues cfg.configurations) != [])
|
||||
"`services.borgmatic.configurations.<name>.location` is deprecated, please move your options out of sections to the global scope"
|
||||
;
|
||||
|
||||
environment.systemPackages = [ pkgs.borgmatic ];
|
||||
|
||||
environment.etc = (optionalAttrs (cfg.settings != null) { "borgmatic/config.yaml".source = cfgfile; }) //
|
||||
|
|
|
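A minimal sketch of the new repository format, grounded in the option change above (a list of `{ path, label }` submodules instead of plain strings); the concrete paths and labels are made up for illustration:

  services.borgmatic = {
    enable = true;
    settings = {
      source_directories = [ "/home" "/etc" ];
      repositories = [
        { path = "ssh://user@backupserver/./{fqdn}"; label = "remote"; }
        { path = "/mnt/backup"; label = "local"; }
      ];
    };
  };
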
@ -166,7 +166,7 @@ in
        { command = "${pkgs.coreutils}/bin/mkdir"; options = [ "NOPASSWD" ]; }
        { command = "${pkgs.coreutils}/bin/readlink"; options = [ "NOPASSWD" ]; }
        # for ssh, they are not the same as the ones hard-coded in ${pkgs.btrbk}
        { command = "/run/current-system/bin/btrfs"; options = [ "NOPASSWD" ]; }
        { command = "/run/current-system/sw/bin/btrfs"; options = [ "NOPASSWD" ]; }
        { command = "/run/current-system/sw/bin/mkdir"; options = [ "NOPASSWD" ]; }
        { command = "/run/current-system/sw/bin/readlink"; options = [ "NOPASSWD" ]; }
      ];

@ -182,7 +182,7 @@ in
        (doasCmdNoPass "${pkgs.coreutils}/bin/mkdir")
        (doasCmdNoPass "${pkgs.coreutils}/bin/readlink")
        # for ssh, they are not the same as the ones hard-coded in ${pkgs.btrbk}
        (doasCmdNoPass "/run/current-system/bin/btrfs")
        (doasCmdNoPass "/run/current-system/sw/bin/btrfs")
        (doasCmdNoPass "/run/current-system/sw/bin/mkdir")
        (doasCmdNoPass "/run/current-system/sw/bin/readlink")

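The path fix above matters because on NixOS the system profile's binaries live under `/run/current-system/sw/bin`, not `/run/current-system/bin`, so a sudo rule naming the latter would never match the command actually invoked over ssh. A hedged sketch of the kind of rule involved, written against the generic `security.sudo.extraRules` option rather than the module's exact mechanism; the service user name is an assumption:

  security.sudo.extraRules = [{
    users = [ "btrbk" ];  # assumed service user, for illustration
    commands = [
      { command = "/run/current-system/sw/bin/btrfs"; options = [ "NOPASSWD" ]; }
      { command = "/run/current-system/sw/bin/mkdir"; options = [ "NOPASSWD" ]; }
      { command = "/run/current-system/sw/bin/readlink"; options = [ "NOPASSWD" ]; }
    ];
  }];
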
@ -15,6 +15,7 @@ let

  defaultMasterCfg = pkgs.writeText "master.cfg" ''
    from buildbot.plugins import *
    ${cfg.extraImports}
    factory = util.BuildFactory()
    c = BuildmasterConfig = dict(
      workers = [${concatStringsSep "," cfg.workers}],

@ -28,6 +29,7 @@ let
      schedulers = [ ${concatStringsSep "," cfg.schedulers} ],
      builders = [ ${concatStringsSep "," cfg.builders} ],
      services = [ ${concatStringsSep "," cfg.reporters} ],
      configurators = [ ${concatStringsSep "," cfg.configurators} ],
    )
    for step in [ ${concatStringsSep "," cfg.factorySteps} ]:
      factory.addStep(step)

@ -79,6 +81,15 @@ in {
      ];
    };

    configurators = mkOption {
      type = types.listOf types.str;
      description = lib.mdDoc "Configurator Steps, see https://docs.buildbot.net/latest/manual/configuration/configurators.html";
      default = [];
      example = [
        "util.JanitorConfigurator(logHorizon=timedelta(weeks=4), hour=12, dayOfWeek=6)"
      ];
    };

    enable = mkOption {
      type = types.bool;
      default = false;

@ -91,6 +102,13 @@ in {
      default = "c['buildbotNetUsageData'] = None";
    };

    extraImports = mkOption {
      type = types.str;
      description = lib.mdDoc "Extra python imports to prepend to master.cfg";
      default = "";
      example = "from buildbot.process.project import Project";
    };

    masterCfg = mkOption {
      type = types.path;
      description = lib.mdDoc "Optionally pass master.cfg path. Other options in this configuration will be ignored.";

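A sketch of the two new options working together, assuming the module's usual `services.buildbot-master` prefix (the prefix is not visible in this hunk). Note how `extraImports` supplies the `timedelta` name that the `configurators` example relies on:

  services.buildbot-master = {
    enable = true;
    # Prepended to master.cfg, so names used below are in scope.
    extraImports = "from datetime import timedelta";
    configurators = [
      "util.JanitorConfigurator(logHorizon=timedelta(weeks=4), hour=12, dayOfWeek=6)"
    ];
  };
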
@ -187,7 +187,7 @@ in {
    serviceConfig = {
      User = pgmanage;
      Group = pgmanage;
      ExecStart = "${pkgs.pgmanage}/sbin/pgmanage -c ${confFile}" +
      ExecStart = "${cfg.package}/sbin/pgmanage -c ${confFile}" +
        optionalString cfg.localOnly " --local-only=true";
    };
  };

@ -106,7 +106,7 @@ in
    identMap = mkOption {
      type = types.lines;
      default = "";
      example = literalExample ''
      example = ''
        map-name-0 system-username-0 database-username-0
        map-name-1 system-username-1 database-username-1
      '';

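For context, `identMap` is written in the `map-name system-username database-username` format shown in the example above and feeds PostgreSQL's ident mapping. A small hedged illustration mapping the local root and postgres system users onto the `postgres` database role; the map name and users are illustrative:

  services.postgresql = {
    enable = true;
    identMap = ''
      superuser_map root     postgres
      superuser_map postgres postgres
    '';
  };
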
@ -42,6 +42,6 @@ in

    services.dbus.packages = [ pkgs.gnome-browser-connector ];

    nixpkgs.config.firefox.enableGnomeExtensions = true;
    programs.firefox.wrapperConfig.enableGnomeExtensions = true;
  };
}

@ -2,7 +2,6 @@
with lib;
let
  cfg = config.services.keyd;
  settingsFormat = pkgs.formats.ini { };

  keyboardOptions = { ... }: {
    options = {

@ -16,7 +15,7 @@ let
      };

      settings = mkOption {
        type = settingsFormat.type;
        type = (pkgs.formats.ini { }).type;
        default = { };
        example = {
          main = {

@ -37,6 +36,20 @@ let
          See <https://github.com/rvaiya/keyd> for how to configure.
        '';
      };

      extraConfig = mkOption {
        type = types.lines;
        default = "";
        example = ''
          [control+shift]
          h = left
        '';
        description = lib.mdDoc ''
          Extra configuration that is appended to the end of the file.
          **Do not** write `ids` section here, use a separate option for it.
          You can use this option to define compound layers that must always be defined after the layers they are composed of.
        '';
      };
    };
  };
in

@ -85,15 +98,12 @@ in
    environment.etc = mapAttrs'
      (name: options:
        nameValuePair "keyd/${name}.conf" {
          source = pkgs.runCommand "${name}.conf"
            {
              ids = ''
          text = ''
            [ids]
            ${concatStringsSep "\n" options.ids}
          '';
              passAsFile = [ "ids" ];
            } ''
          cat $idsPath <(echo) ${settingsFormat.generate "keyd-${name}.conf" options.settings} >$out

            ${generators.toINI {} options.settings}
            ${options.extraConfig}
          '';
        })
      cfg.keyboards;

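A sketch of the new `extraConfig` option in use, following the `services.keyd.keyboards.<name>` layout visible above; the capslock remap is illustrative:

  services.keyd = {
    enable = true;
    keyboards.default = {
      ids = [ "*" ];
      settings.main.capslock = "overload(control, esc)";
      # Compound layers must come after the layers they are composed of,
      # so they go in extraConfig, which is appended to the generated file.
      extraConfig = ''
        [control+shift]
        h = left
      '';
    };
  };
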
@ -457,10 +457,12 @@ in {
    "inkbird"
    "keymitt_ble"
    "led_ble"
    "medcom_ble"
    "melnor"
    "moat"
    "mopeka"
    "oralb"
    "private_ble_device"
    "qingping"
    "rapt_ble"
    "ruuvi_gateway"

@ -3,7 +3,7 @@
with lib;
let
  cfg = config.services.ulogd;
  settingsFormat = pkgs.formats.ini { };
  settingsFormat = pkgs.formats.ini { listsAsDuplicateKeys = true; };
  settingsFile = settingsFormat.generate "ulogd.conf" cfg.settings;
in {
  options = {

@ -12,22 +12,34 @@ in {

    settings = mkOption {
      example = {
        global.stack = "stack=log1:NFLOG,base1:BASE,pcap1:PCAP";
        global.stack = [
          "log1:NFLOG,base1:BASE,ifi1:IFINDEX,ip2str1:IP2STR,print1:PRINTPKT,emu1:LOGEMU"
          "log1:NFLOG,base1:BASE,pcap1:PCAP"
        ];

        log1.group = 2;

        pcap1 = {
          file = "/var/log/ulogd.pcap";
          sync = 1;
          file = "/var/log/ulogd.pcap";
        };

        emu1 = {
          sync = 1;
          file = "/var/log/ulogd_pkts.log";
        };
      };
      type = settingsFormat.type;
      default = { };
      description = lib.mdDoc "Configuration for ulogd. See {file}`/share/doc/ulogd/` in `pkgs.ulogd.doc`.";
      description = lib.mdDoc
        "Configuration for ulogd. See {file}`/share/doc/ulogd/` in `pkgs.ulogd.doc`.";
    };

    logLevel = mkOption {
      type = types.enum [ 1 3 5 7 8 ];
      default = 5;
      description = lib.mdDoc "Log level (1 = debug, 3 = info, 5 = notice, 7 = error, 8 = fatal)";
      description = lib.mdDoc
        "Log level (1 = debug, 3 = info, 5 = notice, 7 = error, 8 = fatal)";
    };
  };
};

@ -40,7 +52,10 @@ in {
    before = [ "network-pre.target" ];

    serviceConfig = {
      ExecStart = "${pkgs.ulogd}/bin/ulogd -c ${settingsFile} --verbose --loglevel ${toString cfg.logLevel}";
      ExecStart =
        "${pkgs.ulogd}/bin/ulogd -c ${settingsFile} --verbose --loglevel ${
          toString cfg.logLevel
        }";
      ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
    };
  };

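The switch to `listsAsDuplicateKeys = true` is what makes the new example valid: INI has no list syntax, so a list value is rendered as one `stack=` line per element, which is how ulogd declares multiple stacks. A hedged sketch of input and roughly the output it generates (assuming the module's `enable` option, which is elided from this hunk):

  services.ulogd = {
    enable = true;
    settings.global.stack = [
      "log1:NFLOG,base1:BASE,pcap1:PCAP"
      "log1:NFLOG,base1:BASE,emu1:LOGEMU"
    ];
    # With listsAsDuplicateKeys, this renders roughly as:
    #   [global]
    #   stack=log1:NFLOG,base1:BASE,pcap1:PCAP
    #   stack=log1:NFLOG,base1:BASE,emu1:LOGEMU
  };
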
@ -314,7 +314,7 @@ in {
      queue_dir = "$var_dir/queue";
      template_dir = "$var_dir/templates";
      log_dir = "/var/log/mailman";
      lock_dir = "$var_dir/lock";
      lock_dir = "/run/mailman/lock";
      etc_dir = "/etc";
      pid_file = "/run/mailman/master.pid";
    };

@ -7,7 +7,7 @@ in
  options.services.matrix-synapse.sliding-sync = {
    enable = lib.mkEnableOption (lib.mdDoc "sliding sync");

    package = lib.mkPackageOption pkgs "matrix-sliding-sync" { };
    package = lib.mkPackageOptionMD pkgs "matrix-sliding-sync" { };

    settings = lib.mkOption {
      type = lib.types.submodule {

@ -44,7 +44,7 @@ in
        };
      };
      default = { };
      description = ''
      description = lib.mdDoc ''
        Freeform environment variables passed to the sliding sync proxy.
        Refer to <https://github.com/matrix-org/sliding-sync#setup> for all supported values.
      '';

@ -31,7 +31,7 @@ let
  clientConfig."m.homeserver".base_url = baseUrl;
  serverConfig."m.server" = "${fqdn}:443";
  mkWellKnown = data: ''
    add_header Content-Type application/json;
    default_type application/json;
    add_header Access-Control-Allow-Origin *;
    return 200 '${builtins.toJSON data}';
  '';

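The swap from `add_header Content-Type` to `default_type` uses nginx's dedicated directive for the response MIME type, instead of emitting a second, potentially conflicting header. For context, a hedged sketch of how `mkWellKnown` is typically wired into a virtual host; the exact wiring in the module may differ:

  services.nginx.virtualHosts."${fqdn}" = {
    # Illustrative wiring; the real module composes these locations itself.
    locations."= /.well-known/matrix/server".extraConfig = mkWellKnown serverConfig;
    locations."= /.well-known/matrix/client".extraConfig = mkWellKnown clientConfig;
  };
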
@ -12,7 +12,9 @@ let

  usePostgresql = cfg.settings.database.name == "psycopg2";
  hasLocalPostgresDB = let args = cfg.settings.database.args; in
    usePostgresql && (!(args ? host) || (elem args.host [ "localhost" "127.0.0.1" "::1" ]));
    usePostgresql
    && (!(args ? host) || (elem args.host [ "localhost" "127.0.0.1" "::1" ]))
    && config.services.postgresql.enable;
  hasWorkers = cfg.workers != { };

  listenerSupportsResource = resource: listener:

@ -944,23 +946,6 @@ in {
          by synapse in `services.matrix-synapse.settings.listeners` or in one of the workers!
        '';
      }
      {
        assertion = hasLocalPostgresDB -> config.services.postgresql.enable;
        message = ''
          Cannot deploy matrix-synapse with a configuration for a local postgresql database
          and a missing postgresql service. Since 20.03 it's mandatory to manually configure the
          database (please read the thread in https://github.com/NixOS/nixpkgs/pull/80447 for
          further reference).

          If you
          - try to deploy a fresh synapse, you need to configure the database yourself. An example
            for this can be found in <nixpkgs/nixos/tests/matrix/synapse.nix>
          - update your existing matrix-synapse instance, you simply need to add `services.postgresql.enable = true`
            to your configuration.

          For further information about this update, please read the release-notes of 20.03 carefully.
        '';
      }
      {
        assertion = hasWorkers -> cfg.settings.redis.enabled;
        message = ''

@ -1034,9 +1019,11 @@ in {
        partOf = [ "matrix-synapse.target" ];
        wantedBy = [ "matrix-synapse.target" ];
        unitConfig.ReloadPropagatedFrom = "matrix-synapse.target";
        requires = optional hasLocalPostgresDB "postgresql.service";
      }
      else {
        after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
        requires = optional hasLocalPostgresDB "postgresql.service";
        wantedBy = [ "multi-user.target" ];
      };
      baseServiceConfig = {

@ -1070,7 +1057,7 @@ in {
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        ReadWritePaths = [ cfg.dataDir ];
        ReadWritePaths = [ cfg.dataDir cfg.settings.media_store_path ];
        RemoveIPC = true;
        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
        RestrictNamespaces = true;

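Folding `config.services.postgresql.enable` into `hasLocalPostgresDB` is what lets the old 20.03 assertion go away: a config pointing at a local database no longer fails evaluation when postgresql is unmanaged, and when it is managed, the synapse units gain `requires`/`after` ordering on `postgresql.service` automatically. A hedged sketch of a config that now picks up that ordering, using only options visible in these hunks:

  services.postgresql.enable = true;
  services.matrix-synapse = {
    enable = true;
    settings.database = {
      name = "psycopg2";        # usePostgresql
      args.host = "localhost";  # counts as a local database
    };
  };
  # The matrix-synapse unit now gets requires/after on postgresql.service.
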
Some files were not shown because too many files have changed in this diff.