Compare commits


3 Commits

Author | SHA1 | Message | Date
Herwig Hochleitner | e680e4d440 | ueforth: put derivation inputs on a single line (Co-authored-by: Ben Siraphob <bensiraphob@gmail.com>) | 2021-11-28 21:19:16 +01:00
Herwig Hochleitner | 3ebae8161d | ueforth: implement PR feedback | 2021-11-26 02:56:28 +01:00
Herwig Hochleitner | bd0ee00e20 | ueforth: init | 2021-11-16 13:07:09 +01:00
15026 changed files with 283049 additions and 469485 deletions


@@ -61,12 +61,19 @@ trim_trailing_whitespace = unset
[nixos/modules/services/networking/ircd-hybrid/*.{conf,in}]
trim_trailing_whitespace = unset
[nixos/tests/systemd-networkd-vrf.nix]
trim_trailing_whitespace = unset
[pkgs/build-support/dotnetenv/Wrapper/**]
end_of_line = unset
indent_style = unset
insert_final_newline = unset
trim_trailing_whitespace = unset
[pkgs/build-support/upstream-updater/**]
indent_style = unset
trim_trailing_whitespace = unset
[pkgs/development/compilers/elm/registry.dat]
end_of_line = unset
insert_final_newline = unset
@@ -80,3 +87,10 @@ trim_trailing_whitespace = unset
[pkgs/tools/misc/timidity/timidity.cfg]
trim_trailing_whitespace = unset
[pkgs/tools/security/enpass/data.json]
insert_final_newline = unset
trim_trailing_whitespace = unset
[pkgs/top-level/emscripten-packages.nix]
trim_trailing_whitespace = unset


@@ -1,32 +0,0 @@
# This file contains a list of commits that are not likely what you
# are looking for in a blame, such as mass reformatting or renaming.
# You can set this file as a default ignore file for blame by running
# the following command.
#
# $ git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# To temporarily not use this file add
# --ignore-revs-file=""
# to your blame command.
#
# The ignoreRevsFile can't be set globally due to blame failing if the file isn't present.
# To not have to set the option in every repository it is needed in,
# save the following script in your path with the name "git-bblame"
# now you can run
# $ git bblame $FILE
# to use the .git-blame-ignore-revs file if it is present.
#
# #!/usr/bin/env bash
# repo_root=$(git rev-parse --show-toplevel)
# if [[ -e $repo_root/.git-blame-ignore-revs ]]; then
#   git blame --ignore-revs-file="$repo_root/.git-blame-ignore-revs" $@
# else
#   git blame $@
# fi
# nixos/modules/rename: Sort alphabetically
1f71224fe86605ef4cd23ed327b3da7882dad382
# nixos: fix module paths in rename.nix
d08ede042b74b8199dc748323768227b88efcf7c

.github/CODEOWNERS (vendored, 142 changes)

@@ -6,10 +6,6 @@
#
# For documentation on this file, see https://help.github.com/articles/about-codeowners/
# Mentioned users will get code review requests.
#
# IMPORTANT NOTE: in order to actually get pinged, commit access is required.
# This also holds true for GitHub teams. Since almost none of our teams have write
# permissions, you need to list all members of the team with commit access individually.
# This file
/.github/CODEOWNERS @edolstra
@@ -38,11 +34,10 @@
/pkgs/top-level/release-cross.nix @Ericson2314 @matthewbauer
/pkgs/stdenv/generic @Ericson2314 @matthewbauer
/pkgs/stdenv/cross @Ericson2314 @matthewbauer
/pkgs/build-support/cc-wrapper @Ericson2314
/pkgs/build-support/bintools-wrapper @Ericson2314
/pkgs/build-support/cc-wrapper @Ericson2314 @orivej
/pkgs/build-support/bintools-wrapper @Ericson2314 @orivej
/pkgs/build-support/setup-hooks @Ericson2314
/pkgs/build-support/setup-hooks/auto-patchelf.sh @layus
/pkgs/build-support/setup-hooks/auto-patchelf.py @layus
/pkgs/build-support/setup-hooks/auto-patchelf.sh @aszlig
# Nixpkgs build-support
/pkgs/build-support/writers @lassulus @Profpatsch
@@ -75,12 +70,6 @@
# NixOS integration test driver
/nixos/lib/test-driver @tfc
# Systemd
/nixos/modules/system/boot/systemd.nix @NixOS/systemd
/nixos/modules/system/boot/systemd @NixOS/systemd
/nixos/lib/systemd-*.nix @NixOS/systemd
/pkgs/os-specific/linux/systemd @NixOS/systemd
# Updaters
## update.nix
/maintainers/scripts/update.nix @jtojnar
@@ -95,7 +84,8 @@
/pkgs/development/python-modules @FRidh @jonringer
/doc/languages-frameworks/python.section.md @FRidh
/pkgs/development/tools/poetry2nix @adisbladis
/pkgs/development/interpreters/python/hooks @FRidh @jonringer
/pkgs/development/interpreters/python/hooks @FRidh @jonringer @DavHau
/pkgs/development/interpreters/python/conda @DavHau
# Haskell
/doc/languages-frameworks/haskell.section.md @cdepillabout @sternenseemann @maralorn @expipiplus1
@@ -107,13 +97,13 @@
/pkgs/top-level/haskell-packages.nix @cdepillabout @sternenseemann @maralorn @expipiplus1
# Perl
/pkgs/development/interpreters/perl @stigtsp @zakame
/pkgs/top-level/perl-packages.nix @stigtsp @zakame
/pkgs/development/perl-modules @stigtsp @zakame
/pkgs/development/interpreters/perl @volth @stigtsp @zakame
/pkgs/top-level/perl-packages.nix @volth @stigtsp @zakame
/pkgs/development/perl-modules @volth @stigtsp @zakame
# R
/pkgs/applications/science/math/R @jbedo
/pkgs/development/r-modules @jbedo
/pkgs/applications/science/math/R @jbedo @bcdarwin
/pkgs/development/r-modules @jbedo @bcdarwin
# Ruby
/pkgs/development/interpreters/ruby @marsam
@@ -121,8 +111,11 @@
# Rust
/pkgs/development/compilers/rust @Mic92 @LnL7 @zowoq
/pkgs/build-support/rust @zowoq
/doc/languages-frameworks/rust.section.md @zowoq
/pkgs/build-support/rust @andir @zowoq
# Darwin-related
/pkgs/stdenv/darwin @NixOS/darwin-maintainers
/pkgs/os-specific/darwin @NixOS/darwin-maintainers
# C compilers
/pkgs/development/compilers/gcc @matthewbauer
@@ -132,14 +125,14 @@
/pkgs/top-level/unix-tools.nix @matthewbauer
/pkgs/development/tools/xcbuild @matthewbauer
# Audio
/nixos/modules/services/audio/botamusique.nix @mweinelt
/nixos/modules/services/audio/snapserver.nix @mweinelt
/nixos/tests/modules/services/audio/botamusique.nix @mweinelt
/nixos/tests/snapcast.nix @mweinelt
# Browsers
/pkgs/applications/networking/browsers/firefox @mweinelt
# Beam-related (Erlang, Elixir, LFE, etc)
/pkgs/development/beam-modules @gleber
/pkgs/development/interpreters/erlang @gleber
/pkgs/development/interpreters/lfe @gleber
/pkgs/development/interpreters/elixir @gleber
/pkgs/development/tools/build-managers/rebar @gleber
/pkgs/development/tools/build-managers/rebar3 @gleber
/pkgs/development/tools/erlang @gleber
# Jetbrains
/pkgs/applications/editors/jetbrains @edwtjo
@@ -167,30 +160,12 @@
/nixos/tests/hardened.nix @joachifm
/pkgs/os-specific/linux/kernel/hardened-config.nix @joachifm
# Home Automation
/nixos/modules/services/misc/home-assistant.nix @mweinelt
/nixos/modules/services/misc/zigbee2mqtt.nix @mweinelt
/nixos/tests/home-assistant.nix @mweinelt
/nixos/tests/zigbee2mqtt.nix @mweinelt
/pkgs/servers/home-assistant @mweinelt
/pkgs/tools/misc/esphome @mweinelt
# Network Time Daemons
/pkgs/tools/networking/chrony @thoughtpolice
/pkgs/tools/networking/ntp @thoughtpolice
/pkgs/tools/networking/openntpd @thoughtpolice
/nixos/modules/services/networking/ntp @thoughtpolice
# Network
/pkgs/tools/networking/kea/default.nix @mweinelt
/pkgs/tools/networking/babeld/default.nix @mweinelt
/nixos/modules/services/networking/babeld.nix @mweinelt
/nixos/modules/services/networking/kea.nix @mweinelt
/nixos/modules/services/networking/knot.nix @mweinelt
/nixos/tests/babeld.nix @mweinelt
/nixos/tests/kea.nix @mweinelt
/nixos/tests/knot.nix @mweinelt
# Dhall
/pkgs/development/dhall-modules @Gabriel439 @Profpatsch @ehmry
/pkgs/development/interpreters/dhall @Gabriel439 @Profpatsch @ehmry
@@ -199,7 +174,7 @@
/pkgs/development/idris-modules @Infinisil
# Bazel
/pkgs/development/tools/build-managers/bazel @Profpatsch
/pkgs/development/tools/build-managers/bazel @mboes @Profpatsch
# NixOS modules for e-mail and dns services
/nixos/modules/services/mail/mailman.nix @peti
@@ -208,18 +183,18 @@
/nixos/modules/services/mail/rspamd.nix @peti
# Emacs
/pkgs/applications/editors/emacs/elisp-packages @adisbladis
/pkgs/applications/editors/emacs @adisbladis
/pkgs/top-level/emacs-packages.nix @adisbladis
/pkgs/applications/editors/emacs-modes @adisbladis
/pkgs/applications/editors/emacs @adisbladis
/pkgs/top-level/emacs-packages.nix @adisbladis
# Neovim
/pkgs/applications/editors/neovim @jonringer @teto
# VimPlugins
/pkgs/applications/editors/vim/plugins @jonringer
/pkgs/misc/vim-plugins @jonringer @softinio
# VsCode Extensions
/pkgs/applications/editors/vscode/extensions @jonringer
/pkgs/misc/vscode-extensions @jonringer
# Prometheus exporter modules and tests
/nixos/modules/services/monitoring/prometheus/exporters.nix @WilliButz
@@ -227,61 +202,38 @@
/nixos/tests/prometheus-exporters.nix @WilliButz
# PHP interpreter, packages, extensions, tests and documentation
/doc/languages-frameworks/php.section.md @aanderse @etu @globin @ma27 @talyz
/nixos/tests/php @aanderse @etu @globin @ma27 @talyz
/pkgs/build-support/build-pecl.nix @aanderse @etu @globin @ma27 @talyz
/pkgs/development/interpreters/php @jtojnar @aanderse @etu @globin @ma27 @talyz
/pkgs/development/php-packages @aanderse @etu @globin @ma27 @talyz
/pkgs/top-level/php-packages.nix @jtojnar @aanderse @etu @globin @ma27 @talyz
/doc/languages-frameworks/php.section.md @NixOS/php @aanderse @etu @globin @ma27 @talyz
/nixos/tests/php @NixOS/php @aanderse @etu @globin @ma27 @talyz
/pkgs/build-support/build-pecl.nix @NixOS/php @aanderse @etu @globin @ma27 @talyz
/pkgs/development/interpreters/php @jtojnar @NixOS/php @aanderse @etu @globin @ma27 @talyz
/pkgs/development/php-packages @NixOS/php @aanderse @etu @globin @ma27 @talyz
/pkgs/top-level/php-packages.nix @jtojnar @NixOS/php @aanderse @etu @globin @ma27 @talyz
# Podman, CRI-O modules and related
/nixos/modules/virtualisation/containers.nix @zowoq @adisbladis
/nixos/modules/virtualisation/cri-o.nix @zowoq @adisbladis
/nixos/modules/virtualisation/podman @zowoq @adisbladis
/nixos/tests/cri-o.nix @zowoq @adisbladis
/nixos/tests/podman @zowoq @adisbladis
/nixos/modules/virtualisation/containers.nix @NixOS/podman @zowoq
/nixos/modules/virtualisation/cri-o.nix @NixOS/podman @zowoq
/nixos/modules/virtualisation/podman.nix @NixOS/podman @zowoq
/nixos/tests/cri-o.nix @NixOS/podman @zowoq
/nixos/tests/podman.nix @NixOS/podman @zowoq
# Docker tools
/pkgs/build-support/docker @roberth
/nixos/tests/docker-tools* @roberth
/doc/builders/images/dockertools.section.md @roberth
/pkgs/build-support/docker @roberth @utdemir
/nixos/tests/docker-tools-overlay.nix @roberth
/nixos/tests/docker-tools.nix @roberth
/doc/builders/images/dockertools.xml @roberth
# Blockchains
/pkgs/applications/blockchains @mmahut @RaghavSood
# Go
/doc/languages-frameworks/go.section.md @kalbasit @Mic92 @zowoq
/pkgs/development/compilers/go @kalbasit @Mic92 @zowoq
/pkgs/development/go-modules @kalbasit @Mic92 @zowoq
/pkgs/development/go-packages @kalbasit @Mic92 @zowoq
# GNOME
/pkgs/desktops/gnome @jtojnar @hedning
/pkgs/desktops/gnome/extensions @piegamesde @jtojnar @hedning
# Cinnamon
/pkgs/desktops/cinnamon @mkg20001
# nim
/pkgs/development/compilers/nim @ehmry
/pkgs/development/nim-packages @ehmry
#nim
/pkgs/development/compilers/nim @ehmry
/pkgs/development/nim-packages @ehmry
/pkgs/top-level/nim-packages.nix @ehmry
# terraform providers
/pkgs/applications/networking/cluster/terraform-providers @zowoq
# kubernetes
/nixos/doc/manual/configuration/kubernetes.chapter.md @zowoq
/nixos/modules/services/cluster/kubernetes @zowoq
/nixos/tests/kubernetes @zowoq
/pkgs/applications/networking/cluster/kubernetes @zowoq
# Matrix
/pkgs/servers/heisenbridge @piegamesde
/pkgs/servers/matrix-conduit @piegamesde
/pkgs/servers/matrix-synapse/matrix-appservice-irc @piegamesde
/nixos/modules/services/misc/heisenbridge.nix @piegamesde
/nixos/modules/services/misc/matrix-appservice-irc.nix @piegamesde
/nixos/modules/services/misc/matrix-conduit.nix @piegamesde
/nixos/tests/matrix-appservice-irc.nix @piegamesde
/nixos/tests/matrix-conduit.nix @piegamesde


@@ -38,3 +38,11 @@ Please run `nix-shell -p nix-info --run "nix-info -m"` and paste the result.
[user@system:~]$ nix-shell -p nix-info --run "nix-info -m"
output here
```
Maintainer information:
```yaml
# a list of nixpkgs attributes affected by the problem
attribute:
# a list of nixos modules affected by the problem
module:
```


@@ -13,10 +13,10 @@ assignees: ''
<!-- Note that these are hard requirements -->
<!--
You can use the "Go to file" functionality on GitHub to find the package
You can use the "Go to file" functionality on github to find the package
Then you can go to the history for this package
Find the latest "package_name: old_version -> new_version" commit
The "new_version" is the current version of the package
The "new_version" is the the current version of the package
-->
- [ ] Checked the [nixpkgs master branch](https://github.com/NixOS/nixpkgs)
<!--
@@ -29,7 +29,7 @@ There's a high chance that you'll have the new version right away while helping
###### Project name
`nix search` name:
<!--
The current version can be found easily with the same process as above for checking the master branch
The current version can be found easily with the same process than above for checking the master branch
If an open PR is present for the package, take this version as the current one and link to the PR
-->
current version:


@@ -1,10 +1,16 @@
###### Description of changes
<!--
For package updates please link to a changelog or describe changes, this helps your fellow maintainers discover breaking updates.
For new packages please briefly describe the package or provide a link to its homepage.
To help with the large amounts of pull requests, we would appreciate your
reviews of other pull requests, especially simple package updates. Just leave a
comment describing what you have tested in the relevant package/service.
Reviewing helps to reduce the average time-to-merge for everyone.
Thanks a lot if you do!
List of open PRs: https://github.com/NixOS/nixpkgs/pulls
Reviewing guidelines: https://nixos.org/manual/nixpkgs/unstable/#chap-reviewing-contributions
-->
###### Motivation for this change
###### Things done
<!-- Please check what applies. Note that these are not hard requirements but merely serve as information for reviewers. -->
@@ -14,7 +20,7 @@ For new packages please briefly describe the package or provide a link to its ho
- [ ] aarch64-linux
- [ ] x86_64-darwin
- [ ] aarch64-darwin
- [ ] For non-Linux: Is `sandbox = true` set in `nix.conf`? (See [Nix manual](https://nixos.org/manual/nix/stable/command-ref/conf-file.html))
- [ ] For non-Linux: Is `sandbox = true` set in `nix.conf`? (See [Nix manual](https://nixos.org/manual/nix/stable/#sec-conf-file))
- [ ] Tested, as applicable:
- [NixOS test(s)](https://nixos.org/manual/nixos/unstable/index.html#sec-nixos-tests) (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- and/or [package tests](https://nixos.org/manual/nixpkgs/unstable/#sec-package-tests)
@@ -22,20 +28,8 @@ For new packages please briefly describe the package or provide a link to its ho
- made sure NixOS tests are [linked](https://nixos.org/manual/nixpkgs/unstable/#ssec-nixos-tests-linking) to the relevant packages
- [ ] Tested compilation of all packages that depend on this change using `nix-shell -p nixpkgs-review --run "nixpkgs-review rev HEAD"`. Note: all changes have to be committed, also see [nixpkgs-review usage](https://github.com/Mic92/nixpkgs-review#usage)
- [ ] Tested basic functionality of all binary files (usually in `./result/bin/`)
- [22.05 Release Notes (or backporting 21.11 Release notes)](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#generating-2205-release-notes)
- [21.11 Release Notes (or backporting 21.05 Release notes)](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#generating-2111-release-notes)
- [ ] (Package updates) Added a release notes entry if the change is major or breaking
- [ ] (Module updates) Added a release notes entry if the change is significant
- [ ] (Module addition) Added a release notes entry if adding a new NixOS module
- [ ] (Release notes changes) Ran `nixos/doc/manual/md-to-db.sh` to update generated release notes
- [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
<!--
To help with the large amounts of pull requests, we would appreciate your
reviews of other pull requests, especially simple package updates. Just leave a
comment describing what you have tested in the relevant package/service.
Reviewing helps to reduce the average time-to-merge for everyone.
Thanks a lot if you do!
List of open PRs: https://github.com/NixOS/nixpkgs/pulls
Reviewing guidelines: https://nixos.org/manual/nixpkgs/unstable/#chap-reviewing-contributions
-->

.github/labeler.yml (vendored, 10 changes)

@@ -5,6 +5,10 @@
- pkgs/development/libraries/agda/**/*
- pkgs/top-level/agda-packages.nix
"6.topic: bsd":
- pkgs/os-specific/bsd/**/*
- pkgs/stdenv/freebsd/**/*
"6.topic: cinnamon":
- pkgs/desktops/cinnamon/**/*
@@ -12,7 +16,7 @@
- nixos/modules/services/editors/emacs.nix
- nixos/modules/services/editors/emacs.xml
- nixos/tests/emacs-daemon.nix
- pkgs/applications/editors/emacs/elisp-packages/**/*
- pkgs/applications/editors/emacs-modes/**/*
- pkgs/applications/editors/emacs/**/*
- pkgs/build-support/emacs/**/*
- pkgs/top-level/emacs-packages.nix
@@ -138,9 +142,7 @@
"6.topic: vim":
- doc/languages-frameworks/vim.section.md
- pkgs/applications/editors/vim/**/*
- pkgs/applications/editors/vim/plugins/**/*
- nixos/modules/programs/neovim.nix
- pkgs/applications/editors/neovim/**/*
- pkgs/misc/vim-plugins/**/*
"6.topic: xfce":
- nixos/doc/manual/configuration/xfce.xml


@@ -2,19 +2,13 @@ name: Backport
on:
pull_request_target:
types: [closed, labeled]
# WARNING:
# When extending this action, be aware that $GITHUB_TOKEN allows write access to
# the GitHub repository. This means that it should not evaluate user input in a
# way that allows code injection.
jobs:
backport:
name: Backport Pull Request
if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
# required to find all branches
fetch-depth: 0


@@ -1,26 +1,20 @@
name: Basic evaluation checks
on:
workflow_dispatch
# pull_request:
# branches:
# - master
# - release-**
# push:
# branches:
# - master
# - release-**
pull_request:
branches:
- master
- release-**
push:
branches:
- master
- release-**
jobs:
tests:
runs-on: ubuntu-latest
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v17
- uses: cachix/cachix-action@v10
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
name: nixpkgs-ci
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
- uses: actions/checkout@v2
- uses: cachix/install-nix-action@v15
# explicit list of supportedSystems is needed until aarch64-darwin becomes part of the trunk jobset
- run: nix-build pkgs/top-level/release.nix -A tarball.nixpkgs-basic-release-checks --arg supportedSystems '[ "aarch64-darwin" "aarch64-linux" "x86_64-linux" "x86_64-darwin" ]'


@@ -22,7 +22,7 @@ jobs:
if: steps.ismerge.outputs.ismerge != 'true'
- name: Warn if the commit was a direct push
if: steps.ismerge.outputs.ismerge != 'true'
uses: peter-evans/commit-comment@v2
uses: peter-evans/commit-comment@v1
with:
body: |
@${{ github.actor }}, you pushed a commit directly to master/release branch


@@ -11,33 +11,36 @@ on:
jobs:
tests:
runs-on: ubuntu-latest
if: "github.repository_owner == 'NixOS' && !contains(github.event.pull_request.title, '[skip editorconfig]')"
if: github.repository_owner == 'NixOS'
steps:
- name: Get list of changed files from PR
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo 'PR_DIFF<<EOF' >> $GITHUB_ENV
gh api \
repos/NixOS/nixpkgs/pulls/${{github.event.number}}/files --paginate \
| jq '.[] | select(.status != "removed") | .filename' \
> "$HOME/changed_files"
- name: print list of changed files
run: |
cat "$HOME/changed_files"
- uses: actions/checkout@v3
>> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV
- uses: actions/checkout@v2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v17
if: env.PR_DIFF
- uses: cachix/install-nix-action@v15
if: env.PR_DIFF
with:
# nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0
nix_path: nixpkgs=https://github.com/NixOS/nixpkgs/archive/c473cc8714710179df205b153f4e9fa007107ff9.tar.gz
nix_path: nixpkgs=https://github.com/NixOS/nixpkgs/archive/f93ecc4f6bc60414d8b73dbdf615ceb6a2c604df.tar.gz
- name: install editorconfig-checker
run: nix-env -iA editorconfig-checker -f '<nixpkgs>'
if: env.PR_DIFF
- name: Checking EditorConfig
if: env.PR_DIFF
run: |
cat "$HOME/changed_files" | xargs -r editorconfig-checker -disable-indent-size
echo "$PR_DIFF" | xargs editorconfig-checker -disable-indent-size
- if: ${{ failure() }}
run: |
echo "::error :: Hey! It looks like your changes don't follow our editorconfig settings. Read https://editorconfig.org/#download to configure your editor so you never see this error again."


@@ -4,11 +4,6 @@ on:
pull_request_target:
types: [edited, opened, synchronize, reopened]
# WARNING:
# When extending this action, be aware that $GITHUB_TOKEN allows some write
# access to the GitHub API. This means that it should not evaluate user input in
# a way that allows code injection.
permissions:
contents: read
pull-requests: write
@@ -18,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/labeler@v4
- uses: actions/labeler@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
sync-labels: true


@@ -14,17 +14,17 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v17
- uses: cachix/install-nix-action@v15
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true
- uses: cachix/cachix-action@v10
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
# This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere.
name: nixpkgs-ci
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
- name: Building NixOS manual


@@ -14,17 +14,17 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v17
- uses: cachix/install-nix-action@v15
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true
- uses: cachix/cachix-action@v10
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
# This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere.
name: nixpkgs-ci
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
- name: Building Nixpkgs manual


@@ -15,11 +15,11 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v17
- uses: cachix/install-nix-action@v15
- name: Check DocBook files generated from Markdown are consistent
run: |
nixos/doc/manual/md-to-db.sh


@@ -3,11 +3,6 @@ name: "set pending status"
on:
pull_request_target:
# WARNING:
# When extending this action, be aware that $GITHUB_TOKEN allows write access to
# the GitHub repository. This means that it should not evaluate user input in a
# way that allows code injection.
jobs:
action:
runs-on: ubuntu-latest


@@ -32,13 +32,9 @@ jobs:
into: staging-next-21.05
- from: staging-next-21.05
into: staging-21.05
- from: release-21.11
into: staging-next-21.11
- from: staging-next-21.11
into: staging-21.11
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
uses: devmasx/merge-branch@1.4.0
@@ -49,7 +45,7 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
- name: Comment on failure
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v1
if: ${{ failure() }}
with:
issue-number: 105153


@@ -32,7 +32,7 @@ jobs:
into: staging
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
uses: devmasx/merge-branch@1.4.0
@@ -43,7 +43,7 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
- name: Comment on failure
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v1
if: ${{ failure() }}
with:
issue-number: 105153


@@ -1,47 +0,0 @@
name: "Update terraform-providers"
on:
schedule:
- cron: "14 3 * * 1"
workflow_dispatch:
jobs:
tf-providers:
if: github.repository_owner == 'NixOS' && github.ref == 'refs/heads/master' # ensure workflow_dispatch only runs on master
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v17
- name: setup
id: setup
run: |
echo ::set-output name=title::"terraform-providers: update $(date -u +"%Y-%m-%d")"
- name: update terraform-providers
run: |
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
pushd pkgs/applications/networking/cluster/terraform-providers
./update-all-providers --no-build
git commit -m "${{ steps.setup.outputs.title }}" providers.json
popd
- name: create PR
uses: peter-evans/create-pull-request@v3
with:
body: |
Automatic update of terraform providers.
Created by [update-terraform-providers](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/update-terraform-providers.yml) action.
Check that all providers build with `@ofborg build terraform-full`
branch: terraform-providers-update
delete-branch: false
labels: "2.status: work-in-progress"
title: ${{ steps.setup.outputs.title }}
token: ${{ secrets.GITHUB_TOKEN }}
- name: comment on failure
uses: peter-evans/create-or-update-comment@v2
if: ${{ failure() }}
with:
issue-number: 153416
body: |
Automatic update of terraform providers [failed](https://github.com/NixOS/nixpkgs/actions/runs/${{ github.run_id }}).

.gitignore (vendored, 6 changes)

@@ -3,11 +3,8 @@
.*.swp
.*.swo
.idea/
.vscode/
outputs/
result
result-*
source/
/doc/NEWS.html
/doc/NEWS.txt
/doc/manual.html
@@ -24,6 +21,3 @@ __pycache__
# generated by pkgs/common-updater/update-script.nix
update-git-commits.txt
# JetBrains IDEA module declaration file
/nixpkgs.iml


@@ -1 +1 @@
22.05
21.11


@@ -11,16 +11,12 @@ under the terms of [COPYING](COPYING), which is an MIT-like license.
## Submitting changes
Read the ["Submitting changes"](https://nixos.org/nixpkgs/manual/#chap-submitting-changes) section of the nixpkgs manual. It explains how to write, test, and iterate on your change, and which branch to base your pull request against.
Below is a short excerpt of some points in there:
* Format the commit messages in the following way:
```
(pkg-name | nixos/<module>): (from -> to | init at version | refactor | etc)
(Motivation for change. Link to release notes. Additional information.)
(Motivation for change. Additional information.)
```
For consistency, there should not be a period at the end of the commit message's summary line (the first line of the commit message).
@@ -29,7 +25,6 @@ Below is a short excerpt of some points in there:
* nginx: init at 2.0.1
* firefox: 54.0.1 -> 55.0
https://www.mozilla.org/en-US/firefox/55.0/releasenotes/
* nixos/hydra: add bazBaz option
Dual baz behavior is needed to do foo.
@@ -45,7 +40,7 @@ Below is a short excerpt of some points in there:
* If there is no upstream license, `meta.license` should default to `lib.licenses.unfree`.
* `meta.maintainers` must be set.
See the nixpkgs manual for more details on [standard meta-attributes](https://nixos.org/nixpkgs/manual/#sec-standard-meta-attributes).
See the nixpkgs manual for more details on [standard meta-attributes](https://nixos.org/nixpkgs/manual/#sec-standard-meta-attributes) and on how to [submit changes to nixpkgs](https://nixos.org/nixpkgs/manual/#chap-submitting-changes).
## Writing good commit messages
@@ -53,55 +48,15 @@ In addition to writing properly formatted commit messages, it's important to inc
For package version upgrades and such a one-line commit message is usually sufficient.
## Rebasing between branches (i.e. from master to staging)
From time to time, changes between branches must be rebased, for example, if the
number of new rebuilds they would cause is too large for the target branch. When
rebasing, care must be taken to include only the intended changes, otherwise
many CODEOWNERS will be inadvertently requested for review. To achieve this,
rebasing should not be performed directly on the target branch, but on the merge
base between the current and target branch.
In the following example, we see a rebase from `master` onto the merge base
between `master` and `staging`, so that a change can eventually be retargeted to
`staging`. The example uses `upstream` as the remote for `NixOS/nixpkgs.git`
while the `origin` remote is used for the remote you are pushing to.
```console
# Find the common base between two branches
common=$(git merge-base upstream/master upstream/staging)
# Find the common base between your feature branch and master
commits=$(git merge-base $(git branch --show-current) upstream/master)
# Rebase all commits onto the common base
git rebase --onto=$common $commits
# Force push your changes
git push origin $(git branch --show-current) --force-with-lease
```
Then change the base branch in the GitHub PR using the *Edit* button in the upper
right corner, and switch from `master` to `staging`. After the PR has been
retargeted it might be necessary to do a final rebase onto the target branch, to
resolve any outstanding merge conflicts.
```console
# Rebase onto target branch
git rebase upstream/staging
# Review and fixup possible conflicts
git status
# Force push your changes
git push origin $(git branch --show-current) --force-with-lease
```
## Backporting changes
Follow these steps to backport a change into a release branch in compliance with the [commit policy](https://nixos.org/nixpkgs/manual/#submitting-changes-stable-release-branches).
1. Take note of the commits in which the change was introduced into `master` branch.
2. Check out the target _release branch_, e.g. `release-21.11`. Do not use a _channel branch_ like `nixos-21.11` or `nixpkgs-21.11-darwin`.
2. Check out the target _release branch_, e.g. `release-20.09`. Do not use a _channel branch_ like `nixos-20.09` or `nixpkgs-20.09`.
3. Create a branch for your change, e.g. `git checkout -b backport`.
4. When the reason to backport is not obvious from the original commit message, use `git cherry-pick -xe <original commit>` and add a reason. Otherwise use `git cherry-pick -x <original commit>`. That's fine for minor version updates that only include security and bug fixes, commits that fix an otherwise broken package, or similar. Please also ensure the commits exist on the master branch; in the case of squashed or rebased merges, the commit hash will change and the new commits can be found in the merge message at the bottom of the master pull request.
5. Push to GitHub and open a backport pull request. Make sure to select the release branch (e.g. `release-21.11`) as the target branch of the pull request, and link to the pull request in which the original change was comitted to `master`. The pull request title should be the commit title with the release version as prefix, e.g. `[21.11]`.
5. Push to GitHub and open a backport pull request. Make sure to select the release branch (e.g. `release-20.09`) as the target branch of the pull request, and link to the pull request in which the original change was comitted to `master`. The pull request title should be the commit title with the release version as prefix, e.g. `[20.09]`.
6. When the backport pull request is merged and you have the necessary privileges, you can also replace the label `9.needs: port to stable` with `8.has: port to stable` on the original pull request. This way maintainers can keep track of missing backports more easily.
## Criteria for Backporting changes
@@ -113,17 +68,17 @@ Anything that does not cause user or downstream dependency regressions can be ba
- Services which require a client to be up-to-date regardless. (E.g. `spotify`, `steam`, or `discord`)
- Security critical applications (E.g. `firefox`)
## Generating 22.05 Release Notes
## Generating 21.11 Release Notes
(This section also applies to backporting 21.11 release notes: substitute "rl-2205" for "rl-2111".)
(This section also applies to backporting 21.05 release notes: substitute "rl-2111" for "rl-2105".)
Documentation in nixpkgs is transitioning to a markdown-centric workflow. Release notes now require a translation step to convert from markdown to a compatible docbook document.
Steps for updating 22.05 Release notes:
Steps for updating 21.11 Release notes:
1. Edit `nixos/doc/manual/release-notes/rl-2205.section.md` with the desired changes
2. Run `./nixos/doc/manual/md-to-db.sh` to render `nixos/doc/manual/from_md/release-notes/rl-2205.section.xml`
3. Include changes to `rl-2205.section.md` and `rl-2205.section.xml` in the same commit.
1. Edit `nixos/doc/manual/release-notes/rl-2111.section.md` with the desired changes
2. Run `./nixos/doc/manual/md-to-db.sh` to render `nixos/doc/manual/from_md/release-notes/rl-2111.section.xml`
3. Include changes to `rl-2111.section.md` and `rl-2111.section.xml` in the same commit.
## Reviewing contributions


@@ -1,4 +1,4 @@
Copyright (c) 2003-2022 Eelco Dolstra and the Nixpkgs/NixOS contributors
Copyright (c) 2003-2021 Eelco Dolstra and the Nixpkgs/NixOS contributors
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the


@@ -1,15 +1,10 @@
<p align="center">
<a href="https://nixos.org#gh-light-mode-only">
<img src="https://raw.githubusercontent.com/NixOS/nixos-homepage/master/logo/nixos-hires.png" width="500px" alt="NixOS logo"/>
</a>
<a href="https://nixos.org#gh-dark-mode-only">
<img src="https://raw.githubusercontent.com/NixOS/nixos-artwork/master/logo/nixos-white.png" width="500px" alt="NixOS logo"/>
</a>
<a href="https://nixos.org/nixos"><img src="https://nixos.org/logo/nixos-hires.png" width="500px" alt="NixOS logo" /></a>
</p>
<p align="center">
<a href="https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md"><img src="https://img.shields.io/github/contributors-anon/NixOS/nixpkgs" alt="Contributors badge" /></a>
<a href="https://opencollective.com/nixos"><img src="https://opencollective.com/nixos/tiers/supporter/badge.svg?label=supporters&color=brightgreen" alt="Open Collective supporters" /></a>
<a href="https://www.codetriage.com/nixos/nixpkgs"><img src="https://www.codetriage.com/nixos/nixpkgs/badges/users.svg" alt="Code Triagers badge" /></a>
<a href="https://opencollective.com/nixos"><img src="https://opencollective.com/nixos/tiers/supporter/badge.svg?label=Supporter&color=brightgreen" alt="Open Collective supporters" /></a>
</p>
[Nixpkgs](https://github.com/nixos/nixpkgs) is a collection of over
@@ -51,9 +46,9 @@ Nixpkgs and NixOS are built and tested by our continuous integration
system, [Hydra](https://hydra.nixos.org/).
* [Continuous package builds for unstable/master](https://hydra.nixos.org/jobset/nixos/trunk-combined)
* [Continuous package builds for the NixOS 21.11 release](https://hydra.nixos.org/jobset/nixos/release-21.11)
* [Continuous package builds for the NixOS 21.05 release](https://hydra.nixos.org/jobset/nixos/release-21.05)
* [Tests for unstable/master](https://hydra.nixos.org/job/nixos/trunk-combined/tested#tabs-constituents)
* [Tests for the NixOS 21.11 release](https://hydra.nixos.org/job/nixos/release-21.11/tested#tabs-constituents)
* [Tests for the NixOS 21.05 release](https://hydra.nixos.org/job/nixos/release-21.05/tested#tabs-constituents)
Artifacts successfully built with Hydra are published to cache at
https://cache.nixos.org/. When successful build and test criteria are


@@ -1,5 +1,6 @@
--[[
Turns a manpage reference into a link, when a mapping is defined below.
Turns a manpage reference into a link, when a mapping is defined
in the unix-man-urls.lua file.
]]
local man_urls = {


@@ -6,7 +6,7 @@ When using Nix, you will frequently need to download source code and other files
Because fixed output derivations are _identified_ by their hash, a common mistake is to update a fetcher's URL or a version parameter, without updating the hash. **This will cause the old contents to be used.** So remember to always invalidate the hash argument.
For those who develop and maintain fetchers, a similar problem arises with changes to the implementation of a fetcher. These may cause a fixed output derivation to fail, but won't normally be caught by tests because the supposed output is already in the store or cache. For the purpose of testing, you can use a trick that is embodied by the [`invalidateFetcherByDrvHash`](#tester-invalidateFetcherByDrvHash) function. It uses the derivation `name` to create a unique output path per fetcher implementation, defeating the caching precisely where it would be harmful.
For those who develop and maintain fetchers, a similar problem arises with changes to the implementation of a fetcher. These may cause a fixed output derivation to fail, but won't normally be caught by tests because the supposed output is already in the store or cache. For the purpose of testing, you can use a trick that is embodied by the [`invalidateFetcherByDrvHash`](#sec-pkgs-invalidateFetcherByDrvHash) function. It uses the derivation `name` to create a unique output path per fetcher implementation, defeating the caching precisely where it would be harmful.
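As a hedged illustration of the stale-hash pitfall described above (the URL, version, and hash are placeholders, not taken from this diff):

```nix
{ fetchurl }:

# Sketch: the URL was bumped to a new version, but `sha256` still names the
# old tarball. Because a fixed-output derivation is identified by its hash,
# Nix will happily return the cached old contents instead of fetching.
fetchurl {
  url = "https://example.org/hello-1.1.tar.gz";
  # Stale hash from 1.0; invalidate it (e.g. replace it with lib.fakeSha256)
  # to force a real fetch and learn the correct new hash.
  sha256 = "0000000000000000000000000000000000000000000000000000";
}
```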
## `fetchurl` and `fetchzip` {#fetchurl}
@@ -40,24 +40,6 @@ Used with Git. Expects `url` to a Git repo, `rev`, and `sha256`. `rev` in this c
Additionally, the following optional arguments can be given: `fetchSubmodules = true` makes `fetchgit` also fetch the submodules of a repository. If `deepClone` is set to true, the entire repository is cloned, as opposed to just creating a shallow clone. `deepClone = true` also implies `leaveDotGit = true`, which means that the `.git` directory of the clone won't be removed after checkout.
If only parts of the repository are needed, `sparseCheckout` can be used. This will prevent Git from fetching unnecessary blobs from the server; see [git sparse-checkout](https://git-scm.com/docs/git-sparse-checkout) and [git clone --filter](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---filterltfilter-specgt) for more information:
```nix
{ stdenv, fetchgit }:

stdenv.mkDerivation {
  name = "hello";
  src = fetchgit {
    url = "https://...";
    sparseCheckout = ''
      path/to/be/included
      another/path
    '';
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };
}
```
## `fetchfossil` {#fetchfossil}
Used with Fossil. Expects `url` to a Fossil archive, `rev`, and `sha256`.
@@ -72,10 +54,6 @@ Used with Mercurial. Expects `url`, `rev`, and `sha256`.
A number of fetcher functions wrap part of `fetchurl` and `fetchzip`. They are mainly convenience functions intended for commonly used destinations of source code in Nixpkgs. These wrapper fetchers are listed below.
## `fetchFromGitea` {#fetchfromgitea}
`fetchFromGitea` expects five arguments. `domain` is the gitea server name. `owner` is a string corresponding to the Gitea user or organization that controls this repository. `repo` corresponds to the name of the software repository. These are located at the top of every Gitea HTML page as `owner`/`repo`. `rev` corresponds to the Git commit hash or tag (e.g `v1.0`) that will be downloaded from Git. Finally, `sha256` corresponds to the hash of the extracted directory. Again, other hash algorithms are also available but `sha256` is currently preferred.
## `fetchFromGitHub` {#fetchfromgithub}
`fetchFromGitHub` expects four arguments. `owner` is a string corresponding to the GitHub user or organization that controls this repository. `repo` corresponds to the name of the software repository. These are located at the top of every GitHub HTML page as `owner`/`repo`. `rev` corresponds to the Git commit hash or tag (e.g `v1.0`) that will be downloaded from Git. Finally, `sha256` corresponds to the hash of the extracted directory. Again, other hash algorithms are also available but `sha256` is currently preferred.
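For illustration, a minimal `fetchFromGitHub` call might look like this (the `rev` and `sha256` values are placeholders borrowed from the `fetchgit` example elsewhere in this diff, so they would not verify for a real fetch):

```nix
{ fetchFromGitHub }:

fetchFromGitHub {
  # owner/repo as shown at the top of the GitHub page
  owner = "NixOS";
  repo = "nix";
  # a Git commit hash or tag such as "v1.0"
  rev = "9d9dbe6ed05854e03811c361a3380e09183f4f4a";
  # hash of the extracted directory, not of the downloaded archive
  sha256 = "sha256-7DszvbCNTjpzGRmpIVAWXk20P0/XTrWZ79KSOGLrUWY=";
}
```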
@@ -104,11 +82,4 @@ This is used with repo.or.cz repositories. The arguments expected are very simil
## `fetchFromSourcehut` {#fetchfromsourcehut}
This is used with sourcehut repositories. Similar to `fetchFromGitHub` above,
it expects `owner`, `repo`, `rev` and `sha256`, but don't forget the tilde (~)
in front of the username! Expected arguments also include `vc` ("git" (default)
or "hg"), `domain` and `fetchSubmodules`.
If `fetchSubmodules` is `true`, `fetchFromSourcehut` uses `fetchgit`
or `fetchhg` with `fetchSubmodules` or `fetchSubrepos` set to `true`,
respectively. Otherwise the fetcher uses `fetchzip`.
This is used with sourcehut repositories. The arguments expected are very similar to fetchFromGitHub above. Don't forget the tilde (~) in front of the user name!
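A minimal sketch of the arguments described above (owner, repo, and hash are hypothetical):

```nix
{ fetchFromSourcehut }:

fetchFromSourcehut {
  owner = "~someuser";  # note the leading tilde
  repo = "somerepo";
  rev = "v1.0";
  sha256 = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
  # vc = "hg";          # optional; "git" is the default
}
```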


@@ -151,12 +151,6 @@ Create a Docker image with many of the store paths being on their own layer to i
: Shell commands to run while creating the archive for the final layer in a fakeroot environment. Unlike `extraCommands`, you can run `chown` to change the owners of the files in the archive, changing fakeroot's state instead of the real filesystem. The latter would require privileges that the build user does not have. Static binaries do not interact with the fakeroot environment. By default all files in the archive will be owned by root.
`enableFakechroot` _optional_
: Whether to run in `fakeRootCommands` in `fakechroot`, making programs behave as though `/` is the root of the image being created, while files in the Nix store are available as usual. This allows scripts that perform installation in `/` to work as expected. Considering that `fakechroot` is implemented via the same mechanism as `fakeroot`, the same caveats apply.
*Default:* `false`
### Behavior of `contents` in the final image {#dockerTools-buildLayeredImage-arg-contents}
Each path directly listed in `contents` will have a symlink in the root of the image.
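A hedged sketch tying the `contents`, `fakeRootCommands`, and `enableFakechroot` options together (image name and paths are illustrative):

```nix
{ dockerTools, hello }:

dockerTools.buildLayeredImage {
  name = "hello-image";
  # each path listed here gets a symlink in the image root
  contents = [ hello ];
  # make / behave like the root of the image for the commands below
  enableFakechroot = true;
  # runs under fakeroot: chown changes ownership in the archive only,
  # not on the real filesystem
  fakeRootCommands = ''
    mkdir -p /var/lib/hello
    chown 1000:1000 /var/lib/hello
  '';
}
```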


@@ -29,7 +29,7 @@ How to add a new (major) version of the Linux kernel to Nixpkgs:
4. If needed you can also run `make menuconfig`:
```ShellSession
$ nix-env -f "<nixpkgs>" -iA ncurses
$ nix-env -i ncurses
$ export NIX_CFLAGS_LINK=-lncurses
$ make menuconfig ARCH=arch
```


@@ -56,7 +56,7 @@ Use `programs.steam.enable = true;` if you want to add steam to systemPackages a
## steam-run {#sec-steam-run}
The FHS-compatible chroot used for Steam can also be used to run other Linux games that expect a FHS environment. To use it, install the `steam-run` package and run the game with
The FHS-compatible chroot used for Steam can also be used to run other Linux games that expect a FHS environment. To use it, install the `steam-run-native` package and run the game with
```
steam-run ./foo


@@ -7,4 +7,5 @@
</para>
<xi:include href="special/fhs-environments.section.xml" />
<xi:include href="special/mkshell.section.xml" />
<xi:include href="special/invalidateFetcherByDrvHash.section.xml" />
</chapter>


@@ -45,5 +45,3 @@ One can create a simple environment using a `shell.nix` like that:
```
Running `nix-shell` would then drop you into a shell with these libraries and binaries available. You can use this to run closed-source applications which expect FHS structure without hassles: simply change `runScript` to the application path, e.g. `./bin/start.sh` -- relative paths are supported.
Additionally, the FHS builder links all relocated gsettings-schemas (the glib setup-hook moves them to `share/gsettings-schemas/${name}/glib-2.0/schemas`) to their standard FHS location. This means you don't need to wrap binaries with `wrapGAppsHook`.
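A minimal `shell.nix` sketch of the `runScript` usage just described (the package list and script path are examples):

```nix
{ pkgs ? import <nixpkgs> {} }:

(pkgs.buildFHSUserEnv {
  name = "fhs-app";
  targetPkgs = pkgs: [ pkgs.zlib ];
  # launch the application directly instead of an interactive shell;
  # relative paths such as ./bin/start.sh are supported
  runScript = "./bin/start.sh";
}).env
```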


@@ -0,0 +1,31 @@
## `invalidateFetcherByDrvHash` {#sec-pkgs-invalidateFetcherByDrvHash}
Use the derivation hash to invalidate the output via name, for testing.
Type: `(a@{ name, ... } -> Derivation) -> a -> Derivation`
Normally, fixed output derivations can and should be cached by their output
hash only, but for testing we want to re-fetch every time the fetcher changes.
Changes to the fetcher become apparent in the drvPath, which is a hash of
how to fetch, rather than a fixed store path.
By inserting this hash into the name, we can make sure to re-run the fetcher
every time the fetcher changes.
This relies on the assumption that Nix isn't clever enough to reuse its
database of local store contents to optimize fetching.
You might notice that the "salted" name derives from the normal invocation,
not the final derivation. `invalidateFetcherByDrvHash` has to invoke the fetcher
function twice: once to get a derivation hash, and again to produce the final
fixed output derivation.
Example:
tests.fetchgit = invalidateFetcherByDrvHash fetchgit {
  name = "nix-source";
  url = "https://github.com/NixOS/nix";
  rev = "9d9dbe6ed05854e03811c361a3380e09183f4f4a";
  sha256 = "sha256-7DszvbCNTjpzGRmpIVAWXk20P0/XTrWZ79KSOGLrUWY=";
};


@@ -1,37 +1,17 @@
# pkgs.mkShell {#sec-pkgs-mkShell}
`pkgs.mkShell` is a specialized `stdenv.mkDerivation` that removes some
repetition when using it with `nix-shell` (or `nix develop`).
`pkgs.mkShell` is a special kind of derivation that is only useful when using
it combined with `nix-shell`. It will in fact fail to instantiate when invoked
with `nix-build`.
## Usage {#sec-pkgs-mkShell-usage}
Here is a common usage example:
```nix
{ pkgs ? import <nixpkgs> {} }:
pkgs.mkShell {
  # specify which packages to add to the shell environment
  packages = [ pkgs.gnumake ];
  inputsFrom = [ pkgs.hello pkgs.gnutar ];
  shellHook = ''
    export DEBUG=1
  '';
  # add all the dependencies, of the given packages, to the shell environment
  inputsFrom = with pkgs; [ hello gnutar ];
}
```
## Attributes
* `name` (default: `nix-shell`). Set the name of the derivation.
* `packages` (default: `[]`). Add executable packages to the `nix-shell` environment.
* `inputsFrom` (default: `[]`). Add build dependencies of the listed derivations to the `nix-shell` environment.
* `shellHook` (default: `""`). Bash statements that are executed by `nix-shell`.
... all the attributes of `stdenv.mkDerivation`.
## Building the shell
This derivation output will contain a text file that contains a reference to
all the build inputs. This is useful in CI where we want to make sure that
every derivation, and its dependencies, build properly. Or when creating a GC
root so that the build dependencies don't get garbage-collected.


@@ -1,82 +0,0 @@
# Testers {#chap-testers}
This chapter describes several testing builders which are available in the `testers` namespace.
## `testVersion` {#tester-testVersion}
Checks the command output contains the specified version
Although simplistic, this test assures that the main program
can run. While there's no substitute for a real test case,
it does catch dynamic linking errors and such. It also provides
some protection against accidentally building the wrong version,
for example when using an 'old' hash in a fixed-output derivation.
Examples:
```nix
passthru.tests.version = testVersion { package = hello; };

passthru.tests.version = testVersion {
  package = seaweedfs;
  command = "weed version";
};

passthru.tests.version = testVersion {
  package = key;
  command = "KeY --help";
  # Wrong '2.5' version in the code. Drop on next version.
  version = "2.5";
};
```
## `testEqualDerivation` {#tester-testEqualDerivation}
Checks that two packages produce the exact same build instructions.
This can be used to make sure that a certain difference of configuration,
such as the presence of an overlay does not cause a cache miss.
When the derivations are equal, the return value is an empty file.
Otherwise, the build log explains the difference via `nix-diff`.
Example:
```nix
testEqualDerivation
"The hello package must stay the same when enabling checks."
hello
(hello.overrideAttrs(o: { doCheck = true; }))
```
## `invalidateFetcherByDrvHash` {#tester-invalidateFetcherByDrvHash}
Use the derivation hash to invalidate the output via name, for testing.
Type: `(a@{ name, ... } -> Derivation) -> a -> Derivation`
Normally, fixed output derivations can and should be cached by their output
hash only, but for testing we want to re-fetch every time the fetcher changes.
Changes to the fetcher become apparent in the drvPath, which is a hash of
how to fetch, rather than a fixed store path.
By inserting this hash into the name, we can make sure to re-run the fetcher
every time the fetcher changes.
This relies on the assumption that Nix isn't clever enough to reuse its
database of local store contents to optimize fetching.
You might notice that the "salted" name derives from the normal invocation,
not the final derivation. `invalidateFetcherByDrvHash` has to invoke the fetcher
function twice: once to get a derivation hash, and again to produce the final
fixed output derivation.
Example:
```nix
tests.fetchgit = invalidateFetcherByDrvHash fetchgit {
name = "nix-source";
url = "https://github.com/NixOS/nix";
rev = "9d9dbe6ed05854e03811c361a3380e09183f4f4a";
sha256 = "sha256-7DszvbCNTjpzGRmpIVAWXk20P0/XTrWZ79KSOGLrUWY=";
};
```


@@ -47,88 +47,6 @@ These functions write `text` to the Nix store. This is useful for creating scrip
Many more commands wrap `writeTextFile` including `writeText`, `writeTextDir`, `writeScript`, and `writeScriptBin`. These are convenience functions over `writeTextFile`.
Here are a few examples:
```nix
# Writes my-file to /nix/store/<store path>
writeTextFile {
  name = "my-file";
  text = ''
    Contents of File
  '';
}
# See also the `writeText` helper function below.

# Writes executable my-file to /nix/store/<store path>/bin/my-file
writeTextFile {
  name = "my-file";
  text = ''
    Contents of File
  '';
  executable = true;
  destination = "/bin/my-file";
}

# Writes contents of file to /nix/store/<store path>
writeText "my-file"
  ''
  Contents of File
  '';

# Writes contents of file to /nix/store/<store path>/share/my-file
writeTextDir "share/my-file"
  ''
  Contents of File
  '';

# Writes my-file to /nix/store/<store path> and makes executable
writeScript "my-file"
  ''
  Contents of File
  '';

# Writes my-file to /nix/store/<store path>/bin/my-file and makes executable.
writeScriptBin "my-file"
  ''
  Contents of File
  '';

# Writes my-file to /nix/store/<store path> and makes executable.
writeShellScript "my-file"
  ''
  Contents of File
  '';

# Writes my-file to /nix/store/<store path>/bin/my-file and makes executable.
writeShellScriptBin "my-file"
  ''
  Contents of File
  '';
```
## `concatTextFile`, `concatText`, `concatScript` {#trivial-builder-concatText}
These functions concatenate `files` to the Nix store in a single file. This is useful for configuration files structured in lines of text. `concatTextFile` takes an attribute set and expects two arguments, `name` and `files`. `name` corresponds to the name used in the Nix store path. `files` will be the files to be concatenated. You can also set `executable` to true to make this file have the executable bit set.
`concatText` and `concatScript` are simple wrappers over `concatTextFile`.
Here are a few examples:
```nix
# Writes my-file to /nix/store/<store path>
concatTextFile {
  name = "my-file";
  files = [ drv1 "${drv2}/path/to/file" ];
}
# See also the `concatText` helper function below.

# Writes executable my-file to /nix/store/<store path>/bin/my-file
concatTextFile {
  name = "my-file";
  files = [ drv1 "${drv2}/path/to/file" ];
  executable = true;
  destination = "/bin/my-file";
}

# Writes contents of files to /nix/store/<store path>
concatText "my-file" [ file1 file2 ]

# Writes contents of files to /nix/store/<store path>
concatScript "my-file" [ file1 file2 ]
```
## `writeShellApplication` {#trivial-builder-writeShellApplication}
This can be used to easily produce a shell script that has some dependencies (`runtimeInputs`). It automatically sets the `PATH` of the script to contain all of the listed inputs, sets some sanity shellopts (`errexit`, `nounset`, `pipefail`), and checks the resulting script with [`shellcheck`](https://github.com/koalaman/shellcheck).
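For illustration, a minimal `writeShellApplication` call (the script body is an example):

```nix
{ writeShellApplication, curl }:

writeShellApplication {
  name = "show-nixos-org";
  # these packages are put on PATH inside the script
  runtimeInputs = [ curl ];
  # checked with shellcheck; errexit/nounset/pipefail are already set
  text = ''
    curl -s https://nixos.org
  '';
}
```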
@@ -154,26 +72,6 @@ validation.
## `symlinkJoin` {#trivial-builder-symlinkJoin}
This can be used to put many derivations into the same directory structure. It works by creating a new derivation and adding symlinks to each of the paths listed. It expects two arguments, `name`, and `paths`. `name` is the name used in the Nix store path for the created derivation. `paths` is a list of paths that will be symlinked. These paths can be to Nix store derivations or any other subdirectory contained within.
Here is an example:
```nix
# adds symlinks of hello and stack to current build and prints "links added"
symlinkJoin { name = "myexample"; paths = [ pkgs.hello pkgs.stack ]; postBuild = "echo links added"; }
```
This creates a derivation with a directory structure like the following:
```
/nix/store/sglsr5g079a5235hy29da3mq3hv8sjmm-myexample
|-- bin
|   |-- hello -> /nix/store/qy93dp4a3rqyn2mz63fbxjg228hffwyw-hello-2.10/bin/hello
|   `-- stack -> /nix/store/6lzdpxshx78281vy056lbk553ijsdr44-stack-2.1.3.1/bin/stack
`-- share
    |-- bash-completion
    |   `-- completions
    |       `-- stack -> /nix/store/6lzdpxshx78281vy056lbk553ijsdr44-stack-2.1.3.1/share/bash-completion/completions/stack
    |-- fish
    |   `-- vendor_completions.d
    |       `-- stack.fish -> /nix/store/6lzdpxshx78281vy056lbk553ijsdr44-stack-2.1.3.1/share/fish/vendor_completions.d/stack.fish
...
```
## `writeReferencesToFile` {#trivial-builder-writeReferencesToFile}


@@ -214,17 +214,17 @@ Most of the time, these are the same. For instance, the package `e2fsprogs` has
There are a few naming guidelines:
- The `pname` attribute _should_ be identical to the upstream package name.
- The `name` attribute _should_ be identical to the upstream package name.
- The `pname` and the `version` attribute _must not_ contain uppercase letters — e.g., `"mplayer"` instead of `"MPlayer"`.
- The `name` attribute _must not_ contain uppercase letters — e.g., `"mplayer-1.0rc2"` instead of `"MPlayer-1.0rc2"`.
- The `version` attribute _must_ start with a digit, e.g., `"0.3.1rc2"`.
- The version part of the `name` attribute _must_ start with a digit (following a dash) — e.g., `"hello-0.3.1rc2"`.
- If a package is not a release but a commit from a repository, then the `version` attribute _must_ be the date of that (fetched) commit. The date _must_ be in `"unstable-YYYY-MM-DD"` format.
- If a package is not a release but a commit from a repository, then the version part of the name _must_ be the date of that (fetched) commit. The date _must_ be in `"YYYY-MM-DD"` format. Also append `"unstable"` to the name - e.g., `"pkgname-unstable-2014-09-23"`.
- Dashes in the package `pname` _should_ be preserved in new variable names, rather than converted to underscores or camel cased — e.g., `http-parser` instead of `http_parser` or `httpParser`. The hyphenated style is preferred in all three package names.
- Dashes in the package name _should_ be preserved in new variable names, rather than converted to underscores or camel cased — e.g., `http-parser` instead of `http_parser` or `httpParser`. The hyphenated style is preferred in all three package names.
- If there are multiple versions of a package, this _should_ be reflected in the variable names in `all-packages.nix`, e.g. `json-c_0_9` and `json-c_0_11`. If there is an obvious “default” version, make an attribute like `json-c = json-c_0_9;`. See also [](#sec-versioning)
- If there are multiple versions of a package, this _should_ be reflected in the variable names in `all-packages.nix`, e.g. `json-c-0-9` and `json-c-0-11`. If there is an obvious “default” version, make an attribute like `json-c = json-c-0-9;`. See also [](#sec-versioning)
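A hedged sketch applying the naming rules above to a derivation (package and URL are illustrative):

```nix
{ stdenv, fetchurl }:

stdenv.mkDerivation rec {
  pname = "mplayer";  # lowercase, identical to the upstream name
  version = "1.0rc2"; # starts with a digit
  # the store name becomes "${pname}-${version}" = "mplayer-1.0rc2"
  src = fetchurl {
    url = "https://example.org/MPlayer-${version}.tar.gz";
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };
}
```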
## File naming and organisation {#sec-organisation}
@@ -540,11 +540,10 @@ If you do need to do create this sort of patch file, one way to do so is with gi
If a patch is available online but does not cleanly apply, it can be modified in some fixed ways by using additional optional arguments for `fetchpatch`:
- `relative`: Similar to using `git-diff`'s `--relative` flag, only keep changes inside the specified directory, making paths relative to it.
- `stripLen`: Remove the first `stripLen` components of pathnames in the patch.
- `extraPrefix`: Prefix pathnames by this string.
- `excludes`: Exclude files matching these patterns (applies after the above arguments).
- `includes`: Include only files matching these patterns (applies after the above arguments).
- `excludes`: Exclude files matching this pattern.
- `includes`: Include only files matching this pattern.
- `revert`: Revert the patch.
Note that because the checksum is computed after applying these effects, using or modifying these arguments will have no effect unless the `sha256` argument is changed as well.
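For instance, a patch whose paths need adjusting might be fetched like this (a sketch; the URL is hypothetical and the hash a placeholder):

```nix
fetchpatch {
  url = "https://github.com/some-owner/some-repo/commit/deadbeef.patch";
  stripLen = 1;                  # drop the first path component
  extraPrefix = "third_party/";  # then prefix the remaining paths
  excludes = [ "*/docs/*" ];     # drop hunks touching documentation
  sha256 = "...";                # recompute after changing any argument above
}
```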

View File

@@ -55,7 +55,7 @@ Additionally, the following syntax extensions are currently used:
- []{#ssec-contributing-markup-inline-roles}
If you want to link to a man page, you can use `` {manpage}`nix.conf(5)` ``, which will turn into {manpage}`nix.conf(5)`.
The references will turn into links when a mapping exists in {file}`doc/build-aux/pandoc-filters/link-unix-man-references.lua`.
The references will turn into links when a mapping exists in {file}`doc/build-aux/pandoc-filters/unix-man-urls.lua`.
This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html#roles-an-in-line-extension-point), though the feature originates from [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-manpage) with slightly different syntax.

View File

@@ -103,8 +103,7 @@ Sample template for a new package review is provided below.
- [ ] `meta.maintainers` is set
- [ ] build time only dependencies are declared in `nativeBuildInputs`
- [ ] source is fetched using the appropriate function
- [ ] the list of `phases` is not overridden
- [ ] when a phase (like `installPhase`) is overridden, it starts with `runHook preInstall` and ends with `runHook postInstall` (see the sketch after this list).
- [ ] phases are respected
- [ ] patches that are remotely available are fetched with `fetchpatch`
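For reference, an overridden phase that respects the hooks looks like this (the install command is illustrative):

```nix
installPhase = ''
  runHook preInstall
  install -Dm755 mytool $out/bin/mytool
  runHook postInstall
'';
```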
##### Possible improvements
@@ -122,10 +121,10 @@ Reviewing process:
- [CODEOWNERS](https://help.github.com/articles/about-codeowners/) will make GitHub notify users based on the submitted changes, but it can happen that it misses some of the package maintainers.
- Ensure that the module tests, if any, are succeeding.
- Ensure that the introduced options are correct.
- Type should be appropriate (string-related types differ in their merging capabilities; `loaOf` and `string` types are deprecated).
- Type should be appropriate (string-related types differ in their merging capabilities; `optionSet` and `string` types are deprecated).
- Description, default and example should be provided.
- Ensure that option changes are backward compatible.
- `mkRenamedOptionModuleWith` provides a way to make option changes backward compatible (see the sketch after this list).
- `mkRenamedOptionModule` and `mkAliasOptionModule` functions provide a way to make option changes backward compatible.
- Ensure that removed options are declared with `mkRemovedOptionModule`
- Ensure that changes that are not backward compatible are mentioned in release notes.
- Ensure that documentation affected by the change is updated.
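A sketch of backward-compatible option changes (the option paths are hypothetical):

```nix
{
  imports = [
    # renamed option, recorded as changed in release 22.05
    (lib.mkRenamedOptionModuleWith {
      sinceRelease = 2205;
      from = [ "services" "foo" "user" ];
      to = [ "services" "foo" "runAs" ];
    })
    # removed option, with instructions for users
    (lib.mkRemovedOptionModule [ "services" "foo" "legacyBackend" ]
      "The legacy backend is no longer supported.")
  ];
}
```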
@@ -157,7 +156,7 @@ Reviewing process:
- Ensure that the module tests, if any, are succeeding.
- Ensure that the introduced options are correct.
- Type should be appropriate (string-related types differ in their merging capabilities; `loaOf` and `string` types are deprecated).
- Type should be appropriate (string-related types differ in their merging capabilities; `optionSet` and `string` types are deprecated).
- Description, default and example should be provided.
- Ensure that module `meta` field is present
- Maintainers should be declared in `meta.maintainers`.

View File

@@ -43,13 +43,13 @@
- nixpkgs:
- update pkg
- `nix-env -iA pkg-attribute-name -f <path to your local nixpkgs folder>`
- `nix-env -i pkg-name -f <path to your local nixpkgs folder>`
- add pkg
- Make sure it's in `pkgs/top-level/all-packages.nix`
- `nix-env -iA pkg-attribute-name -f <path to your local nixpkgs folder>`
- `nix-env -i pkg-name -f <path to your local nixpkgs folder>`
- _If you don't want to install pkg in your profile_.
- `nix-build -A pkg-attribute-name <path to your local nixpkgs folder>` and check results in the folder `result`. It will appear in the same directory where you did `nix-build`.
- If you installed your package with `nix-env`, you can run `nix-env -e pkg-name` where `pkg-name` is as reported by `nix-env -q` to uninstall it from your system.
- `nix-build -A pkg-attribute-name <path to your local nixpkgs folder>/default.nix` and check results in the folder `result`. It will appear in the same directory where you did `nix-build`.
- If you did `nix-env -i pkg-name` you can do `nix-env -e pkg-name` to uninstall it from your system.
- NixOS and its modules:
- You can add a new module to your NixOS configuration file (usually it's `/etc/nixos/configuration.nix`) and run `sudo nixos-rebuild test -I nixpkgs=<path to your local nixpkgs folder> --fast`.
@@ -98,7 +98,7 @@ We use jbidwatcher as an example for a discontinued project here.
1. Create a new branch for your change, e.g. `git checkout -b jbidwatcher`
1. Remove the actual package including its directory, e.g. `rm -rf pkgs/applications/misc/jbidwatcher`
1. Remove the package from the list of all packages (`pkgs/top-level/all-packages.nix`).
1. Add an alias for the package name in `pkgs/top-level/aliases.nix` (There is also `pkgs/applications/editors/vim/plugins/aliases.nix`. Package sets typically do not have aliases, so we can't add them there.)
1. Add an alias for the package name in `pkgs/top-level/aliases.nix` (There is also `pkgs/misc/vim-plugins/aliases.nix`. Package sets typically do not have aliases, so we can't add them there.)
For example in this case:
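A sketch of such an alias entry (the message wording and date are illustrative):

```nix
jbidwatcher = throw "jbidwatcher was discontinued in march 2021"; # added 2021-03-15
```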
@@ -227,7 +227,7 @@ digraph {
}
```
[This GitHub Action](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/periodic-merge-6h.yml) brings changes from `master` to `staging-next` and from `staging-next` to `staging` every 6 hours.
[This GitHub Action](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/merge-staging.yml) brings changes from `master` to `staging-next` and from `staging-next` to `staging` every 6 hours.
### Master branch {#submitting-changes-master-branch}
@@ -246,21 +246,11 @@ If the branch is already in a broken state, please refrain from adding extra new
### Stable release branches {#submitting-changes-stable-release-branches}
The same staging workflow applies to stable release branches, but the main branch is called `release-*` instead of `master`.
For cherry-picking a commit to a stable release branch (“backporting”), use `git cherry-pick -x <original commit>` so that the original commit id is included in the commit message.
Example branch names: `release-21.11`, `staging-21.11`, `staging-next-21.11`.
Add a reason for the backport by using `git cherry-pick -xe <original commit>` instead when it is not obvious from the original commit message. It is not needed when it's a minor version update that includes security and bug fixes but don't add new features or when the commit fixes an otherwise broken package.
Most changes added to the stable release branches are cherry-picked (“backported”) from the `master` and staging branches.
#### Automatically backporting a Pull Request {#submitting-changes-stable-release-branches-automatic-backports}
Assign label `backport <branch>` (e.g. `backport release-21.11`) to the PR and a backport PR is automatically created after the PR is merged.
#### Manually backporting changes {#submitting-changes-stable-release-branches-manual-backports}
Cherry-pick changes via `git cherry-pick -x <original commit>` so that the original commit id is included in the commit message.
Add a reason for the backport when it is not obvious from the original commit message. You can do this by cherry picking with `git cherry-pick -xe <original commit>`, which allows editing the commit message. This is not needed for minor version updates that include security and bug fixes but don't add new features or when the commit fixes an otherwise broken package.
For backporting Pull Requests to stable branches, assign label `backport <branch>` to the original Pull Requests and automation should take care of the rest once the Pull Requests is merged.
Here is an example of a cherry-picked commit message with good reason description:

View File

@@ -1474,7 +1474,7 @@ lib.attrsets.zipAttrsWith
<section xml:id="function-library-lib.attrsets.zipAttrs">
<title><function>lib.attrsets.zipAttrs</function></title>
<subtitle><literal>zipAttrs :: [ AttrSet ] -> AttrSet</literal>
<subtitle><literal>zipAttrsWith :: [ AttrSet ] -> AttrSet</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.zipAttrs" />

View File

@@ -1,10 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="chap-hooks">
<title>Hooks reference</title>
<para>
Nixpkgs has several hook packages that augment the stdenv phases.
</para>
<xi:include href="./postgresql-test-hook.section.xml" />
</chapter>

View File

@@ -1,59 +0,0 @@
# `postgresqlTestHook` {#sec-postgresqlTestHook}
This hook starts a PostgreSQL server during the `checkPhase`. Example:
```nix
{ stdenv, postgresql, postgresqlTestHook }:
stdenv.mkDerivation {
# ...
checkInputs = [
postgresql
postgresqlTestHook
];
}
```
If you use a custom `checkPhase`, remember to add the `runHook` calls:
```nix
checkPhase = ''
  runHook preCheck
  # ... your tests
  runHook postCheck
'';
```
## Variables {#sec-postgresqlTestHook-variables}
The hook logic will read a number of variables and set them to a default value if unset or empty.
Exported variables:
- `PGDATA`: location of server files.
- `PGHOST`: location of UNIX domain socket directory; the default `host` in a connection string.
- `PGUSER`: user to create / log in with, default: `test_user`.
- `PGDATABASE`: database name, default: `test_db`.
Bash-only variables:
- `postgresqlTestUserOptions`: SQL options to use when creating the `$PGUSER` role, default: `LOGIN`.
- `postgresqlTestSetupSQL`: SQL commands to run as database administrator after startup, default: statements that create `$PGUSER` and `$PGDATABASE`.
- `postgresqlTestSetupCommands`: bash commands to run after database start, defaults to running `$postgresqlTestSetupSQL` as database administrator.
- `postgresqlEnableTCP`: set to `1` to enable TCP listening. Flaky; not recommended.
- `postgresqlStartCommands`: defaults to `pg_ctl start`.
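For example, a test suite that needs a superuser role could override the default user options in its derivation (a sketch; the extra rights are illustrative):

```nix
stdenv.mkDerivation {
  # ...
  checkInputs = [
    postgresql
    postgresqlTestHook
  ];
  # default is just LOGIN; grant more rights to $PGUSER
  postgresqlTestUserOptions = "LOGIN SUPERUSER";
}
```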
## TCP and the Nix sandbox {#sec-postgresqlTestHook-tcp}
`postgresqlEnableTCP` relies on network sandboxing, which is not available on macOS and some custom Nix installations, resulting in flaky tests.
For this reason, it is disabled by default.
The preferred solution is to make the test suite use a UNIX domain socket connection. This is the default behavior when no `host` connection parameter is provided.
Some test suites hardcode a value for `host` though, so a patch may be required. If you can upstream the patch, you can make `host` default to the `PGHOST` environment variable when set. Otherwise, you can patch it locally to omit the `host` connection string parameter altogether.
::: {.note}
The error `libpq: failed (could not receive data from server: Connection refused` is generally an indication that the test suite is trying to connect through TCP.
:::

View File

@@ -74,7 +74,7 @@ there are 3 steps, frontend dependencies (javascript), backend dependencies (eli
##### mixRelease - Frontend dependencies (javascript) {#mix-release-javascript-deps}
For phoenix projects, inside of nixpkgs you can either use yarn2nix (mkYarnModule) or node2nix. An example with yarn2nix can be found [here](https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/web-apps/plausible/default.nix#L39). An example with node2nix will follow. To package something outside of nixpkgs, you have alternatives like [npmlock2nix](https://github.com/nix-community/npmlock2nix) or [nix-npm-buildpackage](https://github.com/serokell/nix-npm-buildpackage)
for phoenix projects, inside of nixpkgs you can either use yarn2nix (mkYarnModule) or node2nix. An example with yarn2nix can be found [here](https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/web-apps/plausible/default.nix#L39). An example with node2nix will follow. To package something outside of nixpkgs, you have alternatives like [npmlock2nix](https://github.com/nix-community/npmlock2nix) or [nix-npm-buildpackage](https://github.com/serokell/nix-npm-buildpackage)
##### mixRelease - backend dependencies (mix) {#mix-release-mix-deps}
@@ -82,13 +82,13 @@ There are 2 ways to package backend dependencies. With mix2nix and with a fixed-
###### mix2nix {#mix2nix}
`mix2nix` is a CLI tool available in nixpkgs. It will generate a nix expression from a mix.lock file. It is quite standard in the 2nix tool series.
mix2nix is a CLI tool available in nixpkgs. It will generate a nix expression from a mix.lock file. It is quite standard in the 2nix tool series.
Note that currently mix2nix can't handle git dependencies inside the mix.lock file. If you have git dependencies, you can either add them manually (see [example](https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/pleroma/default.nix#L20)) or use the FOD method.
The advantage of using mix2nix is that nix will know your whole dependency graph. On a dependency update, this won't trigger a full rebuild and download of all the dependencies, where FOD will do so.
Practical steps:
practical steps:
- run `mix2nix > mix_deps.nix` in the upstream repo.
- pass `mixNixDeps = with pkgs; import ./mix_deps.nix { inherit lib beamPackages; };` as an argument to mixRelease.
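Put together, a sketch of a `mixRelease` call using the generated file (the project name is hypothetical; `lib` and `beamPackages` are assumed to be in scope):

```nix
beamPackages.mixRelease {
  pname = "my-phoenix-app";
  version = "0.1.0";
  src = ./.;
  mixNixDeps = import ./mix_deps.nix { inherit lib beamPackages; };
}
```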
@@ -280,30 +280,6 @@ mkShell {
}
```
### Using an overlay
If you need to use an overlay to change some attributes of a derivation, e.g. if you need a bugfix from a version that is not yet available in nixpkgs, you can override attributes such as `version` (and the corresponding `sha256`) and then use this overlay in your development environment:
#### `shell.nix`
```nix
let
elixir_1_13_1_overlay = (self: super: {
elixir_1_13 = super.elixir_1_13.override {
version = "1.13.1";
sha256 = "0z0b1w2vvw4vsnb99779c2jgn9bgslg7b1pmd9vlbv02nza9qj5p";
};
});
pkgs = import <nixpkgs> { overlays = [ elixir_1_13_1_overlay ]; };
in
with pkgs;
mkShell {
buildInputs = [
elixir_1_13
];
}
```
#### Elixir - Phoenix project {#elixir---phoenix-project}
Here is an example `shell.nix`.
@@ -315,10 +291,10 @@ let
# define packages to install
basePackages = [
git
# replace with beam.packages.erlang.elixir_1_13 if you need
# replace with beam.packages.erlang.elixir_1_11 if you need
beam.packages.erlang.elixir
nodejs
postgresql_14
postgresql_13
# only used for frontend dependencies
# you are free to use yarn2nix as well
nodePackages.node2nix
@@ -336,11 +312,10 @@ let
mkdir -p .nix-mix .nix-hex
export MIX_HOME=$PWD/.nix-mix
export HEX_HOME=$PWD/.nix-mix
# make hex from Nixpkgs available
# `mix local.hex` will install hex into MIX_HOME and should take precedence
export MIX_PATH="${beam.packages.erlang.hex}/lib/erlang/lib/hex/ebin"
export PATH=$MIX_HOME/bin:$HEX_HOME/bin:$PATH
export LANG=C.UTF-8
# TODO: not sure how to make hex available without installing it afterwards.
mix local.hex --if-missing
export LANG=en_US.UTF-8
# keep your shell history in iex
export ERL_AFLAGS="-kernel shell_history enabled"

View File

@@ -1,49 +0,0 @@
# CHICKEN {#sec-chicken}
[CHICKEN](https://call-cc.org/) is a
[R⁵RS](https://schemers.org/Documents/Standards/R5RS/HTML/)-compliant Scheme
compiler. It includes an interactive mode and a custom package format, "eggs".
## Using Eggs
Eggs described in nixpkgs are available inside the
`chickenPackages.chickenEggs` attrset. Including an egg as a build input is
done in the typical Nix fashion. For example, to include support for [SRFI
189](https://srfi.schemers.org/srfi-189/srfi-189.html) in a derivation, one
might write:
```nix
buildInputs = [
chicken
chickenPackages.chickenEggs.srfi-189
];
```
Both `chicken` and its eggs have a setup hook which configures the environment
variables `CHICKEN_INCLUDE_PATH` and `CHICKEN_REPOSITORY_PATH`.
## Updating Eggs
nixpkgs only knows about a subset of all published eggs. It uses
[egg2nix](https://github.com/the-kenny/egg2nix) to generate a
package set from a list of eggs to include.
The package set is regenerated by running the following shell commands:
```
$ nix-shell -p chickenPackages.egg2nix
$ cd pkgs/development/compilers/chicken/5/
$ egg2nix eggs.scm > eggs.nix
```
## Adding Eggs
When we run `egg2nix`, we obtain one collection of eggs with
mutually-compatible versions. This means that when we add new eggs, we may
need to update existing eggs. To keep those separate, follow the procedure for
updating eggs before including more eggs.
To include more eggs, edit `pkgs/development/compilers/chicken/5/eggs.scm`.
The first section of this file lists eggs which are required by `egg2nix`
itself; all other eggs go into the second section. After editing, follow the
procedure for updating eggs.

View File

@@ -29,8 +29,7 @@ The recommended way of defining a derivation for a Coq library, is to use the `c
* `releaseRev` (optional, defaults to `(v: v)`), provides a default mapping from release names to revision hashes/branch names/tags,
* `displayVersion` (optional), provides a way to alter the computation of `name` from `pname`, by explaining how to display version numbers,
* `namePrefix` (optional, defaults to `[ "coq" ]`), provides a way to alter the computation of `name` from `pname`, by explaining which dependencies must occur in `name`,
* `extraNativeBuildInputs` (optional): by default `nativeBuildInputs` contains only `coq`; this allows adding more native build inputs (`nativeBuildInputs` are executables, while `buildInputs` are libraries and dependencies),
* `extraBuildInputs` (optional): this allows adding more build inputs,
* `extraBuildInputs` (optional): by default `buildInputs` contains only `coq`; this allows adding more build inputs,
* `mlPlugin` (optional, defaults to `false`). Some extensions (plugins) might require OCaml and sometimes other OCaml packages. Standard dependencies can be added by setting the current option to `true`. For finer-grained control, the `coq.ocamlPackages` attribute can be used in `extraBuildInputs` to depend on the same package set Coq was built against.
* `useDune2ifVersion` (optional, defaults to `(x: false)`) uses Dune2 to build the package if the provided predicate evaluates to true on the version, e.g. `useDune2ifVersion = versions.isGe "1.1"` will use dune if the version of the package is greater than or equal to `"1.1"`,
* `useDune2` (optional, defaults to `false`) uses Dune2 to build the package if set to true; the presence of this attribute overrides the behavior of the previous one.
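A sketch tying some of these arguments together (the package details are hypothetical and the hash a placeholder):

```nix
mkCoqDerivation {
  pname = "my-coq-lib";
  owner = "my-github-org";
  defaultVersion = "1.0.0";
  release."1.0.0".sha256 = "...";
  mlPlugin = true;  # the library ships an OCaml plugin
}
```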

View File

@@ -1,34 +0,0 @@
# CUDA {#cuda}
CUDA-only packages are stored in the `cudaPackages` packages set. This set
includes the `cudatoolkit`, portions of the toolkit in separate derivations,
`cudnn`, `cutensor` and `nccl`.
A package set is available for each CUDA version, so for example
`cudaPackages_11_6`. Within each set is a matching version of the above listed
packages. Additionally, other versions of the packages that are packaged and
compatible are available as well. For example, there can be a
`cudaPackages.cudnn_8_3_2` package.
To use one or more CUDA packages in an expression, give the expression a `cudaPackages` parameter, and in case CUDA is optional
```nix
cudaSupport ? false,
cudaPackages ? { },
```
When using `callPackage`, you can choose to pass in a different variant, e.g.
when a different version of the toolkit suffices
```nix
mypkg = callPackage ./mypkg.nix { cudaPackages = cudaPackages_11_5; } # ./mypkg.nix is the package expression taking cudaPackages
```
If another version of, say, `cudnn` or `cutensor` is needed, you can override the
package set to make it the default. This guarantees you get a consistent package
set.
```nix
mypkg = let
  cudaPackages = cudaPackages_11_5.overrideScope' (final: prev: {
    cudnn = prev.cudnn_8_3_2;
  });
in callPackage ./mypkg.nix { inherit cudaPackages; };
```

View File

@@ -71,52 +71,34 @@ The `dotnetCorePackages.sdk` contains both a runtime and the full sdk of a given
To package Dotnet applications, you can use `buildDotnetModule`. This has similar arguments to `stdenv.mkDerivation`, with the following additions:
* `projectFile` has to be used for specifying the dotnet project file relative to the source root. These usually have `.sln` or `.csproj` file extensions. This can be an array of multiple projects as well.
* `nugetDeps` has to be used to specify the NuGet dependency file. Unfortunately, these cannot be deterministically fetched without a lockfile. A script to fetch these is available as `passthru.fetch-deps`. This file can also be generated manually using `nuget-to-nix` tool, which is available in nixpkgs.
* `packNupkg` is used to pack project as a `nupkg`, and installs it to `$out/share`. If set to `true`, the derivation can be used as a dependency for another dotnet project by adding it to `projectReferences`.
* `projectReferences` can be used to resolve `ProjectReference` project items. Referenced projects can be packed with `buildDotnetModule` by setting the `packNupkg = true` attribute and passing a list of derivations to `projectReferences`. Since we are sharing referenced projects as NuGets they must be added to csproj/fsproj files as `PackageReference` as well.
For example, if your project has a local dependency:
```xml
<ProjectReference Include="../foo/bar.fsproj" />
```
To enable discovery through `projectReferences` you would need to add:
```xml
<ProjectReference Include="../foo/bar.fsproj" />
<PackageReference Include="bar" Version="*" Condition=" '$(ContinuousIntegrationBuild)'=='true' "/>
```
* `executables` is used to specify which executables get wrapped to `$out/bin`, relative to `$out/lib/$pname`. If this is unset, all executables generated will get installed. If you do not want to install any, set this to `[]`. This gets done in the `preFixup` phase.
* `projectFile` has to be used for specifying the dotnet project file relative to the source root. These usually have `.sln` or `.csproj` file extensions.
* `nugetDeps` has to be used to specify the NuGet dependency file. Unfortunately, these cannot be deterministically fetched without a lockfile. This file should be generated using `nuget-to-nix` tool, which is available in nixpkgs.
* `executables` is used to specify which executables get wrapped to `$out/bin`, relative to `$out/lib/$pname`. If this is unset, all executables generated will get installed. If you do not want to install any, set this to `[]`.
* `runtimeDeps` is used to wrap libraries into `LD_LIBRARY_PATH`. This is how dotnet usually handles runtime dependencies.
* `buildType` is used to change the type of build. Possible values are `Release`, `Debug`, etc. By default, this is set to `Release`.
* `dotnet-sdk` is useful in cases where you need to change what dotnet SDK is being used.
* `dotnet-runtime` is useful in cases where you need to change what dotnet runtime is being used. This can be either a regular dotnet runtime, or an aspnetcore.
* `dotnet-test-sdk` is useful in cases where unit tests expect a different dotnet SDK. By default, this is set to the `dotnet-sdk` attribute.
* `testProjectFile` is useful in cases where the regular project file does not contain the unit tests. It gets restored and built, but not installed. You may need to regenerate your nuget lockfile after setting this.
* `testProjectFile` is useful in cases where the regular project file does not contain the unit tests. By default, this is set to the `projectFile` attribute.
* `disabledTests` is used to disable running specific unit tests. This gets passed as: `dotnet test --filter "FullyQualifiedName!={}"`, to ensure compatibility with all unit test frameworks.
* `dotnetRestoreFlags` can be used to pass flags to `dotnet restore`.
* `dotnetBuildFlags` can be used to pass flags to `dotnet build`.
* `dotnetTestFlags` can be used to pass flags to `dotnet test`. Used only if `doCheck` is set to `true`.
* `dotnetTestFlags` can be used to pass flags to `dotnet test`.
* `dotnetInstallFlags` can be used to pass flags to `dotnet install`.
* `dotnetPackFlags` can be used to pass flags to `dotnet pack`. Used only if `packNupkg` is set to `true`.
* `dotnetFlags` can be used to pass flags to all of the above phases.
When packaging a new application, you need to fetch its dependencies. You can set `nugetDeps` to an empty string to make the derivation temporarily evaluate, and then run `nix-build -A package.passthru.fetch-deps` to generate its dependency fetching script. After running the script, you should have the location of the generated lockfile printed to the console. This can be copied to a stable directory. Note that if either `projectFile` or `nugetDeps` are unset, this script cannot be generated!
Here is an example `default.nix`, using some of the previously discussed arguments:
```nix
{ lib, buildDotnetModule, dotnetCorePackages, ffmpeg }:
let
referencedProject = import ../../bar { ... };
in buildDotnetModule rec {
buildDotnetModule rec {
pname = "someDotnetApplication";
version = "0.1";
src = ./.;
projectFile = "src/project.sln";
nugetDeps = ./deps.nix; # File generated with `nix-build -A package.passthru.fetch-deps`.
projectReferences = [ referencedProject ]; # `referencedProject` must contain `nupkg` in the folder structure.
nugetDeps = ./deps.nix; # File generated with `nuget-to-nix path/to/src > deps.nix`.
dotnet-sdk = dotnetCorePackages.sdk_3_1;
dotnet-runtime = dotnetCorePackages.net_5_0;
@@ -125,8 +107,6 @@ in buildDotnetModule rec {
executables = [ "foo" ]; # This wraps "$out/lib/$pname/foo" to `$out/bin/foo`.
executables = []; # Don't install any executables.
packNupkg = true; # This packs the project as "foo-0.1.nupkg" at `$out/share`.
runtimeDeps = [ ffmpeg ]; # This will wrap ffmpeg's library path into `LD_LIBRARY_PATH`.
}
```

View File

@@ -15,12 +15,12 @@ Modes of use of `emscripten`:
If you want to work with `emcc`, `emconfigure` and `emmake` as you are used to from Ubuntu and similar distributions, you can use these commands:
* `nix-env -f "<nixpkgs>" -iA emscripten`
* `nix-env -i emscripten`
* `nix-shell -p emscripten`
* **Declarative usage**:
This mode is far more powerful since it makes use of `nix` for dependency management of emscripten libraries and targets by using the `mkDerivation` which is implemented by `pkgs.emscriptenStdenv` and `pkgs.buildEmscriptenPackage`. The source for the packages is in `pkgs/top-level/emscripten-packages.nix` and the abstraction behind it in `pkgs/development/em-modules/generic/default.nix`. From the root of the nixpkgs repository:
This mode is far more powerful since it makes use of `nix` for dependency management of emscripten libraries and targets by using the `mkDerivation` which is implemented by `pkgs.emscriptenStdenv` and `pkgs.buildEmscriptenPackage`. The source for the packages is in `pkgs/top-level/emscripten-packages.nix` and the abstraction behind it in `pkgs/development/em-modules/generic/default.nix`.
* build and install all packages:
* `nix-env -iA emscriptenPackages`

View File

@@ -42,21 +42,7 @@ Unlike other libraries mentioned in this section, GdkPixbuf only supports a sing
### Icons {#ssec-gnome-icons}
When an application uses icons, an icon theme should be available in `XDG_DATA_DIRS` during runtime. The package for the default, icon-less [hicolor-icon-theme](https://www.freedesktop.org/wiki/Software/icon-theme/) (should be propagated by every icon theme) contains [a setup hook](#ssec-gnome-hooks-hicolor-icon-theme) that will pick up icon themes from `buildInputs` and add their datadirs to the `XDG_ICON_DIRS` environment variable (this is Nixpkgs-specific, not actually an XDG standard variable). Unfortunately, relying on that would mean every user has to download the theme included in the package expression no matter their preference. For that reason, we leave the installation of the icon theme to the user. If you use one of the desktop environments, you probably already have an icon theme installed.
In the rare case you need to use icons from dependencies (e.g. when an app forces an icon theme), you can use the following to pick them up:
```nix
buildInputs = [
pantheon.elementary-icon-theme
];
preFixup = ''
gappsWrapperArgs+=(
# The icon theme is hardcoded.
--prefix XDG_DATA_DIRS : "$XDG_ICON_DIRS"
)
'';
```
When an application uses icons, an icon theme should be available in `XDG_DATA_DIRS` during runtime. The package for the default, icon-less [hicolor-icon-theme](https://www.freedesktop.org/wiki/Software/icon-theme/) (should be propagated by every icon theme) contains [a setup hook](#ssec-gnome-hooks-hicolor-icon-theme) that will pick up icon themes from `buildInputs` and pass them to our wrapper. Unfortunately, relying on that would mean every user has to download the theme included in the package expression no matter their preference. For that reason, we leave the installation of the icon theme to the user. If you use one of the desktop environments, you probably already have an icon theme installed.
To avoid costly file system access when locating icons, GTK, [as well as Qt](https://woboq.com/blog/qicon-reads-gtk-icon-cache-in-qt57.html), can rely on `icon-theme.cache` files from the themes' top-level directories. These files are generated using `gtk-update-icon-cache`, which is expected to be run whenever an icon is added or removed to an icon theme (typically an application icon into `hicolor` theme) and some programs do indeed run this after icon installation. However, since packages are installed into their own prefix by Nix, this would lead to conflicts. For that reason, `gtk3` provides a [setup hook](#ssec-gnome-hooks-gtk-drop-icon-theme-cache) that will clean the file from installation. Since most applications only ship their own icon that will be loaded on start-up, it should not affect them too much. On the other hand, icon themes are much larger and more widely used so we need to cache them. Because we recommend installing icon themes globally, we will generate the cache files from all packages in a profile using a NixOS module. You can enable the cache generation using `gtk.iconCache.enable` option if your desktop environment does not already do that.
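In a NixOS configuration this is a one-line sketch:

```nix
{
  gtk.iconCache.enable = true;
}
```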
@@ -106,13 +92,13 @@ For convenience, it also adds `dconf.lib` for a GIO module implementing a GSetti
- []{#ssec-gnome-hooks-glib} `glib` setup hook will populate `GSETTINGS_SCHEMAS_PATH` and then `wrapGAppsHook` will prepend it to `XDG_DATA_DIRS`.
- []{#ssec-gnome-hooks-gdk-pixbuf} `gdk-pixbuf` setup hook will populate `GDK_PIXBUF_MODULE_FILE` with the path to the biggest `loaders.cache` file from the dependencies containing [GdkPixbuf loaders](#ssec-gnome-gdk-pixbuf-loaders). This works fine when there are only two packages containing loaders (`gdk-pixbuf` and e.g. `librsvg`) – it will choose the second one, reasonably expecting that it will be bigger since it describes an extra loader in addition to the default ones. But when there are more than two loader packages, this logic will break. One possible solution would be constructing a custom cache file for each package containing a program, like the `services/x11/gdk-pixbuf.nix` NixOS module does. `wrapGAppsHook` copies the `GDK_PIXBUF_MODULE_FILE` environment variable into the produced wrapper.
- []{#ssec-gnome-hooks-gdk-pixbuf} `gdk-pixbuf` setup hook will populate `GDK_PIXBUF_MODULE_FILE` with the path to the biggest `loaders.cache` file from the dependencies containing [GdkPixbuf loaders](ssec-gnome-gdk-pixbuf-loaders). This works fine when there are only two packages containing loaders (`gdk-pixbuf` and e.g. `librsvg`) – it will choose the second one, reasonably expecting that it will be bigger since it describes an extra loader in addition to the default ones. But when there are more than two loader packages, this logic will break. One possible solution would be constructing a custom cache file for each package containing a program, like the `services/x11/gdk-pixbuf.nix` NixOS module does. `wrapGAppsHook` copies the `GDK_PIXBUF_MODULE_FILE` environment variable into the produced wrapper.
- []{#ssec-gnome-hooks-gtk-drop-icon-theme-cache} One of `gtk3`'s setup hooks will remove `icon-theme.cache` files from packages' icon theme directories to avoid conflicts. Icon theme packages should prevent this with `dontDropIconThemeCache = true;`.
- []{#ssec-gnome-hooks-dconf} `dconf.lib` is a dependency of `wrapGAppsHook`, which then also adds it to the `GIO_EXTRA_MODULES` variable.
- []{#ssec-gnome-hooks-hicolor-icon-theme} `hicolor-icon-theme`'s setup hook will add icon themes to `XDG_ICON_DIRS`.
- []{#ssec-gnome-hooks-hicolor-icon-theme} `hicolor-icon-theme`'s setup hook will add icon themes to `XDG_ICON_DIRS` which is prepended to `XDG_DATA_DIRS` by `wrapGAppsHook`.
- []{#ssec-gnome-hooks-gobject-introspection} `gobject-introspection` setup hook populates `GI_TYPELIB_PATH` variable with `lib/girepository-1.0` directories of dependencies, which is then added to wrapper by `wrapGAppsHook`. It also adds `share` directories of dependencies to `XDG_DATA_DIRS`, which is intended to promote GIR files but it also [pollutes the closures](https://github.com/NixOS/nixpkgs/issues/32790) of packages using `wrapGAppsHook`.

View File

@@ -12,7 +12,8 @@ The function `buildGoModule` builds Go programs managed with Go modules. It buil
The following is an example expression using `buildGoModule`; these arguments are of special significance to the function:
- `vendorSha256`: is the hash of the output of the intermediate fetcher derivation. `vendorSha256` can also take `null` as an input. When `null` is used as a value, rather than fetching the dependencies and vendoring them, we use the vendoring included within the source repo. If you'd like to not have to update this field on dependency changes, run `go mod vendor` in your source repo and set `vendorSha256 = null;`
- `proxyVendor`: Fetches (`go mod download`) and proxies the vendor directory. This is useful if your code depends on C code and `go mod tidy` does not include the needed sources to build, or if any dependency has case-insensitive conflicts which will produce platform-dependent `vendorSha256` checksums.
- `runVend`: runs the vend command to generate the vendor directory. This is useful if your code depends on C code and `go mod tidy` does not include the needed sources to build.
- `proxyVendor`: Fetches (`go mod download`) and proxies the vendor directory. This is useful if any dependency has case-insensitive conflicts which will produce platform-dependent `vendorSha256` checksums.
```nix
pet = buildGoModule rec {
@@ -28,11 +29,14 @@ pet = buildGoModule rec {
vendorSha256 = "1879j77k96684wi554rkjxydrj8g3hpp0kvxz03sd8dmwr3lh83j";
runVend = true;
meta = with lib; {
description = "Simple command-line snippet manager, written in Go";
homepage = "https://github.com/knqyf263/pet";
license = licenses.mit;
maintainers = with maintainers; [ kalbasit ];
platforms = platforms.linux ++ platforms.darwin;
};
}
```
@@ -142,8 +146,4 @@ Removes the pre-existing vendor directory. This should only be used if the depen
### `subPackages` {#var-go-subPackages}
Specified as a string or list of strings. Limits the builder from building child packages that have not been listed. If `subPackages` is not specified, all child packages will be built.
### `excludedPackages` {#var-go-excludedPackages}
Specified as a string or list of strings. Causes the builder to skip building child packages that match any of the provided values. If `excludedPackages` is not specified, all child packages will be built.
Limits the builder from building child packages that have not been listed. If `subPackages` is not specified, all child packages will be built.
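A sketch using both attributes (the package paths are illustrative):

```nix
buildGoModule {
  # ...
  subPackages = [ "cmd/mytool" ];     # only build this child package
  excludedPackages = [ "examples" ];  # and skip these
}
```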

View File

@@ -5,7 +5,10 @@
The easiest way to get a working idris version is to install the `idris` attribute:
```ShellSession
$ nix-env -f "<nixpkgs>" -iA idris
$ # On NixOS
$ nix-env -i nixos.idris
$ # On non-NixOS
$ nix-env -i nixpkgs.idris
```
This however only provides the `prelude` and `base` libraries. To install idris with additional libraries, you can use the `idrisPackages.with-packages` function, e.g. in an overlay in `~/.config/nixpkgs/overlays/my-idris.nix`:

View File

@@ -9,10 +9,8 @@
<xi:include href="android.section.xml" />
<xi:include href="beam.section.xml" />
<xi:include href="bower.section.xml" />
<xi:include href="chicken.section.xml" />
<xi:include href="coq.section.xml" />
<xi:include href="crystal.section.xml" />
<xi:include href="cuda.section.xml" />
<xi:include href="dhall.section.xml" />
<xi:include href="dotnet.section.xml" />
<xi:include href="emscripten.section.xml" />

View File

@@ -85,7 +85,7 @@ you will still need to commit the modified version of the lock files, but at lea
Each tool has an abstraction to build just the `node_modules` (dependencies) directory. You can always use `stdenv.mkDerivation` with the `node_modules` to build the package (symlink the `node_modules` directory and then use the package build command). The `node_modules` abstraction can also be used to build some web framework frontends. For an example of this, see how [plausible](https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/web-apps/plausible/default.nix) is built: `mkYarnModules` makes the derivation containing `node_modules`, and when building the frontend you can just symlink the `node_modules` directory.
## Javascript packages inside nixpkgs {#javascript-packages-nixpkgs}
## javascript packages inside nixpkgs {#javascript-packages-nixpkgs}
The `pkgs/development/node-packages` folder contains a generated collection of
[NPM packages](https://npmjs.com/) that can be installed with the Nix package
@@ -121,14 +121,12 @@ requires `node-gyp-build`, so [we override](https://github.com/NixOS/nixpkgs/blo
};
```
### Adding and Updating Javascript packages in nixpkgs
To add a package from NPM to nixpkgs:
1. Modify `pkgs/development/node-packages/node-packages.json` to add, update
or remove package entries to have it included in `nodePackages` and
`nodePackages_latest`.
2. Run the script: `./pkgs/development/node-packages/generate.sh`.
2. Run the script: `cd pkgs/development/node-packages && ./generate.sh`.
3. Build your new package to test your changes:
`cd /path/to/nixpkgs && nix-build -A nodePackages.<new-or-updated-package>`.
To build against the latest stable Current Node.js version (e.g. 14.x):
@@ -139,26 +137,6 @@ For more information about the generation process, consult the
[README.md](https://github.com/svanderburg/node2nix) file of the `node2nix`
tool.
To update NPM packages in nixpkgs, run the same `generate.sh` script:
```sh
./pkgs/development/node-packages/generate.sh
```
#### Git protocol error
Some packages may have Git dependencies from GitHub specified with `git://`.
GitHub has
[disabled unencrypted Git connections](https://github.blog/2021-09-01-improving-git-protocol-security-github/#no-more-unauthenticated-git),
so you may see the following error when running the generate script:
`The unauthenticated git protocol on port 9418 is no longer supported`.
Use the following Git configuration to resolve the issue:
```sh
git config --global url."https://github.com/".insteadOf git://github.com/
```
## Tool specific instructions {#javascript-tool-specific}
### node2nix {#javascript-node2nix}

View File

@@ -128,7 +128,7 @@ Let's present the luarocks way first and the manual one in a second time.
### Packaging a library on luarocks {#packaging-a-library-on-luarocks}
[Luarocks.org](https://luarocks.org/) is the main repository of lua packages.
[Luarocks.org](www.luarocks.org) is the main repository of lua packages.
The site proposes two types of packages, the rockspec and the src.rock
(equivalent of a [rockspec](https://github.com/luarocks/luarocks/wiki/Rockspec-format) but with the source).
These packages can have different build types such as `cmake`, `builtin`, etc.

View File

@@ -1,49 +1,19 @@
# OCaml {#sec-language-ocaml}
## User guide {#sec-language-ocaml-user-guide}
OCaml libraries are available in attribute sets of the form `ocaml-ng.ocamlPackages_X_XX` where X is to be replaced with the desired compiler version. For example, ocamlgraph compiled with OCaml 4.12 can be found in `ocaml-ng.ocamlPackages_4_12.ocamlgraph`. The compiler itself is also located in this set, under the name `ocaml`.
If you don't care about the exact compiler version, `ocamlPackages` is a top-level alias pointing to a recent version of OCaml.
OCaml applications are usually available top-level, and not inside `ocamlPackages`. Notable exceptions are build tools that must be built with the same compiler version as the compiler you intend to use, like `dune` or `ocaml-lsp`.
To open a shell able to build a typical OCaml project, put the dependencies in `buildInputs` and add `ocamlPackages.ocaml` and `ocamlPackages.findlib` to `nativeBuildInputs` at least.
For example:
```nix
let
pkgs = import <nixpkgs> {};
# choose the ocaml version you want to use
ocamlPackages = pkgs.ocaml-ng.ocamlPackages_4_12;
in
pkgs.mkShell {
# build tools
nativeBuildInputs = with ocamlPackages; [ ocaml findlib dune_2 ocaml-lsp ];
# dependencies
buildInputs = with ocamlPackages; [ ocamlgraph ];
}
```
## Packaging guide {#sec-language-ocaml-packaging}
OCaml libraries should be installed in `$(out)/lib/ocaml/${ocaml.version}/site-lib/`. Such directories are automatically added to the `$OCAMLPATH` environment variable when building another package that depends on them or when opening a `nix-shell`.
Given that most of the OCaml ecosystem is now built with dune, nixpkgs includes a convenience build support function called `buildDunePackage` that will build an OCaml package using dune, OCaml and findlib and any additional dependencies provided as `buildInputs` or `propagatedBuildInputs`.
Here is a simple package example.
- It defines an (optional) attribute `minimalOCamlVersion` (see note below)
that will be used to throw a descriptive evaluation error if building with
an older OCaml is attempted.
- It defines an (optional) attribute `minimalOCamlVersion` that will be used to
throw a descriptive evaluation error if building with an older OCaml is
attempted.
- It uses the `fetchFromGitHub` fetcher to get its source.
- `duneVersion = "2"` ensures that Dune version 2 is used for the
build (this is the default; valid values are `"1"`, `"2"`, and `"3"`);
note that there is also a legacy `useDune2` boolean attribute:
set to `false` it corresponds to `duneVersion = "1"`; set to `true` it
corresponds to `duneVersion = "2"`. If both arguments (`duneVersion` and
`useDune2`) are given, the second one (`useDune2`) is silently ignored.
- `useDune2 = true` ensures that the latest version of Dune is used for the
build (this may become the default value in a future release).
- It sets the optional `doCheck` attribute such that tests will be run with
`dune runtest -p angstrom` after the build (`dune build -p angstrom`) is
@@ -71,7 +41,7 @@ Here is a simple package example.
buildDunePackage rec {
pname = "angstrom";
version = "0.15.0";
duneVersion = "2";
useDune2 = true;
minimalOCamlVersion = "4.04";
@@ -121,11 +91,3 @@ buildDunePackage rec {
};
}
```
A note about `minimalOCamlVersion`: a deprecated version of this argument was
spelled `minimumOCamlVersion`; setting the old attribute wrongly modifies the
derivation hash and is therefore inappropriate. As a technical debt, currently
packaged libraries may still use the old spelling: maintainers are invited to
fix this when updating packages. Mass renaming is strongly discouraged as it
would be challenging to review, difficult to test, and would cause unnecessary
rebuilds.

View File

@@ -24,10 +24,18 @@ You can test building an Octave package as follows:
$ nix-build -A octavePackages.symbolic
```
To install it into your user profile, run this command from the root of the repository:
When building Octave packages with `nix-build`, the `buildOctavePackage` function adds `octave-<octaveVersion>-` to the start of the package's name attribute.
This can be required when installing the package using `nix-env`:
```ShellSession
$ nix-env -f. -iA octavePackages.symbolic
$ nix-env -i octave-6.2.0-symbolic
```
You can also install it using the attribute name:
```ShellSession
$ nix-env -i -A octavePackages.symbolic
```
You can build Octave with packages by using the `withPackages` passed-through function.

View File

@@ -58,7 +58,13 @@ in `all-packages.nix`. You can test building a Perl package as follows:
$ nix-build -A perlPackages.ClassC3
```
To install it with `nix-env` instead: `nix-env -f. -iA perlPackages.ClassC3`.
`buildPerlPackage` adds `perl-` to the start of the name attribute, so the package above is actually called `perl-Class-C3-0.21`. So to install it, you can say:
```ShellSession
$ nix-env -i perl-Class-C3
```
(Of course you can also install using the attribute name: `nix-env -i -A perlPackages.ClassC3`.)
So what does `buildPerlPackage` do? It does the following:
@@ -129,11 +135,9 @@ This will remove the `-I` flags from the shebang line, rewrite them in the `use
Nix expressions for Perl packages can be generated (almost) automatically from CPAN. This is done by the program `nix-generate-from-cpan`, which can be installed as follows:
```ShellSession
$ nix-env -f "<nixpkgs>" -iA nix-generate-from-cpan
$ nix-env -i nix-generate-from-cpan
```
Substitute `<nixpkgs>` by the path of a nixpkgs clone to use the latest version.
This program takes a Perl module name, looks it up on CPAN, fetches and unpacks the corresponding package, and prints a Nix expression on standard output. For example:
```ShellSession

View File

@@ -764,7 +764,7 @@ and in this case the `python38` interpreter is automatically used.
### Interpreters {#interpreters}
Versions 2.7, 3.7, 3.8 and 3.9 of the CPython interpreter are available as
Versions 2.7, 3.6, 3.7, 3.8 and 3.9 of the CPython interpreter are available as
respectively `python27`, `python37`, `python38` and `python39`. The
aliases `python2` and `python3` correspond to respectively `python27` and
`python39`. The attribute `python` maps to `python2`. The PyPy interpreters
@@ -834,7 +834,6 @@ sets are
* `pkgs.python38Packages`
* `pkgs.python39Packages`
* `pkgs.python310Packages`
* `pkgs.python311Packages`
* `pkgs.pypyPackages`
and the aliases
@@ -979,31 +978,6 @@ with import <nixpkgs> {};
in (python.withPackages(ps: [ps.blaze])).env
```
#### Optional extra dependencies
Some packages define optional dependencies for additional features. With
`setuptools` this is called `extras_require` and `flit` calls it `extras-require`. A
method for supporting this is by declaring the extras of a package in its
`passthru`, e.g. in case of the package `dask`
```nix
passthru.extras-require = {
complete = [ distributed ];
};
```
and letting the package requiring the extra add the list to its dependencies
```nix
propagatedBuildInputs = [
...
] ++ dask.extras-require.complete;
```
Note this method is preferred over adding parameters to builders, as that can
result in packages depending on different variants and thereby causing
collisions.
#### `buildPythonApplication` function {#buildpythonapplication-function}
The `buildPythonApplication` function is practically the same as
@@ -1021,18 +995,18 @@ called with `callPackage` and passed `python` or `pythonPackages` (possibly
specifying an interpreter version), like this:
```nix
{ lib, python3 }:
{ lib, python3Packages }:
python3.pkgs.buildPythonApplication rec {
python3Packages.buildPythonApplication rec {
pname = "luigi";
version = "2.7.9";
src = python3.pkgs.fetchPypi {
src = python3Packages.fetchPypi {
inherit pname version;
sha256 = "035w8gqql36zlan0xjrzz9j4lh9hs0qrsgnbyw07qs7lnkvbdv9x";
};
propagatedBuildInputs = with python3.pkgs; [ tornado python-daemon ];
propagatedBuildInputs = with python3Packages; [ tornado_4 python-daemon ];
meta = with lib; {
...
@@ -1658,25 +1632,3 @@ would be:
```ShellSession
$ maintainers/scripts/update-python-libraries --target minor --commit --use-pkgs-prefix pkgs/development/python-modules/**/default.nix
```
## CPython Update Schedule
With [PEP 602](https://www.python.org/dev/peps/pep-0602/), CPython now
follows a yearly release cadence. In nixpkgs, all supported interpreters
are made available, but only the two most recent
interpreters' package sets are built; this is a compromise between being
the latest interpreter and what the majority of the Python packages support.
New CPython interpreters are released in October. Generally, it takes some
time for the majority of active Python projects to support the latest stable
interpreter. To help ease the migration for Nixpkgs users
between Python interpreters, the schedule below will be used:
| When | Event |
| --- | --- |
| After YY.11 Release | Bump CPython package set window. The latest and previous latest stable should now be built. |
| After YY.05 Release | Bump default CPython interpreter to latest stable. |
In practice, this means that the Python community will have had a stable interpreter
for ~2 months before attempting to update the package set. And this will
allow for ~7 months for Python applications to support the latest interpreter.

View File

@@ -201,19 +201,6 @@ $ nix-shell --run 'ruby -rpg -e "puts PG.library_version"'
Of course for this use-case one could also use overlays since the configuration for `pg` depends on the `postgresql` alias, but for demonstration purposes this has to suffice.
### Platform-specific gems
Right now, bundix has some issues with pre-built, platform-specific gems: [bundix PR #68](https://github.com/nix-community/bundix/pull/68).
Until this is solved, you can tell bundler to not use platform-specific gems and instead build them from source each time:
- globally (will be set in `~/.config/.bundle/config`):
```shell
$ bundle config set force_ruby_platform true
```
- locally (will be set in `<project-root>/.bundle/config`):
```shell
$ bundle config set --local force_ruby_platform true
```
### Adding a gem to the default gemset {#adding-a-gem-to-the-default-gemset}
Now that you know how to get a working Ruby environment with Nix, it's time to go forward and start actually developing with Ruby. We will first have a look at how Ruby gems are packaged on Nix. Then, we will look at how you can use development mode with your code.

View File

@@ -293,7 +293,7 @@ Test flags, e.g., `--package foo`, can be passed to `cargo test` via the
Another attribute, called `checkFlags`, is used to pass arguments to the test
binary itself, as stated
[here](https://doc.rust-lang.org/cargo/commands/cargo-test.html).
(here)[https://doc.rust-lang.org/cargo/commands/cargo-test.html].
#### Tests relying on the structure of the `target/` directory {#tests-relying-on-the-structure-of-the-target-directory}
@@ -464,8 +464,6 @@ you of the correct hash.
be disabled by setting `dontUseCargoParallelTests`.
* `cargoInstallHook`: install binaries and static/shared libraries
that were built using `cargoBuildHook`.
* `bindgenHook`: for crates which use `bindgen` as a build dependency, lets
`bindgen` find `libclang` and `libclang` find the libraries in `buildInputs`.
### Examples {#examples}

View File

@@ -6,7 +6,7 @@ Since release 15.09 there is a new TeX Live packaging that lives entirely under
- For basic usage just pull `texlive.combined.scheme-basic` for an environment with basic LaTeX support.
- It typically won't work to use separately installed packages together. Instead, you can build a custom set of packages like this. Most CTAN packages should be available:
- It typically won't work to use separately installed packages together. Instead, you can build a custom set of packages like this:
```nix
texlive.combine {

View File

@@ -18,7 +18,7 @@ Adding custom .vimrc lines can be done using the following code:
```nix
vim_configurable.customize {
# `name` optionally specifies the name of the executable and package
# `name` specifies the name of the executable and package
name = "vim-with-plugins";
vimrcConfig.customRC = ''
@@ -28,9 +28,6 @@ vim_configurable.customize {
```
This configuration is used when Vim is invoked with the command specified as name, in this case `vim-with-plugins`.
You can also omit `name` to customize Vim itself. See the
[definition of `vimUtils.makeCustomizable`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-utils.nix#L408)
for all supported options.
For Neovim the `configure` argument can be overridden to achieve the same:
@@ -289,7 +286,7 @@ Sample output1:
"reload" = buildVimPluginFrom2Nix { # created by nix#NixDerivation
name = "reload";
src = fetchgit {
url = "https://github.com/xolox/vim-reload";
url = "git://github.com/xolox/vim-reload";
rev = "0a601a668727f5b675cb1ddc19f6861f3f7ab9e1";
sha256 = "0vb832l9yxj919f5hfg6qj6bn9ni57gnjd3bj7zpq7d4iv2s4wdh";
};
@@ -312,9 +309,9 @@ Sample output2:
## Adding new plugins to nixpkgs {#adding-new-plugins-to-nixpkgs}
Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`./update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names). Plugins are listed in alphabetical order in `vim-plugin-names` using the format `[github username]/[repository]@[gitref]`. For example https://github.com/scrooloose/nerdtree becomes `scrooloose/nerdtree`.
Nix expressions for Vim plugins are stored in [pkgs/misc/vim-plugins](/pkgs/misc/vim-plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`./update.py`](/pkgs/misc/vim-plugins/update.py). This creates a [generated.nix](/pkgs/misc/vim-plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](/pkgs/misc/vim-plugins/vim-plugin-names). Plugins are listed in alphabetical order in `vim-plugin-names` using the format `[github username]/[repository]@[gitref]`. For example https://github.com/scrooloose/nerdtree becomes `scrooloose/nerdtree`.
Some plugins require overrides in order to function properly. Overrides are placed in [overrides.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/overrides.nix). Overrides are most often required when a plugin requires some dependencies, or extra steps are required during the build process. For example `deoplete-fish` requires both `deoplete-nvim` and `vim-fish`, and so the following override was added:
Some plugins require overrides in order to function properly. Overrides are placed in [overrides.nix](/pkgs/misc/vim-plugins/overrides.nix). Overrides are most often required when a plugin requires some dependencies, or extra steps are required during the build process. For example `deoplete-fish` requires both `deoplete-nvim` and `vim-fish`, and so the following override was added:
```nix
deoplete-fish = super.deoplete-fish.overrideAttrs(old: {
@@ -333,13 +330,13 @@ Finally, there are some plugins that are also packaged in nodePackages because t
Run the update script with a GitHub API token that has at least `public_repo` access. Running the script without the token is likely to result in rate-limiting (429 errors). For steps on creating an API token, please refer to [GitHub's token documentation](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/creating-a-personal-access-token).
```sh
GITHUB_API_TOKEN=my_token ./pkgs/applications/editors/vim/plugins/update.py
GITHUB_API_TOKEN=my_token ./pkgs/misc/vim-plugins/update.py
```
Alternatively, set the number of processes to a lower count to avoid rate-limiting.
```sh
./pkgs/applications/editors/vim/plugins/update.py --proc 1
./pkgs/misc/vim-plugins/update.py --proc 1
```
## Important repositories {#important-repositories}

View File

@@ -25,10 +25,8 @@
<title>Builders</title>
<xi:include href="builders/fetchers.chapter.xml" />
<xi:include href="builders/trivial-builders.chapter.xml" />
<xi:include href="builders/testers.chapter.xml" />
<xi:include href="builders/special.xml" />
<xi:include href="builders/images.xml" />
<xi:include href="hooks/index.xml" />
<xi:include href="languages-frameworks/index.xml" />
<xi:include href="builders/packages/index.xml" />
</part>
View File
@@ -78,46 +78,21 @@ If both the dependency and depending packages aren't compilers or other machine-
Finally, if the depending package is a compiler or other machine-code-producing tool, it might need dependencies that run at "emit time". This is for compilers that (regrettably) insist on being built together with their source languages' standard libraries. Assuming build != host != target, a run-time dependency of the standard library cannot be run at the compiler's build time or run time, but only at the run time of code emitted by the compiler.
Putting this all together, that means that we have dependency types of the form "X → E", which means that the dependency executes on X and emits code for E; each of X and E can be `build`, `host`, or `target`, and E can be `*` to indicate that the dependency is not a compiler-like package.
Dependency types describe the relationships that a package has with each of its transitive dependencies. You could think of attaching one or more dependency types to each of the formal parameters at the top of a package's `.nix` file, as well as to all of *their* formal parameters, and so on. Triples like `(foo, bar, baz)`, on the other hand, are a property of an instantiated derivation -- you would attach a triple `(mips-linux, mips-linux, sparc-solaris)` to a `.drv` file in `/nix/store`.
Only nine dependency types matter in practice:
Putting this all together, that means we have dependencies in the form "host → target", in at most the following six combinations:
#### Possible dependency types {#possible-dependency-types}
| Dependency type | Dependency's host platform | Dependency's target platform |
|-----------------|----------------------------|------------------------------|
| build → * | build | (none) |
| build → build | build | build |
| build → host | build | host |
| build → target | build | target |
| host → * | host | (none) |
| host → host | host | host |
| host → target | host | target |
| target → * | target | (none) |
| target → target | target | target |
| Dependency's host platform | Dependency's target platform |
|----------------------------|------------------------------|
| build | build |
| build | host |
| build | target |
| host | host |
| host | target |
| target | target |
Let's use `g++` as an example to make this table clearer. `g++` is a C++ compiler written in C. Suppose we are building `g++` with a `(build, host, target)` platform triple of `(foo, bar, baz)`. This means we are using a `foo`-machine to build a copy of `g++` which will run on a `bar`-machine and emit binaries for the `baz`-machine.
* `g++` links against the host platform's `glibc` C library, which is a "host → *" dependency with a triple of `(bar, bar, *)`. Since it is a library, not a compiler, it has no "target".
* Since `g++` is written in C, the `gcc` compiler used to compile it is a "build → host" dependency of `g++` with a triple of `(foo, foo, bar)`. This compiler runs on the build platform and emits code for the host platform.
* `gcc` links against the build platform's `glibc` C library, which is a "build → *" dependency with a triple of `(foo, foo, *)`. Since it is a library, not a compiler, it has no "target".
* This `gcc` is itself compiled by an *earlier* copy of `gcc`. This earlier copy of `gcc` is a "build → build" dependency of `g++` with a triple of `(foo, foo, foo)`. This "early `gcc`" runs on the build platform and emits code for the build platform.
* `g++` is bundled with `libgcc`, which includes a collection of target-machine routines for exception handling and software floating point emulation. `libgcc` would be a "target → *" dependency with triple `(foo, baz, *)`, because it consists of machine code which gets linked against the output of the compiler that we are building. It is a library, not a compiler, so it has no target of its own.
* `libgcc` is written in C and compiled with `gcc`. The `gcc` that compiles it will be a "build → target" dependency with triple `(foo, foo, baz)`. It gets compiled *and run* at `g++`-build-time (on platform `foo`), but must emit code for the `baz`-platform.
* `g++` allows inline assembler code, so it depends on access to a copy of the `gas` assembler. This would be a "host → target" dependency with triple `(foo, bar, baz)`.
* `g++` (and `gcc`) include a library `libgccjit.so`, which wraps the compiler in a library to create a just-in-time compiler. In nixpkgs, this library is in the `libgccjit` package; if C++ required that programs have access to a JIT, `g++` would need to add a "target → target" dependency for `libgccjit` with triple `(foo, baz, baz)`. This would ensure that the compiler ships with a copy of `libgccjit` which both executes on and generates code for the `baz`-platform.
* If `g++` itself linked against `libgccjit.so` (for example, to allow compile-time-evaluated C++ expressions), then the `libgccjit` package used to provide this functionality would be a "host → host" dependency of `g++`: it is code which runs on the `host` and emits code for execution on the `host`.
Some examples will make this table clearer. Suppose there's some package that is being built with a `(build, host, target)` platform triple of `(foo, bar, baz)`. If it has a build-time library dependency, that would be a "host → build" dependency with a triple of `(foo, foo, *)` (the target platform is irrelevant). If it needs a compiler to be built, that would be a "build → host" dependency with a triple of `(foo, foo, *)` (the target platform is irrelevant). That compiler would be built with another compiler, also a "build → host" dependency, with a triple of `(foo, foo, foo)`.
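To make this concrete in packaging terms, here is a minimal sketch (not taken from the manual; the package and dependency names are illustrative) of how two of these dependency types surface as `mkDerivation` attributes:

```nix
{ stdenv, bison, openssl }:

stdenv.mkDerivation {
  pname = "example";
  version = "0.1";
  src = ./.;
  # "build → host": bison runs on the build machine while
  # compiling for the host platform.
  nativeBuildInputs = [ bison ];
  # "host → *": openssl is a plain library linked into the host output.
  buildInputs = [ openssl ];
}
```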
### Cross packaging cookbook {#ssec-cross-cookbook}
View File
@@ -192,6 +192,10 @@ meta.hydraPlatforms = [];
If set to `true`, the package is marked as "broken", meaning that it won't show up in `nix-env -qa`, and cannot be built or installed. Such packages should be removed from Nixpkgs eventually unless they are fixed.
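For instance, a package can mark itself broken on a single platform only (a sketch; the condition is illustrative):

```nix
meta.broken = stdenv.isDarwin; # assumption: currently fails to build on Darwin
```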
### `updateWalker` {#var-meta-updateWalker}
If set to `true`, the package is tested to be updated correctly by the `update-walker.sh` script without additional settings. Such packages have `meta.version` set and their homepage (or the page specified by `meta.downloadPage`) contains a direct link to the package tarball.
## Licenses {#sec-meta-license}
The `meta.license` attribute should preferably contain a value from `lib.licenses` defined in [`nixpkgs/lib/licenses.nix`](https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix), or in-place license description of the same format if the license is unlikely to be useful in another expression.
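For example, a package under the GPLv3 would set (a sketch):

```nix
meta.license = lib.licenses.gpl3Only;
```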
View File
@@ -125,7 +125,7 @@ The extension of `PATH` with dependencies, alluded to above, proceeds according
A dependency is said to be **propagated** when some of its other-transitive (non-immediate) downstream dependencies also need it as an immediate dependency.
[^footnote-stdenv-propagated-dependencies]
It is important to note that dependencies are not necessarily propagated as the same sort of dependency that they were before, but rather as the corresponding sort so that the platform rules still line up. To determine the exact rules for dependency propagation, we start by assigning to each dependency a couple of ternary numbers (`-1` for `build`, `0` for `host`, and `1` for `target`) representing its [dependency type](#possible-dependency-types), which captures how its host and target platforms are each "offset" from the depending derivation's host and target platforms. The following table summarizes the different combinations that can be obtained:
It is important to note that dependencies are not necessarily propagated as the same sort of dependency that they were before, but rather as the corresponding sort so that the platform rules still line up. To determine the exact rules for dependency propagation, we start by assigning to each dependency a couple of ternary numbers (`-1` for `build`, `0` for `host`, and `1` for `target`), representing how its host and target platforms, respectively, are "offset" from the depending derivation's platforms. The following table summarizes the different combinations that can be obtained:
| `host → target` | attribute name | offset |
| ------------------- | ------------------- | -------- |
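As a reminder, these offsets pair up with the usual dependency attribute names roughly as follows (reconstructed from the current manual, so treat the exact list as an assumption):

```nix
# (host offset, target offset) relative to the depending derivation:
#   depsBuildBuild    -> (-1, -1)
#   nativeBuildInputs -> (-1,  0)   # "build → host"
#   depsBuildTarget   -> (-1,  1)
#   depsHostHost      -> ( 0,  0)
#   buildInputs       -> ( 0,  1)   # "host → target"
#   depsTargetTarget  -> ( 1,  1)
```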
@@ -319,16 +319,10 @@ For information about how to run the updates, execute `nix-shell maintainers/scr
## Phases {#sec-stdenv-phases}
`stdenv.mkDerivation` sets the Nix [derivation](https://nixos.org/manual/nix/stable/expressions/derivations.html#derivations)'s builder to a script that loads the stdenv `setup.sh` bash library and calls `genericBuild`. Most packaging functions rely on this default builder.
This generic command invokes a number of *phases*. Package builds are split into phases to make it easier to override specific parts of the build (e.g., unpacking the sources or installing the binaries).
The generic builder has a number of *phases*. Package builds are split into phases to make it easier to override specific parts of the build (e.g., unpacking the sources or installing the binaries). Furthermore, it allows a nicer presentation of build logs in the Nix build farm.
Each phase can be overridden in its entirety either by setting the environment variable `namePhase` to a string containing some shell commands to be executed, or by redefining the shell function `namePhase`. The former is convenient to override a phase from the derivation, while the latter is convenient from a build script. However, typically one only wants to *add* some commands to a phase, e.g. by defining `postInstall` or `preFixup`, as skipping some of the default actions may have unexpected consequences. The default script for each phase is defined in the file `pkgs/stdenv/generic/setup.sh`.
When overriding a phase, for example `installPhase`, it is important to start with `runHook preInstall` and end it with `runHook postInstall`, otherwise `preInstall` and `postInstall` will not be run. Even if you don't use them directly, it is good practice to do so anyways for downstream users who would want to add a `postInstall` by overriding your derivation.
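When writing such an override from scratch, a minimal sketch looks like this (the installed file name is illustrative):

```nix
installPhase = ''
  runHook preInstall
  mkdir -p $out/bin
  cp example $out/bin/
  runHook postInstall
'';
```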
While inside an interactive `nix-shell`, if you wanted to run all phases in the order they would be run in an actual build, you can invoke `genericBuild` yourself.
### Controlling phases {#ssec-controlling-phases}
There are a number of variables that control what phases are executed and in what order:
@@ -339,8 +333,7 @@ There are a number of variables that control what phases are executed and in wha
Specifies the phases. You can change the order in which phases are executed, or add new phases, by setting this variable. If it's not set, the default value is used, which is `$prePhases unpackPhase patchPhase $preConfigurePhases configurePhase $preBuildPhases buildPhase checkPhase $preInstallPhases installPhase fixupPhase installCheckPhase $preDistPhases distPhase $postPhases`.
It is discouraged to set this variable, as it is easy to miss some important functionality hidden in some of the less obviously needed phases (like `fixupPhase` which patches the shebang of scripts).
Usually, if you just want to add a few phases, it's more convenient to set one of the variables below (such as `preInstallPhases`).
Usually, if you just want to add a few phases, it's more convenient to set one of the variables below (such as `preInstallPhases`), as you then don't specify all the normal phases.
##### `prePhases` {#var-stdenv-prePhases}
@@ -797,13 +790,13 @@ Hook executed at the start of the distribution phase.
Hook executed at the end of the distribution phase.
## Shell functions and utilities {#ssec-stdenv-functions}
## Shell functions {#ssec-stdenv-functions}
The standard environment provides a number of useful functions.
### `makeWrapper` \<executable\> \<wrapperfile\> \<args\> {#fun-makeWrapper}
Constructs a wrapper for a program with various possible arguments. It is defined as part of 2 setup-hooks named `makeWrapper` and `makeBinaryWrapper` that implement the same bash functions. Hence, to use it you have to add `makeWrapper` to your `nativeBuildInputs`. Here's an example usage:
Constructs a wrapper for a program with various possible arguments. For example:
```bash
# adds `FOOBAR=baz` to `$out/bin/foo`'s environment
@@ -815,24 +808,9 @@ makeWrapper $out/bin/foo $wrapperfile --set FOOBAR baz
makeWrapper $out/bin/foo $wrapperfile --prefix PATH : ${lib.makeBinPath [ hello git ]}
```
There are many more kinds of arguments; they are documented in `nixpkgs/pkgs/build-support/setup-hooks/make-wrapper.sh` for the `makeWrapper` implementation and in `nixpkgs/pkgs/build-support/setup-hooks/make-binary-wrapper.sh` for the `makeBinaryWrapper` implementation.
There are many more kinds of arguments; they are documented in `nixpkgs/pkgs/build-support/setup-hooks/make-wrapper.sh`.
`wrapProgram` is a convenience function you probably want to use most of the time, implemented by both `makeWrapper` and `makeBinaryWrapper`.
Using the `makeBinaryWrapper` implementation is usually preferred, as it creates a tiny _compiled_ wrapper executable, that can be used as a shebang interpreter. This is needed mostly on Darwin, where shebangs cannot point to scripts, [due to a limitation with the `execve`-syscall](https://stackoverflow.com/questions/67100831/macos-shebang-with-absolute-path-not-working). Compiled wrappers generated by `makeBinaryWrapper` can be inspected with `less <path-to-wrapper>` - by scrolling past the binary data you should be able to see the shell command that generated the executable and there see the environment variables that were injected into the wrapper.
### `remove-references-to -t` \<storepath\> [ `-t` \<storepath\> ... ] \<file\> ... {#fun-remove-references-to}
Removes the references of the specified files to the specified store files. This is done without changing the size of the file by replacing the hash by `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee`, and should work on compiled executables. This is meant to be used to remove the dependency of the output on inputs that are known to be unnecessary at runtime. Of course, reckless usage will break the patched programs.
To use this, add `removeReferencesTo` to `nativeBuildInputs`.
As `remove-references-to` is an actual executable and not a shell function, it can be used with `find`.
Example removing all references to the compiler in the output:
```nix
postInstall = ''
find "$out" -type f -exec remove-references-to -t ${stdenv.cc} '{}' +
'';
```
`wrapProgram` is a convenience function you probably want to use most of the time.
### `substitute` \<infile\> \<outfile\> \<subs\> {#fun-substitute}
@@ -907,9 +885,9 @@ someVar=$(stripHash $name)
### `wrapProgram` \<executable\> \<makeWrapperArgs\> {#fun-wrapProgram}
Convenience function for `makeWrapper` that replaces `\<executable\>` with a wrapper that executes the original program. It takes all the same arguments as `makeWrapper`, except for `--inherit-argv0` (used by the `makeBinaryWrapper` implementation) and `--argv0` (used by both `makeWrapper` and `makeBinaryWrapper` wrapper implementations).
Convenience function for `makeWrapper` that automatically creates a sane wrapper file. It takes all the same arguments as `makeWrapper`, except for `--argv0`.
If you will apply it multiple times, it will overwrite the wrapper file and you will end up with double wrapping, which should be avoided.
It cannot be applied multiple times, since it will overwrite the wrapper file.
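A sketch of typical usage from a fixup hook (the program name, variable, and inputs are illustrative):

```nix
postFixup = ''
  wrapProgram $out/bin/foo \
    --set FOOBAR baz \
    --prefix PATH : ${lib.makeBinPath [ hello ]}
'';
```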
## Package setup hooks {#ssec-setup-hooks}
@@ -1043,7 +1021,7 @@ You can also specify a `runtimeDependencies` variable which lists dependencies t
In certain situations you may want to run the main command (`autoPatchelf`) of the setup hook on a file or a set of directories instead of unconditionally patching all outputs. This can be done by setting the `dontAutoPatchelf` environment variable to a non-empty value.
By default `autoPatchelf` will fail as soon as any ELF file requires a dependency which cannot be resolved via the given build inputs. In some situations you might prefer to just leave missing dependencies unpatched and continue to patch the rest. This can be achieved by setting the `autoPatchelfIgnoreMissingDeps` environment variable to a non-empty value. `autoPatchelfIgnoreMissingDeps` can be set to a list like `autoPatchelfIgnoreMissingDeps = [ "libcuda.so.1" "libcudart.so.1" ];` or to simply `[ "*" ]` to ignore all missing dependencies.
By default `autoPatchelf` will fail as soon as any ELF file requires a dependency which cannot be resolved via the given build inputs. In some situations you might prefer to just leave missing dependencies unpatched and continue to patch the rest. This can be achieved by setting the `autoPatchelfIgnoreMissingDeps` environment variable to a non-empty value.
The `autoPatchelf` command also recognizes a `--no-recurse` command line flag, which prevents it from recursing into subdirectories.
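Putting the pieces together, a prebuilt-binary package using the hook might look like this (a sketch; name, URL, and the ignored library are illustrative):

```nix
{ lib, stdenv, fetchurl, autoPatchelfHook }:

stdenv.mkDerivation {
  pname = "example-bin";
  version = "1.0";
  src = fetchurl {
    url = "https://example.invalid/example-bin-1.0.tar.gz";
    sha256 = lib.fakeSha256;
  };
  nativeBuildInputs = [ autoPatchelfHook ];
  # leave this dependency unresolved instead of failing the build
  autoPatchelfIgnoreMissingDeps = [ "libcuda.so.1" ];
  installPhase = ''
    runHook preInstall
    mkdir -p $out/bin
    cp example-bin $out/bin/
    runHook postInstall
  '';
}
```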
View File
@@ -77,7 +77,7 @@ In Nixpkgs, we have multiple implementations of the BLAS/LAPACK numerical linear
The Nixpkgs attribute is `openblas` for ILP64 (integer width = 64 bits) and `openblasCompat` for LP64 (integer width = 32 bits). `openblasCompat` is the default.
- [LAPACK reference](http://www.netlib.org/lapack/) (also provides BLAS and CBLAS)
- [LAPACK reference](http://www.netlib.org/lapack/) (also provides BLAS)
The Nixpkgs attribute is `lapack-reference`.
@@ -117,23 +117,7 @@ $ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH n
Intel MKL requires an `openmp` implementation when running with multiple processors. By default, `mkl` will use Intel's `iomp` implementation if no other is specified, but this is a runtime-only dependency and binary compatible with the LLVM implementation. To use that one instead, Intel recommends users set it with `LD_PRELOAD`. Note that `mkl` is only available on `x86_64-linux` and `x86_64-darwin`. Moreover, Hydra is not building and distributing pre-compiled binaries using it.
To override `blas` and `lapack` with its reference implementations (i.e. for development purposes), one can use the following overlay:
```nix
self: super:
{
blas = super.blas.override {
blasProvider = self.lapack-reference;
};
lapack = super.lapack.override {
lapackProvider = self.lapack-reference;
};
}
```
For BLAS/LAPACK switching to work correctly, all packages must depend on `blas` or `lapack`. This ensures that only one BLAS/LAPACK library is used at one time. There are two versions of BLAS/LAPACK currently in the wild, `LP64` (integer size = 32 bits) and `ILP64` (integer size = 64 bits). The attributes `blas` and `lapack` are `LP64` by default. Their `ILP64` version are provided through the attributes `blas-ilp64` and `lapack-ilp64`. Some software needs special flags or patches to work with `ILP64`. You can check if `ILP64` is used in Nixpkgs with `blas.isILP64` and `lapack.isILP64`. Some software does NOT work with `ILP64`, and derivations need to specify an assertion to prevent this. You can prevent `ILP64` from being used with the following:
For BLAS/LAPACK switching to work correctly, all packages must depend on `blas` or `lapack`. This ensures that only one BLAS/LAPACK library is used at one time. There are two versions of BLAS/LAPACK currently in the wild, `LP64` (integer size = 32 bits) and `ILP64` (integer size = 64 bits). Some software needs special flags or patches to work with `ILP64`. You can check if `ILP64` is used in Nixpkgs with `blas.isILP64` and `lapack.isILP64`. Some software does NOT work with `ILP64`, and derivations need to specify an assertion to prevent this. You can prevent `ILP64` from being used with the following:
```nix
{ stdenv, blas, lapack, ... }:

assert (!blas.isILP64) && (!lapack.isILP64);

stdenv.mkDerivation {
  # ...
}
```
View File
@@ -18,16 +18,58 @@
in
{
lib = lib.extend (final: prev: {
nixos = import ./nixos/lib { lib = final; };
nixosSystem = args:
nixosSystem = { modules, ... } @ args:
import ./nixos/lib/eval-config.nix (args // {
modules = args.modules ++ [ {
system.nixos.versionSuffix =
".${final.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}.${self.shortRev or "dirty"}";
system.nixos.revision = final.mkIf (self ? rev) self.rev;
} ];
modules =
let
vmConfig = (import ./nixos/lib/eval-config.nix
(args // {
modules = modules ++ [ ./nixos/modules/virtualisation/qemu-vm.nix ];
})).config;
vmWithBootLoaderConfig = (import ./nixos/lib/eval-config.nix
(args // {
modules = modules ++ [
./nixos/modules/virtualisation/qemu-vm.nix
{ virtualisation.useBootLoader = true; }
({ config, ... }: {
virtualisation.useEFIBoot =
config.boot.loader.systemd-boot.enable ||
config.boot.loader.efi.canTouchEfiVariables;
})
];
})).config;
moduleDeclarationFile =
let
# Even though `modules` is a mandatory argument for `nixosSystem`, it doesn't
# mean that the evaluator always keeps track of its position. If there
# are too many levels of indirection, the position gets lost at some point.
intermediatePos = builtins.unsafeGetAttrPos "modules" args;
in
if intermediatePos == null then null else intermediatePos.file;
# Add the invoking file as error message location for modules
# that don't have their own locations; presumably inline modules.
addModuleDeclarationFile =
m: if moduleDeclarationFile == null then m else {
_file = moduleDeclarationFile;
imports = [ m ];
};
in
map addModuleDeclarationFile modules ++ [
{
system.nixos.versionSuffix =
".${final.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}.${self.shortRev or "dirty"}";
system.nixos.revision = final.mkIf (self ? rev) self.rev;
system.build = {
vm = vmConfig.system.build.vm;
vmWithBootLoader = vmWithBootLoaderConfig.system.build.vm;
};
}
];
});
});
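# For reference, a minimal flake consuming this nixosSystem entry point
# might look like this (a sketch; the host name and module path are made up):
#
#   {
#     inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
#     outputs = { self, nixpkgs }: {
#       nixosConfigurations.myhost = nixpkgs.lib.nixosSystem {
#         system = "x86_64-linux";
#         modules = [ ./configuration.nix ];
#       };
#     };
#   }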
View File
@@ -2,33 +2,35 @@
rec {
/* Throw if pred is false, else return pred.
/* Print a trace message if pred is false.
Intended to be used to augment asserts with helpful error messages.
Example:
assertMsg false "nope"
stderr> error: nope
=> false
stderr> trace: nope
assert assertMsg ("foo" == "bar") "foo is not bar, silly"; ""
stderr> error: foo is not bar, silly
assert (assertMsg ("foo" == "bar") "foo is not bar, silly"); ""
stderr> trace: foo is not bar, silly
stderr> assert failed at
Type:
assertMsg :: Bool -> String -> Bool
*/
# TODO(Profpatsch): add tests that check stderr
assertMsg = pred: msg:
pred || builtins.throw msg;
if pred
then true
else builtins.trace msg false;
/* Specialized `assertMsg` for checking if val is one of the elements
of a list. Useful for checking enums.
Example:
let sslLibrary = "libressl";
let sslLibrary = "libressl"
in assertOneOf "sslLibrary" sslLibrary [ "openssl" "bearssl" ]
stderr> error: sslLibrary must be one of [
stderr> "openssl"
stderr> "bearssl"
stderr> ], but is: "libressl"
=> false
stderr> trace: sslLibrary must be one of "openssl", "bearssl", but is: "libressl"
Type:
assertOneOf :: String -> ComparableVal -> List ComparableVal -> Bool
View File
@@ -3,9 +3,9 @@
let
inherit (builtins) head tail length;
inherit (lib.trivial) id;
inherit (lib.strings) concatStringsSep concatMapStringsSep escapeNixIdentifier sanitizeDerivationName;
inherit (lib.lists) foldr foldl' concatMap concatLists elemAt all partition groupBy take foldl;
inherit (lib.trivial) and;
inherit (lib.strings) concatStringsSep sanitizeDerivationName;
inherit (lib.lists) foldr foldl' concatMap concatLists elemAt;
in
rec {
@@ -73,108 +73,11 @@ rec {
getAttrFromPath ["z" "z"] x
=> error: cannot find attribute `z.z'
*/
getAttrFromPath = attrPath:
getAttrFromPath = attrPath: set:
let errorMsg = "cannot find attribute `" + concatStringsSep "." attrPath + "'";
in attrByPath attrPath (abort errorMsg);
in attrByPath attrPath (abort errorMsg) set;
/* Update or set specific paths of an attribute set.
Takes a list of updates to apply and an attribute set to apply them to,
and returns the attribute set with the updates applied. Updates are
represented as { path = ...; update = ...; } values, where `path` is a
list of strings representing the attribute path that should be updated,
and `update` is a function that takes the old value at that attribute path
as an argument and returns the new
value it should be.
Properties:
View File
they appear in the update list
- If any but the last `path` element leads into a value that is not an
attribute set, an error is thrown
- If there is an update for an attribute path that doesn't exist,
accessing the argument in the update function causes an error, but
intermediate attribute sets are implicitly created as needed
Example:
updateManyAttrsByPath [
{
path = [ "a" "b" ];
update = old: { d = old.c; };
}
{
path = [ "a" "b" "c" ];
update = old: old + 1;
}
{
path = [ "x" "y" ];
update = old: "xy";
}
] { a.b.c = 0; }
=> { a = { b = { d = 1; }; }; x = { y = "xy"; }; }
*/
updateManyAttrsByPath = let
# When recursing into attributes, instead of updating the `path` of each
# update using `tail`, which needs to allocate an entirely new list,
# we just pass a prefix length to use and make sure to only look at the
# path without the prefix length, so that we can reuse the original list
# entries.
go = prefixLength: hasValue: value: updates:
let
# Splits updates into ones on this level (split.right)
# And ones on levels further down (split.wrong)
split = partition (el: length el.path == prefixLength) updates;
# Groups updates on further down levels into the attributes they modify
nested = groupBy (el: elemAt el.path prefixLength) split.wrong;
# Applies only nested modification to the input value
withNestedMods =
# Return the value directly if we don't have any nested modifications
if split.wrong == [] then
if hasValue then value
else
# Throw an error if there is no value. This `head` call here is
# safe, but only in this branch since `go` could only be called
# with `hasValue == false` for nested updates, in which case
# it's also always called with at least one update
let updatePath = (head split.right).path; in
throw
( "updateManyAttrsByPath: Path '${showAttrPath updatePath}' does "
+ "not exist in the given value, but the first update to this "
+ "path tries to access the existing value.")
else
# If there are nested modifications, try to apply them to the value
if ! hasValue then
# But if we don't have a value, just use an empty attribute set
# as the value, but simplify the code a bit
mapAttrs (name: go (prefixLength + 1) false null) nested
else if isAttrs value then
# If we do have a value and it's an attribute set, override it
# with the nested modifications
value //
mapAttrs (name: go (prefixLength + 1) (value ? ${name}) value.${name}) nested
else
# However if it's not an attribute set, we can't apply the nested
# modifications, throw an error
let updatePath = (head split.wrong).path; in
throw
( "updateManyAttrsByPath: Path '${showAttrPath updatePath}' needs to "
+ "be updated, but path '${showAttrPath (take prefixLength updatePath)}' "
+ "of the given value is not an attribute set, so we can't "
+ "update an attribute inside of it.");
# We get the final result by applying all the updates on this level
# after having applied all the nested updates
# We use foldl instead of foldl' so that in case of multiple updates,
# intermediate values aren't evaluated if not needed
in foldl (acc: el: el.update acc) withNestedMods split.right;
in updates: value: go 0 true value updates;
/* Return the specified attributes from a set.
Example:
@@ -251,12 +154,12 @@ rec {
foldAttrs (n: a: [n] ++ a) [] [{ a = 2; } { a = 3; }]
=> { a = [ 2 3 ]; }
*/
foldAttrs = op: nul:
foldAttrs = op: nul: list_of_attrs:
foldr (n: a:
foldr (name: o:
o // { ${name} = op n.${name} (a.${name} or nul); }
) a (attrNames n)
) {};
) {} list_of_attrs;
/* Recursively collect sets that verify a given predicate named `pred'
@@ -373,7 +276,7 @@ rec {
/* Like `mapAttrsRecursive', but it takes an additional predicate
function that tells it whether to recurse into an attribute
function that tells it whether to recursive into an attribute
set. If it returns false, `mapAttrsRecursiveCond' does not
recurse, but does apply the map function. If it returns true, it
does recurse, and does not apply the map function.
@@ -392,14 +295,14 @@ rec {
*/
mapAttrsRecursiveCond = cond: f: set:
let
recurse = path:
recurse = path: set:
let
g =
name: value:
if isAttrs value && cond value
then recurse (path ++ [name]) value
else f (path ++ [name]) value;
in mapAttrs g;
in mapAttrs g set;
in recurse [] set;
@@ -424,7 +327,7 @@ rec {
isDerivation "foobar"
=> false
*/
isDerivation = x: x.type or null == "derivation";
isDerivation = x: isAttrs x && x ? type && x.type == "derivation";
/* Converts a store path to a fake derivation. */
toDerivation = path:
@@ -466,7 +369,7 @@ rec {
value = f name (catAttrs name sets);
}) names);
/* Implementation note: Common names appear multiple times in the list of
/* Implementation note: Common names appear multiple times in the list of
names, hopefully this does not affect the system because the maximal
laziness avoids computing the same expression twice and listToAttrs does
not care about duplicated attribute names.
@@ -475,8 +378,7 @@ rec {
zipAttrsWith (name: values: values) [{a = "x";} {a = "y"; b = "z";}]
=> { a = ["x" "y"]; b = ["z"] }
*/
zipAttrsWith =
builtins.zipAttrsWith or (f: sets: zipAttrsWithNames (concatMap attrNames sets) f sets);
zipAttrsWith = f: sets: zipAttrsWithNames (concatMap attrNames sets) f sets;
/* Like `zipAttrsWith' with `(name: values: values)' as the function.
Example:
@@ -517,8 +419,8 @@ rec {
let f = attrPath:
zipAttrsWith (n: values:
let here = attrPath ++ [n]; in
if length values == 1
|| pred here (elemAt values 1) (head values) then
if tail values == []
|| pred here (head (tail values)) (head values) then
head values
else
f here values
@@ -544,7 +446,10 @@ rec {
}
*/
recursiveUpdate = recursiveUpdateUntil (path: lhs: rhs: !(isAttrs lhs && isAttrs rhs));
recursiveUpdate = lhs: rhs:
recursiveUpdateUntil (path: lhs: rhs:
!(isAttrs lhs && isAttrs rhs)
) lhs rhs;
/* Returns true if the pattern is contained in the set. False otherwise.
@@ -553,8 +458,8 @@ rec {
=> true
*/
matchAttrs = pattern: attrs: assert isAttrs pattern;
all id (attrValues (zipAttrsWithNames (attrNames pattern) (n: values:
let pat = head values; val = elemAt values 1; in
foldr and true (attrValues (zipAttrsWithNames (attrNames pattern) (n: values:
let pat = head values; val = head (tail values); in
if length values == 1 then false
else if isAttrs pat then isAttrs val && matchAttrs pat val
else pat == val
@@ -574,20 +479,6 @@ rec {
overrideExisting = old: new:
mapAttrs (name: value: new.${name} or value) old;
/* Turns a list of strings into a human-readable description of those
strings represented as an attribute path. The result of this function is
not intended to be machine-readable.
Example:
showAttrPath [ "foo" "10" "bar" ]
=> "foo.\"10\".bar"
showAttrPath []
=> "<root attribute path>"
*/
showAttrPath = path:
if path == [] then "<root attribute path>"
else concatMapStringsSep "." escapeNixIdentifier path;
/* Get a package output.
If no output is found, fallback to `.out` and then to the default.
View File
@@ -117,55 +117,8 @@ rec {
callPackageWith = autoArgs: fn: args:
let
f = if lib.isFunction fn then fn else import fn;
fargs = lib.functionArgs f;
# All arguments that will be passed to the function
# This includes automatic ones and ones passed explicitly
allArgs = builtins.intersectAttrs fargs autoArgs // args;
# A list of argument names that the function requires, but
# wouldn't be passed to it
missingArgs = lib.attrNames
# Filter out arguments that have a default value
(lib.filterAttrs (name: value: ! value)
# Filter out arguments that would be passed
(removeAttrs fargs (lib.attrNames allArgs)));
# Get a list of suggested argument names for a given missing one
getSuggestions = arg: lib.pipe (autoArgs // args) [
lib.attrNames
# Only use ones that are at most 2 edits away. While more would work,
# levenshteinAtMost is only fast for 2 or less.
(lib.filter (lib.strings.levenshteinAtMost 2 arg))
# Put strings with shorter distance first
(lib.sort (x: y: lib.strings.levenshtein x arg < lib.strings.levenshtein y arg))
# Only take the first couple results
(lib.take 3)
# Quote all entries
(map (x: "\"" + x + "\""))
];
prettySuggestions = suggestions:
if suggestions == [] then ""
else if lib.length suggestions == 1 then ", did you mean ${lib.elemAt suggestions 0}?"
else ", did you mean ${lib.concatStringsSep ", " (lib.init suggestions)} or ${lib.last suggestions}?";
errorForArg = arg:
let
loc = builtins.unsafeGetAttrPos arg fargs;
# loc' can be removed once lib/minver.nix is >2.3.4, since that includes
# https://github.com/NixOS/nix/pull/3468 which makes loc be non-null
loc' = if loc != null then loc.file + ":" + toString loc.line
else if ! lib.isFunction fn then
toString fn + lib.optionalString (lib.sources.pathIsDirectory fn) "/default.nix"
else "<unknown location>";
in "Function called without required argument \"${arg}\" at "
+ "${loc'}${prettySuggestions (getSuggestions arg)}";
# Only show the error for the first missing argument
error = errorForArg (lib.head missingArgs);
in if missingArgs == [] then makeOverridable f allArgs else throw error;
auto = builtins.intersectAttrs (lib.functionArgs f) autoArgs;
in makeOverridable f (auto // args);
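# The practical effect of the new error path above: calling a package
# function with a misspelled argument now yields a suggestion assembled
# from errorForArg and prettySuggestions. A sketch (the file path is
# illustrative):
#
#   pkgs.callPackage ({ stdenvv }: stdenvv.mkDerivation { }) { }
#   => error: Function called without required argument "stdenvv"
#      at /path/to/default.nix:1, did you mean "stdenv"?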
/* Like callPackage, but for a function that returns an attribute
View File
@@ -66,10 +66,8 @@ let
stringLength sub substring tail trace;
inherit (self.trivial) id const pipe concat or and bitAnd bitOr bitXor
bitNot boolToString mergeAttrs flip mapNullable inNixShell isFloat min max
importJSON importTOML warn warnIf warnIfNot throwIf throwIfNot checkListOfEnum
info showWarnings nixpkgsVersion version isInOldestRelease
mod compare splitByAndCompare
functionArgs setFunctionArgs isFunction toFunction
importJSON importTOML warn warnIf info showWarnings nixpkgsVersion version
mod compare splitByAndCompare functionArgs setFunctionArgs isFunction
toHexString toBaseDigits;
inherit (self.fixedPoints) fix fix' converge extends composeExtensions
composeManyExtensions makeExtensible makeExtensibleWithCustomName;
@@ -79,10 +77,9 @@ let
mapAttrs' mapAttrsToList mapAttrsRecursive mapAttrsRecursiveCond
genAttrs isDerivation toDerivation optionalAttrs
zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil
recursiveUpdate matchAttrs overrideExisting showAttrPath getOutput getBin
recursiveUpdate matchAttrs overrideExisting getOutput getBin
getLib getDev getMan chooseDevOutputs zipWithNames zip
recurseIntoAttrs dontRecurseIntoAttrs cartesianProductOfSets
updateManyAttrsByPath;
recurseIntoAttrs dontRecurseIntoAttrs cartesianProductOfSets;
inherit (self.lists) singleton forEach foldr fold foldl foldl' imap0 imap1
concatMap flatten remove findSingle findFirst any all count
optional optionals toList range partition zipListsWith zipLists
@@ -108,28 +105,26 @@ let
makeScope makeScopeWithSplicing;
inherit (self.meta) addMetaAttrs dontDistribute setName updateName
appendToName mapDerivationAttrset setPrio lowPrio lowPrioSet hiPrio
hiPrioSet getLicenseFromSpdxId;
hiPrioSet;
inherit (self.sources) pathType pathIsDirectory cleanSourceFilter
cleanSource sourceByRegex sourceFilesBySuffices
commitIdFromGitRepo cleanSourceWith pathHasContext
canCleanSource pathIsRegularFile pathIsGitRepo;
inherit (self.modules) evalModules setDefaultModuleLocation
unifyModuleSyntax applyModuleArgsIfFunction mergeModules
inherit (self.modules) evalModules unifyModuleSyntax
applyIfFunction mergeModules
mergeModules' mergeOptionDecls evalOptionValue mergeDefinitions
pushDownProperties dischargeProperties filterOverrides
sortProperties fixupOptionType mkIf mkAssert mkMerge mkOverride
mkOptionDefault mkDefault mkImageMediaOverride mkForce mkVMOverride
mkFixStrictness mkOrder mkBefore mkAfter mkAliasDefinitions
mkAliasAndWrapDefinitions fixMergeModules mkRemovedOptionModule
mkRenamedOptionModule mkRenamedOptionModuleWith
mkMergedOptionModule mkChangedOptionModule
mkRenamedOptionModule mkMergedOptionModule mkChangedOptionModule
mkAliasOptionModule mkDerivedConfig doRename;
inherit (self.options) isOption mkEnableOption mkSinkUndeclaredOptions
mergeDefaultOption mergeOneOption mergeEqualOption mergeUniqueOption
getValues getFiles
optionAttrSetToDocList optionAttrSetToDocList'
mergeDefaultOption mergeOneOption mergeEqualOption getValues
getFiles optionAttrSetToDocList optionAttrSetToDocList'
scrubOptionValue literalExpression literalExample literalDocBook
showOption showFiles unknownModule mkOption mkPackageOption;
showOption showFiles unknownModule mkOption;
inherit (self.types) isType setType defaultTypeMerge defaultFunctor
isOptionType mkOptionType;
inherit (self.asserts)
View File
@@ -10,7 +10,7 @@
* are mostly generators themselves, called with
* their respective default values; they can be reused.
*
* Tests can be found in ./tests/misc.nix
* Tests can be found in ./tests.nix
* Documentation in the manual, #sec-generators
*/
{ lib }:
@@ -108,7 +108,7 @@ rec {
* The mk* configuration attributes can generically change
* the way sections and key-value strings are generated.
*
* For more examples see the test cases in ./tests/misc.nix.
* For more examples see the test cases in ./tests.nix.
*/
toINI = {
# apply transformations (e.g. escapes) to section names
@@ -130,51 +130,6 @@ rec {
# map input to ini sections
mapAttrsToStringsSep "\n" mkSection attrsOfAttrs;
/* Generate an INI-style config file from an attrset
* specifying the global section (no header), and an
* attrset of sections to an attrset of key-value pairs.
*
* generators.toINIWithGlobalSection {} {
* globalSection = {
* someGlobalKey = "hi";
* };
* sections = {
* foo = { hi = "${pkgs.hello}"; ciao = "bar"; };
* baz = { "also, integers" = 42; };
* }
*
*> someGlobalKey=hi
*>
*> [baz]
*> also, integers=42
*>
*> [foo]
*> ciao=bar
*> hi=/nix/store/y93qql1p5ggfnaqjjqhxcw0vqw95rlz0-hello-2.10
*
* The mk* configuration attributes can generically change
* the way sections and key-value strings are generated.
*
* For more examples see the test cases in ./tests/misc.nix.
*
* If you don't need a global section, you can also use
* `generators.toINI` directly, which only takes
* the part in `sections`.
*/
toINIWithGlobalSection = {
# apply transformations (e.g. escapes) to section names
mkSectionName ? (name: libStr.escape [ "[" "]" ] name),
# format a setting line from key and value
mkKeyValue ? mkKeyValueDefault {} "=",
# allow lists as values for duplicate keys
listsAsDuplicateKeys ? false
}: { globalSection, sections }:
( if globalSection == {}
then ""
else (toKeyValue { inherit mkKeyValue listsAsDuplicateKeys; } globalSection)
+ "\n")
+ (toINI { inherit mkSectionName mkKeyValue listsAsDuplicateKeys; } sections);
/* Generate a git-config file from an attrset.
*
* It has two major differences from the regular INI format:
View File
@@ -67,11 +67,6 @@ in mkLicense lset) ({
free = false;
};
aom = {
fullName = "Alliance for Open Media Patent License 1.0";
url = "https://aomedia.org/license/patent-license/";
};
apsl20 = {
spdxId = "APSL-2.0";
fullName = "Apple Public Source License 2.0";
@@ -389,11 +384,6 @@ in mkLicense lset) ({
free = false;
};
generaluser = {
fullName = "GeneralUser GS License v2.0";
url = "http://www.schristiancollins.com/generaluser.php"; # license included in sources
};
gpl1Only = {
spdxId = "GPL-1.0-only";
fullName = "GNU General Public License v1.0 only";
@@ -470,11 +460,6 @@ in mkLicense lset) ({
spdxId = "imagemagick";
};
imlib2 = {
spdxId = "Imlib2";
fullName = "Imlib2 License";
};
inria-compcert = {
fullName = "INRIA Non-Commercial License Agreement for the CompCert verified compiler";
url = "https://compcert.org/doc/LICENSE.txt";
@@ -601,21 +586,6 @@ in mkLicense lset) ({
spdxId = "MIT";
fullName = "MIT License";
};
# https://spdx.org/licenses/MIT-feh.html
mit-feh = {
spdxId = "MIT-feh";
fullName = "feh License";
};
mitAdvertising = {
spdxId = "MIT-advertising";
fullName = "Enlightenment License (e16)";
};
mit0 = {
spdxId = "MIT-0";
fullName = "MIT No Attribution";
};
mpl10 = {
spdxId = "MPL-1.0";
View File
@@ -4,7 +4,6 @@
let
inherit (lib.strings) toInt;
inherit (lib.trivial) compare min;
inherit (lib.attrsets) mapAttrs;
in
rec {
@@ -341,15 +340,15 @@ rec {
groupBy' builtins.add 0 (x: boolToString (x > 2)) [ 5 1 2 3 4 ]
=> { true = 12; false = 3; }
*/
groupBy' = op: nul: pred: lst: mapAttrs (name: foldl op nul) (groupBy pred lst);
groupBy' = op: nul: pred: lst:
foldl' (r: e:
let
key = pred e;
in
r // { ${key} = op (r.${key} or nul) e; }
) {} lst;
groupBy = builtins.groupBy or (
pred: foldl' (r: e:
let
key = pred e;
in
r // { ${key} = (r.${key} or []) ++ [e]; }
) {});
groupBy = groupBy' (sum: e: sum ++ [e]) [];
/* Merges two lists of the same size together. If the sizes aren't the same
the merging stops at the shortest. How both lists are merged is defined
View File
@@ -78,7 +78,7 @@ rec {
2. (modern) a pattern for the platform `parsed` field.
We can inject these into a pattern for the whole of a structured platform,
We can inject these into a patten for the whole of a structured platform,
and then match that.
*/
platformMatch = platform: elem: let
@@ -99,31 +99,4 @@ rec {
availableOn = platform: pkg:
lib.any (platformMatch platform) pkg.meta.platforms &&
lib.all (elem: !platformMatch platform elem) (pkg.meta.badPlatforms or []);
/* Get the corresponding attribute in lib.licenses
from the SPDX ID.
For SPDX IDs, see
https://spdx.org/licenses
Type:
getLicenseFromSpdxId :: str -> AttrSet
Example:
lib.getLicenseFromSpdxId "MIT" == lib.licenses.mit
=> true
lib.getLicenseFromSpdxId "mIt" == lib.licenses.mit
=> true
lib.getLicenseFromSpdxId "MY LICENSE"
=> trace: warning: getLicenseFromSpdxId: No license matches the given SPDX ID: MY LICENSE
=> { shortName = "MY LICENSE"; }
*/
getLicenseFromSpdxId =
let
spdxLicenses = lib.mapAttrs (id: ls: assert lib.length ls == 1; builtins.head ls)
(lib.groupBy (l: lib.toLower l.spdxId) (lib.filter (l: l ? spdxId) (lib.attrValues lib.licenses)));
in licstr:
spdxLicenses.${ lib.toLower licstr } or (
lib.warn "getLicenseFromSpdxId: No license matches the given SPDX ID: ${licstr}"
{ shortName = licstr; }
);
}
View File
@@ -9,10 +9,12 @@ let
catAttrs
concatLists
concatMap
concatStringsSep
count
elem
filter
findFirst
flip
foldl
foldl'
getAttrFromPath
head
@@ -37,7 +39,6 @@ let
toList
types
warnIf
zipAttrsWith
;
inherit (lib.options)
isOption
@@ -46,22 +47,7 @@ let
showFiles
showOption
unknownModule
literalExpression
;
showDeclPrefix = loc: decl: prefix:
" - option(s) with prefix `${showOption (loc ++ [prefix])}' in module `${decl._file}'";
showRawDecls = loc: decls:
concatStringsSep "\n"
(sort (a: b: a < b)
(concatMap
(decl: map
(showDeclPrefix loc decl)
(attrNames decl.options)
)
decls
));
in
rec {
@@ -76,8 +62,6 @@ rec {
type: A module system type representing the module set as a submodule,
to be extended by configuration from the containing module set.
This is also available as the module argument moduleType.
extendModules: A function similar to evalModules but building on top
of the module set. Its arguments, modules and specialArgs are
added to the existing values.
@@ -90,8 +74,6 @@ rec {
override existing configuration fundamentally requires a new
fixpoint to be constructed.
This is also available as a module argument.
_module: A portion of the configuration tree which is elided from
config. It contains some values that are mostly internal to the
module system implementation.
@@ -115,33 +97,11 @@ rec {
check ? true
}:
let
withWarnings = x:
lib.warnIf (evalModulesArgs?args) "The args argument to evalModules is deprecated. Please set config._module.args instead."
lib.warnIf (evalModulesArgs?check) "The check argument to evalModules is deprecated. Please set config._module.check instead."
x;
legacyModules =
optional (evalModulesArgs?args) {
config = {
_module.args = args;
};
}
++ optional (evalModulesArgs?check) {
config = {
_module.check = mkDefault check;
};
};
regularModules = modules ++ legacyModules;
# This internal module declare internal options under the `_module'
# attribute. These options are fragile, as they are used by the
# module system to change the interpretation of modules.
#
# When extended with extendModules or moduleType, a fresh instance of
# this module is used, to avoid conflicts and allow chaining of
# extendModules.
internalModule = rec {
_file = "lib/modules.nix";
_file = ./modules.nix;
key = _file;
@@ -153,103 +113,21 @@ rec {
# support for that, in turn it's lazy in its values. This means e.g.
# a `_module.args.pkgs = import (fetchTarball { ... }) {}` won't
# start a download when `pkgs` wasn't evaluated.
type = types.lazyAttrsOf types.raw;
# Only render documentation once at the root of the option tree,
# not for all individual submodules.
internal = prefix != [];
# TODO: Change the type of this option to a submodule with a
# freeformType, so that individual arguments can be documented
# separately
description = ''
Additional arguments passed to each module in addition to ones
like <literal>lib</literal>, <literal>config</literal>,
and <literal>pkgs</literal>, <literal>modulesPath</literal>.
</para>
<para>
This option is also available to all submodules. Submodules do not
inherit args from their parent module, nor do they provide args to
their parent module or sibling submodules. The sole exception to
this is the argument <literal>name</literal> which is provided by
parent modules to a submodule and contains the attribute name
the submodule is bound to, or a unique generated name if it is
not bound to an attribute.
</para>
<para>
Some arguments are already passed by default, of which the
following <emphasis>cannot</emphasis> be changed with this option:
<itemizedlist>
<listitem>
<para>
<varname>lib</varname>: The nixpkgs library.
</para>
</listitem>
<listitem>
<para>
<varname>config</varname>: The results of all options after merging the values from all modules together.
</para>
</listitem>
<listitem>
<para>
<varname>options</varname>: The options declared in all modules.
</para>
</listitem>
<listitem>
<para>
<varname>specialArgs</varname>: The <literal>specialArgs</literal> argument passed to <literal>evalModules</literal>.
</para>
</listitem>
<listitem>
<para>
All attributes of <varname>specialArgs</varname>
</para>
<para>
Whereas option values can generally depend on other option values
thanks to laziness, this does not apply to <literal>imports</literal>, which
must be computed statically before anything else.
</para>
<para>
For this reason, callers of the module system can provide <literal>specialArgs</literal>
which are available during import resolution.
</para>
<para>
For NixOS, <literal>specialArgs</literal> includes
<varname>modulesPath</varname>, which allows you to import
extra modules from the nixpkgs package tree without having to
somehow make the module aware of the location of the
<literal>nixpkgs</literal> or NixOS directories.
<programlisting>
{ modulesPath, ... }: {
imports = [
(modulesPath + "/profiles/minimal.nix")
];
}
</programlisting>
</para>
</listitem>
</itemizedlist>
</para>
<para>
For NixOS, the default value for this option includes at least this argument:
<itemizedlist>
<listitem>
<para>
<varname>pkgs</varname>: The nixpkgs package set according to
the <option>nixpkgs.pkgs</option> option.
</para>
</listitem>
</itemizedlist>
'';
type = types.lazyAttrsOf types.unspecified;
internal = true;
description = "Arguments passed to each module.";
};
_module.check = mkOption {
type = types.bool;
internal = true;
default = true;
default = check;
description = "Whether to check whether all option definitions have matching declarations.";
};
_module.freeformType = mkOption {
type = types.nullOr types.optionType;
# Disallow merging for now, but could be implemented nicely with a `types.optionType`
type = types.nullOr (types.uniq types.attrs);
internal = true;
default = null;
description = ''
@@ -268,15 +146,14 @@ rec {
config = {
_module.args = {
inherit extendModules;
moduleType = type;
};
} // args;
};
};
merged =
let collected = collectModules
(specialArgs.modulesPath or "")
(regularModules ++ [ internalModule ])
(modules ++ [ internalModule ])
({ inherit lib options config specialArgs; } // specialArgs);
in mergeModules prefix (reverseList collected);
@@ -340,7 +217,7 @@ rec {
prefix ? [],
}:
evalModules (evalModulesArgs // {
modules = regularModules ++ modules;
modules = evalModulesArgs.modules ++ modules;
specialArgs = evalModulesArgs.specialArgs or {} // specialArgs;
prefix = extendArgs.prefix or evalModulesArgs.prefix;
});
@@ -349,7 +226,7 @@ rec {
inherit modules specialArgs;
};
result = withWarnings {
result = {
options = checked options;
config = checked (removeAttrs config [ "_module" ]);
_module = checked (config._module);
@@ -366,11 +243,11 @@ rec {
# Like unifyModuleSyntax, but also imports paths and calls functions if necessary
loadModule = args: fallbackFile: fallbackKey: m:
if isFunction m || isAttrs m then
unifyModuleSyntax fallbackFile fallbackKey (applyModuleArgsIfFunction fallbackKey m args)
unifyModuleSyntax fallbackFile fallbackKey (applyIfFunction fallbackKey m args)
else if isList m then
let defs = [{ file = fallbackFile; value = m; }]; in
throw "Module imports can't be nested lists. Perhaps you meant to remove one level of lists? Definitions: ${showDefs defs}"
else unifyModuleSyntax (toString m) (toString m) (applyModuleArgsIfFunction (toString m) (import m) args);
else unifyModuleSyntax (toString m) (toString m) (applyIfFunction (toString m) (import m) args);
/*
Collects all modules recursively into the form
@@ -431,10 +308,6 @@ rec {
in modulesPath: initialModules: args:
filterModules modulesPath (collectStructuredModules unknownModule "" initialModules args);
/* Wrap a module with a default location for reporting errors. */
setDefaultModuleLocation = file: m:
{ _file = file; imports = [ m ]; };
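  # Given that definition, usage is direct (a sketch): attach a source
  # location to an inline module for error reporting, e.g.
  #
  #   setDefaultModuleLocation "/etc/nixos/configuration.nix" {
  #     services.openssh.enable = true;
  #   }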
/* Massage a module into canonical form, that is, a set consisting
of options, config and imports attributes. */
unifyModuleSyntax = file: key: m:
@@ -467,7 +340,7 @@ rec {
config = addFreeformType (addMeta (removeAttrs m ["_file" "key" "disabledModules" "require" "imports" "freeformType"]));
};
applyModuleArgsIfFunction = key: f: args@{ config, options, lib, ... }: if isFunction f then
applyIfFunction = key: f: args@{ config, options, lib, ... }: if isFunction f then
let
# Module arguments are resolved in a strict manner when attribute set
# deconstruction is used. As the arguments are now defined with the
@@ -544,11 +417,10 @@ rec {
}
*/
byName = attr: f: modules:
zipAttrsWith (n: concatLists)
(map (module: let subtree = module.${attr}; in
if !(builtins.isAttrs subtree) then
foldl' (acc: module:
if !(builtins.isAttrs module.${attr}) then
throw ''
You're trying to declare a value of type `${builtins.typeOf subtree}'
You're trying to declare a value of type `${builtins.typeOf module.${attr}}'
rather than an attribute-set for the option
`${builtins.concatStringsSep "." prefix}'!
@@ -557,8 +429,11 @@ rec {
this option by e.g. referring to `man 5 configuration.nix'!
''
else
mapAttrs (n: f module) subtree
) modules);
acc // (mapAttrs (n: v:
(acc.${n} or []) ++ f module v
) module.${attr}
)
) {} modules;
# an attrset 'name' => list of submodules that declare name.
declsByName = byName "options" (module: option:
[{ inherit (module) _file; options = option; }]
@@ -572,63 +447,28 @@ rec {
[{ inherit (module) file; inherit value; }]
) configs;
# Convert an option tree decl to a submodule option decl
optionTreeToOption = decl:
if isOption decl.options
then decl
else decl // {
options = mkOption {
type = types.submoduleWith {
modules = [ { options = decl.options; } ];
# `null` is not intended for use by modules. It is an internal
# value that means "whatever the user has declared elsewhere".
# This might become obsolete with https://github.com/NixOS/nixpkgs/issues/162398
shorthandOnlyDefinesConfig = null;
};
};
};
resultsByName = mapAttrs (name: decls:
resultsByName = flip mapAttrs declsByName (name: decls:
# We're descending into attribute name.
let
loc = prefix ++ [name];
defns = defnsByName.${name} or [];
defns' = defnsByName'.${name} or [];
optionDecls = filter (m: isOption m.options) decls;
nrOptions = count (m: isOption m.options) decls;
in
if length optionDecls == length decls then
if nrOptions == length decls then
let opt = fixupOptionType loc (mergeOptionDecls loc decls);
in {
matchedOptions = evalOptionValue loc opt defns';
unmatchedDefns = [];
}
else if optionDecls != [] then
if all (x: x.options.type.name == "submodule") optionDecls
# Raw options can only be merged into submodules. Merging into
# attrsets might be nice, but ambiguous. Suppose we have
# attrset as a `attrsOf submodule`. User declares option
# attrset.foo.bar, this could mean:
# a. option `bar` is only available in `attrset.foo`
# b. option `foo.bar` is available in all `attrset.*`
# c. reject and require "<name>" as a reminder that it behaves like (b).
# d. magically combine (a) and (c).
# All of the above are merely syntax sugar though.
then
let opt = fixupOptionType loc (mergeOptionDecls loc (map optionTreeToOption decls));
in {
matchedOptions = evalOptionValue loc opt defns';
unmatchedDefns = [];
}
else
let
firstNonOption = findFirst (m: !isOption m.options) "" decls;
nonOptions = filter (m: !isOption m.options) decls;
in
throw "The option `${showOption loc}' in module `${(lib.head optionDecls)._file}' would be a parent of the following options, but its type `${(lib.head optionDecls).options.type.description or "<no description>"}' does not support nested options.\n${
showRawDecls loc nonOptions
}"
else if nrOptions != 0 then
let
firstOption = findFirst (m: isOption m.options) "" decls;
firstNonOption = findFirst (m: !isOption m.options) "" decls;
in
throw "The option `${showOption loc}' in `${firstOption._file}' is a prefix of options in `${firstNonOption._file}'."
else
mergeModules' loc decls defns) declsByName;
mergeModules' loc decls defns);
matchedOptions = mapAttrs (n: v: v.matchedOptions) resultsByName;
@@ -642,19 +482,12 @@ rec {
inherit matchedOptions;
# Transforms unmatchedDefnsByName into a list of definitions
unmatchedDefns =
if configs == []
then
# When no config values exist, there can be no unmatched config, so
# we short circuit and avoid evaluating more _options_ than necessary.
[]
else
concatLists (mapAttrsToList (name: defs:
map (def: def // {
# Set this so we know when the definition first left unmatched territory
prefix = [name] ++ (def.prefix or []);
}) defs
) unmatchedDefnsByName);
unmatchedDefns = concatLists (mapAttrsToList (name: defs:
map (def: def // {
# Set this so we know when the definition first left unmatched territory
prefix = [name] ++ (def.prefix or []);
}) defs
) unmatchedDefnsByName);
};
/* Merge multiple option declarations into a single declaration. In
@@ -670,9 +503,11 @@ rec {
correspond to the definition of 'loc' in 'opt.file'. */
mergeOptionDecls =
let
packSubmodule = file: m:
{ _file = file; imports = [ m ]; };
coerceOption = file: opt:
if isFunction opt then setDefaultModuleLocation file opt
else setDefaultModuleLocation file { options = opt; };
if isFunction opt then packSubmodule file opt
else packSubmodule file { options = opt; };
in loc: opts:
foldl' (res: opt:
let t = res.type;
@@ -693,9 +528,17 @@ rec {
throw "The option `${showOption loc}' in `${opt._file}' is already declared in ${showFiles res.declarations}."
else
let
/* Add the modules of the current option to the list of modules
already collected. The options attribute expects either a list of
submodules or a submodule. For each submodule, we add the file of the
current option declaration as the file used for the submodule. If the
submodule defines any filename, then we ignore the enclosing option file. */
options' = toList opt.options.options;
getSubModules = opt.options.type.getSubModules or null;
submodules =
if getSubModules != null then map (setDefaultModuleLocation opt._file) getSubModules ++ res.options
if getSubModules != null then map (packSubmodule opt._file) getSubModules ++ res.options
else if opt.options ? options then map (coerceOption opt._file) options' ++ res.options
else res.options;
in opt.options // res //
{ declarations = res.declarations ++ [opt._file];
@@ -739,8 +582,6 @@ rec {
definitions = map (def: def.value) res.defsFinal;
files = map (def: def.file) res.defsFinal;
inherit (res) isDefined;
# This allows options to be correctly displayed using `${options.path.to.it}`
__toString = _: showOption loc;
};
# Merge definitions of a value of a given type.
@@ -878,13 +719,26 @@ rec {
compare = a: b: (a.priority or 1000) < (b.priority or 1000);
in sort compare defs';
# This calls substSubModules, whose entire purpose is only to ensure that
# option declarations in submodules have accurate position information.
# TODO: Merge this into mergeOptionDecls
/* Hack for backward compatibility: convert options of type
optionSet to options of type submodule. FIXME: remove
eventually. */
fixupOptionType = loc: opt:
if opt.type.getSubModules or null == null
then opt // { type = opt.type or types.unspecified; }
else opt // { type = opt.type.substSubModules opt.options; options = []; };
let
options = opt.options or
(throw "Option `${showOption loc}' has type optionSet but has no option attribute, in ${showFiles opt.declarations}.");
f = tp:
let optionSetIn = type: (tp.name == type) && (tp.functor.wrapped.name == "optionSet");
in
if tp.name == "option set" || tp.name == "submodule" then
throw "The option ${showOption loc} uses submodules without a wrapping type, in ${showFiles opt.declarations}."
else if optionSetIn "attrsOf" then types.attrsOf (types.submodule options)
else if optionSetIn "listOf" then types.listOf (types.submodule options)
else if optionSetIn "nullOr" then types.nullOr (types.submodule options)
else tp;
in
if opt.type.getSubModules or null == null
then opt // { type = f (opt.type or types.unspecified); }
else opt // { type = opt.type.substSubModules opt.options; options = []; };
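As a sketch of the legacy form this hack still supports (the option name is hypothetical; not a recommended style):

    # old-style declaration using types.optionSet
    jobs = mkOption {
      type = types.attrsOf types.optionSet;
      options = [ { options.script = mkOption { type = types.str; default = ""; }; } ];
    };

fixupOptionType rewrites such a type to `types.attrsOf (types.submodule options)`, so downstream code only ever sees submodules.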
/* Properties. */
@@ -1016,26 +870,6 @@ rec {
use = builtins.trace "Obsolete option `${showOption from}' is used. It was renamed to `${showOption to}'.";
};
mkRenamedOptionModuleWith = {
/* Old option path as list of strings. */
from,
/* New option path as list of strings. */
to,
/*
Release number of the first release that contains the rename, ignoring backports.
Set it to the upcoming release, matching the nixpkgs/.version file.
*/
sinceRelease,
}: doRename {
inherit from to;
visible = false;
warn = lib.isInOldestRelease sinceRelease;
use = lib.warnIf (lib.isInOldestRelease sinceRelease)
"Obsolete option `${showOption from}' is used. It was renamed to `${showOption to}'.";
};
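A minimal usage sketch (the option paths are hypothetical; per the doc comment, sinceRelease matches the nixpkgs/.version file, e.g. 2205 for release 22.05):

    imports = [
      (lib.mkRenamedOptionModuleWith {
        from = [ "services" "foo" "enable" ];
        to = [ "services" "bar" "enable" ];
        sinceRelease = 2205;
      })
    ];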
/* Return a module that causes a warning to be shown if any of the "from"
option is defined; the defined values can be used in the "mergeFn" to set
the "to" value.
@@ -1067,7 +901,7 @@ rec {
mkMergedOptionModule = from: to: mergeFn:
{ config, options, ... }:
{
options = foldl' recursiveUpdate {} (map (path: setAttrByPath path (mkOption {
options = foldl recursiveUpdate {} (map (path: setAttrByPath path (mkOption {
visible = false;
# To use the value in mergeFn without triggering errors
default = "_mkMergedOptionModule";
@@ -1171,7 +1005,7 @@ rec {
/* Use this function to import a JSON file as NixOS configuration.
modules.importJSON :: path -> attrs
importJSON -> path -> attrs
*/
importJSON = file: {
_file = file;
@@ -1180,7 +1014,7 @@ rec {
/* Use this function to import a TOML file as NixOS configuration.
modules.importTOML :: path -> attrs
importTOML -> path -> attrs
*/
importTOML = file: {
_file = file;
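Both helpers are used the same way; a minimal sketch (the file name is hypothetical):

    { lib, ... }: {
      imports = [ (lib.modules.importTOML ./settings.toml) ];
    }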

View File

@@ -26,7 +26,6 @@ let
take
;
inherit (lib.attrsets)
attrByPath
optionalAttrs
;
inherit (lib.strings)
@@ -79,6 +78,8 @@ rec {
visible ? null,
# Whether the option can be set only once
readOnly ? null,
# Deprecated, used by types.optionSet.
options ? null
} @ attrs:
attrs // { _type = "option"; };
@@ -98,49 +99,6 @@ rec {
type = lib.types.bool;
};
/* Creates an Option attribute set for an option that specifies the
package a module should use for some purpose.
Type: mkPackageOption :: pkgs -> string -> { default :: [string], example :: null | string | [string] } -> option
The package is specified as a list of strings representing its attribute path in nixpkgs.
Because of this, you need to pass nixpkgs itself as the first argument.
The second argument is the name of the option, used in the description "The <name> package to use.".
You can also pass an example value, either a literal string or a package's attribute path.
You can omit the default path if the name of the option is also an attribute path in nixpkgs.
Example:
mkPackageOption pkgs "hello" { }
=> { _type = "option"; default = «derivation /nix/store/3r2vg51hlxj3cx5vscp0vkv60bqxkaq0-hello-2.10.drv»; defaultText = { ... }; description = "The hello package to use."; type = { ... }; }
Example:
mkPackageOption pkgs "GHC" {
default = [ "ghc" ];
example = "pkgs.haskell.package.ghc922.ghc.withPackages (hkgs: [ hkgs.primes ])";
}
=> { _type = "option"; default = «derivation /nix/store/jxx55cxsjrf8kyh3fp2ya17q99w7541r-ghc-8.10.7.drv»; defaultText = { ... }; description = "The GHC package to use."; example = { ... }; type = { ... }; }
*/
mkPackageOption =
# Package set (a specific version of nixpkgs)
pkgs:
# Name for the package, shown in option description
name:
{ default ? [ name ], example ? null }:
let default' = if !isList default then [ default ] else default;
in mkOption {
type = lib.types.package;
description = "The ${name} package to use.";
default = attrByPath default'
(throw "${concatStringsSep "." default'} cannot be found in pkgs") pkgs;
defaultText = literalExpression ("pkgs." + concatStringsSep "." default');
${if example != null then "example" else null} = literalExpression
(if isList example then "pkgs." + concatStringsSep "." example else example);
};
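In a module this is declared like the following sketch (the service name is hypothetical):

    { lib, pkgs, ... }: {
      options.services.hello.package = lib.mkPackageOption pkgs "hello" { };
    }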
/* This option accepts anything, but it does not produce any result.
This is useful for sharing a module across different module sets
@@ -170,13 +128,11 @@ rec {
else if all isInt list && all (x: x == head list) list then head list
else throw "Cannot merge definitions of `${showOption loc}'. Definition values:${showDefs defs}";
mergeOneOption = mergeUniqueOption { message = ""; };
mergeUniqueOption = { message }: loc: defs:
if length defs == 1
then (head defs).value
else assert length defs > 1;
throw "The option `${showOption loc}' is defined multiple times.\n${message}\nDefinition values:${showDefs defs}";
mergeOneOption = loc: defs:
if defs == [] then abort "This case should never happen."
else if length defs != 1 then
throw "The unique option `${showOption loc}' is defined multiple times. Definition values:${showDefs defs}"
else (head defs).value;
/* "Merge" option definitions by checking that they all have the same value. */
mergeEqualOption = loc: defs:
@@ -221,7 +177,7 @@ rec {
docOption = rec {
loc = opt.loc;
name = showOption opt.loc;
description = opt.description or null;
description = opt.description or (lib.warn "Option `${name}' has no description." "This option has no description.");
declarations = filter (x: x != unknownModule) opt.declarations;
internal = opt.internal or false;
visible =
@@ -229,7 +185,7 @@ rec {
then true
else opt.visible or true;
readOnly = opt.readOnly or false;
type = opt.type.description or "unspecified";
type = opt.type.description or null;
}
// optionalAttrs (opt ? example) { example = scrubOptionValue opt.example; }
// optionalAttrs (opt ? default) { default = scrubOptionValue opt.default; }

View File

@@ -20,26 +20,17 @@ let
readFile
;
/*
Returns the type of a path: regular (for file), symlink, or directory.
*/
pathType = path: getAttr (baseNameOf path) (readDir (dirOf path));
# Returns the type of a path: regular (for file), symlink, or directory
pathType = p: getAttr (baseNameOf p) (readDir (dirOf p));
/*
Returns true if the path exists and is a directory, false otherwise.
*/
pathIsDirectory = path: if pathExists path then (pathType path) == "directory" else false;
# Returns true if the path exists and is a directory, false otherwise
pathIsDirectory = p: if pathExists p then (pathType p) == "directory" else false;
/*
Returns true if the path exists and is a regular file, false otherwise.
*/
pathIsRegularFile = path: if pathExists path then (pathType path) == "regular" else false;
# Returns true if the path exists and is a regular file, false otherwise
pathIsRegularFile = p: if pathExists p then (pathType p) == "regular" else false;
/*
A basic filter for `cleanSourceWith` that removes
directories of version control system, backup files (*~)
and some generated files.
*/
# Bring in a path as a source, filtering out all Subversion and CVS
# directories, as well as backup files (*~).
cleanSourceFilter = name: type: let baseName = baseNameOf (toString name); in ! (
# Filter out version control software files/directories
(baseName == ".git" || type == "directory" && (baseName == ".svn" || baseName == "CVS" || baseName == ".hg")) ||
@@ -57,48 +48,43 @@ let
(type == "unknown")
);
/*
Filters a source tree removing version control files and directories using cleanSourceFilter.
Example:
cleanSource ./.
*/
# Filters a source tree removing version control files and directories using cleanSourceWith
#
# Example:
# cleanSource ./.
cleanSource = src: cleanSourceWith { filter = cleanSourceFilter; inherit src; };
/*
Like `builtins.filterSource`, except it will compose with itself,
allowing you to chain multiple calls together without any
intermediate copies being put in the nix store.
Example:
lib.cleanSourceWith {
filter = f;
src = lib.cleanSourceWith {
filter = g;
src = ./.;
};
}
# Succeeds!
builtins.filterSource f (builtins.filterSource g ./.)
# Fails!
*/
cleanSourceWith =
{
# A path or cleanSourceWith result to filter and/or rename.
src,
# Optional with default value: constant true (include everything)
# The function will be combined with the && operator such
# that src.filter is called lazily.
# For implementing a filter, see
# https://nixos.org/nix/manual/#builtin-filterSource
# Type: A function (path -> type -> bool)
filter ? _path: _type: true,
# Optional name to use as part of the store path.
# This defaults to `src.name` or otherwise `"source"`.
name ? null
}:
# Like `builtins.filterSource`, except it will compose with itself,
# allowing you to chain multiple calls together without any
# intermediate copies being put in the nix store.
#
# lib.cleanSourceWith {
# filter = f;
# src = lib.cleanSourceWith {
# filter = g;
# src = ./.;
# };
# }
# # Succeeds!
#
# builtins.filterSource f (builtins.filterSource g ./.)
# # Fails!
#
# Parameters:
#
# src: A path or cleanSourceWith result to filter and/or rename.
#
# filter: A function (path -> type -> bool)
# Optional with default value: constant true (include everything)
# The function will be combined with the && operator such
# that src.filter is called lazily.
# For implementing a filter, see
# https://nixos.org/nix/manual/#builtin-filterSource
#
# name: Optional name to use as part of the store path.
# This defaults to `src.name` or otherwise `"source"`.
#
cleanSourceWith = { filter ? _path: _type: true, src, name ? null }:
let
orig = toSourceAttributes src;
in fromSourceAttributes {
@@ -130,11 +116,9 @@ let
satisfiesSubpathInvariant = src ? satisfiesSubpathInvariant && src.satisfiesSubpathInvariant;
};
/*
Filter sources by a list of regular expressions.
Example: src = sourceByRegex ./my-subproject [".*\.py$" "^database.sql$"]
*/
# Filter sources by a list of regular expressions.
#
# E.g. `src = sourceByRegex ./my-subproject [".*\.py$" "^database.sql$"]`
sourceByRegex = src: regexes:
let
isFiltered = src ? _isLibCleanSourceWith;
@@ -169,11 +153,8 @@ let
pathIsGitRepo = path: (tryEval (commitIdFromGitRepo path)).success;
/*
Get the commit id of a git repo.
Example: commitIdFromGitRepo <nixpkgs/.git>
*/
# Get the commit id of a git repo
# Example: commitIdFromGitRepo <nixpkgs/.git>
commitIdFromGitRepo =
let readCommitFromFile = file: path:
let fileName = toString path + "/" + file;

View File

@@ -253,7 +253,10 @@ rec {
=> false
*/
hasInfix = infix: content:
builtins.match ".*${escapeRegex infix}.*" content != null;
let
drop = x: substring 1 (stringLength x) x;
in hasPrefix infix content
|| content != "" && hasInfix infix (drop content);
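Both definitions agree on results; the regex form performs a single builtins.match instead of recursing once per character of `content`. For example:

    hasInfix "bc" "abcd"    => true
    hasInfix "ab" "abcd"    => true
    hasInfix "foo" "abcd"   => false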
/* Convert a string to a list of characters (i.e. singleton strings).
This allows you to, e.g., map a function over each character. However,
@@ -753,14 +756,7 @@ rec {
sanitizeDerivationName pkgs.hello
=> "-nix-store-2g75chlbpxlrqn15zlby2dfh8hr9qwbk-hello-2.10"
*/
sanitizeDerivationName =
let okRegex = match "[[:alnum:]+_?=-][[:alnum:]+._?=-]*";
in
string:
# First detect the common case of already valid strings, to speed those up
if stringLength string <= 207 && okRegex string != null
then unsafeDiscardStringContext string
else lib.pipe string [
sanitizeDerivationName = string: lib.pipe string [
# Get rid of string context. This is safe under the assumption that the
# resulting string is only used as a derivation name
unsafeDiscardStringContext
@@ -778,131 +774,4 @@ rec {
(x: if stringLength x == 0 then "unknown" else x)
];
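In the variant with okRegex, already-valid names (at most 207 characters and matching the regex) are returned unchanged; everything else still goes through the pipeline. For example:

    sanitizeDerivationName "hello-2.10"          => "hello-2.10"   (fast path)
    sanitizeDerivationName "../hello.bar # foo"  => "-hello.bar-foo"
    sanitizeDerivationName ""                    => "unknown"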
/* Computes the Levenshtein distance between two strings.
Complexity O(n*m) where n and m are the lengths of the strings.
Algorithm adjusted from https://stackoverflow.com/a/9750974/6605742
Type: levenshtein :: string -> string -> int
Example:
levenshtein "foo" "foo"
=> 0
levenshtein "book" "hook"
=> 1
levenshtein "hello" "Heyo"
=> 3
*/
levenshtein = a: b: let
# Two dimensional array with dimensions (stringLength a + 1, stringLength b + 1)
arr = lib.genList (i:
lib.genList (j:
dist i j
) (stringLength b + 1)
) (stringLength a + 1);
d = x: y: lib.elemAt (lib.elemAt arr x) y;
dist = i: j:
let c = if substring (i - 1) 1 a == substring (j - 1) 1 b
then 0 else 1;
in
if j == 0 then i
else if i == 0 then j
else lib.min
( lib.min (d (i - 1) j + 1) (d i (j - 1) + 1))
( d (i - 1) (j - 1) + c );
in d (stringLength a) (stringLength b);
/* Returns the length of the prefix common to both strings.
*/
commonPrefixLength = a: b:
let
m = lib.min (stringLength a) (stringLength b);
go = i: if i >= m then m else if substring i 1 a == substring i 1 b then go (i + 1) else i;
in go 0;
/* Returns the length of the suffix common to both strings.
*/
commonSuffixLength = a: b:
let
m = lib.min (stringLength a) (stringLength b);
go = i: if i >= m then m else if substring (stringLength a - i - 1) 1 a == substring (stringLength b - i - 1) 1 b then go (i + 1) else i;
in go 0;
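For example (matching the tests in lib/tests/misc.nix below):

    commonPrefixLength "hello" "hey"   => 2
    commonSuffixLength "test" "rest"   => 3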
/* Returns whether the levenshtein distance between two strings is at most some value
Complexity is O(min(n,m)) for k <= 2 and O(n*m) otherwise
Type: levenshteinAtMost :: int -> string -> string -> bool
Example:
levenshteinAtMost 0 "foo" "foo"
=> true
levenshteinAtMost 1 "foo" "boa"
=> false
levenshteinAtMost 2 "foo" "boa"
=> true
levenshteinAtMost 2 "This is a sentence" "this is a sentense."
=> false
levenshteinAtMost 3 "This is a sentence" "this is a sentense."
=> true
*/
levenshteinAtMost = let
infixDifferAtMost1 = x: y: stringLength x <= 1 && stringLength y <= 1;
# This function takes two strings stripped by their common pre and suffix,
# and returns whether they differ by at most two by Levenshtein distance.
# Because of this stripping, if they do indeed differ by at most two edits,
# we know that those edits were (if at all) done at the start or the end,
# while the middle has to have stayed the same. This fact is used in the
# implementation.
infixDifferAtMost2 = x: y:
let
xlen = stringLength x;
ylen = stringLength y;
# This function is only called with |x| >= |y| and |x| - |y| <= 2, so
# diff is one of 0, 1 or 2
diff = xlen - ylen;
# Infix of x and y, stripped by the left and right most character
xinfix = substring 1 (xlen - 2) x;
yinfix = substring 1 (ylen - 2) y;
# x and y but a character deleted at the left or right
xdelr = substring 0 (xlen - 1) x;
xdell = substring 1 (xlen - 1) x;
ydelr = substring 0 (ylen - 1) y;
ydell = substring 1 (ylen - 1) y;
in
# A length difference of 2 can only be gotten with 2 delete edits,
# which have to have happened at the start and end of x
# Example: "abcdef" -> "bcde"
if diff == 2 then xinfix == y
# A length difference of 1 can only be gotten with a deletion on the
# right and a replacement on the left or vice versa.
# Example: "abcdef" -> "bcdez" or "zbcde"
else if diff == 1 then xinfix == ydelr || xinfix == ydell
# No length difference can either happen through replacements on both
# sides, or a deletion on the left and an insertion on the right or
# vice versa
# Example: "abcdef" -> "zbcdez" or "bcdefz" or "zabcde"
else xinfix == yinfix || xdelr == ydell || xdell == ydelr;
in k: if k <= 0 then a: b: a == b else
let f = a: b:
let
alen = stringLength a;
blen = stringLength b;
prelen = commonPrefixLength a b;
suflen = commonSuffixLength a b;
presuflen = prelen + suflen;
ainfix = substring prelen (alen - presuflen) a;
binfix = substring prelen (blen - presuflen) b;
in
# Make a be the bigger string
if alen < blen then f b a
# If a has over k more characters than b, even with k deletes on a, b can't be reached
else if alen - blen > k then false
else if k == 1 then infixDifferAtMost1 ainfix binfix
else if k == 2 then infixDifferAtMost2 ainfix binfix
else levenshtein ainfix binfix <= k;
in f;
}
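A worked example of the pre/suffix stripping for levenshteinAtMost 2 "hello" "heo" (expected true by the tests below):

    prelen = commonPrefixLength "hello" "heo"   => 2   (common prefix "he")
    suflen = commonSuffixLength "hello" "heo"   => 1   (common suffix "o")
    ainfix = substring 2 (5 - 3) "hello"        => "ll"
    binfix = substring 2 (3 - 3) "heo"          => ""
    # length difference 2, and "ll" stripped of its outer characters is "",
    # which equals binfix, so infixDifferAtMost2 returns true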

View File

@@ -105,8 +105,7 @@ rec {
else if final.isAarch64 then "arm64"
else if final.isx86_32 then "i386"
else if final.isx86_64 then "x86_64"
else if final.isMips32 then "mips"
else if final.isMips64 then "mips" # linux kernel does not distinguish mips32/mips64
else if final.isMips then "mips"
else if final.isPower then "powerpc"
else if final.isRiscV then "riscv"
else if final.isS390 then "s390"

View File

@@ -26,7 +26,7 @@ let
# Linux
"aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux"
"armv7l-linux" "i686-linux" "m68k-linux" "mipsel-linux" "mips64el-linux"
"armv7l-linux" "i686-linux" "m68k-linux" "mipsel-linux"
"powerpc64-linux" "powerpc64le-linux" "riscv32-linux"
"riscv64-linux" "s390-linux" "s390x-linux" "x86_64-linux"
@@ -39,8 +39,8 @@ let
"riscv32-netbsd" "riscv64-netbsd" "x86_64-netbsd"
# none
"aarch64_be-none" "aarch64-none" "arm-none" "armv6l-none" "avr-none" "i686-none"
"msp430-none" "or1k-none" "m68k-none" "powerpc-none" "powerpcle-none"
"aarch64-none" "arm-none" "armv6l-none" "avr-none" "i686-none"
"msp430-none" "or1k-none" "m68k-none" "powerpc-none"
"riscv32-none" "riscv64-none" "s390-none" "s390x-none" "vc4-none"
"x86_64-none"
@@ -87,11 +87,7 @@ in {
darwin = filterDoubles predicates.isDarwin;
freebsd = filterDoubles predicates.isFreeBSD;
# Should be better, but MinGW is unclear.
gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; })
++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; })
++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; })
++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnuabin32; })
++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnuabi64; });
gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; });
illumos = filterDoubles predicates.isSunOS;
linux = filterDoubles predicates.isLinux;
netbsd = filterDoubles predicates.isNetBSD;

View File

@@ -93,26 +93,6 @@ rec {
config = "mipsel-unknown-linux-gnu";
} // platforms.fuloong2f_n32;
# MIPS ABI table transcribed from here: https://wiki.debian.org/Multiarch/Tuples
# can execute on 32bit chip
mips-linux-gnu = { config = "mips-linux-gnu"; } // platforms.gcc_mips32r2_o32;
mipsel-linux-gnu = { config = "mipsel-linux-gnu"; } // platforms.gcc_mips32r2_o32;
mipsisa32r6-linux-gnu = { config = "mipsisa32r6-linux-gnu"; } // platforms.gcc_mips32r6_o32;
mipsisa32r6el-linux-gnu = { config = "mipsisa32r6el-linux-gnu"; } // platforms.gcc_mips32r6_o32;
# require 64bit chip (for more registers, 64-bit floating point, 64-bit "long long") but use 32bit pointers
mips64-linux-gnuabin32 = { config = "mips64-linux-gnuabin32"; } // platforms.gcc_mips64r2_n32;
mips64el-linux-gnuabin32 = { config = "mips64el-linux-gnuabin32"; } // platforms.gcc_mips64r2_n32;
mipsisa64r6-linux-gnuabin32 = { config = "mipsisa64r6-linux-gnuabin32"; } // platforms.gcc_mips64r6_n32;
mipsisa64r6el-linux-gnuabin32 = { config = "mipsisa64r6el-linux-gnuabin32"; } // platforms.gcc_mips64r6_n32;
# 64bit pointers
mips64-linux-gnuabi64 = { config = "mips64-linux-gnuabi64"; } // platforms.gcc_mips64r2_64;
mips64el-linux-gnuabi64 = { config = "mips64el-linux-gnuabi64"; } // platforms.gcc_mips64r2_64;
mipsisa64r6-linux-gnuabi64 = { config = "mipsisa64r6-linux-gnuabi64"; } // platforms.gcc_mips64r6_64;
mipsisa64r6el-linux-gnuabi64 = { config = "mipsisa64r6el-linux-gnuabi64"; } // platforms.gcc_mips64r6_64;
muslpi = raspberryPi // {
config = "armv6l-unknown-linux-musleabihf";
};
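A minimal sketch of selecting one of these example platforms for a cross build (assuming a nixpkgs checkout is on NIX_PATH):

    (import <nixpkgs> {
      crossSystem = (import <nixpkgs/lib>).systems.examples.mips64el-linux-gnuabi64;
    }).hello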
@@ -310,7 +290,6 @@ rec {
libc = "nblibc";
};
# this is broken and never worked fully
x86_64-netbsd-llvm = {
config = "x86_64-unknown-netbsd";
libc = "nblibc";

View File

@@ -13,15 +13,10 @@ rec {
isx86_64 = { cpu = { family = "x86"; bits = 64; }; };
isPowerPC = { cpu = cpuTypes.powerpc; };
isPower = { cpu = { family = "power"; }; };
isPower64 = { cpu = { family = "power"; bits = 64; }; };
isx86 = { cpu = { family = "x86"; }; };
isAarch32 = { cpu = { family = "arm"; bits = 32; }; };
isAarch64 = { cpu = { family = "arm"; bits = 64; }; };
isMips = { cpu = { family = "mips"; }; };
isMips32 = { cpu = { family = "mips"; bits = 32; }; };
isMips64 = { cpu = { family = "mips"; bits = 64; }; };
isMips64n32 = { cpu = { family = "mips"; bits = 64; }; abi = { abi = "n32"; }; };
isMips64n64 = { cpu = { family = "mips"; bits = 64; }; abi = { abi = "64"; }; };
isMmix = { cpu = { family = "mmix"; }; };
isRiscV = { cpu = { family = "riscv"; }; };
isSparc = { cpu = { family = "sparc"; }; };
@@ -62,7 +57,7 @@ rec {
isAndroid = [ { abi = abis.android; } { abi = abis.androideabi; } ];
isGnu = with abis; map (a: { abi = a; }) [ gnuabi64 gnu gnueabi gnueabihf ];
isMusl = with abis; map (a: { abi = a; }) [ musl musleabi musleabihf muslabin32 muslabi64 ];
isMusl = with abis; map (a: { abi = a; }) [ musl musleabi musleabihf ];
isUClibc = with abis; map (a: { abi = a; }) [ uclibc uclibceabi uclibceabihf ];
isEfi = map (family: { cpu.family = family; })

View File

@@ -359,20 +359,13 @@ rec {
];
};
gnuabi64 = { abi = "64"; };
muslabi64 = { abi = "64"; };
# NOTE: abi=n32 requires a 64-bit MIPS chip! That is not a typo.
# It is basically the 64-bit abi with 32-bit pointers. Details:
# https://www.linux-mips.org/pub/linux/mips/doc/ABI/MIPS-N32-ABI-Handbook.pdf
gnuabin32 = { abi = "n32"; };
muslabin32 = { abi = "n32"; };
musleabi = { float = "soft"; };
musleabihf = { float = "hard"; };
musl = {};
uclibceabi = { float = "soft"; };
uclibceabihf = { float = "hard"; };
uclibceabihf = { float = "soft"; };
uclibceabi = { float = "hard"; };
uclibc = {};
unknown = {};

View File

@@ -1,10 +1,3 @@
# Note: lib/systems/default.nix takes care of producing valid,
# fully-formed "platform" values (e.g. hostPlatform, buildPlatform,
# targetPlatform, etc) containing at least the minimal set of attrs
# required (see types.parsedPlatform in lib/systems/parse.nix). This
# file takes an already-valid platform and further elaborates it with
# optional fields such as linux-kernel, gcc, etc.
{ lib }:
rec {
pc = {
@@ -489,43 +482,6 @@ rec {
};
};
# can execute on 32bit chip
gcc_mips32r2_o32 = { gcc = { arch = "mips32r2"; abi = "o32"; }; };
gcc_mips32r6_o32 = { gcc = { arch = "mips32r6"; abi = "o32"; }; };
gcc_mips64r2_n32 = { gcc = { arch = "mips64r2"; abi = "n32"; }; };
gcc_mips64r6_n32 = { gcc = { arch = "mips64r6"; abi = "n32"; }; };
gcc_mips64r2_64 = { gcc = { arch = "mips64r2"; abi = "64"; }; };
gcc_mips64r6_64 = { gcc = { arch = "mips64r6"; abi = "64"; }; };
# based on:
# https://www.mail-archive.com/qemu-discuss@nongnu.org/msg05179.html
# https://gmplib.org/~tege/qemu.html#mips64-debian
mips64el-qemu-linux-gnuabi64 = {
linux-kernel = {
name = "mips64el";
baseConfig = "64r2el_defconfig";
target = "vmlinuz";
autoModules = false;
DTB = true;
# for qemu 9p passthrough filesystem
extraConfig = ''
MIPS_MALTA y
PAGE_SIZE_4KB y
CPU_LITTLE_ENDIAN y
CPU_MIPS64_R2 y
64BIT y
CPU_MIPS64_R2 y
NET_9P y
NET_9P_VIRTIO y
9P_FS y
9P_FS_POSIX_ACL y
PCI y
VIRTIO_PCI y
'';
};
};
##
## Other
##
@@ -543,9 +499,6 @@ rec {
};
};
# This function takes a minimally-valid "platform" and returns an
# attrset containing zero or more additional attrs which should be
# included in the platform in order to further elaborate it.
select = platform:
# x86
/**/ if platform.isx86 then pc
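For instance, elaborating an x86 system and feeding it back through select yields the pc set defined at the top of this file (a sketch, assuming lib.systems is in scope):

    lib.systems.platforms.select (lib.systems.elaborate "x86_64-linux")
    => pc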

View File

@@ -4,9 +4,7 @@
{ lib }:
rec {
# List of systems that are built by Hydra.
hydra = tier1 ++ tier2 ++ tier3 ++ [
"aarch64-darwin"
];
hydra = tier1 ++ tier2 ++ tier3;
tier1 = [
"x86_64-linux"
@@ -18,6 +16,7 @@ rec {
];
tier3 = [
"aarch64-darwin"
"armv6l-linux"
"armv7l-linux"
"i686-linux"

View File

@@ -471,66 +471,6 @@ runTests {
'';
};
testToINIWithGlobalSectionEmpty = {
expr = generators.toINIWithGlobalSection {} {
globalSection = {
};
sections = {
};
};
expected = ''
'';
};
testToINIWithGlobalSectionGlobalEmptyIsTheSameAsToINI =
let
sections = {
"section 1" = {
attribute1 = 5;
x = "Me-se JarJar Binx";
};
"foo" = {
"he\\h=he" = "this is okay";
};
};
in {
expr =
generators.toINIWithGlobalSection {} {
globalSection = {};
sections = sections;
};
expected = generators.toINI {} sections;
};
testToINIWithGlobalSectionFull = {
expr = generators.toINIWithGlobalSection {} {
globalSection = {
foo = "bar";
test = false;
};
sections = {
"section 1" = {
attribute1 = 5;
x = "Me-se JarJar Binx";
};
"foo" = {
"he\\h=he" = "this is okay";
};
};
};
expected = ''
foo=bar
test=false
[foo]
he\h\=he=this is okay
[section 1]
attribute1=5
x=Me-se JarJar Binx
'';
};
/* right now only invocation check */
testToJSONSimple =
let val = {
@@ -556,7 +496,7 @@ runTests {
testToPretty =
let
deriv = derivation { name = "test"; builder = "/bin/sh"; system = "aarch64-linux"; };
deriv = derivation { name = "test"; builder = "/bin/sh"; system = builtins.currentSystem; };
in {
expr = mapAttrs (const (generators.toPretty { multiline = false; })) rec {
int = 42;
@@ -709,11 +649,6 @@ runTests {
expected = "foo";
};
testSanitizeDerivationNameUnicode = testSanitizeDerivationName {
name = "fö";
expected = "f-";
};
testSanitizeDerivationNameAscii = testSanitizeDerivationName {
name = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~";
expected = "-+--.-0123456789-=-?-ABCDEFGHIJKLMNOPQRSTUVWXYZ-_-abcdefghijklmnopqrstuvwxyz-";
@@ -756,7 +691,7 @@ runTests {
locs = filter (o: ! o.internal) (optionAttrSetToDocList options);
in map (o: o.loc) locs;
expected = [ [ "_module" "args" ] [ "foo" ] [ "foo" "<name>" "bar" ] [ "foo" "bar" ] ];
expected = [ [ "foo" ] [ "foo" "<name>" "bar" ] [ "foo" "bar" ] ];
};
testCartesianProductOfEmptySet = {
@@ -826,308 +761,4 @@ runTests {
{ a = 3; b = 30; c = 300; }
];
};
# The example from the showAttrPath documentation
testShowAttrPathExample = {
expr = showAttrPath [ "foo" "10" "bar" ];
expected = "foo.\"10\".bar";
};
testShowAttrPathEmpty = {
expr = showAttrPath [];
expected = "<root attribute path>";
};
testShowAttrPathVarious = {
expr = showAttrPath [
"."
"foo"
"2"
"a2-b"
"_bc'de"
];
expected = ''".".foo."2".a2-b._bc'de'';
};
testGroupBy = {
expr = groupBy (n: toString (mod n 5)) (range 0 16);
expected = {
"0" = [ 0 5 10 15 ];
"1" = [ 1 6 11 16 ];
"2" = [ 2 7 12 ];
"3" = [ 3 8 13 ];
"4" = [ 4 9 14 ];
};
};
testGroupBy' = {
expr = groupBy' builtins.add 0 (x: boolToString (x > 2)) [ 5 1 2 3 4 ];
expected = { false = 3; true = 12; };
};
# The example from the updateManyAttrsByPath documentation
testUpdateManyAttrsByPathExample = {
expr = updateManyAttrsByPath [
{
path = [ "a" "b" ];
update = old: { d = old.c; };
}
{
path = [ "a" "b" "c" ];
update = old: old + 1;
}
{
path = [ "x" "y" ];
update = old: "xy";
}
] { a.b.c = 0; };
expected = { a = { b = { d = 1; }; }; x = { y = "xy"; }; };
};
# If there are no updates, the value is passed through
testUpdateManyAttrsByPathNone = {
expr = updateManyAttrsByPath [] "something";
expected = "something";
};
# A single update to the root path is just like applying the function directly
testUpdateManyAttrsByPathSingleIncrement = {
expr = updateManyAttrsByPath [
{
path = [ ];
update = old: old + 1;
}
] 0;
expected = 1;
};
# Multiple updates are applied in order
testUpdateManyAttrsByPathMultipleIncrements = {
expr = updateManyAttrsByPath [
{
path = [ ];
update = old: old + "a";
}
{
path = [ ];
update = old: old + "b";
}
{
path = [ ];
update = old: old + "c";
}
] "";
expected = "abc";
};
# If an update doesn't use the value, all previous updates are not evaluated
testUpdateManyAttrsByPathLazy = {
expr = updateManyAttrsByPath [
{
path = [ ];
update = old: old + throw "nope";
}
{
path = [ ];
update = old: "untainted";
}
] (throw "start");
expected = "untainted";
};
# Deeply nested attributes can be updated without affecting others
testUpdateManyAttrsByPathDeep = {
expr = updateManyAttrsByPath [
{
path = [ "a" "b" "c" ];
update = old: old + 1;
}
] {
a.b.c = 0;
a.b.z = 0;
a.y.z = 0;
x.y.z = 0;
};
expected = {
a.b.c = 1;
a.b.z = 0;
a.y.z = 0;
x.y.z = 0;
};
};
# Nested attributes are updated first
testUpdateManyAttrsByPathNestedBeforehand = {
expr = updateManyAttrsByPath [
{
path = [ "a" ];
update = old: old // { x = old.b; };
}
{
path = [ "a" "b" ];
update = old: old + 1;
}
] {
a.b = 0;
};
expected = {
a.b = 1;
a.x = 1;
};
};
## Levenshtein distance functions and co.
testCommonPrefixLengthEmpty = {
expr = strings.commonPrefixLength "" "hello";
expected = 0;
};
testCommonPrefixLengthSame = {
expr = strings.commonPrefixLength "hello" "hello";
expected = 5;
};
testCommonPrefixLengthDiffering = {
expr = strings.commonPrefixLength "hello" "hey";
expected = 2;
};
testCommonSuffixLengthEmpty = {
expr = strings.commonSuffixLength "" "hello";
expected = 0;
};
testCommonSuffixLengthSame = {
expr = strings.commonSuffixLength "hello" "hello";
expected = 5;
};
testCommonSuffixLengthDiffering = {
expr = strings.commonSuffixLength "test" "rest";
expected = 3;
};
testLevenshteinEmpty = {
expr = strings.levenshtein "" "";
expected = 0;
};
testLevenshteinOnlyAdd = {
expr = strings.levenshtein "" "hello there";
expected = 11;
};
testLevenshteinOnlyRemove = {
expr = strings.levenshtein "hello there" "";
expected = 11;
};
testLevenshteinOnlyTransform = {
expr = strings.levenshtein "abcdef" "ghijkl";
expected = 6;
};
testLevenshteinMixed = {
expr = strings.levenshtein "kitchen" "sitting";
expected = 5;
};
testLevenshteinAtMostZeroFalse = {
expr = strings.levenshteinAtMost 0 "foo" "boo";
expected = false;
};
testLevenshteinAtMostZeroTrue = {
expr = strings.levenshteinAtMost 0 "foo" "foo";
expected = true;
};
testLevenshteinAtMostOneFalse = {
expr = strings.levenshteinAtMost 1 "car" "ct";
expected = false;
};
testLevenshteinAtMostOneTrue = {
expr = strings.levenshteinAtMost 1 "car" "cr";
expected = true;
};
# We test levenshteinAtMost 2 particularly well because it uses a complicated
# implementation
testLevenshteinAtMostTwoIsEmpty = {
expr = strings.levenshteinAtMost 2 "" "";
expected = true;
};
testLevenshteinAtMostTwoIsZero = {
expr = strings.levenshteinAtMost 2 "abcdef" "abcdef";
expected = true;
};
testLevenshteinAtMostTwoIsOne = {
expr = strings.levenshteinAtMost 2 "abcdef" "abddef";
expected = true;
};
testLevenshteinAtMostTwoDiff0False = {
expr = strings.levenshteinAtMost 2 "abcdef" "aczyef";
expected = false;
};
testLevenshteinAtMostTwoDiff0Outer = {
expr = strings.levenshteinAtMost 2 "abcdef" "zbcdez";
expected = true;
};
testLevenshteinAtMostTwoDiff0DelLeft = {
expr = strings.levenshteinAtMost 2 "abcdef" "bcdefz";
expected = true;
};
testLevenshteinAtMostTwoDiff0DelRight = {
expr = strings.levenshteinAtMost 2 "abcdef" "zabcde";
expected = true;
};
testLevenshteinAtMostTwoDiff1False = {
expr = strings.levenshteinAtMost 2 "abcdef" "bddez";
expected = false;
};
testLevenshteinAtMostTwoDiff1DelLeft = {
expr = strings.levenshteinAtMost 2 "abcdef" "bcdez";
expected = true;
};
testLevenshteinAtMostTwoDiff1DelRight = {
expr = strings.levenshteinAtMost 2 "abcdef" "zbcde";
expected = true;
};
testLevenshteinAtMostTwoDiff2False = {
expr = strings.levenshteinAtMost 2 "hello" "hxo";
expected = false;
};
testLevenshteinAtMostTwoDiff2True = {
expr = strings.levenshteinAtMost 2 "hello" "heo";
expected = true;
};
testLevenshteinAtMostTwoDiff3 = {
expr = strings.levenshteinAtMost 2 "hello" "ho";
expected = false;
};
testLevenshteinAtMostThreeFalse = {
expr = strings.levenshteinAtMost 3 "hello" "Holla!";
expected = false;
};
testLevenshteinAtMostThreeTrue = {
expr = strings.levenshteinAtMost 3 "hello" "Holla";
expected = true;
};
}

View File

@@ -1,11 +1,8 @@
#!/usr/bin/env bash
#!/bin/sh
#
# This script is used to test that the module system is working as expected.
# By default it tests the version of nixpkgs which is defined in the NIX_PATH.
set -o errexit -o noclobber -o nounset -o pipefail
shopt -s failglob inherit_errexit
# https://stackoverflow.com/a/246128/6605742
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
@@ -16,103 +13,101 @@ fail=0
evalConfig() {
local attr=$1
shift
local script="import ./default.nix { modules = [ $* ];}"
shift;
local script="import ./default.nix { modules = [ $@ ];}"
nix-instantiate --timeout 1 -E "$script" -A "$attr" --eval-only --show-trace --read-write-mode
}
reportFailure() {
local attr=$1
shift
local script="import ./default.nix { modules = [ $* ];}"
shift;
local script="import ./default.nix { modules = [ $@ ];}"
echo 2>&1 "$ nix-instantiate -E '$script' -A '$attr' --eval-only"
evalConfig "$attr" "$@" || true
((++fail))
evalConfig "$attr" "$@"
fail=$((fail + 1))
}
checkConfigOutput() {
local outputContains=$1
shift
shift;
if evalConfig "$@" 2>/dev/null | grep --silent "$outputContains" ; then
((++pass))
pass=$((pass + 1))
return 0;
else
echo 2>&1 "error: Expected result matching '$outputContains', while evaluating"
reportFailure "$@"
return 1
fi
}
checkConfigError() {
local errorContains=$1
local err=""
shift
if err="$(evalConfig "$@" 2>&1 >/dev/null)"; then
shift;
if err==$(evalConfig "$@" 2>&1 >/dev/null); then
echo 2>&1 "error: Expected error code, got exit code 0, while evaluating"
reportFailure "$@"
return 1
else
if echo "$err" | grep -zP --silent "$errorContains" ; then
((++pass))
pass=$((pass + 1))
return 0;
else
echo 2>&1 "error: Expected error matching '$errorContains', while evaluating"
reportFailure "$@"
return 1
fi
fi
}
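For reference, a plausible sketch of what a fixture such as ./declare-enable.nix contains (the fixture files themselves are not part of this diff; this is inferred from the first check below, which expects config.enable to default to false):

    { lib, ... }: {
      options.enable = lib.mkOption {
        type = lib.types.bool;
        default = false;
      };
    }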
# Check boolean option.
checkConfigOutput '^false$' config.enable ./declare-enable.nix
checkConfigOutput "false" config.enable ./declare-enable.nix
checkConfigError 'The option .* does not exist. Definition values:\n\s*- In .*: true' config.enable ./define-enable.nix
checkConfigOutput '^1$' config.bare-submodule.nested ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix
checkConfigOutput '^2$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix
checkConfigOutput '^42$' config.bare-submodule.nested ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix ./declare-bare-submodule-deep-option.nix ./define-bare-submodule-values.nix
checkConfigOutput '^420$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-nested-option.nix ./declare-bare-submodule-deep-option.nix ./define-bare-submodule-values.nix
checkConfigOutput '^2$' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix ./define-shorthandOnlyDefinesConfig-true.nix
checkConfigError 'The option .bare-submodule.deep. in .*/declare-bare-submodule-deep-option.nix. is already declared in .*/declare-bare-submodule-deep-option-duplicate.nix' config.bare-submodule.deep ./declare-bare-submodule.nix ./declare-bare-submodule-deep-option.nix ./declare-bare-submodule-deep-option-duplicate.nix
# Check integer types.
# unsigned
checkConfigOutput '^42$' config.value ./declare-int-unsigned-value.nix ./define-value-int-positive.nix
checkConfigOutput "42" config.value ./declare-int-unsigned-value.nix ./define-value-int-positive.nix
checkConfigError 'A definition for option .* is not of type.*unsigned integer.*. Definition values:\n\s*- In .*: -23' config.value ./declare-int-unsigned-value.nix ./define-value-int-negative.nix
# positive
checkConfigError 'A definition for option .* is not of type.*positive integer.*. Definition values:\n\s*- In .*: 0' config.value ./declare-int-positive-value.nix ./define-value-int-zero.nix
# between
checkConfigOutput '^42$' config.value ./declare-int-between-value.nix ./define-value-int-positive.nix
checkConfigOutput "42" config.value ./declare-int-between-value.nix ./define-value-int-positive.nix
checkConfigError 'A definition for option .* is not of type.*between.*-21 and 43.*inclusive.*. Definition values:\n\s*- In .*: -23' config.value ./declare-int-between-value.nix ./define-value-int-negative.nix
# Check either types
# types.either
checkConfigOutput '^42$' config.value ./declare-either.nix ./define-value-int-positive.nix
checkConfigOutput '^"24"$' config.value ./declare-either.nix ./define-value-string.nix
checkConfigOutput "42" config.value ./declare-either.nix ./define-value-int-positive.nix
checkConfigOutput "\"24\"" config.value ./declare-either.nix ./define-value-string.nix
# types.oneOf
checkConfigOutput '^42$' config.value ./declare-oneOf.nix ./define-value-int-positive.nix
checkConfigOutput '^\[ \]$' config.value ./declare-oneOf.nix ./define-value-list.nix
checkConfigOutput '^"24"$' config.value ./declare-oneOf.nix ./define-value-string.nix
checkConfigOutput "42" config.value ./declare-oneOf.nix ./define-value-int-positive.nix
checkConfigOutput "[ ]" config.value ./declare-oneOf.nix ./define-value-list.nix
checkConfigOutput "\"24\"" config.value ./declare-oneOf.nix ./define-value-string.nix
# Check mkForce without submodules.
set -- config.enable ./declare-enable.nix ./define-enable.nix
checkConfigOutput '^true$' "$@"
checkConfigOutput '^false$' "$@" ./define-force-enable.nix
checkConfigOutput '^false$' "$@" ./define-enable-force.nix
checkConfigOutput "true" "$@"
checkConfigOutput "false" "$@" ./define-force-enable.nix
checkConfigOutput "false" "$@" ./define-enable-force.nix
# Check mkForce with option and submodules.
checkConfigError 'attribute .*foo.* .* not found' config.attrsOfSub.foo.enable ./declare-attrsOfSub-any-enable.nix
checkConfigOutput '^false$' config.attrsOfSub.foo.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix
checkConfigOutput 'false' config.attrsOfSub.foo.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix
set -- config.attrsOfSub.foo.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo-enable.nix
checkConfigOutput '^true$' "$@"
checkConfigOutput '^false$' "$@" ./define-force-attrsOfSub-foo-enable.nix
checkConfigOutput '^false$' "$@" ./define-attrsOfSub-force-foo-enable.nix
checkConfigOutput '^false$' "$@" ./define-attrsOfSub-foo-force-enable.nix
checkConfigOutput '^false$' "$@" ./define-attrsOfSub-foo-enable-force.nix
checkConfigOutput 'true' "$@"
checkConfigOutput 'false' "$@" ./define-force-attrsOfSub-foo-enable.nix
checkConfigOutput 'false' "$@" ./define-attrsOfSub-force-foo-enable.nix
checkConfigOutput 'false' "$@" ./define-attrsOfSub-foo-force-enable.nix
checkConfigOutput 'false' "$@" ./define-attrsOfSub-foo-enable-force.nix
# Check overriding effect of mkForce on submodule definitions.
checkConfigError 'attribute .*bar.* .* not found' config.attrsOfSub.bar.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix
checkConfigOutput '^false$' config.attrsOfSub.bar.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix ./define-attrsOfSub-bar.nix
checkConfigOutput 'false' config.attrsOfSub.bar.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix ./define-attrsOfSub-bar.nix
set -- config.attrsOfSub.bar.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix ./define-attrsOfSub-bar-enable.nix
checkConfigOutput '^true$' "$@"
checkConfigOutput 'true' "$@"
checkConfigError 'attribute .*bar.* .* not found' "$@" ./define-force-attrsOfSub-foo-enable.nix
checkConfigError 'attribute .*bar.* .* not found' "$@" ./define-attrsOfSub-force-foo-enable.nix
checkConfigOutput '^true$' "$@" ./define-attrsOfSub-foo-force-enable.nix
checkConfigOutput '^true$' "$@" ./define-attrsOfSub-foo-enable-force.nix
checkConfigOutput 'true' "$@" ./define-attrsOfSub-foo-force-enable.nix
checkConfigOutput 'true' "$@" ./define-attrsOfSub-foo-enable-force.nix
# Check mkIf with submodules.
checkConfigError 'attribute .*foo.* .* not found' config.attrsOfSub.foo.enable ./declare-enable.nix ./declare-attrsOfSub-any-enable.nix
@@ -120,16 +115,16 @@ set -- config.attrsOfSub.foo.enable ./declare-enable.nix ./declare-attrsOfSub-an
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-if-attrsOfSub-foo-enable.nix
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-attrsOfSub-if-foo-enable.nix
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-attrsOfSub-foo-if-enable.nix
checkConfigOutput '^false$' "$@" ./define-attrsOfSub-foo-enable-if.nix
checkConfigOutput '^true$' "$@" ./define-enable.nix ./define-if-attrsOfSub-foo-enable.nix
checkConfigOutput '^true$' "$@" ./define-enable.nix ./define-attrsOfSub-if-foo-enable.nix
checkConfigOutput '^true$' "$@" ./define-enable.nix ./define-attrsOfSub-foo-if-enable.nix
checkConfigOutput '^true$' "$@" ./define-enable.nix ./define-attrsOfSub-foo-enable-if.nix
checkConfigOutput 'false' "$@" ./define-attrsOfSub-foo-enable-if.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-if-attrsOfSub-foo-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-attrsOfSub-if-foo-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-attrsOfSub-foo-if-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-attrsOfSub-foo-enable-if.nix
# Check disabledModules with config definitions and option declarations.
set -- config.enable ./define-enable.nix ./declare-enable.nix
checkConfigOutput '^true$' "$@"
checkConfigOutput '^false$' "$@" ./disable-define-enable.nix
checkConfigOutput "true" "$@"
checkConfigOutput "false" "$@" ./disable-define-enable.nix
checkConfigError "The option .*enable.* does not exist. Definition values:\n\s*- In .*: true" "$@" ./disable-declare-enable.nix
checkConfigError "attribute .*enable.* in selection path .*config.enable.* not found" "$@" ./disable-define-enable.nix ./disable-declare-enable.nix
checkConfigError "attribute .*enable.* in selection path .*config.enable.* not found" "$@" ./disable-enable-modules.nix
@@ -137,7 +132,7 @@ checkConfigError "attribute .*enable.* in selection path .*config.enable.* not f
# Check _module.args.
set -- config.enable ./declare-enable.nix ./define-enable-with-custom-arg.nix
checkConfigError 'while evaluating the module argument .*custom.* in .*define-enable-with-custom-arg.nix.*:' "$@"
checkConfigOutput '^true$' "$@" ./define-_module-args-custom.nix
checkConfigOutput "true" "$@" ./define-_module-args-custom.nix
# Check that using _module.args on imports causes infinite recursion, with
# the proper error context.
@@ -148,77 +143,77 @@ checkConfigError 'infinite recursion encountered' "$@"
# Check _module.check.
set -- config.enable ./declare-enable.nix ./define-enable.nix ./define-attrsOfSub-foo.nix
checkConfigError 'The option .* does not exist. Definition values:\n\s*- In .*' "$@"
checkConfigOutput '^true$' "$@" ./define-module-check.nix
checkConfigOutput "true" "$@" ./define-module-check.nix
# Check coerced value.
checkConfigOutput '^"42"$' config.value ./declare-coerced-value.nix
checkConfigOutput '^"24"$' config.value ./declare-coerced-value.nix ./define-value-string.nix
checkConfigOutput "\"42\"" config.value ./declare-coerced-value.nix
checkConfigOutput "\"24\"" config.value ./declare-coerced-value.nix ./define-value-string.nix
checkConfigError 'A definition for option .* is not.*string or signed integer convertible to it.*. Definition values:\n\s*- In .*: \[ \]' config.value ./declare-coerced-value.nix ./define-value-list.nix
# Check coerced value with unsound coercion
checkConfigOutput '^12$' config.value ./declare-coerced-value-unsound.nix
checkConfigOutput "12" config.value ./declare-coerced-value-unsound.nix
checkConfigError 'A definition for option .* is not of type .*. Definition values:\n\s*- In .*: "1000"' config.value ./declare-coerced-value-unsound.nix ./define-value-string-bigint.nix
checkConfigError 'json.exception.parse_error' config.value ./declare-coerced-value-unsound.nix ./define-value-string-arbitrary.nix
# Check mkAliasOptionModule.
checkConfigOutput '^true$' config.enable ./alias-with-priority.nix
checkConfigOutput '^true$' config.enableAlias ./alias-with-priority.nix
checkConfigOutput '^false$' config.enable ./alias-with-priority-can-override.nix
checkConfigOutput '^false$' config.enableAlias ./alias-with-priority-can-override.nix
checkConfigOutput "true" config.enable ./alias-with-priority.nix
checkConfigOutput "true" config.enableAlias ./alias-with-priority.nix
checkConfigOutput "false" config.enable ./alias-with-priority-can-override.nix
checkConfigOutput "false" config.enableAlias ./alias-with-priority-can-override.nix
# submoduleWith
## specialArgs should work
checkConfigOutput '^"foo"$' config.submodule.foo ./declare-submoduleWith-special.nix
checkConfigOutput "foo" config.submodule.foo ./declare-submoduleWith-special.nix
## shorthandOnlyDefines config behaves as expected
checkConfigOutput '^true$' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigOutput "true" config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigError 'is not of type `boolean' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-noshorthand.nix
checkConfigError "You're trying to declare a value of type \`bool'\n\s*rather than an attribute-set for the option" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigOutput '^true$' config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-noshorthand.nix
checkConfigOutput "true" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-noshorthand.nix
## submoduleWith should merge all modules in one swoop
checkConfigOutput '^true$' config.submodule.inner ./declare-submoduleWith-modules.nix
checkConfigOutput '^true$' config.submodule.outer ./declare-submoduleWith-modules.nix
checkConfigOutput "true" config.submodule.inner ./declare-submoduleWith-modules.nix
checkConfigOutput "true" config.submodule.outer ./declare-submoduleWith-modules.nix
# Should also be able to evaluate the type name (which evaluates freeformType,
# which evaluates all the modules defined by the type)
checkConfigOutput '^"submodule"$' options.submodule.type.description ./declare-submoduleWith-modules.nix
checkConfigOutput "submodule" options.submodule.type.description ./declare-submoduleWith-modules.nix
## submodules can be declared using (evalModules {...}).type
checkConfigOutput '^true$' config.submodule.inner ./declare-submodule-via-evalModules.nix
checkConfigOutput '^true$' config.submodule.outer ./declare-submodule-via-evalModules.nix
checkConfigOutput "true" config.submodule.inner ./declare-submodule-via-evalModules.nix
checkConfigOutput "true" config.submodule.outer ./declare-submodule-via-evalModules.nix
# Should also be able to evaluate the type name (which evaluates freeformType,
# which evaluates all the modules defined by the type)
checkConfigOutput '^"submodule"$' options.submodule.type.description ./declare-submodule-via-evalModules.nix
checkConfigOutput "submodule" options.submodule.type.description ./declare-submodule-via-evalModules.nix
## Paths should be allowed as values and work as expected
checkConfigOutput '^true$' config.submodule.enable ./declare-submoduleWith-path.nix
checkConfigOutput "true" config.submodule.enable ./declare-submoduleWith-path.nix
# Check that disabledModules works recursively and correctly
checkConfigOutput '^true$' config.enable ./disable-recursive/main.nix
checkConfigOutput '^true$' config.enable ./disable-recursive/{main.nix,disable-foo.nix}
checkConfigOutput '^true$' config.enable ./disable-recursive/{main.nix,disable-bar.nix}
checkConfigOutput "true" config.enable ./disable-recursive/main.nix
checkConfigOutput "true" config.enable ./disable-recursive/{main.nix,disable-foo.nix}
checkConfigOutput "true" config.enable ./disable-recursive/{main.nix,disable-bar.nix}
checkConfigError 'The option .* does not exist. Definition values:\n\s*- In .*: true' config.enable ./disable-recursive/{main.nix,disable-foo.nix,disable-bar.nix}
# Check that imports can depend on derivations
checkConfigOutput '^true$' config.enable ./import-from-store.nix
checkConfigOutput "true" config.enable ./import-from-store.nix
# Check that configs can be conditional on option existence
checkConfigOutput '^true$' config.enable ./define-option-dependently.nix ./declare-enable.nix ./declare-int-positive-value.nix
checkConfigOutput '^360$' config.value ./define-option-dependently.nix ./declare-enable.nix ./declare-int-positive-value.nix
checkConfigOutput '^7$' config.value ./define-option-dependently.nix ./declare-int-positive-value.nix
checkConfigOutput '^true$' config.set.enable ./define-option-dependently-nested.nix ./declare-enable-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput '^360$' config.set.value ./define-option-dependently-nested.nix ./declare-enable-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput '^7$' config.set.value ./define-option-dependently-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput true config.enable ./define-option-dependently.nix ./declare-enable.nix ./declare-int-positive-value.nix
checkConfigOutput 360 config.value ./define-option-dependently.nix ./declare-enable.nix ./declare-int-positive-value.nix
checkConfigOutput 7 config.value ./define-option-dependently.nix ./declare-int-positive-value.nix
checkConfigOutput true config.set.enable ./define-option-dependently-nested.nix ./declare-enable-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput 360 config.set.value ./define-option-dependently-nested.nix ./declare-enable-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput 7 config.set.value ./define-option-dependently-nested.nix ./declare-int-positive-value-nested.nix
# Check attrsOf and lazyAttrsOf. Only lazyAttrsOf should be lazy, and only
# attrsOf should work with conditional definitions
# In addition, lazyAttrsOf should honor an option's emptyValue
checkConfigError "is not lazy" config.isLazy ./declare-attrsOf.nix ./attrsOf-lazy-check.nix
checkConfigOutput '^true$' config.isLazy ./declare-lazyAttrsOf.nix ./attrsOf-lazy-check.nix
checkConfigOutput '^true$' config.conditionalWorks ./declare-attrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput '^false$' config.conditionalWorks ./declare-lazyAttrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput '^"empty"$' config.value.foo ./declare-lazyAttrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput "true" config.isLazy ./declare-lazyAttrsOf.nix ./attrsOf-lazy-check.nix
checkConfigOutput "true" config.conditionalWorks ./declare-attrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput "false" config.conditionalWorks ./declare-lazyAttrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput "empty" config.value.foo ./declare-lazyAttrsOf.nix ./attrsOf-conditional-check.nix
# Even with multiple assignments, a type error should be thrown if any of them aren't valid
@@ -227,105 +222,64 @@ checkConfigError 'A definition for option .* is not of type .*' \
## Freeform modules
# Assigning without a declared option should work
checkConfigOutput '^"24"$' config.value ./freeform-attrsOf.nix ./define-value-string.nix
checkConfigOutput 24 config.value ./freeform-attrsOf.nix ./define-value-string.nix
# Having no freeform assignments shouldn't make it error
checkConfigOutput '^{ }$' config ./freeform-attrsOf.nix
checkConfigOutput '{ }' config ./freeform-attrsOf.nix
# but only if the type matches
checkConfigError 'A definition for option .* is not of type .*' config.value ./freeform-attrsOf.nix ./define-value-list.nix
# and properties should be applied
checkConfigOutput '^"yes"$' config.value ./freeform-attrsOf.nix ./define-value-string-properties.nix
checkConfigOutput yes config.value ./freeform-attrsOf.nix ./define-value-string-properties.nix
# Options should still be declarable, and be able to have a type that doesn't match the freeform type
checkConfigOutput '^false$' config.enable ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
checkConfigOutput '^"24"$' config.value ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
checkConfigOutput false config.enable ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
checkConfigOutput 24 config.value ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
# and this should work too with nested values
checkConfigOutput '^false$' config.nest.foo ./freeform-attrsOf.nix ./freeform-nested.nix
checkConfigOutput '^"bar"$' config.nest.bar ./freeform-attrsOf.nix ./freeform-nested.nix
checkConfigOutput false config.nest.foo ./freeform-attrsOf.nix ./freeform-nested.nix
checkConfigOutput bar config.nest.bar ./freeform-attrsOf.nix ./freeform-nested.nix
# Check whether a declared option can depend on a freeform-typed one
checkConfigOutput '^null$' config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix
checkConfigOutput '^"24"$' config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix ./define-value-string.nix
checkConfigOutput null config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix
checkConfigOutput 24 config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix ./define-value-string.nix
# Check whether a freeform-typed value can depend on a declared option; this can only work with lazyAttrsOf
checkConfigError 'infinite recursion encountered' config.foo ./freeform-attrsOf.nix ./freeform-unstr-dep-str.nix
checkConfigError 'The option .* is used but not defined' config.foo ./freeform-lazyAttrsOf.nix ./freeform-unstr-dep-str.nix
checkConfigOutput '^"24"$' config.foo ./freeform-lazyAttrsOf.nix ./freeform-unstr-dep-str.nix ./define-value-string.nix
# submodules in freeformTypes should have their locations annotated
checkConfigOutput '/freeform-submodules.nix"$' config.fooDeclarations.0 ./freeform-submodules.nix
# freeformTypes can get merged using `types.type`, including submodules
checkConfigOutput '^10$' config.free.xxx.foo ./freeform-submodules.nix
checkConfigOutput '^10$' config.free.yyy.bar ./freeform-submodules.nix
checkConfigOutput 24 config.foo ./freeform-lazyAttrsOf.nix ./freeform-unstr-dep-str.nix ./define-value-string.nix
## types.anything
# Check that attribute sets are merged recursively
checkConfigOutput '^null$' config.value.foo ./types-anything/nested-attrs.nix
checkConfigOutput '^null$' config.value.l1.foo ./types-anything/nested-attrs.nix
checkConfigOutput '^null$' config.value.l1.l2.foo ./types-anything/nested-attrs.nix
checkConfigOutput '^null$' config.value.l1.l2.l3.foo ./types-anything/nested-attrs.nix
checkConfigOutput null config.value.foo ./types-anything/nested-attrs.nix
checkConfigOutput null config.value.l1.foo ./types-anything/nested-attrs.nix
checkConfigOutput null config.value.l1.l2.foo ./types-anything/nested-attrs.nix
checkConfigOutput null config.value.l1.l2.l3.foo ./types-anything/nested-attrs.nix
# Attribute sets that are coercible to strings shouldn't be recursed into
checkConfigOutput '^"foo"$' config.value.outPath ./types-anything/attrs-coercible.nix
checkConfigOutput foo config.value.outPath ./types-anything/attrs-coercible.nix
# Multiple lists aren't concatenated together
checkConfigError 'The option .* has conflicting definitions' config.value ./types-anything/lists.nix
# Check that all equalizable atoms can be used as long as all definitions are equal
checkConfigOutput '^0$' config.value.int ./types-anything/equal-atoms.nix
checkConfigOutput '^false$' config.value.bool ./types-anything/equal-atoms.nix
checkConfigOutput '^""$' config.value.string ./types-anything/equal-atoms.nix
checkConfigOutput '^/$' config.value.path ./types-anything/equal-atoms.nix
checkConfigOutput '^null$' config.value.null ./types-anything/equal-atoms.nix
checkConfigOutput '^0.1$' config.value.float ./types-anything/equal-atoms.nix
checkConfigOutput 0 config.value.int ./types-anything/equal-atoms.nix
checkConfigOutput false config.value.bool ./types-anything/equal-atoms.nix
checkConfigOutput '""' config.value.string ./types-anything/equal-atoms.nix
checkConfigOutput / config.value.path ./types-anything/equal-atoms.nix
checkConfigOutput null config.value.null ./types-anything/equal-atoms.nix
checkConfigOutput 0.1 config.value.float ./types-anything/equal-atoms.nix
# Functions can't be merged together
checkConfigError "The option .value.multiple-lambdas.<function body>. has conflicting option types" config.applied.multiple-lambdas ./types-anything/functions.nix
checkConfigOutput '^<LAMBDA>$' config.value.single-lambda ./types-anything/functions.nix
checkConfigOutput '^null$' config.applied.merging-lambdas.x ./types-anything/functions.nix
checkConfigOutput '^null$' config.applied.merging-lambdas.y ./types-anything/functions.nix
checkConfigOutput '<LAMBDA>' config.value.single-lambda ./types-anything/functions.nix
checkConfigOutput 'null' config.applied.merging-lambdas.x ./types-anything/functions.nix
checkConfigOutput 'null' config.applied.merging-lambdas.y ./types-anything/functions.nix
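# Sketch of the lambda-merging behaviour exercised above (fixture shape
# assumed): several function definitions merge into one whose results are
# merged recursively, so they must agree wherever their results overlap.
#
#   { lib, ... }: {
#     options.value = lib.mkOption { type = lib.types.anything; };
#     imports = [
#       { value.merging-lambdas = x: { inherit x; }; }
#       { value.merging-lambdas = x: { y = x; }; }  # result: { x = ...; y = ...; }
#     ];
#   }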
# Check that all mk* modifiers are applied
checkConfigError 'attribute .* not found' config.value.mkiffalse ./types-anything/mk-mods.nix
checkConfigOutput '^{ }$' config.value.mkiftrue ./types-anything/mk-mods.nix
checkConfigOutput '^1$' config.value.mkdefault ./types-anything/mk-mods.nix
checkConfigOutput '^{ }$' config.value.mkmerge ./types-anything/mk-mods.nix
checkConfigOutput '^true$' config.value.mkbefore ./types-anything/mk-mods.nix
checkConfigOutput '^1$' config.value.nested.foo ./types-anything/mk-mods.nix
checkConfigOutput '^"baz"$' config.value.nested.bar.baz ./types-anything/mk-mods.nix
checkConfigOutput '{ }' config.value.mkiftrue ./types-anything/mk-mods.nix
checkConfigOutput 1 config.value.mkdefault ./types-anything/mk-mods.nix
checkConfigOutput '{ }' config.value.mkmerge ./types-anything/mk-mods.nix
checkConfigOutput true config.value.mkbefore ./types-anything/mk-mods.nix
checkConfigOutput 1 config.value.nested.foo ./types-anything/mk-mods.nix
checkConfigOutput baz config.value.nested.bar.baz ./types-anything/mk-mods.nix
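# Rough sketch of the module shape mk-mods.nix exercises (names assumed);
# the usual mk* modifiers keep their meaning under types.anything:
#
#   { lib, ... }: {
#     options.value = lib.mkOption { type = lib.types.anything; };
#     config.value = {
#       mkiffalse = lib.mkIf false { };  # dropped, hence "attribute not found"
#       mkiftrue  = lib.mkIf true { };
#       mkdefault = lib.mkDefault 1;     # overridable default priority
#       mkmerge   = lib.mkMerge [ { } ];
#       mkbefore  = lib.mkBefore true;   # order-only modifier
#     };
#   }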
## types.functionTo
checkConfigOutput '^"input is input"$' config.result ./functionTo/trivial.nix
checkConfigOutput '^"a b"$' config.result ./functionTo/merging-list.nix
checkConfigOutput "input is input" config.result ./functionTo/trivial.nix
checkConfigOutput "a b" config.result ./functionTo/merging-list.nix
checkConfigError 'A definition for option .fun.\[function body\]. is not of type .string.. Definition values:\n\s*- In .*wrong-type.nix' config.result ./functionTo/wrong-type.nix
checkConfigOutput '^"b a"$' config.result ./functionTo/list-order.nix
checkConfigOutput '^"a c"$' config.result ./functionTo/merging-attrs.nix
# moduleType
checkConfigOutput '^"a b"$' config.resultFoo ./declare-variants.nix ./define-variant.nix
checkConfigOutput '^"a y z"$' config.resultFooBar ./declare-variants.nix ./define-variant.nix
checkConfigOutput '^"a b c"$' config.resultFooFoo ./declare-variants.nix ./define-variant.nix
## emptyValues
checkConfigOutput "[ ]" config.list.a ./emptyValues.nix
checkConfigOutput "{ }" config.attrs.a ./emptyValues.nix
checkConfigOutput "null" config.null.a ./emptyValues.nix
checkConfigOutput "{ }" config.submodule.a ./emptyValues.nix
# These types don't have empty values
checkConfigError 'The option .int.a. is used but not defined' config.int.a ./emptyValues.nix
checkConfigError 'The option .nonEmptyList.a. is used but not defined' config.nonEmptyList.a ./emptyValues.nix
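# emptyValue applies when every definition of an option is filtered away
# (e.g. all mkIf false): listOf yields [ ], attrsOf { }, nullOr null, while
# int and nonEmptyList declare no emptyValue, hence the errors above. A
# minimal sketch (fixture shape assumed):
#
#   { lib, ... }: {
#     options.list.a = lib.mkOption { type = lib.types.listOf lib.types.int; };
#     config.list.a = lib.mkIf false [ 1 ];  # evaluates to [ ]
#   }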
## types.raw
checkConfigOutput "{ foo = <CODE>; }" config.unprocessedNesting ./raw.nix
checkConfigOutput "10" config.processedToplevel ./raw.nix
checkConfigError "The option .multiple. is defined multiple times" config.multiple ./raw.nix
checkConfigOutput "bar" config.priorities ./raw.nix
## Option collision
checkConfigError \
'The option .set. in module .*/declare-set.nix. would be a parent of the following options, but its type .attribute set of signed integers. does not support nested options.\n\s*- option[(]s[)] with prefix .set.enable. in module .*/declare-enable-nested.nix.' \
config.set \
./declare-set.nix ./declare-enable-nested.nix
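# The collision above, sketched as two modules (shapes assumed): one types
# `set` as a plain value mapping, the other declares an option inside it,
# and attrsOf int cannot host nested options.
#
#   { lib, ... }: {
#     options.set = lib.mkOption { type = lib.types.attrsOf lib.types.int; };
#   }
#   { lib, ... }: {
#     options.set.enable = lib.mkEnableOption "set";  # clashes with the above
#   }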
# Test that types.optionType merges types correctly
checkConfigOutput '^10$' config.theOption.int ./optionTypeMerging.nix
checkConfigOutput '^"hello"$' config.theOption.str ./optionTypeMerging.nix
# Test that types.optionType correctly annotates option locations
checkConfigError 'The option .theOption.nested. in .other.nix. is already declared in .optionTypeFile.nix.' config.theOption.nested ./optionTypeFile.nix
# Test that types.optionType leaves types untouched as long as they don't need to be merged
checkConfigOutput 'ok' config.freeformItems.foo.bar ./adhoc-freeformType-survives-type-merge.nix
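# A minimal sketch of optionType merging (names assumed, modelled on
# optionTypeMerging.nix): the types defined for theType are merged into one
# submodule type carrying both options.
#
#   { lib, config, ... }: {
#     options.theType = lib.mkOption { type = lib.types.optionType; };
#     options.theOption = lib.mkOption { type = config.theType; };
#     imports = [
#       { theType = lib.types.submodule {
#           options.int = lib.mkOption { type = lib.types.int; default = 10; };
#         }; }
#       { theType = lib.types.submodule {
#           options.str = lib.mkOption { type = lib.types.str; default = "hello"; };
#         }; }
#     ];
#   }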
checkConfigOutput "b a" config.result ./functionTo/list-order.nix
checkConfigOutput "a c" config.result ./functionTo/merging-attrs.nix
cat <<EOF
====== module tests ======
@@ -333,7 +287,7 @@ $pass Pass
$fail Fail
EOF
if [ "$fail" -ne 0 ]; then
if test $fail -ne 0; then
exit 1
fi
exit 0

View File

@@ -1,14 +0,0 @@
{ lib, ... }: {
options.dummy = lib.mkOption { type = lib.types.anything; default = {}; };
freeformType =
let
a = lib.types.attrsOf (lib.types.submodule { options.bar = lib.mkOption { }; });
in
# modifying types like this breaks type merging.
# This test makes sure that type merging is not performed when only a single declaration exists.
# Don't modify types in practice!
a // {
merge = loc: defs: { freeformItems = a.merge loc defs; };
};
config.foo.bar = "ok";
}

View File

@@ -1,13 +1,6 @@
{ lib, ... }:
let
deathtrapArgs = lib.mapAttrs
(k: _: throw "The module system is too strict, accessing an unused option's ${k} mkOption-attribute.")
(lib.functionArgs lib.mkOption);
in
{
{ lib, ... }: {
options.value = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = {};
};
options.testing-laziness-so-don't-read-me = lib.mkOption deathtrapArgs;
}

View File

@@ -1,10 +0,0 @@
{ lib, ... }:
let
inherit (lib) mkOption types;
in
{
options.bare-submodule.deep = mkOption {
type = types.int;
default = 2;
};
}

View File

@@ -1,10 +0,0 @@
{ lib, ... }:
let
inherit (lib) mkOption types;
in
{
options.bare-submodule.deep = mkOption {
type = types.int;
default = 2;
};
}

View File

@@ -1,19 +0,0 @@
{ config, lib, ... }:
let
inherit (lib) mkOption types;
in
{
options.bare-submodule = mkOption {
type = types.submoduleWith {
shorthandOnlyDefinesConfig = config.shorthandOnlyDefinesConfig;
modules = [
{
options.nested = mkOption {
type = types.int;
default = 1;
};
}
];
};
};
}

View File

@@ -1,18 +0,0 @@
{ config, lib, ... }:
let
inherit (lib) mkOption types;
in
{
options.bare-submodule = mkOption {
type = types.submoduleWith {
modules = [ ];
shorthandOnlyDefinesConfig = config.shorthandOnlyDefinesConfig;
};
default = {};
};
# config-dependent options: not recommended in general, but useful for making this test parameterized
options.shorthandOnlyDefinesConfig = mkOption {
default = false;
};
}

View File

@@ -1,12 +0,0 @@
{ lib, ... }:
{
options.set = lib.mkOption {
default = { };
example = { a = 1; };
type = lib.types.attrsOf lib.types.int;
description = ''
Some descriptive text
'';
};
}

View File

@@ -1,9 +0,0 @@
{ lib, moduleType, ... }:
let inherit (lib) mkOption types;
in
{
options.variants = mkOption {
type = types.lazyAttrsOf moduleType;
default = {};
};
}

View File

@@ -1,4 +0,0 @@
{
bare-submodule.nested = 42;
bare-submodule.deep = 420;
}

View File

@@ -1 +0,0 @@
{ shorthandOnlyDefinesConfig = true; }
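
With shorthandOnlyDefinesConfig = true, an attribute-set definition of the
submodule is taken entirely as config, so keys like `options` lose their
special meaning in shorthand form. A minimal sketch of how the flag changes
definitions (names assumed, after the fixtures above):

    { lib, ... }: {
      options.bare-submodule = lib.mkOption {
        type = lib.types.submoduleWith {
          modules = [
            { options.nested = lib.mkOption { type = lib.types.int; default = 1; }; }
          ];
          shorthandOnlyDefinesConfig = true;
        };
        default = { };
      };
      # shorthand definition, wrapped internally as { config = ...; }:
      config.bare-submodule.nested = 42;
    }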

View File

@@ -1,22 +0,0 @@
{ config, lib, ... }:
let inherit (lib) types mkOption attrNames;
in
{
options = {
attrs = mkOption { type = types.attrsOf lib.types.int; };
result = mkOption { };
resultFoo = mkOption { };
resultFooBar = mkOption { };
resultFooFoo = mkOption { };
};
config = {
attrs.a = 1;
variants.foo.attrs.b = 1;
variants.bar.attrs.y = 1;
variants.foo.variants.bar.attrs.z = 1;
variants.foo.variants.foo.attrs.c = 3;
resultFoo = lib.concatMapStringsSep " " toString (attrNames config.variants.foo.attrs);
resultFooBar = lib.concatMapStringsSep " " toString (attrNames config.variants.foo.variants.bar.attrs);
resultFooFoo = lib.concatMapStringsSep " " toString (attrNames config.variants.foo.variants.foo.attrs);
};
}

Some files were not shown because too many files have changed in this diff.