Mirror of https://github.com/NixOS/nix (synced 2025-07-07 22:33:57 +02:00)

Merge branch 'read-only-local-store' into overlayfs-store

Commit 71f3bad749
79 changed files with 1152 additions and 367 deletions
@@ -11,6 +11,10 @@ assignees: ''

 <!-- describe your problem -->

+## Proposal
+
+<!-- propose a solution -->
+
 ## Checklist

 <!-- make sure this issue is not redundant or obsolete -->

@@ -22,10 +26,6 @@ assignees: ''

 [source]: https://github.com/NixOS/nix/tree/master/doc/manual/src
 [open documentation issues and pull requests]: https://github.com/NixOS/nix/labels/documentation

-## Proposal
-
-<!-- propose a solution -->
-
 ## Priorities

 Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
.github/workflows/backport.yml (vendored, 2 changes)

@@ -21,7 +21,7 @@ jobs:
           fetch-depth: 0
       - name: Create backport PRs
         # should be kept in sync with `version`
-        uses: zeebe-io/backport-action@v1.3.0
+        uses: zeebe-io/backport-action@v1.3.1
         with:
           # Config README: https://github.com/zeebe-io/backport-action#backport-action
           github_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/ci.yml (vendored, 10 changes)

@@ -11,6 +11,7 @@ jobs:
   tests:
     needs: [check_secrets]
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-latest, macos-latest]
     runs-on: ${{ matrix.os }}

@@ -19,7 +20,7 @@ jobs:
     - uses: actions/checkout@v3
       with:
        fetch-depth: 0
-    - uses: cachix/install-nix-action@v21
+    - uses: cachix/install-nix-action@v22
      with:
        # The sandbox would otherwise be disabled by default on Darwin
        extra_nix_config: "sandbox = true"

@@ -61,7 +62,7 @@ jobs:
      with:
        fetch-depth: 0
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-    - uses: cachix/install-nix-action@v21
+    - uses: cachix/install-nix-action@v22
      with:
        install_url: https://releases.nixos.org/nix/nix-2.13.3/install
    - uses: cachix/cachix-action@v12

@@ -76,13 +77,14 @@ jobs:
     needs: [installer, check_secrets]
     if: github.event_name == 'push' && needs.check_secrets.outputs.cachix == 'true'
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-latest, macos-latest]
     runs-on: ${{ matrix.os }}
     steps:
    - uses: actions/checkout@v3
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-    - uses: cachix/install-nix-action@v21
+    - uses: cachix/install-nix-action@v22
      with:
        install_url: '${{needs.installer.outputs.installerURL}}'
        install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"

@@ -109,7 +111,7 @@ jobs:
    - uses: actions/checkout@v3
      with:
        fetch-depth: 0
-    - uses: cachix/install-nix-action@v21
+    - uses: cachix/install-nix-action@v22
      with:
        install_url: https://releases.nixos.org/nix/nix-2.13.3/install
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
.gitignore (vendored, 1 change)

@@ -89,6 +89,7 @@ perl/Makefile.config
 /tests/ca/config.nix
 /tests/dyn-drv/config.nix
 /tests/repl-result-out
+/tests/test-libstoreconsumer/test-libstoreconsumer

 # /tests/lang/
 /tests/lang/*.out

@@ -5,7 +5,6 @@ We appreciate your support.

 Reading and following these guidelines will help us make the contribution process easy and effective for everyone involved.

-
 ## Report a bug

 1. Check on the [GitHub issue tracker](https://github.com/NixOS/nix/issues) if your bug was already reported.

@@ -30,6 +29,8 @@ Check out the [security policy](https://github.com/NixOS/nix/security/policy).
 You can use [labels](https://github.com/NixOS/nix/labels) to filter for relevant topics.

 2. Search for related issues that cover what you're going to work on. It could help to mention there that you will work on the issue.

+   Issues labeled ["good first issue"](https://github.com/NixOS/nix/labels/good-first-issue) should be relatively easy to fix and are likely to get merged quickly.
+
    Pull requests addressing issues labeled ["idea approved"](https://github.com/NixOS/nix/labels/idea%20approved) are especially welcomed by maintainers and will receive prioritised review.

 3. Check the [Nix reference manual](https://nixos.org/manual/nix/unstable/contributing/hacking.html) for information on building Nix and running its tests.
Makefile (1 change)

@@ -28,6 +28,7 @@ makefiles += \
   src/libexpr/tests/local.mk \
   tests/local.mk \
   tests/overlay-local-store/local.mk \
+  tests/test-libstoreconsumer/local.mk \
   tests/plugins/local.mk
 else
 makefiles += \

@@ -97,7 +97,10 @@
     - [manifest.json](command-ref/files/manifest.json.md)
   - [Channels](command-ref/files/channels.md)
   - [Default Nix expression](command-ref/files/default-nix-expression.md)
-- [Architecture](architecture/architecture.md)
+- [Architecture and Design](architecture/architecture.md)
+  - [File System Object](architecture/file-system-object.md)
+- [Protocols](protocols/protocols.md)
+  - [Serving Tarball Flakes](protocols/tarball-fetcher.md)
 - [Glossary](glossary.md)
 - [Contributing](contributing/contributing.md)
   - [Hacking](contributing/hacking.md)

@@ -7,11 +7,11 @@ It should help users understand why Nix behaves as it does, and it should help d

 Nix consists of [hierarchical layers].

-[hierarchical layers]: https://en.m.wikipedia.org/wiki/Multitier_architecture#Layers
+[hierarchical layers]: https://en.wikipedia.org/wiki/Multitier_architecture#Layers

 The following [concept map] shows its main components (rectangles), the objects they operate on (rounded rectangles), and their interactions (connecting phrases):

-[concept map]: https://en.m.wikipedia.org/wiki/Concept_map
+[concept map]: https://en.wikipedia.org/wiki/Concept_map

 ```

@@ -76,7 +76,7 @@ The result of a build task can be input to another build task.
 The following [data flow diagram] shows a build plan for illustration.
 Build inputs used as instructions to a build task are marked accordingly:

-[data flow diagram]: https://en.m.wikipedia.org/wiki/Data-flow_diagram
+[data flow diagram]: https://en.wikipedia.org/wiki/Data-flow_diagram

 ```
 +--------------------------------------------------------------------+

doc/manual/src/architecture/file-system-object.md (new file, 64 lines)

@@ -0,0 +1,64 @@
+# File System Object
+
+Nix uses a simplified model of the file system, which consists of file system objects.
+Every file system object is one of the following:
+
+ - File
+
+   - A possibly empty sequence of bytes for contents
+   - A single boolean representing the [executable](https://en.m.wikipedia.org/wiki/File-system_permissions#Permissions) permission
+
+ - Directory
+
+   Mapping of names to child file system objects
+
+ - [Symbolic link](https://en.m.wikipedia.org/wiki/Symbolic_link)
+
+   An arbitrary string.
+   Nix does not assign any semantics to symbolic links.
+
+File system objects and their children form a tree.
+A bare file or symlink can be a root file system object.
+
+Nix does not encode any other file system notions such as [hard links](https://en.m.wikipedia.org/wiki/Hard_link), [permissions](https://en.m.wikipedia.org/wiki/File-system_permissions), timestamps, or other metadata.
+
+## Examples of file system objects
+
+A plain file:
+
+```
+50 B, executable: false
+```
+
+An executable file:
+
+```
+122 KB, executable: true
+```
+
+A symlink:
+
+```
+-> /usr/bin/sh
+```
+
+A directory with contents:
+
+```
+├── bin
+│   └── hello: 35 KB, executable: true
+└── share
+    ├── info
+    │   └── hello.info: 36 KB, executable: false
+    └── man
+        └── man1
+            └── hello.1.gz: 790 B, executable: false
+```
+
+A directory that contains a symlink and other directories:
+
+```
+├── bin -> share/go/bin
+├── nix-support/
+└── share/
+```

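Editor's note: the new page models a file system object as exactly three cases. As a rough sketch, that model could be written as the following C++ data type. The names are illustrative only; this is not how Nix's sources actually represent file system objects.

```cpp
#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <variant>
#include <vector>

// Illustrative model of the three file system object kinds described
// above; not Nix's internal representation.
struct FileSystemObject;

struct File {
    std::vector<std::byte> contents; // a possibly empty sequence of bytes
    bool executable = false;         // the single permission bit Nix records
};

struct Directory {
    // Mapping of names to child file system objects.
    std::map<std::string, std::unique_ptr<FileSystemObject>> entries;
};

struct Symlink {
    std::string target; // an arbitrary string; Nix assigns no semantics to it
};

struct FileSystemObject {
    std::variant<File, Directory, Symlink> node;
};
```

A `Directory` owning further `FileSystemObject`s is what makes the objects form a tree, and a bare `File` or `Symlink` can serve as the root, matching the prose above.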
@@ -4,7 +4,7 @@

 # Synopsis

-`nix-channel` {`--add` url [*name*] | `--remove` *name* | `--list` | `--update` [*names…*] | `--rollback` [*generation*] }
+`nix-channel` {`--add` url [*name*] | `--remove` *name* | `--list` | `--update` [*names…*] | `--list-generations` | `--rollback` [*generation*] }

 # Description

@@ -39,6 +39,15 @@ This command has the following operations:
   for `nix-env` operations (by symlinking them from the directory
   `~/.nix-defexpr`).

+- `--list-generations`\
+  Prints a list of all the current existing generations for the
+  channel profile.
+
+  Works the same way as
+  ```
+  nix-env --profile /nix/var/nix/profiles/per-user/$USER/channels --list-generations
+  ```
+
 - `--rollback` \[*generation*\]\
   Reverts the previous call to `nix-channel
   --update`. Optionally, you can specify a specific channel generation

@@ -1,6 +1,6 @@
 # Name

-`nix-collect-garbage` - delete unreachable store paths
+`nix-collect-garbage` - delete unreachable [store objects]

 # Synopsis

@@ -8,17 +8,57 @@

 # Description

-The command `nix-collect-garbage` is mostly an alias of [`nix-store
---gc`](@docroot@/command-ref/nix-store/gc.md), that is, it deletes all
-unreachable paths in the Nix store to clean up your system. However,
-it provides two additional options: `-d` (`--delete-old`), which
-deletes all old generations of all profiles in `/nix/var/nix/profiles`
-by invoking `nix-env --delete-generations old` on all profiles (of
-course, this makes rollbacks to previous configurations impossible);
-and `--delete-older-than` *period*, where period is a value such as
-`30d`, which deletes all generations older than the specified number
-of days in all profiles in `/nix/var/nix/profiles` (except for the
-generations that were active at that point in time).
+The command `nix-collect-garbage` is mostly an alias of [`nix-store --gc`](@docroot@/command-ref/nix-store/gc.md).
+That is, it deletes all unreachable [store objects] in the Nix store to clean up your system.
+
+However, it provides two additional options,
+[`--delete-old`](#opt-delete-old) and [`--delete-older-than`](#opt-delete-older-than),
+which also delete old [profiles], allowing potentially more [store objects] to be deleted because profiles are also garbage collection roots.
+These options are the equivalent of running
+[`nix-env --delete-generations`](@docroot@/command-ref/nix-env/delete-generations.md)
+with various arguments on multiple profiles,
+prior to running `nix-collect-garbage` (or just `nix-store --gc`) without any flags.
+
+> **Note**
+>
+> Deleting previous configurations makes rollbacks to them impossible.
+
+These flags should be used with care, because they potentially delete generations of profiles used by other users on the system.
+
+## Locations searched for profiles
+
+`nix-collect-garbage` cannot know about all profiles; that information doesn't exist.
+Instead, it looks in a few locations, and acts on all profiles it finds there:
+
+1. The default profile locations as specified in the [profiles] section of the manual.
+
+2. > **NOTE**
+   >
+   > Not stable; subject to change.
+   >
+   > Do not rely on this functionality; it just exists for migration purposes and may change in the future.
+   > These deprecated paths remain a private implementation detail of Nix.
+
+   `$NIX_STATE_DIR/profiles` and `$NIX_STATE_DIR/profiles/per-user`.
+
+   With the exception of `$NIX_STATE_DIR/profiles/per-user/root` and `$NIX_STATE_DIR/profiles/default`, these directories are no longer used by other commands.
+   `nix-collect-garbage` looks there anyway in order to clean up profiles from older versions of Nix.
+
+# Options
+
+These options are for deleting old [profiles] prior to deleting unreachable [store objects].
+
+- <span id="opt-delete-old">[`--delete-old`](#opt-delete-old)</span> / `-d`\
+  Delete all old generations of profiles.
+
+  This is the equivalent of invoking `nix-env --delete-generations old` on each found profile.
+
+- <span id="opt-delete-older-than">[`--delete-older-than`](#opt-delete-older-than)</span> *period*\
+  Delete all generations of profiles older than the specified amount (except for the generations that were active at that point in time).
+  *period* is a value such as `30d`, which would mean 30 days.
+
+  This is the equivalent of invoking [`nix-env --delete-generations <period>`](@docroot@/command-ref/nix-env/delete-generations.md#generations-days) on each found profile.
+  See the documentation of that command for additional information about the *period* argument.

 {{#include ./opt-common.md}}

@@ -32,3 +72,6 @@ generations of each profile, do

 ```console
 $ nix-collect-garbage -d
 ```
+
+[profiles]: @docroot@/command-ref/files/profiles.md
+[store objects]: @docroot@/glossary.md#gloss-store-object

@@ -9,14 +9,39 @@

 # Description

 This operation deletes the specified generations of the current profile.
-The generations can be a list of generation numbers, the special value
-`old` to delete all non-current generations, a value such as `30d` to
-delete all generations older than the specified number of days (except
-for the generation that was active at that point in time), or a value
-such as `+5` to keep the last `5` generations ignoring any newer than
-current, e.g., if `30` is the current generation `+5` will delete
-generation `25` and all older generations. Periodically deleting old
-generations is important to make garbage collection effective.
+
+*generations* can be one of the following:
+
+- <span id="generations-list">`<number>...`</span>:\
+  A list of generation numbers, each one a separate command-line argument.
+
+  Delete exactly the profile generations given by their generation number.
+  Deleting the current generation is not allowed.
+
+- The special value <span id="generations-old">`old`</span>
+
+  Delete all generations older than the current one.
+
+- <span id="generations-days">`<days>d`</span>:\
+  The last *days* days
+
+  *Example*: `30d`
+
+  Delete all generations older than *days* days.
+  The generation that was active at that point in time is excluded, and will not be deleted.
+
+- <span id="generations-count">`+<count>`</span>:\
+  The last *count* generations up to the present
+
+  *Example*: `+5`
+
+  Keep the last *count* generations, along with any newer than current.
+
+Periodically deleting old generations is important to make garbage collection
+effective.
+This is because profiles are also garbage collection roots: any [store object] reachable from a profile is "alive" and ineligible for deletion.
+
+[store object]: @docroot@/glossary.md#gloss-store-object

 {{#include ./opt-common.md}}

@@ -28,19 +53,35 @@ generations is important to make garbage collection effective.

 # Examples

+## Delete explicit generation numbers
+
 ```console
 $ nix-env --delete-generations 3 4 8
 ```

+Delete the generations numbered 3, 4, and 8, so long as the current active generation is not any of those.
+
+## Keep most-recent by count
+
 ```console
 $ nix-env --delete-generations +5
 ```

+Suppose `30` is the current generation, and we currently have generations numbered `20` through `32`.
+
+Then this command will delete generations `20` through `25` (`<= 30 - 5`),
+and keep generations `26` through `32` (`> 30 - 5`).
+
+## Keep most-recent in days
+
 ```console
 $ nix-env --delete-generations 30d
 ```

+This command will delete all generations older than 30 days, except for the generation that was active 30 days ago (if it currently exists).
+
+## Delete all older
+
 ```console
 $ nix-env --profile other_profile --delete-generations old
 ```

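Editor's note: the *generations* forms above reduce to simple selection rules over generation numbers. As a sketch, here is the `+<count>` rule in self-contained C++, matching the worked example in the new text (the helper is hypothetical, not the `nix-env` implementation):

```cpp
#include <iostream>
#include <set>

// Sketch of the "+<count>" rule: delete every generation at or below
// (current - count); everything newer stays, including generations
// newer than the current one. Hypothetical helper for illustration.
std::set<int> generationsToDelete(const std::set<int> & gens, int current, int count)
{
    std::set<int> doomed;
    for (int g : gens)
        if (g <= current - count)   // e.g. current 30, +5: delete <= 25
            doomed.insert(g);
    return doomed;
}

int main()
{
    std::set<int> gens;
    for (int g = 20; g <= 32; ++g) gens.insert(g);
    for (int g : generationsToDelete(gens, 30, 5))
        std::cout << g << " ";      // prints: 20 21 22 23 24 25
    std::cout << "\n";              // 26 through 32 are kept
}
```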
@@ -378,7 +378,7 @@ rm $(git ls-files doc/manual/ -o | grep -F '.md') && rmdir doc/manual/src/comman
 [`mdbook-linkcheck`] does not implement checking [URI fragments] yet.

 [`mdbook-linkcheck`]: https://github.com/Michael-F-Bryan/mdbook-linkcheck
-[URI fragments]: https://en.m.wikipedia.org/wiki/URI_fragment
+[URI fragments]: https://en.wikipedia.org/wiki/URI_fragment

 #### `@docroot@` variable

@@ -85,12 +85,17 @@

   [store path]: #gloss-store-path

+- [file system object]{#gloss-file-system-object}\
+  The Nix data model for representing simplified file system data.
+
+  See [File System Object](@docroot@/architecture/file-system-object.md) for details.
+
+  [file system object]: #gloss-file-system-object
+
 - [store object]{#gloss-store-object}\
-  A file that is an immediate child of the Nix store directory. These
-  can be regular files, but also entire directory trees. Store objects
-  can be sources (objects copied from outside of the store),
-  derivation outputs (objects produced by running a build task), or
-  derivations (files describing a build task).
+  A store object consists of a [file system object], [reference]s to other store objects, and other metadata.
+  It can be referred to by a [store path].

   [store object]: #gloss-store-object

@@ -112,9 +117,10 @@
   from some server.

 - [substituter]{#gloss-substituter}\
-  A *substituter* is an additional store from which Nix will
-  copy store objects it doesn't have. For details, see the
-  [`substituters` option](./command-ref/conf-file.md#conf-substituters).
+  An additional [store]{#gloss-store} from which Nix can obtain store objects instead of building them.
+  Often the substituter is a [binary cache](#gloss-binary-cache), but any store can serve as substituter.
+
+  See the [`substituters` configuration option](./command-ref/conf-file.md#conf-substituters) for details.

   [substituter]: #gloss-substituter

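Editor's note: the revised store object entry can be summed up in a tiny struct, shown here as a hypothetical sketch only; Nix's real types in `src/libstore` are more involved.

```cpp
#include <set>
#include <string>

// Hypothetical sketch of the glossary entry: a store object is a file
// system object plus references to other store objects plus metadata,
// addressed by a store path.
struct StoreObject
{
    // The file system data (see the File System Object page added above),
    // represented here by its NAR serialization as a stand-in.
    std::string narSerialization;
    // References to other store objects, by store path.
    std::set<std::string> references;
    // Other metadata, e.g. a content hash.
    std::string narHash;
};
```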
@@ -10,7 +10,7 @@
 - Bash Shell. The `./configure` script relies on bashisms, so Bash is
   required.

-- A version of GCC or Clang that supports C++17.
+- A version of GCC or Clang that supports C++20.

 - `pkg-config` to locate dependencies. If your distribution does not
   provide it, you can get it from

@@ -1,12 +1,11 @@
 # Nix Language

-The Nix language is
+The Nix language is designed for conveniently creating and composing *derivations* – precise descriptions of how contents of existing files are used to derive new files.
+It is:

 - *domain-specific*

-  It only exists for the Nix package manager:
-  to describe packages and configurations as well as their variants and compositions.
-  It is not intended for general purpose use.
+  It comes with [built-in functions](@docroot@/language/builtins.md) to integrate with the Nix store, which manages files and performs the derivations declared in the Nix language.

 - *declarative*

@@ -25,7 +24,7 @@ The Nix language is

 - *lazy*

-  Expressions are only evaluated when their value is needed.
+  Values are only computed when they are needed.

 - *dynamically typed*

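Editor's note: the reworded *lazy* bullet lends itself to a tiny illustration. A value wrapped in a thunk is computed only when first forced, and at most once. This is illustrative C++, not how the Nix evaluator actually implements thunks.

```cpp
#include <functional>
#include <iostream>
#include <optional>

// A toy thunk: the computation runs only when get() is first called.
template<typename T>
class Lazy {
    std::function<T()> compute;
    std::optional<T> cached;
public:
    explicit Lazy(std::function<T()> f) : compute(std::move(f)) {}
    const T & get() {
        if (!cached) cached = compute(); // evaluated at most once
        return *cached;
    }
};

int main() {
    Lazy<int> expensive([] {
        std::cout << "computing...\n";
        return 6 * 7;
    });
    // Nothing has been computed yet; the thunk is forced here:
    std::cout << expensive.get() << "\n"; // prints "computing..." then 42
    std::cout << expensive.get() << "\n"; // cached: prints only 42
}
```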
doc/manual/src/protocols/protocols.md (new file, 4 lines)

@@ -0,0 +1,4 @@
+# Protocols
+
+This chapter documents various developer-facing interfaces provided by
+Nix.
doc/manual/src/protocols/tarball-fetcher.md (new file, 42 lines)

@@ -0,0 +1,42 @@
+# Lockable HTTP Tarball Protocol
+
+Tarball flakes can be served as regular tarballs via HTTP or the file
+system (for `file://` URLs). Unless the server implements the Lockable
+HTTP Tarball protocol, it is the responsibility of the user to make sure that
+the URL always produces the same tarball contents.
+
+An HTTP server can return an "immutable" HTTP URL appropriate for lock
+files. This allows users to specify a tarball flake input in
+`flake.nix` that requests the latest version of a flake
+(e.g. `https://example.org/hello/latest.tar.gz`), while `flake.lock`
+will record a URL whose contents will not change
+(e.g. `https://example.org/hello/<revision>.tar.gz`). To do so, the
+server must return an [HTTP `Link` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link) with the `rel` attribute set to
+`immutable`, as follows:
+
+```
+Link: <flakeref>; rel="immutable"
+```
+
+(Note the required `<` and `>` characters around *flakeref*.)
+
+*flakeref* must be a tarball flakeref. It can contain flake attributes
+such as `narHash`, `rev` and `revCount`. If `narHash` is included, its
+value must be the NAR hash of the unpacked tarball (as computed via
+`nix hash path`). Nix checks the contents of the returned tarball
+against the `narHash` attribute. The `rev` and `revCount` attributes
+are useful when the tarball flake is a mirror of a fetcher type that
+has those attributes, such as Git or GitHub. They are not checked by
+Nix.
+
+```
+Link: <https://example.org/hello/442793d9ec0584f6a6e82fa253850c8085bb150a.tar.gz
+?rev=442793d9ec0584f6a6e82fa253850c8085bb150a
+&revCount=835
+&narHash=sha256-GUm8Uh/U74zFCwkvt9Mri4DSM%2BmHj3tYhXUkYpiv31M%3D>; rel="immutable"
+```
+
+(The linebreaks in this example are for clarity and must not be included in the actual response.)
+
+For tarball flakes, the value of the `lastModified` flake attribute is
+defined as the timestamp of the newest file inside the tarball.
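Editor's note: on the client side, handling this protocol amounts to extracting the flakeref from a `Link` header value carrying `rel="immutable"`. A rough, self-contained sketch of that step (the function name is hypothetical; Nix's actual handling lives in its file transfer and fetcher code):

```cpp
#include <iostream>
#include <optional>
#include <string>

// Hypothetical helper: given the value of an HTTP "Link" header,
// return the flakeref if the link is marked rel="immutable".
std::optional<std::string> parseImmutableLink(const std::string & header)
{
    auto lt = header.find('<');
    if (lt == std::string::npos) return std::nullopt;
    auto gt = header.find('>', lt);
    if (gt == std::string::npos) return std::nullopt;
    // The rel attribute follows the bracketed URL.
    if (header.find("rel=\"immutable\"", gt) == std::string::npos)
        return std::nullopt;
    return header.substr(lt + 1, gt - lt - 1);
}

int main()
{
    auto url = parseImmutableLink(
        "<https://example.org/hello/442793d9.tar.gz?revCount=835>; rel=\"immutable\"");
    if (url) std::cout << *url << "\n"; // the URL recorded in flake.lock
}
```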
@@ -1,2 +1,3 @@
 # Release X.Y (202?-??-??)

+- [`nix-channel`](../command-ref/nix-channel.md) now supports a `--list-generations` subcommand

@@ -590,6 +590,8 @@
       tests.sourcehutFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/sourcehut-flakes.nix;

+      tests.tarballFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/tarball-flakes.nix;
+
       tests.containers = runNixOSTestFor "x86_64-linux" ./tests/nixos/containers/containers.nix;

       tests.setuid = lib.genAttrs

@@ -117,6 +117,7 @@ Pull requests in this column are reviewed together during work meetings.
 This is both for spreading implementation knowledge and for establishing common values in code reviews.

 When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member.
+If significant changes are requested or reviewers cannot come to a conclusion in reasonable time, the pull request is [marked as draft](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request#converting-a-pull-request-to-a-draft).

 ### Assigned

@@ -100,7 +100,7 @@ poly_extra_try_me_commands() {
 poly_configure_nix_daemon_service() {
     task "Setting up the nix-daemon LaunchDaemon"
     _sudo "to set up the nix-daemon as a LaunchDaemon" \
-         /bin/cp -f "/nix/var/nix/profiles/default$NIX_DAEMON_DEST" "$NIX_DAEMON_DEST"
+         /usr/bin/install -m -rw-r--r-- "/nix/var/nix/profiles/default$NIX_DAEMON_DEST" "$NIX_DAEMON_DEST"

     _sudo "to load the LaunchDaemon plist for nix-daemon" \
          launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist

@@ -700,6 +700,10 @@ EOF
 }

 welcome_to_nix() {
+    local -r NIX_UID_RANGES="${NIX_FIRST_BUILD_UID}..$((NIX_FIRST_BUILD_UID + NIX_USER_COUNT - 1))"
+    local -r RANGE_TEXT=$(echo -ne "${BLUE}(uids [${NIX_UID_RANGES}])${ESC}")
+    local -r GROUP_TEXT=$(echo -ne "${BLUE}(gid ${NIX_BUILD_GROUP_ID})${ESC}")
+
     ok "Welcome to the Multi-User Nix Installation"

     cat <<EOF

@@ -713,8 +717,8 @@ manager. This will happen in a few stages:
 2. Show you what I am going to install and where. Then I will ask
    if you are ready to continue.

-3. Create the system users and groups that the Nix daemon uses to run
-   builds.
+3. Create the system users ${RANGE_TEXT} and groups ${GROUP_TEXT}
+   that the Nix daemon uses to run builds.

 4. Perform the basic installation of the Nix files daemon.

@@ -239,9 +239,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
     if (!store) throw Error("'--profile' is not supported for this Nix store");
     auto profile2 = absPath(*profile);
     switchLink(profile2,
-        createGeneration(
-            ref<LocalFSStore>(store),
-            profile2, storePath));
+        createGeneration(*store, profile2, storePath));
 }

 void MixProfile::updateProfile(const BuiltPaths & buildables)

@@ -165,7 +165,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s)
 {
     if (EvalSettings::isPseudoUrl(s)) {
         auto storePath = fetchers::downloadTarball(
-            state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath;
+            state.store, EvalSettings::resolvePseudoUrl(s), "source", false).tree.storePath;
         return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
     }

@@ -701,7 +701,7 @@ RawInstallablesCommand::RawInstallablesCommand()
 {
     addFlag({
         .longName = "stdin",
-        .description = "Read installables from the standard input.",
+        .description = "Read installables from the standard input. No default installable applied.",
         .handler = {&readFromStdIn, true}
     });

@@ -730,9 +730,9 @@ void RawInstallablesCommand::run(ref<Store> store)
         while (std::cin >> word) {
             rawInstallables.emplace_back(std::move(word));
         }
-    }
-
-    applyDefaultInstallables(rawInstallables);
+    } else {
+        applyDefaultInstallables(rawInstallables);
+    }
     run(store, std::move(rawInstallables));
 }

@@ -741,7 +741,8 @@ struct EvalSettings : Config
         If set to `true`, the Nix evaluator will not allow access to any
         files outside of the Nix search path (as set via the `NIX_PATH`
         environment variable or the `-I` option), or to URIs outside of
-        `allowed-uri`. The default is `false`.
+        [`allowed-uris`](../command-ref/conf-file.md#conf-allowed-uris).
+        The default is `false`.
     )"};

     Setting<bool> pureEval{this, false, "pure-eval",

@@ -793,7 +793,7 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
     if (EvalSettings::isPseudoUrl(elem.second)) {
         try {
             auto storePath = fetchers::downloadTarball(
-                store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).first.storePath;
+                store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).tree.storePath;
             res = { true, store->toRealPath(storePath) };
         } catch (FileTransferError & e) {
             logWarning({

@@ -6,7 +6,7 @@
 #include "globals.hh"
 #include "json-to-value.hh"
 #include "names.hh"
-#include "references.hh"
+#include "path-references.hh"
 #include "store-api.hh"
 #include "util.hh"
 #include "value-to-json.hh"

@@ -4058,18 +4058,6 @@ static RegisterPrimOp primop_splitVersion({
 RegisterPrimOp::PrimOps * RegisterPrimOp::primOps;


-RegisterPrimOp::RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun)
-{
-    if (!primOps) primOps = new PrimOps;
-    primOps->push_back({
-        .name = name,
-        .args = {},
-        .arity = arity,
-        .fun = fun,
-    });
-}
-
-
 RegisterPrimOp::RegisterPrimOp(Info && info)
 {
     if (!primOps) primOps = new PrimOps;

@@ -28,11 +28,6 @@ struct RegisterPrimOp
      * will get called during EvalState initialization, so there
      * may be primops not yet added and builtins is not yet sorted.
      */
-    RegisterPrimOp(
-        std::string name,
-        size_t arity,
-        PrimOpFun fun);
-
     RegisterPrimOp(Info && info);
 };

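Editor's note: with the positional constructor removed, every primop is registered through the `Info` struct, as the hunks below demonstrate. For reference, a minimal registration in that style looks like this; `__example` and `prim_example` are made-up names, but the pattern follows the `primop_hasContext` hunk later in this merge.

```cpp
// Hypothetical primop using the Info-struct registration pattern;
// "__example" does not exist in Nix.
static void prim_example(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
    state.forceValue(*args[0], pos);
    v.mkBool(true);
}

static RegisterPrimOp primop_example({
    .name = "__example",
    .args = {"x"},
    .doc = R"(
      Documentation rendered into the manual. Undocumented primops
      instead set `.arity` and omit `.args`/`.doc`.
    )",
    .fun = prim_example,
});
```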
@@ -12,7 +12,11 @@ static void prim_unsafeDiscardStringContext(EvalState & state, const PosIdx pos,
     v.mkString(*s);
 }

-static RegisterPrimOp primop_unsafeDiscardStringContext("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext);
+static RegisterPrimOp primop_unsafeDiscardStringContext({
+    .name = "__unsafeDiscardStringContext",
+    .arity = 1,
+    .fun = prim_unsafeDiscardStringContext
+});

 static void prim_hasContext(EvalState & state, const PosIdx pos, Value * * args, Value & v)

@@ -22,7 +26,16 @@ static void prim_hasContext(EvalState & state, const PosIdx pos, Value * * args,
     v.mkBool(!context.empty());
 }

-static RegisterPrimOp primop_hasContext("__hasContext", 1, prim_hasContext);
+static RegisterPrimOp primop_hasContext({
+    .name = "__hasContext",
+    .args = {"s"},
+    .doc = R"(
+      Return `true` if string *s* has a non-empty context. The
+      context can be obtained with
+      [`getContext`](#builtins-getContext).
+    )",
+    .fun = prim_hasContext
+});

 /* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a

@@ -51,7 +64,11 @@ static void prim_unsafeDiscardOutputDependency(EvalState & state, const PosIdx p
     v.mkString(*s, context2);
 }

-static RegisterPrimOp primop_unsafeDiscardOutputDependency("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency);
+static RegisterPrimOp primop_unsafeDiscardOutputDependency({
+    .name = "__unsafeDiscardOutputDependency",
+    .arity = 1,
+    .fun = prim_unsafeDiscardOutputDependency
+});

 /* Extract the context of a string as a structured Nix value.

@@ -119,7 +136,30 @@ static void prim_getContext(EvalState & state, const PosIdx pos, Value * * args,
     v.mkAttrs(attrs);
 }

-static RegisterPrimOp primop_getContext("__getContext", 1, prim_getContext);
+static RegisterPrimOp primop_getContext({
+    .name = "__getContext",
+    .args = {"s"},
+    .doc = R"(
+      Return the string context of *s*.
+
+      The string context tracks references to derivations within a string.
+      It is represented as an attribute set of [store derivation](@docroot@/glossary.md#gloss-store-derivation) paths mapping to output names.
+
+      Using [string interpolation](@docroot@/language/string-interpolation.md) on a derivation will add that derivation to the string context.
+      For example,
+
+      ```nix
+      builtins.getContext "${derivation { name = "a"; builder = "b"; system = "c"; }}"
+      ```
+
+      evaluates to
+
+      ```
+      { "/nix/store/arhvjaf6zmlyn8vh8fgn55rpwnxq0n7l-a.drv" = { outputs = [ "out" ]; }; }
+      ```
+    )",
+    .fun = prim_getContext
+});

 /* Append the given context to a given string.

@@ -192,6 +232,10 @@ static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * ar
     v.mkString(orig, context);
 }

-static RegisterPrimOp primop_appendContext("__appendContext", 2, prim_appendContext);
+static RegisterPrimOp primop_appendContext({
+    .name = "__appendContext",
+    .arity = 2,
+    .fun = prim_appendContext
+});

 }

@@ -88,6 +88,10 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value * * a
     state.allowPath(tree.storePath);
 }

-static RegisterPrimOp r_fetchMercurial("fetchMercurial", 1, prim_fetchMercurial);
+static RegisterPrimOp r_fetchMercurial({
+    .name = "fetchMercurial",
+    .arity = 1,
+    .fun = prim_fetchMercurial
+});

 }

@@ -194,7 +194,11 @@ static void prim_fetchTree(EvalState & state, const PosIdx pos, Value * * args,
 }

 // FIXME: document
-static RegisterPrimOp primop_fetchTree("fetchTree", 1, prim_fetchTree);
+static RegisterPrimOp primop_fetchTree({
+    .name = "fetchTree",
+    .arity = 1,
+    .fun = prim_fetchTree
+});

 static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v,
     const std::string & who, bool unpack, std::string name)

@@ -262,7 +266,7 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
     // https://github.com/NixOS/nix/issues/4313
     auto storePath =
         unpack
-        ? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).first.storePath
+        ? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).tree.storePath
         : fetchers::downloadFile(state.store, *url, name, (bool) expectedHash).storePath;

     if (expectedHash) {

@@ -90,6 +90,24 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value * * args, V
     }
 }

-static RegisterPrimOp primop_fromTOML("fromTOML", 1, prim_fromTOML);
+static RegisterPrimOp primop_fromTOML({
+    .name = "fromTOML",
+    .args = {"e"},
+    .doc = R"(
+      Convert a TOML string to a Nix value. For example,
+
+      ```nix
+      builtins.fromTOML ''
+        x=1
+        s="a"
+        [table]
+        y=2
+      ''
+      ```
+
+      returns the value `{ s = "a"; table = { y = 2; }; x = 1; }`.
+    )",
+    .fun = prim_fromTOML
+});

 }

@@ -2,6 +2,7 @@
 ///@file

 #include "types.hh"
+#include "hash.hh"

 #include <variant>

@@ -159,6 +159,12 @@ std::pair<Tree, Input> Input::fetch(ref<Store> store) const
             input.to_string(), *prevLastModified);
     }

+    if (auto prevRev = getRev()) {
+        if (input.getRev() != prevRev)
+            throw Error("'rev' attribute mismatch in input '%s', expected %s",
+                input.to_string(), prevRev->gitRev());
+    }
+
     if (auto prevRevCount = getRevCount()) {
         if (input.getRevCount() != prevRevCount)
             throw Error("'revCount' attribute mismatch in input '%s', expected %d",

@@ -158,6 +158,7 @@ struct DownloadFileResult
     StorePath storePath;
     std::string etag;
     std::string effectiveUrl;
+    std::optional<std::string> immutableUrl;
 };

 DownloadFileResult downloadFile(

@@ -167,7 +168,14 @@ DownloadFileResult downloadFile(
     bool locked,
     const Headers & headers = {});

-std::pair<Tree, time_t> downloadTarball(
+struct DownloadTarballResult
+{
+    Tree tree;
+    time_t lastModified;
+    std::optional<std::string> immutableUrl;
+};
+
+DownloadTarballResult downloadTarball(
     ref<Store> store,
     const std::string & url,
     const std::string & name,

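Editor's note: the hunk above swaps a positional `std::pair` return for a named struct, which is what lets the new optional `immutableUrl` field ride along without breaking callers. A self-contained illustration of that design choice (the types are stand-ins; real call sites appear in the hunks below):

```cpp
#include <ctime>
#include <iostream>
#include <optional>
#include <string>

// Stand-in for fetchers::Tree; illustrative only.
struct Tree { std::string storePath; };

// Named fields instead of std::pair<Tree, time_t>: a new optional
// member can be added without touching every caller that destructured
// the pair.
struct DownloadTarballResult
{
    Tree tree;
    time_t lastModified;
    std::optional<std::string> immutableUrl;
};

DownloadTarballResult downloadTarball() // dummy body for illustration
{
    return {
        .tree = { .storePath = "/nix/store/...-source" },
        .lastModified = 1684000000,
        .immutableUrl = "https://example.org/hello/<revision>.tar.gz",
    };
}

int main()
{
    auto result = downloadTarball();
    // Named access replaces `auto [tree, lastModified] = ...`:
    std::cout << result.tree.storePath << " @ " << result.lastModified << "\n";
    if (result.immutableUrl)
        std::cout << "lock to: " << *result.immutableUrl << "\n";
}
```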
@@ -207,21 +207,21 @@ struct GitArchiveInputScheme : InputScheme

         auto url = getDownloadUrl(input);

-        auto [tree, lastModified] = downloadTarball(store, url.url, input.getName(), true, url.headers);
+        auto result = downloadTarball(store, url.url, input.getName(), true, url.headers);

-        input.attrs.insert_or_assign("lastModified", uint64_t(lastModified));
+        input.attrs.insert_or_assign("lastModified", uint64_t(result.lastModified));

         getCache()->add(
             store,
             lockedAttrs,
             {
                 {"rev", rev->gitRev()},
-                {"lastModified", uint64_t(lastModified)}
+                {"lastModified", uint64_t(result.lastModified)}
             },
-            tree.storePath,
+            result.tree.storePath,
             true);

-        return {std::move(tree.storePath), input};
+        return {result.tree.storePath, input};
     }
 };

@ -32,7 +32,8 @@ DownloadFileResult downloadFile(
|
||||||
return {
|
return {
|
||||||
.storePath = std::move(cached->storePath),
|
.storePath = std::move(cached->storePath),
|
||||||
.etag = getStrAttr(cached->infoAttrs, "etag"),
|
.etag = getStrAttr(cached->infoAttrs, "etag"),
|
||||||
.effectiveUrl = getStrAttr(cached->infoAttrs, "url")
|
.effectiveUrl = getStrAttr(cached->infoAttrs, "url"),
|
||||||
|
.immutableUrl = maybeGetStrAttr(cached->infoAttrs, "immutableUrl"),
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -55,12 +56,14 @@ DownloadFileResult downloadFile(
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: write to temporary file.
|
// FIXME: write to temporary file.
|
||||||
|
|
||||||
Attrs infoAttrs({
|
Attrs infoAttrs({
|
||||||
{"etag", res.etag},
|
{"etag", res.etag},
|
||||||
{"url", res.effectiveUri},
|
{"url", res.effectiveUri},
|
||||||
});
|
});
|
||||||
|
|
||||||
|
if (res.immutableUrl)
|
||||||
|
infoAttrs.emplace("immutableUrl", *res.immutableUrl);
|
||||||
|
|
||||||
std::optional<StorePath> storePath;
|
std::optional<StorePath> storePath;
|
||||||
|
|
||||||
if (res.cached) {
|
if (res.cached) {
|
||||||
|
@ -111,10 +114,11 @@ DownloadFileResult downloadFile(
|
||||||
.storePath = std::move(*storePath),
|
.storePath = std::move(*storePath),
|
||||||
.etag = res.etag,
|
.etag = res.etag,
|
||||||
.effectiveUrl = res.effectiveUri,
|
.effectiveUrl = res.effectiveUri,
|
||||||
|
.immutableUrl = res.immutableUrl,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<Tree, time_t> downloadTarball(
|
DownloadTarballResult downloadTarball(
|
||||||
ref<Store> store,
|
ref<Store> store,
|
||||||
const std::string & url,
|
const std::string & url,
|
||||||
const std::string & name,
|
const std::string & name,
|
||||||
|
@ -131,8 +135,9 @@ std::pair<Tree, time_t> downloadTarball(
|
||||||
|
|
||||||
if (cached && !cached->expired)
|
if (cached && !cached->expired)
|
||||||
return {
|
return {
|
||||||
Tree { .actualPath = store->toRealPath(cached->storePath), .storePath = std::move(cached->storePath) },
|
.tree = Tree { .actualPath = store->toRealPath(cached->storePath), .storePath = std::move(cached->storePath) },
|
||||||
getIntAttr(cached->infoAttrs, "lastModified")
|
.lastModified = (time_t) getIntAttr(cached->infoAttrs, "lastModified"),
|
||||||
|
.immutableUrl = maybeGetStrAttr(cached->infoAttrs, "immutableUrl"),
|
||||||
};
|
};
|
||||||
|
|
||||||
auto res = downloadFile(store, url, name, locked, headers);
|
auto res = downloadFile(store, url, name, locked, headers);
|
||||||
|
@ -160,6 +165,9 @@ std::pair<Tree, time_t> downloadTarball(
|
||||||
{"etag", res.etag},
|
{"etag", res.etag},
|
||||||
});
|
});
|
||||||
|
|
||||||
|
if (res.immutableUrl)
|
||||||
|
infoAttrs.emplace("immutableUrl", *res.immutableUrl);
|
||||||
|
|
||||||
getCache()->add(
|
getCache()->add(
|
||||||
store,
|
store,
|
||||||
inAttrs,
|
inAttrs,
|
||||||
|
@ -168,8 +176,9 @@ std::pair<Tree, time_t> downloadTarball(
|
||||||
locked);
|
locked);
|
||||||
|
|
||||||
return {
|
return {
|
||||||
Tree { .actualPath = store->toRealPath(*unpackedStorePath), .storePath = std::move(*unpackedStorePath) },
|
.tree = Tree { .actualPath = store->toRealPath(*unpackedStorePath), .storePath = std::move(*unpackedStorePath) },
|
||||||
lastModified,
|
.lastModified = lastModified,
|
||||||
|
.immutableUrl = res.immutableUrl,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -189,21 +198,33 @@ struct CurlInputScheme : InputScheme
|
||||||
|
|
||||||
virtual bool isValidURL(const ParsedURL & url) const = 0;
|
virtual bool isValidURL(const ParsedURL & url) const = 0;
|
||||||
|
|
||||||
std::optional<Input> inputFromURL(const ParsedURL & url) const override
|
std::optional<Input> inputFromURL(const ParsedURL & _url) const override
|
||||||
{
|
{
|
||||||
if (!isValidURL(url))
|
if (!isValidURL(_url))
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
|
|
||||||
Input input;
|
Input input;
|
||||||
|
|
||||||
auto urlWithoutApplicationScheme = url;
|
auto url = _url;
|
||||||
urlWithoutApplicationScheme.scheme = parseUrlScheme(url.scheme).transport;
|
|
||||||
|
url.scheme = parseUrlScheme(url.scheme).transport;
|
||||||
|
|
||||||
input.attrs.insert_or_assign("type", inputType());
|
|
||||||
input.attrs.insert_or_assign("url", urlWithoutApplicationScheme.to_string());
|
|
||||||
auto narHash = url.query.find("narHash");
|
auto narHash = url.query.find("narHash");
|
||||||
if (narHash != url.query.end())
|
if (narHash != url.query.end())
|
||||||
input.attrs.insert_or_assign("narHash", narHash->second);
|
input.attrs.insert_or_assign("narHash", narHash->second);
|
||||||
|
|
||||||
|
if (auto i = get(url.query, "rev"))
|
||||||
|
input.attrs.insert_or_assign("rev", *i);
|
||||||
|
|
||||||
|
if (auto i = get(url.query, "revCount"))
|
||||||
|
if (auto n = string2Int<uint64_t>(*i))
|
||||||
|
input.attrs.insert_or_assign("revCount", *n);
|
||||||
|
|
||||||
|
url.query.erase("rev");
|
||||||
|
url.query.erase("revCount");
|
||||||
|
|
||||||
|
input.attrs.insert_or_assign("type", inputType());
|
||||||
|
input.attrs.insert_or_assign("url", url.to_string());
|
||||||
return input;
|
return input;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -212,7 +233,8 @@ struct CurlInputScheme : InputScheme
|
||||||
auto type = maybeGetStrAttr(attrs, "type");
|
auto type = maybeGetStrAttr(attrs, "type");
|
||||||
if (type != inputType()) return {};
|
if (type != inputType()) return {};
|
||||||
|
|
||||||
std::set<std::string> allowedNames = {"type", "url", "narHash", "name", "unpack"};
|
// FIXME: some of these only apply to TarballInputScheme.
|
||||||
|
std::set<std::string> allowedNames = {"type", "url", "narHash", "name", "unpack", "rev", "revCount"};
|
||||||
for (auto & [name, value] : attrs)
|
for (auto & [name, value] : attrs)
|
||||||
if (!allowedNames.count(name))
|
if (!allowedNames.count(name))
|
||||||
throw Error("unsupported %s input attribute '%s'", *type, name);
|
throw Error("unsupported %s input attribute '%s'", *type, name);
|
||||||
@@ -275,10 +297,22 @@ struct TarballInputScheme : CurlInputScheme
             : hasTarballExtension(url.path));
     }
 
-    std::pair<StorePath, Input> fetch(ref<Store> store, const Input & input) override
+    std::pair<StorePath, Input> fetch(ref<Store> store, const Input & _input) override
     {
-        auto tree = downloadTarball(store, getStrAttr(input.attrs, "url"), input.getName(), false).first;
-        return {std::move(tree.storePath), input};
+        Input input(_input);
+        auto url = getStrAttr(input.attrs, "url");
+        auto result = downloadTarball(store, url, input.getName(), false);
+
+        if (result.immutableUrl) {
+            auto immutableInput = Input::fromURL(*result.immutableUrl);
+            // FIXME: would be nice to support arbitrary flakerefs
+            // here, e.g. git flakes.
+            if (immutableInput.getType() != "tarball")
+                throw Error("tarball 'Link' headers that redirect to non-tarball URLs are not supported");
+            input = immutableInput;
+        }
+
+        return {result.tree.storePath, std::move(input)};
     }
 };
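The fetch now consults the transfer result: if the server advertised an immutable redirect via a `Link` header, the returned input is replaced by one built from that immutable URL (still required to be a tarball input). A rough caller-side sketch, with illustrative names only:

    // Hypothetical usage of the new behaviour:
    auto [storePath, lockedInput] = tarballScheme.fetch(store, mutableInput);
    // If the server sent `Link: <...>; rel="immutable"`, lockedInput now
    // refers to the immutable URL and can be recorded in a lock file;
    // otherwise it is the original input, unchanged.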

@@ -4,7 +4,7 @@
 #include "worker.hh"
 #include "builtins.hh"
 #include "builtins/buildenv.hh"
-#include "references.hh"
+#include "path-references.hh"
 #include "finally.hh"
 #include "util.hh"
 #include "archive.hh"
@@ -2389,18 +2389,21 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
                 continue;
             auto references = *referencesOpt;
 
-        auto rewriteOutput = [&]() {
+        auto rewriteOutput = [&](const StringMap & rewrites) {
             /* Apply hash rewriting if necessary. */
-            if (!outputRewrites.empty()) {
+            if (!rewrites.empty()) {
                 debug("rewriting hashes in '%1%'; cross fingers", actualPath);
 
-                /* FIXME: this is in-memory. */
-                StringSink sink;
-                dumpPath(actualPath, sink);
+                /* FIXME: Is this actually streaming? */
+                auto source = sinkToSource([&](Sink & nextSink) {
+                    RewritingSink rsink(rewrites, nextSink);
+                    dumpPath(actualPath, rsink);
+                    rsink.flush();
+                });
+                Path tmpPath = actualPath + ".tmp";
+                restorePath(tmpPath, *source);
                 deletePath(actualPath);
-                sink.s = rewriteStrings(sink.s, outputRewrites);
-                StringSource source(sink.s);
-                restorePath(actualPath, source);
+                movePath(tmpPath, actualPath);
 
                 /* FIXME: set proper permissions in restorePath() so
                    we don't have to do another traversal. */
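The rewrite is no longer buffered whole in memory: `sinkToSource` turns the NAR dump into a pull-style source, `RewritingSink` substitutes hashes as bytes stream through, and `restorePath` unpacks the rewritten NAR into a temporary path that then replaces the original. The same pattern in isolation, a minimal sketch with hypothetical names:

    // Sketch: stream a NAR through a hash rewrite (oldHash/newHash must
    // be equal-length strings; somePath is illustrative).
    StringMap rewrites = {{oldHash, newHash}};
    auto source = sinkToSource([&](Sink & nextSink) {
        RewritingSink rsink(rewrites, nextSink);  // rewrites on the fly
        dumpPath(somePath, rsink);                // serialize to NAR
        rsink.flush();                            // emit the held-back tail
    });
    restorePath(somePath + ".tmp", *source);      // unpack rewritten NAR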
@@ -2449,7 +2452,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
                     "since recursive hashing is not enabled (one of outputHashMode={flat,text} is true)",
                     actualPath);
             }
-            rewriteOutput();
+            rewriteOutput(outputRewrites);
             /* FIXME optimize and deduplicate with addToStore */
             std::string oldHashPart { scratchPath->hashPart() };
             HashModuloSink caSink { outputHash.hashType, oldHashPart };
@@ -2487,16 +2490,14 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
                 Hash::dummy,
             };
             if (*scratchPath != newInfo0.path) {
-                // Also rewrite the output path
-                auto source = sinkToSource([&](Sink & nextSink) {
-                    RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink);
-                    dumpPath(actualPath, rsink2);
-                    rsink2.flush();
-                });
-                Path tmpPath = actualPath + ".tmp";
-                restorePath(tmpPath, *source);
-                deletePath(actualPath);
-                movePath(tmpPath, actualPath);
+                // If the path has some self-references, we need to rewrite
+                // them.
+                // (note that this doesn't invalidate the ca hash we calculated
+                // above because it's computed *modulo the self-references*, so
+                // it already takes this rewrite into account).
+                rewriteOutput(
+                    StringMap{{oldHashPart,
+                               std::string(newInfo0.path.hashPart())}});
             }
 
             HashResult narHashAndSize = hashPath(htSHA256, actualPath);
@@ -2518,7 +2519,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
             outputRewrites.insert_or_assign(
                 std::string { scratchPath->hashPart() },
                 std::string { requiredFinalPath.hashPart() });
-            rewriteOutput();
+            rewriteOutput(outputRewrites);
             auto narHashAndSize = hashPath(htSHA256, actualPath);
             ValidPathInfo newInfo0 { requiredFinalPath, narHashAndSize.first };
             newInfo0.narSize = narHashAndSize.second;

@@ -186,9 +186,9 @@ struct curlFileTransfer : public FileTransfer
             size_t realSize = size * nmemb;
             std::string line((char *) contents, realSize);
             printMsg(lvlVomit, "got header for '%s': %s", request.uri, trim(line));
 
             static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase);
-            std::smatch match;
-            if (std::regex_match(line, match, statusLine)) {
+            if (std::smatch match; std::regex_match(line, match, statusLine)) {
                 result.etag = "";
                 result.data.clear();
                 result.bodySize = 0;
@@ -196,9 +196,11 @@ struct curlFileTransfer : public FileTransfer
                 acceptRanges = false;
                 encoding = "";
             } else {
+
                 auto i = line.find(':');
                 if (i != std::string::npos) {
                     std::string name = toLower(trim(line.substr(0, i)));
+
                     if (name == "etag") {
                         result.etag = trim(line.substr(i + 1));
                         /* Hack to work around a GitHub bug: it sends
@@ -212,10 +214,22 @@ struct curlFileTransfer : public FileTransfer
                             debug("shutting down on 200 HTTP response with expected ETag");
                             return 0;
                         }
-                    } else if (name == "content-encoding")
+                    }
+
+                    else if (name == "content-encoding")
                         encoding = trim(line.substr(i + 1));
+
                     else if (name == "accept-ranges" && toLower(trim(line.substr(i + 1))) == "bytes")
                         acceptRanges = true;
+
+                    else if (name == "link" || name == "x-amz-meta-link") {
+                        auto value = trim(line.substr(i + 1));
+                        static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase);
+                        if (std::smatch match; std::regex_match(value, match, linkRegex))
+                            result.immutableUrl = match.str(1);
+                        else
+                            debug("got invalid link header '%s'", value);
+                    }
                 }
             }
             return realSize;
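`linkRegex` matches the whole header value in the common RFC 8288 serialization, so a response header such as this (hypothetical host):

    Link: <https://releases.example.org/abc123.tar.gz>; rel="immutable"

sets `result.immutableUrl` to the URL between the angle brackets; any other shape is logged via `debug()` and ignored.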
@@ -345,7 +359,7 @@ struct curlFileTransfer : public FileTransfer
         {
             auto httpStatus = getHTTPStatus();
 
-            char * effectiveUriCStr;
+            char * effectiveUriCStr = nullptr;
             curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
             if (effectiveUriCStr)
                 result.effectiveUri = effectiveUriCStr;

@@ -80,6 +80,10 @@ struct FileTransferResult
     std::string effectiveUri;
     std::string data;
     uint64_t bodySize = 0;
+    /* An "immutable" URL for this resource (i.e. one whose contents
+       will never change), as returned by the `Link: <url>;
+       rel="immutable"` header. */
+    std::optional<std::string> immutableUrl;
 };
 
 class Store;

@@ -77,7 +77,30 @@ Settings::Settings()
     allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
 #endif
 
-    buildHook = getSelfExe().value_or("nix") + " __build-remote";
+    /* Set the build hook location
+
+       For builds we perform a self-invocation, so Nix has to be self-aware.
+       That is, it has to know where it is installed. We don't think it's sentient.
+
+       Normally, nix is installed according to `nixBinDir`, which is set at compile time,
+       but can be overridden. This makes for a great default that works even if this
+       code is linked as a library into some other program whose main is not aware
+       that it might need to be a build remote hook.
+
+       However, it may not have been installed at all. For example, if it's a static build,
+       there's a good chance that it has been moved out of its installation directory.
+       That makes `nixBinDir` useless. Instead, we'll query the OS for the path to the
+       current executable, using `getSelfExe()`.
+
+       As a last resort, we resort to `PATH`. Hopefully we find a `nix` there that's compatible.
+       If you're porting Nix to a new platform, that might be good enough for a while, but
+       you'll want to improve `getSelfExe()` to work on your platform.
+     */
+    std::string nixExePath = nixBinDir + "/nix";
+    if (!pathExists(nixExePath)) {
+        nixExePath = getSelfExe().value_or("nix");
+    }
+    buildHook = nixExePath + " __build-remote";
 }
 
 void loadConfFile()
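`getSelfExe()` itself lives in libutil; as a hedged sketch of the kind of OS query it performs (not the actual implementation), assuming the usual platform APIs:

    // Sketch only: ask the OS for the running executable's path.
    std::optional<Path> getSelfExeSketch()
    {
    #if __linux__
        return readLink("/proc/self/exe");          // procfs symlink
    #elif __APPLE__
        char buf[4096];
        uint32_t size = sizeof(buf);
        if (_NSGetExecutablePath(buf, &size) == 0)  // dyld API
            return Path(buf);
        return std::nullopt;
    #else
        return std::nullopt;  // ports fall back to PATH, as the comment says
    #endif
    }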

@@ -710,20 +710,19 @@ public:
         Strings{"https://cache.nixos.org/"},
         "substituters",
         R"(
-          A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
-          to be used as substituters, separated by whitespace.
-          Substituters are tried based on their Priority value, which each substituter can set
-          independently. Lower value means higher priority.
-          The default is `https://cache.nixos.org`, with a Priority of 40.
+          A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format) to be used as substituters, separated by whitespace.
+          A substituter is an additional [store]{@docroot@/glossary.md##gloss-store} from which Nix can obtain [store objects](@docroot@/glossary.md#gloss-store-object) instead of building them.
 
-          At least one of the following conditions must be met for Nix to use
-          a substituter:
+          Substituters are tried based on their priority value, which each substituter can set independently.
+          Lower value means higher priority.
+          The default is `https://cache.nixos.org`, which has a priority of 40.
+
+          At least one of the following conditions must be met for Nix to use a substituter:
 
           - the substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
           - the user calling Nix is in the [`trusted-users`](#conf-trusted-users) list
 
-          In addition, each store path should be trusted as described
-          in [`trusted-public-keys`](#conf-trusted-public-keys)
+          In addition, each store path should be trusted as described in [`trusted-public-keys`](#conf-trusted-public-keys)
         )",
         {"binary-caches"}};
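As the reworded text states, the value is whitespace-separated; in `nix.conf` a two-cache configuration would look like this (the second URL is a made-up example):

    substituters = https://cache.nixos.org/ https://example-cache.example.org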

@@ -52,14 +52,15 @@ struct LocalStoreConfig : virtual LocalFSStoreConfig
         R"(
           Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.
 
-          Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed).
-          This causes it to fail if the database is on a read-only filesystem.
+          Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.
 
           Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.
 
-          **Warning**
-          Do not use this unless the filesystem is read-only.
-          Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
+          > **Warning**
+          > Do not use this unless the filesystem is read-only.
+          >
+          > Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
+          > While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
         )"};
 
     const std::string name() override { return "Local Store"; }
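With the `read-only-local-store` experimental feature enabled, this surfaces as a store URI parameter; for example, querying a path without touching the database's locks (store path hypothetical):

    nix path-info --extra-experimental-features read-only-local-store \
        --store 'local?read-only=true' /nix/store/<hash>-example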

73 src/libstore/path-references.cc Normal file

@@ -0,0 +1,73 @@
+#include "path-references.hh"
+#include "hash.hh"
+#include "util.hh"
+#include "archive.hh"
+
+#include <map>
+#include <cstdlib>
+#include <mutex>
+#include <algorithm>
+
+
+namespace nix {
+
+
+PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
+    : RefScanSink(std::move(hashes))
+    , backMap(std::move(backMap))
+{ }
+
+PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
+{
+    StringSet hashes;
+    std::map<std::string, StorePath> backMap;
+
+    for (auto & i : refs) {
+        std::string hashPart(i.hashPart());
+        auto inserted = backMap.emplace(hashPart, i).second;
+        assert(inserted);
+        hashes.insert(hashPart);
+    }
+
+    return PathRefScanSink(std::move(hashes), std::move(backMap));
+}
+
+StorePathSet PathRefScanSink::getResultPaths()
+{
+    /* Map the hashes found back to their store paths. */
+    StorePathSet found;
+    for (auto & i : getResult()) {
+        auto j = backMap.find(i);
+        assert(j != backMap.end());
+        found.insert(j->second);
+    }
+
+    return found;
+}
+
+
+std::pair<StorePathSet, HashResult> scanForReferences(
+    const std::string & path,
+    const StorePathSet & refs)
+{
+    HashSink hashSink { htSHA256 };
+    auto found = scanForReferences(hashSink, path, refs);
+    auto hash = hashSink.finish();
+    return std::pair<StorePathSet, HashResult>(found, hash);
+}
+
+StorePathSet scanForReferences(
+    Sink & toTee,
+    const Path & path,
+    const StorePathSet & refs)
+{
+    PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
+    TeeSink sink { refsSink, toTee };
+
+    /* Look for the hashes in the NAR dump of the path. */
+    dumpPath(path, sink);
+
+    return refsSink.getResultPaths();
+}
+
+}
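A short usage sketch for the relocated scanner (the candidate set and path are hypothetical):

    // Which of these candidate store paths does `outPath` actually mention?
    StorePathSet candidates = /* e.g. closure of the build inputs */;
    auto [found, narHash] = scanForReferences(outPath, candidates);
    // `found` holds every candidate whose hash part occurs in the NAR
    // dump of outPath; narHash is the SHA-256 NAR hash from the same pass.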

25 src/libstore/path-references.hh Normal file

@@ -0,0 +1,25 @@
+#pragma once
+
+#include "references.hh"
+#include "path.hh"
+
+namespace nix {
+
+std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs);
+
+StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs);
+
+class PathRefScanSink : public RefScanSink
+{
+    std::map<std::string, StorePath> backMap;
+
+    PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
+
+public:
+
+    static PathRefScanSink fromPaths(const StorePathSet & refs);
+
+    StorePathSet getResultPaths();
+};
+
+}

@@ -13,8 +13,10 @@
 namespace nix {
 
 
-/* Parse a generation name of the format
-   `<profilename>-<number>-link'. */
+/**
+ * Parse a generation name of the format
+ * `<profilename>-<number>-link'.
+ */
 static std::optional<GenerationNumber> parseName(const std::string & profileName, const std::string & name)
 {
     if (name.substr(0, profileName.size() + 1) != profileName + "-") return {};
@@ -28,7 +30,6 @@ static std::optional<GenerationNumber> parseName(const std::string & profileName
 }
 
 
-
 std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile)
 {
     Generations gens;
@@ -61,15 +62,16 @@ std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path pro
 }
 
 
-static void makeName(const Path & profile, GenerationNumber num,
-    Path & outLink)
+/**
+ * Create a generation name that can be parsed by `parseName()`.
+ */
+static Path makeName(const Path & profile, GenerationNumber num)
 {
-    Path prefix = fmt("%1%-%2%", profile, num);
-    outLink = prefix + "-link";
+    return fmt("%s-%s-link", profile, num);
 }
 
 
-Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
+Path createGeneration(LocalFSStore & store, Path profile, StorePath outPath)
 {
     /* The new generation number should be higher than old the
        previous ones. */
@@ -79,7 +81,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
     if (gens.size() > 0) {
         Generation last = gens.back();
 
-        if (readLink(last.path) == store->printStorePath(outPath)) {
+        if (readLink(last.path) == store.printStorePath(outPath)) {
            /* We only create a new generation symlink if it differs
               from the last one.
 
@@ -89,7 +91,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
             return last.path;
         }
 
-        num = gens.back().number;
+        num = last.number;
     } else {
         num = 0;
     }
@@ -100,9 +102,8 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
        to the permanent roots (of which the GC would have a stale
        view). If we didn't do it this way, the GC might remove the
        user environment etc. we've just built. */
-    Path generation;
-    makeName(profile, num + 1, generation);
-    store->addPermRoot(outPath, generation);
+    Path generation = makeName(profile, num + 1);
+    store.addPermRoot(outPath, generation);
 
     return generation;
 }
@@ -117,12 +118,19 @@ static void removeFile(const Path & path)
 
 void deleteGeneration(const Path & profile, GenerationNumber gen)
 {
-    Path generation;
-    makeName(profile, gen, generation);
+    Path generation = makeName(profile, gen);
     removeFile(generation);
 }
 
+/**
+ * Delete a generation with dry-run mode.
+ *
+ * Like `deleteGeneration()` but:
+ *
+ * - We log what we are going to do.
+ *
+ * - We only actually delete if `dryRun` is false.
+ */
 static void deleteGeneration2(const Path & profile, GenerationNumber gen, bool dryRun)
 {
     if (dryRun)
@@ -150,28 +158,37 @@ void deleteGenerations(const Path & profile, const std::set<GenerationNumber> &
     }
 }
 
+/**
+ * Advanced the iterator until the given predicate `cond` returns `true`.
+ */
+static inline void iterDropUntil(Generations & gens, auto && i, auto && cond)
+{
+    for (; i != gens.rend() && !cond(*i); ++i);
+}
+
 void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun)
 {
+    if (max == 0)
+        throw Error("Must keep at least one generation, otherwise the current one would be deleted");
+
     PathLocks lock;
     lockProfile(lock, profile);
 
-    bool fromCurGen = false;
-    auto [gens, curGen] = findGenerations(profile);
-    for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
-        if (i->number == curGen) {
-            fromCurGen = true;
-            max--;
-            continue;
-        }
-        if (fromCurGen) {
-            if (max) {
-                max--;
-                continue;
-            }
-            deleteGeneration2(profile, i->number, dryRun);
-        }
-    }
+    auto [gens, _curGen] = findGenerations(profile);
+    auto curGen = _curGen;
+
+    auto i = gens.rbegin();
+
+    // Find the current generation
+    iterDropUntil(gens, i, [&](auto & g) { return g.number == curGen; });
+
+    // Skip over `max` generations, preserving them
+    for (auto keep = 0; i != gens.rend() && keep < max; ++i, ++keep);
+
+    // Delete the rest
+    for (; i != gens.rend(); ++i)
+        deleteGeneration2(profile, i->number, dryRun);
 }
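A worked example of the rewritten loop: with generations 1–5 present, generation 5 current, and `max = 2`, the iterator is dropped to 5, the skip loop preserves 5 and 4, and 3, 2, 1 are deleted. The equivalent user command is the existing `+N` form:

    nix-env --profile /nix/var/nix/profiles/default --delete-generations +2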
 
 void deleteOldGenerations(const Path & profile, bool dryRun)
 {
@@ -193,23 +210,33 @@ void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
 
     auto [gens, curGen] = findGenerations(profile);
 
-    bool canDelete = false;
-    for (auto i = gens.rbegin(); i != gens.rend(); ++i)
-        if (canDelete) {
-            assert(i->creationTime < t);
-            if (i->number != curGen)
-                deleteGeneration2(profile, i->number, dryRun);
-        } else if (i->creationTime < t) {
-            /* We may now start deleting generations, but we don't
-               delete this generation yet, because this generation was
-               still the one that was active at the requested point in
-               time. */
-            canDelete = true;
-        }
+    auto i = gens.rbegin();
+
+    // Predicate that the generation is older than the given time.
+    auto older = [&](auto & g) { return g.creationTime < t; };
+
+    // Find the first older generation, if one exists
+    iterDropUntil(gens, i, older);
+
+    /* Take the previous generation
+
+       We don't want delete this one yet because it
+       existed at the requested point in time, and
+       we want to be able to roll back to it. */
+    if (i != gens.rend()) ++i;
+
+    // Delete all previous generations (unless current).
+    for (; i != gens.rend(); ++i) {
+        /* Creating date and generations should be monotonic, so lower
+           numbered derivations should also be older. */
+        assert(older(*i));
+        if (i->number != curGen)
+            deleteGeneration2(profile, i->number, dryRun);
+    }
 }
 
-void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, bool dryRun)
+time_t parseOlderThanTimeSpec(std::string_view timeSpec)
 {
     if (timeSpec.empty() || timeSpec[timeSpec.size() - 1] != 'd')
         throw UsageError("invalid number of days specifier '%1%', expected something like '14d'", timeSpec);
@@ -221,9 +248,7 @@ void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec,
     if (!days || *days < 1)
         throw UsageError("invalid number of days specifier '%1%'", timeSpec);
 
-    time_t oldTime = curTime - *days * 24 * 3600;
-
-    deleteGenerationsOlderThan(profile, oldTime, dryRun);
+    return curTime - *days * 24 * 3600;
 }
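The spec grammar stays minimal: a positive day count followed by `d`. For example `30d` returns `curTime - 30 * 24 * 3600`, which callers then hand to `deleteGenerationsOlderThan()`; from the CLI that is:

    nix-env --delete-generations 30d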

@@ -1,5 +1,9 @@
 #pragma once
-///@file
+/**
+ * @file Implementation of Profiles.
+ *
+ * See the manual for additional information.
+ */
 
 #include "types.hh"
 #include "pathlocks.hh"
@@ -12,41 +16,166 @@ namespace nix {
 class StorePath;
 
 
+/**
+ * A positive number identifying a generation for a given profile.
+ *
+ * Generation numbers are assigned sequentially. Each new generation is
+ * assigned 1 + the current highest generation number.
+ */
 typedef uint64_t GenerationNumber;
 
+/**
+ * A generation is a revision of a profile.
+ *
+ * Each generation is a mapping (key-value pair) from an identifier
+ * (`number`) to a store object (specified by `path`).
+ */
 struct Generation
 {
+    /**
+     * The number of a generation is its unique identifier within the
+     * profile.
+     */
     GenerationNumber number;
+    /**
+     * The store path identifies the store object that is the contents
+     * of the generation.
+     *
+     * These store paths / objects are not unique to the generation
+     * within a profile. Nix tries to ensure successive generations have
+     * distinct contents to avoid bloat, but nothing stops two
+     * non-adjacent generations from having the same contents.
+     *
+     * @todo Use `StorePath` instead of `Path`?
+     */
     Path path;
 
+    /**
+     * When the generation was created. This is extra metadata about the
+     * generation used to make garbage collecting old generations more
+     * convenient.
+     */
     time_t creationTime;
 };
 
+/**
+ * All the generations of a profile
+ */
 typedef std::list<Generation> Generations;
 
 
 /**
- * Returns the list of currently present generations for the specified
- * profile, sorted by generation number. Also returns the number of
- * the current generation.
+ * Find all generations for the given profile.
+ *
+ * @param profile A profile specified by its name and location combined
+ * into a path. E.g. if "foo" is the name of the profile, and "/bar/baz"
+ * is the directory it is in, then the path "/bar/baz/foo" would be the
+ * argument for this parameter.
+ *
+ * @return The pair of:
+ *
+ *   - The list of currently present generations for the specified profile,
+ *     sorted by ascending generation number.
+ *
+ *   - The number of the current/active generation.
+ *
+ * Note that the current/active generation need not be the latest one.
 */
 std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile);
 
 class LocalFSStore;
 
-Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath);
+/**
+ * Create a new generation of the given profile
+ *
+ * If the previous generation (not the currently active one!) has a
+ * distinct store object, a fresh generation number is mapped to the
+ * given store object, referenced by path. Otherwise, the previous
+ * generation is assumed.
+ *
+ * The behavior of reusing existing generations like this makes this
+ * procedure idempotent. It also avoids clutter.
+ */
+Path createGeneration(LocalFSStore & store, Path profile, StorePath outPath);
+
+/**
+ * Unconditionally delete a generation
+ *
+ * @param profile A profile specified by its name and location combined into a path.
+ *
+ * @param gen The generation number specifying exactly which generation
+ * to delete.
+ *
+ * Because there is no check of whether the generation to delete is
+ * active, this is somewhat unsafe.
+ *
+ * @todo Should we expose this at all?
+ */
 void deleteGeneration(const Path & profile, GenerationNumber gen);
 
+/**
+ * Delete the given set of generations.
+ *
+ * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
+ *
+ * @param gensToDelete The generations to delete, specified by a set of
+ * numbers.
+ *
+ * @param dryRun Log what would be deleted instead of actually doing
+ * so.
+ *
+ * Trying to delete the currently active generation will fail, and cause
+ * no generations to be deleted.
+ */
 void deleteGenerations(const Path & profile, const std::set<GenerationNumber> & gensToDelete, bool dryRun);
 
+/**
+ * Delete generations older than `max` passed the current generation.
+ *
+ * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
+ *
+ * @param max How many generations to keep up to the current one. Must
+ * be at least 1 so we don't delete the current one.
+ *
+ * @param dryRun Log what would be deleted instead of actually doing
+ * so.
+ */
 void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun);
 
+/**
+ * Delete all generations other than the current one
+ *
+ * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
+ *
+ * @param dryRun Log what would be deleted instead of actually doing
+ * so.
+ */
 void deleteOldGenerations(const Path & profile, bool dryRun);
 
+/**
+ * Delete generations older than `t`, except for the most recent one
+ * older than `t`.
+ *
+ * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
+ *
+ * @param dryRun Log what would be deleted instead of actually doing
+ * so.
+ */
 void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);
 
-void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, bool dryRun);
+/**
+ * Parse a temp spec intended for `deleteGenerationsOlderThan()`.
+ *
+ * Throws an exception if `timeSpec` fails to parse.
+ */
+time_t parseOlderThanTimeSpec(std::string_view timeSpec);
 
+/**
+ * Smaller wrapper around `replaceSymlink` for replacing the current
+ * generation of a profile. Does not enforce proper structure.
+ *
+ * @todo Always use `switchGeneration()` instead, and delete this.
+ */
 void switchLink(Path link, Path target);
 
 /**

@@ -29,7 +29,7 @@ enum class SQLiteOpenMode {
      * Use this mode if the database is on a read-only filesystem.
      * Fails with an error if the database does not exist.
      */
-    Immutable
+    Immutable,
 };
 
 /**

@@ -225,15 +225,7 @@ constexpr std::array<ExperimentalFeatureDetails, 15> xpFeatureDetails = {{
         .tag = Xp::ReadOnlyLocalStore,
         .name = "read-only-local-store",
         .description = R"(
-            Allow the use of the `read-only` parameter in local store URIs.
-
-            Set this parameter to `true` to allow stores with databases on read-only filesystems to be opened for querying; ordinarily Nix will refuse to do this.
-
-            This is because SQLite requires write access to the database file to perform the file locking operations necessary for safe concurrent access.
-            When `read-only` is set to `true`, the database will be opened in immutable mode.
-
-            Under this mode, SQLite will not do any locking at all, so you should be certain that the database will not be changed.
-            While the filesystem the database resides on might be read-only to this process, consider whether another user, process, or system, might have write access to it.
+            Allow the use of the `read-only` parameter in [local store](@docroot@/command-ref/new-cli/nix3-help-stores.md#local-store) URIs.
         )",
     },
 }};

@@ -63,30 +63,19 @@ std::pair<AutoCloseFD, Path> createTempFile(const Path & prefix)
     return {std::move(fd), tmpl};
 }
 
-void createSymlink(const Path & target, const Path & link,
-    std::optional<time_t> mtime)
+void createSymlink(const Path & target, const Path & link)
 {
     if (symlink(target.c_str(), link.c_str()))
         throw SysError("creating symlink from '%1%' to '%2%'", link, target);
-    if (mtime) {
-        struct timeval times[2];
-        times[0].tv_sec = *mtime;
-        times[0].tv_usec = 0;
-        times[1].tv_sec = *mtime;
-        times[1].tv_usec = 0;
-        if (lutimes(link.c_str(), times))
-            throw SysError("setting time of symlink '%s'", link);
-    }
 }
 
-void replaceSymlink(const Path & target, const Path & link,
-    std::optional<time_t> mtime)
+void replaceSymlink(const Path & target, const Path & link)
 {
     for (unsigned int n = 0; true; n++) {
         Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link)));
 
         try {
-            createSymlink(target, tmp, mtime);
+            createSymlink(target, tmp);
         } catch (SysError & e) {
             if (e.errNo == EEXIST) continue;
             throw;

@@ -6,6 +6,7 @@
 #include <map>
 #include <cstdlib>
 #include <mutex>
+#include <algorithm>
 
 
 namespace nix {
@@ -66,69 +67,20 @@ void RefScanSink::operator () (std::string_view data)
 }
 
 
-PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
-    : RefScanSink(std::move(hashes))
-    , backMap(std::move(backMap))
-{ }
-
-PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
-{
-    StringSet hashes;
-    std::map<std::string, StorePath> backMap;
-
-    for (auto & i : refs) {
-        std::string hashPart(i.hashPart());
-        auto inserted = backMap.emplace(hashPart, i).second;
-        assert(inserted);
-        hashes.insert(hashPart);
-    }
-
-    return PathRefScanSink(std::move(hashes), std::move(backMap));
-}
-
-StorePathSet PathRefScanSink::getResultPaths()
-{
-    /* Map the hashes found back to their store paths. */
-    StorePathSet found;
-    for (auto & i : getResult()) {
-        auto j = backMap.find(i);
-        assert(j != backMap.end());
-        found.insert(j->second);
-    }
-
-    return found;
-}
-
-
-std::pair<StorePathSet, HashResult> scanForReferences(
-    const std::string & path,
-    const StorePathSet & refs)
-{
-    HashSink hashSink { htSHA256 };
-    auto found = scanForReferences(hashSink, path, refs);
-    auto hash = hashSink.finish();
-    return std::pair<StorePathSet, HashResult>(found, hash);
-}
-
-StorePathSet scanForReferences(
-    Sink & toTee,
-    const Path & path,
-    const StorePathSet & refs)
-{
-    PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
-    TeeSink sink { refsSink, toTee };
-
-    /* Look for the hashes in the NAR dump of the path. */
-    dumpPath(path, sink);
-
-    return refsSink.getResultPaths();
-}
-
-
 RewritingSink::RewritingSink(const std::string & from, const std::string & to, Sink & nextSink)
-    : from(from), to(to), nextSink(nextSink)
+    : RewritingSink({{from, to}}, nextSink)
 {
+}
+
+RewritingSink::RewritingSink(const StringMap & rewrites, Sink & nextSink)
+    : rewrites(rewrites), nextSink(nextSink)
+{
+    std::string::size_type maxRewriteSize = 0;
+    for (auto & [from, to] : rewrites) {
         assert(from.size() == to.size());
+        maxRewriteSize = std::max(maxRewriteSize, from.size());
+    }
+    this->maxRewriteSize = maxRewriteSize;
 }
 
 void RewritingSink::operator () (std::string_view data)
@@ -136,13 +88,13 @@ void RewritingSink::operator () (std::string_view data)
     std::string s(prev);
     s.append(data);
 
-    size_t j = 0;
-    while ((j = s.find(from, j)) != std::string::npos) {
-        matches.push_back(pos + j);
-        s.replace(j, from.size(), to);
-    }
-
-    prev = s.size() < from.size() ? s : std::string(s, s.size() - from.size() + 1, from.size() - 1);
+    s = rewriteStrings(s, rewrites);
+
+    prev = s.size() < maxRewriteSize
+        ? s
+        : maxRewriteSize == 0
+            ? ""
+            : std::string(s, s.size() - maxRewriteSize + 1, maxRewriteSize - 1);
 
     auto consumed = s.size() - prev.size();
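The held-back `prev` buffer (now `maxRewriteSize - 1` bytes rather than `from.size() - 1`) is what keeps matches that straddle chunk boundaries intact. A small worked sketch:

    StringSink out;
    RewritingSink r({{"foo", "bar"}}, out);  // maxRewriteSize == 3
    r("xxfo");   // emits "xx"; holds back "fo"
    r("oyy");    // sees "fooyy", emits "bar"; holds back "yy"
    r.flush();   // emits the held-back "yy"
    // out.s == "xxbaryy"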

@@ -2,14 +2,9 @@
 ///@file
 
 #include "hash.hh"
-#include "path.hh"
 
 namespace nix {
 
-std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs);
-
-StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs);
-
 class RefScanSink : public Sink
 {
     StringSet hashes;
@@ -28,28 +23,18 @@ public:
     void operator () (std::string_view data) override;
 };
 
-class PathRefScanSink : public RefScanSink
-{
-    std::map<std::string, StorePath> backMap;
-
-    PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
-
-public:
-
-    static PathRefScanSink fromPaths(const StorePathSet & refs);
-
-    StorePathSet getResultPaths();
-};
-
 struct RewritingSink : Sink
 {
-    std::string from, to, prev;
+    const StringMap rewrites;
+    std::string::size_type maxRewriteSize;
+    std::string prev;
     Sink & nextSink;
     uint64_t pos = 0;
 
     std::vector<uint64_t> matches;
 
     RewritingSink(const std::string & from, const std::string & to, Sink & nextSink);
+    RewritingSink(const StringMap & rewrites, Sink & nextSink);
 
     void operator () (std::string_view data) override;

46 src/libutil/tests/references.cc Normal file

@@ -0,0 +1,46 @@
+#include "references.hh"
+#include <gtest/gtest.h>
+
+namespace nix {
+
+using std::string;
+
+struct RewriteParams {
+    string originalString, finalString;
+    StringMap rewrites;
+
+    friend std::ostream& operator<<(std::ostream& os, const RewriteParams& bar) {
+        StringSet strRewrites;
+        for (auto & [from, to] : bar.rewrites)
+            strRewrites.insert(from + "->" + to);
+        return os <<
+            "OriginalString: " << bar.originalString << std::endl <<
+            "Rewrites: " << concatStringsSep(",", strRewrites) << std::endl <<
+            "Expected result: " << bar.finalString;
+    }
+};
+
+class RewriteTest : public ::testing::TestWithParam<RewriteParams> {
+};
+
+TEST_P(RewriteTest, IdentityRewriteIsIdentity) {
+    RewriteParams param = GetParam();
+    StringSink rewritten;
+    auto rewriter = RewritingSink(param.rewrites, rewritten);
+    rewriter(param.originalString);
+    rewriter.flush();
+    ASSERT_EQ(rewritten.s, param.finalString);
+}
+
+INSTANTIATE_TEST_CASE_P(
+    references,
+    RewriteTest,
+    ::testing::Values(
+        RewriteParams{ "foooo", "baroo", {{"foo", "bar"}, {"bar", "baz"}}},
+        RewriteParams{ "foooo", "bazoo", {{"fou", "bar"}, {"foo", "baz"}}},
+        RewriteParams{ "foooo", "foooo", {}}
+    )
+);
+
+}

@@ -1853,6 +1853,7 @@ void setStackSize(size_t stackSize)
 
 #if __linux__
 static AutoCloseFD fdSavedMountNamespace;
+static AutoCloseFD fdSavedRoot;
 #endif
 
 void saveMountNamespace()
@@ -1860,10 +1861,11 @@ void saveMountNamespace()
 #if __linux__
     static std::once_flag done;
     std::call_once(done, []() {
-        AutoCloseFD fd = open("/proc/self/ns/mnt", O_RDONLY);
-        if (!fd)
+        fdSavedMountNamespace = open("/proc/self/ns/mnt", O_RDONLY);
+        if (!fdSavedMountNamespace)
             throw SysError("saving parent mount namespace");
-        fdSavedMountNamespace = std::move(fd);
+
+        fdSavedRoot = open("/proc/self/root", O_RDONLY);
     });
 #endif
 }
@@ -1876,9 +1878,16 @@ void restoreMountNamespace()
 
         if (fdSavedMountNamespace && setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1)
             throw SysError("restoring parent mount namespace");
-        if (chdir(savedCwd.c_str()) == -1) {
-            throw SysError("restoring cwd");
+
+        if (fdSavedRoot) {
+            if (fchdir(fdSavedRoot.get()))
+                throw SysError("chdir into saved root");
+            if (chroot("."))
+                throw SysError("chroot into saved root");
         }
+
+        if (chdir(savedCwd.c_str()) == -1)
+            throw SysError("restoring cwd");
     } catch (Error & e) {
         debug(e.msg());
     }
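Entering the saved mount namespace with `setns()` does not by itself put the process back under that namespace's root, so the restore now re-roots explicitly through the saved `/proc/self/root` descriptor before restoring the working directory. The sequence, condensed (error handling elided):

    setns(fdSavedMountNamespace.get(), CLONE_NEWNS);  // re-enter saved mount ns
    fchdir(fdSavedRoot.get());                        // cd into the saved root
    chroot(".");                                      // make it the root again
    chdir(savedCwd.c_str());                          // restore old cwd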

@@ -256,14 +256,12 @@ inline Paths createDirs(PathView path)
 /**
  * Create a symlink.
  */
-void createSymlink(const Path & target, const Path & link,
-    std::optional<time_t> mtime = {});
+void createSymlink(const Path & target, const Path & link);
 
 /**
  * Atomically create or replace a symlink.
 */
-void replaceSymlink(const Path & target, const Path & link,
-    std::optional<time_t> mtime = {});
+void replaceSymlink(const Path & target, const Path & link);
 
 void renameFile(const Path & src, const Path & dst);

@@ -177,6 +177,7 @@ static int main_nix_channel(int argc, char ** argv)
         cRemove,
         cList,
         cUpdate,
+        cListGenerations,
         cRollback
     } cmd = cNone;
     std::vector<std::string> args;
@@ -193,6 +194,8 @@ static int main_nix_channel(int argc, char ** argv)
             cmd = cList;
         } else if (*arg == "--update") {
             cmd = cUpdate;
+        } else if (*arg == "--list-generations") {
+            cmd = cListGenerations;
         } else if (*arg == "--rollback") {
             cmd = cRollback;
         } else {
@@ -237,6 +240,11 @@ static int main_nix_channel(int argc, char ** argv)
         case cUpdate:
             update(StringSet(args.begin(), args.end()));
             break;
+        case cListGenerations:
+            if (!args.empty())
+                throw UsageError("'--list-generations' expects no arguments");
+            std::cout << runProgram(settings.nixBinDir + "/nix-env", false, {"--profile", profile, "--list-generations"}) << std::flush;
+            break;
         case cRollback:
             if (args.size() > 1)
                 throw UsageError("'--rollback' has at most one argument");

@@ -41,9 +41,10 @@ void removeOldGenerations(std::string dir)
         }
         if (link.find("link") != std::string::npos) {
             printInfo("removing old generations of profile %s", path);
-            if (deleteOlderThan != "")
-                deleteGenerationsOlderThan(path, deleteOlderThan, dryRun);
-            else
+            if (deleteOlderThan != "") {
+                auto t = parseOlderThanTimeSpec(deleteOlderThan);
+                deleteGenerationsOlderThan(path, t, dryRun);
+            } else
                 deleteOldGenerations(path, dryRun);
         }
     } else if (type == DT_DIR) {

@@ -772,7 +772,7 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs)
 
     debug("switching to new user environment");
     Path generation = createGeneration(
-        ref<LocalFSStore>(store2),
+        *store2,
         globals.profile,
         drv.queryOutPath());
     switchLink(globals.profile, generation);
@@ -1356,13 +1356,14 @@ static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opAr
     if (opArgs.size() == 1 && opArgs.front() == "old") {
         deleteOldGenerations(globals.profile, globals.dryRun);
     } else if (opArgs.size() == 1 && opArgs.front().find('d') != std::string::npos) {
-        deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun);
+        auto t = parseOlderThanTimeSpec(opArgs.front());
+        deleteGenerationsOlderThan(globals.profile, t, globals.dryRun);
     } else if (opArgs.size() == 1 && opArgs.front().find('+') != std::string::npos) {
         if (opArgs.front().size() < 2)
             throw Error("invalid number of generations '%1%'", opArgs.front());
         auto str_max = opArgs.front().substr(1);
         auto max = string2Int<GenerationNumber>(str_max);
-        if (!max || *max == 0)
+        if (!max)
             throw Error("invalid number of generations to keep '%1%'", opArgs.front());
         deleteGenerationsGreaterThan(globals.profile, *max, globals.dryRun);
     } else {

@@ -158,7 +158,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
     }
 
     debug("switching to new user environment");
-    Path generation = createGeneration(ref<LocalFSStore>(store2), profile, topLevelOut);
+    Path generation = createGeneration(*store2, profile, topLevelOut);
     switchLink(profile, generation);
 }
 

@@ -806,9 +806,10 @@ struct CmdProfileWipeHistory : virtual StoreCommand, MixDefaultProfile, MixDryRu
 
     void run(ref<Store> store) override
     {
-        if (minAge)
-            deleteGenerationsOlderThan(*profile, *minAge, dryRun);
-        else
+        if (minAge) {
+            auto t = parseOlderThanTimeSpec(*minAge);
+            deleteGenerationsOlderThan(*profile, t, dryRun);
+        } else
             deleteOldGenerations(*profile, dryRun);
     }
 };

@@ -129,3 +129,7 @@ nix build --impure -f multiple-outputs.nix --json e --no-link | jq --exit-status
     (.drvPath | match(".*multiple-outputs-e.drv")) and
     (.outputs | keys == ["a_a", "b"]))
 '
+
+# Make sure that `--stdin` works and does not apply any defaults
+printf "" | nix build --no-link --stdin --json | jq --exit-status '. == []'
+printf "%s\n" "$drv^*" | nix build --no-link --stdin --json | jq --exit-status '.[0]|has("drvPath")'

@@ -5,6 +5,12 @@ enableFeatures "fetch-closure"
 clearStore
 clearCacheCache
 
+# Old daemons don't properly zero out the self-references when
+# calculating the CA hashes, so this breaks `nix store
+# make-content-addressed` which expects the client and the daemon to
+# compute the same hash
+requireDaemonNewerThan "2.16.0pre20230524"
+
 # Initialize binary cache.
 nonCaPath=$(nix build --json --file ./dependencies.nix --no-link | jq -r .[].outputs.out)
 caPath=$(nix store make-content-addressed --json $nonCaPath | jq -r '.rewrites | map(.) | .[]')

28 tests/gc.sh

@@ -50,31 +50,3 @@ if test -e $outPath/foobar; then false; fi
 # Check that the store is empty.
 rmdir $NIX_STORE_DIR/.links
 rmdir $NIX_STORE_DIR
-
-## Test `nix-collect-garbage -d`
-testCollectGarbageD () {
-    clearProfiles
-    # Run two `nix-env` commands, should create two generations of
-    # the profile
-    nix-env -f ./user-envs.nix -i foo-1.0
-    nix-env -f ./user-envs.nix -i foo-2.0pre1
-    [[ $(nix-env --list-generations | wc -l) -eq 2 ]]
-
-    # Clear the profile history. There should be only one generation
-    # left
-    nix-collect-garbage -d
-    [[ $(nix-env --list-generations | wc -l) -eq 1 ]]
-}
-# `nix-env` doesn't work with CA derivations, so let's ignore that bit if we're
-# using them
-if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then
-    testCollectGarbageD
-
-    # Run the same test, but forcing the profiles at their legacy location under
-    # /nix/var/nix.
-    #
-    # Regression test for #8294
-    rm ~/.nix-profile
-    ln -s $NIX_STATE_DIR/profiles/per-user/me ~/.nix-profile
-    testCollectGarbageD
-fi
@@ -16,6 +16,7 @@ nix_tests = \
   flakes/flake-in-submodule.sh \
   ca/gc.sh \
   gc.sh \
+  nix-collect-garbage-d.sh \
  remote-store.sh \
   legacy-ssh-store.sh \
   lang.sh \
@@ -135,6 +136,7 @@ nix_tests = \
   flakes/show.sh \
   impure-derivations.sh \
   path-from-hash-part.sh \
+  test-libstoreconsumer.sh \
   toString-path.sh \
   read-only-store.sh
@@ -154,6 +156,7 @@ test-deps += \
   tests/common/vars-and-functions.sh \
   tests/config.nix \
   tests/ca/config.nix \
+  tests/test-libstoreconsumer/test-libstoreconsumer \
   tests/dyn-drv/config.nix

 ifeq ($(BUILD_SHARED_LIBS), 1)
@@ -8,6 +8,7 @@ rm -f $TEST_HOME/.nix-channels $TEST_HOME/.nix-profile
 nix-channel --add http://foo/bar xyzzy
 nix-channel --list | grepQuiet http://foo/bar
 nix-channel --remove xyzzy
+[[ $(nix-channel --list-generations | wc -l) == 1 ]]

 [ -e $TEST_HOME/.nix-channels ]
 [ "$(cat $TEST_HOME/.nix-channels)" = '' ]

@@ -38,6 +39,7 @@ ln -s dependencies.nix $TEST_ROOT/nixexprs/default.nix
 # Test the update action.
 nix-channel --add file://$TEST_ROOT/foo
 nix-channel --update
+[[ $(nix-channel --list-generations | wc -l) == 2 ]]

 # Do a query.
 nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml
tests/nix-collect-garbage-d.sh (new file, 40 lines)
@@ -0,0 +1,40 @@
+source common.sh
+
+clearStore
+
+## Test `nix-collect-garbage -d`
+
+# TODO: `nix-env` doesn't work with CA derivations, so make a
+# `ca/nix-collect-garbage-d.sh` wrapper.
+
+testCollectGarbageD () {
+    clearProfiles
+    # Run two `nix-env` commands, should create two generations of
+    # the profile
+    nix-env -f ./user-envs.nix -i foo-1.0 "$@"
+    nix-env -f ./user-envs.nix -i foo-2.0pre1 "$@"
+    [[ $(nix-env --list-generations "$@" | wc -l) -eq 2 ]]
+
+    # Clear the profile history. There should be only one generation
+    # left
+    nix-collect-garbage -d
+    [[ $(nix-env --list-generations "$@" | wc -l) -eq 1 ]]
+}
+
+testCollectGarbageD
+
+# Run the same test, but forcing the profiles to an arbitrary location.
+rm ~/.nix-profile
+ln -s $TEST_ROOT/blah ~/.nix-profile
+testCollectGarbageD
+
+# Run the same test, but forcing the profiles at their legacy location under
+# /nix/var/nix.
+#
+# Note that we *don't* use the default profile; `nix-collect-garbage` will
+# need to check the legacy location unconditionally, not just follow
+# `~/.nix-profile`, to pass this test.
+#
+# Regression test for #8294
+rm ~/.nix-profile
+testCollectGarbageD --profile "$NIX_STATE_DIR/profiles/per-user/me"
tests/nixos/tarball-flakes.nix (new file, 84 lines)
@@ -0,0 +1,84 @@
+{ lib, config, nixpkgs, ... }:
+
+let
+  pkgs = config.nodes.machine.nixpkgs.pkgs;
+
+  root = pkgs.runCommand "nixpkgs-flake" {}
+    ''
+      mkdir -p $out/stable
+
+      set -x
+      dir=nixpkgs-${nixpkgs.shortRev}
+      cp -prd ${nixpkgs} $dir
+      # Set the correct timestamp in the tarball.
+      find $dir -print0 | xargs -0 touch -t ${builtins.substring 0 12 nixpkgs.lastModifiedDate}.${builtins.substring 12 2 nixpkgs.lastModifiedDate} --
+      tar cfz $out/stable/${nixpkgs.rev}.tar.gz $dir --hard-dereference
+
+      echo 'Redirect "/latest.tar.gz" "/stable/${nixpkgs.rev}.tar.gz"' > $out/.htaccess
+
+      echo 'Header set Link "<http://localhost/stable/${nixpkgs.rev}.tar.gz?rev=${nixpkgs.rev}&revCount=1234>; rel=\"immutable\""' > $out/stable/.htaccess
+    '';
+in
+
+{
+  name = "tarball-flakes";
+
+  nodes =
+    {
+      machine =
+        { config, pkgs, ... }:
+        { networking.firewall.allowedTCPPorts = [ 80 ];
+
+          services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+          services.httpd.extraConfig = ''
+            ErrorLog syslog:local6
+          '';
+          services.httpd.virtualHosts."localhost" =
+            { servedDirs =
+                [ { urlPath = "/";
+                    dir = root;
+                  }
+                ];
+            };
+
+          virtualisation.writableStore = true;
+          virtualisation.diskSize = 2048;
+          virtualisation.additionalPaths = [ pkgs.hello pkgs.fuse ];
+          virtualisation.memorySize = 4096;
+          nix.settings.substituters = lib.mkForce [ ];
+          nix.extraOptions = "experimental-features = nix-command flakes";
+        };
+    };
+
+  testScript = { nodes }: ''
+    # fmt: off
+    import json
+
+    start_all()
+
+    machine.wait_for_unit("httpd.service")
+
+    out = machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz")
+    print(out)
+    info = json.loads(out)
+
+    # Check that we got redirected to the immutable URL.
+    assert info["locked"]["url"] == "http://localhost/stable/${nixpkgs.rev}.tar.gz"
+
+    # Check that we got the rev and revCount attributes.
+    assert info["revision"] == "${nixpkgs.rev}"
+    assert info["revCount"] == 1234
+
+    # Check that fetching with rev/revCount/narHash succeeds.
+    machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?rev=" + info["revision"])
+    machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?revCount=" + str(info["revCount"]))
+    machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?narHash=" + info["locked"]["narHash"])
+
+    # Check that fetching fails if we provide incorrect attributes.
+    machine.fail("nix flake metadata --json http://localhost/latest.tar.gz?rev=493300eb13ae6fb387fbd47bf54a85915acc31c0")
+    machine.fail("nix flake metadata --json http://localhost/latest.tar.gz?revCount=789")
+    machine.fail("nix flake metadata --json http://localhost/latest.tar.gz?narHash=sha256-tbudgBSg+bHWHiHnlteNzN8TUvI80ygS9IULh4rklEw=")
+  '';
+}
@@ -21,4 +21,8 @@ static void prim_anotherNull (EvalState & state, const PosIdx pos, Value ** args
     v.mkBool(false);
 }

-static RegisterPrimOp rp("anotherNull", 0, prim_anotherNull);
+static RegisterPrimOp rp({
+    .name = "anotherNull",
+    .arity = 0,
+    .fun = prim_anotherNull,
+});
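The hunk above migrates a plugin's primop registration from positional arguments to a designated-initializer struct. A toy sketch of why that form ages better (the `PrimOpInfo` struct here is an assumption for illustration, not nix's actual `RegisterPrimOp` definition): new optional fields can be added to the struct without touching existing registrations.

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Toy registration record with named fields.
    struct PrimOpInfo {
        std::string name;
        std::size_t arity = 0;
        std::function<void()> fun;
        std::string doc;  // a later addition costs existing call sites nothing
    };

    static std::vector<PrimOpInfo> registry;

    struct RegisterPrimOp {
        RegisterPrimOp(PrimOpInfo info) { registry.push_back(std::move(info)); }
    };

    static void prim_anotherNull() { std::cout << "false\n"; }

    // Mirrors the new-style registration in the hunk above.
    static RegisterPrimOp rp({
        .name = "anotherNull",
        .arity = 0,
        .fun = prim_anotherNull,
    });

    int main()
    {
        for (auto & p : registry)
            std::cout << p.name << "/" << p.arity << "\n";
    }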
tests/test-libstoreconsumer.sh (new file, 6 lines)
@@ -0,0 +1,6 @@
+source common.sh
+
+drv="$(nix-instantiate simple.nix)"
+cat "$drv"
+out="$(./test-libstoreconsumer/test-libstoreconsumer "$drv")"
+cat "$out/hello" | grep -F "Hello World!"
tests/test-libstoreconsumer/README.md (new file, 6 lines)
@@ -0,0 +1,6 @@
+
+A very simple C++ consumer of the libstore library.
+
+- Keep it simple. Library consumers expect something simple.
+- No build hook, or any other reinvocations.
+- No more global state than necessary.
tests/test-libstoreconsumer/local.mk (new file, 12 lines)
@@ -0,0 +1,12 @@
+programs += test-libstoreconsumer
+
+test-libstoreconsumer_DIR := $(d)
+
+test-libstoreconsumer_SOURCES := \
+  $(wildcard $(d)/*.cc) \
+
+test-libstoreconsumer_CXXFLAGS += -I src/libutil -I src/libstore
+
+test-libstoreconsumer_LIBS = libstore libutil
+
+test-libstoreconsumer_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) $(LOWDOWN_LIBS)
tests/test-libstoreconsumer/main.cc (new file, 45 lines)
@@ -0,0 +1,45 @@
+#include "globals.hh"
+#include "store-api.hh"
+#include "build-result.hh"
+#include <iostream>
+
+using namespace nix;
+
+int main (int argc, char **argv)
+{
+    try {
+        if (argc != 2) {
+            std::cerr << "Usage: " << argv[0] << " store/path/to/something.drv\n";
+            return 1;
+        }
+
+        std::string drvPath = argv[1];
+
+        initLibStore();
+
+        auto store = nix::openStore();
+
+        // build the derivation
+
+        std::vector<DerivedPath> paths {
+            DerivedPath::Built {
+                .drvPath = store->parseStorePath(drvPath),
+                .outputs = OutputsSpec::Names{"out"}
+            }
+        };
+
+        const auto results = store->buildPathsWithResults(paths, bmNormal, store);
+
+        for (const auto & result : results) {
+            for (const auto & [outputName, realisation] : result.builtOutputs) {
+                std::cout << store->printStorePath(realisation.outPath) << "\n";
+            }
+        }
+
+        return 0;
+
+    } catch (const std::exception & e) {
+        std::cerr << "Error: " << e.what() << "\n";
+        return 1;
+    }
+}