Merge tag '2.24.9' into sync-2.24.9

Tagging release 2.24.9

-----BEGIN PGP SIGNATURE-----

iQFHBAABCAAxFiEEtUHVUwEnDgvPFcpdgXC0cm1xmN4FAmb3K78THGVkb2xzdHJh
QGdtYWlsLmNvbQAKCRCBcLRybXGY3t5xB/4mKlFd8hka45CuQrGN6lJrIs76kvn5
mXDLWpHTOyipUZN1ZKACUPlKD/8cP8sHwd3/fILlwKAOurgWCd/+QwAPltT01r/w
T02E4haXGLmWwdZ+uPcm/lBdZVq8IZ1oU/9+EFKsbaYpa4O4kZPHe3joPr4ebVlO
zXndiR5FDSSEg05qAXr62KndgydTf/xtjEEv6jONzMaO1MCK6OAHIKCZg2ybsV/S
5ayfUESRFwGg4/BbzSEkEO0wl8mgwo6PbD0BI83FSC9W1gaR2ImadjA9GPKBkS1o
8Rj/KrP55JZkQExEQWquptEMlKoDdruQUelXXBBeqnXErG2bORV+Z7xG
=SUre
-----END PGP SIGNATURE-----

commit d8d38c4257
58 changed files with 605 additions and 415 deletions
.github/workflows/backport.yml (vendored) — 32 deletions
@@ -1,32 +0,0 @@
-name: Backport
-on:
-  pull_request_target:
-    types: [closed, labeled]
-permissions:
-  contents: read
-jobs:
-  backport:
-    name: Backport Pull Request
-    permissions:
-      # for zeebe-io/backport-action
-      contents: write
-      pull-requests: write
-    if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
-          # required to find all branches
-          fetch-depth: 0
-      - name: Create backport PRs
-        # should be kept in sync with `version`
-        uses: zeebe-io/backport-action@v3.0.2
-        with:
-          # Config README: https://github.com/zeebe-io/backport-action#backport-action
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          github_workspace: ${{ github.workspace }}
-          pull_description: |-
-            Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}.
-          # should be kept in sync with `uses`
-          version: v0.0.5
.mergify.yml (new file) — 92 additions
@@ -0,0 +1,92 @@
+queue_rules:
+  - name: default
+    # all required tests need to go here
+    merge_conditions:
+      - check-success=installer
+      - check-success=installer_test (macos-latest)
+      - check-success=installer_test (ubuntu-latest)
+      - check-success=tests (macos-latest)
+      - check-success=tests (ubuntu-latest)
+      - check-success=vm_tests
+    merge_method: rebase
+    batch_size: 5
+
+pull_request_rules:
+  - name: merge using the merge queue
+    conditions:
+      - base=master
+      - label~=merge-queue|dependencies
+    actions:
+      queue: {}
+
+  # The rules below will first create backport pull requests and put those in a merge queue.
+
+  - name: backport patches to 2.18
+    conditions:
+      - label=backport 2.18-maintenance
+    actions:
+      backport:
+        branches:
+          - 2.18-maintenance
+        labels:
+          - merge-queue
+
+  - name: backport patches to 2.19
+    conditions:
+      - label=backport 2.19-maintenance
+    actions:
+      backport:
+        branches:
+          - 2.19-maintenance
+        labels:
+          - merge-queue
+
+  - name: backport patches to 2.20
+    conditions:
+      - label=backport 2.20-maintenance
+    actions:
+      backport:
+        branches:
+          - 2.20-maintenance
+        labels:
+          - merge-queue
+
+  - name: backport patches to 2.21
+    conditions:
+      - label=backport 2.21-maintenance
+    actions:
+      backport:
+        branches:
+          - 2.21-maintenance
+        labels:
+          - merge-queue
+
+  - name: backport patches to 2.22
+    conditions:
+      - label=backport 2.22-maintenance
+    actions:
+      backport:
+        branches:
+          - 2.22-maintenance
+        labels:
+          - merge-queue
+
+  - name: backport patches to 2.23
+    conditions:
+      - label=backport 2.23-maintenance
+    actions:
+      backport:
+        branches:
+          - 2.23-maintenance
+        labels:
+          - merge-queue
+
+  - name: backport patches to 2.24
+    conditions:
+      - label=backport 2.24-maintenance
+    actions:
+      backport:
+        branches:
+          - "2.24-maintenance"
+        labels:
+          - merge-queue
.version — 2 lines changed
@@ -1 +1 @@
-2.24.2
+2.24.9
doc/manual/rl-next/filesystem-errors.md (new file) — 14 additions
@@ -0,0 +1,14 @@
+---
+synopsis: wrap filesystem exceptions more correctly
+issues: []
+prs: [11378]
+---
+
+With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions.
+
+This led to error traces no longer being generated, for example when listing a non-existent directory.
+
+This version catches these types of exception correctly and wraps them in Nix's own exception type.
+
+Author: [**@Mic92**](https://github.com/Mic92)
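Editor's note: a minimal standalone sketch of the wrapping pattern this release note describes. `AppError` here is a hypothetical stand-in for Nix's own exception type, not the real class:

```cpp
// Sketch: wrap std::filesystem errors in a domain error that carries context,
// so callers get "reading directory X: ..." rather than a raw filesystem_error.
#include <filesystem>
#include <iostream>
#include <stdexcept>
#include <string>

namespace fs = std::filesystem;

struct AppError : std::runtime_error {
    using std::runtime_error::runtime_error;
};

static std::uintmax_t countEntries(const fs::path & dir)
{
    try {
        std::uintmax_t n = 0;
        for ([[maybe_unused]] auto & e : fs::directory_iterator{dir})
            ++n;
        return n;
    } catch (const fs::filesystem_error & e) {
        // Re-throw with the operation and path named, instead of leaking
        // the raw filesystem_error to the caller.
        throw AppError("reading directory '" + dir.string() + "': " + e.code().message());
    }
}

int main()
{
    try {
        std::cout << countEntries("/no/such/dir") << "\n";
    } catch (const AppError & e) {
        std::cerr << "error: " << e.what() << "\n";
    }
}
```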
doc/manual/rl-next/verify-tls.md (new file) — 8 additions
@@ -0,0 +1,8 @@
+---
+synopsis: "`<nix/fetchurl.nix>` uses TLS verification"
+prs: [11585]
+---
+
+Previously `<nix/fetchurl.nix>` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `<nix/fetchurl.nix>` will now fail if you're fetching from an HTTPS server that does not have a valid certificate.
+
+`<nix/fetchurl.nix>` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue.
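Editor's note: the knob involved is the standard peer-verification switch of the underlying HTTP library. A minimal libcurl sketch of that switch (this is not Nix's internal `FileTransfer` API, just the equivalent libcurl option; build with `-lcurl`):

```cpp
// Sketch: TLS peer verification with libcurl. builtin:fetchurl used to
// disable this; leaving both options at their secure values is the new
// behaviour described in the release note above.
#include <curl/curl.h>
#include <iostream>

int main()
{
    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL * curl = curl_easy_init();
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.org/");
    // Verify the server certificate chain, and that the cert matches the host.
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1L);
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2L);
    CURLcode res = curl_easy_perform(curl);
    if (res != CURLE_OK)
        std::cerr << "transfer failed: " << curl_easy_strerror(res) << "\n";
    curl_easy_cleanup(curl);
    curl_global_cleanup();
}
```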
@@ -14,6 +14,14 @@ This option requires either:
 * Linux running systemd, with SELinux disabled
 * MacOS
 
+> **Updating to macOS 15 Sequoia**
+>
+> If you recently updated to macOS 15 Sequoia and are getting
+> ```console
+> error: the user '_nixbld1' in the group 'nixbld' does not exist
+> ```
+> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling.
+
 ```console
 $ bash <(curl -L https://nixos.org/nix/install) --daemon
 ```
@@ -1,5 +1,13 @@
 # Installing a Binary Distribution
 
+> **Updating to macOS 15 Sequoia**
+>
+> If you recently updated to macOS 15 Sequoia and are getting
+> ```console
+> error: the user '_nixbld1' in the group 'nixbld' does not exist
+> ```
+> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling.
+
 To install the latest version of Nix, run the following command:
 
 ```console
@@ -39,8 +39,6 @@
   `pkgconfig` and the Boehm garbage collector, and pass the flag
   `--enable-gc` to `configure`.
 
-  For `bdw-gc` <= 8.2.4 Nix needs a [small patch](https://github.com/NixOS/nix/blob/ac4d2e7b857acdfeac35ac8a592bdecee2d29838/boehmgc-traceable_allocator-public.diff) to be applied.
-
 - The `boost` library of version 1.66.0 or higher. It can be obtained
   from the official web site <https://www.boost.org/>.
@@ -43,6 +43,14 @@ which you may remove.
 
 ### macOS
 
+> **Updating to macOS 15 Sequoia**
+>
+> If you recently updated to macOS 15 Sequoia and are getting
+> ```console
+> error: the user '_nixbld1' in the group 'nixbld' does not exist
+> ```
+> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling.
+
 1. If system-wide shell initialisation files haven't been altered since installing Nix, use the backups made by the installer:
 
    ```console
flake.lock (generated) — 6 lines changed
@@ -79,11 +79,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1721548954,
-        "narHash": "sha256-7cCC8+Tdq1+3OPyc3+gVo9dzUNkNIQfwSDJ2HSi2u3o=",
+        "lastModified": 1723688146,
+        "narHash": "sha256-sqLwJcHYeWLOeP/XoLwAtYjr01TISlkOfz+NG82pbdg=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "63d37ccd2d178d54e7fb691d7ec76000740ea24a",
+        "rev": "c3d4ac725177c030b1e289015989da2ad9d56af0",
         "type": "github"
       },
       "original": {
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-((NEW_NIX_FIRST_BUILD_UID=301))
+((NEW_NIX_FIRST_BUILD_UID=351))
 
 id_available(){
     dscl . list /Users UniqueID | grep -E '\b'"$1"'\b' >/dev/null
@@ -4,7 +4,17 @@ set -eu
 set -o pipefail
 
 # System specific settings
-export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-301}"
+# Notes:
+# - up to macOS Big Sur we used the same GID/UIDs as Linux (30000:30001-32)
+# - we changed UID to 301 because Big Sur updates failed into recovery mode
+#   we're targeting the 200-400 UID range for role users mentioned in the
+#   usage note for sysadminctl
+# - we changed UID to 351 because Sequoia now uses UIDs 300-304 for its own
+#   daemon users
+# - we changed GID to 350 alongside above just because it hides the nixbld
+#   group from the Users & Groups settings panel :)
+export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-351}"
+export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-350}"
 export NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
 
 readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
@@ -23,10 +23,10 @@ readonly RED='\033[31m'
 # installer allows overriding build user count to speed up installation
 # as creating each user takes non-trivial amount of time on macos
 readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
-readonly NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}"
 readonly NIX_BUILD_GROUP_NAME="nixbld"
+# each system specific installer must set these:
+# NIX_FIRST_BUILD_UID
+# NIX_BUILD_GROUP_ID
+# NIX_BUILD_USER_NAME_TEMPLATE
 # Please don't change this. We don't support it, because the
 # default shell profile that comes with Nix doesn't support it.
@@ -530,9 +530,7 @@ It seems the build group $NIX_BUILD_GROUP_NAME already exists, but
 with the UID $primary_group_id. This script can't really handle
 that right now, so I'm going to give up.
 
-You can fix this by editing this script and changing the
-NIX_BUILD_GROUP_ID variable near the top to from $NIX_BUILD_GROUP_ID
-to $primary_group_id and re-run.
+You can export NIX_BUILD_GROUP_ID=$primary_group_id and re-run.
 EOF
     else
         row "            Exists" "Yes"
@@ -5,6 +5,7 @@ set -o pipefail
 
 # System specific settings
 export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-30001}"
+export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}"
 export NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
 
 readonly SERVICE_SRC=/lib/systemd/system/nix-daemon.service
@@ -170,7 +170,9 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas
 {
     if (EvalSettings::isPseudoUrl(s)) {
         auto accessor = fetchers::downloadTarball(
-            EvalSettings::resolvePseudoUrl(s)).accessor;
+            state.store,
+            state.fetchSettings,
+            EvalSettings::resolvePseudoUrl(s));
         auto storePath = fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy);
         return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
     }
@@ -32,122 +32,6 @@ static void * oomHandler(size_t requested)
     throw std::bad_alloc();
 }
 
-class BoehmGCStackAllocator : public StackAllocator
-{
-    boost::coroutines2::protected_fixedsize_stack stack{
-        // We allocate 8 MB, the default max stack size on NixOS.
-        // A smaller stack might be quicker to allocate but reduces the stack
-        // depth available for source filter expressions etc.
-        std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))};
-
-    // This is specific to boost::coroutines2::protected_fixedsize_stack.
-    // The stack protection page is included in sctx.size, so we have to
-    // subtract one page size from the stack size.
-    std::size_t pfss_usable_stack_size(boost::context::stack_context & sctx)
-    {
-        return sctx.size - boost::context::stack_traits::page_size();
-    }
-
-public:
-    boost::context::stack_context allocate() override
-    {
-        auto sctx = stack.allocate();
-
-        // Stacks generally start at a high address and grow to lower addresses.
-        // Architectures that do the opposite are rare; in fact so rare that
-        // boost_routine does not implement it.
-        // So we subtract the stack size.
-        GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
-        return sctx;
-    }
-
-    void deallocate(boost::context::stack_context sctx) override
-    {
-        GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
-        stack.deallocate(sctx);
-    }
-};
-
-static BoehmGCStackAllocator boehmGCStackAllocator;
-
-/**
- * When a thread goes into a coroutine, we lose its original sp until
- * control flow returns to the thread.
- * While in the coroutine, the sp points outside the thread stack,
- * so we can detect this and push the entire thread stack instead,
- * as an approximation.
- * The coroutine's stack is covered by `BoehmGCStackAllocator`.
- * This is not an optimal solution, because the garbage is scanned when a
- * coroutine is active, for both the coroutine and the original thread stack.
- * However, the implementation is quite lean, and usually we don't have active
- * coroutines during evaluation, so this is acceptable.
- */
-void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id)
-{
-    void *& sp = *sp_ptr;
-    auto pthread_id = reinterpret_cast<pthread_t>(_pthread_id);
-#  ifndef __APPLE__
-    pthread_attr_t pattr;
-#  endif
-    size_t osStackSize;
-    // The low address of the stack, which grows down.
-    void * osStackLimit;
-    void * osStackBase;
-
-#  ifdef __APPLE__
-    osStackSize = pthread_get_stacksize_np(pthread_id);
-    osStackLimit = pthread_get_stackaddr_np(pthread_id);
-#  else
-    if (pthread_attr_init(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
-    }
-#  ifdef HAVE_PTHREAD_GETATTR_NP
-    if (pthread_getattr_np(pthread_id, &pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_getattr_np failed");
-    }
-#  elif HAVE_PTHREAD_ATTR_GET_NP
-    if (!pthread_attr_init(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
-    }
-    if (!pthread_attr_get_np(pthread_id, &pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_get_np failed");
-    }
-#  else
-#    error "Need one of `pthread_attr_get_np` or `pthread_getattr_np`"
-#  endif
-    if (pthread_attr_getstack(&pattr, &osStackLimit, &osStackSize)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed");
-    }
-    if (pthread_attr_destroy(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed");
-    }
-#  endif
-    osStackBase = (char *) osStackLimit + osStackSize;
-    // NOTE: We assume the stack grows down, as it does on all architectures we support.
-    //       Architectures that grow the stack up are rare.
-    if (sp >= osStackBase || sp < osStackLimit) { // sp is outside the os stack
-        sp = osStackLimit;
-    }
-}
-
-/* Disable GC while this object lives. Used by CoroutineContext.
- *
- * Boehm keeps a count of GC_disable() and GC_enable() calls,
- * and only enables GC when the count matches.
- */
-class BoehmDisableGC
-{
-public:
-    BoehmDisableGC()
-    {
-        GC_disable();
-    };
-    ~BoehmDisableGC()
-    {
-        GC_enable();
-    };
-};
-
 static inline void initGCReal()
 {
     /* Initialise the Boehm garbage collector. */
@@ -168,24 +52,6 @@ static inline void initGCReal()
 
     GC_set_oom_fn(oomHandler);
 
-    StackAllocator::defaultAllocator = &boehmGCStackAllocator;
-
-    // TODO: Remove __APPLE__ condition.
-    // Comment suggests an implementation that works on darwin and windows
-    // https://github.com/ivmai/bdwgc/issues/362#issuecomment-1936672196
-#if GC_VERSION_MAJOR >= 8 && GC_VERSION_MINOR >= 2 && GC_VERSION_MICRO >= 4 && !defined(__APPLE__)
-    GC_set_sp_corrector(&fixupBoehmStackPointer);
-
-    if (!GC_get_sp_corrector()) {
-        printTalkative("BoehmGC on this platform does not support sp_corrector; will disable GC inside coroutines");
-        /* Used to disable GC when entering coroutines on macOS */
-        create_coro_gc_hook = []() -> std::shared_ptr<void> { return std::make_shared<BoehmDisableGC>(); };
-    }
-#else
-#warning \
-    "BoehmGC version does not support GC while coroutine exists. GC will be disabled inside coroutines. Consider updating bdw-gc to 8.2.4 or later."
-#endif
-
     /* Set the initial heap size to something fairly big (25% of
        physical RAM, up to a maximum of 384 MiB) so that in most cases
        we don't need to garbage collect at all. (Collection has a
|
|||
if (EvalSettings::isPseudoUrl(value)) {
|
||||
try {
|
||||
auto accessor = fetchers::downloadTarball(
|
||||
EvalSettings::resolvePseudoUrl(value)).accessor;
|
||||
store,
|
||||
fetchSettings,
|
||||
EvalSettings::resolvePseudoUrl(value));
|
||||
auto storePath = fetchToStore(*store, SourcePath(accessor), FetchMode::Copy);
|
||||
return finish(store->toRealPath(storePath));
|
||||
} catch (Error & e) {
|
||||
|
|
|
@@ -3136,7 +3136,11 @@ static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value * * arg
         std::optional<ListBuilder> list;
     };
 
+#if HAVE_BOEHMGC
+    std::map<Symbol, Item, std::less<Symbol>, traceable_allocator<std::pair<const Symbol, Item>>> attrsSeen;
+#else
     std::map<Symbol, Item> attrsSeen;
+#endif
 
     state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.zipAttrsWith");
     state.forceList(*args[1], pos, "while evaluating the second argument passed to builtins.zipAttrsWith");
@@ -495,7 +495,11 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
     // https://github.com/NixOS/nix/issues/4313
     auto storePath =
         unpack
-        ? fetchToStore(*state.store, fetchers::downloadTarball(*url).accessor, FetchMode::Copy, name)
+        ? fetchToStore(
+            *state.store,
+            fetchers::downloadTarball(state.store, state.fetchSettings, *url),
+            FetchMode::Copy,
+            name)
         : fetchers::downloadFile(state.store, *url, name).storePath;
 
     if (expectedHash) {
@@ -460,7 +460,13 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
         std::string re = R"(Good "git" signature for \* with .* key SHA256:[)";
         for (const fetchers::PublicKey & k : publicKeys){
             // Calculate sha256 fingerprint from public key and escape the regex symbol '+' to match the key literally
-            auto fingerprint = trim(hashString(HashAlgorithm::SHA256, base64Decode(k.key)).to_string(nix::HashFormat::Base64, false), "=");
+            std::string keyDecoded;
+            try {
+                keyDecoded = base64Decode(k.key);
+            } catch (Error & e) {
+                e.addTrace({}, "while decoding public key '%s' used for git signature", k.key);
+            }
+            auto fingerprint = trim(hashString(HashAlgorithm::SHA256, keyDecoded).to_string(nix::HashFormat::Base64, false), "=");
             auto escaped_fingerprint = std::regex_replace(fingerprint, std::regex("\\+"), "\\+" );
             re += "(" + escaped_fingerprint + ")";
         }
@@ -601,12 +607,16 @@ struct GitSourceAccessor : SourceAccessor
         return readBlob(path, true);
     }
 
-    Hash getSubmoduleRev(const CanonPath & path)
+    /**
+     * If `path` exists and is a submodule, return its
+     * revision. Otherwise return nothing.
+     */
+    std::optional<Hash> getSubmoduleRev(const CanonPath & path)
     {
-        auto entry = need(path);
+        auto entry = lookup(path);
 
-        if (git_tree_entry_type(entry) != GIT_OBJECT_COMMIT)
-            throw Error("'%s' is not a submodule", showPath(path));
+        if (!entry || git_tree_entry_type(entry) != GIT_OBJECT_COMMIT)
+            return std::nullopt;
 
         return toHash(*git_tree_entry_id(entry));
     }
@@ -1074,8 +1084,10 @@ std::vector<std::tuple<GitRepoImpl::Submodule, Hash>> GitRepoImpl::getSubmodules
     auto rawAccessor = getRawAccessor(rev);
 
     for (auto & submodule : parseSubmodules(pathTemp)) {
-        auto rev = rawAccessor->getSubmoduleRev(submodule.path);
-        result.push_back({std::move(submodule), rev});
+        /* Filter out .gitmodules entries that don't exist or are not
+           submodules. */
+        if (auto rev = rawAccessor->getSubmoduleRev(submodule.path))
+            result.push_back({std::move(submodule), *rev});
     }
 
     return result;
@@ -584,9 +584,10 @@ struct GitInputScheme : InputScheme
             }
 
             try {
-                setWriteTime(localRefFile, now, now);
+                if (!input.getRev())
+                    setWriteTime(localRefFile, now, now);
             } catch (Error & e) {
-                warn("could not update mtime for file '%s': %s", localRefFile, e.msg());
+                warn("could not update mtime for file '%s': %s", localRefFile, e.info().msg);
             }
             if (!originalRef && !storeCachedHead(repoInfo.url, ref))
                 warn("could not update cached head '%s' for '%s'", ref, repoInfo.url);
@@ -102,7 +102,7 @@ DownloadFileResult downloadFile(
     };
 }
 
-DownloadTarballResult downloadTarball(
+static DownloadTarballResult downloadTarball_(
     const std::string & url,
     const Headers & headers)
 {
@@ -202,6 +202,22 @@ DownloadTarballResult downloadTarball(
     return attrsToResult(infoAttrs);
 }
 
+ref<SourceAccessor> downloadTarball(
+    ref<Store> store,
+    const Settings & settings,
+    const std::string & url)
+{
+    /* Go through Input::getAccessor() to ensure that the resulting
+       accessor has a fingerprint. */
+    fetchers::Attrs attrs;
+    attrs.insert_or_assign("type", "tarball");
+    attrs.insert_or_assign("url", url);
+
+    auto input = Input::fromAttrs(settings, std::move(attrs));
+
+    return input.getAccessor(store).first;
+}
+
 // An input scheme corresponding to a curl-downloadable resource.
 struct CurlInputScheme : InputScheme
 {
@@ -353,7 +369,7 @@ struct TarballInputScheme : CurlInputScheme
     {
         auto input(_input);
 
-        auto result = downloadTarball(getStrAttr(input.attrs, "url"), {});
+        auto result = downloadTarball_(getStrAttr(input.attrs, "url"), {});
 
         result.accessor->setPathDisplay("«" + input.to_string() + "»");
 
@@ -14,6 +14,8 @@ struct SourceAccessor;
 
 namespace nix::fetchers {
 
+struct Settings;
+
 struct DownloadFileResult
 {
     StorePath storePath;
@@ -40,8 +42,9 @@ struct DownloadTarballResult
  * Download and import a tarball into the Git cache. The result is the
  * Git tree hash of the root directory.
  */
-DownloadTarballResult downloadTarball(
-    const std::string & url,
-    const Headers & headers = {});
+ref<SourceAccessor> downloadTarball(
+    ref<Store> store,
+    const Settings & settings,
+    const std::string & url);
 
 }
@@ -183,7 +183,7 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub,
     /* Make sure that we are allowed to start a substitution. Note that even
        if maxSubstitutionJobs == 0, we still allow a substituter to run. This
        prevents infinite waiting. */
-    if (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
+    while (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
         worker.waitForBuildSlot(shared_from_this());
         co_await Suspend{};
     }
@@ -184,13 +184,13 @@ void Worker::wakeUp(GoalPtr goal)
 }
 
 
-unsigned Worker::getNrLocalBuilds()
+size_t Worker::getNrLocalBuilds()
 {
     return nrLocalBuilds;
 }
 
 
-unsigned Worker::getNrSubstitutions()
+size_t Worker::getNrSubstitutions()
 {
     return nrSubstitutions;
 }
@@ -92,12 +92,12 @@ private:
     /**
     * Number of build slots occupied. This includes local builds but does not
     * include substitutions or remote builds via the build hook.
     */
-    unsigned int nrLocalBuilds;
+    size_t nrLocalBuilds;
 
    /**
     * Number of substitution slots occupied.
    */
-    unsigned int nrSubstitutions;
+    size_t nrSubstitutions;
 
    /**
    * Maps used to prevent multiple instantiations of a goal for the
|
|||
* Return the number of local build processes currently running (but not
|
||||
* remote builds via the build hook).
|
||||
*/
|
||||
unsigned int getNrLocalBuilds();
|
||||
size_t getNrLocalBuilds();
|
||||
|
||||
/**
|
||||
* Return the number of substitution processes currently running.
|
||||
*/
|
||||
unsigned int getNrSubstitutions();
|
||||
size_t getNrSubstitutions();
|
||||
|
||||
/**
|
||||
* Registers a running child process. `inBuildSlot` means that
|
||||
|
|
|
@@ -38,10 +38,7 @@ void builtinFetchurl(
 
     auto source = sinkToSource([&](Sink & sink) {
 
-        /* No need to do TLS verification, because we check the hash of
-           the result anyway. */
         FileTransferRequest request(url);
-        request.verifyTLS = false;
         request.decompress = false;
 
         auto decompressor = makeDecompressionSink(
@@ -3,31 +3,53 @@
 
 namespace nix {
 
+namespace fs { using namespace std::filesystem; }
+
 void builtinUnpackChannel(
     const BasicDerivation & drv,
     const std::map<std::string, Path> & outputs)
 {
-    auto getAttr = [&](const std::string & name) {
+    auto getAttr = [&](const std::string & name) -> const std::string & {
         auto i = drv.env.find(name);
         if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
         return i->second;
     };
 
-    auto out = outputs.at("out");
-    auto channelName = getAttr("channelName");
-    auto src = getAttr("src");
+    fs::path out{outputs.at("out")};
+    auto & channelName = getAttr("channelName");
+    auto & src = getAttr("src");
 
-    createDirs(out);
+    if (fs::path{channelName}.filename().string() != channelName) {
+        throw Error("channelName is not allowed to contain filesystem seperators, got %1%", channelName);
+    }
+
+    try {
+        fs::create_directories(out);
+    } catch (fs::filesystem_error &) {
+        throw SysError("creating directory '%1%'", out.string());
+    }
+
     unpackTarfile(src, out);
 
-    auto entries = std::filesystem::directory_iterator{out};
-    auto fileName = entries->path().string();
-    auto fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries));
+    size_t fileCount;
+    std::string fileName;
+    try {
+        auto entries = fs::directory_iterator{out};
+        fileName = entries->path().string();
+        fileCount = std::distance(fs::begin(entries), fs::end(entries));
+    } catch (fs::filesystem_error &) {
+        throw SysError("failed to read directory %1%", out.string());
+    }
 
     if (fileCount != 1)
         throw Error("channel tarball '%s' contains more than one file", src);
-    std::filesystem::rename(fileName, (out + "/" + channelName));
+
+    auto target = out / channelName;
+    try {
+        fs::rename(fileName, target);
+    } catch (fs::filesystem_error &) {
+        throw SysError("failed to rename %1% to %2%", fileName, target.string());
+    }
 }
 
 }
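Editor's note: the new separator check above relies on `fs::path::filename()` returning the whole string only when the name contains no path components. A standalone sketch of that trick (function name here is illustrative, not Nix's API):

```cpp
// Sketch: reject names that contain path separators by checking that the
// name equals its own filename component.
#include <filesystem>
#include <iostream>
#include <string>

static bool isBareName(const std::string & name)
{
    return std::filesystem::path{name}.filename().string() == name;
}

int main()
{
    std::cout << isBareName("nixpkgs") << "\n";   // 1: plain name is fine
    std::cout << isBareName("../escape") << "\n"; // 0: contains a separator
    std::cout << isBareName("a/b") << "\n";       // 0: contains a separator
}
```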
@@ -169,28 +169,29 @@ protected:
     {
         try {
             checkEnabled();
-
-            auto request(makeRequest(path));
-
-            auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
-
-            getFileTransfer()->enqueueFileTransfer(request,
-                {[callbackPtr, this](std::future<FileTransferResult> result) {
-                    try {
-                        (*callbackPtr)(std::move(result.get().data));
-                    } catch (FileTransferError & e) {
-                        if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
-                            return (*callbackPtr)({});
-                        maybeDisable();
-                        callbackPtr->rethrow();
-                    } catch (...) {
-                        callbackPtr->rethrow();
-                    }
-                }});
-
         } catch (...) {
             callback.rethrow();
             return;
         }
+
+        auto request(makeRequest(path));
+
+        auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
+
+        getFileTransfer()->enqueueFileTransfer(request,
+            {[callbackPtr, this](std::future<FileTransferResult> result) {
+                try {
+                    (*callbackPtr)(std::move(result.get().data));
+                } catch (FileTransferError & e) {
+                    if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
+                        return (*callbackPtr)({});
+                    maybeDisable();
+                    callbackPtr->rethrow();
+                } catch (...) {
+                    callbackPtr->rethrow();
+                }
+            }});
     }
 
     /**
@@ -159,8 +159,9 @@ static Machine parseBuilderLine(const std::set<std::string> & defaultSystems, co
         const auto & str = tokens[fieldIndex];
         try {
             base64Decode(str);
-        } catch (const Error & e) {
-            throw FormatError("bad machine specification: a column #%lu in a row: '%s' is not valid base64 string: %s", fieldIndex, line, e.what());
+        } catch (FormatError & e) {
+            e.addTrace({}, "while parsing machine specification at a column #%lu in a row: '%s'", fieldIndex, line);
+            throw;
         }
         return str;
     };
@@ -6,6 +6,16 @@
 
 namespace nix {
 
+static std::string parsePublicHostKey(std::string_view host, std::string_view sshPublicHostKey)
+{
+    try {
+        return base64Decode(sshPublicHostKey);
+    } catch (Error & e) {
+        e.addTrace({}, "while decoding ssh public host key for host '%s'", host);
+        throw;
+    }
+}
+
 SSHMaster::SSHMaster(
     std::string_view host,
     std::string_view keyFile,

@@ -14,7 +24,7 @@ SSHMaster::SSHMaster(
     : host(host)
     , fakeSSH(host == "localhost")
     , keyFile(keyFile)
-    , sshPublicHostKey(sshPublicHostKey)
+    , sshPublicHostKey(parsePublicHostKey(host, sshPublicHostKey))
     , useMaster(useMaster && !fakeSSH)
     , compress(compress)
     , logFD(logFD)

@@ -38,7 +48,7 @@ void SSHMaster::addCommonSSHOpts(Strings & args)
         std::filesystem::path fileName = state->tmpDir->path() / "host-key";
         auto p = host.rfind("@");
         std::string thost = p != std::string::npos ? std::string(host, p + 1) : host;
-        writeFile(fileName.string(), thost + " " + base64Decode(sshPublicHostKey) + "\n");
+        writeFile(fileName.string(), thost + " " + sshPublicHostKey + "\n");
         args.insert(args.end(), {"-oUserKnownHostsFile=" + fileName.string()});
     }
     if (compress)
@@ -14,6 +14,9 @@ private:
     const std::string host;
     bool fakeSSH;
    const std::string keyFile;
+    /**
+     * Raw bytes, not Base64 encoding.
+     */
     const std::string sshPublicHostKey;
     const bool useMaster;
     const bool compress;
@@ -210,14 +210,16 @@ StorePath Store::addToStore(
         fsm = FileSerialisationMethod::NixArchive;
         break;
     }
-    auto source = sinkToSource([&](Sink & sink) {
-        dumpPath(path, sink, fsm, filter);
+    std::optional<StorePath> storePath;
+    auto sink = sourceToSink([&](Source & source) {
+        LengthSource lengthSource(source);
+        storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
+        if (lengthSource.total >= settings.warnLargePathThreshold)
+            warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
     });
-    LengthSource lengthSource(*source);
-    auto storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
-    if (lengthSource.total >= settings.warnLargePathThreshold)
-        warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
-    return storePath;
+    dumpPath(path, *sink, fsm, filter);
+    sink->finish();
+    return storePath.value();
 }
 
 void Store::addMultipleToStore(
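Editor's note: the `LengthSource` used above is a byte-counting pass-through. A minimal standalone analogue over `std::streambuf` (names here are illustrative, not Nix's API):

```cpp
// Sketch: a pass-through reader that counts how many bytes flow through it,
// so the caller can warn about unexpectedly large transfers afterwards.
#include <cstddef>
#include <iostream>
#include <sstream>
#include <string>

struct CountingBuf : std::streambuf {
    std::streambuf * inner;
    std::size_t total = 0;
    char ch;
    explicit CountingBuf(std::streambuf * inner) : inner(inner) {}
    int underflow() override
    {
        int c = inner->sbumpc();
        if (c == traits_type::eof()) return c;
        ++total; // count every byte that passes through
        ch = traits_type::to_char_type(c);
        setg(&ch, &ch, &ch + 1);
        return c;
    }
};

int main()
{
    std::istringstream src("hello world");
    CountingBuf counting(src.rdbuf());
    std::istream in(&counting);
    std::string word;
    while (in >> word) {}
    std::cout << "bytes read: " << counting.total << "\n"; // 11
}
```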
@@ -3000,6 +3000,7 @@ void LocalDerivationGoal::deleteTmpDir(bool force)
        might have privileged stuff (like a copy of netrc). */
     if (settings.keepFailed && !force && !drv->isBuiltin()) {
         printError("note: keeping build directory '%s'", tmpDir);
+        chmod(topTmpDir.c_str(), 0755);
         chmod(tmpDir.c_str(), 0755);
     }
     else
@@ -49,6 +49,7 @@ R""(
 (if (param "_ALLOW_LOCAL_NETWORKING")
     (begin
       (allow network* (remote ip "localhost:*"))
+      (allow network-inbound (local ip "*:*")) ; required to bind and listen
 
       ; Allow access to /etc/resolv.conf (which is a symlink to
       ; /private/var/run/resolv.conf).
@@ -23,7 +23,7 @@ struct ArchiveSettings : Config
         false,
 #endif
         "use-case-hack",
-        "Whether to enable a Darwin-specific hack for dealing with file name collisions."};
+        "Whether to enable a macOS-specific hack for dealing with file name case collisions."};
 };
 
 static ArchiveSettings archiveSettings;
@@ -214,11 +214,13 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath
     else if (t == "directory") {
         sink.createDirectory(path);
 
+        std::string prevName;
+
         while (1) {
             s = getString();
 
             if (s == "entry") {
-                std::string name, prevName;
+                std::string name;
 
                 s = getString();
                 if (s != "(") throw badArchive("expected open tag");

@@ -241,6 +243,9 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath
                         debug("case collision between '%1%' and '%2%'", i->first, name);
                         name += caseHackSuffix;
                         name += std::to_string(++i->second);
+                        auto j = names.find(name);
+                        if (j != names.end())
+                            throw Error("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first);
                     } else
                         names[name] = 0;
                 }
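Editor's note: a standalone sketch of the case-hack bookkeeping above, simplified to a lowercase-keyed map instead of Nix's real case-insensitive map:

```cpp
// Sketch: detect case-colliding directory entries and rename them with a
// numbered suffix, as the NAR parser's use-case-hack does.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <map>
#include <string>
#include <vector>

static const std::string caseHackSuffix = "~case~hack~";

static std::string lower(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
        [](unsigned char c) { return std::tolower(c); });
    return s;
}

int main()
{
    // Map from lowercased name to collision counter, as in the NAR parser.
    std::map<std::string, int> seen;
    std::vector<std::string> entries{"Test", "test", "TEST"};
    for (auto name : entries) {
        auto i = seen.find(lower(name));
        if (i != seen.end()) {
            // Collision: append a numbered suffix to disambiguate.
            name += caseHackSuffix + std::to_string(++i->second);
        } else
            seen[lower(name)] = 0;
        std::cout << name << "\n"; // Test, test~case~hack~1, TEST~case~hack~2
    }
}
```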
@@ -68,10 +68,19 @@ static RestoreSinkSettings restoreSinkSettings;
 
 static GlobalConfig::Register r1(&restoreSinkSettings);
 
+static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path)
+{
+    auto dst = src;
+    if (!path.rel().empty())
+        dst /= path.rel();
+    return dst;
+}
+
 void RestoreSink::createDirectory(const CanonPath & path)
 {
-    std::filesystem::create_directory(dstPath / path.rel());
+    auto p = append(dstPath, path);
+    if (!std::filesystem::create_directory(p))
+        throw Error("path '%s' already exists", p.string());
 };
 
 struct RestoreRegularFile : CreateRegularFileSink {

@@ -82,14 +91,6 @@ struct RestoreRegularFile : CreateRegularFileSink {
     void preallocateContents(uint64_t size) override;
 };
 
-static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path)
-{
-    auto dst = src;
-    if (!path.rel().empty())
-        dst /= path.rel();
-    return dst;
-}
-
 void RestoreSink::createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func)
 {
     auto p = append(dstPath, path);
@@ -245,7 +245,12 @@ Hash::Hash(std::string_view rest, HashAlgorithm algo, bool isSRI)
     }
 
     else if (isSRI || rest.size() == base64Len()) {
-        auto d = base64Decode(rest);
+        std::string d;
+        try {
+            d = base64Decode(rest);
+        } catch (Error & e) {
+            e.addTrace({}, "While decoding hash '%s'", rest);
+        }
         if (d.size() != hashSize)
             throw BadHash("invalid %s hash '%s'", isSRI ? "SRI" : "base-64", rest);
         assert(hashSize);
@@ -132,23 +132,24 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath &
 {
     assertNoSymlinks(path);
     DirEntries res;
-    for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) {
-        checkInterrupt();
-        auto type = [&]() -> std::optional<Type> {
-            std::filesystem::file_type nativeType;
-            try {
-                nativeType = entry.symlink_status().type();
-            } catch (std::filesystem::filesystem_error & e) {
-                // We cannot always stat the child. (Ideally there is no
-                // stat because the native directory entry has the type
-                // already, but this isn't always the case.)
-                if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted)
-                    return std::nullopt;
-                else throw;
-            }
+    try {
+        for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) {
+            checkInterrupt();
+            auto type = [&]() -> std::optional<Type> {
+                std::filesystem::file_type nativeType;
+                try {
+                    nativeType = entry.symlink_status().type();
+                } catch (std::filesystem::filesystem_error & e) {
+                    // We cannot always stat the child. (Ideally there is no
+                    // stat because the native directory entry has the type
+                    // already, but this isn't always the case.)
+                    if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted)
+                        return std::nullopt;
+                    else throw;
+                }
 
-            // cannot exhaustively enumerate because implementation-specific
-            // additional file types are allowed.
+                // cannot exhaustively enumerate because implementation-specific
+                // additional file types are allowed.
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wswitch-enum"
                 switch (nativeType) {

@@ -158,8 +159,11 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath &
                 default: return tMisc;
                 }
 #pragma GCC diagnostic pop
-        }();
-        res.emplace(entry.path().filename().string(), type);
+            }();
+            res.emplace(entry.path().filename().string(), type);
+        }
+    } catch (std::filesystem::filesystem_error & e) {
+        throw SysError("reading directory %1%", showPath(path));
+    }
     return res;
 }
@@ -171,55 +171,6 @@ size_t StringSource::read(char * data, size_t len)
 #error Coroutines are broken in this version of Boost!
 #endif
 
-/* A concrete datatype allow virtual dispatch of stack allocation methods. */
-struct VirtualStackAllocator {
-    StackAllocator *allocator = StackAllocator::defaultAllocator;
-
-    boost::context::stack_context allocate() {
-        return allocator->allocate();
-    }
-
-    void deallocate(boost::context::stack_context sctx) {
-        allocator->deallocate(sctx);
-    }
-};
-
-
-/* This class reifies the default boost coroutine stack allocation strategy with
-   a virtual interface. */
-class DefaultStackAllocator : public StackAllocator {
-    boost::coroutines2::default_stack stack;
-
-    boost::context::stack_context allocate() override {
-        return stack.allocate();
-    }
-
-    void deallocate(boost::context::stack_context sctx) override {
-        stack.deallocate(sctx);
-    }
-};
-
-static DefaultStackAllocator defaultAllocatorSingleton;
-
-StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton;
-
-
-std::shared_ptr<void> (*create_coro_gc_hook)() = []() -> std::shared_ptr<void> {
-    return {};
-};
-
-/* This class is used for entry and exit hooks on coroutines */
-class CoroutineContext {
-    /* Disable GC when entering the coroutine without the boehm patch,
-     * since it doesn't find the main thread stack in this case.
-     * std::shared_ptr<void> performs type-erasure, so it will call the right
-     * deleter. */
-    const std::shared_ptr<void> coro_gc_hook = create_coro_gc_hook();
-public:
-    CoroutineContext() {};
-    ~CoroutineContext() {};
-};
-
 std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
 {
     struct SourceToSink : FinishSink
@@ -241,14 +192,12 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
         cur = in;
 
         if (!coro) {
-            CoroutineContext ctx;
-            coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) {
-                LambdaSource source([&](char *out, size_t out_len) {
+            coro = coro_t::push_type([&](coro_t::pull_type & yield) {
+                LambdaSource source([&](char * out, size_t out_len) {
                     if (cur.empty()) {
                         yield();
-                        if (yield.get()) {
-                            return (size_t)0;
-                        }
+                        if (yield.get())
+                            throw EndOfFile("coroutine has finished");
                     }
 
                     size_t n = std::min(cur.size(), out_len);

@@ -263,20 +212,14 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
         if (!*coro) { unreachable(); }
 
         if (!cur.empty()) {
-            CoroutineContext ctx;
             (*coro)(false);
         }
     }
 
     void finish() override
     {
-        if (!coro) return;
-        if (!*coro) unreachable();
-        {
-            CoroutineContext ctx;
+        if (coro && *coro)
             (*coro)(true);
-        }
         if (*coro) unreachable();
     }
 };
@@ -307,8 +250,7 @@ std::unique_ptr<Source> sinkToSource(
     size_t read(char * data, size_t len) override
     {
         if (!coro) {
-            CoroutineContext ctx;
-            coro = coro_t::pull_type(VirtualStackAllocator{}, [&](coro_t::push_type & yield) {
+            coro = coro_t::pull_type([&](coro_t::push_type & yield) {
                 LambdaSink sink([&](std::string_view data) {
                     if (!data.empty()) yield(std::string(data));
                 });

@@ -320,7 +262,6 @@ std::unique_ptr<Source> sinkToSource(
 
         if (pos == cur.size()) {
             if (!cur.empty()) {
-                CoroutineContext ctx;
                 (*coro)();
             }
             cur = coro->get();
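Editor's note: with the GC hooks and custom stack allocators deleted, these coroutines are plain boost::coroutines2 objects on default stacks. A minimal standalone sketch of the pull-type producer/consumer shape `sinkToSource` builds on (not Nix's `Source`/`Sink` types; link with `-lboost_context`):

```cpp
// Sketch: a pull-type coroutine that yields chunks to its caller on demand.
#include <boost/coroutine2/coroutine.hpp>
#include <iostream>
#include <string>

using coro_t = boost::coroutines2::coroutine<std::string>;

int main()
{
    // The producer runs lazily: each yield suspends it until the consumer pulls.
    coro_t::pull_type source([](coro_t::push_type & yield) {
        yield("chunk 1");
        yield("chunk 2");
        yield("chunk 3");
    });

    for (auto & chunk : source)
        std::cout << chunk << "\n";
}
```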
@@ -557,27 +557,4 @@ struct FramedSink : nix::BufferedSink
     };
 };
 
-/**
- * Stack allocation strategy for sinkToSource.
- * Mutable to avoid a boehm gc dependency in libutil.
- *
- * boost::context doesn't provide a virtual class, so we define our own.
- */
-struct StackAllocator {
-    virtual boost::context::stack_context allocate() = 0;
-    virtual void deallocate(boost::context::stack_context sctx) = 0;
-
-    /**
-     * The stack allocator to use in sinkToSource and potentially elsewhere.
-     * It is reassigned by the initGC() method in libexpr.
-     */
-    static StackAllocator *defaultAllocator;
-};
-
-/* Disabling GC when entering a coroutine (without the boehm patch).
-   mutable to avoid boehm gc dependency in libutil.
-*/
-extern std::shared_ptr<void> (*create_coro_gc_hook)();
-
 }
@@ -14,17 +14,25 @@ BorrowedCryptoValue BorrowedCryptoValue::parse(std::string_view s)
     return {s.substr(0, colon), s.substr(colon + 1)};
 }
 
-Key::Key(std::string_view s)
+Key::Key(std::string_view s, bool sensitiveValue)
 {
     auto ss = BorrowedCryptoValue::parse(s);
 
     name = ss.name;
     key = ss.payload;
 
-    if (name == "" || key == "")
-        throw Error("secret key is corrupt");
+    try {
+        if (name == "" || key == "")
+            throw FormatError("key is corrupt");
 
-    key = base64Decode(key);
+        key = base64Decode(key);
+    } catch (Error & e) {
+        std::string extra;
+        if (!sensitiveValue)
+            extra = fmt(" with raw value '%s'", key);
+        e.addTrace({}, "while decoding key named '%s'%s", name, extra);
+        throw;
+    }
 }
 
 std::string Key::to_string() const

@@ -33,7 +41,7 @@ std::string Key::to_string() const
 }
 
 SecretKey::SecretKey(std::string_view s)
-    : Key(s)
+    : Key{s, true}
 {
     if (key.size() != crypto_sign_SECRETKEYBYTES)
         throw Error("secret key is not valid");

@@ -66,7 +74,7 @@ SecretKey SecretKey::generate(std::string_view name)
 }
 
 PublicKey::PublicKey(std::string_view s)
-    : Key(s)
+    : Key{s, false}
 {
     if (key.size() != crypto_sign_PUBLICKEYBYTES)
         throw Error("public key is not valid");

@@ -83,7 +91,12 @@ bool PublicKey::verifyDetached(std::string_view data, std::string_view sig) cons
 
 bool PublicKey::verifyDetachedAnon(std::string_view data, std::string_view sig) const
 {
-    auto sig2 = base64Decode(sig);
+    std::string sig2;
+    try {
+        sig2 = base64Decode(sig);
+    } catch (Error & e) {
+        e.addTrace({}, "while decoding signature '%s'", sig);
+    }
     if (sig2.size() != crypto_sign_BYTES)
         throw Error("signature is not valid");
 
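Editor's note: the verification these keys feed into is libsodium's detached-signature check. A minimal standalone sketch of that primitive (link with `-lsodium`; the message string is illustrative):

```cpp
// Sketch: ed25519 detached sign/verify with libsodium, the primitive behind
// PublicKey::verifyDetached.
#include <sodium.h>
#include <iostream>
#include <string>

int main()
{
    if (sodium_init() < 0) return 1;

    unsigned char pk[crypto_sign_PUBLICKEYBYTES];
    unsigned char sk[crypto_sign_SECRETKEYBYTES];
    crypto_sign_keypair(pk, sk);

    std::string msg = "example payload";
    unsigned char sig[crypto_sign_BYTES];
    crypto_sign_detached(sig, nullptr,
        reinterpret_cast<const unsigned char *>(msg.data()), msg.size(), sk);

    // Returns 0 when the signature matches the message and public key.
    int ok = crypto_sign_verify_detached(sig,
        reinterpret_cast<const unsigned char *>(msg.data()), msg.size(), pk);
    std::cout << (ok == 0 ? "valid" : "invalid") << "\n";
}
```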
@@ -31,15 +31,19 @@ struct Key
     std::string name;
     std::string key;
 
-    /**
-     * Construct Key from a string in the format
-     * ‘<name>:<key-in-base64>’.
-     */
-    Key(std::string_view s);
-
     std::string to_string() const;
 
 protected:
 
+    /**
+     * Construct Key from a string in the format
+     * ‘<name>:<key-in-base64>’.
+     *
+     * @param sensitiveValue Avoid displaying the raw Base64 in error
+     * messages to avoid leaking private keys.
+     */
+    Key(std::string_view s, bool sensitiveValue);
+
     Key(std::string_view name, std::string && key)
         : name(name), key(std::move(key)) { }
 };
@@ -8,6 +8,10 @@
 
 namespace nix {
 
+namespace fs {
+using namespace std::filesystem;
+}
+
 namespace {
 
 int callback_open(struct archive *, void * self)

@@ -102,14 +106,14 @@ TarArchive::TarArchive(Source & source, bool raw, std::optional<std::string> com
         "Failed to open archive (%s)");
 }
 
-TarArchive::TarArchive(const Path & path)
+TarArchive::TarArchive(const fs::path & path)
     : archive{archive_read_new()}
     , buffer(defaultBufferSize)
 {
     archive_read_support_filter_all(archive);
     enableSupportedFormats(archive);
     archive_read_set_option(archive, NULL, "mac-ext", NULL);
-    check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s");
+    check(archive_read_open_filename(archive, path.string().c_str(), 16384), "failed to open archive: %s");
 }
 
 void TarArchive::close()

@@ -123,7 +127,7 @@ TarArchive::~TarArchive()
     archive_read_free(this->archive);
 }
 
-static void extract_archive(TarArchive & archive, const Path & destDir)
+static void extract_archive(TarArchive & archive, const fs::path & destDir)
 {
     int flags = ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_SECURE_SYMLINKS | ARCHIVE_EXTRACT_SECURE_NODOTDOT;
 

@@ -140,7 +144,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
         else
             archive.check(r);
 
-        archive_entry_copy_pathname(entry, (destDir + "/" + name).c_str());
+        archive_entry_copy_pathname(entry, (destDir / name).string().c_str());
 
         // sources can and do contain dirs with no rx bits
         if (archive_entry_filetype(entry) == AE_IFDIR && (archive_entry_mode(entry) & 0500) != 0500)

@@ -149,7 +153,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
         // Patch hardlink path
        const char * original_hardlink = archive_entry_hardlink(entry);
         if (original_hardlink) {
-            archive_entry_copy_hardlink(entry, (destDir + "/" + original_hardlink).c_str());
+            archive_entry_copy_hardlink(entry, (destDir / original_hardlink).string().c_str());
         }
 
         archive.check(archive_read_extract(archive.archive, entry, flags));

@@ -158,19 +162,19 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
     archive.close();
 }
 
-void unpackTarfile(Source & source, const Path & destDir)
+void unpackTarfile(Source & source, const fs::path & destDir)
 {
     auto archive = TarArchive(source);
 
-    createDirs(destDir);
+    fs::create_directories(destDir);
     extract_archive(archive, destDir);
 }
 
-void unpackTarfile(const Path & tarFile, const Path & destDir)
+void unpackTarfile(const fs::path & tarFile, const fs::path & destDir)
 {
     auto archive = TarArchive(tarFile);
 
-    createDirs(destDir);
+    fs::create_directories(destDir);
     extract_archive(archive, destDir);
 }
@@ -15,7 +15,7 @@ struct TarArchive
 
     void check(int err, const std::string & reason = "failed to extract archive (%s)");
 
-    explicit TarArchive(const Path & path);
+    explicit TarArchive(const std::filesystem::path & path);
 
     /// @brief Create a generic archive from source.
     /// @param source - Input byte stream.

@@ -37,9 +37,9 @@ struct TarArchive
 
 int getArchiveFilterCodeByName(const std::string & method);
 
-void unpackTarfile(Source & source, const Path & destDir);
+void unpackTarfile(Source & source, const std::filesystem::path & destDir);
 
-void unpackTarfile(const Path & tarFile, const Path & destDir);
+void unpackTarfile(const std::filesystem::path & tarFile, const std::filesystem::path & destDir);
 
 time_t unpackTarfileToSink(TarArchive & archive, ExtendedFileSystemObjectSink & parseSink);
@@ -261,7 +261,7 @@ std::string base64Decode(std::string_view s)
 
         char digit = base64DecodeChars[(unsigned char) c];
         if (digit == npos)
-            throw Error("invalid character in Base64 string: '%c'", c);
+            throw FormatError("invalid character in Base64 string: '%c'", c);
 
         bits += 6;
         d = d << 6 | digit;
@@ -210,9 +210,13 @@ constexpr char treeNull[] = " ";
 
 
 /**
- * Base64 encoding/decoding.
+ * Encode arbitrary bytes as Base64.
  */
 std::string base64Encode(std::string_view s);
+
+/**
+ * Decode arbitrary bytes to Base64.
+ */
 std::string base64Decode(std::string_view s);
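Editor's note: a standalone sketch of the strict-decode behaviour these declarations and the util.cc hunk above describe — invalid input raises an error rather than being silently skipped. Plain `std::runtime_error` stands in for Nix's `FormatError`:

```cpp
// Sketch: Base64 decode that rejects invalid characters, mirroring the
// FormatError behaviour introduced above.
#include <array>
#include <iostream>
#include <stdexcept>
#include <string>
#include <string_view>

static std::string base64DecodeStrict(std::string_view s)
{
    static constexpr std::string_view chars =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    std::array<int, 256> table{};
    table.fill(-1);
    for (size_t i = 0; i < chars.size(); ++i)
        table[(unsigned char) chars[i]] = (int) i;

    std::string out;
    unsigned int d = 0, bits = 0;
    for (char c : s) {
        if (c == '=') break;     // padding ends the payload
        if (c == '\n') continue; // tolerate line breaks
        int digit = table[(unsigned char) c];
        if (digit == -1)
            throw std::runtime_error(std::string("invalid character in Base64 string: '") + c + "'");
        bits += 6;
        d = d << 6 | digit;
        if (bits >= 8) {
            out.push_back((char) (d >> (bits - 8) & 0xff));
            bits -= 8;
        }
    }
    return out;
}

int main()
{
    std::cout << base64DecodeStrict("aGVsbG8=") << "\n"; // hello
    try {
        base64DecodeStrict("aGV$sbG8=");
    } catch (std::runtime_error & e) {
        std::cerr << "error: " << e.what() << "\n";
    }
}
```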
@@ -163,7 +163,7 @@ static void main_nix_build(int argc, char * * argv)
         script = argv[1];
         try {
             auto lines = tokenizeString<Strings>(readFile(script), "\n");
-            if (std::regex_search(lines.front(), std::regex("^#!"))) {
+            if (!lines.empty() && std::regex_search(lines.front(), std::regex("^#!"))) {
                 lines.pop_front();
                 inShebang = true;
                 for (int i = 2; i < argc; ++i)
tests/functional/case-collision.nar (new binary file; contents not shown)
(deleted file) — 24 deletions
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-source common.sh
-
-TODO_NixOS
-
-clearStore
-
-rm -rf "$TEST_ROOT/case"
-
-opts=("--option" "use-case-hack" "true")
-
-# Check whether restoring and dumping a NAR that contains case
-# collisions is round-tripping, even on a case-insensitive system.
-
-nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case.nar
-nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > "$TEST_ROOT/case.nar"
-cmp case.nar "$TEST_ROOT/case.nar"
-[ "$(nix-hash "${opts[@]}" --type sha256 "$TEST_ROOT/case")" = "$(nix-hash --flat --type sha256 case.nar)" ]
-
-# Check whether we detect true collisions (e.g. those remaining after
-# removal of the suffix).
-touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3"
-(! nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > /dev/null)
tests/functional/duplicate.nar (new binary file; contents not shown)
@@ -104,6 +104,27 @@ noSubmoduleRepo=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$subR
 
 [[ $noSubmoduleRepoBaseline == $noSubmoduleRepo ]]
 
+# Test .gitmodules with entries that refer to non-existent objects or objects that are not submodules.
+cat >> $rootRepo/.gitmodules <<EOF
+[submodule "missing"]
+  path = missing
+  url = https://example.org/missing.git
+
+[submodule "file"]
+  path = file
+  url = https://example.org/file.git
+EOF
+echo foo > $rootRepo/file
+git -C $rootRepo add file
+git -C $rootRepo commit -a -m "Add bad submodules"
+
+rev=$(git -C $rootRepo rev-parse HEAD)
+
+r=$(nix eval --raw --expr "builtins.fetchGit { url = file://$rootRepo; rev = \"$rev\"; submodules = true; }")
+
+[[ -f $r/file ]]
+[[ ! -e $r/missing ]]
+
 # Test relative submodule URLs.
 rm $TEST_HOME/.cache/nix/fetcher-cache*
 rm -rf $rootRepo/.git $rootRepo/.gitmodules $rootRepo/sub
@@ -90,7 +90,7 @@ nix_tests = \
   derivation-advanced-attributes.sh \
   import-derivation.sh \
   nix_path.sh \
-  case-hack.sh \
+  nars.sh \
   placeholders.sh \
   ssh-relay.sh \
   build.sh \
tests/functional/nars.sh (new executable file) — 94 additions
@@ -0,0 +1,94 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+TODO_NixOS
+
+clearStore
+
+# Check that NARs with duplicate directory entries are rejected.
+rm -rf "$TEST_ROOT/out"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "NAR directory is not sorted"
+
+# Check that nix-store --restore fails if the output already exists.
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists"
+
+rm -rf "$TEST_ROOT/out"
+echo foo > "$TEST_ROOT/out"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "File exists"
+
+rm -rf "$TEST_ROOT/out"
+ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "File exists"
+
+mkdir -p "$TEST_ROOT/out2"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists"
+
+# The same, but for a regular file.
+nix-store --dump ./nars.sh > "$TEST_ROOT/tmp.nar"
+
+rm -rf "$TEST_ROOT/out"
+nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
+
+rm -rf "$TEST_ROOT/out"
+mkdir -p "$TEST_ROOT/out"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
+
+rm -rf "$TEST_ROOT/out"
+ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
+
+mkdir -p "$TEST_ROOT/out2"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
+
+# The same, but for a symlink.
+ln -sfn foo "$TEST_ROOT/symlink"
+nix-store --dump "$TEST_ROOT/symlink" > "$TEST_ROOT/tmp.nar"
+
+rm -rf "$TEST_ROOT/out"
+nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar"
+[[ -L "$TEST_ROOT/out" ]]
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
+
+rm -rf "$TEST_ROOT/out"
+mkdir -p "$TEST_ROOT/out"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
+
+rm -rf "$TEST_ROOT/out"
+ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
+
+mkdir -p "$TEST_ROOT/out2"
+expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
+
+# Check whether restoring and dumping a NAR that contains case
+# collisions is round-tripping, even on a case-insensitive system.
+rm -rf "$TEST_ROOT/case"
+opts=("--option" "use-case-hack" "true")
+nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case.nar
+nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > "$TEST_ROOT/case.nar"
+cmp case.nar "$TEST_ROOT/case.nar"
+[ "$(nix-hash "${opts[@]}" --type sha256 "$TEST_ROOT/case")" = "$(nix-hash --flat --type sha256 case.nar)" ]
+
+# Check whether we detect true collisions (e.g. those remaining after
+# removal of the suffix).
+touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3"
+(! nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > /dev/null)
+
+# Detect NARs that have a directory entry that after case-hacking
+# collides with another entry (e.g. a directory containing 'Test',
+# 'Test~nix~case~hack~1' and 'test').
+rm -rf "$TEST_ROOT/case"
+expectStderr 1 nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case-collision.nar | grepQuiet "NAR contains file name 'test' that collides with case-hacked file name 'Test~nix~case~hack~1'"
+
+# Deserializing a NAR that contains file names that Unicode-normalize
+# to the same name should fail on macOS but succeed on Linux.
+rm -rf "$TEST_ROOT/out"
+if [[ $(uname) = Darwin ]]; then
+    expectStderr 1 nix-store --restore "$TEST_ROOT/out" < unnormalized.nar | grepQuiet "path '.*/out/â' already exists"
+else
+    nix-store --restore "$TEST_ROOT/out" < unnormalized.nar
+    [[ -e $TEST_ROOT/out/â ]]
+    [[ -e $TEST_ROOT/out/â ]]
+fi
tests/functional/unnormalized.nar (new binary file; contents not shown)
@ -146,4 +146,6 @@ in
|
|||
functional_root = runNixOSTestFor "x86_64-linux" ./functional/as-root.nix;
|
||||
|
||||
user-sandboxing = runNixOSTestFor "x86_64-linux" ./user-sandboxing;
|
||||
|
||||
fetchurl = runNixOSTestFor "x86_64-linux" ./fetchurl.nix;
|
||||
}
|
||||
|
|
tests/nixos/fetchurl.nix (new file) — 78 additions
@@ -0,0 +1,78 @@
+# Test whether builtin:fetchurl properly performs TLS certificate
+# checks on HTTPS servers.
+
+{ lib, config, pkgs, ... }:
+
+let
+
+  makeTlsCert = name: pkgs.runCommand name {
+    nativeBuildInputs = with pkgs; [ openssl ];
+  } ''
+    mkdir -p $out
+    openssl req -x509 \
+      -subj '/CN=${name}/' -days 49710 \
+      -addext 'subjectAltName = DNS:${name}' \
+      -keyout "$out/key.pem" -newkey ed25519 \
+      -out "$out/cert.pem" -noenc
+  '';
+
+  goodCert = makeTlsCert "good";
+  badCert = makeTlsCert "bad";
+
+in
+
+{
+  name = "nss-preload";
+
+  nodes = {
+    machine = { lib, pkgs, ... }: {
+      services.nginx = {
+        enable = true;
+
+        virtualHosts."good" = {
+          addSSL = true;
+          sslCertificate = "${goodCert}/cert.pem";
+          sslCertificateKey = "${goodCert}/key.pem";
+          root = pkgs.runCommand "nginx-root" {} ''
+            mkdir "$out"
+            echo 'hello world' > "$out/index.html"
+          '';
+        };
+
+        virtualHosts."bad" = {
+          addSSL = true;
+          sslCertificate = "${badCert}/cert.pem";
+          sslCertificateKey = "${badCert}/key.pem";
+          root = pkgs.runCommand "nginx-root" {} ''
+            mkdir "$out"
+            echo 'foobar' > "$out/index.html"
+          '';
+        };
+      };
+
+      security.pki.certificateFiles = [ "${goodCert}/cert.pem" ];
+
+      networking.hosts."127.0.0.1" = [ "good" "bad" ];
+
+      virtualisation.writableStore = true;
+
+      nix.settings.experimental-features = "nix-command";
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    machine.wait_for_unit("nginx")
+    machine.wait_for_open_port(443)
+
+    out = machine.succeed("curl https://good/index.html")
+    assert out == "hello world\n"
+
+    # Fetching from a server with a trusted cert should work.
+    machine.succeed("nix build --no-substitute --expr 'import <nix/fetchurl.nix> { url = \"https://good/index.html\"; hash = \"sha256-qUiQTy8PR5uPgZdpSzAYSw0u0cHNKh7A+4XSmaGSpEc=\"; }'")
+
+    # Fetching from a server with an untrusted cert should fail.
+    err = machine.fail("nix build --no-substitute --expr 'import <nix/fetchurl.nix> { url = \"https://bad/index.html\"; hash = \"sha256-rsBwZF/lPuOzdjBZN2E08FjMM3JHyXit0Xi2zN+wAZ8=\"; }' 2>&1")
+    print(err)
+    assert "SSL certificate problem: self-signed certificate" in err
+  '';
+}
@@ -8,7 +8,7 @@
 #include "tests/nix_api_expr.hh"
 #include "tests/string_callback.hh"
 
-#include "gmock/gmock.h"
+#include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
 namespace nixC {