From 450252c92c3b5d0e7e71398fdc9f7630cf197326 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 8 Aug 2024 17:21:00 +0200 Subject: [PATCH 01/67] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index 5827d9bfd..29690d10f 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.24.2 +2.24.3 From 5b62a1dbd60f716b88c9da5a78ae1ea533cc82d9 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 16 Aug 2024 07:09:27 -0700 Subject: [PATCH 02/67] flake.lock: Update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'nixpkgs': 'github:NixOS/nixpkgs/63d37ccd2d178d54e7fb691d7ec76000740ea24a?narHash=sha256-7cCC8%2BTdq1%2B3OPyc3%2BgVo9dzUNkNIQfwSDJ2HSi2u3o%3D' (2024-07-21) → 'github:NixOS/nixpkgs/c3d4ac725177c030b1e289015989da2ad9d56af0?narHash=sha256-sqLwJcHYeWLOeP/XoLwAtYjr01TISlkOfz%2BNG82pbdg%3D' (2024-08-15) (cherry picked from commit 8866d2cd838902d45782541efe08efc1e1f1a2ab) --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 2ac413a69..b5d0b881c 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1721548954, - "narHash": "sha256-7cCC8+Tdq1+3OPyc3+gVo9dzUNkNIQfwSDJ2HSi2u3o=", + "lastModified": 1723688146, + "narHash": "sha256-sqLwJcHYeWLOeP/XoLwAtYjr01TISlkOfz+NG82pbdg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "63d37ccd2d178d54e7fb691d7ec76000740ea24a", + "rev": "c3d4ac725177c030b1e289015989da2ad9d56af0", "type": "github" }, "original": { From d550139191cfddb313f431d7f2c68d7873a62991 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 16 Aug 2024 07:22:30 -0700 Subject: [PATCH 03/67] ci: check that all outputs for all systems can evaluate (cherry picked from commit aa3d35c1f4145c9532620a20d6727c2214eab054) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4eb9cf10d..e9397621e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,6 +49,7 @@ jobs: done ) & - run: nix --experimental-features 'nix-command flakes' flake check -L + - run: nix --experimental-features 'nix-command flakes' flake show --all-systems --json # Steps to test CI automation in your own fork. # Cachix: From 4e707b8e577a9f41f91fc4b6ddb1ac5c3bb47b97 Mon Sep 17 00:00:00 2001 From: Andrew Marshall Date: Thu, 8 Aug 2024 14:29:40 -0400 Subject: [PATCH 04/67] libstore: fix port binding in __darwinAllowLocalNetworking sandbox In d60c3f7f7c83134b5b4470ed84b6d5ed38e28753, this was changed to close a hole in the sandbox. Unfortunately, this was too restrictive such that it made local port binding fail, thus making derivations that needed `__darwinAllowLocalNetworking` gain nearly nothing, and thus largely fail (as the primary use for it is to enable port binding). This unfortunately does mean that a sandboxed build process can, in coordination with an actor outside the sandbox, escape the sandbox by binding a port and connecting to it externally to send data. I do not see a way around this with my experimentation and understanding of the (quite undocumented) macOS sandbox profile API. 
Notably it seems not possible to use the sandbox to do any of: - Restrict the remote IP of inbound network requests - Restrict the address being bound to As such, the `(local ip "*:*")` here appears to be functionally no different than `(local ip "localhost:*")` (however it *should* be different than removing the filter entirely, as that would make it also apply to non-IP networking). Doing `(allow network-inbound (require-all (local ip "localhost:*") (remote ip "localhost:*")))` causes listening to fail. Note that `network-inbound` implies `network-bind`. (cherry picked from commit 00f6db36fd72c9e82e923ce89d0ddb7d2e738528) --- src/libstore/unix/build/sandbox-defaults.sb | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstore/unix/build/sandbox-defaults.sb b/src/libstore/unix/build/sandbox-defaults.sb index 6da01b735..15cd6daf5 100644 --- a/src/libstore/unix/build/sandbox-defaults.sb +++ b/src/libstore/unix/build/sandbox-defaults.sb @@ -49,6 +49,7 @@ R""( (if (param "_ALLOW_LOCAL_NETWORKING") (begin (allow network* (remote ip "localhost:*")) + (allow network-inbound (local ip "*:*")) ; required to bind and listen ; Allow access to /etc/resolv.conf (which is a symlink to ; /private/var/run/resolv.conf). From 90fb4e8890c393d860521cb13e892a5cd19ab395 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 19 Aug 2024 12:46:17 +0200 Subject: [PATCH 05/67] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index 29690d10f..b71a29b1f 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.24.3 +2.24.4 From 7befd60c01c1593dd2db86fd4c695c3e9f26416e Mon Sep 17 00:00:00 2001 From: Tom Bereknyei Date: Sun, 18 Aug 2024 22:35:54 -0400 Subject: [PATCH 06/67] fix: check to see if there are any lines before (cherry picked from commit 59db8fd62b5300afbbabb1e8a12d547b336a3bdf) --- src/nix-build/nix-build.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 0ce987d8a..a5b9e1e54 100644 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -163,7 +163,7 @@ static void main_nix_build(int argc, char * * argv) script = argv[1]; try { auto lines = tokenizeString(readFile(script), "\n"); - if (std::regex_search(lines.front(), std::regex("^#!"))) { + if (!lines.empty() && std::regex_search(lines.front(), std::regex("^#!"))) { lines.pop_front(); inShebang = true; for (int i = 2; i < argc; ++i) From 30a57328d2b53ec8d140af0065f4875501d5c28d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 23 Aug 2024 13:15:30 +0200 Subject: [PATCH 07/67] Backport https://github.com/NixOS/nix/pull/11152 --- .../src/installation/prerequisites-source.md | 2 - src/libexpr/eval-gc.cc | 134 ------------------ src/libstore/store-api.cc | 16 ++- src/libutil/serialise.cc | 71 +--------- src/libutil/serialise.hh | 23 --- 5 files changed, 15 insertions(+), 231 deletions(-) diff --git a/doc/manual/src/installation/prerequisites-source.md b/doc/manual/src/installation/prerequisites-source.md index 4aafa6d27..c346a0a4b 100644 --- a/doc/manual/src/installation/prerequisites-source.md +++ b/doc/manual/src/installation/prerequisites-source.md @@ -39,8 +39,6 @@ `pkgconfig` and the Boehm garbage collector, and pass the flag `--enable-gc` to `configure`. - For `bdw-gc` <= 8.2.4 Nix needs a [small patch](https://github.com/NixOS/nix/blob/ac4d2e7b857acdfeac35ac8a592bdecee2d29838/boehmgc-traceable_allocator-public.diff) to be applied. - - The `boost` library of version 1.66.0 or higher. 
It can be obtained from the official web site . diff --git a/src/libexpr/eval-gc.cc b/src/libexpr/eval-gc.cc index 2f0e8c0c9..07ce05a2c 100644 --- a/src/libexpr/eval-gc.cc +++ b/src/libexpr/eval-gc.cc @@ -32,122 +32,6 @@ static void * oomHandler(size_t requested) throw std::bad_alloc(); } -class BoehmGCStackAllocator : public StackAllocator -{ - boost::coroutines2::protected_fixedsize_stack stack{ - // We allocate 8 MB, the default max stack size on NixOS. - // A smaller stack might be quicker to allocate but reduces the stack - // depth available for source filter expressions etc. - std::max(boost::context::stack_traits::default_size(), static_cast(8 * 1024 * 1024))}; - - // This is specific to boost::coroutines2::protected_fixedsize_stack. - // The stack protection page is included in sctx.size, so we have to - // subtract one page size from the stack size. - std::size_t pfss_usable_stack_size(boost::context::stack_context & sctx) - { - return sctx.size - boost::context::stack_traits::page_size(); - } - -public: - boost::context::stack_context allocate() override - { - auto sctx = stack.allocate(); - - // Stacks generally start at a high address and grow to lower addresses. - // Architectures that do the opposite are rare; in fact so rare that - // boost_routine does not implement it. - // So we subtract the stack size. - GC_add_roots(static_cast(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp); - return sctx; - } - - void deallocate(boost::context::stack_context sctx) override - { - GC_remove_roots(static_cast(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp); - stack.deallocate(sctx); - } -}; - -static BoehmGCStackAllocator boehmGCStackAllocator; - -/** - * When a thread goes into a coroutine, we lose its original sp until - * control flow returns to the thread. - * While in the coroutine, the sp points outside the thread stack, - * so we can detect this and push the entire thread stack instead, - * as an approximation. - * The coroutine's stack is covered by `BoehmGCStackAllocator`. - * This is not an optimal solution, because the garbage is scanned when a - * coroutine is active, for both the coroutine and the original thread stack. - * However, the implementation is quite lean, and usually we don't have active - * coroutines during evaluation, so this is acceptable. - */ -void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id) -{ - void *& sp = *sp_ptr; - auto pthread_id = reinterpret_cast(_pthread_id); -# ifndef __APPLE__ - pthread_attr_t pattr; -# endif - size_t osStackSize; - // The low address of the stack, which grows down. 
- void * osStackLimit; - void * osStackBase; - -# ifdef __APPLE__ - osStackSize = pthread_get_stacksize_np(pthread_id); - osStackLimit = pthread_get_stackaddr_np(pthread_id); -# else - if (pthread_attr_init(&pattr)) { - throw Error("fixupBoehmStackPointer: pthread_attr_init failed"); - } -# ifdef HAVE_PTHREAD_GETATTR_NP - if (pthread_getattr_np(pthread_id, &pattr)) { - throw Error("fixupBoehmStackPointer: pthread_getattr_np failed"); - } -# elif HAVE_PTHREAD_ATTR_GET_NP - if (!pthread_attr_init(&pattr)) { - throw Error("fixupBoehmStackPointer: pthread_attr_init failed"); - } - if (!pthread_attr_get_np(pthread_id, &pattr)) { - throw Error("fixupBoehmStackPointer: pthread_attr_get_np failed"); - } -# else -# error "Need one of `pthread_attr_get_np` or `pthread_getattr_np`" -# endif - if (pthread_attr_getstack(&pattr, &osStackLimit, &osStackSize)) { - throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed"); - } - if (pthread_attr_destroy(&pattr)) { - throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed"); - } -# endif - osStackBase = (char *) osStackLimit + osStackSize; - // NOTE: We assume the stack grows down, as it does on all architectures we support. - // Architectures that grow the stack up are rare. - if (sp >= osStackBase || sp < osStackLimit) { // sp is outside the os stack - sp = osStackLimit; - } -} - -/* Disable GC while this object lives. Used by CoroutineContext. - * - * Boehm keeps a count of GC_disable() and GC_enable() calls, - * and only enables GC when the count matches. - */ -class BoehmDisableGC -{ -public: - BoehmDisableGC() - { - GC_disable(); - }; - ~BoehmDisableGC() - { - GC_enable(); - }; -}; - static inline void initGCReal() { /* Initialise the Boehm garbage collector. */ @@ -168,24 +52,6 @@ static inline void initGCReal() GC_set_oom_fn(oomHandler); - StackAllocator::defaultAllocator = &boehmGCStackAllocator; - -// TODO: Remove __APPLE__ condition. -// Comment suggests an implementation that works on darwin and windows -// https://github.com/ivmai/bdwgc/issues/362#issuecomment-1936672196 -# if GC_VERSION_MAJOR >= 8 && GC_VERSION_MINOR >= 2 && GC_VERSION_MICRO >= 4 && !defined(__APPLE__) - GC_set_sp_corrector(&fixupBoehmStackPointer); - - if (!GC_get_sp_corrector()) { - printTalkative("BoehmGC on this platform does not support sp_corrector; will disable GC inside coroutines"); - /* Used to disable GC when entering coroutines on macOS */ - create_coro_gc_hook = []() -> std::shared_ptr { return std::make_shared(); }; - } -# else -# warning \ - "BoehmGC version does not support GC while coroutine exists. GC will be disabled inside coroutines. Consider updating bdw-gc to 8.2.4 or later." -# endif - /* Set the initial heap size to something fairly big (25% of physical RAM, up to a maximum of 384 MiB) so that in most cases we don't need to garbage collect at all. 
(Collection has a diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index b3e5ad014..8eef340cc 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -210,14 +210,16 @@ StorePath Store::addToStore( fsm = FileSerialisationMethod::NixArchive; break; } - auto source = sinkToSource([&](Sink & sink) { - dumpPath(path, sink, fsm, filter); + std::optional storePath; + auto sink = sourceToSink([&](Source & source) { + LengthSource lengthSource(source); + storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair); + if (lengthSource.total >= settings.warnLargePathThreshold) + warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total)); }); - LengthSource lengthSource(*source); - auto storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair); - if (lengthSource.total >= settings.warnLargePathThreshold) - warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total)); - return storePath; + dumpPath(path, *sink, fsm, filter); + sink->finish(); + return storePath.value(); } void Store::addMultipleToStore( diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 4899134d7..5352a436b 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -171,55 +171,6 @@ size_t StringSource::read(char * data, size_t len) #error Coroutines are broken in this version of Boost! #endif -/* A concrete datatype allow virtual dispatch of stack allocation methods. */ -struct VirtualStackAllocator { - StackAllocator *allocator = StackAllocator::defaultAllocator; - - boost::context::stack_context allocate() { - return allocator->allocate(); - } - - void deallocate(boost::context::stack_context sctx) { - allocator->deallocate(sctx); - } -}; - - -/* This class reifies the default boost coroutine stack allocation strategy with - a virtual interface. */ -class DefaultStackAllocator : public StackAllocator { - boost::coroutines2::default_stack stack; - - boost::context::stack_context allocate() override { - return stack.allocate(); - } - - void deallocate(boost::context::stack_context sctx) override { - stack.deallocate(sctx); - } -}; - -static DefaultStackAllocator defaultAllocatorSingleton; - -StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton; - - -std::shared_ptr (*create_coro_gc_hook)() = []() -> std::shared_ptr { - return {}; -}; - -/* This class is used for entry and exit hooks on coroutines */ -class CoroutineContext { - /* Disable GC when entering the coroutine without the boehm patch, - * since it doesn't find the main thread stack in this case. - * std::shared_ptr performs type-erasure, so it will call the right - * deleter. 
*/ - const std::shared_ptr coro_gc_hook = create_coro_gc_hook(); -public: - CoroutineContext() {}; - ~CoroutineContext() {}; -}; - std::unique_ptr sourceToSink(std::function fun) { struct SourceToSink : FinishSink @@ -241,14 +192,12 @@ std::unique_ptr sourceToSink(std::function fun) cur = in; if (!coro) { - CoroutineContext ctx; - coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) { - LambdaSource source([&](char *out, size_t out_len) { + coro = coro_t::push_type([&](coro_t::pull_type & yield) { + LambdaSource source([&](char * out, size_t out_len) { if (cur.empty()) { yield(); - if (yield.get()) { - return (size_t)0; - } + if (yield.get()) + throw EndOfFile("coroutine has finished"); } size_t n = std::min(cur.size(), out_len); @@ -263,20 +212,14 @@ std::unique_ptr sourceToSink(std::function fun) if (!*coro) { unreachable(); } if (!cur.empty()) { - CoroutineContext ctx; (*coro)(false); } } void finish() override { - if (!coro) return; - if (!*coro) unreachable(); - { - CoroutineContext ctx; + if (coro && *coro) (*coro)(true); - } - if (*coro) unreachable(); } }; @@ -307,8 +250,7 @@ std::unique_ptr sinkToSource( size_t read(char * data, size_t len) override { if (!coro) { - CoroutineContext ctx; - coro = coro_t::pull_type(VirtualStackAllocator{}, [&](coro_t::push_type & yield) { + coro = coro_t::pull_type([&](coro_t::push_type & yield) { LambdaSink sink([&](std::string_view data) { if (!data.empty()) yield(std::string(data)); }); @@ -320,7 +262,6 @@ std::unique_ptr sinkToSource( if (pos == cur.size()) { if (!cur.empty()) { - CoroutineContext ctx; (*coro)(); } cur = coro->get(); diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index c7290dcef..e9f3e3a4a 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -557,27 +557,4 @@ struct FramedSink : nix::BufferedSink }; }; -/** - * Stack allocation strategy for sinkToSource. - * Mutable to avoid a boehm gc dependency in libutil. - * - * boost::context doesn't provide a virtual class, so we define our own. - */ -struct StackAllocator { - virtual boost::context::stack_context allocate() = 0; - virtual void deallocate(boost::context::stack_context sctx) = 0; - - /** - * The stack allocator to use in sinkToSource and potentially elsewhere. - * It is reassigned by the initGC() method in libexpr. - */ - static StackAllocator *defaultAllocator; -}; - -/* Disabling GC when entering a coroutine (without the boehm patch). - mutable to avoid boehm gc dependency in libutil. - */ -extern std::shared_ptr (*create_coro_gc_hook)(); - - } From 9d8669b14a402a8fd440fdce0ab3d874319a6984 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 23 Aug 2024 16:15:11 +0200 Subject: [PATCH 08/67] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index b71a29b1f..23a93836a 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.24.4 +2.24.5 From 0c25bea7cca21cc8e56ce9ed5b5391289fd30e04 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2024 17:28:11 +0200 Subject: [PATCH 09/67] Respect max-substitution-jobs again This broke in #11005. Any number of PathSubstitutionGoals would be woken up by a single build slot becoming available. If there are a lot of substitution goals active, this could lead to us running out of file descriptors (especially on macOS where the default limit is 256). 
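For context, the limit being enforced here is the `max-substitution-jobs` setting (per the code below, even a value of 0 still lets a single substituter run, to prevent infinite waiting). A sketch of how the limit is configured (the value 16 is only an illustrative choice):

```
# in nix.conf: cap the number of concurrent substitution jobs
max-substitution-jobs = 16

# or for a single invocation:
nix-build '<nixpkgs>' -A hello --option max-substitution-jobs 16
```
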
(cherry picked from commit a33cb8af5693af56dd69073dc5dddb4c6900ad7a) --- src/libstore/build/substitution-goal.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index 0152f1808..a26eea820 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -183,7 +183,7 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref sub, /* Make sure that we are allowed to start a substitution. Note that even if maxSubstitutionJobs == 0, we still allow a substituter to run. This prevents infinite waiting. */ - if (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) { + while (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) { worker.waitForBuildSlot(shared_from_this()); co_await Suspend{}; } From c21f664e82aef1d44d71e1c5cc4e0021b4f8a1b8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2024 17:28:55 +0200 Subject: [PATCH 10/67] "unsigned" -> size_t Slight cleanup. (cherry picked from commit b7acd1c4145c7316085f2a12bfa26ef742ac6146) --- src/libstore/build/worker.cc | 4 ++-- src/libstore/build/worker.hh | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index ab0ba67b5..dbe86f43f 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -184,13 +184,13 @@ void Worker::wakeUp(GoalPtr goal) } -unsigned Worker::getNrLocalBuilds() +size_t Worker::getNrLocalBuilds() { return nrLocalBuilds; } -unsigned Worker::getNrSubstitutions() +size_t Worker::getNrSubstitutions() { return nrSubstitutions; } diff --git a/src/libstore/build/worker.hh b/src/libstore/build/worker.hh index 33a7bf015..e083dbea6 100644 --- a/src/libstore/build/worker.hh +++ b/src/libstore/build/worker.hh @@ -92,12 +92,12 @@ private: * Number of build slots occupied. This includes local builds but does not * include substitutions or remote builds via the build hook. */ - unsigned int nrLocalBuilds; + size_t nrLocalBuilds; /** * Number of substitution slots occupied. */ - unsigned int nrSubstitutions; + size_t nrSubstitutions; /** * Maps used to prevent multiple instantiations of a goal for the @@ -235,12 +235,12 @@ public: * Return the number of local build processes currently running (but not * remote builds via the build hook). */ - unsigned int getNrLocalBuilds(); + size_t getNrLocalBuilds(); /** * Return the number of substitution processes currently running. */ - unsigned int getNrSubstitutions(); + size_t getNrSubstitutions(); /** * Registers a running child process. `inBuildSlot` means that From ea7abb58b59562952262a0ef43e30f9f85639cd4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 3 Sep 2024 16:51:36 +0200 Subject: [PATCH 11/67] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index 23a93836a..c5f92d6f8 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.24.5 +2.24.6 From 0679505d8ce991416650504e409d8c2055a8f6bd Mon Sep 17 00:00:00 2001 From: "Travis A. Everett" Date: Tue, 2 Jul 2024 21:02:45 -0500 Subject: [PATCH 12/67] install-darwin: fix _nixbld uids for macOS sequoia Starting in macOS 15 Sequoia, macOS daemon UIDs are encroaching on our default UIDs of 301-332. 
This commit relocates our range up to avoid clashing with the current UIDs of 301-304 and buy us a little time while still leaving headroom for people installing more than 32 users. (cherry picked from commit df36ff0d1e60f59eb3e0442fa335252421ec8057) --- scripts/bigsur-nixbld-user-migration.sh | 2 +- scripts/install-darwin-multi-user.sh | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/bigsur-nixbld-user-migration.sh b/scripts/bigsur-nixbld-user-migration.sh index 0eb312e07..bc42e02e6 100755 --- a/scripts/bigsur-nixbld-user-migration.sh +++ b/scripts/bigsur-nixbld-user-migration.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -((NEW_NIX_FIRST_BUILD_UID=301)) +((NEW_NIX_FIRST_BUILD_UID=350)) id_available(){ dscl . list /Users UniqueID | grep -E '\b'"$1"'\b' >/dev/null diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh index 24c9052f9..bd1a54ad8 100644 --- a/scripts/install-darwin-multi-user.sh +++ b/scripts/install-darwin-multi-user.sh @@ -4,7 +4,17 @@ set -eu set -o pipefail # System specific settings -export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-301}" +# Notes: +# - up to macOS Big Sur we used the same GID/UIDs as Linux (30000:30001-32) +# - we changed UID to 301 because Big Sur updates failed into recovery mode +# we're targeting the 200-400 UID range for role users mentioned in the +# usage note for sysadminctl +# - we changed UID to 350 because Sequoia now uses UIDs 300-304 for its own +# daemon users +# - we changed GID to 350 alongside above just because it hides the nixbld +# group from the Users & Groups settings panel :) +export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-350}" +export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-350}" export NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d" readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist From c5a0e624d94505d6544ed2175ecaa08d78cf4b6e Mon Sep 17 00:00:00 2001 From: "Travis A. Everett" Date: Tue, 2 Jul 2024 21:22:35 -0500 Subject: [PATCH 13/67] install-darwin: move nixbld gid to match first UID (cherry picked from commit 75567423fb6163559575c38867cda09b754364d7) --- scripts/install-multi-user.sh | 6 ++---- scripts/install-systemd-multi-user.sh | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index 6aee073e3..a487d459f 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -23,10 +23,10 @@ readonly RED='\033[31m' # installer allows overriding build user count to speed up installation # as creating each user takes non-trivial amount of time on macos readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32} -readonly NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}" readonly NIX_BUILD_GROUP_NAME="nixbld" # each system specific installer must set these: # NIX_FIRST_BUILD_UID +# NIX_BUILD_GROUP_ID # NIX_BUILD_USER_NAME_TEMPLATE # Please don't change this. We don't support it, because the # default shell profile that comes with Nix doesn't support it. @@ -530,9 +530,7 @@ It seems the build group $NIX_BUILD_GROUP_NAME already exists, but with the UID $primary_group_id. This script can't really handle that right now, so I'm going to give up. -You can fix this by editing this script and changing the -NIX_BUILD_GROUP_ID variable near the top to from $NIX_BUILD_GROUP_ID -to $primary_group_id and re-run. +You can export NIX_BUILD_GROUP_ID=$primary_group_id and re-run. 
EOF else row " Exists" "Yes" diff --git a/scripts/install-systemd-multi-user.sh b/scripts/install-systemd-multi-user.sh index a62ed7e3a..a79a69990 100755 --- a/scripts/install-systemd-multi-user.sh +++ b/scripts/install-systemd-multi-user.sh @@ -5,6 +5,7 @@ set -o pipefail # System specific settings export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-30001}" +export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}" export NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d" readonly SERVICE_SRC=/lib/systemd/system/nix-daemon.service From 8d0414d682b18323bab362d31e8f1c43125a63d4 Mon Sep 17 00:00:00 2001 From: Emily Date: Mon, 26 Aug 2024 17:59:58 +0100 Subject: [PATCH 14/67] install-darwin: increment base UID by 1 (#15) (cherry picked from commit 11cf29b15c8ea144035eb6a9d9f31bb05eee2048) --- scripts/bigsur-nixbld-user-migration.sh | 2 +- scripts/install-darwin-multi-user.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/bigsur-nixbld-user-migration.sh b/scripts/bigsur-nixbld-user-migration.sh index bc42e02e6..57f65da72 100755 --- a/scripts/bigsur-nixbld-user-migration.sh +++ b/scripts/bigsur-nixbld-user-migration.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -((NEW_NIX_FIRST_BUILD_UID=350)) +((NEW_NIX_FIRST_BUILD_UID=351)) id_available(){ dscl . list /Users UniqueID | grep -E '\b'"$1"'\b' >/dev/null diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh index bd1a54ad8..89c66b8f4 100644 --- a/scripts/install-darwin-multi-user.sh +++ b/scripts/install-darwin-multi-user.sh @@ -9,11 +9,11 @@ set -o pipefail # - we changed UID to 301 because Big Sur updates failed into recovery mode # we're targeting the 200-400 UID range for role users mentioned in the # usage note for sysadminctl -# - we changed UID to 350 because Sequoia now uses UIDs 300-304 for its own +# - we changed UID to 351 because Sequoia now uses UIDs 300-304 for its own # daemon users # - we changed GID to 350 alongside above just because it hides the nixbld # group from the Users & Groups settings panel :) -export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-350}" +export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-351}" export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-350}" export NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d" From 437f7a0042a7eb27e379c65557acd492e62c6496 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 12 Aug 2024 15:47:02 +0200 Subject: [PATCH 15/67] fetchers::downloadTarball(): Return a cacheable accessor downloadTarball() is used by `-I foo=` etc. fetchToStore() needs the accessor to have a fingerprint to enable caching. Fixes #11271. 
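For illustration, an invocation of the kind that goes through this path (the channel tarball URL and the expression are only examples); once the accessor carries a fingerprint, repeated evaluations can hit the fetch cache instead of re-copying the tarball into the store:

```
# <nixpkgs> is resolved via fetchers::downloadTarball() when the
# search-path entry is a pseudo-URL like the one below
nix-instantiate --eval \
  -I nixpkgs=https://nixos.org/channels/nixos-24.05/nixexprs.tar.xz \
  --expr 'builtins.pathExists <nixpkgs>'
```
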
(cherry picked from commit 9f6ee93f488c8935b560588ad7ba321d9618f588) --- src/libcmd/common-eval-args.cc | 4 +++- src/libexpr/eval.cc | 4 +++- src/libexpr/primops/fetchTree.cc | 6 +++++- src/libfetchers/tarball.cc | 20 ++++++++++++++++++-- src/libfetchers/tarball.hh | 9 ++++++--- 5 files changed, 35 insertions(+), 8 deletions(-) diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index fcef92487..ae9994a05 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -171,7 +171,9 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas { if (EvalSettings::isPseudoUrl(s)) { auto accessor = fetchers::downloadTarball( - EvalSettings::resolvePseudoUrl(s)).accessor; + state.store, + state.fetchSettings, + EvalSettings::resolvePseudoUrl(s)); auto storePath = fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy); return state.rootPath(CanonPath(state.store->toRealPath(storePath))); } diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index de5d85821..0bb1a5ea6 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -3083,7 +3083,9 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pa if (EvalSettings::isPseudoUrl(value)) { try { auto accessor = fetchers::downloadTarball( - EvalSettings::resolvePseudoUrl(value)).accessor; + store, + fetchSettings, + EvalSettings::resolvePseudoUrl(value)); auto storePath = fetchToStore(*store, SourcePath(accessor), FetchMode::Copy); return finish(store->toRealPath(storePath)); } catch (Error & e) { diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index f79b6b7b8..0e49cbc71 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -501,7 +501,11 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v // https://github.com/NixOS/nix/issues/4313 auto storePath = unpack - ? fetchToStore(*state.store, fetchers::downloadTarball(*url).accessor, FetchMode::Copy, name) + ? fetchToStore( + *state.store, + fetchers::downloadTarball(state.store, state.fetchSettings, *url), + FetchMode::Copy, + name) : fetchers::downloadFile(state.store, *url, name).storePath; if (expectedHash) { diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index 457210542..dd4f3b780 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -102,7 +102,7 @@ DownloadFileResult downloadFile( }; } -DownloadTarballResult downloadTarball( +static DownloadTarballResult downloadTarball_( const std::string & url, const Headers & headers) { @@ -202,6 +202,22 @@ DownloadTarballResult downloadTarball( return attrsToResult(infoAttrs); } +ref downloadTarball( + ref store, + const Settings & settings, + const std::string & url) +{ + /* Go through Input::getAccessor() to ensure that the resulting + accessor has a fingerprint. */ + fetchers::Attrs attrs; + attrs.insert_or_assign("type", "tarball"); + attrs.insert_or_assign("url", url); + + auto input = Input::fromAttrs(settings, std::move(attrs)); + + return input.getAccessor(store).first; +} + // An input scheme corresponding to a curl-downloadable resource. 
struct CurlInputScheme : InputScheme { @@ -353,7 +369,7 @@ struct TarballInputScheme : CurlInputScheme { auto input(_input); - auto result = downloadTarball(getStrAttr(input.attrs, "url"), {}); + auto result = downloadTarball_(getStrAttr(input.attrs, "url"), {}); result.accessor->setPathDisplay("«" + input.to_string() + "»"); diff --git a/src/libfetchers/tarball.hh b/src/libfetchers/tarball.hh index d9bdd123d..2042041d5 100644 --- a/src/libfetchers/tarball.hh +++ b/src/libfetchers/tarball.hh @@ -14,6 +14,8 @@ struct SourceAccessor; namespace nix::fetchers { +struct Settings; + struct DownloadFileResult { StorePath storePath; @@ -40,8 +42,9 @@ struct DownloadTarballResult * Download and import a tarball into the Git cache. The result is the * Git tree hash of the root directory. */ -DownloadTarballResult downloadTarball( - const std::string & url, - const Headers & headers = {}); +ref downloadTarball( + ref store, + const Settings & settings, + const std::string & url); } From f0cffa7300cec037fd5bf8adb40a2657f3af3bda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Sat, 17 Aug 2024 08:31:41 +0200 Subject: [PATCH 16/67] replace backport github action with mergify The current backport action cannot automerge because the github action bot does not trigger github CI actions. Mergify instead does not have this limitation and can also use a merge queue. On top we have now a declarative configuration to allow contributers to add new tests to required without having access to the github org. An example pull request and backport can be seen here: https://github.com/Mic92/nix-1/pull/4 and here: https://github.com/Mic92/nix-1/pull/5 To complete the setup the mergify app must be enabled for this repository. It's already installed in the nixos organization for nixos-hardware and other repositories. (cherry picked from commit 80f20fa4cb75ad48d74047ca060869bb9138f776) --- .github/workflows/backport.yml | 32 ------------ .mergify.yml | 92 ++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 32 deletions(-) delete mode 100644 .github/workflows/backport.yml create mode 100644 .mergify.yml diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml deleted file mode 100644 index dd110de6c..000000000 --- a/.github/workflows/backport.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Backport -on: - pull_request_target: - types: [closed, labeled] -permissions: - contents: read -jobs: - backport: - name: Backport Pull Request - permissions: - # for zeebe-io/backport-action - contents: write - pull-requests: write - if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name)) - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - # required to find all branches - fetch-depth: 0 - - name: Create backport PRs - # should be kept in sync with `version` - uses: zeebe-io/backport-action@v3.0.2 - with: - # Config README: https://github.com/zeebe-io/backport-action#backport-action - github_token: ${{ secrets.GITHUB_TOKEN }} - github_workspace: ${{ github.workspace }} - pull_description: |- - Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}. 
- # should be kept in sync with `uses` - version: v0.0.5 diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 000000000..663c45d92 --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,92 @@ +queue_rules: + - name: default + # all required tests need to go here + merge_conditions: + - check-success=installer + - check-success=installer_test (macos-latest) + - check-success=installer_test (ubuntu-latest) + - check-success=tests (macos-latest) + - check-success=tests (ubuntu-latest) + - check-success=vm_tests + merge_method: rebase + batch_size: 5 + +pull_request_rules: + - name: merge using the merge queue + conditions: + - base=master + - label~=merge-queue|dependencies + actions: + queue: {} + +# The rules below will first create backport pull requests and put those in a merge queue. + + - name: backport patches to 2.18 + conditions: + - label=backport 2.18-maintenance + actions: + backport: + branches: + - 2.18-maintenance + labels: + - merge-queue + + - name: backport patches to 2.19 + conditions: + - label=backport 2.19-maintenance + actions: + backport: + branches: + - 2.19-maintenance + labels: + - merge-queue + + - name: backport patches to 2.20 + conditions: + - label=backport 2.20-maintenance + actions: + backport: + branches: + - 2.20-maintenance + labels: + - merge-queue + + - name: backport patches to 2.21 + conditions: + - label=backport 2.21-maintenance + actions: + backport: + branches: + - 2.21-maintenance + labels: + - merge-queue + + - name: backport patches to 2.22 + conditions: + - label=backport 2.22-maintenance + actions: + backport: + branches: + - 2.22-maintenance + labels: + - merge-queue + + - name: backport patches to 2.23 + conditions: + - label=backport 2.23-maintenance + actions: + backport: + branches: + - 2.23-maintenance + labels: + - merge-queue + + - name: backport patches to 2.24 + conditions: + - label=backport 2.24-maintenance + actions: + backport: + branches: + - "2.24-maintenance" + labels: + - merge-queue From 12fa019ae558641df0a23a7973d64e687b2d8ba8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 4 Sep 2024 21:43:59 +0200 Subject: [PATCH 17/67] NAR parser: Fix check for duplicate / incorrectly sorted entries "prevName" was always empty because it was declared in the wrong scope. (cherry picked from commit 495d32e1b8e5d5143f048d1be755a96bea822b19) --- src/libutil/archive.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index e2ebcda0c..353760398 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -214,11 +214,13 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath else if (t == "directory") { sink.createDirectory(path); + std::string prevName; + while (1) { s = getString(); if (s == "entry") { - std::string name, prevName; + std::string name; s = getString(); if (s != "(") throw badArchive("expected open tag"); From 6187ee468f1ffd5ff4f931b9e027e718d12f9f20 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 16:41:15 +0200 Subject: [PATCH 18/67] Add test case for NARs with duplicate directory entries This test was made by @puckipedia. 
(cherry picked from commit 83d5b32803e5b828967a27b1ea93c5728d3a4d0a) --- tests/functional/duplicate.nar | Bin 0 -> 1400 bytes tests/functional/local.mk | 2 +- tests/functional/{case-hack.sh => nars.sh} | 9 +++++---- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 tests/functional/duplicate.nar rename tests/functional/{case-hack.sh => nars.sh} (79%) diff --git a/tests/functional/duplicate.nar b/tests/functional/duplicate.nar new file mode 100644 index 0000000000000000000000000000000000000000..1d0993ed4cab41a6d45907ac0c17026afd5471a2 GIT binary patch literal 1400 zcmdT@+it=z49zZ#4T*h25D#ojRW~kz9 z$BsP}-LYn0DAbktf#N+v9qTBW&+onV;7jX2S0C@V9t<{lr}pt&I-XgF4v29E z3g3EyMu?&G+_E0O>ztu< "$TEST_ROOT/case.nar" cmp case.nar "$TEST_ROOT/case.nar" From f160d3ac68f67497f7f4948fa7a236790c7fee12 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 16:48:43 +0200 Subject: [PATCH 19/67] Test that nix-store --restore fails if the output already exists This restores the behaviour from before the std::filesystem refactorings. (cherry picked from commit da1ad28912334bb57f923afb4745273fd68f695c) --- src/libutil/fs-sink.cc | 3 ++- tests/functional/nars.sh | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libutil/fs-sink.cc b/src/libutil/fs-sink.cc index f15324d0a..696cd17ea 100644 --- a/src/libutil/fs-sink.cc +++ b/src/libutil/fs-sink.cc @@ -71,7 +71,8 @@ static GlobalConfig::Register r1(&restoreSinkSettings); void RestoreSink::createDirectory(const CanonPath & path) { - std::filesystem::create_directory(dstPath / path.rel()); + if (!std::filesystem::create_directory(dstPath / path.rel())) + throw Error("path '%s' already exists", (dstPath / path.rel()).string()); }; struct RestoreRegularFile : CreateRegularFileSink { diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index c58d12cd5..106bd10fc 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -10,6 +10,9 @@ clearStore rm -rf "$TEST_ROOT/out" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "NAR directory is not sorted" +# Check that nix-store --restore fails if the output already exists. +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" + # Check whether restoring and dumping a NAR that contains case # collisions is round-tripping, even on a case-insensitive system. rm -rf "$TEST_ROOT/case" From 0cfc9bf1334a340b2123221e9fead71ab2b3307e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 16:54:12 +0200 Subject: [PATCH 20/67] More tests (cherry picked from commit 77c090cdbd56220895a2447efae79f68ed7861c5) --- tests/functional/nars.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index 106bd10fc..b2b6b2b1a 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -13,6 +13,17 @@ expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet # Check that nix-store --restore fails if the output already exists. 
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" +rm -rf "$TEST_ROOT/out" +echo foo > "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "cannot create directory.*File exists" + +rm -rf "$TEST_ROOT/out" +ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "cannot create directory.*File exists" + +mkdir -p "$TEST_ROOT/out2" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" + # Check whether restoring and dumping a NAR that contains case # collisions is round-tripping, even on a case-insensitive system. rm -rf "$TEST_ROOT/case" From 12889704966afa417a1c9044755665646f9c2872 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 19:26:10 +0200 Subject: [PATCH 21/67] Detect NAR directory entries that collide with another path after case-hacking The test was made by @puckipedia. (cherry picked from commit 35575873813f60fff26f27a65e09038986f17cb5) --- src/libutil/archive.cc | 3 +++ tests/functional/case-collision.nar | Bin 0 -> 1928 bytes tests/functional/nars.sh | 6 ++++++ 3 files changed, 9 insertions(+) create mode 100644 tests/functional/case-collision.nar diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 353760398..849bfe022 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -243,6 +243,9 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath debug("case collision between '%1%' and '%2%'", i->first, name); name += caseHackSuffix; name += std::to_string(++i->second); + auto j = names.find(name); + if (j != names.end()) + throw Error("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first); } else names[name] = 0; } diff --git a/tests/functional/case-collision.nar b/tests/functional/case-collision.nar new file mode 100644 index 0000000000000000000000000000000000000000..2eff86901c617be2a830d23074923cb5b3b69aa3 GIT binary patch literal 1928 zcmd^9%}&EG3@&2)Y!WvfAc(_YXsQr5o`XF=mU?TnHklH4TQ7Zf(qMC#G>KJ{av&Gy za}?+EXU7lO&ocTjmrj*>2lMyfx+4Dz*%4W6x6p6LgbVFJp>-|c8?s<9`cB0$vW{^$ z?iYCMuQE2ai07y7GmkrZ&%wH>q|5FJD{C-t@C1MJc_jzOWqdC0M~c()?t*xok{-HJ zs!i9+H#iU9)|ED!?3UuAbZZF8FyEZ~jG6y2J~toM9S7FoQvGmE`2|Vij(PpHA1=*f z7ka8+sd=Qc8V} DaOkrB literal 0 HcmV?d00001 diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index b2b6b2b1a..f2339af88 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -37,3 +37,9 @@ cmp case.nar "$TEST_ROOT/case.nar" # removal of the suffix). touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3" (! nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > /dev/null) + +# Detect NARs that have a directory entry that after case-hacking +# collides with another entry (e.g. a directory containing 'Test', +# 'Test~nix~case~hack~1' and 'test'). +rm -rf "$TEST_ROOT/case" +expectStderr 1 nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case-collision.nar | grepQuiet "NAR contains file name 'test' that collides with case-hacked file name 'Test~nix~case~hack~1'" From a041688133e69016b94110c76719813e11135365 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 20:37:26 +0200 Subject: [PATCH 22/67] Test that deserializing NARs with names with equal Unicode normal forms fails on macOS The test is based on the one by @puckipedia but with the file names swapped to make them sorted. 
(cherry picked from commit 7a765a6aafa27267659eb7339cf7039990f30caa) --- tests/functional/nars.sh | 11 +++++++++++ tests/functional/unnormalized.nar | Bin 0 -> 1728 bytes 2 files changed, 11 insertions(+) create mode 100644 tests/functional/unnormalized.nar diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index f2339af88..b16650e7e 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -43,3 +43,14 @@ touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3" # 'Test~nix~case~hack~1' and 'test'). rm -rf "$TEST_ROOT/case" expectStderr 1 nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case-collision.nar | grepQuiet "NAR contains file name 'test' that collides with case-hacked file name 'Test~nix~case~hack~1'" + +# Deserializing a NAR that contains file names that Unicode-normalize +# to the same name should fail on macOS but succeed on Linux. +rm -rf "$TEST_ROOT/out" +if [[ $(uname) = Darwin ]]; then + expectStderr 1 nix-store --restore "$TEST_ROOT/out" < unnormalized.nar | grepQuiet "cannot create directory.*File exists" +else + nix-store --restore "$TEST_ROOT/out" < unnormalized.nar + [[ -e $TEST_ROOT/out/â ]] + [[ -e $TEST_ROOT/out/â ]] +fi diff --git a/tests/functional/unnormalized.nar b/tests/functional/unnormalized.nar new file mode 100644 index 0000000000000000000000000000000000000000..4b7edb17e0b4a9b75cf2958e9f12cceca22d267c GIT binary patch literal 1728 zcmd^9&2GXl4DNo}ka&koJMc51YTAwW-~mEvXhfQz#07fgQFxVI_fQML(N5J=2`NbQ zV*7LLe6bx5vh%0qe#)&Vu$+Qc|z8XR?vo72w}Ja>8T2af{uR|2^gTKAx4X{4ZTc z-^V~CHIFT~SHUB7Jzi)&Hr6bq0+*^U@tqW~ Date: Thu, 5 Sep 2024 20:55:24 +0200 Subject: [PATCH 23/67] Fix test on macOS (cherry picked from commit 21dcbd7e83929fbf8b6c666d743afa0a9ea73d83) --- tests/functional/nars.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index b16650e7e..bd2c49fce 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -48,7 +48,7 @@ expectStderr 1 nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case-collisi # to the same name should fail on macOS but succeed on Linux. rm -rf "$TEST_ROOT/out" if [[ $(uname) = Darwin ]]; then - expectStderr 1 nix-store --restore "$TEST_ROOT/out" < unnormalized.nar | grepQuiet "cannot create directory.*File exists" + expectStderr 1 nix-store --restore "$TEST_ROOT/out" < unnormalized.nar | grepQuiet "path '.*/out/â' already exists" else nix-store --restore "$TEST_ROOT/out" < unnormalized.nar [[ -e $TEST_ROOT/out/â ]] From 25510ba66f31dce539796d0101cfee8c52e2752d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 22:21:53 +0200 Subject: [PATCH 24/67] RestoreSink::createDirectory(): Use append() On macOS, `mkdir("x/')` behaves differently than `mkdir("x")` if `x` is a dangling symlink (the formed succeed while the latter fails). So make sure we always strip the trailing slash. 
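A shell sketch of the difference being worked around (illustrative; the paths are arbitrary and the trailing-slash behaviour is the macOS mkdir(2) behaviour described above):

```
ln -s /nonexistent x   # dangling symlink
mkdir x                # fails with EEXIST ("File exists") on the plain name
mkdir x/               # on macOS the trailing-slash form can succeed instead
```
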
(cherry picked from commit 9fcb588dd8a7b3f0d7d103cea449abcf9f736ad6) --- src/libutil/fs-sink.cc | 20 ++++++++++---------- tests/functional/nars.sh | 8 ++++---- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/libutil/fs-sink.cc b/src/libutil/fs-sink.cc index 696cd17ea..a08cb0a4c 100644 --- a/src/libutil/fs-sink.cc +++ b/src/libutil/fs-sink.cc @@ -68,11 +68,19 @@ static RestoreSinkSettings restoreSinkSettings; static GlobalConfig::Register r1(&restoreSinkSettings); +static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path) +{ + auto dst = src; + if (!path.rel().empty()) + dst /= path.rel(); + return dst; +} void RestoreSink::createDirectory(const CanonPath & path) { - if (!std::filesystem::create_directory(dstPath / path.rel())) - throw Error("path '%s' already exists", (dstPath / path.rel()).string()); + auto p = append(dstPath, path); + if (!std::filesystem::create_directory(p)) + throw Error("path '%s' already exists", p.string()); }; struct RestoreRegularFile : CreateRegularFileSink { @@ -83,14 +91,6 @@ struct RestoreRegularFile : CreateRegularFileSink { void preallocateContents(uint64_t size) override; }; -static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path) -{ - auto dst = src; - if (!path.rel().empty()) - dst /= path.rel(); - return dst; -} - void RestoreSink::createRegularFile(const CanonPath & path, std::function func) { auto p = append(dstPath, path); diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index bd2c49fce..4f2470ea7 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -11,18 +11,18 @@ rm -rf "$TEST_ROOT/out" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "NAR directory is not sorted" # Check that nix-store --restore fails if the output already exists. -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists" rm -rf "$TEST_ROOT/out" echo foo > "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "cannot create directory.*File exists" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "File exists" rm -rf "$TEST_ROOT/out" ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "cannot create directory.*File exists" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "File exists" mkdir -p "$TEST_ROOT/out2" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists" # Check whether restoring and dumping a NAR that contains case # collisions is round-tripping, even on a case-insensitive system. 
From e25410c7886a91167ca0ca2f496bf6bf17ee6510 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 6 Sep 2024 16:28:09 +0200 Subject: [PATCH 25/67] Test that deserializing regular files / symlinks is exclusive (cherry picked from commit 52ba3cc5eac0418218a90c0cddb06688d4c7b5d3) --- tests/functional/nars.sh | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index 4f2470ea7..ed19637a1 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -24,6 +24,44 @@ expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet mkdir -p "$TEST_ROOT/out2" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists" +# The same, but for a regular file. +nix-store --dump ./nars.sh > "$TEST_ROOT/tmp.nar" + +rm -rf "$TEST_ROOT/out" +nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +rm -rf "$TEST_ROOT/out" +mkdir -p "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +rm -rf "$TEST_ROOT/out" +ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +mkdir -p "$TEST_ROOT/out2" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +# The same, but for a symlink +ln -sfn foo "$TEST_ROOT/symlink" +nix-store --dump "$TEST_ROOT/symlink" > "$TEST_ROOT/tmp.nar" + +rm -rf "$TEST_ROOT/out" +nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" +[[ -L "$TEST_ROOT/out" ]] +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +rm -rf "$TEST_ROOT/out" +mkdir -p "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +rm -rf "$TEST_ROOT/out" +ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +mkdir -p "$TEST_ROOT/out2" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + # Check whether restoring and dumping a NAR that contains case # collisions is round-tripping, even on a case-insensitive system. rm -rf "$TEST_ROOT/case" From 2e1cb495c1bf36d59c234d923a139c01a3866ee1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2024 14:11:35 +0200 Subject: [PATCH 26/67] Typo (cherry picked from commit 4cfa59fdb32aa4fcc58b735d8843ce308692a652) --- tests/functional/nars.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index ed19637a1..9f5f43dc6 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -42,7 +42,7 @@ expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | gre mkdir -p "$TEST_ROOT/out2" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" -# The same, but for a symlink +# The same, but for a symlink. 
ln -sfn foo "$TEST_ROOT/symlink" nix-store --dump "$TEST_ROOT/symlink" > "$TEST_ROOT/tmp.nar" From a6ad5565ef15a18ea2f60de4d57f75cd0175b167 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2024 14:29:05 +0200 Subject: [PATCH 27/67] Improve use-case-hack description slightly (cherry picked from commit 5ca2f58798e6f514b5194c16c0fea0d8ec128171) --- src/libutil/archive.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 849bfe022..458438cbd 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -23,7 +23,7 @@ struct ArchiveSettings : Config false, #endif "use-case-hack", - "Whether to enable a Darwin-specific hack for dealing with file name collisions."}; + "Whether to enable a macOS-specific hack for dealing with file name case collisions."}; }; static ArchiveSettings archiveSettings; From 0f825b38f43df5722be32526476b832b62b98e97 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 10 Sep 2024 13:45:04 +0200 Subject: [PATCH 28/67] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index c5f92d6f8..7ed0e12bc 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.24.6 +2.24.7 From 40461a8e0e347d457875653a1e08da51dbb1c587 Mon Sep 17 00:00:00 2001 From: Artturin Date: Wed, 11 Sep 2024 00:17:03 +0300 Subject: [PATCH 29/67] Fix making the build directory kept by `keep-failed` readable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Caused by 1d3696f0fb88d610abc234a60e0d6d424feafdf1 Without this fix the kept build directory is readable only by root ``` $ sudo ls -ld /comp-temp/nix-build-openssh-static-x86_64-unknown-linux-musl-9.8p1.drv-5 drwx------ root root 60 B Wed Sep 11 00:09:48 2024  /comp-temp/nix-build-openssh-static-x86_64-unknown-linux-musl-9.8p1.drv-5/ $ sudo ls -ld /comp-temp/nix-build-openssh-static-x86_64-unknown-linux-musl-9.8p1.drv-5/build drwxr-xr-x nixbld1 nixbld 80 B Wed Sep 11 00:09:58 2024  /comp-temp/nix-build-openssh-static-x86_64-unknown-linux-musl-9.8p1.drv-5/build/ ``` (cherry picked from commit ebebe626ff4ec6da98c0a043c64b35efe1c05bc3) --- src/libstore/unix/build/local-derivation-goal.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc index d3482df17..c9a54bb0f 100644 --- a/src/libstore/unix/build/local-derivation-goal.cc +++ b/src/libstore/unix/build/local-derivation-goal.cc @@ -3000,6 +3000,7 @@ void LocalDerivationGoal::deleteTmpDir(bool force) might have privileged stuff (like a copy of netrc). 
*/ if (settings.keepFailed && !force && !drv->isBuiltin()) { printError("note: keeping build directory '%s'", tmpDir); + chmod(topTmpDir.c_str(), 0755); chmod(tmpDir.c_str(), 0755); } else From 97c5ac575277c35c5df09c837c312a5ed8408fa1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2024 19:52:21 +0200 Subject: [PATCH 30/67] Git fetcher: Don't update mtime of ref file if fetching by rev This fixes the warning $ nix eval --store /tmp/nix --expr 'builtins.fetchTree { type = "git"; url = "https://github.com/DeterminateSystems/attic"; ref = "fixups-for-magic-nix-cache"; rev = "635753a2069d4b8228e846dc5c09ad361c75cd1a"; }' warning: could not update mtime for file '/home/eelco/.cache/nix/gitv3/09788h9zgba5lbfkaa6ija2dvi004jwsqjf5ln21i2njs07cz766/refs/heads/fixups-for-magic-nix-cache': error: changing modification time of '"/home/eelco/.cache/nix/gitv3/09788h9zgba5lbfkaa6ija2dvi004jwsqjf5ln21i2njs07cz766/refs/heads/fixups-for-magic-nix-cache"': No such file or directory When we're fetching by rev, that file doesn't necessarily exist, and we don't care about it anyway. (cherry picked from commit b80b091bac1eeb6fa64db1ae078de5c6a2e4b1b8) --- src/libfetchers/git.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 076c757c5..6c5bda470 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -584,9 +584,10 @@ struct GitInputScheme : InputScheme } try { - setWriteTime(localRefFile, now, now); + if (!input.getRev()) + setWriteTime(localRefFile, now, now); } catch (Error & e) { - warn("could not update mtime for file '%s': %s", localRefFile, e.msg()); + warn("could not update mtime for file '%s': %s", localRefFile, e.info().msg); } if (!originalRef && !storeCachedHead(repoInfo.url, ref)) warn("could not update cached head '%s' for '%s'", ref, repoInfo.url); From 751907dc8a2cf1af867fbf4877ec64b68c010ed6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 4 Sep 2024 14:43:43 +0200 Subject: [PATCH 31/67] Git fetcher: Ignore .gitmodules entries that are not submodules Fixes #10739. (cherry picked from commit 9d24080090539c717015add8f2d8ce02d1d84a2d) --- src/libfetchers/git-utils.cc | 18 ++++++++++++------ tests/functional/fetchGitSubmodules.sh | 21 +++++++++++++++++++++ 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 114aa4ec0..0bc930ab2 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -601,12 +601,16 @@ struct GitSourceAccessor : SourceAccessor return readBlob(path, true); } - Hash getSubmoduleRev(const CanonPath & path) + /** + * If `path` exists and is a submodule, return its + * revision. Otherwise return nothing. + */ + std::optional getSubmoduleRev(const CanonPath & path) { - auto entry = need(path); + auto entry = lookup(path); - if (git_tree_entry_type(entry) != GIT_OBJECT_COMMIT) - throw Error("'%s' is not a submodule", showPath(path)); + if (!entry || git_tree_entry_type(entry) != GIT_OBJECT_COMMIT) + return std::nullopt; return toHash(*git_tree_entry_id(entry)); } @@ -1074,8 +1078,10 @@ std::vector> GitRepoImpl::getSubmodules auto rawAccessor = getRawAccessor(rev); for (auto & submodule : parseSubmodules(pathTemp)) { - auto rev = rawAccessor->getSubmoduleRev(submodule.path); - result.push_back({std::move(submodule), rev}); + /* Filter out .gitmodules entries that don't exist or are not + submodules. 
*/ + if (auto rev = rawAccessor->getSubmoduleRev(submodule.path)) + result.push_back({std::move(submodule), *rev}); } return result; diff --git a/tests/functional/fetchGitSubmodules.sh b/tests/functional/fetchGitSubmodules.sh index 4a3e4c347..cd3b51674 100755 --- a/tests/functional/fetchGitSubmodules.sh +++ b/tests/functional/fetchGitSubmodules.sh @@ -104,6 +104,27 @@ noSubmoduleRepo=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$subR [[ $noSubmoduleRepoBaseline == $noSubmoduleRepo ]] +# Test .gitmodules with entries that refer to non-existent objects or objects that are not submodules. +cat >> $rootRepo/.gitmodules < $rootRepo/file +git -C $rootRepo add file +git -C $rootRepo commit -a -m "Add bad submodules" + +rev=$(git -C $rootRepo rev-parse HEAD) + +r=$(nix eval --raw --expr "builtins.fetchGit { url = file://$rootRepo; rev = \"$rev\"; submodules = true; }") + +[[ -f $r/file ]] +[[ ! -e $r/missing ]] + # Test relative submodule URLs. rm $TEST_HOME/.cache/nix/fetcher-cache* rm -rf $rootRepo/.git $rootRepo/.gitmodules $rootRepo/sub From cd97688bce63dcc6605486a5a2cc41a5d11b3552 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Mon, 26 Aug 2024 21:14:20 +0200 Subject: [PATCH 32/67] builtins.readDir: fix nix error trace on filesystem errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before: nix-env % ./src/nix/nix eval --impure --expr 'let f = builtins.readDir "/nix/store/hs3yxdq9knimwdm51gvbs4dvncz46f9d-hello-2.12.1/foo"; in f' --show-trace error: filesystem error: directory iterator cannot open directory: No such file or directory [/nix/store/hs3yxdq9knimwdm51gvbs4dvncz46f9d-hello-2.12.1/foo] After: error: … while calling the 'readDir' builtin at «string»:1:9: 1| let f = builtins.readDir "/nix/store/hs3yxdq9knimwdm51gvbs4dvncz46f9d-hello-2.12.1/foo"; in f | ^ error: reading directory '/nix/store/hs3yxdq9knimwdm51gvbs4dvncz46f9d-hello-2.12.1/foo': No such file or directory (cherry picked from commit 22ba4dc78d956020e06e0618f020e11700749823) --- src/libutil/posix-source-accessor.cc | 40 +++++++++++++++------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index 2b1a485d5..d09ea4a87 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -132,23 +132,24 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath & { assertNoSymlinks(path); DirEntries res; - for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) { - checkInterrupt(); - auto type = [&]() -> std::optional { - std::filesystem::file_type nativeType; - try { - nativeType = entry.symlink_status().type(); - } catch (std::filesystem::filesystem_error & e) { - // We cannot always stat the child. (Ideally there is no - // stat because the native directory entry has the type - // already, but this isn't always the case.) - if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted) - return std::nullopt; - else throw; - } + try { + for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) { + checkInterrupt(); + auto type = [&]() -> std::optional { + std::filesystem::file_type nativeType; + try { + nativeType = entry.symlink_status().type(); + } catch (std::filesystem::filesystem_error & e) { + // We cannot always stat the child. 
(Ideally there is no + // stat because the native directory entry has the type + // already, but this isn't always the case.) + if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted) + return std::nullopt; + else throw; + } - // cannot exhaustively enumerate because implementation-specific - // additional file types are allowed. + // cannot exhaustively enumerate because implementation-specific + // additional file types are allowed. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wswitch-enum" switch (nativeType) { @@ -158,8 +159,11 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath & default: return tMisc; } #pragma GCC diagnostic pop - }(); - res.emplace(entry.path().filename().string(), type); + }(); + res.emplace(entry.path().filename().string(), type); + } + } catch (std::filesystem::filesystem_error & e) { + throw SysError("reading directory %1%", showPath(path)); } return res; } From c84fc0120f57b117c5cd24dcaa82033a32ce8761 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 5 Sep 2024 12:59:54 +0200 Subject: [PATCH 33/67] builtins.unpackChannel: wrap filesystem errors and sanitize channelName Otherwise these errors are not caught correctly (cherry picked from commit 70c52d72f4ee93b68b57b12cd7892bba03446067) --- src/libstore/builtins/unpack-channel.cc | 28 +++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/src/libstore/builtins/unpack-channel.cc b/src/libstore/builtins/unpack-channel.cc index a5f2b8e3a..7f9a520ee 100644 --- a/src/libstore/builtins/unpack-channel.cc +++ b/src/libstore/builtins/unpack-channel.cc @@ -13,21 +13,37 @@ void builtinUnpackChannel( return i->second; }; - auto out = outputs.at("out"); - auto channelName = getAttr("channelName"); + std::filesystem::path out(outputs.at("out")); + std::filesystem::path channelName(getAttr("channelName")); auto src = getAttr("src"); + if (channelName.filename() != channelName) { + throw Error("channelName is not allowed to contain filesystem seperators, got %1%", channelName); + } + createDirs(out); unpackTarfile(src, out); - auto entries = std::filesystem::directory_iterator{out}; - auto fileName = entries->path().string(); - auto fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries)); + size_t fileCount; + std::string fileName; + try { + auto entries = std::filesystem::directory_iterator{out}; + fileName = entries->path().string(); + fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries)); + } catch (std::filesystem::filesystem_error &e) { + throw SysError("failed to read directory %1%", out); + } + if (fileCount != 1) throw Error("channel tarball '%s' contains more than one file", src); - std::filesystem::rename(fileName, (out + "/" + channelName)); + std::filesystem::path target(out / channelName); + try { + std::filesystem::rename(fileName, target); + } catch (std::filesystem::filesystem_error &e) { + throw SysError("failed to rename %1% to %2%", fileName, target); + } } } From 60001b193672074ff205a53940214a8e6abb8b91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 5 Sep 2024 14:08:20 +0200 Subject: [PATCH 34/67] add release notes for filesystem fixes Update doc/manual/rl-next/filesystem-errors.md Co-authored-by: John Ericson (cherry picked from commit 04ce0e648aeac282b114cf426cea8a078c97e0a8) --- doc/manual/rl-next/filesystem-errors.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) 
create mode 100644 doc/manual/rl-next/filesystem-errors.md diff --git a/doc/manual/rl-next/filesystem-errors.md b/doc/manual/rl-next/filesystem-errors.md new file mode 100644 index 000000000..2d5b26228 --- /dev/null +++ b/doc/manual/rl-next/filesystem-errors.md @@ -0,0 +1,14 @@ +--- +synopsis: wrap filesystem exceptions more correctly +issues: [] +prs: [11378] +--- + + +With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions. + +This lead to no longer generating error traces, for example when listing a non-existing directory, and can also lead to crashes inside the Nix REPL. + +This version catches these types of exception correctly and wrap them into Nix's own exeception type. + +Author: [**@Mic92**](https://github.com/Mic92) From 4354d903845ec2329a764d615130decc942f8a19 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 11 Sep 2024 11:59:11 -0400 Subject: [PATCH 35/67] tweak unpack channel built-in, std::filesystem::path for tarball (cherry picked from commit 193dc490971b0435c7de7565b86110a59d515ff2) --- src/libstore/builtins/unpack-channel.cc | 36 ++++++++++++++----------- src/libutil/tarfile.cc | 22 ++++++++------- src/libutil/tarfile.hh | 6 ++--- 3 files changed, 37 insertions(+), 27 deletions(-) diff --git a/src/libstore/builtins/unpack-channel.cc b/src/libstore/builtins/unpack-channel.cc index 7f9a520ee..d30626a30 100644 --- a/src/libstore/builtins/unpack-channel.cc +++ b/src/libstore/builtins/unpack-channel.cc @@ -3,46 +3,52 @@ namespace nix { +namespace fs { using namespace std::filesystem; } + void builtinUnpackChannel( const BasicDerivation & drv, const std::map & outputs) { - auto getAttr = [&](const std::string & name) { + auto getAttr = [&](const std::string & name) -> const std::string & { auto i = drv.env.find(name); if (i == drv.env.end()) throw Error("attribute '%s' missing", name); return i->second; }; - std::filesystem::path out(outputs.at("out")); - std::filesystem::path channelName(getAttr("channelName")); - auto src = getAttr("src"); + fs::path out{outputs.at("out")}; + auto & channelName = getAttr("channelName"); + auto & src = getAttr("src"); - if (channelName.filename() != channelName) { + if (fs::path{channelName}.filename().string() != channelName) { throw Error("channelName is not allowed to contain filesystem seperators, got %1%", channelName); } - createDirs(out); + try { + fs::create_directories(out); + } catch (fs::filesystem_error &) { + throw SysError("creating directory '%1%'", out.string()); + } unpackTarfile(src, out); size_t fileCount; std::string fileName; try { - auto entries = std::filesystem::directory_iterator{out}; + auto entries = fs::directory_iterator{out}; fileName = entries->path().string(); - fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries)); - } catch (std::filesystem::filesystem_error &e) { - throw SysError("failed to read directory %1%", out); + fileCount = std::distance(fs::begin(entries), fs::end(entries)); + } catch (fs::filesystem_error &) { + throw SysError("failed to read directory %1%", out.string()); } - if (fileCount != 1) throw Error("channel tarball '%s' contains more than one file", src); - std::filesystem::path target(out / channelName); + + auto target = out / channelName; try { - std::filesystem::rename(fileName, target); - } catch (std::filesystem::filesystem_error &e) { - throw SysError("failed to rename %1% to %2%", fileName, target); + fs::rename(fileName, target); + } 
catch (fs::filesystem_error &) { + throw SysError("failed to rename %1% to %2%", fileName, target.string()); } } diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc index 2e3236295..a8a22d283 100644 --- a/src/libutil/tarfile.cc +++ b/src/libutil/tarfile.cc @@ -8,6 +8,10 @@ namespace nix { +namespace fs { +using namespace std::filesystem; +} + namespace { int callback_open(struct archive *, void * self) @@ -102,14 +106,14 @@ TarArchive::TarArchive(Source & source, bool raw, std::optional com "Failed to open archive (%s)"); } -TarArchive::TarArchive(const Path & path) +TarArchive::TarArchive(const fs::path & path) : archive{archive_read_new()} , buffer(defaultBufferSize) { archive_read_support_filter_all(archive); enableSupportedFormats(archive); archive_read_set_option(archive, NULL, "mac-ext", NULL); - check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s"); + check(archive_read_open_filename(archive, path.string().c_str(), 16384), "failed to open archive: %s"); } void TarArchive::close() @@ -123,7 +127,7 @@ TarArchive::~TarArchive() archive_read_free(this->archive); } -static void extract_archive(TarArchive & archive, const Path & destDir) +static void extract_archive(TarArchive & archive, const fs::path & destDir) { int flags = ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_SECURE_SYMLINKS | ARCHIVE_EXTRACT_SECURE_NODOTDOT; @@ -140,7 +144,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir) else archive.check(r); - archive_entry_copy_pathname(entry, (destDir + "/" + name).c_str()); + archive_entry_copy_pathname(entry, (destDir / name).string().c_str()); // sources can and do contain dirs with no rx bits if (archive_entry_filetype(entry) == AE_IFDIR && (archive_entry_mode(entry) & 0500) != 0500) @@ -149,7 +153,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir) // Patch hardlink path const char * original_hardlink = archive_entry_hardlink(entry); if (original_hardlink) { - archive_entry_copy_hardlink(entry, (destDir + "/" + original_hardlink).c_str()); + archive_entry_copy_hardlink(entry, (destDir / original_hardlink).string().c_str()); } archive.check(archive_read_extract(archive.archive, entry, flags)); @@ -158,19 +162,19 @@ static void extract_archive(TarArchive & archive, const Path & destDir) archive.close(); } -void unpackTarfile(Source & source, const Path & destDir) +void unpackTarfile(Source & source, const fs::path & destDir) { auto archive = TarArchive(source); - createDirs(destDir); + fs::create_directories(destDir); extract_archive(archive, destDir); } -void unpackTarfile(const Path & tarFile, const Path & destDir) +void unpackTarfile(const fs::path & tarFile, const fs::path & destDir) { auto archive = TarArchive(tarFile); - createDirs(destDir); + fs::create_directories(destDir); extract_archive(archive, destDir); } diff --git a/src/libutil/tarfile.hh b/src/libutil/tarfile.hh index 0517177db..5e29c6bba 100644 --- a/src/libutil/tarfile.hh +++ b/src/libutil/tarfile.hh @@ -15,7 +15,7 @@ struct TarArchive void check(int err, const std::string & reason = "failed to extract archive (%s)"); - explicit TarArchive(const Path & path); + explicit TarArchive(const std::filesystem::path & path); /// @brief Create a generic archive from source. /// @param source - Input byte stream. 
@@ -37,9 +37,9 @@ struct TarArchive int getArchiveFilterCodeByName(const std::string & method); -void unpackTarfile(Source & source, const Path & destDir); +void unpackTarfile(Source & source, const std::filesystem::path & destDir); -void unpackTarfile(const Path & tarFile, const Path & destDir); +void unpackTarfile(const std::filesystem::path & tarFile, const std::filesystem::path & destDir); time_t unpackTarfileToSink(TarArchive & archive, ExtendedFileSystemObjectSink & parseSink); From 684a690480784c21ad5580735c41af13fff04b6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Fri, 13 Sep 2024 14:20:34 +0200 Subject: [PATCH 36/67] update filesystem-errors changelog to 2.24 release --- doc/manual/rl-next/filesystem-errors.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/rl-next/filesystem-errors.md b/doc/manual/rl-next/filesystem-errors.md index 2d5b26228..faa9352b9 100644 --- a/doc/manual/rl-next/filesystem-errors.md +++ b/doc/manual/rl-next/filesystem-errors.md @@ -7,7 +7,7 @@ prs: [11378] With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions. -This lead to no longer generating error traces, for example when listing a non-existing directory, and can also lead to crashes inside the Nix REPL. +This lead to no longer generating error traces, for example when listing a non-existing directory. This version catches these types of exception correctly and wrap them into Nix's own exeception type. From 1b076b4f84a74a47d4f4eeb14c7d1e485a754c87 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 16:03:05 +0200 Subject: [PATCH 37/67] doc: add admonitions for macOS 15 Sequoia update (#11487) (#11509) The impending release of macOS 15 Sequoia will break many existing nix installs on macOS, which may lead to an increased number of people who are looking to try to reinstall Nix without noticing the open/pinned issue (#10892) that explains the problem and outlines how to migrate existing installs. These admonitions are a short-term measure until we are over the hump and support volumes dwindle. (cherry picked from commit 48477d4a3e7130c89b2ded4496c00ef74601091f) Co-authored-by: Travis A. Everett --- doc/manual/src/installation/index.md | 8 ++++++++ doc/manual/src/installation/installing-binary.md | 8 ++++++++ doc/manual/src/installation/uninstall.md | 8 ++++++++ 3 files changed, 24 insertions(+) diff --git a/doc/manual/src/installation/index.md b/doc/manual/src/installation/index.md index dafdeb667..16a7f485a 100644 --- a/doc/manual/src/installation/index.md +++ b/doc/manual/src/installation/index.md @@ -14,6 +14,14 @@ This option requires either: * Linux running systemd, with SELinux disabled * MacOS +> **Updating to macOS 15 Sequoia** +> +> If you recently updated to macOS 15 Sequoia and are getting +> ```console +> error: the user '_nixbld1' in the group 'nixbld' does not exist +> ``` +> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. 
+ ```console $ bash <(curl -L https://nixos.org/nix/install) --daemon ``` diff --git a/doc/manual/src/installation/installing-binary.md b/doc/manual/src/installation/installing-binary.md index 6a168ff3d..6a1a5ddca 100644 --- a/doc/manual/src/installation/installing-binary.md +++ b/doc/manual/src/installation/installing-binary.md @@ -1,5 +1,13 @@ # Installing a Binary Distribution +> **Updating to macOS 15 Sequoia** +> +> If you recently updated to macOS 15 Sequoia and are getting +> ```console +> error: the user '_nixbld1' in the group 'nixbld' does not exist +> ``` +> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. + To install the latest version Nix, run the following command: ```console diff --git a/doc/manual/src/installation/uninstall.md b/doc/manual/src/installation/uninstall.md index 590327fea..97590e3db 100644 --- a/doc/manual/src/installation/uninstall.md +++ b/doc/manual/src/installation/uninstall.md @@ -43,6 +43,14 @@ which you may remove. ### macOS +> **Updating to macOS 15 Sequoia** +> +> If you recently updated to macOS 15 Sequoia and are getting +> ```console +> error: the user '_nixbld1' in the group 'nixbld' does not exist +> ``` +> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. + 1. If system-wide shell initialisation files haven't been altered since installing Nix, use the backups made by the installer: ```console From 9941f620c442f0996d7889d948b781304e5fb0f2 Mon Sep 17 00:00:00 2001 From: Brian McGee Date: Mon, 31 Jul 2023 18:40:45 +0100 Subject: [PATCH 38/67] base64Decode: clearer error message when an invalid character is detected Output the offending string in its entirety to provide context. Closes #8479 (cherry picked from commit dc3ccf02bfd4d359228b54f5c24ae2b6caf6428e) --- src/libutil/util.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 698e181a1..174e7ce8f 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -260,8 +260,9 @@ std::string base64Decode(std::string_view s) if (c == '\n') continue; char digit = base64DecodeChars[(unsigned char) c]; - if (digit == npos) - throw Error("invalid character in Base64 string: '%c'", c); + if (digit == npos) { + throw Error("invalid character in Base64 string: '%c' in '%s'", c, s.data()); + } bits += 6; d = d << 6 | digit; From 5b5e1920eb519304833aebf9e061c66a262880cd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 19 Sep 2024 19:16:31 +0200 Subject: [PATCH 39/67] Fix missing GC root in zipAttrsWith My SNAFU was that I assumed that all the `Value *`s we put in `attrsSeen` are already reachable (which they are), but I forgot about the `elems` pointer in `ListBuilder`. Fixes #11547. 
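A condensed sketch of the pattern behind the fix (illustrative only; the element
types below are placeholders, not the exact ones used in `prim_zipAttrsWith`):
any STL container that holds pointers into GC-managed memory (directly, or
inside a stored struct such as one containing a `ListBuilder`) must allocate its
nodes with a GC-traceable allocator, otherwise the collector cannot see those
pointers and may free the values they still reference.

    #include <gc/gc_allocator.h>   // assumes the Boehm GC headers (HAVE_BOEHMGC)
    #include <map>

    struct Value;   // stands in for nix::Value in this sketch

    // Map nodes are allocated with traceable_allocator, so the collector
    // scans them and keeps the referenced Value objects alive.
    using ValueMap = std::map<
        unsigned int, Value *,
        std::less<unsigned int>,
        traceable_allocator<std::pair<const unsigned int, Value *>>>;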
(cherry picked from commit 0c2fdd2f3c0f04bef4b5c74fbb02a5f8227c07df) --- src/libexpr/primops.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 7ceb84f0e..50552f6de 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -3136,7 +3136,7 @@ static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value * * arg std::optional list; }; - std::map attrsSeen; + std::map, traceable_allocator>> attrsSeen; state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.zipAttrsWith"); state.forceList(*args[1], pos, "while evaluating the second argument passed to builtins.zipAttrsWith"); From ecd83dc155ac770caa5faccb98f045da8d579e29 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 19 Sep 2024 19:52:47 +0200 Subject: [PATCH 40/67] Use HAVE_BOEHMGC Co-authored-by: Robert Hensing (cherry picked from commit 4449b0da744c32cb9cbb06b661a5f5df4444497a) --- src/libexpr/primops.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 50552f6de..8536eb359 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -3136,7 +3136,11 @@ static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value * * arg std::optional list; }; +#if HAVE_BOEHMGC std::map, traceable_allocator>> attrsSeen; +#else + std::map attrsSeen; +#endif state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.zipAttrsWith"); state.forceList(*args[1], pos, "while evaluating the second argument passed to builtins.zipAttrsWith"); From a7fdef6858dd45b9d7bda7c92324c63faee7f509 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 20 Sep 2024 01:19:15 +0200 Subject: [PATCH 41/67] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index 7ed0e12bc..4ee8b9932 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.24.7 +2.24.8 From 563dedcf64d960e816fcd216f0944638e6677626 Mon Sep 17 00:00:00 2001 From: Alyssa Ross Date: Sat, 31 Aug 2024 15:59:18 +0200 Subject: [PATCH 42/67] Don't refer to public keys as secret keys in error This constructor is used for public keys as well. (cherry picked from commit 9cc550d65252d3ad822cc12496ef71482c47ff7e) --- src/libutil/signature/local-keys.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/signature/local-keys.cc b/src/libutil/signature/local-keys.cc index 858b036f5..00c4543f2 100644 --- a/src/libutil/signature/local-keys.cc +++ b/src/libutil/signature/local-keys.cc @@ -22,7 +22,7 @@ Key::Key(std::string_view s) key = ss.payload; if (name == "" || key == "") - throw Error("secret key is corrupt"); + throw Error("key is corrupt"); key = base64Decode(key); } From 1e03ea386b75fbdd8bba01203f059694d0e4c139 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 20 Sep 2024 10:41:45 -0400 Subject: [PATCH 43/67] Revert "base64Decode: clearer error message when an invalid character is detected" We have a safer way of doing this. This reverts commit dc3ccf02bfd4d359228b54f5c24ae2b6caf6428e. 
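For context, the "safer way" is the pattern the follow-up commit introduces:
keep the low-level Base64 error free of the raw input, and let each call site
attach context, omitting the raw value whenever it may be sensitive. A
condensed, illustrative sketch of that caller-side pattern (not the exact code):

    try {
        key = base64Decode(key);
    } catch (Error & e) {
        // Only echo the raw value for non-sensitive keys (e.g. public keys).
        e.addTrace({}, "while decoding key named '%s'%s",
            name, sensitiveValue ? std::string() : fmt(" with raw value '%s'", key));
        throw;
    }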
(cherry picked from commit d0c351bf4392e76d81b282aaaafdf2c2e0a64c69) --- src/libutil/util.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 174e7ce8f..698e181a1 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -260,9 +260,8 @@ std::string base64Decode(std::string_view s) if (c == '\n') continue; char digit = base64DecodeChars[(unsigned char) c]; - if (digit == npos) { - throw Error("invalid character in Base64 string: '%c' in '%s'", c, s.data()); - } + if (digit == npos) + throw Error("invalid character in Base64 string: '%c'", c); bits += 6; d = d << 6 | digit; From 082f6bb35d4c3d63afeaead5733e253760d0d344 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 17 Sep 2024 15:25:30 -0400 Subject: [PATCH 44/67] Ensure error messages don't leak private key Since #8766, invalid base64 is rendered in errors, but we don't actually want to show this in the case of an invalid private keys. Co-Authored-By: Eelco Dolstra (cherry picked from commit 2b6b03d8df8811ef85605461c030466af84a8761) --- src/libfetchers/git-utils.cc | 8 +++++++- src/libstore/machines.cc | 5 +++-- src/libstore/ssh.cc | 14 ++++++++++++-- src/libstore/ssh.hh | 3 +++ src/libutil/hash.cc | 7 ++++++- src/libutil/signature/local-keys.cc | 27 ++++++++++++++++++++------- src/libutil/signature/local-keys.hh | 16 ++++++++++------ src/libutil/util.cc | 2 +- src/libutil/util.hh | 6 +++++- tests/unit/libexpr/nix_api_expr.cc | 2 +- 10 files changed, 68 insertions(+), 22 deletions(-) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 0bc930ab2..79ff6e7cd 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -460,7 +460,13 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this std::string re = R"(Good "git" signature for \* with .* key SHA256:[)"; for (const fetchers::PublicKey & k : publicKeys){ // Calculate sha256 fingerprint from public key and escape the regex symbol '+' to match the key literally - auto fingerprint = trim(hashString(HashAlgorithm::SHA256, base64Decode(k.key)).to_string(nix::HashFormat::Base64, false), "="); + std::string keyDecoded; + try { + keyDecoded = base64Decode(k.key); + } catch (Error & e) { + e.addTrace({}, "while decoding public key '%s' used for git signature", k.key); + } + auto fingerprint = trim(hashString(HashAlgorithm::SHA256, keyDecoded).to_string(nix::HashFormat::Base64, false), "="); auto escaped_fingerprint = std::regex_replace(fingerprint, std::regex("\\+"), "\\+" ); re += "(" + escaped_fingerprint + ")"; } diff --git a/src/libstore/machines.cc b/src/libstore/machines.cc index 256cf9188..5e038fb28 100644 --- a/src/libstore/machines.cc +++ b/src/libstore/machines.cc @@ -159,8 +159,9 @@ static Machine parseBuilderLine(const std::set & defaultSystems, co const auto & str = tokens[fieldIndex]; try { base64Decode(str); - } catch (const Error & e) { - throw FormatError("bad machine specification: a column #%lu in a row: '%s' is not valid base64 string: %s", fieldIndex, line, e.what()); + } catch (FormatError & e) { + e.addTrace({}, "while parsing machine specification at a column #%lu in a row: '%s'", fieldIndex, line); + throw; } return str; }; diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index e5d623adf..f9cb61778 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -6,6 +6,16 @@ namespace nix { +static std::string parsePublicHostKey(std::string_view host, std::string_view sshPublicHostKey) +{ + try { + return base64Decode(sshPublicHostKey); + 
} catch (Error & e) { + e.addTrace({}, "while decoding ssh public host key for host '%s'", host); + throw; + } +} + SSHMaster::SSHMaster( std::string_view host, std::string_view keyFile, @@ -14,7 +24,7 @@ SSHMaster::SSHMaster( : host(host) , fakeSSH(host == "localhost") , keyFile(keyFile) - , sshPublicHostKey(sshPublicHostKey) + , sshPublicHostKey(parsePublicHostKey(host, sshPublicHostKey)) , useMaster(useMaster && !fakeSSH) , compress(compress) , logFD(logFD) @@ -38,7 +48,7 @@ void SSHMaster::addCommonSSHOpts(Strings & args) std::filesystem::path fileName = state->tmpDir->path() / "host-key"; auto p = host.rfind("@"); std::string thost = p != std::string::npos ? std::string(host, p + 1) : host; - writeFile(fileName.string(), thost + " " + base64Decode(sshPublicHostKey) + "\n"); + writeFile(fileName.string(), thost + " " + sshPublicHostKey + "\n"); args.insert(args.end(), {"-oUserKnownHostsFile=" + fileName.string()}); } if (compress) diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh index 19b30e883..4097134d0 100644 --- a/src/libstore/ssh.hh +++ b/src/libstore/ssh.hh @@ -14,6 +14,9 @@ private: const std::string host; bool fakeSSH; const std::string keyFile; + /** + * Raw bytes, not Base64 encoding. + */ const std::string sshPublicHostKey; const bool useMaster; const bool compress; diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index ab2a8695d..748176d33 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -245,7 +245,12 @@ Hash::Hash(std::string_view rest, HashAlgorithm algo, bool isSRI) } else if (isSRI || rest.size() == base64Len()) { - auto d = base64Decode(rest); + std::string d; + try { + d = base64Decode(rest); + } catch (Error & e) { + e.addTrace({}, "While decoding hash '%s'", rest); + } if (d.size() != hashSize) throw BadHash("invalid %s hash '%s'", isSRI ? 
"SRI" : "base-64", rest); assert(hashSize); diff --git a/src/libutil/signature/local-keys.cc b/src/libutil/signature/local-keys.cc index 00c4543f2..70bcb5f33 100644 --- a/src/libutil/signature/local-keys.cc +++ b/src/libutil/signature/local-keys.cc @@ -14,17 +14,25 @@ BorrowedCryptoValue BorrowedCryptoValue::parse(std::string_view s) return {s.substr(0, colon), s.substr(colon + 1)}; } -Key::Key(std::string_view s) +Key::Key(std::string_view s, bool sensitiveValue) { auto ss = BorrowedCryptoValue::parse(s); name = ss.name; key = ss.payload; - if (name == "" || key == "") - throw Error("key is corrupt"); + try { + if (name == "" || key == "") + throw FormatError("key is corrupt"); - key = base64Decode(key); + key = base64Decode(key); + } catch (Error & e) { + std::string extra; + if (!sensitiveValue) + extra = fmt(" with raw value '%s'", key); + e.addTrace({}, "while decoding key named '%s'%s", name, extra); + throw; + } } std::string Key::to_string() const @@ -33,7 +41,7 @@ std::string Key::to_string() const } SecretKey::SecretKey(std::string_view s) - : Key(s) + : Key{s, true} { if (key.size() != crypto_sign_SECRETKEYBYTES) throw Error("secret key is not valid"); @@ -66,7 +74,7 @@ SecretKey SecretKey::generate(std::string_view name) } PublicKey::PublicKey(std::string_view s) - : Key(s) + : Key{s, false} { if (key.size() != crypto_sign_PUBLICKEYBYTES) throw Error("public key is not valid"); @@ -83,7 +91,12 @@ bool PublicKey::verifyDetached(std::string_view data, std::string_view sig) cons bool PublicKey::verifyDetachedAnon(std::string_view data, std::string_view sig) const { - auto sig2 = base64Decode(sig); + std::string sig2; + try { + sig2 = base64Decode(sig); + } catch (Error & e) { + e.addTrace({}, "while decoding signature '%s'", sig); + } if (sig2.size() != crypto_sign_BYTES) throw Error("signature is not valid"); diff --git a/src/libutil/signature/local-keys.hh b/src/libutil/signature/local-keys.hh index 4aafc1239..9977f0dac 100644 --- a/src/libutil/signature/local-keys.hh +++ b/src/libutil/signature/local-keys.hh @@ -31,15 +31,19 @@ struct Key std::string name; std::string key; - /** - * Construct Key from a string in the format - * ‘:’. - */ - Key(std::string_view s); - std::string to_string() const; protected: + + /** + * Construct Key from a string in the format + * ‘:’. + * + * @param sensitiveValue Avoid displaying the raw Base64 in error + * messages to avoid leaking private keys. + */ + Key(std::string_view s, bool sensitiveValue); + Key(std::string_view name, std::string && key) : name(name), key(std::move(key)) { } }; diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 698e181a1..7a79e4249 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -261,7 +261,7 @@ std::string base64Decode(std::string_view s) char digit = base64DecodeChars[(unsigned char) c]; if (digit == npos) - throw Error("invalid character in Base64 string: '%c'", c); + throw FormatError("invalid character in Base64 string: '%c'", c); bits += 6; d = d << 6 | digit; diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 877d15279..9fbc710cc 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -210,9 +210,13 @@ constexpr char treeNull[] = " "; /** - * Base64 encoding/decoding. + * Encode arbitrary bytes as Base64. */ std::string base64Encode(std::string_view s); + +/** + * Decode arbitrary bytes to Base64. 
+ */ std::string base64Decode(std::string_view s); diff --git a/tests/unit/libexpr/nix_api_expr.cc b/tests/unit/libexpr/nix_api_expr.cc index 8b97d6923..b37ac44b3 100644 --- a/tests/unit/libexpr/nix_api_expr.cc +++ b/tests/unit/libexpr/nix_api_expr.cc @@ -8,7 +8,7 @@ #include "tests/nix_api_expr.hh" #include "tests/string_callback.hh" -#include "gmock/gmock.h" +#include #include namespace nixC { From d4824c8ff7567e35760f211a52f7766947e52a9f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 23 Sep 2024 15:09:44 +0200 Subject: [PATCH 45/67] builtin:fetchurl: Enable TLS verification This is better for privacy and to avoid leaking netrc credentials in a MITM attack, but also the assumption that we check the hash no longer holds in some cases (in particular for impure derivations). Partially reverts https://github.com/NixOS/nix/commit/5db358d4d78aea7204a8f22c5bf2a309267ee038. (cherry picked from commit c04bc17a5a0fdcb725a11ef6541f94730112e7b6) --- src/libstore/builtins/fetchurl.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index b9dfeba2f..f33060c33 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -38,10 +38,7 @@ void builtinFetchurl( auto source = sinkToSource([&](Sink & sink) { - /* No need to do TLS verification, because we check the hash of - the result anyway. */ FileTransferRequest request(url); - request.verifyTLS = false; request.decompress = false; auto decompressor = makeDecompressionSink( From ee6a5faf4b39978adb3095970ac140a91ec896cc Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 24 Sep 2024 16:13:28 +0200 Subject: [PATCH 46/67] Add a test for builtin:fetchurl cert verification (cherry picked from commit f2f47fa725fc87bfb536de171a2ea81f2789c9fb) # Conflicts: # tests/nixos/default.nix --- tests/nixos/default.nix | 11 ++++++ tests/nixos/fetchurl.nix | 78 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 tests/nixos/fetchurl.nix diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index c0c7b42fd..7612ce5f9 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -146,4 +146,15 @@ in functional_root = runNixOSTestFor "x86_64-linux" ./functional/as-root.nix; user-sandboxing = runNixOSTestFor "x86_64-linux" ./user-sandboxing; +<<<<<<< HEAD +======= + + s3-binary-cache-store = runNixOSTestFor "x86_64-linux" ./s3-binary-cache-store.nix; + + fsync = runNixOSTestFor "x86_64-linux" ./fsync.nix; + + cgroups = runNixOSTestFor "x86_64-linux" ./cgroups; + + fetchurl = runNixOSTestFor "x86_64-linux" ./fetchurl.nix; +>>>>>>> f2f47fa72 (Add a test for builtin:fetchurl cert verification) } diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix new file mode 100644 index 000000000..476f779bc --- /dev/null +++ b/tests/nixos/fetchurl.nix @@ -0,0 +1,78 @@ +# Test whether builtin:fetchurl properly performs TLS certificate +# checks on HTTPS servers. + +{ lib, config, pkgs, ... }: + +let + + makeTlsCert = name: pkgs.runCommand name { + nativeBuildInputs = with pkgs; [ openssl ]; + } '' + mkdir -p $out + openssl req -x509 \ + -subj '/CN=${name}/' -days 49710 \ + -addext 'subjectAltName = DNS:${name}' \ + -keyout "$out/key.pem" -newkey ed25519 \ + -out "$out/cert.pem" -noenc + ''; + + goodCert = makeTlsCert "good"; + badCert = makeTlsCert "bad"; + +in + +{ + name = "nss-preload"; + + nodes = { + machine = { lib, pkgs, ... 
}: { + services.nginx = { + enable = true; + + virtualHosts."good" = { + addSSL = true; + sslCertificate = "${goodCert}/cert.pem"; + sslCertificateKey = "${goodCert}/key.pem"; + root = pkgs.runCommand "nginx-root" {} '' + mkdir "$out" + echo 'hello world' > "$out/index.html" + ''; + }; + + virtualHosts."bad" = { + addSSL = true; + sslCertificate = "${badCert}/cert.pem"; + sslCertificateKey = "${badCert}/key.pem"; + root = pkgs.runCommand "nginx-root" {} '' + mkdir "$out" + echo 'foobar' > "$out/index.html" + ''; + }; + }; + + security.pki.certificateFiles = [ "${goodCert}/cert.pem" ]; + + networking.hosts."127.0.0.1" = [ "good" "bad" ]; + + virtualisation.writableStore = true; + + nix.settings.experimental-features = "nix-command"; + }; + }; + + testScript = { nodes, ... }: '' + machine.wait_for_unit("nginx") + machine.wait_for_open_port(443) + + out = machine.succeed("curl https://good/index.html") + assert out == "hello world\n" + + # Fetching from a server with a trusted cert should work. + machine.succeed("nix build --no-substitute --expr 'import { url = \"https://good/index.html\"; hash = \"sha256-qUiQTy8PR5uPgZdpSzAYSw0u0cHNKh7A+4XSmaGSpEc=\"; }'") + + # Fetching from a server with an untrusted cert should fail. + err = machine.fail("nix build --no-substitute --expr 'import { url = \"https://bad/index.html\"; hash = \"sha256-rsBwZF/lPuOzdjBZN2E08FjMM3JHyXit0Xi2zN+wAZ8=\"; }' 2>&1") + print(err) + assert "SSL certificate problem: self-signed certificate" in err + ''; +} From 345a264a39a40e891587553d41db2989a36e2065 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 25 Sep 2024 22:33:50 +0200 Subject: [PATCH 47/67] Add release note (cherry picked from commit 7b39cd631e0d3c3d238015c6f450c59bbc9cbc5b) --- doc/manual/rl-next/verify-tls.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/manual/rl-next/verify-tls.md diff --git a/doc/manual/rl-next/verify-tls.md b/doc/manual/rl-next/verify-tls.md new file mode 100644 index 000000000..489941d5b --- /dev/null +++ b/doc/manual/rl-next/verify-tls.md @@ -0,0 +1,8 @@ +--- +synopsis: "`` uses TLS verification" +prs: [11585] +--- + +Previously `` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `` will now fail if you're fetching from a HTTPS server that does not have a valid certificate. + +`` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issues. From e87be60055fd17895f3d9713f837d73f85bcf48d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Sep 2024 00:15:04 +0200 Subject: [PATCH 48/67] Typo (cherry picked from commit ef8987955be337976ae229c44870cf6adc43bba5) --- doc/manual/rl-next/verify-tls.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/rl-next/verify-tls.md b/doc/manual/rl-next/verify-tls.md index 489941d5b..afc689f46 100644 --- a/doc/manual/rl-next/verify-tls.md +++ b/doc/manual/rl-next/verify-tls.md @@ -5,4 +5,4 @@ prs: [11585] Previously `` did not do TLS verification. 
This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `` will now fail if you're fetching from a HTTPS server that does not have a valid certificate. -`` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issues. +`` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue. From ba8159801770df18435de8f1cc63b3b523ab65ec Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Sep 2024 00:17:03 +0200 Subject: [PATCH 49/67] Resolve conflict --- tests/nixos/default.nix | 9 --------- 1 file changed, 9 deletions(-) diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 7612ce5f9..313dc2f3c 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -146,15 +146,6 @@ in functional_root = runNixOSTestFor "x86_64-linux" ./functional/as-root.nix; user-sandboxing = runNixOSTestFor "x86_64-linux" ./user-sandboxing; -<<<<<<< HEAD -======= - - s3-binary-cache-store = runNixOSTestFor "x86_64-linux" ./s3-binary-cache-store.nix; - - fsync = runNixOSTestFor "x86_64-linux" ./fsync.nix; - - cgroups = runNixOSTestFor "x86_64-linux" ./cgroups; fetchurl = runNixOSTestFor "x86_64-linux" ./fetchurl.nix; ->>>>>>> f2f47fa72 (Add a test for builtin:fetchurl cert verification) } From b23812a59c6854378f042e33f5e006c4d9dc516a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Sep 2024 03:25:40 +0200 Subject: [PATCH 50/67] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index 4ee8b9932..358c8e60e 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.24.8 +2.24.9 From 34fd00accce3d0f1efe12e89735542a707e6e89d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 24 Sep 2024 08:02:57 +0200 Subject: [PATCH 51/67] create git caches atomically When working on speeding up the CI, I triggered a race condition in the creation of the tarball cache. This code now instead will ensure that half-initialized repositories are no longer visible to any other nix process. 
This is the error message that I got before: error: opening Git repository '"/Users/runner/.cache/nix/tarball-cache"': could not find repository at '/Users/runner/.cache/nix/tarball-cache' (cherry picked from commit 12d5b2cfa1e77816abc9c7c6989afaead9723bbc) --- src/libfetchers/git-utils.cc | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 79ff6e7cd..e45590b80 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -159,6 +159,27 @@ static Object peelToTreeOrBlob(git_object * obj) return peelObject(obj, GIT_OBJECT_TREE); } +static void initRepoAtomically(std::filesystem::path &path, bool bare) { + if (pathExists(path.string())) return; + + Path tmpDir = createTempDir(std::filesystem::path(path).parent_path()); + AutoDelete delTmpDir(tmpDir, true); + Repository tmpRepo; + + if (git_repository_init(Setter(tmpRepo), tmpDir.c_str(), bare)) + throw Error("creating Git repository %s: %s", path, git_error_last()->message); + try { + std::filesystem::rename(tmpDir, path); + } catch (std::filesystem::filesystem_error & e) { + if (e.code() == std::errc::file_exists) // Someone might race us to create the repository. + return; + else + throw SysError("moving temporary git repository from %s to %s", tmpDir, path); + } + // we successfully moved the repository, so the temporary directory no longer exists. + delTmpDir.cancel(); +} + struct GitRepoImpl : GitRepo, std::enable_shared_from_this { /** Location of the repository on disk. */ @@ -170,13 +191,10 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this { initLibGit2(); - if (pathExists(path.string())) { - if (git_repository_open(Setter(repo), path.string().c_str())) - throw Error("opening Git repository '%s': %s", path, git_error_last()->message); - } else { - if (git_repository_init(Setter(repo), path.string().c_str(), bare)) - throw Error("creating Git repository '%s': %s", path, git_error_last()->message); - } + initRepoAtomically(path, bare); + if (git_repository_open(Setter(repo), path.string().c_str())) + throw Error("opening Git repository '%s': %s", path, git_error_last()->message); + } operator git_repository * () From 15a2b49115f2b8fcb6152afd7209e147d7042685 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Sep 2024 00:16:52 +0200 Subject: [PATCH 52/67] HttpBinaryCacheStore::getFile(): Fix uncaught exception This method is marked as `noexcept`, but `enqueueFileTransfer()` can throw `Interrupted` if the user has hit Ctrl-C or if the `ThreadPool` that the thread is a part of is shutting down. 
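A condensed sketch of the resulting shape (illustrative; the real method also
builds the request and treats `FileTransfer::NotFound` specially): in a
callback-style method marked `noexcept`, every call that can throw, including
the enqueue itself, has to sit inside the try block that forwards failures to
the callback. Otherwise the exception escapes the `noexcept` boundary and
std::terminate is called.

    void getFile(const std::string & path,
        Callback<std::optional<std::string>> callback) noexcept
    {
        try {
            checkEnabled();
            // enqueueFileTransfer() may throw Interrupted (Ctrl-C, or the
            // ThreadPool shutting down), so it must stay inside this try.
            // ... build the request and call enqueueFileTransfer(...) here ...
        } catch (...) {
            callback.rethrow();
        }
    }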
(cherry picked from commit 4566854981423ec36c1c7987ea2bcaba619b5d4e) --- src/libstore/http-binary-cache-store.cc | 37 +++++++++++++------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index b15ef4e4c..fc7ac2dea 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -169,28 +169,29 @@ protected: { try { checkEnabled(); + + auto request(makeRequest(path)); + + auto callbackPtr = std::make_shared(std::move(callback)); + + getFileTransfer()->enqueueFileTransfer(request, + {[callbackPtr, this](std::future result) { + try { + (*callbackPtr)(std::move(result.get().data)); + } catch (FileTransferError & e) { + if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden) + return (*callbackPtr)({}); + maybeDisable(); + callbackPtr->rethrow(); + } catch (...) { + callbackPtr->rethrow(); + } + }}); + } catch (...) { callback.rethrow(); return; } - - auto request(makeRequest(path)); - - auto callbackPtr = std::make_shared(std::move(callback)); - - getFileTransfer()->enqueueFileTransfer(request, - {[callbackPtr, this](std::future result) { - try { - (*callbackPtr)(std::move(result.get().data)); - } catch (FileTransferError & e) { - if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden) - return (*callbackPtr)({}); - maybeDisable(); - callbackPtr->rethrow(); - } catch (...) { - callbackPtr->rethrow(); - } - }}); } /** From a1d841bf2c387a805ebdd165f2511aff9f6e63ec Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sat, 28 Sep 2024 00:05:03 +0200 Subject: [PATCH 53/67] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index 358c8e60e..588b4a3cc 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.24.9 +2.24.10 From 742eb0f8159c2b22470ec7b6c5c0e9a99c008349 Mon Sep 17 00:00:00 2001 From: Puck Meerburg Date: Sat, 28 Sep 2024 16:54:39 +0200 Subject: [PATCH 54/67] fix passing CA files into builtins:fetchurl sandbox MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch has been manually adapted from https://github.com/lix-project/lix/commit/14dc84ed03f1b7e5a41bb6fdce00916faab32b60 Tested with: $ NIX_SSL_CERT_FILE=$(nix-build '' -A cacert)/etc/ssl/certs/ca-bundle.crt nix-build --store $(mktemp -d) -E 'import { url = https://google.com; }' Finished at 16:57:50 after 1s warning: found empty hash, assuming 'sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=' this derivation will be built: nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) google.com> building '/nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv' nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) google.com> 
error: nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) google.com> … writing file '/nix/store/0zynn4n8yx59bczy1mgh1lq2rnprvvrc-google.com' nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) google.com> nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) google.com> error: unable to download 'https://google.com': Problem with the SSL CA cert (path? access rights?) (77) error setting certificate file: /nix/store/nlgbippbbgn38hynjkp1ghiybcq1dqhx-nss-cacert-3.101.1/etc/ssl/certs/ca-bundle.crt nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) error: builder for '/nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv' failed with exit code 1 Now returns: nix-env % NIX_SSL_CERT_FILE=$(nix-build '' -A cacert)/etc/ssl/certs/ca-bundle.crt nix-build --store $(mktemp -d) -E 'import { url = https://google.com; }' Finished at 17:05:48 after 0s warning: found empty hash, assuming 'sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=' this derivation will be built: nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) google.com> building '/nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv' nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) nix-output-monitor error: DerivationReadError /nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv: openFile: does not exist (No such file or directory) error: hash mismatch in fixed-output derivation '/nix/store/4qljhy0jj2b0abjzpsbyarpia1bqylwc-google.com.drv': specified: sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= (cherry picked from commit c1ecf0bee973e620c9282bd71ddf1a5710968249) --- src/libstore/builtins.hh | 3 ++- src/libstore/builtins/fetchurl.cc | 6 +++++- .../unix/build/local-derivation-goal.cc | 21 ++++++++++++------- tests/nixos/fetchurl.nix | 6 ++++++ 4 files changed, 27 insertions(+), 9 deletions(-) diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh index 93558b49e..091946e01 100644 --- a/src/libstore/builtins.hh +++ b/src/libstore/builtins.hh @@ -9,7 +9,8 @@ namespace nix { void builtinFetchurl( const BasicDerivation & drv, const std::map & outputs, - const std::string & netrcData); + const std::string & netrcData, + const std::string & caFileData); void builtinUnpackChannel( const BasicDerivation & 
drv, diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index f33060c33..90e58dfdb 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -9,7 +9,8 @@ namespace nix { void builtinFetchurl( const BasicDerivation & drv, const std::map & outputs, - const std::string & netrcData) + const std::string & netrcData, + const std::string & caFileData) { /* Make the host's netrc data available. Too bad curl requires this to be stored in a file. It would be nice if we could just @@ -19,6 +20,9 @@ void builtinFetchurl( writeFile(settings.netrcFile, netrcData, 0600); } + settings.caFile = "ca-certificates.crt"; + writeFile(settings.caFile, caFileData, 0600); + auto out = get(drv.outputs, "out"); if (!out) throw Error("'builtin:fetchurl' requires an 'out' output"); diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc index c9a54bb0f..54ca69580 100644 --- a/src/libstore/unix/build/local-derivation-goal.cc +++ b/src/libstore/unix/build/local-derivation-goal.cc @@ -1746,13 +1746,20 @@ void LocalDerivationGoal::runChild() bool setUser = true; - /* Make the contents of netrc available to builtin:fetchurl - (which may run under a different uid and/or in a sandbox). */ + /* Make the contents of netrc and the CA certificate bundle + available to builtin:fetchurl (which may run under a + different uid and/or in a sandbox). */ std::string netrcData; - try { - if (drv->isBuiltin() && drv->builder == "builtin:fetchurl") - netrcData = readFile(settings.netrcFile); - } catch (SystemError &) { } + std::string caFileData; + if (drv->isBuiltin() && drv->builder == "builtin:fetchurl") { + try { + netrcData = readFile(settings.netrcFile); + } catch (SystemError &) { } + + try { + caFileData = readFile(settings.caFile); + } catch (SystemError &) { } + } #if __linux__ if (useChroot) { @@ -2191,7 +2198,7 @@ void LocalDerivationGoal::runChild() worker.store.printStorePath(scratchOutputs.at(e.first))); if (drv->builder == "builtin:fetchurl") - builtinFetchurl(*drv, outputs, netrcData); + builtinFetchurl(*drv, outputs, netrcData, caFileData); else if (drv->builder == "builtin:buildenv") builtinBuildenv(*drv, outputs); else if (drv->builder == "builtin:unpack-channel") diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix index 476f779bc..f873bf4b5 100644 --- a/tests/nixos/fetchurl.nix +++ b/tests/nixos/fetchurl.nix @@ -67,6 +67,9 @@ in out = machine.succeed("curl https://good/index.html") assert out == "hello world\n" + out = machine.succeed("cat ${badCert}/cert.pem > /tmp/cafile.pem; curl --cacert /tmp/cafile.pem https://bad/index.html") + assert out == "foobar\n" + # Fetching from a server with a trusted cert should work. machine.succeed("nix build --no-substitute --expr 'import { url = \"https://good/index.html\"; hash = \"sha256-qUiQTy8PR5uPgZdpSzAYSw0u0cHNKh7A+4XSmaGSpEc=\"; }'") @@ -74,5 +77,8 @@ in err = machine.fail("nix build --no-substitute --expr 'import { url = \"https://bad/index.html\"; hash = \"sha256-rsBwZF/lPuOzdjBZN2E08FjMM3JHyXit0Xi2zN+wAZ8=\"; }' 2>&1") print(err) assert "SSL certificate problem: self-signed certificate" in err + + # Fetching from a server with a trusted cert should work via environment variable override. 
+ machine.succeed("NIX_SSL_CERT_FILE=/tmp/cafile.pem nix build --no-substitute --expr 'import { url = \"https://bad/index.html\"; hash = \"sha256-rsBwZF/lPuOzdjBZN2E08FjMM3JHyXit0Xi2zN+wAZ8=\"; }'") ''; } From 5f1b132187651dddfc9435c5e0a83737d016c780 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Sat, 28 Sep 2024 17:06:10 +0200 Subject: [PATCH 55/67] tests/nixos/fetchurl: drop unused variables (cherry picked from commit 410853ddcf91910bd4db7421b3df756e25a4fbbd) --- tests/nixos/fetchurl.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix index f873bf4b5..243c0cacc 100644 --- a/tests/nixos/fetchurl.nix +++ b/tests/nixos/fetchurl.nix @@ -1,7 +1,7 @@ # Test whether builtin:fetchurl properly performs TLS certificate # checks on HTTPS servers. -{ lib, config, pkgs, ... }: +{ pkgs, ... }: let @@ -25,7 +25,7 @@ in name = "nss-preload"; nodes = { - machine = { lib, pkgs, ... }: { + machine = { pkgs, ... }: { services.nginx = { enable = true; @@ -60,7 +60,7 @@ in }; }; - testScript = { nodes, ... }: '' + testScript = '' machine.wait_for_unit("nginx") machine.wait_for_open_port(443) From d80bf54e3b61b296a8944e2c95088c37661b0deb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 5 Aug 2024 11:38:38 +0200 Subject: [PATCH 56/67] Add a VM test for S3BinaryCacheStore Fixes #11238. (cherry picked from commit 2950f9e18af1bd57b566b8c0b4df71022edb3b80) --- tests/nixos/default.nix | 2 + tests/nixos/nix-copy-closure.nix | 2 +- tests/nixos/s3-binary-cache-store.nix | 63 +++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 tests/nixos/s3-binary-cache-store.nix diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 313dc2f3c..e79bb59b8 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -148,4 +148,6 @@ in user-sandboxing = runNixOSTestFor "x86_64-linux" ./user-sandboxing; fetchurl = runNixOSTestFor "x86_64-linux" ./fetchurl.nix; + + s3-binary-cache-store = runNixOSTestFor "x86_64-linux" ./s3-binary-cache-store.nix; } diff --git a/tests/nixos/nix-copy-closure.nix b/tests/nixos/nix-copy-closure.nix index 66cbfb033..b9daa0a1f 100644 --- a/tests/nixos/nix-copy-closure.nix +++ b/tests/nixos/nix-copy-closure.nix @@ -1,6 +1,6 @@ # Test ‘nix-copy-closure’. -{ lib, config, nixpkgs, hostPkgs, ... }: +{ lib, config, nixpkgs, ... }: let pkgs = config.nodes.client.nixpkgs.pkgs; diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix new file mode 100644 index 000000000..015457968 --- /dev/null +++ b/tests/nixos/s3-binary-cache-store.nix @@ -0,0 +1,63 @@ +{ lib, config, nixpkgs, ... }: + +let + pkgs = config.nodes.client.nixpkgs.pkgs; + + pkgA = pkgs.cowsay; + + accessKey = "BKIKJAA5BMMU2RHO6IBB"; + secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; + env = "AWS_ACCESS_KEY_ID=${accessKey} AWS_SECRET_ACCESS_KEY=${secretKey}"; + + storeUrl = "s3://my-cache?endpoint=http://server:9000®ion=eu-west-1"; + +in { + name = "nix-copy-closure"; + + nodes = + { server = + { config, lib, pkgs, ... 
}: + { virtualisation.writableStore = true; + virtualisation.additionalPaths = [ pkgA ]; + environment.systemPackages = [ pkgs.minio-client ]; + nix.extraOptions = "experimental-features = nix-command"; + services.minio = { + enable = true; + region = "eu-west-1"; + rootCredentialsFile = pkgs.writeText "minio-credentials-full" '' + MINIO_ROOT_USER=${accessKey} + MINIO_ROOT_PASSWORD=${secretKey} + ''; + }; + networking.firewall.allowedTCPPorts = [ 9000 ]; + }; + + client = + { config, pkgs, ... }: + { virtualisation.writableStore = true; + nix.extraOptions = "experimental-features = nix-command"; + }; + }; + + testScript = { nodes }: '' + # fmt: off + start_all() + + # Create a binary cache. + server.wait_for_unit("minio") + + server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4") + server.succeed("mc mb minio/my-cache") + + server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}") + + # Copy a package from the binary cache. + client.fail("nix path-info ${pkgA}") + + client.succeed("${env} nix store info --store '${storeUrl}' >&2") + + client.succeed("${env} nix copy --no-check-sigs --from '${storeUrl}' ${pkgA}") + + client.succeed("nix path-info ${pkgA}") + ''; +} From 4912a9e7fdd69b9b66437a94a86eb04789f2fd12 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 11 Oct 2024 14:31:15 +0200 Subject: [PATCH 57/67] builtins.fetchurl: Fix segfault on s3:// URLs Also, add an activity to show that we're downloading an s3:// file. Fixes #11674. (cherry picked from commit 0500fba56a02c3c8458d257b6ea24af1c81c8b9e) --- src/libstore/filetransfer.cc | 5 +++++ tests/nixos/s3-binary-cache-store.nix | 3 +++ 2 files changed, 8 insertions(+) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 5ea8b6f96..b84210805 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -754,12 +754,17 @@ struct curlFileTransfer : public FileTransfer S3Helper s3Helper(profile, region, scheme, endpoint); + Activity act(*logger, lvlTalkative, actFileTransfer, + fmt("downloading '%s'", request.uri), + {request.uri}, request.parentAct); + // FIXME: implement ETag auto s3Res = s3Helper.getObject(bucketName, key); FileTransferResult res; if (!s3Res.data) throw FileTransferError(NotFound, "S3 object '%s' does not exist", request.uri); res.data = std::move(*s3Res.data); + res.urls.push_back(request.uri); callback(std::move(res)); #else throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri); diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 015457968..6ae2e3572 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -51,6 +51,9 @@ in { server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}") + # Test fetchurl on s3:// URLs while we're at it. + client.succeed("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"s3://my-cache/nix-cache-info?endpoint=http://server:9000®ion=eu-west-1\"; }'") + # Copy a package from the binary cache. 
client.fail("nix path-info ${pkgA}") From 339236d32ef337cdc5fb3e1e964f7ee92d7141f6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 11 Oct 2024 14:55:22 +0200 Subject: [PATCH 58/67] Make S3 downloads slightly more interruptable (cherry picked from commit d38f62f64d389cb4e9a582d89aa3f8a50fb3c074) --- src/libstore/s3-binary-cache-store.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 21175b1eb..bcbf0b55e 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -9,6 +9,7 @@ #include "globals.hh" #include "compression.hh" #include "filetransfer.hh" +#include "signals.hh" #include #include @@ -117,6 +118,7 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy { bool ShouldRetry(const Aws::Client::AWSError& error, long attemptedRetries) const override { + checkInterrupt(); auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries); if (retry) printError("AWS error '%s' (%s), will retry in %d ms", From 1294442c6cc6a2ee883f9dd932ad5139f5b35a92 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 14 Oct 2024 13:15:55 +0200 Subject: [PATCH 59/67] Add assert (cherry picked from commit d2f4d076195f048146fa64916283a524f6820380) --- src/libfetchers/tarball.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index dd4f3b780..52ba73f62 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -90,6 +90,7 @@ DownloadFileResult downloadFile( /* Cache metadata for all URLs in the redirect chain. */ for (auto & url : res.urls) { key.second.insert_or_assign("url", url); + assert(!res.urls.empty()); infoAttrs.insert_or_assign("url", *res.urls.rbegin()); getCache()->upsert(key, *store, infoAttrs, *storePath); } From 9da1300617891a5f71e7ec5d8380aaa1e4cf2240 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 14 Oct 2024 13:53:54 +0200 Subject: [PATCH 60/67] Handle tarballs where directory entries are not contiguous I.e. when not all entries underneath a directory X follow eachother, but there is some entry Y that isn't a child of X in between. Fixes #11656. (cherry picked from commit 4012954b596b725dd61d49668691a69d491120c3) --- src/libfetchers/git-utils.cc | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index e45590b80..6efb453ec 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -855,8 +855,24 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink void pushBuilder(std::string name) { + const git_tree_entry * entry; + Tree prevTree = nullptr; + + if (!pendingDirs.empty() && + (entry = git_treebuilder_get(pendingDirs.back().builder.get(), name.c_str()))) + { + /* Clone a tree that we've already finished. This happens + if a tarball has directory entries that are not + contiguous. 
*/
+            if (git_tree_entry_type(entry) != GIT_OBJECT_TREE)
+                throw Error("parent of '%s' is not a directory", name);
+
+            if (git_tree_entry_to_object((git_object * *) (git_tree * *) Setter(prevTree), *repo, entry))
+                throw Error("looking up parent of '%s': %s", name, git_error_last()->message);
+        }
+
         git_treebuilder * b;
-        if (git_treebuilder_new(&b, *repo, nullptr))
+        if (git_treebuilder_new(&b, *repo, prevTree.get()))
             throw Error("creating a tree builder: %s", git_error_last()->message);
         pendingDirs.push_back({ .name = std::move(name), .builder = TreeBuilder(b) });
     };

From 57ace600af864f2d06bdf7391de316a26827047a Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Mon, 14 Oct 2024 14:10:36 +0200
Subject: [PATCH 61/67] Add a test

(cherry picked from commit a7b9877da9d1bdafcc9b2f4681ecb3a1b83de7fc)
---
 tests/functional/tarball.sh | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh
index 4d8945625..a1e0f9cb0 100755
--- a/tests/functional/tarball.sh
+++ b/tests/functional/tarball.sh
@@ -100,3 +100,17 @@ chmod +x "$TEST_ROOT/tar_root/foo"
 tar cvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" .
 path="$(nix flake prefetch --refresh --json "tarball+file://$TEST_ROOT/tar.tar" | jq -r .storePath)"
 [[ $(cat "$path/foo") = bar ]]
+
+# Test a tarball with non-contiguous directory entries.
+rm -rf "$TEST_ROOT/tar_root"
+mkdir -p "$TEST_ROOT/tar_root/a/b"
+echo foo > "$TEST_ROOT/tar_root/a/b/foo"
+echo bla > "$TEST_ROOT/tar_root/bla"
+tar cvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" .
+echo abc > "$TEST_ROOT/tar_root/bla"
+echo xyzzy > "$TEST_ROOT/tar_root/a/b/xyzzy"
+tar rvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" ./a/b/xyzzy ./bla
+path="$(nix flake prefetch --refresh --json "tarball+file://$TEST_ROOT/tar.tar" | jq -r .storePath)"
+[[ $(cat "$path/a/b/xyzzy") = xyzzy ]]
+[[ $(cat "$path/a/b/foo") = foo ]]
+[[ $(cat "$path/bla") = abc ]]

From 0e9b04a66ed4ea5f097a6ba0489a01d9f08e891a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Fri, 18 Oct 2024 12:03:33 +0300
Subject: [PATCH 62/67] fix env-vars being written to `/tmp`

This overall seems like insecure tmp file handling to me, because other
users could replace files in /tmp with a symlink and make nix-shell
overwrite other files.

fixes https://github.com/NixOS/nix/issues/11470

(cherry picked from commit 2105574702b582578c43b551cfe8905715211f03)
---
 src/nix-build/nix-build.cc    | 17 +++++------------
 tests/functional/nix-shell.sh |  9 +++++++++
 2 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index a5b9e1e54..5346641eb 100644
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -526,8 +526,6 @@ static void main_nix_build(int argc, char * * argv)
     // Set the environment.
auto env = getEnv(); - auto tmp = getEnvNonEmpty("TMPDIR").value_or("/tmp"); - if (pure) { decltype(env) newEnv; for (auto & i : env) @@ -538,18 +536,16 @@ static void main_nix_build(int argc, char * * argv) env["__ETC_PROFILE_SOURCED"] = "1"; } - env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmp; + env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDir.path(); env["NIX_STORE"] = store->storeDir; env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores); auto passAsFile = tokenizeString(getOr(drv.env, "passAsFile", "")); - bool keepTmp = false; int fileNr = 0; for (auto & var : drv.env) if (passAsFile.count(var.first)) { - keepTmp = true; auto fn = ".attr-" + std::to_string(fileNr++); Path p = (tmpDir.path() / fn).string(); writeFile(p, var.second); @@ -591,7 +587,6 @@ static void main_nix_build(int argc, char * * argv) env["NIX_ATTRS_SH_FILE"] = attrsSH; env["NIX_ATTRS_JSON_FILE"] = attrsJSON; - keepTmp = true; } } @@ -601,12 +596,10 @@ static void main_nix_build(int argc, char * * argv) lose the current $PATH directories. */ auto rcfile = (tmpDir.path() / "rc").string(); std::string rc = fmt( - R"(_nix_shell_clean_tmpdir() { command rm -rf %1%; }; )"s + - (keepTmp ? - "trap _nix_shell_clean_tmpdir EXIT; " - "exitHooks+=(_nix_shell_clean_tmpdir); " - "failureHooks+=(_nix_shell_clean_tmpdir); ": - "_nix_shell_clean_tmpdir; ") + + (R"(_nix_shell_clean_tmpdir() { command rm -rf %1%; };)"s + "trap _nix_shell_clean_tmpdir EXIT; " + "exitHooks+=(_nix_shell_clean_tmpdir); " + "failureHooks+=(_nix_shell_clean_tmpdir); ") + (pure ? "" : "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;") + "%2%" // always clear PATH. diff --git a/tests/functional/nix-shell.sh b/tests/functional/nix-shell.sh index b9625eb66..b14e3dc6a 100755 --- a/tests/functional/nix-shell.sh +++ b/tests/functional/nix-shell.sh @@ -31,6 +31,15 @@ output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv [ "$output" = " - foo - bar - baz" ] +# test NIX_BUILD_TOP +testTmpDir=$(pwd)/nix-shell +mkdir -p "$testTmpDir" +output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run 'echo $NIX_BUILD_TOP') +[[ "$output" =~ ${testTmpDir}.* ]] || { + echo "expected $output =~ ${testTmpDir}.*" >&2 + exit 1 +} + # Test nix-shell on a .drv [[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]] From d6ece7e94aa4253f8c32e81707d87f4280587e6d Mon Sep 17 00:00:00 2001 From: Artemis Tosini Date: Thu, 24 Oct 2024 21:24:47 +0000 Subject: [PATCH 63/67] Fix OpenBSD build with Makefiles OpenBSD dynamic libraries never link to libc directly. Instead, they have undefined symbols for all libc functions they use that ld.so resolves to the libc referred to in the main executable. 
Thus, disallowing undefined symbols will always fail.

(cherry picked from commit c49bff2434971d693b03525622082a81b5ed75eb)
---
 mk/libraries.mk | 4 +++-
 mk/platform.mk  | 4 ++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/mk/libraries.mk b/mk/libraries.mk
index b99ba2782..a7848ba35 100644
--- a/mk/libraries.mk
+++ b/mk/libraries.mk
@@ -86,7 +86,9 @@ define build-library
   else
     ifndef HOST_DARWIN
       ifndef HOST_WINDOWS
-        $(1)_LDFLAGS += -Wl,-z,defs
+        ifndef HOST_OPENBSD
+          $(1)_LDFLAGS += -Wl,-z,defs
+        endif
       endif
     endif
   endif
diff --git a/mk/platform.mk b/mk/platform.mk
index 22c114a20..3c4fff780 100644
--- a/mk/platform.mk
+++ b/mk/platform.mk
@@ -21,6 +21,10 @@ ifdef HOST_OS
     HOST_NETBSD = 1
     HOST_UNIX = 1
   endif
+  ifeq ($(patsubst openbsd%,,$(HOST_KERNEL)),)
+    HOST_OPENBSD = 1
+    HOST_UNIX = 1
+  endif
   ifeq ($(HOST_KERNEL), linux)
     HOST_LINUX = 1
     HOST_UNIX = 1

From 0ae90918db12f7cf20f40216460c8eba91004a78 Mon Sep 17 00:00:00 2001
From: Artemis Tosini
Date: Sat, 26 Oct 2024 16:46:32 +0000
Subject: [PATCH 64/67] package.nix: Disable GC on OpenBSD

Nix fails to build on OpenBSD with a linking error due to an unresolved
symbol in boehm-gc. Just disable the GC until we can find a proper
workaround.

(cherry picked from commit fecc1ca2055ee590d8b957830f70512fcecbfe4b)
---
 package.nix | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/package.nix b/package.nix
index a7c8923e8..e1b4aebb0 100644
--- a/package.nix
+++ b/package.nix
@@ -75,7 +75,9 @@
 #
 # Temporarily disabled on Windows because the `GC_throw_bad_alloc`
 # symbol is missing during linking.
-, enableGC ? !stdenv.hostPlatform.isWindows
+#
+# Disabled on OpenBSD because of missing `_data_start` symbol while linking
+, enableGC ? !stdenv.hostPlatform.isWindows && !stdenv.hostPlatform.isOpenBSD

 # Whether to enable Markdown rendering in the Nix binary.
 , enableMarkdown ? !stdenv.hostPlatform.isWindows

From 803943fce4c9b4825d1b962d9b338ddf7e30074d Mon Sep 17 00:00:00 2001
From: Artemis Tosini
Date: Sat, 26 Oct 2024 17:12:06 +0000
Subject: [PATCH 65/67] Add support for `utimensat` as an alternative to `lutimes`

OpenBSD doesn't support `lutimes`, but does support `utimensat`, which
subsumes it. In fact, all the BSDs, Linux, and newer macOS all support
it. So let's make this our first choice for the implementation.

In addition, let's get rid of the `lutimes` `ENOSYS` special case. The
Linux manpage says

> ENOSYS
>
> The kernel does not support this call; Linux 2.6.22 or later is
> required.

which I think is the origin of this check, but that's a very old version
of Linux at this point. The code can be simplified a lot if we drop
support for it here (as we've done elsewhere, anyways).

Co-Authored-By: John Ericson

(cherry picked from commit d0232028111ce4f5a066d9a302fec142ebe91037)
---
 configure.ac               |  7 ++--
 src/libutil/file-system.cc | 70 +++++++++++++++++++-------------------
 src/libutil/meson.build    |  4 +++
 3 files changed, 43 insertions(+), 38 deletions(-)

diff --git a/configure.ac b/configure.ac
index 5c22ed176..dd33dbe11 100644
--- a/configure.ac
+++ b/configure.ac
@@ -89,9 +89,10 @@ AC_LANG_POP(C++)

 AC_CHECK_FUNCS([statvfs pipe2])

-# Check for lutimes, optionally used for changing the mtime of
-# symlinks.
-AC_CHECK_FUNCS([lutimes])
+# Check for lutimes and utimensat, optionally used for changing the
+# mtime of symlinks.
+AC_CHECK_DECLS([AT_SYMLINK_NOFOLLOW], [], [], [[#include <fcntl.h>]])
+AC_CHECK_FUNCS([lutimes utimensat])

 # Check whether the store optimiser can optimise symlinks.
diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc index 060a806fb..04e4369fa 100644 --- a/src/libutil/file-system.cc +++ b/src/libutil/file-system.cc @@ -574,7 +574,28 @@ void setWriteTime( time_t modificationTime, std::optional optIsSymlink) { -#ifndef _WIN32 +#ifdef _WIN32 + // FIXME use `fs::last_write_time`. + // + // Would be nice to use std::filesystem unconditionally, but + // doesn't support access time just modification time. + // + // System clock vs File clock issues also make that annoying. + warn("Changing file times is not yet implemented on Windows, path is '%s'", path); +#elif HAVE_UTIMENSAT && HAVE_DECL_AT_SYMLINK_NOFOLLOW + struct timespec times[2] = { + { + .tv_sec = accessedTime, + .tv_nsec = 0, + }, + { + .tv_sec = modificationTime, + .tv_nsec = 0, + }, + }; + if (utimensat(AT_FDCWD, path.c_str(), times, AT_SYMLINK_NOFOLLOW) == -1) + throw SysError("changing modification time of '%s' (using `utimensat`)", path); +#else struct timeval times[2] = { { .tv_sec = accessedTime, @@ -585,42 +606,21 @@ void setWriteTime( .tv_usec = 0, }, }; -#endif - - auto nonSymlink = [&]{ - bool isSymlink = optIsSymlink - ? *optIsSymlink - : fs::is_symlink(path); - - if (!isSymlink) { -#ifdef _WIN32 - // FIXME use `fs::last_write_time`. - // - // Would be nice to use std::filesystem unconditionally, but - // doesn't support access time just modification time. - // - // System clock vs File clock issues also make that annoying. - warn("Changing file times is not yet implemented on Windows, path is '%s'", path); -#else - if (utimes(path.c_str(), times) == -1) { - - throw SysError("changing modification time of '%s' (not a symlink)", path); - } -#endif - } else { - throw Error("Cannot modification time of symlink '%s'", path); - } - }; - #if HAVE_LUTIMES - if (lutimes(path.c_str(), times) == -1) { - if (errno == ENOSYS) - nonSymlink(); - else - throw SysError("changing modification time of '%s'", path); - } + if (lutimes(path.c_str(), times) == -1) + throw SysError("changing modification time of '%s'", path); #else - nonSymlink(); + bool isSymlink = optIsSymlink + ? *optIsSymlink + : fs::is_symlink(path); + + if (!isSymlink) { + if (utimes(path.c_str(), times) == -1) + throw SysError("changing modification time of '%s' (not a symlink)", path); + } else { + throw Error("Cannot modification time of symlink '%s'", path); + } +#endif #endif } diff --git a/src/libutil/meson.build b/src/libutil/meson.build index 8552c4c9d..cba5a5288 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -41,6 +41,8 @@ check_funcs = [ # Optionally used to try to close more file descriptors (e.g. before # forking) on Unix. 'sysconf', + # Optionally used for changing the mtime of files and symlinks. 
+ 'utimensat', ] foreach funcspec : check_funcs define_name = 'HAVE_' + funcspec.underscorify().to_upper() @@ -48,6 +50,8 @@ foreach funcspec : check_funcs configdata.set(define_name, define_value) endforeach +configdata.set('HAVE_DECL_AT_SYMLINK_NOFOLLOW', cxx.has_header_symbol('fcntl.h', 'AT_SYMLINK_NOFOLLOW').to_int()) + subdir('build-utils-meson/threads') if host_machine.system() == 'windows' From d9284d246ced1ccdcd358ecdc9c6c9153ecaa264 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 11 Jul 2024 16:21:27 +0200 Subject: [PATCH 66/67] Remove unused InstallableFlake::getFlakeOutputs() (cherry picked from commit 87323a5689f4789d9fc25271a16ba57c57f76392) --- src/libcmd/installable-flake.cc | 14 -------------- src/libcmd/installable-flake.hh | 2 -- 2 files changed, 16 deletions(-) diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 852a5618e..8796ad5ba 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -43,20 +43,6 @@ std::vector InstallableFlake::getActualAttrPaths() return res; } -Value * InstallableFlake::getFlakeOutputs(EvalState & state, const flake::LockedFlake & lockedFlake) -{ - auto vFlake = state.allocValue(); - - callFlake(state, lockedFlake, *vFlake); - - auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs")); - assert(aOutputs); - - state.forceValue(*aOutputs->value, aOutputs->value->determinePos(noPos)); - - return aOutputs->value; -} - static std::string showAttrPaths(const std::vector & paths) { std::string s; diff --git a/src/libcmd/installable-flake.hh b/src/libcmd/installable-flake.hh index b0d6f5afc..8e0a232ef 100644 --- a/src/libcmd/installable-flake.hh +++ b/src/libcmd/installable-flake.hh @@ -53,8 +53,6 @@ struct InstallableFlake : InstallableValue std::vector getActualAttrPaths(); - Value * getFlakeOutputs(EvalState & state, const flake::LockedFlake & lockedFlake); - DerivedPathsWithInfo toDerivedPaths() override; std::pair toValue(EvalState & state) override; From f9180f12c4ca28e224db7f7efbc9600b2e25da8a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 30 Oct 2024 15:30:29 +0100 Subject: [PATCH 67/67] release notes: 2.24.9 --- doc/manual/rl-next/filesystem-errors.md | 14 -------------- doc/manual/rl-next/verify-tls.md | 8 -------- doc/manual/src/release-notes/rl-2.24.md | 15 +++++++++++++++ 3 files changed, 15 insertions(+), 22 deletions(-) delete mode 100644 doc/manual/rl-next/filesystem-errors.md delete mode 100644 doc/manual/rl-next/verify-tls.md diff --git a/doc/manual/rl-next/filesystem-errors.md b/doc/manual/rl-next/filesystem-errors.md deleted file mode 100644 index faa9352b9..000000000 --- a/doc/manual/rl-next/filesystem-errors.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -synopsis: wrap filesystem exceptions more correctly -issues: [] -prs: [11378] ---- - - -With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions. - -This lead to no longer generating error traces, for example when listing a non-existing directory. - -This version catches these types of exception correctly and wrap them into Nix's own exeception type. 
-
-Author: [**@Mic92**](https://github.com/Mic92)
diff --git a/doc/manual/rl-next/verify-tls.md b/doc/manual/rl-next/verify-tls.md
deleted file mode 100644
index afc689f46..000000000
--- a/doc/manual/rl-next/verify-tls.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-synopsis: "`<nix/fetchurl.nix>` uses TLS verification"
-prs: [11585]
----
-
-Previously `<nix/fetchurl.nix>` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `<nix/fetchurl.nix>` will now fail if you're fetching from a HTTPS server that does not have a valid certificate.
-
-`<nix/fetchurl.nix>` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue.
diff --git a/doc/manual/src/release-notes/rl-2.24.md b/doc/manual/src/release-notes/rl-2.24.md
index 5bcc1d79c..38358d728 100644
--- a/doc/manual/src/release-notes/rl-2.24.md
+++ b/doc/manual/src/release-notes/rl-2.24.md
@@ -274,6 +274,21 @@
   be configured using the `warn-large-path-threshold` setting, e.g.
   `--warn-large-path-threshold 100M`.

+- Wrap filesystem exceptions more correctly [#11378](https://github.com/NixOS/nix/pull/11378)
+
+  With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions.
+
+  This led to no longer generating error traces, for example when listing a non-existing directory.
+
+  This version catches these types of exception correctly and wraps them into Nix's own exception type.
+
+  Author: [**@Mic92**](https://github.com/Mic92)
+
+- `<nix/fetchurl.nix>` uses TLS verification [#11585](https://github.com/NixOS/nix/pull/11585)
+
+  Previously `<nix/fetchurl.nix>` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `<nix/fetchurl.nix>` will now fail if you're fetching from a HTTPS server that does not have a valid certificate.
+
+  `<nix/fetchurl.nix>` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue.

 # Contributors