From db680e0e5751828e788bff4becd89886bde71480 Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Thu, 8 Jun 2023 02:00:05 +0200 Subject: [PATCH 01/16] refine wording on the purpose of the Nix language packages and configurations are not really a concept in Nix or the Nix language. the idea of transforming files into other files clearly captures what it's all about, and the new phrasing should make the term "derivation" more obvious both in terms of meaning and origin. --- doc/manual/src/language/index.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/manual/src/language/index.md b/doc/manual/src/language/index.md index 3eabe1a02..69ebc70c0 100644 --- a/doc/manual/src/language/index.md +++ b/doc/manual/src/language/index.md @@ -4,9 +4,7 @@ The Nix language is - *domain-specific* - It only exists for the Nix package manager: - to describe packages and configurations as well as their variants and compositions. - It is not intended for general purpose use. + Its purpose is to conveniently create and compose precise descriptions of how contents of existing files are used to derive new files. - *declarative* @@ -25,7 +23,7 @@ The Nix language is - *lazy* - Expressions are only evaluated when their value is needed. + Values are only computed when they are needed. - *dynamically typed* From e54538c461e993827d9fbe3b8883d3887f184798 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 9 Jun 2023 16:09:29 +0200 Subject: [PATCH 02/16] restoreMountNamespace(): Restore the original root directory This is necessary when we're in a chroot environment, where the process root is not the same as the root of the mount namespace (e.g. in nixos-enter). Fixes #7602. --- src/libutil/util.cc | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index aa0a154fd..26f9dc8a8 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -1853,6 +1853,7 @@ void setStackSize(size_t stackSize) #if __linux__ static AutoCloseFD fdSavedMountNamespace; +static AutoCloseFD fdSavedRoot; #endif void saveMountNamespace() @@ -1860,10 +1861,11 @@ void saveMountNamespace() #if __linux__ static std::once_flag done; std::call_once(done, []() { - AutoCloseFD fd = open("/proc/self/ns/mnt", O_RDONLY); - if (!fd) + fdSavedMountNamespace = open("/proc/self/ns/mnt", O_RDONLY); + if (!fdSavedMountNamespace) throw SysError("saving parent mount namespace"); - fdSavedMountNamespace = std::move(fd); + + fdSavedRoot = open("/proc/self/root", O_RDONLY); }); #endif } @@ -1876,9 +1878,16 @@ void restoreMountNamespace() if (fdSavedMountNamespace && setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1) throw SysError("restoring parent mount namespace"); - if (chdir(savedCwd.c_str()) == -1) { - throw SysError("restoring cwd"); + + if (fdSavedRoot) { + if (fchdir(fdSavedRoot.get())) + throw SysError("chdir into saved root"); + if (chroot(".")) + throw SysError("chroot into saved root"); } + + if (chdir(savedCwd.c_str()) == -1) + throw SysError("restoring cwd"); } catch (Error & e) { debug(e.msg()); } From 3402b650cdce318e42acd4dc34f42423cafef587 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 7 Jun 2023 15:06:12 +0200 Subject: [PATCH 03/16] Add a generic check for rev attribute mismatches --- src/libfetchers/fetchers.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 91db3a9eb..2860c1ceb 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ 
-159,6 +159,12 @@ std::pair Input::fetch(ref store) const input.to_string(), *prevLastModified); } + if (auto prevRev = getRev()) { + if (input.getRev() != prevRev) + throw Error("'rev' attribute mismatch in input '%s', expected %s", + input.to_string(), prevRev->gitRev()); + } + if (auto prevRevCount = getRevCount()) { if (input.getRevCount() != prevRevCount) throw Error("'revCount' attribute mismatch in input '%s', expected %d", From 1ad3328c5efea041990fa82e6ad24ae2b4e81c24 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 7 Jun 2023 14:26:30 +0200 Subject: [PATCH 04/16] Allow tarball URLs to redirect to a lockable immutable URL Previously, for tarball flakes, we recorded the original URL of the tarball flake, rather than the URL to which it ultimately redirects. Thus, a flake URL like http://example.org/patchelf-latest.tar that redirects to http://example.org/patchelf-.tar was not really usable. We couldn't record the redirected URL, because sites like GitHub redirect to CDN URLs that we can't rely on to be stable. So now we use the redirected URL only if the server returns the `x-nix-is-immutable` or `x-amz-meta-nix-is-immutable` headers in its response. --- flake.nix | 2 + src/libcmd/common-eval-args.cc | 2 +- src/libexpr/parser.y | 2 +- src/libexpr/primops/fetchTree.cc | 2 +- src/libfetchers/attrs.hh | 1 + src/libfetchers/fetchers.hh | 10 +++- src/libfetchers/github.cc | 10 ++-- src/libfetchers/tarball.cc | 68 +++++++++++++++++++------- src/libstore/filetransfer.cc | 22 +++++++-- src/libstore/filetransfer.hh | 4 ++ tests/nixos/tarball-flakes.nix | 84 ++++++++++++++++++++++++++++++++ 11 files changed, 177 insertions(+), 30 deletions(-) create mode 100644 tests/nixos/tarball-flakes.nix diff --git a/flake.nix b/flake.nix index a4ee80b32..bdbf54169 100644 --- a/flake.nix +++ b/flake.nix @@ -590,6 +590,8 @@ tests.sourcehutFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/sourcehut-flakes.nix; + tests.tarballFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/tarball-flakes.nix; + tests.containers = runNixOSTestFor "x86_64-linux" ./tests/nixos/containers/containers.nix; tests.setuid = lib.genAttrs diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index ff3abd534..7f97364a1 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -165,7 +165,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s) { if (EvalSettings::isPseudoUrl(s)) { auto storePath = fetchers::downloadTarball( - state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath; + state.store, EvalSettings::resolvePseudoUrl(s), "source", false).tree.storePath; return state.rootPath(CanonPath(state.store->toRealPath(storePath))); } diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 4d981712a..3b545fd84 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -793,7 +793,7 @@ std::pair EvalState::resolveSearchPathElem(const SearchPathEl if (EvalSettings::isPseudoUrl(elem.second)) { try { auto storePath = fetchers::downloadTarball( - store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).first.storePath; + store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).tree.storePath; res = { true, store->toRealPath(storePath) }; } catch (FileTransferError & e) { logWarning({ diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index fe880aaa8..b34a46fa0 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -262,7 +262,7 @@ static void 
fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v // https://github.com/NixOS/nix/issues/4313 auto storePath = unpack - ? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).first.storePath + ? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).tree.storePath : fetchers::downloadFile(state.store, *url, name, (bool) expectedHash).storePath; if (expectedHash) { diff --git a/src/libfetchers/attrs.hh b/src/libfetchers/attrs.hh index 1a14bb023..9f885a793 100644 --- a/src/libfetchers/attrs.hh +++ b/src/libfetchers/attrs.hh @@ -2,6 +2,7 @@ ///@file #include "types.hh" +#include "hash.hh" #include diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh index 498ad7e4d..d0738f619 100644 --- a/src/libfetchers/fetchers.hh +++ b/src/libfetchers/fetchers.hh @@ -158,6 +158,7 @@ struct DownloadFileResult StorePath storePath; std::string etag; std::string effectiveUrl; + std::optional immutableUrl; }; DownloadFileResult downloadFile( @@ -167,7 +168,14 @@ DownloadFileResult downloadFile( bool locked, const Headers & headers = {}); -std::pair downloadTarball( +struct DownloadTarballResult +{ + Tree tree; + time_t lastModified; + std::optional immutableUrl; +}; + +DownloadTarballResult downloadTarball( ref store, const std::string & url, const std::string & name, diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 6c1d573ce..80598e7f8 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -207,21 +207,21 @@ struct GitArchiveInputScheme : InputScheme auto url = getDownloadUrl(input); - auto [tree, lastModified] = downloadTarball(store, url.url, input.getName(), true, url.headers); + auto result = downloadTarball(store, url.url, input.getName(), true, url.headers); - input.attrs.insert_or_assign("lastModified", uint64_t(lastModified)); + input.attrs.insert_or_assign("lastModified", uint64_t(result.lastModified)); getCache()->add( store, lockedAttrs, { {"rev", rev->gitRev()}, - {"lastModified", uint64_t(lastModified)} + {"lastModified", uint64_t(result.lastModified)} }, - tree.storePath, + result.tree.storePath, true); - return {std::move(tree.storePath), input}; + return {result.tree.storePath, input}; } }; diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index 96fe5faca..e42aca6db 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -32,7 +32,8 @@ DownloadFileResult downloadFile( return { .storePath = std::move(cached->storePath), .etag = getStrAttr(cached->infoAttrs, "etag"), - .effectiveUrl = getStrAttr(cached->infoAttrs, "url") + .effectiveUrl = getStrAttr(cached->infoAttrs, "url"), + .immutableUrl = maybeGetStrAttr(cached->infoAttrs, "immutableUrl"), }; }; @@ -55,12 +56,14 @@ DownloadFileResult downloadFile( } // FIXME: write to temporary file. 
- Attrs infoAttrs({ {"etag", res.etag}, {"url", res.effectiveUri}, }); + if (res.immutableUrl) + infoAttrs.emplace("immutableUrl", *res.immutableUrl); + std::optional storePath; if (res.cached) { @@ -111,10 +114,11 @@ DownloadFileResult downloadFile( .storePath = std::move(*storePath), .etag = res.etag, .effectiveUrl = res.effectiveUri, + .immutableUrl = res.immutableUrl, }; } -std::pair downloadTarball( +DownloadTarballResult downloadTarball( ref store, const std::string & url, const std::string & name, @@ -131,8 +135,9 @@ std::pair downloadTarball( if (cached && !cached->expired) return { - Tree { .actualPath = store->toRealPath(cached->storePath), .storePath = std::move(cached->storePath) }, - getIntAttr(cached->infoAttrs, "lastModified") + .tree = Tree { .actualPath = store->toRealPath(cached->storePath), .storePath = std::move(cached->storePath) }, + .lastModified = (time_t) getIntAttr(cached->infoAttrs, "lastModified"), + .immutableUrl = maybeGetStrAttr(cached->infoAttrs, "immutableUrl"), }; auto res = downloadFile(store, url, name, locked, headers); @@ -160,6 +165,9 @@ std::pair downloadTarball( {"etag", res.etag}, }); + if (res.immutableUrl) + infoAttrs.emplace("immutableUrl", *res.immutableUrl); + getCache()->add( store, inAttrs, @@ -168,8 +176,9 @@ std::pair downloadTarball( locked); return { - Tree { .actualPath = store->toRealPath(*unpackedStorePath), .storePath = std::move(*unpackedStorePath) }, - lastModified, + .tree = Tree { .actualPath = store->toRealPath(*unpackedStorePath), .storePath = std::move(*unpackedStorePath) }, + .lastModified = lastModified, + .immutableUrl = res.immutableUrl, }; } @@ -189,21 +198,33 @@ struct CurlInputScheme : InputScheme virtual bool isValidURL(const ParsedURL & url) const = 0; - std::optional inputFromURL(const ParsedURL & url) const override + std::optional inputFromURL(const ParsedURL & _url) const override { - if (!isValidURL(url)) + if (!isValidURL(_url)) return std::nullopt; Input input; - auto urlWithoutApplicationScheme = url; - urlWithoutApplicationScheme.scheme = parseUrlScheme(url.scheme).transport; + auto url = _url; + + url.scheme = parseUrlScheme(url.scheme).transport; - input.attrs.insert_or_assign("type", inputType()); - input.attrs.insert_or_assign("url", urlWithoutApplicationScheme.to_string()); auto narHash = url.query.find("narHash"); if (narHash != url.query.end()) input.attrs.insert_or_assign("narHash", narHash->second); + + if (auto i = get(url.query, "rev")) + input.attrs.insert_or_assign("rev", *i); + + if (auto i = get(url.query, "revCount")) + if (auto n = string2Int(*i)) + input.attrs.insert_or_assign("revCount", *n); + + url.query.erase("rev"); + url.query.erase("revCount"); + + input.attrs.insert_or_assign("type", inputType()); + input.attrs.insert_or_assign("url", url.to_string()); return input; } @@ -212,7 +233,8 @@ struct CurlInputScheme : InputScheme auto type = maybeGetStrAttr(attrs, "type"); if (type != inputType()) return {}; - std::set allowedNames = {"type", "url", "narHash", "name", "unpack"}; + // FIXME: some of these only apply to TarballInputScheme. 
+ std::set allowedNames = {"type", "url", "narHash", "name", "unpack", "rev", "revCount"}; for (auto & [name, value] : attrs) if (!allowedNames.count(name)) throw Error("unsupported %s input attribute '%s'", *type, name); @@ -275,10 +297,22 @@ struct TarballInputScheme : CurlInputScheme : hasTarballExtension(url.path)); } - std::pair fetch(ref store, const Input & input) override + std::pair fetch(ref store, const Input & _input) override { - auto tree = downloadTarball(store, getStrAttr(input.attrs, "url"), input.getName(), false).first; - return {std::move(tree.storePath), input}; + Input input(_input); + auto url = getStrAttr(input.attrs, "url"); + auto result = downloadTarball(store, url, input.getName(), false); + + if (result.immutableUrl) { + auto immutableInput = Input::fromURL(*result.immutableUrl); + // FIXME: would be nice to support arbitrary flakerefs + // here, e.g. git flakes. + if (immutableInput.getType() != "tarball") + throw Error("tarball 'Link' headers that redirect to non-tarball URLs are not supported"); + input = immutableInput; + } + + return {result.tree.storePath, std::move(input)}; } }; diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 2346accbe..38b691279 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -186,9 +186,9 @@ struct curlFileTransfer : public FileTransfer size_t realSize = size * nmemb; std::string line((char *) contents, realSize); printMsg(lvlVomit, "got header for '%s': %s", request.uri, trim(line)); + static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase); - std::smatch match; - if (std::regex_match(line, match, statusLine)) { + if (std::smatch match; std::regex_match(line, match, statusLine)) { result.etag = ""; result.data.clear(); result.bodySize = 0; @@ -196,9 +196,11 @@ struct curlFileTransfer : public FileTransfer acceptRanges = false; encoding = ""; } else { + auto i = line.find(':'); if (i != std::string::npos) { std::string name = toLower(trim(line.substr(0, i))); + if (name == "etag") { result.etag = trim(line.substr(i + 1)); /* Hack to work around a GitHub bug: it sends @@ -212,10 +214,22 @@ struct curlFileTransfer : public FileTransfer debug("shutting down on 200 HTTP response with expected ETag"); return 0; } - } else if (name == "content-encoding") + } + + else if (name == "content-encoding") encoding = trim(line.substr(i + 1)); + else if (name == "accept-ranges" && toLower(trim(line.substr(i + 1))) == "bytes") acceptRanges = true; + + else if (name == "link" || name == "x-amz-meta-link") { + auto value = trim(line.substr(i + 1)); + static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase); + if (std::smatch match; std::regex_match(value, match, linkRegex)) + result.immutableUrl = match.str(1); + else + debug("got invalid link header '%s'", value); + } } } return realSize; @@ -345,7 +359,7 @@ struct curlFileTransfer : public FileTransfer { auto httpStatus = getHTTPStatus(); - char * effectiveUriCStr; + char * effectiveUriCStr = nullptr; curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr); if (effectiveUriCStr) result.effectiveUri = effectiveUriCStr; diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh index 378c6ff78..a3b0dde1f 100644 --- a/src/libstore/filetransfer.hh +++ b/src/libstore/filetransfer.hh @@ -80,6 +80,10 @@ struct FileTransferResult std::string effectiveUri; std::string data; uint64_t bodySize = 0; + /* An "immutable" URL for this resource 
(i.e. one whose contents + will never change), as returned by the `Link: ; + rel="immutable"` header. */ + std::optional immutableUrl; }; class Store; diff --git a/tests/nixos/tarball-flakes.nix b/tests/nixos/tarball-flakes.nix new file mode 100644 index 000000000..1d43a5d04 --- /dev/null +++ b/tests/nixos/tarball-flakes.nix @@ -0,0 +1,84 @@ +{ lib, config, nixpkgs, ... }: + +let + pkgs = config.nodes.machine.nixpkgs.pkgs; + + root = pkgs.runCommand "nixpkgs-flake" {} + '' + mkdir -p $out/stable + + set -x + dir=nixpkgs-${nixpkgs.shortRev} + cp -prd ${nixpkgs} $dir + # Set the correct timestamp in the tarball. + find $dir -print0 | xargs -0 touch -t ${builtins.substring 0 12 nixpkgs.lastModifiedDate}.${builtins.substring 12 2 nixpkgs.lastModifiedDate} -- + tar cfz $out/stable/${nixpkgs.rev}.tar.gz $dir --hard-dereference + + echo 'Redirect "/latest.tar.gz" "/stable/${nixpkgs.rev}.tar.gz"' > $out/.htaccess + + echo 'Header set Link "; rel=\"immutable\""' > $out/stable/.htaccess + ''; +in + +{ + name = "tarball-flakes"; + + nodes = + { + machine = + { config, pkgs, ... }: + { networking.firewall.allowedTCPPorts = [ 80 ]; + + services.httpd.enable = true; + services.httpd.adminAddr = "foo@example.org"; + services.httpd.extraConfig = '' + ErrorLog syslog:local6 + ''; + services.httpd.virtualHosts."localhost" = + { servedDirs = + [ { urlPath = "/"; + dir = root; + } + ]; + }; + + virtualisation.writableStore = true; + virtualisation.diskSize = 2048; + virtualisation.additionalPaths = [ pkgs.hello pkgs.fuse ]; + virtualisation.memorySize = 4096; + nix.settings.substituters = lib.mkForce [ ]; + nix.extraOptions = "experimental-features = nix-command flakes"; + }; + }; + + testScript = { nodes }: '' + # fmt: off + import json + + start_all() + + machine.wait_for_unit("httpd.service") + + out = machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz") + print(out) + info = json.loads(out) + + # Check that we got redirected to the immutable URL. + assert info["locked"]["url"] == "http://localhost/stable/${nixpkgs.rev}.tar.gz" + + # Check that we got the rev and revCount attributes. + assert info["revision"] == "${nixpkgs.rev}" + assert info["revCount"] == 1234 + + # Check that fetching with rev/revCount/narHash succeeds. + machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?rev=" + info["revision"]) + machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?revCount=" + str(info["revCount"])) + machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?narHash=" + info["locked"]["narHash"]) + + # Check that fetching fails if we provide incorrect attributes. + machine.fail("nix flake metadata --json http://localhost/latest.tar.gz?rev=493300eb13ae6fb387fbd47bf54a85915acc31c0") + machine.fail("nix flake metadata --json http://localhost/latest.tar.gz?revCount=789") + machine.fail("nix flake metadata --json http://localhost/latest.tar.gz?narHash=sha256-tbudgBSg+bHWHiHnlteNzN8TUvI80ygS9IULh4rklEw=") + ''; + +} From c3c40763421e881ed5c326ed88a0539b6585c368 Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Thu, 15 Jun 2023 03:06:38 +0200 Subject: [PATCH 05/16] make domain-specificity more specific also slightly reword the purpose statement to introduce (and explain) derivations right away. 
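Stepping back to the lockable-tarball mechanism from PATCH 04: the following self-contained sketch (illustration only, not part of any patch; the helper name and example URL are made up) shows the kind of `Link` header matching that the new `curlFileTransfer` header callback performs:

```
// Sketch only: extract the immutable URL from a Link header value such as
//   <https://example.org/hello/abc123.tar.gz>; rel="immutable"
#include <iostream>
#include <optional>
#include <regex>
#include <string>

std::optional<std::string> parseImmutableLink(const std::string & value)
{
    static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"",
        std::regex::extended | std::regex::icase);
    std::smatch match;
    if (std::regex_match(value, match, linkRegex))
        return match.str(1);
    return std::nullopt;
}

int main()
{
    auto url = parseImmutableLink(
        "<https://example.org/hello/abc123.tar.gz>; rel=\"immutable\"");
    std::cout << (url ? *url : "no immutable link") << "\n";
}
```

In the real patch this matching lives in the `filetransfer.cc` header callback shown above, which also accepts the `x-amz-meta-link` header.
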
--- doc/manual/src/language/index.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/manual/src/language/index.md b/doc/manual/src/language/index.md index 69ebc70c0..29950a52d 100644 --- a/doc/manual/src/language/index.md +++ b/doc/manual/src/language/index.md @@ -1,10 +1,11 @@ # Nix Language -The Nix language is +The Nix language is designed for conveniently creating and composing *derivations* – precise descriptions of how contents of existing files are used to derive new files. +It is: - *domain-specific* - Its purpose is to conveniently create and compose precise descriptions of how contents of existing files are used to derive new files. + It comes with [built-in functions](@docroot@/language/builtins.md) to integrate with the Nix store, which manages files and performs the derivations declared in the Nix language. - *declarative* From d2696cdd1ee404ef6c6bd532cb90007dd8ee3e9f Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 14 Jun 2023 22:57:42 +0200 Subject: [PATCH 06/16] Fix build hook error for libstore library users A library shouldn't require changes to the caller's argument handling, especially if it doesn't have to, and indeed we don't have to. This changes the lookup order to prioritize the hardcoded path to nix if it exists. The static executable still finds itself through /proc and the like. --- .gitignore | 1 + Makefile | 1 + src/libstore/globals.cc | 25 ++++++++++++++- tests/local.mk | 2 ++ tests/test-libstoreconsumer.sh | 6 ++++ tests/test-libstoreconsumer/README.md | 6 ++++ tests/test-libstoreconsumer/local.mk | 12 +++++++ tests/test-libstoreconsumer/main.cc | 45 +++++++++++++++++++++++++++ 8 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 tests/test-libstoreconsumer.sh create mode 100644 tests/test-libstoreconsumer/README.md create mode 100644 tests/test-libstoreconsumer/local.mk create mode 100644 tests/test-libstoreconsumer/main.cc diff --git a/.gitignore b/.gitignore index 7ae1071d0..29d9106ae 100644 --- a/.gitignore +++ b/.gitignore @@ -89,6 +89,7 @@ perl/Makefile.config /tests/ca/config.nix /tests/dyn-drv/config.nix /tests/repl-result-out +/tests/test-libstoreconsumer/test-libstoreconsumer # /tests/lang/ /tests/lang/*.out diff --git a/Makefile b/Makefile index d6b49473a..c6220482a 100644 --- a/Makefile +++ b/Makefile @@ -27,6 +27,7 @@ makefiles += \ src/libstore/tests/local.mk \ src/libexpr/tests/local.mk \ tests/local.mk \ + tests/test-libstoreconsumer/local.mk \ tests/plugins/local.mk else makefiles += \ diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 32e9a6ea9..d53377239 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -77,7 +77,30 @@ Settings::Settings() allowedImpureHostPrefixes = tokenizeString("/System/Library /usr/lib /dev /bin/sh"); #endif - buildHook = getSelfExe().value_or("nix") + " __build-remote"; + /* Set the build hook location + + For builds we perform a self-invocation, so Nix has to be self-aware. + That is, it has to know where it is installed. We don't think it's sentient. + + Normally, nix is installed according to `nixBinDir`, which is set at compile time, + but can be overridden. This makes for a great default that works even if this + code is linked as a library into some other program whose main is not aware + that it might need to be a build remote hook. + + However, it may not have been installed at all. For example, if it's a static build, + there's a good chance that it has been moved out of its installation directory. 
+ That makes `nixBinDir` useless. Instead, we'll query the OS for the path to the + current executable, using `getSelfExe()`. + + As a last resort, we resort to `PATH`. Hopefully we find a `nix` there that's compatible. + If you're porting Nix to a new platform, that might be good enough for a while, but + you'll want to improve `getSelfExe()` to work on your platform. + */ + std::string nixExePath = nixBinDir + "/nix"; + if (!pathExists(nixExePath)) { + nixExePath = getSelfExe().value_or("nix"); + } + buildHook = nixExePath + " __build-remote"; } void loadConfFile() diff --git a/tests/local.mk b/tests/local.mk index 9e340e2e2..a37365efe 100644 --- a/tests/local.mk +++ b/tests/local.mk @@ -134,6 +134,7 @@ nix_tests = \ flakes/show.sh \ impure-derivations.sh \ path-from-hash-part.sh \ + test-libstoreconsumer.sh \ toString-path.sh ifeq ($(HAVE_LIBCPUID), 1) @@ -152,6 +153,7 @@ test-deps += \ tests/common/vars-and-functions.sh \ tests/config.nix \ tests/ca/config.nix \ + tests/test-libstoreconsumer/test-libstoreconsumer \ tests/dyn-drv/config.nix ifeq ($(BUILD_SHARED_LIBS), 1) diff --git a/tests/test-libstoreconsumer.sh b/tests/test-libstoreconsumer.sh new file mode 100644 index 000000000..8a77cf5a1 --- /dev/null +++ b/tests/test-libstoreconsumer.sh @@ -0,0 +1,6 @@ +source common.sh + +drv="$(nix-instantiate simple.nix)" +cat "$drv" +out="$(./test-libstoreconsumer/test-libstoreconsumer "$drv")" +cat "$out/hello" | grep -F "Hello World!" diff --git a/tests/test-libstoreconsumer/README.md b/tests/test-libstoreconsumer/README.md new file mode 100644 index 000000000..ded69850f --- /dev/null +++ b/tests/test-libstoreconsumer/README.md @@ -0,0 +1,6 @@ + +A very simple C++ consumer of the libstore library. + + - Keep it simple. Library consumers expect something simple. + - No build hook, or any other reinvocations. + - No more global state than necessary. 
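To make the new lookup order easier to see in isolation, here is a rough standalone rendering of what the `globals.cc` hunk above does (sketch only; `std::filesystem::exists` stands in for Nix's `pathExists`, and `selfExe` for the result of `getSelfExe()`):

```
// Sketch only: prefer the compile-time install location, then the running
// executable, then whatever `nix` happens to be on PATH.
#include <filesystem>
#include <optional>
#include <string>

std::string resolveBuildHook(const std::string & nixBinDir,
    const std::optional<std::string> & selfExe)
{
    std::string nixExePath = nixBinDir + "/nix";
    if (!std::filesystem::exists(nixExePath))
        nixExePath = selfExe.value_or("nix");
    return nixExePath + " __build-remote";
}
```
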
diff --git a/tests/test-libstoreconsumer/local.mk b/tests/test-libstoreconsumer/local.mk new file mode 100644 index 000000000..cd2d0c7f8 --- /dev/null +++ b/tests/test-libstoreconsumer/local.mk @@ -0,0 +1,12 @@ +programs += test-libstoreconsumer + +test-libstoreconsumer_DIR := $(d) + +test-libstoreconsumer_SOURCES := \ + $(wildcard $(d)/*.cc) \ + +test-libstoreconsumer_CXXFLAGS += -I src/libutil -I src/libstore + +test-libstoreconsumer_LIBS = libstore libutil + +test-libstoreconsumer_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) $(LOWDOWN_LIBS) diff --git a/tests/test-libstoreconsumer/main.cc b/tests/test-libstoreconsumer/main.cc new file mode 100644 index 000000000..31b6d8ef1 --- /dev/null +++ b/tests/test-libstoreconsumer/main.cc @@ -0,0 +1,45 @@ +#include "globals.hh" +#include "store-api.hh" +#include "build-result.hh" +#include + +using namespace nix; + +int main (int argc, char **argv) +{ + try { + if (argc != 2) { + std::cerr << "Usage: " << argv[0] << " store/path/to/something.drv\n"; + return 1; + } + + std::string drvPath = argv[1]; + + initLibStore(); + + auto store = nix::openStore(); + + // build the derivation + + std::vector paths { + DerivedPath::Built { + .drvPath = store->parseStorePath(drvPath), + .outputs = OutputsSpec::Names{"out"} + } + }; + + const auto results = store->buildPathsWithResults(paths, bmNormal, store); + + for (const auto & result : results) { + for (const auto & [outputName, realisation] : result.builtOutputs) { + std::cout << store->printStorePath(realisation.outPath) << "\n"; + } + } + + return 0; + + } catch (const std::exception & e) { + std::cerr << "Error: " << e.what() << "\n"; + return 1; + } +} From b2247ef4f6b02cf4e666455eb9afc86398fff35d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Thu, 15 Jun 2023 21:24:14 +0200 Subject: [PATCH 07/16] Don't assume the type of string::size_type The code accidentally conflated `std::string::size_type` and `long unsigned int`. This was fine on 64bits machines where they are apparently the same in practice, but not on 32bits. Fix that by using `std::string::size_type` everywhere. 
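A minimal standalone illustration of the mismatch described above (not part of the patch; it assumes a 32-bit target where `std::string::size_type` is `unsigned int`, a distinct type from `long unsigned int` even though both are 32 bits wide):

```
// Sketch only: shows why mixing the two types can break a 32-bit build.
#include <algorithm>
#include <string>

std::string::size_type maxRewriteSize(const std::string & from,
    std::string::size_type current)
{
    // With `long unsigned int current` instead, this call can fail to
    // compile on 32-bit targets: std::max deduces a single type T from
    // both arguments, and `long unsigned int` does not unify with
    // `std::string::size_type` (`unsigned int` there).
    return std::max(current, from.size());
}
```

Using `std::string::size_type` on both sides, as the patch does, keeps the `std::max` call well-formed regardless of how wide `size_t` happens to be.
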
--- src/libutil/references.cc | 2 +- src/libutil/references.hh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libutil/references.cc b/src/libutil/references.cc index 74003584a..7f59b4c09 100644 --- a/src/libutil/references.cc +++ b/src/libutil/references.cc @@ -75,7 +75,7 @@ RewritingSink::RewritingSink(const std::string & from, const std::string & to, S RewritingSink::RewritingSink(const StringMap & rewrites, Sink & nextSink) : rewrites(rewrites), nextSink(nextSink) { - long unsigned int maxRewriteSize = 0; + std::string::size_type maxRewriteSize = 0; for (auto & [from, to] : rewrites) { assert(from.size() == to.size()); maxRewriteSize = std::max(maxRewriteSize, from.size()); diff --git a/src/libutil/references.hh b/src/libutil/references.hh index ffd730e7b..f0baeffe1 100644 --- a/src/libutil/references.hh +++ b/src/libutil/references.hh @@ -26,7 +26,7 @@ public: struct RewritingSink : Sink { const StringMap rewrites; - long unsigned int maxRewriteSize; + std::string::size_type maxRewriteSize; std::string prev; Sink & nextSink; uint64_t pos = 0; From cab03fb7794f6e20e02dc1460a78201ab3955c18 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 16 Jun 2023 15:58:42 +0200 Subject: [PATCH 08/16] Add docs --- doc/manual/src/SUMMARY.md.in | 2 ++ doc/manual/src/protocols/protocols.md | 4 +++ doc/manual/src/protocols/tarball-fetcher.md | 40 +++++++++++++++++++++ 3 files changed, 46 insertions(+) create mode 100644 doc/manual/src/protocols/protocols.md create mode 100644 doc/manual/src/protocols/tarball-fetcher.md diff --git a/doc/manual/src/SUMMARY.md.in b/doc/manual/src/SUMMARY.md.in index 69c721b57..cfbb38be8 100644 --- a/doc/manual/src/SUMMARY.md.in +++ b/doc/manual/src/SUMMARY.md.in @@ -98,6 +98,8 @@ - [Channels](command-ref/files/channels.md) - [Default Nix expression](command-ref/files/default-nix-expression.md) - [Architecture](architecture/architecture.md) +- [Protocols](protocols/protocols.md) + - [Serving Tarball Flakes](protocols/tarball-fetcher.md) - [Glossary](glossary.md) - [Contributing](contributing/contributing.md) - [Hacking](contributing/hacking.md) diff --git a/doc/manual/src/protocols/protocols.md b/doc/manual/src/protocols/protocols.md new file mode 100644 index 000000000..d6bf1d809 --- /dev/null +++ b/doc/manual/src/protocols/protocols.md @@ -0,0 +1,4 @@ +# Protocols + +This chapter documents various developer-facing interfaces provided by +Nix. diff --git a/doc/manual/src/protocols/tarball-fetcher.md b/doc/manual/src/protocols/tarball-fetcher.md new file mode 100644 index 000000000..1c6c92000 --- /dev/null +++ b/doc/manual/src/protocols/tarball-fetcher.md @@ -0,0 +1,40 @@ +# Serving Tarball Flakes + +Tarball flakes are served as regular tarballs via HTTP or the file +system (for `file://` URLs). + +An HTTP server can return an "immutable" flakeref appropriate for lock +files. This allows users to specify a tarball flake input in +`flake.nix` that requests the latest version of a flake +(e.g. `https://example.org/hello/latest.tar.gz`), while `flake.lock` +will record a URL whose contents will not change +(e.g. `https://example.org/hello/.tar.gz`). To do so, the +server must return a `Link` header with the `rel` attribute set to +`immutable`, as follows: + +``` +Link: ; rel="immutable" +``` + +(Note the required `<` and `>` characters around *flakeref*.) + +*flakeref* must be a tarball flakeref. It can contain flake attributes +such as `narHash`, `rev` and `revCount`. 
If `narHash` is included, its +value must be the NAR hash of the unpacked tarball (as computed via +`nix hash path`). Nix checks the contents of the returned tarball +against the `narHash` attribute. The `rev` and `revCount` attributes +are useful when the tarball flake is a mirror of a fetcher type that +has those attributes, such as Git or GitHub. They are not checked by +Nix. + +``` +Link: ; rel="immutable" +``` + +(The linebreaks in this example are for clarity and must not be included in the actual response.) + +For tarball flakes, the value of the `lastModified` flake attribute is +defined as the timestamp of the newest file inside the tarball. From b1ed9b4b0cc037a8b14dcc37fd32f6a5a16e7ba3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 16 Jun 2023 16:48:37 +0200 Subject: [PATCH 09/16] Apply suggestions from code review Co-authored-by: Robert Hensing --- doc/manual/src/protocols/tarball-fetcher.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/doc/manual/src/protocols/tarball-fetcher.md b/doc/manual/src/protocols/tarball-fetcher.md index 1c6c92000..0d3212303 100644 --- a/doc/manual/src/protocols/tarball-fetcher.md +++ b/doc/manual/src/protocols/tarball-fetcher.md @@ -1,15 +1,17 @@ -# Serving Tarball Flakes +# Lockable HTTP Tarball Protocol -Tarball flakes are served as regular tarballs via HTTP or the file -system (for `file://` URLs). +Tarball flakes can be served as regular tarballs via HTTP or the file +system (for `file://` URLs). Unless the server implements the Lockable +HTTP Tarball protocol, it is the responsibility of the user to make sure that +the URL always produces the same tarball contents. -An HTTP server can return an "immutable" flakeref appropriate for lock +An HTTP server can return an "immutable" HTTP URL appropriate for lock files. This allows users to specify a tarball flake input in `flake.nix` that requests the latest version of a flake (e.g. `https://example.org/hello/latest.tar.gz`), while `flake.lock` will record a URL whose contents will not change (e.g. `https://example.org/hello/.tar.gz`). To do so, the -server must return a `Link` header with the `rel` attribute set to +server must return an [HTTP `Link` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link) with the `rel` attribute set to `immutable`, as follows: ``` From 741f7837f85d1675efbcf43877bbcdfb9a862745 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christina=20S=C3=B8rensen?= <89321978+cafkafk@users.noreply.github.com> Date: Sat, 17 Jun 2023 09:06:17 +0000 Subject: [PATCH 10/16] Fix wikipedia links (#8533) --- doc/manual/src/architecture/architecture.md | 6 +++--- doc/manual/src/contributing/hacking.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/manual/src/architecture/architecture.md b/doc/manual/src/architecture/architecture.md index e51958052..9e969972e 100644 --- a/doc/manual/src/architecture/architecture.md +++ b/doc/manual/src/architecture/architecture.md @@ -7,11 +7,11 @@ It should help users understand why Nix behaves as it does, and it should help d Nix consists of [hierarchical layers]. 
-[hierarchical layers]: https://en.m.wikipedia.org/wiki/Multitier_architecture#Layers +[hierarchical layers]: https://en.wikipedia.org/wiki/Multitier_architecture#Layers The following [concept map] shows its main components (rectangles), the objects they operate on (rounded rectangles), and their interactions (connecting phrases): -[concept map]: https://en.m.wikipedia.org/wiki/Concept_map +[concept map]: https://en.wikipedia.org/wiki/Concept_map ``` @@ -76,7 +76,7 @@ The result of a build task can be input to another build task. The following [data flow diagram] shows a build plan for illustration. Build inputs used as instructions to a build task are marked accordingly: -[data flow diagram]: https://en.m.wikipedia.org/wiki/Data-flow_diagram +[data flow diagram]: https://en.wikipedia.org/wiki/Data-flow_diagram ``` +--------------------------------------------------------------------+ diff --git a/doc/manual/src/contributing/hacking.md b/doc/manual/src/contributing/hacking.md index b954a2167..c57d45138 100644 --- a/doc/manual/src/contributing/hacking.md +++ b/doc/manual/src/contributing/hacking.md @@ -378,7 +378,7 @@ rm $(git ls-files doc/manual/ -o | grep -F '.md') && rmdir doc/manual/src/comman [`mdbook-linkcheck`] does not implement checking [URI fragments] yet. [`mdbook-linkcheck`]: https://github.com/Michael-F-Bryan/mdbook-linkcheck -[URI fragments]: https://en.m.wikipedia.org/wiki/URI_fragment +[URI fragments]: https://en.wikipedia.org/wiki/URI_fragment #### `@docroot@` variable From b931d83550b200676c36ae8d1038a281ae4aa0ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Na=C3=AFm=20Favier?= Date: Sat, 17 Jun 2023 15:05:10 +0200 Subject: [PATCH 11/16] ci: bump install-nix-action, don't fail fast --- .github/workflows/ci.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0f1f6d43f..c3a17d106 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,6 +11,7 @@ jobs: tests: needs: [check_secrets] strategy: + fail-fast: false matrix: os: [ubuntu-latest, macos-latest] runs-on: ${{ matrix.os }} @@ -19,7 +20,7 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v21 + - uses: cachix/install-nix-action@v22 with: # The sandbox would otherwise be disabled by default on Darwin extra_nix_config: "sandbox = true" @@ -61,7 +62,7 @@ jobs: with: fetch-depth: 0 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - - uses: cachix/install-nix-action@v21 + - uses: cachix/install-nix-action@v22 with: install_url: https://releases.nixos.org/nix/nix-2.13.3/install - uses: cachix/cachix-action@v12 @@ -76,13 +77,14 @@ jobs: needs: [installer, check_secrets] if: github.event_name == 'push' && needs.check_secrets.outputs.cachix == 'true' strategy: + fail-fast: false matrix: os: [ubuntu-latest, macos-latest] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v3 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - - uses: cachix/install-nix-action@v21 + - uses: cachix/install-nix-action@v22 with: install_url: '${{needs.installer.outputs.installerURL}}' install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve" @@ -109,7 +111,7 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v21 + - uses: cachix/install-nix-action@v22 with: install_url: https://releases.nixos.org/nix/nix-2.13.3/install - 
run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV From 6b06e97bde3d2d87a750210d46086624dc96a0f8 Mon Sep 17 00:00:00 2001 From: Adam Joseph Date: Thu, 15 Jun 2023 17:12:03 -0700 Subject: [PATCH 12/16] src/libexpr/eval.hh: add link for allowed-uris option This commit adds a link to the documentation for `--option allowed-uris` where that option is mentioned while describing `restrict-eval`. --- src/libexpr/eval.hh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index ea1f5975b..8e41bdbd0 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -741,7 +741,8 @@ struct EvalSettings : Config If set to `true`, the Nix evaluator will not allow access to any files outside of the Nix search path (as set via the `NIX_PATH` environment variable or the `-I` option), or to URIs outside of - `allowed-uris`. The default is `false`. + [`allowed-uris`](../command-ref/conf-file.md#conf-allowed-uris). + The default is `false`. )"}; Setting pureEval{this, false, "pure-eval", From 7bf17f8825b7aacde8b4e3c5e035f6d442d649c4 Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Mon, 19 Jun 2023 05:45:08 +0200 Subject: [PATCH 13/16] Add description for file system objects (#8500) While this is not actually a notion in the implementation, it is explicitly described in the thesis and quite important for understanding how the store works. Co-authored-by: John Ericson Co-authored-by: Robert Hensing --- doc/manual/src/SUMMARY.md.in | 3 +- .../src/architecture/file-system-object.md | 64 +++++++++++++++++++ doc/manual/src/glossary.md | 15 +++-- 3 files changed, 76 insertions(+), 6 deletions(-) create mode 100644 doc/manual/src/architecture/file-system-object.md diff --git a/doc/manual/src/SUMMARY.md.in b/doc/manual/src/SUMMARY.md.in index cfbb38be8..13d2e4d15 100644 --- a/doc/manual/src/SUMMARY.md.in +++ b/doc/manual/src/SUMMARY.md.in @@ -97,7 +97,8 @@ - [manifest.json](command-ref/files/manifest.json.md) - [Channels](command-ref/files/channels.md) - [Default Nix expression](command-ref/files/default-nix-expression.md) -- [Architecture](architecture/architecture.md) +- [Architecture and Design](architecture/architecture.md) + - [File System Object](architecture/file-system-object.md) - [Protocols](protocols/protocols.md) - [Serving Tarball Flakes](protocols/tarball-fetcher.md) - [Glossary](glossary.md) diff --git a/doc/manual/src/architecture/file-system-object.md b/doc/manual/src/architecture/file-system-object.md new file mode 100644 index 000000000..42f047260 --- /dev/null +++ b/doc/manual/src/architecture/file-system-object.md @@ -0,0 +1,64 @@ +# File System Object + +Nix uses a simplified model of the file system, which consists of file system objects. +Every file system object is one of the following: + + - File + + - A possibly empty sequence of bytes for contents + - A single boolean representing the [executable](https://en.m.wikipedia.org/wiki/File-system_permissions#Permissions) permission + + - Directory + + Mapping of names to child file system objects + + - [Symbolic link](https://en.m.wikipedia.org/wiki/Symbolic_link) + + An arbitrary string. + Nix does not assign any semantics to symbolic links. + +File system objects and their children form a tree. +A bare file or symlink can be a root file system object. 
+ +Nix does not encode any other file system notions such as [hard links](https://en.m.wikipedia.org/wiki/Hard_link), [permissions](https://en.m.wikipedia.org/wiki/File-system_permissions), timestamps, or other metadata. + +## Examples of file system objects + +A plain file: + +``` +50 B, executable: false +``` + +An executable file: + +``` +122 KB, executable: true +``` + +A symlink: + +``` +-> /usr/bin/sh +``` + +A directory with contents: + +``` +├── bin +│   └── hello: 35 KB, executable: true +└── share + ├── info + │   └── hello.info: 36 KB, executable: false + └── man + └── man1 + └── hello.1.gz: 790 B, executable: false +``` + +A directory that contains a symlink and other directories: + +``` +├── bin -> share/go/bin +├── nix-support/ +└── share/ +``` diff --git a/doc/manual/src/glossary.md b/doc/manual/src/glossary.md index 47a484826..ac0bb3c2f 100644 --- a/doc/manual/src/glossary.md +++ b/doc/manual/src/glossary.md @@ -85,12 +85,17 @@ [store path]: #gloss-store-path + - [file system object]{#gloss-store-object}\ + The Nix data model for representing simplified file system data. + + See [File System Object](@docroot@/architecture/file-system-object.md) for details. + + [file system object]: #gloss-file-system-object + - [store object]{#gloss-store-object}\ - A file that is an immediate child of the Nix store directory. These - can be regular files, but also entire directory trees. Store objects - can be sources (objects copied from outside of the store), - derivation outputs (objects produced by running a build task), or - derivations (files describing a build task). + + A store object consists of a [file system object], [reference]s to other store objects, and other metadata. + It can be referred to by a [store path]. [store object]: #gloss-store-object From c404623a1d39431cf7b4ccd0b0b396a821a6eade Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 19 Jun 2023 00:04:59 -0400 Subject: [PATCH 14/16] Clean up a few things related to profiles (#8526) - Greatly expand API docs - Clean up code in misc ways - Instead of a complicated single loop on generations, do different operations in successive subsequent steps. - Avoid `ref` in one place where `&` is fine - Just return path instead of mutating an argument in `makeName` Co-authored-by: Valentin Gagarin --- src/libcmd/command.cc | 4 +- src/libstore/profiles.cc | 123 +++++++++------ src/libstore/profiles.hh | 143 +++++++++++++++++- .../nix-collect-garbage.cc | 7 +- src/nix-env/nix-env.cc | 7 +- src/nix-env/user-env.cc | 2 +- src/nix/profile.cc | 7 +- 7 files changed, 224 insertions(+), 69 deletions(-) diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index 6c4648b34..4fc197956 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -239,9 +239,7 @@ void MixProfile::updateProfile(const StorePath & storePath) if (!store) throw Error("'--profile' is not supported for this Nix store"); auto profile2 = absPath(*profile); switchLink(profile2, - createGeneration( - ref(store), - profile2, storePath)); + createGeneration(*store, profile2, storePath)); } void MixProfile::updateProfile(const BuiltPaths & buildables) diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index ba5c8583f..4e9955948 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -13,8 +13,10 @@ namespace nix { -/* Parse a generation name of the format - `--link'. */ +/** + * Parse a generation name of the format + * `--link'. 
+ */ static std::optional parseName(const std::string & profileName, const std::string & name) { if (name.substr(0, profileName.size() + 1) != profileName + "-") return {}; @@ -28,7 +30,6 @@ static std::optional parseName(const std::string & profileName } - std::pair> findGenerations(Path profile) { Generations gens; @@ -61,15 +62,16 @@ std::pair> findGenerations(Path pro } -static void makeName(const Path & profile, GenerationNumber num, - Path & outLink) +/** + * Create a generation name that can be parsed by `parseName()`. + */ +static Path makeName(const Path & profile, GenerationNumber num) { - Path prefix = fmt("%1%-%2%", profile, num); - outLink = prefix + "-link"; + return fmt("%s-%s-link", profile, num); } -Path createGeneration(ref store, Path profile, StorePath outPath) +Path createGeneration(LocalFSStore & store, Path profile, StorePath outPath) { /* The new generation number should be higher than old the previous ones. */ @@ -79,7 +81,7 @@ Path createGeneration(ref store, Path profile, StorePath outPath) if (gens.size() > 0) { Generation last = gens.back(); - if (readLink(last.path) == store->printStorePath(outPath)) { + if (readLink(last.path) == store.printStorePath(outPath)) { /* We only create a new generation symlink if it differs from the last one. @@ -89,7 +91,7 @@ Path createGeneration(ref store, Path profile, StorePath outPath) return last.path; } - num = gens.back().number; + num = last.number; } else { num = 0; } @@ -100,9 +102,8 @@ Path createGeneration(ref store, Path profile, StorePath outPath) to the permanent roots (of which the GC would have a stale view). If we didn't do it this way, the GC might remove the user environment etc. we've just built. */ - Path generation; - makeName(profile, num + 1, generation); - store->addPermRoot(outPath, generation); + Path generation = makeName(profile, num + 1); + store.addPermRoot(outPath, generation); return generation; } @@ -117,12 +118,19 @@ static void removeFile(const Path & path) void deleteGeneration(const Path & profile, GenerationNumber gen) { - Path generation; - makeName(profile, gen, generation); + Path generation = makeName(profile, gen); removeFile(generation); } - +/** + * Delete a generation with dry-run mode. + * + * Like `deleteGeneration()` but: + * + * - We log what we are going to do. + * + * - We only actually delete if `dryRun` is false. + */ static void deleteGeneration2(const Path & profile, GenerationNumber gen, bool dryRun) { if (dryRun) @@ -150,27 +158,36 @@ void deleteGenerations(const Path & profile, const std::set & } } +/** + * Advanced the iterator until the given predicate `cond` returns `true`. 
+ */ +static inline void iterDropUntil(Generations & gens, auto && i, auto && cond) +{ + for (; i != gens.rend() && !cond(*i); ++i); +} + void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun) { + if (max == 0) + throw Error("Must keep at least one generation, otherwise the current one would be deleted"); + PathLocks lock; lockProfile(lock, profile); - bool fromCurGen = false; - auto [gens, curGen] = findGenerations(profile); - for (auto i = gens.rbegin(); i != gens.rend(); ++i) { - if (i->number == curGen) { - fromCurGen = true; - max--; - continue; - } - if (fromCurGen) { - if (max) { - max--; - continue; - } - deleteGeneration2(profile, i->number, dryRun); - } - } + auto [gens, _curGen] = findGenerations(profile); + auto curGen = _curGen; + + auto i = gens.rbegin(); + + // Find the current generation + iterDropUntil(gens, i, [&](auto & g) { return g.number == curGen; }); + + // Skip over `max` generations, preserving them + for (auto keep = 0; i != gens.rend() && keep < max; ++i, ++keep); + + // Delete the rest + for (; i != gens.rend(); ++i) + deleteGeneration2(profile, i->number, dryRun); } void deleteOldGenerations(const Path & profile, bool dryRun) @@ -193,23 +210,33 @@ void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun) auto [gens, curGen] = findGenerations(profile); - bool canDelete = false; - for (auto i = gens.rbegin(); i != gens.rend(); ++i) - if (canDelete) { - assert(i->creationTime < t); - if (i->number != curGen) - deleteGeneration2(profile, i->number, dryRun); - } else if (i->creationTime < t) { - /* We may now start deleting generations, but we don't - delete this generation yet, because this generation was - still the one that was active at the requested point in - time. */ - canDelete = true; - } + auto i = gens.rbegin(); + + // Predicate that the generation is older than the given time. + auto older = [&](auto & g) { return g.creationTime < t; }; + + // Find the first older generation, if one exists + iterDropUntil(gens, i, older); + + /* Take the previous generation + + We don't want delete this one yet because it + existed at the requested point in time, and + we want to be able to roll back to it. */ + if (i != gens.rend()) ++i; + + // Delete all previous generations (unless current). + for (; i != gens.rend(); ++i) { + /* Creating date and generations should be monotonic, so lower + numbered derivations should also be older. */ + assert(older(*i)); + if (i->number != curGen) + deleteGeneration2(profile, i->number, dryRun); + } } -void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, bool dryRun) +time_t parseOlderThanTimeSpec(std::string_view timeSpec) { if (timeSpec.empty() || timeSpec[timeSpec.size() - 1] != 'd') throw UsageError("invalid number of days specifier '%1%', expected something like '14d'", timeSpec); @@ -221,9 +248,7 @@ void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, if (!days || *days < 1) throw UsageError("invalid number of days specifier '%1%'", timeSpec); - time_t oldTime = curTime - *days * 24 * 3600; - - deleteGenerationsOlderThan(profile, oldTime, dryRun); + return curTime - *days * 24 * 3600; } diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh index 4e1f42e83..193c0bf21 100644 --- a/src/libstore/profiles.hh +++ b/src/libstore/profiles.hh @@ -1,7 +1,11 @@ #pragma once -///@file +/** + * @file Implementation of Profiles. + * + * See the manual for additional information. 
+ */ - #include "types.hh" +#include "types.hh" #include "pathlocks.hh" #include @@ -12,41 +16,166 @@ namespace nix { class StorePath; +/** + * A positive number identifying a generation for a given profile. + * + * Generation numbers are assigned sequentially. Each new generation is + * assigned 1 + the current highest generation number. + */ typedef uint64_t GenerationNumber; +/** + * A generation is a revision of a profile. + * + * Each generation is a mapping (key-value pair) from an identifier + * (`number`) to a store object (specified by `path`). + */ struct Generation { + /** + * The number of a generation is its unique identifier within the + * profile. + */ GenerationNumber number; + /** + * The store path identifies the store object that is the contents + * of the generation. + * + * These store paths / objects are not unique to the generation + * within a profile. Nix tries to ensure successive generations have + * distinct contents to avoid bloat, but nothing stops two + * non-adjacent generations from having the same contents. + * + * @todo Use `StorePath` instead of `Path`? + */ Path path; + + /** + * When the generation was created. This is extra metadata about the + * generation used to make garbage collecting old generations more + * convenient. + */ time_t creationTime; }; +/** + * All the generations of a profile + */ typedef std::list Generations; /** - * Returns the list of currently present generations for the specified - * profile, sorted by generation number. Also returns the number of - * the current generation. + * Find all generations for the given profile. + * + * @param profile A profile specified by its name and location combined + * into a path. E.g. if "foo" is the name of the profile, and "/bar/baz" + * is the directory it is in, then the path "/bar/baz/foo" would be the + * argument for this parameter. + * + * @return The pair of: + * + * - The list of currently present generations for the specified profile, + * sorted by ascending generation number. + * + * - The number of the current/active generation. + * + * Note that the current/active generation need not be the latest one. */ std::pair> findGenerations(Path profile); class LocalFSStore; -Path createGeneration(ref store, Path profile, StorePath outPath); +/** + * Create a new generation of the given profile + * + * If the previous generation (not the currently active one!) has a + * distinct store object, a fresh generation number is mapped to the + * given store object, referenced by path. Otherwise, the previous + * generation is assumed. + * + * The behavior of reusing existing generations like this makes this + * procedure idempotent. It also avoids clutter. + */ +Path createGeneration(LocalFSStore & store, Path profile, StorePath outPath); +/** + * Unconditionally delete a generation + * + * @param profile A profile specified by its name and location combined into a path. + * + * @param gen The generation number specifying exactly which generation + * to delete. + * + * Because there is no check of whether the generation to delete is + * active, this is somewhat unsafe. + * + * @todo Should we expose this at all? + */ void deleteGeneration(const Path & profile, GenerationNumber gen); +/** + * Delete the given set of generations. + * + * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete. + * + * @param gensToDelete The generations to delete, specified by a set of + * numbers. 
+ * + * @param dryRun Log what would be deleted instead of actually doing + * so. + * + * Trying to delete the currently active generation will fail, and cause + * no generations to be deleted. + */ void deleteGenerations(const Path & profile, const std::set & gensToDelete, bool dryRun); +/** + * Delete generations older than `max` passed the current generation. + * + * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete. + * + * @param max How many generations to keep up to the current one. Must + * be at least 1 so we don't delete the current one. + * + * @param dryRun Log what would be deleted instead of actually doing + * so. + */ void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun); +/** + * Delete all generations other than the current one + * + * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete. + * + * @param dryRun Log what would be deleted instead of actually doing + * so. + */ void deleteOldGenerations(const Path & profile, bool dryRun); +/** + * Delete generations older than `t`, except for the most recent one + * older than `t`. + * + * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete. + * + * @param dryRun Log what would be deleted instead of actually doing + * so. + */ void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun); -void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, bool dryRun); +/** + * Parse a temp spec intended for `deleteGenerationsOlderThan()`. + * + * Throws an exception if `timeSpec` fails to parse. + */ +time_t parseOlderThanTimeSpec(std::string_view timeSpec); +/** + * Smaller wrapper around `replaceSymlink` for replacing the current + * generation of a profile. Does not enforce proper structure. + * + * @todo Always use `switchGeneration()` instead, and delete this. 
+ */ void switchLink(Path link, Path target); /** diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index cb1f42e35..70af53b28 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -41,9 +41,10 @@ void removeOldGenerations(std::string dir) } if (link.find("link") != std::string::npos) { printInfo("removing old generations of profile %s", path); - if (deleteOlderThan != "") - deleteGenerationsOlderThan(path, deleteOlderThan, dryRun); - else + if (deleteOlderThan != "") { + auto t = parseOlderThanTimeSpec(deleteOlderThan); + deleteGenerationsOlderThan(path, t, dryRun); + } else deleteOldGenerations(path, dryRun); } } else if (type == DT_DIR) { diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 5e94f2d14..91b073b49 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -772,7 +772,7 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs) debug("switching to new user environment"); Path generation = createGeneration( - ref(store2), + *store2, globals.profile, drv.queryOutPath()); switchLink(globals.profile, generation); @@ -1356,13 +1356,14 @@ static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opAr if (opArgs.size() == 1 && opArgs.front() == "old") { deleteOldGenerations(globals.profile, globals.dryRun); } else if (opArgs.size() == 1 && opArgs.front().find('d') != std::string::npos) { - deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun); + auto t = parseOlderThanTimeSpec(opArgs.front()); + deleteGenerationsOlderThan(globals.profile, t, globals.dryRun); } else if (opArgs.size() == 1 && opArgs.front().find('+') != std::string::npos) { if (opArgs.front().size() < 2) throw Error("invalid number of generations '%1%'", opArgs.front()); auto str_max = opArgs.front().substr(1); auto max = string2Int(str_max); - if (!max || *max == 0) + if (!max) throw Error("invalid number of generations to keep '%1%'", opArgs.front()); deleteGenerationsGreaterThan(globals.profile, *max, globals.dryRun); } else { diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc index 9e916abc4..d12d70f33 100644 --- a/src/nix-env/user-env.cc +++ b/src/nix-env/user-env.cc @@ -158,7 +158,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems, } debug("switching to new user environment"); - Path generation = createGeneration(ref(store2), profile, topLevelOut); + Path generation = createGeneration(*store2, profile, topLevelOut); switchLink(profile, generation); } diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 7cea616d2..f3b73f10d 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -806,9 +806,10 @@ struct CmdProfileWipeHistory : virtual StoreCommand, MixDefaultProfile, MixDryRu void run(ref store) override { - if (minAge) - deleteGenerationsOlderThan(*profile, *minAge, dryRun); - else + if (minAge) { + auto t = parseOlderThanTimeSpec(*minAge); + deleteGenerationsOlderThan(*profile, t, dryRun); + } else deleteOldGenerations(*profile, dryRun); } }; From 966e5dc991e849d2cd26d19266ae3f065ec69c96 Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Mon, 19 Jun 2023 10:39:19 +0200 Subject: [PATCH 15/16] CONTRIBUTING.md: add link to "good first issues" --- CONTRIBUTING.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 57a949906..4a72a8eac 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,6 @@ We appreciate your support. 
Reading and following these guidelines will help us make the contribution process easy and effective for everyone involved. - ## Report a bug 1. Check on the [GitHub issue tracker](https://github.com/NixOS/nix/issues) if your bug was already reported. @@ -30,6 +29,8 @@ Check out the [security policy](https://github.com/NixOS/nix/security/policy). You can use [labels](https://github.com/NixOS/nix/labels) to filter for relevant topics. 2. Search for related issues that cover what you're going to work on. It could help to mention there that you will work on the issue. + + Issues labeled ["good first issue"](https://github.com/NixOS/nix/labels/good-first-issue) should be relatively easy to fix and are likely to get merged quickly. Pull requests addressing issues labeled ["idea approved"](https://github.com/NixOS/nix/labels/idea%20approved) are especially welcomed by maintainers and will receive prioritised review. 3. Check the [Nix reference manual](https://nixos.org/manual/nix/unstable/contributing/hacking.html) for information on building Nix and running its tests. From b6e74ea5a82e5c43d42bf8386236947408748a96 Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Mon, 19 Jun 2023 10:55:34 +0200 Subject: [PATCH 16/16] maintainers: add note on marking PRs as draft as discussed with maintainers team --- maintainers/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/maintainers/README.md b/maintainers/README.md index d13349438..0d520cb0c 100644 --- a/maintainers/README.md +++ b/maintainers/README.md @@ -117,6 +117,7 @@ Pull requests in this column are reviewed together during work meetings. This is both for spreading implementation knowledge and for establishing common values in code reviews. When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member. +If significant changes are requested or reviewers cannot come to a conclusion in reasonable time, the pull request is [marked as draft](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request#converting-a-pull-request-to-a-draft). ### Assigned
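
As a closing illustration of the retention logic reworked in PATCH 14: the sketch below (not part of any patch; a plain `std::vector` stands in for the real `Generations` list) mirrors what `deleteGenerationsGreaterThan()` now does, walking newest-first, preserving the current generation plus enough older ones to total `max`, and deleting the rest:

```
// Sketch only: which generations would be deleted when keeping `max`
// generations up to and including the current one.
#include <cstdint>
#include <iostream>
#include <vector>

using GenerationNumber = uint64_t;

std::vector<GenerationNumber> generationsToDelete(
    const std::vector<GenerationNumber> & gens,   // ascending, like findGenerations()
    GenerationNumber curGen,
    unsigned max)
{
    std::vector<GenerationNumber> toDelete;
    auto i = gens.rbegin();
    while (i != gens.rend() && *i != curGen) ++i;               // find the current generation
    for (unsigned keep = 0; i != gens.rend() && keep < max; ++i, ++keep); // preserve `max` of them
    for (; i != gens.rend(); ++i) toDelete.push_back(*i);       // the rest would be deleted
    return toDelete;
}

int main()
{
    for (auto g : generationsToDelete({1, 2, 3, 4, 5, 6, 7}, 7, 2))
        std::cout << "would delete generation " << g << "\n";
    // Prints 5, 4, 3, 2, 1: generation 7 (current) and 6 are preserved.
}
```

The real implementation additionally refuses `max == 0` up front, since honoring it would delete the current generation.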