diff --git a/.version b/.version
index 358c8e60e..588b4a3cc 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-2.24.9
+2.24.10
diff --git a/configure.ac b/configure.ac
index 5c22ed176..dd33dbe11 100644
--- a/configure.ac
+++ b/configure.ac
@@ -89,9 +89,10 @@ AC_LANG_POP(C++)
 
 AC_CHECK_FUNCS([statvfs pipe2])
 
-# Check for lutimes, optionally used for changing the mtime of
-# symlinks.
-AC_CHECK_FUNCS([lutimes])
+# Check for lutimes and utimensat, optionally used for changing the
+# mtime of symlinks.
+AC_CHECK_DECLS([AT_SYMLINK_NOFOLLOW], [], [], [[#include <fcntl.h>]])
+AC_CHECK_FUNCS([lutimes utimensat])
 
 # Check whether the store optimiser can optimise symlinks.
diff --git a/doc/manual/rl-next/filesystem-errors.md b/doc/manual/rl-next/filesystem-errors.md
deleted file mode 100644
index faa9352b9..000000000
--- a/doc/manual/rl-next/filesystem-errors.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-synopsis: wrap filesystem exceptions more correctly
-issues: []
-prs: [11378]
----
-
-
-With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions.
-
-This lead to no longer generating error traces, for example when listing a non-existing directory.
-
-This version catches these types of exception correctly and wrap them into Nix's own exeception type.
-
-Author: [**@Mic92**](https://github.com/Mic92)
diff --git a/doc/manual/rl-next/verify-tls.md b/doc/manual/rl-next/verify-tls.md
deleted file mode 100644
index afc689f46..000000000
--- a/doc/manual/rl-next/verify-tls.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-synopsis: "`<nix/fetchurl.nix>` uses TLS verification"
-prs: [11585]
----
-
-Previously `<nix/fetchurl.nix>` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `<nix/fetchurl.nix>` will now fail if you're fetching from a HTTPS server that does not have a valid certificate.
-
-`<nix/fetchurl.nix>` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue.
diff --git a/doc/manual/src/release-notes/rl-2.24.md b/doc/manual/src/release-notes/rl-2.24.md
index 5bcc1d79c..38358d728 100644
--- a/doc/manual/src/release-notes/rl-2.24.md
+++ b/doc/manual/src/release-notes/rl-2.24.md
@@ -274,6 +274,21 @@
   be configured using the `warn-large-path-threshold` setting, e.g.
   `--warn-large-path-threshold 100M`.
 
+- Wrap filesystem exceptions more correctly [#11378](https://github.com/NixOS/nix/pull/11378)
+
+  With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions.
+
+  This led to no longer generating error traces, for example when listing a non-existing directory.
+
+  This version catches these types of exception correctly and wraps them into Nix's own exception type.
+
+  Author: [**@Mic92**](https://github.com/Mic92)
+
+- `<nix/fetchurl.nix>` uses TLS verification [#11585](https://github.com/NixOS/nix/pull/11585)
+
+  Previously `<nix/fetchurl.nix>` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `<nix/fetchurl.nix>` will now fail if you're fetching from an HTTPS server that does not have a valid certificate.
+
+  `<nix/fetchurl.nix>` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue.
 
 # Contributors
diff --git a/mk/libraries.mk b/mk/libraries.mk
index b99ba2782..a7848ba35 100644
--- a/mk/libraries.mk
+++ b/mk/libraries.mk
@@ -86,7 +86,9 @@ define build-library
   else
   ifndef HOST_DARWIN
    ifndef HOST_WINDOWS
-    $(1)_LDFLAGS += -Wl,-z,defs
+    ifndef HOST_OPENBSD
+     $(1)_LDFLAGS += -Wl,-z,defs
+    endif
    endif
   endif
  endif
diff --git a/mk/platform.mk b/mk/platform.mk
index 22c114a20..3c4fff780 100644
--- a/mk/platform.mk
+++ b/mk/platform.mk
@@ -21,6 +21,10 @@ ifdef HOST_OS
     HOST_NETBSD = 1
     HOST_UNIX = 1
   endif
+  ifeq ($(patsubst openbsd%,,$(HOST_KERNEL)),)
+    HOST_OPENBSD = 1
+    HOST_UNIX = 1
+  endif
   ifeq ($(HOST_KERNEL), linux)
     HOST_LINUX = 1
     HOST_UNIX = 1
diff --git a/package.nix b/package.nix
index a7c8923e8..e1b4aebb0 100644
--- a/package.nix
+++ b/package.nix
@@ -75,7 +75,9 @@
 #
 # Temporarily disabled on Windows because the `GC_throw_bad_alloc`
 # symbol is missing during linking.
-, enableGC ? !stdenv.hostPlatform.isWindows
+#
+# Disabled on OpenBSD because of missing `_data_start` symbol while linking
+, enableGC ? !stdenv.hostPlatform.isWindows && !stdenv.hostPlatform.isOpenBSD
 
 # Whether to enable Markdown rendering in the Nix binary.
 , enableMarkdown ? !stdenv.hostPlatform.isWindows
diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc
index 79ff6e7cd..6efb453ec 100644
--- a/src/libfetchers/git-utils.cc
+++ b/src/libfetchers/git-utils.cc
@@ -159,6 +159,27 @@ static Object peelToTreeOrBlob(git_object * obj)
 {
     return peelObject(obj, GIT_OBJECT_TREE);
 }
+static void initRepoAtomically(std::filesystem::path &path, bool bare) {
+    if (pathExists(path.string())) return;
+
+    Path tmpDir = createTempDir(std::filesystem::path(path).parent_path());
+    AutoDelete delTmpDir(tmpDir, true);
+    Repository tmpRepo;
+
+    if (git_repository_init(Setter(tmpRepo), tmpDir.c_str(), bare))
+        throw Error("creating Git repository %s: %s", path, git_error_last()->message);
+    try {
+        std::filesystem::rename(tmpDir, path);
+    } catch (std::filesystem::filesystem_error & e) {
+        if (e.code() == std::errc::file_exists) // Someone might race us to create the repository.
+            return;
+        else
+            throw SysError("moving temporary git repository from %s to %s", tmpDir, path);
+    }
+    // we successfully moved the repository, so the temporary directory no longer exists.
+    delTmpDir.cancel();
+}
+
 struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
 {
     /** Location of the repository on disk. */
@@ -170,13 +191,10 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
     {
         initLibGit2();
 
-        if (pathExists(path.string())) {
-            if (git_repository_open(Setter(repo), path.string().c_str()))
-                throw Error("opening Git repository '%s': %s", path, git_error_last()->message);
-        } else {
-            if (git_repository_init(Setter(repo), path.string().c_str(), bare))
-                throw Error("creating Git repository '%s': %s", path, git_error_last()->message);
-        }
+        initRepoAtomically(path, bare);
+        if (git_repository_open(Setter(repo), path.string().c_str()))
+            throw Error("opening Git repository '%s': %s", path, git_error_last()->message);
+
     }
 
     operator git_repository * ()
@@ -837,8 +855,24 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink
 
     void pushBuilder(std::string name)
     {
+        const git_tree_entry * entry;
+        Tree prevTree = nullptr;
+
+        if (!pendingDirs.empty() &&
+            (entry = git_treebuilder_get(pendingDirs.back().builder.get(), name.c_str())))
+        {
+            /* Clone a tree that we've already finished. This happens
+               if a tarball has directory entries that are not
+               contiguous. */
+            if (git_tree_entry_type(entry) != GIT_OBJECT_TREE)
+                throw Error("parent of '%s' is not a directory", name);
+
+            if (git_tree_entry_to_object((git_object * *) (git_tree * *) Setter(prevTree), *repo, entry))
+                throw Error("looking up parent of '%s': %s", name, git_error_last()->message);
+        }
+
         git_treebuilder * b;
-        if (git_treebuilder_new(&b, *repo, nullptr))
+        if (git_treebuilder_new(&b, *repo, prevTree.get()))
             throw Error("creating a tree builder: %s", git_error_last()->message);
         pendingDirs.push_back({ .name = std::move(name), .builder = TreeBuilder(b) });
     };
diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index dd4f3b780..52ba73f62 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -90,6 +90,7 @@ DownloadFileResult downloadFile(
     /* Cache metadata for all URLs in the redirect chain. */
     for (auto & url : res.urls) {
         key.second.insert_or_assign("url", url);
+        assert(!res.urls.empty());
         infoAttrs.insert_or_assign("url", *res.urls.rbegin());
         getCache()->upsert(key, *store, infoAttrs, *storePath);
     }
diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh
index 93558b49e..091946e01 100644
--- a/src/libstore/builtins.hh
+++ b/src/libstore/builtins.hh
@@ -9,7 +9,8 @@ namespace nix {
 void builtinFetchurl(
     const BasicDerivation & drv,
     const std::map & outputs,
-    const std::string & netrcData);
+    const std::string & netrcData,
+    const std::string & caFileData);
 
 void builtinUnpackChannel(
     const BasicDerivation & drv,
diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc
index f33060c33..90e58dfdb 100644
--- a/src/libstore/builtins/fetchurl.cc
+++ b/src/libstore/builtins/fetchurl.cc
@@ -9,7 +9,8 @@ namespace nix {
 void builtinFetchurl(
     const BasicDerivation & drv,
     const std::map & outputs,
-    const std::string & netrcData)
+    const std::string & netrcData,
+    const std::string & caFileData)
 {
     /* Make the host's netrc data available. Too bad curl requires
        this to be stored in a file. It would be nice if we could just
@@ -19,6 +20,9 @@ void builtinFetchurl(
         writeFile(settings.netrcFile, netrcData, 0600);
     }
 
+    settings.caFile = "ca-certificates.crt";
+    writeFile(settings.caFile, caFileData, 0600);
+
     auto out = get(drv.outputs, "out");
     if (!out) throw Error("'builtin:fetchurl' requires an 'out' output");
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 5ea8b6f96..b84210805 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -754,12 +754,17 @@ struct curlFileTransfer : public FileTransfer
 
                 S3Helper s3Helper(profile, region, scheme, endpoint);
 
+                Activity act(*logger, lvlTalkative, actFileTransfer,
+                    fmt("downloading '%s'", request.uri),
+                    {request.uri}, request.parentAct);
+
                 // FIXME: implement ETag
                 auto s3Res = s3Helper.getObject(bucketName, key);
                 FileTransferResult res;
                 if (!s3Res.data)
                     throw FileTransferError(NotFound, "S3 object '%s' does not exist", request.uri);
                 res.data = std::move(*s3Res.data);
+                res.urls.push_back(request.uri);
                 callback(std::move(res));
 #else
                 throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 21175b1eb..bcbf0b55e 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -9,6 +9,7 @@
 #include "globals.hh"
 #include "compression.hh"
 #include "filetransfer.hh"
+#include "signals.hh"
 
 #include
 #include
@@ -117,6 +118,7 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy
 {
     bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override
     {
+        checkInterrupt();
         auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries);
         if (retry)
             printError("AWS error '%s' (%s), will retry in %d ms",
diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc
index c9a54bb0f..54ca69580 100644
--- a/src/libstore/unix/build/local-derivation-goal.cc
+++ b/src/libstore/unix/build/local-derivation-goal.cc
@@ -1746,13 +1746,20 @@ void LocalDerivationGoal::runChild()
 
         bool setUser = true;
 
-        /* Make the contents of netrc available to builtin:fetchurl
-           (which may run under a different uid and/or in a sandbox). */
+        /* Make the contents of netrc and the CA certificate bundle
+           available to builtin:fetchurl (which may run under a
+           different uid and/or in a sandbox). */
         std::string netrcData;
-        try {
-            if (drv->isBuiltin() && drv->builder == "builtin:fetchurl")
-                netrcData = readFile(settings.netrcFile);
-        } catch (SystemError &) { }
+        std::string caFileData;
+        if (drv->isBuiltin() && drv->builder == "builtin:fetchurl") {
+            try {
+                netrcData = readFile(settings.netrcFile);
+            } catch (SystemError &) { }
+
+            try {
+                caFileData = readFile(settings.caFile);
+            } catch (SystemError &) { }
+        }
 
 #if __linux__
         if (useChroot) {
@@ -2191,7 +2198,7 @@ void LocalDerivationGoal::runChild()
                             worker.store.printStorePath(scratchOutputs.at(e.first)));
 
                 if (drv->builder == "builtin:fetchurl")
-                    builtinFetchurl(*drv, outputs, netrcData);
+                    builtinFetchurl(*drv, outputs, netrcData, caFileData);
                 else if (drv->builder == "builtin:buildenv")
                     builtinBuildenv(*drv, outputs);
                 else if (drv->builder == "builtin:unpack-channel")
diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc
index 060a806fb..04e4369fa 100644
--- a/src/libutil/file-system.cc
+++ b/src/libutil/file-system.cc
@@ -574,7 +574,28 @@ void setWriteTime(
     time_t modificationTime,
     std::optional<bool> optIsSymlink)
 {
-#ifndef _WIN32
+#ifdef _WIN32
+    // FIXME use `fs::last_write_time`.
+    //
+    // Would be nice to use std::filesystem unconditionally, but
+    // doesn't support access time just modification time.
+    //
+    // System clock vs File clock issues also make that annoying.
+    warn("Changing file times is not yet implemented on Windows, path is '%s'", path);
+#elif HAVE_UTIMENSAT && HAVE_DECL_AT_SYMLINK_NOFOLLOW
+    struct timespec times[2] = {
+        {
+            .tv_sec = accessedTime,
+            .tv_nsec = 0,
+        },
+        {
+            .tv_sec = modificationTime,
+            .tv_nsec = 0,
+        },
+    };
+    if (utimensat(AT_FDCWD, path.c_str(), times, AT_SYMLINK_NOFOLLOW) == -1)
+        throw SysError("changing modification time of '%s' (using `utimensat`)", path);
+#else
     struct timeval times[2] = {
         {
             .tv_sec = accessedTime,
@@ -585,42 +606,21 @@ void setWriteTime(
             .tv_usec = 0,
         },
     };
-#endif
-
-    auto nonSymlink = [&]{
-        bool isSymlink = optIsSymlink
-            ? *optIsSymlink
-            : fs::is_symlink(path);
-
-        if (!isSymlink) {
-#ifdef _WIN32
-            // FIXME use `fs::last_write_time`.
-            //
-            // Would be nice to use std::filesystem unconditionally, but
-            // doesn't support access time just modification time.
-            //
-            // System clock vs File clock issues also make that annoying.
-            warn("Changing file times is not yet implemented on Windows, path is '%s'", path);
-#else
-            if (utimes(path.c_str(), times) == -1) {
-
-                throw SysError("changing modification time of '%s' (not a symlink)", path);
-            }
-#endif
-        } else {
-            throw Error("Cannot modification time of symlink '%s'", path);
-        }
-    };
-
 #if HAVE_LUTIMES
-    if (lutimes(path.c_str(), times) == -1) {
-        if (errno == ENOSYS)
-            nonSymlink();
-        else
-            throw SysError("changing modification time of '%s'", path);
-    }
+    if (lutimes(path.c_str(), times) == -1)
+        throw SysError("changing modification time of '%s'", path);
 #else
-    nonSymlink();
+    bool isSymlink = optIsSymlink
+        ? *optIsSymlink
+        : fs::is_symlink(path);
+
+    if (!isSymlink) {
+        if (utimes(path.c_str(), times) == -1)
+            throw SysError("changing modification time of '%s' (not a symlink)", path);
+    } else {
+        throw Error("Cannot modification time of symlink '%s'", path);
+    }
+#endif
 #endif
 }
diff --git a/src/libutil/meson.build b/src/libutil/meson.build
index 8552c4c9d..cba5a5288 100644
--- a/src/libutil/meson.build
+++ b/src/libutil/meson.build
@@ -41,6 +41,8 @@ check_funcs = [
   # Optionally used to try to close more file descriptors (e.g. before
   # forking) on Unix.
   'sysconf',
+  # Optionally used for changing the mtime of files and symlinks.
+  'utimensat',
 ]
 
 foreach funcspec : check_funcs
   define_name = 'HAVE_' + funcspec.underscorify().to_upper()
@@ -48,6 +50,8 @@ foreach funcspec : check_funcs
   configdata.set(define_name, define_value)
 endforeach
 
+configdata.set('HAVE_DECL_AT_SYMLINK_NOFOLLOW', cxx.has_header_symbol('fcntl.h', 'AT_SYMLINK_NOFOLLOW').to_int())
+
 subdir('build-utils-meson/threads')
 
 if host_machine.system() == 'windows'
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index a5b9e1e54..5346641eb 100644
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -526,8 +526,6 @@ static void main_nix_build(int argc, char * * argv)
     // Set the environment.
     auto env = getEnv();
 
-    auto tmp = getEnvNonEmpty("TMPDIR").value_or("/tmp");
-
     if (pure) {
         decltype(env) newEnv;
         for (auto & i : env)
@@ -538,18 +536,16 @@ static void main_nix_build(int argc, char * * argv)
         env["__ETC_PROFILE_SOURCED"] = "1";
     }
 
-    env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmp;
+    env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDir.path();
     env["NIX_STORE"] = store->storeDir;
     env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores);
 
     auto passAsFile = tokenizeString(getOr(drv.env, "passAsFile", ""));
 
-    bool keepTmp = false;
-
     int fileNr = 0;
     for (auto & var : drv.env)
         if (passAsFile.count(var.first)) {
-            keepTmp = true;
             auto fn = ".attr-" + std::to_string(fileNr++);
             Path p = (tmpDir.path() / fn).string();
             writeFile(p, var.second);
@@ -591,7 +587,6 @@ static void main_nix_build(int argc, char * * argv)
 
             env["NIX_ATTRS_SH_FILE"] = attrsSH;
             env["NIX_ATTRS_JSON_FILE"] = attrsJSON;
-            keepTmp = true;
         }
     }
@@ -601,12 +596,10 @@ static void main_nix_build(int argc, char * * argv)
            lose the current $PATH directories. */
         auto rcfile = (tmpDir.path() / "rc").string();
         std::string rc = fmt(
-                R"(_nix_shell_clean_tmpdir() { command rm -rf %1%; }; )"s +
-                (keepTmp ?
-                    "trap _nix_shell_clean_tmpdir EXIT; "
-                    "exitHooks+=(_nix_shell_clean_tmpdir); "
-                    "failureHooks+=(_nix_shell_clean_tmpdir); ":
-                    "_nix_shell_clean_tmpdir; ") +
+                (R"(_nix_shell_clean_tmpdir() { command rm -rf %1%; };)"s +
+                "trap _nix_shell_clean_tmpdir EXIT; "
+                "exitHooks+=(_nix_shell_clean_tmpdir); "
+                "failureHooks+=(_nix_shell_clean_tmpdir); ") +
                 (pure ? "" : "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;") +
                 "%2%"
                 // always clear PATH.
diff --git a/tests/functional/nix-shell.sh b/tests/functional/nix-shell.sh
index b9625eb66..b14e3dc6a 100755
--- a/tests/functional/nix-shell.sh
+++ b/tests/functional/nix-shell.sh
@@ -31,6 +31,15 @@ output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv
 
 [ "$output" = " - foo - bar - baz" ]
 
+# test NIX_BUILD_TOP
+testTmpDir=$(pwd)/nix-shell
+mkdir -p "$testTmpDir"
+output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run 'echo $NIX_BUILD_TOP')
+[[ "$output" =~ ${testTmpDir}.* ]] || {
+    echo "expected $output =~ ${testTmpDir}.*" >&2
+    exit 1
+}
+
 # Test nix-shell on a .drv
 [[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \
     'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]]
diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh
index 4d8945625..a1e0f9cb0 100755
--- a/tests/functional/tarball.sh
+++ b/tests/functional/tarball.sh
@@ -100,3 +100,17 @@ chmod +x "$TEST_ROOT/tar_root/foo"
 tar cvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" .
 path="$(nix flake prefetch --refresh --json "tarball+file://$TEST_ROOT/tar.tar" | jq -r .storePath)"
 [[ $(cat "$path/foo") = bar ]]
+
+# Test a tarball with non-contiguous directory entries.
+rm -rf "$TEST_ROOT/tar_root"
+mkdir -p "$TEST_ROOT/tar_root/a/b"
+echo foo > "$TEST_ROOT/tar_root/a/b/foo"
+echo bla > "$TEST_ROOT/tar_root/bla"
+tar cvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" .
+echo abc > "$TEST_ROOT/tar_root/bla"
+echo xyzzy > "$TEST_ROOT/tar_root/a/b/xyzzy"
+tar rvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" ./a/b/xyzzy ./bla
+path="$(nix flake prefetch --refresh --json "tarball+file://$TEST_ROOT/tar.tar" | jq -r .storePath)"
+[[ $(cat "$path/a/b/xyzzy") = xyzzy ]]
+[[ $(cat "$path/a/b/foo") = foo ]]
+[[ $(cat "$path/bla") = abc ]]
diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix
index 313dc2f3c..e79bb59b8 100644
--- a/tests/nixos/default.nix
+++ b/tests/nixos/default.nix
@@ -148,4 +148,6 @@ in
   user-sandboxing = runNixOSTestFor "x86_64-linux" ./user-sandboxing;
 
   fetchurl = runNixOSTestFor "x86_64-linux" ./fetchurl.nix;
+
+  s3-binary-cache-store = runNixOSTestFor "x86_64-linux" ./s3-binary-cache-store.nix;
 }
diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix
index 476f779bc..243c0cacc 100644
--- a/tests/nixos/fetchurl.nix
+++ b/tests/nixos/fetchurl.nix
@@ -1,7 +1,7 @@
 # Test whether builtin:fetchurl properly performs TLS certificate
 # checks on HTTPS servers.
 
-{ lib, config, pkgs, ... }:
+{ pkgs, ... }:
 
 let
 
@@ -25,7 +25,7 @@ in
   name = "nss-preload";
 
   nodes = {
-    machine = { lib, pkgs, ... }: {
+    machine = { pkgs, ... }: {
 
       services.nginx = {
         enable = true;
@@ -60,13 +60,16 @@ in
     };
   };
 
-  testScript = { nodes, ... }: ''
+  testScript = ''
     machine.wait_for_unit("nginx")
     machine.wait_for_open_port(443)
 
     out = machine.succeed("curl https://good/index.html")
     assert out == "hello world\n"
 
+    out = machine.succeed("cat ${badCert}/cert.pem > /tmp/cafile.pem; curl --cacert /tmp/cafile.pem https://bad/index.html")
+    assert out == "foobar\n"
+
     # Fetching from a server with a trusted cert should work.
machine.succeed("nix build --no-substitute --expr 'import { url = \"https://good/index.html\"; hash = \"sha256-qUiQTy8PR5uPgZdpSzAYSw0u0cHNKh7A+4XSmaGSpEc=\"; }'") @@ -74,5 +77,8 @@ in err = machine.fail("nix build --no-substitute --expr 'import { url = \"https://bad/index.html\"; hash = \"sha256-rsBwZF/lPuOzdjBZN2E08FjMM3JHyXit0Xi2zN+wAZ8=\"; }' 2>&1") print(err) assert "SSL certificate problem: self-signed certificate" in err + + # Fetching from a server with a trusted cert should work via environment variable override. + machine.succeed("NIX_SSL_CERT_FILE=/tmp/cafile.pem nix build --no-substitute --expr 'import { url = \"https://bad/index.html\"; hash = \"sha256-rsBwZF/lPuOzdjBZN2E08FjMM3JHyXit0Xi2zN+wAZ8=\"; }'") ''; } diff --git a/tests/nixos/nix-copy-closure.nix b/tests/nixos/nix-copy-closure.nix index 66cbfb033..b9daa0a1f 100644 --- a/tests/nixos/nix-copy-closure.nix +++ b/tests/nixos/nix-copy-closure.nix @@ -1,6 +1,6 @@ # Test ‘nix-copy-closure’. -{ lib, config, nixpkgs, hostPkgs, ... }: +{ lib, config, nixpkgs, ... }: let pkgs = config.nodes.client.nixpkgs.pkgs; diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix new file mode 100644 index 000000000..6ae2e3572 --- /dev/null +++ b/tests/nixos/s3-binary-cache-store.nix @@ -0,0 +1,66 @@ +{ lib, config, nixpkgs, ... }: + +let + pkgs = config.nodes.client.nixpkgs.pkgs; + + pkgA = pkgs.cowsay; + + accessKey = "BKIKJAA5BMMU2RHO6IBB"; + secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; + env = "AWS_ACCESS_KEY_ID=${accessKey} AWS_SECRET_ACCESS_KEY=${secretKey}"; + + storeUrl = "s3://my-cache?endpoint=http://server:9000®ion=eu-west-1"; + +in { + name = "nix-copy-closure"; + + nodes = + { server = + { config, lib, pkgs, ... }: + { virtualisation.writableStore = true; + virtualisation.additionalPaths = [ pkgA ]; + environment.systemPackages = [ pkgs.minio-client ]; + nix.extraOptions = "experimental-features = nix-command"; + services.minio = { + enable = true; + region = "eu-west-1"; + rootCredentialsFile = pkgs.writeText "minio-credentials-full" '' + MINIO_ROOT_USER=${accessKey} + MINIO_ROOT_PASSWORD=${secretKey} + ''; + }; + networking.firewall.allowedTCPPorts = [ 9000 ]; + }; + + client = + { config, pkgs, ... }: + { virtualisation.writableStore = true; + nix.extraOptions = "experimental-features = nix-command"; + }; + }; + + testScript = { nodes }: '' + # fmt: off + start_all() + + # Create a binary cache. + server.wait_for_unit("minio") + + server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4") + server.succeed("mc mb minio/my-cache") + + server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}") + + # Test fetchurl on s3:// URLs while we're at it. + client.succeed("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"s3://my-cache/nix-cache-info?endpoint=http://server:9000®ion=eu-west-1\"; }'") + + # Copy a package from the binary cache. + client.fail("nix path-info ${pkgA}") + + client.succeed("${env} nix store info --store '${storeUrl}' >&2") + + client.succeed("${env} nix copy --no-check-sigs --from '${storeUrl}' ${pkgA}") + + client.succeed("nix path-info ${pkgA}") + ''; +}