Mirror of https://github.com/NixOS/nix

Merge pull request #39 from DeterminateSystems/sync-2.24.9

Sync with upstream 2.24.9
Commit a2994430eb by Eelco Dolstra, 2024-10-30 17:10:17 +01:00 (committed by GitHub).
72 changed files with 825 additions and 501 deletions

@ -1,32 +0,0 @@
name: Backport
on:
pull_request_target:
types: [closed, labeled]
permissions:
contents: read
jobs:
backport:
name: Backport Pull Request
permissions:
# for zeebe-io/backport-action
contents: write
pull-requests: write
if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
# required to find all branches
fetch-depth: 0
- name: Create backport PRs
# should be kept in sync with `version`
uses: zeebe-io/backport-action@v3.0.2
with:
# Config README: https://github.com/zeebe-io/backport-action#backport-action
github_token: ${{ secrets.GITHUB_TOKEN }}
github_workspace: ${{ github.workspace }}
pull_description: |-
Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}.
# should be kept in sync with `uses`
version: v0.0.5

.mergify.yml Normal file

@ -0,0 +1,92 @@
queue_rules:
- name: default
# all required tests need to go here
merge_conditions:
- check-success=installer
- check-success=installer_test (macos-latest)
- check-success=installer_test (ubuntu-latest)
- check-success=tests (macos-latest)
- check-success=tests (ubuntu-latest)
- check-success=vm_tests
merge_method: rebase
batch_size: 5
pull_request_rules:
- name: merge using the merge queue
conditions:
- base=master
- label~=merge-queue|dependencies
actions:
queue: {}
# The rules below will first create backport pull requests and put those in a merge queue.
- name: backport patches to 2.18
conditions:
- label=backport 2.18-maintenance
actions:
backport:
branches:
- 2.18-maintenance
labels:
- merge-queue
- name: backport patches to 2.19
conditions:
- label=backport 2.19-maintenance
actions:
backport:
branches:
- 2.19-maintenance
labels:
- merge-queue
- name: backport patches to 2.20
conditions:
- label=backport 2.20-maintenance
actions:
backport:
branches:
- 2.20-maintenance
labels:
- merge-queue
- name: backport patches to 2.21
conditions:
- label=backport 2.21-maintenance
actions:
backport:
branches:
- 2.21-maintenance
labels:
- merge-queue
- name: backport patches to 2.22
conditions:
- label=backport 2.22-maintenance
actions:
backport:
branches:
- 2.22-maintenance
labels:
- merge-queue
- name: backport patches to 2.23
conditions:
- label=backport 2.23-maintenance
actions:
backport:
branches:
- 2.23-maintenance
labels:
- merge-queue
- name: backport patches to 2.24
conditions:
- label=backport 2.24-maintenance
actions:
backport:
branches:
- "2.24-maintenance"
labels:
- merge-queue

@ -1 +1 @@
2.24.2
2.24.10

@ -89,9 +89,10 @@ AC_LANG_POP(C++)
AC_CHECK_FUNCS([statvfs pipe2])
# Check for lutimes, optionally used for changing the mtime of
# symlinks.
AC_CHECK_FUNCS([lutimes])
# Check for lutimes and utimensat, optionally used for changing the
# mtime of symlinks.
AC_CHECK_DECLS([AT_SYMLINK_NOFOLLOW], [], [], [[#include <fcntl.h>]])
AC_CHECK_FUNCS([lutimes utimensat])
# Check whether the store optimiser can optimise symlinks.

@ -14,6 +14,14 @@ This option requires either:
* Linux running systemd, with SELinux disabled
* MacOS
> **Updating to macOS 15 Sequoia**
>
> If you recently updated to macOS 15 Sequoia and are getting
> ```console
> error: the user '_nixbld1' in the group 'nixbld' does not exist
> ```
> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling.
```console
$ bash <(curl -L https://nixos.org/nix/install) --daemon
```

@ -1,5 +1,13 @@
# Installing a Binary Distribution
> **Updating to macOS 15 Sequoia**
>
> If you recently updated to macOS 15 Sequoia and are getting
> ```console
> error: the user '_nixbld1' in the group 'nixbld' does not exist
> ```
> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling.
To install the latest version of Nix, run the following command:
```console

@ -39,8 +39,6 @@
`pkgconfig` and the Boehm garbage collector, and pass the flag
`--enable-gc` to `configure`.
For `bdw-gc` <= 8.2.4 Nix needs a [small patch](https://github.com/NixOS/nix/blob/ac4d2e7b857acdfeac35ac8a592bdecee2d29838/boehmgc-traceable_allocator-public.diff) to be applied.
- The `boost` library of version 1.66.0 or higher. It can be obtained
from the official web site <https://www.boost.org/>.

@ -43,6 +43,14 @@ which you may remove.
### macOS
> **Updating to macOS 15 Sequoia**
>
> If you recently updated to macOS 15 Sequoia and are getting
> ```console
> error: the user '_nixbld1' in the group 'nixbld' does not exist
> ```
> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling.
1. If system-wide shell initialisation files haven't been altered since installing Nix, use the backups made by the installer:
```console

@ -274,6 +274,21 @@
be configured using the `warn-large-path-threshold` setting,
e.g. `--warn-large-path-threshold 100M`.
- Wrap filesystem exceptions more correctly [#11378](https://github.com/NixOS/nix/pull/11378)
With the switch to `std::filesystem` in various places, Nix started to throw `std::filesystem::filesystem_error` instead of its own exceptions.
This meant that error traces were no longer generated, for example when listing a non-existent directory.
This version catches these exceptions correctly and wraps them in Nix's own exception type; the pattern is sketched below.
Author: [**@Mic92**](https://github.com/Mic92)
- `<nix/fetchurl.nix>` uses TLS verification [#11585](https://github.com/NixOS/nix/pull/11585)
Previously `<nix/fetchurl.nix>` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `<nix/fetchurl.nix>` will now fail if you're fetching from an HTTPS server that does not have a valid certificate.
`<nix/fetchurl.nix>` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue.
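A minimal, self-contained sketch of the exception-wrapping pattern from the first note above (the `DomainError` type is a stand-in for Nix's own exception class; compare the `PosixSourceAccessor` hunk later in this diff):
```cpp
#include <cstdint>
#include <filesystem>
#include <stdexcept>
#include <string>

// Stand-in for Nix's own exception class (illustration only).
struct DomainError : std::runtime_error
{
    using std::runtime_error::runtime_error;
};

std::uintmax_t sizeOf(const std::filesystem::path & p)
{
    try {
        return std::filesystem::file_size(p);
    } catch (std::filesystem::filesystem_error & e) {
        // Re-wrap so callers deal with one exception hierarchy and an
        // error trace can be attached, instead of a raw filesystem_error.
        throw DomainError("reading size of '" + p.string() + "': " + e.code().message());
    }
}
```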
# Contributors

flake.lock generated

@ -79,11 +79,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1721548954,
"narHash": "sha256-7cCC8+Tdq1+3OPyc3+gVo9dzUNkNIQfwSDJ2HSi2u3o=",
"lastModified": 1723688146,
"narHash": "sha256-sqLwJcHYeWLOeP/XoLwAtYjr01TISlkOfz+NG82pbdg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "63d37ccd2d178d54e7fb691d7ec76000740ea24a",
"rev": "c3d4ac725177c030b1e289015989da2ad9d56af0",
"type": "github"
},
"original": {

@ -86,7 +86,9 @@ define build-library
else
ifndef HOST_DARWIN
ifndef HOST_WINDOWS
$(1)_LDFLAGS += -Wl,-z,defs
ifndef HOST_OPENBSD
$(1)_LDFLAGS += -Wl,-z,defs
endif
endif
endif
endif

@ -21,6 +21,10 @@ ifdef HOST_OS
HOST_NETBSD = 1
HOST_UNIX = 1
endif
ifeq ($(patsubst openbsd%,,$(HOST_KERNEL)),)
HOST_OPENBSD = 1
HOST_UNIX = 1
endif
ifeq ($(HOST_KERNEL), linux)
HOST_LINUX = 1
HOST_UNIX = 1

@ -75,7 +75,9 @@
#
# Temporarily disabled on Windows because the `GC_throw_bad_alloc`
# symbol is missing during linking.
, enableGC ? !stdenv.hostPlatform.isWindows
#
# Disabled on OpenBSD because of missing `_data_start` symbol while linking
, enableGC ? !stdenv.hostPlatform.isWindows && !stdenv.hostPlatform.isOpenBSD
# Whether to enable Markdown rendering in the Nix binary.
, enableMarkdown ? !stdenv.hostPlatform.isWindows

@ -1,6 +1,6 @@
#!/usr/bin/env bash
((NEW_NIX_FIRST_BUILD_UID=301))
((NEW_NIX_FIRST_BUILD_UID=351))
id_available(){
dscl . list /Users UniqueID | grep -E '\b'"$1"'\b' >/dev/null

@ -4,7 +4,17 @@ set -eu
set -o pipefail
# System specific settings
export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-301}"
# Notes:
# - up to macOS Big Sur we used the same GID/UIDs as Linux (30000:30001-32)
# - we changed UID to 301 because Big Sur updates failed into recovery mode
# we're targeting the 200-400 UID range for role users mentioned in the
# usage note for sysadminctl
# - we changed UID to 351 because Sequoia now uses UIDs 300-304 for its own
# daemon users
# - we changed GID to 350 alongside above just because it hides the nixbld
# group from the Users & Groups settings panel :)
export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-351}"
export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-350}"
export NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist

@ -23,10 +23,10 @@ readonly RED='\033[31m'
# installer allows overriding build user count to speed up installation
# as creating each user takes non-trivial amount of time on macos
readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
readonly NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}"
readonly NIX_BUILD_GROUP_NAME="nixbld"
# each system specific installer must set these:
# NIX_FIRST_BUILD_UID
# NIX_BUILD_GROUP_ID
# NIX_BUILD_USER_NAME_TEMPLATE
# Please don't change this. We don't support it, because the
# default shell profile that comes with Nix doesn't support it.
@ -530,9 +530,7 @@ It seems the build group $NIX_BUILD_GROUP_NAME already exists, but
with the UID $primary_group_id. This script can't really handle
that right now, so I'm going to give up.
You can fix this by editing this script and changing the
NIX_BUILD_GROUP_ID variable near the top to from $NIX_BUILD_GROUP_ID
to $primary_group_id and re-run.
You can export NIX_BUILD_GROUP_ID=$primary_group_id and re-run.
EOF
else
row " Exists" "Yes"

@ -5,6 +5,7 @@ set -o pipefail
# System specific settings
export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-30001}"
export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}"
export NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
readonly SERVICE_SRC=/lib/systemd/system/nix-daemon.service

@ -170,7 +170,9 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas
{
if (EvalSettings::isPseudoUrl(s)) {
auto accessor = fetchers::downloadTarball(
EvalSettings::resolvePseudoUrl(s)).accessor;
state.store,
state.fetchSettings,
EvalSettings::resolvePseudoUrl(s));
auto storePath = fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy);
return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
}

@ -43,20 +43,6 @@ std::vector<std::string> InstallableFlake::getActualAttrPaths()
return res;
}
Value * InstallableFlake::getFlakeOutputs(EvalState & state, const flake::LockedFlake & lockedFlake)
{
auto vFlake = state.allocValue();
callFlake(state, lockedFlake, *vFlake);
auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs"));
assert(aOutputs);
state.forceValue(*aOutputs->value, aOutputs->value->determinePos(noPos));
return aOutputs->value;
}
static std::string showAttrPaths(const std::vector<std::string> & paths)
{
std::string s;

@ -53,8 +53,6 @@ struct InstallableFlake : InstallableValue
std::vector<std::string> getActualAttrPaths();
Value * getFlakeOutputs(EvalState & state, const flake::LockedFlake & lockedFlake);
DerivedPathsWithInfo toDerivedPaths() override;
std::pair<Value *, PosIdx> toValue(EvalState & state) override;

@ -32,122 +32,6 @@ static void * oomHandler(size_t requested)
throw std::bad_alloc();
}
class BoehmGCStackAllocator : public StackAllocator
{
boost::coroutines2::protected_fixedsize_stack stack{
// We allocate 8 MB, the default max stack size on NixOS.
// A smaller stack might be quicker to allocate but reduces the stack
// depth available for source filter expressions etc.
std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))};
// This is specific to boost::coroutines2::protected_fixedsize_stack.
// The stack protection page is included in sctx.size, so we have to
// subtract one page size from the stack size.
std::size_t pfss_usable_stack_size(boost::context::stack_context & sctx)
{
return sctx.size - boost::context::stack_traits::page_size();
}
public:
boost::context::stack_context allocate() override
{
auto sctx = stack.allocate();
// Stacks generally start at a high address and grow to lower addresses.
// Architectures that do the opposite are rare; in fact so rare that
// boost_routine does not implement it.
// So we subtract the stack size.
GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
return sctx;
}
void deallocate(boost::context::stack_context sctx) override
{
GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
stack.deallocate(sctx);
}
};
static BoehmGCStackAllocator boehmGCStackAllocator;
/**
* When a thread goes into a coroutine, we lose its original sp until
* control flow returns to the thread.
* While in the coroutine, the sp points outside the thread stack,
* so we can detect this and push the entire thread stack instead,
* as an approximation.
* The coroutine's stack is covered by `BoehmGCStackAllocator`.
* This is not an optimal solution, because the garbage is scanned when a
* coroutine is active, for both the coroutine and the original thread stack.
* However, the implementation is quite lean, and usually we don't have active
* coroutines during evaluation, so this is acceptable.
*/
void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id)
{
void *& sp = *sp_ptr;
auto pthread_id = reinterpret_cast<pthread_t>(_pthread_id);
# ifndef __APPLE__
pthread_attr_t pattr;
# endif
size_t osStackSize;
// The low address of the stack, which grows down.
void * osStackLimit;
void * osStackBase;
# ifdef __APPLE__
osStackSize = pthread_get_stacksize_np(pthread_id);
osStackLimit = pthread_get_stackaddr_np(pthread_id);
# else
if (pthread_attr_init(&pattr)) {
throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
}
# ifdef HAVE_PTHREAD_GETATTR_NP
if (pthread_getattr_np(pthread_id, &pattr)) {
throw Error("fixupBoehmStackPointer: pthread_getattr_np failed");
}
# elif HAVE_PTHREAD_ATTR_GET_NP
if (!pthread_attr_init(&pattr)) {
throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
}
if (!pthread_attr_get_np(pthread_id, &pattr)) {
throw Error("fixupBoehmStackPointer: pthread_attr_get_np failed");
}
# else
# error "Need one of `pthread_attr_get_np` or `pthread_getattr_np`"
# endif
if (pthread_attr_getstack(&pattr, &osStackLimit, &osStackSize)) {
throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed");
}
if (pthread_attr_destroy(&pattr)) {
throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed");
}
# endif
osStackBase = (char *) osStackLimit + osStackSize;
// NOTE: We assume the stack grows down, as it does on all architectures we support.
// Architectures that grow the stack up are rare.
if (sp >= osStackBase || sp < osStackLimit) { // sp is outside the os stack
sp = osStackLimit;
}
}
/* Disable GC while this object lives. Used by CoroutineContext.
*
* Boehm keeps a count of GC_disable() and GC_enable() calls,
* and only enables GC when the count matches.
*/
class BoehmDisableGC
{
public:
BoehmDisableGC()
{
GC_disable();
};
~BoehmDisableGC()
{
GC_enable();
};
};
static inline void initGCReal()
{
/* Initialise the Boehm garbage collector. */
@ -168,24 +52,6 @@ static inline void initGCReal()
GC_set_oom_fn(oomHandler);
StackAllocator::defaultAllocator = &boehmGCStackAllocator;
// TODO: Remove __APPLE__ condition.
// Comment suggests an implementation that works on darwin and windows
// https://github.com/ivmai/bdwgc/issues/362#issuecomment-1936672196
# if GC_VERSION_MAJOR >= 8 && GC_VERSION_MINOR >= 2 && GC_VERSION_MICRO >= 4 && !defined(__APPLE__)
GC_set_sp_corrector(&fixupBoehmStackPointer);
if (!GC_get_sp_corrector()) {
printTalkative("BoehmGC on this platform does not support sp_corrector; will disable GC inside coroutines");
/* Used to disable GC when entering coroutines on macOS */
create_coro_gc_hook = []() -> std::shared_ptr<void> { return std::make_shared<BoehmDisableGC>(); };
}
# else
# warning \
"BoehmGC version does not support GC while coroutine exists. GC will be disabled inside coroutines. Consider updating bdw-gc to 8.2.4 or later."
# endif
/* Set the initial heap size to something fairly big (25% of
physical RAM, up to a maximum of 384 MiB) so that in most cases
we don't need to garbage collect at all. (Collection has a

@ -3083,7 +3083,9 @@ std::optional<std::string> EvalState::resolveLookupPathPath(const LookupPath::Pa
if (EvalSettings::isPseudoUrl(value)) {
try {
auto accessor = fetchers::downloadTarball(
EvalSettings::resolvePseudoUrl(value)).accessor;
store,
fetchSettings,
EvalSettings::resolvePseudoUrl(value));
auto storePath = fetchToStore(*store, SourcePath(accessor), FetchMode::Copy);
return finish(store->toRealPath(storePath));
} catch (Error & e) {

@ -3136,7 +3136,11 @@ static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value * * arg
std::optional<ListBuilder> list;
};
#if HAVE_BOEHMGC
std::map<Symbol, Item, std::less<Symbol>, traceable_allocator<std::pair<const Symbol, Item>>> attrsSeen;
#else
std::map<Symbol, Item> attrsSeen;
#endif
state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.zipAttrsWith");
state.forceList(*args[1], pos, "while evaluating the second argument passed to builtins.zipAttrsWith");

@ -495,7 +495,11 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
// https://github.com/NixOS/nix/issues/4313
auto storePath =
unpack
? fetchToStore(*state.store, fetchers::downloadTarball(*url).accessor, FetchMode::Copy, name)
? fetchToStore(
*state.store,
fetchers::downloadTarball(state.store, state.fetchSettings, *url),
FetchMode::Copy,
name)
: fetchers::downloadFile(state.store, *url, name).storePath;
if (expectedHash) {

@ -159,6 +159,27 @@ static Object peelToTreeOrBlob(git_object * obj)
return peelObject<Object>(obj, GIT_OBJECT_TREE);
}
static void initRepoAtomically(std::filesystem::path &path, bool bare) {
if (pathExists(path.string())) return;
Path tmpDir = createTempDir(std::filesystem::path(path).parent_path());
AutoDelete delTmpDir(tmpDir, true);
Repository tmpRepo;
if (git_repository_init(Setter(tmpRepo), tmpDir.c_str(), bare))
throw Error("creating Git repository %s: %s", path, git_error_last()->message);
try {
std::filesystem::rename(tmpDir, path);
} catch (std::filesystem::filesystem_error & e) {
if (e.code() == std::errc::file_exists) // Someone might race us to create the repository.
return;
else
throw SysError("moving temporary git repository from %s to %s", tmpDir, path);
}
// we successfully moved the repository, so the temporary directory no longer exists.
delTmpDir.cancel();
}
struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
{
/** Location of the repository on disk. */
@ -170,13 +191,10 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
{
initLibGit2();
if (pathExists(path.string())) {
if (git_repository_open(Setter(repo), path.string().c_str()))
throw Error("opening Git repository '%s': %s", path, git_error_last()->message);
} else {
if (git_repository_init(Setter(repo), path.string().c_str(), bare))
throw Error("creating Git repository '%s': %s", path, git_error_last()->message);
}
initRepoAtomically(path, bare);
if (git_repository_open(Setter(repo), path.string().c_str()))
throw Error("opening Git repository '%s': %s", path, git_error_last()->message);
}
operator git_repository * ()
@ -460,7 +478,13 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
std::string re = R"(Good "git" signature for \* with .* key SHA256:[)";
for (const fetchers::PublicKey & k : publicKeys){
// Calculate sha256 fingerprint from public key and escape the regex symbol '+' to match the key literally
auto fingerprint = trim(hashString(HashAlgorithm::SHA256, base64Decode(k.key)).to_string(nix::HashFormat::Base64, false), "=");
std::string keyDecoded;
try {
keyDecoded = base64Decode(k.key);
} catch (Error & e) {
e.addTrace({}, "while decoding public key '%s' used for git signature", k.key);
}
auto fingerprint = trim(hashString(HashAlgorithm::SHA256, keyDecoded).to_string(nix::HashFormat::Base64, false), "=");
auto escaped_fingerprint = std::regex_replace(fingerprint, std::regex("\\+"), "\\+" );
re += "(" + escaped_fingerprint + ")";
}
@ -601,12 +625,16 @@ struct GitSourceAccessor : SourceAccessor
return readBlob(path, true);
}
Hash getSubmoduleRev(const CanonPath & path)
/**
* If `path` exists and is a submodule, return its
* revision. Otherwise return nothing.
*/
std::optional<Hash> getSubmoduleRev(const CanonPath & path)
{
auto entry = need(path);
auto entry = lookup(path);
if (git_tree_entry_type(entry) != GIT_OBJECT_COMMIT)
throw Error("'%s' is not a submodule", showPath(path));
if (!entry || git_tree_entry_type(entry) != GIT_OBJECT_COMMIT)
return std::nullopt;
return toHash(*git_tree_entry_id(entry));
}
@ -827,8 +855,24 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink
void pushBuilder(std::string name)
{
const git_tree_entry * entry;
Tree prevTree = nullptr;
if (!pendingDirs.empty() &&
(entry = git_treebuilder_get(pendingDirs.back().builder.get(), name.c_str())))
{
/* Clone a tree that we've already finished. This happens
if a tarball has directory entries that are not
contiguous. */
if (git_tree_entry_type(entry) != GIT_OBJECT_TREE)
throw Error("parent of '%s' is not a directory", name);
if (git_tree_entry_to_object((git_object * *) (git_tree * *) Setter(prevTree), *repo, entry))
throw Error("looking up parent of '%s': %s", name, git_error_last()->message);
}
git_treebuilder * b;
if (git_treebuilder_new(&b, *repo, nullptr))
if (git_treebuilder_new(&b, *repo, prevTree.get()))
throw Error("creating a tree builder: %s", git_error_last()->message);
pendingDirs.push_back({ .name = std::move(name), .builder = TreeBuilder(b) });
};
@ -1074,8 +1118,10 @@ std::vector<std::tuple<GitRepoImpl::Submodule, Hash>> GitRepoImpl::getSubmodules
auto rawAccessor = getRawAccessor(rev);
for (auto & submodule : parseSubmodules(pathTemp)) {
auto rev = rawAccessor->getSubmoduleRev(submodule.path);
result.push_back({std::move(submodule), rev});
/* Filter out .gitmodules entries that don't exist or are not
submodules. */
if (auto rev = rawAccessor->getSubmoduleRev(submodule.path))
result.push_back({std::move(submodule), *rev});
}
return result;

@ -584,9 +584,10 @@ struct GitInputScheme : InputScheme
}
try {
setWriteTime(localRefFile, now, now);
if (!input.getRev())
setWriteTime(localRefFile, now, now);
} catch (Error & e) {
warn("could not update mtime for file '%s': %s", localRefFile, e.msg());
warn("could not update mtime for file '%s': %s", localRefFile, e.info().msg);
}
if (!originalRef && !storeCachedHead(repoInfo.url, ref))
warn("could not update cached head '%s' for '%s'", ref, repoInfo.url);

@ -90,6 +90,7 @@ DownloadFileResult downloadFile(
/* Cache metadata for all URLs in the redirect chain. */
for (auto & url : res.urls) {
key.second.insert_or_assign("url", url);
assert(!res.urls.empty());
infoAttrs.insert_or_assign("url", *res.urls.rbegin());
getCache()->upsert(key, *store, infoAttrs, *storePath);
}
@ -102,7 +103,7 @@ DownloadFileResult downloadFile(
};
}
DownloadTarballResult downloadTarball(
static DownloadTarballResult downloadTarball_(
const std::string & url,
const Headers & headers)
{
@ -202,6 +203,22 @@ DownloadTarballResult downloadTarball(
return attrsToResult(infoAttrs);
}
ref<SourceAccessor> downloadTarball(
ref<Store> store,
const Settings & settings,
const std::string & url)
{
/* Go through Input::getAccessor() to ensure that the resulting
accessor has a fingerprint. */
fetchers::Attrs attrs;
attrs.insert_or_assign("type", "tarball");
attrs.insert_or_assign("url", url);
auto input = Input::fromAttrs(settings, std::move(attrs));
return input.getAccessor(store).first;
}
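A hypothetical call site for the new entry point (sketch only, not compilable on its own; it assumes a `ref<Store> store` and a `fetchers::Settings fetchSettings` in scope, mirroring the patched call sites in `lookupFileArg` and `resolveLookupPathPath`):
```cpp
// Hypothetical call site (sketch): the returned accessor carries a
// fingerprint because the request went through Input::getAccessor().
auto accessor = fetchers::downloadTarball(
    store, fetchSettings, "https://example.org/src.tar.gz");

// Copy the tree into the store, as the patched call sites above do.
auto storePath = fetchToStore(*store, SourcePath(accessor), FetchMode::Copy);
```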
// An input scheme corresponding to a curl-downloadable resource.
struct CurlInputScheme : InputScheme
{
@ -353,7 +370,7 @@ struct TarballInputScheme : CurlInputScheme
{
auto input(_input);
auto result = downloadTarball(getStrAttr(input.attrs, "url"), {});
auto result = downloadTarball_(getStrAttr(input.attrs, "url"), {});
result.accessor->setPathDisplay("«" + input.to_string() + "»");

@ -14,6 +14,8 @@ struct SourceAccessor;
namespace nix::fetchers {
struct Settings;
struct DownloadFileResult
{
StorePath storePath;
@ -40,8 +42,9 @@ struct DownloadTarballResult
* Download and import a tarball into the Git cache. The result is the
* Git tree hash of the root directory.
*/
DownloadTarballResult downloadTarball(
const std::string & url,
const Headers & headers = {});
ref<SourceAccessor> downloadTarball(
ref<Store> store,
const Settings & settings,
const std::string & url);
}

@ -183,7 +183,7 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub,
/* Make sure that we are allowed to start a substitution. Note that even
if maxSubstitutionJobs == 0, we still allow a substituter to run. This
prevents infinite waiting. */
if (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
while (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
worker.waitForBuildSlot(shared_from_this());
co_await Suspend{};
}
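Changing `if` to `while` means a goal re-checks the slot limit after every wakeup instead of proceeding on the first one. A self-contained sketch of the same guard pattern using standard primitives (names are illustrative, not the actual Worker/Goal interfaces):
```cpp
#include <algorithm>
#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable slotFreed;
unsigned int nrSubstitutions = 0;
const unsigned int maxSubstitutionJobs = 4;

void acquireSubstitutionSlot()
{
    std::unique_lock<std::mutex> lock(m);
    // Re-test after each wakeup: another goal may have taken the slot
    // between the notify and this thread resuming. A plain `if` would
    // let two goals through on one freed slot.
    while (nrSubstitutions >= std::max(1U, maxSubstitutionJobs))
        slotFreed.wait(lock);
    ++nrSubstitutions;
}

void releaseSubstitutionSlot()
{
    { std::lock_guard<std::mutex> lock(m); --nrSubstitutions; }
    slotFreed.notify_one();
}
```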

@ -184,13 +184,13 @@ void Worker::wakeUp(GoalPtr goal)
}
unsigned Worker::getNrLocalBuilds()
size_t Worker::getNrLocalBuilds()
{
return nrLocalBuilds;
}
unsigned Worker::getNrSubstitutions()
size_t Worker::getNrSubstitutions()
{
return nrSubstitutions;
}

@ -92,12 +92,12 @@ private:
* Number of build slots occupied. This includes local builds but does not
* include substitutions or remote builds via the build hook.
*/
unsigned int nrLocalBuilds;
size_t nrLocalBuilds;
/**
* Number of substitution slots occupied.
*/
unsigned int nrSubstitutions;
size_t nrSubstitutions;
/**
* Maps used to prevent multiple instantiations of a goal for the
@ -235,12 +235,12 @@ public:
* Return the number of local build processes currently running (but not
* remote builds via the build hook).
*/
unsigned int getNrLocalBuilds();
size_t getNrLocalBuilds();
/**
* Return the number of substitution processes currently running.
*/
unsigned int getNrSubstitutions();
size_t getNrSubstitutions();
/**
* Registers a running child process. `inBuildSlot` means that

@ -9,7 +9,8 @@ namespace nix {
void builtinFetchurl(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs,
const std::string & netrcData);
const std::string & netrcData,
const std::string & caFileData);
void builtinUnpackChannel(
const BasicDerivation & drv,

@ -9,7 +9,8 @@ namespace nix {
void builtinFetchurl(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs,
const std::string & netrcData)
const std::string & netrcData,
const std::string & caFileData)
{
/* Make the host's netrc data available. Too bad curl requires
this to be stored in a file. It would be nice if we could just
@ -19,6 +20,9 @@ void builtinFetchurl(
writeFile(settings.netrcFile, netrcData, 0600);
}
settings.caFile = "ca-certificates.crt";
writeFile(settings.caFile, caFileData, 0600);
auto out = get(drv.outputs, "out");
if (!out)
throw Error("'builtin:fetchurl' requires an 'out' output");
@ -38,10 +42,7 @@ void builtinFetchurl(
auto source = sinkToSource([&](Sink & sink) {
/* No need to do TLS verification, because we check the hash of
the result anyway. */
FileTransferRequest request(url);
request.verifyTLS = false;
request.decompress = false;
auto decompressor = makeDecompressionSink(

@ -3,31 +3,53 @@
namespace nix {
namespace fs { using namespace std::filesystem; }
void builtinUnpackChannel(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs)
{
auto getAttr = [&](const std::string & name) {
auto getAttr = [&](const std::string & name) -> const std::string & {
auto i = drv.env.find(name);
if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
return i->second;
};
auto out = outputs.at("out");
auto channelName = getAttr("channelName");
auto src = getAttr("src");
fs::path out{outputs.at("out")};
auto & channelName = getAttr("channelName");
auto & src = getAttr("src");
createDirs(out);
if (fs::path{channelName}.filename().string() != channelName) {
throw Error("channelName is not allowed to contain filesystem seperators, got %1%", channelName);
}
try {
fs::create_directories(out);
} catch (fs::filesystem_error &) {
throw SysError("creating directory '%1%'", out.string());
}
unpackTarfile(src, out);
auto entries = std::filesystem::directory_iterator{out};
auto fileName = entries->path().string();
auto fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries));
size_t fileCount;
std::string fileName;
try {
auto entries = fs::directory_iterator{out};
fileName = entries->path().string();
fileCount = std::distance(fs::begin(entries), fs::end(entries));
} catch (fs::filesystem_error &) {
throw SysError("failed to read directory %1%", out.string());
}
if (fileCount != 1)
throw Error("channel tarball '%s' contains more than one file", src);
std::filesystem::rename(fileName, (out + "/" + channelName));
auto target = out / channelName;
try {
fs::rename(fileName, target);
} catch (fs::filesystem_error &) {
throw SysError("failed to rename %1% to %2%", fileName, target.string());
}
}
}
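The `channelName` validation added above relies on `std::filesystem::path::filename()` stripping directory components. A quick self-contained check of that property:
```cpp
#include <cassert>
#include <filesystem>
#include <string>

int main()
{
    // filename() keeps only the last path component, so the comparison
    // rejects anything containing a directory separator.
    std::string ok = "nixpkgs";
    std::string bad = "../etc";
    assert(std::filesystem::path{ok}.filename().string() == ok);
    assert(std::filesystem::path{bad}.filename().string() != bad);
}
```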

@ -754,12 +754,17 @@ struct curlFileTransfer : public FileTransfer
S3Helper s3Helper(profile, region, scheme, endpoint);
Activity act(*logger, lvlTalkative, actFileTransfer,
fmt("downloading '%s'", request.uri),
{request.uri}, request.parentAct);
// FIXME: implement ETag
auto s3Res = s3Helper.getObject(bucketName, key);
FileTransferResult res;
if (!s3Res.data)
throw FileTransferError(NotFound, "S3 object '%s' does not exist", request.uri);
res.data = std::move(*s3Res.data);
res.urls.push_back(request.uri);
callback(std::move(res));
#else
throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);

@ -169,28 +169,29 @@ protected:
{
try {
checkEnabled();
auto request(makeRequest(path));
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
getFileTransfer()->enqueueFileTransfer(request,
{[callbackPtr, this](std::future<FileTransferResult> result) {
try {
(*callbackPtr)(std::move(result.get().data));
} catch (FileTransferError & e) {
if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
return (*callbackPtr)({});
maybeDisable();
callbackPtr->rethrow();
} catch (...) {
callbackPtr->rethrow();
}
}});
} catch (...) {
callback.rethrow();
return;
}
auto request(makeRequest(path));
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
getFileTransfer()->enqueueFileTransfer(request,
{[callbackPtr, this](std::future<FileTransferResult> result) {
try {
(*callbackPtr)(std::move(result.get().data));
} catch (FileTransferError & e) {
if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
return (*callbackPtr)({});
maybeDisable();
callbackPtr->rethrow();
} catch (...) {
callbackPtr->rethrow();
}
}});
}
/**

@ -159,8 +159,9 @@ static Machine parseBuilderLine(const std::set<std::string> & defaultSystems, co
const auto & str = tokens[fieldIndex];
try {
base64Decode(str);
} catch (const Error & e) {
throw FormatError("bad machine specification: a column #%lu in a row: '%s' is not valid base64 string: %s", fieldIndex, line, e.what());
} catch (FormatError & e) {
e.addTrace({}, "while parsing machine specification at a column #%lu in a row: '%s'", fieldIndex, line);
throw;
}
return str;
};

@ -9,6 +9,7 @@
#include "globals.hh"
#include "compression.hh"
#include "filetransfer.hh"
#include "signals.hh"
#include <aws/core/Aws.h>
#include <aws/core/VersionConfig.h>
@ -117,6 +118,7 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy
{
bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override
{
checkInterrupt();
auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries);
if (retry)
printError("AWS error '%s' (%s), will retry in %d ms",

@ -6,6 +6,16 @@
namespace nix {
static std::string parsePublicHostKey(std::string_view host, std::string_view sshPublicHostKey)
{
try {
return base64Decode(sshPublicHostKey);
} catch (Error & e) {
e.addTrace({}, "while decoding ssh public host key for host '%s'", host);
throw;
}
}
SSHMaster::SSHMaster(
std::string_view host,
std::string_view keyFile,
@ -14,7 +24,7 @@ SSHMaster::SSHMaster(
: host(host)
, fakeSSH(host == "localhost")
, keyFile(keyFile)
, sshPublicHostKey(sshPublicHostKey)
, sshPublicHostKey(parsePublicHostKey(host, sshPublicHostKey))
, useMaster(useMaster && !fakeSSH)
, compress(compress)
, logFD(logFD)
@ -38,7 +48,7 @@ void SSHMaster::addCommonSSHOpts(Strings & args)
std::filesystem::path fileName = state->tmpDir->path() / "host-key";
auto p = host.rfind("@");
std::string thost = p != std::string::npos ? std::string(host, p + 1) : host;
writeFile(fileName.string(), thost + " " + base64Decode(sshPublicHostKey) + "\n");
writeFile(fileName.string(), thost + " " + sshPublicHostKey + "\n");
args.insert(args.end(), {"-oUserKnownHostsFile=" + fileName.string()});
}
if (compress)

@ -14,6 +14,9 @@ private:
const std::string host;
bool fakeSSH;
const std::string keyFile;
/**
* Raw bytes, not Base64 encoding.
*/
const std::string sshPublicHostKey;
const bool useMaster;
const bool compress;

@ -210,14 +210,16 @@ StorePath Store::addToStore(
fsm = FileSerialisationMethod::NixArchive;
break;
}
auto source = sinkToSource([&](Sink & sink) {
dumpPath(path, sink, fsm, filter);
std::optional<StorePath> storePath;
auto sink = sourceToSink([&](Source & source) {
LengthSource lengthSource(source);
storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
if (lengthSource.total >= settings.warnLargePathThreshold)
warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
});
LengthSource lengthSource(*source);
auto storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
if (lengthSource.total >= settings.warnLargePathThreshold)
warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
return storePath;
dumpPath(path, *sink, fsm, filter);
sink->finish();
return storePath.value();
}
void Store::addMultipleToStore(
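The `LengthSource` wrapper above counts bytes as they stream through, so the large-path warning needs no buffering. The idea in isolation, as a std-only sketch (this is not Nix's actual `Source` interface):
```cpp
#include <cstddef>
#include <functional>

// A pull-style reader: fills buf, returns bytes produced, 0 on EOF
// (simplified stand-in for Nix's Source::read).
using Reader = std::function<std::size_t(char * buf, std::size_t len)>;

struct LengthReader {
    Reader inner;
    std::size_t total = 0; // bytes seen so far

    std::size_t read(char * buf, std::size_t len)
    {
        auto n = inner(buf, len);
        total += n; // count while streaming; nothing is retained
        return n;
    }
};
```
After the store ingestion drains the wrapped source, `total` holds the path's size, which is then compared against `warn-large-path-threshold`.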

@ -1746,13 +1746,20 @@ void LocalDerivationGoal::runChild()
bool setUser = true;
/* Make the contents of netrc available to builtin:fetchurl
(which may run under a different uid and/or in a sandbox). */
/* Make the contents of netrc and the CA certificate bundle
available to builtin:fetchurl (which may run under a
different uid and/or in a sandbox). */
std::string netrcData;
try {
if (drv->isBuiltin() && drv->builder == "builtin:fetchurl")
netrcData = readFile(settings.netrcFile);
} catch (SystemError &) { }
std::string caFileData;
if (drv->isBuiltin() && drv->builder == "builtin:fetchurl") {
try {
netrcData = readFile(settings.netrcFile);
} catch (SystemError &) { }
try {
caFileData = readFile(settings.caFile);
} catch (SystemError &) { }
}
#if __linux__
if (useChroot) {
@ -2191,7 +2198,7 @@ void LocalDerivationGoal::runChild()
worker.store.printStorePath(scratchOutputs.at(e.first)));
if (drv->builder == "builtin:fetchurl")
builtinFetchurl(*drv, outputs, netrcData);
builtinFetchurl(*drv, outputs, netrcData, caFileData);
else if (drv->builder == "builtin:buildenv")
builtinBuildenv(*drv, outputs);
else if (drv->builder == "builtin:unpack-channel")
@ -3000,6 +3007,7 @@ void LocalDerivationGoal::deleteTmpDir(bool force)
might have privileged stuff (like a copy of netrc). */
if (settings.keepFailed && !force && !drv->isBuiltin()) {
printError("note: keeping build directory '%s'", tmpDir);
chmod(topTmpDir.c_str(), 0755);
chmod(tmpDir.c_str(), 0755);
}
else

@ -49,6 +49,7 @@ R""(
(if (param "_ALLOW_LOCAL_NETWORKING")
(begin
(allow network* (remote ip "localhost:*"))
(allow network-inbound (local ip "*:*")) ; required to bind and listen
; Allow access to /etc/resolv.conf (which is a symlink to
; /private/var/run/resolv.conf).

@ -23,7 +23,7 @@ struct ArchiveSettings : Config
false,
#endif
"use-case-hack",
"Whether to enable a Darwin-specific hack for dealing with file name collisions."};
"Whether to enable a macOS-specific hack for dealing with file name case collisions."};
};
static ArchiveSettings archiveSettings;
@ -214,11 +214,13 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath
else if (t == "directory") {
sink.createDirectory(path);
std::string prevName;
while (1) {
s = getString();
if (s == "entry") {
std::string name, prevName;
std::string name;
s = getString();
if (s != "(") throw badArchive("expected open tag");
@ -241,6 +243,9 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath
debug("case collision between '%1%' and '%2%'", i->first, name);
name += caseHackSuffix;
name += std::to_string(++i->second);
auto j = names.find(name);
if (j != names.end())
throw Error("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first);
} else
names[name] = 0;
}
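The suffix scheme itself is easy to demonstrate. A simplified, self-contained sketch (the real code uses a case-insensitive map, and as of this hunk also rejects NARs that already contain a case-hacked name):
```cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <map>
#include <string>

static const std::string caseHackSuffix = "~nix~case~hack~";

static std::string lowered(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
        [](unsigned char c) { return std::tolower(c); });
    return s;
}

int main()
{
    std::map<std::string, int> seen; // lower-cased name -> collision count
    for (std::string name : {"README", "readme", "Readme"}) {
        auto i = seen.find(lowered(name));
        if (i != seen.end())
            name += caseHackSuffix + std::to_string(++i->second);
        else
            seen[lowered(name)] = 0;
        std::cout << name << "\n"; // README, readme~nix~case~hack~1, Readme~nix~case~hack~2
    }
}
```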

@ -574,7 +574,28 @@ void setWriteTime(
time_t modificationTime,
std::optional<bool> optIsSymlink)
{
#ifndef _WIN32
#ifdef _WIN32
// FIXME use `fs::last_write_time`.
//
// Would be nice to use std::filesystem unconditionally, but
// doesn't support access time just modification time.
//
// System clock vs File clock issues also make that annoying.
warn("Changing file times is not yet implemented on Windows, path is '%s'", path);
#elif HAVE_UTIMENSAT && HAVE_DECL_AT_SYMLINK_NOFOLLOW
struct timespec times[2] = {
{
.tv_sec = accessedTime,
.tv_nsec = 0,
},
{
.tv_sec = modificationTime,
.tv_nsec = 0,
},
};
if (utimensat(AT_FDCWD, path.c_str(), times, AT_SYMLINK_NOFOLLOW) == -1)
throw SysError("changing modification time of '%s' (using `utimensat`)", path);
#else
struct timeval times[2] = {
{
.tv_sec = accessedTime,
@ -585,42 +606,21 @@ void setWriteTime(
.tv_usec = 0,
},
};
#endif
auto nonSymlink = [&]{
bool isSymlink = optIsSymlink
? *optIsSymlink
: fs::is_symlink(path);
if (!isSymlink) {
#ifdef _WIN32
// FIXME use `fs::last_write_time`.
//
// Would be nice to use std::filesystem unconditionally, but
// doesn't support access time just modification time.
//
// System clock vs File clock issues also make that annoying.
warn("Changing file times is not yet implemented on Windows, path is '%s'", path);
#else
if (utimes(path.c_str(), times) == -1) {
throw SysError("changing modification time of '%s' (not a symlink)", path);
}
#endif
} else {
throw Error("Cannot modification time of symlink '%s'", path);
}
};
#if HAVE_LUTIMES
if (lutimes(path.c_str(), times) == -1) {
if (errno == ENOSYS)
nonSymlink();
else
throw SysError("changing modification time of '%s'", path);
}
if (lutimes(path.c_str(), times) == -1)
throw SysError("changing modification time of '%s'", path);
#else
nonSymlink();
bool isSymlink = optIsSymlink
? *optIsSymlink
: fs::is_symlink(path);
if (!isSymlink) {
if (utimes(path.c_str(), times) == -1)
throw SysError("changing modification time of '%s' (not a symlink)", path);
} else {
throw Error("Cannot modification time of symlink '%s'", path);
}
#endif
#endif
}
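A standalone illustration of the `utimensat(AT_SYMLINK_NOFOLLOW)` call this hunk switches to, which sets a symlink's own mtime without following it (POSIX; the symlink path is a placeholder assumed to exist):
```cpp
#include <fcntl.h>    // AT_FDCWD, AT_SYMLINK_NOFOLLOW
#include <sys/stat.h> // utimensat
#include <cstdio>
#include <ctime>

int main()
{
    struct timespec times[2] = {
        {.tv_sec = time(nullptr), .tv_nsec = 0}, // access time: now
        {.tv_sec = 0, .tv_nsec = 0},             // mtime: the epoch
    };
    // AT_SYMLINK_NOFOLLOW updates the link itself, not its target.
    if (utimensat(AT_FDCWD, "./some-symlink", times, AT_SYMLINK_NOFOLLOW) == -1)
        perror("utimensat");
    return 0;
}
```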

@ -68,10 +68,19 @@ static RestoreSinkSettings restoreSinkSettings;
static GlobalConfig::Register r1(&restoreSinkSettings);
static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path)
{
auto dst = src;
if (!path.rel().empty())
dst /= path.rel();
return dst;
}
void RestoreSink::createDirectory(const CanonPath & path)
{
std::filesystem::create_directory(dstPath / path.rel());
auto p = append(dstPath, path);
if (!std::filesystem::create_directory(p))
throw Error("path '%s' already exists", p.string());
};
struct RestoreRegularFile : CreateRegularFileSink {
@ -82,14 +91,6 @@ struct RestoreRegularFile : CreateRegularFileSink {
void preallocateContents(uint64_t size) override;
};
static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path)
{
auto dst = src;
if (!path.rel().empty())
dst /= path.rel();
return dst;
}
void RestoreSink::createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func)
{
auto p = append(dstPath, path);
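The behavioural change in `createDirectory` above hinges on `std::filesystem::create_directory` returning `false` when a directory is already present (genuine failures throw `filesystem_error`). A self-contained check:
```cpp
#include <filesystem>
#include <iostream>

int main()
{
    namespace fs = std::filesystem;
    // Returns true if it created the directory, false if a directory
    // was already there; real errors throw filesystem_error.
    std::cout << fs::create_directory("demo-dir") << "\n"; // 1
    std::cout << fs::create_directory("demo-dir") << "\n"; // 0: duplicate
    fs::remove("demo-dir");
}
```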

@ -245,7 +245,12 @@ Hash::Hash(std::string_view rest, HashAlgorithm algo, bool isSRI)
}
else if (isSRI || rest.size() == base64Len()) {
auto d = base64Decode(rest);
std::string d;
try {
d = base64Decode(rest);
} catch (Error & e) {
e.addTrace({}, "While decoding hash '%s'", rest);
}
if (d.size() != hashSize)
throw BadHash("invalid %s hash '%s'", isSRI ? "SRI" : "base-64", rest);
assert(hashSize);

@ -41,6 +41,8 @@ check_funcs = [
# Optionally used to try to close more file descriptors (e.g. before
# forking) on Unix.
'sysconf',
# Optionally used for changing the mtime of files and symlinks.
'utimensat',
]
foreach funcspec : check_funcs
define_name = 'HAVE_' + funcspec.underscorify().to_upper()
@ -48,6 +50,8 @@ foreach funcspec : check_funcs
configdata.set(define_name, define_value)
endforeach
configdata.set('HAVE_DECL_AT_SYMLINK_NOFOLLOW', cxx.has_header_symbol('fcntl.h', 'AT_SYMLINK_NOFOLLOW').to_int())
subdir('build-utils-meson/threads')
if host_machine.system() == 'windows'

@ -132,23 +132,24 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath &
{
assertNoSymlinks(path);
DirEntries res;
for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) {
checkInterrupt();
auto type = [&]() -> std::optional<Type> {
std::filesystem::file_type nativeType;
try {
nativeType = entry.symlink_status().type();
} catch (std::filesystem::filesystem_error & e) {
// We cannot always stat the child. (Ideally there is no
// stat because the native directory entry has the type
// already, but this isn't always the case.)
if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted)
return std::nullopt;
else throw;
}
try {
for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) {
checkInterrupt();
auto type = [&]() -> std::optional<Type> {
std::filesystem::file_type nativeType;
try {
nativeType = entry.symlink_status().type();
} catch (std::filesystem::filesystem_error & e) {
// We cannot always stat the child. (Ideally there is no
// stat because the native directory entry has the type
// already, but this isn't always the case.)
if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted)
return std::nullopt;
else throw;
}
// cannot exhaustively enumerate because implementation-specific
// additional file types are allowed.
// cannot exhaustively enumerate because implementation-specific
// additional file types are allowed.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch-enum"
switch (nativeType) {
@ -158,8 +159,11 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath &
default: return tMisc;
}
#pragma GCC diagnostic pop
}();
res.emplace(entry.path().filename().string(), type);
}();
res.emplace(entry.path().filename().string(), type);
}
} catch (std::filesystem::filesystem_error & e) {
throw SysError("reading directory %1%", showPath(path));
}
return res;
}

@ -171,55 +171,6 @@ size_t StringSource::read(char * data, size_t len)
#error Coroutines are broken in this version of Boost!
#endif
/* A concrete datatype allow virtual dispatch of stack allocation methods. */
struct VirtualStackAllocator {
StackAllocator *allocator = StackAllocator::defaultAllocator;
boost::context::stack_context allocate() {
return allocator->allocate();
}
void deallocate(boost::context::stack_context sctx) {
allocator->deallocate(sctx);
}
};
/* This class reifies the default boost coroutine stack allocation strategy with
a virtual interface. */
class DefaultStackAllocator : public StackAllocator {
boost::coroutines2::default_stack stack;
boost::context::stack_context allocate() override {
return stack.allocate();
}
void deallocate(boost::context::stack_context sctx) override {
stack.deallocate(sctx);
}
};
static DefaultStackAllocator defaultAllocatorSingleton;
StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton;
std::shared_ptr<void> (*create_coro_gc_hook)() = []() -> std::shared_ptr<void> {
return {};
};
/* This class is used for entry and exit hooks on coroutines */
class CoroutineContext {
/* Disable GC when entering the coroutine without the boehm patch,
* since it doesn't find the main thread stack in this case.
* std::shared_ptr<void> performs type-erasure, so it will call the right
* deleter. */
const std::shared_ptr<void> coro_gc_hook = create_coro_gc_hook();
public:
CoroutineContext() {};
~CoroutineContext() {};
};
std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
{
struct SourceToSink : FinishSink
@ -241,14 +192,12 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
cur = in;
if (!coro) {
CoroutineContext ctx;
coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) {
LambdaSource source([&](char *out, size_t out_len) {
coro = coro_t::push_type([&](coro_t::pull_type & yield) {
LambdaSource source([&](char * out, size_t out_len) {
if (cur.empty()) {
yield();
if (yield.get()) {
return (size_t)0;
}
if (yield.get())
throw EndOfFile("coroutine has finished");
}
size_t n = std::min(cur.size(), out_len);
@ -263,20 +212,14 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
if (!*coro) { unreachable(); }
if (!cur.empty()) {
CoroutineContext ctx;
(*coro)(false);
}
}
void finish() override
{
if (!coro) return;
if (!*coro) unreachable();
{
CoroutineContext ctx;
if (coro && *coro)
(*coro)(true);
}
if (*coro) unreachable();
}
};
@ -307,8 +250,7 @@ std::unique_ptr<Source> sinkToSource(
size_t read(char * data, size_t len) override
{
if (!coro) {
CoroutineContext ctx;
coro = coro_t::pull_type(VirtualStackAllocator{}, [&](coro_t::push_type & yield) {
coro = coro_t::pull_type([&](coro_t::push_type & yield) {
LambdaSink sink([&](std::string_view data) {
if (!data.empty()) yield(std::string(data));
});
@ -320,7 +262,6 @@ std::unique_ptr<Source> sinkToSource(
if (pos == cur.size()) {
if (!cur.empty()) {
CoroutineContext ctx;
(*coro)();
}
cur = coro->get();
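With the custom `VirtualStackAllocator` gone, the coroutines fall back to Boost's default stack allocation. A standalone reminder of the `pull_type`/`push_type` machinery that `sourceToSink` and `sinkToSource` are built on:
```cpp
#include <boost/coroutine2/coroutine.hpp>
#include <iostream>
#include <string>

using coro_t = boost::coroutines2::coroutine<std::string>;

int main()
{
    // pull_type runs the producer up to its first yield; each resume
    // pulls the next chunk, which is how sinkToSource turns a
    // push-style writer into a pull-style Source.
    coro_t::pull_type producer([](coro_t::push_type & yield) {
        yield("hello ");
        yield("world");
    });
    for (auto & chunk : producer)
        std::cout << chunk;
    std::cout << "\n";
}
```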

@ -557,27 +557,4 @@ struct FramedSink : nix::BufferedSink
};
};
/**
* Stack allocation strategy for sinkToSource.
* Mutable to avoid a boehm gc dependency in libutil.
*
* boost::context doesn't provide a virtual class, so we define our own.
*/
struct StackAllocator {
virtual boost::context::stack_context allocate() = 0;
virtual void deallocate(boost::context::stack_context sctx) = 0;
/**
* The stack allocator to use in sinkToSource and potentially elsewhere.
* It is reassigned by the initGC() method in libexpr.
*/
static StackAllocator *defaultAllocator;
};
/* Disabling GC when entering a coroutine (without the boehm patch).
mutable to avoid boehm gc dependency in libutil.
*/
extern std::shared_ptr<void> (*create_coro_gc_hook)();
}

@ -14,17 +14,25 @@ BorrowedCryptoValue BorrowedCryptoValue::parse(std::string_view s)
return {s.substr(0, colon), s.substr(colon + 1)};
}
Key::Key(std::string_view s)
Key::Key(std::string_view s, bool sensitiveValue)
{
auto ss = BorrowedCryptoValue::parse(s);
name = ss.name;
key = ss.payload;
if (name == "" || key == "")
throw Error("secret key is corrupt");
try {
if (name == "" || key == "")
throw FormatError("key is corrupt");
key = base64Decode(key);
key = base64Decode(key);
} catch (Error & e) {
std::string extra;
if (!sensitiveValue)
extra = fmt(" with raw value '%s'", key);
e.addTrace({}, "while decoding key named '%s'%s", name, extra);
throw;
}
}
std::string Key::to_string() const
@ -33,7 +41,7 @@ std::string Key::to_string() const
}
SecretKey::SecretKey(std::string_view s)
: Key(s)
: Key{s, true}
{
if (key.size() != crypto_sign_SECRETKEYBYTES)
throw Error("secret key is not valid");
@ -66,7 +74,7 @@ SecretKey SecretKey::generate(std::string_view name)
}
PublicKey::PublicKey(std::string_view s)
: Key(s)
: Key{s, false}
{
if (key.size() != crypto_sign_PUBLICKEYBYTES)
throw Error("public key is not valid");
@ -83,7 +91,12 @@ bool PublicKey::verifyDetached(std::string_view data, std::string_view sig) cons
bool PublicKey::verifyDetachedAnon(std::string_view data, std::string_view sig) const
{
auto sig2 = base64Decode(sig);
std::string sig2;
try {
sig2 = base64Decode(sig);
} catch (Error & e) {
e.addTrace({}, "while decoding signature '%s'", sig);
}
if (sig2.size() != crypto_sign_BYTES)
throw Error("signature is not valid");

@ -31,15 +31,19 @@ struct Key
std::string name;
std::string key;
/**
* Construct Key from a string in the format
* <name>:<key-in-base64>.
*/
Key(std::string_view s);
std::string to_string() const;
protected:
/**
* Construct Key from a string in the format
* <name>:<key-in-base64>.
*
* @param sensitiveValue Avoid displaying the raw Base64 in error
* messages to avoid leaking private keys.
*/
Key(std::string_view s, bool sensitiveValue);
Key(std::string_view name, std::string && key)
: name(name), key(std::move(key)) { }
};

@ -8,6 +8,10 @@
namespace nix {
namespace fs {
using namespace std::filesystem;
}
namespace {
int callback_open(struct archive *, void * self)
@ -102,14 +106,14 @@ TarArchive::TarArchive(Source & source, bool raw, std::optional<std::string> com
"Failed to open archive (%s)");
}
TarArchive::TarArchive(const Path & path)
TarArchive::TarArchive(const fs::path & path)
: archive{archive_read_new()}
, buffer(defaultBufferSize)
{
archive_read_support_filter_all(archive);
enableSupportedFormats(archive);
archive_read_set_option(archive, NULL, "mac-ext", NULL);
check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s");
check(archive_read_open_filename(archive, path.string().c_str(), 16384), "failed to open archive: %s");
}
void TarArchive::close()
@ -123,7 +127,7 @@ TarArchive::~TarArchive()
archive_read_free(this->archive);
}
static void extract_archive(TarArchive & archive, const Path & destDir)
static void extract_archive(TarArchive & archive, const fs::path & destDir)
{
int flags = ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_SECURE_SYMLINKS | ARCHIVE_EXTRACT_SECURE_NODOTDOT;
@ -140,7 +144,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
else
archive.check(r);
archive_entry_copy_pathname(entry, (destDir + "/" + name).c_str());
archive_entry_copy_pathname(entry, (destDir / name).string().c_str());
// sources can and do contain dirs with no rx bits
if (archive_entry_filetype(entry) == AE_IFDIR && (archive_entry_mode(entry) & 0500) != 0500)
@ -149,7 +153,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
// Patch hardlink path
const char * original_hardlink = archive_entry_hardlink(entry);
if (original_hardlink) {
archive_entry_copy_hardlink(entry, (destDir + "/" + original_hardlink).c_str());
archive_entry_copy_hardlink(entry, (destDir / original_hardlink).string().c_str());
}
archive.check(archive_read_extract(archive.archive, entry, flags));
@ -158,19 +162,19 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
archive.close();
}
void unpackTarfile(Source & source, const Path & destDir)
void unpackTarfile(Source & source, const fs::path & destDir)
{
auto archive = TarArchive(source);
createDirs(destDir);
fs::create_directories(destDir);
extract_archive(archive, destDir);
}
void unpackTarfile(const Path & tarFile, const Path & destDir)
void unpackTarfile(const fs::path & tarFile, const fs::path & destDir)
{
auto archive = TarArchive(tarFile);
createDirs(destDir);
fs::create_directories(destDir);
extract_archive(archive, destDir);
}

@ -15,7 +15,7 @@ struct TarArchive
void check(int err, const std::string & reason = "failed to extract archive (%s)");
explicit TarArchive(const Path & path);
explicit TarArchive(const std::filesystem::path & path);
/// @brief Create a generic archive from source.
/// @param source - Input byte stream.
@ -37,9 +37,9 @@ struct TarArchive
int getArchiveFilterCodeByName(const std::string & method);
void unpackTarfile(Source & source, const Path & destDir);
void unpackTarfile(Source & source, const std::filesystem::path & destDir);
void unpackTarfile(const Path & tarFile, const Path & destDir);
void unpackTarfile(const std::filesystem::path & tarFile, const std::filesystem::path & destDir);
time_t unpackTarfileToSink(TarArchive & archive, ExtendedFileSystemObjectSink & parseSink);

@ -261,7 +261,7 @@ std::string base64Decode(std::string_view s)
char digit = base64DecodeChars[(unsigned char) c];
if (digit == npos)
throw Error("invalid character in Base64 string: '%c'", c);
throw FormatError("invalid character in Base64 string: '%c'", c);
bits += 6;
d = d << 6 | digit;

@ -210,9 +210,13 @@ constexpr char treeNull[] = " ";
/**
* Base64 encoding/decoding.
* Encode arbitrary bytes as Base64.
*/
std::string base64Encode(std::string_view s);
/**
* Decode Base64 into arbitrary bytes.
*/
std::string base64Decode(std::string_view s);

@ -163,7 +163,7 @@ static void main_nix_build(int argc, char * * argv)
script = argv[1];
try {
auto lines = tokenizeString<Strings>(readFile(script), "\n");
if (std::regex_search(lines.front(), std::regex("^#!"))) {
if (!lines.empty() && std::regex_search(lines.front(), std::regex("^#!"))) {
lines.pop_front();
inShebang = true;
for (int i = 2; i < argc; ++i)
@ -526,8 +526,6 @@ static void main_nix_build(int argc, char * * argv)
// Set the environment.
auto env = getEnv();
auto tmp = getEnvNonEmpty("TMPDIR").value_or("/tmp");
if (pure) {
decltype(env) newEnv;
for (auto & i : env)
@ -538,18 +536,16 @@ static void main_nix_build(int argc, char * * argv)
env["__ETC_PROFILE_SOURCED"] = "1";
}
env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmp;
env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDir.path();
env["NIX_STORE"] = store->storeDir;
env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores);
auto passAsFile = tokenizeString<StringSet>(getOr(drv.env, "passAsFile", ""));
bool keepTmp = false;
int fileNr = 0;
for (auto & var : drv.env)
if (passAsFile.count(var.first)) {
keepTmp = true;
auto fn = ".attr-" + std::to_string(fileNr++);
Path p = (tmpDir.path() / fn).string();
writeFile(p, var.second);
@ -591,7 +587,6 @@ static void main_nix_build(int argc, char * * argv)
env["NIX_ATTRS_SH_FILE"] = attrsSH;
env["NIX_ATTRS_JSON_FILE"] = attrsJSON;
keepTmp = true;
}
}
@ -601,12 +596,10 @@ static void main_nix_build(int argc, char * * argv)
lose the current $PATH directories. */
auto rcfile = (tmpDir.path() / "rc").string();
std::string rc = fmt(
R"(_nix_shell_clean_tmpdir() { command rm -rf %1%; }; )"s +
(keepTmp ?
"trap _nix_shell_clean_tmpdir EXIT; "
"exitHooks+=(_nix_shell_clean_tmpdir); "
"failureHooks+=(_nix_shell_clean_tmpdir); ":
"_nix_shell_clean_tmpdir; ") +
(R"(_nix_shell_clean_tmpdir() { command rm -rf %1%; };)"s
"trap _nix_shell_clean_tmpdir EXIT; "
"exitHooks+=(_nix_shell_clean_tmpdir); "
"failureHooks+=(_nix_shell_clean_tmpdir); ") +
(pure ? "" : "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;") +
"%2%"
// always clear PATH.

@ -1,24 +0,0 @@
#!/usr/bin/env bash
source common.sh
TODO_NixOS
clearStore
rm -rf "$TEST_ROOT/case"
opts=("--option" "use-case-hack" "true")
# Check whether restoring and dumping a NAR that contains case
# collisions is round-tripping, even on a case-insensitive system.
nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case.nar
nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > "$TEST_ROOT/case.nar"
cmp case.nar "$TEST_ROOT/case.nar"
[ "$(nix-hash "${opts[@]}" --type sha256 "$TEST_ROOT/case")" = "$(nix-hash --flat --type sha256 case.nar)" ]
# Check whether we detect true collisions (e.g. those remaining after
# removal of the suffix).
touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3"
(! nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > /dev/null)

Binary file not shown.

View file

@ -104,6 +104,27 @@ noSubmoduleRepo=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$subR
[[ $noSubmoduleRepoBaseline == $noSubmoduleRepo ]]
# Test .gitmodules with entries that refer to non-existent objects or objects that are not submodules.
cat >> $rootRepo/.gitmodules <<EOF
[submodule "missing"]
path = missing
url = https://example.org/missing.git
[submodule "file"]
path = file
url = https://example.org/file.git
EOF
echo foo > $rootRepo/file
git -C $rootRepo add file
git -C $rootRepo commit -a -m "Add bad submodules"
rev=$(git -C $rootRepo rev-parse HEAD)
r=$(nix eval --raw --expr "builtins.fetchGit { url = file://$rootRepo; rev = \"$rev\"; submodules = true; }")
[[ -f $r/file ]]
[[ ! -e $r/missing ]]
# Test relative submodule URLs.
rm $TEST_HOME/.cache/nix/fetcher-cache*
rm -rf $rootRepo/.git $rootRepo/.gitmodules $rootRepo/sub

View file

@ -90,7 +90,7 @@ nix_tests = \
derivation-advanced-attributes.sh \
import-derivation.sh \
nix_path.sh \
case-hack.sh \
nars.sh \
placeholders.sh \
ssh-relay.sh \
build.sh \

tests/functional/nars.sh (new executable file, 94 lines)
View file

@ -0,0 +1,94 @@
#!/usr/bin/env bash
source common.sh
TODO_NixOS
clearStore
# Check that NARs with duplicate directory entries are rejected.
rm -rf "$TEST_ROOT/out"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "NAR directory is not sorted"
# Check that nix-store --restore fails if the output already exists.
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists"
rm -rf "$TEST_ROOT/out"
echo foo > "$TEST_ROOT/out"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "File exists"
rm -rf "$TEST_ROOT/out"
ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "File exists"
mkdir -p "$TEST_ROOT/out2"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists"
# The same, but for a regular file.
nix-store --dump ./nars.sh > "$TEST_ROOT/tmp.nar"
rm -rf "$TEST_ROOT/out"
nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
rm -rf "$TEST_ROOT/out"
mkdir -p "$TEST_ROOT/out"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
rm -rf "$TEST_ROOT/out"
ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
mkdir -p "$TEST_ROOT/out2"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
# The same, but for a symlink.
ln -sfn foo "$TEST_ROOT/symlink"
nix-store --dump "$TEST_ROOT/symlink" > "$TEST_ROOT/tmp.nar"
rm -rf "$TEST_ROOT/out"
nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar"
[[ -L "$TEST_ROOT/out" ]]
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
rm -rf "$TEST_ROOT/out"
mkdir -p "$TEST_ROOT/out"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
rm -rf "$TEST_ROOT/out"
ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
mkdir -p "$TEST_ROOT/out2"
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists"
# Check that restoring and then dumping a NAR that contains case
# collisions round-trips, even on a case-insensitive system.
rm -rf "$TEST_ROOT/case"
opts=("--option" "use-case-hack" "true")
nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case.nar
nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > "$TEST_ROOT/case.nar"
cmp case.nar "$TEST_ROOT/case.nar"
[ "$(nix-hash "${opts[@]}" --type sha256 "$TEST_ROOT/case")" = "$(nix-hash --flat --type sha256 case.nar)" ]
# Check whether we detect true collisions (e.g. those remaining after
# removal of the suffix).
touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3"
(! nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > /dev/null)
# Detect NARs that have a directory entry that after case-hacking
# collides with another entry (e.g. a directory containing 'Test',
# 'Test~nix~case~hack~1' and 'test').
rm -rf "$TEST_ROOT/case"
expectStderr 1 nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case-collision.nar | grepQuiet "NAR contains file name 'test' that collides with case-hacked file name 'Test~nix~case~hack~1'"
# Deserializing a NAR that contains file names that Unicode-normalize
# to the same name should fail on macOS but succeed on Linux.
rm -rf "$TEST_ROOT/out"
if [[ $(uname) = Darwin ]]; then
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < unnormalized.nar | grepQuiet "path '.*/out/â' already exists"
else
nix-store --restore "$TEST_ROOT/out" < unnormalized.nar
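# Note: the two checks below use different Unicode normalization forms
# of 'â' (precomposed vs. decomposed), which are distinct names on Linux.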
[[ -e $TEST_ROOT/out/â ]]
[[ -e $TEST_ROOT/out/â ]]
fi
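The "NAR directory is not sorted" rejection exercised at the top of this script relies on the NAR invariant that directory entries appear in strictly ascending name order, which also rules out duplicates. An illustrative check of that invariant (a sketch, not the actual parser code):

#include <stdexcept>
#include <string>

// Directory entries must be strictly ascending: an equal name (a
// duplicate) or a smaller one (out of order) makes the archive invalid.
void checkEntryOrder(const std::string & prevName, const std::string & name)
{
    if (!prevName.empty() && name <= prevName)
        throw std::runtime_error("NAR directory is not sorted");
}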

View file

@ -31,6 +31,15 @@ output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv
[ "$output" = " - foo - bar - baz" ]
# Test NIX_BUILD_TOP
testTmpDir=$(pwd)/nix-shell
mkdir -p "$testTmpDir"
output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run 'echo $NIX_BUILD_TOP')
[[ "$output" =~ ${testTmpDir}.* ]] || {
echo "expected $output =~ ${testTmpDir}.*" >&2
exit 1
}
# Test nix-shell on a .drv
[[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \
'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]]

View file

@ -100,3 +100,17 @@ chmod +x "$TEST_ROOT/tar_root/foo"
tar cvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" .
path="$(nix flake prefetch --refresh --json "tarball+file://$TEST_ROOT/tar.tar" | jq -r .storePath)"
[[ $(cat "$path/foo") = bar ]]
# Test a tarball with non-contiguous directory entries.
rm -rf "$TEST_ROOT/tar_root"
mkdir -p "$TEST_ROOT/tar_root/a/b"
echo foo > "$TEST_ROOT/tar_root/a/b/foo"
echo bla > "$TEST_ROOT/tar_root/bla"
tar cvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" .
echo abc > "$TEST_ROOT/tar_root/bla"
echo xyzzy > "$TEST_ROOT/tar_root/a/b/xyzzy"
tar rvf "$TEST_ROOT/tar.tar" -C "$TEST_ROOT/tar_root" ./a/b/xyzzy ./bla
path="$(nix flake prefetch --refresh --json "tarball+file://$TEST_ROOT/tar.tar" | jq -r .storePath)"
[[ $(cat "$path/a/b/xyzzy") = xyzzy ]]
[[ $(cat "$path/a/b/foo") = foo ]]
[[ $(cat "$path/bla") = abc ]]
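Entries appended with "tar r" land at the end of the archive, so ./a/b/xyzzy arrives after entries outside ./a/b and the directory's contents are no longer contiguous in the stream. An extractor therefore cannot assume a pre-order directory walk; a sketch of the create-parents-on-demand approach this implies (illustrative only, not Nix's actual tar code):

#include <filesystem>
#include <fstream>
#include <string>

// Write one archive entry, (re)creating parent directories on demand
// instead of assuming they were seen earlier in the stream.
void writeEntry(const std::filesystem::path & root,
                const std::filesystem::path & entry,
                const std::string & contents)
{
    auto path = root / entry;
    std::filesystem::create_directories(path.parent_path());
    std::ofstream(path) << contents;
}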

Binary file not shown.

View file

@ -146,4 +146,8 @@ in
functional_root = runNixOSTestFor "x86_64-linux" ./functional/as-root.nix;
user-sandboxing = runNixOSTestFor "x86_64-linux" ./user-sandboxing;
fetchurl = runNixOSTestFor "x86_64-linux" ./fetchurl.nix;
s3-binary-cache-store = runNixOSTestFor "x86_64-linux" ./s3-binary-cache-store.nix;
}

tests/nixos/fetchurl.nix (new file, 84 lines)
View file

@ -0,0 +1,84 @@
# Test whether builtin:fetchurl properly performs TLS certificate
# checks on HTTPS servers.
{ pkgs, ... }:
let
makeTlsCert = name: pkgs.runCommand name {
nativeBuildInputs = with pkgs; [ openssl ];
} ''
mkdir -p $out
openssl req -x509 \
-subj '/CN=${name}/' -days 49710 \
-addext 'subjectAltName = DNS:${name}' \
-keyout "$out/key.pem" -newkey ed25519 \
-out "$out/cert.pem" -noenc
'';
goodCert = makeTlsCert "good";
badCert = makeTlsCert "bad";
in
{
name = "fetchurl";
nodes = {
machine = { pkgs, ... }: {
services.nginx = {
enable = true;
virtualHosts."good" = {
addSSL = true;
sslCertificate = "${goodCert}/cert.pem";
sslCertificateKey = "${goodCert}/key.pem";
root = pkgs.runCommand "nginx-root" {} ''
mkdir "$out"
echo 'hello world' > "$out/index.html"
'';
};
virtualHosts."bad" = {
addSSL = true;
sslCertificate = "${badCert}/cert.pem";
sslCertificateKey = "${badCert}/key.pem";
root = pkgs.runCommand "nginx-root" {} ''
mkdir "$out"
echo 'foobar' > "$out/index.html"
'';
};
};
security.pki.certificateFiles = [ "${goodCert}/cert.pem" ];
networking.hosts."127.0.0.1" = [ "good" "bad" ];
virtualisation.writableStore = true;
nix.settings.experimental-features = "nix-command";
};
};
testScript = ''
machine.wait_for_unit("nginx")
machine.wait_for_open_port(443)
out = machine.succeed("curl https://good/index.html")
assert out == "hello world\n"
out = machine.succeed("cat ${badCert}/cert.pem > /tmp/cafile.pem; curl --cacert /tmp/cafile.pem https://bad/index.html")
assert out == "foobar\n"
# Fetching from a server with a trusted cert should work.
machine.succeed("nix build --no-substitute --expr 'import <nix/fetchurl.nix> { url = \"https://good/index.html\"; hash = \"sha256-qUiQTy8PR5uPgZdpSzAYSw0u0cHNKh7A+4XSmaGSpEc=\"; }'")
# Fetching from a server with an untrusted cert should fail.
err = machine.fail("nix build --no-substitute --expr 'import <nix/fetchurl.nix> { url = \"https://bad/index.html\"; hash = \"sha256-rsBwZF/lPuOzdjBZN2E08FjMM3JHyXit0Xi2zN+wAZ8=\"; }' 2>&1")
print(err)
assert "SSL certificate problem: self-signed certificate" in err
# Fetching from a server with a trusted cert should work via environment variable override.
machine.succeed("NIX_SSL_CERT_FILE=/tmp/cafile.pem nix build --no-substitute --expr 'import <nix/fetchurl.nix> { url = \"https://bad/index.html\"; hash = \"sha256-rsBwZF/lPuOzdjBZN2E08FjMM3JHyXit0Xi2zN+wAZ8=\"; }'")
'';
}

View file

@ -1,6 +1,6 @@
# Test nix-copy-closure.
{ lib, config, nixpkgs, hostPkgs, ... }:
{ lib, config, nixpkgs, ... }:
let
pkgs = config.nodes.client.nixpkgs.pkgs;

View file

@ -0,0 +1,66 @@
{ lib, config, nixpkgs, ... }:
let
pkgs = config.nodes.client.nixpkgs.pkgs;
pkgA = pkgs.cowsay;
accessKey = "BKIKJAA5BMMU2RHO6IBB";
secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
env = "AWS_ACCESS_KEY_ID=${accessKey} AWS_SECRET_ACCESS_KEY=${secretKey}";
storeUrl = "s3://my-cache?endpoint=http://server:9000&region=eu-west-1";
in {
name = "s3-binary-cache-store";
nodes =
{ server =
{ config, lib, pkgs, ... }:
{ virtualisation.writableStore = true;
virtualisation.additionalPaths = [ pkgA ];
environment.systemPackages = [ pkgs.minio-client ];
nix.extraOptions = "experimental-features = nix-command";
services.minio = {
enable = true;
region = "eu-west-1";
rootCredentialsFile = pkgs.writeText "minio-credentials-full" ''
MINIO_ROOT_USER=${accessKey}
MINIO_ROOT_PASSWORD=${secretKey}
'';
};
networking.firewall.allowedTCPPorts = [ 9000 ];
};
client =
{ config, pkgs, ... }:
{ virtualisation.writableStore = true;
nix.extraOptions = "experimental-features = nix-command";
};
};
testScript = { nodes }: ''
# fmt: off
start_all()
# Create a binary cache.
server.wait_for_unit("minio")
server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4")
server.succeed("mc mb minio/my-cache")
server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}")
# Test fetchurl on s3:// URLs while we're at it.
client.succeed("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"s3://my-cache/nix-cache-info?endpoint=http://server:9000&region=eu-west-1\"; }'")
# Copy a package from the binary cache.
client.fail("nix path-info ${pkgA}")
client.succeed("${env} nix store info --store '${storeUrl}' >&2")
client.succeed("${env} nix copy --no-check-sigs --from '${storeUrl}' ${pkgA}")
client.succeed("nix path-info ${pkgA}")
'';
}

View file

@ -8,7 +8,7 @@
#include "tests/nix_api_expr.hh"
#include "tests/string_callback.hh"
#include "gmock/gmock.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace nixC {