mirror of https://github.com/NixOS/nix synced 2025-07-07 14:21:48 +02:00

Merge commit 'b24757f08a' into sync-2.24.2

Eelco Dolstra 2024-08-08 15:34:12 +02:00
commit c1d27763c6
330 changed files with 4907 additions and 1814 deletions

View file

@ -11,6 +11,7 @@
#include "machines.hh"
#include "shared.hh"
#include "plugin.hh"
#include "pathlocks.hh"
#include "globals.hh"
#include "serialise.hh"
@ -100,7 +101,7 @@ static int main_build_remote(int argc, char * * argv)
}
std::optional<StorePath> drvPath;
StoreReference storeUri;
std::string storeUri;
while (true) {
@ -233,17 +234,16 @@ static int main_build_remote(int argc, char * * argv)
lock = -1;
try {
storeUri = bestMachine->storeUri.render();
Activity act(*logger, lvlTalkative, actUnknown, fmt("connecting to '%s'", bestMachine->storeUri.render()));
Activity act(*logger, lvlTalkative, actUnknown, fmt("connecting to '%s'", storeUri));
sshStore = bestMachine->openStore();
sshStore->connect();
storeUri = bestMachine->storeUri;
} catch (std::exception & e) {
auto msg = chomp(drainFD(5, false));
printError("cannot build on '%s': %s%s",
bestMachine->storeUri.render(), e.what(),
storeUri, e.what(),
msg.empty() ? "" : ": " + msg);
bestMachine->enabled = false;
continue;
@ -258,15 +258,28 @@ connected:
assert(sshStore);
std::cerr << "# accept\n" << storeUri.render() << "\n";
std::cerr << "# accept\n" << storeUri << "\n";
auto inputs = readStrings<PathSet>(source);
auto wantedOutputs = readStrings<StringSet>(source);
AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + escapeUri(storeUri.render()) + ".upload-lock", true);
AutoCloseFD uploadLock;
{
auto setUpdateLock = [&](auto && fileName){
uploadLock = openLockFile(currentLoad + "/" + escapeUri(fileName) + ".upload-lock", true);
};
try {
setUpdateLock(storeUri);
} catch (SysError & e) {
if (e.errNo != ENAMETOOLONG) throw;
// Try again hashing the store URL so we have a shorter path
auto h = hashString(HashAlgorithm::MD5, storeUri);
setUpdateLock(h.to_string(HashFormat::Base64, false));
}
}
{
Activity act(*logger, lvlTalkative, actUnknown, fmt("waiting for the upload lock to '%s'", storeUri.render()));
Activity act(*logger, lvlTalkative, actUnknown, fmt("waiting for the upload lock to '%s'", storeUri));
auto old = signal(SIGALRM, handleAlarm);
alarm(15 * 60);
@ -279,7 +292,7 @@ connected:
auto substitute = settings.buildersUseSubstitutes ? Substitute : NoSubstitute;
{
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri.render()));
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri));
copyPaths(*store, *sshStore, store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute);
}
@ -317,7 +330,7 @@ connected:
optResult = sshStore->buildDerivation(*drvPath, (const BasicDerivation &) drv);
auto & result = *optResult;
if (!result.success())
throw Error("build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri.render(), result.errorMsg);
throw Error("build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg);
} else {
copyClosure(*store, *sshStore, StorePathSet {*drvPath}, NoRepair, NoCheckSigs, substitute);
auto res = sshStore->buildPathsWithResults({
@ -360,7 +373,7 @@ connected:
}
if (!missingPaths.empty()) {
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri.render()));
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri));
if (auto localStore = store.dynamic_pointer_cast<LocalStore>())
for (auto & path : missingPaths)
localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */
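
The last hunk above makes the upload lock robust against very long store URIs: if creating the lock file fails with ENAMETOOLONG, the lock-file name falls back to a short digest of the URI (MD5, rendered as base64, in the actual change). A minimal self-contained sketch of that fallback pattern, with hypothetical names and std::hash standing in for the real openLockFile/hashString helpers:

```cpp
#include <cerrno>
#include <cstring>
#include <fcntl.h>
#include <functional>
#include <stdexcept>
#include <string>

// Open (or create) a lock file named after `uri` inside `dir`. If the
// resulting path is too long for the filesystem (ENAMETOOLONG), retry
// with a short fixed-length digest of the URI instead. The real code
// uses Nix's openLockFile and an MD5/base64 digest; std::hash is used
// here only to keep the sketch self-contained.
int openUploadLock(const std::string & dir, const std::string & uri)
{
    auto tryOpen = [&](const std::string & name) {
        return open((dir + "/" + name + ".upload-lock").c_str(),
                    O_RDWR | O_CREAT | O_CLOEXEC, 0600);
    };

    int fd = tryOpen(uri);
    if (fd == -1 && errno == ENAMETOOLONG)
        fd = tryOpen(std::to_string(std::hash<std::string>{}(uri)));
    if (fd == -1)
        throw std::runtime_error(std::string("cannot open upload lock: ") + std::strerror(errno));
    return fd;
}
```

The actual change additionally escapes the URI before using it as a file name.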

View file

@ -53,10 +53,6 @@ mkMesonDerivation (finalAttrs: {
echo "doc external-api-docs $out/share/doc/nix/external-api/html" >> ''${!outputDoc}/nix-support/hydra-build-products
'';
enableParallelBuilding = true;
strictDeps = true;
meta = {
platforms = lib.platforms.all;
};

View file

@ -48,10 +48,6 @@ mkMesonDerivation (finalAttrs: {
echo "doc internal-api-docs $out/share/doc/nix/internal-api/html" >> ''${!outputDoc}/nix-support/hydra-build-products
'';
enableParallelBuilding = true;
strictDeps = true;
meta = {
platforms = lib.platforms.all;
};

View file

@ -90,75 +90,11 @@ MixEvalArgs::MixEvalArgs()
.longName = "include",
.shortName = 'I',
.description = R"(
Add *path* to the Nix search path. The Nix search path is
initialized from the colon-separated [`NIX_PATH`](@docroot@/command-ref/env-common.md#env-NIX_PATH) environment
variable, and is used to look up the location of Nix expressions using [paths](@docroot@/language/types.md#type-path) enclosed in angle
brackets (i.e., `<nixpkgs>`).
Add *path* to search path entries used to resolve [lookup paths](@docroot@/language/constructs/lookup-path.md)
For instance, passing
This option may be given multiple times.
```
-I /home/eelco/Dev
-I /etc/nixos
```
will cause Nix to look for paths relative to `/home/eelco/Dev` and
`/etc/nixos`, in that order. This is equivalent to setting the
`NIX_PATH` environment variable to
```
/home/eelco/Dev:/etc/nixos
```
It is also possible to match paths against a prefix. For example,
passing
```
-I nixpkgs=/home/eelco/Dev/nixpkgs-branch
-I /etc/nixos
```
will cause Nix to search for `<nixpkgs/path>` in
`/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
If a path in the Nix search path starts with `http://` or `https://`,
it is interpreted as the URL of a tarball that will be downloaded and
unpacked to a temporary location. The tarball must consist of a single
top-level directory. For example, passing
```
-I nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
```
tells Nix to download and use the current contents of the `master`
branch in the `nixpkgs` repository.
The URLs of the tarballs from the official `nixos.org` channels
(see [the manual page for `nix-channel`](../nix-channel.md)) can be
abbreviated as `channel:<channel-name>`. For instance, the
following two flags are equivalent:
```
-I nixpkgs=channel:nixos-21.05
-I nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
```
You can also fetch source trees using [flake URLs](./nix3-flake.md#url-like-syntax) and add them to the
search path. For instance,
```
-I nixpkgs=flake:nixpkgs
```
specifies that the prefix `nixpkgs` shall refer to the source tree
downloaded from the `nixpkgs` entry in the flake registry. Similarly,
```
-I nixpkgs=flake:github:NixOS/nixpkgs/nixos-22.05
```
makes `<nixpkgs>` refer to a particular branch of the
`NixOS/nixpkgs` repository on GitHub.
Paths added through `-I` take precedence over the [`nix-path` configuration setting](@docroot@/command-ref/conf-file.md#conf-nix-path) and the [`NIX_PATH` environment variable](@docroot@/command-ref/env-common.md#env-NIX_PATH).
)",
.category = category,
.labels = {"path"},
@ -213,7 +149,7 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
auto v = state.allocValue();
std::visit(overloaded {
[&](const AutoArgExpr & arg) {
state.mkThunk_(*v, state.parseExprFromString(arg.expr, state.rootPath(".")));
state.mkThunk_(*v, state.parseExprFromString(arg.expr, compatibilitySettings.nixShellShebangArgumentsRelativeToScript ? state.rootPath(absPath(getCommandBaseDir())) : state.rootPath(".")));
},
[&](const AutoArgString & arg) {
v->mkString(arg.s);

View file

@ -7,12 +7,29 @@ struct CompatibilitySettings : public Config
CompatibilitySettings() = default;
// Added in Nix 2.24, July 2024.
Setting<bool> nixShellAlwaysLooksForShellNix{this, true, "nix-shell-always-looks-for-shell-nix", R"(
Before Nix 2.24, [`nix-shell`](@docroot@/command-ref/nix-shell.md) would only look at `shell.nix` if it was in the working directory - when no file was specified.
Since Nix 2.24, `nix-shell` always looks for a `shell.nix`, whether that's in the working directory, or in a directory that was passed as an argument.
You may set this to `false` to revert to the Nix 2.3 behavior.
You may set this to `false` to temporarily revert to the behavior of Nix 2.23 and older.
Using this setting is not recommended.
It will be deprecated and removed.
)"};
// Added in Nix 2.24, July 2024.
Setting<bool> nixShellShebangArgumentsRelativeToScript{
this, true, "nix-shell-shebang-arguments-relative-to-script", R"(
Before Nix 2.24, relative file path expressions in arguments in a `nix-shell` shebang were resolved relative to the working directory.
Since Nix 2.24, `nix-shell` resolves these paths in a manner that is relative to the [base directory](@docroot@/glossary.md#gloss-base-directory), defined as the script's directory.
You may set this to `false` to temporarily revert to the behavior of Nix 2.23 and older.
Using this setting is not recommended.
It will be deprecated and removed.
)"};
};

View file

@ -66,7 +66,7 @@ struct ExtraPathInfoValue : ExtraPathInfo
};
/**
* An Installable which corresponds a Nix langauge value, in addition to
* An Installable which corresponds a Nix language value, in addition to
* a collection of \ref DerivedPath "derived paths".
*/
struct InstallableValue : Installable

View file

@ -1,21 +1,23 @@
#include "markdown.hh"
#include "util.hh"
#include "environment-variables.hh"
#include "error.hh"
#include "finally.hh"
#include "terminal.hh"
#if HAVE_LOWDOWN
# include <sys/queue.h>
# include <lowdown.h>
# include <sys/queue.h>
# include <lowdown.h>
#endif
namespace nix {
std::string renderMarkdownToTerminal(std::string_view markdown)
{
#if HAVE_LOWDOWN
static std::string doRenderMarkdownToTerminal(std::string_view markdown)
{
int windowWidth = getWindowSize().second;
struct lowdown_opts opts {
struct lowdown_opts opts
{
.type = LOWDOWN_TERM,
.maxdepth = 20,
.cols = (size_t) std::max(windowWidth - 5, 60),
@ -51,9 +53,21 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
throw Error("allocation error while rendering Markdown");
return filterANSIEscapes(std::string(buf->data, buf->size), !isTTY());
#else
return std::string(markdown);
#endif
}
std::string renderMarkdownToTerminal(std::string_view markdown)
{
if (auto e = getEnv("_NIX_TEST_RAW_MARKDOWN"); e && *e == "1")
return std::string(markdown);
else
return doRenderMarkdownToTerminal(markdown);
}
#else
std::string renderMarkdownToTerminal(std::string_view markdown)
{
return std::string(markdown);
}
#endif
} // namespace nix

View file

@ -1,10 +1,17 @@
#pragma once
///@file
#include "types.hh"
#include <string_view>
namespace nix {
/**
* Render the given Markdown text to the terminal.
*
* If Nix is compiled without Markdown support, this function will return the input text as-is.
*
* The renderer takes into account the terminal width, and wraps text accordingly.
*/
std::string renderMarkdownToTerminal(std::string_view markdown);
}

View file

@ -30,6 +30,8 @@ deps_public_maybe_subproject = [
]
subdir('build-utils-meson/subprojects')
subdir('build-utils-meson/threads')
nlohmann_json = dependency('nlohmann_json', version : '>= 3.9')
deps_public += nlohmann_json

View file

@ -93,14 +93,8 @@ mkMesonDerivation (finalAttrs: {
LDFLAGS = "-fuse-ld=gold";
};
enableParallelBuilding = true;
separateDebugInfo = !stdenv.hostPlatform.isStatic;
# TODO `releaseTools.coverageAnalysis` in Nixpkgs needs to be updated
# to work with `strictDeps`.
strictDeps = true;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {

View file

@ -19,6 +19,7 @@ extern "C" {
#include "repl-interacter.hh"
#include "file-system.hh"
#include "repl.hh"
#include "environment-variables.hh"
namespace nix {
@ -34,6 +35,7 @@ void sigintHandler(int signo)
static detail::ReplCompleterMixin * curRepl; // ugly
#ifndef USE_READLINE
static char * completionCallback(char * s, int * match)
{
auto possible = curRepl->completePrefix(s);
@ -100,6 +102,7 @@ static int listPossibleCallback(char * s, char *** avp)
return ac;
}
#endif
ReadlineLikeInteracter::Guard ReadlineLikeInteracter::init(detail::ReplCompleterMixin * repl)
{
@ -175,10 +178,23 @@ bool ReadlineLikeInteracter::getLine(std::string & input, ReplPromptType promptT
return true;
}
// editline doesn't echo the input to the output when non-interactive, unlike readline
// this results in a different behavior when running tests. The echoing is
// quite useful for reading the test output, so we add it here.
if (auto e = getEnv("_NIX_TEST_REPL_ECHO"); s && e && *e == "1")
{
#ifndef USE_READLINE
// This is probably not right for multi-line input, but we don't use that
// in the characterisation tests, so it's fine.
std::cout << promptForType(promptType) << s << std::endl;
#endif
}
if (!s)
return false;
input += s;
input += '\n';
return true;
}

View file

@ -217,7 +217,7 @@ ReplExitStatus NixRepl::mainLoop()
case ProcessLineResult::PromptAgain:
break;
default:
abort();
unreachable();
}
} catch (ParseError & e) {
if (e.msg().find("unexpected end of file") != std::string::npos) {
@ -644,9 +644,6 @@ ProcessLineResult NixRepl::processLine(std::string line)
fallbackPos = attr->pos;
fallbackDoc = state->getDocCommentForPos(fallbackPos);
}
} else {
evalString(arg, v);
}
evalString(arg, v);
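
Several hunks in this commit, starting with the one above, replace bare abort() calls with unreachable(). The definition of that helper is not part of the hunks shown here; as a rough, hypothetical sketch (not the actual Nix implementation), such a helper typically records where the impossible state was hit before aborting, so the failure is diagnosable from the log or core dump:

```cpp
#include <cstdio>
#include <cstdlib>

// Report that supposedly unreachable code was reached, then abort so a
// core dump is still produced. Unlike a plain abort(), the message says
// where the impossible state occurred.
[[noreturn]] inline void unreachable_at(const char * file, int line)
{
    std::fprintf(stderr, "unreachable code reached at %s:%d\n", file, line);
    std::abort();
}

#define unreachable() unreachable_at(__FILE__, __LINE__)
```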

View file

@ -29,6 +29,8 @@ deps_public_maybe_subproject = [
]
subdir('build-utils-meson/subprojects')
subdir('build-utils-meson/threads')
# TODO rename, because it will conflict with downstream projects
configdata.set_quoted('PACKAGE_VERSION', meson.project_version())

View file

@ -14,10 +14,10 @@
#include "nix_api_util.h"
#include "nix_api_util_internal.h"
#ifdef HAVE_BOEHMGC
#include <mutex>
#define GC_INCLUDE_NEW 1
#include "gc_cpp.h"
#if HAVE_BOEHMGC
# include <mutex>
# define GC_INCLUDE_NEW 1
# include "gc_cpp.h"
#endif
nix_err nix_libexpr_init(nix_c_context * context)
@ -131,7 +131,7 @@ void nix_state_free(EvalState * state)
delete state;
}
#ifdef HAVE_BOEHMGC
#if HAVE_BOEHMGC
std::unordered_map<
const void *,
unsigned int,
@ -207,7 +207,7 @@ nix_err nix_value_decref(nix_c_context * context, nix_value *x)
void nix_gc_register_finalizer(void * obj, void * cd, void (*finalizer)(void * obj, void * cd))
{
#ifdef HAVE_BOEHMGC
#if HAVE_BOEHMGC
GC_REGISTER_FINALIZER(obj, finalizer, cd, 0, 0);
#endif
}

View file

@ -14,7 +14,7 @@
#include <nlohmann/json.hpp>
#ifdef HAVE_BOEHMGC
#if HAVE_BOEHMGC
# include "gc/gc.h"
# define GC_INCLUDE_NEW 1
# include "gc_cpp.h"
@ -174,7 +174,7 @@ ExternalValue * nix_create_external_value(nix_c_context * context, NixCExternalV
context->last_err_code = NIX_OK;
try {
auto ret = new
#ifdef HAVE_BOEHMGC
#if HAVE_BOEHMGC
(GC)
#endif
NixCExternalValue(*desc, v);

View file

@ -14,7 +14,7 @@
#include "nix_api_value.h"
#include "value/context.hh"
#ifdef HAVE_BOEHMGC
#if HAVE_BOEHMGC
# include "gc/gc.h"
# define GC_INCLUDE_NEW 1
# include "gc_cpp.h"
@ -131,7 +131,7 @@ PrimOp * nix_alloc_primop(
try {
using namespace std::placeholders;
auto p = new
#ifdef HAVE_BOEHMGC
#if HAVE_BOEHMGC
(GC)
#endif
nix::PrimOp{

View file

@ -63,12 +63,8 @@ mkMesonDerivation (finalAttrs: {
LDFLAGS = "-fuse-ld=gold";
};
enableParallelBuilding = true;
separateDebugInfo = !stdenv.hostPlatform.isStatic;
strictDeps = true;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {

View file

@ -134,7 +134,7 @@ std::pair<SourcePath, uint32_t> findPackageFilename(EvalState & state, Value & v
return {SourcePath{path.accessor, CanonPath(fn.substr(0, colon))}, lineno};
} catch (std::invalid_argument & e) {
fail();
abort();
unreachable();
}
}

View file

@ -95,7 +95,7 @@ struct AttrDb
{
try {
auto state(_state->lock());
if (!failed)
if (!failed && state->txn->active)
state->txn->commit();
state->txn.reset();
} catch (...) {

View file

@ -92,6 +92,14 @@ void EvalErrorBuilder<T>::debugThrow()
throw error;
}
template<class T>
void EvalErrorBuilder<T>::panic()
{
logError(error.info());
printError("This is a bug! An unexpected condition occurred, causing the Nix evaluator to have to stop. If you could share a reproducible example or a core dump, please open an issue at https://github.com/NixOS/nix/issues");
abort();
}
template class EvalErrorBuilder<EvalBaseError>;
template class EvalErrorBuilder<EvalError>;
template class EvalErrorBuilder<AssertionError>;

View file

@ -110,6 +110,12 @@ public:
* Delete the `EvalErrorBuilder` and throw the underlying exception.
*/
[[gnu::noinline, gnu::noreturn]] void debugThrow();
/**
* A programming error or fatal condition occurred. Abort the process for core dump and debugging.
* This does not print a proper backtrace, because unwinding the stack is destructive.
*/
[[gnu::noinline, gnu::noreturn]] void panic();
};
}

View file

@ -1,5 +1,7 @@
#include "error.hh"
#include "environment-variables.hh"
#include "eval-settings.hh"
#include "config-global.hh"
#include "serialise.hh"
#include "eval-gc.hh"
@ -84,14 +86,17 @@ void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id)
{
void *& sp = *sp_ptr;
auto pthread_id = reinterpret_cast<pthread_t>(_pthread_id);
# ifndef __APPLE__
pthread_attr_t pattr;
# endif
size_t osStackSize;
void * osStackLow;
// The low address of the stack, which grows down.
void * osStackLimit;
void * osStackBase;
# ifdef __APPLE__
osStackSize = pthread_get_stacksize_np(pthread_id);
osStackLow = pthread_get_stackaddr_np(pthread_id);
osStackLimit = pthread_get_stackaddr_np(pthread_id);
# else
if (pthread_attr_init(&pattr)) {
throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
@ -110,18 +115,18 @@ void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id)
# else
# error "Need one of `pthread_attr_get_np` or `pthread_getattr_np`"
# endif
if (pthread_attr_getstack(&pattr, &osStackLow, &osStackSize)) {
if (pthread_attr_getstack(&pattr, &osStackLimit, &osStackSize)) {
throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed");
}
if (pthread_attr_destroy(&pattr)) {
throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed");
}
# endif
osStackBase = (char *) osStackLow + osStackSize;
osStackBase = (char *) osStackLimit + osStackSize;
// NOTE: We assume the stack grows down, as it does on all architectures we support.
// Architectures that grow the stack up are rare.
if (sp >= osStackBase || sp < osStackLow) { // lo is outside the os stack
sp = osStackBase;
if (sp >= osStackBase || sp < osStackLimit) { // sp is outside the os stack
sp = osStackLimit;
}
}
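
The renaming above makes the stack geometry explicit: on a downward-growing stack, the "limit" is the lowest valid address and the "base" is limit + size, so an out-of-range saved stack pointer is now clamped to the limit rather than to the base. A self-contained sketch of the clamping arithmetic, with plain pointers and no pthread or Boehm GC calls:

```cpp
#include <cstddef>

// On a downward-growing stack, valid addresses lie in [limit, base),
// where base = limit + size. If a saved stack pointer falls outside
// that range (for instance because the thread is currently running on
// a different stack), clamp it to the limit so that at most the whole
// stack is scanned.
void * clampStackPointer(void * sp, void * stackLimit, std::size_t stackSize)
{
    char * limit = static_cast<char *>(stackLimit);
    char * base  = limit + stackSize;
    char * p     = static_cast<char *>(sp);
    return (p >= base || p < limit) ? limit : p;
}
```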
@ -155,6 +160,10 @@ static inline void initGCReal()
there. */
GC_set_no_dls(1);
/* Enable perf measurements. This is just a setting; not much of a
start of something. */
GC_start_performance_measurement();
GC_INIT();
GC_set_oom_fn(oomHandler);
@ -202,6 +211,14 @@ static inline void initGCReal()
}
}
static size_t gcCyclesAfterInit = 0;
size_t getGCCycles()
{
assertGCInitialized();
return static_cast<size_t>(GC_get_gc_no()) - gcCyclesAfterInit;
}
#endif
static bool gcInitialised = false;
@ -213,8 +230,16 @@ void initGC()
#if HAVE_BOEHMGC
initGCReal();
gcCyclesAfterInit = GC_get_gc_no();
#endif
// NIX_PATH must override the regular setting
// See the comment in applyConfig
if (auto nixPathEnv = getEnv("NIX_PATH")) {
globalConfig.set("nix-path", concatStringsSep(" ", EvalSettings::parseNixPath(nixPathEnv.value())));
}
gcInitialised = true;
}
@ -223,4 +248,4 @@ void assertGCInitialized()
assert(gcInitialised);
}
}
} // namespace nix

View file

@ -1,6 +1,8 @@
#pragma once
///@file
#include <cstddef>
namespace nix {
/**
@ -13,4 +15,11 @@ void initGC();
*/
void assertGCInitialized();
}
#ifdef HAVE_BOEHMGC
/**
* The number of GC cycles since initGC().
*/
size_t getGCCycles();
#endif
} // namespace nix

View file

@ -8,7 +8,7 @@ namespace nix {
/* Very hacky way to parse $NIX_PATH, which is colon-separated, but
can contain URLs (e.g. "nixpkgs=https://bla...:foo=https://"). */
static Strings parseNixPath(const std::string & s)
Strings EvalSettings::parseNixPath(const std::string & s)
{
Strings res;
@ -48,15 +48,12 @@ EvalSettings::EvalSettings(bool & readOnlyMode, EvalSettings::LookupPathHooks lo
: readOnlyMode{readOnlyMode}
, lookupPathHooks{lookupPathHooks}
{
auto var = getEnv("NIX_PATH");
if (var) nixPath = parseNixPath(*var);
var = getEnv("NIX_ABORT_ON_WARN");
auto var = getEnv("NIX_ABORT_ON_WARN");
if (var && (var == "1" || var == "yes" || var == "true"))
builtinsAbortOnWarn = true;
}
Strings EvalSettings::getDefaultNixPath() const
Strings EvalSettings::getDefaultNixPath()
{
Strings res;
auto add = [&](const Path & p, const std::string & s = std::string()) {
@ -69,11 +66,9 @@ Strings EvalSettings::getDefaultNixPath() const
}
};
if (!restrictEval && !pureEval) {
add(getNixDefExpr() + "/channels");
add(rootChannelsDir() + "/nixpkgs", "nixpkgs");
add(rootChannelsDir());
}
add(getNixDefExpr() + "/channels");
add(rootChannelsDir() + "/nixpkgs", "nixpkgs");
add(rootChannelsDir());
return res;
}

View file

@ -43,10 +43,12 @@ struct EvalSettings : Config
bool & readOnlyMode;
Strings getDefaultNixPath() const;
static Strings getDefaultNixPath();
static bool isPseudoUrl(std::string_view s);
static Strings parseNixPath(const std::string & s);
static std::string resolvePseudoUrl(std::string_view url);
LookupPathHooks lookupPathHooks;
@ -63,7 +65,7 @@ struct EvalSettings : Config
extern "C" typedef void (*ValueInitialiser) (EvalState & state, Value & v);
```
The [Nix C++ API documentation](@docroot@/contributing/documentation.md#api-documentation) has more details on evaluator internals.
The [Nix C++ API documentation](@docroot@/development/documentation.md#api-documentation) has more details on evaluator internals.
- `builtins.exec` *arguments*
@ -71,25 +73,30 @@ struct EvalSettings : Config
)"};
Setting<Strings> nixPath{
this, getDefaultNixPath(), "nix-path",
this, {}, "nix-path",
R"(
List of search paths to use for [lookup path](@docroot@/language/constructs/lookup-path.md) resolution.
This setting determines the value of
[`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath) and can be used with [`builtins.findFile`](@docroot@/language/builtins.md#builtins-findFile).
The default value is
- The configuration setting is overridden by the [`NIX_PATH`](@docroot@/command-ref/env-common.md#env-NIX_PATH)
environment variable.
- `NIX_PATH` is overridden by [specifying the setting as the command line flag](@docroot@/command-ref/conf-file.md#command-line-flags) `--nix-path`.
- Any current value is extended by the [`-I` option](@docroot@/command-ref/opt-common.md#opt-I) or `--extra-nix-path`.
```
$HOME/.nix-defexpr/channels
nixpkgs=$NIX_STATE_DIR/profiles/per-user/root/channels/nixpkgs
$NIX_STATE_DIR/profiles/per-user/root/channels
```
If the respective paths are accessible, the default values are:
It can be overridden with the [`NIX_PATH` environment variable](@docroot@/command-ref/env-common.md#env-NIX_PATH) or the [`-I` command line option](@docroot@/command-ref/opt-common.md#opt-I).
- `$HOME/.nix-defexpr/channels`
- `nixpkgs=$NIX_STATE_DIR/profiles/per-user/root/channels/nixpkgs`
- `$NIX_STATE_DIR/profiles/per-user/root/channels`
See [`NIX_STATE_DIR`](@docroot@/command-ref/env-common.md#env-NIX_STATE_DIR) for details.
> **Note**
>
> If [pure evaluation](#conf-pure-eval) is enabled, `nixPath` evaluates to the empty list `[ ]`.
> If [restricted evaluation](@docroot@/command-ref/conf-file.md#conf-restrict-eval) is enabled, the default value is empty.
>
> If [pure evaluation](#conf-pure-eval) is enabled, `builtins.nixPath` *always* evaluates to the empty list `[ ]`.
)", {}, false};
Setting<std::string> currentSystem{

View file

@ -149,7 +149,7 @@ std::string_view showType(ValueType type, bool withArticle)
case nFloat: return WA("a", "float");
case nThunk: return WA("a", "thunk");
}
abort();
unreachable();
}
@ -215,7 +215,7 @@ static Symbol getName(const AttrName & name, EvalState & state, Env & env)
static constexpr size_t BASE_ENV_SIZE = 128;
EvalState::EvalState(
const LookupPath & _lookupPath,
const LookupPath & lookupPathFromArguments,
ref<Store> store,
const fetchers::Settings & fetchSettings,
const EvalSettings & settings,
@ -331,12 +331,21 @@ EvalState::EvalState(
vStringSymlink.mkString("symlink");
vStringUnknown.mkString("unknown");
/* Initialise the Nix expression search path. */
/* Construct the Nix expression search path. */
assert(lookupPath.elements.empty());
if (!settings.pureEval) {
for (auto & i : _lookupPath.elements)
for (auto & i : lookupPathFromArguments.elements) {
lookupPath.elements.emplace_back(LookupPath::Elem {i});
for (auto & i : settings.nixPath.get())
}
/* $NIX_PATH overriding regular settings is implemented as a hack in `initGC()` */
for (auto & i : settings.nixPath.get()) {
lookupPath.elements.emplace_back(LookupPath::Elem::parse(i));
}
if (!settings.restrictEval) {
for (auto & i : EvalSettings::getDefaultNixPath()) {
lookupPath.elements.emplace_back(LookupPath::Elem::parse(i));
}
}
}
/* Allow access to all paths in the search path. */
@ -771,7 +780,7 @@ void EvalState::runDebugRepl(const Error * error, const Env & env, const Expr &
case ReplExitStatus::Continue:
break;
default:
abort();
unreachable();
}
}
}
@ -1140,7 +1149,7 @@ inline void EvalState::evalAttrs(Env & env, Expr * e, Value & v, const PosIdx po
void Expr::eval(EvalState & state, Env & env, Value & v)
{
abort();
unreachable();
}
@ -1573,7 +1582,7 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
.withFrame(*fun.payload.lambda.env, lambda)
.debugThrow();
}
abort(); // can't happen
unreachable();
}
}
@ -1825,9 +1834,24 @@ void ExprIf::eval(EvalState & state, Env & env, Value & v)
void ExprAssert::eval(EvalState & state, Env & env, Value & v)
{
if (!state.evalBool(env, cond, pos, "in the condition of the assert statement")) {
std::ostringstream out;
cond->show(state.symbols, out);
state.error<AssertionError>("assertion '%1%' failed", out.str()).atPos(pos).withFrame(env, *this).debugThrow();
auto exprStr = ({
std::ostringstream out;
cond->show(state.symbols, out);
out.str();
});
if (auto eq = dynamic_cast<ExprOpEq *>(cond)) {
try {
Value v1; eq->e1->eval(state, env, v1);
Value v2; eq->e2->eval(state, env, v2);
state.assertEqValues(v1, v2, eq->pos, "in an equality assertion");
} catch (AssertionError & e) {
e.addTrace(state.positions[pos], "while evaluating the condition of the assertion '%s'", exprStr);
throw;
}
}
state.error<AssertionError>("assertion '%1%' failed", exprStr).atPos(pos).withFrame(env, *this).debugThrow();
}
body->eval(state, env, v);
}
@ -2484,6 +2508,214 @@ SingleDerivedPath EvalState::coerceToSingleDerivedPath(const PosIdx pos, Value &
}
// NOTE: This implementation must match eqValues!
// We accept this burden because informative error messages for
// `assert a == b; x` are critical for our users' testing UX.
void EvalState::assertEqValues(Value & v1, Value & v2, const PosIdx pos, std::string_view errorCtx)
{
// This implementation must match eqValues.
forceValue(v1, pos);
forceValue(v2, pos);
if (&v1 == &v2)
return;
// Special case type-compatibility between float and int
if ((v1.type() == nInt || v1.type() == nFloat) && (v2.type() == nInt || v2.type() == nFloat)) {
if (eqValues(v1, v2, pos, errorCtx)) {
return;
} else {
error<AssertionError>(
"%s with value '%s' is not equal to %s with value '%s'",
showType(v1),
ValuePrinter(*this, v1, errorPrintOptions),
showType(v2),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
}
if (v1.type() != v2.type()) {
error<AssertionError>(
"%s of value '%s' is not equal to %s of value '%s'",
showType(v1),
ValuePrinter(*this, v1, errorPrintOptions),
showType(v2),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
switch (v1.type()) {
case nInt:
if (v1.integer() != v2.integer()) {
error<AssertionError>("integer '%d' is not equal to integer '%d'", v1.integer(), v2.integer()).debugThrow();
}
return;
case nBool:
if (v1.boolean() != v2.boolean()) {
error<AssertionError>(
"boolean '%s' is not equal to boolean '%s'",
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
return;
case nString:
if (strcmp(v1.c_str(), v2.c_str()) != 0) {
error<AssertionError>(
"string '%s' is not equal to string '%s'",
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
return;
case nPath:
if (v1.payload.path.accessor != v2.payload.path.accessor) {
error<AssertionError>(
"path '%s' is not equal to path '%s' because their accessors are different",
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
if (strcmp(v1.payload.path.path, v2.payload.path.path) != 0) {
error<AssertionError>(
"path '%s' is not equal to path '%s'",
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
return;
case nNull:
return;
case nList:
if (v1.listSize() != v2.listSize()) {
error<AssertionError>(
"list of size '%d' is not equal to list of size '%d', left hand side is '%s', right hand side is '%s'",
v1.listSize(),
v2.listSize(),
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
for (size_t n = 0; n < v1.listSize(); ++n) {
try {
assertEqValues(*v1.listElems()[n], *v2.listElems()[n], pos, errorCtx);
} catch (Error & e) {
e.addTrace(positions[pos], "while comparing list element %d", n);
throw;
}
}
return;
case nAttrs: {
if (isDerivation(v1) && isDerivation(v2)) {
auto i = v1.attrs()->get(sOutPath);
auto j = v2.attrs()->get(sOutPath);
if (i && j) {
try {
assertEqValues(*i->value, *j->value, pos, errorCtx);
return;
} catch (Error & e) {
e.addTrace(positions[pos], "while comparing a derivation by its '%s' attribute", "outPath");
throw;
}
assert(false);
}
}
if (v1.attrs()->size() != v2.attrs()->size()) {
error<AssertionError>(
"attribute names of attribute set '%s' differs from attribute set '%s'",
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
// Like normal comparison, we compare the attributes in non-deterministic Symbol index order.
// This function is called when eqValues has found a difference, so to reliably
// report about its result, we should follow in its literal footsteps and not
// try anything fancy that could lead to an error.
Bindings::const_iterator i, j;
for (i = v1.attrs()->begin(), j = v2.attrs()->begin(); i != v1.attrs()->end(); ++i, ++j) {
if (i->name != j->name) {
// A difference in a sorted list means that one attribute is not contained in the other, but we don't
// know which. Let's find out. Could use <, but this is more clear.
if (!v2.attrs()->get(i->name)) {
error<AssertionError>(
"attribute name '%s' is contained in '%s', but not in '%s'",
symbols[i->name],
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
if (!v1.attrs()->get(j->name)) {
error<AssertionError>(
"attribute name '%s' is missing in '%s', but is contained in '%s'",
symbols[j->name],
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
assert(false);
}
try {
assertEqValues(*i->value, *j->value, pos, errorCtx);
} catch (Error & e) {
// The order of traces is reversed, so this presents as
// where left hand side is
// at <pos>
// where right hand side is
// at <pos>
// while comparing attribute '<name>'
if (j->pos != noPos)
e.addTrace(positions[j->pos], "where right hand side is");
if (i->pos != noPos)
e.addTrace(positions[i->pos], "where left hand side is");
e.addTrace(positions[pos], "while comparing attribute '%s'", symbols[i->name]);
throw;
}
}
return;
}
case nFunction:
error<AssertionError>("distinct functions and immediate comparisons of identical functions compare as unequal")
.debugThrow();
case nExternal:
if (!(*v1.external() == *v2.external())) {
error<AssertionError>(
"external value '%s' is not equal to external value '%s'",
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
return;
case nFloat:
// !!!
if (!(v1.fpoint() == v2.fpoint())) {
error<AssertionError>("float '%f' is not equal to float '%f'", v1.fpoint(), v2.fpoint()).debugThrow();
}
return;
case nThunk: // Must not be left by forceValue
assert(false);
default: // Note that we pass compiler flags that should make `default:` unreachable.
// Also note that this probably ran after `eqValues`, which implements
// the same logic more efficiently (without having to unwind stacks),
// so maybe `assertEqValues` and `eqValues` are out of sync. Check it for solutions.
error<EvalError>("assertEqValues: cannot compare %1% with %2%", showType(v1), showType(v2)).withTrace(pos, errorCtx).panic();
}
}
// This implementation must match assertEqValues
bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_view errorCtx)
{
forceValue(v1, pos);
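
In the nAttrs branch above, both attribute sets are stored sorted by symbol and are known to have equal size, so the code walks them in lockstep; at the first position where the names differ, a lookup decides which side is missing an attribute. A self-contained sketch of that lockstep walk over plain sorted vectors (illustration only, not the actual Bindings API):

```cpp
#include <algorithm>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

using Attrs = std::vector<std::pair<std::string, int>>; // sorted by name, names unique

// Given two sorted attribute lists of equal size, report the first
// attribute name that is present in one list but missing in the other.
void reportFirstNameMismatch(const Attrs & a, const Attrs & b)
{
    auto has = [](const Attrs & xs, const std::string & name) {
        return std::binary_search(
            xs.begin(), xs.end(), std::make_pair(name, 0),
            [](const auto & x, const auto & y) { return x.first < y.first; });
    };
    for (size_t i = 0; i < a.size(); ++i) {
        if (a[i].first == b[i].first)
            continue;
        if (!has(b, a[i].first))
            throw std::runtime_error("'" + a[i].first + "' is only in the left set");
        if (!has(a, b[i].first))
            throw std::runtime_error("'" + b[i].first + "' is only in the right set");
    }
}
```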
@ -2557,11 +2789,13 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v
return *v1.external() == *v2.external();
case nFloat:
// !!!
return v1.fpoint() == v2.fpoint();
case nThunk: // Must not be left by forceValue
default:
error<EvalError>("cannot compare %1% with %2%", showType(v1), showType(v2)).withTrace(pos, errorCtx).debugThrow();
assert(false);
default: // Note that we pass compiler flags that should make `default:` unreachable.
error<EvalError>("eqValues: cannot compare %1% with %2%", showType(v1), showType(v2)).withTrace(pos, errorCtx).panic();
}
}
@ -2610,6 +2844,11 @@ void EvalState::printStatistics()
#if HAVE_BOEHMGC
GC_word heapSize, totalBytes;
GC_get_heap_usage_safe(&heapSize, 0, 0, 0, &totalBytes);
double gcFullOnlyTime = ({
auto ms = GC_get_full_gc_total_time();
ms * 0.001;
});
auto gcCycles = getGCCycles();
#endif
auto outPath = getEnv("NIX_SHOW_STATS_PATH").value_or("-");
@ -2620,6 +2859,15 @@ void EvalState::printStatistics()
#ifndef _WIN32 // TODO implement
topObj["cpuTime"] = cpuTime;
#endif
topObj["time"] = {
#ifndef _WIN32 // TODO implement
{"cpu", cpuTime},
#endif
#if HAVE_BOEHMGC
{GC_is_incremental_mode() ? "gcNonIncremental" : "gc", gcFullOnlyTime},
{GC_is_incremental_mode() ? "gcNonIncrementalFraction" : "gcFraction", gcFullOnlyTime / cpuTime},
#endif
};
topObj["envs"] = {
{"number", nrEnvs},
{"elements", nrValuesInEnvs},
@ -2661,6 +2909,7 @@ void EvalState::printStatistics()
topObj["gc"] = {
{"heapSize", heapSize},
{"totalBytes", totalBytes},
{"cycles", gcCycles},
};
#endif

View file

@ -130,7 +130,7 @@ struct Constant
typedef std::map<std::string, Value *> ValMap;
#endif
typedef std::map<PosIdx, DocComment> DocCommentMap;
typedef std::unordered_map<PosIdx, DocComment> DocCommentMap;
struct Env
{
@ -335,7 +335,7 @@ private:
* Associate source positions of certain AST nodes with their preceding doc comment, if they have one.
* Grouped by file.
*/
std::map<SourcePath, DocCommentMap> positionToDocComment;
std::unordered_map<SourcePath, DocCommentMap> positionToDocComment;
LookupPath lookupPath;
@ -655,6 +655,15 @@ public:
*/
bool eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_view errorCtx);
/**
* Like `eqValues`, but throws an `AssertionError` if not equal.
*
* WARNING:
* Callers should call `eqValues` first and report if `assertEqValues` behaves
* incorrectly. (e.g. if it doesn't throw if eqValues returns false or vice versa)
*/
void assertEqValues(Value & v1, Value & v2, const PosIdx pos, std::string_view errorCtx);
bool isFunctor(Value & fun);
// FIXME: use std::span

View file

@ -42,7 +42,7 @@ class JSONSax : nlohmann::json_sax<json> {
auto attrs2 = state.buildBindings(attrs.size());
for (auto & i : attrs)
attrs2.insert(i.first, i.second);
parent->value(state).mkAttrs(attrs2.alreadySorted());
parent->value(state).mkAttrs(attrs2);
return std::move(parent);
}
void add() override { v = nullptr; }
@ -80,42 +80,42 @@ class JSONSax : nlohmann::json_sax<json> {
public:
JSONSax(EvalState & state, Value & v) : state(state), rs(new JSONState(&v)) {};
bool null()
bool null() override
{
rs->value(state).mkNull();
rs->add();
return true;
}
bool boolean(bool val)
bool boolean(bool val) override
{
rs->value(state).mkBool(val);
rs->add();
return true;
}
bool number_integer(number_integer_t val)
bool number_integer(number_integer_t val) override
{
rs->value(state).mkInt(val);
rs->add();
return true;
}
bool number_unsigned(number_unsigned_t val)
bool number_unsigned(number_unsigned_t val) override
{
rs->value(state).mkInt(val);
rs->add();
return true;
}
bool number_float(number_float_t val, const string_t & s)
bool number_float(number_float_t val, const string_t & s) override
{
rs->value(state).mkFloat(val);
rs->add();
return true;
}
bool string(string_t & val)
bool string(string_t & val) override
{
rs->value(state).mkString(val);
rs->add();
@ -123,7 +123,7 @@ public:
}
#if NLOHMANN_JSON_VERSION_MAJOR >= 3 && NLOHMANN_JSON_VERSION_MINOR >= 8
bool binary(binary_t&)
bool binary(binary_t&) override
{
// This function ought to be unreachable
assert(false);
@ -131,35 +131,35 @@ public:
}
#endif
bool start_object(std::size_t len)
bool start_object(std::size_t len) override
{
rs = std::make_unique<JSONObjectState>(std::move(rs));
return true;
}
bool key(string_t & name)
bool key(string_t & name) override
{
dynamic_cast<JSONObjectState*>(rs.get())->key(name, state);
return true;
}
bool end_object() {
bool end_object() override {
rs = rs->resolve(state);
rs->add();
return true;
}
bool end_array() {
bool end_array() override {
return end_object();
}
bool start_array(size_t len) {
bool start_array(size_t len) override {
rs = std::make_unique<JSONListState>(std::move(rs),
len != std::numeric_limits<size_t>::max() ? len : 128);
return true;
}
bool parse_error(std::size_t, const std::string&, const nlohmann::detail::exception& ex) {
bool parse_error(std::size_t, const std::string&, const nlohmann::detail::exception& ex) override {
throw JSONParseError("%s", ex.what());
}
};

View file

@ -67,6 +67,14 @@ static StringToken unescapeStr(SymbolTable & symbols, char * s, size_t length)
return {result, size_t(t - result)};
}
static void requireExperimentalFeature(const ExperimentalFeature & feature, const Pos & pos)
{
if (!experimentalFeatureSettings.isEnabled(feature))
throw ParseError(ErrorInfo{
.msg = HintFmt("experimental Nix feature '%1%' is disabled; add '--extra-experimental-features %1%' to enable it", showExperimentalFeature(feature)),
.pos = pos,
});
}
}
@ -119,6 +127,12 @@ or { return OR_KW; }
\-\> { return IMPL; }
\/\/ { return UPDATE; }
\+\+ { return CONCAT; }
\<\| { requireExperimentalFeature(Xp::PipeOperators, state->positions[CUR_POS]);
return PIPE_FROM;
}
\|\> { requireExperimentalFeature(Xp::PipeOperators, state->positions[CUR_POS]);
return PIPE_INTO;
}
{ID} { yylval->id = {yytext, (size_t) yyleng}; return ID; }
{INT} { errno = 0;

View file

@ -32,6 +32,7 @@ subdir('build-utils-meson/threads')
boost = dependency(
'boost',
modules : ['container', 'context'],
include_type: 'system',
)
# boost is a public dependency, but not a pkg-config dependency unfortunately, so we
# put in `deps_other`.
@ -55,7 +56,12 @@ if bdw_gc.found()
endif
configdata.set('HAVE_BOEHMGC', bdw_gc.found().to_int())
toml11 = dependency('toml11', version : '>=3.7.0', method : 'cmake')
toml11 = dependency(
'toml11',
version : '>=3.7.0',
method : 'cmake',
include_type: 'system',
)
deps_other += toml11
config_h = configure_file(

View file

@ -25,7 +25,7 @@ std::ostream & operator <<(std::ostream & str, const SymbolStr & symbol)
void Expr::show(const SymbolTable & symbols, std::ostream & str) const
{
abort();
unreachable();
}
void ExprInt::show(const SymbolTable & symbols, std::ostream & str) const
@ -82,7 +82,9 @@ void ExprAttrs::showBindings(const SymbolTable & symbols, std::ostream & str) co
return sa < sb;
});
std::vector<Symbol> inherits;
std::map<ExprInheritFrom *, std::vector<Symbol>> inheritsFrom;
// We can use the displacement as a proxy for the order in which the symbols were parsed.
// The assignment of displacements should be deterministic, so that showBindings is deterministic.
std::map<Displacement, std::vector<Symbol>> inheritsFrom;
for (auto & i : sorted) {
switch (i->second.kind) {
case AttrDef::Kind::Plain:
@ -93,7 +95,7 @@ void ExprAttrs::showBindings(const SymbolTable & symbols, std::ostream & str) co
case AttrDef::Kind::InheritedFrom: {
auto & select = dynamic_cast<ExprSelect &>(*i->second.e);
auto & from = dynamic_cast<ExprInheritFrom &>(*select.e);
inheritsFrom[&from].push_back(i->first);
inheritsFrom[from.displ].push_back(i->first);
break;
}
}
@ -105,7 +107,7 @@ void ExprAttrs::showBindings(const SymbolTable & symbols, std::ostream & str) co
}
for (const auto & [from, syms] : inheritsFrom) {
str << "inherit (";
(*inheritFromExprs)[from->displ]->show(symbols, str);
(*inheritFromExprs)[from]->show(symbols, str);
str << ")";
for (auto sym : syms) str << " " << symbols[sym];
str << "; ";
@ -269,7 +271,7 @@ std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath)
void Expr::bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env)
{
abort();
unreachable();
}
void ExprInt::bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env)

View file

@ -186,7 +186,7 @@ struct ExprInheritFrom : ExprVar
this->fromWith = nullptr;
}
void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env);
void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env) override;
};
struct ExprSelect : Expr
@ -203,7 +203,7 @@ struct ExprSelect : Expr
*
* @param[out] v The attribute set that should contain the last attribute name (if it exists).
* @return The last attribute name in `attrPath`
*
*
* @note This does *not* evaluate the final attribute, and does not fail if that's the only attribute that does not exist.
*/
Symbol evalExceptFinalSelect(EvalState & state, Env & env, Value & attrs);

View file

@ -102,12 +102,8 @@ mkMesonDerivation (finalAttrs: {
LDFLAGS = "-fuse-ld=gold";
};
enableParallelBuilding = true;
separateDebugInfo = !stdenv.hostPlatform.isStatic;
strictDeps = true;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {

View file

@ -64,7 +64,7 @@ struct LexerState
/**
* @brief Maps some positions to a DocComment, where the comment is relevant to the location.
*/
std::map<PosIdx, DocComment> & positionToDocComment;
std::unordered_map<PosIdx, DocComment> & positionToDocComment;
PosTable & positions;
PosTable::Origin origin;
@ -291,12 +291,23 @@ inline Expr * ParserState::stripIndentation(const PosIdx pos,
s2 = std::string(s2, 0, p + 1);
}
es2->emplace_back(i->first, new ExprString(std::move(s2)));
// Ignore empty strings for a minor optimisation and AST simplification
if (s2 != "") {
es2->emplace_back(i->first, new ExprString(std::move(s2)));
}
};
for (; i != es.end(); ++i, --n) {
std::visit(overloaded { trimExpr, trimString }, i->second);
}
// If there is nothing at all, return the empty string directly.
// This also ensures that equivalent empty strings result in the same ast, which is helpful when testing formatters.
if (es2->size() == 0) {
auto *const result = new ExprString("");
delete es2;
return result;
}
/* If this is a single string, then don't do a concatenation. */
if (es2->size() == 1 && dynamic_cast<ExprString *>((*es2)[0].second)) {
auto *const result = (*es2)[0].second;

View file

@ -48,7 +48,7 @@
namespace nix {
typedef std::map<PosIdx, DocComment> DocCommentMap;
typedef std::unordered_map<PosIdx, DocComment> DocCommentMap;
Expr * parseExprFromBuf(
char * text,
@ -99,6 +99,14 @@ static void setDocPosition(const LexerState & lexerState, ExprLambda * lambda, P
}
}
static Expr * makeCall(PosIdx pos, Expr * fn, Expr * arg) {
if (auto e2 = dynamic_cast<ExprCall *>(fn)) {
e2->args.push_back(arg);
return fn;
}
return new ExprCall(pos, fn, {arg});
}
%}
@ -123,6 +131,7 @@ static void setDocPosition(const LexerState & lexerState, ExprLambda * lambda, P
%type <e> start expr expr_function expr_if expr_op
%type <e> expr_select expr_simple expr_app
%type <e> expr_pipe_from expr_pipe_into
%type <list> expr_list
%type <attrs> binds
%type <formals> formals
@ -140,6 +149,7 @@ static void setDocPosition(const LexerState & lexerState, ExprLambda * lambda, P
%token <path> PATH HPATH SPATH PATH_END
%token <uri> URI
%token IF THEN ELSE ASSERT WITH LET IN_KW REC INHERIT EQ NEQ AND OR IMPL OR_KW
%token PIPE_FROM PIPE_INTO /* <| and |> */
%token DOLLAR_CURLY /* == ${ */
%token IND_STRING_OPEN IND_STRING_CLOSE
%token ELLIPSIS
@ -206,9 +216,21 @@ expr_function
expr_if
: IF expr THEN expr ELSE expr { $$ = new ExprIf(CUR_POS, $2, $4, $6); }
| expr_pipe_from
| expr_pipe_into
| expr_op
;
expr_pipe_from
: expr_op PIPE_FROM expr_pipe_from { $$ = makeCall(state->at(@2), $1, $3); }
| expr_op PIPE_FROM expr_op { $$ = makeCall(state->at(@2), $1, $3); }
;
expr_pipe_into
: expr_pipe_into PIPE_INTO expr_op { $$ = makeCall(state->at(@2), $3, $1); }
| expr_op PIPE_INTO expr_op { $$ = makeCall(state->at(@2), $3, $1); }
;
expr_op
: '!' expr_op %prec NOT { $$ = new ExprOpNot($2); }
| '-' expr_op %prec NEGATE { $$ = new ExprCall(CUR_POS, new ExprVar(state->s.sub), {new ExprInt(0), $2}); }
@ -233,13 +255,7 @@ expr_op
;
expr_app
: expr_app expr_select {
if (auto e2 = dynamic_cast<ExprCall *>($1)) {
e2->args.push_back($2);
$$ = $1;
} else
$$ = new ExprCall(CUR_POS, $1, {$2});
}
: expr_app expr_select { $$ = makeCall(CUR_POS, $1, $2); }
| expr_select
;
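
The two new grammar rules desugar the experimental pipe operators into ordinary application via the makeCall helper introduced above: `x |> f` parses as the call `f x` (and `|>` chains left-associatively), while `f <| x` also parses as `f x` (with `<|` chaining right-associatively). A toy illustration of the resulting call nesting, using strings in place of Expr nodes:

```cpp
#include <iostream>
#include <string>

// Build a textual representation of "apply fn to arg", standing in for
// the ExprCall nodes the parser would construct.
std::string call(const std::string & fn, const std::string & arg)
{
    return "(" + fn + " " + arg + ")";
}

int main()
{
    // x |> f |> g  ==  (x |> f) |> g   ==>  g (f x)
    std::cout << call("g", call("f", "x")) << "\n"; // prints (g (f x))

    // g <| f <| x  ==  g <| (f <| x)   ==>  g (f x)
    std::cout << call("g", call("f", "x")) << "\n"; // prints (g (f x))
}
```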

View file

@ -1,6 +1,7 @@
#pragma once
#include <cinttypes>
#include <functional>
namespace nix {
@ -8,6 +9,7 @@ class PosIdx
{
friend struct LazyPosAcessors;
friend class PosTable;
friend class std::hash<PosIdx>;
private:
uint32_t id;
@ -37,8 +39,26 @@ public:
{
return id == other.id;
}
size_t hash() const noexcept
{
return std::hash<uint32_t>{}(id);
}
};
inline PosIdx noPos = {};
}
namespace std {
template<>
struct hash<nix::PosIdx>
{
std::size_t operator()(nix::PosIdx pos) const noexcept
{
return pos.hash();
}
};
} // namespace std
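
The std::hash specialization added here is what allows the DocCommentMap and positionToDocComment maps elsewhere in this commit to switch from std::map to std::unordered_map. A self-contained sketch of the same pattern for a generic id-wrapper type (a toy Id type, not the actual PosIdx):

```cpp
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

// A strongly typed wrapper around a 32-bit id, made hashable by
// delegating to std::hash of the underlying integer -- the same pattern
// PosIdx (and Symbol) use in this commit.
class Id
{
    uint32_t id = 0;
    friend struct std::hash<Id>;
public:
    explicit Id(uint32_t id) : id(id) {}
    bool operator==(const Id & other) const { return id == other.id; }
};

template<>
struct std::hash<Id>
{
    std::size_t operator()(Id i) const noexcept
    {
        return std::hash<uint32_t>{}(i.id);
    }
};

int main()
{
    std::unordered_map<Id, std::string> docs; // Id is now usable as a key
    docs.emplace(Id{42}, "a doc comment");
}
```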

View file

@ -426,7 +426,7 @@ static void prim_typeOf(EvalState & state, const PosIdx pos, Value * * args, Val
t = args[0]->external()->typeOf();
break;
case nFloat: t = "float"; break;
case nThunk: abort();
case nThunk: unreachable();
}
v.mkString(t);
}
@ -719,7 +719,7 @@ static RegisterPrimOp primop_genericClosure(PrimOp {
.doc = R"(
`builtins.genericClosure` iteratively computes the transitive closure over an arbitrary relation defined by a function.
It takes *attrset* with two attributes named `startSet` and `operator`, and returns a list of attrbute sets:
It takes *attrset* with two attributes named `startSet` and `operator`, and returns a list of attribute sets:
- `startSet`:
The initial list of attribute sets.
@ -1843,34 +1843,6 @@ static RegisterPrimOp primop_findFile(PrimOp {
.doc = R"(
Find *lookup-path* in *search-path*.
A search path is represented list of [attribute sets](./types.md#attribute-set) with two attributes:
- `prefix` is a relative path.
- `path` denotes a file system location
The exact syntax depends on the command line interface.
Examples of search path attribute sets:
- ```
{
prefix = "nixos-config";
path = "/etc/nixos/configuration.nix";
}
```
- ```
{
prefix = "";
path = "/nix/var/nix/profiles/per-user/root/channels";
}
```
The lookup algorithm checks each entry until a match is found, returning a [path value](@docroot@/language/types.md#type-path) of the match:
- If *lookup-path* matches `prefix`, then the remainder of *lookup-path* (the "suffix") is searched for within the directory denoted by `path`.
Note that the `path` may need to be downloaded at this point to look inside.
- If the suffix is found inside that directory, then the entry is a match.
The combined absolute path of the directory (now downloaded if need be) and the suffix is returned.
[Lookup path](@docroot@/language/constructs/lookup-path.md) expressions are [desugared](https://en.wikipedia.org/wiki/Syntactic_sugar) using this and [`builtins.nixPath`](#builtins-nixPath):
```nix
@ -1882,6 +1854,119 @@ static RegisterPrimOp primop_findFile(PrimOp {
```nix
builtins.findFile builtins.nixPath "nixpkgs"
```
A search path is represented as a list of [attribute sets](./types.md#attribute-set) with two attributes:
- `prefix` is a relative path.
- `path` denotes a file system location
Examples of search path attribute sets:
- ```
{
prefix = "";
path = "/nix/var/nix/profiles/per-user/root/channels";
}
```
- ```
{
prefix = "nixos-config";
path = "/etc/nixos/configuration.nix";
}
```
- ```
{
prefix = "nixpkgs";
path = "https://github.com/NixOS/nixpkgs/tarballs/master";
}
```
- ```
{
prefix = "nixpkgs";
path = "channel:nixpkgs-unstable";
}
```
- ```
{
prefix = "flake-compat";
path = "flake:github:edolstra/flake-compat";
}
```
The lookup algorithm checks each entry until a match is found, returning a [path value](@docroot@/language/types.md#type-path) of the match:
- If a prefix of `lookup-path` matches `prefix`, then the remainder of *lookup-path* (the "suffix") is searched for within the directory denoted by `path`.
The contents of `path` may need to be downloaded at this point to look inside.
- If the suffix is found inside that directory, then the entry is a match.
The combined absolute path of the directory (now downloaded if need be) and the suffix is returned.
> **Example**
>
> A *search-path* value
>
> ```
> [
> {
> prefix = "";
> path = "/home/eelco/Dev";
> }
> {
> prefix = "nixos-config";
> path = "/etc/nixos";
> }
> ]
> ```
>
> and a *lookup-path* value `"nixos-config"` will cause Nix to try `/home/eelco/Dev/nixos-config` and `/etc/nixos` in that order and return the first path that exists.
If `path` starts with `http://` or `https://`, it is interpreted as the URL of a tarball that will be downloaded and unpacked to a temporary location.
The tarball must consist of a single top-level directory.
The URLs of the tarballs from the official `nixos.org` channels can be abbreviated as `channel:<channel-name>`.
See [documentation on `nix-channel`](@docroot@/command-ref/nix-channel.md) for details about channels.
> **Example**
>
> These two search path entries are equivalent:
>
> - ```
> {
> prefix = "nixpkgs";
> path = "channel:nixpkgs-unstable";
> }
> ```
> - ```
> {
> prefix = "nixpkgs";
> path = "https://nixos.org/channels/nixos-unstable/nixexprs.tar.xz";
> }
> ```
Search paths can also point to source trees using [flake URLs](@docroot@/command-ref/new-cli/nix3-flake.md#url-like-syntax).
> **Example**
>
> The search path entry
>
> ```
> {
> prefix = "nixpkgs";
> path = "flake:nixpkgs";
> }
> ```
> specifies that the prefix `nixpkgs` shall refer to the source tree downloaded from the `nixpkgs` entry in the flake registry.
>
> Similarly
>
> ```
> {
> prefix = "nixpkgs";
> path = "flake:github:nixos/nixpkgs/nixos-22.05";
> }
> ```
>
> makes `<nixpkgs>` refer to a particular branch of the `NixOS/nixpkgs` repository on GitHub.
)",
.fun = prim_findFile,
});
@ -4731,6 +4816,13 @@ void EvalState::createBaseEnv()
.doc = R"(
The value of the [`nix-path` configuration setting](@docroot@/command-ref/conf-file.md#conf-nix-path): a list of search path entries used to resolve [lookup paths](@docroot@/language/constructs/lookup-path.md).
> **Example**
>
> ```bash
> $ NIX_PATH= nix-instantiate --eval --expr "builtins.nixPath" -I foo=bar --no-pure-eval
> [ { path = "bar"; prefix = "foo"; } ]
> ```
Lookup path expressions are [desugared](https://en.wikipedia.org/wiki/Syntactic_sugar) using this and
[`builtins.findFile`](./builtins.html#builtins-findFile):

View file

@ -1,6 +1,31 @@
/* This is the implementation of the derivation builtin function.
It's actually a wrapper around the derivationStrict primop. */
# This is the implementation of the derivation builtin function.
# It's actually a wrapper around the derivationStrict primop.
# Note that the following comment will be shown in :doc in the repl, but not in the manual.
/**
Create a derivation.
# Inputs
The single argument is an attribute set that describes what to build and how to build it.
See https://nix.dev/manual/nix/2.23/language/derivations
# Output
The result is an attribute set that describes the derivation.
Notably it contains the outputs, which in the context of the Nix language are special strings that refer to the output paths, which may not yet exist.
The realisation of these outputs only occurs when needed; for example
* When `nix-build` or a similar command is run, it realises the outputs that were requested on its command line.
See https://nix.dev/manual/nix/2.23/command-ref/nix-build
* When `import`, `readFile`, `readDir` or some other functions are called, they have to realise the outputs they depend on.
This is referred to as "import from derivation".
See https://nix.dev/manual/nix/2.23/language/import-from-derivation
Note that `derivation` is very bare-bones, and provides almost no commands during the build.
Most likely, you'll want to use functions like `stdenv.mkDerivation` in Nixpkgs to set up a basic environment.
*/
drvAttrs @ { outputs ? [ "out" ], ... }:
let

View file

@ -523,9 +523,19 @@ static void prim_fetchurl(EvalState & state, const PosIdx pos, Value * * args, V
static RegisterPrimOp primop_fetchurl({
.name = "__fetchurl",
.args = {"url"},
.args = {"arg"},
.doc = R"(
Download the specified URL and return the path of the downloaded file.
`arg` can be either a string denoting the URL, or an attribute set with the following attributes:
- `url`
The URL of the file to download.
- `name` (default: the last path component of the URL)
A name for the file in the store. This can be useful if the URL has any
characters that are invalid for the store.
Not available in [restricted evaluation mode](@docroot@/command-ref/conf-file.md#conf-restrict-eval).
)",
@ -543,11 +553,11 @@ static RegisterPrimOp primop_fetchTarball({
.doc = R"(
Download the specified URL, unpack it and return the path of the
unpacked tree. The file must be a tape archive (`.tar`) compressed
with `gzip`, `bzip2` or `xz`. The top-level path component of the
files in the tarball is removed, so it is best if the tarball
contains a single directory at top level. The typical use of the
function is to obtain external Nix expression dependencies, such as
a particular version of Nixpkgs, e.g.
with `gzip`, `bzip2` or `xz`. If the tarball consists of a
single directory, then the top-level path component of the files
in the tarball is removed. The typical use of the function is to
obtain external Nix expression dependencies, such as a
particular version of Nixpkgs, e.g.
```nix
with import (fetchTarball https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz) {};
@ -654,12 +664,12 @@ static RegisterPrimOp primop_fetchGit({
Whether to check `rev` for a signature matching `publicKey` or `publicKeys`.
If `verifyCommit` is enabled, then `fetchGit` cannot use a local repository with uncommitted changes.
Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches).
Requires the [`verified-fetches` experimental feature](@docroot@/development/experimental-features.md#xp-feature-verified-fetches).
- `publicKey`
The public key against which `rev` is verified if `verifyCommit` is enabled.
Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches).
Requires the [`verified-fetches` experimental feature](@docroot@/development/experimental-features.md#xp-feature-verified-fetches).
- `keytype` (default: `"ssh-ed25519"`)
@ -671,7 +681,7 @@ static RegisterPrimOp primop_fetchGit({
- `"ssh-ed25519"`
- `"ssh-ed25519-sk"`
- `"ssh-rsa"`
Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches).
Requires the [`verified-fetches` experimental feature](@docroot@/development/experimental-features.md#xp-feature-verified-fetches).
- `publicKeys`
@ -685,7 +695,7 @@ static RegisterPrimOp primop_fetchGit({
}
```
Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches).
Requires the [`verified-fetches` experimental feature](@docroot@/development/experimental-features.md#xp-feature-verified-fetches).
Here are some examples of how to use `fetchGit`.

View file

@ -2,6 +2,7 @@
#include "eval-inline.hh"
#include <sstream>
#include <toml.hpp>
namespace nix {

View file

@ -94,7 +94,7 @@ void printAmbiguous(
break;
default:
printError("Nix evaluator internal error: printAmbiguous: invalid value type");
abort();
unreachable();
}
}

View file

@ -299,6 +299,9 @@ private:
output << ANSI_NORMAL;
}
/**
* @note This may force items.
*/
bool shouldPrettyPrintAttrs(AttrVec & v)
{
if (!options.shouldPrettyPrint() || v.empty()) {
@ -315,6 +318,9 @@ private:
return true;
}
// It is ok to force the item(s) here, because they will be printed anyway.
state.forceValue(*item, item->determinePos(noPos));
// Pretty-print single-item attrsets only if they contain nested
// structures.
auto itemType = item->type();
@ -371,6 +377,9 @@ private:
}
}
/**
* @note This may force items.
*/
bool shouldPrettyPrintList(std::span<Value * const> list)
{
if (!options.shouldPrettyPrint() || list.empty()) {
@ -387,6 +396,9 @@ private:
return true;
}
// It is ok to force the item(s) here, because they will be printed anyway.
state.forceValue(*item, item->determinePos(noPos));
// Pretty-print single-item lists only if they contain nested
// structures.
auto itemType = item->type();
@ -463,7 +475,7 @@ private:
else
output << "primop";
} else {
abort();
unreachable();
}
output << "»";
@ -492,7 +504,7 @@ private:
if (options.ansiColors)
output << ANSI_NORMAL;
} else {
abort();
unreachable();
}
}

View file

@ -7,6 +7,7 @@
#include "types.hh"
#include "chunked-vector.hh"
#include "error.hh"
namespace nix {
@ -69,6 +70,8 @@ public:
auto operator<=>(const Symbol other) const { return id <=> other.id; }
bool operator==(const Symbol other) const { return id == other.id; }
friend class std::hash<Symbol>;
};
/**
@ -113,7 +116,7 @@ public:
SymbolStr operator[](Symbol s) const
{
if (s.id == 0 || s.id > store.size())
abort();
unreachable();
return SymbolStr(store[s.id - 1]);
}
@ -132,3 +135,12 @@ public:
};
}
template<>
struct std::hash<nix::Symbol>
{
std::size_t operator()(const nix::Symbol & s) const noexcept
{
return std::hash<decltype(s.id)>{}(s.id);
}
};

View file

@ -285,7 +285,7 @@ public:
if (invalidIsThunk)
return nThunk;
else
abort();
unreachable();
}
inline void finishValue(InternalType newType, Payload newPayload)
@ -494,11 +494,11 @@ void Value::mkBlackhole()
#if HAVE_BOEHMGC
typedef std::vector<Value *, traceable_allocator<Value *>> ValueVector;
typedef std::map<Symbol, Value *, std::less<Symbol>, traceable_allocator<std::pair<const Symbol, Value *>>> ValueMap;
typedef std::unordered_map<Symbol, Value *, std::hash<Symbol>, std::equal_to<Symbol>, traceable_allocator<std::pair<const Symbol, Value *>>> ValueMap;
typedef std::map<Symbol, ValueVector, std::less<Symbol>, traceable_allocator<std::pair<const Symbol, ValueVector>>> ValueVectorMap;
#else
typedef std::vector<Value *> ValueVector;
typedef std::map<Symbol, Value *> ValueMap;
typedef std::unordered_map<Symbol, Value *> ValueMap;
typedef std::map<Symbol, ValueVector> ValueVectorMap;
#endif

View file

@ -33,7 +33,7 @@ nlohmann::json attrsToJSON(const Attrs & attrs)
json[attr.first] = *v;
} else if (auto v = std::get_if<Explicit<bool>>(&attr.second)) {
json[attr.first] = v->t;
} else abort();
} else unreachable();
}
return json;
}
@ -99,7 +99,7 @@ std::map<std::string, std::string> attrsToQuery(const Attrs & attrs)
query.insert_or_assign(attr.first, *v);
} else if (auto v = std::get_if<Explicit<bool>>(&attr.second)) {
query.insert_or_assign(attr.first, v->t ? "1" : "0");
} else abort();
} else unreachable();
}
return query;
}

View file

@ -126,16 +126,39 @@ Object lookupObject(git_repository * repo, const git_oid & oid, git_object_t typ
}
template<typename T>
T peelObject(git_repository * repo, git_object * obj, git_object_t type)
T peelObject(git_object * obj, git_object_t type)
{
T obj2;
if (git_object_peel((git_object * *) (typename T::pointer *) Setter(obj2), obj, type)) {
auto err = git_error_last();
throw Error("peeling Git object '%s': %s", git_object_id(obj), err->message);
throw Error("peeling Git object '%s': %s", *git_object_id(obj), err->message);
}
return obj2;
}
template<typename T>
T dupObject(typename T::pointer obj)
{
T obj2;
if (git_object_dup((git_object * *) (typename T::pointer *) Setter(obj2), (git_object *) obj))
throw Error("duplicating object '%s': %s", *git_object_id((git_object *) obj), git_error_last()->message);
return obj2;
}
/**
* Peel the specified object (i.e. follow tag and commit objects) to
* either a blob or a tree.
*/
static Object peelToTreeOrBlob(git_object * obj)
{
/* git_object_peel() doesn't handle blob objects, so handle those
specially. */
if (git_object_type(obj) == GIT_OBJECT_BLOB)
return dupObject<Object>(obj);
else
return peelObject<Object>(obj, GIT_OBJECT_TREE);
}
struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
{
/** Location of the repository on disk. */
@ -166,7 +189,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
std::unordered_set<git_oid> done;
std::queue<Commit> todo;
todo.push(peelObject<Commit>(*this, lookupObject(*this, hashToOID(rev)).get(), GIT_OBJECT_COMMIT));
todo.push(peelObject<Commit>(lookupObject(*this, hashToOID(rev)).get(), GIT_OBJECT_COMMIT));
while (auto commit = pop(todo)) {
if (!done.insert(*git_commit_id(commit->get())).second) continue;
@ -184,7 +207,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
uint64_t getLastModified(const Hash & rev) override
{
auto commit = peelObject<Commit>(*this, lookupObject(*this, hashToOID(rev)).get(), GIT_OBJECT_COMMIT);
auto commit = peelObject<Commit>(lookupObject(*this, hashToOID(rev)).get(), GIT_OBJECT_COMMIT);
return git_commit_time(commit.get());
}
@ -463,6 +486,23 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
return narHash;
}
Hash dereferenceSingletonDirectory(const Hash & oid_) override
{
auto oid = hashToOID(oid_);
auto _tree = lookupObject(*this, oid, GIT_OBJECT_TREE);
auto tree = (const git_tree *) &*_tree;
if (git_tree_entrycount(tree) == 1) {
auto entry = git_tree_entry_byindex(tree, 0);
auto mode = git_tree_entry_filemode(entry);
if (mode == GIT_FILEMODE_TREE)
oid = *git_tree_entry_id(entry);
}
return toHash(oid);
}
};
ref<GitRepo> GitRepo::openRepo(const std::filesystem::path & path, bool create, bool bare)
@ -476,11 +516,11 @@ ref<GitRepo> GitRepo::openRepo(const std::filesystem::path & path, bool create,
struct GitSourceAccessor : SourceAccessor
{
ref<GitRepoImpl> repo;
Tree root;
Object root;
GitSourceAccessor(ref<GitRepoImpl> repo_, const Hash & rev)
: repo(repo_)
, root(peelObject<Tree>(*repo, lookupObject(*repo, hashToOID(rev)).get(), GIT_OBJECT_TREE))
, root(peelToTreeOrBlob(lookupObject(*repo, hashToOID(rev)).get()))
{
}
@ -506,7 +546,7 @@ struct GitSourceAccessor : SourceAccessor
std::optional<Stat> maybeLstat(const CanonPath & path) override
{
if (path.isRoot())
return Stat { .type = tDirectory };
return Stat { .type = git_object_type(root.get()) == GIT_OBJECT_TREE ? tDirectory : tRegular };
auto entry = lookup(path);
if (!entry)
@ -616,10 +656,10 @@ struct GitSourceAccessor : SourceAccessor
std::optional<Tree> lookupTree(const CanonPath & path)
{
if (path.isRoot()) {
Tree tree;
if (git_tree_dup(Setter(tree), root.get()))
throw Error("duplicating directory '%s': %s", showPath(path), git_error_last()->message);
return tree;
if (git_object_type(root.get()) == GIT_OBJECT_TREE)
return dupObject<Tree>((git_tree *) &*root);
else
return std::nullopt;
}
auto entry = lookup(path);
@ -646,10 +686,10 @@ struct GitSourceAccessor : SourceAccessor
std::variant<Tree, Submodule> getTree(const CanonPath & path)
{
if (path.isRoot()) {
Tree tree;
if (git_tree_dup(Setter(tree), root.get()))
throw Error("duplicating directory '%s': %s", showPath(path), git_error_last()->message);
return tree;
if (git_object_type(root.get()) == GIT_OBJECT_TREE)
return dupObject<Tree>((git_tree *) &*root);
else
throw Error("Git root object '%s' is not a directory", *git_object_id(root.get()));
}
auto entry = need(path);
@ -669,6 +709,9 @@ struct GitSourceAccessor : SourceAccessor
Blob getBlob(const CanonPath & path, bool expectSymlink)
{
if (!expectSymlink && git_object_type(root.get()) == GIT_OBJECT_BLOB)
return dupObject<Blob>((git_blob *) &*root);
auto notExpected = [&]()
{
throw Error(
@ -782,8 +825,6 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink
std::vector<PendingDir> pendingDirs;
size_t componentsToStrip = 1;
void pushBuilder(std::string name)
{
git_treebuilder * b;
@ -839,9 +880,6 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink
{
std::span<const std::string> pathComponents2{pathComponents};
if (pathComponents2.size() <= componentsToStrip) return false;
pathComponents2 = pathComponents2.subspan(componentsToStrip);
updateBuilders(
isDir
? pathComponents2
@ -964,7 +1002,8 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink
git_tree_entry_filemode(entry));
}
Hash sync() override {
Hash sync() override
{
updateBuilders({});
auto [oid, _name] = popBuilder();

View file

@ -98,6 +98,13 @@ struct GitRepo
* serialisation. This is memoised on-disk.
*/
virtual Hash treeHashToNarHash(const Hash & treeHash) = 0;
/**
* If the specified Git object is a directory with a single entry
* that is a directory, return the ID of that object.
* Otherwise, return the passed ID unchanged.
*/
virtual Hash dereferenceSingletonDirectory(const Hash & oid) = 0;
};
ref<GitRepo> getTarballCache();

View file

@ -254,12 +254,18 @@ struct GitArchiveInputScheme : InputScheme
getFileTransfer()->download(std::move(req), sink);
});
auto act = std::make_unique<Activity>(*logger, lvlInfo, actUnknown,
fmt("unpacking '%s' into the Git cache", input.to_string()));
TarArchive archive { *source };
auto parseSink = getTarballCache()->getFileSystemObjectSink();
auto tarballCache = getTarballCache();
auto parseSink = tarballCache->getFileSystemObjectSink();
auto lastModified = unpackTarfileToSink(archive, *parseSink);
act.reset();
TarballInfo tarballInfo {
.treeHash = parseSink->sync(),
.treeHash = tarballCache->dereferenceSingletonDirectory(parseSink->sync()),
.lastModified = lastModified
};

View file

@ -26,6 +26,8 @@ deps_public_maybe_subproject = [
]
subdir('build-utils-meson/subprojects')
subdir('build-utils-meson/threads')
nlohmann_json = dependency('nlohmann_json', version : '>= 3.9')
deps_public += nlohmann_json

View file

@ -67,14 +67,8 @@ mkMesonDerivation (finalAttrs: {
LDFLAGS = "-fuse-ld=gold";
};
enableParallelBuilding = true;
separateDebugInfo = !stdenv.hostPlatform.isStatic;
# TODO `releaseTools.coverageAnalysis` in Nixpkgs needs to be updated
# to work with `strictDeps`.
strictDeps = true;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {

View file

@ -143,6 +143,9 @@ DownloadTarballResult downloadTarball(
// TODO: fall back to cached value if download fails.
auto act = std::make_unique<Activity>(*logger, lvlInfo, actUnknown,
fmt("unpacking '%s' into the Git cache", url));
AutoDelete cleanupTemp;
/* Note: if the download is cached, `importTarball()` will receive
@ -164,9 +167,12 @@ DownloadTarballResult downloadTarball(
TarArchive{path};
})
: TarArchive{*source};
auto parseSink = getTarballCache()->getFileSystemObjectSink();
auto tarballCache = getTarballCache();
auto parseSink = tarballCache->getFileSystemObjectSink();
auto lastModified = unpackTarfileToSink(archive, *parseSink);
act.reset();
auto res(_res->lock());
Attrs infoAttrs;
@ -177,7 +183,8 @@ DownloadTarballResult downloadTarball(
infoAttrs = cached->value;
} else {
infoAttrs.insert_or_assign("etag", res->etag);
infoAttrs.insert_or_assign("treeHash", parseSink->sync().gitRev());
infoAttrs.insert_or_assign("treeHash",
tarballCache->dereferenceSingletonDirectory(parseSink->sync()).gitRev());
infoAttrs.insert_or_assign("lastModified", uint64_t(lastModified));
if (res->immutableUrl)
infoAttrs.insert_or_assign("immutableUrl", *res->immutableUrl);

View file

@ -0,0 +1,10 @@
prefix=@prefix@
libdir=@libdir@
includedir=@includedir@
Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Requires: nix-util nix-store nix-expr
Libs: -L${libdir} -lnixflake
Cflags: -I${includedir}/nix -std=c++2a

View file

@ -15,3 +15,8 @@ libflake_CXXFLAGS += $(INCLUDE_libutil) $(INCLUDE_libstore) $(INCLUDE_libfetcher
libflake_LDFLAGS += $(THREAD_LDFLAGS)
libflake_LIBS = libutil libstore libfetchers libexpr
$(eval $(call install-file-in, $(buildprefix)$(d)/flake/nix-flake.pc, $(libdir)/pkgconfig, 0644))
$(foreach i, $(wildcard src/libflake/flake/*.hh), \
$(eval $(call install-file-in, $(i), $(includedir)/nix/flake, 0644)))

View file

@ -26,6 +26,8 @@ deps_public_maybe_subproject = [
]
subdir('build-utils-meson/subprojects')
subdir('build-utils-meson/threads')
nlohmann_json = dependency('nlohmann_json', version : '>= 3.9')
deps_public += nlohmann_json

View file

@ -67,14 +67,8 @@ mkMesonDerivation (finalAttrs: {
LDFLAGS = "-fuse-ld=gold";
};
enableParallelBuilding = true;
separateDebugInfo = !stdenv.hostPlatform.isStatic;
# TODO `releaseTools.coverageAnalysis` in Nixpkgs needs to be updated
# to work with `strictDeps`.
strictDeps = true;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {

1
src/libmain-c/.version Symbolic link
View file

@ -0,0 +1 @@
../../.version

View file

@ -0,0 +1 @@
../../build-utils-meson

86
src/libmain-c/meson.build Normal file
View file

@ -0,0 +1,86 @@
project('nix-main-c', 'cpp',
version : files('.version'),
default_options : [
'cpp_std=c++2a',
# TODO(Qyriad): increase the warning level
'warning_level=1',
'debug=true',
'optimization=2',
'errorlogs=true', # Please print logs for tests that fail
],
meson_version : '>= 1.1',
license : 'LGPL-2.1-or-later',
)
cxx = meson.get_compiler('cpp')
subdir('build-utils-meson/deps-lists')
configdata = configuration_data()
deps_private_maybe_subproject = [
dependency('nix-util'),
dependency('nix-store'),
dependency('nix-main'),
]
deps_public_maybe_subproject = [
dependency('nix-util-c'),
dependency('nix-store-c'),
]
subdir('build-utils-meson/subprojects')
subdir('build-utils-meson/threads')
# TODO rename, because it will conflict with downstream projects
configdata.set_quoted('PACKAGE_VERSION', meson.project_version())
config_h = configure_file(
configuration : configdata,
output : 'config-main.h',
)
add_project_arguments(
# TODO(Qyriad): Yes this is how the autoconf+Make system did it.
# It would be nice for our headers to be idempotent instead.
# From C++ libraries, only for internals
'-include', 'config-util.hh',
'-include', 'config-store.hh',
'-include', 'config-main.hh',
# From C libraries, for our public, installed headers too
'-include', 'config-util.h',
'-include', 'config-store.h',
'-include', 'config-main.h',
language : 'cpp',
)
subdir('build-utils-meson/diagnostics')
sources = files(
'nix_api_main.cc',
)
include_dirs = [include_directories('.')]
headers = [config_h] + files(
'nix_api_main.h',
)
subdir('build-utils-meson/export-all-symbols')
this_library = library(
'nixmainc',
sources,
dependencies : deps_public + deps_private + deps_other,
include_directories : include_dirs,
link_args: linker_export_flags,
prelink : true, # For C++ static initializers
install : true,
)
install_headers(headers, subdir : 'nix', preserve_path : true)
libraries_private = []
subdir('build-utils-meson/export')

View file

@ -0,0 +1,16 @@
#include "nix_api_store.h"
#include "nix_api_store_internal.h"
#include "nix_api_util.h"
#include "nix_api_util_internal.h"
#include "plugin.hh"
nix_err nix_init_plugins(nix_c_context * context)
{
if (context)
context->last_err_code = NIX_OK;
try {
nix::initPlugins();
}
NIXC_CATCH_ERRS
}

View file

@ -0,0 +1,40 @@
#ifndef NIX_API_MAIN_H
#define NIX_API_MAIN_H
/**
* @defgroup libmain libmain
* @brief C bindings for nix libmain
*
* libmain has misc utilities for CLI commands
* @{
*/
/** @file
* @brief Main entry for the libmain C bindings
*/
#include "nix_api_util.h"
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
// cffi start
/**
* @brief Loads the plugins specified in Nix's plugin-files setting.
*
* Call this once, after calling your desired init functions and setting
* relevant settings.
*
* @param[out] context Optional, stores error information
* @return NIX_OK if the initialization was successful, an error code otherwise.
*/
nix_err nix_init_plugins(nix_c_context * context);
// cffi end
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif // NIX_API_MAIN_H
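As context for the calling convention documented above, the following is a minimal, hypothetical caller of the new binding. It assumes the context and init helpers from `nix_api_util.h` / `nix_api_store.h` (`nix_c_context_create`, `nix_c_context_free`, `nix_libutil_init`, `nix_libstore_init`); treat it as an illustrative sketch, not something introduced by this commit.

```cpp
// Hypothetical usage sketch for nix_init_plugins(); not part of this change.
#include <cstdio>

#include "nix_api_util.h"
#include "nix_api_store.h"
#include "nix_api_main.h"

int main()
{
    nix_c_context * ctx = nix_c_context_create();

    // Initialize the libraries whose settings plugins may extend first ...
    if (nix_libutil_init(ctx) != NIX_OK || nix_libstore_init(ctx) != NIX_OK) {
        std::fprintf(stderr, "failed to initialize Nix libraries\n");
        nix_c_context_free(ctx);
        return 1;
    }

    // ... then load the plugins named by the plugin-files setting, once.
    if (nix_init_plugins(ctx) != NIX_OK) {
        std::fprintf(stderr, "failed to load Nix plugins\n");
        nix_c_context_free(ctx);
        return 1;
    }

    nix_c_context_free(ctx);
    return 0;
}
```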

79
src/libmain-c/package.nix Normal file
View file

@ -0,0 +1,79 @@
{ lib
, stdenv
, mkMesonDerivation
, releaseTools
, meson
, ninja
, pkg-config
, nix-util-c
, nix-store
, nix-store-c
, nix-main
# Configuration Options
, version
}:
let
inherit (lib) fileset;
in
mkMesonDerivation (finalAttrs: {
pname = "nix-main-c";
inherit version;
workDir = ./.;
fileset = fileset.unions [
../../build-utils-meson
./build-utils-meson
../../.version
./.version
./meson.build
# ./meson.options
(fileset.fileFilter (file: file.hasExt "cc") ./.)
(fileset.fileFilter (file: file.hasExt "hh") ./.)
(fileset.fileFilter (file: file.hasExt "h") ./.)
];
outputs = [ "out" "dev" ];
nativeBuildInputs = [
meson
ninja
pkg-config
];
propagatedBuildInputs = [
nix-util-c
nix-store
nix-store-c
nix-main
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
separateDebugInfo = !stdenv.hostPlatform.isStatic;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows;
};
})

View file

@ -5,6 +5,7 @@
#include "logging.hh"
#include "loggers.hh"
#include "util.hh"
#include "plugin.hh"
namespace nix {

View file

@ -36,7 +36,7 @@ Logger * makeDefaultLogger() {
return logger;
}
default:
abort();
unreachable();
}
}

View file

@ -26,6 +26,7 @@ deps_public_maybe_subproject = [
]
subdir('build-utils-meson/subprojects')
subdir('build-utils-meson/threads')
pubsetbuf_test = '''
#include <iostream>
@ -64,6 +65,7 @@ subdir('build-utils-meson/diagnostics')
sources = files(
'common-args.cc',
'loggers.cc',
'plugin.cc',
'progress-bar.cc',
'shared.cc',
)
@ -79,6 +81,7 @@ include_dirs = [include_directories('.')]
headers = [config_h] + files(
'common-args.hh',
'loggers.hh',
'plugin.hh',
'progress-bar.hh',
'shared.hh',
)

View file

@ -62,14 +62,8 @@ mkMesonDerivation (finalAttrs: {
LDFLAGS = "-fuse-ld=gold";
};
enableParallelBuilding = true;
separateDebugInfo = !stdenv.hostPlatform.isStatic;
# TODO `releaseTools.coverageAnalysis` in Nixpkgs needs to be updated
# to work with `strictDeps`.
strictDeps = true;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {

119
src/libmain/plugin.cc Normal file
View file

@ -0,0 +1,119 @@
#ifndef _WIN32
# include <dlfcn.h>
#endif
#include <filesystem>
#include "config-global.hh"
#include "signals.hh"
namespace nix {
struct PluginFilesSetting : public BaseSetting<Paths>
{
bool pluginsLoaded = false;
PluginFilesSetting(
Config * options,
const Paths & def,
const std::string & name,
const std::string & description,
const std::set<std::string> & aliases = {})
: BaseSetting<Paths>(def, true, name, description, aliases)
{
options->addSetting(this);
}
Paths parse(const std::string & str) const override;
};
Paths PluginFilesSetting::parse(const std::string & str) const
{
if (pluginsLoaded)
throw UsageError(
"plugin-files set after plugins were loaded, you may need to move the flag before the subcommand");
return BaseSetting<Paths>::parse(str);
}
struct PluginSettings : Config
{
PluginFilesSetting pluginFiles{
this,
{},
"plugin-files",
R"(
A list of plugin files to be loaded by Nix. Each of these files will
be dlopened by Nix. If they contain the symbol `nix_plugin_entry()`,
this symbol will be called. Alternatively, they can affect execution
through static initialization. In particular, these plugins may construct
static instances of RegisterPrimOp to add new primops or constants to the
expression language, RegisterStoreImplementation to add new store
implementations, RegisterCommand to add new subcommands to the `nix`
command, and RegisterSetting to add new nix config settings. See the
constructors for those types for more details.
Warning! These APIs are inherently unstable and may change from
release to release.
Since these files are loaded into the same address space as Nix
itself, they must be DSOs compatible with the instance of Nix
running at the time (i.e. compiled against the same headers, not
linked to any incompatible libraries). They should not be linked to
any Nix libs directly, as those will be available already at load
time.
If an entry in the list is a directory, all files in the directory
are loaded as plugins (non-recursively).
)"};
};
static PluginSettings pluginSettings;
static GlobalConfig::Register rPluginSettings(&pluginSettings);
void initPlugins()
{
assert(!pluginSettings.pluginFiles.pluginsLoaded);
for (const auto & pluginFile : pluginSettings.pluginFiles.get()) {
std::vector<std::filesystem::path> pluginFiles;
try {
auto ents = std::filesystem::directory_iterator{pluginFile};
for (const auto & ent : ents) {
checkInterrupt();
pluginFiles.emplace_back(ent.path());
}
} catch (std::filesystem::filesystem_error & e) {
if (e.code() != std::errc::not_a_directory)
throw;
pluginFiles.emplace_back(pluginFile);
}
for (const auto & file : pluginFiles) {
checkInterrupt();
/* handle is purposefully leaked as there may be state in the
DSO needed by the action of the plugin. */
#ifndef _WIN32 // TODO implement via DLL loading on Windows
void * handle = dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL);
if (!handle)
throw Error("could not dynamically open plugin file '%s': %s", file, dlerror());
/* Older plugins use a statically initialized object to run their code.
Newer plugins can also export nix_plugin_entry() */
void (*nix_plugin_entry)() = (void (*)()) dlsym(handle, "nix_plugin_entry");
if (nix_plugin_entry)
nix_plugin_entry();
#else
throw Error("could not dynamically open plugin file '%s'", file);
#endif
}
}
/* Since plugins can add settings, try to re-apply previously
unknown settings. */
globalConfig.reapplyUnknownSettings();
globalConfig.warnUnknownSettings();
/* Tell the user if they try to set plugin-files after we've already loaded */
pluginSettings.pluginFiles.pluginsLoaded = true;
}
}
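To make the `plugin-files` description above concrete, here is a hedged sketch of a plugin using the two extension points it names: static initialization (via `GlobalConfig::Register`, as this file itself does) and the optional `nix_plugin_entry()` symbol. Apart from those two mechanisms, every name in it (the setting name, the message) is invented for illustration.

```cpp
// Hypothetical plugin sketch; compiled as a DSO and listed in plugin-files.
#include "config-global.hh"
#include "logging.hh"

using namespace nix;

struct ExamplePluginSettings : Config
{
    // A made-up setting, showing how a plugin can add new nix.conf options.
    Setting<std::string> greeting{this, "hello from a plugin", "example-plugin-greeting",
        "Illustrative setting registered by an example plugin."};
};

static ExamplePluginSettings examplePluginSettings;

// Static registration runs when initPlugins() dlopens the DSO.
static GlobalConfig::Register rExamplePluginSettings(&examplePluginSettings);

// Newer-style hook: initPlugins() calls this symbol if it is exported.
extern "C" void nix_plugin_entry()
{
    debug("example plugin loaded: %s", examplePluginSettings.greeting.get());
}
```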

12
src/libmain/plugin.hh Normal file
View file

@ -0,0 +1,12 @@
#pragma once
///@file
namespace nix {
/**
* This should be called after settings are initialized, but before
* anything else
*/
void initPlugins();
}

View file

@ -27,6 +27,8 @@ deps_public_maybe_subproject = [
]
subdir('build-utils-meson/subprojects')
subdir('build-utils-meson/threads')
# TODO rename, because it will conflict with downstream projects
configdata.set_quoted('PACKAGE_VERSION', meson.project_version())

View file

@ -29,16 +29,6 @@ nix_err nix_libstore_init_no_load_config(nix_c_context * context)
NIXC_CATCH_ERRS
}
nix_err nix_init_plugins(nix_c_context * context)
{
if (context)
context->last_err_code = NIX_OK;
try {
nix::initPlugins();
}
NIXC_CATCH_ERRS
}
Store * nix_store_open(nix_c_context * context, const char * uri, const char *** params)
{
if (context)

View file

@ -42,17 +42,6 @@ nix_err nix_libstore_init(nix_c_context * context);
*/
nix_err nix_libstore_init_no_load_config(nix_c_context * context);
/**
* @brief Loads the plugins specified in Nix's plugin-files setting.
*
* Call this once, after calling your desired init functions and setting
* relevant settings.
*
* @param[out] context Optional, stores error information
* @return NIX_OK if the initialization was successful, an error code otherwise.
*/
nix_err nix_init_plugins(nix_c_context * context);
/**
* @brief Open a nix store.
*

View file

@ -64,12 +64,8 @@ mkMesonDerivation (finalAttrs: {
LDFLAGS = "-fuse-ld=gold";
};
enableParallelBuilding = true;
separateDebugInfo = !stdenv.hostPlatform.isStatic;
strictDeps = true;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {

View file

@ -1569,7 +1569,7 @@ void DerivationGoal::waiteeDone(GoalPtr waitee, ExitCode result)
{
Goal::waiteeDone(waitee, result);
if (!useDerivation) return;
if (!useDerivation || !drv) return;
auto & fullDrv = *dynamic_cast<Derivation *>(drv.get());
auto * dg = dynamic_cast<DerivationGoal *>(&*waitee);

View file

@ -62,7 +62,7 @@ Goal::Co DrvOutputSubstitutionGoal::init()
#ifndef _WIN32
outPipe->readSide.get()
#else
&outPipe
&*outPipe
#endif
}, true, false);

View file

@ -36,7 +36,7 @@ public:
Co init() override;
Co realisationFetched(std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub);
void timedOut(Error && ex) override { abort(); };
void timedOut(Error && ex) override { unreachable(); };
std::string key() override;

View file

@ -400,12 +400,12 @@ public:
virtual void handleChildOutput(Descriptor fd, std::string_view data)
{
abort();
unreachable();
}
virtual void handleEOF(Descriptor fd)
{
abort();
unreachable();
}
void trace(std::string_view s);

View file

@ -50,7 +50,7 @@ public:
PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
~PathSubstitutionGoal();
void timedOut(Error && ex) override { abort(); };
void timedOut(Error && ex) override { unreachable(); };
/**
* We prepend "a$" to the key name to ensure substitution goals

View file

@ -216,7 +216,7 @@ void Worker::childStarted(GoalPtr goal, const std::set<MuxablePipePollState::Com
nrLocalBuilds++;
break;
default:
abort();
unreachable();
}
}
}
@ -239,7 +239,7 @@ void Worker::childTerminated(Goal * goal, bool wakeSleepers)
nrLocalBuilds--;
break;
default:
abort();
unreachable();
}
}

View file

@ -33,7 +33,7 @@ Sink & operator << (Sink & sink, const Logger::Fields & fields)
sink << f.i;
else if (f.type == Logger::Field::tString)
sink << f.s;
else abort();
else unreachable();
}
return sink;
}
@ -167,7 +167,7 @@ struct TunnelSink : Sink
{
Sink & to;
TunnelSink(Sink & to) : to(to) { }
void operator () (std::string_view data)
void operator () (std::string_view data) override
{
to << STDERR_WRITE;
writeString(data, to);
@ -246,10 +246,9 @@ struct ClientSettings
// the daemon, as that could cause some pretty weird stuff
if (parseFeatures(tokenizeString<StringSet>(value)) != experimentalFeatureSettings.experimentalFeatures.get())
debug("Ignoring the client-specified experimental features");
} else if (name == settings.pluginFiles.name) {
if (tokenizeString<Paths>(value) != settings.pluginFiles.get())
warn("Ignoring the client-specified plugin-files.\n"
"The client specifying plugins to the daemon never made sense, and was removed in Nix >=2.14.");
} else if (name == "plugin-files") {
warn("Ignoring the client-specified plugin-files.\n"
"The client specifying plugins to the daemon never made sense, and was removed in Nix >=2.14.");
}
else if (trusted
|| name == settings.buildTimeout.name
@ -270,26 +269,21 @@ struct ClientSettings
};
static void performOp(TunnelLogger * logger, ref<Store> store,
TrustedFlag trusted, RecursiveFlag recursive, WorkerProto::Version clientVersion,
Source & from, BufferedSink & to, WorkerProto::Op op)
TrustedFlag trusted, RecursiveFlag recursive,
WorkerProto::BasicServerConnection & conn,
WorkerProto::Op op)
{
WorkerProto::ReadConn rconn {
.from = from,
.version = clientVersion,
};
WorkerProto::WriteConn wconn {
.to = to,
.version = clientVersion,
};
WorkerProto::ReadConn rconn(conn);
WorkerProto::WriteConn wconn(conn);
switch (op) {
case WorkerProto::Op::IsValidPath: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
bool result = store->isValidPath(path);
logger->stopWork();
to << result;
conn.to << result;
break;
}
@ -297,8 +291,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
SubstituteFlag substitute = NoSubstitute;
if (GET_PROTOCOL_MINOR(clientVersion) >= 27) {
substitute = readInt(from) ? Substitute : NoSubstitute;
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 27) {
substitute = readInt(conn.from) ? Substitute : NoSubstitute;
}
logger->startWork();
@ -312,13 +306,13 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::HasSubstitutes: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
StorePathSet paths; // FIXME
paths.insert(path);
auto res = store->querySubstitutablePaths(paths);
logger->stopWork();
to << (res.count(path) != 0);
conn.to << (res.count(path) != 0);
break;
}
@ -332,11 +326,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::QueryPathHash: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
auto hash = store->queryPathInfo(path)->narHash;
logger->stopWork();
to << hash.to_string(HashFormat::Base16, false);
conn.to << hash.to_string(HashFormat::Base16, false);
break;
}
@ -344,7 +338,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
case WorkerProto::Op::QueryReferrers:
case WorkerProto::Op::QueryValidDerivers:
case WorkerProto::Op::QueryDerivationOutputs: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
StorePathSet paths;
if (op == WorkerProto::Op::QueryReferences)
@ -361,16 +355,16 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::QueryDerivationOutputNames: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
auto names = store->readDerivation(path).outputNames();
logger->stopWork();
to << names;
conn.to << names;
break;
}
case WorkerProto::Op::QueryDerivationOutputMap: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
auto outputs = store->queryPartialDerivationOutputMap(path);
logger->stopWork();
@ -379,37 +373,37 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::QueryDeriver: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
auto info = store->queryPathInfo(path);
logger->stopWork();
to << (info->deriver ? store->printStorePath(*info->deriver) : "");
conn.to << (info->deriver ? store->printStorePath(*info->deriver) : "");
break;
}
case WorkerProto::Op::QueryPathFromHashPart: {
auto hashPart = readString(from);
auto hashPart = readString(conn.from);
logger->startWork();
auto path = store->queryPathFromHashPart(hashPart);
logger->stopWork();
to << (path ? store->printStorePath(*path) : "");
conn.to << (path ? store->printStorePath(*path) : "");
break;
}
case WorkerProto::Op::AddToStore: {
if (GET_PROTOCOL_MINOR(clientVersion) >= 25) {
auto name = readString(from);
auto camStr = readString(from);
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 25) {
auto name = readString(conn.from);
auto camStr = readString(conn.from);
auto refs = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
bool repairBool;
from >> repairBool;
conn.from >> repairBool;
auto repair = RepairFlag{repairBool};
logger->startWork();
auto pathInfo = [&]() {
// NB: FramedSource must be out of scope before logger->stopWork();
auto [contentAddressMethod, hashAlgo] = ContentAddressMethod::parseWithAlgo(camStr);
FramedSource source(from);
FramedSource source(conn.from);
FileSerialisationMethod dumpMethod;
switch (contentAddressMethod.getFileIngestionMethod()) {
case FileIngestionMethod::Flat:
@ -440,7 +434,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
bool fixed;
uint8_t recursive;
std::string hashAlgoRaw;
from >> baseName >> fixed /* obsolete */ >> recursive >> hashAlgoRaw;
conn.from >> baseName >> fixed /* obsolete */ >> recursive >> hashAlgoRaw;
if (recursive > true)
throw Error("unsupported FileIngestionMethod with value of %i; you may need to upgrade nix-daemon", recursive);
method = recursive
@ -460,11 +454,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
so why all this extra work? We still parse the NAR so
that we aren't sending arbitrary data to `saved`
unwittingly`, and we know when the NAR ends so we don't
consume the rest of `from` and can't parse another
consume the rest of `conn.from` and can't parse another
command. (We don't trust `addToStoreFromDump` to not
eagerly consume the entire stream it's given, past the
length of the Nar. */
TeeSource savedNARSource(from, saved);
TeeSource savedNARSource(conn.from, saved);
NullFileSystemObjectSink sink; /* just parse the NAR */
parseDump(sink, savedNARSource);
});
@ -473,20 +467,20 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
*dumpSource, baseName, FileSerialisationMethod::NixArchive, method, hashAlgo);
logger->stopWork();
to << store->printStorePath(path);
conn.to << store->printStorePath(path);
}
break;
}
case WorkerProto::Op::AddMultipleToStore: {
bool repair, dontCheckSigs;
from >> repair >> dontCheckSigs;
conn.from >> repair >> dontCheckSigs;
if (!trusted && dontCheckSigs)
dontCheckSigs = false;
logger->startWork();
{
FramedSource source(from);
FramedSource source(conn.from);
store->addMultipleToStore(source,
RepairFlag{repair},
dontCheckSigs ? NoCheckSigs : CheckSigs);
@ -496,8 +490,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::AddTextToStore: {
std::string suffix = readString(from);
std::string s = readString(from);
std::string suffix = readString(conn.from);
std::string s = readString(conn.from);
auto refs = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
logger->startWork();
auto path = ({
@ -505,37 +499,37 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
store->addToStoreFromDump(source, suffix, FileSerialisationMethod::Flat, ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256, refs, NoRepair);
});
logger->stopWork();
to << store->printStorePath(path);
conn.to << store->printStorePath(path);
break;
}
case WorkerProto::Op::ExportPath: {
auto path = store->parseStorePath(readString(from));
readInt(from); // obsolete
auto path = store->parseStorePath(readString(conn.from));
readInt(conn.from); // obsolete
logger->startWork();
TunnelSink sink(to);
TunnelSink sink(conn.to);
store->exportPath(path, sink);
logger->stopWork();
to << 1;
conn.to << 1;
break;
}
case WorkerProto::Op::ImportPaths: {
logger->startWork();
TunnelSource source(from, to);
TunnelSource source(conn.from, conn.to);
auto paths = store->importPaths(source,
trusted ? NoCheckSigs : CheckSigs);
logger->stopWork();
Strings paths2;
for (auto & i : paths) paths2.push_back(store->printStorePath(i));
to << paths2;
conn.to << paths2;
break;
}
case WorkerProto::Op::BuildPaths: {
auto drvs = WorkerProto::Serialise<DerivedPaths>::read(*store, rconn);
BuildMode mode = bmNormal;
if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 15) {
mode = WorkerProto::Serialise<BuildMode>::read(*store, rconn);
/* Repairing is not atomic, so disallowed for "untrusted"
@ -553,7 +547,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->startWork();
store->buildPaths(drvs, mode);
logger->stopWork();
to << 1;
conn.to << 1;
break;
}
@ -579,7 +573,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::BuildDerivation: {
auto drvPath = store->parseStorePath(readString(from));
auto drvPath = store->parseStorePath(readString(conn.from));
BasicDerivation drv;
/*
* Note: unlike wopEnsurePath, this operation reads a
@ -590,7 +584,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
* it cannot be trusted that its outPath was calculated
* correctly.
*/
readDerivation(from, *store, drv, Derivation::nameFromPath(drvPath));
readDerivation(conn.from, *store, drv, Derivation::nameFromPath(drvPath));
auto buildMode = WorkerProto::Serialise<BuildMode>::read(*store, rconn);
logger->startWork();
@ -656,20 +650,20 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::EnsurePath: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
store->ensurePath(path);
logger->stopWork();
to << 1;
conn.to << 1;
break;
}
case WorkerProto::Op::AddTempRoot: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
store->addTempRoot(path);
logger->stopWork();
to << 1;
conn.to << 1;
break;
}
@ -679,24 +673,24 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
"you are not privileged to create perm roots\n\n"
"hint: you can just do this client-side without special privileges, and probably want to do that instead.");
auto storePath = WorkerProto::Serialise<StorePath>::read(*store, rconn);
Path gcRoot = absPath(readString(from));
Path gcRoot = absPath(readString(conn.from));
logger->startWork();
auto & localFSStore = require<LocalFSStore>(*store);
localFSStore.addPermRoot(storePath, gcRoot);
logger->stopWork();
to << gcRoot;
conn.to << gcRoot;
break;
}
case WorkerProto::Op::AddIndirectRoot: {
Path path = absPath(readString(from));
Path path = absPath(readString(conn.from));
logger->startWork();
auto & indirectRootStore = require<IndirectRootStore>(*store);
indirectRootStore.addIndirectRoot(path);
logger->stopWork();
to << 1;
conn.to << 1;
break;
}
@ -704,7 +698,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
case WorkerProto::Op::SyncWithGC: {
logger->startWork();
logger->stopWork();
to << 1;
conn.to << 1;
break;
}
@ -718,24 +712,24 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
for (auto & i : roots)
size += i.second.size();
to << size;
conn.to << size;
for (auto & [target, links] : roots)
for (auto & link : links)
to << link << store->printStorePath(target);
conn.to << link << store->printStorePath(target);
break;
}
case WorkerProto::Op::CollectGarbage: {
GCOptions options;
options.action = (GCOptions::GCAction) readInt(from);
options.action = (GCOptions::GCAction) readInt(conn.from);
options.pathsToDelete = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
from >> options.ignoreLiveness >> options.maxFreed;
conn.from >> options.ignoreLiveness >> options.maxFreed;
// obsolete fields
readInt(from);
readInt(from);
readInt(from);
readInt(conn.from);
readInt(conn.from);
readInt(conn.from);
GCResults results;
@ -746,7 +740,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
gcStore.collectGarbage(options, results);
logger->stopWork();
to << results.paths << results.bytesFreed << 0 /* obsolete */;
conn.to << results.paths << results.bytesFreed << 0 /* obsolete */;
break;
}
@ -755,24 +749,24 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
ClientSettings clientSettings;
clientSettings.keepFailed = readInt(from);
clientSettings.keepGoing = readInt(from);
clientSettings.tryFallback = readInt(from);
clientSettings.verbosity = (Verbosity) readInt(from);
clientSettings.maxBuildJobs = readInt(from);
clientSettings.maxSilentTime = readInt(from);
readInt(from); // obsolete useBuildHook
clientSettings.verboseBuild = lvlError == (Verbosity) readInt(from);
readInt(from); // obsolete logType
readInt(from); // obsolete printBuildTrace
clientSettings.buildCores = readInt(from);
clientSettings.useSubstitutes = readInt(from);
clientSettings.keepFailed = readInt(conn.from);
clientSettings.keepGoing = readInt(conn.from);
clientSettings.tryFallback = readInt(conn.from);
clientSettings.verbosity = (Verbosity) readInt(conn.from);
clientSettings.maxBuildJobs = readInt(conn.from);
clientSettings.maxSilentTime = readInt(conn.from);
readInt(conn.from); // obsolete useBuildHook
clientSettings.verboseBuild = lvlError == (Verbosity) readInt(conn.from);
readInt(conn.from); // obsolete logType
readInt(conn.from); // obsolete printBuildTrace
clientSettings.buildCores = readInt(conn.from);
clientSettings.useSubstitutes = readInt(conn.from);
if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
unsigned int n = readInt(from);
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 12) {
unsigned int n = readInt(conn.from);
for (unsigned int i = 0; i < n; i++) {
auto name = readString(from);
auto value = readString(from);
auto name = readString(conn.from);
auto value = readString(conn.from);
clientSettings.overrides.emplace(name, value);
}
}
@ -789,20 +783,20 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::QuerySubstitutablePathInfo: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
SubstitutablePathInfos infos;
store->querySubstitutablePathInfos({{path, std::nullopt}}, infos);
logger->stopWork();
auto i = infos.find(path);
if (i == infos.end())
to << 0;
conn.to << 0;
else {
to << 1
conn.to << 1
<< (i->second.deriver ? store->printStorePath(*i->second.deriver) : "");
WorkerProto::write(*store, wconn, i->second.references);
to << i->second.downloadSize
<< i->second.narSize;
conn.to << i->second.downloadSize
<< i->second.narSize;
}
break;
}
@ -810,7 +804,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
case WorkerProto::Op::QuerySubstitutablePathInfos: {
SubstitutablePathInfos infos;
StorePathCAMap pathsMap = {};
if (GET_PROTOCOL_MINOR(clientVersion) < 22) {
if (GET_PROTOCOL_MINOR(conn.protoVersion) < 22) {
auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
for (auto & path : paths)
pathsMap.emplace(path, std::nullopt);
@ -819,12 +813,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->startWork();
store->querySubstitutablePathInfos(pathsMap, infos);
logger->stopWork();
to << infos.size();
conn.to << infos.size();
for (auto & i : infos) {
to << store->printStorePath(i.first)
<< (i.second.deriver ? store->printStorePath(*i.second.deriver) : "");
conn.to << store->printStorePath(i.first)
<< (i.second.deriver ? store->printStorePath(*i.second.deriver) : "");
WorkerProto::write(*store, wconn, i.second.references);
to << i.second.downloadSize << i.second.narSize;
conn.to << i.second.downloadSize << i.second.narSize;
}
break;
}
@ -838,22 +832,22 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::QueryPathInfo: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
std::shared_ptr<const ValidPathInfo> info;
logger->startWork();
try {
info = store->queryPathInfo(path);
} catch (InvalidPath &) {
if (GET_PROTOCOL_MINOR(clientVersion) < 17) throw;
if (GET_PROTOCOL_MINOR(conn.protoVersion) < 17) throw;
}
logger->stopWork();
if (info) {
if (GET_PROTOCOL_MINOR(clientVersion) >= 17)
to << 1;
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 17)
conn.to << 1;
WorkerProto::write(*store, wconn, static_cast<const UnkeyedValidPathInfo &>(*info));
} else {
assert(GET_PROTOCOL_MINOR(clientVersion) >= 17);
to << 0;
assert(GET_PROTOCOL_MINOR(conn.protoVersion) >= 17);
conn.to << 0;
}
break;
}
@ -862,61 +856,61 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->startWork();
store->optimiseStore();
logger->stopWork();
to << 1;
conn.to << 1;
break;
case WorkerProto::Op::VerifyStore: {
bool checkContents, repair;
from >> checkContents >> repair;
conn.from >> checkContents >> repair;
logger->startWork();
if (repair && !trusted)
throw Error("you are not privileged to repair paths");
bool errors = store->verifyStore(checkContents, (RepairFlag) repair);
logger->stopWork();
to << errors;
conn.to << errors;
break;
}
case WorkerProto::Op::AddSignatures: {
auto path = store->parseStorePath(readString(from));
StringSet sigs = readStrings<StringSet>(from);
auto path = store->parseStorePath(readString(conn.from));
StringSet sigs = readStrings<StringSet>(conn.from);
logger->startWork();
store->addSignatures(path, sigs);
logger->stopWork();
to << 1;
conn.to << 1;
break;
}
case WorkerProto::Op::NarFromPath: {
auto path = store->parseStorePath(readString(from));
auto path = store->parseStorePath(readString(conn.from));
logger->startWork();
logger->stopWork();
dumpPath(store->toRealPath(path), to);
dumpPath(store->toRealPath(path), conn.to);
break;
}
case WorkerProto::Op::AddToStoreNar: {
bool repair, dontCheckSigs;
auto path = store->parseStorePath(readString(from));
auto deriver = readString(from);
auto narHash = Hash::parseAny(readString(from), HashAlgorithm::SHA256);
auto path = store->parseStorePath(readString(conn.from));
auto deriver = readString(conn.from);
auto narHash = Hash::parseAny(readString(conn.from), HashAlgorithm::SHA256);
ValidPathInfo info { path, narHash };
if (deriver != "")
info.deriver = store->parseStorePath(deriver);
info.references = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
from >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(from);
info.ca = ContentAddress::parseOpt(readString(from));
from >> repair >> dontCheckSigs;
conn.from >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(conn.from);
info.ca = ContentAddress::parseOpt(readString(conn.from));
conn.from >> repair >> dontCheckSigs;
if (!trusted && dontCheckSigs)
dontCheckSigs = false;
if (!trusted)
info.ultimate = false;
if (GET_PROTOCOL_MINOR(clientVersion) >= 23) {
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 23) {
logger->startWork();
{
FramedSource source(from);
FramedSource source(conn.from);
store->addToStore(info, source, (RepairFlag) repair,
dontCheckSigs ? NoCheckSigs : CheckSigs);
}
@ -926,10 +920,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
else {
std::unique_ptr<Source> source;
StringSink saved;
if (GET_PROTOCOL_MINOR(clientVersion) >= 21)
source = std::make_unique<TunnelSource>(from, to);
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 21)
source = std::make_unique<TunnelSource>(conn.from, conn.to);
else {
TeeSource tee { from, saved };
TeeSource tee { conn.from, saved };
NullFileSystemObjectSink ether;
parseDump(ether, tee);
source = std::make_unique<StringSource>(saved.s);
@ -957,15 +951,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
WorkerProto::write(*store, wconn, willBuild);
WorkerProto::write(*store, wconn, willSubstitute);
WorkerProto::write(*store, wconn, unknown);
to << downloadSize << narSize;
conn.to << downloadSize << narSize;
break;
}
case WorkerProto::Op::RegisterDrvOutput: {
logger->startWork();
if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
auto outputId = DrvOutput::parse(readString(from));
auto outputPath = StorePath(readString(from));
if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) {
auto outputId = DrvOutput::parse(readString(conn.from));
auto outputPath = StorePath(readString(conn.from));
store->registerDrvOutput(Realisation{
.id = outputId, .outPath = outputPath});
} else {
@ -978,10 +972,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
case WorkerProto::Op::QueryRealisation: {
logger->startWork();
auto outputId = DrvOutput::parse(readString(from));
auto outputId = DrvOutput::parse(readString(conn.from));
auto info = store->queryRealisation(outputId);
logger->stopWork();
if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) {
std::set<StorePath> outPaths;
if (info) outPaths.insert(info->outPath);
WorkerProto::write(*store, wconn, outPaths);
@ -994,19 +988,19 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case WorkerProto::Op::AddBuildLog: {
StorePath path{readString(from)};
StorePath path{readString(conn.from)};
logger->startWork();
if (!trusted)
throw Error("you are not privileged to add logs");
auto & logStore = require<LogStore>(*store);
{
FramedSource source(from);
FramedSource source(conn.from);
StringSink sink;
source.drainInto(sink);
logStore.addBuildLog(path, sink.s);
}
logger->stopWork();
to << 1;
conn.to << 1;
break;
}
@ -1021,8 +1015,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
void processConnection(
ref<Store> store,
FdSource & from,
FdSink & to,
FdSource && from,
FdSink && to,
TrustedFlag trusted,
RecursiveFlag recursive)
{
@ -1031,14 +1025,20 @@ void processConnection(
#endif
/* Exchange the greeting. */
WorkerProto::Version clientVersion =
auto [protoVersion, features] =
WorkerProto::BasicServerConnection::handshake(
to, from, PROTOCOL_VERSION);
to, from, PROTOCOL_VERSION, WorkerProto::allFeatures);
if (clientVersion < 0x10a)
if (protoVersion < 0x10a)
throw Error("the Nix client version is too old");
auto tunnelLogger = new TunnelLogger(to, clientVersion);
WorkerProto::BasicServerConnection conn;
conn.to = std::move(to);
conn.from = std::move(from);
conn.protoVersion = protoVersion;
conn.features = features;
auto tunnelLogger = new TunnelLogger(conn.to, protoVersion);
auto prevLogger = nix::logger;
// FIXME
if (!recursive)
@ -1051,12 +1051,6 @@ void processConnection(
printMsgUsing(prevLogger, lvlDebug, "%d operations", opCount);
});
WorkerProto::BasicServerConnection conn {
.to = to,
.from = from,
.clientVersion = clientVersion,
};
conn.postHandshake(*store, {
.daemonNixVersion = nixVersion,
// We and the underlying store both need to trust the client for
@ -1072,13 +1066,13 @@ void processConnection(
try {
tunnelLogger->stopWork();
to.flush();
conn.to.flush();
/* Process client requests. */
while (true) {
WorkerProto::Op op;
try {
op = (enum WorkerProto::Op) readInt(from);
op = (enum WorkerProto::Op) readInt(conn.from);
} catch (Interrupted & e) {
break;
} catch (EndOfFile & e) {
@ -1092,7 +1086,7 @@ void processConnection(
debug("performing daemon worker op: %d", op);
try {
performOp(tunnelLogger, store, trusted, recursive, clientVersion, from, to, op);
performOp(tunnelLogger, store, trusted, recursive, conn, op);
} catch (Error & e) {
/* If we're not in a state where we can send replies, then
something went wrong processing the input of the
@ -1108,19 +1102,19 @@ void processConnection(
throw;
}
to.flush();
conn.to.flush();
assert(!tunnelLogger->state_.lock()->canSendStderr);
};
} catch (Error & e) {
tunnelLogger->stopWork(&e);
to.flush();
conn.to.flush();
return;
} catch (std::exception & e) {
auto ex = Error(e.what());
tunnelLogger->stopWork(&ex);
to.flush();
conn.to.flush();
return;
}
}

View file

@ -10,8 +10,8 @@ enum RecursiveFlag : bool { NotRecursive = false, Recursive = true };
void processConnection(
ref<Store> store,
FdSource & from,
FdSink & to,
FdSource && from,
FdSink && to,
TrustedFlag trusted,
RecursiveFlag recursive);

View file

@ -6,6 +6,13 @@ namespace nix {
struct DummyStoreConfig : virtual StoreConfig {
using StoreConfig::StoreConfig;
DummyStoreConfig(std::string_view scheme, std::string_view authority, const Params & params)
: StoreConfig(params)
{
if (!authority.empty())
throw UsageError("`%s` store URIs must not contain an authority part %s", scheme, authority);
}
const std::string name() override { return "Dummy Store"; }
std::string doc() override
@ -14,21 +21,22 @@ struct DummyStoreConfig : virtual StoreConfig {
#include "dummy-store.md"
;
}
static std::set<std::string> uriSchemes() {
return {"dummy"};
}
};
struct DummyStore : public virtual DummyStoreConfig, public virtual Store
{
DummyStore(std::string_view scheme, std::string_view authority, const Params & params)
: DummyStore(params)
{
if (!authority.empty())
throw UsageError("`%s` store URIs must not contain an authority part %s", scheme, authority);
}
: StoreConfig(params)
, DummyStoreConfig(scheme, authority, params)
, Store(params)
{ }
DummyStore(const Params & params)
: StoreConfig(params)
, DummyStoreConfig(params)
, Store(params)
: DummyStore("dummy", "", params)
{ }
std::string getUri() override
@ -50,10 +58,6 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
return Trusted;
}
static std::set<std::string> uriSchemes() {
return {"dummy"};
}
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }

View file

@ -71,7 +71,10 @@ struct curlFileTransfer : public FileTransfer
curl_off_t writtenToSink = 0;
std::chrono::steady_clock::time_point startTime = std::chrono::steady_clock::now();
inline static const std::set<long> successfulStatuses {200, 201, 204, 206, 304, 0 /* other protocol */};
/* Get the HTTP status code, or 0 for other protocols. */
long getHTTPStatus()
{
@ -373,10 +376,14 @@ struct curlFileTransfer : public FileTransfer
void finish(CURLcode code)
{
auto finishTime = std::chrono::steady_clock::now();
auto httpStatus = getHTTPStatus();
debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes",
request.verb(), request.uri, code, httpStatus, result.bodySize);
debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes, duration = %.2f s",
request.verb(), request.uri, code, httpStatus, result.bodySize,
std::chrono::duration_cast<std::chrono::milliseconds>(finishTime - startTime).count() / 1000.0f
);
appendCurrentUrl();
@ -851,8 +858,10 @@ void FileTransfer::download(
buffer). We don't wait forever to prevent stalling the
download thread. (Hopefully sleeping will throttle the
sender.) */
if (state->data.size() > 1024 * 1024) {
if (state->data.size() > fileTransferSettings.downloadBufferSize) {
debug("download buffer is full; going to sleep");
static bool haveWarned = false;
warnOnce(haveWarned, "download buffer is full; consider increasing the 'download-buffer-size' setting");
state.wait_for(state->request, std::chrono::seconds(10));
}

View file

@ -47,6 +47,12 @@ struct FileTransferSettings : Config
Setting<unsigned int> tries{this, 5, "download-attempts",
"How often Nix will attempt to download a file before giving up."};
Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size",
R"(
The size of Nix's internal download buffer in bytes during `curl` transfers. If data is
not consumed quickly enough and the buffered amount exceeds this size, downloads may stall.
)"};
};
extern FileTransferSettings fileTransferSettings;
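For anyone who hits the new "download buffer is full" warning added in the previous hunk, the knob can be raised in `nix.conf`; the value below (128 MiB, expressed in bytes) is an arbitrary illustration, not a recommendation made by this change.

```
# nix.conf (illustrative): double the default 64 MiB download buffer
download-buffer-size = 134217728
```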

View file

@ -559,7 +559,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
non-blocking flag from the server socket, so
explicitly make it blocking. */
if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
abort();
panic("Could not set non-blocking flag on client socket");
while (true) {
try {
@ -891,7 +891,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
void LocalStore::autoGC(bool sync)
{
#ifdef HAVE_STATVFS
#if HAVE_STATVFS
static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE");
auto getAvail = [this]() -> uint64_t {

View file

@ -15,7 +15,6 @@
#include <nlohmann/json.hpp>
#ifndef _WIN32
# include <dlfcn.h>
# include <sys/utsname.h>
#endif
@ -298,25 +297,28 @@ template<> std::string BaseSetting<SandboxMode>::to_string() const
if (value == smEnabled) return "true";
else if (value == smRelaxed) return "relaxed";
else if (value == smDisabled) return "false";
else abort();
else unreachable();
}
template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
{
args.addFlag({
.longName = name,
.aliases = aliases,
.description = "Enable sandboxing.",
.category = category,
.handler = {[this]() { override(smEnabled); }}
});
args.addFlag({
.longName = "no-" + name,
.aliases = aliases,
.description = "Disable sandboxing.",
.category = category,
.handler = {[this]() { override(smDisabled); }}
});
args.addFlag({
.longName = "relaxed-" + name,
.aliases = aliases,
.description = "Enable sandboxing, but allow builds to disable it.",
.category = category,
.handler = {[this]() { override(smRelaxed); }}
@ -335,60 +337,6 @@ unsigned int MaxBuildJobsSetting::parse(const std::string & str) const
}
Paths PluginFilesSetting::parse(const std::string & str) const
{
if (pluginsLoaded)
throw UsageError("plugin-files set after plugins were loaded, you may need to move the flag before the subcommand");
return BaseSetting<Paths>::parse(str);
}
void initPlugins()
{
assert(!settings.pluginFiles.pluginsLoaded);
for (const auto & pluginFile : settings.pluginFiles.get()) {
std::vector<std::filesystem::path> pluginFiles;
try {
auto ents = std::filesystem::directory_iterator{pluginFile};
for (const auto & ent : ents) {
checkInterrupt();
pluginFiles.emplace_back(ent.path());
}
} catch (std::filesystem::filesystem_error & e) {
if (e.code() != std::errc::not_a_directory)
throw;
pluginFiles.emplace_back(pluginFile);
}
for (const auto & file : pluginFiles) {
checkInterrupt();
/* handle is purposefully leaked as there may be state in the
DSO needed by the action of the plugin. */
#ifndef _WIN32 // TODO implement via DLL loading on Windows
void *handle =
dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL);
if (!handle)
throw Error("could not dynamically open plugin file '%s': %s", file, dlerror());
/* Older plugins use a statically initialized object to run their code.
Newer plugins can also export nix_plugin_entry() */
void (*nix_plugin_entry)() = (void (*)())dlsym(handle, "nix_plugin_entry");
if (nix_plugin_entry)
nix_plugin_entry();
#else
throw Error("could not dynamically open plugin file '%s'", file);
#endif
}
}
/* Since plugins can add settings, try to re-apply previously
unknown settings. */
globalConfig.reapplyUnknownSettings();
globalConfig.warnUnknownSettings();
/* Tell the user if they try to set plugin-files after we've already loaded */
settings.pluginFiles.pluginsLoaded = true;
}
static void preloadNSS()
{
/* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of

View file

@ -31,23 +31,6 @@ struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
unsigned int parse(const std::string & str) const override;
};
struct PluginFilesSetting : public BaseSetting<Paths>
{
bool pluginsLoaded = false;
PluginFilesSetting(Config * options,
const Paths & def,
const std::string & name,
const std::string & description,
const std::set<std::string> & aliases = {})
: BaseSetting<Paths>(def, true, name, description, aliases)
{
options->addSetting(this);
}
Paths parse(const std::string & str) const override;
};
const uint32_t maxIdsPerBuild =
#if __linux__
1 << 16
@ -303,7 +286,7 @@ public:
For backward compatibility, `ssh://` may be omitted.
The hostname may be an alias defined in `~/.ssh/config`.
2. A comma-separated list of [Nix system types](@docroot@/contributing/hacking.md#system-type).
2. A comma-separated list of [Nix system types](@docroot@/development/building.md#system-type).
If omitted, this defaults to the local platform type.
> **Example**
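For illustration only, a hypothetical entry combining the two fields documented above (store URI plus comma-separated system types) might look like:
ssh://builder.example.org x86_64-linux,aarch64-linux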
@ -883,13 +866,13 @@ public:
- `ca-derivations`
Included by default if the [`ca-derivations` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-ca-derivations) is enabled.
Included by default if the [`ca-derivations` experimental feature](@docroot@/development/experimental-features.md#xp-feature-ca-derivations) is enabled.
This system feature is implicitly required by derivations with the [`__contentAddressed` attribute](@docroot@/language/advanced-attributes.md#adv-attr-__contentAddressed).
- `recursive-nix`
Included by default if the [`recursive-nix` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-recursive-nix) is enabled.
Included by default if the [`recursive-nix` experimental feature](@docroot@/development/experimental-features.md#xp-feature-recursive-nix) is enabled.
- `uid-range`
@ -1158,33 +1141,6 @@ public:
Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval",
"Number of seconds between checking free disk space."};
PluginFilesSetting pluginFiles{
this, {}, "plugin-files",
R"(
A list of plugin files to be loaded by Nix. Each of these files will
be dlopened by Nix. If they contain the symbol `nix_plugin_entry()`,
this symbol will be called. Alternatively, they can affect execution
through static initialization. In particular, these plugins may construct
static instances of RegisterPrimOp to add new primops or constants to the
expression language, RegisterStoreImplementation to add new store
implementations, RegisterCommand to add new subcommands to the `nix`
command, and RegisterSetting to add new nix config settings. See the
constructors for those types for more details.
Warning! These APIs are inherently unstable and may change from
release to release.
Since these files are loaded into the same address space as Nix
itself, they must be DSOs compatible with the instance of Nix
running at the time (i.e. compiled against the same headers, not
linked to any incompatible libraries). They should not be linked to
any Nix libs directly, as those will be available already at load
time.
If an entry in the list is a directory, all files in the directory
are loaded as plugins (non-recursively).
)"};
Setting<size_t> narBufferSize{this, 32 * 1024 * 1024, "nar-buffer-size",
"Maximum size of NARs before spilling them to disk."};
@ -1278,12 +1234,6 @@ public:
// FIXME: don't use a global variable.
extern Settings settings;
/**
* This should be called after settings are initialized, but before
* anything else
*/
void initPlugins();
/**
* Load the configuration (from `nix.conf`, `NIX_CONFIG`, etc.) into the
* given configuration object.

View file

@ -1,4 +1,4 @@
#include "binary-cache-store.hh"
#include "http-binary-cache-store.hh"
#include "filetransfer.hh"
#include "globals.hh"
#include "nar-info-disk-cache.hh"
@ -8,26 +8,37 @@ namespace nix {
MakeError(UploadToHTTP, Error);
struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
HttpBinaryCacheStoreConfig::HttpBinaryCacheStoreConfig(
std::string_view scheme,
std::string_view _cacheUri,
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, cacheUri(
std::string { scheme }
+ "://"
+ (!_cacheUri.empty()
? _cacheUri
: throw UsageError("`%s` Store requires a non-empty authority in Store URL", scheme)))
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
while (!cacheUri.empty() && cacheUri.back() == '/')
cacheUri.pop_back();
}
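To make the new constructor's behaviour concrete, a small illustration (values hypothetical) of the normalization it performs:
// HttpBinaryCacheStoreConfig cfg{"https", "cache.example.org/", params};
// -> cfg.cacheUri == "https://cache.example.org"   (trailing '/' stripped)
// An empty authority, e.g. {"https", "", params}, throws UsageError instead.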
const std::string name() override { return "HTTP Binary Cache Store"; }
std::string doc() override
{
return
#include "http-binary-cache-store.md"
;
}
};
std::string HttpBinaryCacheStoreConfig::doc()
{
return
#include "http-binary-cache-store.md"
;
}
class HttpBinaryCacheStore : public virtual HttpBinaryCacheStoreConfig, public virtual BinaryCacheStore
{
private:
Path cacheUri;
struct State
{
bool enabled = true;
@ -40,23 +51,14 @@ public:
HttpBinaryCacheStore(
std::string_view scheme,
PathView _cacheUri,
PathView cacheUri,
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, HttpBinaryCacheStoreConfig(params)
, HttpBinaryCacheStoreConfig(scheme, cacheUri, params)
, Store(params)
, BinaryCacheStore(params)
, cacheUri(
std::string { scheme }
+ "://"
+ (!_cacheUri.empty()
? _cacheUri
: throw UsageError("`%s` Store requires a non-empty authority in Store URL", scheme)))
{
while (!cacheUri.empty() && cacheUri.back() == '/')
cacheUri.pop_back();
diskCache = getNarInfoDiskCache();
}
@ -81,14 +83,6 @@ public:
}
}
static std::set<std::string> uriSchemes()
{
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
auto ret = std::set<std::string>({"http", "https"});
if (forceHttp) ret.insert("file");
return ret;
}
protected:
void maybeDisable()

View file

@ -0,0 +1,30 @@
#include "binary-cache-store.hh"
namespace nix {
struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
HttpBinaryCacheStoreConfig(std::string_view scheme, std::string_view _cacheUri, const Params & params);
Path cacheUri;
const std::string name() override
{
return "HTTP Binary Cache Store";
}
static std::set<std::string> uriSchemes()
{
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
auto ret = std::set<std::string>({"http", "https"});
if (forceHttp)
ret.insert("file");
return ret;
}
std::string doc() override;
};
}

View file

@ -26,6 +26,8 @@ struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig
const std::string name() override { return "SSH Store"; }
static std::set<std::string> uriSchemes() { return {"ssh"}; }
std::string doc() override;
};
@ -46,8 +48,6 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
SSHMaster master;
static std::set<std::string> uriSchemes() { return {"ssh"}; }
LegacySSHStore(
std::string_view scheme,
std::string_view host,

View file

@ -1,4 +1,4 @@
#include "binary-cache-store.hh"
#include "local-binary-cache-store.hh"
#include "globals.hh"
#include "nar-info-disk-cache.hh"
#include "signals.hh"
@ -7,28 +7,27 @@
namespace nix {
struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
LocalBinaryCacheStoreConfig::LocalBinaryCacheStoreConfig(
std::string_view scheme,
PathView binaryCacheDir,
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, binaryCacheDir(binaryCacheDir)
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
}
const std::string name() override { return "Local Binary Cache Store"; }
std::string doc() override
{
return
#include "local-binary-cache-store.md"
;
}
};
class LocalBinaryCacheStore : public virtual LocalBinaryCacheStoreConfig, public virtual BinaryCacheStore
std::string LocalBinaryCacheStoreConfig::doc()
{
private:
return
#include "local-binary-cache-store.md"
;
}
Path binaryCacheDir;
public:
struct LocalBinaryCacheStore : virtual LocalBinaryCacheStoreConfig, virtual BinaryCacheStore
{
/**
* @param binaryCacheDir `file://` is a short-hand for `file:///`
* for now.
@ -39,10 +38,9 @@ public:
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, LocalBinaryCacheStoreConfig(params)
, LocalBinaryCacheStoreConfig(scheme, binaryCacheDir, params)
, Store(params)
, BinaryCacheStore(params)
, binaryCacheDir(binaryCacheDir)
{
}
@ -53,8 +51,6 @@ public:
return "file://" + binaryCacheDir;
}
static std::set<std::string> uriSchemes();
protected:
bool fileExists(const std::string & path) override;
@ -123,7 +119,7 @@ bool LocalBinaryCacheStore::fileExists(const std::string & path)
return pathExists(binaryCacheDir + "/" + path);
}
std::set<std::string> LocalBinaryCacheStore::uriSchemes()
std::set<std::string> LocalBinaryCacheStoreConfig::uriSchemes()
{
if (getEnv("_NIX_FORCE_HTTP") == "1")
return {};

View file

@ -0,0 +1,23 @@
#include "binary-cache-store.hh"
namespace nix {
struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
LocalBinaryCacheStoreConfig(std::string_view scheme, PathView binaryCacheDir, const Params & params);
Path binaryCacheDir;
const std::string name() override
{
return "Local Binary Cache Store";
}
static std::set<std::string> uriSchemes();
std::string doc() override;
};
}

View file

@ -8,6 +8,20 @@
namespace nix {
LocalFSStoreConfig::LocalFSStoreConfig(PathView rootDir, const Params & params)
: StoreConfig(params)
// Default `?root` from `rootDir` if not set
// FIXME don't duplicate description once we don't have root setting
, rootDir{
this,
!rootDir.empty() && params.count("root") == 0
? (std::optional<Path>{rootDir})
: std::nullopt,
"root",
"Directory prefixed to all other paths."}
{
}
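A brief illustration of the defaulting rule implemented just above (paths hypothetical):
// LocalFSStoreConfig{"/mnt/nix", params}            // no ?root given -> rootDir defaults to /mnt/nix
// LocalFSStoreConfig{"/mnt/nix", {{"root", "/x"}}}  // explicit ?root param wins -> rootDir is /x
// LocalFSStoreConfig{"", params}                    // empty path -> rootDir stays unset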
LocalFSStore::LocalFSStore(const Params & params)
: Store(params)
{

View file

@ -11,6 +11,15 @@ struct LocalFSStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
/**
* Used to override the `root` settings. Can't be done via modifying
* `params` reliably because this parameter is unused except for
* passing to base class constructors.
*
* @todo Make this less error-prone with new store settings system.
*/
LocalFSStoreConfig(PathView path, const Params & params);
const OptionalPathSetting rootDir{this, std::nullopt,
"root",
"Directory prefixed to all other paths."};

View file

@ -18,11 +18,11 @@ Path LocalOverlayStoreConfig::toUpperPath(const StorePath & path) {
return upperLayer + "/" + path.to_string();
}
LocalOverlayStore::LocalOverlayStore(const Params & params)
LocalOverlayStore::LocalOverlayStore(std::string_view scheme, PathView path, const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)
, LocalFSStoreConfig(path, params)
, LocalStoreConfig(params)
, LocalOverlayStoreConfig(params)
, LocalOverlayStoreConfig(scheme, path, params)
, Store(params)
, LocalFSStore(params)
, LocalStore(params)

View file

@ -8,11 +8,16 @@ namespace nix {
struct LocalOverlayStoreConfig : virtual LocalStoreConfig
{
LocalOverlayStoreConfig(const StringMap & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)
, LocalStoreConfig(params)
: LocalOverlayStoreConfig("local-overlay", "", params)
{ }
LocalOverlayStoreConfig(std::string_view scheme, PathView path, const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(path, params)
, LocalStoreConfig(scheme, path, params)
{
}
const Setting<std::string> lowerStoreUri{(StoreConfig*) this, "", "lower-store",
R"(
[Store URL](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
@ -58,6 +63,11 @@ struct LocalOverlayStoreConfig : virtual LocalStoreConfig
return ExperimentalFeature::LocalOverlayStore;
}
static std::set<std::string> uriSchemes()
{
return { "local-overlay" };
}
std::string doc() override;
protected:
@ -90,19 +100,12 @@ class LocalOverlayStore : public virtual LocalOverlayStoreConfig, public virtual
ref<LocalFSStore> lowerStore;
public:
LocalOverlayStore(const Params & params);
LocalOverlayStore(std::string_view scheme, PathView path, const Params & params)
: LocalOverlayStore(params)
LocalOverlayStore(const Params & params)
: LocalOverlayStore("local-overlay", "", params)
{
if (!path.empty())
throw UsageError("local-overlay:// store url doesn't support path part, only scheme and query params");
}
static std::set<std::string> uriSchemes()
{
return { "local-overlay" };
}
LocalOverlayStore(std::string_view scheme, PathView path, const Params & params);
std::string getUri() override
{

View file

@ -56,6 +56,15 @@
namespace nix {
LocalStoreConfig::LocalStoreConfig(
std::string_view scheme,
std::string_view authority,
const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(authority, params)
{
}
std::string LocalStoreConfig::doc()
{
return
@ -183,10 +192,13 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
}
}
LocalStore::LocalStore(const Params & params)
LocalStore::LocalStore(
std::string_view scheme,
PathView path,
const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)
, LocalStoreConfig(params)
, LocalFSStoreConfig(path, params)
, LocalStoreConfig(scheme, path, params)
, Store(params)
, LocalFSStore(params)
, dbDir(stateDir + "/db")
@ -465,19 +477,8 @@ LocalStore::LocalStore(const Params & params)
}
LocalStore::LocalStore(
std::string_view scheme,
PathView path,
const Params & _params)
: LocalStore([&]{
// Default `?root` from `path` if non set
if (!path.empty() && _params.count("root") == 0) {
auto params = _params;
params.insert_or_assign("root", std::string { path });
return params;
}
return _params;
}())
LocalStore::LocalStore(const Params & params)
: LocalStore("local", "", params)
{
}

View file

@ -38,6 +38,11 @@ struct LocalStoreConfig : virtual LocalFSStoreConfig
{
using LocalFSStoreConfig::LocalFSStoreConfig;
LocalStoreConfig(
std::string_view scheme,
std::string_view authority,
const Params & params);
Setting<bool> requireSigs{this,
settings.requireSigs,
"require-sigs",
@ -62,6 +67,9 @@ struct LocalStoreConfig : virtual LocalFSStoreConfig
const std::string name() override { return "Local Store"; }
static std::set<std::string> uriSchemes()
{ return {"local"}; }
std::string doc() override;
};
@ -144,9 +152,6 @@ public:
~LocalStore();
static std::set<std::string> uriSchemes()
{ return {"local"}; }
/**
* Implementations of abstract store API methods.
*/

View file

@ -55,6 +55,7 @@ configdata.set('CAN_LINK_SYMLINK', can_link_symlink.to_int())
check_funcs = [
# Optionally used for canonicalising files from the build
'lchown',
'statvfs',
]
foreach funcspec : check_funcs
define_name = 'HAVE_' + funcspec.underscorify().to_upper()
@ -72,6 +73,7 @@ subdir('build-utils-meson/threads')
boost = dependency(
'boost',
modules : ['container'],
include_type: 'system',
)
# boost is a public dependency, but not a pkg-config dependency unfortunately, so we
# put in `deps_other`.
@ -99,6 +101,23 @@ deps_public += nlohmann_json
sqlite = dependency('sqlite3', 'sqlite', version : '>=3.6.19')
deps_private += sqlite
# AWS C++ SDK has bad pkg-config
aws_s3 = dependency('aws-cpp-sdk-s3', required : false)
configdata.set('ENABLE_S3', aws_s3.found().to_int())
if aws_s3.found()
aws_s3 = declare_dependency(
include_directories: include_directories(aws_s3.get_variable('includedir')),
link_args: [
'-L' + aws_s3.get_variable('libdir'),
'-laws-cpp-sdk-transfer',
'-laws-cpp-sdk-s3',
'-laws-cpp-sdk-core',
'-laws-crt-cpp',
],
).as_system('system')
endif
deps_other += aws_s3
subdir('build-utils-meson/generate-header')
generated_headers = []
@ -243,10 +262,12 @@ headers = [config_h] + files(
'filetransfer.hh',
'gc-store.hh',
'globals.hh',
'http-binary-cache-store.hh',
'indirect-root-store.hh',
'keys.hh',
'legacy-ssh-store.hh',
'length-prefixed-protocol-helper.hh',
'local-binary-cache-store.hh',
'local-fs-store.hh',
'local-overlay-store.hh',
'local-store.hh',

View file

@ -164,7 +164,7 @@ public:
Cache & getCache(State & state, const std::string & uri)
{
auto i = state.caches.find(uri);
if (i == state.caches.end()) abort();
if (i == state.caches.end()) unreachable();
return i->second;
}
@ -211,7 +211,7 @@ public:
{
auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
if (!r.next()) { abort(); }
if (!r.next()) { unreachable(); }
ret.id = (int) r.getInt(0);
}

View file

@ -66,10 +66,7 @@ mkMesonDerivation (finalAttrs: {
] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp
# There have been issues building these dependencies
++ lib.optional (stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin))
(aws-sdk-cpp.override {
apis = ["s3" "transfer"];
customMemoryManagement = false;
})
aws-sdk-cpp
;
propagatedBuildInputs = [
@ -101,12 +98,8 @@ mkMesonDerivation (finalAttrs: {
LDFLAGS = "-fuse-ld=gold";
};
enableParallelBuilding = true;
separateDebugInfo = !stdenv.hostPlatform.isStatic;
strictDeps = true;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
meta = {

View file

@ -73,8 +73,11 @@ void RemoteStore::initConnection(Connection & conn)
StringSink saved;
TeeSource tee(conn.from, saved);
try {
conn.daemonVersion = WorkerProto::BasicClientConnection::handshake(
conn.to, tee, PROTOCOL_VERSION);
auto [protoVersion, features] = WorkerProto::BasicClientConnection::handshake(
conn.to, tee, PROTOCOL_VERSION,
WorkerProto::allFeatures);
conn.protoVersion = protoVersion;
conn.features = features;
} catch (SerialisationError & e) {
/* In case the other side is waiting for our input, close
it. */
@ -88,6 +91,9 @@ void RemoteStore::initConnection(Connection & conn)
static_cast<WorkerProto::ClientHandshakeInfo &>(conn) = conn.postHandshake(*this);
for (auto & feature : conn.features)
debug("negotiated feature '%s'", feature);
auto ex = conn.processStderrReturn();
if (ex) std::rethrow_exception(ex);
}
@ -115,7 +121,7 @@ void RemoteStore::setOptions(Connection & conn)
<< settings.buildCores
<< settings.useSubstitutes;
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 12) {
std::map<std::string, Config::SettingInfo> overrides;
settings.getSettings(overrides, true); // libstore settings
fileTransferSettings.getSettings(overrides, true);
@ -128,7 +134,7 @@ void RemoteStore::setOptions(Connection & conn)
overrides.erase(settings.useSubstitutes.name);
overrides.erase(loggerSettings.showTrace.name);
overrides.erase(experimentalFeatureSettings.experimentalFeatures.name);
overrides.erase(settings.pluginFiles.name);
overrides.erase("plugin-files");
conn.to << overrides.size();
for (auto & i : overrides)
conn.to << i.first << i.second.value;
@ -175,7 +181,7 @@ bool RemoteStore::isValidPathUncached(const StorePath & path)
StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute)
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 12) {
StorePathSet res;
for (auto & i : paths)
if (isValidPath(i)) res.insert(i);
@ -198,7 +204,7 @@ StorePathSet RemoteStore::queryAllValidPaths()
StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths)
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 12) {
StorePathSet res;
for (auto & i : paths) {
conn->to << WorkerProto::Op::HasSubstitutes << printStorePath(i);
@ -221,7 +227,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 12) {
for (auto & i : pathsMap) {
SubstitutablePathInfo info;
@ -241,7 +247,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
} else {
conn->to << WorkerProto::Op::QuerySubstitutablePathInfos;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 22) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 22) {
StorePathSet paths;
for (auto & path : pathsMap)
paths.insert(path.first);
@ -368,7 +374,7 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
std::optional<ConnectionHandle> conn_(getConnection());
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 25) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) >= 25) {
conn->to
<< WorkerProto::Op::AddToStore
@ -485,7 +491,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 18) {
auto source2 = sinkToSource([&](Sink & sink) {
sink << 1 // == path follows
;
@ -513,11 +519,11 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
<< info.ultimate << info.sigs << renderContentAddress(info.ca)
<< repair << !checkSigs;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 23) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) >= 23) {
conn.withFramedSink([&](Sink & sink) {
copyNAR(source, sink);
});
} else if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21) {
} else if (GET_PROTOCOL_MINOR(conn->protoVersion) >= 21) {
conn.processStderr(0, &source);
} else {
copyNAR(source, conn->to);
@ -554,7 +560,7 @@ void RemoteStore::addMultipleToStore(
RepairFlag repair,
CheckSigsFlag checkSigs)
{
if (GET_PROTOCOL_MINOR(getConnection()->daemonVersion) >= 32) {
if (GET_PROTOCOL_MINOR(getConnection()->protoVersion) >= 32) {
auto conn(getConnection());
conn->to
<< WorkerProto::Op::AddMultipleToStore
@ -572,7 +578,7 @@ void RemoteStore::registerDrvOutput(const Realisation & info)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::RegisterDrvOutput;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 31) {
conn->to << info.id.to_string();
conn->to << std::string(info.outPath.to_string());
} else {
@ -587,7 +593,7 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id,
try {
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 27) {
warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
return callback(nullptr);
}
@ -597,7 +603,7 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id,
conn.processStderr();
auto real = [&]() -> std::shared_ptr<const Realisation> {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 31) {
auto outPaths = WorkerProto::Serialise<std::set<StorePath>>::read(
*this, *conn);
if (outPaths.empty())
@ -644,9 +650,9 @@ void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMod
auto conn(getConnection());
conn->to << WorkerProto::Op::BuildPaths;
assert(GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13);
assert(GET_PROTOCOL_MINOR(conn->protoVersion) >= 13);
WorkerProto::write(*this, *conn, drvPaths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
if (GET_PROTOCOL_MINOR(conn->protoVersion) >= 15)
conn->to << buildMode;
else
/* Old daemons did not take a 'buildMode' parameter, so we
@ -667,7 +673,7 @@ std::vector<KeyedBuildResult> RemoteStore::buildPathsWithResults(
std::optional<ConnectionHandle> conn_(getConnection());
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 34) {
if (GET_PROTOCOL_MINOR(conn->protoVersion) >= 34) {
conn->to << WorkerProto::Op::BuildPathsWithResults;
WorkerProto::write(*this, *conn, paths);
conn->to << buildMode;
@ -841,7 +847,7 @@ void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
{
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
if (GET_PROTOCOL_MINOR(conn->protoVersion) < 19)
// Don't hold the connection handle in the fallback case
// to prevent a deadlock.
goto fallback;
@ -889,7 +895,7 @@ void RemoteStore::connect()
unsigned int RemoteStore::getProtocol()
{
auto conn(connections->get());
return conn->daemonVersion;
return conn->protoVersion;
}
std::optional<TrustedFlag> RemoteStore::isTrustedClient()
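A note on the recurring version checks in the hunks above: the worker-protocol version packs major and minor into a single integer, and `GET_PROTOCOL_MINOR` extracts the low byte, which is what all the `>= N` feature gates compare. Sketch of the packing (macro shapes as commonly defined in the worker protocol header; treat the exact definitions as an assumption):
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
// e.g. gating a newer request form on the negotiated connection version:
// if (GET_PROTOCOL_MINOR(conn->protoVersion) >= 25) { /* use the newer AddToStore form */ }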

Some files were not shown because too many files have changed in this diff.