
Merge remote-tracking branch 'origin/master' into coerce-string

Eelco Dolstra 2023-01-02 20:53:39 +01:00
commit 6b69652385
233 changed files with 5278 additions and 2874 deletions

View file

@ -186,12 +186,12 @@ static int main_build_remote(int argc, char * * argv)
// build the hint template.
std::string errorText =
"Failed to find a machine for remote build!\n"
"derivation: %s\nrequired (system, features): (%s, %s)";
"derivation: %s\nrequired (system, features): (%s, [%s])";
errorText += "\n%s available machines:";
errorText += "\n(systems, maxjobs, supportedFeatures, mandatoryFeatures)";
for (unsigned int i = 0; i < machines.size(); ++i)
errorText += "\n(%s, %s, %s, %s)";
errorText += "\n([%s], %s, [%s], [%s])";
// add the template values.
std::string drvstr;

View file

@ -226,7 +226,7 @@ MixProfile::MixProfile()
{
addFlag({
.longName = "profile",
.description = "The profile to update.",
.description = "The profile to operate on.",
.labels = {"path"},
.handler = {&profile},
.completer = completePath

View file

@ -32,7 +32,77 @@ MixEvalArgs::MixEvalArgs()
addFlag({
.longName = "include",
.shortName = 'I',
.description = "Add *path* to the list of locations used to look up `<...>` file names.",
.description = R"(
Add *path* to the Nix search path. The Nix search path is
initialized from the colon-separated [`NIX_PATH`](./env-common.md#env-NIX_PATH) environment
variable, and is used to look up the location of Nix expressions using [paths](../language/values.md#type-path) enclosed in angle
brackets (i.e., `<nixpkgs>`).
For instance, passing
```
-I /home/eelco/Dev
-I /etc/nixos
```
will cause Nix to look for paths relative to `/home/eelco/Dev` and
`/etc/nixos`, in that order. This is equivalent to setting the
`NIX_PATH` environment variable to
```
/home/eelco/Dev:/etc/nixos
```
It is also possible to match paths against a prefix. For example,
passing
```
-I nixpkgs=/home/eelco/Dev/nixpkgs-branch
-I /etc/nixos
```
will cause Nix to search for `<nixpkgs/path>` in
`/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
If a path in the Nix search path starts with `http://` or `https://`,
it is interpreted as the URL of a tarball that will be downloaded and
unpacked to a temporary location. The tarball must consist of a single
top-level directory. For example, passing
```
-I nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
```
tells Nix to download and use the current contents of the `master`
branch in the `nixpkgs` repository.
The URLs of the tarballs from the official `nixos.org` channels
(see [the manual page for `nix-channel`](../nix-channel.md)) can be
abbreviated as `channel:<channel-name>`. For instance, the
following two flags are equivalent:
```
-I nixpkgs=channel:nixos-21.05
-I nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
```
You can also fetch source trees using [flake URLs](./nix3-flake.md#url-like-syntax) and add them to the
search path. For instance,
```
-I nixpkgs=flake:nixpkgs
```
specifies that the prefix `nixpkgs` shall refer to the source tree
downloaded from the `nixpkgs` entry in the flake registry. Similarly,
```
-I nixpkgs=flake:github:NixOS/nixpkgs/nixos-22.05
```
makes `<nixpkgs>` refer to a particular branch of the
`NixOS/nixpkgs` repository on GitHub.
)",
.category = category,
.labels = {"path"},
.handler = {[&](std::string s) { searchPath.push_back(s); }}
@ -89,14 +159,25 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
Path lookupFileArg(EvalState & state, std::string_view s)
{
if (isUri(s)) {
return state.store->toRealPath(
fetchers::downloadTarball(
state.store, resolveUri(s), "source", false).first.storePath);
} else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
if (EvalSettings::isPseudoUrl(s)) {
auto storePath = fetchers::downloadTarball(
state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath;
return state.store->toRealPath(storePath);
}
else if (hasPrefix(s, "flake:")) {
settings.requireExperimentalFeature(Xp::Flakes);
auto flakeRef = parseFlakeRef(std::string(s.substr(6)), {}, true, false);
auto storePath = flakeRef.resolve(state.store).fetchTree(state.store).first.storePath;
return state.store->toRealPath(storePath);
}
else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
Path p(s.substr(1, s.size() - 2));
return state.findFile(p);
} else
}
else
return absPath(std::string(s));
}
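For orientation, here is a rough sketch of the argument forms the updated `lookupFileArg` now handles, inferred from the branches above; the calls and their outcomes are illustrative assumptions, not part of the commit:

```cpp
// Illustrative only — behaviour inferred from the new lookupFileArg branches:
// lookupFileArg(state, "channel:nixos-22.05");  // pseudo-URL: tarball downloaded, real store path returned
// lookupFileArg(state, "flake:nixpkgs");        // flake ref: source tree fetched, real store path returned
// lookupFileArg(state, "<nixpkgs>");            // angle brackets: resolved via the Nix search path (findFile)
// lookupFileArg(state, "./foo.nix");            // anything else: made absolute with absPath()
```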

View file

@ -168,7 +168,7 @@ SourceExprCommand::SourceExprCommand(bool supportReadOnlyMode)
addFlag({
.longName = "derivation",
.description = "Operate on the store derivation rather than its outputs.",
.description = "Operate on the [store derivation](../../glossary.md#gloss-store-derivation) rather than its outputs.",
.category = installablesCategory,
.handler = {&operateOn, OperateOn::Derivation},
});
@ -207,55 +207,59 @@ Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()
void SourceExprCommand::completeInstallable(std::string_view prefix)
{
if (file) {
completionType = ctAttrs;
try {
if (file) {
completionType = ctAttrs;
evalSettings.pureEval = false;
auto state = getEvalState();
Expr *e = state->parseExprFromFile(
resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
);
evalSettings.pureEval = false;
auto state = getEvalState();
Expr *e = state->parseExprFromFile(
resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
);
Value root;
state->eval(e, root);
Value root;
state->eval(e, root);
auto autoArgs = getAutoArgs(*state);
auto autoArgs = getAutoArgs(*state);
std::string prefix_ = std::string(prefix);
auto sep = prefix_.rfind('.');
std::string searchWord;
if (sep != std::string::npos) {
searchWord = prefix_.substr(sep + 1, std::string::npos);
prefix_ = prefix_.substr(0, sep);
} else {
searchWord = prefix_;
prefix_ = "";
}
std::string prefix_ = std::string(prefix);
auto sep = prefix_.rfind('.');
std::string searchWord;
if (sep != std::string::npos) {
searchWord = prefix_.substr(sep + 1, std::string::npos);
prefix_ = prefix_.substr(0, sep);
} else {
searchWord = prefix_;
prefix_ = "";
}
auto [v, pos] = findAlongAttrPath(*state, prefix_, *autoArgs, root);
Value &v1(*v);
state->forceValue(v1, pos);
Value v2;
state->autoCallFunction(*autoArgs, v1, v2);
auto [v, pos] = findAlongAttrPath(*state, prefix_, *autoArgs, root);
Value &v1(*v);
state->forceValue(v1, pos);
Value v2;
state->autoCallFunction(*autoArgs, v1, v2);
if (v2.type() == nAttrs) {
for (auto & i : *v2.attrs) {
std::string name = state->symbols[i.name];
if (name.find(searchWord) == 0) {
if (prefix_ == "")
completions->add(name);
else
completions->add(prefix_ + "." + name);
if (v2.type() == nAttrs) {
for (auto & i : *v2.attrs) {
std::string name = state->symbols[i.name];
if (name.find(searchWord) == 0) {
if (prefix_ == "")
completions->add(name);
else
completions->add(prefix_ + "." + name);
}
}
}
} else {
completeFlakeRefWithFragment(
getEvalState(),
lockFlags,
getDefaultFlakeAttrPathPrefixes(),
getDefaultFlakeAttrPaths(),
prefix);
}
} else {
completeFlakeRefWithFragment(
getEvalState(),
lockFlags,
getDefaultFlakeAttrPathPrefixes(),
getDefaultFlakeAttrPaths(),
prefix);
} catch (EvalError&) {
// Don't want eval errors to mess-up with the completion engine, so let's just swallow them
}
}
@ -395,44 +399,56 @@ static StorePath getDeriver(
struct InstallableStorePath : Installable
{
ref<Store> store;
StorePath storePath;
DerivedPath req;
InstallableStorePath(ref<Store> store, StorePath && storePath)
: store(store), storePath(std::move(storePath)) { }
: store(store),
req(storePath.isDerivation()
? (DerivedPath) DerivedPath::Built {
.drvPath = std::move(storePath),
.outputs = {},
}
: (DerivedPath) DerivedPath::Opaque {
.path = std::move(storePath),
})
{ }
std::string what() const override { return store->printStorePath(storePath); }
InstallableStorePath(ref<Store> store, DerivedPath && req)
: store(store), req(std::move(req))
{ }
std::string what() const override
{
return req.to_string(*store);
}
DerivedPaths toDerivedPaths() override
{
if (storePath.isDerivation()) {
auto drv = store->readDerivation(storePath);
return {
DerivedPath::Built {
.drvPath = storePath,
.outputs = drv.outputNames(),
}
};
} else {
return {
DerivedPath::Opaque {
.path = storePath,
}
};
}
return { req };
}
StorePathSet toDrvPaths(ref<Store> store) override
{
if (storePath.isDerivation()) {
return {storePath};
} else {
return {getDeriver(store, *this, storePath)};
}
return std::visit(overloaded {
[&](const DerivedPath::Built & bfd) -> StorePathSet {
return { bfd.drvPath };
},
[&](const DerivedPath::Opaque & bo) -> StorePathSet {
return { getDeriver(store, *this, bo.path) };
},
}, req.raw());
}
std::optional<StorePath> getStorePath() override
{
return storePath;
return std::visit(overloaded {
[&](const DerivedPath::Built & bfd) {
return bfd.drvPath;
},
[&](const DerivedPath::Opaque & bo) {
return bo.path;
},
}, req.raw());
}
};
@ -777,7 +793,8 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
if (file == "-") {
auto e = state->parseStdin();
state->eval(e, *vFile);
} else if (file)
}
else if (file)
state->evalFile(lookupFileArg(*state, *file), *vFile);
else {
auto e = state->parseExprFromString(*expr, absPath("."));
@ -798,7 +815,22 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
for (auto & s : ss) {
std::exception_ptr ex;
if (s.find('/') != std::string::npos) {
auto found = s.rfind('^');
if (found != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(
store,
DerivedPath::Built::parse(*store, s.substr(0, found), s.substr(found + 1))));
continue;
} catch (BadStorePath &) {
} catch (...) {
if (!ex)
ex = std::current_exception();
}
}
found = s.find('/');
if (found != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
continue;
@ -840,20 +872,20 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
return installables.front();
}
BuiltPaths Installable::build(
std::vector<BuiltPathWithResult> Installable::build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode)
{
BuiltPaths res;
for (auto & [_, builtPath] : build2(evalStore, store, mode, installables, bMode))
res.push_back(builtPath);
std::vector<BuiltPathWithResult> res;
for (auto & [_, builtPathWithResult] : build2(evalStore, store, mode, installables, bMode))
res.push_back(builtPathWithResult);
return res;
}
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::build2(
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Installable::build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
@ -873,7 +905,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
}
}
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> res;
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> res;
switch (mode) {
@ -914,10 +946,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
output, *drvOutput->second);
}
}
res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }});
res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }}});
},
[&](const DerivedPath::Opaque & bo) {
res.push_back({installable, BuiltPath::Opaque { bo.path }});
res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }}});
},
}, path.raw());
}
@ -927,7 +959,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
case Realise::Outputs: {
if (settings.printMissing)
printMissing(store, pathsToBuild, lvlInfo);
printMissing(store, pathsToBuild, lvlInfo);
for (auto & buildResult : store->buildPathsWithResults(pathsToBuild, bMode, evalStore)) {
if (!buildResult.success())
@ -939,10 +971,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
std::map<std::string, StorePath> outputs;
for (auto & path : buildResult.builtOutputs)
outputs.emplace(path.first.outputName, path.second.outPath);
res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }});
res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }, .result = buildResult}});
},
[&](const DerivedPath::Opaque & bo) {
res.push_back({installable, BuiltPath::Opaque { bo.path }});
res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }, .result = buildResult}});
},
}, buildResult.path.raw());
}
@ -965,9 +997,12 @@ BuiltPaths Installable::toBuiltPaths(
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables)
{
if (operateOn == OperateOn::Output)
return Installable::build(evalStore, store, mode, installables);
else {
if (operateOn == OperateOn::Output) {
BuiltPaths res;
for (auto & p : Installable::build(evalStore, store, mode, installables))
res.push_back(p.path);
return res;
} else {
if (mode == Realise::Nothing)
settings.readOnlyMode = true;

View file

@ -7,6 +7,7 @@
#include "eval.hh"
#include "store-api.hh"
#include "flake/flake.hh"
#include "build-result.hh"
#include <optional>
@ -51,6 +52,12 @@ enum class OperateOn {
Derivation
};
struct BuiltPathWithResult
{
BuiltPath path;
std::optional<BuildResult> result;
};
struct Installable
{
virtual ~Installable() { }
@ -91,14 +98,14 @@ struct Installable
return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}});
}
static BuiltPaths build(
static std::vector<BuiltPathWithResult> build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal);
static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> build2(
static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,

View file

@ -8,7 +8,7 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix
libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown -pthread
libcmd_LDFLAGS = $(EDITLINE_LIBS) $(LOWDOWN_LIBS) -pthread
libcmd_LIBS = libstore libutil libexpr libmain libfetchers

View file

@ -215,17 +215,15 @@ static std::ostream & showDebugTrace(std::ostream & out, const PosTable & positi
out << dt.hint.str() << "\n";
// prefer direct pos, but if noPos then try the expr.
auto pos = *dt.pos
? *dt.pos
: positions[dt.expr.getPos() ? dt.expr.getPos() : noPos];
auto pos = dt.pos
? dt.pos
: static_cast<std::shared_ptr<AbstractPos>>(positions[dt.expr.getPos() ? dt.expr.getPos() : noPos]);
if (pos) {
printAtPos(pos, out);
auto loc = getCodeLines(pos);
if (loc.has_value()) {
out << pos;
if (auto loc = pos->getCodeLines()) {
out << "\n";
printCodeLines(out, "", pos, *loc);
printCodeLines(out, "", *pos, *loc);
out << "\n";
}
}
@ -270,6 +268,7 @@ void NixRepl::mainLoop()
// ctrl-D should exit the debugger.
state->debugStop = false;
state->debugQuit = true;
logger->cout("");
break;
}
try {
@ -384,6 +383,10 @@ StringSet NixRepl::completePrefix(const std::string & prefix)
i++;
}
} else {
/* Temporarily disable the debugger, to avoid re-entering readline. */
auto debug_repl = state->debugRepl;
state->debugRepl = nullptr;
Finally restoreDebug([&]() { state->debugRepl = debug_repl; });
try {
/* This is an expression that should evaluate to an
attribute set. Evaluate it to get the names of the
@ -584,15 +587,17 @@ bool NixRepl::processLine(std::string line)
Value v;
evalString(arg, v);
const auto [file, line] = [&] () -> std::pair<std::string, uint32_t> {
const auto [path, line] = [&] () -> std::pair<Path, uint32_t> {
if (v.type() == nPath || v.type() == nString) {
PathSet context;
auto filename = state->coerceToString(noPos, v, context, "while evaluating the filename to edit").toOwned();
state->symbols.create(filename);
return {filename, 0};
auto path = state->coerceToPath(noPos, v, context, "while evaluating the filename to edit");
return {path, 0};
} else if (v.isLambda()) {
auto pos = state->positions[v.lambda.fun->pos];
return {pos.file, pos.line};
if (auto path = std::get_if<Path>(&pos.origin))
return {*path, pos.line};
else
throw Error("'%s' cannot be shown in an editor", pos);
} else {
// assume it's a derivation
return findPackageFilename(*state, v, arg);
@ -600,7 +605,7 @@ bool NixRepl::processLine(std::string line)
}();
// Open in EDITOR
auto args = editorFor(file, line);
auto args = editorFor(path, line);
auto editor = args.front();
args.pop_front();
@ -782,7 +787,7 @@ void NixRepl::loadFlake(const std::string & flakeRefS)
flake::LockFlags {
.updateLockFile = false,
.useRegistries = !evalSettings.pureEval,
.allowMutable = !evalSettings.pureEval,
.allowUnlocked = !evalSettings.pureEval,
}),
v);
addAttrsToScope(v);

View file

@ -645,17 +645,17 @@ NixInt AttrCursor::getInt()
cachedValue = root->db->getAttr(getKey());
if (cachedValue && !std::get_if<placeholder_t>(&cachedValue->second)) {
if (auto i = std::get_if<int_t>(&cachedValue->second)) {
debug("using cached Integer attribute '%s'", getAttrPathStr());
debug("using cached integer attribute '%s'", getAttrPathStr());
return i->x;
} else
throw TypeError("'%s' is not an Integer", getAttrPathStr());
throw TypeError("'%s' is not an integer", getAttrPathStr());
}
}
auto & v = forceValue();
if (v.type() != nInt)
throw TypeError("'%s' is not an Integer", getAttrPathStr());
throw TypeError("'%s' is not an integer", getAttrPathStr());
return v.integer;
}

View file

@ -7,7 +7,6 @@
#include "globals.hh"
#include "eval-inline.hh"
#include "filetransfer.hh"
#include "json.hh"
#include "function-trace.hh"
#include <algorithm>
@ -21,6 +20,7 @@
#include <functional>
#include <sys/resource.h>
#include <nlohmann/json.hpp>
#if HAVE_BOEHMGC
@ -35,6 +35,8 @@
#endif
using json = nlohmann::json;
namespace nix {
static char * allocString(size_t size)
@ -43,7 +45,7 @@ static char * allocString(size_t size)
#if HAVE_BOEHMGC
t = (char *) GC_MALLOC_ATOMIC(size);
#else
t = malloc(size);
t = (char *) malloc(size);
#endif
if (!t) throw std::bad_alloc();
return t;
@ -65,26 +67,19 @@ static char * dupString(const char * s)
// When there's no need to write to the string, we can optimize away empty
// string allocations.
// This function handles makeImmutableStringWithLen(null, 0) by returning the
// empty string.
static const char * makeImmutableStringWithLen(const char * s, size_t size)
// This function handles makeImmutableString(std::string_view()) by returning
// the empty string.
static const char * makeImmutableString(std::string_view s)
{
char * t;
const size_t size = s.size();
if (size == 0)
return "";
#if HAVE_BOEHMGC
t = GC_STRNDUP(s, size);
#else
t = strndup(s, size);
#endif
if (!t) throw std::bad_alloc();
auto t = allocString(size + 1);
memcpy(t, s.data(), size);
t[size] = '\0';
return t;
}
static inline const char * makeImmutableString(std::string_view s) {
return makeImmutableStringWithLen(s.data(), s.size());
}
RootValue allocRootValue(Value * v)
{
@ -404,7 +399,8 @@ static Strings parseNixPath(const std::string & s)
}
if (*p == ':') {
if (isUri(std::string(start2, s.end()))) {
auto prefix = std::string(start2, s.end());
if (EvalSettings::isPseudoUrl(prefix) || hasPrefix(prefix, "flake:")) {
++p;
while (p != s.end() && *p != ':') ++p;
}
@ -436,21 +432,23 @@ ErrorBuilder & ErrorBuilder::withFrameTrace(PosIdx pos, const std::string_view t
return *this;
}
ErrorBuilder & ErrorBuilder::withSuggestions(Suggestions & s) {
ErrorBuilder & ErrorBuilder::withSuggestions(Suggestions & s)
{
info.suggestions = s;
return *this;
}
ErrorBuilder & ErrorBuilder::withFrame(const Env & env, const Expr & expr) {
ErrorBuilder & ErrorBuilder::withFrame(const Env & env, const Expr & expr)
{
// NOTE: This is abusing side-effects.
// TODO: check compatibility with nested debugger calls.
state.debugTraces.push_front(DebugTrace {
.pos = std::nullopt,
.expr = expr,
.env = env,
.hint = hintformat("Fake frame for debugg{ing,er} purposes"),
.isError = true
});
.pos = nullptr,
.expr = expr,
.env = env,
.hint = hintformat("Fake frame for debugging purposes"),
.isError = true
});
return *this;
}
@ -508,9 +506,6 @@ EvalState::EvalState(
#if HAVE_BOEHMGC
, valueAllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
, env1AllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
#else
, valueAllocCache(std::make_shared<void *>(nullptr))
, env1AllocCache(std::make_shared<void *>(nullptr))
#endif
, baseEnv(allocEnv(128))
, staticBaseEnv{std::make_shared<StaticEnv>(false, nullptr)}
@ -842,7 +837,7 @@ void EvalState::runDebugRepl(const Error * error, const Env & env, const Expr &
? std::make_unique<DebugTraceStacker>(
*this,
DebugTrace {
.pos = error->info().errPos ? *error->info().errPos : positions[expr.getPos()],
.pos = error->info().errPos ? error->info().errPos : static_cast<std::shared_ptr<AbstractPos>>(positions[expr.getPos()]),
.expr = expr,
.env = env,
.hint = error->info().msg,
@ -869,7 +864,7 @@ void EvalState::runDebugRepl(const Error * error, const Env & env, const Expr &
void EvalState::addErrorTrace(Error & e, const char * s, const std::string & s2) const
{
e.addTrace(std::nullopt, s, s2);
e.addTrace(nullptr, s, s2);
}
void EvalState::addErrorTrace(Error & e, const PosIdx pos, const char * s, const std::string & s2, bool frame) const
@ -881,13 +876,13 @@ static std::unique_ptr<DebugTraceStacker> makeDebugTraceStacker(
EvalState & state,
Expr & expr,
Env & env,
std::optional<ErrPos> pos,
std::shared_ptr<AbstractPos> && pos,
const char * s,
const std::string & s2)
{
return std::make_unique<DebugTraceStacker>(state,
DebugTrace {
.pos = pos,
.pos = std::move(pos),
.expr = expr,
.env = env,
.hint = hintfmt(s, s2),
@ -993,9 +988,9 @@ void EvalState::mkThunk_(Value & v, Expr * expr)
void EvalState::mkPos(Value & v, PosIdx p)
{
auto pos = positions[p];
if (!pos.file.empty()) {
if (auto path = std::get_if<Path>(&pos.origin)) {
auto attrs = buildBindings(3);
attrs.alloc(sFile).mkString(pos.file);
attrs.alloc(sFile).mkString(*path);
attrs.alloc(sLine).mkInt(pos.line);
attrs.alloc(sColumn).mkInt(pos.column);
v.mkAttrs(attrs);
@ -1103,7 +1098,7 @@ void EvalState::cacheFile(
*this,
*e,
this->baseEnv,
e->getPos() ? std::optional(ErrPos(positions[e->getPos()])) : std::nullopt,
e->getPos() ? static_cast<std::shared_ptr<AbstractPos>>(positions[e->getPos()]) : nullptr,
"while evaluating the file '%1%':", resolvedPath)
: nullptr;
@ -1373,10 +1368,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
state.forceValue(*vAttrs, (pos2 ? pos2 : this->pos ) );
} catch (Error & e) {
auto pos2r = state.positions[pos2];
if (pos2 && pos2r.file != state.derivationNixPath)
state.addErrorTrace(e, pos2, "while evaluating the attribute '%1%'",
showAttrPath(state, env, attrPath));
if (pos2) {
auto pos2r = state.positions[pos2];
auto origin = std::get_if<Path>(&pos2r.origin);
if (!(origin && *origin == state.derivationNixPath))
state.addErrorTrace(e, pos2, "while evaluating the attribute '%1%'",
showAttrPath(state, env, attrPath));
}
throw;
}
@ -1519,7 +1517,7 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
auto dts = debugRepl
? makeDebugTraceStacker(
*this, *lambda.body, env2, positions[lambda.pos],
"while evaluating %s",
"while calling %s",
lambda.name
? concatStrings("'", symbols[lambda.name], "'")
: "anonymous lambda")
@ -1528,13 +1526,14 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
lambda.body->eval(*this, env2, vCur);
} catch (Error & e) {
if (loggerSettings.showTrace.get()) {
addErrorTrace(e,
lambda.pos,
"while evaluating %s",
lambda.name
? concatStrings("'", symbols[lambda.name], "'")
: "anonymous lambda",
true);
addErrorTrace(
e,
lambda.pos,
"while calling %s",
lambda.name
? concatStrings("'", symbols[lambda.name], "'")
: "anonymous lambda",
true);
if (pos) addErrorTrace(e, pos, "from call site%s", "", true);
}
throw;
@ -1704,7 +1703,7 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
Nix attempted to evaluate a function as a top level expression; in
this case it must have its arguments supplied either by default
values, or passed explicitly with '--arg' or '--argstr'. See
https://nixos.org/manual/nix/stable/expressions/language-constructs.html#functions.)", symbols[i.name])
https://nixos.org/manual/nix/stable/language/constructs.html#functions.)", symbols[i.name])
.atPos(i.pos).withFrame(*fun.lambda.env, *fun.lambda.fun).debugThrow<MissingArgumentError>();
}
}
@ -2357,97 +2356,99 @@ void EvalState::printStats()
std::fstream fs;
if (outPath != "-")
fs.open(outPath, std::fstream::out);
JSONObject topObj(outPath == "-" ? std::cerr : fs, true);
topObj.attr("cpuTime",cpuTime);
{
auto envs = topObj.object("envs");
envs.attr("number", nrEnvs);
envs.attr("elements", nrValuesInEnvs);
envs.attr("bytes", bEnvs);
}
{
auto lists = topObj.object("list");
lists.attr("elements", nrListElems);
lists.attr("bytes", bLists);
lists.attr("concats", nrListConcats);
}
{
auto values = topObj.object("values");
values.attr("number", nrValues);
values.attr("bytes", bValues);
}
{
auto syms = topObj.object("symbols");
syms.attr("number", symbols.size());
syms.attr("bytes", symbols.totalSize());
}
{
auto sets = topObj.object("sets");
sets.attr("number", nrAttrsets);
sets.attr("bytes", bAttrsets);
sets.attr("elements", nrAttrsInAttrsets);
}
{
auto sizes = topObj.object("sizes");
sizes.attr("Env", sizeof(Env));
sizes.attr("Value", sizeof(Value));
sizes.attr("Bindings", sizeof(Bindings));
sizes.attr("Attr", sizeof(Attr));
}
topObj.attr("nrOpUpdates", nrOpUpdates);
topObj.attr("nrOpUpdateValuesCopied", nrOpUpdateValuesCopied);
topObj.attr("nrThunks", nrThunks);
topObj.attr("nrAvoided", nrAvoided);
topObj.attr("nrLookups", nrLookups);
topObj.attr("nrPrimOpCalls", nrPrimOpCalls);
topObj.attr("nrFunctionCalls", nrFunctionCalls);
json topObj = json::object();
topObj["cpuTime"] = cpuTime;
topObj["envs"] = {
{"number", nrEnvs},
{"elements", nrValuesInEnvs},
{"bytes", bEnvs},
};
topObj["list"] = {
{"elements", nrListElems},
{"bytes", bLists},
{"concats", nrListConcats},
};
topObj["values"] = {
{"number", nrValues},
{"bytes", bValues},
};
topObj["symbols"] = {
{"number", symbols.size()},
{"bytes", symbols.totalSize()},
};
topObj["sets"] = {
{"number", nrAttrsets},
{"bytes", bAttrsets},
{"elements", nrAttrsInAttrsets},
};
topObj["sizes"] = {
{"Env", sizeof(Env)},
{"Value", sizeof(Value)},
{"Bindings", sizeof(Bindings)},
{"Attr", sizeof(Attr)},
};
topObj["nrOpUpdates"] = nrOpUpdates;
topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied;
topObj["nrThunks"] = nrThunks;
topObj["nrAvoided"] = nrAvoided;
topObj["nrLookups"] = nrLookups;
topObj["nrPrimOpCalls"] = nrPrimOpCalls;
topObj["nrFunctionCalls"] = nrFunctionCalls;
#if HAVE_BOEHMGC
{
auto gc = topObj.object("gc");
gc.attr("heapSize", heapSize);
gc.attr("totalBytes", totalBytes);
}
topObj["gc"] = {
{"heapSize", heapSize},
{"totalBytes", totalBytes},
};
#endif
if (countCalls) {
topObj["primops"] = primOpCalls;
{
auto obj = topObj.object("primops");
for (auto & i : primOpCalls)
obj.attr(i.first, i.second);
}
{
auto list = topObj.list("functions");
auto& list = topObj["functions"];
list = json::array();
for (auto & [fun, count] : functionCalls) {
auto obj = list.object();
json obj = json::object();
if (fun->name)
obj.attr("name", (std::string_view) symbols[fun->name]);
obj["name"] = (std::string_view) symbols[fun->name];
else
obj.attr("name", nullptr);
obj["name"] = nullptr;
if (auto pos = positions[fun->pos]) {
obj.attr("file", (std::string_view) pos.file);
obj.attr("line", pos.line);
obj.attr("column", pos.column);
if (auto path = std::get_if<Path>(&pos.origin))
obj["file"] = *path;
obj["line"] = pos.line;
obj["column"] = pos.column;
}
obj.attr("count", count);
obj["count"] = count;
list.push_back(obj);
}
}
{
auto list = topObj.list("attributes");
auto list = topObj["attributes"];
list = json::array();
for (auto & i : attrSelects) {
auto obj = list.object();
json obj = json::object();
if (auto pos = positions[i.first]) {
obj.attr("file", (const std::string &) pos.file);
obj.attr("line", pos.line);
obj.attr("column", pos.column);
if (auto path = std::get_if<Path>(&pos.origin))
obj["file"] = *path;
obj["line"] = pos.line;
obj["column"] = pos.column;
}
obj.attr("count", i.second);
obj["count"] = i.second;
list.push_back(obj);
}
}
}
if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") {
auto list = topObj.list("symbols");
symbols.dump([&](const std::string & s) { list.elem(s); });
// XXX: overrides earlier assignment
topObj["symbols"] = json::array();
auto &list = topObj["symbols"];
symbols.dump([&](const std::string & s) { list.emplace_back(s); });
}
if (outPath == "-") {
std::cerr << topObj.dump(2) << std::endl;
} else {
fs << topObj.dump(2) << std::endl;
}
}
}
@ -2502,6 +2503,23 @@ Strings EvalSettings::getDefaultNixPath()
return res;
}
bool EvalSettings::isPseudoUrl(std::string_view s)
{
if (s.compare(0, 8, "channel:") == 0) return true;
size_t pos = s.find("://");
if (pos == std::string::npos) return false;
std::string scheme(s, 0, pos);
return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
}
std::string EvalSettings::resolvePseudoUrl(std::string_view url)
{
if (hasPrefix(url, "channel:"))
return "https://nixos.org/channels/" + std::string(url.substr(8)) + "/nixexprs.tar.xz";
else
return std::string(url);
}
EvalSettings evalSettings;
static GlobalConfig::Register rEvalSettings(&evalSettings);
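As a quick illustration of the two new helpers, the expected results below are inferred from the definitions above; they are not output shown in the diff:

```cpp
// Illustrative expectations, derived from isPseudoUrl/resolvePseudoUrl above:
// EvalSettings::isPseudoUrl("channel:nixos-22.05")       == true   // "channel:" prefix
// EvalSettings::isPseudoUrl("https://example.org/x.tar") == true   // known scheme before "://"
// EvalSettings::isPseudoUrl("/etc/nixos")                == false  // no scheme, no "channel:" prefix
// EvalSettings::resolvePseudoUrl("channel:nixos-22.05")
//     == "https://nixos.org/channels/nixos-22.05/nixexprs.tar.xz"
// Any non-"channel:" input is returned unchanged.
```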

View file

@ -60,7 +60,6 @@ void copyContext(const Value & v, PathSet & context);
typedef std::map<Path, StorePath> SrcToStore;
std::ostream & printValue(const EvalState & state, std::ostream & str, const Value & v);
std::string printValue(const EvalState & state, const Value & v);
std::ostream & operator << (std::ostream & os, const ValueType t);
@ -78,7 +77,7 @@ struct RegexCache;
std::shared_ptr<RegexCache> makeRegexCache();
struct DebugTrace {
std::optional<ErrPos> pos;
std::shared_ptr<AbstractPos> pos;
const Expr & expr;
const Env & env;
hintformat hint;
@ -437,8 +436,12 @@ private:
friend struct ExprAttrs;
friend struct ExprLet;
Expr * parse(char * text, size_t length, FileOrigin origin, const PathView path,
const PathView basePath, std::shared_ptr<StaticEnv> & staticEnv);
Expr * parse(
char * text,
size_t length,
Pos::Origin origin,
Path basePath,
std::shared_ptr<StaticEnv> & staticEnv);
public:
@ -570,6 +573,10 @@ struct EvalSettings : Config
static Strings getDefaultNixPath();
static bool isPseudoUrl(std::string_view s);
static std::string resolvePseudoUrl(std::string_view url);
Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
"Whether builtin functions that allow executing native code should be enabled."};

View file

@ -43,7 +43,7 @@ let
outputs = flake.outputs (inputs // { self = result; });
result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; };
result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; _type = "flake"; };
in
if node.flake or true then
assert builtins.isFunction flake.outputs;

View file

@ -56,7 +56,7 @@ void ConfigFile::apply()
auto tlname = get(trustedList, name);
if (auto saved = tlname ? get(*tlname, valueS) : nullptr) {
trusted = *saved;
warn("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name,valueS);
printInfo("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name, valueS);
} else {
// FIXME: filter ANSI escapes, newlines, \r, etc.
if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) == 'y') {

View file

@ -143,7 +143,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
} catch (Error & e) {
e.addTrace(
state.positions[attr.pos],
hintfmt("in flake attribute '%s'", state.symbols[attr.name]));
hintfmt("while evaluating flake attribute '%s'", state.symbols[attr.name]));
throw;
}
}
@ -152,7 +152,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
try {
input.ref = FlakeRef::fromAttrs(attrs);
} catch (Error & e) {
e.addTrace(state.positions[pos], hintfmt("in flake input"));
e.addTrace(state.positions[pos], hintfmt("while evaluating flake input"));
throw;
}
else {
@ -220,7 +220,7 @@ static Flake getFlake(
Value vInfo;
state.evalFile(flakeFile, vInfo, true); // FIXME: symlink attack
expectType(state, nAttrs, vInfo, state.positions.add({flakeFile, foFile}, 0, 0));
expectType(state, nAttrs, vInfo, state.positions.add({flakeFile}, 1, 1));
if (auto description = vInfo.attrs->get(state.sDescription)) {
expectType(state, nString, *description->value, description->pos);
@ -353,7 +353,7 @@ LockedFlake lockFlake(
std::function<void(
const FlakeInputs & flakeInputs,
std::shared_ptr<Node> node,
ref<Node> node,
const InputPath & inputPathPrefix,
std::shared_ptr<const Node> oldNode,
const InputPath & lockRootPath,
@ -362,9 +362,15 @@ LockedFlake lockFlake(
computeLocks;
computeLocks = [&](
/* The inputs of this node, either from flake.nix or
flake.lock. */
const FlakeInputs & flakeInputs,
std::shared_ptr<Node> node,
/* The node whose locks are to be updated.*/
ref<Node> node,
/* The path to this node in the lock file graph. */
const InputPath & inputPathPrefix,
/* The old node, if any, from which locks can be
copied. */
std::shared_ptr<const Node> oldNode,
const InputPath & lockRootPath,
const Path & parentPath,
@ -452,7 +458,7 @@ LockedFlake lockFlake(
/* Copy the input from the old lock since its flakeref
didn't change and there is no override from a
higher level flake. */
auto childNode = std::make_shared<LockedNode>(
auto childNode = make_ref<LockedNode>(
oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake);
node->inputs.insert_or_assign(id, childNode);
@ -481,7 +487,7 @@ LockedFlake lockFlake(
.isFlake = (*lockedNode)->isFlake,
});
} else if (auto follows = std::get_if<1>(&i.second)) {
if (! trustLock) {
if (!trustLock) {
// It is possible that the flake has changed,
// so we must confirm all the follows that are in the lock file are also in the flake.
auto overridePath(inputPath);
@ -521,8 +527,8 @@ LockedFlake lockFlake(
this input. */
debug("creating new input '%s'", inputPathS);
if (!lockFlags.allowMutable && !input.ref->input.isLocked())
throw Error("cannot update flake input '%s' in pure mode", inputPathS);
if (!lockFlags.allowUnlocked && !input.ref->input.isLocked())
throw Error("cannot update unlocked flake input '%s' in pure mode", inputPathS);
/* Note: in case of an --override-input, we use
the *original* ref (input2.ref) for the
@ -544,7 +550,7 @@ LockedFlake lockFlake(
auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache, inputPath);
auto childNode = std::make_shared<LockedNode>(inputFlake.lockedRef, ref);
auto childNode = make_ref<LockedNode>(inputFlake.lockedRef, ref);
node->inputs.insert_or_assign(id, childNode);
@ -564,15 +570,19 @@ LockedFlake lockFlake(
oldLock
? std::dynamic_pointer_cast<const Node>(oldLock)
: LockFile::read(
inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
oldLock ? lockRootPath : inputPath, localPath, false);
inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root.get_ptr(),
oldLock ? lockRootPath : inputPath,
localPath,
false);
}
else {
auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
state, *input.ref, useRegistries, flakeCache);
node->inputs.insert_or_assign(id,
std::make_shared<LockedNode>(lockedRef, ref, false));
auto childNode = make_ref<LockedNode>(lockedRef, ref, false);
node->inputs.insert_or_assign(id, childNode);
}
}
@ -587,8 +597,13 @@ LockedFlake lockFlake(
auto parentPath = canonPath(flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir, true);
computeLocks(
flake.inputs, newLockFile.root, {},
lockFlags.recreateLockFile ? nullptr : oldLockFile.root, {}, parentPath, false);
flake.inputs,
newLockFile.root,
{},
lockFlags.recreateLockFile ? nullptr : oldLockFile.root.get_ptr(),
{},
parentPath,
false);
for (auto & i : lockFlags.inputOverrides)
if (!overridesUsed.count(i.first))
@ -611,9 +626,9 @@ LockedFlake lockFlake(
if (lockFlags.writeLockFile) {
if (auto sourcePath = topRef.input.getSourcePath()) {
if (!newLockFile.isImmutable()) {
if (auto unlockedInput = newLockFile.isUnlocked()) {
if (fetchSettings.warnDirty)
warn("will not write lock file of flake '%s' because it has a mutable input", topRef);
warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput);
} else {
if (!lockFlags.updateLockFile)
throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef);
@ -737,7 +752,7 @@ static void prim_getFlake(EvalState & state, const PosIdx pos, Value * * args, V
.updateLockFile = false,
.writeLockFile = false,
.useRegistries = !evalSettings.pureEval && fetchSettings.useRegistries,
.allowMutable = !evalSettings.pureEval,
.allowUnlocked = !evalSettings.pureEval,
}),
v);
}

View file

@ -108,11 +108,11 @@ struct LockFlags
bool applyNixConfig = false;
/* Whether mutable flake references (i.e. those without a Git
/* Whether unlocked flake references (i.e. those without a Git
revision or similar) without a corresponding lock are
allowed. Mutable flake references with a lock are always
allowed. Unlocked flake references with a lock are always
allowed. */
bool allowMutable = true;
bool allowUnlocked = true;
/* Whether to commit changes to flake.lock. */
bool commitLockFile = false;

View file

@ -35,7 +35,7 @@ typedef std::string FlakeId;
struct FlakeRef
{
/* fetcher-specific representation of the input, sufficient to
/* Fetcher-specific representation of the input, sufficient to
perform the fetch operation. */
fetchers::Input input;

View file

@ -31,7 +31,7 @@ FlakeRef getFlakeRef(
}
LockedNode::LockedNode(const nlohmann::json & json)
: lockedRef(getFlakeRef(json, "locked", "info"))
: lockedRef(getFlakeRef(json, "locked", "info")) // FIXME: remove "info"
, originalRef(getFlakeRef(json, "original", nullptr))
, isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
{
@ -49,15 +49,15 @@ std::shared_ptr<Node> LockFile::findInput(const InputPath & path)
{
auto pos = root;
if (!pos) return {};
for (auto & elem : path) {
if (auto i = get(pos->inputs, elem)) {
if (auto node = std::get_if<0>(&*i))
pos = *node;
else if (auto follows = std::get_if<1>(&*i)) {
pos = findInput(*follows);
if (!pos) return {};
if (auto p = findInput(*follows))
pos = ref(p);
else
return {};
}
} else
return {};
@ -72,7 +72,7 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
if (version < 5 || version > 7)
throw Error("lock file '%s' has unsupported version %d", path, version);
std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;
std::map<std::string, ref<Node>> nodeMap;
std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
@ -93,12 +93,12 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
auto jsonNode2 = nodes.find(inputKey);
if (jsonNode2 == nodes.end())
throw Error("lock file references missing node '%s'", inputKey);
auto input = std::make_shared<LockedNode>(*jsonNode2);
auto input = make_ref<LockedNode>(*jsonNode2);
k = nodeMap.insert_or_assign(inputKey, input).first;
getInputs(*input, *jsonNode2);
}
if (auto child = std::dynamic_pointer_cast<LockedNode>(k->second))
node.inputs.insert_or_assign(i.key(), child);
if (auto child = k->second.dynamic_pointer_cast<LockedNode>())
node.inputs.insert_or_assign(i.key(), ref(child));
else
// FIXME: replace by follows node
throw Error("lock file contains cycle to root node");
@ -122,9 +122,9 @@ nlohmann::json LockFile::toJSON() const
std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;
std::unordered_set<std::string> keys;
std::function<std::string(const std::string & key, std::shared_ptr<const Node> node)> dumpNode;
std::function<std::string(const std::string & key, ref<const Node> node)> dumpNode;
dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string
dumpNode = [&](std::string key, ref<const Node> node) -> std::string
{
auto k = nodeKeys.find(node);
if (k != nodeKeys.end())
@ -159,10 +159,11 @@ nlohmann::json LockFile::toJSON() const
n["inputs"] = std::move(inputs);
}
if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) {
if (auto lockedNode = node.dynamic_pointer_cast<const LockedNode>()) {
n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs());
n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs());
if (!lockedNode->isFlake) n["flake"] = false;
if (!lockedNode->isFlake)
n["flake"] = false;
}
nodes[key] = std::move(n);
@ -201,13 +202,13 @@ void LockFile::write(const Path & path) const
writeFile(path, fmt("%s\n", *this));
}
bool LockFile::isImmutable() const
std::optional<FlakeRef> LockFile::isUnlocked() const
{
std::unordered_set<std::shared_ptr<const Node>> nodes;
std::set<ref<const Node>> nodes;
std::function<void(std::shared_ptr<const Node> node)> visit;
std::function<void(ref<const Node> node)> visit;
visit = [&](std::shared_ptr<const Node> node)
visit = [&](ref<const Node> node)
{
if (!nodes.insert(node).second) return;
for (auto & i : node->inputs)
@ -219,11 +220,12 @@ bool LockFile::isImmutable() const
for (auto & i : nodes) {
if (i == root) continue;
auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i);
if (lockedNode && !lockedNode->lockedRef.input.isLocked()) return false;
auto node = i.dynamic_pointer_cast<const LockedNode>();
if (node && !node->lockedRef.input.isLocked())
return node->lockedRef;
}
return true;
return {};
}
bool LockFile::operator ==(const LockFile & other) const
@ -247,12 +249,12 @@ InputPath parseInputPath(std::string_view s)
std::map<InputPath, Node::Edge> LockFile::getAllInputs() const
{
std::unordered_set<std::shared_ptr<Node>> done;
std::set<ref<Node>> done;
std::map<InputPath, Node::Edge> res;
std::function<void(const InputPath & prefix, std::shared_ptr<Node> node)> recurse;
std::function<void(const InputPath & prefix, ref<Node> node)> recurse;
recurse = [&](const InputPath & prefix, std::shared_ptr<Node> node)
recurse = [&](const InputPath & prefix, ref<Node> node)
{
if (!done.insert(node).second) return;

View file

@ -20,7 +20,7 @@ struct LockedNode;
type LockedNode. */
struct Node : std::enable_shared_from_this<Node>
{
typedef std::variant<std::shared_ptr<LockedNode>, InputPath> Edge;
typedef std::variant<ref<LockedNode>, InputPath> Edge;
std::map<FlakeId, Edge> inputs;
@ -47,11 +47,13 @@ struct LockedNode : Node
struct LockFile
{
std::shared_ptr<Node> root = std::make_shared<Node>();
ref<Node> root = make_ref<Node>();
LockFile() {};
LockFile(const nlohmann::json & json, const Path & path);
typedef std::map<ref<const Node>, std::string> KeyMap;
nlohmann::json toJSON() const;
std::string to_string() const;
@ -60,7 +62,8 @@ struct LockFile
void write(const Path & path) const;
bool isImmutable() const;
/* Check whether this lock file has any unlocked inputs. */
std::optional<FlakeRef> isUnlocked() const;
bool operator ==(const LockFile & other) const;

View file

@ -150,7 +150,7 @@ DrvInfo::Outputs DrvInfo::queryOutputs(bool withPaths, bool onlyOutputsToInstall
/* Check for `meta.outputsToInstall` and return `outputs` reduced to that. */
const Value * outTI = queryMeta("outputsToInstall");
if (!outTI) return outputs;
const auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'");
auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'");
/* ^ this shows during `nix-env -i` right under the bad derivation */
if (!outTI->isList()) throw errMsg;
Outputs result;

View file

@ -8,6 +8,58 @@
namespace nix {
struct PosAdapter : AbstractPos
{
Pos::Origin origin;
PosAdapter(Pos::Origin origin)
: origin(std::move(origin))
{
}
std::optional<std::string> getSource() const override
{
return std::visit(overloaded {
[](const Pos::none_tag &) -> std::optional<std::string> {
return std::nullopt;
},
[](const Pos::Stdin & s) -> std::optional<std::string> {
// Get rid of the null terminators added by the parser.
return std::string(s.source->c_str());
},
[](const Pos::String & s) -> std::optional<std::string> {
// Get rid of the null terminators added by the parser.
return std::string(s.source->c_str());
},
[](const Path & path) -> std::optional<std::string> {
try {
return readFile(path);
} catch (Error &) {
return std::nullopt;
}
}
}, origin);
}
void print(std::ostream & out) const override
{
std::visit(overloaded {
[&](const Pos::none_tag &) { out << "«none»"; },
[&](const Pos::Stdin &) { out << "«stdin»"; },
[&](const Pos::String & s) { out << "«string»"; },
[&](const Path & path) { out << path; }
}, origin);
}
};
Pos::operator std::shared_ptr<AbstractPos>() const
{
auto pos = std::make_shared<PosAdapter>(origin);
pos->line = line;
pos->column = column;
return pos;
}
/* Displaying abstract syntax trees. */
static void showString(std::ostream & str, std::string_view s)
@ -248,24 +300,10 @@ void ExprPos::show(const SymbolTable & symbols, std::ostream & str) const
std::ostream & operator << (std::ostream & str, const Pos & pos)
{
if (!pos)
if (auto pos2 = (std::shared_ptr<AbstractPos>) pos) {
str << *pos2;
} else
str << "undefined position";
else
{
auto f = format(ANSI_BOLD "%1%" ANSI_NORMAL ":%2%:%3%");
switch (pos.origin) {
case foFile:
f % (const std::string &) pos.file;
break;
case foStdin:
case foString:
f % "(string)";
break;
default:
throw Error("unhandled Pos origin!");
}
str << (f % pos.line % pos.column).str();
}
return str;
}
@ -289,7 +327,6 @@ std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath)
}
/* Computing levels/displacements for variables. */
void Expr::bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env)

View file

@ -22,15 +22,22 @@ MakeError(MissingArgumentError, EvalError);
MakeError(RestrictedPathError, Error);
/* Position objects. */
struct Pos
{
std::string file;
FileOrigin origin;
uint32_t line;
uint32_t column;
struct none_tag { };
struct Stdin { ref<std::string> source; };
struct String { ref<std::string> source; };
typedef std::variant<none_tag, Stdin, String, Path> Origin;
Origin origin;
explicit operator bool() const { return line > 0; }
operator std::shared_ptr<AbstractPos>() const;
};
class PosIdx {
@ -46,7 +53,11 @@ public:
explicit operator bool() const { return id > 0; }
bool operator<(const PosIdx other) const { return id < other.id; }
bool operator <(const PosIdx other) const { return id < other.id; }
bool operator ==(const PosIdx other) const { return id == other.id; }
bool operator !=(const PosIdx other) const { return id != other.id; }
};
class PosTable
@ -60,13 +71,13 @@ public:
// current origins.back() can be reused or not.
mutable uint32_t idx = std::numeric_limits<uint32_t>::max();
explicit Origin(uint32_t idx): idx(idx), file{}, origin{} {}
// Used for searching in PosTable::[].
explicit Origin(uint32_t idx): idx(idx), origin{Pos::none_tag()} {}
public:
const std::string file;
const FileOrigin origin;
const Pos::Origin origin;
Origin(std::string file, FileOrigin origin): file(std::move(file)), origin(origin) {}
Origin(Pos::Origin origin): origin(origin) {}
};
struct Offset {
@ -106,7 +117,7 @@ public:
[] (const auto & a, const auto & b) { return a.idx < b.idx; });
const auto origin = *std::prev(pastOrigin);
const auto offset = offsets[idx];
return {origin.file, origin.origin, offset.line, offset.column};
return {offset.line, offset.column, origin.origin};
}
};

View file

@ -34,11 +34,6 @@ namespace nix {
Path basePath;
PosTable::Origin origin;
std::optional<ErrorInfo> error;
ParseData(EvalState & state, PosTable::Origin origin)
: state(state)
, symbols(state.symbols)
, origin(std::move(origin))
{ };
};
struct ParserFormals {
@ -643,29 +638,26 @@ formal
#include "filetransfer.hh"
#include "fetchers.hh"
#include "store-api.hh"
#include "flake/flake.hh"
namespace nix {
Expr * EvalState::parse(char * text, size_t length, FileOrigin origin,
const PathView path, const PathView basePath, std::shared_ptr<StaticEnv> & staticEnv)
Expr * EvalState::parse(
char * text,
size_t length,
Pos::Origin origin,
Path basePath,
std::shared_ptr<StaticEnv> & staticEnv)
{
yyscan_t scanner;
std::string file;
switch (origin) {
case foFile:
file = path;
break;
case foStdin:
case foString:
file = text;
break;
default:
assert(false);
}
ParseData data(*this, {file, origin});
data.basePath = basePath;
ParseData data {
.state = *this,
.symbols = symbols,
.basePath = std::move(basePath),
.origin = {origin},
};
yylex_init(&scanner);
yy_scan_buffer(text, length, scanner);
@ -717,14 +709,15 @@ Expr * EvalState::parseExprFromFile(const Path & path, std::shared_ptr<StaticEnv
auto buffer = readFile(path);
// readFile should have left some extra space for terminators
buffer.append("\0\0", 2);
return parse(buffer.data(), buffer.size(), foFile, path, dirOf(path), staticEnv);
return parse(buffer.data(), buffer.size(), path, dirOf(path), staticEnv);
}
Expr * EvalState::parseExprFromString(std::string s, const Path & basePath, std::shared_ptr<StaticEnv> & staticEnv)
Expr * EvalState::parseExprFromString(std::string s_, const Path & basePath, std::shared_ptr<StaticEnv> & staticEnv)
{
s.append("\0\0", 2);
return parse(s.data(), s.size(), foString, "", basePath, staticEnv);
auto s = make_ref<std::string>(std::move(s_));
s->append("\0\0", 2);
return parse(s->data(), s->size(), Pos::String{.source = s}, basePath, staticEnv);
}
@ -740,7 +733,8 @@ Expr * EvalState::parseStdin()
auto buffer = drainFD(0);
// drainFD should have left some extra space for terminators
buffer.append("\0\0", 2);
return parse(buffer.data(), buffer.size(), foStdin, "", absPath("."), staticBaseEnv);
auto s = make_ref<std::string>(std::move(buffer));
return parse(s->data(), s->size(), Pos::Stdin{.source = s}, absPath("."), staticBaseEnv);
}
@ -805,17 +799,28 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
std::pair<bool, std::string> res;
if (isUri(elem.second)) {
if (EvalSettings::isPseudoUrl(elem.second)) {
try {
res = { true, store->toRealPath(fetchers::downloadTarball(
store, resolveUri(elem.second), "source", false).first.storePath) };
auto storePath = fetchers::downloadTarball(
store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).first.storePath;
res = { true, store->toRealPath(storePath) };
} catch (FileTransferError & e) {
logWarning({
.msg = hintfmt("Nix search path entry '%1%' cannot be downloaded, ignoring", elem.second)
});
res = { false, "" };
}
} else {
}
else if (hasPrefix(elem.second, "flake:")) {
settings.requireExperimentalFeature(Xp::Flakes);
auto flakeRef = parseFlakeRef(elem.second.substr(6), {}, true, false);
debug("fetching flake search path element '%s''", elem.second);
auto storePath = flakeRef.resolve(store).fetchTree(store).first.storePath;
res = { true, store->toRealPath(storePath) };
}
else {
auto path = absPath(elem.second);
if (pathExists(path))
res = { true, path };

View file

@ -5,14 +5,15 @@
#include "globals.hh"
#include "json-to-value.hh"
#include "names.hh"
#include "references.hh"
#include "store-api.hh"
#include "util.hh"
#include "json.hh"
#include "value-to-json.hh"
#include "value-to-xml.hh"
#include "primops.hh"
#include <boost/container/small_vector.hpp>
#include <nlohmann/json.hpp>
#include <sys/types.h>
#include <sys/stat.h>
@ -361,8 +362,7 @@ void prim_exec(EvalState & state, const PosIdx pos, Value * * args, Value & v)
auto output = runProgram(program, true, commandArgs);
Expr * parsed;
try {
auto base = state.positions[pos];
parsed = state.parseExprFromString(std::move(output), base.file);
parsed = state.parseExprFromString(std::move(output), "/");
} catch (Error & e) {
e.addTrace(state.positions[pos], "while parsing the output from '%1%'", program);
throw;
@ -585,7 +585,7 @@ struct CompareValues
state.error("cannot compare %s with %s; values of that type are incomparable", showType(*v1), showType(*v2)).debugThrow<EvalError>();
}
} catch (Error & e) {
e.addTrace(std::nullopt, errorCtx);
e.addTrace(nullptr, errorCtx);
throw;
}
}
@ -788,8 +788,8 @@ static void prim_addErrorContext(EvalState & state, const PosIdx pos, Value * *
v = *args[1];
} catch (Error & e) {
PathSet context;
e.addTrace(std::nullopt, state.coerceToString(pos, *args[0], context,
"while evaluating the error message passed to builtins.addErrorContext").toOwned());
e.addTrace(nullptr, state.coerceToString(pos, *args[0], context,
"while evaluating the error message passed to builtins.addErrorContext").toOwned());
throw;
}
}
@ -1003,6 +1003,7 @@ static void prim_second(EvalState & state, const PosIdx pos, Value * * args, Val
derivation. */
static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
using nlohmann::json;
state.forceAttrs(*args[0], pos, "while evaluating the argument passed to builtins.derivationStrict");
/* Figure out the name first (for stack backtraces). */
@ -1018,11 +1019,10 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
}
/* Check whether attributes should be passed as a JSON file. */
std::ostringstream jsonBuf;
std::unique_ptr<JSONObject> jsonObject;
std::optional<json> jsonObject;
attr = args[0]->attrs->find(state.sStructuredAttrs);
if (attr != args[0]->attrs->end() && state.forceBool(*attr->value, pos, "while evaluating the `__structuredAttrs` attribute passed to builtins.derivationStrict"))
jsonObject = std::make_unique<JSONObject>(jsonBuf);
jsonObject = json::object();
/* Check whether null attributes should be ignored. */
bool ignoreNulls = false;
@ -1128,8 +1128,7 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
if (i->name == state.sStructuredAttrs) continue;
auto placeholder(jsonObject->placeholder(key));
printValueAsJSON(state, true, *i->value, pos, placeholder, context);
(*jsonObject)[key] = printValueAsJSON(state, true, *i->value, pos, context);
if (i->name == state.sBuilder)
drv.builder = state.forceString(*i->value, context, posDrvName, "while evaluating the `builder` attribute passed to builtins.derivationStrict");
@ -1173,8 +1172,8 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
}
if (jsonObject) {
drv.env.emplace("__json", jsonObject->dump());
jsonObject.reset();
drv.env.emplace("__json", jsonBuf.str());
}
/* Everything in the context of the strings in the derivation
@ -1452,10 +1451,10 @@ static RegisterPrimOp primop_storePath({
static void prim_pathExists(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
/* We don't check the path right now, because we don't want to
throw if the path isn't allowed, but just return false (and we
can't just catch the exception here because we still want to
throw if something in the evaluation of `*args[0]` tries to
access an unauthorized path). */
throw if the path isn't allowed, but just return false (and we
can't just catch the exception here because we still want to
throw if something in the evaluation of `*args[0]` tries to
access an unauthorized path). */
auto path = realisePath(state, pos, *args[0], { .checkForPureEval = false });
try {
@ -1533,6 +1532,10 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value * * args, V
refs = state.store->queryPathInfo(state.store->toStorePath(path).first)->references;
} catch (Error &) { // FIXME: should be InvalidPathError
}
// Re-scan references to filter down to just the ones that actually occur in the file.
auto refsSink = PathRefScanSink::fromPaths(refs);
refsSink << s;
refs = refsSink.getResultPaths();
}
auto context = state.store->printStorePathSet(refs);
v.mkString(s, context);
@ -1913,8 +1916,8 @@ static RegisterPrimOp primop_toFile({
";
```
Note that `${configFile}` is an
[antiquotation](language-values.md), so the result of the
Note that `${configFile}` is a
[string interpolation](language/values.md#type-string), so the result of the
expression `configFile`
(i.e., a path like `/nix/store/m7p7jfny445k...-foo.conf`) will be
spliced into the resulting string.
@ -2379,12 +2382,18 @@ static RegisterPrimOp primop_listToAttrs({
Construct a set from a list specifying the names and values of each
attribute. Each element of the list should be a set consisting of a
string-valued attribute `name` specifying the name of the attribute,
and an attribute `value` specifying its value. Example:
and an attribute `value` specifying its value.
In case of duplicate occurrences of the same name, the first
takes precedence.
Example:
```nix
builtins.listToAttrs
[ { name = "foo"; value = 123; }
{ name = "bar"; value = 456; }
{ name = "bar"; value = 420; }
]
```
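Given the duplicate-handling rule above, this example evaluates to `{ foo = 123; bar = 456; }`; the later `bar = 420` entry is discarded because the first occurrence of the name wins.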
@ -2402,12 +2411,62 @@ static void prim_intersectAttrs(EvalState & state, const PosIdx pos, Value * * a
state.forceAttrs(*args[0], pos, "while evaluating the first argument passed to builtins.intersectAttrs");
state.forceAttrs(*args[1], pos, "while evaluating the second argument passed to builtins.intersectAttrs");
auto attrs = state.buildBindings(std::min(args[0]->attrs->size(), args[1]->attrs->size()));
Bindings &left = *args[0]->attrs;
Bindings &right = *args[1]->attrs;
for (auto & i : *args[0]->attrs) {
Bindings::iterator j = args[1]->attrs->find(i.name);
if (j != args[1]->attrs->end())
attrs.insert(*j);
auto attrs = state.buildBindings(std::min(left.size(), right.size()));
// The current implementation has good asymptotic complexity and is reasonably
// simple. Further optimization may be possible, but does not seem productive,
// considering the state of eval performance in 2022.
//
// I have looked for reusable and/or standard solutions and these are my
// findings:
//
// STL
// ===
// std::set_intersection is not suitable, as it only performs a simultaneous
// linear scan; not taking advantage of random access. This is O(n + m), so
// linear in the largest set, which is not acceptable for callPackage in Nixpkgs.
//
// Simultaneous scan, with alternating simple binary search
// ===
// One alternative algorithm scans the attrsets simultaneously, jumping
// forward using `lower_bound` in case of inequality. This should perform
// well on very similar sets, having a local and predictable access pattern.
// On dissimilar sets, it seems to need more comparisons than the current
// algorithm, as few consecutive attrs match. `lower_bound` could take
// advantage of the decreasing remaining search space, but this causes
// the medians to move, which can mean that they don't stay in the cache
// like they would with the current naive `find`.
//
// Double binary search
// ===
// The optimal algorithm may be "Double binary search", which doesn't
// scan at all, but rather divides both sets simultaneously.
// See "Fast Intersection Algorithms for Sorted Sequences" by Baeza-Yates et al.
// https://cs.uwaterloo.ca/~ajsaling/papers/intersection_alg_app10.pdf
// The only downsides I can think of are not having a linear access pattern
// for similar sets, and having to maintain a more intricate algorithm.
//
// Adaptive
// ===
// Finally, one could try a simultaneous scan, count misses and fall back
// to double binary search when the counter hits some threshold and/or ratio.
if (left.size() < right.size()) {
for (auto & l : left) {
Bindings::iterator r = right.find(l.name);
if (r != right.end())
attrs.insert(*r);
}
}
else {
for (auto & r : right) {
Bindings::iterator l = left.find(r.name);
if (l != left.end())
attrs.insert(r);
}
}
v.mkAttrs(attrs.alreadySorted());
@ -2419,6 +2478,8 @@ static RegisterPrimOp primop_intersectAttrs({
.doc = R"(
Return a set consisting of the attributes in the set *e2* which have the
same name as some attribute in *e1*.
Performs in O(*n* log *m*) where *n* is the size of the smaller set and *m* the larger set's size.
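For example (an illustrative evaluation, not part of the surrounding code):
```nix
builtins.intersectAttrs { a = 1; b = 2; } { b = 10; c = 20; }
```
evaluates to `{ b = 10; }`: only the name `b` occurs in both sets, and the value is taken from *e2*.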
)",
.fun = prim_intersectAttrs,
});
@ -3999,7 +4060,7 @@ void EvalState::createBaseEnv()
// the parser needs two NUL bytes as terminators; one of them
// is implied by being a C string.
"\0";
eval(parse(code, sizeof(code), foFile, derivationNixPath, "/", staticBaseEnv), *vDerivation);
eval(parse(code, sizeof(code), derivationNixPath, "/", staticBaseEnv), *vDerivation);
}

View file

@ -218,8 +218,6 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
} else
url = state.forceStringNoCtx(*args[0], pos, "while evaluating the url we should fetch");
url = resolveUri(*url);
state.checkURI(*url);
if (name == "")

View file

@ -12,6 +12,7 @@ namespace nix {
class LibExprTest : public ::testing::Test {
public:
static void SetUpTestSuite() {
initLibStore();
initGC();
}
@ -123,7 +124,7 @@ namespace nix {
MATCHER_P(IsAttrsOfSize, n, fmt("Is a set of size [%1%]", n)) {
if (arg.type() != nAttrs) {
*result_listener << "Expexted set got " << arg.type();
*result_listener << "Expected set got " << arg.type();
return false;
} else if (arg.attrs->size() != (size_t)n) {
*result_listener << "Expected a set with " << n << " attributes but got " << arg.attrs->size();

View file

@ -151,20 +151,7 @@ namespace nix {
// The `y` attribute is at position
const char* expr = "builtins.unsafeGetAttrPos \"y\" { y = \"x\"; }";
auto v = eval(expr);
ASSERT_THAT(v, IsAttrsOfSize(3));
auto file = v.attrs->find(createSymbol("file"));
ASSERT_NE(file, nullptr);
// FIXME: The file when running these tests is the input string?!?
ASSERT_THAT(*file->value, IsStringEq(expr));
auto line = v.attrs->find(createSymbol("line"));
ASSERT_NE(line, nullptr);
ASSERT_THAT(*line->value, IsIntEq(1));
auto column = v.attrs->find(createSymbol("column"));
ASSERT_NE(column, nullptr);
ASSERT_THAT(*column->value, IsIntEq(33));
ASSERT_THAT(v, IsNull());
}
TEST_F(PrimOpTest, hasAttr) {
@ -617,7 +604,7 @@ namespace nix {
TEST_F(PrimOpTest, storeDir) {
auto v = eval("builtins.storeDir");
ASSERT_THAT(v, IsStringEq("/nix/store"));
ASSERT_THAT(v, IsStringEq(settings.nixStore));
}
TEST_F(PrimOpTest, nixVersion) {

View file

@ -1,84 +1,82 @@
#include "value-to-json.hh"
#include "json.hh"
#include "eval-inline.hh"
#include "util.hh"
#include <cstdlib>
#include <iomanip>
#include <nlohmann/json.hpp>
namespace nix {
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore)
using json = nlohmann::json;
json printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, PathSet & context, bool copyToStore)
{
checkInterrupt();
if (strict) state.forceValue(v, pos);
json out;
switch (v.type()) {
case nInt:
out.write(v.integer);
out = v.integer;
break;
case nBool:
out.write(v.boolean);
out = v.boolean;
break;
case nString:
copyContext(v, context);
out.write(v.string.s);
out = v.string.s;
break;
case nPath:
if (copyToStore)
out.write(state.copyPathToStore(context, v.path));
out = state.copyPathToStore(context, v.path);
else
out.write(v.path);
out = v.path;
break;
case nNull:
out.write(nullptr);
break;
case nAttrs: {
auto maybeString = state.tryAttrsToString(pos, v, context, false, false);
if (maybeString) {
out.write(*maybeString);
out = *maybeString;
break;
}
auto i = v.attrs->find(state.sOutPath);
if (i == v.attrs->end()) {
auto obj(out.object());
out = json::object();
StringSet names;
for (auto & j : *v.attrs)
names.emplace(state.symbols[j.name]);
for (auto & j : names) {
Attr & a(*v.attrs->find(state.symbols.create(j)));
auto placeholder(obj.placeholder(j));
printValueAsJSON(state, strict, *a.value, a.pos, placeholder, context, copyToStore);
out[j] = printValueAsJSON(state, strict, *a.value, a.pos, context, copyToStore);
}
} else
printValueAsJSON(state, strict, *i->value, i->pos, out, context, copyToStore);
return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore);
break;
}
case nList: {
auto list(out.list());
for (auto elem : v.listItems()) {
auto placeholder(list.placeholder());
printValueAsJSON(state, strict, *elem, pos, placeholder, context, copyToStore);
}
out = json::array();
for (auto elem : v.listItems())
out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore));
break;
}
case nExternal:
v.external->printValueAsJSON(state, strict, out, context, copyToStore);
return v.external->printValueAsJSON(state, strict, context, copyToStore);
break;
case nFloat:
out.write(v.fpoint);
out = v.fpoint;
break;
case nThunk:
@ -91,17 +89,17 @@ void printValueAsJSON(EvalState & state, bool strict,
state.debugThrowLastTrace(e);
throw e;
}
return out;
}
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore)
{
JSONPlaceholder out(str);
printValueAsJSON(state, strict, v, pos, out, context, copyToStore);
str << printValueAsJSON(state, strict, v, pos, context, copyToStore);
}
void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
JSONPlaceholder & out, PathSet & context, bool copyToStore) const
json ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
PathSet & context, bool copyToStore) const
{
state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType()));
}

View file

@ -5,13 +5,12 @@
#include <string>
#include <map>
#include <nlohmann/json_fwd.hpp>
namespace nix {
class JSONPlaceholder;
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore = true);
nlohmann::json printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, PathSet & context, bool copyToStore = true);
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true);

View file

@ -24,7 +24,8 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
static void posToXML(EvalState & state, XMLAttrs & xmlAttrs, const Pos & pos)
{
xmlAttrs["path"] = pos.file;
if (auto path = std::get_if<Path>(&pos.origin))
xmlAttrs["path"] = *path;
xmlAttrs["line"] = (format("%1%") % pos.line).str();
xmlAttrs["column"] = (format("%1%") % pos.column).str();
}

View file

@ -7,6 +7,7 @@
#if HAVE_BOEHMGC
#include <gc/gc_allocator.h>
#endif
#include <nlohmann/json_fwd.hpp>
namespace nix {
@ -62,7 +63,6 @@ class StorePath;
class Store;
class EvalState;
class XMLWriter;
class JSONPlaceholder;
typedef int64_t NixInt;
@ -98,8 +98,8 @@ class ExternalValueBase
virtual bool operator ==(const ExternalValueBase & b) const;
/* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
virtual void printValueAsJSON(EvalState & state, bool strict,
JSONPlaceholder & out, PathSet & context, bool copyToStore = true) const;
virtual nlohmann::json printValueAsJSON(EvalState & state, bool strict,
PathSet & context, bool copyToStore = true) const;
/* Print the value as XML. Defaults to unevaluated */
virtual void printValueAsXML(EvalState & state, bool strict, bool location,

View file

@ -71,7 +71,12 @@ struct FetchSettings : public Config
"Whether to warn about dirty Git/Mercurial trees."};
Setting<std::string> flakeRegistry{this, "https://channels.nixos.org/flake-registry.json", "flake-registry",
"Path or URI of the global flake registry."};
R"(
Path or URI of the global flake registry.
When empty, disables the global flake registry.
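For instance, putting `flake-registry =` (an empty value) in `nix.conf` sets this option to the empty string and thereby disables lookups in the global registry; this is only meant as an illustration of the empty-string case described above.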
)"};
Setting<bool> useRegistries{this, true, "use-registries",
"Whether to use flake registries to resolve flake references."};

View file

@ -266,7 +266,7 @@ std::optional<time_t> Input::getLastModified() const
return {};
}
ParsedURL InputScheme::toURL(const Input & input)
ParsedURL InputScheme::toURL(const Input & input) const
{
throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs));
}
@ -274,7 +274,7 @@ ParsedURL InputScheme::toURL(const Input & input)
Input InputScheme::applyOverrides(
const Input & input,
std::optional<std::string> ref,
std::optional<Hash> rev)
std::optional<Hash> rev) const
{
if (ref)
throw Error("don't know how to set branch/tag name of input '%s' to '%s'", input.to_string(), *ref);
@ -293,7 +293,7 @@ void InputScheme::markChangedFile(const Input & input, std::string_view file, st
assert(false);
}
void InputScheme::clone(const Input & input, const Path & destDir)
void InputScheme::clone(const Input & input, const Path & destDir) const
{
throw Error("do not know how to clone input '%s'", input.to_string());
}

View file

@ -107,26 +107,25 @@ public:
* recognized. The Input object contains the information the fetcher
* needs to actually perform the "fetch()" when called.
*/
struct InputScheme
{
virtual ~InputScheme()
{ }
virtual std::optional<Input> inputFromURL(const ParsedURL & url) = 0;
virtual std::optional<Input> inputFromURL(const ParsedURL & url) const = 0;
virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) = 0;
virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) const = 0;
virtual ParsedURL toURL(const Input & input);
virtual ParsedURL toURL(const Input & input) const;
virtual bool hasAllInfo(const Input & input) = 0;
virtual bool hasAllInfo(const Input & input) const = 0;
virtual Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
std::optional<Hash> rev);
std::optional<Hash> rev) const;
virtual void clone(const Input & input, const Path & destDir);
virtual void clone(const Input & input, const Path & destDir) const;
virtual std::optional<Path> getSourcePath(const Input & input);

View file

@ -18,6 +18,7 @@
using namespace std::string_literals;
namespace nix::fetchers {
namespace {
// Explicit initial branch of our bare repo to suppress warnings from new version of git.
@ -26,23 +27,23 @@ namespace {
// old version of git, which will ignore unrecognized `-c` options.
const std::string gitInitialBranch = "__nix_dummy_branch";
bool isCacheFileWithinTtl(const time_t now, const struct stat & st)
bool isCacheFileWithinTtl(time_t now, const struct stat & st)
{
return st.st_mtime + settings.tarballTtl > now;
}
bool touchCacheFile(const Path& path, const time_t& touch_time)
bool touchCacheFile(const Path & path, time_t touch_time)
{
struct timeval times[2];
times[0].tv_sec = touch_time;
times[0].tv_usec = 0;
times[1].tv_sec = touch_time;
times[1].tv_usec = 0;
struct timeval times[2];
times[0].tv_sec = touch_time;
times[0].tv_usec = 0;
times[1].tv_sec = touch_time;
times[1].tv_usec = 0;
return lutimes(path.c_str(), times) == 0;
return lutimes(path.c_str(), times) == 0;
}
Path getCachePath(std::string key)
Path getCachePath(std::string_view key)
{
return getCacheDir() + "/nix/gitv3/" +
hashString(htSHA256, key).to_string(Base32, false);
@ -57,13 +58,12 @@ Path getCachePath(std::string key)
// ...
std::optional<std::string> readHead(const Path & path)
{
auto [exit_code, output] = runProgram(RunOptions {
auto [status, output] = runProgram(RunOptions {
.program = "git",
// FIXME: use 'HEAD' to avoid returning all refs
.args = {"ls-remote", "--symref", path},
});
if (exit_code != 0) {
return std::nullopt;
}
if (status != 0) return std::nullopt;
std::string_view line = output;
line = line.substr(0, line.find("\n"));
@ -82,12 +82,11 @@ std::optional<std::string> readHead(const Path & path)
}
// Persist the HEAD ref from the remote repo in the local cached repo.
bool storeCachedHead(const std::string& actualUrl, const std::string& headRef)
bool storeCachedHead(const std::string & actualUrl, const std::string & headRef)
{
Path cacheDir = getCachePath(actualUrl);
auto gitDir = ".";
try {
runProgram("git", true, { "-C", cacheDir, "--git-dir", gitDir, "symbolic-ref", "--", "HEAD", headRef });
runProgram("git", true, { "-C", cacheDir, "--git-dir", ".", "symbolic-ref", "--", "HEAD", headRef });
} catch (ExecError &e) {
if (!WIFEXITED(e.status)) throw;
return false;
@ -96,7 +95,7 @@ bool storeCachedHead(const std::string& actualUrl, const std::string& headRef)
return true;
}
std::optional<std::string> readHeadCached(const std::string& actualUrl)
std::optional<std::string> readHeadCached(const std::string & actualUrl)
{
// Create a cache path to store the branch of the HEAD ref. Append something
// in front of the URL to prevent collision with the repository itself.
@ -110,16 +109,15 @@ std::optional<std::string> readHeadCached(const std::string& actualUrl)
cachedRef = readHead(cacheDir);
if (cachedRef != std::nullopt &&
*cachedRef != gitInitialBranch &&
isCacheFileWithinTtl(now, st)) {
isCacheFileWithinTtl(now, st))
{
debug("using cached HEAD ref '%s' for repo '%s'", *cachedRef, actualUrl);
return cachedRef;
}
}
auto ref = readHead(actualUrl);
if (ref) {
return ref;
}
if (ref) return ref;
if (cachedRef) {
// If the cached git ref is expired in fetch() below, and the 'git fetch'
@ -250,7 +248,7 @@ std::pair<StorePath, Input> fetchFromWorkdir(ref<Store> store, Input & input, co
struct GitInputScheme : InputScheme
{
std::optional<Input> inputFromURL(const ParsedURL & url) override
std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "git" &&
url.scheme != "git+http" &&
@ -265,7 +263,7 @@ struct GitInputScheme : InputScheme
Attrs attrs;
attrs.emplace("type", "git");
for (auto &[name, value] : url.query) {
for (auto & [name, value] : url.query) {
if (name == "rev" || name == "ref")
attrs.emplace(name, value);
else if (name == "shallow" || name == "submodules")
@ -279,7 +277,7 @@ struct GitInputScheme : InputScheme
return inputFromAttrs(attrs);
}
std::optional<Input> inputFromAttrs(const Attrs & attrs) override
std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "git") return {};
@ -302,7 +300,7 @@ struct GitInputScheme : InputScheme
return input;
}
ParsedURL toURL(const Input & input) override
ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
if (url.scheme != "git") url.scheme = "git+" + url.scheme;
@ -313,7 +311,7 @@ struct GitInputScheme : InputScheme
return url;
}
bool hasAllInfo(const Input & input) override
bool hasAllInfo(const Input & input) const override
{
bool maybeDirty = !input.getRef();
bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false);
@ -325,7 +323,7 @@ struct GitInputScheme : InputScheme
Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
std::optional<Hash> rev) override
std::optional<Hash> rev) const override
{
auto res(input);
if (rev) res.attrs.insert_or_assign("rev", rev->gitRev());
@ -335,7 +333,7 @@ struct GitInputScheme : InputScheme
return res;
}
void clone(const Input & input, const Path & destDir) override
void clone(const Input & input, const Path & destDir) const override
{
auto [isLocal, actualUrl] = getActualUrl(input);
@ -485,6 +483,10 @@ struct GitInputScheme : InputScheme
}
input.attrs.insert_or_assign("ref", *head);
unlockedAttrs.insert_or_assign("ref", *head);
} else {
if (!input.getRev()) {
unlockedAttrs.insert_or_assign("ref", input.getRef().value());
}
}
if (auto res = getCache()->lookup(store, unlockedAttrs)) {
@ -599,9 +601,9 @@ struct GitInputScheme : InputScheme
{
throw Error(
"Cannot find Git revision '%s' in ref '%s' of repository '%s'! "
"Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD
"allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".",
"Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD
"allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".",
input.getRev()->gitRev(),
*input.getRef(),
actualUrl

View file

@ -26,11 +26,11 @@ std::regex hostRegex(hostRegexS, std::regex::ECMAScript);
struct GitArchiveInputScheme : InputScheme
{
virtual std::string type() = 0;
virtual std::string type() const = 0;
virtual std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const = 0;
std::optional<Input> inputFromURL(const ParsedURL & url) override
std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != type()) return {};
@ -100,7 +100,7 @@ struct GitArchiveInputScheme : InputScheme
return input;
}
std::optional<Input> inputFromAttrs(const Attrs & attrs) override
std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != type()) return {};
@ -116,7 +116,7 @@ struct GitArchiveInputScheme : InputScheme
return input;
}
ParsedURL toURL(const Input & input) override
ParsedURL toURL(const Input & input) const override
{
auto owner = getStrAttr(input.attrs, "owner");
auto repo = getStrAttr(input.attrs, "repo");
@ -132,7 +132,7 @@ struct GitArchiveInputScheme : InputScheme
};
}
bool hasAllInfo(const Input & input) override
bool hasAllInfo(const Input & input) const override
{
return input.getRev() && maybeGetIntAttr(input.attrs, "lastModified");
}
@ -140,7 +140,7 @@ struct GitArchiveInputScheme : InputScheme
Input applyOverrides(
const Input & _input,
std::optional<std::string> ref,
std::optional<Hash> rev) override
std::optional<Hash> rev) const override
{
auto input(_input);
if (rev && ref)
@ -227,7 +227,7 @@ struct GitArchiveInputScheme : InputScheme
struct GitHubInputScheme : GitArchiveInputScheme
{
std::string type() override { return "github"; }
std::string type() const override { return "github"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{
@ -240,14 +240,29 @@ struct GitHubInputScheme : GitArchiveInputScheme
return std::pair<std::string, std::string>("Authorization", fmt("token %s", token));
}
std::string getHost(const Input & input) const
{
return maybeGetStrAttr(input.attrs, "host").value_or("github.com");
}
std::string getOwner(const Input & input) const
{
return getStrAttr(input.attrs, "owner");
}
std::string getRepo(const Input & input) const
{
return getStrAttr(input.attrs, "repo");
}
Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
auto host = getHost(input);
auto url = fmt(
host == "github.com"
? "https://api.%s/repos/%s/%s/commits/%s"
: "https://%s/api/v3/repos/%s/%s/commits/%s",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef());
host, getOwner(input), getRepo(input), *input.getRef());
Headers headers = makeHeadersWithAuthTokens(host);
@ -262,25 +277,30 @@ struct GitHubInputScheme : GitArchiveInputScheme
DownloadUrl getDownloadUrl(const Input & input) const override
{
// FIXME: use regular /archive URLs instead? api.github.com
// might have stricter rate limits.
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
auto url = fmt(
host == "github.com"
? "https://api.%s/repos/%s/%s/tarball/%s"
: "https://%s/api/v3/repos/%s/%s/tarball/%s",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
input.getRev()->to_string(Base16, false));
auto host = getHost(input);
Headers headers = makeHeadersWithAuthTokens(host);
// If we have no auth headers then we default to the public archive
// urls so we do not run into rate limits.
const auto urlFmt =
host != "github.com"
? "https://%s/api/v3/repos/%s/%s/tarball/%s"
: headers.empty()
? "https://%s/%s/%s/archive/%s.tar.gz"
: "https://api.%s/repos/%s/%s/tarball/%s";
const auto url = fmt(urlFmt, host, getOwner(input), getRepo(input),
input.getRev()->to_string(Base16, false));
return DownloadUrl { url, headers };
}
void clone(const Input & input, const Path & destDir) override
void clone(const Input & input, const Path & destDir) const override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
auto host = getHost(input);
Input::fromURL(fmt("git+https://%s/%s/%s.git",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
host, getOwner(input), getRepo(input)))
.applyOverrides(input.getRef(), input.getRev())
.clone(destDir);
}
@ -288,7 +308,7 @@ struct GitHubInputScheme : GitArchiveInputScheme
struct GitLabInputScheme : GitArchiveInputScheme
{
std::string type() override { return "gitlab"; }
std::string type() const override { return "gitlab"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{
@ -343,7 +363,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
return DownloadUrl { url, headers };
}
void clone(const Input & input, const Path & destDir) override
void clone(const Input & input, const Path & destDir) const override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
// FIXME: get username somewhere
@ -356,7 +376,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
struct SourceHutInputScheme : GitArchiveInputScheme
{
std::string type() override { return "sourcehut"; }
std::string type() const override { return "sourcehut"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{
@ -430,7 +450,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme
return DownloadUrl { url, headers };
}
void clone(const Input & input, const Path & destDir) override
void clone(const Input & input, const Path & destDir) const override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("git.sr.ht");
Input::fromURL(fmt("git+https://%s/%s/%s",

View file

@ -7,7 +7,7 @@ std::regex flakeRegex("[a-zA-Z][a-zA-Z0-9_-]*", std::regex::ECMAScript);
struct IndirectInputScheme : InputScheme
{
std::optional<Input> inputFromURL(const ParsedURL & url) override
std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "flake") return {};
@ -50,7 +50,7 @@ struct IndirectInputScheme : InputScheme
return input;
}
std::optional<Input> inputFromAttrs(const Attrs & attrs) override
std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "indirect") return {};
@ -68,7 +68,7 @@ struct IndirectInputScheme : InputScheme
return input;
}
ParsedURL toURL(const Input & input) override
ParsedURL toURL(const Input & input) const override
{
ParsedURL url;
url.scheme = "flake";
@ -78,7 +78,7 @@ struct IndirectInputScheme : InputScheme
return url;
}
bool hasAllInfo(const Input & input) override
bool hasAllInfo(const Input & input) const override
{
return false;
}
@ -86,7 +86,7 @@ struct IndirectInputScheme : InputScheme
Input applyOverrides(
const Input & _input,
std::optional<std::string> ref,
std::optional<Hash> rev) override
std::optional<Hash> rev) const override
{
auto input(_input);
if (rev) input.attrs.insert_or_assign("rev", rev->gitRev());

View file

@ -43,7 +43,7 @@ static std::string runHg(const Strings & args, const std::optional<std::string>
struct MercurialInputScheme : InputScheme
{
std::optional<Input> inputFromURL(const ParsedURL & url) override
std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "hg+http" &&
url.scheme != "hg+https" &&
@ -69,7 +69,7 @@ struct MercurialInputScheme : InputScheme
return inputFromAttrs(attrs);
}
std::optional<Input> inputFromAttrs(const Attrs & attrs) override
std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "hg") return {};
@ -89,7 +89,7 @@ struct MercurialInputScheme : InputScheme
return input;
}
ParsedURL toURL(const Input & input) override
ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
url.scheme = "hg+" + url.scheme;
@ -98,7 +98,7 @@ struct MercurialInputScheme : InputScheme
return url;
}
bool hasAllInfo(const Input & input) override
bool hasAllInfo(const Input & input) const override
{
// FIXME: ugly, need to distinguish between dirty and clean
// default trees.
@ -108,7 +108,7 @@ struct MercurialInputScheme : InputScheme
Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
std::optional<Hash> rev) override
std::optional<Hash> rev) const override
{
auto res(input);
if (rev) res.attrs.insert_or_assign("rev", rev->gitRev());

View file

@ -6,7 +6,7 @@ namespace nix::fetchers {
struct PathInputScheme : InputScheme
{
std::optional<Input> inputFromURL(const ParsedURL & url) override
std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "path") return {};
@ -32,7 +32,7 @@ struct PathInputScheme : InputScheme
return input;
}
std::optional<Input> inputFromAttrs(const Attrs & attrs) override
std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "path") return {};
@ -54,7 +54,7 @@ struct PathInputScheme : InputScheme
return input;
}
ParsedURL toURL(const Input & input) override
ParsedURL toURL(const Input & input) const override
{
auto query = attrsToQuery(input.attrs);
query.erase("path");
@ -66,7 +66,7 @@ struct PathInputScheme : InputScheme
};
}
bool hasAllInfo(const Input & input) override
bool hasAllInfo(const Input & input) const override
{
return true;
}

View file

@ -153,6 +153,9 @@ static std::shared_ptr<Registry> getGlobalRegistry(ref<Store> store)
{
static auto reg = [&]() {
auto path = fetchSettings.flakeRegistry.get();
if (path == "") {
return std::make_shared<Registry>(Registry::Global); // empty registry
}
if (!hasPrefix(path, "/")) {
auto storePath = downloadFile(store, path, "flake-registry.json", false).storePath;

View file

@ -185,7 +185,7 @@ struct CurlInputScheme : InputScheme
virtual bool isValidURL(const ParsedURL & url) const = 0;
std::optional<Input> inputFromURL(const ParsedURL & url) override
std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (!isValidURL(url))
return std::nullopt;
@ -203,7 +203,7 @@ struct CurlInputScheme : InputScheme
return input;
}
std::optional<Input> inputFromAttrs(const Attrs & attrs) override
std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
auto type = maybeGetStrAttr(attrs, "type");
if (type != inputType()) return {};
@ -220,16 +220,17 @@ struct CurlInputScheme : InputScheme
return input;
}
ParsedURL toURL(const Input & input) override
ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
// NAR hashes are preferred over file hashes since tar/zip files // don't have a canonical representation.
// NAR hashes are preferred over file hashes since tar/zip
// files don't have a canonical representation.
if (auto narHash = input.getNarHash())
url.query.insert_or_assign("narHash", narHash->to_string(SRI, true));
return url;
}
bool hasAllInfo(const Input & input) override
bool hasAllInfo(const Input & input) const override
{
return true;
}

View file

@ -132,7 +132,7 @@ public:
log(*state, lvl, fs.s);
}
void logEI(const ErrorInfo &ei) override
void logEI(const ErrorInfo & ei) override
{
auto state(state_.lock());
@ -180,10 +180,12 @@ public:
auto machineName = getS(fields, 1);
if (machineName != "")
i->s += fmt(" on " ANSI_BOLD "%s" ANSI_NORMAL, machineName);
auto curRound = getI(fields, 2);
auto nrRounds = getI(fields, 3);
if (nrRounds != 1)
i->s += fmt(" (round %d/%d)", curRound, nrRounds);
// Used to be curRound and nrRounds, but the
// implementation was broken for a long time.
if (getI(fields, 2) != 1 || getI(fields, 3) != 1) {
throw Error("log message indicated repeating builds, but this is not currently implemented");
}
i->name = DrvName(name).name;
}

View file

@ -33,6 +33,7 @@
namespace nix {
char * * savedArgv;
static bool gcWarning = true;
@ -234,6 +235,7 @@ void initNix()
#endif
preloadNSS();
initLibStore();
}

View file

@ -9,7 +9,6 @@
#include "remote-fs-accessor.hh"
#include "nar-info-disk-cache.hh"
#include "nar-accessor.hh"
#include "json.hh"
#include "thread-pool.hh"
#include "callback.hh"
@ -194,19 +193,12 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
/* Optionally write a JSON file containing a listing of the
contents of the NAR. */
if (writeNARListing) {
std::ostringstream jsonOut;
nlohmann::json j = {
{"version", 1},
{"root", listNar(ref<FSAccessor>(narAccessor), "", true)},
};
{
JSONObject jsonRoot(jsonOut);
jsonRoot.attr("version", 1);
{
auto res = jsonRoot.placeholder("root");
listNar(res, ref<FSAccessor>(narAccessor), "", true);
}
}
upsertFile(std::string(info.path.hashPart()) + ".ls", jsonOut.str(), "application/json");
upsertFile(std::string(info.path.hashPart()) + ".ls", j.dump(), "application/json");
}
/* Optionally maintain an index of DWARF debug info files
@ -331,6 +323,17 @@ bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath)
return fileExists(narInfoFileFor(storePath));
}
std::optional<StorePath> BinaryCacheStore::queryPathFromHashPart(const std::string & hashPart)
{
auto pseudoPath = StorePath(hashPart + "-" + MissingName);
try {
auto info = queryPathInfo(pseudoPath);
return info->path;
} catch (InvalidPath &) {
return std::nullopt;
}
}
void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
{
auto info = queryPathInfo(storePath).cast<const NarInfo>();
@ -343,7 +346,7 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
try {
getFile(info->url, *decompressor);
} catch (NoSuchBinaryCacheFile & e) {
throw SubstituteGone(e.info());
throw SubstituteGone(std::move(e.info()));
}
decompressor->finish();

View file

@ -95,8 +95,7 @@ public:
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair, CheckSigsFlag checkSigs) override;

View file

@ -5,7 +5,7 @@
#include <string>
#include <chrono>
#include <optional>
namespace nix {
@ -78,6 +78,9 @@ struct BuildResult
was repeated). */
time_t startTime = 0, stopTime = 0;
/* User and system CPU time the build took. */
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
bool success()
{
return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;

View file

@ -7,7 +7,6 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
#include "json.hh"
#include "compression.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
@ -40,7 +39,6 @@
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
#include <sys/personality.h>
#include <sys/mman.h>
#include <sched.h>
#include <sys/param.h>
@ -135,7 +133,7 @@ void DerivationGoal::killChild()
void DerivationGoal::timedOut(Error && ex)
{
killChild();
done(BuildResult::TimedOut, {}, ex);
done(BuildResult::TimedOut, {}, std::move(ex));
}
@ -502,6 +500,14 @@ void DerivationGoal::inputsRealised()
now-known results of dependencies. If so, we become a
stub goal aliasing that resolved derivation goal. */
std::optional attempt = fullDrv.tryResolve(worker.store, inputDrvOutputs);
if (!attempt) {
/* TODO (impure derivations-induced tech debt) (see below):
The above attempt should have found it, but because we manage
inputDrvOutputs statefully, sometimes it gets out of sync with
the real source of truth (store). So we query the store
directly if there's a problem. */
attempt = fullDrv.tryResolve(worker.store);
}
assert(attempt);
Derivation drvResolved { *std::move(attempt) };
@ -528,13 +534,31 @@ void DerivationGoal::inputsRealised()
/* Add the relevant output closures of the input derivation
`i' as input paths. Only add the closures of output paths
that are specified as inputs. */
for (auto & j : wantedDepOutputs)
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j }))
for (auto & j : wantedDepOutputs) {
/* TODO (impure derivations-induced tech debt):
Tracking input derivation outputs statefully through the
goals is error prone and has led to bugs.
For a robust nix, we need to move towards the `else` branch,
which does not rely on goal state to match up with the
reality of the store, which is our real source of truth.
However, the impure derivations feature still relies on this
fragile way of doing things, because its builds do not have
a representation in the store, which is a usability problem
in itself */
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j })) {
worker.store.computeFSClosure(*outPath, inputPaths);
else
throw Error(
"derivation '%s' requires non-existent output '%s' from input derivation '%s'",
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
}
else {
auto outMap = worker.evalStore.queryDerivationOutputMap(depDrvPath);
auto outMapPath = outMap.find(j);
if (outMapPath == outMap.end()) {
throw Error(
"derivation '%s' requires non-existent output '%s' from input derivation '%s'",
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
}
worker.store.computeFSClosure(outMapPath->second, inputPaths);
}
}
}
}
@ -546,10 +570,6 @@ void DerivationGoal::inputsRealised()
/* What type of derivation are we building? */
derivationType = drv->type();
/* Don't repeat fixed-output derivations since they're already
verified by their output hash.*/
nrRounds = derivationType.isFixed() ? 1 : settings.buildRepeat + 1;
/* Okay, try to build. Note that here we don't wait for a build
slot to become available, since we don't need one if there is a
build hook. */
@ -564,12 +584,11 @@ void DerivationGoal::started()
auto msg = fmt(
buildMode == bmRepair ? "repairing outputs of '%s'" :
buildMode == bmCheck ? "checking outputs of '%s'" :
nrRounds > 1 ? "building '%s' (round %d/%d)" :
"building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
"building '%s'", worker.store.printStorePath(drvPath));
fmt("building '%s'", worker.store.printStorePath(drvPath));
if (hook) msg += fmt(" on '%s'", machineName);
act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", 1, 1});
mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
worker.updateProgress();
}
@ -869,6 +888,14 @@ void DerivationGoal::buildDone()
cleanupPostChildKill();
if (buildResult.cpuUser && buildResult.cpuSystem) {
debug("builder for '%s' terminated with status %d, user CPU %.3fs, system CPU %.3fs",
worker.store.printStorePath(drvPath),
status,
((double) buildResult.cpuUser->count()) / 1000000,
((double) buildResult.cpuSystem->count()) / 1000000);
}
bool diskFull = false;
try {
@ -915,14 +942,6 @@ void DerivationGoal::buildDone()
cleanupPostOutputsRegisteredModeNonCheck();
/* Repeat the build if necessary. */
if (curRound++ < nrRounds) {
outputLocks.unlock();
state = &DerivationGoal::tryToBuild;
worker.wakeUp(shared_from_this());
return;
}
/* It is now safe to delete the lock files, since all future
lockers will see that the output paths are valid; they will
not create new lock files with the same names as the old
@ -951,7 +970,7 @@ void DerivationGoal::buildDone()
BuildResult::PermanentFailure;
}
done(st, {}, e);
done(st, {}, std::move(e));
return;
}
}
@ -983,22 +1002,34 @@ void DerivationGoal::resolvedFinished()
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)",
worker.store.printStorePath(drvPath), wantedOutput);
auto realisation = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
if (!realisation)
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
auto realisation = [&]{
auto take1 = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
if (take1) return *take1;
/* The above `get` should work. But due to stateful tracking of
outputs in resolvedResult, it can get out of sync with the
store, which is our actual source of truth. For now we just
check the store directly if it fails. */
auto take2 = worker.evalStore.queryRealisation(DrvOutput { *resolvedHash, wantedOutput });
if (take2) return *take2;
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
}();
if (drv->type().isPure()) {
auto newRealisation = *realisation;
auto newRealisation = realisation;
newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput };
newRealisation.signatures.clear();
if (!drv->type().isFixed())
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation.outPath);
signRealisation(newRealisation);
worker.store.registerDrvOutput(newRealisation);
}
outputPaths.insert(realisation->outPath);
builtOutputs.emplace(realisation->id, *realisation);
outputPaths.insert(realisation.outPath);
builtOutputs.emplace(realisation.id, realisation);
}
runPostBuildHook(
@ -1402,7 +1433,7 @@ void DerivationGoal::done(
fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl;
}
amDone(buildResult.success() ? ecSuccess : ecFailed, ex);
amDone(buildResult.success() ? ecSuccess : ecFailed, std::move(ex));
}

View file

@ -115,11 +115,6 @@ struct DerivationGoal : public Goal
BuildMode buildMode;
/* The current round, if we're building multiple times. */
size_t curRound = 1;
size_t nrRounds;
std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;
std::unique_ptr<Activity> act;

View file

@ -30,7 +30,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
if (ex)
logError(i->ex->info());
else
ex = i->ex;
ex = std::move(i->ex);
}
if (i->exitCode != Goal::ecSuccess) {
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
@ -40,7 +40,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
if (failed.size() == 1 && ex) {
ex->status = worker.exitStatus();
throw *ex;
throw std::move(*ex);
} else if (!failed.empty()) {
if (ex) logError(ex->info());
throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
@ -109,7 +109,7 @@ void Store::ensurePath(const StorePath & path)
if (goal->exitCode != Goal::ecSuccess) {
if (goal->ex) {
goal->ex->status = worker.exitStatus();
throw *goal->ex;
throw std::move(*goal->ex);
} else
throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
}

View file

@ -16,11 +16,11 @@ HookInstance::HookInstance()
buildHookArgs.pop_front();
Strings args;
args.push_back(std::string(baseNameOf(buildHook)));
for (auto & arg : buildHookArgs)
args.push_back(arg);
args.push_back(std::string(baseNameOf(settings.buildHook.get())));
args.push_back(std::to_string(verbosity));
/* Create a pipe to get the output of the child. */

View file

@ -8,13 +8,14 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
#include "json.hh"
#include "compression.hh"
#include "daemon.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "json-utils.hh"
#include "cgroup.hh"
#include "personality.hh"
#include <regex>
#include <queue>
@ -24,7 +25,6 @@
#include <termios.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <sys/socket.h>
@ -37,7 +37,6 @@
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
#include <sys/personality.h>
#include <sys/mman.h>
#include <sched.h>
#include <sys/param.h>
@ -56,6 +55,7 @@
#include <pwd.h>
#include <grp.h>
#include <iostream>
namespace nix {
@ -129,26 +129,44 @@ void LocalDerivationGoal::killChild()
if (pid != -1) {
worker.childTerminated(this);
if (buildUser) {
/* If we're using a build user, then there is a tricky
race condition: if we kill the build user before the
child has done its setuid() to the build user uid, then
it won't be killed, and we'll potentially lock up in
pid.wait(). So also send a conventional kill to the
child. */
::kill(-pid, SIGKILL); /* ignore the result */
buildUser->kill();
pid.wait();
} else
pid.kill();
/* If we're using a build user, then there is a tricky race
condition: if we kill the build user before the child has
done its setuid() to the build user uid, then it won't be
killed, and we'll potentially lock up in pid.wait(). So
also send a conventional kill to the child. */
::kill(-pid, SIGKILL); /* ignore the result */
assert(pid == -1);
killSandbox(true);
pid.wait();
}
DerivationGoal::killChild();
}
void LocalDerivationGoal::killSandbox(bool getStats)
{
if (cgroup) {
#if __linux__
auto stats = destroyCgroup(*cgroup);
if (getStats) {
buildResult.cpuUser = stats.cpuUser;
buildResult.cpuSystem = stats.cpuSystem;
}
#else
abort();
#endif
}
else if (buildUser) {
auto uid = buildUser->getUID();
assert(uid != 0);
killUser(uid);
}
}
void LocalDerivationGoal::tryLocalBuild() {
unsigned int curBuilds = worker.getNrLocalBuilds();
if (curBuilds >= settings.maxBuildJobs) {
@ -158,28 +176,46 @@ void LocalDerivationGoal::tryLocalBuild() {
return;
}
/* If `build-users-group' is not empty, then we have to build as
one of the members of that group. */
if (settings.buildUsersGroup != "" && getuid() == 0) {
#if defined(__linux__) || defined(__APPLE__)
if (!buildUser) buildUser = std::make_unique<UserLock>();
/* Are we doing a chroot build? */
{
auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
if (noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
useChroot = true;
}
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType.isSandboxed() && !noChroot;
}
if (buildUser->findFreeUser()) {
/* Make sure that no other processes are executing under this
uid. */
buildUser->kill();
} else {
auto & localStore = getLocalStore();
if (localStore.storeDir != localStore.realStoreDir.get()) {
#if __linux__
useChroot = true;
#else
throw Error("building using a diverted store is not supported on this platform");
#endif
}
if (useBuildUsers()) {
if (!buildUser)
buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot);
if (!buildUser) {
if (!actLock)
actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
fmt("waiting for UID to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
worker.waitForAWhile(shared_from_this());
return;
}
#else
/* Don't know how to block the creation of setuid/setgid
binaries on this platform. */
throw Error("build users are not supported on this platform for security reasons");
#endif
}
actLock.reset();
@ -193,7 +229,7 @@ void LocalDerivationGoal::tryLocalBuild() {
outputLocks.unlock();
buildUser.reset();
worker.permanentFailure = true;
done(BuildResult::InputRejected, {}, e);
done(BuildResult::InputRejected, {}, std::move(e));
return;
}
@ -270,7 +306,7 @@ void LocalDerivationGoal::cleanupPostChildKill()
malicious user from leaving behind a process that keeps files
open and modifies them after they have been chown'ed to
root. */
if (buildUser) buildUser->kill();
killSandbox(true);
/* Terminate the recursive Nix daemon. */
stopDaemon();
@ -363,6 +399,64 @@ static void linkOrCopy(const Path & from, const Path & to)
void LocalDerivationGoal::startBuilder()
{
if ((buildUser && buildUser->getUIDCount() != 1)
#if __linux__
|| settings.useCgroups
#endif
)
{
#if __linux__
settings.requireExperimentalFeature(Xp::Cgroups);
auto cgroupFS = getCgroupFS();
if (!cgroupFS)
throw Error("cannot determine the cgroups file system");
auto ourCgroups = getCgroups("/proc/self/cgroup");
auto ourCgroup = ourCgroups[""];
if (ourCgroup == "")
throw Error("cannot determine cgroup name from /proc/self/cgroup");
auto ourCgroupPath = canonPath(*cgroupFS + "/" + ourCgroup);
if (!pathExists(ourCgroupPath))
throw Error("expected cgroup directory '%s'", ourCgroupPath);
static std::atomic<unsigned int> counter{0};
cgroup = buildUser
? fmt("%s/nix-build-uid-%d", ourCgroupPath, buildUser->getUID())
: fmt("%s/nix-build-pid-%d-%d", ourCgroupPath, getpid(), counter++);
debug("using cgroup '%s'", *cgroup);
/* When using a build user, record the cgroup we used for that
user so that if we got interrupted previously, we can kill
any left-over cgroup first. */
if (buildUser) {
auto cgroupsDir = settings.nixStateDir + "/cgroups";
createDirs(cgroupsDir);
auto cgroupFile = fmt("%s/%d", cgroupsDir, buildUser->getUID());
if (pathExists(cgroupFile)) {
auto prevCgroup = readFile(cgroupFile);
destroyCgroup(prevCgroup);
}
writeFile(cgroupFile, *cgroup);
}
#else
throw Error("cgroups are not supported on this platform");
#endif
}
/* Make sure that no other processes are executing under the
sandbox uids. This must be done before any chownToBuilder()
calls. */
killSandbox(false);
/* Right platform? */
if (!parsedDrv->canBuildLocally(worker.store))
throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
@ -376,35 +470,6 @@ void LocalDerivationGoal::startBuilder()
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
/* Are we doing a chroot build? */
{
auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
if (noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
useChroot = true;
}
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType.isSandboxed() && !noChroot;
}
auto & localStore = getLocalStore();
if (localStore.storeDir != localStore.realStoreDir.get()) {
#if __linux__
useChroot = true;
#else
throw Error("building using a diverted store is not supported on this platform");
#endif
}
/* Create a temporary directory where the build will take
place. */
tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
@ -580,10 +645,11 @@ void LocalDerivationGoal::startBuilder()
printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir);
if (mkdir(chrootRootDir.c_str(), 0750) == -1)
// FIXME: make this 0700
if (mkdir(chrootRootDir.c_str(), buildUser && buildUser->getUIDCount() != 1 ? 0755 : 0750) == -1)
throw SysError("cannot create '%1%'", chrootRootDir);
if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1)
if (buildUser && chown(chrootRootDir.c_str(), buildUser->getUIDCount() != 1 ? buildUser->getUID() : 0, buildUser->getGID()) == -1)
throw SysError("cannot change ownership of '%1%'", chrootRootDir);
/* Create a writable /tmp in the chroot. Many builders need
@ -597,6 +663,10 @@ void LocalDerivationGoal::startBuilder()
nobody account. The latter is kind of a hack to support
Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc");
chownToBuilder(chrootRootDir + "/etc");
if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536))
throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name);
/* Declare the build user's group so that programs get a consistent
view of the system (e.g., "id -gn"). */
@ -647,12 +717,28 @@ void LocalDerivationGoal::startBuilder()
dirsInChroot.erase(worker.store.printStorePath(*i.second.second));
}
#elif __APPLE__
/* We don't really have any parent prep work to do (yet?)
All work happens in the child, instead. */
if (cgroup) {
if (mkdir(cgroup->c_str(), 0755) != 0)
throw SysError("creating cgroup '%s'", *cgroup);
chownToBuilder(*cgroup);
chownToBuilder(*cgroup + "/cgroup.procs");
chownToBuilder(*cgroup + "/cgroup.threads");
//chownToBuilder(*cgroup + "/cgroup.subtree_control");
}
#else
throw Error("sandboxing builds is not supported on this platform");
if (parsedDrv->useUidRange())
throw Error("feature 'uid-range' is not supported on this platform");
#if __APPLE__
/* We don't really have any parent prep work to do (yet?)
All work happens in the child, instead. */
#else
throw Error("sandboxing builds is not supported on this platform");
#endif
#endif
} else {
if (parsedDrv->useUidRange())
throw Error("feature 'uid-range' is only supported in sandboxed builds");
}
if (needsHashRewrite() && pathExists(homeDir))
@ -913,14 +999,16 @@ void LocalDerivationGoal::startBuilder()
the calling user (if build users are disabled). */
uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
uid_t nrIds = buildUser ? buildUser->getUIDCount() : 1;
writeFile("/proc/" + std::to_string(pid) + "/uid_map",
fmt("%d %d 1", sandboxUid(), hostUid));
fmt("%d %d %d", sandboxUid(), hostUid, nrIds));
writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
if (!buildUser || buildUser->getUIDCount() == 1)
writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
writeFile("/proc/" + std::to_string(pid) + "/gid_map",
fmt("%d %d 1", sandboxGid(), hostGid));
fmt("%d %d %d", sandboxGid(), hostGid, nrIds));
} else {
debug("note: not using a user namespace");
if (!buildUser)
@ -947,6 +1035,10 @@ void LocalDerivationGoal::startBuilder()
throw SysError("getting sandbox user namespace");
}
/* Move the child into its own cgroup. */
if (cgroup)
writeFile(*cgroup + "/cgroup.procs", fmt("%d", (pid_t) pid));
/* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1");
@ -1552,6 +1644,22 @@ void setupSeccomp()
seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0)
printError("unable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes");
if (nativeSystem == "mips64-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPS) != 0)
printError("unable to add mips seccomp architecture");
if (nativeSystem == "mips64-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPS64N32) != 0)
printError("unable to add mips64-*abin32 seccomp architecture");
if (nativeSystem == "mips64el-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL) != 0)
printError("unable to add mipsel seccomp architecture");
if (nativeSystem == "mips64el-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL64N32) != 0)
printError("unable to add mips64el-*abin32 seccomp architecture");
/* Prevent builders from creating setuid/setgid binaries. */
for (int perm : { S_ISUID, S_ISGID }) {
if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1,
@ -1763,6 +1871,13 @@ void LocalDerivationGoal::runChild()
if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
throw SysError("mounting /proc");
/* Mount sysfs on /sys. */
if (buildUser && buildUser->getUIDCount() != 1) {
createDirs(chrootRootDir + "/sys");
if (mount("none", (chrootRootDir + "/sys").c_str(), "sysfs", 0, 0) == -1)
throw SysError("mounting /sys");
}
/* Mount a new tmpfs on /dev/shm to ensure that whatever
the builder puts in /dev/shm is cleaned up automatically. */
if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
@ -1805,6 +1920,12 @@ void LocalDerivationGoal::runChild()
if (unshare(CLONE_NEWNS) == -1)
throw SysError("unsharing mount namespace");
/* Unshare the cgroup namespace. This means
/proc/self/cgroup will show the child's cgroup as '/'
rather than whatever it is in the parent. */
if (cgroup && unshare(CLONE_NEWCGROUP) == -1)
throw SysError("unsharing cgroup namespace");
/* Do the chroot(). */
if (chdir(chrootRootDir.c_str()) == -1)
throw SysError("cannot change directory to '%1%'", chrootRootDir);
@ -1842,33 +1963,7 @@ void LocalDerivationGoal::runChild()
/* Close all other file descriptors. */
closeMostFDs({STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO});
#if __linux__
/* Change the personality to 32-bit if we're doing an
i686-linux build on an x86_64-linux machine. */
struct utsname utsbuf;
uname(&utsbuf);
if ((drv->platform == "i686-linux"
&& (settings.thisSystem == "x86_64-linux"
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|| drv->platform == "armv7l-linux"
|| drv->platform == "armv6l-linux")
{
if (personality(PER_LINUX32) == -1)
throw SysError("cannot set 32-bit personality");
}
/* Impersonate a Linux 2.6 machine to get some determinism in
builds that depend on the kernel version. */
if ((drv->platform == "i686-linux" || drv->platform == "x86_64-linux") && settings.impersonateLinux26) {
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
}
/* Disable address space randomization for improved
determinism. */
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
#endif
setPersonality(drv->platform);
/* Disable core dumps by default. */
struct rlimit limit = { 0, RLIM_INFINITY };
@ -1890,9 +1985,8 @@ void LocalDerivationGoal::runChild()
if (setUser && buildUser) {
/* Preserve supplementary groups of the build user, to allow
admins to specify groups such as "kvm". */
if (!buildUser->getSupplementaryGIDs().empty() &&
setgroups(buildUser->getSupplementaryGIDs().size(),
buildUser->getSupplementaryGIDs().data()) == -1)
auto gids = buildUser->getSupplementaryGIDs();
if (setgroups(gids.size(), gids.data()) == -1)
throw SysError("cannot set supplementary groups of build user");
if (setgid(buildUser->getGID()) == -1 ||
@ -2139,7 +2233,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
InodesSeen inodesSeen;
Path checkSuffix = ".check";
bool keepPreviousRound = settings.keepFailed || settings.runDiffHook;
std::exception_ptr delayedException;
@ -2221,7 +2314,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Canonicalise first. This ensures that the path we're
rewriting doesn't contain a hard link to /etc/shadow or
something like that. */
canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen);
canonicalisePathMetaData(
actualPath,
buildUser ? std::optional(buildUser->getUIDRange()) : std::nullopt,
inodesSeen);
debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);
@ -2314,6 +2410,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
sink.s = rewriteStrings(sink.s, outputRewrites);
StringSource source(sink.s);
restorePath(actualPath, source);
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, {}, inodesSeen);
}
};
@ -2476,7 +2576,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, -1, inodesSeen);
canonicalisePathMetaData(actualPath, {}, inodesSeen);
/* Calculate where we'll move the output files. In the checking case we
will leave them where they are, for now, rather than move to
@ -2560,10 +2660,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
}
if (curRound == nrRounds) {
localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
worker.markContentsGood(newInfo.path);
}
localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
worker.markContentsGood(newInfo.path);
newInfo.deriver = drvPath;
newInfo.ultimate = true;
@ -2592,61 +2690,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Apply output checks. */
checkOutputs(infos);
/* Compare the result with the previous round, and report which
path is different, if any.*/
if (curRound > 1 && prevInfos != infos) {
assert(prevInfos.size() == infos.size());
for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
if (!(*i == *j)) {
buildResult.isNonDeterministic = true;
Path prev = worker.store.printStorePath(i->second.path) + checkSuffix;
bool prevExists = keepPreviousRound && pathExists(prev);
hintformat hint = prevExists
? hintfmt("output '%s' of '%s' differs from '%s' from previous round",
worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath), prev)
: hintfmt("output '%s' of '%s' differs from previous round",
worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath));
handleDiffHook(
buildUser ? buildUser->getUID() : getuid(),
buildUser ? buildUser->getGID() : getgid(),
prev, worker.store.printStorePath(i->second.path),
worker.store.printStorePath(drvPath), tmpDir);
if (settings.enforceDeterminism)
throw NotDeterministic(hint);
printError(hint);
curRound = nrRounds; // we know enough, bail out early
}
}
/* If this is the first round of several, then move the output out of the way. */
if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
for (auto & [_, outputStorePath] : finalOutputs) {
auto path = worker.store.printStorePath(outputStorePath);
Path prev = path + checkSuffix;
deletePath(prev);
Path dst = path + checkSuffix;
renameFile(path, dst);
}
}
if (curRound < nrRounds) {
prevInfos = std::move(infos);
return {};
}
/* Remove the .check directories if we're done. FIXME: keep them
if the result was not deterministic? */
if (curRound == nrRounds) {
for (auto & [_, outputStorePath] : finalOutputs) {
Path prev = worker.store.printStorePath(outputStorePath) + checkSuffix;
deletePath(prev);
}
}
/* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the
outputs, this will fail. */

View file

@ -15,6 +15,9 @@ struct LocalDerivationGoal : public DerivationGoal
/* The process ID of the builder. */
Pid pid;
/* The cgroup of the builder, if any. */
std::optional<Path> cgroup;
/* The temporary directory. */
Path tmpDir;
@ -92,8 +95,8 @@ struct LocalDerivationGoal : public DerivationGoal
result. */
std::map<Path, ValidPathInfo> prevInfos;
uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 100 : 0) : buildUser->getGID(); }
const static Path homeDir;
@ -197,6 +200,10 @@ struct LocalDerivationGoal : public DerivationGoal
/* Forcibly kill the child process, if any. */
void killChild() override;
/* Kill any processes running under the build user UID or in the
cgroup of the build. */
void killSandbox(bool getStats);
/* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths)
that already exist. */

View file

@ -0,0 +1,44 @@
#include "personality.hh"
#include "globals.hh"
#if __linux__
#include <sys/utsname.h>
#include <sys/personality.h>
#endif
#include <cstring>
namespace nix {
void setPersonality(std::string_view system)
{
#if __linux__
/* Change the personality to 32-bit if we're doing an
i686-linux build on an x86_64-linux machine. */
struct utsname utsbuf;
uname(&utsbuf);
if ((system == "i686-linux"
&& (std::string_view(SYSTEM) == "x86_64-linux"
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|| system == "armv7l-linux"
|| system == "armv6l-linux")
{
if (personality(PER_LINUX32) == -1)
throw SysError("cannot set 32-bit personality");
}
/* Impersonate a Linux 2.6 machine to get some determinism in
builds that depend on the kernel version. */
if ((system == "i686-linux" || system == "x86_64-linux") && settings.impersonateLinux26) {
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
}
/* Disable address space randomization for improved
determinism. */
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
#endif
}
}

View file

@ -0,0 +1,11 @@
#pragma once
#include <string>
namespace nix {
void setPersonality(std::string_view system);
}

View file

@ -95,7 +95,7 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
throw Error(
"files '%1%' and '%2%' have the same priority %3%; "
"use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' "
"or type 'nix profile install --help' if using 'nix profile' to find out how"
"or type 'nix profile install --help' if using 'nix profile' to find out how "
"to change the priority of one of the conflicting packages"
" (0 being the highest priority)",
srcFile, readLink(dstFile), priority);

View file

@ -238,7 +238,6 @@ struct ClientSettings
}
else if (trusted
|| name == settings.buildTimeout.name
|| name == settings.buildRepeat.name
|| name == settings.maxSilentTime.name
|| name == settings.pollInterval.name
|| name == "connect-timeout"

View file

@ -448,7 +448,7 @@ std::string Derivation::unparse(const Store & store, bool maskOutputs,
// FIXME: remove
bool isDerivation(const std::string & fileName)
bool isDerivation(std::string_view fileName)
{
return hasSuffix(fileName, drvExtension);
}

View file

@ -224,7 +224,7 @@ StorePath writeDerivation(Store & store,
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
// FIXME: remove
bool isDerivation(const std::string & fileName);
bool isDerivation(std::string_view fileName);
/* Calculate the name that will be used for the store path for this
output.

View file

@ -20,11 +20,12 @@ nlohmann::json DerivedPath::Built::toJSON(ref<Store> store) const {
// Fallback for the input-addressed derivation case: We expect to always be
// able to print the output paths, so lets do it
const auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath);
for (const auto& output : outputs) {
for (const auto & output : outputs) {
auto knownOutput = get(knownOutputs, output);
res["outputs"][output] = (knownOutput && *knownOutput)
? store->printStorePath(**knownOutput)
: nullptr;
if (knownOutput && *knownOutput)
res["outputs"][output] = store->printStorePath(**knownOutput);
else
res["outputs"][output] = nullptr;
}
return res;
}
@ -53,28 +54,13 @@ StorePathSet BuiltPath::outPaths() const
);
}
template<typename T>
nlohmann::json stuffToJSON(const std::vector<T> & ts, ref<Store> store) {
auto res = nlohmann::json::array();
for (const T & t : ts) {
std::visit([&res, store](const auto & t) {
res.push_back(t.toJSON(store));
}, t.raw());
}
return res;
}
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store)
{ return stuffToJSON<BuiltPath>(buildables, store); }
nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
{ return stuffToJSON<DerivedPath>(paths, store); }
std::string DerivedPath::Opaque::to_string(const Store & store) const {
std::string DerivedPath::Opaque::to_string(const Store & store) const
{
return store.printStorePath(path);
}
std::string DerivedPath::Built::to_string(const Store & store) const {
std::string DerivedPath::Built::to_string(const Store & store) const
{
return store.printStorePath(drvPath)
+ "!"
+ (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));
@ -93,15 +79,16 @@ DerivedPath::Opaque DerivedPath::Opaque::parse(const Store & store, std::string_
return {store.parseStorePath(s)};
}
DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view s)
DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view drvS, std::string_view outputsS)
{
size_t n = s.find("!");
assert(n != s.npos);
auto drvPath = store.parseStorePath(s.substr(0, n));
auto outputsS = s.substr(n + 1);
auto drvPath = store.parseStorePath(drvS);
std::set<std::string> outputs;
if (outputsS != "*")
if (outputsS != "*") {
outputs = tokenizeString<std::set<std::string>>(outputsS, ",");
if (outputs.empty())
throw Error(
"Explicit list of wanted outputs '%s' must not be empty. Consider using '*' as a wildcard meaning all outputs if no output in particular is wanted.", outputsS);
}
return {drvPath, outputs};
}
@ -110,7 +97,7 @@ DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
size_t n = s.find("!");
return n == s.npos
? (DerivedPath) DerivedPath::Opaque::parse(store, s)
: (DerivedPath) DerivedPath::Built::parse(store, s);
: (DerivedPath) DerivedPath::Built::parse(store, s.substr(0, n), s.substr(n + 1));
}
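For illustration, here is a minimal, self-contained sketch of the splitting and wildcard handling that now happens before the two-argument `Built::parse` is called; the helper name and the `main` wrapper are hypothetical, and the Store/StorePath machinery is deliberately left out:
#include <cassert>
#include <iostream>
#include <set>
#include <sstream>
#include <stdexcept>
#include <string>
#include <string_view>
// Hedged sketch: mirrors the '!' split and the "*" wildcard rule shown above,
// independent of the Store API.
static std::set<std::string> parseOutputsSpec(std::string_view outputsS)
{
    std::set<std::string> outputs;
    if (outputsS == "*") return outputs;           // empty set = "all outputs"
    std::stringstream ss{std::string(outputsS)};
    std::string item;
    while (std::getline(ss, item, ','))
        if (!item.empty()) outputs.insert(item);
    if (outputs.empty())
        throw std::runtime_error("explicit list of wanted outputs must not be empty");
    return outputs;
}
int main()
{
    std::string_view s = "/nix/store/aaaa-hello.drv!out,dev";
    auto n = s.find('!');
    assert(n != s.npos);
    auto drvS = s.substr(0, n);                    // would go to store.parseStorePath()
    auto outputs = parseOutputsSpec(s.substr(n + 1));
    std::cout << drvS << " with " << outputs.size() << " wanted output(s)\n";
}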
RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const

View file

@ -47,7 +47,7 @@ struct DerivedPathBuilt {
std::set<std::string> outputs;
std::string to_string(const Store & store) const;
static DerivedPathBuilt parse(const Store & store, std::string_view);
static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view);
nlohmann::json toJSON(ref<Store> store) const;
bool operator < (const DerivedPathBuilt & b) const
@ -125,7 +125,4 @@ struct BuiltPath : _BuiltPathRaw {
typedef std::vector<DerivedPath> DerivedPaths;
typedef std::vector<BuiltPath> BuiltPaths;
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
nlohmann::json derivedPathsToJSON(const DerivedPaths & , ref<Store> store);
}

View file

@ -33,14 +33,6 @@ FileTransferSettings fileTransferSettings;
static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings);
std::string resolveUri(std::string_view uri)
{
if (uri.compare(0, 8, "channel:") == 0)
return "https://nixos.org/channels/" + std::string(uri.substr(8)) + "/nixexprs.tar.xz";
else
return std::string(uri);
}
struct curlFileTransfer : public FileTransfer
{
CURLM * curlm = 0;
@ -142,9 +134,9 @@ struct curlFileTransfer : public FileTransfer
}
template<class T>
void fail(const T & e)
void fail(T && e)
{
failEx(std::make_exception_ptr(e));
failEx(std::make_exception_ptr(std::move(e)));
}
LambdaSink finalSink;
@ -472,7 +464,7 @@ struct curlFileTransfer : public FileTransfer
fileTransfer.enqueueItem(shared_from_this());
}
else
fail(exc);
fail(std::move(exc));
}
}
};
@ -873,14 +865,4 @@ FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<st
err.msg = hf;
}
bool isUri(std::string_view s)
{
if (s.compare(0, 8, "channel:") == 0) return true;
size_t pos = s.find("://");
if (pos == std::string::npos) return false;
std::string scheme(s, 0, pos);
return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
}
}

View file

@ -125,9 +125,4 @@ public:
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);
};
bool isUri(std::string_view s);
/* Resolve deprecated 'channel:<foo>' URLs. */
std::string resolveUri(std::string_view uri);
}

View file

@ -147,7 +147,7 @@ void LocalStore::addTempRoot(const StorePath & path)
} catch (SysError & e) {
/* The garbage collector may have exited, so we need to
restart. */
if (e.errNo == EPIPE) {
if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
debug("GC socket disconnected");
state->fdRootsSocket.close();
goto restart;
@ -506,6 +506,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
Finally cleanup([&]() {
debug("GC roots server shutting down");
fdServer.close();
while (true) {
auto item = remove_begin(*connections.lock());
if (!item) break;

View file

@ -130,6 +130,10 @@ StringSet Settings::getDefaultSystemFeatures()
actually require anything special on the machines. */
StringSet features{"nixos-test", "benchmark", "big-parallel"};
#if __linux__
features.insert("uid-range");
#endif
#if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0)
features.insert("kvm");
@ -287,4 +291,18 @@ void initPlugins()
settings.pluginFiles.pluginsLoaded = true;
}
static bool initLibStoreDone = false;
void assertLibStoreInitialized() {
if (!initLibStoreDone) {
printError("The program must call nix::initNix() before calling any libstore library functions.");
abort();
};
}
void initLibStore() {
initLibStoreDone = true;
}
}

View file

@ -46,6 +46,14 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override;
};
const uint32_t maxIdsPerBuild =
#if __linux__
1 << 16
#else
1
#endif
;
class Settings : public Config {
unsigned int getDefaultCores();
@ -273,8 +281,69 @@ public:
`NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
`NIX_REMOTE` is `daemon`). Obviously, this should not be used in
multi-user settings with untrusted users.
Defaults to `nixbld` when running as root, *empty* otherwise.
)",
{}, false};
Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids",
R"(
Whether to select UIDs for builds automatically, instead of using the
users in `build-users-group`.
UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
> **Warning**
> This is an experimental feature.
To enable it, add the following to [`nix.conf`](#):
```
extra-experimental-features = auto-allocate-uids
auto-allocate-uids = true
```
)"};
Setting<uint32_t> startId{this,
#if __linux__
0x34000000,
#else
56930,
#endif
"start-id",
"The first UID and GID to use for dynamic ID allocation."};
Setting<uint32_t> uidCount{this,
#if __linux__
maxIdsPerBuild * 128,
#else
128,
#endif
"id-count",
"The number of UIDs/GIDs to use for dynamic ID allocation."};
#if __linux__
Setting<bool> useCgroups{
this, false, "use-cgroups",
R"(
Whether to execute builds inside cgroups.
This is only supported on Linux.
Cgroups are required and enabled automatically for derivations
that require the `uid-range` system feature.
> **Warning**
> This is an experimental feature.
To enable it, add the following to [`nix.conf`](#):
```
extra-experimental-features = cgroups
use-cgroups = true
```
)"};
#endif
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
{"build-impersonate-linux-26"}};
@ -307,11 +376,6 @@ public:
)",
{"build-max-log-size"}};
/* When buildRepeat > 0 and verboseBuild == true, whether to print
repeated builds (i.e. builds other than the first one) to
stderr. Hack to prevent Hydra logs from being polluted. */
bool printRepeatedBuilds = true;
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."};
@ -427,6 +491,9 @@ public:
for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl` will
only be mounted in the sandbox if it exists in the host filesystem.
If the source is in the Nix store, then its closure will be added to
the sandbox as well.
Depending on how Nix was built, the default value for this option
may be empty or provide `/bin/sh` as a bind-mount of `bash`.
)",
@ -435,19 +502,6 @@ public:
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
Setting<size_t> buildRepeat{
this, 0, "repeat",
R"(
How many times to repeat builds to check whether they are
deterministic. The default value is 0. If the value is non-zero,
every build is repeated the specified number of times. If the
contents of any of the runs differs from the previous ones and
`enforce-determinism` is true, the build is rejected and the
resulting store paths are not registered as valid in Nix's
database.
)",
{"build-repeat"}};
#if __linux__
Setting<std::string> sandboxShmSize{
this, "50%", "sandbox-dev-shm-size",
@ -511,10 +565,6 @@ public:
configuration file, and cannot be passed at the command line.
)"};
Setting<bool> enforceDeterminism{
this, true, "enforce-determinism",
"Whether to fail if repeated builds produce different output. See `repeat`."};
Setting<Strings> trustedPublicKeys{
this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
@ -563,10 +613,10 @@ public:
cache) must have a signature by a trusted key. A trusted key is one
listed in `trusted-public-keys`, or a public key counterpart to a
private key stored in a file listed in `secret-key-files`.
Set to `false` to disable signature checking and trust all
non-content-addressed paths unconditionally.
(Content-addressed paths are inherently trustworthy and thus
unaffected by this configuration option.)
)"};
@ -937,4 +987,12 @@ std::vector<Path> getUserConfigFiles();
extern const std::string nixVersion;
/* NB: This is not sufficient. You need to call initNix() */
void initLibStore();
/* It's important to initialize before doing _anything_, which is why we
call upon the programmer to handle this correctly. However, we only add
this in a few key locations, so as not to litter the code. */
void assertLibStoreInitialized();
}

View file

@ -255,8 +255,8 @@ private:
<< settings.maxLogSize;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3)
conn.to
<< settings.buildRepeat
<< settings.enforceDeterminism;
<< 0 // buildRepeat hasn't worked for ages anyway
<< 0;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
conn.to << ((int) settings.keepFailed);

View file

@ -91,6 +91,7 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
if (!lockFile(lockFd.get(), ltWrite, false)) {
printInfo("waiting for exclusive access to the Nix store for ca drvs...");
lockFile(lockFd.get(), ltNone, false); // We have acquired a shared lock; release it to prevent deadlocks
lockFile(lockFd.get(), ltWrite, true);
}
@ -299,6 +300,7 @@ LocalStore::LocalStore(const Params & params)
if (!lockFile(globalLock.get(), ltWrite, false)) {
printInfo("waiting for exclusive access to the Nix store...");
lockFile(globalLock.get(), ltNone, false); // We have acquired a shared lock; release it to prevent deadlocks
lockFile(globalLock.get(), ltWrite, true);
}
@ -583,7 +585,10 @@ void canonicaliseTimestampAndPermissions(const Path & path)
}
static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
static void canonicalisePathMetaData_(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{
checkInterrupt();
@ -630,7 +635,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
However, ignore files that we chown'ed ourselves previously to
ensure that we don't fail on hard links within the same build
(i.e. "touch $out/foo; ln $out/foo $out/bar"). */
if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) {
if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
throw BuildError("invalid ownership on file '%1%'", path);
mode_t mode = st.st_mode & ~S_IFMT;
@ -663,14 +668,17 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
if (S_ISDIR(st.st_mode)) {
DirEntries entries = readDirectory(path);
for (auto & i : entries)
canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
canonicalisePathMetaData_(path + "/" + i.name, uidRange, inodesSeen);
}
}
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{
canonicalisePathMetaData_(path, fromUid, inodesSeen);
canonicalisePathMetaData_(path, uidRange, inodesSeen);
/* On platforms that don't have lchown(), the top-level path can't
be a symlink, since we can't change its ownership. */
@ -683,10 +691,11 @@ void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & ino
}
void canonicalisePathMetaData(const Path & path, uid_t fromUid)
void canonicalisePathMetaData(const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange)
{
InodesSeen inodesSeen;
canonicalisePathMetaData(path, fromUid, inodesSeen);
canonicalisePathMetaData(path, uidRange, inodesSeen);
}
@ -1331,7 +1340,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
autoGC();
canonicalisePathMetaData(realPath, -1);
canonicalisePathMetaData(realPath, {});
optimisePath(realPath, repair); // FIXME: combine with hashPath()
@ -1444,7 +1453,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
narHash = narSink.finish();
}
canonicalisePathMetaData(realPath, -1); // FIXME: merge into restorePath
canonicalisePathMetaData(realPath, {}); // FIXME: merge into restorePath
optimisePath(realPath, repair);
@ -1486,7 +1495,7 @@ StorePath LocalStore::addTextToStore(
writeFile(realPath, s);
canonicalisePathMetaData(realPath, -1);
canonicalisePathMetaData(realPath, {});
StringSink sink;
dumpString(s, sink);

View file

@ -310,9 +310,18 @@ typedef std::set<Inode> InodesSeen;
- the permissions are set to 444 or 555 (i.e., read-only with or
without execute permission; setuid bits etc. are cleared)
- the owner and group are set to the Nix user and group, if we're
running as root. */
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
void canonicalisePathMetaData(const Path & path, uid_t fromUid);
running as root.
If uidRange is set, this function will throw an error if it
encounters files owned by a user outside the closed interval
[uidRange->first, uidRange->second].
*/
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen);
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange);
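A hedged usage sketch of the two call shapes this new signature supports; the header name and the surrounding function are assumptions, and `firstUid`/`lastUid` stand in for the build user's UID range:
#include "local-store.hh"   // assumed to declare canonicalisePathMetaData as above
using namespace nix;
// Illustrative only, not part of this change.
void exampleCanonicalise(const Path & realPath, uid_t firstUid, uid_t lastUid)
{
    // No ownership check: pass an empty optional (as addToStore does above).
    canonicalisePathMetaData(realPath, {});
    // Throw if any file is owned outside the closed interval [firstUid, lastUid],
    // e.g. after a uid-range build.
    canonicalisePathMetaData(realPath, std::pair<uid_t, uid_t>{firstUid, lastUid});
}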
void canonicaliseTimestampAndPermissions(const Path & path);

View file

@ -20,7 +20,7 @@ endif
$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))
ifeq ($(ENABLE_S3), 1)
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
endif
ifdef HOST_SOLARIS

View file

@ -2,105 +2,197 @@
#include "globals.hh"
#include "pathlocks.hh"
#include <grp.h>
#include <pwd.h>
#include <fcntl.h>
#include <unistd.h>
#include <grp.h>
namespace nix {
UserLock::UserLock()
struct SimpleUserLock : UserLock
{
assert(settings.buildUsersGroup != "");
createDirs(settings.nixStateDir + "/userpool");
}
AutoCloseFD fdUserLock;
uid_t uid;
gid_t gid;
std::vector<gid_t> supplementaryGIDs;
bool UserLock::findFreeUser() {
if (enabled()) return true;
uid_t getUID() override { assert(uid); return uid; }
uid_t getUIDCount() override { return 1; }
gid_t getGID() override { assert(gid); return gid; }
/* Get the members of the build-users-group. */
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%1%' specified in 'build-users-group' does not exist",
settings.buildUsersGroup);
gid = gr->gr_gid;
std::vector<gid_t> getSupplementaryGIDs() override { return supplementaryGIDs; }
/* Copy the result of getgrnam. */
Strings users;
for (char * * p = gr->gr_mem; *p; ++p) {
debug("found build user '%1%'", *p);
users.push_back(*p);
}
static std::unique_ptr<UserLock> acquire()
{
assert(settings.buildUsersGroup != "");
createDirs(settings.nixStateDir + "/userpool");
if (users.empty())
throw Error("the build users group '%1%' has no members",
settings.buildUsersGroup);
/* Get the members of the build-users-group. */
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
/* Find a user account that isn't currently in use for another
build. */
for (auto & i : users) {
debug("trying user '%1%'", i);
struct passwd * pw = getpwnam(i.c_str());
if (!pw)
throw Error("the user '%1%' in the group '%2%' does not exist",
i, settings.buildUsersGroup);
fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%1%'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
fdUserLock = std::move(fd);
user = i;
uid = pw->pw_uid;
/* Sanity check... */
if (uid == getuid() || uid == geteuid())
throw Error("the Nix user should not be a member of '%1%'",
settings.buildUsersGroup);
#if __linux__
/* Get the list of supplementary groups of this build user. This
is usually either empty or contains a group such as "kvm". */
int ngroups = 32; // arbitrary initial guess
supplementaryGIDs.resize(ngroups);
int err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
&ngroups);
// Our initial size of 32 wasn't sufficient, the correct size has
// been stored in ngroups, so we try again.
if (err == -1) {
supplementaryGIDs.resize(ngroups);
err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
&ngroups);
}
// If it failed once more, then something must be broken.
if (err == -1)
throw Error("failed to get list of supplementary groups for '%1%'",
pw->pw_name);
// Finally, trim back the GID list to its real size
supplementaryGIDs.resize(ngroups);
#endif
isEnabled = true;
return true;
/* Copy the result of getgrnam. */
Strings users;
for (char * * p = gr->gr_mem; *p; ++p) {
debug("found build user '%s'", *p);
users.push_back(*p);
}
if (users.empty())
throw Error("the build users group '%s' has no members", settings.buildUsersGroup);
/* Find a user account that isn't currently in use for another
build. */
for (auto & i : users) {
debug("trying user '%s'", i);
struct passwd * pw = getpwnam(i.c_str());
if (!pw)
throw Error("the user '%s' in the group '%s' does not exist", i, settings.buildUsersGroup);
auto fnUserLock = fmt("%s/userpool/%s", settings.nixStateDir, pw->pw_uid);
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%s'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
auto lock = std::make_unique<SimpleUserLock>();
lock->fdUserLock = std::move(fd);
lock->uid = pw->pw_uid;
lock->gid = gr->gr_gid;
/* Sanity check... */
if (lock->uid == getuid() || lock->uid == geteuid())
throw Error("the Nix user should not be a member of '%s'", settings.buildUsersGroup);
#if __linux__
/* Get the list of supplementary groups of this build
user. This is usually either empty or contains a
group such as "kvm". */
int ngroups = 32; // arbitrary initial guess
std::vector<gid_t> gids;
gids.resize(ngroups);
int err = getgrouplist(
pw->pw_name, pw->pw_gid,
gids.data(),
&ngroups);
/* Our initial size of 32 wasn't sufficient; the
correct size has been stored in ngroups, so we try
again. */
if (err == -1) {
gids.resize(ngroups);
err = getgrouplist(
pw->pw_name, pw->pw_gid,
gids.data(),
&ngroups);
}
// If it failed once more, then something must be broken.
if (err == -1)
throw Error("failed to get list of supplementary groups for '%s'", pw->pw_name);
// Finally, trim back the GID list to its real size.
for (auto i = 0; i < ngroups; i++)
if (gids[i] != lock->gid)
lock->supplementaryGIDs.push_back(gids[i]);
#endif
return lock;
}
}
return nullptr;
}
};
return false;
}
void UserLock::kill()
struct AutoUserLock : UserLock
{
killUser(uid);
AutoCloseFD fdUserLock;
uid_t firstUid = 0;
gid_t firstGid = 0;
uid_t nrIds = 1;
uid_t getUID() override { assert(firstUid); return firstUid; }
gid_t getUIDCount() override { return nrIds; }
gid_t getGID() override { assert(firstGid); return firstGid; }
std::vector<gid_t> getSupplementaryGIDs() override { return {}; }
static std::unique_ptr<UserLock> acquire(uid_t nrIds, bool useChroot)
{
settings.requireExperimentalFeature(Xp::AutoAllocateUids);
assert(settings.startId > 0);
assert(settings.uidCount % maxIdsPerBuild == 0);
assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits<uid_t>::max());
assert(nrIds <= maxIdsPerBuild);
createDirs(settings.nixStateDir + "/userpool2");
size_t nrSlots = settings.uidCount / maxIdsPerBuild;
for (size_t i = 0; i < nrSlots; i++) {
debug("trying user slot '%d'", i);
createDirs(settings.nixStateDir + "/userpool2");
auto fnUserLock = fmt("%s/userpool2/slot-%d", settings.nixStateDir, i);
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%s'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
auto firstUid = settings.startId + i * maxIdsPerBuild;
auto pw = getpwuid(firstUid);
if (pw)
throw Error("auto-allocated UID %d clashes with existing user account '%s'", firstUid, pw->pw_name);
auto lock = std::make_unique<AutoUserLock>();
lock->fdUserLock = std::move(fd);
lock->firstUid = firstUid;
if (useChroot)
lock->firstGid = firstUid;
else {
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
lock->firstGid = gr->gr_gid;
}
lock->nrIds = nrIds;
return lock;
}
}
return nullptr;
}
};
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot)
{
if (settings.autoAllocateUids)
return AutoUserLock::acquire(nrIds, useChroot);
else
return SimpleUserLock::acquire();
}
bool useBuildUsers()
{
#if __linux__
static bool b = (settings.buildUsersGroup != "" || settings.autoAllocateUids) && getuid() == 0;
return b;
#elif __APPLE__
static bool b = settings.buildUsersGroup != "" && getuid() == 0;
return b;
#else
return false;
#endif
}
}
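A hedged sketch of how a caller might drive the new `acquireUserLock` interface; the retry loop, the flag names and the 65536 literal (`maxIdsPerBuild` on Linux) are assumptions based on the derivation-goal code, not part of this file:
#include "lock.hh"   // assumed header for the declarations above
#include <chrono>
#include <memory>
#include <thread>
using namespace nix;
// Illustrative only: acquireUserLock() returns nullptr when every build user
// or UID slot is already locked, so a caller has to wait and try again (the
// real derivation goal re-queues itself instead of sleeping).
std::unique_ptr<UserLock> waitForBuildUser(bool wantUidRange, bool useSandbox)
{
    std::unique_ptr<UserLock> lock;
    while (!(lock = acquireUserLock(wantUidRange ? 65536 : 1, useSandbox)))
        std::this_thread::sleep_for(std::chrono::milliseconds(500));
    return lock;
}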

View file

@ -1,37 +1,38 @@
#pragma once
#include "sync.hh"
#include "types.hh"
#include "util.hh"
#include <optional>
#include <sys/types.h>
namespace nix {
class UserLock
struct UserLock
{
private:
Path fnUserLock;
AutoCloseFD fdUserLock;
virtual ~UserLock() { }
bool isEnabled = false;
std::string user;
uid_t uid = 0;
gid_t gid = 0;
std::vector<gid_t> supplementaryGIDs;
/* Get the first and last UID. */
std::pair<uid_t, uid_t> getUIDRange()
{
auto first = getUID();
return {first, first + getUIDCount() - 1};
}
public:
UserLock();
/* Get the first UID. */
virtual uid_t getUID() = 0;
void kill();
virtual uid_t getUIDCount() = 0;
std::string getUser() { return user; }
uid_t getUID() { assert(uid); return uid; }
uid_t getGID() { assert(gid); return gid; }
std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
bool findFreeUser();
bool enabled() { return isEnabled; }
virtual gid_t getGID() = 0;
virtual std::vector<gid_t> getSupplementaryGIDs() = 0;
};
/* Acquire a user lock for a UID range of size `nrIds`. Note that this
may return nullptr if no user is available. */
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot);
bool useBuildUsers();
}

View file

@ -1,6 +1,5 @@
#include "nar-accessor.hh"
#include "archive.hh"
#include "json.hh"
#include <map>
#include <stack>
@ -243,42 +242,43 @@ ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
return make_ref<NarAccessor>(listing, getNarBytes);
}
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
const Path & path, bool recurse)
using nlohmann::json;
json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse)
{
auto st = accessor->stat(path);
auto obj = res.object();
json obj = json::object();
switch (st.type) {
case FSAccessor::Type::tRegular:
obj.attr("type", "regular");
obj.attr("size", st.fileSize);
obj["type"] = "regular";
obj["size"] = st.fileSize;
if (st.isExecutable)
obj.attr("executable", true);
obj["executable"] = true;
if (st.narOffset)
obj.attr("narOffset", st.narOffset);
obj["narOffset"] = st.narOffset;
break;
case FSAccessor::Type::tDirectory:
obj.attr("type", "directory");
obj["type"] = "directory";
{
auto res2 = obj.object("entries");
obj["entries"] = json::object();
json &res2 = obj["entries"];
for (auto & name : accessor->readDirectory(path)) {
if (recurse) {
auto res3 = res2.placeholder(name);
listNar(res3, accessor, path + "/" + name, true);
res2[name] = listNar(accessor, path + "/" + name, true);
} else
res2.object(name);
res2[name] = json::object();
}
}
break;
case FSAccessor::Type::tSymlink:
obj.attr("type", "symlink");
obj.attr("target", accessor->readLink(path));
obj["type"] = "symlink";
obj["target"] = accessor->readLink(path);
break;
default:
throw Error("path '%s' does not exist in NAR", path);
}
return obj;
}
}
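With the streaming `JSONPlaceholder` interface gone, callers simply serialise the returned value themselves; a hedged consumption sketch (the function name is illustrative, and this mirrors the remote-fs-accessor change further down):
#include "nar-accessor.hh"
#include <nlohmann/json.hpp>
#include <iostream>
using namespace nix;
// Illustrative only: dump the recursive NAR listing as indented JSON,
// e.g. {"type":"directory","entries":{...}}.
void printListing(ref<FSAccessor> accessor)
{
    nlohmann::json ls = listNar(accessor, "", /* recurse */ true);
    std::cout << ls.dump(2) << std::endl;
}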

View file

@ -2,6 +2,7 @@
#include <functional>
#include <nlohmann/json_fwd.hpp>
#include "fs-accessor.hh"
namespace nix {
@ -24,11 +25,8 @@ ref<FSAccessor> makeLazyNarAccessor(
const std::string & listing,
GetNarBytes getNarBytes);
class JSONPlaceholder;
/* Write a JSON representation of the contents of a NAR (except file
contents). */
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
const Path & path, bool recurse);
nlohmann::json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse);
}

View file

@ -166,16 +166,37 @@ public:
return i->second;
}
std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
{
auto i = state.caches.find(uri);
if (i == state.caches.end()) {
auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
if (!queryCache.next())
return std::nullopt;
state.caches.emplace(uri,
Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
}
return getCache(state, uri);
}
void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
{
retrySQLite<void>([&]() {
auto state(_state.lock());
SQLiteTxn txn(state->db);
// FIXME: race
// To avoid the race, check whether another process has created
// the cache for this URI in the meantime.
auto cache(queryCacheRaw(*state, uri));
if (cache)
return;
state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
assert(sqlite3_changes(state->db) == 1);
state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
txn.commit();
});
}
@ -183,21 +204,12 @@ public:
{
return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
auto state(_state.lock());
auto i = state->caches.find(uri);
if (i == state->caches.end()) {
auto queryCache(state->queryCache.use()(uri)(time(0) - cacheInfoTtl));
if (!queryCache.next())
return std::nullopt;
state->caches.emplace(uri,
Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
}
auto & cache(getCache(*state, uri));
auto cache(queryCacheRaw(*state, uri));
if (!cache)
return std::nullopt;
return CacheInfo {
.wantMassQuery = cache.wantMassQuery,
.priority = cache.priority
.wantMassQuery = cache->wantMassQuery,
.priority = cache->priority
};
});
}

View file

@ -2,7 +2,6 @@
#include <nlohmann/json.hpp>
#include <regex>
#include "json.hh"
namespace nix {
@ -90,6 +89,7 @@ std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name
StringSet ParsedDerivation::getRequiredSystemFeatures() const
{
// FIXME: cache this?
StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i);
@ -125,6 +125,11 @@ bool ParsedDerivation::substitutesAllowed() const
return getBoolAttr("allowSubstitutes", true);
}
bool ParsedDerivation::useUidRange() const
{
return getRequiredSystemFeatures().count("uid-range");
}
static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
@ -144,16 +149,11 @@ std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & s
auto e = json.find("exportReferencesGraph");
if (e != json.end() && e->is_object()) {
for (auto i = e->begin(); i != e->end(); ++i) {
std::ostringstream str;
{
JSONPlaceholder jsonRoot(str, true);
StorePathSet storePaths;
for (auto & p : *i)
storePaths.insert(store.parseStorePath(p.get<std::string>()));
store.pathInfoToJSON(jsonRoot,
store.exportReferences(storePaths, inputPaths), false, true);
}
json[i.key()] = nlohmann::json::parse(str.str()); // urgh
StorePathSet storePaths;
for (auto & p : *i)
storePaths.insert(store.parseStorePath(p.get<std::string>()));
json[i.key()] = store.pathInfoToJSON(
store.exportReferences(storePaths, inputPaths), false, true);
}
}

View file

@ -38,6 +38,8 @@ public:
bool substitutesAllowed() const;
bool useUidRange() const;
std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths);
};

View file

@ -67,6 +67,40 @@ void RefScanSink::operator () (std::string_view data)
}
PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
: RefScanSink(std::move(hashes))
, backMap(std::move(backMap))
{ }
PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
return PathRefScanSink(std::move(hashes), std::move(backMap));
}
StorePathSet PathRefScanSink::getResultPaths()
{
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
}
std::pair<StorePathSet, HashResult> scanForReferences(
const std::string & path,
const StorePathSet & refs)
@ -82,30 +116,13 @@ StorePathSet scanForReferences(
const Path & path,
const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
TeeSink sink { refsSink, toTee };
/* Look for the hashes in the NAR dump of the path. */
RefScanSink refsSink(std::move(hashes));
TeeSink sink { refsSink, toTee };
dumpPath(path, sink);
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : refsSink.getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
return refsSink.getResultPaths();
}

View file

@ -27,6 +27,19 @@ public:
void operator () (std::string_view data) override;
};
class PathRefScanSink : public RefScanSink
{
std::map<std::string, StorePath> backMap;
PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
public:
static PathRefScanSink fromPaths(const StorePathSet & refs);
StorePathSet getResultPaths();
};
struct RewritingSink : Sink
{
std::string from, to, prev;

View file

@ -1,6 +1,6 @@
#include <nlohmann/json.hpp>
#include "remote-fs-accessor.hh"
#include "nar-accessor.hh"
#include "json.hh"
#include <sys/types.h>
#include <sys/stat.h>
@ -38,10 +38,8 @@ ref<FSAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std::str
if (cacheDir != "") {
try {
std::ostringstream str;
JSONPlaceholder jsonRoot(str);
listNar(jsonRoot, narAccessor, "", true);
writeFile(makeCacheFile(hashPart, "ls"), str.str());
nlohmann::json j = listNar(narAccessor, "", true);
writeFile(makeCacheFile(hashPart, "ls"), j.dump());
} catch (...) {
ignoreException();
}

View file

@ -447,7 +447,7 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
} catch (Error & e) {
// Ugly backwards compatibility hack.
if (e.msg().find("is not valid") != std::string::npos)
throw InvalidPath(e.info());
throw InvalidPath(std::move(e.info()));
throw;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {

View file

@ -8,12 +8,15 @@
namespace nix {
SQLiteError::SQLiteError(const char *path, int errNo, int extendedErrNo, hintformat && hf)
: Error(""), path(path), errNo(errNo), extendedErrNo(extendedErrNo)
SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, hintformat && hf)
: Error(""), path(path), errMsg(errMsg), errNo(errNo), extendedErrNo(extendedErrNo), offset(offset)
{
err.msg = hintfmt("%s: %s (in '%s')",
auto offsetStr = (offset == -1) ? "" : "at offset " + std::to_string(offset) + ": ";
err.msg = hintfmt("%s: %s%s, %s (in '%s')",
normaltxt(hf.str()),
offsetStr,
sqlite3_errstr(extendedErrNo),
errMsg,
path ? path : "(in-memory)");
}
@ -21,11 +24,13 @@ SQLiteError::SQLiteError(const char *path, int errNo, int extendedErrNo, hintfor
{
int err = sqlite3_errcode(db);
int exterr = sqlite3_extended_errcode(db);
int offset = sqlite3_error_offset(db);
auto path = sqlite3_db_filename(db, nullptr);
auto errMsg = sqlite3_errmsg(db);
if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
auto exp = SQLiteBusy(path, err, exterr, std::move(hf));
auto exp = SQLiteBusy(path, errMsg, err, exterr, offset, std::move(hf));
exp.err.msg = hintfmt(
err == SQLITE_PROTOCOL
? "SQLite database '%s' is busy (SQLITE_PROTOCOL)"
@ -33,7 +38,7 @@ SQLiteError::SQLiteError(const char *path, int errNo, int extendedErrNo, hintfor
path ? path : "(in-memory)");
throw exp;
} else
throw SQLiteError(path, err, exterr, std::move(hf));
throw SQLiteError(path, errMsg, err, exterr, offset, std::move(hf));
}
SQLite::SQLite(const Path & path, bool create)

View file

@ -98,21 +98,22 @@ struct SQLiteTxn
struct SQLiteError : Error
{
const char *path;
int errNo, extendedErrNo;
std::string path;
std::string errMsg;
int errNo, extendedErrNo, offset;
template<typename... Args>
[[noreturn]] static void throw_(sqlite3 * db, const std::string & fs, const Args & ... args) {
throw_(db, hintfmt(fs, args...));
}
SQLiteError(const char *path, int errNo, int extendedErrNo, hintformat && hf);
SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, hintformat && hf);
protected:
template<typename... Args>
SQLiteError(const char *path, int errNo, int extendedErrNo, const std::string & fs, const Args & ... args)
: SQLiteError(path, errNo, extendedErrNo, hintfmt(fs, args...))
SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, const std::string & fs, const Args & ... args)
: SQLiteError(path, errNo, extendedErrNo, offset, hintfmt(fs, args...))
{ }
[[noreturn]] static void throw_(sqlite3 * db, hintformat && hf);

View file

@ -67,7 +67,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
if (fakeSSH) {
args = { "bash", "-c" };
} else {
args = { "ssh", host.c_str(), "-x", "-a" };
args = { "ssh", host.c_str(), "-x" };
addCommonSSHOpts(args);
if (socketPath != "")
args.insert(args.end(), {"-S", socketPath});

View file

@ -6,32 +6,34 @@
#include "util.hh"
#include "nar-info-disk-cache.hh"
#include "thread-pool.hh"
#include "json.hh"
#include "url.hh"
#include "archive.hh"
#include "callback.hh"
#include "remote-store.hh"
#include <nlohmann/json.hpp>
#include <regex>
using json = nlohmann::json;
namespace nix {
bool Store::isInStore(const Path & path) const
bool Store::isInStore(PathView path) const
{
return isInDir(path, storeDir);
}
std::pair<StorePath, Path> Store::toStorePath(const Path & path) const
std::pair<StorePath, Path> Store::toStorePath(PathView path) const
{
if (!isInStore(path))
throw Error("path '%1%' is not in the Nix store", path);
Path::size_type slash = path.find('/', storeDir.size() + 1);
auto slash = path.find('/', storeDir.size() + 1);
if (slash == Path::npos)
return {parseStorePath(path), ""};
else
return {parseStorePath(std::string_view(path).substr(0, slash)), path.substr(slash)};
return {parseStorePath(path.substr(0, slash)), (Path) path.substr(slash)};
}
@ -456,6 +458,7 @@ Store::Store(const Params & params)
: StoreConfig(params)
, state({(size_t) pathInfoCacheSize})
{
assertLibStoreInitialized();
}
@ -838,56 +841,53 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor
return paths;
}
void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
json Store::pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase,
AllowInvalidFlag allowInvalid)
{
auto jsonList = jsonOut.list();
json::array_t jsonList = json::array();
for (auto & storePath : storePaths) {
auto jsonPath = jsonList.object();
auto& jsonPath = jsonList.emplace_back(json::object());
try {
auto info = queryPathInfo(storePath);
jsonPath.attr("path", printStorePath(info->path));
jsonPath
.attr("narHash", info->narHash.to_string(hashBase, true))
.attr("narSize", info->narSize);
jsonPath["path"] = printStorePath(info->path);
jsonPath["narHash"] = info->narHash.to_string(hashBase, true);
jsonPath["narSize"] = info->narSize;
{
auto jsonRefs = jsonPath.list("references");
auto& jsonRefs = (jsonPath["references"] = json::array());
for (auto & ref : info->references)
jsonRefs.elem(printStorePath(ref));
jsonRefs.emplace_back(printStorePath(ref));
}
if (info->ca)
jsonPath.attr("ca", renderContentAddress(info->ca));
jsonPath["ca"] = renderContentAddress(info->ca);
std::pair<uint64_t, uint64_t> closureSizes;
if (showClosureSize) {
closureSizes = getClosureSize(info->path);
jsonPath.attr("closureSize", closureSizes.first);
jsonPath["closureSize"] = closureSizes.first;
}
if (includeImpureInfo) {
if (info->deriver)
jsonPath.attr("deriver", printStorePath(*info->deriver));
jsonPath["deriver"] = printStorePath(*info->deriver);
if (info->registrationTime)
jsonPath.attr("registrationTime", info->registrationTime);
jsonPath["registrationTime"] = info->registrationTime;
if (info->ultimate)
jsonPath.attr("ultimate", info->ultimate);
jsonPath["ultimate"] = info->ultimate;
if (!info->sigs.empty()) {
auto jsonSigs = jsonPath.list("signatures");
for (auto & sig : info->sigs)
jsonSigs.elem(sig);
jsonPath["signatures"].push_back(sig);
}
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
@ -895,21 +895,22 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & store
if (narInfo) {
if (!narInfo->url.empty())
jsonPath.attr("url", narInfo->url);
jsonPath["url"] = narInfo->url;
if (narInfo->fileHash)
jsonPath.attr("downloadHash", narInfo->fileHash->to_string(hashBase, true));
jsonPath["downloadHash"] = narInfo->fileHash->to_string(hashBase, true);
if (narInfo->fileSize)
jsonPath.attr("downloadSize", narInfo->fileSize);
jsonPath["downloadSize"] = narInfo->fileSize;
if (showClosureSize)
jsonPath.attr("closureDownloadSize", closureSizes.second);
jsonPath["closureDownloadSize"] = closureSizes.second;
}
}
} catch (InvalidPath &) {
jsonPath.attr("path", printStorePath(storePath));
jsonPath.attr("valid", false);
jsonPath["path"] = printStorePath(storePath);
jsonPath["valid"] = false;
}
}
return jsonList;
}

View file

@ -14,6 +14,7 @@
#include "path-info.hh"
#include "repair-flag.hh"
#include <nlohmann/json_fwd.hpp>
#include <atomic>
#include <limits>
#include <map>
@ -68,7 +69,6 @@ struct Derivation;
class FSAccessor;
class NarInfoDiskCache;
class Store;
class JSONPlaceholder;
enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
@ -179,7 +179,7 @@ public:
/* Return true if path is in the Nix store (but not the Nix
store itself). */
bool isInStore(const Path & path) const;
bool isInStore(PathView path) const;
/* Return true if path is a store path, i.e. a direct child of
the Nix store. */
@ -187,7 +187,7 @@ public:
/* Split a path like /nix/store/<hash>-<name>/<bla> into
/nix/store/<hash>-<name> and /<bla>. */
std::pair<StorePath, Path> toStorePath(const Path & path) const;
std::pair<StorePath, Path> toStorePath(PathView path) const;
/* Follow symlinks until we end up with a path in the Nix store. */
Path followLinksToStore(std::string_view path) const;
@ -512,7 +512,7 @@ public:
variable elements such as the registration time are
included. If showClosureSize is true, the closure size of
each path is included. */
void pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
nlohmann::json pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase = Base32,
AllowInvalidFlag allowInvalid = DisallowInvalid);

View file

@ -35,10 +35,6 @@ static ArchiveSettings archiveSettings;
static GlobalConfig::Register rArchiveSettings(&archiveSettings);
const std::string narVersionMagic1 = "nix-archive-1";
static std::string caseHackSuffix = "~nix~case~hack~";
PathFilter defaultPathFilter = [](const Path &) { return true; };

View file

@ -103,7 +103,9 @@ void copyNAR(Source & source, Sink & sink);
void copyPath(const Path & from, const Path & to);
extern const std::string narVersionMagic1;
inline constexpr std::string_view narVersionMagic1 = "nix-archive-1";
inline constexpr std::string_view caseHackSuffix = "~nix~case~hack~";
}

103
src/libutil/canon-path.cc Normal file
View file

@ -0,0 +1,103 @@
#include "canon-path.hh"
#include "util.hh"
namespace nix {
CanonPath CanonPath::root = CanonPath("/");
CanonPath::CanonPath(std::string_view raw)
: path(absPath((Path) raw, "/"))
{ }
CanonPath::CanonPath(std::string_view raw, const CanonPath & root)
: path(absPath((Path) raw, root.abs()))
{ }
std::optional<CanonPath> CanonPath::parent() const
{
if (isRoot()) return std::nullopt;
return CanonPath(unchecked_t(), path.substr(0, std::max((size_t) 1, path.rfind('/'))));
}
void CanonPath::pop()
{
assert(!isRoot());
path.resize(std::max((size_t) 1, path.rfind('/')));
}
bool CanonPath::isWithin(const CanonPath & parent) const
{
return !(
path.size() < parent.path.size()
|| path.substr(0, parent.path.size()) != parent.path
|| (parent.path.size() > 1 && path.size() > parent.path.size()
&& path[parent.path.size()] != '/'));
}
CanonPath CanonPath::removePrefix(const CanonPath & prefix) const
{
assert(isWithin(prefix));
if (prefix.isRoot()) return *this;
if (path.size() == prefix.path.size()) return root;
return CanonPath(unchecked_t(), path.substr(prefix.path.size()));
}
void CanonPath::extend(const CanonPath & x)
{
if (x.isRoot()) return;
if (isRoot())
path += x.rel();
else
path += x.abs();
}
CanonPath CanonPath::operator + (const CanonPath & x) const
{
auto res = *this;
res.extend(x);
return res;
}
void CanonPath::push(std::string_view c)
{
assert(c.find('/') == c.npos);
assert(c != "." && c != "..");
if (!isRoot()) path += '/';
path += c;
}
CanonPath CanonPath::operator + (std::string_view c) const
{
auto res = *this;
res.push(c);
return res;
}
bool CanonPath::isAllowed(const std::set<CanonPath> & allowed) const
{
/* Check if `this` is an exact match or the parent of an
allowed path. */
auto lb = allowed.lower_bound(*this);
if (lb != allowed.end()) {
if (lb->isWithin(*this))
return true;
}
/* Check if a parent of `this` is allowed. */
auto path = *this;
while (!path.isRoot()) {
path.pop();
if (allowed.count(path))
return true;
}
return false;
}
std::ostream & operator << (std::ostream & stream, const CanonPath & path)
{
stream << path.abs();
return stream;
}
}

173
src/libutil/canon-path.hh Normal file
View file

@ -0,0 +1,173 @@
#pragma once
#include <string>
#include <optional>
#include <cassert>
#include <iostream>
#include <set>
namespace nix {
/* A canonical representation of a path. It ensures the following:
- It always starts with a slash.
- It never ends with a slash, except if the path is "/".
- A slash is never followed by a slash (i.e. no empty components).
- There are no components equal to '.' or '..'.
Note that the path does not need to correspond to an actually
existing path, and there is no guarantee that symlinks are
resolved.
*/
class CanonPath
{
std::string path;
public:
/* Construct a canon path from a non-canonical path. Any '.', '..'
or empty components are removed. */
CanonPath(std::string_view raw);
explicit CanonPath(const char * raw)
: CanonPath(std::string_view(raw))
{ }
struct unchecked_t { };
CanonPath(unchecked_t _, std::string path)
: path(std::move(path))
{ }
static CanonPath root;
/* If `raw` starts with a slash, return
`CanonPath(raw)`. Otherwise return a `CanonPath` representing
`root + "/" + raw`. */
CanonPath(std::string_view raw, const CanonPath & root);
bool isRoot() const
{ return path.size() <= 1; }
explicit operator std::string_view() const
{ return path; }
const std::string & abs() const
{ return path; }
/* Like abs(), but return an empty string if this path is
'/'. Thus the returned string never ends in a slash. */
const std::string & absOrEmpty() const
{
const static std::string epsilon;
return isRoot() ? epsilon : path;
}
const char * c_str() const
{ return path.c_str(); }
std::string_view rel() const
{ return ((std::string_view) path).substr(1); }
struct Iterator
{
std::string_view remaining;
size_t slash;
Iterator(std::string_view remaining)
: remaining(remaining)
, slash(remaining.find('/'))
{ }
bool operator != (const Iterator & x) const
{ return remaining.data() != x.remaining.data(); }
const std::string_view operator * () const
{ return remaining.substr(0, slash); }
void operator ++ ()
{
if (slash == remaining.npos)
remaining = remaining.substr(remaining.size());
else {
remaining = remaining.substr(slash + 1);
slash = remaining.find('/');
}
}
};
Iterator begin() const { return Iterator(rel()); }
Iterator end() const { return Iterator(rel().substr(path.size() - 1)); }
std::optional<CanonPath> parent() const;
/* Remove the last component. Panics if this path is the root. */
void pop();
std::optional<std::string_view> dirOf() const
{
if (isRoot()) return std::nullopt;
return ((std::string_view) path).substr(0, path.rfind('/'));
}
std::optional<std::string_view> baseName() const
{
if (isRoot()) return std::nullopt;
return ((std::string_view) path).substr(path.rfind('/') + 1);
}
bool operator == (const CanonPath & x) const
{ return path == x.path; }
bool operator != (const CanonPath & x) const
{ return path != x.path; }
/* Compare paths lexicographically except that path separators
are sorted before any other character. That is, in the sorted order
a directory is always followed directly by its children. For
instance, 'foo' < 'foo/bar' < 'foo!'. */
bool operator < (const CanonPath & x) const
{
auto i = path.begin();
auto j = x.path.begin();
for ( ; i != path.end() && j != x.path.end(); ++i, ++j) {
auto c_i = *i;
if (c_i == '/') c_i = 0;
auto c_j = *j;
if (c_j == '/') c_j = 0;
if (c_i < c_j) return true;
if (c_i > c_j) return false;
}
return i == path.end() && j != x.path.end();
}
/* Return true if `this` is equal to `parent` or a child of
`parent`. */
bool isWithin(const CanonPath & parent) const;
CanonPath removePrefix(const CanonPath & prefix) const;
/* Append another path to this one. */
void extend(const CanonPath & x);
/* Concatenate two paths. */
CanonPath operator + (const CanonPath & x) const;
/* Add a path component to this one. It must not contain any slashes. */
void push(std::string_view c);
CanonPath operator + (std::string_view c) const;
/* Check whether access to this path is allowed, which is the case
if 1) `this` is within any of the `allowed` paths; or 2) any of
the `allowed` paths are within `this`. (The latter condition
ensures access to the parents of allowed paths.) */
bool isAllowed(const std::set<CanonPath> & allowed) const;
};
std::ostream & operator << (std::ostream & stream, const CanonPath & path);
}

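As a quick illustration of the semantics documented in the comments above — canonicalisation, component iteration, the "`/` sorts first" ordering, and `isAllowed()` — here is a minimal sketch. It assumes the header is available as `canon-path.hh` and that the translation unit is linked against the accompanying canon-path.cc:

```cpp
#include "canon-path.hh"

#include <cassert>
#include <set>
#include <string_view>
#include <vector>

using namespace nix;

int main()
{
    // '.' and empty components are stripped during construction.
    CanonPath p("/foo//./bar/baz");
    assert(p.abs() == "/foo/bar/baz");

    // Iteration yields the components, without the leading slash.
    std::vector<std::string_view> comps;
    for (auto c : p) comps.push_back(c);
    assert((comps == std::vector<std::string_view>{"foo", "bar", "baz"}));

    // '/' sorts before any other character, so in a sorted container a
    // directory is immediately followed by its children: /foo, /foo/bar, /foo!.
    std::set<CanonPath> sorted{CanonPath("/foo!"), CanonPath("/foo/bar"), CanonPath("/foo")};
    assert(sorted.begin()->abs() == "/foo");

    // isAllowed(): a path is allowed if it lies inside an allowed path, or if
    // an allowed path lies inside it (keeping parents traversable).
    std::set<CanonPath> allowed{CanonPath("/nix/store")};
    assert(CanonPath("/nix/store/abc").isAllowed(allowed));
    assert(CanonPath("/nix").isAllowed(allowed));
    assert(!CanonPath("/etc").isAllowed(allowed));
}
```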
src/libutil/cgroup.cc (new file, 148 lines)
View file

@ -0,0 +1,148 @@
#if __linux__
#include "cgroup.hh"
#include "util.hh"
#include "finally.hh"
#include <chrono>
#include <cmath>
#include <regex>
#include <unordered_set>
#include <thread>
#include <dirent.h>
#include <mntent.h>
namespace nix {
std::optional<Path> getCgroupFS()
{
static auto res = [&]() -> std::optional<Path> {
auto fp = fopen("/proc/mounts", "r");
if (!fp) return std::nullopt;
Finally delFP = [&]() { fclose(fp); };
while (auto ent = getmntent(fp))
if (std::string_view(ent->mnt_type) == "cgroup2")
return ent->mnt_dir;
return std::nullopt;
}();
return res;
}
// FIXME: obsolete, check for cgroup2
std::map<std::string, std::string> getCgroups(const Path & cgroupFile)
{
std::map<std::string, std::string> cgroups;
for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cgroupFile), "\n")) {
static std::regex regex("([0-9]+):([^:]*):(.*)");
std::smatch match;
if (!std::regex_match(line, match, regex))
throw Error("invalid line '%s' in '%s'", line, cgroupFile);
std::string name = hasPrefix(std::string(match[2]), "name=") ? std::string(match[2], 5) : match[2];
cgroups.insert_or_assign(name, match[3]);
}
return cgroups;
}
static CgroupStats destroyCgroup(const Path & cgroup, bool returnStats)
{
if (!pathExists(cgroup)) return {};
auto procsFile = cgroup + "/cgroup.procs";
if (!pathExists(procsFile))
throw Error("'%s' is not a cgroup", cgroup);
/* Use the fast way to kill every process in a cgroup, if
available. */
auto killFile = cgroup + "/cgroup.kill";
if (pathExists(killFile))
writeFile(killFile, "1");
/* Otherwise, manually kill every process in the subcgroups and
this cgroup. */
for (auto & entry : readDirectory(cgroup)) {
if (entry.type != DT_DIR) continue;
destroyCgroup(cgroup + "/" + entry.name, false);
}
int round = 1;
std::unordered_set<pid_t> pidsShown;
while (true) {
auto pids = tokenizeString<std::vector<std::string>>(readFile(procsFile));
if (pids.empty()) break;
if (round > 20)
throw Error("cannot kill cgroup '%s'", cgroup);
for (auto & pid_s : pids) {
pid_t pid;
if (auto o = string2Int<pid_t>(pid_s))
pid = *o;
else
throw Error("invalid pid '%s'", pid);
if (pidsShown.insert(pid).second) {
try {
auto cmdline = readFile(fmt("/proc/%d/cmdline", pid));
using namespace std::string_literals;
warn("killing stray builder process %d (%s)...",
pid, trim(replaceStrings(cmdline, "\0"s, " ")));
} catch (SysError &) {
}
}
// FIXME: pid wraparound
if (kill(pid, SIGKILL) == -1 && errno != ESRCH)
throw SysError("killing member %d of cgroup '%s'", pid, cgroup);
}
auto sleep = std::chrono::milliseconds((int) std::pow(2.0, std::min(round, 10)));
if (sleep.count() > 100)
printError("waiting for %d ms for cgroup '%s' to become empty", sleep.count(), cgroup);
std::this_thread::sleep_for(sleep);
round++;
}
CgroupStats stats;
if (returnStats) {
auto cpustatPath = cgroup + "/cpu.stat";
if (pathExists(cpustatPath)) {
for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cpustatPath), "\n")) {
std::string_view userPrefix = "user_usec ";
if (hasPrefix(line, userPrefix)) {
auto n = string2Int<uint64_t>(line.substr(userPrefix.size()));
if (n) stats.cpuUser = std::chrono::microseconds(*n);
}
std::string_view systemPrefix = "system_usec ";
if (hasPrefix(line, systemPrefix)) {
auto n = string2Int<uint64_t>(line.substr(systemPrefix.size()));
if (n) stats.cpuSystem = std::chrono::microseconds(*n);
}
}
}
}
if (rmdir(cgroup.c_str()) == -1)
throw SysError("deleting cgroup '%s'", cgroup);
return stats;
}
CgroupStats destroyCgroup(const Path & cgroup)
{
return destroyCgroup(cgroup, true);
}
}
#endif

src/libutil/cgroup.hh (new file, 29 lines)
View file

@ -0,0 +1,29 @@
#pragma once
#if __linux__
#include <chrono>
#include <optional>
#include "types.hh"
namespace nix {
std::optional<Path> getCgroupFS();
std::map<std::string, std::string> getCgroups(const Path & cgroupFile);
struct CgroupStats
{
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
};
/* Destroy the cgroup denoted by 'cgroup'. The postcondition is that
'cgroup' does not exist, and thus any processes in the cgroup have
been killed. Also return statistics from the cgroup just before
destruction. */
CgroupStats destroyCgroup(const Path & cgroup);
}
#endif

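Taken together, the two files above form a small Linux-only API: discover the cgroup v2 mount point, parse a process's cgroup memberships, and tear a cgroup down while collecting its CPU statistics. A hedged usage sketch — it must be built on Linux against libutil, and `destroyCgroup` is only shown in a comment because it needs an existing, writable cgroup:

```cpp
#include "cgroup.hh"

#include <iostream>

using namespace nix;

int main()
{
    // Where the unified cgroup v2 hierarchy is mounted, found by scanning
    // /proc/mounts for a "cgroup2" entry.
    if (auto fs = getCgroupFS())
        std::cout << "cgroup2 mounted at " << *fs << "\n";

    // Controller -> path map for the current process, parsed from lines of
    // the form "<id>:<controller>:<path>".
    for (auto & [name, path] : getCgroups("/proc/self/cgroup"))
        std::cout << name << " -> " << path << "\n";

    // Destroying a cgroup kills its members (via cgroup.kill when the kernel
    // supports it, otherwise by SIGKILL-ing every pid in cgroup.procs) and
    // returns the CPU statistics gathered just before removal:
    //
    //   CgroupStats stats = destroyCgroup("/sys/fs/cgroup/nix-build-example");
    //   if (stats.cpuUser)
    //       std::cout << "user CPU: " << stats.cpuUser->count() << " us\n";
}
```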
View file

@ -9,9 +9,9 @@ namespace nix {
const std::string nativeSystem = SYSTEM;
void BaseError::addTrace(std::optional<ErrPos> e, hintformat hint, bool frame)
void BaseError::addTrace(std::shared_ptr<AbstractPos> && e, hintformat hint, bool frame)
{
err.traces.push_front(Trace { .pos = e, .hint = hint, .frame = frame });
err.traces.push_front(Trace { .pos = std::move(e), .hint = hint, .frame = frame });
}
// c++ std::exception descendants must have a 'const char* what()' function.
@ -30,91 +30,46 @@ const std::string & BaseError::calcWhat() const
std::optional<std::string> ErrorInfo::programName = std::nullopt;
std::ostream & operator<<(std::ostream & os, const hintformat & hf)
std::ostream & operator <<(std::ostream & os, const hintformat & hf)
{
return os << hf.str();
}
std::string showErrPos(const ErrPos & errPos)
std::ostream & operator <<(std::ostream & str, const AbstractPos & pos)
{
if (errPos.line > 0) {
if (errPos.column > 0) {
return fmt("%d:%d", errPos.line, errPos.column);
} else {
return fmt("%d", errPos.line);
}
}
else {
return "";
}
pos.print(str);
str << ":" << pos.line;
if (pos.column > 0)
str << ":" << pos.column;
return str;
}
std::optional<LinesOfCode> getCodeLines(const ErrPos & errPos)
std::optional<LinesOfCode> AbstractPos::getCodeLines() const
{
if (errPos.line <= 0)
if (line == 0)
return std::nullopt;
if (errPos.origin == foFile) {
LinesOfCode loc;
try {
// FIXME: when running as the daemon, make sure we don't
// open a file to which the client doesn't have access.
AutoCloseFD fd = open(errPos.file.c_str(), O_RDONLY | O_CLOEXEC);
if (!fd) return {};
if (auto source = getSource()) {
// count the newlines.
int count = 0;
std::string line;
int pl = errPos.line - 1;
do
{
line = readLine(fd.get());
++count;
if (count < pl)
;
else if (count == pl)
loc.prevLineOfCode = line;
else if (count == pl + 1)
loc.errLineOfCode = line;
else if (count == pl + 2) {
loc.nextLineOfCode = line;
break;
}
} while (true);
return loc;
}
catch (EndOfFile & eof) {
if (loc.errLineOfCode.has_value())
return loc;
else
return std::nullopt;
}
catch (std::exception & e) {
return std::nullopt;
}
} else {
std::istringstream iss(errPos.file);
std::istringstream iss(*source);
// count the newlines.
int count = 0;
std::string line;
int pl = errPos.line - 1;
std::string curLine;
int pl = line - 1;
LinesOfCode loc;
do
{
std::getline(iss, line);
do {
std::getline(iss, curLine);
++count;
if (count < pl)
{
;
}
else if (count == pl) {
loc.prevLineOfCode = line;
loc.prevLineOfCode = curLine;
} else if (count == pl + 1) {
loc.errLineOfCode = line;
loc.errLineOfCode = curLine;
} else if (count == pl + 2) {
loc.nextLineOfCode = line;
loc.nextLineOfCode = curLine;
break;
}
@ -124,12 +79,14 @@ std::optional<LinesOfCode> getCodeLines(const ErrPos & errPos)
return loc;
}
return std::nullopt;
}
// print lines of code to the ostream, indicating the error column.
void printCodeLines(std::ostream & out,
const std::string & prefix,
const ErrPos & errPos,
const AbstractPos & errPos,
const LinesOfCode & loc)
{
// previous line of code.
@ -176,32 +133,6 @@ void printCodeLines(std::ostream & out,
}
}
// Enough indent to align with the `... `
// prepended to each element of the trace
#define ELLIPSIS_INDENT " "
void printAtPos(const ErrPos & pos, std::ostream & out)
{
if (pos) {
switch (pos.origin) {
case foFile: {
out << fmt(ELLIPSIS_INDENT "at " ANSI_WARNING "%s:%s" ANSI_NORMAL ":", pos.file, showErrPos(pos));
break;
}
case foString: {
out << fmt(ELLIPSIS_INDENT "at " ANSI_WARNING "«string»:%s" ANSI_NORMAL ":", showErrPos(pos));
break;
}
case foStdin: {
out << fmt(ELLIPSIS_INDENT "at " ANSI_WARNING "«stdin»:%s" ANSI_NORMAL ":", showErrPos(pos));
break;
}
default:
throw Error("invalid FileOrigin in errPos");
}
}
}
static std::string indent(std::string_view indentFirst, std::string_view indentRest, std::string_view s)
{
std::string res;
@ -266,27 +197,8 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
prefix += ":" ANSI_NORMAL " ";
std::ostringstream oss;
oss << einfo.msg << "\n";
if (einfo.errPos.has_value() && *einfo.errPos) {
printAtPos(*einfo.errPos, oss);
auto loc = getCodeLines(*einfo.errPos);
// lines of code.
if (loc.has_value()) {
oss << "\n";
printCodeLines(oss, "", *einfo.errPos, *loc);
}
oss << "\n";
}
auto suggestions = einfo.suggestions.trim();
if (! suggestions.suggestions.empty()){
oss << "Did you mean " <<
suggestions.trim() <<
"?" << std::endl;
}
auto noSource = ANSI_ITALIC " (source not available)" ANSI_NORMAL "\n";
/*
* Traces
@ -382,36 +294,61 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
*
*/
// Enough indent to align with the `... `
// prepended to each element of the trace
auto ellipsisIndent = " ";
bool frameOnly = false;
if (!einfo.traces.empty()) {
unsigned int count = 0;
for (auto iter = einfo.traces.rbegin(); iter != einfo.traces.rend(); ++iter) {
size_t count = 0;
for (const auto & trace : einfo.traces) {
if (!showTrace && count > 3) {
oss << "\n" << "(truncated)" << "\n";
oss << "\n" << ANSI_ITALIC "(stack trace truncated)" ANSI_NORMAL << "\n";
break;
}
if (iter->hint.str().empty()) continue;
if (frameOnly && !iter->frame) continue;
if (trace.hint.str().empty()) continue;
if (frameOnly && !trace.frame) continue;
count++;
frameOnly = iter->frame;
frameOnly = trace.frame;
oss << "\n" << "" << iter->hint.str() << "\n";
oss << "\n" << "" << trace.hint.str() << "\n";
if (iter->pos.has_value() && (*iter->pos)) {
if (trace.pos) {
count++;
auto pos = iter->pos.value();
printAtPos(pos, oss);
auto loc = getCodeLines(pos);
if (loc.has_value()) {
oss << "\n" << ellipsisIndent << ANSI_BLUE << "at " ANSI_WARNING << *trace.pos << ANSI_NORMAL << ":";
if (auto loc = trace.pos->getCodeLines()) {
oss << "\n";
printCodeLines(oss, "", pos, *loc);
}
oss << "\n";
printCodeLines(oss, "", *trace.pos, *loc);
oss << "\n";
} else
oss << noSource;
}
}
oss << "\n" << prefix;
}
oss << einfo.msg << "\n";
if (einfo.errPos) {
oss << "\n" << ANSI_BLUE << "at " ANSI_WARNING << *einfo.errPos << ANSI_NORMAL << ":";
if (auto loc = einfo.errPos->getCodeLines()) {
oss << "\n";
printCodeLines(oss, "", *einfo.errPos, *loc);
oss << "\n";
} else
oss << noSource;
}
auto suggestions = einfo.suggestions.trim();
if (!suggestions.suggestions.empty()) {
oss << "Did you mean " <<
suggestions.trim() <<
"?" << std::endl;
}
out << indent(prefix, std::string(filterANSIEscapes(prefix, true).size(), ' '), chomp(oss.str()));

View file

@ -54,13 +54,6 @@ typedef enum {
lvlVomit
} Verbosity;
/* adjust Pos::origin bit width when adding stuff here */
typedef enum {
foFile,
foStdin,
foString
} FileOrigin;
// the lines of code surrounding an error.
struct LinesOfCode {
std::optional<std::string> prevLineOfCode;
@ -68,47 +61,30 @@ struct LinesOfCode {
std::optional<std::string> nextLineOfCode;
};
// ErrPos indicates the location of an error in a nix file.
struct ErrPos {
int line = 0;
int column = 0;
std::string file;
FileOrigin origin;
/* An abstract type that represents a location in a source file. */
struct AbstractPos
{
uint32_t line = 0;
uint32_t column = 0;
operator bool() const
{
return line != 0;
}
/* Return the contents of the source file. */
virtual std::optional<std::string> getSource() const
{ return std::nullopt; };
// convert from the Pos struct, found in libexpr.
template <class P>
ErrPos & operator=(const P & pos)
{
origin = pos.origin;
line = pos.line;
column = pos.column;
file = pos.file;
return *this;
}
virtual void print(std::ostream & out) const = 0;
template <class P>
ErrPos(const P & p)
{
*this = p;
}
std::optional<LinesOfCode> getCodeLines() const;
};
std::optional<LinesOfCode> getCodeLines(const ErrPos & errPos);
std::ostream & operator << (std::ostream & str, const AbstractPos & pos);
void printCodeLines(std::ostream & out,
const std::string & prefix,
const ErrPos & errPos,
const AbstractPos & errPos,
const LinesOfCode & loc);
void printAtPos(const ErrPos & pos, std::ostream & out);
struct Trace {
std::optional<ErrPos> pos;
std::shared_ptr<AbstractPos> pos;
hintformat hint;
bool frame;
};
@ -116,7 +92,7 @@ struct Trace {
struct ErrorInfo {
Verbosity level;
hintformat msg;
std::optional<ErrPos> errPos;
std::shared_ptr<AbstractPos> errPos;
std::list<Trace> traces;
Suggestions suggestions;
@ -179,17 +155,18 @@ public:
const std::string & msg() const { return calcWhat(); }
const ErrorInfo & info() const { calcWhat(); return err; }
void pushTrace(Trace trace) {
void pushTrace(Trace trace)
{
err.traces.push_front(trace);
}
template<typename... Args>
void addTrace(std::optional<ErrPos> e, std::string_view fs, const Args & ... args)
void addTrace(std::shared_ptr<AbstractPos> && e, std::string_view fs, const Args & ... args)
{
addTrace(e, hintfmt(std::string(fs), args...));
addTrace(std::move(e), hintfmt(std::string(fs), args...));
}
void addTrace(std::optional<ErrPos> e, hintformat hint, bool frame = false);
void addTrace(std::shared_ptr<AbstractPos> && e, hintformat hint, bool frame = false);
bool hasTrace() const { return !err.traces.empty(); }

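The new `AbstractPos` leaves the notion of "where the source came from" entirely to the subclass: `print()` renders the origin, and `getSource()` optionally feeds the source text back so `getCodeLines()` can show an excerpt. A purely illustrative subclass (`FilePos` is not part of this commit) showing how the `operator<<` declared above renders it:

```cpp
#include "error.hh"
#include "util.hh"

#include <iostream>

using namespace nix;

/* Illustrative only: a position inside an on-disk file. */
struct FilePos : AbstractPos
{
    std::string path;

    FilePos(std::string path, uint32_t line, uint32_t column)
        : path(std::move(path))
    {
        this->line = line;
        this->column = column;
    }

    std::optional<std::string> getSource() const override
    {
        // Feeding the file contents back lets getCodeLines() extract the
        // lines surrounding the error for the source excerpt in messages.
        try { return readFile(path); } catch (...) { return std::nullopt; }
    }

    void print(std::ostream & out) const override
    {
        out << path;
    }
};

int main()
{
    FilePos pos("default.nix", 3, 12);
    std::cout << pos << "\n";   // prints "default.nix:3:12"
}
```

In the Nix tree itself the position types in libexpr presumably supply equivalents; the `Trace` and `ErrorInfo` members above only depend on the abstract interface.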
View file

@ -14,6 +14,8 @@ std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
{ Xp::NoUrlLiterals, "no-url-literals" },
{ Xp::FetchClosure, "fetch-closure" },
{ Xp::ReplFlake, "repl-flake" },
{ Xp::AutoAllocateUids, "auto-allocate-uids" },
{ Xp::Cgroups, "cgroups" },
};
const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)

View file

@ -23,6 +23,8 @@ enum struct ExperimentalFeature
NoUrlLiterals,
FetchClosure,
ReplFlake,
AutoAllocateUids,
Cgroups,
};
/**

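The two new feature names follow the existing kebab-case convention, so presumably they are switched on like any other experimental feature, e.g. via an illustrative nix.conf snippet:

```
# nix.conf — illustrative
extra-experimental-features = auto-allocate-uids cgroups
```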
View file

@ -1,5 +1,6 @@
#include <sys/time.h>
#include <filesystem>
#include <atomic>
#include "finally.hh"
#include "util.hh"
@ -10,7 +11,7 @@ namespace fs = std::filesystem;
namespace nix {
static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
int & counter)
std::atomic<unsigned int> & counter)
{
tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
if (includePid)
@ -22,9 +23,9 @@ static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
Path createTempDir(const Path & tmpRoot, const Path & prefix,
bool includePid, bool useGlobalCounter, mode_t mode)
{
static int globalCounter = 0;
int localCounter = 0;
int & counter(useGlobalCounter ? globalCounter : localCounter);
static std::atomic<unsigned int> globalCounter = 0;
std::atomic<unsigned int> localCounter = 0;
auto & counter(useGlobalCounter ? globalCounter : localCounter);
while (1) {
checkInterrupt();

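The switch to `std::atomic` matters because `createTempDir` may run concurrently with `useGlobalCounter = true`; the increment on the shared counter is now free of data races. A small sketch exercising it from several threads, with the arguments spelled out to match the definition above (the prefix and mode are illustrative):

```cpp
#include "util.hh"

#include <iostream>
#include <thread>
#include <vector>

using namespace nix;

int main()
{
    // Four threads sharing the global counter; the atomic increment in
    // tempName() keeps name generation free of data races.
    std::vector<std::thread> threads;
    for (int i = 0; i < 4; ++i)
        threads.emplace_back([] {
            auto dir = createTempDir("", "nix-example", true, true, 0700);
            std::cout << dir << "\n";
        });
    for (auto & t : threads) t.join();
}
```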
View file

@ -148,7 +148,7 @@ inline hintformat hintfmt(const std::string & fs, const Args & ... args)
return f;
}
inline hintformat hintfmt(std::string plain_string)
inline hintformat hintfmt(const std::string & plain_string)
{
// we won't be receiving any args in this case, so just print the original string
return hintfmt("%s", normaltxt(plain_string));

View file

@ -1,203 +0,0 @@
#include "json.hh"
#include <iomanip>
#include <cstdint>
#include <cstring>
namespace nix {
template<>
void toJSON<std::string_view>(std::ostream & str, const std::string_view & s)
{
constexpr size_t BUF_SIZE = 4096;
char buf[BUF_SIZE + 7]; // BUF_SIZE + largest single sequence of puts
size_t bufPos = 0;
const auto flush = [&] {
str.write(buf, bufPos);
bufPos = 0;
};
const auto put = [&] (char c) {
buf[bufPos++] = c;
};
put('"');
for (auto i = s.begin(); i != s.end(); i++) {
if (bufPos >= BUF_SIZE) flush();
if (*i == '\"' || *i == '\\') { put('\\'); put(*i); }
else if (*i == '\n') { put('\\'); put('n'); }
else if (*i == '\r') { put('\\'); put('r'); }
else if (*i == '\t') { put('\\'); put('t'); }
else if (*i >= 0 && *i < 32) {
const char hex[17] = "0123456789abcdef";
put('\\');
put('u');
put(hex[(uint16_t(*i) >> 12) & 0xf]);
put(hex[(uint16_t(*i) >> 8) & 0xf]);
put(hex[(uint16_t(*i) >> 4) & 0xf]);
put(hex[(uint16_t(*i) >> 0) & 0xf]);
}
else put(*i);
}
put('"');
flush();
}
void toJSON(std::ostream & str, const char * s)
{
if (!s) str << "null"; else toJSON(str, std::string_view(s));
}
template<> void toJSON<int>(std::ostream & str, const int & n) { str << n; }
template<> void toJSON<unsigned int>(std::ostream & str, const unsigned int & n) { str << n; }
template<> void toJSON<long>(std::ostream & str, const long & n) { str << n; }
template<> void toJSON<unsigned long>(std::ostream & str, const unsigned long & n) { str << n; }
template<> void toJSON<long long>(std::ostream & str, const long long & n) { str << n; }
template<> void toJSON<unsigned long long>(std::ostream & str, const unsigned long long & n) { str << n; }
template<> void toJSON<float>(std::ostream & str, const float & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
template<> void toJSON<std::string>(std::ostream & str, const std::string & s) { toJSON(str, (std::string_view) s); }
template<> void toJSON<bool>(std::ostream & str, const bool & b)
{
str << (b ? "true" : "false");
}
template<> void toJSON<std::nullptr_t>(std::ostream & str, const std::nullptr_t & b)
{
str << "null";
}
JSONWriter::JSONWriter(std::ostream & str, bool indent)
: state(new JSONState(str, indent))
{
state->stack++;
}
JSONWriter::JSONWriter(JSONState * state)
: state(state)
{
state->stack++;
}
JSONWriter::~JSONWriter()
{
if (state) {
assertActive();
state->stack--;
if (state->stack == 0) delete state;
}
}
void JSONWriter::comma()
{
assertActive();
if (first) {
first = false;
} else {
state->str << ',';
}
if (state->indent) indent();
}
void JSONWriter::indent()
{
state->str << '\n' << std::string(state->depth * 2, ' ');
}
void JSONList::open()
{
state->depth++;
state->str << '[';
}
JSONList::~JSONList()
{
state->depth--;
if (state->indent && !first) indent();
state->str << "]";
}
JSONList JSONList::list()
{
comma();
return JSONList(state);
}
JSONObject JSONList::object()
{
comma();
return JSONObject(state);
}
JSONPlaceholder JSONList::placeholder()
{
comma();
return JSONPlaceholder(state);
}
void JSONObject::open()
{
state->depth++;
state->str << '{';
}
JSONObject::~JSONObject()
{
if (state) {
state->depth--;
if (state->indent && !first) indent();
state->str << "}";
}
}
void JSONObject::attr(std::string_view s)
{
comma();
toJSON(state->str, s);
state->str << ':';
if (state->indent) state->str << ' ';
}
JSONList JSONObject::list(std::string_view name)
{
attr(name);
return JSONList(state);
}
JSONObject JSONObject::object(std::string_view name)
{
attr(name);
return JSONObject(state);
}
JSONPlaceholder JSONObject::placeholder(std::string_view name)
{
attr(name);
return JSONPlaceholder(state);
}
JSONList JSONPlaceholder::list()
{
assertValid();
first = false;
return JSONList(state);
}
JSONObject JSONPlaceholder::object()
{
assertValid();
first = false;
return JSONObject(state);
}
JSONPlaceholder::~JSONPlaceholder()
{
if (first) {
assert(std::uncaught_exceptions());
if (state->stack != 0)
write(nullptr);
}
}
}

View file

@ -1,185 +0,0 @@
#pragma once
#include <iostream>
#include <vector>
#include <cassert>
namespace nix {
void toJSON(std::ostream & str, const char * s);
template<typename T>
void toJSON(std::ostream & str, const T & n);
class JSONWriter
{
protected:
struct JSONState
{
std::ostream & str;
bool indent;
size_t depth = 0;
size_t stack = 0;
JSONState(std::ostream & str, bool indent) : str(str), indent(indent) { }
~JSONState()
{
assert(stack == 0);
}
};
JSONState * state;
bool first = true;
JSONWriter(std::ostream & str, bool indent);
JSONWriter(JSONState * state);
~JSONWriter();
void assertActive()
{
assert(state->stack != 0);
}
void comma();
void indent();
};
class JSONObject;
class JSONPlaceholder;
class JSONList : JSONWriter
{
private:
friend class JSONObject;
friend class JSONPlaceholder;
void open();
JSONList(JSONState * state)
: JSONWriter(state)
{
open();
}
public:
JSONList(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
open();
}
~JSONList();
template<typename T>
JSONList & elem(const T & v)
{
comma();
toJSON(state->str, v);
return *this;
}
JSONList list();
JSONObject object();
JSONPlaceholder placeholder();
};
class JSONObject : JSONWriter
{
private:
friend class JSONList;
friend class JSONPlaceholder;
void open();
JSONObject(JSONState * state)
: JSONWriter(state)
{
open();
}
void attr(std::string_view s);
public:
JSONObject(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
open();
}
JSONObject(const JSONObject & obj) = delete;
JSONObject(JSONObject && obj)
: JSONWriter(obj.state)
{
obj.state = 0;
}
~JSONObject();
template<typename T>
JSONObject & attr(std::string_view name, const T & v)
{
attr(name);
toJSON(state->str, v);
return *this;
}
JSONList list(std::string_view name);
JSONObject object(std::string_view name);
JSONPlaceholder placeholder(std::string_view name);
};
class JSONPlaceholder : JSONWriter
{
private:
friend class JSONList;
friend class JSONObject;
JSONPlaceholder(JSONState * state)
: JSONWriter(state)
{
}
void assertValid()
{
assertActive();
assert(first);
}
public:
JSONPlaceholder(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
}
~JSONPlaceholder();
template<typename T>
void write(const T & v)
{
assertValid();
first = false;
toJSON(state->str, v);
}
JSONList list();
JSONObject object();
};
}

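These hand-rolled streaming writer classes are removed outright; the remaining JSON producers in the tree appear to use nlohmann::json instead (as the JSON logger further down already does). An approximate equivalent of the old JSONObject/JSONList usage, under that assumption:

```cpp
#include <nlohmann/json.hpp>

#include <iostream>

int main()
{
    // Old style (removed):            New style:
    //   JSONObject obj(std::cout);      build the document in memory,
    //   obj.attr("name", "hello");      then serialise it in one go.
    nlohmann::json obj;
    obj["name"] = "hello";
    obj["outputs"] = nlohmann::json::array({"out", "dev"});

    // dump(2) pretty-prints with two-space indentation, roughly what the
    // removed JSONWriter's 'indent' flag used to do.
    std::cout << obj.dump(2) << std::endl;
}
```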
View file

@ -105,14 +105,6 @@ public:
Verbosity verbosity = lvlInfo;
void warnOnce(bool & haveWarned, const FormatOrString & fs)
{
if (!haveWarned) {
warn(fs.s);
haveWarned = true;
}
}
void writeToStderr(std::string_view s)
{
try {
@ -130,15 +122,30 @@ Logger * makeSimpleLogger(bool printBuildLogs)
return new SimpleLogger(printBuildLogs);
}
std::atomic<uint64_t> nextId{(uint64_t) getpid() << 32};
std::atomic<uint64_t> nextId{0};
Activity::Activity(Logger & logger, Verbosity lvl, ActivityType type,
const std::string & s, const Logger::Fields & fields, ActivityId parent)
: logger(logger), id(nextId++)
: logger(logger), id(nextId++ + (((uint64_t) getpid()) << 32))
{
logger.startActivity(id, lvl, type, s, fields, parent);
}
void to_json(nlohmann::json & json, std::shared_ptr<AbstractPos> pos)
{
if (pos) {
json["line"] = pos->line;
json["column"] = pos->column;
std::ostringstream str;
pos->print(str);
json["file"] = str.str();
} else {
json["line"] = nullptr;
json["column"] = nullptr;
json["file"] = nullptr;
}
}
struct JSONLogger : Logger {
Logger & prevLogger;
@ -185,27 +192,14 @@ struct JSONLogger : Logger {
json["level"] = ei.level;
json["msg"] = oss.str();
json["raw_msg"] = ei.msg.str();
if (ei.errPos.has_value() && (*ei.errPos)) {
json["line"] = ei.errPos->line;
json["column"] = ei.errPos->column;
json["file"] = ei.errPos->file;
} else {
json["line"] = nullptr;
json["column"] = nullptr;
json["file"] = nullptr;
}
to_json(json, ei.errPos);
if (loggerSettings.showTrace.get() && !ei.traces.empty()) {
nlohmann::json traces = nlohmann::json::array();
for (auto iter = ei.traces.rbegin(); iter != ei.traces.rend(); ++iter) {
nlohmann::json stackFrame;
stackFrame["raw_msg"] = iter->hint.str();
if (iter->pos.has_value() && (*iter->pos)) {
stackFrame["line"] = iter->pos->line;
stackFrame["column"] = iter->pos->column;
stackFrame["file"] = iter->pos->file;
}
to_json(stackFrame, iter->pos);
traces.push_back(stackFrame);
}

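The activity-id change moves the `getpid()` call out of the static initialiser of `nextId` and into the `Activity` constructor; the resulting ids still carry the process id in their upper 32 bits, so ids from different processes cannot collide. A small arithmetic sketch of that layout (the pid and counter values are made up):

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    // Hypothetical values, just to show the layout of an ActivityId.
    uint64_t pid = 4242;     // from getpid() at Activity construction
    uint64_t counter = 7;    // from the process-wide atomic nextId
    uint64_t id = counter + (pid << 32);

    assert(id >> 32 == pid);              // upper 32 bits: process id
    assert((id & 0xffffffff) == counter); // lower 32 bits: per-process counter
}
```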
View file

@ -82,7 +82,7 @@ public:
log(lvlInfo, fs);
}
virtual void logEI(const ErrorInfo &ei) = 0;
virtual void logEI(const ErrorInfo & ei) = 0;
void logEI(Verbosity lvl, ErrorInfo ei)
{
@ -225,7 +225,11 @@ inline void warn(const std::string & fs, const Args & ... args)
logger->warn(f.str());
}
void warnOnce(bool & haveWarned, const FormatOrString & fs);
#define warnOnce(haveWarned, args...) \
if (!haveWarned) { \
haveWarned = true; \
warn(args); \
}
void writeToStderr(std::string_view s);

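`warnOnce` has to become a macro because, unlike the removed helper that took a preformatted `FormatOrString`, it now forwards arbitrary format arguments straight to `warn()`. A hedged usage sketch (the option names are invented):

```cpp
#include "logging.hh"

using namespace nix;

void maybeWarnDeprecated()
{
    // The flag must outlive repeated calls, so it is typically a static or a
    // member; the warning is emitted at most once per flag.
    static bool haveWarned = false;
    // Expands to: if (!haveWarned) { haveWarned = true; warn(...); }
    warnOnce(haveWarned, "option '%s' is deprecated, use '%s' instead",
        "old-option", "new-option");
}
```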
View file

@ -83,6 +83,11 @@ public:
return p != other.p;
}
bool operator < (const ref<T> & other) const
{
return p < other.p;
}
private:
template<typename T2, typename... Args>

Some files were not shown because too many files have changed in this diff.