Mirror of https://github.com/NixOS/nix (synced 2025-07-07 01:51:47 +02:00)
Merge remote-tracking branch 'upstream/master' into fix-and-ci-static-builds
Commit 25f7ff16fa: 341 changed files with 13513 additions and 18444 deletions
@@ -38,9 +38,9 @@ static AutoCloseFD openSlotLock(const Machine & m, uint64_t slot)
     return openLockFile(fmt("%s/%s-%d", currentLoad, escapeUri(m.storeUri), slot), true);
 }
 
-static bool allSupportedLocally(const std::set<std::string>& requiredFeatures) {
+static bool allSupportedLocally(Store & store, const std::set<std::string>& requiredFeatures) {
     for (auto & feature : requiredFeatures)
-        if (!settings.systemFeatures.get().count(feature)) return false;
+        if (!store.systemFeatures.get().count(feature)) return false;
     return true;
 }
 
@@ -106,7 +106,7 @@ static int _main(int argc, char * * argv)
                 auto canBuildLocally = amWilling
                     &&  (  neededSystem == settings.thisSystem
                         || settings.extraPlatforms.get().count(neededSystem) > 0)
-                    &&  allSupportedLocally(requiredFeatures);
+                    &&  allSupportedLocally(*store, requiredFeatures);
 
                 /* Error ignored here, will be caught later */
                 mkdir(currentLoad.c_str(), 0777);
@@ -201,7 +201,7 @@ static int _main(int argc, char * * argv)
                        % concatStringsSep<StringSet>(", ", m.mandatoryFeatures);
                 }
 
-                logError({
+                logErrorInfo(lvlInfo, {
                     .name = "Remote build",
                     .description = "Failed to find a machine for remote build!",
                     .hint = hint
@@ -224,15 +224,7 @@ static int _main(int argc, char * * argv)
 
         Activity act(*logger, lvlTalkative, actUnknown, fmt("connecting to '%s'", bestMachine->storeUri));
 
-        Store::Params storeParams;
-        if (hasPrefix(bestMachine->storeUri, "ssh://")) {
-            storeParams["max-connections"] = "1";
-            storeParams["log-fd"] = "4";
-            if (bestMachine->sshKey != "")
-                storeParams["ssh-key"] = bestMachine->sshKey;
-        }
-
-        sshStore = openStore(bestMachine->storeUri, storeParams);
+        sshStore = bestMachine->openStore();
         sshStore->connect();
         storeUri = bestMachine->storeUri;
 
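The last hunk above drops the hand-rolled `Store::Params` setup in favour of `Machine::openStore()`. A minimal sketch of what such a member plausibly encapsulates, inferred from the deleted lines (illustrative only; the actual definition is elsewhere in this commit):

```cpp
// Hypothetical reconstruction of Machine::openStore(), based on the
// parameters the deleted call site used to build by hand.
ref<Store> Machine::openStore() const
{
    Store::Params storeParams;
    if (hasPrefix(storeUri, "ssh://")) {
        storeParams["max-connections"] = "1";
        storeParams["log-fd"] = "4";
        if (sshKey != "")
            storeParams["ssh-key"] = sshKey;
    }
    return nix::openStore(storeUri, storeParams);
}
```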
@@ -29,7 +29,7 @@ MixEvalArgs::MixEvalArgs()
     addFlag({
         .longName = "include",
         .shortName = 'I',
-        .description = "add a path to the list of locations used to look up <...> file names",
+        .description = "add a path to the list of locations used to look up `<...>` file names",
         .labels = {"path"},
         .handler = {[&](std::string s) { searchPath.push_back(s); }}
     });
@@ -391,7 +391,8 @@ Value & AttrCursor::forceValue()
 
     if (root->db && (!cachedValue || std::get_if<placeholder_t>(&cachedValue->second))) {
         if (v.type == tString)
-            cachedValue = {root->db->setString(getKey(), v.string.s, v.string.context), v.string.s};
+            cachedValue = {root->db->setString(getKey(), v.string.s, v.string.context),
+                           string_t{v.string.s, {}}};
         else if (v.type == tPath)
             cachedValue = {root->db->setString(getKey(), v.path), v.path};
         else if (v.type == tBool)
@@ -405,7 +406,7 @@ Value & AttrCursor::forceValue()
     return v;
 }
 
-std::shared_ptr<AttrCursor> AttrCursor::maybeGetAttr(Symbol name)
+std::shared_ptr<AttrCursor> AttrCursor::maybeGetAttr(Symbol name, bool forceErrors)
 {
     if (root->db) {
         if (!cachedValue)
@@ -422,9 +423,12 @@ std::shared_ptr<AttrCursor> AttrCursor::maybeGetAttr(Symbol name)
             if (attr) {
                 if (std::get_if<missing_t>(&attr->second))
                     return nullptr;
-                else if (std::get_if<failed_t>(&attr->second))
-                    throw EvalError("cached failure of attribute '%s'", getAttrPathStr(name));
-                else
+                else if (std::get_if<failed_t>(&attr->second)) {
+                    if (forceErrors)
+                        debug("reevaluating failed cached attribute '%s'");
+                    else
+                        throw CachedEvalError("cached failure of attribute '%s'", getAttrPathStr(name));
+                } else
                     return std::make_shared<AttrCursor>(root,
                         std::make_pair(shared_from_this(), name), nullptr, std::move(attr));
             }
@@ -469,9 +473,9 @@ std::shared_ptr<AttrCursor> AttrCursor::maybeGetAttr(std::string_view name)
     return maybeGetAttr(root->state.symbols.create(name));
 }
 
-std::shared_ptr<AttrCursor> AttrCursor::getAttr(Symbol name)
+std::shared_ptr<AttrCursor> AttrCursor::getAttr(Symbol name, bool forceErrors)
 {
-    auto p = maybeGetAttr(name);
+    auto p = maybeGetAttr(name, forceErrors);
     if (!p)
         throw Error("attribute '%s' does not exist", getAttrPathStr(name));
     return p;
@@ -600,7 +604,7 @@ bool AttrCursor::isDerivation()
 
 StorePath AttrCursor::forceDerivation()
 {
-    auto aDrvPath = getAttr(root->state.sDrvPath);
+    auto aDrvPath = getAttr(root->state.sDrvPath, true);
     auto drvPath = root->state.store->parseStorePath(aDrvPath->getString());
     if (!root->state.store->isValidPath(drvPath) && !settings.readOnlyMode) {
         /* The eval cache contains 'drvPath', but the actual path has
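Together these hunks turn a cached evaluation failure from a hard `EvalError` into a `CachedEvalError` that callers may bypass: `forceDerivation` now calls `getAttr(..., true)` so a stale cached failure is re-evaluated rather than rethrown. A sketch of the retry pattern this enables (an assumed usage, not quoted from this diff):

```cpp
// Assumed catch-and-retry pattern: on a cached failure, force
// re-evaluation to surface the real error (or a now-working value).
std::shared_ptr<AttrCursor> attr;
try {
    attr = cursor->getAttr(name);        // may throw CachedEvalError
} catch (CachedEvalError &) {
    attr = cursor->getAttr(name, true);  // forceErrors: skip the cache entry
}
```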
@@ -9,6 +9,8 @@
 
 namespace nix::eval_cache {
 
+MakeError(CachedEvalError, EvalError);
+
 class AttrDb;
 class AttrCursor;
 
@@ -92,11 +94,11 @@ public:
 
     std::string getAttrPathStr(Symbol name) const;
 
-    std::shared_ptr<AttrCursor> maybeGetAttr(Symbol name);
+    std::shared_ptr<AttrCursor> maybeGetAttr(Symbol name, bool forceErrors = false);
 
     std::shared_ptr<AttrCursor> maybeGetAttr(std::string_view name);
 
-    std::shared_ptr<AttrCursor> getAttr(Symbol name);
+    std::shared_ptr<AttrCursor> getAttr(Symbol name, bool forceErrors = false);
 
     std::shared_ptr<AttrCursor> getAttr(std::string_view name);
 
@@ -381,10 +381,14 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
         auto path = r.second;
 
         if (store->isInStore(r.second)) {
-            StorePathSet closure;
-            store->computeFSClosure(store->toStorePath(r.second).first, closure);
-            for (auto & path : closure)
-                allowedPaths->insert(store->printStorePath(path));
+            try {
+                StorePathSet closure;
+                store->computeFSClosure(store->toStorePath(r.second).first, closure);
+                for (auto & path : closure)
+                    allowedPaths->insert(store->printStorePath(path));
+            } catch (InvalidPath &) {
+                allowedPaths->insert(r.second);
+            }
         } else
             allowedPaths->insert(r.second);
     }
@@ -509,7 +513,7 @@ Value * EvalState::addPrimOp(const string & name,
     if (arity == 0) {
         auto vPrimOp = allocValue();
         vPrimOp->type = tPrimOp;
-        vPrimOp->primOp = new PrimOp(primOp, 1, sym);
+        vPrimOp->primOp = new PrimOp { .fun = primOp, .arity = 1, .name = sym };
         Value v;
         mkApp(v, *vPrimOp, *vPrimOp);
         return addConstant(name, v);
@@ -517,7 +521,7 @@ Value * EvalState::addPrimOp(const string & name,
 
     Value * v = allocValue();
     v->type = tPrimOp;
-    v->primOp = new PrimOp(primOp, arity, sym);
+    v->primOp = new PrimOp { .fun = primOp, .arity = arity, .name = sym };
     staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
     baseEnv.values[baseEnvDispl++] = v;
     baseEnv.values[0]->attrs->push_back(Attr(sym, v));
@@ -525,12 +529,59 @@ Value * EvalState::addPrimOp(const string & name,
 }
 
 
+Value * EvalState::addPrimOp(PrimOp && primOp)
+{
+    /* Hack to make constants lazy: turn them into a application of
+       the primop to a dummy value. */
+    if (primOp.arity == 0) {
+        primOp.arity = 1;
+        auto vPrimOp = allocValue();
+        vPrimOp->type = tPrimOp;
+        vPrimOp->primOp = new PrimOp(std::move(primOp));
+        Value v;
+        mkApp(v, *vPrimOp, *vPrimOp);
+        return addConstant(primOp.name, v);
+    }
+
+    Symbol envName = primOp.name;
+    if (hasPrefix(primOp.name, "__"))
+        primOp.name = symbols.create(std::string(primOp.name, 2));
+
+    Value * v = allocValue();
+    v->type = tPrimOp;
+    v->primOp = new PrimOp(std::move(primOp));
+    staticBaseEnv.vars[envName] = baseEnvDispl;
+    baseEnv.values[baseEnvDispl++] = v;
+    baseEnv.values[0]->attrs->push_back(Attr(primOp.name, v));
+    return v;
+}
+
+
 Value & EvalState::getBuiltin(const string & name)
 {
     return *baseEnv.values[0]->attrs->find(symbols.create(name))->value;
 }
 
 
+std::optional<EvalState::Doc> EvalState::getDoc(Value & v)
+{
+    if (v.type == tPrimOp || v.type == tPrimOpApp) {
+        auto v2 = &v;
+        while (v2->type == tPrimOpApp)
+            v2 = v2->primOpApp.left;
+        if (v2->primOp->doc)
+            return Doc {
+                .pos = noPos,
+                .name = v2->primOp->name,
+                .arity = v2->primOp->arity,
+                .args = v2->primOp->args,
+                .doc = v2->primOp->doc,
+            };
+    }
+    return {};
+}
+
+
 /* Every "format" object (even temporary) takes up a few hundred bytes
    of stack space, which is a real killer in the recursive
    evaluator.  So here are some helper functions for throwing
@@ -1299,12 +1350,23 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
     Value * actualArgs = allocValue();
     mkAttrs(*actualArgs, fun.lambda.fun->formals->formals.size());
 
-    for (auto & i : fun.lambda.fun->formals->formals) {
-        Bindings::iterator j = args.find(i.name);
-        if (j != args.end())
-            actualArgs->attrs->push_back(*j);
-        else if (!i.def)
-            throwTypeError("cannot auto-call a function that has an argument without a default value ('%1%')", i.name);
+    if (fun.lambda.fun->formals->ellipsis) {
+        // If the formals have an ellipsis (eg the function accepts extra args) pass
+        // all available automatic arguments (which includes arguments specified on
+        // the command line via --arg/--argstr)
+        for (auto& v : args) {
+            actualArgs->attrs->push_back(v);
+        }
+    } else {
+        // Otherwise, only pass the arguments that the function accepts
+        for (auto & i : fun.lambda.fun->formals->formals) {
+            Bindings::iterator j = args.find(i.name);
+            if (j != args.end()) {
+                actualArgs->attrs->push_back(*j);
+            } else if (!i.def) {
+                throwTypeError("cannot auto-call a function that has an argument without a default value ('%1%')", i.name);
+            }
+        }
     }
 
     actualArgs->attrs->sort();
@@ -30,8 +30,8 @@ struct PrimOp
     PrimOpFun fun;
     size_t arity;
    Symbol name;
-    PrimOp(PrimOpFun fun, size_t arity, Symbol name)
-        : fun(fun), arity(arity), name(name) { }
+    std::vector<std::string> args;
+    const char * doc = nullptr;
 };
 
 
@@ -242,10 +242,23 @@ private:
     Value * addPrimOp(const string & name,
         size_t arity, PrimOpFun primOp);
 
+    Value * addPrimOp(PrimOp && primOp);
+
 public:
 
     Value & getBuiltin(const string & name);
 
+    struct Doc
+    {
+        Pos pos;
+        std::optional<Symbol> name;
+        size_t arity;
+        std::vector<std::string> args;
+        const char * doc;
+    };
+
+    std::optional<Doc> getDoc(Value & v);
+
 private:
 
     inline Value * lookupVar(Env * env, const ExprVar & var, bool noEval);
@@ -357,24 +370,60 @@ struct EvalSettings : Config
     Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
         "Whether builtin functions that allow executing native code should be enabled."};
 
-    Setting<Strings> nixPath{this, getDefaultNixPath(), "nix-path",
-        "List of directories to be searched for <...> file references."};
+    Setting<Strings> nixPath{
+        this, getDefaultNixPath(), "nix-path",
+        "List of directories to be searched for `<...>` file references."};
 
-    Setting<bool> restrictEval{this, false, "restrict-eval",
-        "Whether to restrict file system access to paths in $NIX_PATH, "
-        "and network access to the URI prefixes listed in 'allowed-uris'."};
+    Setting<bool> restrictEval{
+        this, false, "restrict-eval",
+        R"(
+          If set to `true`, the Nix evaluator will not allow access to any
+          files outside of the Nix search path (as set via the `NIX_PATH`
+          environment variable or the `-I` option), or to URIs outside of
+          `allowed-uri`. The default is `false`.
+        )"};
 
     Setting<bool> pureEval{this, false, "pure-eval",
         "Whether to restrict file system and network access to files specified by cryptographic hash."};
 
-    Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation",
-        "Whether the evaluator allows importing the result of a derivation."};
+    Setting<bool> enableImportFromDerivation{
+        this, true, "allow-import-from-derivation",
+        R"(
+          By default, Nix allows you to `import` from a derivation, allowing
+          building at evaluation time. With this option set to false, Nix will
+          throw an error when evaluating an expression that uses this feature,
+          allowing users to ensure their evaluation will not require any
+          builds to take place.
+        )"};
 
     Setting<Strings> allowedUris{this, {}, "allowed-uris",
-        "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
+        R"(
+          A list of URI prefixes to which access is allowed in restricted
+          evaluation mode. For example, when set to
+          `https://github.com/NixOS`, builtin functions such as `fetchGit` are
+          allowed to access `https://github.com/NixOS/patchelf.git`.
+        )"};
 
     Setting<bool> traceFunctionCalls{this, false, "trace-function-calls",
-        "Emit log messages for each function entry and exit at the 'vomit' log level (-vvvv)."};
+        R"(
+          If set to `true`, the Nix evaluator will trace every function call.
+          Nix will print a log message at the "vomit" level for every function
+          entrance and function exit.
+
+              function-trace entered undefined position at 1565795816999559622
+              function-trace exited undefined position at 1565795816999581277
+              function-trace entered /nix/store/.../example.nix:226:41 at 1565795253249935150
+              function-trace exited /nix/store/.../example.nix:226:41 at 1565795253249941684
+
+          The `undefined position` means the function call is a builtin.
+
+          Use the `contrib/stack-collapse.py` script distributed with the Nix
+          source code to convert the trace logs in to a format suitable for
+          `flamegraph.pl`.
+        )"};
 
     Setting<bool> useEvalCache{this, true, "eval-cache",
         "Whether to use the flake evaluation cache."};
 };
 
 extern EvalSettings evalSettings;
(File diff suppressed because it is too large.)
@@ -10,9 +10,11 @@ struct RegisterPrimOp
     struct Info
     {
         std::string name;
-        size_t arity;
-        PrimOpFun primOp;
+        std::vector<std::string> args;
+        size_t arity = 0;
+        const char * doc;
         std::optional<std::string> requiredFeature;
+        PrimOpFun fun;
     };
 
     typedef std::vector<Info> PrimOps;
@@ -26,6 +28,8 @@ struct RegisterPrimOp
         size_t arity,
         PrimOpFun fun,
         std::optional<std::string> requiredFeature = {});
+
+    RegisterPrimOp(Info && info);
 };
 
 /* These primops are disabled without enableNativeCode, but plugins
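The new `Info` layout and `Info &&` constructor let a primop be registered with designated initializers, carrying its argument names and Markdown documentation alongside the function pointer; the `fetchTree.cc` hunks below use exactly this pattern. A sketch with hypothetical names:

```cpp
// Illustrative registration in the new style; "__example" and
// prim_example are placeholder names, not primops from this commit.
static RegisterPrimOp primop_example({
    .name = "__example",
    .args = {"arg"},
    .doc = R"(
      Markdown documentation for the primop, surfaced at runtime
      through EvalState::getDoc().
    )",
    .fun = prim_example,
});
```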
@@ -212,7 +212,7 @@ static void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
             : hashFile(htSHA256, path);
         if (hash != *expectedHash)
             throw Error((unsigned int) 102, "hash mismatch in file downloaded from '%s':\n  wanted: %s\n  got:    %s",
-                *url, expectedHash->to_string(Base32, true), hash->to_string(Base32, true));
+                *url, expectedHash->to_string(Base32, true), hash.to_string(Base32, true));
     }
 
     if (state.allowedPaths)
@@ -226,18 +226,187 @@ static void prim_fetchurl(EvalState & state, const Pos & pos, Value * * args, Va
     fetch(state, pos, args, v, "fetchurl", false, "");
 }
 
+static RegisterPrimOp primop_fetchurl({
+    .name = "__fetchurl",
+    .args = {"url"},
+    .doc = R"(
+      Download the specified URL and return the path of the downloaded
+      file. This function is not available if [restricted evaluation
+      mode](../command-ref/conf-file.md) is enabled.
+    )",
+    .fun = prim_fetchurl,
+});
+
 static void prim_fetchTarball(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     fetch(state, pos, args, v, "fetchTarball", true, "source");
 }
 
+static RegisterPrimOp primop_fetchTarball({
+    .name = "fetchTarball",
+    .args = {"args"},
+    .doc = R"(
+      Download the specified URL, unpack it and return the path of the
+      unpacked tree. The file must be a tape archive (`.tar`) compressed
+      with `gzip`, `bzip2` or `xz`. The top-level path component of the
+      files in the tarball is removed, so it is best if the tarball
+      contains a single directory at top level. The typical use of the
+      function is to obtain external Nix expression dependencies, such as
+      a particular version of Nixpkgs, e.g.
+
+      ```nix
+      with import (fetchTarball https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz) {};
+
+      stdenv.mkDerivation { … }
+      ```
+
+      The fetched tarball is cached for a certain amount of time (1 hour
+      by default) in `~/.cache/nix/tarballs/`. You can change the cache
+      timeout either on the command line with `--option tarball-ttl number
+      of seconds` or in the Nix configuration file with this option: `
+      number of seconds to cache `.
+
+      Note that when obtaining the hash with ` nix-prefetch-url ` the
+      option `--unpack` is required.
+
+      This function can also verify the contents against a hash. In that
+      case, the function takes a set instead of a URL. The set requires
+      the attribute `url` and the attribute `sha256`, e.g.
+
+      ```nix
+      with import (fetchTarball {
+        url = "https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz";
+        sha256 = "1jppksrfvbk5ypiqdz4cddxdl8z6zyzdb2srq8fcffr327ld5jj2";
+      }) {};
+
+      stdenv.mkDerivation { … }
+      ```
+
+      This function is not available if [restricted evaluation
+      mode](../command-ref/conf-file.md) is enabled.
+    )",
+    .fun = prim_fetchTarball,
+});
+
 static void prim_fetchGit(EvalState &state, const Pos &pos, Value **args, Value &v)
 {
     fetchTree(state, pos, args, v, "git", true);
 }
 
-static RegisterPrimOp r2("__fetchurl", 1, prim_fetchurl);
-static RegisterPrimOp r3("fetchTarball", 1, prim_fetchTarball);
-static RegisterPrimOp r4("fetchGit", 1, prim_fetchGit);
+static RegisterPrimOp primop_fetchGit({
+    .name = "fetchGit",
+    .args = {"args"},
+    .doc = R"(
+      Fetch a path from git. *args* can be a URL, in which case the HEAD
+      of the repo at that URL is fetched. Otherwise, it can be an
+      attribute with the following attributes (all except `url` optional):
+
+      - url
+        The URL of the repo.
+
+      - name
+        The name of the directory the repo should be exported to in the
+        store. Defaults to the basename of the URL.
+
+      - rev
+        The git revision to fetch. Defaults to the tip of `ref`.
+
+      - ref
+        The git ref to look for the requested revision under. This is
+        often a branch or tag name. Defaults to `HEAD`.
+
+        By default, the `ref` value is prefixed with `refs/heads/`. As
+        of Nix 2.3.0 Nix will not prefix `refs/heads/` if `ref` starts
+        with `refs/`.
+
+      - submodules
+        A Boolean parameter that specifies whether submodules should be
+        checked out. Defaults to `false`.
+
+      Here are some examples of how to use `fetchGit`.
+
+        - To fetch a private repository over SSH:
+
+          ```nix
+          builtins.fetchGit {
+            url = "git@github.com:my-secret/repository.git";
+            ref = "master";
+            rev = "adab8b916a45068c044658c4158d81878f9ed1c3";
+          }
+          ```
+
+        - To fetch an arbitrary reference:
+
+          ```nix
+          builtins.fetchGit {
+            url = "https://github.com/NixOS/nix.git";
+            ref = "refs/heads/0.5-release";
+          }
+          ```
+
+        - If the revision you're looking for is in the default branch of
+          the git repository you don't strictly need to specify the branch
+          name in the `ref` attribute.
+
+          However, if the revision you're looking for is in a future
+          branch for the non-default branch you will need to specify the
+          the `ref` attribute as well.
+
+          ```nix
+          builtins.fetchGit {
+            url = "https://github.com/nixos/nix.git";
+            rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452";
+            ref = "1.11-maintenance";
+          }
+          ```
+
+          > **Note**
+          >
+          > It is nice to always specify the branch which a revision
+          > belongs to. Without the branch being specified, the fetcher
+          > might fail if the default branch changes. Additionally, it can
+          > be confusing to try a commit from a non-default branch and see
+          > the fetch fail. If the branch is specified the fault is much
+          > more obvious.
+
+        - If the revision you're looking for is in the default branch of
+          the git repository you may omit the `ref` attribute.
+
+          ```nix
+          builtins.fetchGit {
+            url = "https://github.com/nixos/nix.git";
+            rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452";
+          }
+          ```
+
+        - To fetch a specific tag:
+
+          ```nix
+          builtins.fetchGit {
+            url = "https://github.com/nixos/nix.git";
+            ref = "refs/tags/1.9";
+          }
+          ```
+
+        - To fetch the latest version of a remote branch:
+
+          ```nix
+          builtins.fetchGit {
+            url = "ssh://git@github.com/nixos/nix.git";
+            ref = "master";
+          }
+          ```
+
+          > **Note**
+          >
+          > Nix will refetch the branch in accordance with
+          > the option `tarball-ttl`.
+
+          > **Note**
+          >
+          > This behavior is disabled in *Pure evaluation mode*.
+    )",
+    .fun = prim_fetchGit,
+});
+
 }
@@ -130,12 +130,12 @@ std::pair<Tree, Input> Input::fetch(ref<Store> store) const
     tree.actualPath = store->toRealPath(tree.storePath);
 
     auto narHash = store->queryPathInfo(tree.storePath)->narHash;
-    input.attrs.insert_or_assign("narHash", narHash->to_string(SRI, true));
+    input.attrs.insert_or_assign("narHash", narHash.to_string(SRI, true));
 
     if (auto prevNarHash = getNarHash()) {
         if (narHash != *prevNarHash)
             throw Error((unsigned int) 102, "NAR hash mismatch in input '%s' (%s), expected '%s', got '%s'",
-                to_string(), tree.actualPath, prevNarHash->to_string(SRI, true), narHash->to_string(SRI, true));
+                to_string(), tree.actualPath, prevNarHash->to_string(SRI, true), narHash.to_string(SRI, true));
     }
 
     if (auto prevLastModified = getLastModified()) {
@@ -269,7 +269,7 @@ struct GitInputScheme : InputScheme
                 // modified dirty file?
                 input.attrs.insert_or_assign(
                     "lastModified",
-                    haveCommits ? std::stoull(runProgram("git", true, { "-C", actualUrl, "log", "-1", "--format=%ct", "HEAD" })) : 0);
+                    haveCommits ? std::stoull(runProgram("git", true, { "-C", actualUrl, "log", "-1", "--format=%ct", "--no-show-signature", "HEAD" })) : 0);
 
                 return {
                     Tree(store->printStorePath(storePath), std::move(storePath)),
@@ -421,7 +421,7 @@ struct GitInputScheme : InputScheme
 
         auto storePath = store->addToStore(name, tmpDir, FileIngestionMethod::Recursive, htSHA256, filter);
 
-        auto lastModified = std::stoull(runProgram("git", true, { "-C", repoDir, "log", "-1", "--format=%ct", input.getRev()->gitRev() }));
+        auto lastModified = std::stoull(runProgram("git", true, { "-C", repoDir, "log", "-1", "--format=%ct", "--no-show-signature", input.getRev()->gitRev() }));
 
         Attrs infoAttrs({
             {"rev", input.getRev()->gitRev()},
@@ -182,11 +182,21 @@ struct GitHubInputScheme : GitArchiveInputScheme
 {
     std::string type() override { return "github"; }
 
+    void addAccessToken(std::string & url) const
+    {
+        std::string accessToken = settings.githubAccessToken.get();
+        if (accessToken != "")
+            url += "?access_token=" + accessToken;
+    }
+
     Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override
     {
         auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("github.com");
         auto url = fmt("https://api.%s/repos/%s/%s/commits/%s", // FIXME: check
             host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef());
+
+        addAccessToken(url);
+
         auto json = nlohmann::json::parse(
             readFile(
                 store->toRealPath(
@@ -205,9 +215,7 @@ struct GitHubInputScheme : GitArchiveInputScheme
             host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
             input.getRev()->to_string(Base16, false));
 
-        std::string accessToken = settings.githubAccessToken.get();
-        if (accessToken != "")
-            url += "?access_token=" + accessToken;
+        addAccessToken(url);
 
         return url;
     }
@@ -147,7 +147,7 @@ static std::shared_ptr<Registry> getGlobalRegistry(ref<Store> store)
         if (!hasPrefix(path, "/")) {
             auto storePath = downloadFile(store, path, "flake-registry.json", false).storePath;
             if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>())
-                store2->addPermRoot(storePath, getCacheDir() + "/nix/flake-registry.json", true);
+                store2->addPermRoot(storePath, getCacheDir() + "/nix/flake-registry.json");
             path = store->toRealPath(storePath);
         }
 
@@ -67,8 +67,10 @@ DownloadFileResult downloadFile(
         StringSink sink;
         dumpString(*res.data, sink);
         auto hash = hashString(htSHA256, *res.data);
-        ValidPathInfo info(store->makeFixedOutputPath(FileIngestionMethod::Flat, hash, name));
-        info.narHash = hashString(htSHA256, *sink.s);
+        ValidPathInfo info {
+            store->makeFixedOutputPath(FileIngestionMethod::Flat, hash, name),
+            hashString(htSHA256, *sink.s),
+        };
         info.narSize = sink.s->size();
         info.ca = FixedOutputHash {
             .method = FileIngestionMethod::Flat,
@@ -28,7 +28,7 @@ MixCommonArgs::MixCommonArgs(const string & programName)
 
     addFlag({
         .longName = "option",
-        .description = "set a Nix configuration option (overriding nix.conf)",
+        .description = "set a Nix configuration option (overriding `nix.conf`)",
         .labels = {"name", "value"},
         .handler = {[](std::string name, std::string value) {
             try {
@@ -51,8 +51,8 @@ MixCommonArgs::MixCommonArgs(const string & programName)
 
     addFlag({
         .longName = "log-format",
-        .description = "format of log output; \"raw\", \"internal-json\", \"bar\" "
-                       "or \"bar-with-logs\"",
+        .description = "format of log output; `raw`, `internal-json`, `bar` "
+                       "or `bar-with-logs`",
         .labels = {"format"},
         .handler = {[](std::string format) { setLogFormat(format); }},
     });
@@ -362,7 +362,7 @@ public:
         auto width = getWindowSize().second;
         if (width <= 0) width = std::numeric_limits<decltype(width)>::max();
 
-        writeToStderr("\r" + filterANSIEscapes(line, false, width) + "\e[K");
+        writeToStderr("\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K");
     }
 
     std::string getStatus(State & state)
@@ -277,6 +277,8 @@ void printVersion(const string & programName)
 #if HAVE_SODIUM
         cfg.push_back("signed-caches");
 #endif
+        std::cout << "System type: " << settings.thisSystem << "\n";
+        std::cout << "Additional system types: " << concatStringsSep(", ", settings.extraPlatforms.get()) << "\n";
         std::cout << "Features: " << concatStringsSep(", ", cfg) << "\n";
         std::cout << "System configuration file: " << settings.nixConfDir + "/nix.conf" << "\n";
         std::cout << "User configuration files: " <<
@@ -143,7 +143,7 @@ struct FileSource : FdSource
 void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
     RepairFlag repair, CheckSigsFlag checkSigs)
 {
-    assert(info.narHash && info.narSize);
+    assert(info.narSize);
 
     if (!repair && isValidPath(info.path)) {
         // FIXME: copyNAR -> null sink
@@ -219,7 +219,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
         }
     }
 
-    upsertFile(std::string(info.path.to_string()) + ".ls", jsonOut.str(), "application/json");
+    upsertFile(std::string(info.path.hashPart()) + ".ls", jsonOut.str(), "application/json");
 }
 
     /* Optionally maintain an index of DWARF debug info files
@@ -312,14 +312,10 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
 {
     auto info = queryPathInfo(storePath).cast<const NarInfo>();
 
-    uint64_t narSize = 0;
-
-    LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
-        sink(data, len);
-        narSize += len;
-    });
-
-    auto decompressor = makeDecompressionSink(info->compression, wrapperSink);
+    LengthSink narSize;
+    TeeSink tee { sink, narSize };
+
+    auto decompressor = makeDecompressionSink(info->compression, tee);
 
     try {
         getFile(info->url, *decompressor);
@@ -331,7 +327,7 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
 
     stats.narRead++;
     //stats.narReadCompressedBytes += nar->size(); // FIXME
-    stats.narReadBytes += narSize;
+    stats.narReadBytes += narSize.length;
 }
 
 void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
@@ -385,7 +381,10 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
         h = hashString(hashAlgo, s);
     }
 
-    ValidPathInfo info(makeFixedOutputPath(method, *h, name));
+    ValidPathInfo info {
+        makeFixedOutputPath(method, *h, name),
+        Hash::dummy, // Will be fixed in addToStore, which recomputes nar hash
+    };
 
     auto source = StringSource { *sink.s };
     addToStore(info, source, repair, CheckSigs);
@@ -396,7 +395,10 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
 StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s,
     const StorePathSet & references, RepairFlag repair)
 {
-    ValidPathInfo info(computeStorePathForText(name, s, references));
+    ValidPathInfo info {
+        computeStorePathForText(name, s, references),
+        Hash::dummy, // Will be fixed in addToStore, which recomputes nar hash
+    };
     info.references = references;
 
     if (repair || !isValidPath(info.path)) {
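The recurring change in this file is one refactor: `ValidPathInfo` now receives its NAR hash at construction instead of having `narHash` assigned afterwards, which is why the first hunk can relax `assert(info.narHash && info.narSize)` to `assert(info.narSize)`. A before/after sketch, with the two-argument constructor inferred from the call sites above:

```cpp
// Before: narHash starts unset, so it had to be optional and asserted on.
ValidPathInfo info(path);
info.narHash = narHash;

// After: a ValidPathInfo always carries a hash; callers that cannot know
// it yet pass Hash::dummy and rely on addToStore to recompute it.
ValidPathInfo info { path, narHash };
```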
@@ -1181,8 +1181,8 @@ void DerivationGoal::haveDerivation()
 
     retrySubstitution = false;
 
-    for (auto & i : drv->outputs)
-        worker.store.addTempRoot(i.second.path(worker.store, drv->name));
+    for (auto & i : drv->outputsAndPaths(worker.store))
+        worker.store.addTempRoot(i.second.second);
 
     /* Check what outputs paths are not already valid. */
     auto invalidOutputs = checkPathValidity(false, buildMode == bmRepair);
@@ -1288,14 +1288,14 @@ void DerivationGoal::repairClosure()
 
     /* Get the output closure. */
     StorePathSet outputClosure;
-    for (auto & i : drv->outputs) {
+    for (auto & i : drv->outputsAndPaths(worker.store)) {
         if (!wantOutput(i.first, wantedOutputs)) continue;
-        worker.store.computeFSClosure(i.second.path(worker.store, drv->name), outputClosure);
+        worker.store.computeFSClosure(i.second.second, outputClosure);
     }
 
     /* Filter out our own outputs (which we have already checked). */
-    for (auto & i : drv->outputs)
-        outputClosure.erase(i.second.path(worker.store, drv->name));
+    for (auto & i : drv->outputsAndPaths(worker.store))
+        outputClosure.erase(i.second.second);
 
     /* Get all dependencies of this derivation so that we know which
        derivation is responsible for which path in the output
@@ -1306,8 +1306,8 @@ void DerivationGoal::repairClosure()
     for (auto & i : inputClosure)
         if (i.isDerivation()) {
             Derivation drv = worker.store.derivationFromPath(i);
-            for (auto & j : drv.outputs)
-                outputsToDrv.insert_or_assign(j.second.path(worker.store, drv.name), i);
+            for (auto & j : drv.outputsAndPaths(worker.store))
+                outputsToDrv.insert_or_assign(j.second.second, i);
         }
 
     /* Check each path (slow!). */
@@ -1466,16 +1466,16 @@ void DerivationGoal::tryToBuild()
 
     /* If any of the outputs already exist but are not valid, delete
        them. */
-    for (auto & i : drv->outputs) {
-        if (worker.store.isValidPath(i.second.path(worker.store, drv->name))) continue;
-        debug("removing invalid path '%s'", worker.store.printStorePath(i.second.path(worker.store, drv->name)));
-        deletePath(worker.store.Store::toRealPath(i.second.path(worker.store, drv->name)));
+    for (auto & i : drv->outputsAndPaths(worker.store)) {
+        if (worker.store.isValidPath(i.second.second)) continue;
+        debug("removing invalid path '%s'", worker.store.printStorePath(i.second.second));
+        deletePath(worker.store.Store::toRealPath(i.second.second));
     }
 
     /* Don't do a remote build if the derivation has the attribute
        `preferLocalBuild' set.  Also, check and repair modes are only
        supported for local builds. */
-    bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally();
+    bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store);
 
     /* Is the build hook willing to accept this job? */
     if (!buildLocally) {
@@ -1919,8 +1919,8 @@ StorePathSet DerivationGoal::exportReferences(const StorePathSet & storePaths)
     for (auto & j : paths2) {
         if (j.isDerivation()) {
             Derivation drv = worker.store.derivationFromPath(j);
-            for (auto & k : drv.outputs)
-                worker.store.computeFSClosure(k.second.path(worker.store, drv.name), paths);
+            for (auto & k : drv.outputsAndPaths(worker.store))
+                worker.store.computeFSClosure(k.second.second, paths);
         }
     }
 
@@ -1964,13 +1964,13 @@ void linkOrCopy(const Path & from, const Path & to)
 void DerivationGoal::startBuilder()
 {
     /* Right platform? */
-    if (!parsedDrv->canBuildLocally())
+    if (!parsedDrv->canBuildLocally(worker.store))
         throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
             drv->platform,
             concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()),
             worker.store.printStorePath(drvPath),
             settings.thisSystem,
-            concatStringsSep<StringSet>(", ", settings.systemFeatures));
+            concatStringsSep<StringSet>(", ", worker.store.systemFeatures));
 
     if (drv->isBuiltin())
         preloadNSS();
@@ -2014,8 +2014,8 @@ void DerivationGoal::startBuilder()
     chownToBuilder(tmpDir);
 
     /* Substitute output placeholders with the actual output paths. */
-    for (auto & output : drv->outputs)
-        inputRewrites[hashPlaceholder(output.first)] = worker.store.printStorePath(output.second.path(worker.store, drv->name));
+    for (auto & output : drv->outputsAndPaths(worker.store))
+        inputRewrites[hashPlaceholder(output.first)] = worker.store.printStorePath(output.second.second);
 
     /* Construct the environment passed to the builder. */
     initEnv();
@@ -2199,8 +2199,8 @@ void DerivationGoal::startBuilder()
            rebuilding a path that is in settings.dirsInChroot
            (typically the dependencies of /bin/sh).  Throw them
            out. */
-        for (auto & i : drv->outputs)
-            dirsInChroot.erase(worker.store.printStorePath(i.second.path(worker.store, drv->name)));
+        for (auto & i : drv->outputsAndPaths(worker.store))
+            dirsInChroot.erase(worker.store.printStorePath(i.second.second));
 
 #elif __APPLE__
         /* We don't really have any parent prep work to do (yet?)
@@ -2612,8 +2612,8 @@ void DerivationGoal::writeStructuredAttrs()
 
         /* Add an "outputs" object containing the output paths. */
         nlohmann::json outputs;
-        for (auto & i : drv->outputs)
-            outputs[i.first] = rewriteStrings(worker.store.printStorePath(i.second.path(worker.store, drv->name)), inputRewrites);
+        for (auto & i : drv->outputsAndPaths(worker.store))
+            outputs[i.first] = rewriteStrings(worker.store.printStorePath(i.second.second), inputRewrites);
         json["outputs"] = outputs;
 
         /* Handle exportReferencesGraph. */
@@ -2756,8 +2756,12 @@ struct RestrictedStore : public LocalFSStore
     void queryReferrers(const StorePath & path, StorePathSet & referrers) override
     { }
 
-    OutputPathMap queryDerivationOutputMap(const StorePath & path) override
-    { throw Error("queryDerivationOutputMap"); }
+    std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path) override
+    {
+        if (!goal.isAllowed(path))
+            throw InvalidPath("cannot query output map for unknown path '%s' in recursive Nix", printStorePath(path));
+        return next->queryPartialDerivationOutputMap(path);
+    }
 
     std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
     { throw Error("queryPathFromHashPart"); }
@@ -2815,9 +2819,9 @@ struct RestrictedStore : public LocalFSStore
                 if (!goal.isAllowed(path.path))
                     throw InvalidPath("cannot build unknown path '%s' in recursive Nix", printStorePath(path.path));
                 auto drv = derivationFromPath(path.path);
-                for (auto & output : drv.outputs)
+                for (auto & output : drv.outputsAndPaths(*this))
                     if (wantOutput(output.first, path.outputs))
-                        newPaths.insert(output.second.path(*this, drv.name));
+                        newPaths.insert(output.second.second);
             } else if (!goal.isAllowed(path.path))
                 throw InvalidPath("cannot build unknown path '%s' in recursive Nix", printStorePath(path.path));
         }
@@ -2920,7 +2924,8 @@ void DerivationGoal::startDaemon()
             FdSink to(remote.get());
             try {
                 daemon::processConnection(store, from, to,
-                    daemon::NotTrusted, daemon::Recursive, "nobody", 65535);
+                    daemon::NotTrusted, daemon::Recursive,
+                    [&](Store & store) { store.createUser("nobody", 65535); });
                 debug("terminated daemon connection");
             } catch (SysError &) {
                 ignoreException();
@@ -3179,7 +3184,7 @@ void DerivationGoal::runChild()
                 createDirs(chrootRootDir + "/dev/shm");
                 createDirs(chrootRootDir + "/dev/pts");
                 ss.push_back("/dev/full");
-                if (settings.systemFeatures.get().count("kvm") && pathExists("/dev/kvm"))
+                if (worker.store.systemFeatures.get().count("kvm") && pathExists("/dev/kvm"))
                     ss.push_back("/dev/kvm");
                 ss.push_back("/dev/null");
                 ss.push_back("/dev/random");
@@ -3616,8 +3621,8 @@ void DerivationGoal::registerOutputs()
        to do anything here. */
     if (hook) {
         bool allValid = true;
-        for (auto & i : drv->outputs)
-            if (!worker.store.isValidPath(i.second.path(worker.store, drv->name))) allValid = false;
+        for (auto & i : drv->outputsAndPaths(worker.store))
+            if (!worker.store.isValidPath(i.second.second)) allValid = false;
         if (allValid) return;
     }
 
@@ -3638,23 +3643,23 @@ void DerivationGoal::registerOutputs()
        Nix calls. */
     StorePathSet referenceablePaths;
     for (auto & p : inputPaths) referenceablePaths.insert(p);
-    for (auto & i : drv->outputs) referenceablePaths.insert(i.second.path(worker.store, drv->name));
+    for (auto & i : drv->outputsAndPaths(worker.store)) referenceablePaths.insert(i.second.second);
     for (auto & p : addedPaths) referenceablePaths.insert(p);
 
     /* Check whether the output paths were created, and grep each
        output path to determine what other paths it references.  Also make all
       output paths read-only. */
-    for (auto & i : drv->outputs) {
-        auto path = worker.store.printStorePath(i.second.path(worker.store, drv->name));
-        if (!missingPaths.count(i.second.path(worker.store, drv->name))) continue;
+    for (auto & i : drv->outputsAndPaths(worker.store)) {
+        auto path = worker.store.printStorePath(i.second.second);
+        if (!missingPaths.count(i.second.second)) continue;
 
         Path actualPath = path;
         if (needsHashRewrite()) {
-            auto r = redirectedOutputs.find(i.second.path(worker.store, drv->name));
+            auto r = redirectedOutputs.find(i.second.second);
             if (r != redirectedOutputs.end()) {
                 auto redirected = worker.store.Store::toRealPath(r->second);
                 if (buildMode == bmRepair
-                    && redirectedBadOutputs.count(i.second.path(worker.store, drv->name))
+                    && redirectedBadOutputs.count(i.second.second)
                     && pathExists(redirected))
                     replaceValidPath(path, redirected);
                 if (buildMode == bmCheck)
@@ -3721,7 +3726,7 @@ void DerivationGoal::registerOutputs()
            hash). */
         std::optional<ContentAddress> ca;
 
-        if (! std::holds_alternative<DerivationOutputInputAddressed>(i.second.output)) {
+        if (! std::holds_alternative<DerivationOutputInputAddressed>(i.second.first.output)) {
             DerivationOutputCAFloating outputHash;
             std::visit(overloaded {
                 [&](DerivationOutputInputAddressed doi) {
@@ -3736,7 +3741,7 @@ void DerivationGoal::registerOutputs()
                 [&](DerivationOutputCAFloating dof) {
                     outputHash = dof;
                 },
-            }, i.second.output);
+            }, i.second.first.output);
 
             if (outputHash.method == FileIngestionMethod::Flat) {
                 /* The output path should be a regular file without execute permission. */
@@ -3753,12 +3758,12 @@ void DerivationGoal::registerOutputs()
                 ? hashPath(outputHash.hashType, actualPath).first
                 : hashFile(outputHash.hashType, actualPath);
 
-            auto dest = worker.store.makeFixedOutputPath(outputHash.method, h2, i.second.path(worker.store, drv->name).name());
+            auto dest = worker.store.makeFixedOutputPath(outputHash.method, h2, i.second.second.name());
 
             // true if either floating CA, or incorrect fixed hash.
             bool needsMove = true;
 
-            if (auto p = std::get_if<DerivationOutputCAFixed>(& i.second.output)) {
+            if (auto p = std::get_if<DerivationOutputCAFixed>(& i.second.first.output)) {
                 Hash & h = p->hash.hash;
                 if (h != h2) {
 
@@ -3864,8 +3869,10 @@ void DerivationGoal::registerOutputs()
             worker.markContentsGood(worker.store.parseStorePath(path));
         }
 
-        ValidPathInfo info(worker.store.parseStorePath(path));
-        info.narHash = hash.first;
+        ValidPathInfo info {
+            worker.store.parseStorePath(path),
+            hash.first,
+        };
         info.narSize = hash.second;
         info.references = std::move(references);
         info.deriver = drvPath;
@@ -3921,8 +3928,8 @@ void DerivationGoal::registerOutputs()
 
     /* If this is the first round of several, then move the output out of the way. */
     if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
-        for (auto & i : drv->outputs) {
-            auto path = worker.store.printStorePath(i.second.path(worker.store, drv->name));
+        for (auto & i : drv->outputsAndPaths(worker.store)) {
+            auto path = worker.store.printStorePath(i.second.second);
             Path prev = path + checkSuffix;
             deletePath(prev);
             Path dst = path + checkSuffix;
@@ -3939,8 +3946,8 @@ void DerivationGoal::registerOutputs()
     /* Remove the .check directories if we're done. FIXME: keep them
        if the result was not determistic? */
     if (curRound == nrRounds) {
-        for (auto & i : drv->outputs) {
-            Path prev = worker.store.printStorePath(i.second.path(worker.store, drv->name)) + checkSuffix;
+        for (auto & i : drv->outputsAndPaths(worker.store)) {
+            Path prev = worker.store.printStorePath(i.second.second) + checkSuffix;
             deletePath(prev);
         }
     }
@@ -4238,12 +4245,12 @@ void DerivationGoal::flushLine()
 StorePathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash)
 {
     StorePathSet result;
-    for (auto & i : drv->outputs) {
+    for (auto & i : drv->outputsAndPaths(worker.store)) {
         if (!wantOutput(i.first, wantedOutputs)) continue;
         bool good =
-            worker.store.isValidPath(i.second.path(worker.store, drv->name)) &&
-            (!checkHash || worker.pathContentsGood(i.second.path(worker.store, drv->name)));
-        if (good == returnValid) result.insert(i.second.path(worker.store, drv->name));
+            worker.store.isValidPath(i.second.second) &&
+            (!checkHash || worker.pathContentsGood(i.second.second));
+        if (good == returnValid) result.insert(i.second.second);
     }
     return result;
 }
@@ -4980,7 +4987,7 @@ void Worker::waitForInput()
         std::vector<unsigned char> buffer(4096);
         for (auto & k : fds2) {
             if (pollStatus.at(fdToPollStatus.at(k)).revents) {
-                ssize_t rd = read(k, buffer.data(), buffer.size());
+                ssize_t rd = ::read(k, buffer.data(), buffer.size());
                 // FIXME: is there a cleaner way to handle pt close
                 // than EIO? Is this even standard?
                 if (rd == 0 || (rd == -1 && errno == EIO)) {
@@ -5070,7 +5077,7 @@ bool Worker::pathContentsGood(const StorePath & path)
     if (!pathExists(store.printStorePath(path)))
         res = false;
     else {
-        HashResult current = hashPath(info->narHash->type, store.printStorePath(path));
+        HashResult current = hashPath(info->narHash.type, store.printStorePath(path));
         Hash nullHash(htSHA256);
         res = info->narHash == nullHash || info->narHash == current.first;
     }
@@ -58,6 +58,20 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
         }
     };
 
+    /* Try the hashed mirrors first. */
+    if (getAttr("outputHashMode") == "flat")
+        for (auto hashedMirror : settings.hashedMirrors.get())
+            try {
+                if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
+                std::optional<HashType> ht = parseHashTypeOpt(getAttr("outputHashAlgo"));
+                Hash h = newHashAllowEmpty(getAttr("outputHash"), ht);
+                fetch(hashedMirror + printHashType(h.type) + "/" + h.to_string(Base16, false));
+                return;
+            } catch (Error & e) {
+                debug(e.what());
+            }
+
+    /* Otherwise try the specified URL. */
     fetch(mainUrl);
 }
 
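For orientation, the mirror URL assembled above has the shape `<mirror>/<hash-type>/<base16-hash>`:

```cpp
// The expression from the hunk above; with the historical default
// hashedMirrors value of http://tarballs.nixos.org/ and a sha256 output
// hash this yields e.g. "http://tarballs.nixos.org/sha256/<64 hex digits>".
fetch(hashedMirror + printHashType(h.type) + "/" + h.to_string(Base16, false));
```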
@@ -289,7 +289,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         logger->startWork();
         auto hash = store->queryPathInfo(path)->narHash;
         logger->stopWork();
-        to << hash->to_string(Base16, false);
+        to << hash.to_string(Base16, false);
         break;
     }
 
@@ -325,9 +325,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     case wopQueryDerivationOutputMap: {
         auto path = store->parseStorePath(readString(from));
         logger->startWork();
-        OutputPathMap outputs = store->queryDerivationOutputMap(path);
+        auto outputs = store->queryPartialDerivationOutputMap(path);
         logger->stopWork();
-        writeOutputPathMap(*store, to, outputs);
+        worker_proto::write(*store, to, outputs);
         break;
     }
 
@@ -454,8 +454,46 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         readDerivation(from, *store, drv, Derivation::nameFromPath(drvPath));
         BuildMode buildMode = (BuildMode) readInt(from);
         logger->startWork();
-        if (!trusted)
-            throw Error("you are not privileged to build derivations");
+
+        /* Content-addressed derivations are trustless because their output paths
+           are verified by their content alone, so any derivation is free to
+           try to produce such a path.
+
+           Input-addressed derivation output paths, however, are calculated
+           from the derivation closure that produced them---even knowing the
+           root derivation is not enough. That the output data actually came
+           from those derivations is fundamentally unverifiable, but the daemon
+           trusts itself on that matter. The question instead is whether the
+           submitted plan has rights to the output paths it wants to fill, and
+           at least the derivation closure proves that.
+
+           It would have been nice if input-address algorithm merely depended
+           on the build time closure, rather than depending on the derivation
+           closure. That would mean input-addressed paths used at build time
+           would just be trusted and not need their own evidence. This is in
+           fact fine as the same guarantees would hold *inductively*: either
+           the remote builder has those paths and already trusts them, or it
+           needs to build them too and thus their evidence must be provided in
+           turn.  The advantage of this variant algorithm is that the evidence
+           for input-addressed paths which the remote builder already has
+           doesn't need to be sent again.
+
+           That said, now that we have floating CA derivations, it is better
+           that people just migrate to those which also solve this problem, and
+           others. It's the same migration difficulty with strictly more
+           benefit.
+
+           Lastly, do note that when we parse fixed-output content-addressed
+           derivations, we throw out the precomputed output paths and just
+           store the hashes, so there aren't two competing sources of truth an
+           attacker could exploit. */
+        if (drv.type() == DerivationType::InputAddressed && !trusted)
+            throw Error("you are not privileged to build input-addressed derivations");
+
+        /* Make sure that the non-input-addressed derivations that got this far
+           are in fact content-addressed if we don't trust them. */
+        assert(derivationIsCA(drv.type()) || trusted);
+
         auto res = store->buildDerivation(drvPath, drv, buildMode);
         logger->stopWork();
         to << res.status << res.errorMsg;
@@ -638,7 +676,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         if (GET_PROTOCOL_MINOR(clientVersion) >= 17)
             to << 1;
         to << (info->deriver ? store->printStorePath(*info->deriver) : "")
-           << info->narHash->to_string(Base16, false);
+           << info->narHash.to_string(Base16, false);
         writeStorePaths(*store, to, info->references);
         to << info->registrationTime << info->narSize;
         if (GET_PROTOCOL_MINOR(clientVersion) >= 16) {
@@ -688,17 +726,18 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         auto path = store->parseStorePath(readString(from));
         logger->startWork();
         logger->stopWork();
-        dumpPath(store->printStorePath(path), to);
+        dumpPath(store->toRealPath(path), to);
         break;
     }
 
     case wopAddToStoreNar: {
         bool repair, dontCheckSigs;
-        ValidPathInfo info(store->parseStorePath(readString(from)));
+        auto path = store->parseStorePath(readString(from));
         auto deriver = readString(from);
+        auto narHash = Hash::parseAny(readString(from), htSHA256);
+        ValidPathInfo info { path, narHash };
         if (deriver != "")
             info.deriver = store->parseStorePath(deriver);
-        info.narHash = Hash::parseAny(readString(from), htSHA256);
         info.references = readStorePaths<StorePathSet>(*store, from);
         from >> info.registrationTime >> info.narSize >> info.ultimate;
         info.sigs = readStrings<StringSet>(from);
@@ -817,8 +856,7 @@ void processConnection(
     FdSink & to,
     TrustedFlag trusted,
     RecursiveFlag recursive,
-    const std::string & userName,
-    uid_t userId)
+    std::function<void(Store &)> authHook)
 {
     auto monitor = !recursive ? std::make_unique<MonitorFdHup>(from.fd) : nullptr;
 
@@ -859,15 +897,7 @@ void processConnection(
 
         /* If we can't accept clientVersion, then throw an error
            *here* (not above). */
-
-#if 0
-        /* Prevent users from doing something very dangerous. */
-        if (geteuid() == 0 &&
-            querySetting("build-users-group", "") == "")
-            throw Error("if you run 'nix-daemon' as root, then you MUST set 'build-users-group'!");
-#endif
-
-        store->createUser(userName, userId);
+        authHook(*store);
 
         tunnelLogger->stopWork();
         to.flush();
@@ -12,7 +12,10 @@ void processConnection(
     FdSink & to,
     TrustedFlag trusted,
     RecursiveFlag recursive,
-    const std::string & userName,
-    uid_t userId);
+    /* Arbitrary hook to check authorization / initialize user data / whatever
+       after the protocol has been negotiated. The idea is that this function
+       and everything it calls doesn't know about this stuff, and the
+       `nix-daemon` handles that instead. */
+    std::function<void(Store &)> authHook);
 
 }
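The hook decouples `processConnection` from user-account handling. The recursive-Nix call site in the `build.cc` hunk earlier shows the intended use:

```cpp
// Usage as it appears in the build.cc hunk above: the caller injects the
// authorization / user-setup step instead of passing a name and uid.
daemon::processConnection(store, from, to,
    daemon::NotTrusted, daemon::Recursive,
    [&](Store & store) { store.createUser("nobody", 65535); });
```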
@@ -61,8 +61,8 @@ bool BasicDerivation::isBuiltin() const
 }
 
 
-StorePath writeDerivation(ref<Store> store,
-    const Derivation & drv, std::string_view name, RepairFlag repair)
+StorePath writeDerivation(Store & store,
+    const Derivation & drv, RepairFlag repair)
 {
     auto references = drv.inputSrcs;
     for (auto & i : drv.inputDrvs)
@@ -70,11 +70,11 @@ StorePath writeDerivation(ref<Store> store,
     /* Note that the outputs of a derivation are *not* references
        (that can be missing (of course) and should not necessarily be
        held during a garbage collection). */
-    auto suffix = std::string(name) + drvExtension;
-    auto contents = drv.unparse(*store, false);
+    auto suffix = std::string(drv.name) + drvExtension;
+    auto contents = drv.unparse(store, false);
     return settings.readOnlyMode
-        ? store->computeStorePathForText(suffix, contents, references)
-        : store->addTextToStore(suffix, contents, references, repair);
+        ? store.computeStorePathForText(suffix, contents, references)
+        : store.addTextToStore(suffix, contents, references, repair);
 }
 
 
@@ -139,18 +139,14 @@ static StringSet parseStrings(std::istream & str, bool arePaths)
 }
 
 
-static DerivationOutput parseDerivationOutput(const Store & store, std::istringstream & str)
+static DerivationOutput parseDerivationOutput(const Store & store,
+    StorePath path, std::string_view hashAlgo, std::string_view hash)
 {
-    expect(str, ","); auto path = store.parseStorePath(parsePath(str));
-    expect(str, ","); auto hashAlgo = parseString(str);
-    expect(str, ","); const auto hash = parseString(str);
-    expect(str, ")");
-
     if (hashAlgo != "") {
         auto method = FileIngestionMethod::Flat;
         if (string(hashAlgo, 0, 2) == "r:") {
             method = FileIngestionMethod::Recursive;
-            hashAlgo = string(hashAlgo, 2);
+            hashAlgo = hashAlgo.substr(2);
         }
         const HashType hashType = parseHashType(hashAlgo);
 
@ -178,8 +174,18 @@ static DerivationOutput parseDerivationOutput(const Store & store, std::istrings
|
|||
};
|
||||
}
|
||||
|
||||
static DerivationOutput parseDerivationOutput(const Store & store, std::istringstream & str)
|
||||
{
|
||||
expect(str, ","); auto path = store.parseStorePath(parsePath(str));
|
||||
expect(str, ","); const auto hashAlgo = parseString(str);
|
||||
expect(str, ","); const auto hash = parseString(str);
|
||||
expect(str, ")");
|
||||
|
||||
static Derivation parseDerivation(const Store & store, std::string && s, std::string_view name)
|
||||
return parseDerivationOutput(store, std::move(path), hashAlgo, hash);
|
||||
}
|
||||
|
||||
|
||||
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name)
|
||||
{
|
||||
Derivation drv;
|
||||
drv.name = name;
|
||||
|
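The split above separates tokenizing from interpretation: one overload pulls fields out of the text stream, then delegates to a core overload that takes already-extracted fields, so the binary-protocol reader later in the diff can reuse the same core. A hedged toy sketch of that delegation shape (stand-in types, hypothetical field layout):

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <utility>

struct Output { std::string path, hashAlgo, hash; };

// Core: builds an Output from already-extracted fields. Every front-end
// delegates here, mirroring the two parseDerivationOutput overloads.
Output makeOutput(std::string path, std::string hashAlgo, std::string hash)
{
    return {std::move(path), std::move(hashAlgo), std::move(hash)};
}

// Front-end: pulls comma-separated fields out of a text stream. A binary
// reader would instead read the same fields off the wire and call makeOutput.
Output parseOutput(std::istringstream & str)
{
    std::string path, hashAlgo, hash;
    std::getline(str, path, ',');
    std::getline(str, hashAlgo, ',');
    std::getline(str, hash, ',');
    return makeOutput(path, hashAlgo, hash);
}

int main()
{
    std::istringstream s("/nix/store/example-out,r:sha256,deadbeef,");
    auto o = parseOutput(s);
    std::cout << o.path << " " << o.hashAlgo << " " << o.hash << "\n";
}
```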
@ -227,34 +233,6 @@ static Derivation parseDerivation(const Store & store, std::string && s, std::string_view name)
}


Derivation readDerivation(const Store & store, const Path & drvPath, std::string_view name)
{
    try {
        return parseDerivation(store, readFile(drvPath), name);
    } catch (FormatError & e) {
        throw Error("error parsing derivation '%1%': %2%", drvPath, e.msg());
    }
}


Derivation Store::derivationFromPath(const StorePath & drvPath)
{
    ensurePath(drvPath);
    return readDerivation(drvPath);
}


Derivation Store::readDerivation(const StorePath & drvPath)
{
    auto accessor = getFSAccessor();
    try {
        return parseDerivation(*this, accessor->readFile(printStorePath(drvPath)), Derivation::nameFromPath(drvPath));
    } catch (FormatError & e) {
        throw Error("error parsing derivation '%s': %s", printStorePath(drvPath), e.msg());
    }
}


static void printString(string & res, std::string_view s)
{
    char buf[s.size() * 2 + 2];
@ -474,12 +452,12 @@ DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs)
        throw Error("Regular input-addressed derivations are not yet allowed to depend on CA derivations");
    case DerivationType::CAFixed: {
        std::map<std::string, Hash> outputHashes;
        for (const auto & i : drv.outputs) {
            auto & dof = std::get<DerivationOutputCAFixed>(i.second.output);
        for (const auto & i : drv.outputsAndPaths(store)) {
            auto & dof = std::get<DerivationOutputCAFixed>(i.second.first.output);
            auto hash = hashString(htSHA256, "fixed:out:"
                + dof.hash.printMethodAlgo() + ":"
                + dof.hash.hash.to_string(Base16, false) + ":"
                + store.printStorePath(i.second.path(store, drv.name)));
                + store.printStorePath(i.second.second));
            outputHashes.insert_or_assign(i.first, std::move(hash));
        }
        return outputHashes;
@ -533,46 +511,18 @@ bool wantOutput(const string & output, const std::set<string> & wanted)
StorePathSet BasicDerivation::outputPaths(const Store & store) const
{
    StorePathSet paths;
    for (auto & i : outputs)
        paths.insert(i.second.path(store, name));
    for (auto & i : outputsAndPaths(store))
        paths.insert(i.second.second);
    return paths;
}

static DerivationOutput readDerivationOutput(Source & in, const Store & store)
{
    auto path = store.parseStorePath(readString(in));
    auto hashAlgo = readString(in);
    auto hash = readString(in);
    const auto hashAlgo = readString(in);
    const auto hash = readString(in);

    if (hashAlgo != "") {
        auto method = FileIngestionMethod::Flat;
        if (string(hashAlgo, 0, 2) == "r:") {
            method = FileIngestionMethod::Recursive;
            hashAlgo = string(hashAlgo, 2);
        }
        auto hashType = parseHashType(hashAlgo);
        return hash != ""
            ? DerivationOutput {
                .output = DerivationOutputCAFixed {
                    .hash = FixedOutputHash {
                        .method = std::move(method),
                        .hash = Hash::parseNonSRIUnprefixed(hash, hashType),
                    },
                }
            }
            : (settings.requireExperimentalFeature("ca-derivations"),
                DerivationOutput {
                    .output = DerivationOutputCAFloating {
                        .method = std::move(method),
                        .hashType = std::move(hashType),
                    },
                });
    } else
        return DerivationOutput {
            .output = DerivationOutputInputAddressed {
                .path = std::move(path),
            }
        };
    return parseDerivationOutput(store, std::move(path), hashAlgo, hash);
}

StringSet BasicDerivation::outputNames() const
@ -583,6 +533,27 @@ StringSet BasicDerivation::outputNames() const
    return names;
}

DerivationOutputsAndPaths BasicDerivation::outputsAndPaths(const Store & store) const {
    DerivationOutputsAndPaths outsAndPaths;
    for (auto output : outputs)
        outsAndPaths.insert(std::make_pair(
            output.first,
            std::make_pair(output.second, output.second.path(store, name))
            )
        );
    return outsAndPaths;
}

DerivationOutputsAndOptPaths BasicDerivation::outputsAndOptPaths(const Store & store) const {
    DerivationOutputsAndOptPaths outsAndOptPaths;
    for (auto output : outputs)
        outsAndOptPaths.insert(std::make_pair(
            output.first,
            std::make_pair(output.second, output.second.pathOpt(store, output.first))
            )
        );
    return outsAndOptPaths;
}

std::string_view BasicDerivation::nameFromPath(const StorePath & drvPath) {
    auto nameWithSuffix = drvPath.name();
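The pattern these two helpers introduce is zipping each map entry with a derived value that may not be computable yet (floating content-addressed outputs have no known path). A minimal standalone sketch with stand-in types, not the real `Store`/`StorePath` API:

```cpp
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <utility>

// Stand-ins for the real DerivationOutput / StorePath types.
struct Output { bool contentAddressed = false; };
using Path = std::string;

// Mirrors outputsAndOptPaths(): pair every output with the store path it
// would land in, using std::nullopt when the path isn't knowable yet.
std::map<std::string, std::pair<Output, std::optional<Path>>>
outputsAndOptPaths(const std::map<std::string, Output> & outputs)
{
    std::map<std::string, std::pair<Output, std::optional<Path>>> res;
    for (auto & [name, out] : outputs)
        res.emplace(name, std::make_pair(
            out,
            out.contentAddressed
                ? std::nullopt
                : std::optional<Path>("/nix/store/example-" + name)));
    return res;
}

int main()
{
    auto m = outputsAndOptPaths({{"out", {}}, {"dev", {true}}});
    for (auto & [name, outAndPath] : m)
        std::cout << name << " -> "
                  << outAndPath.second.value_or("(not yet known)") << "\n";
}
```

Callers that can tolerate missing paths use the optional variant; the non-optional `outputsAndPaths` is the transitional interface slated for removal once CA derivations land.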
@ -623,9 +594,9 @@ Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv)
void writeDerivation(Sink & out, const Store & store, const BasicDerivation & drv)
{
    out << drv.outputs.size();
    for (auto & i : drv.outputs) {
    for (auto & i : drv.outputsAndPaths(store)) {
        out << i.first
            << store.printStorePath(i.second.path(store, drv.name));
            << store.printStorePath(i.second.second);
        std::visit(overloaded {
            [&](DerivationOutputInputAddressed doi) {
                out << "" << "";

@ -638,7 +609,7 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & drv)
                out << (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
                    << "";
            },
        }, i.second.output);
        }, i.second.first.output);
    }
    writeStorePaths(store, out, drv.inputSrcs);
    out << drv.platform << drv.builder << drv.args;
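The serializer above dispatches on the `DerivationOutput` variant via `std::visit` with the common `overloaded` helper (defined in Nix's utility headers; reproduced here from the standard C++17 idiom). A self-contained sketch with toy alternatives:

```cpp
#include <iostream>
#include <variant>

// The usual `overloaded` idiom: aggregate several lambdas into one visitor.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

// Stand-ins for the three DerivationOutput alternatives.
struct InputAddressed { };
struct CAFixed { };
struct CAFloating { };
using Output = std::variant<InputAddressed, CAFixed, CAFloating>;

int main()
{
    Output out = CAFixed{};
    std::visit(overloaded {
        [](InputAddressed) { std::cout << "input-addressed\n"; },
        [](CAFixed)        { std::cout << "fixed-output\n"; },
        [](CAFloating)     { std::cout << "floating CA\n"; },
    }, out);
}
```

Because `std::visit` requires the visitor to handle every alternative, adding a fourth output kind later fails to compile until every serializer is updated, which is the point of using a variant here.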
@ -47,6 +47,9 @@ struct DerivationOutput
        DerivationOutputCAFloating
    > output;
    std::optional<HashType> hashAlgoOpt(const Store & store) const;
    /* Note: when you use this function, make sure you're passing the right
       derivation name. When in doubt, use the safer interface provided by
       BasicDerivation::outputsAndPaths. */
    std::optional<StorePath> pathOpt(const Store & store, std::string_view drvName) const;
    /* DEPRECATED: Remove after CA drvs are fully implemented */
    StorePath path(const Store & store, std::string_view drvName) const {

@ -58,6 +61,15 @@ struct DerivationOutput

typedef std::map<string, DerivationOutput> DerivationOutputs;

/* These are analogues of the DerivationOutputs data type above, but they
   also contain, for each output, the (optional) store path in which it
   would be written. To calculate values of these types, see the
   corresponding functions in BasicDerivation. */
typedef std::map<string, std::pair<DerivationOutput, StorePath>>
  DerivationOutputsAndPaths;
typedef std::map<string, std::pair<DerivationOutput, std::optional<StorePath>>>
  DerivationOutputsAndOptPaths;

/* For inputs that are sub-derivations, we specify exactly which
   output IDs we are interested in. */
typedef std::map<StorePath, StringSet> DerivationInputs;
@ -107,6 +119,13 @@ struct BasicDerivation
    /* Return the output names of a derivation. */
    StringSet outputNames() const;

    /* Calculate the maps that contain all the DerivationOutputs, augmented
       with knowledge of the store paths they would be written into. The
       first of these functions will be removed when the CA work is
       completed. */
    DerivationOutputsAndPaths outputsAndPaths(const Store & store) const;
    DerivationOutputsAndOptPaths outputsAndOptPaths(const Store & store) const;

    static std::string_view nameFromPath(const StorePath & storePath);
};
@ -127,11 +146,11 @@ class Store;
enum RepairFlag : bool { NoRepair = false, Repair = true };

/* Write a derivation to the Nix store, and return its path. */
StorePath writeDerivation(ref<Store> store,
    const Derivation & drv, std::string_view name, RepairFlag repair = NoRepair);
StorePath writeDerivation(Store & store,
    const Derivation & drv, RepairFlag repair = NoRepair);

/* Read a derivation from a file. */
Derivation readDerivation(const Store & store, const Path & drvPath, std::string_view name);
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);

// FIXME: remove
bool isDerivation(const string & fileName);
59	src/libstore/dummy-store.cc	Normal file
@ -0,0 +1,59 @@
#include "store-api.hh"

namespace nix {

static std::string uriScheme = "dummy://";

struct DummyStore : public Store
{
    DummyStore(const Params & params)
        : Store(params)
    { }

    string getUri() override
    {
        return uriScheme;
    }

    void queryPathInfoUncached(const StorePath & path,
        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
    {
        callback(nullptr);
    }

    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
    { unsupported("queryPathFromHashPart"); }

    void addToStore(const ValidPathInfo & info, Source & source,
        RepairFlag repair, CheckSigsFlag checkSigs) override
    { unsupported("addToStore"); }

    StorePath addToStore(const string & name, const Path & srcPath,
        FileIngestionMethod method, HashType hashAlgo,
        PathFilter & filter, RepairFlag repair) override
    { unsupported("addToStore"); }

    StorePath addTextToStore(const string & name, const string & s,
        const StorePathSet & references, RepairFlag repair) override
    { unsupported("addTextToStore"); }

    void narFromPath(const StorePath & path, Sink & sink) override
    { unsupported("narFromPath"); }

    void ensurePath(const StorePath & path) override
    { unsupported("ensurePath"); }

    BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
        BuildMode buildMode) override
    { unsupported("buildDerivation"); }
};

static RegisterStoreImplementation regStore([](
    const std::string & uri, const Store::Params & params)
    -> std::shared_ptr<Store>
{
    if (uri != uriScheme) return nullptr;
    return std::make_shared<DummyStore>(params);
});

}
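The new `dummy://` store hooks itself in through a static registrar: each implementation contributes a factory that inspects the URI and returns `nullptr` when it doesn't apply. A generic hedged sketch of that scheme-dispatch registry (stand-in types; the real mechanism is `RegisterStoreImplementation` in store-api.hh):

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

struct Store { virtual std::string getUri() = 0; virtual ~Store() = default; };
using Factory = std::function<std::shared_ptr<Store>(const std::string & uri)>;

// A static registrar: constructing one appends a factory to a global list.
std::vector<Factory> & factories() { static std::vector<Factory> fs; return fs; }
struct RegisterImpl { RegisterImpl(Factory f) { factories().push_back(std::move(f)); } };

// Each factory claims only the URIs it understands, exactly like the
// dummy:// registration above.
struct DummyStore : Store { std::string getUri() override { return "dummy://"; } };
static RegisterImpl regDummy([](const std::string & uri) -> std::shared_ptr<Store> {
    if (uri != "dummy://") return nullptr;
    return std::make_shared<DummyStore>();
});

std::shared_ptr<Store> openStore(const std::string & uri)
{
    for (auto & f : factories())
        if (auto store = f(uri)) return store;
    throw std::runtime_error("no store implementation for '" + uri + "'");
}

int main()
{
    std::cout << openStore("dummy://")->getUri() << "\n";
}
```

This is also why the `plugin-files` setting later in the diff works: a dlopened plugin's static `RegisterStoreImplementation` instances run at load time and extend the same registry.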
@ -38,9 +38,9 @@ void Store::exportPath(const StorePath & path, Sink & sink)
       filesystem corruption from spreading to other machines.
       Don't complain if the stored hash is zero (unknown). */
    Hash hash = hashSink.currentHash().first;
    if (hash != info->narHash && info->narHash != Hash(info->narHash->type))
    if (hash != info->narHash && info->narHash != Hash(info->narHash.type))
        throw Error("hash of path '%s' has changed from '%s' to '%s'!",
            printStorePath(path), info->narHash->to_string(Base32, true), hash.to_string(Base32, true));
            printStorePath(path), info->narHash.to_string(Base32, true), hash.to_string(Base32, true));

    teeSink
        << exportMagic
@ -69,17 +69,18 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
        if (magic != exportMagic)
            throw Error("Nix archive cannot be imported; wrong format");

        ValidPathInfo info(parseStorePath(readString(source)));
        auto path = parseStorePath(readString(source));

        //Activity act(*logger, lvlInfo, format("importing path '%s'") % info.path);

        info.references = readStorePaths<StorePathSet>(*this, source);

        auto references = readStorePaths<StorePathSet>(*this, source);
        auto deriver = readString(source);
        auto narHash = hashString(htSHA256, *saved.s);

        ValidPathInfo info { path, narHash };
        if (deriver != "")
            info.deriver = parseStorePath(deriver);

        info.narHash = hashString(htSHA256, *saved.s);
        info.references = references;
        info.narSize = saved.s->size();

        // Ignore optional legacy signature.
@ -17,15 +17,30 @@ struct FileTransferSettings : Config
    Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
        "String appended to the user agent in HTTP requests."};

    Setting<size_t> httpConnections{this, 25, "http-connections",
        "Number of parallel HTTP connections.",
    Setting<size_t> httpConnections{
        this, 25, "http-connections",
        R"(
          The maximum number of parallel TCP connections used to fetch
          files from binary caches and by other downloads. It defaults
          to 25. 0 means no limit.
        )",
        {"binary-caches-parallel-connections"}};

    Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
        "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
    Setting<unsigned long> connectTimeout{
        this, 0, "connect-timeout",
        R"(
          The timeout (in seconds) for establishing connections in the
          binary cache substituter. It corresponds to `curl`’s
          `--connect-timeout` option.
        )"};

    Setting<unsigned long> stalledDownloadTimeout{this, 300, "stalled-download-timeout",
        "Timeout (in seconds) for receiving data from servers during download. Nix cancels idle downloads after this timeout's duration."};
    Setting<unsigned long> stalledDownloadTimeout{
        this, 300, "stalled-download-timeout",
        R"(
          The timeout (in seconds) for receiving data from servers
          during download. Nix cancels idle downloads after this
          timeout's duration.
        )"};

    Setting<unsigned int> tries{this, 5, "download-attempts",
        "How often Nix will attempt to download a file before giving up."};
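The recurring change in this and the following hunks is moving each setting's description from short concatenated C strings to a raw string literal holding Markdown that the manual generator can consume. A minimal hedged sketch of the pattern with a toy `Setting` class (the real one carries defaults, aliases, and registration):

```cpp
#include <iostream>
#include <string>

// Toy stand-in for Nix's Setting<T>: just a value plus its documentation.
template<typename T>
struct Setting
{
    T value;
    std::string name;
    std::string doc;   // Markdown, rendered into the manual elsewhere
};

// A raw string literal R"( ... )" keeps multi-line Markdown readable and
// free of escape sequences, which is the whole point of the rewrite.
Setting<size_t> httpConnections{
    25, "http-connections",
    R"(
      The maximum number of parallel TCP connections used to fetch
      files from binary caches and by other downloads. It defaults
      to 25. 0 means no limit.
    )"};

int main()
{
    std::cout << httpConnections.name << " = " << httpConnections.value
              << "\n---\n" << httpConnections.doc << "\n";
}
```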
@ -85,8 +85,7 @@ void LocalStore::addIndirectRoot(const Path & path)
}


Path LocalFSStore::addPermRoot(const StorePath & storePath,
    const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir)
Path LocalFSStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot)
{
    Path gcRoot(canonPath(_gcRoot));

@ -95,47 +94,12 @@ Path LocalFSStore::addPermRoot(const StorePath & storePath,
            "creating a garbage collector root (%1%) in the Nix store is forbidden "
            "(are you running nix-build inside the store?)", gcRoot);

    if (indirect) {
        /* Don't clobber the link if it already exists and doesn't
           point to the Nix store. */
        if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
            throw Error("cannot create symlink '%1%'; already exists", gcRoot);
        makeSymlink(gcRoot, printStorePath(storePath));
        addIndirectRoot(gcRoot);
    }

    else {
        if (!allowOutsideRootsDir) {
            Path rootsDir = canonPath((format("%1%/%2%") % stateDir % gcRootsDir).str());

            if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/")
                throw Error(
                    "path '%1%' is not a valid garbage collector root; "
                    "it's not in the directory '%2%'",
                    gcRoot, rootsDir);
        }

        if (baseNameOf(gcRoot) == std::string(storePath.to_string()))
            writeFile(gcRoot, "");
        else
            makeSymlink(gcRoot, printStorePath(storePath));
    }

    /* Check that the root can be found by the garbage collector.
       !!! This can be very slow on machines that have many roots.
       Instead of reading all the roots, it would be more efficient to
       check if the root is in a directory in or linked from the
       gcroots directory. */
    if (settings.checkRootReachability) {
        auto roots = findRoots(false);
        if (roots[storePath].count(gcRoot) == 0)
            logWarning({
                .name = "GC root",
                .hint = hintfmt("warning: '%1%' is not in a directory where the garbage collector looks for roots; "
                    "therefore, '%2%' might be removed by the garbage collector",
                    gcRoot, printStorePath(storePath))
            });
    }
    /* Don't clobber the link if it already exists and doesn't
       point to the Nix store. */
    if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
        throw Error("cannot create symlink '%1%'; already exists", gcRoot);
    makeSymlink(gcRoot, printStorePath(storePath));
    addIndirectRoot(gcRoot);

    /* Grab the global GC root, causing us to block while a GC is in
       progress. This prevents the set of permanent roots from
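After this simplification, `addPermRoot` always takes the indirect path: create a symlink at the user's chosen location pointing into the store, then register that link with the collector. A standalone sketch of those two steps using `std::filesystem` and hypothetical paths, not the real libstore helpers:

```cpp
#include <filesystem>
#include <iostream>
#include <stdexcept>

namespace fs = std::filesystem;

// Simplified version of the surviving branch above: refuse to clobber a
// foreign file, then create the symlink and remember it as an indirect root.
void addPermRoot(const fs::path & storePath, const fs::path & gcRoot)
{
    if (fs::exists(fs::symlink_status(gcRoot))
        && (!fs::is_symlink(gcRoot)
            || fs::read_symlink(gcRoot).string().rfind("/nix/store/", 0) != 0))
        throw std::runtime_error("cannot create symlink '" + gcRoot.string()
            + "'; already exists");
    fs::create_symlink(storePath, gcRoot);
    // The real code now calls addIndirectRoot(gcRoot), which records the
    // link under /nix/var/nix/gcroots/auto so the collector can find it.
    std::cout << "registered indirect root " << gcRoot << "\n";
}

int main()
{
    // Hypothetical paths, for illustration only.
    addPermRoot("/nix/store/abc123-hello", "./result-demo");
}
```

Dropping the direct-root branch also removes the need for the `gc-check-reachability` setting, whose deletion appears in the globals hunks below.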
@ -10,6 +10,9 @@
#include <sys/utsname.h>
#include <unordered_set>

#include <nlohmann/json.hpp>


namespace nix {


@ -160,9 +163,9 @@ template<> std::string BaseSetting<SandboxMode>::to_string() const
    else abort();
}

template<> void BaseSetting<SandboxMode>::toJSON(JSONPlaceholder & out)
template<> nlohmann::json BaseSetting<SandboxMode>::toJSON()
{
    AbstractSetting::toJSON(out);
    return AbstractSetting::toJSON();
}

template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
@ -80,89 +80,209 @@ public:
    Setting<bool> keepGoing{this, false, "keep-going",
        "Whether to keep building derivations when another build fails."};

    Setting<bool> tryFallback{this, false, "fallback",
        "Whether to fall back to building when substitution fails.",
    Setting<bool> tryFallback{
        this, false, "fallback",
        R"(
          If set to `true`, Nix will fall back to building from source if a
          binary substitute fails. This is equivalent to the `--fallback`
          flag. The default is `false`.
        )",
        {"build-fallback"}};

    /* Whether to show build log output in real time. */
    bool verboseBuild = true;

    Setting<size_t> logLines{this, 10, "log-lines",
        "If verbose-build is false, the number of lines of the tail of "
        "If `verbose-build` is false, the number of lines of the tail of "
        "the log to show if a build fails."};

    MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs",
        "Maximum number of parallel build jobs. \"auto\" means use number of cores.",
    MaxBuildJobsSetting maxBuildJobs{
        this, 1, "max-jobs",
        R"(
          This option defines the maximum number of jobs that Nix will try to
          build in parallel. The default is `1`. The special value `auto`
          causes Nix to use the number of CPUs in your system. `0` is useful
          when using remote builders to prevent any local builds (except for
          derivations with the `preferLocalBuild` attribute, which are built
          locally regardless). It can be overridden using the `--max-jobs`
          (`-j`) command line switch.
        )",
        {"build-max-jobs"}};

    Setting<unsigned int> buildCores{this, getDefaultCores(), "cores",
        "Number of CPU cores to utilize in parallel within a build, "
        "i.e. by passing this number to Make via '-j'. 0 means that the "
        "number of actual CPU cores on the local host ought to be "
        "auto-detected.", {"build-cores"}};
    Setting<unsigned int> buildCores{
        this, getDefaultCores(), "cores",
        R"(
          Sets the value of the `NIX_BUILD_CORES` environment variable in the
          invocation of builders. Builders can use this variable at their
          discretion to control the maximum amount of parallelism. For
          instance, in Nixpkgs, if the derivation attribute
          `enableParallelBuilding` is set to `true`, the builder passes the
          `-jN` flag to GNU Make. It can be overridden using the `--cores`
          command line switch and defaults to `1`. The value `0` means that
          the builder should use all available CPU cores in the system.
        )",
        {"build-cores"}};

    /* Read-only mode. Don't copy stuff to the store, don't change
       the database. */
    bool readOnlyMode = false;

    Setting<std::string> thisSystem{this, SYSTEM, "system",
        "The canonical Nix system name."};
    Setting<std::string> thisSystem{
        this, SYSTEM, "system",
        R"(
          This option specifies the canonical Nix system name of the current
          installation, such as `i686-linux` or `x86_64-darwin`. Nix can only
          build derivations whose `system` attribute equals the value
          specified here. In general, it never makes sense to modify this
          value from its default, since you can use it to ‘lie’ about the
          platform you are building on (e.g., perform a Mac OS build on a
          Linux machine; the result would obviously be wrong). It only makes
          sense if the Nix binaries can run on multiple platforms, e.g.,
          ‘universal binaries’ that run on `x86_64-linux` and `i686-linux`.

          It defaults to the canonical Nix system name detected by `configure`
          at build time.
        )"};

    Setting<time_t> maxSilentTime{this, 0, "max-silent-time",
        "The maximum time in seconds that a builder can go without "
        "producing any output on stdout/stderr before it is killed. "
        "0 means infinity.",
        {"build-max-silent-time"}};
    Setting<time_t> maxSilentTime{
        this, 0, "max-silent-time",
        R"(
          This option defines the maximum number of seconds that a builder can
          go without producing any data on standard output or standard error.
          This is useful (for instance in an automated build system) to catch
          builds that are stuck in an infinite loop, or to catch remote builds
          that are hanging due to network problems. It can be overridden using
          the `--max-silent-time` command line switch.

          The value `0` means that there is no timeout. This is also the
          default.
        )",
        {"build-max-silent-time"}};

    Setting<time_t> buildTimeout{this, 0, "timeout",
        "The maximum duration in seconds that a builder can run. "
        "0 means infinity.", {"build-timeout"}};
    Setting<time_t> buildTimeout{
        this, 0, "timeout",
        R"(
          This option defines the maximum number of seconds that a builder can
          run. This is useful (for instance in an automated build system) to
          catch builds that are stuck in an infinite loop but keep writing to
          their standard output or standard error. It can be overridden using
          the `--timeout` command line switch.

          The value `0` means that there is no timeout. This is also the
          default.
        )",
        {"build-timeout"}};

    PathSetting buildHook{this, true, nixLibexecDir + "/nix/build-remote", "build-hook",
        "The path of the helper program that executes builds to remote machines."};

    Setting<std::string> builders{this, "@" + nixConfDir + "/machines", "builders",
        "A semicolon-separated list of build machines, in the format of nix.machines."};
    Setting<std::string> builders{
        this, "@" + nixConfDir + "/machines", "builders",
        "A semicolon-separated list of build machines, in the format of `nix.machines`."};

    Setting<bool> buildersUseSubstitutes{this, false, "builders-use-substitutes",
        "Whether build machines should use their own substitutes for obtaining "
        "build dependencies if possible, rather than waiting for this host to "
        "upload them."};
    Setting<bool> buildersUseSubstitutes{
        this, false, "builders-use-substitutes",
        R"(
          If set to `true`, Nix will instruct remote build machines to use
          their own binary substitutes if available. In practical terms, this
          means that remote hosts will fetch as many build dependencies as
          possible from their own substitutes (e.g., from `cache.nixos.org`),
          instead of waiting for this host to upload them all. This can
          drastically reduce build times if the network connection between
          this computer and the remote build host is slow.
        )"};

    Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space",
        "Amount of reserved disk space for the garbage collector."};

    Setting<bool> fsyncMetadata{this, true, "fsync-metadata",
        "Whether SQLite should use fsync()."};
    Setting<bool> fsyncMetadata{
        this, true, "fsync-metadata",
        R"(
          If set to `true`, changes to the Nix store metadata (in
          `/nix/var/nix/db`) are synchronously flushed to disk. This improves
          robustness in case of system crashes, but reduces performance. The
          default is `true`.
        )"};

    Setting<bool> useSQLiteWAL{this, !isWSL1(), "use-sqlite-wal",
        "Whether SQLite should use WAL mode."};

    Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
        "Whether to call sync() before registering a path as valid."};
        "Whether to call `sync()` before registering a path as valid."};

    Setting<bool> useSubstitutes{this, true, "substitute",
        "Whether to use substitutes.",
    Setting<bool> useSubstitutes{
        this, true, "substitute",
        R"(
          If set to `true` (default), Nix will use binary substitutes if
          available. This option can be disabled to force building from
          source.
        )",
        {"build-use-substitutes"}};

    Setting<std::string> buildUsersGroup{this, "", "build-users-group",
        "The Unix group that contains the build users."};
    Setting<std::string> buildUsersGroup{
        this, "", "build-users-group",
        R"(
          This option specifies the Unix group containing the Nix build user
          accounts. In multi-user Nix installations, builds should not be
          performed by the Nix account since that would allow users to
          arbitrarily modify the Nix store and database by supplying specially
          crafted builders; and they cannot be performed by the calling user
          since that would allow him/her to influence the build result.

          Therefore, if this option is non-empty and specifies a valid group,
          builds will be performed under the user accounts that are a member
          of the group specified here (as listed in `/etc/group`). Those user
          accounts should not be used for any other purpose!

          Nix will never run two builds under the same user account at the
          same time. This is to prevent an obvious security hole: a malicious
          user writing a Nix expression that modifies the build result of a
          legitimate Nix expression being built by another user. Therefore it
          is good to have as many Nix build user accounts as you can spare.
          (Remember: uids are cheap.)

          The build users should have permission to create files in the Nix
          store, but not delete them. Therefore, `/nix/store` should be owned
          by the Nix account, its group should be the group specified here,
          and its mode should be `1775`.

          If the build users group is empty, builds will be performed under
          the uid of the Nix process (that is, the uid of the caller if
          `NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
          `NIX_REMOTE` is `daemon`). Obviously, this should not be used in
          multi-user settings with untrusted users.
        )"};

    Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
        "Whether to impersonate a Linux 2.6 machine on newer kernels.",
        {"build-impersonate-linux-26"}};

    Setting<bool> keepLog{this, true, "keep-build-log",
        "Whether to store build logs.",
    Setting<bool> keepLog{
        this, true, "keep-build-log",
        R"(
          If set to `true` (the default), Nix will write the build log of a
          derivation (i.e. the standard output and error of its builder) to
          the directory `/nix/var/log/nix/drvs`. The build log can be
          retrieved using the command `nix-store -l path`.
        )",
        {"build-keep-log"}};

    Setting<bool> compressLog{this, true, "compress-build-log",
        "Whether to compress logs.",
    Setting<bool> compressLog{
        this, true, "compress-build-log",
        R"(
          If set to `true` (the default), build logs written to
          `/nix/var/log/nix/drvs` will be compressed on the fly using bzip2.
          Otherwise, they will not be compressed.
        )",
        {"build-compress-log"}};

    Setting<unsigned long> maxLogSize{this, 0, "max-build-log-size",
        "Maximum number of bytes a builder can write to stdout/stderr "
        "before being killed (0 means no limit).",
    Setting<unsigned long> maxLogSize{
        this, 0, "max-build-log-size",
        R"(
          This option defines the maximum number of bytes that a builder can
          write to its stdout/stderr. If the builder exceeds this limit, it’s
          killed. A value of `0` (the default) means that there is no limit.
        )",
        {"build-max-log-size"}};

    /* When buildRepeat > 0 and verboseBuild == true, whether to print
@ -173,57 +293,156 @@ public:
    Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
        "How often (in seconds) to poll for locks."};

    Setting<bool> checkRootReachability{this, false, "gc-check-reachability",
        "Whether to check if new GC roots can in fact be found by the "
        "garbage collector."};

    Setting<bool> gcKeepOutputs{this, false, "keep-outputs",
        "Whether the garbage collector should keep outputs of live derivations.",
    Setting<bool> gcKeepOutputs{
        this, false, "keep-outputs",
        R"(
          If `true`, the garbage collector will keep the outputs of
          non-garbage derivations. If `false` (default), outputs will be
          deleted unless they are GC roots themselves (or reachable from other
          roots).

          In general, outputs must be registered as roots separately. However,
          even if the output of a derivation is registered as a root, the
          collector will still delete store paths that are used only at build
          time (e.g., the C compiler, or source tarballs downloaded from the
          network). To prevent it from doing so, set this option to `true`.
        )",
        {"gc-keep-outputs"}};

    Setting<bool> gcKeepDerivations{this, true, "keep-derivations",
        "Whether the garbage collector should keep derivers of live paths.",
    Setting<bool> gcKeepDerivations{
        this, true, "keep-derivations",
        R"(
          If `true` (default), the garbage collector will keep the derivations
          from which non-garbage store paths were built. If `false`, they will
          be deleted unless explicitly registered as a root (or reachable from
          other roots).

          Keeping derivations around is useful for querying and traceability
          (e.g., it allows you to ask with what dependencies or options a
          store path was built), so by default this option is on. Turn it off
          to save a bit of disk space (or a lot if `keep-outputs` is also
          turned on).
        )",
        {"gc-keep-derivations"}};

    Setting<bool> autoOptimiseStore{this, false, "auto-optimise-store",
        "Whether to automatically replace files with identical contents with hard links."};
    Setting<bool> autoOptimiseStore{
        this, false, "auto-optimise-store",
        R"(
          If set to `true`, Nix automatically detects files in the store
          that have identical contents, and replaces them with hard links to
          a single copy. This saves disk space. If set to `false` (the
          default), you can still run `nix-store --optimise` to get rid of
          duplicate files.
        )"};

    Setting<bool> envKeepDerivations{this, false, "keep-env-derivations",
        "Whether to add derivations as a dependency of user environments "
        "(to prevent them from being GCed).",
    Setting<bool> envKeepDerivations{
        this, false, "keep-env-derivations",
        R"(
          If `false` (default), derivations are not stored in Nix user
          environments. That is, the derivations of any build-time-only
          dependencies may be garbage-collected.

          If `true`, when you add a Nix derivation to a user environment, the
          path of the derivation is stored in the user environment. Thus, the
          derivation will not be garbage-collected until the user environment
          generation is deleted (`nix-env --delete-generations`). To prevent
          build-time-only dependencies from being collected, you should also
          turn on `keep-outputs`.

          The difference between this option and `keep-derivations` is that
          this one is “sticky”: it applies to any user environment created
          while this option was enabled, while `keep-derivations` only applies
          at the moment the garbage collector is run.
        )",
        {"env-keep-derivations"}};

    /* Whether to lock the Nix client and worker to the same CPU. */
    bool lockCPU;

    Setting<SandboxMode> sandboxMode{this,
    Setting<SandboxMode> sandboxMode{
        this,
#if __linux__
        smEnabled
#else
        smDisabled
#endif
        , "sandbox",
        "Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
        R"(
          If set to `true`, builds will be performed in a *sandboxed
          environment*, i.e., they’re isolated from the normal file system
          hierarchy and will only see their dependencies in the Nix store,
          the temporary build directory, private versions of `/proc`,
          `/dev`, `/dev/shm` and `/dev/pts` (on Linux), and the paths
          configured with the `sandbox-paths` option. This is useful to
          prevent undeclared dependencies on files in directories such as
          `/usr/bin`. In addition, on Linux, builds run in private PID,
          mount, network, IPC and UTS namespaces to isolate them from other
          processes in the system (except that fixed-output derivations do
          not run in a private network namespace, to ensure they can access
          the network).

          Currently, sandboxing only works on Linux and macOS. The use of a
          sandbox requires that Nix is run as root (so you should use the
          “build users” feature to perform the actual builds under different
          users than root).

          If this option is set to `relaxed`, then fixed-output derivations
          and derivations that have the `__noChroot` attribute set to `true`
          do not run in sandboxes.

          The default is `true` on Linux and `false` on all other platforms.
        )",
        {"build-use-chroot", "build-use-sandbox"}};

    Setting<PathSet> sandboxPaths{this, {}, "sandbox-paths",
        "The paths to make available inside the build sandbox.",
    Setting<PathSet> sandboxPaths{
        this, {}, "sandbox-paths",
        R"(
          A list of paths bind-mounted into Nix sandbox environments. You can
          use the syntax `target=source` to mount a path in a different
          location in the sandbox; for instance, `/bin=/nix-bin` will mount
          the path `/nix-bin` as `/bin` inside the sandbox. If *source* is
          followed by `?`, then it is not an error if *source* does not exist;
          for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl` will
          only be mounted in the sandbox if it exists in the host filesystem.

          Depending on how Nix was built, the default value for this option
          may be empty or provide `/bin/sh` as a bind-mount of `bash`.
        )",
        {"build-chroot-dirs", "build-sandbox-paths"}};

    Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
        "Whether to disable sandboxing when the kernel doesn't allow it."};

    Setting<PathSet> extraSandboxPaths{this, {}, "extra-sandbox-paths",
        "Additional paths to make available inside the build sandbox.",
    Setting<PathSet> extraSandboxPaths{
        this, {}, "extra-sandbox-paths",
        R"(
          A list of additional paths appended to `sandbox-paths`. Useful if
          you want to extend its default value.
        )",
        {"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};

    Setting<size_t> buildRepeat{this, 0, "repeat",
        "The number of times to repeat a build in order to verify determinism.",
    Setting<size_t> buildRepeat{
        this, 0, "repeat",
        R"(
          How many times to repeat builds to check whether they are
          deterministic. The default value is 0. If the value is non-zero,
          every build is repeated the specified number of times. If the
          contents of any of the runs differs from the previous ones and
          `enforce-determinism` is true, the build is rejected and the
          resulting store paths are not registered as “valid” in Nix’s
          database.
        )",
        {"build-repeat"}};

#if __linux__
    Setting<std::string> sandboxShmSize{this, "50%", "sandbox-dev-shm-size",
        "The size of /dev/shm in the build sandbox."};
    Setting<std::string> sandboxShmSize{
        this, "50%", "sandbox-dev-shm-size",
        R"(
          This option determines the maximum size of the `tmpfs` filesystem
          mounted on `/dev/shm` in Linux sandboxes. For the format, see the
          description of the `size` option of `tmpfs` in mount(8). The
          default is `50%`.
        )"};

    Setting<Path> sandboxBuildDir{this, "/build", "sandbox-build-dir",
        "The build directory inside the sandbox."};
@ -237,118 +456,411 @@ public:
|
|||
"Whether to log Darwin sandbox access violations to the system log."};
|
||||
#endif
|
||||
|
||||
Setting<bool> runDiffHook{this, false, "run-diff-hook",
|
||||
"Whether to run the program specified by the diff-hook setting "
|
||||
"repeated builds produce a different result. Typically used to "
|
||||
"plug in diffoscope."};
|
||||
Setting<bool> runDiffHook{
|
||||
this, false, "run-diff-hook",
|
||||
R"(
|
||||
If true, enable the execution of the `diff-hook` program.
|
||||
|
||||
PathSetting diffHook{this, true, "", "diff-hook",
|
||||
"A program that prints out the differences between the two paths "
|
||||
"specified on its command line."};
|
||||
When using the Nix daemon, `run-diff-hook` must be set in the
|
||||
`nix.conf` configuration file, and cannot be passed at the command
|
||||
line.
|
||||
)"};
|
||||
|
||||
Setting<bool> enforceDeterminism{this, true, "enforce-determinism",
|
||||
"Whether to fail if repeated builds produce different output."};
|
||||
PathSetting diffHook{
|
||||
this, true, "", "diff-hook",
|
||||
R"(
|
||||
Absolute path to an executable capable of diffing build
|
||||
results. The hook is executed if `run-diff-hook` is true, and the
|
||||
output of a build is known to not be the same. This program is not
|
||||
executed to determine if two results are the same.
|
||||
|
||||
Setting<Strings> trustedPublicKeys{this,
|
||||
The diff hook is executed by the same user and group who ran the
|
||||
build. However, the diff hook does not have write access to the
|
||||
store path just built.
|
||||
|
||||
The diff hook program receives three parameters:
|
||||
|
||||
1. A path to the previous build's results
|
||||
|
||||
2. A path to the current build's results
|
||||
|
||||
3. The path to the build's derivation
|
||||
|
||||
4. The path to the build's scratch directory. This directory will
|
||||
exist only if the build was run with `--keep-failed`.
|
||||
|
||||
The stderr and stdout output from the diff hook will not be
|
||||
displayed to the user. Instead, it will print to the nix-daemon's
|
||||
log.
|
||||
|
||||
When using the Nix daemon, `diff-hook` must be set in the `nix.conf`
|
||||
configuration file, and cannot be passed at the command line.
|
||||
)"};
|
||||
|
||||
Setting<bool> enforceDeterminism{
|
||||
this, true, "enforce-determinism",
|
||||
"Whether to fail if repeated builds produce different output. See `repeat`."};
|
||||
|
||||
Setting<Strings> trustedPublicKeys{
|
||||
this,
|
||||
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
|
||||
"trusted-public-keys",
|
||||
"Trusted public keys for secure substitution.",
|
||||
R"(
|
||||
A whitespace-separated list of public keys. When paths are copied
|
||||
from another Nix store (such as a binary cache), they must be
|
||||
signed with one of these keys. For example:
|
||||
`cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
|
||||
hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=`.
|
||||
)",
|
||||
{"binary-cache-public-keys"}};
|
||||
|
||||
Setting<Strings> secretKeyFiles{this, {}, "secret-key-files",
|
||||
"Secret keys with which to sign local builds."};
|
||||
Setting<Strings> secretKeyFiles{
|
||||
this, {}, "secret-key-files",
|
||||
R"(
|
||||
A whitespace-separated list of files containing secret (private)
|
||||
keys. These are used to sign locally-built paths. They can be
|
||||
generated using `nix-store --generate-binary-cache-key`. The
|
||||
corresponding public key can be distributed to other users, who
|
||||
can add it to `trusted-public-keys` in their `nix.conf`.
|
||||
)"};
|
||||
|
||||
Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl",
|
||||
"How long downloaded files are considered up-to-date."};
|
||||
Setting<unsigned int> tarballTtl{
|
||||
this, 60 * 60, "tarball-ttl",
|
||||
R"(
|
||||
The number of seconds a downloaded tarball is considered fresh. If
|
||||
the cached tarball is stale, Nix will check whether it is still up
|
||||
to date using the ETag header. Nix will download a new version if
|
||||
the ETag header is unsupported, or the cached ETag doesn't match.
|
||||
|
||||
Setting<bool> requireSigs{this, true, "require-sigs",
|
||||
"Whether to check that any non-content-addressed path added to the "
|
||||
"Nix store has a valid signature (that is, one signed using a key "
|
||||
"listed in 'trusted-public-keys'."};
|
||||
Setting the TTL to `0` forces Nix to always check if the tarball is
|
||||
up to date.
|
||||
|
||||
Setting<StringSet> extraPlatforms{this,
|
||||
Nix caches tarballs in `$XDG_CACHE_HOME/nix/tarballs`.
|
||||
|
||||
Files fetched via `NIX_PATH`, `fetchGit`, `fetchMercurial`,
|
||||
`fetchTarball`, and `fetchurl` respect this TTL.
|
||||
)"};
|
||||
|
||||
Setting<bool> requireSigs{
|
||||
this, true, "require-sigs",
|
||||
R"(
|
||||
If set to `true` (the default), any non-content-addressed path added
|
||||
or copied to the Nix store (e.g. when substituting from a binary
|
||||
cache) must have a valid signature, that is, be signed using one of
|
||||
the keys listed in `trusted-public-keys` or `secret-key-files`. Set
|
||||
to `false` to disable signature checking.
|
||||
)"};
|
||||
|
||||
Setting<StringSet> extraPlatforms{
|
||||
this,
|
||||
std::string{SYSTEM} == "x86_64-linux" && !isWSL1() ? StringSet{"i686-linux"} : StringSet{},
|
||||
"extra-platforms",
|
||||
"Additional platforms that can be built on the local system. "
|
||||
"These may be supported natively (e.g. armv7 on some aarch64 CPUs "
|
||||
"or using hacks like qemu-user."};
|
||||
R"(
|
||||
Platforms other than the native one which this machine is capable of
|
||||
building for. This can be useful for supporting additional
|
||||
architectures on compatible machines: i686-linux can be built on
|
||||
x86\_64-linux machines (and the default for this setting reflects
|
||||
this); armv7 is backwards-compatible with armv6 and armv5tel; some
|
||||
aarch64 machines can also natively run 32-bit ARM code; and
|
||||
qemu-user may be used to support non-native platforms (though this
|
||||
may be slow and buggy). Most values for this are not enabled by
|
||||
default because build systems will often misdetect the target
|
||||
platform and generate incompatible code, so you may wish to
|
||||
cross-check the results of using this option against proper
|
||||
natively-built versions of your derivations.
|
||||
)"};
|
||||
|
||||
Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
|
||||
Setting<StringSet> systemFeatures{
|
||||
this, getDefaultSystemFeatures(),
|
||||
"system-features",
|
||||
"Optional features that this system implements (like \"kvm\")."};
|
||||
R"(
|
||||
A set of system “features” supported by this machine, e.g. `kvm`.
|
||||
Derivations can express a dependency on such features through the
|
||||
derivation attribute `requiredSystemFeatures`. For example, the
|
||||
attribute
|
||||
|
||||
Setting<Strings> substituters{this,
|
||||
requiredSystemFeatures = [ "kvm" ];
|
||||
|
||||
ensures that the derivation can only be built on a machine with the
|
||||
`kvm` feature.
|
||||
|
||||
This setting by default includes `kvm` if `/dev/kvm` is accessible,
|
||||
and the pseudo-features `nixos-test`, `benchmark` and `big-parallel`
|
||||
that are used in Nixpkgs to route builds to specific machines.
|
||||
)"};
|
||||
|
||||
Setting<Strings> substituters{
|
||||
this,
|
||||
nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
|
||||
"substituters",
|
||||
"The URIs of substituters (such as https://cache.nixos.org/).",
|
||||
R"(
|
||||
A list of URLs of substituters, separated by whitespace. The default
|
||||
is `https://cache.nixos.org`.
|
||||
)",
|
||||
{"binary-caches"}};
|
||||
|
||||
// FIXME: provide a way to add to option values.
|
||||
Setting<Strings> extraSubstituters{this, {}, "extra-substituters",
|
||||
"Additional URIs of substituters.",
|
||||
Setting<Strings> extraSubstituters{
|
||||
this, {}, "extra-substituters",
|
||||
R"(
|
||||
Additional binary caches appended to those specified in
|
||||
`substituters`. When used by unprivileged users, untrusted
|
||||
substituters (i.e. those not listed in `trusted-substituters`) are
|
||||
silently ignored.
|
||||
)",
|
||||
{"extra-binary-caches"}};
|
||||
|
||||
Setting<StringSet> trustedSubstituters{this, {}, "trusted-substituters",
|
||||
"Disabled substituters that may be enabled via the substituters option by untrusted users.",
|
||||
Setting<StringSet> trustedSubstituters{
|
||||
this, {}, "trusted-substituters",
|
||||
R"(
|
||||
A list of URLs of substituters, separated by whitespace. These are
|
||||
not used by default, but can be enabled by users of the Nix daemon
|
||||
by specifying `--option substituters urls` on the command
|
||||
line. Unprivileged users are only allowed to pass a subset of the
|
||||
URLs listed in `substituters` and `trusted-substituters`.
|
||||
)",
|
||||
{"trusted-binary-caches"}};
|
||||
|
||||
Setting<Strings> trustedUsers{this, {"root"}, "trusted-users",
|
||||
"Which users or groups are trusted to ask the daemon to do unsafe things."};
|
||||
Setting<Strings> trustedUsers{
|
||||
this, {"root"}, "trusted-users",
|
||||
R"(
|
||||
A list of names of users (separated by whitespace) that have
|
||||
additional rights when connecting to the Nix daemon, such as the
|
||||
ability to specify additional binary caches, or to import unsigned
|
||||
NARs. You can also specify groups by prefixing them with `@`; for
|
||||
instance, `@wheel` means all users in the `wheel` group. The default
|
||||
is `root`.
|
||||
|
||||
Setting<unsigned int> ttlNegativeNarInfoCache{this, 3600, "narinfo-cache-negative-ttl",
|
||||
"The TTL in seconds for negative lookups in the disk cache i.e binary cache lookups that "
|
||||
"return an invalid path result"};
|
||||
> **Warning**
|
||||
>
|
||||
> Adding a user to `trusted-users` is essentially equivalent to
|
||||
> giving that user root access to the system. For example, the user
|
||||
> can set `sandbox-paths` and thereby obtain read access to
|
||||
> directories that are otherwise inacessible to them.
|
||||
)"};
|
||||
|
||||
Setting<unsigned int> ttlPositiveNarInfoCache{this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
|
||||
"The TTL in seconds for positive lookups in the disk cache i.e binary cache lookups that "
|
||||
"return a valid path result."};
|
||||
Setting<unsigned int> ttlNegativeNarInfoCache{
|
||||
this, 3600, "narinfo-cache-negative-ttl",
|
||||
R"(
|
||||
The TTL in seconds for negative lookups. If a store path is queried
|
||||
from a substituter but was not found, there will be a negative
|
||||
lookup cached in the local disk cache database for the specified
|
||||
duration.
|
||||
)"};
|
||||
|
||||
Setting<unsigned int> ttlPositiveNarInfoCache{
|
||||
this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
|
||||
R"(
|
||||
The TTL in seconds for positive lookups. If a store path is queried
|
||||
from a substituter, the result of the query will be cached in the
|
||||
local disk cache database including some of the NAR metadata. The
|
||||
default TTL is a month, setting a shorter TTL for positive lookups
|
||||
can be useful for binary caches that have frequent garbage
|
||||
collection, in which case having a more frequent cache invalidation
|
||||
would prevent trying to pull the path again and failing with a hash
|
||||
mismatch if the build isn't reproducible.
|
||||
)"};
|
||||
|
||||
/* ?Who we trust to use the daemon in safe ways */
|
||||
Setting<Strings> allowedUsers{this, {"*"}, "allowed-users",
|
||||
"Which users or groups are allowed to connect to the daemon."};
|
||||
Setting<Strings> allowedUsers{
|
||||
this, {"*"}, "allowed-users",
|
||||
R"(
|
||||
A list of names of users (separated by whitespace) that are allowed
|
||||
to connect to the Nix daemon. As with the `trusted-users` option,
|
||||
you can specify groups by prefixing them with `@`. Also, you can
|
||||
allow all users by specifying `*`. The default is `*`.
|
||||
|
||||
Note that trusted users are always allowed to connect.
|
||||
)"};
|
||||
|
||||
Setting<bool> printMissing{this, true, "print-missing",
|
||||
"Whether to print what paths need to be built or downloaded."};
|
||||
|
||||
Setting<std::string> preBuildHook{this, "",
|
||||
"pre-build-hook",
|
||||
"A program to run just before a build to set derivation-specific build settings."};
|
||||
Setting<std::string> preBuildHook{
|
||||
this, "", "pre-build-hook",
|
||||
R"(
|
||||
If set, the path to a program that can set extra derivation-specific
|
||||
settings for this system. This is used for settings that can't be
|
||||
captured by the derivation model itself and are too variable between
|
||||
different versions of the same system to be hard-coded into nix.
|
||||
|
||||
Setting<std::string> postBuildHook{this, "", "post-build-hook",
|
||||
"A program to run just after each successful build."};
|
||||
The hook is passed the derivation path and, if sandboxes are
|
||||
enabled, the sandbox directory. It can then modify the sandbox and
|
||||
send a series of commands to modify various settings to stdout. The
|
||||
currently recognized commands are:
|
||||
|
||||
Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
|
||||
"Path to the netrc file used to obtain usernames/passwords for downloads."};
|
||||
- `extra-sandbox-paths`
|
||||
Pass a list of files and directories to be included in the
|
||||
sandbox for this build. One entry per line, terminated by an
|
||||
empty line. Entries have the same format as `sandbox-paths`.
|
||||
)"};
|
||||
|
||||
Setting<std::string> postBuildHook{
|
||||
this, "", "post-build-hook",
|
||||
R"(
|
||||
Optional. The path to a program to execute after each build.
|
||||
|
||||
This option is only settable in the global `nix.conf`, or on the
|
||||
command line by trusted users.
|
||||
|
||||
When using the nix-daemon, the daemon executes the hook as `root`.
|
||||
If the nix-daemon is not involved, the hook runs as the user
|
||||
executing the nix-build.
|
||||
|
||||
- The hook executes after an evaluation-time build.
|
||||
|
||||
- The hook does not execute on substituted paths.
|
||||
|
||||
- The hook's output always goes to the user's terminal.
|
||||
|
||||
- If the hook fails, the build succeeds but no further builds
|
||||
execute.
|
||||
|
||||
- The hook executes synchronously, and blocks other builds from
|
||||
progressing while it runs.
|
||||
|
||||
The program executes with no arguments. The program's environment
|
||||
contains the following environment variables:
|
||||
|
||||
- `DRV_PATH`
|
||||
The derivation for the built paths.
|
||||
|
||||
Example:
|
||||
`/nix/store/5nihn1a7pa8b25l9zafqaqibznlvvp3f-bash-4.4-p23.drv`
|
||||
|
||||
- `OUT_PATHS`
|
||||
Output paths of the built derivation, separated by a space
|
||||
character.
|
||||
|
||||
Example:
|
||||
`/nix/store/zf5lbh336mnzf1nlswdn11g4n2m8zh3g-bash-4.4-p23-dev
|
||||
/nix/store/rjxwxwv1fpn9wa2x5ssk5phzwlcv4mna-bash-4.4-p23-doc
|
||||
/nix/store/6bqvbzjkcp9695dq0dpl5y43nvy37pq1-bash-4.4-p23-info
|
||||
/nix/store/r7fng3kk3vlpdlh2idnrbn37vh4imlj2-bash-4.4-p23-man
|
||||
/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`.
|
||||
)"};
|
||||
|
||||
Setting<std::string> netrcFile{
|
||||
this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
|
||||
R"(
|
||||
If set to an absolute path to a `netrc` file, Nix will use the HTTP
|
||||
authentication credentials in this file when trying to download from
|
||||
a remote host through HTTP or HTTPS. Defaults to
|
||||
`$NIX_CONF_DIR/netrc`.
|
||||
|
||||
The `netrc` file consists of a list of accounts in the following
|
||||
format:
|
||||
|
||||
machine my-machine
|
||||
login my-username
|
||||
password my-password
|
||||
|
||||
For the exact syntax, see [the `curl`
|
||||
documentation](https://ec.haxx.se/usingcurl-netrc.html).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> This must be an absolute path, and `~` is not resolved. For
|
||||
> example, `~/.netrc` won't resolve to your home directory's
|
||||
> `.netrc`.
|
||||
)"};
|
||||
|
||||
/* Path to the SSL CA file used */
|
||||
Path caFile;
|
||||
|
||||
#if __linux__
    Setting<bool> filterSyscalls{this, true, "filter-syscalls",
        "Whether to prevent certain dangerous system calls, such as "
        "creation of setuid/setgid files or adding ACLs or extended "
        "attributes. Only disable this if you're aware of the "
        "security implications."};
    Setting<bool> filterSyscalls{
        this, true, "filter-syscalls",
        R"(
          Whether to prevent certain dangerous system calls, such as
          creation of setuid/setgid files or adding ACLs or extended
          attributes. Only disable this if you're aware of the
          security implications.
        )"};

    Setting<bool> allowNewPrivileges{this, false, "allow-new-privileges",
        "Whether builders can acquire new privileges by calling programs with "
        "setuid/setgid bits or with file capabilities."};
    Setting<bool> allowNewPrivileges{
        this, false, "allow-new-privileges",
        R"(
          (Linux-specific.) By default, builders on Linux cannot acquire new
          privileges by calling setuid/setgid programs or programs that have
          file capabilities. For example, programs such as `sudo` or `ping`
          will fail. (Note that in sandbox builds, no such programs are
          available unless you bind-mount them into the sandbox via the
          `sandbox-paths` option.) You can allow the use of such programs by
          enabling this option. This is impure and usually undesirable, but
          may be useful in certain scenarios (e.g. to spin up containers or
          set up userspace network interfaces in tests).
        )"};
#endif

    Setting<uint64_t> minFree{this, 0, "min-free",
        "Automatically run the garbage collector when free disk space drops below the specified amount."};
    Setting<Strings> hashedMirrors{
        this, {}, "hashed-mirrors",
        R"(
          A list of web servers used by `builtins.fetchurl` to obtain files by
          hash. The default is `http://tarballs.nixos.org/`. Given a hash type
          *ht* and a base-16 hash *h*, Nix will try to download the file from
          *hashed-mirror*/*ht*/*h*. This allows files to be downloaded even if
          they have disappeared from their original URI. For example, given
          the default mirror `http://tarballs.nixos.org/`, when building the
          derivation

    Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free",
        "Stop deleting garbage when free disk space is above the specified amount."};
          ```nix
          builtins.fetchurl {
            url = "https://example.org/foo-1.2.3.tar.xz";
            sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae";
          }
          ```

          Nix will attempt to download this file from
          `http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae`
          first. If it is not available there, it will try the original URI.
        )"};

    Setting<uint64_t> minFree{
        this, 0, "min-free",
        R"(
          When free disk space in `/nix/store` drops below `min-free` during a
          build, Nix performs a garbage-collection until `max-free` bytes are
          available or there is no more garbage. A value of `0` (the default)
          disables this feature.
        )"};

    Setting<uint64_t> maxFree{
        this, std::numeric_limits<uint64_t>::max(), "max-free",
        R"(
          When a garbage collection is triggered by the `min-free` option, it
          stops as soon as `max-free` bytes are available. The default is
          infinity (i.e. delete all garbage).
        )"};

    Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval",
        "Number of seconds between checking free disk space."};

    Setting<Paths> pluginFiles{this, {}, "plugin-files",
        "Plugins to dynamically load at nix initialization time."};
    Setting<Paths> pluginFiles{
        this, {}, "plugin-files",
        R"(
          A list of plugin files to be loaded by Nix. Each of these files will
          be dlopened by Nix, allowing them to affect execution through static
          initialization. In particular, these plugins may construct static
          instances of RegisterPrimOp to add new primops or constants to the
          expression language, RegisterStoreImplementation to add new store
          implementations, RegisterCommand to add new subcommands to the `nix`
          command, and RegisterSetting to add new nix config settings. See the
          constructors for those types for more details.

          Since these files are loaded into the same address space as Nix
          itself, they must be DSOs compatible with the instance of Nix
          running at the time (i.e. compiled against the same headers, not
          linked to any incompatible libraries). They should not be linked to
          any Nix libs directly, as those will be available already at load
          time.

          If an entry in the list is a directory, all files in the directory
          are loaded as plugins (non-recursively).
        )"};

    Setting<std::string> githubAccessToken{this, "", "github-access-token",
        "GitHub access token to get access to GitHub data through the GitHub API for github:<..> flakes."};
        "GitHub access token to get access to GitHub data through the GitHub API for `github:<..>` flakes."};

    Setting<Strings> experimentalFeatures{this, {}, "experimental-features",
        "Experimental Nix features to enable."};

@@ -85,7 +85,7 @@ protected:
        checkEnabled();

        try {
            FileTransferRequest request(cacheUri + "/" + path);
            FileTransferRequest request(makeRequest(path));
            request.head = true;
            getFileTransfer()->download(request);
            return true;

@@ -103,7 +103,7 @@ protected:
        std::shared_ptr<std::basic_iostream<char>> istream,
        const std::string & mimeType) override
    {
        auto req = FileTransferRequest(cacheUri + "/" + path);
        auto req = makeRequest(path);
        req.data = std::make_shared<string>(StreamToSourceAdapter(istream).drain());
        req.mimeType = mimeType;
        try {

@@ -115,8 +115,11 @@ protected:

    FileTransferRequest makeRequest(const std::string & path)
    {
        FileTransferRequest request(cacheUri + "/" + path);
        return request;
        return FileTransferRequest(
            hasPrefix(path, "https://") || hasPrefix(path, "http://") || hasPrefix(path, "file://")
            ? path
            : cacheUri + "/" + path);

    }

    void getFile(const std::string & path, Sink & sink) override

@@ -93,6 +93,9 @@ struct LegacySSHStore : public Store
        try {
            auto conn(connections->get());

            /* No longer support missing NAR hash */
            assert(GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4);

            debug("querying remote host '%s' for info on '%s'", host, printStorePath(path));

            conn->to << cmdQueryPathInfos << PathSet{printStorePath(path)};

@@ -100,8 +103,10 @@ struct LegacySSHStore : public Store

            auto p = readString(conn->from);
            if (p.empty()) return callback(nullptr);
            auto info = std::make_shared<ValidPathInfo>(parseStorePath(p));
            assert(path == info->path);
            auto path2 = parseStorePath(p);
            assert(path == path2);
            /* Hash will be set below. FIXME construct ValidPathInfo at end. */
            auto info = std::make_shared<ValidPathInfo>(path, Hash::dummy);

            PathSet references;
            auto deriver = readString(conn->from);

@@ -111,12 +116,14 @@ struct LegacySSHStore : public Store
            readLongLong(conn->from); // download size
            info->narSize = readLongLong(conn->from);

            if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4) {
            {
                auto s = readString(conn->from);
                info->narHash = s.empty() ? std::optional<Hash>{} : Hash::parseAnyPrefixed(s);
                info->ca = parseContentAddressOpt(readString(conn->from));
                info->sigs = readStrings<StringSet>(conn->from);
                if (s == "")
                    throw Error("NAR hash is now mandatory");
                info->narHash = Hash::parseAnyPrefixed(s);
            }
            info->ca = parseContentAddressOpt(readString(conn->from));
            info->sigs = readStrings<StringSet>(conn->from);

            auto s = readString(conn->from);
            assert(s == "");

@@ -138,7 +145,7 @@ struct LegacySSHStore : public Store
            << cmdAddToStoreNar
            << printStorePath(info.path)
            << (info.deriver ? printStorePath(*info.deriver) : "")
            << info.narHash->to_string(Base16, false);
            << info.narHash.to_string(Base16, false);
        writeStorePaths(*this, conn->to, info.references);
        conn->to
            << info.registrationTime

@@ -202,6 +209,24 @@ struct LegacySSHStore : public Store
        const StorePathSet & references, RepairFlag repair) override
    { unsupported("addTextToStore"); }

private:

    void putBuildSettings(Connection & conn)
    {
        conn.to
            << settings.maxSilentTime
            << settings.buildTimeout;
        if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 2)
            conn.to
                << settings.maxLogSize;
        if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3)
            conn.to
                << settings.buildRepeat
                << settings.enforceDeterminism;
    }

public:

    BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
        BuildMode buildMode) override
    {

@@ -211,16 +236,8 @@ struct LegacySSHStore : public Store
            << cmdBuildDerivation
            << printStorePath(drvPath);
        writeDerivation(conn->to, *this, drv);
        conn->to
            << settings.maxSilentTime
            << settings.buildTimeout;
        if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 2)
            conn->to
                << settings.maxLogSize;
        if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
            conn->to
                << settings.buildRepeat
                << settings.enforceDeterminism;

        putBuildSettings(*conn);

        conn->to.flush();

@@ -234,6 +251,29 @@ struct LegacySSHStore : public Store
        return status;
    }

    void buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, BuildMode buildMode) override
    {
        auto conn(connections->get());

        conn->to << cmdBuildPaths;
        Strings ss;
        for (auto & p : drvPaths)
            ss.push_back(p.to_string(*this));
        conn->to << ss;

        putBuildSettings(*conn);

        conn->to.flush();

        BuildResult result;
        result.status = (BuildResult::Status) readInt(conn->from);

        if (!result.success()) {
            conn->from >> result.errorMsg;
            throw Error(result.status, result.errorMsg);
        }
    }

    void ensurePath(const StorePath & path) override
    { unsupported("ensurePath"); }

@@ -594,7 +594,7 @@ uint64_t LocalStore::addValidPath(State & state,

    state.stmtRegisterValidPath.use()
        (printStorePath(info.path))
        (info.narHash->to_string(Base16, true))
        (info.narHash.to_string(Base16, true))
        (info.registrationTime == 0 ? time(0) : info.registrationTime)
        (info.deriver ? printStorePath(*info.deriver) : "", (bool) info.deriver)
        (info.narSize, info.narSize != 0)

@@ -618,11 +618,11 @@ uint64_t LocalStore::addValidPath(State & state,
           registration above is undone. */
        if (checkOutputs) checkDerivationOutputs(info.path, drv);

        for (auto & i : drv.outputs) {
        for (auto & i : drv.outputsAndPaths(*this)) {
            state.stmtAddDerivationOutput.use()
                (id)
                (i.first)
                (printStorePath(i.second.path(*this, drv.name)))
                (printStorePath(i.second.second))
                .exec();
        }
    }

@@ -641,25 +641,28 @@ void LocalStore::queryPathInfoUncached(const StorePath & path,
    Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
    try {
        auto info = std::make_shared<ValidPathInfo>(path);

        callback(retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
            auto state(_state.lock());

            /* Get the path info. */
            auto useQueryPathInfo(state->stmtQueryPathInfo.use()(printStorePath(info->path)));
            auto useQueryPathInfo(state->stmtQueryPathInfo.use()(printStorePath(path)));

            if (!useQueryPathInfo.next())
                return std::shared_ptr<ValidPathInfo>();

            info->id = useQueryPathInfo.getInt(0);
            auto id = useQueryPathInfo.getInt(0);

            auto narHash = Hash::dummy;
            try {
                info->narHash = Hash::parseAnyPrefixed(useQueryPathInfo.getStr(1));
                narHash = Hash::parseAnyPrefixed(useQueryPathInfo.getStr(1));
            } catch (BadHash & e) {
                throw Error("in valid-path entry for '%s': %s", printStorePath(path), e.what());
                throw Error("invalid-path entry for '%s': %s", printStorePath(path), e.what());
            }

            auto info = std::make_shared<ValidPathInfo>(path, narHash);

            info->id = id;

            info->registrationTime = useQueryPathInfo.getInt(2);

            auto s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 3);

@@ -694,7 +697,7 @@ void LocalStore::updatePathInfo(State & state, const ValidPathInfo & info)
{
    state.stmtUpdatePathInfo.use()
        (info.narSize, info.narSize != 0)
        (info.narHash->to_string(Base16, true))
        (info.narHash.to_string(Base16, true))
        (info.ultimate ? 1 : 0, info.ultimate)
        (concatStringsSep(" ", info.sigs), !info.sigs.empty())
        (renderContentAddress(info.ca), (bool) info.ca)

@@ -782,17 +785,21 @@ StorePathSet LocalStore::queryValidDerivers(const StorePath & path)
}


OutputPathMap LocalStore::queryDerivationOutputMap(const StorePath & path)
std::map<std::string, std::optional<StorePath>> LocalStore::queryPartialDerivationOutputMap(const StorePath & path)
{
    return retrySQLite<OutputPathMap>([&]() {
    std::map<std::string, std::optional<StorePath>> outputs;
    BasicDerivation drv = readDerivation(path);
    for (auto & [outName, _] : drv.outputs) {
        outputs.insert_or_assign(outName, std::nullopt);
    }
    return retrySQLite<std::map<std::string, std::optional<StorePath>>>([&]() {
        auto state(_state.lock());

        auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
            (queryValidPathId(*state, path)));

        OutputPathMap outputs;
        while (useQueryDerivationOutputs.next())
            outputs.emplace(
            outputs.insert_or_assign(
                useQueryDerivationOutputs.getStr(0),
                parseStorePath(useQueryDerivationOutputs.getStr(1))
            );

@@ -920,7 +927,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos)
        StorePathSet paths;

        for (auto & i : infos) {
            assert(i.narHash && i.narHash->type == htSHA256);
            assert(i.narHash.type == htSHA256);
            if (isValidPath_(*state, i.path))
                updatePathInfo(*state, i);
            else

@@ -984,9 +991,6 @@ const PublicKeys & LocalStore::getPublicKeys()
void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
    RepairFlag repair, CheckSigsFlag checkSigs)
{
    if (!info.narHash)
        throw Error("cannot add path '%s' because it lacks a hash", printStorePath(info.path));

    if (requireSigs && checkSigs && !info.checkSignatures(*this, getPublicKeys()))
        throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path));

@@ -1021,11 +1025,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
        else
            hashSink = std::make_unique<HashModuloSink>(htSHA256, std::string(info.path.hashPart()));

        LambdaSource wrapperSource([&](unsigned char * data, size_t len) -> size_t {
            size_t n = source.read(data, len);
            (*hashSink)(data, n);
            return n;
        });
        TeeSource wrapperSource { source, *hashSink };

        restorePath(realPath, wrapperSource);

@@ -1033,7 +1033,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,

        if (hashResult.first != info.narHash)
            throw Error("hash mismatch importing path '%s';\n  wanted: %s\n  got:    %s",
                printStorePath(info.path), info.narHash->to_string(Base32, true), hashResult.first.to_string(Base32, true));
                printStorePath(info.path), info.narHash.to_string(Base32, true), hashResult.first.to_string(Base32, true));

        if (hashResult.second != info.narSize)
            throw Error("size mismatch importing path '%s';\n  wanted: %s\n  got:    %s",

@@ -1155,8 +1155,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,

        optimisePath(realPath);

        ValidPathInfo info(dstPath);
        info.narHash = narHash.first;
        ValidPathInfo info { dstPath, narHash.first };
        info.narSize = narHash.second;
        info.ca = FixedOutputHash { .method = method, .hash = hash };
        registerValidPath(info);

@@ -1199,8 +1198,7 @@ StorePath LocalStore::addTextToStore(const string & name, const string & s,

        optimisePath(realPath);

        ValidPathInfo info(dstPath);
        info.narHash = narHash;
        ValidPathInfo info { dstPath, narHash };
        info.narSize = sink.s->size();
        info.references = references;
        info.ca = TextHash { .hash = hash };

@@ -1315,9 +1313,9 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)

                std::unique_ptr<AbstractHashSink> hashSink;
                if (!info->ca || !info->references.count(info->path))
                    hashSink = std::make_unique<HashSink>(info->narHash->type);
                    hashSink = std::make_unique<HashSink>(info->narHash.type);
                else
                    hashSink = std::make_unique<HashModuloSink>(info->narHash->type, std::string(info->path.hashPart()));
                    hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart()));

                dumpPath(Store::toRealPath(i), *hashSink);
                auto current = hashSink->finish();

@@ -1326,7 +1324,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
                    logError({
                        .name = "Invalid hash - path modified",
                        .hint = hintfmt("path '%s' was modified! expected hash '%s', got '%s'",
                            printStorePath(i), info->narHash->to_string(Base32, true), current.first.to_string(Base32, true))
                            printStorePath(i), info->narHash.to_string(Base32, true), current.first.to_string(Base32, true))
                    });
                    if (repair) repairPath(i); else errors = true;
                } else {

@@ -23,9 +23,6 @@ namespace nix {
const int nixSchemaVersion = 10;


struct Derivation;


struct OptimiseStats
{
    unsigned long filesLinked = 0;

@@ -133,7 +130,7 @@ public:

    StorePathSet queryValidDerivers(const StorePath & path) override;

    OutputPathMap queryDerivationOutputMap(const StorePath & path) override;
    std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path) override;

    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

@@ -1,6 +1,7 @@
#include "machines.hh"
#include "util.hh"
#include "globals.hh"
#include "store-api.hh"

#include <algorithm>

@@ -48,6 +49,29 @@ bool Machine::mandatoryMet(const std::set<string> & features) const {
    });
}

ref<Store> Machine::openStore() const {
    Store::Params storeParams;
    if (hasPrefix(storeUri, "ssh://")) {
        storeParams["max-connections"] = "1";
        storeParams["log-fd"] = "4";
        if (sshKey != "")
            storeParams["ssh-key"] = sshKey;
    }
    {
        auto & fs = storeParams["system-features"];
        auto append = [&](auto feats) {
            for (auto & f : feats) {
                if (fs.size() > 0) fs += ' ';
                fs += f;
            }
        };
        append(supportedFeatures);
        append(mandatoryFeatures);
    }

    return nix::openStore(storeUri, storeParams);
}

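The new `Machine::openStore` helper folds the store-parameter plumbing that `build-remote` used to do inline (SSH key, connection limits, system features) into one place. A minimal usage sketch, assuming a caller that already has a parsed machine list; the function name and loop are hypothetical, but `openStore()` and `Store::connect()` are taken straight from this diff:

```cpp
#include "machines.hh"
#include "store-api.hh"

using namespace nix;

// Hypothetical caller mirroring what build-remote now does: open a
// store connection to each configured machine and verify it works.
void connectToMachines(const Machines & machines)
{
    for (auto & m : machines) {
        // openStore() assembles Store::Params (ssh-key, max-connections,
        // log-fd, system-features) from the machine description.
        ref<Store> store = m.openStore();
        store->connect(); // throws on connection failure
    }
}
```
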
void parseMachines(const std::string & s, Machines & machines)
{
    for (auto line : tokenizeString<std::vector<string>>(s, "\n;")) {

@@ -4,6 +4,8 @@

namespace nix {

class Store;

struct Machine {

    const string storeUri;

@@ -28,6 +30,8 @@ struct Machine {
        decltype(supportedFeatures) supportedFeatures,
        decltype(mandatoryFeatures) mandatoryFeatures,
        decltype(sshPublicHostKey) sshPublicHostKey);

    ref<Store> openStore() const;
};

typedef std::vector<Machine> Machines;

@@ -207,10 +207,10 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
        ParsedDerivation parsedDrv(StorePath(path.path), *drv);

        PathSet invalid;
        for (auto & j : drv->outputs)
        for (auto & j : drv->outputsAndPaths(*this))
            if (wantOutput(j.first, path.outputs)
                && !isValidPath(j.second.path(*this, drv->name)))
                invalid.insert(printStorePath(j.second.path(*this, drv->name)));
                && !isValidPath(j.second.second))
                invalid.insert(printStorePath(j.second.second));
        if (invalid.empty()) return;

        if (settings.useSubstitutes && parsedDrv.substitutesAllowed()) {

@@ -49,7 +49,8 @@ struct NarAccessor : public FSAccessor
        : acc(acc), source(source)
    { }

    void createMember(const Path & path, NarMember member) {
    void createMember(const Path & path, NarMember member)
    {
        size_t level = std::count(path.begin(), path.end(), '/');
        while (parents.size() > level) parents.pop();

@@ -189,13 +189,14 @@ public:
            return {oInvalid, 0};

        auto namePart = queryNAR.getStr(1);
        auto narInfo = make_ref<NarInfo>(StorePath(hashPart + "-" + namePart));
        auto narInfo = make_ref<NarInfo>(
            StorePath(hashPart + "-" + namePart),
            Hash::parseAnyPrefixed(queryNAR.getStr(6)));
        narInfo->url = queryNAR.getStr(2);
        narInfo->compression = queryNAR.getStr(3);
        if (!queryNAR.isNull(4))
            narInfo->fileHash = Hash::parseAnyPrefixed(queryNAR.getStr(4));
        narInfo->fileSize = queryNAR.getInt(5);
        narInfo->narHash = Hash::parseAnyPrefixed(queryNAR.getStr(6));
        narInfo->narSize = queryNAR.getInt(7);
        for (auto & r : tokenizeString<Strings>(queryNAR.getStr(8), " "))
            narInfo->references.insert(StorePath(r));

@@ -232,7 +233,7 @@ public:
                (narInfo ? narInfo->compression : "", narInfo != 0)
                (narInfo && narInfo->fileHash ? narInfo->fileHash->to_string(Base32, true) : "", narInfo && narInfo->fileHash)
                (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
                (info->narHash->to_string(Base32, true))
                (info->narHash.to_string(Base32, true))
                (info->narSize)
                (concatStringsSep(" ", info->shortRefs()))
                (info->deriver ? std::string(info->deriver->to_string()) : "", (bool) info->deriver)

@@ -1,10 +1,11 @@
#include "globals.hh"
#include "nar-info.hh"
#include "store-api.hh"

namespace nix {

NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence)
    : ValidPathInfo(StorePath(StorePath::dummy)) // FIXME: hack
    : ValidPathInfo(StorePath(StorePath::dummy), Hash(Hash::dummy)) // FIXME: hack
{
    auto corrupt = [&]() {
        return Error("NAR info file '%1%' is corrupt", whence);

@@ -19,6 +20,7 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
    };

    bool havePath = false;
    bool haveNarHash = false;

    size_t pos = 0;
    while (pos < s.size()) {

@@ -46,8 +48,10 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
        else if (name == "FileSize") {
            if (!string2Int(value, fileSize)) throw corrupt();
        }
        else if (name == "NarHash")
        else if (name == "NarHash") {
            narHash = parseHashField(value);
            haveNarHash = true;
        }
        else if (name == "NarSize") {
            if (!string2Int(value, narSize)) throw corrupt();
        }

@@ -76,7 +80,7 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &

    if (compression == "") compression = "bzip2";

    if (!havePath || url.empty() || narSize == 0 || !narHash) throw corrupt();
    if (!havePath || !haveNarHash || url.empty() || narSize == 0) throw corrupt();
}

std::string NarInfo::to_string(const Store & store) const

@@ -89,8 +93,8 @@ std::string NarInfo::to_string(const Store & store) const
    assert(fileHash && fileHash->type == htSHA256);
    res += "FileHash: " + fileHash->to_string(Base32, true) + "\n";
    res += "FileSize: " + std::to_string(fileSize) + "\n";
    assert(narHash && narHash->type == htSHA256);
    res += "NarHash: " + narHash->to_string(Base32, true) + "\n";
    assert(narHash.type == htSHA256);
    res += "NarHash: " + narHash.to_string(Base32, true) + "\n";
    res += "NarSize: " + std::to_string(narSize) + "\n";

    res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";

@@ -2,10 +2,12 @@

#include "types.hh"
#include "hash.hh"
#include "store-api.hh"
#include "path-info.hh"

namespace nix {

class Store;

struct NarInfo : ValidPathInfo
{
    std::string url;

@@ -15,7 +17,7 @@ struct NarInfo : ValidPathInfo
    std::string system;

    NarInfo() = delete;
    NarInfo(StorePath && path) : ValidPathInfo(std::move(path)) { }
    NarInfo(StorePath && path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { }
    NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
    NarInfo(const Store & store, const std::string & s, const std::string & whence);

@@ -94,7 +94,7 @@ StringSet ParsedDerivation::getRequiredSystemFeatures() const
    return res;
}

bool ParsedDerivation::canBuildLocally() const
bool ParsedDerivation::canBuildLocally(Store & localStore) const
{
    if (drv.platform != settings.thisSystem.get()
        && !settings.extraPlatforms.get().count(drv.platform)

@@ -102,14 +102,14 @@ bool ParsedDerivation::canBuildLocally() const
        return false;

    for (auto & feature : getRequiredSystemFeatures())
        if (!settings.systemFeatures.get().count(feature)) return false;
        if (!localStore.systemFeatures.get().count(feature)) return false;

    return true;
}

bool ParsedDerivation::willBuildLocally() const
bool ParsedDerivation::willBuildLocally(Store & localStore) const
{
    return getBoolAttr("preferLocalBuild") && canBuildLocally();
    return getBoolAttr("preferLocalBuild") && canBuildLocally(localStore);
}

bool ParsedDerivation::substitutesAllowed() const

@@ -29,9 +29,9 @@ public:

    StringSet getRequiredSystemFeatures() const;

    bool canBuildLocally() const;
    bool canBuildLocally(Store & localStore) const;

    bool willBuildLocally() const;
    bool willBuildLocally(Store & localStore) const;

    bool substitutesAllowed() const;
};

@@ -1,5 +1,6 @@
#pragma once

#include "crypto.hh"
#include "path.hh"
#include "hash.hh"
#include "content-address.hh"

@@ -29,7 +30,7 @@ struct ValidPathInfo
    StorePath path;
    std::optional<StorePath> deriver;
    // TODO document this
    std::optional<Hash> narHash;
    Hash narHash;
    StorePathSet references;
    time_t registrationTime = 0;
    uint64_t narSize = 0; // 0 = unknown

@@ -100,8 +101,8 @@ struct ValidPathInfo

    ValidPathInfo(const ValidPathInfo & other) = default;

    ValidPathInfo(StorePath && path) : path(std::move(path)) { };
    ValidPathInfo(const StorePath & path) : path(path) { };
    ValidPathInfo(StorePath && path, Hash narHash) : path(std::move(path)), narHash(narHash) { };
    ValidPathInfo(const StorePath & path, Hash narHash) : path(path), narHash(narHash) { };

    virtual ~ValidPathInfo() { }
};

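Making `narHash` a plain `Hash` means every `ValidPathInfo` now has to be constructed with a hash up front; the old "construct, then assign `narHash`" pattern no longer compiles. A hedged sketch of the new construction style (the helper function and its argument names are illustrative, not part of the diff):

```cpp
#include "path-info.hh"

using namespace nix;

// Sketch: narHash is mandatory at construction time, so callers pass it
// to the constructor instead of assigning it afterwards.
ValidPathInfo makeInfo(const StorePath & path, const Hash & narHash, uint64_t narSize)
{
    ValidPathInfo info { path, narHash }; // copies path, sets narHash
    info.narSize = narSize;               // optional fields still assigned
    return info;
}
```
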
@@ -72,7 +72,7 @@ static void makeName(const Path & profile, GenerationNumber num,
}


Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
{
    /* The new generation number should be higher than the previous
       ones. */

@@ -82,7 +82,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
    if (gens.size() > 0) {
        Generation last = gens.back();

        if (readLink(last.path) == outPath) {
        if (readLink(last.path) == store->printStorePath(outPath)) {
            /* We only create a new generation symlink if it differs
               from the last one.

@@ -105,7 +105,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
       user environment etc. we've just built. */
    Path generation;
    makeName(profile, num + 1, generation);
    store->addPermRoot(store->parseStorePath(outPath), generation, false, true);
    store->addPermRoot(outPath, generation);

    return generation;
}

@@ -8,6 +8,8 @@

namespace nix {

class StorePath;


typedef unsigned int GenerationNumber;

@@ -28,7 +30,7 @@ std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path pro

class LocalFSStore;

Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath);
Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath);

void deleteGeneration(const Path & profile, GenerationNumber gen);

@@ -31,7 +31,6 @@ template<> StorePathSet readStorePaths(const Store & store, Source & from)
    return paths;
}


void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths)
{
    out << paths.size();

@@ -39,6 +38,7 @@ void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths
        out << store.printStorePath(i);
}


StorePathCAMap readStorePathCAMap(const Store & store, Source & from)
{
    StorePathCAMap paths;

@@ -57,30 +57,36 @@ void writeStorePathCAMap(const Store & store, Sink & out, const StorePathCAMap &
    }
}

std::map<string, StorePath> readOutputPathMap(const Store & store, Source & from)

namespace worker_proto {

StorePath read(const Store & store, Source & from, Phantom<StorePath> _)
{
    std::map<string, StorePath> pathMap;
    auto rawInput = readStrings<Strings>(from);
    if (rawInput.size() % 2)
        throw Error("got an odd number of elements from the daemon when trying to read a output path map");
    auto curInput = rawInput.begin();
    while (curInput != rawInput.end()) {
        auto thisKey = *curInput++;
        auto thisValue = *curInput++;
        pathMap.emplace(thisKey, store.parseStorePath(thisValue));
    }
    return pathMap;
    return store.parseStorePath(readString(from));
}

void writeOutputPathMap(const Store & store, Sink & out, const std::map<string, StorePath> & pathMap)
void write(const Store & store, Sink & out, const StorePath & storePath)
{
    out << 2*pathMap.size();
    for (auto & i : pathMap) {
        out << i.first;
        out << store.printStorePath(i.second);
    }
    out << store.printStorePath(storePath);
}


template<>
std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
{
    auto s = readString(from);
    return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
}

template<>
void write(const Store & store, Sink & out, const std::optional<StorePath> & storePathOpt)
{
    out << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
}

}


/* TODO: Separate these store impls into different files, give them better names */
RemoteStore::RemoteStore(const Params & params)
    : Store(params)

@@ -278,9 +284,9 @@ struct ConnectionHandle

    RemoteStore::Connection * operator -> () { return &*handle; }

    void processStderr(Sink * sink = 0, Source * source = 0)
    void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true)
    {
        auto ex = handle->processStderr(sink, source);
        auto ex = handle->processStderr(sink, source, flush);
        if (ex) {
            daemonException = true;
            std::rethrow_exception(ex);

@@ -419,10 +425,10 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
            bool valid; conn->from >> valid;
            if (!valid) throw InvalidPath("path '%s' is not valid", printStorePath(path));
        }
        info = std::make_shared<ValidPathInfo>(StorePath(path));
        auto deriver = readString(conn->from);
        auto narHash = Hash::parseAny(readString(conn->from), htSHA256);
        info = std::make_shared<ValidPathInfo>(path, narHash);
        if (deriver != "") info->deriver = parseStorePath(deriver);
        info->narHash = Hash::parseAny(readString(conn->from), htSHA256);
        info->references = readStorePaths<StorePathSet>(*this, conn->from);
        conn->from >> info->registrationTime >> info->narSize;
        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {

@@ -468,12 +474,12 @@ StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
}


OutputPathMap RemoteStore::queryDerivationOutputMap(const StorePath & path)
std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivationOutputMap(const StorePath & path)
{
    auto conn(getConnection());
    conn->to << wopQueryDerivationOutputMap << printStorePath(path);
    conn.processStderr();
    return readOutputPathMap(*this, conn->from);
    return worker_proto::read(*this, conn->from, Phantom<std::map<std::string, std::optional<StorePath>>> {});

}

@@ -521,7 +527,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
        conn->to << wopAddToStoreNar
                 << printStorePath(info.path)
                 << (info.deriver ? printStorePath(*info.deriver) : "")
                 << info.narHash->to_string(Base16, false);
                 << info.narHash.to_string(Base16, false);
        writeStorePaths(*this, conn->to, info.references);
        conn->to << info.registrationTime << info.narSize
                 << info.ultimate << info.sigs << renderContentAddress(info.ca)

@@ -529,6 +535,8 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,

        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 23) {

            conn->to.flush();

            std::exception_ptr ex;

            struct FramedSink : BufferedSink

@@ -568,7 +576,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
            std::thread stderrThread([&]()
            {
                try {
                    conn.processStderr();
                    conn.processStderr(nullptr, nullptr, false);
                } catch (...) {
                    ex = std::current_exception();
                }

@@ -878,9 +886,10 @@ static Logger::Fields readFields(Source & from)
}


std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source)
std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source, bool flush)
{
    to.flush();
    if (flush)
        to.flush();

    while (true) {

@@ -51,7 +51,7 @@ public:

    StorePathSet queryDerivationOutputs(const StorePath & path) override;

    OutputPathMap queryDerivationOutputMap(const StorePath & path) override;
    std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path) override;
    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

    StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;

@@ -114,7 +114,7 @@ protected:

        virtual ~Connection();

        std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0);
        std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
    };

    ref<Connection> openConnectionWrapper();

@@ -1,11 +1,11 @@
#include "crypto.hh"
#include "fs-accessor.hh"
#include "globals.hh"
#include "store-api.hh"
#include "util.hh"
#include "nar-info-disk-cache.hh"
#include "thread-pool.hh"
#include "json.hh"
#include "derivations.hh"
#include "url.hh"
#include "archive.hh"

@@ -320,8 +320,10 @@ ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath,
    if (expectedCAHash && expectedCAHash != hash)
        throw Error("hash mismatch for '%s'", srcPath);

    ValidPathInfo info(makeFixedOutputPath(method, hash, name));
    info.narHash = narHash;
    ValidPathInfo info {
        makeFixedOutputPath(method, hash, name),
        narHash,
    };
    info.narSize = narSize;
    info.ca = FixedOutputHash { .method = method, .hash = hash };

@@ -357,6 +359,17 @@ bool Store::PathInfoCacheValue::isKnownNow()
    return std::chrono::steady_clock::now() < time_point + ttl;
}

OutputPathMap Store::queryDerivationOutputMap(const StorePath & path) {
    auto resp = queryPartialDerivationOutputMap(path);
    OutputPathMap result;
    for (auto & [outName, optOutPath] : resp) {
        if (!optOutPath)
            throw Error("output '%s' has no store path mapped to it", outName);
        result.insert_or_assign(outName, *optOutPath);
    }
    return result;
}

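The partial/total split above lets callers pick their failure mode: `queryPartialDerivationOutputMap` reports unbuilt outputs as `std::nullopt`, while the `queryDerivationOutputMap` wrapper throws if anything is unresolved. A hedged sketch of the two calling patterns (the helper function and derivation path are illustrative):

```cpp
#include "store-api.hh"

using namespace nix;

// Illustrative only: list a derivation's outputs two ways.
void printOutputs(Store & store, const StorePath & drvPath)
{
    // Strict: throws if any output lacks a mapped store path.
    for (auto & [name, outPath] : store.queryDerivationOutputMap(drvPath))
        printInfo("%s -> %s", name, store.printStorePath(outPath));

    // Tolerant: missing outputs come back as std::nullopt.
    for (auto & [name, optPath] : store.queryPartialDerivationOutputMap(drvPath))
        if (!optPath)
            printInfo("output '%s' has no known store path yet", name);
}
```
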
StorePathSet Store::queryDerivationOutputs(const StorePath & path)
{
    auto outputMap = this->queryDerivationOutputMap(path);

@@ -565,7 +578,7 @@ string Store::makeValidityRegistration(const StorePathSet & paths,
        auto info = queryPathInfo(i);

        if (showHash) {
            s += info->narHash->to_string(Base16, false) + "\n";
            s += info->narHash.to_string(Base16, false) + "\n";
            s += (format("%1%\n") % info->narSize).str();
        }

@@ -597,7 +610,7 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & store
            auto info = queryPathInfo(storePath);

            jsonPath
                .attr("narHash", info->narHash->to_string(hashBase, true))
                .attr("narHash", info->narHash.to_string(hashBase, true))
                .attr("narSize", info->narSize);

            {

@@ -725,20 +738,6 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
        info = info2;
    }

    if (!info->narHash) {
        StringSink sink;
        srcStore->narFromPath({storePath}, sink);
        auto info2 = make_ref<ValidPathInfo>(*info);
        info2->narHash = hashString(htSHA256, *sink.s);
        if (!info->narSize) info2->narSize = sink.s->size();
        if (info->ultimate) info2->ultimate = false;
        info = info2;

        StringSource source(*sink.s);
        dstStore->addToStore(*info, source, repair, checkSigs);
        return;
    }

    if (info->ultimate) {
        auto info2 = make_ref<ValidPathInfo>(*info);
        info2->ultimate = false;

@@ -746,12 +745,12 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
    }

    auto source = sinkToSource([&](Sink & sink) {
        LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
            sink(data, len);
        LambdaSink progressSink([&](const unsigned char * data, size_t len) {
            total += len;
            act.progress(total, info->narSize);
        });
        srcStore->narFromPath(storePath, wrapperSink);
        TeeSink tee { sink, progressSink };
        srcStore->narFromPath(storePath, tee);
    }, [&]() {
        throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", srcStore->printStorePath(storePath), srcStore->getUri());
    });

@@ -863,19 +862,22 @@ void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
}


std::optional<ValidPathInfo> decodeValidPathInfo(const Store & store, std::istream & str, bool hashGiven)
std::optional<ValidPathInfo> decodeValidPathInfo(const Store & store, std::istream & str, std::optional<HashResult> hashGiven)
{
    std::string path;
    getline(str, path);
    if (str.eof()) { return {}; }
    ValidPathInfo info(store.parseStorePath(path));
    if (hashGiven) {
    if (!hashGiven) {
        string s;
        getline(str, s);
        info.narHash = Hash::parseAny(s, htSHA256);
        auto narHash = Hash::parseAny(s, htSHA256);
        getline(str, s);
        if (!string2Int(s, info.narSize)) throw Error("number expected");
        uint64_t narSize;
        if (!string2Int(s, narSize)) throw Error("number expected");
        hashGiven = { narHash, narSize };
    }
    ValidPathInfo info(store.parseStorePath(path), hashGiven->first);
    info.narSize = hashGiven->second;
    std::string deriver;
    getline(str, deriver);
    if (deriver != "") info.deriver = store.parseStorePath(deriver);

@@ -910,12 +912,12 @@ string showPaths(const PathSet & paths)

std::string ValidPathInfo::fingerprint(const Store & store) const
{
    if (narSize == 0 || !narHash)
        throw Error("cannot calculate fingerprint of path '%s' because its size/hash is not known",
    if (narSize == 0)
        throw Error("cannot calculate fingerprint of path '%s' because its size is not known",
            store.printStorePath(path));
    return
        "1;" + store.printStorePath(path) + ";"
        + narHash->to_string(Base32, true) + ";"
        + narHash.to_string(Base32, true) + ";"
        + std::to_string(narSize) + ";"
        + concatStringsSep(",", store.printStorePathSet(references));
}

@@ -981,6 +983,26 @@ Strings ValidPathInfo::shortRefs() const
}


Derivation Store::derivationFromPath(const StorePath & drvPath)
{
    ensurePath(drvPath);
    return readDerivation(drvPath);
}


Derivation Store::readDerivation(const StorePath & drvPath)
{
    auto accessor = getFSAccessor();
    try {
        return parseDerivation(*this,
            accessor->readFile(printStorePath(drvPath)),
            Derivation::nameFromPath(drvPath));
    } catch (FormatError & e) {
        throw Error("error parsing derivation '%s': %s", printStorePath(drvPath), e.msg());
    }
}


}

@@ -4,7 +4,6 @@
#include "hash.hh"
#include "content-address.hh"
#include "serialise.hh"
#include "crypto.hh"
#include "lru-cache.hh"
#include "sync.hh"
#include "globals.hh"

@@ -164,6 +163,10 @@ public:

    Setting<bool> wantMassQuery{this, false, "want-mass-query", "whether this substituter can be queried efficiently for path validity"};

    Setting<StringSet> systemFeatures{this, settings.systemFeatures,
        "system-features",
        "Optional features that the system this store builds on implements (like \"kvm\")."};

protected:

    struct PathInfoCacheValue {

@@ -340,9 +343,15 @@ public:
    /* Query the outputs of the derivation denoted by `path'. */
    virtual StorePathSet queryDerivationOutputs(const StorePath & path);

    /* Query the mapping outputName=>outputPath for the given derivation */
    virtual OutputPathMap queryDerivationOutputMap(const StorePath & path)
    { unsupported("queryDerivationOutputMap"); }
    /* Query the mapping outputName => outputPath for the given derivation. All
       outputs are mentioned, so ones missing from the mapping are mapped to
       `std::nullopt`. */
    virtual std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path)
    { unsupported("queryPartialDerivationOutputMap"); }

    /* Query the mapping outputName=>outputPath for the given derivation.
       Assume every output has a mapping and throw an exception otherwise. */
    OutputPathMap queryDerivationOutputMap(const StorePath & path);

    /* Query the full store path given the hash part of a valid store
       path, or empty if the path doesn't exist. */

@@ -640,8 +649,7 @@ public:
    ref<FSAccessor> getFSAccessor() override;

    /* Register a permanent GC root. */
    Path addPermRoot(const StorePath & storePath,
        const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false);
    Path addPermRoot(const StorePath & storePath, const Path & gcRoot);

    virtual Path getRealStoreDir() { return storeDir; }

@@ -761,7 +769,7 @@ string showPaths(const PathSet & paths);
std::optional<ValidPathInfo> decodeValidPathInfo(
    const Store & store,
    std::istream & str,
    bool hashGiven = false);
    std::optional<HashResult> hashGiven = std::nullopt);

/* Split URI into protocol+hierarchy part and its parameter set. */
std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri);

@@ -6,7 +6,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f

#define PROTOCOL_VERSION 0x117
#define PROTOCOL_VERSION 0x118
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

@@ -70,10 +70,84 @@ template<class T> T readStorePaths(const Store & store, Source & from);

void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths);

/* To guide overloading */
template<typename T>
struct Phantom {};


namespace worker_proto {
/* FIXME maybe move more stuff inside here */

StorePath read(const Store & store, Source & from, Phantom<StorePath> _);
void write(const Store & store, Sink & out, const StorePath & storePath);

template<typename T>
std::map<std::string, T> read(const Store & store, Source & from, Phantom<std::map<std::string, T>> _);
template<typename T>
void write(const Store & store, Sink & out, const std::map<string, T> & resMap);
template<typename T>
std::optional<T> read(const Store & store, Source & from, Phantom<std::optional<T>> _);
template<typename T>
void write(const Store & store, Sink & out, const std::optional<T> & optVal);

/* Specialization which uses an empty string for the empty case, taking
   advantage of the fact StorePaths always serialize to a non-empty string.
   This is done primarily for backwards compatibility, so that StorePath <=
   std::optional<StorePath>, where <= is the compatibility partial order.
*/
template<>
void write(const Store & store, Sink & out, const std::optional<StorePath> & optVal);

template<typename T>
std::map<std::string, T> read(const Store & store, Source & from, Phantom<std::map<std::string, T>> _)
{
    std::map<string, T> resMap;
    auto size = (size_t)readInt(from);
    while (size--) {
        auto thisKey = readString(from);
        resMap.insert_or_assign(std::move(thisKey), nix::worker_proto::read(store, from, Phantom<T> {}));
    }
    return resMap;
}

template<typename T>
void write(const Store & store, Sink & out, const std::map<string, T> & resMap)
{
    out << resMap.size();
    for (auto & i : resMap) {
        out << i.first;
        nix::worker_proto::write(store, out, i.second);
    }
}

template<typename T>
std::optional<T> read(const Store & store, Source & from, Phantom<std::optional<T>> _)
{
    auto tag = readNum<uint8_t>(from);
    switch (tag) {
    case 0:
        return std::nullopt;
    case 1:
        return nix::worker_proto::read(store, from, Phantom<T> {});
    default:
        throw Error("got an invalid tag bit for std::optional: %#04x", tag);
    }
}

template<typename T>
void write(const Store & store, Sink & out, const std::optional<T> & optVal)
{
    out << (optVal ? 1 : 0);
    if (optVal)
        nix::worker_proto::write(store, out, *optVal);
}


}


StorePathCAMap readStorePathCAMap(const Store & store, Source & from);

void writeStorePathCAMap(const Store & store, Sink & out, const StorePathCAMap & paths);

void writeOutputPathMap(const Store & store, Sink & out, const OutputPathMap & paths);

}

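These `worker_proto::read`/`write` overloads compose, so a nested type such as `std::map<std::string, std::optional<StorePath>>` serializes with no bespoke code: the map template recurses into the optional template, which recurses into the `StorePath` base case. The `Phantom<T>` tag exists only to tell overload resolution which `read` to pick, since the return type alone cannot. A small sketch of both directions, modelled directly on the `queryPartialDerivationOutputMap` call site in this diff:

```cpp
// Sketch: round-tripping the partial output map over the daemon protocol.
// Phantom<T> carries the target type, so no dummy value is needed.
std::map<std::string, std::optional<nix::StorePath>>
readPartialOutputs(const nix::Store & store, nix::Source & from)
{
    return nix::worker_proto::read(store, from,
        nix::Phantom<std::map<std::string, std::optional<nix::StorePath>>> {});
}

void writePartialOutputs(const nix::Store & store, nix::Sink & out,
    const std::map<std::string, std::optional<nix::StorePath>> & outputs)
{
    // Writes the size, then key/value pairs; nullopt becomes "" on the wire
    // (the backwards-compatible empty-string encoding described above).
    nix::worker_proto::write(store, out, outputs);
}
```
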
@@ -366,11 +366,7 @@ void copyNAR(Source & source, Sink & sink)

    ParseSink parseSink; /* null sink; just parse the NAR */

    LambdaSource wrapper([&](unsigned char * data, size_t len) {
        auto n = source.read(data, len);
        sink(data, n);
        return n;
    });
    TeeSource wrapper { source, sink };

    parseDump(parseSink, wrapper);
}

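`TeeSource` captures the pattern the deleted `LambdaSource` implemented by hand: every chunk read from the wrapped source is mirrored into an attached sink as a side effect, which is also how the `LocalStore::addToStore` hunk earlier hashes a NAR while restoring it. A hedged sketch of that idiom under the same assumption (the helper name and the read loop are illustrative, not from the diff):

```cpp
#include "hash.hh"
#include "serialise.hh"

#include <vector>

using namespace nix;

// Sketch: drain a source while computing its SHA-256 on the side.
// Every read() through the tee also feeds hashSink.
HashResult hashWhileDraining(Source & source)
{
    HashSink hashSink(htSHA256);
    TeeSource tee { source, hashSink };
    std::vector<unsigned char> buf(65536);
    try {
        while (true)
            tee.read(buf.data(), buf.size()); // mirrored into hashSink
    } catch (EndOfFile &) {
        // source exhausted; fall through
    }
    return hashSink.finish(); // (hash, number of bytes seen)
}
```
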
@@ -3,6 +3,8 @@

#include <glob.h>

#include <nlohmann/json.hpp>

namespace nix {

void Args::addFlag(Flag && flag_)

@@ -205,6 +207,43 @@ bool Args::processArgs(const Strings & args, bool finish)
    return res;
}

nlohmann::json Args::toJSON()
{
    auto flags = nlohmann::json::object();

    for (auto & [name, flag] : longFlags) {
        auto j = nlohmann::json::object();
        if (flag->shortName)
            j["shortName"] = std::string(1, flag->shortName);
        if (flag->description != "")
            j["description"] = flag->description;
        if (flag->category != "")
            j["category"] = flag->category;
        if (flag->handler.arity != ArityAny)
            j["arity"] = flag->handler.arity;
        if (!flag->labels.empty())
            j["labels"] = flag->labels;
        flags[name] = std::move(j);
    }

    auto args = nlohmann::json::array();

    for (auto & arg : expectedArgs) {
        auto j = nlohmann::json::object();
        j["label"] = arg.label;
        j["optional"] = arg.optional;
        if (arg.handler.arity != ArityAny)
            j["arity"] = arg.handler.arity;
        args.push_back(std::move(j));
    }

    auto res = nlohmann::json::object();
    res["description"] = description();
    res["flags"] = std::move(flags);
    res["args"] = std::move(args);
    return res;
}

static void hashTypeCompleter(size_t index, std::string_view prefix)
{
    for (auto & type : hashTypes)

@@ -313,11 +352,29 @@ void Command::printHelp(const string & programName, std::ostream & out)
        }
    }

nlohmann::json Command::toJSON()
{
    auto exs = nlohmann::json::array();

    for (auto & example : examples()) {
        auto ex = nlohmann::json::object();
        ex["description"] = example.description;
        ex["command"] = chomp(stripIndentation(example.command));
        exs.push_back(std::move(ex));
    }

    auto res = Args::toJSON();
    res["examples"] = std::move(exs);
    auto s = doc();
    if (s != "") res.emplace("doc", stripIndentation(s));
    return res;
}

MultiCommand::MultiCommand(const Commands & commands)
    : commands(commands)
{
    expectArgs({
        .label = "command",
        .label = "subcommand",
        .optional = true,
        .handler = {[=](std::string s) {
            assert(!command);

@@ -387,4 +444,20 @@ bool MultiCommand::processArgs(const Strings & args, bool finish)
        return Args::processArgs(args, finish);
}

nlohmann::json MultiCommand::toJSON()
{
    auto cmds = nlohmann::json::object();

    for (auto & [name, commandFun] : commands) {
        auto command = commandFun();
        auto j = command->toJSON();
        j["category"] = categories[command->category()];
        cmds[name] = std::move(j);
    }

    auto res = Args::toJSON();
    res["commands"] = std::move(cmds);
    return res;
}

}

@@ -4,6 +4,8 @@
#include <map>
#include <memory>

#include <nlohmann/json_fwd.hpp>

#include "util.hh"

namespace nix {

@@ -20,6 +22,7 @@ public:

    virtual void printHelp(const string & programName, std::ostream & out);

    /* Return a short one-line description of the command. */
    virtual std::string description() { return ""; }

protected:

@@ -203,6 +206,8 @@ public:
        });
    }

    virtual nlohmann::json toJSON();

    friend class MultiCommand;
};

@@ -217,6 +222,9 @@ struct Command : virtual Args
    virtual void prepare() { };
    virtual void run() = 0;

    /* Return documentation about this command, in Markdown format. */
    virtual std::string doc() { return ""; }

    struct Example
    {
        std::string description;

@@ -234,6 +242,8 @@ struct Command : virtual Args
    virtual Category category() { return catDefault; }

    void printHelp(const string & programName, std::ostream & out) override;

    nlohmann::json toJSON() override;
};

typedef std::map<std::string, std::function<ref<Command>()>> Commands;

@@ -259,6 +269,8 @@ public:
    bool processFlag(Strings::iterator & pos, Strings::iterator end) override;

    bool processArgs(const Strings & args, bool finish) override;

    nlohmann::json toJSON() override;
};

Strings argvToStrings(int argc, char * * argv);

@ -1,6 +1,7 @@
|
|||
#include "config.hh"
|
||||
#include "args.hh"
|
||||
#include "json.hh"
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
@ -131,15 +132,18 @@ void Config::resetOverriden()
|
|||
s.second.setting->overriden = false;
|
||||
}
|
||||
|
||||
void Config::toJSON(JSONObject & out)
|
||||
nlohmann::json Config::toJSON()
|
||||
{
|
||||
auto res = nlohmann::json::object();
|
||||
for (auto & s : _settings)
|
||||
if (!s.second.isAlias) {
|
||||
JSONObject out2(out.object(s.first));
|
||||
out2.attr("description", s.second.setting->description);
|
||||
JSONPlaceholder out3(out2.placeholder("value"));
|
||||
s.second.setting->toJSON(out3);
|
||||
auto obj = nlohmann::json::object();
|
||||
obj.emplace("description", s.second.setting->description);
|
||||
obj.emplace("aliases", s.second.setting->aliases);
|
||||
obj.emplace("value", s.second.setting->toJSON());
|
||||
res.emplace(s.first, obj);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void Config::convertToArgs(Args & args, const std::string & category)
|
||||
|
@ -153,7 +157,7 @@ AbstractSetting::AbstractSetting(
|
|||
const std::string & name,
|
||||
const std::string & description,
|
||||
const std::set<std::string> & aliases)
|
||||
: name(name), description(description), aliases(aliases)
|
||||
: name(name), description(stripIndentation(description)), aliases(aliases)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -162,9 +166,9 @@ void AbstractSetting::setDefault(const std::string & str)
|
|||
if (!overriden) set(str);
|
||||
}
|
||||
|
||||
void AbstractSetting::toJSON(JSONPlaceholder & out)
|
||||
nlohmann::json AbstractSetting::toJSON()
|
||||
{
|
||||
out.write(to_string());
|
||||
return to_string();
|
||||
}
|
||||
|
||||
void AbstractSetting::convertToArg(Args & args, const std::string & category)
|
||||
|
@ -172,9 +176,9 @@ void AbstractSetting::convertToArg(Args & args, const std::string & category)
|
|||
}
|
||||
|
||||
template<typename T>
|
||||
void BaseSetting<T>::toJSON(JSONPlaceholder & out)
|
||||
nlohmann::json BaseSetting<T>::toJSON()
|
||||
{
|
||||
out.write(value);
|
||||
return value;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
|
@ -255,11 +259,9 @@ template<> std::string BaseSetting<Strings>::to_string() const
|
|||
return concatStringsSep(" ", value);
|
||||
}
|
||||
|
||||
template<> void BaseSetting<Strings>::toJSON(JSONPlaceholder & out)
|
||||
template<> nlohmann::json BaseSetting<Strings>::toJSON()
|
||||
{
|
||||
JSONList list(out.list());
|
||||
for (auto & s : value)
|
||||
list.elem(s);
|
||||
return value;
|
||||
}
|
||||
|
||||
template<> void BaseSetting<StringSet>::set(const std::string & str)
|
||||
|
@ -272,11 +274,9 @@ template<> std::string BaseSetting<StringSet>::to_string() const
|
|||
return concatStringsSep(" ", value);
|
||||
}
|
||||
|
||||
template<> void BaseSetting<StringSet>::toJSON(JSONPlaceholder & out)
|
||||
template<> nlohmann::json BaseSetting<StringSet>::toJSON()
|
||||
{
|
||||
JSONList list(out.list());
|
||||
for (auto & s : value)
|
||||
list.elem(s);
|
||||
return value;
|
||||
}
|
||||
|
||||
template class BaseSetting<int>;
|
||||
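Editor's note (an illustration, not part of the diff): the `return value;` bodies above compile because `nlohmann::json` converts implicitly from scalars and standard containers, so each `BaseSetting<T>` specialization can simply hand back its raw value:

```cpp
#include <list>
#include <string>
#include <nlohmann/json.hpp>

// Implicit conversions that the new toJSON() overrides rely on.
nlohmann::json a = 42;                                 // -> 42
nlohmann::json b = std::string("warn");                // -> "warn"
nlohmann::json c = std::list<std::string>{"x", "y"};   // -> ["x","y"]
```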
@ -323,10 +323,12 @@ void GlobalConfig::resetOverriden()
        config->resetOverriden();
}

void GlobalConfig::toJSON(JSONObject & out)
nlohmann::json GlobalConfig::toJSON()
{
    auto res = nlohmann::json::object();
    for (auto & config : *configRegistrations)
        config->toJSON(out);
        res.update(config->toJSON());
    return res;
}

void GlobalConfig::convertToArgs(Args & args, const std::string & category)
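Editor's note (a minimal sketch, not part of the diff): with the new interface a caller gets a plain `nlohmann::json` value back and serializes it itself, mirroring what the updated unit tests further down assert:

```cpp
#include <iostream>
#include "config.hh"

int main()
{
    nix::Config config;
    nix::Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
    setting.assign("value");

    // Prints: {"name-of-the-setting":{"aliases":[],"description":"description\n","value":"value"}}
    std::cout << config.toJSON().dump() << "\n";
}
```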
@ -4,6 +4,8 @@

#include "types.hh"

#include <nlohmann/json_fwd.hpp>

#pragma once

namespace nix {

@ -42,8 +44,6 @@ namespace nix {

class Args;
class AbstractSetting;
class JSONPlaceholder;
class JSONObject;

class AbstractConfig
{

@ -97,7 +97,7 @@ public:
     * Outputs all settings to JSON
     * - out: JSONObject to write the configuration to
     */
    virtual void toJSON(JSONObject & out) = 0;
    virtual nlohmann::json toJSON() = 0;

    /**
     * Converts settings to `Args` to be used on the command line interface

@ -167,7 +167,7 @@ public:

    void resetOverriden() override;

    void toJSON(JSONObject & out) override;
    nlohmann::json toJSON() override;

    void convertToArgs(Args & args, const std::string & category) override;
};

@ -206,7 +206,7 @@ protected:

    virtual std::string to_string() const = 0;

    virtual void toJSON(JSONPlaceholder & out);
    virtual nlohmann::json toJSON();

    virtual void convertToArg(Args & args, const std::string & category);

@ -251,7 +251,7 @@ public:

    void convertToArg(Args & args, const std::string & category) override;

    void toJSON(JSONPlaceholder & out) override;
    nlohmann::json toJSON() override;
};

template<typename T>

@ -319,7 +319,7 @@ struct GlobalConfig : public AbstractConfig

    void resetOverriden() override;

    void toJSON(JSONObject & out) override;
    nlohmann::json toJSON() override;

    void convertToArgs(Args & args, const std::string & category) override;
@ -136,6 +136,8 @@ std::string Hash::to_string(Base base, bool includeType) const
    return s;
}

Hash Hash::dummy(htSHA256);

Hash Hash::parseSRI(std::string_view original) {
    auto rest = original;
@ -59,9 +59,6 @@ private:
    Hash(std::string_view s, HashType type, bool isSRI);

public:
    /* Check whether a hash is set. */
    operator bool () const { return (bool) type; }

    /* Check whether two hashes are equal. */
    bool operator == (const Hash & h2) const;

@ -105,6 +102,8 @@ public:
        assert(type == htSHA1);
        return std::string(to_string(Base16, false), 0, 7);
    }

    static Hash dummy;
};

/* Helper that defaults empty hashes to the 0 hash. */
@ -184,6 +184,33 @@ struct JSONLogger : Logger {
        json["action"] = "msg";
        json["level"] = ei.level;
        json["msg"] = oss.str();
        json["raw_msg"] = ei.hint->str();

        if (ei.errPos.has_value() && (*ei.errPos)) {
            json["line"] = ei.errPos->line;
            json["column"] = ei.errPos->column;
            json["file"] = ei.errPos->file;
        } else {
            json["line"] = nullptr;
            json["column"] = nullptr;
            json["file"] = nullptr;
        }

        if (loggerSettings.showTrace.get() && !ei.traces.empty()) {
            nlohmann::json traces = nlohmann::json::array();
            for (auto iter = ei.traces.rbegin(); iter != ei.traces.rend(); ++iter) {
                nlohmann::json stackFrame;
                stackFrame["raw_msg"] = iter->hint.str();
                if (iter->pos.has_value() && (*iter->pos)) {
                    stackFrame["line"] = iter->pos->line;
                    stackFrame["column"] = iter->pos->column;
                    stackFrame["file"] = iter->pos->file;
                }
                traces.push_back(stackFrame);
            }

            json["trace"] = traces;
        }

        write(json);
    }
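Editor's note (a sketch of my own, not part of the diff): a consumer can parse these structured log lines with nlohmann::json as well; the `@nix ` prefix and the field names below match what the logger above emits (see the unit test further down for a concrete line):

```cpp
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

// Minimal sketch: handle one "@nix {...}" line produced by JSONLogger.
void handleLogLine(const std::string & line)
{
    if (line.rfind("@nix ", 0) != 0) return; // not a structured log line
    auto json = nlohmann::json::parse(line.substr(5));
    std::cerr << "level " << json["level"] << ": "
              << json["msg"].get<std::string>() << "\n";
    if (json.contains("trace"))
        for (auto & frame : json["trace"])
            std::cerr << "  at " << frame["raw_msg"].get<std::string>() << "\n";
}
```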
@ -37,10 +37,12 @@ typedef uint64_t ActivityId;

struct LoggerSettings : Config
{
    Setting<bool> showTrace{this,
        false,
        "show-trace",
        "Whether to show a stack trace on evaluation errors."};
    Setting<bool> showTrace{
        this, false, "show-trace",
        R"(
          Whether Nix should print out a stack trace in case of Nix
          expression evaluation errors.
        )"};
};

extern LoggerSettings loggerSettings;
@ -23,7 +23,8 @@ struct Sink
};


/* A buffered abstract sink. */
/* A buffered abstract sink. Warning: a BufferedSink should not be
   used from multiple threads concurrently. */
struct BufferedSink : virtual Sink
{
    size_t bufSize, bufPos;

@ -66,7 +67,8 @@ struct Source
};


/* A buffered abstract source. */
/* A buffered abstract source. Warning: a BufferedSource should not be
   used from multiple threads concurrently. */
struct BufferedSource : Source
{
    size_t bufSize, bufPosIn, bufPosOut;

@ -225,6 +227,17 @@ struct SizedSource : Source
    }
};

/* A sink that just counts the number of bytes given to it. */
struct LengthSink : Sink
{
    uint64_t length = 0;

    virtual void operator () (const unsigned char * _, size_t len)
    {
        length += len;
    }
};
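Editor's note (an illustrative sketch, not part of the diff): `LengthSink` lets a caller measure a stream without buffering it. For example, sizing a NAR through the existing `Store::narFromPath` interface, assuming a `Store & store` and a valid `StorePath path` are in scope:

```cpp
// Count the bytes of a serialized NAR without materializing them.
LengthSink narSize;
store.narFromPath(path, narSize);
std::cout << "NAR is " << narSize.length << " bytes\n";
```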
/* Convert a function into a sink. */
struct LambdaSink : Sink
{
@ -1,9 +1,9 @@
#include "json.hh"
#include "config.hh"
#include "args.hh"

#include <sstream>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>

namespace nix {

@ -33,7 +33,7 @@ namespace nix {
        const auto iter = settings.find("name-of-the-setting");
        ASSERT_NE(iter, settings.end());
        ASSERT_EQ(iter->second.value, "");
        ASSERT_EQ(iter->second.description, "description");
        ASSERT_EQ(iter->second.description, "description\n");
    }

    TEST(Config, getDefinedOverridenSettingNotSet) {

@ -59,7 +59,7 @@ namespace nix {
        const auto iter = settings.find("name-of-the-setting");
        ASSERT_NE(iter, settings.end());
        ASSERT_EQ(iter->second.value, "value");
        ASSERT_EQ(iter->second.description, "description");
        ASSERT_EQ(iter->second.description, "description\n");
    }

    TEST(Config, getDefinedSettingSet2) {

@ -73,7 +73,7 @@ namespace nix {
        const auto e = settings.find("name-of-the-setting");
        ASSERT_NE(e, settings.end());
        ASSERT_EQ(e->second.value, "value");
        ASSERT_EQ(e->second.description, "description");
        ASSERT_EQ(e->second.description, "description\n");
    }

    TEST(Config, addSetting) {

@ -152,29 +152,16 @@ namespace nix {
    }

    TEST(Config, toJSONOnEmptyConfig) {
        std::stringstream out;
        { // Scoped to force the destructor of JSONObject to write the final `}`
            JSONObject obj(out);
            Config config;
            config.toJSON(obj);
        }

        ASSERT_EQ(out.str(), "{}");
        ASSERT_EQ(Config().toJSON().dump(), "{}");
    }

    TEST(Config, toJSONOnNonEmptyConfig) {
        std::stringstream out;
        { // Scoped to force the destructor of JSONObject to write the final `}`
            JSONObject obj(out);
            Config config;
            std::map<std::string, Config::SettingInfo> settings;
            Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
            setting.assign("value");

        Config config;
        std::map<std::string, Config::SettingInfo> settings;
        Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
        setting.assign("value");

            config.toJSON(obj);
        }
        ASSERT_EQ(out.str(), R"#({"name-of-the-setting":{"description":"description","value":"value"}})#");
        ASSERT_EQ(config.toJSON().dump(), R"#({"name-of-the-setting":{"aliases":[],"description":"description\n","value":"value"}})#");
    }

    TEST(Config, setSettingAlias) {
@ -34,6 +34,24 @@ namespace nix {
        }
    }

    TEST(logEI, jsonOutput) {
        SymbolTable testTable;
        auto problem_file = testTable.create("random.nix");
        testing::internal::CaptureStderr();

        makeJSONLogger(*logger)->logEI({
            .name = "error name",
            .description = "error without any code lines.",
            .hint = hintfmt("this hint has %1% templated %2%!!",
                "yellow",
                "values"),
            .errPos = Pos(foFile, problem_file, 02, 13)
        });

        auto str = testing::internal::GetCapturedStderr();
        ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- SysError --- error-unit-test\x1B[0m\nopening file '\x1B[33;1mrandom.nix\x1B[0m': \x1B[33;1mNo such file or directory\x1B[0m\n@nix {\"action\":\"msg\",\"column\":13,\"file\":\"random.nix\",\"level\":0,\"line\":2,\"msg\":\"\\u001b[31;1merror:\\u001b[0m\\u001b[34;1m --- error name --- error-unit-test\\u001b[0m\\n\\u001b[34;1mat: \\u001b[33;1m(2:13)\\u001b[34;1m in file: \\u001b[0mrandom.nix\\n\\nerror without any code lines.\\n\\nthis hint has \\u001b[33;1myellow\\u001b[0m templated \\u001b[33;1mvalues\\u001b[0m!!\",\"raw_msg\":\"this hint has \\u001b[33;1myellow\\u001b[0m templated \\u001b[33;1mvalues\\u001b[0m!!\"}\n");
    }

    TEST(logEI, appendingHintsToPreviousError) {

        MakeError(TestError, Error);
@ -1464,6 +1464,47 @@ string base64Decode(std::string_view s)
}


std::string stripIndentation(std::string_view s)
{
    size_t minIndent = 10000;
    size_t curIndent = 0;
    bool atStartOfLine = true;

    for (auto & c : s) {
        if (atStartOfLine && c == ' ')
            curIndent++;
        else if (c == '\n') {
            if (atStartOfLine)
                minIndent = std::max(minIndent, curIndent);
            curIndent = 0;
            atStartOfLine = true;
        } else {
            if (atStartOfLine) {
                minIndent = std::min(minIndent, curIndent);
                atStartOfLine = false;
            }
        }
    }

    std::string res;

    size_t pos = 0;
    while (pos < s.size()) {
        auto eol = s.find('\n', pos);
        if (eol == s.npos) eol = s.size();
        if (eol - pos > minIndent)
            res.append(s.substr(pos + minIndent, eol - pos - minIndent));
        res.push_back('\n');
        pos = eol + 1;
    }

    return res;
}
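Editor's note (a worked example of my own, not part of the diff): given the implementation above, the common indentation — two spaces here — is removed from every line:

```cpp
#include <cassert>
#include <string>

int main()
{
    // minIndent is 2, so two leading spaces are stripped from each line.
    std::string s = nix::stripIndentation("  foo\n    bar\n");
    assert(s == "foo\n  bar\n");
}
```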


//////////////////////////////////////////////////////////////////////


static Sync<std::pair<unsigned short, unsigned short>> windowSize{{0, 0}};
@ -464,6 +464,12 @@ string base64Encode(std::string_view s);
string base64Decode(std::string_view s);


/* Remove common leading whitespace from the lines in the string
   's'. For example, if every line is indented by at least 3 spaces,
   then we remove 3 spaces from the start of every line. */
std::string stripIndentation(std::string_view s);


/* Get a value for the specified key from an associative container. */
template <class T>
std::optional<typename T::mapped_type> get(const T & map, const typename T::key_type & key)
@ -525,7 +525,7 @@ static void _main(int argc, char * * argv)

    for (auto & symlink : resultSymlinks)
        if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>())
            store2->addPermRoot(store->parseStorePath(symlink.second), absPath(symlink.first), true);
            store2->addPermRoot(store->parseStorePath(symlink.second), absPath(symlink.first));

    logger->stop();
@ -239,7 +239,15 @@ static void daemonLoop(char * * argv)
        // Handle the connection.
        FdSource from(remote.get());
        FdSink to(remote.get());
        processConnection(openUncachedStore(), from, to, trusted, NotRecursive, user, peer.uid);
        processConnection(openUncachedStore(), from, to, trusted, NotRecursive, [&](Store & store) {
#if 0
            /* Prevent users from doing something very dangerous. */
            if (geteuid() == 0 &&
                querySetting("build-users-group", "") == "")
                throw Error("if you run 'nix-daemon' as root, then you MUST set 'build-users-group'!");
#endif
            store.createUser(user, peer.uid);
        });

        exit(0);
    }, options);

@ -324,7 +332,10 @@ static int _main(int argc, char * * argv)
        } else {
            FdSource from(STDIN_FILENO);
            FdSink to(STDOUT_FILENO);
            processConnection(openUncachedStore(), from, to, Trusted, NotRecursive, "root", 0);
            /* Auth hook is empty because in this mode we blindly trust the
               standard streams. Limiting access to those is explicitly
               not `nix-daemon`'s responsibility. */
            processConnection(openUncachedStore(), from, to, Trusted, NotRecursive, [&](Store & _){});
        }
    } else {
        daemonLoop(argv);
@ -708,7 +708,9 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs)
    }

    debug(format("switching to new user environment"));
    Path generation = createGeneration(ref<LocalFSStore>(store2), globals.profile, drv.queryOutPath());
    Path generation = createGeneration(
        ref<LocalFSStore>(store2), globals.profile,
        store2->parseStorePath(drv.queryOutPath()));
    switchLink(globals.profile, generation);
}
@ -151,7 +151,8 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
    }

    debug(format("switching to new user environment"));
    Path generation = createGeneration(ref<LocalFSStore>(store2), profile, topLevelOut);
    Path generation = createGeneration(ref<LocalFSStore>(store2), profile,
        store2->parseStorePath(topLevelOut));
    switchLink(profile, generation);
}
@ -20,7 +20,6 @@ using namespace nix;

static Path gcRoot;
static int rootNr = 0;
static bool indirectRoot = false;


enum OutputKind { okPlain, okXML, okJSON };

@ -71,11 +70,11 @@ void processExpr(EvalState & state, const Strings & attrPaths,
    if (gcRoot == "")
        printGCWarning();
    else {
        Path rootName = indirectRoot ? absPath(gcRoot) : gcRoot;
        Path rootName = absPath(gcRoot);
        if (++rootNr > 1) rootName += "-" + std::to_string(rootNr);
        auto store2 = state.store.dynamic_pointer_cast<LocalFSStore>();
        if (store2)
            drvPath = store2->addPermRoot(store2->parseStorePath(drvPath), rootName, indirectRoot);
            drvPath = store2->addPermRoot(store2->parseStorePath(drvPath), rootName);
    }
    std::cout << fmt("%s%s\n", drvPath, (outputName != "out" ? "!" + outputName : ""));
}

@ -127,7 +126,7 @@ static int _main(int argc, char * * argv)
        else if (*arg == "--add-root")
            gcRoot = getArg(*arg, arg, end);
        else if (*arg == "--indirect")
            indirectRoot = true;
            ;
        else if (*arg == "--xml")
            outputKind = okXML;
        else if (*arg == "--json")
@ -34,7 +34,6 @@ typedef void (* Operation) (Strings opFlags, Strings opArgs);

static Path gcRoot;
static int rootNr = 0;
static bool indirectRoot = false;
static bool noOutput = false;
static std::shared_ptr<Store> store;

@ -85,7 +84,7 @@ static PathSet realisePath(StorePathWithOutputs path, bool build = true)
                Path rootName = gcRoot;
                if (rootNr > 1) rootName += "-" + std::to_string(rootNr);
                if (i->first != "out") rootName += "-" + i->first;
                outPath = store2->addPermRoot(store->parseStorePath(outPath), rootName, indirectRoot);
                outPath = store2->addPermRoot(store->parseStorePath(outPath), rootName);
            }
        }
        outputs.insert(outPath);

@ -104,7 +103,7 @@ static PathSet realisePath(StorePathWithOutputs path, bool build = true)
            Path rootName = gcRoot;
            rootNr++;
            if (rootNr > 1) rootName += "-" + std::to_string(rootNr);
            return {store2->addPermRoot(path.path, rootName, indirectRoot)};
            return {store2->addPermRoot(path.path, rootName)};
        }
    }
    return {store->printStorePath(path.path)};

@ -218,8 +217,8 @@ static StorePathSet maybeUseOutputs(const StorePath & storePath, bool useOutput,
    if (useOutput && storePath.isDerivation()) {
        auto drv = store->derivationFromPath(storePath);
        StorePathSet outputs;
        for (auto & i : drv.outputs)
            outputs.insert(i.second.path(*store, drv.name));
        for (auto & i : drv.outputsAndPaths(*store))
            outputs.insert(i.second.second);
        return outputs;
    }
    else return {storePath};

@ -312,8 +311,8 @@ static void opQuery(Strings opFlags, Strings opArgs)
                auto i2 = store->followLinksToStorePath(i);
                if (forceRealise) realisePath({i2});
                Derivation drv = store->derivationFromPath(i2);
                for (auto & j : drv.outputs)
                    cout << fmt("%1%\n", store->printStorePath(j.second.path(*store, drv.name)));
                for (auto & j : drv.outputsAndPaths(*store))
                    cout << fmt("%1%\n", store->printStorePath(j.second.second));
            }
            break;
        }

@ -372,8 +371,8 @@ static void opQuery(Strings opFlags, Strings opArgs)
            for (auto & j : maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise)) {
                auto info = store->queryPathInfo(j);
                if (query == qHash) {
                    assert(info->narHash && info->narHash->type == htSHA256);
                    cout << fmt("%s\n", info->narHash->to_string(Base32, true));
                    assert(info->narHash.type == htSHA256);
                    cout << fmt("%s\n", info->narHash.to_string(Base32, true));
                } else if (query == qSize)
                    cout << fmt("%d\n", info->narSize);
            }

@ -495,7 +494,10 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
    ValidPathInfos infos;

    while (1) {
        auto info = decodeValidPathInfo(*store, cin, hashGiven);
        // We use a dummy value because we'll set it below. FIXME: be correct
        // by construction and avoid the dummy value.
        auto hashResultOpt = !hashGiven ? std::optional<HashResult> { {Hash::dummy, -1} } : std::nullopt;
        auto info = decodeValidPathInfo(*store, cin, hashResultOpt);
        if (!info) break;
        if (!store->isValidPath(info->path) || reregister) {
            /* !!! races */

@ -723,7 +725,7 @@ static void opVerifyPath(Strings opFlags, Strings opArgs)
        auto path = store->followLinksToStorePath(i);
        printMsg(lvlTalkative, "checking path '%s'...", store->printStorePath(path));
        auto info = store->queryPathInfo(path);
        HashSink sink(info->narHash->type);
        HashSink sink(info->narHash.type);
        store->narFromPath(path, sink);
        auto current = sink.finish();
        if (current.first != info->narHash) {

@ -732,7 +734,7 @@ static void opVerifyPath(Strings opFlags, Strings opArgs)
                .hint = hintfmt(
                    "path '%s' was modified! expected hash '%s', got '%s'",
                    store->printStorePath(path),
                    info->narHash->to_string(Base32, true),
                    info->narHash.to_string(Base32, true),
                    current.first.to_string(Base32, true))
            });
            status = 1;

@ -862,7 +864,7 @@ static void opServe(Strings opFlags, Strings opArgs)
                    out << info->narSize // downloadSize
                        << info->narSize;
                    if (GET_PROTOCOL_MINOR(clientVersion) >= 4)
                        out << (info->narHash ? info->narHash->to_string(Base32, true) : "")
                        out << info->narHash.to_string(Base32, true)
                            << renderContentAddress(info->ca)
                            << info->sigs;
                } catch (InvalidPath &) {

@ -944,11 +946,13 @@ static void opServe(Strings opFlags, Strings opArgs)
                if (!writeAllowed) throw Error("importing paths is not allowed");

                auto path = readString(in);
                ValidPathInfo info(store->parseStorePath(path));
                auto deriver = readString(in);
                ValidPathInfo info {
                    store->parseStorePath(path),
                    Hash::parseAny(readString(in), htSHA256),
                };
                if (deriver != "")
                    info.deriver = store->parseStorePath(deriver);
                info.narHash = Hash::parseAny(readString(in), htSHA256);
                info.references = readStorePaths<StorePathSet>(*store, in);
                in >> info.registrationTime >> info.narSize >> info.ultimate;
                info.sigs = readStrings<StringSet>(in);

@ -1080,7 +1084,7 @@ static int _main(int argc, char * * argv)
        else if (*arg == "--add-root")
            gcRoot = absPath(getArg(*arg, arg, end));
        else if (*arg == "--indirect")
            indirectRoot = true;
            ;
        else if (*arg == "--no-output")
            noOutput = true;
        else if (*arg != "" && arg->at(0) == '-') {
@ -36,6 +36,14 @@ struct CmdAddToStore : MixDryRun, StoreCommand
        return "add a path to the Nix store";
    }

    std::string doc() override
    {
        return R"(
          Copy the file or directory *path* to the Nix store, and
          print the resulting store path on standard output.
        )";
    }

    Examples examples() override
    {
        return {

@ -60,8 +68,10 @@ struct CmdAddToStore : MixDryRun, StoreCommand
            hash = hsink.finish().first;
        }

        ValidPathInfo info(store->makeFixedOutputPath(ingestionMethod, hash, *namePart));
        info.narHash = narHash;
        ValidPathInfo info {
            store->makeFixedOutputPath(ingestionMethod, hash, *namePart),
            narHash,
        };
        info.narSize = sink.s->size();
        info.ca = std::optional { FixedOutputHash {
            .method = ingestionMethod,
@ -8,7 +8,7 @@ namespace nix {

App Installable::toApp(EvalState & state)
{
    auto [cursor, attrPath] = getCursor(state, true);
    auto [cursor, attrPath] = getCursor(state);

    auto type = cursor->getAttr("type")->getString();
@ -71,14 +71,14 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixProfile
            [&](BuildableOpaque bo) {
                std::string symlink = outLink;
                if (i) symlink += fmt("-%d", i);
                store2->addPermRoot(bo.path, absPath(symlink), true);
                store2->addPermRoot(bo.path, absPath(symlink));
            },
            [&](BuildableFromDrv bfd) {
                for (auto & output : bfd.outputs) {
                    std::string symlink = outLink;
                    if (i) symlink += fmt("-%d", i);
                    if (output.first != "out") symlink += fmt("-%s", output.first);
                    store2->addPermRoot(output.second, absPath(symlink), true);
                    store2->addPermRoot(output.second, absPath(symlink));
                }
            },
        }, buildables[i]);
@ -122,7 +122,7 @@ struct CmdBundle : InstallableCommand
    if (!outLink)
        outLink = baseNameOf(app.program);

    store.dynamic_pointer_cast<LocalFSStore>()->addPermRoot(outPath, absPath(*outLink), true);
    store.dynamic_pointer_cast<LocalFSStore>()->addPermRoot(outPath, absPath(*outLink));
}
};
@ -4,12 +4,25 @@
#include "nixexpr.hh"
#include "profiles.hh"

#include <nlohmann/json.hpp>

extern char * * environ __attribute__((weak));

namespace nix {

Commands * RegisterCommand::commands = nullptr;

void NixMultiCommand::printHelp(const string & programName, std::ostream & out)
{
    MultiCommand::printHelp(programName, out);
}

nlohmann::json NixMultiCommand::toJSON()
{
    // FIXME: use Command::toJSON() as well.
    return MultiCommand::toJSON();
}

StoreCommand::StoreCommand()
{
}

@ -121,7 +134,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
    switchLink(profile2,
        createGeneration(
            ref<LocalFSStore>(store),
            profile2, store->printStorePath(storePath)));
            profile2, storePath));
}

void MixProfile::updateProfile(const Buildables & buildables)
@ -21,6 +21,13 @@ static constexpr Command::Category catSecondary = 100;
static constexpr Command::Category catUtility = 101;
static constexpr Command::Category catNixInstallation = 102;

struct NixMultiCommand : virtual MultiCommand, virtual Command
{
    void printHelp(const string & programName, std::ostream & out) override;

    nlohmann::json toJSON() override;
};

/* A command that requires a Nix store. */
struct StoreCommand : virtual Command
{
@ -15,7 +15,7 @@ struct Var
{
    bool exported = true;
    bool associative = false;
    std::string value; // quoted string or array
    std::string quoted; // quoted string or array
};

struct BuildEnvironment

@ -75,12 +75,12 @@ BuildEnvironment readEnvironment(const Path & path)

        else if (std::regex_search(pos, file.cend(), match, varRegex, std::regex_constants::match_continuous)) {
            pos = match[0].second;
            res.env.insert({match[1], Var { .exported = exported.count(match[1]) > 0, .value = match[2] }});
            res.env.insert({match[1], Var { .exported = exported.count(match[1]) > 0, .quoted = match[2] }});
        }

        else if (std::regex_search(pos, file.cend(), match, assocArrayRegex, std::regex_constants::match_continuous)) {
            pos = match[0].second;
            res.env.insert({match[1], Var { .associative = true, .value = match[2] }});
            res.env.insert({match[1], Var { .associative = true, .quoted = match[2] }});
        }

        else if (std::regex_search(pos, file.cend(), match, functionRegex, std::regex_constants::match_continuous)) {

@ -92,6 +92,8 @@ BuildEnvironment readEnvironment(const Path & path)
            path, file.substr(pos - file.cbegin(), 60));
    }

    res.env.erase("__output");

    return res;
}

@ -124,31 +126,33 @@ StorePath getDerivationEnvironment(ref<Store> store, const StorePath & drvPath)

    /* Rehash and write the derivation. FIXME: would be nice to use
       'buildDerivation', but that's privileged. */
    auto drvName = std::string(drvPath.name());
    assert(hasSuffix(drvName, ".drv"));
    drvName.resize(drvName.size() - 4);
    drvName += "-env";
    for (auto & output : drv.outputs)
        drv.env.erase(output.first);
    drv.outputs = {{"out", DerivationOutput { .output = DerivationOutputInputAddressed { .path = StorePath::dummy }}}};
    drv.env["out"] = "";
    drv.env["_outputs_saved"] = drv.env["outputs"];
    drv.env["outputs"] = "out";
    drv.name += "-env";
    for (auto & output : drv.outputs) {
        output.second = { .output = DerivationOutputInputAddressed { .path = StorePath::dummy } };
        drv.env[output.first] = "";
    }
    drv.inputSrcs.insert(std::move(getEnvShPath));
    Hash h = std::get<0>(hashDerivationModulo(*store, drv, true));
    auto shellOutPath = store->makeOutputPath("out", h, drvName);
    drv.outputs.insert_or_assign("out", DerivationOutput { .output = DerivationOutputInputAddressed {
        .path = shellOutPath
    } });
    drv.env["out"] = store->printStorePath(shellOutPath);
    auto shellDrvPath2 = writeDerivation(store, drv, drvName);

    for (auto & output : drv.outputs) {
        auto outPath = store->makeOutputPath(output.first, h, drv.name);
        output.second = { .output = DerivationOutputInputAddressed { .path = outPath } };
        drv.env[output.first] = store->printStorePath(outPath);
    }

    auto shellDrvPath = writeDerivation(*store, drv);

    /* Build the derivation. */
    store->buildPaths({{shellDrvPath2}});
    store->buildPaths({{shellDrvPath}});

    assert(store->isValidPath(shellOutPath));
    for (auto & outPath : drv.outputPaths(*store)) {
        assert(store->isValidPath(outPath));
        auto outPathS = store->toRealPath(outPath);
        if (lstat(outPathS).st_size)
            return outPath;
    }

    return shellOutPath;
    throw Error("get-env.sh failed to produce an environment");
}

struct Common : InstallableCommand, MixProfile
@ -174,8 +178,12 @@ struct Common : InstallableCommand, MixProfile
        "UID",
    };

    void makeRcScript(const BuildEnvironment & buildEnvironment, std::ostream & out)
    std::string makeRcScript(
        const BuildEnvironment & buildEnvironment,
        const Path & outputsDir = absPath(".") + "/outputs")
    {
        std::ostringstream out;

        out << "unset shellHook\n";

        out << "nix_saved_PATH=\"$PATH\"\n";

@ -183,9 +191,9 @@ struct Common : InstallableCommand, MixProfile
        for (auto & i : buildEnvironment.env) {
            if (!ignoreVars.count(i.first) && !hasPrefix(i.first, "BASH_")) {
                if (i.second.associative)
                    out << fmt("declare -A %s=(%s)\n", i.first, i.second.value);
                    out << fmt("declare -A %s=(%s)\n", i.first, i.second.quoted);
                else {
                    out << fmt("%s=%s\n", i.first, i.second.value);
                    out << fmt("%s=%s\n", i.first, i.second.quoted);
                    if (i.second.exported)
                        out << fmt("export %s\n", i.first);
                }

@ -196,13 +204,26 @@ struct Common : InstallableCommand, MixProfile

        out << buildEnvironment.bashFunctions << "\n";

        // FIXME: set outputs

        out << "export NIX_BUILD_TOP=\"$(mktemp -d --tmpdir nix-shell.XXXXXX)\"\n";
        for (auto & i : {"TMP", "TMPDIR", "TEMP", "TEMPDIR"})
            out << fmt("export %s=\"$NIX_BUILD_TOP\"\n", i);

        out << "eval \"$shellHook\"\n";

        /* Substitute occurrences of output paths. */
        auto outputs = buildEnvironment.env.find("outputs");
        assert(outputs != buildEnvironment.env.end());

        // FIXME: properly unquote 'outputs'.
        StringMap rewrites;
        for (auto & outputName : tokenizeString<std::vector<std::string>>(replaceStrings(outputs->second.quoted, "'", ""))) {
            auto from = buildEnvironment.env.find(outputName);
            assert(from != buildEnvironment.env.end());
            // FIXME: unquote
            rewrites.insert({from->second.quoted, outputsDir + "/" + outputName});
        }

        return rewriteStrings(out.str(), rewrites);
    }

    Strings getDefaultFlakeAttrPaths() override

@ -243,19 +264,57 @@ struct Common : InstallableCommand, MixProfile
struct CmdDevelop : Common, MixEnvironment
{
    std::vector<std::string> command;
    std::optional<std::string> phase;

    CmdDevelop()
    {
        addFlag({
            .longName = "command",
            .shortName = 'c',
            .description = "command and arguments to be executed insted of an interactive shell",
            .description = "command and arguments to be executed instead of an interactive shell",
            .labels = {"command", "args"},
            .handler = {[&](std::vector<std::string> ss) {
                if (ss.empty()) throw UsageError("--command requires at least one argument");
                command = ss;
            }}
        });

        addFlag({
            .longName = "phase",
            .description = "phase to run (e.g. `build` or `configure`)",
            .labels = {"phase-name"},
            .handler = {&phase},
        });

        addFlag({
            .longName = "configure",
            .description = "run the configure phase",
            .handler = {&phase, {"configure"}},
        });

        addFlag({
            .longName = "build",
            .description = "run the build phase",
            .handler = {&phase, {"build"}},
        });

        addFlag({
            .longName = "check",
            .description = "run the check phase",
            .handler = {&phase, {"check"}},
        });

        addFlag({
            .longName = "install",
            .description = "run the install phase",
            .handler = {&phase, {"install"}},
        });

        addFlag({
            .longName = "installcheck",
            .description = "run the installcheck phase",
            .handler = {&phase, {"installCheck"}},
        });
    }

    std::string description() override

@ -291,19 +350,31 @@ struct CmdDevelop : Common, MixEnvironment

        auto [rcFileFd, rcFilePath] = createTempFile("nix-shell");

        std::ostringstream ss;
        makeRcScript(buildEnvironment, ss);
        auto script = makeRcScript(buildEnvironment);

        ss << fmt("rm -f '%s'\n", rcFilePath);
        if (verbosity >= lvlDebug)
            script += "set -x\n";

        if (!command.empty()) {
        script += fmt("rm -f '%s'\n", rcFilePath);

        if (phase) {
            if (!command.empty())
                throw UsageError("you cannot use both '--command' and '--phase'");
            // FIXME: foundMakefile is set by buildPhase, need to get
            // rid of that.
            script += fmt("foundMakefile=1\n");
            script += fmt("runHook %1%Phase\n", *phase);
            script += fmt("exit 0\n", *phase);
        }

        else if (!command.empty()) {
            std::vector<std::string> args;
            for (auto s : command)
                args.push_back(shellEscape(s));
            ss << fmt("exec %s\n", concatStringsSep(" ", args));
            script += fmt("exec %s\n", concatStringsSep(" ", args));
        }

        writeFull(rcFileFd.get(), ss.str());
        writeFull(rcFileFd.get(), script);

        stopProgressBar();

@ -365,7 +436,7 @@ struct CmdPrintDevEnv : Common

        stopProgressBar();

        makeRcScript(buildEnvironment, std::cout);
        std::cout << makeRcScript(buildEnvironment);
    }
};
@ -572,7 +572,7 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand
        Strings{templateName == "" ? "defaultTemplate" : templateName},
        Strings(attrsPathPrefixes), lockFlags);

    auto [cursor, attrPath] = installable.getCursor(*evalState, true);
    auto [cursor, attrPath] = installable.getCursor(*evalState);

    auto templateDir = cursor->getAttr("path")->getString();

@ -782,7 +782,6 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
struct CmdFlakeShow : FlakeCommand
{
    bool showLegacy = false;
    bool useEvalCache = true;

    CmdFlakeShow()
    {

@ -791,12 +790,6 @@ struct CmdFlakeShow : FlakeCommand
            .description = "show the contents of the 'legacyPackages' output",
            .handler = {&showLegacy, true}
        });

        addFlag({
            .longName = "no-eval-cache",
            .description = "do not use the flake evaluation cache",
            .handler = {[&]() { useEvalCache = false; }}
        });
    }

    std::string description() override

@ -934,13 +927,13 @@ struct CmdFlakeShow : FlakeCommand
            }
        };

        auto cache = openEvalCache(*state, flake, useEvalCache);
        auto cache = openEvalCache(*state, flake);

        visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), "");
    }
};

struct CmdFlake : virtual MultiCommand, virtual Command
struct CmdFlake : NixMultiCommand
{
    CmdFlake()
        : MultiCommand({

@ -970,11 +963,6 @@ struct CmdFlake : virtual MultiCommand, virtual Command
        command->second->prepare();
        command->second->run();
    }

    void printHelp(const string & programName, std::ostream & out) override
    {
        MultiCommand::printHelp(programName, out);
    }
};

static auto r1 = registerCommand<CmdFlake>("flake");
@ -1,12 +1,6 @@
set -e
if [ -e .attrs.sh ]; then source .attrs.sh; fi

outputs=$_outputs_saved
for __output in $_outputs_saved; do
    declare "$__output"="$out"
done
unset _outputs_saved __output

export IN_NIX_SHELL=impure
export dontAddDisableDepTrack=1

@ -14,5 +8,12 @@ if [[ -n $stdenv ]]; then
    source $stdenv/setup
fi

export > $out
set >> $out
for __output in $outputs; do
    if [[ -z $__done ]]; then
        export > ${!__output}
        set >> ${!__output}
        __done=1
    else
        echo -n >> ${!__output}
    fi
done
@ -76,7 +76,7 @@ MixFlakeOptions::MixFlakeOptions()

    addFlag({
        .longName = "override-input",
        .description = "override a specific flake input (e.g. 'dwarffs/nixpkgs')",
        .description = "override a specific flake input (e.g. `dwarffs/nixpkgs`)",
        .labels = {"input-path", "flake-url"},
        .handler = {[&](std::string inputPath, std::string flakeRef) {
            lockFlags.inputOverrides.insert_or_assign(

@ -116,7 +116,7 @@ SourceExprCommand::SourceExprCommand()
    addFlag({
        .longName = "file",
        .shortName = 'f',
        .description = "evaluate FILE rather than the default",
        .description = "evaluate *file* rather than the default",
        .labels = {"file"},
        .handler = {&file},
        .completer = completePath

@ -124,7 +124,7 @@ SourceExprCommand::SourceExprCommand()

    addFlag({
        .longName = "expr",
        .description = "evaluate attributes from EXPR",
        .description = "evaluate attributes from *expr*",
        .labels = {"expr"},
        .handler = {&expr}
    });

@ -183,8 +183,7 @@ void completeFlakeRefWithFragment(
    auto flakeRef = parseFlakeRef(flakeRefS, absPath("."));

    auto evalCache = openEvalCache(*evalState,
        std::make_shared<flake::LockedFlake>(lockFlake(*evalState, flakeRef, lockFlags)),
        true);
        std::make_shared<flake::LockedFlake>(lockFlake(*evalState, flakeRef, lockFlags)));

    auto root = evalCache->getRoot();

@ -273,7 +272,7 @@ Buildable Installable::toBuildable()
}

std::vector<std::pair<std::shared_ptr<eval_cache::AttrCursor>, std::string>>
Installable::getCursors(EvalState & state, bool useEvalCache)
Installable::getCursors(EvalState & state)
{
    auto evalCache =
        std::make_shared<nix::eval_cache::EvalCache>(std::nullopt, state,

@ -282,9 +281,9 @@ Installable::getCursors(EvalState & state, bool useEvalCache)
}

std::pair<std::shared_ptr<eval_cache::AttrCursor>, std::string>
Installable::getCursor(EvalState & state, bool useEvalCache)
Installable::getCursor(EvalState & state)
{
    auto cursors = getCursors(state, useEvalCache);
    auto cursors = getCursors(state);
    if (cursors.empty())
        throw Error("cannot find flake attribute '%s'", what());
    return cursors[0];

@ -305,8 +304,8 @@ struct InstallableStorePath : Installable
        if (storePath.isDerivation()) {
            std::map<std::string, StorePath> outputs;
            auto drv = store->readDerivation(storePath);
            for (auto & [name, output] : drv.outputs)
                outputs.emplace(name, output.path(*store, drv.name));
            for (auto & i : drv.outputsAndPaths(*store))
                outputs.emplace(i.first, i.second.second);
            return {
                BuildableFromDrv {
                    .drvPath = storePath,

@ -420,12 +419,11 @@ Value * InstallableFlake::getFlakeOutputs(EvalState & state, const flake::Locked

ref<eval_cache::EvalCache> openEvalCache(
    EvalState & state,
    std::shared_ptr<flake::LockedFlake> lockedFlake,
    bool useEvalCache)
    std::shared_ptr<flake::LockedFlake> lockedFlake)
{
    auto fingerprint = lockedFlake->getFingerprint();
    return make_ref<nix::eval_cache::EvalCache>(
        useEvalCache && evalSettings.pureEval
        evalSettings.useEvalCache && evalSettings.pureEval
            ? std::optional { std::cref(fingerprint) }
            : std::nullopt,
        state,

@ -460,10 +458,9 @@ static std::string showAttrPaths(const std::vector<std::string> & paths)

std::tuple<std::string, FlakeRef, InstallableValue::DerivationInfo> InstallableFlake::toDerivation()
{
    auto lockedFlake = getLockedFlake();

    auto cache = openEvalCache(*state, lockedFlake, true);
    auto cache = openEvalCache(*state, lockedFlake);
    auto root = cache->getRoot();

    for (auto & attrPath : getActualAttrPaths()) {

@ -517,11 +514,10 @@ std::pair<Value *, Pos> InstallableFlake::toValue(EvalState & state)
}

std::vector<std::pair<std::shared_ptr<eval_cache::AttrCursor>, std::string>>
InstallableFlake::getCursors(EvalState & state, bool useEvalCache)
InstallableFlake::getCursors(EvalState & state)
{
    auto evalCache = openEvalCache(state,
        std::make_shared<flake::LockedFlake>(lockFlake(state, flakeRef, lockFlags)),
        useEvalCache);
        std::make_shared<flake::LockedFlake>(lockFlake(state, flakeRef, lockFlags)));

    auto root = evalCache->getRoot();
@ -62,10 +62,10 @@ struct Installable
    }

    virtual std::vector<std::pair<std::shared_ptr<eval_cache::AttrCursor>, std::string>>
    getCursors(EvalState & state, bool useEvalCache);
    getCursors(EvalState & state);

    std::pair<std::shared_ptr<eval_cache::AttrCursor>, std::string>
    getCursor(EvalState & state, bool useEvalCache);
    getCursor(EvalState & state);

    virtual FlakeRef nixpkgsFlakeRef() const
    {

@ -118,7 +118,7 @@ struct InstallableFlake : InstallableValue
    std::pair<Value *, Pos> toValue(EvalState & state) override;

    std::vector<std::pair<std::shared_ptr<eval_cache::AttrCursor>, std::string>>
    getCursors(EvalState & state, bool useEvalCache) override;
    getCursors(EvalState & state) override;

    std::shared_ptr<flake::LockedFlake> getLockedFlake() const;

@ -127,7 +127,6 @@ struct InstallableFlake : InstallableValue

ref<eval_cache::EvalCache> openEvalCache(
    EvalState & state,
    std::shared_ptr<flake::LockedFlake> lockedFlake,
    bool useEvalCache);
    std::shared_ptr<flake::LockedFlake> lockedFlake);

}
@ -19,7 +19,7 @@ nix_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libexpr

nix_LIBS = libexpr libmain libfetchers libstore libutil

nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -lboost_context -lboost_thread -lboost_system
nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -lboost_context -lboost_thread -lboost_system -llowdown

$(foreach name, \
  nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \
@ -17,6 +17,8 @@
#include <netdb.h>
#include <netinet/in.h>

#include <nlohmann/json.hpp>

extern std::string chrootHelperName;

void chrootHelper(int argc, char * * argv);

@ -140,6 +142,11 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
        printHelp(programName, std::cout);
        throw Exit();
    }

    std::string description() override
    {
        return "a tool for reproducible and declarative configuration management";
    }
};

void mainWrapped(int argc, char * * argv)

@ -172,6 +179,29 @@ void mainWrapped(int argc, char * * argv)

    NixArgs args;

    if (argc == 2 && std::string(argv[1]) == "__dump-args") {
        std::cout << args.toJSON().dump() << "\n";
        return;
    }

    if (argc == 2 && std::string(argv[1]) == "__dump-builtins") {
        EvalState state({}, openStore("dummy://"));
        auto res = nlohmann::json::object();
        auto builtins = state.baseEnv.values[0]->attrs;
        for (auto & builtin : *builtins) {
            auto b = nlohmann::json::object();
            if (builtin.value->type != tPrimOp) continue;
            auto primOp = builtin.value->primOp;
            if (!primOp->doc) continue;
            b["arity"] = primOp->arity;
            b["args"] = primOp->args;
            b["doc"] = trim(stripIndentation(primOp->doc));
            res[(std::string) builtin.name] = std::move(b);
        }
        std::cout << res.dump() << "\n";
        return;
    }

    Finally printCompletions([&]()
    {
        if (completions) {
@ -77,14 +77,16 @@ struct CmdMakeContentAddressable : StorePathsCommand, MixJSON

            auto narHash = hashModuloSink.finish().first;

            ValidPathInfo info(store->makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, path.name(), references, hasSelfReference));
            ValidPathInfo info {
                store->makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, path.name(), references, hasSelfReference),
                narHash,
            };
            info.references = std::move(references);
            if (hasSelfReference) info.references.insert(info.path);
            info.narHash = narHash;
            info.narSize = sink.s->size();
            info.ca = FixedOutputHash {
                .method = FileIngestionMethod::Recursive,
                .hash = *info.narHash,
                .hash = info.narHash,
            };

            if (!json)
src/nix/markdown.cc (new file)

@ -0,0 +1,50 @@
#include "markdown.hh"
#include "util.hh"
#include "finally.hh"

#include <sys/queue.h>
extern "C" {
#include <lowdown.h>
}

namespace nix {

std::string renderMarkdownToTerminal(std::string_view markdown)
{
    struct lowdown_opts opts {
        .type = LOWDOWN_TERM,
        .maxdepth = 20,
        .cols = std::min(getWindowSize().second, (unsigned short) 80),
        .hmargin = 0,
        .vmargin = 0,
        .feat = LOWDOWN_COMMONMARK | LOWDOWN_FENCED | LOWDOWN_DEFLIST | LOWDOWN_TABLES,
        .oflags = 0,
    };

    auto doc = lowdown_doc_new(&opts);
    if (!doc)
        throw Error("cannot allocate Markdown document");
    Finally freeDoc([&]() { lowdown_doc_free(doc); });

    size_t maxn = 0;
    auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size());
    if (!node)
        throw Error("cannot parse Markdown document");
    Finally freeNode([&]() { lowdown_node_free(node); });

    auto renderer = lowdown_term_new(&opts);
    if (!renderer)
        throw Error("cannot allocate Markdown renderer");
    Finally freeRenderer([&]() { lowdown_term_free(renderer); });

    auto buf = lowdown_buf_new(16384);
    if (!buf)
        throw Error("cannot allocate Markdown output buffer");
    Finally freeBuffer([&]() { lowdown_buf_free(buf); });

    lowdown_term_rndr(buf, nullptr, renderer, node);

    return std::string(buf->data, buf->size);
}

}
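Editor's note (a usage sketch, not part of the diff): the REPL's new `:doc` command below is the real consumer of this function; in isolation, a call looks like this, using the same Synopsis-style Markdown the `:doc` handler assembles:

```cpp
std::cout << nix::renderMarkdownToTerminal(
    "**Synopsis:** `builtins.map` *f* *list*\n");
```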
src/nix/markdown.hh (new file)

@ -0,0 +1,7 @@
#include "types.hh"

namespace nix {

std::string renderMarkdownToTerminal(std::string_view markdown);

}
@ -129,11 +129,13 @@ struct ProfileManifest

        auto narHash = hashString(htSHA256, *sink.s);

        ValidPathInfo info(store->makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, "profile", references));
        ValidPathInfo info {
            store->makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, "profile", references),
            narHash,
        };
        info.references = std::move(references);
        info.narHash = narHash;
        info.narSize = sink.s->size();
        info.ca = FixedOutputHash { .method = FileIngestionMethod::Recursive, .hash = *info.narHash };
        info.ca = FixedOutputHash { .method = FileIngestionMethod::Recursive, .hash = info.narHash };

        auto source = StringSource { *sink.s };
        store->addToStore(info, source);

@ -435,7 +437,7 @@ struct CmdProfileDiffClosures : virtual StoreCommand, MixDefaultProfile
    }
};

struct CmdProfile : virtual MultiCommand, virtual Command
struct CmdProfile : NixMultiCommand
{
    CmdProfile()
        : MultiCommand({

@ -459,11 +461,6 @@ struct CmdProfile : virtual MultiCommand, virtual Command
        command->second->prepare();
        command->second->run();
    }

    void printHelp(const string & programName, std::ostream & out) override
    {
        MultiCommand::printHelp(programName, out);
    }
};

static auto r1 = registerCommand<CmdProfile>("profile");
@ -115,7 +115,7 @@ struct CmdRegistryPin : virtual Args, EvalCommand
    }
};

struct CmdRegistry : virtual MultiCommand, virtual Command
struct CmdRegistry : virtual NixMultiCommand
{
    CmdRegistry()
        : MultiCommand({

@ -141,11 +141,6 @@ struct CmdRegistry : virtual MultiCommand, virtual Command
        command->second->prepare();
        command->second->run();
    }

    void printHelp(const string & programName, std::ostream & out) override
    {
        MultiCommand::printHelp(programName, out);
    }
};

static auto r1 = registerCommand<CmdRegistry>("registry");
@ -32,6 +32,7 @@ extern "C" {
#include "globals.hh"
#include "command.hh"
#include "finally.hh"
#include "markdown.hh"

#if HAVE_BOEHMGC
#define GC_INCLUDE_NEW

@ -64,7 +65,7 @@ struct NixRepl
    void mainLoop(const std::vector<std::string> & files);
    StringSet completePrefix(string prefix);
    bool getLine(string & input, const std::string &prompt);
    Path getDerivationPath(Value & v);
    StorePath getDerivationPath(Value & v);
    bool processLine(string line);
    void loadFile(const Path & path);
    void initEnv();

@ -375,13 +376,16 @@ bool isVarName(const string & s)
}


Path NixRepl::getDerivationPath(Value & v) {
StorePath NixRepl::getDerivationPath(Value & v) {
    auto drvInfo = getDerivation(*state, v, false);
    if (!drvInfo)
        throw Error("expression does not evaluate to a derivation, so I can't build it");
    Path drvPath = drvInfo->queryDrvPath();
    if (drvPath == "" || !state->store->isValidPath(state->store->parseStorePath(drvPath)))
        throw Error("expression did not evaluate to a valid derivation");
    Path drvPathRaw = drvInfo->queryDrvPath();
    if (drvPathRaw == "")
        throw Error("expression did not evaluate to a valid derivation (no drv path)");
    StorePath drvPath = state->store->parseStorePath(drvPathRaw);
    if (!state->store->isValidPath(drvPath))
        throw Error("expression did not evaluate to a valid derivation (invalid drv path)");
    return drvPath;
}

@ -416,7 +420,8 @@ bool NixRepl::processLine(string line)
             << "  :r          Reload all files\n"
             << "  :s <expr>   Build dependencies of derivation, then start nix-shell\n"
             << "  :t <expr>   Describe result of evaluation\n"
             << "  :u <expr>   Build derivation, then start nix-shell\n";
             << "  :u <expr>   Build derivation, then start nix-shell\n"
             << "  :doc <expr> Show documentation of a builtin function\n";
    }

    else if (command == ":a" || command == ":add") {

@ -474,29 +479,30 @@ bool NixRepl::processLine(string line)
        evalString("drv: (import <nixpkgs> {}).runCommand \"shell\" { buildInputs = [ drv ]; } \"\"", f);
        state->callFunction(f, v, result, Pos());

        Path drvPath = getDerivationPath(result);
        runProgram(settings.nixBinDir + "/nix-shell", Strings{drvPath});
        StorePath drvPath = getDerivationPath(result);
        runProgram(settings.nixBinDir + "/nix-shell", Strings{state->store->printStorePath(drvPath)});
    }

    else if (command == ":b" || command == ":i" || command == ":s") {
        Value v;
        evalString(arg, v);
        Path drvPath = getDerivationPath(v);
        StorePath drvPath = getDerivationPath(v);
        Path drvPathRaw = state->store->printStorePath(drvPath);

        if (command == ":b") {
            /* We could do the build in this process using buildPaths(),
               but doing it in a child makes it easier to recover from
               problems / SIGINT. */
            if (runProgram(settings.nixBinDir + "/nix", Strings{"build", "--no-link", drvPath}) == 0) {
                auto drv = readDerivation(*state->store, drvPath, Derivation::nameFromPath(state->store->parseStorePath(drvPath)));
            if (runProgram(settings.nixBinDir + "/nix", Strings{"build", "--no-link", drvPathRaw}) == 0) {
                auto drv = state->store->readDerivation(drvPath);
                std::cout << std::endl << "this derivation produced the following outputs:" << std::endl;
                for (auto & i : drv.outputs)
                    std::cout << fmt("  %s -> %s\n", i.first, state->store->printStorePath(i.second.path(*state->store, drv.name)));
                for (auto & i : drv.outputsAndPaths(*state->store))
                    std::cout << fmt("  %s -> %s\n", i.first, state->store->printStorePath(i.second.second));
            }
        } else if (command == ":i") {
            runProgram(settings.nixBinDir + "/nix-env", Strings{"-i", drvPathRaw});
        } else {
            runProgram(settings.nixBinDir + "/nix-shell", Strings{drvPathRaw});
        }
    }

@ -509,6 +515,29 @@ bool NixRepl::processLine(string line)
    else if (command == ":q" || command == ":quit")
        return false;

    else if (command == ":doc") {
        Value v;
        evalString(arg, v);
        if (auto doc = state->getDoc(v)) {
            std::string markdown;

            if (!doc->args.empty() && doc->name) {
                auto args = doc->args;
                for (auto & arg : args)
                    arg = "*" + arg + "*";

                markdown +=
                    "**Synopsis:** `builtins." + (std::string) (*doc->name) + "` "
                    + concatStringsSep(" ", args) + "\n\n";
            }

            markdown += trim(stripIndentation(doc->doc));

            std::cout << renderMarkdownToTerminal(markdown);
        } else
            throw Error("value does not have documentation");
    }

    else if (command != "")
        throw Error("unknown command '%1%'", command);

@ -782,7 +811,7 @@ struct CmdRepl : StoreCommand, MixEvalArgs
    return {
        Example{
            "Display all special commands within the REPL:",
            "nix repl\n  nix-repl> :?"
            "nix repl\nnix-repl> :?"
        }
    };
}
@ -177,7 +177,7 @@ struct CmdSearch : InstallableCommand, MixJSON
    }
};

for (auto & [cursor, prefix] : installable->getCursors(*state, true))
for (auto & [cursor, prefix] : installable->getCursors(*state))
    visit(*cursor, parseAttrPath(*state, prefix));

if (!json && !results)
@ -2,7 +2,8 @@
#include "common-args.hh"
#include "shared.hh"
#include "store-api.hh"
#include "json.hh"

#include <nlohmann/json.hpp>

using namespace nix;

@ -19,8 +20,7 @@ struct CmdShowConfig : Command, MixJSON
{
    if (json) {
        // FIXME: use appropriate JSON types (bool, ints, etc).
        JSONObject jsonObj(std::cout);
        globalConfig.toJSON(jsonObj);
        logger->stdout_("%s", globalConfig.toJSON().dump());
    } else {
        std::map<std::string, Config::SettingInfo> settings;
        globalConfig.getSettings(settings);
@ -67,9 +67,9 @@ struct CmdShowDerivation : InstallablesCommand

    {
        auto outputsObj(drvObj.object("outputs"));
        for (auto & output : drv.outputs) {
        for (auto & output : drv.outputsAndPaths(*store)) {
            auto outputObj(outputsObj.object(output.first));
            outputObj.attr("path", store->printStorePath(output.second.path(*store, drv.name)));
            outputObj.attr("path", store->printStorePath(output.second.second));

            std::visit(overloaded {
                [&](DerivationOutputInputAddressed doi) {

@ -81,7 +81,7 @@ struct CmdShowDerivation : InstallablesCommand
                [&](DerivationOutputCAFloating dof) {
                    outputObj.attr("hashAlgo", makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
                },
            }, output.second.output);
            }, output.second.first.output);
        }
    }
@ -91,15 +91,15 @@ struct CmdVerify : StorePathsCommand

                std::unique_ptr<AbstractHashSink> hashSink;
                if (!info->ca)
                    hashSink = std::make_unique<HashSink>(info->narHash->type);
                    hashSink = std::make_unique<HashSink>(info->narHash.type);
                else
                    hashSink = std::make_unique<HashModuloSink>(info->narHash->type, std::string(info->path.hashPart()));
                    hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart()));

                store->narFromPath(info->path, *hashSink);

                auto hash = hashSink->finish();

                if (hash.first != *info->narHash) {
                if (hash.first != info->narHash) {
                    corrupted++;
                    act2.result(resCorruptedPath, store->printStorePath(info->path));
                    logError({

@ -107,7 +107,7 @@ struct CmdVerify : StorePathsCommand
                        .hint = hintfmt(
                            "path '%s' was modified! expected hash '%s', got '%s'",
                            store->printStorePath(info->path),
                            info->narHash->to_string(Base32, true),
                            info->narHash.to_string(Base32, true),
                            hash.first.to_string(Base32, true))
                    });
                }
|
Loading…
Add table
Add a link
Reference in a new issue