Mirror of https://github.com/NixOS/nix, synced 2025-07-07 14:21:48 +02:00

Merge branch 'master' into indexed-store-path-outputs

Commit 13f2a6f38d: 153 changed files with 2341 additions and 1432 deletions
@@ -88,7 +88,8 @@ EvalCommand::EvalCommand()
{
addFlag({
.longName = "debugger",
.description = "start an interactive environment if evaluation fails",
.description = "Start an interactive environment if evaluation fails.",
.category = MixEvalArgs::category,
.handler = {&startReplOnEvalErrors, true},
});
}

@@ -225,7 +226,7 @@ MixProfile::MixProfile()
{
addFlag({
.longName = "profile",
.description = "The profile to update.",
.description = "The profile to operate on.",
.labels = {"path"},
.handler = {&profile},
.completer = completePath

@@ -13,8 +13,6 @@ namespace nix {
MixEvalArgs::MixEvalArgs()
{
auto category = "Common evaluation options";
addFlag({
.longName = "arg",
.description = "Pass the value *expr* as the argument *name* to Nix functions.",

@@ -10,6 +10,8 @@ class Bindings;
struct MixEvalArgs : virtual Args
{
static constexpr auto category = "Common evaluation options";
MixEvalArgs();
Bindings * getAutoArgs(EvalState & state);

@@ -628,6 +628,8 @@ InstallableFlake::InstallableFlake(
std::tuple<std::string, FlakeRef, InstallableValue::DerivationInfo> InstallableFlake::toDerivation()
{
Activity act(*logger, lvlTalkative, actUnknown, fmt("evaluating derivation '%s'", what()));
auto attr = getCursor(*state);
auto attrPath = attr->getAttrPathStr();

@@ -18,7 +18,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
.hmargin = 0,
.vmargin = 0,
.feat = LOWDOWN_COMMONMARK | LOWDOWN_FENCED | LOWDOWN_DEFLIST | LOWDOWN_TABLES,
.oflags = 0,
.oflags = LOWDOWN_TERM_NOLINK,
};
auto doc = lowdown_doc_new(&opts);

@@ -35,6 +35,7 @@ extern "C" {
#include "finally.hh"
#include "markdown.hh"
#include "local-fs-store.hh"
#include "progress-bar.hh"
#if HAVE_BOEHMGC
#define GC_INCLUDE_NEW

@@ -241,7 +242,11 @@ void NixRepl::mainLoop()
// Allow nix-repl specific settings in .inputrc
rl_readline_name = "nix-repl";
createDirs(dirOf(historyFile));
try {
createDirs(dirOf(historyFile));
} catch (SysError & e) {
logWarning(e.info());
}
#ifndef READLINE
el_hist_size = 1000;
#endif

@@ -252,6 +257,10 @@ void NixRepl::mainLoop()
rl_set_list_possib_func(listPossibleCallback);
#endif
/* Stop the progress bar because it interferes with the display of
the repl. */
stopProgressBar();
std::string input;
while (true) {

@@ -1037,10 +1046,11 @@ void runRepl(
struct CmdRepl : InstallablesCommand
{
CmdRepl(){
CmdRepl() {
evalSettings.pureEval = false;
}
void prepare()
void prepare() override
{
if (!settings.isExperimentalFeatureEnabled(Xp::ReplFlake) && !(file) && this->_installables.size() >= 1) {
warn("future versions of Nix will require using `--file` to load a file");

@@ -1053,12 +1063,15 @@ struct CmdRepl : InstallablesCommand
}
installables = InstallablesCommand::load();
}
std::vector<std::string> files;
Strings getDefaultFlakeAttrPaths() override
{
return {""};
}
virtual bool useDefaultInstallables() override
bool useDefaultInstallables() override
{
return file.has_value() or expr.has_value();
}

@@ -507,11 +507,6 @@ std::shared_ptr<AttrCursor> AttrCursor::maybeGetAttr(Symbol name, bool forceErro
return nullptr;
//throw TypeError("'%s' is not an attribute set", getAttrPathStr());
for (auto & attr : *v.attrs) {
if (root->db)
root->db->setPlaceholder({cachedValue->first, attr.name});
}
auto attr = v.attrs->get(name);
if (!attr) {
@@ -2501,18 +2501,18 @@ void EvalState::printStats()
}
{
auto list = topObj.list("functions");
for (auto & i : functionCalls) {
for (auto & [fun, count] : functionCalls) {
auto obj = list.object();
if (i.first->name)
obj.attr("name", (const std::string &) i.first->name);
if (fun->name)
obj.attr("name", (std::string_view) symbols[fun->name]);
else
obj.attr("name", nullptr);
if (auto pos = positions[i.first->pos]) {
obj.attr("file", (const std::string &) pos.file);
if (auto pos = positions[fun->pos]) {
obj.attr("file", (std::string_view) pos.file);
obj.attr("line", pos.line);
obj.attr("column", pos.column);
}
obj.attr("count", i.second);
obj.attr("count", count);
}
}
{

@@ -12,13 +12,13 @@
, executable ? false
, unpack ? false
, name ? baseNameOf (toString url)
, impure ? false
}:
derivation {
derivation ({
builder = "builtin:fetchurl";
# New-style output content requirements.
inherit outputHashAlgo outputHash;
outputHashMode = if unpack || executable then "recursive" else "flat";
inherit name url executable unpack;

@@ -38,4 +38,6 @@ derivation {
# To make "nix-prefetch-url" work.
urls = [ url ];
}
} // (if impure
then { __impure = true; }
else { inherit outputHashAlgo outputHash; }))

@@ -43,7 +43,7 @@ let
outputs = flake.outputs (inputs // { self = result; });
result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; };
result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; _type = "flake"; };
in
if node.flake or true then
assert builtins.isFunction flake.outputs;

@@ -68,7 +68,7 @@ void ConfigFile::apply()
}
}
if (!trusted) {
warn("ignoring untrusted flake configuration setting '%s'", name);
warn("ignoring untrusted flake configuration setting '%s'.\nPass '%s' to trust it", name, "--accept-flake-config");
continue;
}
}

@@ -341,7 +341,6 @@ LockedFlake lockFlake(
debug("old lock file: %s", oldLockFile);
// FIXME: check whether all overrides are used.
std::map<InputPath, FlakeInput> overrides;
std::set<InputPath> overridesUsed, updatesUsed;

@@ -484,12 +483,12 @@ LockedFlake lockFlake(
} else if (auto follows = std::get_if<1>(&i.second)) {
if (! trustLock) {
// It is possible that the flake has changed,
// so we must confirm all the follows that are in the lockfile are also in the flake.
// so we must confirm all the follows that are in the lock file are also in the flake.
auto overridePath(inputPath);
overridePath.push_back(i.first);
auto o = overrides.find(overridePath);
// If the override disappeared, we have to refetch the flake,
// since some of the inputs may not be present in the lockfile.
// since some of the inputs may not be present in the lock file.
if (o == overrides.end()) {
mustRefetch = true;
// There's no point populating the rest of the fake inputs,

@@ -28,7 +28,7 @@ typedef std::string FlakeId;
* object that fetcher generates (usually via
* FlakeRef::fromAttrs(attrs) or parseFlakeRef(url) calls).
*
* The actual fetch not have been performed yet (i.e. a FlakeRef may
* The actual fetch may not have been performed yet (i.e. a FlakeRef may
* be lazy), but the fetcher can be invoked at any time via the
* FlakeRef to ensure the store is populated with this input.
*/
@@ -36,7 +36,7 @@ LockedNode::LockedNode(const nlohmann::json & json)
, isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
{
if (!lockedRef.input.isLocked())
throw Error("lockfile contains mutable lock '%s'",
throw Error("lock file contains mutable lock '%s'",
fetchers::attrsToJSON(lockedRef.input.toAttrs()));
}

@@ -2454,8 +2454,8 @@ static RegisterPrimOp primop_intersectAttrs({
.name = "__intersectAttrs",
.args = {"e1", "e2"},
.doc = R"(
Return a set consisting of the attributes in the set *e2* that also
exist in the set *e1*.
Return a set consisting of the attributes in the set *e2* which have the
same name as some attribute in *e1*.
)",
.fun = prim_intersectAttrs,
});

@@ -3821,8 +3821,8 @@ static RegisterPrimOp primop_parseDrvName({
.args = {"s"},
.doc = R"(
Split the string *s* into a package name and version. The package
name is everything up to but not including the first dash followed
by a digit, and the version is everything following that dash. The
name is everything up to but not including the first dash not followed
by a letter, and the version is everything following that dash. The
result is returned in a set `{ name, version }`. Thus,
`builtins.parseDrvName "nix-0.12pre12876"` returns `{ name =
"nix"; version = "0.12pre12876"; }`.

@@ -10,7 +10,7 @@
namespace nix {
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context)
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore)
{
checkInterrupt();

@@ -32,7 +32,10 @@ void printValueAsJSON(EvalState & state, bool strict,
break;
case nPath:
out.write(state.copyPathToStore(context, v.path));
if (copyToStore)
out.write(state.copyPathToStore(context, v.path));
else
out.write(v.path);
break;
case nNull:

@@ -54,10 +57,10 @@ void printValueAsJSON(EvalState & state, bool strict,
for (auto & j : names) {
Attr & a(*v.attrs->find(state.symbols.create(j)));
auto placeholder(obj.placeholder(j));
printValueAsJSON(state, strict, *a.value, a.pos, placeholder, context);
printValueAsJSON(state, strict, *a.value, a.pos, placeholder, context, copyToStore);
}
} else
printValueAsJSON(state, strict, *i->value, i->pos, out, context);
printValueAsJSON(state, strict, *i->value, i->pos, out, context, copyToStore);
break;
}

@@ -65,13 +68,13 @@ void printValueAsJSON(EvalState & state, bool strict,
auto list(out.list());
for (auto elem : v.listItems()) {
auto placeholder(list.placeholder());
printValueAsJSON(state, strict, *elem, pos, placeholder, context);
printValueAsJSON(state, strict, *elem, pos, placeholder, context, copyToStore);
}
break;
}
case nExternal:
v.external->printValueAsJSON(state, strict, out, context);
v.external->printValueAsJSON(state, strict, out, context, copyToStore);
break;
case nFloat:

@@ -91,14 +94,14 @@ void printValueAsJSON(EvalState & state, bool strict,
}
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context)
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore)
{
JSONPlaceholder out(str);
printValueAsJSON(state, strict, v, pos, out, context);
printValueAsJSON(state, strict, v, pos, out, context, copyToStore);
}
void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
JSONPlaceholder & out, PathSet & context) const
JSONPlaceholder & out, PathSet & context, bool copyToStore) const
{
state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType()));
}

@@ -11,9 +11,9 @@ namespace nix {
class JSONPlaceholder;
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context);
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore = true);
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context);
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true);
}
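(Illustration, not part of the commit.) The new `copyToStore` argument defaults to `true`, so existing callers keep their behaviour; passing `false` writes path values as-is instead of importing them into the store. A minimal sketch of a hypothetical call site, based only on the declarations above (the helper name and the way the evaluator objects are obtained are assumptions):

    #include "value-to-json.hh"

    #include <sstream>

    // Hypothetical helper: render an evaluated value as JSON without copying
    // any referenced paths into the store (copyToStore = false).
    std::string printJSONWithoutCopy(nix::EvalState & state, nix::Value & v, nix::PosIdx pos)
    {
        std::ostringstream str;
        nix::PathSet context;
        nix::printValueAsJSON(state, /* strict */ true, v, pos, str, context, /* copyToStore */ false);
        return str.str();
    }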
@@ -99,7 +99,7 @@ class ExternalValueBase
/* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
virtual void printValueAsJSON(EvalState & state, bool strict,
JSONPlaceholder & out, PathSet & context) const;
JSONPlaceholder & out, PathSet & context, bool copyToStore = true) const;
/* Print the value as XML. Defaults to unevaluated */
virtual void printValueAsXML(EvalState & state, bool strict, bool location,

@@ -370,7 +370,7 @@ struct GitInputScheme : InputScheme
auto gitDir = ".git";
runProgram("git", true,
{ "-C", *sourcePath, "--git-dir", gitDir, "add", "--force", "--intent-to-add", "--", std::string(file) });
{ "-C", *sourcePath, "--git-dir", gitDir, "add", "--intent-to-add", "--", std::string(file) });
if (commitMsg)
runProgram("git", true,

@@ -485,6 +485,10 @@ struct GitInputScheme : InputScheme
}
input.attrs.insert_or_assign("ref", *head);
unlockedAttrs.insert_or_assign("ref", *head);
} else {
if (!input.getRev()) {
unlockedAttrs.insert_or_assign("ref", input.getRef().value());
}
}
if (auto res = getCache()->lookup(store, unlockedAttrs)) {

@@ -32,6 +32,7 @@ MixCommonArgs::MixCommonArgs(const std::string & programName)
addFlag({
.longName = "option",
.description = "Set the Nix configuration setting *name* to *value* (overriding `nix.conf`).",
.category = miscCategory,
.labels = {"name", "value"},
.handler = {[](std::string name, std::string value) {
try {

@@ -6,6 +6,7 @@ namespace nix {
//static constexpr auto commonArgsCategory = "Miscellaneous common options";
static constexpr auto loggingCategory = "Logging-related options";
static constexpr auto miscCategory = "Miscellaneous global options";
class MixCommonArgs : public virtual Args
{

@@ -30,8 +30,11 @@ Logger * makeDefaultLogger() {
return makeJSONLogger(*makeSimpleLogger(true));
case LogFormat::bar:
return makeProgressBar();
case LogFormat::barWithLogs:
return makeProgressBar(true);
case LogFormat::barWithLogs: {
auto logger = makeProgressBar();
logger->setPrintBuildLogs(true);
return logger;
}
default:
abort();
}

@@ -8,6 +8,7 @@
#include <map>
#include <thread>
#include <iostream>
#include <chrono>
namespace nix {

@@ -48,6 +49,7 @@ private:
bool visible = true;
ActivityId parent;
std::optional<std::string> name;
std::chrono::time_point<std::chrono::steady_clock> startTime;
};
struct ActivitiesByType

@@ -79,22 +81,22 @@ private:
std::condition_variable quitCV, updateCV;
bool printBuildLogs;
bool printBuildLogs = false;
bool isTTY;
public:
ProgressBar(bool printBuildLogs, bool isTTY)
: printBuildLogs(printBuildLogs)
, isTTY(isTTY)
ProgressBar(bool isTTY)
: isTTY(isTTY)
{
state_.lock()->active = isTTY;
updateThread = std::thread([&]() {
auto state(state_.lock());
auto nextWakeup = std::chrono::milliseconds::max();
while (state->active) {
if (!state->haveUpdate)
state.wait(updateCV);
draw(*state);
state.wait_for(updateCV, nextWakeup);
nextWakeup = draw(*state);
state.wait_for(quitCV, std::chrono::milliseconds(50));
}
});

@@ -118,7 +120,8 @@ public:
updateThread.join();
}
bool isVerbose() override {
bool isVerbose() override
{
return printBuildLogs;
}

@@ -159,11 +162,13 @@ public:
if (lvl <= verbosity && !s.empty() && type != actBuildWaiting)
log(*state, lvl, s + "...");
state->activities.emplace_back(ActInfo());
state->activities.emplace_back(ActInfo {
.s = s,
.type = type,
.parent = parent,
.startTime = std::chrono::steady_clock::now()
});
auto i = std::prev(state->activities.end());
i->s = s;
i->type = type;
i->parent = parent;
state->its.emplace(act, i);
state->activitiesByType[type].its.emplace(act, i);

@@ -327,10 +332,12 @@ public:
updateCV.notify_one();
}
void draw(State & state)
std::chrono::milliseconds draw(State & state)
{
auto nextWakeup = std::chrono::milliseconds::max();
state.haveUpdate = false;
if (!state.active) return;
if (!state.active) return nextWakeup;
std::string line;

@@ -341,12 +348,25 @@ public:
line += "]";
}
auto now = std::chrono::steady_clock::now();
if (!state.activities.empty()) {
if (!status.empty()) line += " ";
auto i = state.activities.rbegin();
while (i != state.activities.rend() && (!i->visible || (i->s.empty() && i->lastLine.empty())))
while (i != state.activities.rend()) {
if (i->visible && (!i->s.empty() || !i->lastLine.empty())) {
/* Don't show activities until some time has
passed, to avoid displaying very short
activities. */
auto delay = std::chrono::milliseconds(10);
if (i->startTime + delay < now)
break;
else
nextWakeup = std::min(nextWakeup, std::chrono::duration_cast<std::chrono::milliseconds>(delay - (now - i->startTime)));
}
++i;
}
if (i != state.activities.rend()) {
line += i->s;

@@ -366,6 +386,8 @@ public:
if (width <= 0) width = std::numeric_limits<decltype(width)>::max();
writeToStderr("\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K");
return nextWakeup;
}
std::string getStatus(State & state)
|
|||
draw(*state);
|
||||
return s[0];
|
||||
}
|
||||
|
||||
void setPrintBuildLogs(bool printBuildLogs) override
|
||||
{
|
||||
this->printBuildLogs = printBuildLogs;
|
||||
}
|
||||
};
|
||||
|
||||
Logger * makeProgressBar(bool printBuildLogs)
|
||||
Logger * makeProgressBar()
|
||||
{
|
||||
return new ProgressBar(
|
||||
printBuildLogs,
|
||||
shouldANSI()
|
||||
);
|
||||
return new ProgressBar(shouldANSI());
|
||||
}
|
||||
|
||||
void startProgressBar(bool printBuildLogs)
|
||||
void startProgressBar()
|
||||
{
|
||||
logger = makeProgressBar(printBuildLogs);
|
||||
logger = makeProgressBar();
|
||||
}
|
||||
|
||||
void stopProgressBar()
|
||||
|
|
|
@ -4,9 +4,9 @@
|
|||
|
||||
namespace nix {
|
||||
|
||||
Logger * makeProgressBar(bool printBuildLogs = false);
|
||||
Logger * makeProgressBar();
|
||||
|
||||
void startProgressBar(bool printBuildLogs = false);
|
||||
void startProgressBar();
|
||||
|
||||
void stopProgressBar();
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
#include "gc-store.hh"
|
||||
#include "util.hh"
|
||||
#include "loggers.hh"
|
||||
#include "progress-bar.hh"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cctype>
|
||||
|
@ -181,8 +182,9 @@ void initNix()
|
|||
/* Reset SIGCHLD to its default. */
|
||||
struct sigaction act;
|
||||
sigemptyset(&act.sa_mask);
|
||||
act.sa_handler = SIG_DFL;
|
||||
act.sa_flags = 0;
|
||||
|
||||
act.sa_handler = SIG_DFL;
|
||||
if (sigaction(SIGCHLD, &act, 0))
|
||||
throw SysError("resetting SIGCHLD");
|
||||
|
||||
|
@ -194,9 +196,20 @@ void initNix()
|
|||
/* HACK: on darwin, we need can’t use sigprocmask with SIGWINCH.
|
||||
* Instead, add a dummy sigaction handler, and signalHandlerThread
|
||||
* can handle the rest. */
|
||||
struct sigaction sa;
|
||||
sa.sa_handler = sigHandler;
|
||||
if (sigaction(SIGWINCH, &sa, 0)) throw SysError("handling SIGWINCH");
|
||||
act.sa_handler = sigHandler;
|
||||
if (sigaction(SIGWINCH, &act, 0)) throw SysError("handling SIGWINCH");
|
||||
|
||||
/* Disable SA_RESTART for interrupts, so that system calls on this thread
|
||||
* error with EINTR like they do on Linux.
|
||||
* Most signals on BSD systems default to SA_RESTART on, but Nix
|
||||
* expects EINTR from syscalls to properly exit. */
|
||||
act.sa_handler = SIG_DFL;
|
||||
if (sigaction(SIGINT, &act, 0)) throw SysError("handling SIGINT");
|
||||
if (sigaction(SIGTERM, &act, 0)) throw SysError("handling SIGTERM");
|
||||
if (sigaction(SIGHUP, &act, 0)) throw SysError("handling SIGHUP");
|
||||
if (sigaction(SIGPIPE, &act, 0)) throw SysError("handling SIGPIPE");
|
||||
if (sigaction(SIGQUIT, &act, 0)) throw SysError("handling SIGQUIT");
|
||||
if (sigaction(SIGTRAP, &act, 0)) throw SysError("handling SIGTRAP");
|
||||
#endif
|
||||
|
||||
/* Register a SIGSEGV handler to detect stack overflows. */
|
||||
|
@ -410,6 +423,8 @@ RunPager::RunPager()
|
|||
if (!pager) pager = getenv("PAGER");
|
||||
if (pager && ((std::string) pager == "" || (std::string) pager == "cat")) return;
|
||||
|
||||
stopProgressBar();
|
||||
|
||||
Pipe toPager;
|
||||
toPager.create();
|
||||
|
||||
|
|
|
@ -113,5 +113,25 @@ struct PrintFreed
|
|||
/* Install a SIGSEGV handler to detect stack overflows. */
|
||||
void detectStackOverflow();
|
||||
|
||||
/* Pluggable behavior to run in case of a stack overflow.
|
||||
|
||||
Default value: defaultStackOverflowHandler.
|
||||
|
||||
This is called by the handler installed by detectStackOverflow().
|
||||
|
||||
This gives Nix library consumers a limit opportunity to report the error
|
||||
condition. The handler should exit the process.
|
||||
See defaultStackOverflowHandler() for a reference implementation.
|
||||
|
||||
NOTE: Use with diligence, because this runs in the signal handler, with very
|
||||
limited stack space and a potentially a corrupted heap, all while the failed
|
||||
thread is blocked indefinitely. All functions called must be reentrant. */
|
||||
extern std::function<void(siginfo_t * info, void * ctx)> stackOverflowHandler;
|
||||
|
||||
/* The default, robust implementation of stackOverflowHandler.
|
||||
|
||||
Prints an error message directly to stderr using a syscall instead of the
|
||||
logger. Exits the process immediately after. */
|
||||
void defaultStackOverflowHandler(siginfo_t * info, void * ctx);
|
||||
|
||||
}
|
||||
|
|
|
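(Illustration, not part of the commit.) The new `stackOverflowHandler` hook lets an embedding application decide what happens when the evaluator overflows its stack. A minimal sketch of how a library consumer might install a custom handler, using only the declarations above (the wrapper function and its message are hypothetical):

    #include "shared.hh"

    #include <csignal>
    #include <unistd.h>

    // Hypothetical embedder handler: write a note with a plain syscall
    // (signal-safe, no allocation), then fall back to the stock behaviour,
    // which exits the process.
    static void myStackOverflowHandler(siginfo_t * info, void * ctx)
    {
        const char msg[] = "embedding application: stack overflow in Nix evaluator\n";
        [[gnu::unused]] auto res = write(2, msg, sizeof(msg) - 1);
        nix::defaultStackOverflowHandler(info, ctx);
    }

    void installCustomStackOverflowHandler()
    {
        nix::detectStackOverflow();                         // install the SIGSEGV handler
        nix::stackOverflowHandler = myStackOverflowHandler; // override the default behaviour
    }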
@@ -1,4 +1,5 @@
#include "error.hh"
#include "shared.hh"
#include <cstring>
#include <cstddef>

@@ -29,9 +30,7 @@ static void sigsegvHandler(int signo, siginfo_t * info, void * ctx)
ptrdiff_t diff = (char *) info->si_addr - sp;
if (diff < 0) diff = -diff;
if (diff < 4096) {
char msg[] = "error: stack overflow (possible infinite recursion)\n";
[[gnu::unused]] auto res = write(2, msg, strlen(msg));
_exit(1); // maybe abort instead?
nix::stackOverflowHandler(info, ctx);
}
}

@@ -67,5 +66,12 @@ void detectStackOverflow()
#endif
}
std::function<void(siginfo_t * info, void * ctx)> stackOverflowHandler(defaultStackOverflowHandler);
void defaultStackOverflowHandler(siginfo_t * info, void * ctx) {
char msg[] = "error: stack overflow (possible infinite recursion)\n";
[[gnu::unused]] auto res = write(2, msg, strlen(msg));
_exit(1); // maybe abort instead?
}
}

@@ -331,6 +331,17 @@ bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath)
return fileExists(narInfoFileFor(storePath));
}
std::optional<StorePath> BinaryCacheStore::queryPathFromHashPart(const std::string & hashPart)
{
auto pseudoPath = StorePath(hashPart + "-" + MissingName);
try {
auto info = queryPathInfo(pseudoPath);
return info->path;
} catch (InvalidPath &) {
return std::nullopt;
}
}
void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
{
auto info = queryPathInfo(storePath).cast<const NarInfo>();

@@ -95,8 +95,7 @@ public:
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair, CheckSigsFlag checkSigs) override;

@@ -344,7 +344,7 @@ void DerivationGoal::gaveUpOnSubstitution()
for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs) {
/* Ensure that pure, non-fixed-output derivations don't
depend on impure derivations. */
if (drv->type().isPure() && !drv->type().isFixed()) {
if (settings.isExperimentalFeatureEnabled(Xp::ImpureDerivations) && drv->type().isPure() && !drv->type().isFixed()) {
auto inputDrv = worker.evalStore.readDerivation(i.first);
if (!inputDrv.type().isPure())
throw Error("pure derivation '%s' depends on impure derivation '%s'",

@@ -705,8 +705,7 @@ static void movePath(const Path & src, const Path & dst)
if (changePerm)
chmod_(src, st.st_mode | S_IWUSR);
if (rename(src.c_str(), dst.c_str()))
throw SysError("renaming '%1%' to '%2%'", src, dst);
renameFile(src, dst);
if (changePerm)
chmod_(dst, st.st_mode);

@@ -914,12 +913,6 @@ void DerivationGoal::buildDone()
outputPaths
);
if (buildMode == bmCheck) {
cleanupPostOutputsRegisteredModeCheck();
done(BuildResult::Built, std::move(builtOutputs));
return;
}
cleanupPostOutputsRegisteredModeNonCheck();
/* Repeat the build if necessary. */

@@ -16,11 +16,11 @@ HookInstance::HookInstance()
buildHookArgs.pop_front();
Strings args;
args.push_back(std::string(baseNameOf(buildHook)));
for (auto & arg : buildHookArgs)
args.push_back(arg);
args.push_back(std::string(baseNameOf(settings.buildHook.get())));
args.push_back(std::to_string(verbosity));
/* Create a pipe to get the output of the child. */

@@ -223,8 +223,7 @@ static void movePath(const Path & src, const Path & dst)
if (changePerm)
chmod_(src, st.st_mode | S_IWUSR);
if (rename(src.c_str(), dst.c_str()))
throw SysError("renaming '%1%' to '%2%'", src, dst);
renameFile(src, dst);
if (changePerm)
chmod_(dst, st.st_mode);

@@ -311,7 +310,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
if (buildMode != bmCheck && status.known->isValid()) continue;
auto p = worker.store.printStorePath(status.known->path);
if (pathExists(chrootRootDir + p))
rename((chrootRootDir + p).c_str(), p.c_str());
renameFile((chrootRootDir + p), p);
}
return diskFull;

@@ -845,18 +844,43 @@ void LocalDerivationGoal::startBuilder()
/* Some distros patch Linux to not allow unprivileged
* user namespaces. If we get EPERM or EINVAL, try
* without CLONE_NEWUSER and see if that works.
* Details: https://salsa.debian.org/kernel-team/linux/-/commit/d98e00eda6bea437e39b9e80444eee84a32438a6
*/
usingUserNamespace = false;
flags &= ~CLONE_NEWUSER;
child = clone(childEntry, stack + stackSize, flags, this);
}
/* Otherwise exit with EPERM so we can handle this in the
parent. This is only done when sandbox-fallback is set
to true (the default). */
if (child == -1 && (errno == EPERM || errno == EINVAL) && settings.sandboxFallback)
_exit(1);
if (child == -1) throw SysError("cloning builder process");
if (child == -1) {
switch(errno) {
case EPERM:
case EINVAL: {
int errno_ = errno;
if (!userNamespacesEnabled && errno==EPERM)
notice("user namespaces appear to be disabled; they are required for sandboxing; check /proc/sys/user/max_user_namespaces");
if (userNamespacesEnabled) {
Path procSysKernelUnprivilegedUsernsClone = "/proc/sys/kernel/unprivileged_userns_clone";
if (pathExists(procSysKernelUnprivilegedUsernsClone)
&& trim(readFile(procSysKernelUnprivilegedUsernsClone)) == "0") {
notice("user namespaces appear to be disabled; they are required for sandboxing; check /proc/sys/kernel/unprivileged_userns_clone");
}
}
Path procSelfNsUser = "/proc/self/ns/user";
if (!pathExists(procSelfNsUser))
notice("/proc/self/ns/user does not exist; your kernel was likely built without CONFIG_USER_NS=y, which is required for sandboxing");
/* Otherwise exit with EPERM so we can handle this in the
parent. This is only done when sandbox-fallback is set
to true (the default). */
if (settings.sandboxFallback)
_exit(1);
/* Mention sandbox-fallback in the error message so the user
knows that having it disabled contributed to the
unrecoverability of this failure */
throw SysError(errno_, "creating sandboxed builder process using clone(), without sandbox-fallback");
}
default:
throw SysError("creating sandboxed builder process using clone()");
}
}
writeFull(builderOut.writeSide.get(),
fmt("%d %d\n", usingUserNamespace, child));
_exit(0);
@@ -1570,6 +1594,8 @@ void LocalDerivationGoal::runChild()
/* Warning: in the child we should absolutely not make any SQLite
calls! */
bool sendException = true;
try { /* child */
commonChildInit(builderOut);

@@ -2026,6 +2052,8 @@ void LocalDerivationGoal::runChild()
/* Indicate that we managed to set up the build environment. */
writeFull(STDERR_FILENO, std::string("\2\n"));
sendException = false;
/* Execute the program. This should not return. */
if (drv->isBuiltin()) {
try {

@@ -2079,10 +2107,13 @@ void LocalDerivationGoal::runChild()
throw SysError("executing '%1%'", drv->builder);
} catch (Error & e) {
writeFull(STDERR_FILENO, "\1\n");
FdSink sink(STDERR_FILENO);
sink << e;
sink.flush();
if (sendException) {
writeFull(STDERR_FILENO, "\1\n");
FdSink sink(STDERR_FILENO);
sink << e;
sink.flush();
} else
std::cerr << e.msg();
_exit(1);
}
}

@@ -2350,10 +2381,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
if (*scratchPath != finalPath) {
// Also rewrite the output path
auto source = sinkToSource([&](Sink & nextSink) {
StringSink sink;
dumpPath(actualPath, sink);
RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink);
rsink2(sink.s);
dumpPath(actualPath, rsink2);
rsink2.flush();
});
Path tmpPath = actualPath + ".tmp";

@@ -2600,8 +2629,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
Path prev = path + checkSuffix;
deletePath(prev);
Path dst = path + checkSuffix;
if (rename(path.c_str(), dst.c_str()))
throw SysError("renaming '%s' to '%s'", path, dst);
renameFile(path, dst);
}
}

@@ -22,8 +22,7 @@ void builtinUnpackChannel(const BasicDerivation & drv)
auto entries = readDirectory(out);
if (entries.size() != 1)
throw Error("channel tarball '%s' contains more than one file", src);
if (rename((out + "/" + entries[0].name).c_str(), (out + "/" + channelName).c_str()) == -1)
throw SysError("renaming channel directory");
renameFile((out + "/" + entries[0].name), (out + "/" + channelName));
}
}

@@ -239,6 +239,8 @@ struct ClientSettings
else if (trusted
|| name == settings.buildTimeout.name
|| name == settings.buildRepeat.name
|| name == settings.maxSilentTime.name
|| name == settings.pollInterval.name
|| name == "connect-timeout"
|| (name == "builders" && value == ""))
settings.set(name, value);

@@ -308,6 +308,9 @@ struct curlFileTransfer : public FileTransfer
curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);
if (settings.downloadSpeed.get() > 0)
curl_easy_setopt(req, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t) (settings.downloadSpeed.get() * 1024));
if (request.head)
curl_easy_setopt(req, CURLOPT_NOBODY, 1);

@@ -319,7 +322,6 @@ struct curlFileTransfer : public FileTransfer
}
if (request.verifyTLS) {
debug("verify TLS: Nix CA file = '%s'", settings.caFile);
if (settings.caFile != "")
curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
} else {

@@ -39,9 +39,7 @@ static void makeSymlink(const Path & link, const Path & target)
createSymlink(target, tempLink);
/* Atomically replace the old one. */
if (rename(tempLink.c_str(), link.c_str()) == -1)
throw SysError("cannot rename '%1%' to '%2%'",
tempLink , link);
renameFile(tempLink, link);
}

@@ -621,6 +619,17 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
Path path = storeDir + "/" + std::string(baseName);
Path realPath = realStoreDir + "/" + std::string(baseName);
/* There may be temp directories in the store that are still in use
by another process. We need to be sure that we can acquire an
exclusive lock before deleting them. */
if (baseName.find("tmp-", 0) == 0) {
AutoCloseFD tmpDirFd = open(realPath.c_str(), O_RDONLY | O_DIRECTORY);
if (tmpDirFd.get() == -1 || !lockFile(tmpDirFd.get(), ltWrite, false)) {
debug("skipping locked tempdir '%s'", realPath);
return;
}
}
printInfo("deleting '%1%'", path);
results.paths.insert(path);

@@ -114,7 +114,13 @@ std::vector<Path> getUserConfigFiles()
unsigned int Settings::getDefaultCores()
{
return std::max(1U, std::thread::hardware_concurrency());
const unsigned int concurrency = std::max(1U, std::thread::hardware_concurrency());
const unsigned int maxCPU = getMaxCPU();
if (maxCPU > 0)
return maxCPU;
else
return concurrency;
}
StringSet Settings::getDefaultSystemFeatures()

@@ -148,13 +154,9 @@ StringSet Settings::getDefaultExtraPlatforms()
// machines. Note that we can’t force processes from executing
// x86_64 in aarch64 environments or vice versa since they can
// always exec with their own binary preferences.
if (pathExists("/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist") ||
pathExists("/System/Library/LaunchDaemons/com.apple.oahd.plist")) {
if (std::string{SYSTEM} == "x86_64-darwin")
extraPlatforms.insert("aarch64-darwin");
else if (std::string{SYSTEM} == "aarch64-darwin")
extraPlatforms.insert("x86_64-darwin");
}
if (std::string{SYSTEM} == "aarch64-darwin" &&
runProgram(RunOptions {.program = "arch", .args = {"-arch", "x86_64", "/usr/bin/true"}, .mergeStderrToStdout = true}).first == 0)
extraPlatforms.insert("x86_64-darwin");
#endif
return extraPlatforms;
@@ -560,9 +560,15 @@ public:
R"(
If set to `true` (the default), any non-content-addressed path added
or copied to the Nix store (e.g. when substituting from a binary
cache) must have a valid signature, that is, be signed using one of
the keys listed in `trusted-public-keys` or `secret-key-files`. Set
to `false` to disable signature checking.
cache) must have a signature by a trusted key. A trusted key is one
listed in `trusted-public-keys`, or a public key counterpart to a
private key stored in a file listed in `secret-key-files`.
Set to `false` to disable signature checking and trust all
non-content-addressed paths unconditionally.
(Content-addressed paths are inherently trustworthy and thus
unaffected by this configuration option.)
)"};
Setting<StringSet> extraPlatforms{

@@ -613,6 +619,14 @@ public:
are tried based on their Priority value, which each substituter can set
independently. Lower value means higher priority.
The default is `https://cache.nixos.org`, with a Priority of 40.
Nix will copy a store path from a remote store only if one
of the following is true:
- the store object is signed by one of the [`trusted-public-keys`](#conf-trusted-public-keys)
- the substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
- the [`require-sigs`](#conf-require-sigs) option has been set to `false`
- the store object is [output-addressed](glossary.md#gloss-output-addressed-store-object)
)",
{"binary-caches"}};

@@ -746,6 +760,13 @@ public:
/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`.
)"};
Setting<unsigned int> downloadSpeed {
this, 0, "download-speed",
R"(
Specify the maximum transfer rate in kilobytes per second you want
Nix to use for downloads.
)"};
Setting<std::string> netrcFile{
this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
R"(

@@ -57,8 +57,7 @@ protected:
AutoDelete del(tmp, false);
StreamToSourceAdapter source(istream);
writeFile(tmp, source);
if (rename(tmp.c_str(), path2.c_str()))
throw SysError("renaming '%1%' to '%2%'", tmp, path2);
renameFile(tmp, path2);
del.cancel();
}

@@ -158,7 +158,7 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
txn.commit();
}
writeFile(schemaPath, fmt("%d", nixCASchemaVersion));
writeFile(schemaPath, fmt("%d", nixCASchemaVersion), 0666, true);
lockFile(lockFd.get(), ltRead, true);
}
}

@@ -281,7 +281,7 @@ LocalStore::LocalStore(const Params & params)
else if (curSchema == 0) { /* new store */
curSchema = nixSchemaVersion;
openDB(*state, true);
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true);
}
else if (curSchema < nixSchemaVersion) {

@@ -329,7 +329,7 @@ LocalStore::LocalStore(const Params & params)
txn.commit();
}
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true);
lockFile(globalLock.get(), ltRead, true);
}

@@ -751,7 +751,7 @@ void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag check
if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
registerDrvOutput(info);
else
throw Error("cannot register realisation '%s' because it lacks a valid signature", info.outPath.to_string());
throw Error("cannot register realisation '%s' because it lacks a signature by a trusted key", info.outPath.to_string());
}
void LocalStore::registerDrvOutput(const Realisation & info)

@@ -1266,7 +1266,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs)
{
if (checkSigs && pathInfoIsUntrusted(info))
throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path));
throw Error("cannot add path '%s' because it lacks a signature by a trusted key", printStorePath(info.path));
addTempRoot(info.path);

@@ -1382,13 +1382,15 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
std::unique_ptr<AutoDelete> delTempDir;
Path tempPath;
Path tempDir;
AutoCloseFD tempDirFd;
if (!inMemory) {
/* Drain what we pulled so far, and then keep on pulling */
StringSource dumpSource { dump };
ChainSource bothSource { dumpSource, source };
auto tempDir = createTempDir(realStoreDir, "add");
std::tie(tempDir, tempDirFd) = createTempDirInStore();
delTempDir = std::make_unique<AutoDelete>(tempDir);
tempPath = tempDir + "/x";

@@ -1430,8 +1432,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
writeFile(realPath, dumpSource);
} else {
/* Move the temporary path we restored above. */
if (rename(tempPath.c_str(), realPath.c_str()))
throw Error("renaming '%s' to '%s'", tempPath, realPath);
moveFile(tempPath, realPath);
}
/* For computing the nar hash. In recursive SHA-256 mode, this

@@ -1508,18 +1509,24 @@ StorePath LocalStore::addTextToStore(
/* Create a temporary directory in the store that won't be
garbage-collected. */
Path LocalStore::createTempDirInStore()
garbage-collected until the returned FD is closed. */
std::pair<Path, AutoCloseFD> LocalStore::createTempDirInStore()
{
Path tmpDir;
Path tmpDirFn;
AutoCloseFD tmpDirFd;
bool lockedByUs = false;
do {
/* There is a slight possibility that `tmpDir' gets deleted by
the GC between createTempDir() and addTempRoot(), so repeat
until `tmpDir' exists. */
tmpDir = createTempDir(realStoreDir);
addTempRoot(parseStorePath(tmpDir));
} while (!pathExists(tmpDir));
return tmpDir;
the GC between createTempDir() and when we acquire a lock on it.
We'll repeat until 'tmpDir' exists and we've locked it. */
tmpDirFn = createTempDir(realStoreDir, "tmp");
tmpDirFd = open(tmpDirFn.c_str(), O_RDONLY | O_DIRECTORY);
if (tmpDirFd.get() < 0) {
continue;
}
lockedByUs = lockFile(tmpDirFd.get(), ltWrite, true);
} while (!pathExists(tmpDirFn) || !lockedByUs);
return {tmpDirFn, std::move(tmpDirFd)};
}
@@ -1942,8 +1949,7 @@ void LocalStore::addBuildLog(const StorePath & drvPath, std::string_view log)
writeFile(tmpFile, compress("bzip2", log));
if (rename(tmpFile.c_str(), logPath.c_str()) != 0)
throw SysError("renaming '%1%' to '%2%'", tmpFile, logPath);
renameFile(tmpFile, logPath);
}
std::optional<std::string> LocalStore::getVersion()

@@ -256,7 +256,7 @@ private:
void findRuntimeRoots(Roots & roots, bool censor);
Path createTempDirInStore();
std::pair<Path, AutoCloseFD> createTempDirInStore();
void checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv);

@@ -75,6 +75,9 @@ struct NarAccessor : public FSAccessor
createMember(path, {FSAccessor::Type::tRegular, false, 0, 0});
}
void closeRegularFile() override
{ }
void isExecutable() override
{
parents.top()->isExecutable = true;

@@ -229,7 +229,9 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
}
/* Atomically replace the old file with the new hard link. */
if (rename(tempLink.c_str(), path.c_str()) == -1) {
try {
renameFile(tempLink, path);
} catch (SysError & e) {
if (unlink(tempLink.c_str()) == -1)
printError("unable to unlink '%1%'", tempLink);
if (errno == EMLINK) {

@@ -240,7 +242,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
debug("'%s' has reached maximum number of links", linkPath);
return;
}
throw SysError("cannot rename '%1%' to '%2%'", tempLink, path);
throw;
}
stats.filesLinked++;

@@ -580,9 +580,6 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
try {
conn->to.written = 0;
conn->to.warn = true;
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });

@@ -591,7 +590,6 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
dumpString(contents, conn->to);
}
}
conn->to.warn = false;
conn.processStderr();
} catch (SysError & e) {
/* Daemon closed while we were sending the path. Probably OOM

@@ -673,6 +671,23 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
}
void RemoteStore::addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs)
{
auto source = sinkToSource([&](Sink & sink) {
sink << pathsToCopy.size();
for (auto & [pathInfo, pathSource] : pathsToCopy) {
pathInfo.write(sink, *this, 16);
pathSource->drainInto(sink);
}
});
addMultipleToStore(*source, repair, checkSigs);
}
void RemoteStore::addMultipleToStore(
Source & source,
RepairFlag repair,

@@ -88,6 +88,12 @@ public:
RepairFlag repair,
CheckSigsFlag checkSigs) override;
void addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs) override;
StorePath addTextToStore(
std::string_view name,
std::string_view s,

@@ -98,7 +98,9 @@
(allow file*
(literal "/private/var/select/sh"))
; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin.
; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin (and vice versa).
(allow file-read*
(subpath "/Library/Apple/usr/libexec/oah")
(subpath "/System/Library/Apple/usr/libexec/oah"))
(subpath "/System/Library/Apple/usr/libexec/oah")
(subpath "/System/Library/LaunchDaemons/com.apple.oahd.plist")
(subpath "/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist"))

@@ -14,3 +14,7 @@
; Allow DNS lookups.
(allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder")))
; Allow access to trustd.
(allow mach-lookup (global-name "com.apple.trustd"))
(allow mach-lookup (global-name "com.apple.trustd.agent"))

@@ -67,7 +67,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
if (fakeSSH) {
args = { "bash", "-c" };
} else {
args = { "ssh", host.c_str(), "-x", "-a" };
args = { "ssh", host.c_str(), "-x" };
addCommonSSHOpts(args);
if (socketPath != "")
args.insert(args.end(), {"-S", socketPath});
@@ -258,6 +258,84 @@ StorePath Store::addToStore(
return addToStoreFromDump(*source, name, method, hashAlgo, repair, references);
}
void Store::addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs)
{
std::atomic<size_t> nrDone{0};
std::atomic<size_t> nrFailed{0};
std::atomic<uint64_t> bytesExpected{0};
std::atomic<uint64_t> nrRunning{0};
using PathWithInfo = std::pair<ValidPathInfo, std::unique_ptr<Source>>;
std::map<StorePath, PathWithInfo *> infosMap;
StorePathSet storePathsToAdd;
for (auto & thingToAdd : pathsToCopy) {
infosMap.insert_or_assign(thingToAdd.first.path, &thingToAdd);
storePathsToAdd.insert(thingToAdd.first.path);
}
auto showProgress = [&]() {
act.progress(nrDone, pathsToCopy.size(), nrRunning, nrFailed);
};
ThreadPool pool;
processGraph<StorePath>(pool,
storePathsToAdd,
[&](const StorePath & path) {
auto & [info, _] = *infosMap.at(path);
if (isValidPath(info.path)) {
nrDone++;
showProgress();
return StorePathSet();
}
bytesExpected += info.narSize;
act.setExpected(actCopyPath, bytesExpected);
return info.references;
},
[&](const StorePath & path) {
checkInterrupt();
auto & [info_, source_] = *infosMap.at(path);
auto info = info_;
info.ultimate = false;
/* Make sure that the Source object is destroyed when
we're done. In particular, a SinkToSource object must
be destroyed to ensure that the destructors on its
stack frame are run; this includes
LegacySSHStore::narFromPath()'s connection lock. */
auto source = std::move(source_);
if (!isValidPath(info.path)) {
MaintainCount<decltype(nrRunning)> mc(nrRunning);
showProgress();
try {
addToStore(info, *source, repair, checkSigs);
} catch (Error & e) {
nrFailed++;
if (!settings.keepGoing)
throw e;
printMsg(lvlError, "could not copy %s: %s", printStorePath(path), e.what());
showProgress();
return;
}
}
nrDone++;
showProgress();
});
}
void Store::addMultipleToStore(
Source & source,
RepairFlag repair,
@ -992,113 +1070,61 @@ std::map<StorePath, StorePath> copyPaths(
|
|||
for (auto & path : storePaths)
|
||||
if (!valid.count(path)) missing.insert(path);
|
||||
|
||||
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
|
||||
|
||||
// In the general case, `addMultipleToStore` requires a sorted list of
|
||||
// store paths to add, so sort them right now
|
||||
auto sortedMissing = srcStore.topoSortPaths(missing);
|
||||
std::reverse(sortedMissing.begin(), sortedMissing.end());
|
||||
|
||||
std::map<StorePath, StorePath> pathsMap;
|
||||
for (auto & path : storePaths)
|
||||
pathsMap.insert_or_assign(path, path);
|
||||
|
||||
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
|
||||
Store::PathsSource pathsToCopy;
|
||||
|
||||
auto sorted = srcStore.topoSortPaths(missing);
|
||||
std::reverse(sorted.begin(), sorted.end());
|
||||
auto computeStorePathForDst = [&](const ValidPathInfo & currentPathInfo) -> StorePath {
|
||||
auto storePathForSrc = currentPathInfo.path;
|
||||
auto storePathForDst = storePathForSrc;
|
||||
if (currentPathInfo.ca && currentPathInfo.references.empty()) {
|
||||
storePathForDst = dstStore.makeFixedOutputPathFromCA(storePathForSrc.name(), *currentPathInfo.ca);
|
||||
if (dstStore.storeDir == srcStore.storeDir)
|
||||
assert(storePathForDst == storePathForSrc);
|
||||
if (storePathForDst != storePathForSrc)
|
||||
debug("replaced path '%s' to '%s' for substituter '%s'",
|
||||
srcStore.printStorePath(storePathForSrc),
|
||||
dstStore.printStorePath(storePathForDst),
|
||||
dstStore.getUri());
|
||||
}
|
||||
return storePathForDst;
|
||||
};
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
sink << sorted.size();
|
||||
for (auto & storePath : sorted) {
|
||||
for (auto & missingPath : sortedMissing) {
|
||||
auto info = srcStore.queryPathInfo(missingPath);
|
||||
|
||||
auto storePathForDst = computeStorePathForDst(*info);
|
||||
pathsMap.insert_or_assign(missingPath, storePathForDst);
|
||||
|
||||
ValidPathInfo infoForDst = *info;
|
||||
infoForDst.path = storePathForDst;
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
// We can reasonably assume that the copy will happen whenever we
|
||||
// read the path, so log something about that at that point
|
||||
auto srcUri = srcStore.getUri();
|
||||
auto dstUri = dstStore.getUri();
|
||||
auto storePathS = srcStore.printStorePath(storePath);
|
||||
auto storePathS = srcStore.printStorePath(missingPath);
|
||||
Activity act(*logger, lvlInfo, actCopyPath,
|
||||
makeCopyPathMessage(srcUri, dstUri, storePathS),
|
||||
{storePathS, srcUri, dstUri});
|
||||
PushActivity pact(act.id);
|
||||
|
||||
auto info = srcStore.queryPathInfo(storePath);
|
||||
info->write(sink, srcStore, 16);
|
||||
srcStore.narFromPath(storePath, sink);
|
||||
}
|
||||
});
|
||||
|
||||
dstStore.addMultipleToStore(*source, repair, checkSigs);
|
||||
|
||||
#if 0
|
||||
std::atomic<size_t> nrDone{0};
|
||||
std::atomic<size_t> nrFailed{0};
|
||||
std::atomic<uint64_t> bytesExpected{0};
|
||||
std::atomic<uint64_t> nrRunning{0};
|
||||
|
||||
auto showProgress = [&]() {
|
||||
act.progress(nrDone, missing.size(), nrRunning, nrFailed);
|
||||
};
|
||||
|
||||
ThreadPool pool;
|
||||
|
||||
processGraph<StorePath>(pool,
|
||||
StorePathSet(missing.begin(), missing.end()),
|
||||
|
||||
[&](const StorePath & storePath) {
|
||||
auto info = srcStore.queryPathInfo(storePath);
|
||||
auto storePathForDst = storePath;
|
||||
if (info->ca && info->references.empty()) {
|
||||
storePathForDst = dstStore.makeFixedOutputPathFromCA(storePath.name(), *info->ca);
|
||||
if (dstStore.storeDir == srcStore.storeDir)
|
||||
assert(storePathForDst == storePath);
|
||||
if (storePathForDst != storePath)
|
||||
debug("replaced path '%s' to '%s' for substituter '%s'",
|
||||
srcStore.printStorePath(storePath),
|
||||
dstStore.printStorePath(storePathForDst),
|
||||
dstStore.getUri());
|
||||
}
|
||||
pathsMap.insert_or_assign(storePath, storePathForDst);
|
||||
|
||||
if (dstStore.isValidPath(storePath)) {
|
||||
nrDone++;
|
||||
showProgress();
|
||||
return StorePathSet();
|
||||
}
|
||||
|
||||
bytesExpected += info->narSize;
|
||||
act.setExpected(actCopyPath, bytesExpected);
|
||||
|
||||
return info->references;
|
||||
},
|
||||
|
||||
[&](const StorePath & storePath) {
|
||||
checkInterrupt();
|
||||
|
||||
auto info = srcStore.queryPathInfo(storePath);
|
||||
|
||||
auto storePathForDst = storePath;
|
||||
if (info->ca && info->references.empty()) {
|
||||
storePathForDst = dstStore.makeFixedOutputPathFromCA(storePath.name(), *info->ca);
|
||||
if (dstStore.storeDir == srcStore.storeDir)
|
||||
assert(storePathForDst == storePath);
|
||||
if (storePathForDst != storePath)
|
||||
debug("replaced path '%s' to '%s' for substituter '%s'",
|
||||
srcStore.printStorePath(storePath),
|
||||
dstStore.printStorePath(storePathForDst),
|
||||
dstStore.getUri());
|
||||
}
|
||||
pathsMap.insert_or_assign(storePath, storePathForDst);
|
||||
|
||||
if (!dstStore.isValidPath(storePathForDst)) {
|
||||
MaintainCount<decltype(nrRunning)> mc(nrRunning);
|
||||
showProgress();
|
||||
try {
|
||||
copyStorePath(srcStore, dstStore, storePath, repair, checkSigs);
|
||||
} catch (Error &e) {
|
||||
nrFailed++;
|
||||
if (!settings.keepGoing)
|
||||
throw e;
|
||||
printMsg(lvlError, "could not copy %s: %s", dstStore.printStorePath(storePath), e.what());
|
||||
showProgress();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
nrDone++;
|
||||
showProgress();
|
||||
srcStore.narFromPath(missingPath, sink);
|
||||
});
|
||||
#endif
|
||||
pathsToCopy.push_back(std::pair{infoForDst, std::move(source)});
|
||||
}
|
||||
|
||||
dstStore.addMultipleToStore(pathsToCopy, act, repair, checkSigs);
|
||||
|
||||
return pathsMap;
|
||||
}
|
||||
|
@@ -1321,7 +1347,12 @@ std::shared_ptr<Store> openFromNonUri(const std::string & uri, const Store::Para
    else if (pathExists(settings.nixDaemonSocketFile))
        return std::make_shared<UDSRemoteStore>(params);
    #if __linux__
    else if (!pathExists(stateDir) && params.empty() && getuid() != 0 && !getEnv("NIX_STORE_DIR").has_value()) {
    else if (!pathExists(stateDir)
        && params.empty()
        && getuid() != 0
        && !getEnv("NIX_STORE_DIR").has_value()
        && !getEnv("NIX_STATE_DIR").has_value())
    {
        /* If /nix doesn't exist, there is no daemon socket, and
           we're not root, then automatically set up a chroot
           store in ~/.local/share/nix/root. */

@@ -1332,9 +1363,9 @@ std::shared_ptr<Store> openFromNonUri(const std::string & uri, const Store::Para
        } catch (Error & e) {
            return std::make_shared<LocalStore>(params);
        }
        warn("'/nix' does not exist, so Nix will use '%s' as a chroot store", chrootStore);
        warn("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore);
    } else
        debug("'/nix' does not exist, so Nix will use '%s' as a chroot store", chrootStore);
        debug("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore);
    Store::Params params2;
    params2["root"] = chrootStore;
    return std::make_shared<LocalStore>(params2);

@@ -1,5 +1,6 @@
#pragma once

#include "nar-info.hh"
#include "realisation.hh"
#include "path.hh"
#include "derived-path.hh"

@@ -359,12 +360,22 @@ public:
    virtual void addToStore(const ValidPathInfo & info, Source & narSource,
        RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) = 0;

    // A list of paths infos along with a source providing the content of the
    // associated store path
    using PathsSource = std::vector<std::pair<ValidPathInfo, std::unique_ptr<Source>>>;

    /* Import multiple paths into the store. */
    virtual void addMultipleToStore(
        Source & source,
        RepairFlag repair = NoRepair,
        CheckSigsFlag checkSigs = CheckSigs);

    virtual void addMultipleToStore(
        PathsSource & pathsToCopy,
        Activity & act,
        RepairFlag repair = NoRepair,
        CheckSigsFlag checkSigs = CheckSigs);

    /* Copy the contents of a path to the store and register the
       validity the resulting path. The resulting path is returned.
       The function object `filter' can be used to exclude files (see

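To make the new overload concrete, here is a minimal, hedged sketch of how a caller might assemble a `PathsSource` and hand it to `addMultipleToStore()`, mirroring the rewritten `copyPaths()` earlier in this diff. The names `srcStore`, `dstStore` and `infos` are assumptions for the example, not part of the patch.

```cpp
// Sketch only: build (ValidPathInfo, Source) pairs whose NARs are streamed
// lazily, then let the destination store pull them all in one call.
Store::PathsSource pathsToCopy;
for (auto & info : infos) {
    auto source = sinkToSource([&srcStore, info](Sink & sink) {
        // The NAR is only produced when the destination actually reads it.
        srcStore.narFromPath(info.path, sink);
    });
    pathsToCopy.push_back(std::pair{info, std::move(source)});
}

Activity act(*logger, lvlInfo, actCopyPaths, "copying example paths");
dstStore.addMultipleToStore(pathsToCopy, act, NoRepair, CheckSigs);
```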
@ -234,6 +234,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path)
|
|||
|
||||
else if (s == "contents" && type == tpRegular) {
|
||||
parseContents(sink, source, path);
|
||||
sink.closeRegularFile();
|
||||
}
|
||||
|
||||
else if (s == "executable" && type == tpRegular) {
|
||||
|
@ -324,6 +325,12 @@ struct RestoreSink : ParseSink
|
|||
if (!fd) throw SysError("creating file '%1%'", p);
|
||||
}
|
||||
|
||||
void closeRegularFile() override
|
||||
{
|
||||
/* Call close explicitly to make sure the error is checked */
|
||||
fd.close();
|
||||
}
|
||||
|
||||
void isExecutable() override
|
||||
{
|
||||
struct stat st;
|
||||
|
|
|
@ -60,6 +60,7 @@ struct ParseSink
|
|||
virtual void createDirectory(const Path & path) { };
|
||||
|
||||
virtual void createRegularFile(const Path & path) { };
|
||||
virtual void closeRegularFile() { };
|
||||
virtual void isExecutable() { };
|
||||
virtual void preallocateContents(uint64_t size) { };
|
||||
virtual void receiveContents(std::string_view data) { };
|
||||
|
|
|
@ -216,7 +216,7 @@ nlohmann::json Args::toJSON()
|
|||
if (flag->shortName)
|
||||
j["shortName"] = std::string(1, flag->shortName);
|
||||
if (flag->description != "")
|
||||
j["description"] = flag->description;
|
||||
j["description"] = trim(flag->description);
|
||||
j["category"] = flag->category;
|
||||
if (flag->handler.arity != ArityAny)
|
||||
j["arity"] = flag->handler.arity;
|
||||
|
@ -237,7 +237,7 @@ nlohmann::json Args::toJSON()
|
|||
}
|
||||
|
||||
auto res = nlohmann::json::object();
|
||||
res["description"] = description();
|
||||
res["description"] = trim(description());
|
||||
res["flags"] = std::move(flags);
|
||||
res["args"] = std::move(args);
|
||||
auto s = doc();
|
||||
|
@ -379,7 +379,7 @@ nlohmann::json MultiCommand::toJSON()
|
|||
auto j = command->toJSON();
|
||||
auto cat = nlohmann::json::object();
|
||||
cat["id"] = command->category();
|
||||
cat["description"] = categories[command->category()];
|
||||
cat["description"] = trim(categories[command->category()]);
|
||||
j["category"] = std::move(cat);
|
||||
cmds[name] = std::move(j);
|
||||
}
|
||||
|
|
|
@@ -204,13 +204,19 @@ public:
    int errNo;

    template<typename... Args>
    SysError(const Args & ... args)
    SysError(int errNo_, const Args & ... args)
        : Error("")
    {
        errNo = errno;
        errNo = errNo_;
        auto hf = hintfmt(args...);
        err.msg = hintfmt("%1%: %2%", normaltxt(hf.str()), strerror(errNo));
    }

    template<typename... Args>
    SysError(const Args & ... args)
        : SysError(errno, args ...)
    {
    }
};

}

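A hedged illustration of the new explicit-errno constructor; `mkDirOrThrow` is a made-up helper, and the `Path`/`SysError` types and includes are assumed from the surrounding codebase:

```cpp
// Sketch only: forward a saved errno instead of relying on the global
// `errno` still being intact when the exception object is built.
static void mkDirOrThrow(const Path & path, mode_t mode)
{
    if (mkdir(path.c_str(), mode) == -1) {
        int savedErrno = errno;   // capture before any other call can clobber it
        throw SysError(savedErrno, "creating directory '%1%'", path);
    }
}
```

Callers can still branch on the stored code, e.g. `if (e.errNo == EEXIST)`, exactly as `replaceSymlink()` in the new `filesystem.cc` below does.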
172
src/libutil/filesystem.cc
Normal file
|
@ -0,0 +1,172 @@
|
|||
#include <sys/time.h>
|
||||
#include <filesystem>
|
||||
|
||||
#include "finally.hh"
|
||||
#include "util.hh"
|
||||
#include "types.hh"
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace nix {
|
||||
|
||||
static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
|
||||
int & counter)
|
||||
{
|
||||
tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
|
||||
if (includePid)
|
||||
return (format("%1%/%2%-%3%-%4%") % tmpRoot % prefix % getpid() % counter++).str();
|
||||
else
|
||||
return (format("%1%/%2%-%3%") % tmpRoot % prefix % counter++).str();
|
||||
}
|
||||
|
||||
Path createTempDir(const Path & tmpRoot, const Path & prefix,
|
||||
bool includePid, bool useGlobalCounter, mode_t mode)
|
||||
{
|
||||
static int globalCounter = 0;
|
||||
int localCounter = 0;
|
||||
int & counter(useGlobalCounter ? globalCounter : localCounter);
|
||||
|
||||
while (1) {
|
||||
checkInterrupt();
|
||||
Path tmpDir = tempName(tmpRoot, prefix, includePid, counter);
|
||||
if (mkdir(tmpDir.c_str(), mode) == 0) {
|
||||
#if __FreeBSD__
|
||||
/* Explicitly set the group of the directory. This is to
|
||||
work around around problems caused by BSD's group
|
||||
ownership semantics (directories inherit the group of
|
||||
the parent). For instance, the group of /tmp on
|
||||
FreeBSD is "wheel", so all directories created in /tmp
|
||||
will be owned by "wheel"; but if the user is not in
|
||||
"wheel", then "tar" will fail to unpack archives that
|
||||
have the setgid bit set on directories. */
|
||||
if (chown(tmpDir.c_str(), (uid_t) -1, getegid()) != 0)
|
||||
throw SysError("setting group of directory '%1%'", tmpDir);
|
||||
#endif
|
||||
return tmpDir;
|
||||
}
|
||||
if (errno != EEXIST)
|
||||
throw SysError("creating directory '%1%'", tmpDir);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
std::pair<AutoCloseFD, Path> createTempFile(const Path & prefix)
|
||||
{
|
||||
Path tmpl(getEnv("TMPDIR").value_or("/tmp") + "/" + prefix + ".XXXXXX");
|
||||
// Strictly speaking, this is UB, but who cares...
|
||||
// FIXME: use O_TMPFILE.
|
||||
AutoCloseFD fd(mkstemp((char *) tmpl.c_str()));
|
||||
if (!fd)
|
||||
throw SysError("creating temporary file '%s'", tmpl);
|
||||
closeOnExec(fd.get());
|
||||
return {std::move(fd), tmpl};
|
||||
}
|
||||
|
||||
void createSymlink(const Path & target, const Path & link,
|
||||
std::optional<time_t> mtime)
|
||||
{
|
||||
if (symlink(target.c_str(), link.c_str()))
|
||||
throw SysError("creating symlink from '%1%' to '%2%'", link, target);
|
||||
if (mtime) {
|
||||
struct timeval times[2];
|
||||
times[0].tv_sec = *mtime;
|
||||
times[0].tv_usec = 0;
|
||||
times[1].tv_sec = *mtime;
|
||||
times[1].tv_usec = 0;
|
||||
if (lutimes(link.c_str(), times))
|
||||
throw SysError("setting time of symlink '%s'", link);
|
||||
}
|
||||
}
|
||||
|
||||
void replaceSymlink(const Path & target, const Path & link,
|
||||
std::optional<time_t> mtime)
|
||||
{
|
||||
for (unsigned int n = 0; true; n++) {
|
||||
Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link)));
|
||||
|
||||
try {
|
||||
createSymlink(target, tmp, mtime);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == EEXIST) continue;
|
||||
throw;
|
||||
}
|
||||
|
||||
renameFile(tmp, link);
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void setWriteTime(const fs::path & p, const struct stat & st)
|
||||
{
|
||||
struct timeval times[2];
|
||||
times[0] = {
|
||||
.tv_sec = st.st_atime,
|
||||
.tv_usec = 0,
|
||||
};
|
||||
times[1] = {
|
||||
.tv_sec = st.st_mtime,
|
||||
.tv_usec = 0,
|
||||
};
|
||||
if (lutimes(p.c_str(), times) != 0)
|
||||
throw SysError("changing modification time of '%s'", p);
|
||||
}
|
||||
|
||||
void copy(const fs::directory_entry & from, const fs::path & to, bool andDelete)
|
||||
{
|
||||
// TODO: Rewrite the `is_*` to use `symlink_status()`
|
||||
auto statOfFrom = lstat(from.path().c_str());
|
||||
auto fromStatus = from.symlink_status();
|
||||
|
||||
// Mark the directory as writable so that we can delete its children
|
||||
if (andDelete && fs::is_directory(fromStatus)) {
|
||||
fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow);
|
||||
}
|
||||
|
||||
|
||||
if (fs::is_symlink(fromStatus) || fs::is_regular_file(fromStatus)) {
|
||||
fs::copy(from.path(), to, fs::copy_options::copy_symlinks | fs::copy_options::overwrite_existing);
|
||||
} else if (fs::is_directory(fromStatus)) {
|
||||
fs::create_directory(to);
|
||||
for (auto & entry : fs::directory_iterator(from.path())) {
|
||||
copy(entry, to / entry.path().filename(), andDelete);
|
||||
}
|
||||
} else {
|
||||
throw Error("file '%s' has an unsupported type", from.path());
|
||||
}
|
||||
|
||||
setWriteTime(to, statOfFrom);
|
||||
if (andDelete) {
|
||||
if (!fs::is_symlink(fromStatus))
|
||||
fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow);
|
||||
fs::remove(from.path());
|
||||
}
|
||||
}
|
||||
|
||||
void renameFile(const Path & oldName, const Path & newName)
|
||||
{
|
||||
fs::rename(oldName, newName);
|
||||
}
|
||||
|
||||
void moveFile(const Path & oldName, const Path & newName)
|
||||
{
|
||||
try {
|
||||
renameFile(oldName, newName);
|
||||
} catch (fs::filesystem_error & e) {
|
||||
auto oldPath = fs::path(oldName);
|
||||
auto newPath = fs::path(newName);
|
||||
// For the move to be as atomic as possible, copy to a temporary
|
||||
// directory
|
||||
fs::path temp = createTempDir(newPath.parent_path(), "rename-tmp");
|
||||
Finally removeTemp = [&]() { fs::remove(temp); };
|
||||
auto tempCopyTarget = temp / "copy-target";
|
||||
if (e.code().value() == EXDEV) {
|
||||
fs::remove(newPath);
|
||||
warn("Can’t rename %s as %s, copying instead", oldName, newName);
|
||||
copy(fs::directory_entry(oldPath), tempCopyTarget, true);
|
||||
renameFile(tempCopyTarget, newPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -6,7 +6,8 @@
|
|||
|
||||
namespace nix {
|
||||
|
||||
void toJSON(std::ostream & str, const char * start, const char * end)
|
||||
template<>
|
||||
void toJSON<std::string_view>(std::ostream & str, const std::string_view & s)
|
||||
{
|
||||
constexpr size_t BUF_SIZE = 4096;
|
||||
char buf[BUF_SIZE + 7]; // BUF_SIZE + largest single sequence of puts
|
||||
|
@ -21,7 +22,7 @@ void toJSON(std::ostream & str, const char * start, const char * end)
|
|||
};
|
||||
|
||||
put('"');
|
||||
for (auto i = start; i != end; i++) {
|
||||
for (auto i = s.begin(); i != s.end(); i++) {
|
||||
if (bufPos >= BUF_SIZE) flush();
|
||||
if (*i == '\"' || *i == '\\') { put('\\'); put(*i); }
|
||||
else if (*i == '\n') { put('\\'); put('n'); }
|
||||
|
@ -44,7 +45,7 @@ void toJSON(std::ostream & str, const char * start, const char * end)
|
|||
|
||||
void toJSON(std::ostream & str, const char * s)
|
||||
{
|
||||
if (!s) str << "null"; else toJSON(str, s, s + strlen(s));
|
||||
if (!s) str << "null"; else toJSON(str, std::string_view(s));
|
||||
}
|
||||
|
||||
template<> void toJSON<int>(std::ostream & str, const int & n) { str << n; }
|
||||
|
@ -55,11 +56,7 @@ template<> void toJSON<long long>(std::ostream & str, const long long & n) { str
|
|||
template<> void toJSON<unsigned long long>(std::ostream & str, const unsigned long long & n) { str << n; }
|
||||
template<> void toJSON<float>(std::ostream & str, const float & n) { str << n; }
|
||||
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
|
||||
|
||||
template<> void toJSON<std::string>(std::ostream & str, const std::string & s)
|
||||
{
|
||||
toJSON(str, s.c_str(), s.c_str() + s.size());
|
||||
}
|
||||
template<> void toJSON<std::string>(std::ostream & str, const std::string & s) { toJSON(str, (std::string_view) s); }
|
||||
|
||||
template<> void toJSON<bool>(std::ostream & str, const bool & b)
|
||||
{
|
||||
|
@ -154,7 +151,7 @@ JSONObject::~JSONObject()
|
|||
}
|
||||
}
|
||||
|
||||
void JSONObject::attr(const std::string & s)
|
||||
void JSONObject::attr(std::string_view s)
|
||||
{
|
||||
comma();
|
||||
toJSON(state->str, s);
|
||||
|
@ -162,19 +159,19 @@ void JSONObject::attr(const std::string & s)
|
|||
if (state->indent) state->str << ' ';
|
||||
}
|
||||
|
||||
JSONList JSONObject::list(const std::string & name)
|
||||
JSONList JSONObject::list(std::string_view name)
|
||||
{
|
||||
attr(name);
|
||||
return JSONList(state);
|
||||
}
|
||||
|
||||
JSONObject JSONObject::object(const std::string & name)
|
||||
JSONObject JSONObject::object(std::string_view name)
|
||||
{
|
||||
attr(name);
|
||||
return JSONObject(state);
|
||||
}
|
||||
|
||||
JSONPlaceholder JSONObject::placeholder(const std::string & name)
|
||||
JSONPlaceholder JSONObject::placeholder(std::string_view name)
|
||||
{
|
||||
attr(name);
|
||||
return JSONPlaceholder(state);
|
||||
|
@ -196,7 +193,11 @@ JSONObject JSONPlaceholder::object()
|
|||
|
||||
JSONPlaceholder::~JSONPlaceholder()
|
||||
{
|
||||
assert(!first || std::uncaught_exceptions());
|
||||
if (first) {
|
||||
assert(std::uncaught_exceptions());
|
||||
if (state->stack != 0)
|
||||
write(nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -6,7 +6,6 @@
|
|||
|
||||
namespace nix {
|
||||
|
||||
void toJSON(std::ostream & str, const char * start, const char * end);
|
||||
void toJSON(std::ostream & str, const char * s);
|
||||
|
||||
template<typename T>
|
||||
|
@ -107,7 +106,7 @@ private:
|
|||
open();
|
||||
}
|
||||
|
||||
void attr(const std::string & s);
|
||||
void attr(std::string_view s);
|
||||
|
||||
public:
|
||||
|
||||
|
@ -128,18 +127,18 @@ public:
|
|||
~JSONObject();
|
||||
|
||||
template<typename T>
|
||||
JSONObject & attr(const std::string & name, const T & v)
|
||||
JSONObject & attr(std::string_view name, const T & v)
|
||||
{
|
||||
attr(name);
|
||||
toJSON(state->str, v);
|
||||
return *this;
|
||||
}
|
||||
|
||||
JSONList list(const std::string & name);
|
||||
JSONList list(std::string_view name);
|
||||
|
||||
JSONObject object(const std::string & name);
|
||||
JSONObject object(std::string_view name);
|
||||
|
||||
JSONPlaceholder placeholder(const std::string & name);
|
||||
JSONPlaceholder placeholder(std::string_view name);
|
||||
};
|
||||
|
||||
class JSONPlaceholder : JSONWriter
|
||||
|
|
|
@ -111,6 +111,9 @@ public:
|
|||
|
||||
virtual std::optional<char> ask(std::string_view s)
|
||||
{ return {}; }
|
||||
|
||||
virtual void setPrintBuildLogs(bool printBuildLogs)
|
||||
{ }
|
||||
};
|
||||
|
||||
ActivityId getCurActivity();
|
||||
|
|
|
@ -48,24 +48,9 @@ FdSink::~FdSink()
|
|||
}
|
||||
|
||||
|
||||
size_t threshold = 256 * 1024 * 1024;
|
||||
|
||||
static void warnLargeDump()
|
||||
{
|
||||
warn("dumping very large path (> 256 MiB); this may run out of memory");
|
||||
}
|
||||
|
||||
|
||||
void FdSink::write(std::string_view data)
|
||||
{
|
||||
written += data.size();
|
||||
static bool warned = false;
|
||||
if (warn && !warned) {
|
||||
if (written > threshold) {
|
||||
warnLargeDump();
|
||||
warned = true;
|
||||
}
|
||||
}
|
||||
try {
|
||||
writeFull(fd, data);
|
||||
} catch (SysError & e) {
|
||||
|
@ -448,11 +433,6 @@ Error readError(Source & source)
|
|||
|
||||
void StringSink::operator () (std::string_view data)
|
||||
{
|
||||
static bool warned = false;
|
||||
if (!warned && s.size() > threshold) {
|
||||
warnLargeDump();
|
||||
warned = true;
|
||||
}
|
||||
s.append(data);
|
||||
}
|
||||
|
||||
|
|
|
@ -97,19 +97,17 @@ protected:
|
|||
struct FdSink : BufferedSink
|
||||
{
|
||||
int fd;
|
||||
bool warn = false;
|
||||
size_t written = 0;
|
||||
|
||||
FdSink() : fd(-1) { }
|
||||
FdSink(int fd) : fd(fd) { }
|
||||
FdSink(FdSink&&) = default;
|
||||
|
||||
FdSink& operator=(FdSink && s)
|
||||
FdSink & operator=(FdSink && s)
|
||||
{
|
||||
flush();
|
||||
fd = s.fd;
|
||||
s.fd = -1;
|
||||
warn = s.warn;
|
||||
written = s.written;
|
||||
return *this;
|
||||
}
|
||||
|
|
|
@ -102,8 +102,8 @@ namespace nix {
|
|||
|
||||
TEST(toJSON, substringEscape) {
|
||||
std::stringstream out;
|
||||
const char *s = "foo\t";
|
||||
toJSON(out, s+3, s + strlen(s));
|
||||
std::string_view s = "foo\t";
|
||||
toJSON(out, s.substr(3));
|
||||
|
||||
ASSERT_EQ(out.str(), "\"\\t\"");
|
||||
}
|
||||
|
|
|
@ -35,6 +35,9 @@
|
|||
#ifdef __linux__
|
||||
#include <sys/prctl.h>
|
||||
#include <sys/resource.h>
|
||||
|
||||
#include <mntent.h>
|
||||
#include <cmath>
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -350,7 +353,7 @@ void readFile(const Path & path, Sink & sink)
|
|||
}
|
||||
|
||||
|
||||
void writeFile(const Path & path, std::string_view s, mode_t mode)
|
||||
void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync)
|
||||
{
|
||||
AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode);
|
||||
if (!fd)
|
||||
|
@ -361,10 +364,16 @@ void writeFile(const Path & path, std::string_view s, mode_t mode)
|
|||
e.addTrace({}, "writing file '%1%'", path);
|
||||
throw;
|
||||
}
|
||||
if (sync)
|
||||
fd.fsync();
|
||||
// Explicitly close to make sure exceptions are propagated.
|
||||
fd.close();
|
||||
if (sync)
|
||||
syncParent(path);
|
||||
}
|
||||
|
||||
|
||||
void writeFile(const Path & path, Source & source, mode_t mode)
|
||||
void writeFile(const Path & path, Source & source, mode_t mode, bool sync)
|
||||
{
|
||||
AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode);
|
||||
if (!fd)
|
||||
|
@ -383,6 +392,20 @@ void writeFile(const Path & path, Source & source, mode_t mode)
|
|||
e.addTrace({}, "writing file '%1%'", path);
|
||||
throw;
|
||||
}
|
||||
if (sync)
|
||||
fd.fsync();
|
||||
// Explicitly close to make sure exceptions are propagated.
|
||||
fd.close();
|
||||
if (sync)
|
||||
syncParent(path);
|
||||
}
|
||||
|
||||
void syncParent(const Path & path)
|
||||
{
|
||||
AutoCloseFD fd = open(dirOf(path).c_str(), O_RDONLY, 0);
|
||||
if (!fd)
|
||||
throw SysError("opening file '%1%'", path);
|
||||
fd.fsync();
|
||||
}
|
||||
|
||||
std::string readLine(int fd)
|
||||
|
@ -505,61 +528,6 @@ void deletePath(const Path & path, uint64_t & bytesFreed)
|
|||
}
|
||||
|
||||
|
||||
static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
|
||||
int & counter)
|
||||
{
|
||||
tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
|
||||
if (includePid)
|
||||
return (format("%1%/%2%-%3%-%4%") % tmpRoot % prefix % getpid() % counter++).str();
|
||||
else
|
||||
return (format("%1%/%2%-%3%") % tmpRoot % prefix % counter++).str();
|
||||
}
|
||||
|
||||
|
||||
Path createTempDir(const Path & tmpRoot, const Path & prefix,
|
||||
bool includePid, bool useGlobalCounter, mode_t mode)
|
||||
{
|
||||
static int globalCounter = 0;
|
||||
int localCounter = 0;
|
||||
int & counter(useGlobalCounter ? globalCounter : localCounter);
|
||||
|
||||
while (1) {
|
||||
checkInterrupt();
|
||||
Path tmpDir = tempName(tmpRoot, prefix, includePid, counter);
|
||||
if (mkdir(tmpDir.c_str(), mode) == 0) {
|
||||
#if __FreeBSD__
|
||||
/* Explicitly set the group of the directory. This is to
|
||||
work around around problems caused by BSD's group
|
||||
ownership semantics (directories inherit the group of
|
||||
the parent). For instance, the group of /tmp on
|
||||
FreeBSD is "wheel", so all directories created in /tmp
|
||||
will be owned by "wheel"; but if the user is not in
|
||||
"wheel", then "tar" will fail to unpack archives that
|
||||
have the setgid bit set on directories. */
|
||||
if (chown(tmpDir.c_str(), (uid_t) -1, getegid()) != 0)
|
||||
throw SysError("setting group of directory '%1%'", tmpDir);
|
||||
#endif
|
||||
return tmpDir;
|
||||
}
|
||||
if (errno != EEXIST)
|
||||
throw SysError("creating directory '%1%'", tmpDir);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
std::pair<AutoCloseFD, Path> createTempFile(const Path & prefix)
|
||||
{
|
||||
Path tmpl(getEnv("TMPDIR").value_or("/tmp") + "/" + prefix + ".XXXXXX");
|
||||
// Strictly speaking, this is UB, but who cares...
|
||||
// FIXME: use O_TMPFILE.
|
||||
AutoCloseFD fd(mkstemp((char *) tmpl.c_str()));
|
||||
if (!fd)
|
||||
throw SysError("creating temporary file '%s'", tmpl);
|
||||
closeOnExec(fd.get());
|
||||
return {std::move(fd), tmpl};
|
||||
}
|
||||
|
||||
|
||||
std::string getUserName()
|
||||
{
|
||||
auto pw = getpwuid(geteuid());
|
||||
|
@ -574,6 +542,7 @@ Path getHome()
|
|||
{
|
||||
static Path homeDir = []()
|
||||
{
|
||||
std::optional<std::string> unownedUserHomeDir = {};
|
||||
auto homeDir = getEnv("HOME");
|
||||
if (homeDir) {
|
||||
// Only use $HOME if doesn't exist or is owned by the current user.
|
||||
|
@ -585,8 +554,7 @@ Path getHome()
|
|||
homeDir.reset();
|
||||
}
|
||||
} else if (st.st_uid != geteuid()) {
|
||||
warn("$HOME ('%s') is not owned by you, falling back to the one defined in the 'passwd' file", *homeDir);
|
||||
homeDir.reset();
|
||||
unownedUserHomeDir.swap(homeDir);
|
||||
}
|
||||
}
|
||||
if (!homeDir) {
|
||||
|
@ -597,6 +565,9 @@ Path getHome()
|
|||
|| !pw || !pw->pw_dir || !pw->pw_dir[0])
|
||||
throw Error("cannot determine user's home directory");
|
||||
homeDir = pw->pw_dir;
|
||||
if (unownedUserHomeDir.has_value() && unownedUserHomeDir != homeDir) {
|
||||
warn("$HOME ('%s') is not owned by you, falling back to the one defined in the 'passwd' file ('%s')", *unownedUserHomeDir, *homeDir);
|
||||
}
|
||||
}
|
||||
return *homeDir;
|
||||
}();
|
||||
|
@ -678,44 +649,6 @@ Paths createDirs(const Path & path)
|
|||
}
|
||||
|
||||
|
||||
void createSymlink(const Path & target, const Path & link,
|
||||
std::optional<time_t> mtime)
|
||||
{
|
||||
if (symlink(target.c_str(), link.c_str()))
|
||||
throw SysError("creating symlink from '%1%' to '%2%'", link, target);
|
||||
if (mtime) {
|
||||
struct timeval times[2];
|
||||
times[0].tv_sec = *mtime;
|
||||
times[0].tv_usec = 0;
|
||||
times[1].tv_sec = *mtime;
|
||||
times[1].tv_usec = 0;
|
||||
if (lutimes(link.c_str(), times))
|
||||
throw SysError("setting time of symlink '%s'", link);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void replaceSymlink(const Path & target, const Path & link,
|
||||
std::optional<time_t> mtime)
|
||||
{
|
||||
for (unsigned int n = 0; true; n++) {
|
||||
Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link)));
|
||||
|
||||
try {
|
||||
createSymlink(target, tmp, mtime);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == EEXIST) continue;
|
||||
throw;
|
||||
}
|
||||
|
||||
if (rename(tmp.c_str(), link.c_str()) != 0)
|
||||
throw SysError("renaming '%1%' to '%2%'", tmp, link);
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void readFull(int fd, char * buf, size_t count)
|
||||
{
|
||||
while (count) {
|
||||
|
@ -788,7 +721,55 @@ void drainFD(int fd, Sink & sink, bool block)
|
|||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
|
||||
unsigned int getMaxCPU()
|
||||
{
|
||||
#if __linux__
|
||||
try {
|
||||
FILE *fp = fopen("/proc/mounts", "r");
|
||||
if (!fp)
|
||||
return 0;
|
||||
|
||||
Strings cgPathParts;
|
||||
|
||||
struct mntent *ent;
|
||||
while ((ent = getmntent(fp))) {
|
||||
std::string mountType, mountPath;
|
||||
|
||||
mountType = ent->mnt_type;
|
||||
mountPath = ent->mnt_dir;
|
||||
|
||||
if (mountType == "cgroup2") {
|
||||
cgPathParts.push_back(mountPath);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fclose(fp);
|
||||
|
||||
if (cgPathParts.size() > 0 && pathExists("/proc/self/cgroup")) {
|
||||
std::string currentCgroup = readFile("/proc/self/cgroup");
|
||||
Strings cgValues = tokenizeString<Strings>(currentCgroup, ":");
|
||||
cgPathParts.push_back(trim(cgValues.back(), "\n"));
|
||||
cgPathParts.push_back("cpu.max");
|
||||
std::string fullCgPath = canonPath(concatStringsSep("/", cgPathParts));
|
||||
|
||||
if (pathExists(fullCgPath)) {
|
||||
std::string cpuMax = readFile(fullCgPath);
|
||||
std::vector<std::string> cpuMaxParts = tokenizeString<std::vector<std::string>>(cpuMax, " ");
|
||||
std::string quota = cpuMaxParts[0];
|
||||
std::string period = trim(cpuMaxParts[1], "\n");
|
||||
|
||||
if (quota != "max")
|
||||
return std::ceil(std::stoi(quota) / std::stof(period));
|
||||
}
|
||||
}
|
||||
} catch (Error &) { ignoreException(); }
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
@ -880,6 +861,20 @@ void AutoCloseFD::close()
|
|||
}
|
||||
}
|
||||
|
||||
void AutoCloseFD::fsync()
|
||||
{
|
||||
if (fd != -1) {
|
||||
int result;
|
||||
#if __APPLE__
|
||||
result = ::fcntl(fd, F_FULLFSYNC);
|
||||
#else
|
||||
result = ::fsync(fd);
|
||||
#endif
|
||||
if (result == -1)
|
||||
throw SysError("fsync file descriptor %1%", fd);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
AutoCloseFD::operator bool() const
|
||||
{
|
||||
|
|
|
@@ -115,9 +115,12 @@ std::string readFile(const Path & path);
void readFile(const Path & path, Sink & sink);

/* Write a string to a file. */
void writeFile(const Path & path, std::string_view s, mode_t mode = 0666);
void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false);

void writeFile(const Path & path, Source & source, mode_t mode = 0666);
void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false);

/* Flush a file's parent directory to disk */
void syncParent(const Path & path);

/* Read a line from a file descriptor. */
std::string readLine(int fd);

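As a hedged usage sketch of the new `sync` parameter (the path and contents are made up for illustration): with `sync = true`, `writeFile()` fsync()s the file before closing it and then calls `syncParent()`, so the write survives a crash right after the call returns.

```cpp
// Sketch only: durably write a small state file.
writeFile("/nix/var/nix/example-state.json", "{\"version\": 1}\n", 0666, true);
```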
@@ -168,6 +171,17 @@ void createSymlink(const Path & target, const Path & link,
void replaceSymlink(const Path & target, const Path & link,
    std::optional<time_t> mtime = {});

void renameFile(const Path & src, const Path & dst);

/**
 * Similar to 'renameFile', but fallback to a copy+remove if `src` and `dst`
 * are on a different filesystem.
 *
 * Beware that this might not be atomic because of the copy that happens behind
 * the scenes
 */
void moveFile(const Path & src, const Path & dst);


/* Wrappers arount read()/write() that read/write exactly the
   requested number of bytes. */

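A hedged sketch of when to reach for each helper; the paths are hypothetical and the calls are shown as a fragment, not a full program:

```cpp
// renameFile() is a thin wrapper around std::filesystem::rename (see the new
// filesystem.cc above) and is atomic, but only within one filesystem;
// moveFile() falls back to a copy+remove when the rename fails with EXDEV.
renameFile("/nix/var/nix/tmp/result.tmp", "/nix/var/nix/result");   // same filesystem
moveFile("/tmp/build-output", "/nix/var/nix/imported-output");      // may cross filesystems
```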
@@ -182,6 +196,9 @@ std::string drainFD(int fd, bool block = true, const size_t reserveSize=0);

void drainFD(int fd, Sink & sink, bool block = true);

/* If cgroups are active, attempt to calculate the number of CPUs available.
   If cgroups are unavailable or if cpu.max is set to "max", return 0. */
unsigned int getMaxCPU();

/* Automatic cleanup of resources. */

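A small worked example of the `cpu.max` arithmetic that `getMaxCPU()` (shown earlier in this diff) performs; the quota and period values are hypothetical:

```cpp
#include <cassert>
#include <cmath>
#include <string>

int main()
{
    // cgroup v2 cpu.max holds "<quota> <period>"; getMaxCPU() rounds the ratio up.
    std::string quota = "250000", period = "100000";   // hypothetical values
    unsigned int cpus = std::ceil(std::stoi(quota) / std::stof(period));
    assert(cpus == 3);   // 2.5 CPUs of bandwidth is reported as 3
    // A quota of "max" means "no limit"; getMaxCPU() returns 0 in that case.
}
```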
@@ -217,6 +234,7 @@ public:
    explicit operator bool() const;
    int release();
    void close();
    void fsync();
};

@ -85,7 +85,6 @@ static void main_nix_build(int argc, char * * argv)
|
|||
Strings attrPaths;
|
||||
Strings left;
|
||||
RepairFlag repair = NoRepair;
|
||||
Path gcRoot;
|
||||
BuildMode buildMode = bmNormal;
|
||||
bool readStdin = false;
|
||||
|
||||
|
@ -167,9 +166,6 @@ static void main_nix_build(int argc, char * * argv)
|
|||
else if (*arg == "--out-link" || *arg == "-o")
|
||||
outLink = getArg(*arg, arg, end);
|
||||
|
||||
else if (*arg == "--add-root")
|
||||
gcRoot = getArg(*arg, arg, end);
|
||||
|
||||
else if (*arg == "--dry-run")
|
||||
dryRun = true;
|
||||
|
||||
|
@ -401,7 +397,7 @@ static void main_nix_build(int argc, char * * argv)
|
|||
auto bashDrv = drv->requireDrvPath();
|
||||
pathsToBuild.push_back(DerivedPath::Built {
|
||||
.drvPath = bashDrv,
|
||||
.outputs = {},
|
||||
.outputs = {"out"},
|
||||
});
|
||||
pathsToCopy.insert(bashDrv);
|
||||
shellDrv = bashDrv;
|
||||
|
|
|
@ -940,12 +940,12 @@ static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool prin
|
|||
JSONObject metaObj = pkgObj.object("meta");
|
||||
StringSet metaNames = i.queryMetaNames();
|
||||
for (auto & j : metaNames) {
|
||||
auto placeholder = metaObj.placeholder(j);
|
||||
Value * v = i.queryMeta(j);
|
||||
if (!v) {
|
||||
printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j);
|
||||
placeholder.write(nullptr);
|
||||
metaObj.attr(j, nullptr);
|
||||
} else {
|
||||
auto placeholder = metaObj.placeholder(j);
|
||||
PathSet context;
|
||||
printValueAsJSON(*globals.state, true, *v, noPos, placeholder, context);
|
||||
}
|
||||
|
|
|
@ -52,9 +52,10 @@ void processExpr(EvalState & state, const Strings & attrPaths,
|
|||
state.autoCallFunction(autoArgs, v, vRes);
|
||||
if (output == okXML)
|
||||
printValueAsXML(state, strict, location, vRes, std::cout, context, noPos);
|
||||
else if (output == okJSON)
|
||||
else if (output == okJSON) {
|
||||
printValueAsJSON(state, strict, vRes, v.determinePos(noPos), std::cout, context);
|
||||
else {
|
||||
std::cout << std::endl;
|
||||
} else {
|
||||
if (strict) state.forceValueDeep(vRes);
|
||||
vRes.print(state.symbols, std::cout);
|
||||
std::cout << std::endl;
|
||||
|
|
|
@@ -922,7 +922,7 @@ static void opServe(Strings opFlags, Strings opArgs)

    if (GET_PROTOCOL_MINOR(clientVersion) >= 3)
        out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime;
    if (GET_PROTOCOL_MINOR(clientVersion >= 6)) {
    if (GET_PROTOCOL_MINOR(clientVersion) >= 6) {
        worker_proto::write(*store, out, status.builtOutputs);
    }

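The fix above moves a misplaced parenthesis; assuming the usual definition of the macro as a low-byte mask, the difference is:

```cpp
// Assuming GET_PROTOCOL_MINOR(x) expands to ((x) & 0x00ff):
//   old: GET_PROTOCOL_MINOR(clientVersion >= 6)   // masks the *boolean*, so this is 0 or 1
//   new: GET_PROTOCOL_MINOR(clientVersion) >= 6   // compares the actual minor version
```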
@ -66,7 +66,9 @@ UnresolvedApp Installable::toApp(EvalState & state)
|
|||
|
||||
auto type = cursor->getAttr("type")->getString();
|
||||
|
||||
std::string expected = !attrPath.empty() && state.symbols[attrPath[0]] == "apps" ? "app" : "derivation";
|
||||
std::string expected = !attrPath.empty() &&
|
||||
(state.symbols[attrPath[0]] == "apps" || state.symbols[attrPath[0]] == "defaultApp")
|
||||
? "app" : "derivation";
|
||||
if (type != expected)
|
||||
throw Error("attribute '%s' should have type '%s'", cursor->getAttrPathStr(), expected);
|
||||
|
||||
|
|
|
@@ -44,7 +44,7 @@ flake output attributes:

* `bundlers.<system>.default`

If an attribute *name* is given, `nix run` tries the following flake
If an attribute *name* is given, `nix bundle` tries the following flake
output attributes:

* `bundlers.<system>.<name>`

@ -246,6 +246,7 @@ struct Common : InstallableCommand, MixProfile
|
|||
"NIX_LOG_FD",
|
||||
"NIX_REMOTE",
|
||||
"PPID",
|
||||
"SHELL",
|
||||
"SHELLOPTS",
|
||||
"SSL_CERT_FILE", // FIXME: only want to ignore /no-cert-file.crt
|
||||
"TEMP",
|
||||
|
@ -288,8 +289,10 @@ struct Common : InstallableCommand, MixProfile
|
|||
|
||||
out << "unset shellHook\n";
|
||||
|
||||
for (auto & var : savedVars)
|
||||
for (auto & var : savedVars) {
|
||||
out << fmt("%s=${%s:-}\n", var, var);
|
||||
out << fmt("nix_saved_%s=\"$%s\"\n", var, var);
|
||||
}
|
||||
|
||||
buildEnvironment.toBash(out, ignoreVars);
|
||||
|
||||
|
|
|
@ -66,6 +66,12 @@ R""(
|
|||
`nixpkgs#glibc` in `~/my-glibc` and want to compile another package
|
||||
against it.
|
||||
|
||||
* Run a series of script commands:
|
||||
|
||||
```console
|
||||
# nix develop --command bash -c "mkdir build && cmake .. && make"
|
||||
```
|
||||
|
||||
# Description
|
||||
|
||||
`nix develop` starts a `bash` shell that provides an interactive build
|
||||
|
|
|
@ -116,7 +116,8 @@ struct CmdEval : MixJSON, InstallableCommand
|
|||
|
||||
else if (json) {
|
||||
JSONPlaceholder jsonOut(std::cout);
|
||||
printValueAsJSON(*state, true, *v, pos, jsonOut, context);
|
||||
printValueAsJSON(*state, true, *v, pos, jsonOut, context, false);
|
||||
std::cout << std::endl;
|
||||
}
|
||||
|
||||
else {
|
||||
|
|
|
@ -6,7 +6,7 @@ R""(
|
|||
lock file:
|
||||
|
||||
```console
|
||||
# nix flake update
|
||||
# nix flake update --commit-lock-file
|
||||
* Updated 'nix': 'github:NixOS/nix/9fab14adbc3810d5cc1f88672fde1eee4358405c' -> 'github:NixOS/nix/8927cba62f5afb33b01016d5c4f7f8b7d0adde3c'
|
||||
* Updated 'nixpkgs': 'github:NixOS/nixpkgs/3d2d8f281a27d466fa54b469b5993f7dde198375' -> 'github:NixOS/nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293'
|
||||
…
|
||||
|
|
|
@ -212,7 +212,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
|
|||
ANSI_BOLD "Last modified:" ANSI_NORMAL " %s",
|
||||
std::put_time(std::localtime(&*lastModified), "%F %T"));
|
||||
|
||||
logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL);
|
||||
if (!lockedFlake.lockFile.root->inputs.empty())
|
||||
logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL);
|
||||
|
||||
std::unordered_set<std::shared_ptr<Node>> visited;
|
||||
|
||||
|
|
|
@ -43,6 +43,7 @@ __dumpEnv() {
|
|||
local __var_name="${BASH_REMATCH[2]}"
|
||||
|
||||
if [[ $__var_name =~ ^BASH_ || \
|
||||
$__var_name =~ ^COMP_ || \
|
||||
$__var_name = _ || \
|
||||
$__var_name = DIRSTACK || \
|
||||
$__var_name = EUID || \
|
||||
|
@ -54,7 +55,9 @@ __dumpEnv() {
|
|||
$__var_name = PWD || \
|
||||
$__var_name = RANDOM || \
|
||||
$__var_name = SHLVL || \
|
||||
$__var_name = SECONDS \
|
||||
$__var_name = SECONDS || \
|
||||
$__var_name = EPOCHREALTIME || \
|
||||
$__var_name = EPOCHSECONDS \
|
||||
]]; then continue; fi
|
||||
|
||||
if [[ -z $__first ]]; then printf ',\n'; else __first=; fi
|
||||
|
|
|
@ -74,6 +74,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
|
|||
addFlag({
|
||||
.longName = "help",
|
||||
.description = "Show usage information.",
|
||||
.category = miscCategory,
|
||||
.handler = {[&]() { throw HelpRequested(); }},
|
||||
});
|
||||
|
||||
|
@ -82,12 +83,13 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
|
|||
.shortName = 'L',
|
||||
.description = "Print full build logs on standard error.",
|
||||
.category = loggingCategory,
|
||||
.handler = {[&]() {setLogFormat(LogFormat::barWithLogs); }},
|
||||
.handler = {[&]() { logger->setPrintBuildLogs(true); }},
|
||||
});
|
||||
|
||||
addFlag({
|
||||
.longName = "version",
|
||||
.description = "Show version information.",
|
||||
.category = miscCategory,
|
||||
.handler = {[&]() { showVersion = true; }},
|
||||
});
|
||||
|
||||
|
@ -95,12 +97,14 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
|
|||
.longName = "offline",
|
||||
.aliases = {"no-net"}, // FIXME: remove
|
||||
.description = "Disable substituters and consider all previously downloaded files up-to-date.",
|
||||
.category = miscCategory,
|
||||
.handler = {[&]() { useNet = false; }},
|
||||
});
|
||||
|
||||
addFlag({
|
||||
.longName = "refresh",
|
||||
.description = "Consider all previously downloaded files out-of-date.",
|
||||
.category = miscCategory,
|
||||
.handler = {[&]() { refresh = true; }},
|
||||
});
|
||||
}
|
||||
|
@ -187,7 +191,7 @@ static void showHelp(std::vector<std::string> subcommand, MultiCommand & topleve
|
|||
*vUtils);
|
||||
|
||||
auto attrs = state.buildBindings(16);
|
||||
attrs.alloc("command").mkString(toplevel.toJSON().dump());
|
||||
attrs.alloc("toplevel").mkString(toplevel.toJSON().dump());
|
||||
|
||||
auto vRes = state.allocValue();
|
||||
state.callFunction(*vGenerateManpage, state.allocValue()->mkAttrs(attrs), *vRes, noPos);
|
||||
|
@ -266,7 +270,7 @@ void mainWrapped(int argc, char * * argv)
|
|||
programPath = argv[0];
|
||||
auto programName = std::string(baseNameOf(programPath));
|
||||
|
||||
if (argc > 0 && std::string_view(argv[0]) == "__build-remote") {
|
||||
if (argc > 1 && std::string_view(argv[1]) == "__build-remote") {
|
||||
programName = "build-remote";
|
||||
argv++; argc--;
|
||||
}
|
||||
|
@ -325,7 +329,7 @@ void mainWrapped(int argc, char * * argv)
|
|||
std::cout << "attrs\n"; break;
|
||||
}
|
||||
for (auto & s : *completions)
|
||||
std::cout << s.completion << "\t" << s.description << "\n";
|
||||
std::cout << s.completion << "\t" << trim(s.description) << "\n";
|
||||
}
|
||||
});
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ R""(
|
|||
|
||||
```console
|
||||
# nix copy --to /tmp/nix --trusted-public-keys '' nixpkgs#hello
|
||||
cannot add path '/nix/store/zy9wbxwcygrwnh8n2w9qbbcr6zk87m26-libunistring-0.9.10' because it lacks a valid signature
|
||||
cannot add path '/nix/store/zy9wbxwcygrwnh8n2w9qbbcr6zk87m26-libunistring-0.9.10' because it lacks a signature by a trusted key
|
||||
```
|
||||
|
||||
* Create a content-addressed representation of the current NixOS
|
||||
|
|
39
src/nix/path-from-hash-part.cc
Normal file

@@ -0,0 +1,39 @@
#include "command.hh"
#include "store-api.hh"

using namespace nix;

struct CmdPathFromHashPart : StoreCommand
{
    std::string hashPart;

    CmdPathFromHashPart()
    {
        expectArgs({
            .label = "hash-part",
            .handler = {&hashPart},
        });
    }

    std::string description() override
    {
        return "get a store path from its hash part";
    }

    std::string doc() override
    {
        return
          #include "path-from-hash-part.md"
          ;
    }

    void run(ref<Store> store) override
    {
        if (auto storePath = store->queryPathFromHashPart(hashPart))
            logger->cout(store->printStorePath(*storePath));
        else
            throw Error("there is no store path corresponding to '%s'", hashPart);
    }
};

static auto rCmdPathFromHashPart = registerCommand2<CmdPathFromHashPart>({"store", "path-from-hash-part"});

20
src/nix/path-from-hash-part.md
Normal file

@@ -0,0 +1,20 @@
R""(

# Examples

* Return the full store path with the given hash part:

  ```console
  # nix store path-from-hash-part --store https://cache.nixos.org/ 0i2jd68mp5g6h2sa5k9c85rb80sn8hi9
  /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10
  ```

# Description

Given the hash part of a store path (that is, the 32 characters
following `/nix/store/`), return the full store path. This is
primarily useful in the implementation of binary caches, where a
request for a `.narinfo` file only supplies the hash part
(e.g. `https://cache.nixos.org/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9.narinfo`).

)""

@@ -11,7 +11,7 @@ them to be rolled back easily.

The default profile used by `nix profile` is `$HOME/.nix-profile`,
which, if it does not exist, is created as a symlink to
`/nix/var/nix/profiles/per-user/default` if Nix is invoked by the
`/nix/var/nix/profiles/default` if Nix is invoked by the
`root` user, or `/nix/var/nix/profiles/per-user/`*username* otherwise.

You can specify another profile location using `--profile` *path*.

@ -36,7 +36,7 @@ R""(
|
|||
Loading Installable ''...
|
||||
Added 1 variables.
|
||||
|
||||
# nix repl --extra_experimental_features 'flakes repl-flake' nixpkgs
|
||||
# nix repl --extra-experimental-features 'flakes repl-flake' nixpkgs
|
||||
Loading Installable 'flake:nixpkgs#'...
|
||||
Added 5 variables.
|
||||
|
||||
|
|
|
@ -23,6 +23,12 @@ R""(
|
|||
Hi everybody!
|
||||
```
|
||||
|
||||
* Run multiple commands in a shell environment:
|
||||
|
||||
```console
|
||||
# nix shell nixpkgs#gnumake -c sh -c "cd src && make"
|
||||
```
|
||||
|
||||
* Run GNU Hello in a chroot store:
|
||||
|
||||
```console
|
||||
|
|
|
@@ -41,7 +41,7 @@ struct CmdVerify : StorePathsCommand
    addFlag({
        .longName = "sigs-needed",
        .shortName = 'n',
        .description = "Require that each path has at least *n* valid signatures.",
        .description = "Require that each path is signed by at least *n* different keys.",
        .labels = {"n"},
        .handler = {&sigsNeeded}
    });