
Merge remote-tracking branch 'origin/master' into fsync-store-paths

Eelco Dolstra 2024-08-21 16:37:21 +02:00
commit e049d38290
2136 changed files with 102665 additions and 49570 deletions

src/libstore/.version Symbolic link

@ -0,0 +1 @@
../../.version


@ -2,7 +2,7 @@
#include "binary-cache-store.hh"
#include "compression.hh"
#include "derivations.hh"
#include "fs-accessor.hh"
#include "source-accessor.hh"
#include "globals.hh"
#include "nar-info.hh"
#include "sync.hh"
@ -11,11 +11,14 @@
#include "nar-accessor.hh"
#include "thread-pool.hh"
#include "callback.hh"
#include "signals.hh"
#include "archive.hh"
#include <chrono>
#include <future>
#include <regex>
#include <fstream>
#include <sstream>
#include <nlohmann/json.hpp>
@ -26,7 +29,8 @@ BinaryCacheStore::BinaryCacheStore(const Params & params)
, Store(params)
{
if (secretKeyFile != "")
secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));
signer = std::make_unique<LocalSigner>(
SecretKey { readFile(secretKeyFile) });
StringSink sink;
sink << narVersionMagic1;
@ -121,14 +125,6 @@ void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
diskCache->upsertNarInfo(getUri(), std::string(narInfo->path.hashPart()), std::shared_ptr<NarInfo>(narInfo));
}
AutoCloseFD openFile(const Path & path)
{
auto fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
if (!fd)
throw SysError("opening file '%1%'", path);
return fd;
}
ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs,
std::function<ValidPathInfo(HashResult)> mkInfo)
@ -142,9 +138,9 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
/* Read the NAR simultaneously into a CompressionSink+FileSink (to
write the compressed NAR to disk), into a HashSink (to get the
NAR hash), and into a NarAccessor (to get the NAR listing). */
HashSink fileHashSink { htSHA256 };
std::shared_ptr<FSAccessor> narAccessor;
HashSink narHashSink { htSHA256 };
HashSink fileHashSink { HashAlgorithm::SHA256 };
std::shared_ptr<SourceAccessor> narAccessor;
HashSink narHashSink { HashAlgorithm::SHA256 };
{
FdSink fileSink(fdTemp.get());
TeeSink teeSinkCompressed { fileSink, fileHashSink };
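As context for the sinks set up above: Nix's HashSink consumes a byte stream and yields a (hash, size) pair, and the libutil dumpPath free function (used elsewhere in this file) serialises a filesystem path as a NAR into any sink. A minimal standalone sketch of computing a NAR hash with those primitives; the helper name is hypothetical and not part of this diff.

#include "archive.hh"   // dumpPath
#include "hash.hh"      // HashSink, HashAlgorithm, HashResult
#include "types.hh"     // Path

// Hypothetical helper, not part of the diff: compute the SHA-256 NAR hash and
// size of a filesystem path with the same primitives used above.
static nix::HashResult narHashOf(const nix::Path & path)
{
    nix::HashSink hashSink { nix::HashAlgorithm::SHA256 };
    nix::dumpPath(path, hashSink);   // serialise `path` as a NAR into the hashing sink
    return hashSink.finish();        // (narHash, narSize)
}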
@ -164,8 +160,8 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
auto [fileHash, fileSize] = fileHashSink.finish();
narInfo->fileHash = fileHash;
narInfo->fileSize = fileSize;
narInfo->url = "nar/" + narInfo->fileHash->to_string(Base32, false) + ".nar"
+ (compression == "xz" ? ".xz" :
narInfo->url = "nar/" + narInfo->fileHash->to_string(HashFormat::Nix32, false) + ".nar"
+ (compression == "xz" ? ".xz" :
compression == "bzip2" ? ".bz2" :
compression == "zstd" ? ".zst" :
compression == "lzip" ? ".lzip" :
@ -195,7 +191,7 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
if (writeNARListing) {
nlohmann::json j = {
{"version", 1},
{"root", listNar(ref<FSAccessor>(narAccessor), "", true)},
{"root", listNar(ref<SourceAccessor>(narAccessor), CanonPath::root, true)},
};
upsertFile(std::string(info.path.hashPart()) + ".ls", j.dump(), "application/json");
@ -206,9 +202,9 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
specify the NAR file and member containing the debug info. */
if (writeDebugInfo) {
std::string buildIdDir = "/lib/debug/.build-id";
CanonPath buildIdDir("lib/debug/.build-id");
if (narAccessor->stat(buildIdDir).type == FSAccessor::tDirectory) {
if (auto st = narAccessor->maybeLstat(buildIdDir); st && st->type == SourceAccessor::tDirectory) {
ThreadPool threadPool(25);
@ -231,17 +227,17 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
std::regex regex1("^[0-9a-f]{2}$");
std::regex regex2("^[0-9a-f]{38}\\.debug$");
for (auto & s1 : narAccessor->readDirectory(buildIdDir)) {
auto dir = buildIdDir + "/" + s1;
for (auto & [s1, _type] : narAccessor->readDirectory(buildIdDir)) {
auto dir = buildIdDir / s1;
if (narAccessor->stat(dir).type != FSAccessor::tDirectory
if (narAccessor->lstat(dir).type != SourceAccessor::tDirectory
|| !std::regex_match(s1, regex1))
continue;
for (auto & s2 : narAccessor->readDirectory(dir)) {
auto debugPath = dir + "/" + s2;
for (auto & [s2, _type] : narAccessor->readDirectory(dir)) {
auto debugPath = dir / s2;
if (narAccessor->stat(debugPath).type != FSAccessor::tRegular
if (narAccessor->lstat(debugPath).type != SourceAccessor::tRegular
|| !std::regex_match(s2, regex2))
continue;
@ -250,7 +246,7 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
std::string key = "debuginfo/" + buildId;
std::string target = "../" + narInfo->url;
threadPool.enqueue(std::bind(doFile, std::string(debugPath, 1), key, target));
threadPool.enqueue(std::bind(doFile, std::string(debugPath.rel()), key, target));
}
}
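For context, the layout being indexed here is the conventional ELF build-id scheme used by debuggers and dwarffs: a file lib/debug/.build-id/ab/cdef….debug (two hex digits, then thirty-eight hex digits plus the .debug suffix) holds the debug info for the object whose build ID starts with those digits, which is exactly what regex1 and regex2 above match. A hedged standalone sketch of that check; the helper is illustrative and not part of the diff.

#include <regex>
#include <string>

// Hypothetical helper, not part of the diff: does a path (relative to the
// store path) look like a build-id debug file of the form matched above?
static bool looksLikeBuildIdDebugFile(const std::string & rel)
{
    static const std::regex re("^lib/debug/\\.build-id/[0-9a-f]{2}/[0-9a-f]{38}\\.debug$");
    return std::regex_match(rel, re);
}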
@ -272,7 +268,7 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
stats.narWriteCompressionTimeMs += duration;
/* Atomically write the NAR info file.*/
if (secretKey) narInfo->sign(*this, *secretKey);
if (signer) narInfo->sign(*this, *signer);
writeNarInfo(narInfo);
@ -299,18 +295,75 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
}});
}
StorePath BinaryCacheStore::addToStoreFromDump(Source & dump, std::string_view name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references)
StorePath BinaryCacheStore::addToStoreFromDump(
Source & dump,
std::string_view name,
FileSerialisationMethod dumpMethod,
ContentAddressMethod hashMethod,
HashAlgorithm hashAlgo,
const StorePathSet & references,
RepairFlag repair)
{
if (method != FileIngestionMethod::Recursive || hashAlgo != htSHA256)
std::optional<Hash> caHash;
std::string nar;
    // Calculating the Git hash from a NAR stream is not yet implemented. It may
    // not be possible to do in a single pass if the NAR is in an
    // inconvenient order. We could fetch after uploading, however.
if (hashMethod.getFileIngestionMethod() == FileIngestionMethod::Git)
unsupported("addToStoreFromDump");
return addToStoreCommon(dump, repair, CheckSigs, [&](HashResult nar) {
if (auto * dump2p = dynamic_cast<StringSource *>(&dump)) {
auto & dump2 = *dump2p;
// Hack, this gives us a "replayable" source so we can compute
// multiple hashes more easily.
//
// Only calculate if the dump is in the right format, however.
if (static_cast<FileIngestionMethod>(dumpMethod) == hashMethod.getFileIngestionMethod())
caHash = hashString(HashAlgorithm::SHA256, dump2.s);
switch (dumpMethod) {
case FileSerialisationMethod::NixArchive:
// The dump is already NAR in this case, just use it.
nar = dump2.s;
break;
case FileSerialisationMethod::Flat:
{
// The dump is Flat, so we need to convert it to NAR with a
// single file.
StringSink s;
dumpString(dump2.s, s);
nar = std::move(s.s);
break;
}
}
} else {
        // Otherwise, we have to do the same hashing as NAR, so our single
// hash will suffice for both purposes.
if (dumpMethod != FileSerialisationMethod::NixArchive || hashAlgo != HashAlgorithm::SHA256)
unsupported("addToStoreFromDump");
}
StringSource narDump { nar };
// Use `narDump` if we wrote to `nar`.
Source & narDump2 = nar.size() > 0
? static_cast<Source &>(narDump)
: dump;
return addToStoreCommon(narDump2, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
makeFixedOutputPath(method, nar.first, name, references),
*this,
name,
ContentAddressWithReferences::fromParts(
hashMethod,
caHash ? *caHash : nar.first,
{
.others = references,
// caller is not capable of creating a self-reference, because this is content-addressed without modulus
.self = false,
}),
nar.first,
};
info.narSize = nar.second;
info.references = references;
return info;
})->path;
}
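The FileSerialisationMethod::Flat branch above wraps the raw file contents as a NAR containing a single regular file before uploading. A minimal standalone sketch of that conversion, assuming the libutil dumpString and StringSink APIs used in the hunk; the helper name is hypothetical and not part of the diff.

#include "archive.hh"    // dumpString
#include "serialise.hh"  // StringSink

#include <string>
#include <string_view>

// Hypothetical helper, not part of the diff: serialise flat file contents as a
// NAR with a single regular file, mirroring the Flat branch above.
static std::string flatToNar(std::string_view contents)
{
    nix::StringSink sink;
    nix::dumpString(contents, sink);  // writes a one-file NAR into `sink`
    return std::move(sink.s);
}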
@ -370,7 +423,7 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
getFile(narInfoFile,
{[=](std::future<std::optional<std::string>> fut) {
{[=,this](std::future<std::optional<std::string>> fut) {
try {
auto data = fut.get();
@ -390,63 +443,37 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
StorePath BinaryCacheStore::addToStore(
std::string_view name,
const Path & srcPath,
FileIngestionMethod method,
HashType hashAlgo,
const SourcePath & path,
ContentAddressMethod method,
HashAlgorithm hashAlgo,
const StorePathSet & references,
PathFilter & filter,
RepairFlag repair,
const StorePathSet & references)
RepairFlag repair)
{
/* FIXME: Make BinaryCacheStore::addToStoreCommon support
non-recursive+sha256 so we can just use the default
implementation of this method in terms of addToStoreFromDump. */
HashSink sink { hashAlgo };
if (method == FileIngestionMethod::Recursive) {
dumpPath(srcPath, sink, filter);
} else {
readFile(srcPath, sink);
}
auto h = sink.finish().first;
auto h = hashPath(path, method.getFileIngestionMethod(), hashAlgo, filter).first;
auto source = sinkToSource([&](Sink & sink) {
dumpPath(srcPath, sink, filter);
path.dumpPath(sink, filter);
});
return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
makeFixedOutputPath(method, h, name, references),
*this,
name,
ContentAddressWithReferences::fromParts(
method,
h,
{
.others = references,
// caller is not capable of creating a self-reference, because this is content-addressed without modulus
.self = false,
}),
nar.first,
};
info.narSize = nar.second;
info.references = references;
info.ca = FixedOutputHash {
.method = method,
.hash = h,
};
return info;
})->path;
}
StorePath BinaryCacheStore::addTextToStore(
std::string_view name,
std::string_view s,
const StorePathSet & references,
RepairFlag repair)
{
auto textHash = hashString(htSHA256, s);
auto path = makeTextPath(name, textHash, references);
if (!repair && isValidPath(path))
return path;
StringSink sink;
dumpString(s, sink);
StringSource source(sink.s);
return addToStoreCommon(source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info { path, nar.first };
info.narSize = nar.second;
info.ca = TextHash { textHash };
info.references = references;
return info;
})->path;
}
@ -483,9 +510,9 @@ void BinaryCacheStore::registerDrvOutput(const Realisation& info) {
upsertFile(filePath, info.toJSON().dump(), "application/json");
}
ref<FSAccessor> BinaryCacheStore::getFSAccessor()
ref<SourceAccessor> BinaryCacheStore::getFSAccessor(bool requireValidPath)
{
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()), localNarCache);
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()), requireValidPath, localNarCache);
}
void BinaryCacheStore::addSignatures(const StorePath & storePath, const StringSet & sigs)
@ -502,22 +529,9 @@ void BinaryCacheStore::addSignatures(const StorePath & storePath, const StringSe
writeNarInfo(narInfo);
}
std::optional<std::string> BinaryCacheStore::getBuildLog(const StorePath & path)
std::optional<std::string> BinaryCacheStore::getBuildLogExact(const StorePath & path)
{
auto drvPath = path;
if (!path.isDerivation()) {
try {
auto info = queryPathInfo(path);
// FIXME: add a "Log" field to .narinfo
if (!info->deriver) return std::nullopt;
drvPath = *info->deriver;
} catch (InvalidPath &) {
return std::nullopt;
}
}
auto logPath = "log/" + std::string(baseNameOf(printStorePath(drvPath)));
auto logPath = "log/" + std::string(baseNameOf(printStorePath(path)));
debug("fetching build log from binary cache '%s/%s'", getUri(), logPath);


@ -1,6 +1,7 @@
#pragma once
///@file
#include "crypto.hh"
#include "signature/local-keys.hh"
#include "store-api.hh"
#include "log-store.hh"
@ -16,27 +17,47 @@ struct BinaryCacheStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
const Setting<std::string> compression{(StoreConfig*) this, "xz", "compression", "NAR compression method ('xz', 'bzip2', 'gzip', 'zstd', or 'none')"};
const Setting<bool> writeNARListing{(StoreConfig*) this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
const Setting<bool> writeDebugInfo{(StoreConfig*) this, false, "index-debug-info", "whether to index DWARF debug info files by build ID"};
const Setting<Path> secretKeyFile{(StoreConfig*) this, "", "secret-key", "path to secret key used to sign the binary cache"};
const Setting<Path> localNarCache{(StoreConfig*) this, "", "local-nar-cache", "path to a local cache of NARs"};
const Setting<bool> parallelCompression{(StoreConfig*) this, false, "parallel-compression",
"enable multi-threading compression for NARs, available for xz and zstd only currently"};
const Setting<int> compressionLevel{(StoreConfig*) this, -1, "compression-level",
"specify 'preset level' of compression to be used with NARs: "
"meaning and accepted range of values depends on compression method selected, "
"other than -1 which we reserve to indicate Nix defaults should be used"};
const Setting<std::string> compression{this, "xz", "compression",
"NAR compression method (`xz`, `bzip2`, `gzip`, `zstd`, or `none`)."};
const Setting<bool> writeNARListing{this, false, "write-nar-listing",
"Whether to write a JSON file that lists the files in each NAR."};
const Setting<bool> writeDebugInfo{this, false, "index-debug-info",
R"(
Whether to index DWARF debug info files by build ID. This allows [`dwarffs`](https://github.com/edolstra/dwarffs) to
fetch debug info on demand
)"};
const Setting<Path> secretKeyFile{this, "", "secret-key",
"Path to the secret key used to sign the binary cache."};
const Setting<Path> localNarCache{this, "", "local-nar-cache",
"Path to a local cache of NARs fetched from this binary cache, used by commands such as `nix store cat`."};
const Setting<bool> parallelCompression{this, false, "parallel-compression",
"Enable multi-threaded compression of NARs. This is currently only available for `xz` and `zstd`."};
const Setting<int> compressionLevel{this, -1, "compression-level",
R"(
The *preset level* to be used when compressing NARs.
The meaning and accepted values depend on the compression method selected.
`-1` specifies that the default compression level should be used.
)"};
};
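As a usage note, these settings are normally supplied as store-URI query parameters or programmatically via Store::Params when the store is opened. A hedged sketch follows; the cache URI and the chosen values are purely illustrative.

#include "store-api.hh"   // nix::openStore, nix::Store::Params

void openTunedBinaryCache()
{
    nix::Store::Params params {
        {"compression", "zstd"},
        {"compression-level", "19"},
        {"write-nar-listing", "true"},
        {"parallel-compression", "true"},
    };
    // Equivalent to a URI such as file:///tmp/binary-cache?compression=zstd&...
    auto store = nix::openStore("file:///tmp/binary-cache", params);
    (void) store;
}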
/**
* @note subclasses must implement at least one of the two
* virtual getFile() methods.
*/
class BinaryCacheStore : public virtual BinaryCacheStoreConfig,
public virtual Store,
public virtual LogStore
{
private:
std::unique_ptr<SecretKey> secretKey;
std::unique_ptr<Signer> signer;
protected:
@ -58,14 +79,15 @@ public:
std::string && data,
const std::string & mimeType);
/* Note: subclasses must implement at least one of the two
following getFile() methods. */
/* Dump the contents of the specified file to a sink. */
/**
* Dump the contents of the specified file to a sink.
*/
virtual void getFile(const std::string & path, Sink & sink);
/* Fetch the specified file and call the specified callback with
the result. A subclass may implement this asynchronously. */
/**
* Fetch the specified file and call the specified callback with
* the result. A subclass may implement this asynchronously.
*/
virtual void getFile(
const std::string & path,
Callback<std::optional<std::string>> callback) noexcept;
@ -100,22 +122,22 @@ public:
void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, std::string_view name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references) override;
StorePath addToStoreFromDump(
Source & dump,
std::string_view name,
FileSerialisationMethod dumpMethod,
ContentAddressMethod hashMethod,
HashAlgorithm hashAlgo,
const StorePathSet & references,
RepairFlag repair) override;
StorePath addToStore(
std::string_view name,
const Path & srcPath,
FileIngestionMethod method,
HashType hashAlgo,
PathFilter & filter,
RepairFlag repair,
const StorePathSet & references) override;
StorePath addTextToStore(
std::string_view name,
std::string_view s,
const SourcePath & path,
ContentAddressMethod method,
HashAlgorithm hashAlgo,
const StorePathSet & references,
PathFilter & filter,
RepairFlag repair) override;
void registerDrvOutput(const Realisation & info) override;
@ -125,11 +147,11 @@ public:
void narFromPath(const StorePath & path, Sink & sink) override;
ref<FSAccessor> getFSAccessor() override;
ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override;
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
std::optional<std::string> getBuildLog(const StorePath & path) override;
std::optional<std::string> getBuildLogExact(const StorePath & path) override;
void addBuildLog(const StorePath & drvPath, std::string_view log) override;


@ -0,0 +1,8 @@
#include "build-result.hh"
namespace nix {
bool BuildResult::operator==(const BuildResult &) const noexcept = default;
std::strong_ordering BuildResult::operator<=>(const BuildResult &) const noexcept = default;
}
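For readers unfamiliar with the pattern: the operators are declared in the header and defaulted out of line here, and = default generates member-wise comparison over the fields in declaration order. A small generic illustration, not Nix code:

#include <cassert>
#include <compare>
#include <string>

struct Version {
    unsigned majorNum = 0, minorNum = 0;
    std::string tag;

    // Declared here, defaulted out of line below, like BuildResult above.
    bool operator==(const Version &) const;
    std::strong_ordering operator<=>(const Version &) const;
};

bool Version::operator==(const Version &) const = default;
std::strong_ordering Version::operator<=>(const Version &) const = default;

int main()
{
    assert((Version{1, 2, "a"} == Version{1, 2, "a"}));
    assert((Version{1, 2, "a"} <=> Version{1, 3, "a"}) == std::strong_ordering::less);
}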


@ -1,4 +1,5 @@
#pragma once
///@file
#include "realisation.hh"
#include "derived-path.hh"
@ -11,9 +12,12 @@ namespace nix {
struct BuildResult
{
/* Note: don't remove status codes, and only add new status codes
at the end of the list, to prevent client/server
incompatibilities in the nix-store --serve protocol. */
/**
* @note This is directly used in the nix-store --serve protocol.
* That means we need to worry about compatibility across versions.
* Therefore, don't remove status codes, and only add new status
* codes at the end of the list.
*/
enum Status {
Built = 0,
Substituted,
@ -21,8 +25,10 @@ struct BuildResult
PermanentFailure,
InputRejected,
OutputRejected,
TransientFailure, // possibly transient
CachedFailure, // no longer used
/// possibly transient
TransientFailure,
/// no longer used
CachedFailure,
TimedOut,
MiscFailure,
DependencyFailed,
@ -32,7 +38,12 @@ struct BuildResult
NoSubstituters,
} status = MiscFailure;
// FIXME: include entire ErrorInfo object.
/**
* Information about the error if the build failed.
*
* @todo This should be an entire ErrorInfo object, not just a
* string, for richer information.
*/
std::string errorMsg;
std::string toString() const {
@ -52,35 +63,46 @@ struct BuildResult
case LogLimitExceeded: return "LogLimitExceeded";
case NotDeterministic: return "NotDeterministic";
case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid";
case NoSubstituters: return "NoSubstituters";
default: return "Unknown";
};
}();
return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg);
}
/* How many times this build was performed. */
/**
* How many times this build was performed.
*/
unsigned int timesBuilt = 0;
/* If timesBuilt > 1, whether some builds did not produce the same
result. (Note that 'isNonDeterministic = false' does not mean
the build is deterministic, just that we don't have evidence of
non-determinism.) */
/**
* If timesBuilt > 1, whether some builds did not produce the same
* result. (Note that 'isNonDeterministic = false' does not mean
* the build is deterministic, just that we don't have evidence of
* non-determinism.)
*/
bool isNonDeterministic = false;
/* The derivation we built or the store path we substituted. */
DerivedPath path;
/**
* For derivations, a mapping from the names of the wanted outputs
* to actual paths.
*/
SingleDrvOutputs builtOutputs;
/* For derivations, a mapping from the names of the wanted outputs
to actual paths. */
DrvOutputs builtOutputs;
/* The start/stop times of the build (or one of the rounds, if it
was repeated). */
/**
* The start/stop times of the build (or one of the rounds, if it
* was repeated).
*/
time_t startTime = 0, stopTime = 0;
/* User and system CPU time the build took. */
/**
* User and system CPU time the build took.
*/
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
bool operator ==(const BuildResult &) const noexcept;
std::strong_ordering operator <=>(const BuildResult &) const noexcept;
bool success()
{
return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;
@ -92,4 +114,20 @@ struct BuildResult
}
};
/**
* A `BuildResult` together with its "primary key".
*/
struct KeyedBuildResult : BuildResult
{
/**
* The derivation we built or the store path we substituted.
*/
DerivedPath path;
// Hack to work around a gcc "may be used uninitialized" warning.
KeyedBuildResult(BuildResult res, DerivedPath path)
: BuildResult(std::move(res)), path(std::move(path))
{ }
};
}


@ -0,0 +1 @@
../../build-utils-meson

File diff suppressed because it is too large.


@ -1,7 +1,11 @@
#pragma once
///@file
#include "parsed-derivations.hh"
#include "lock.hh"
#ifndef _WIN32
# include "user-lock.hh"
#endif
#include "outputs-spec.hh"
#include "store-api.hh"
#include "pathlocks.hh"
#include "goal.hh"
@ -10,12 +14,16 @@ namespace nix {
using std::map;
#ifndef _WIN32 // TODO enable build hook on Windows
struct HookInstance;
#endif
typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
/* Unless we are repairing, we don't both to test validity and just assume it,
so the choices are `Absent` or `Valid`. */
/**
* Unless we are repairing, we don't bother to test validity and just assume it,
* so the choices are `Absent` or `Valid`.
*/
enum struct PathStatus {
Corrupt,
Absent,
@ -25,11 +33,15 @@ enum struct PathStatus {
struct InitialOutputStatus {
StorePath path;
PathStatus status;
/* Valid in the store, and additionally non-corrupt if we are repairing */
/**
* Valid in the store, and additionally non-corrupt if we are repairing
*/
bool isValid() const {
return status == PathStatus::Valid;
}
/* Merely present, allowed to be corrupt */
/**
* Merely present, allowed to be corrupt
*/
bool isPresent() const {
return status == PathStatus::Corrupt
|| status == PathStatus::Valid;
@ -42,61 +54,127 @@ struct InitialOutput {
std::optional<InitialOutputStatus> known;
};
/**
* A goal for building some or all of the outputs of a derivation.
*/
struct DerivationGoal : public Goal
{
/* Whether to use an on-disk .drv file. */
/**
* Whether to use an on-disk .drv file.
*/
bool useDerivation;
/* The path of the derivation. */
/** The path of the derivation. */
StorePath drvPath;
/* The goal for the corresponding resolved derivation */
/**
* The goal for the corresponding resolved derivation
*/
std::shared_ptr<DerivationGoal> resolvedDrvGoal;
/* The specific outputs that we need to build. Empty means all of
them. */
StringSet wantedOutputs;
/**
* The specific outputs that we need to build.
*/
OutputsSpec wantedOutputs;
/* Mapping from input derivations + output names to actual store
paths. This is filled in by waiteeDone() as each dependency
finishes, before inputsRealised() is reached, */
/**
* Mapping from input derivations + output names to actual store
* paths. This is filled in by waiteeDone() as each dependency
* finishes, before inputsRealised() is reached.
*/
std::map<std::pair<StorePath, std::string>, StorePath> inputDrvOutputs;
/* Whether additional wanted outputs have been added. */
bool needRestart = false;
/**
* See `needRestart`; just for that field.
*/
enum struct NeedRestartForMoreOutputs {
/**
* The goal state machine is progressing based on the current value of
* `wantedOutputs`. No actions are needed.
*/
OutputsUnmodifedDontNeed,
/**
* `wantedOutputs` has been extended, but the state machine is
* proceeding according to its old value, so we need to restart.
*/
OutputsAddedDoNeed,
/**
* The goal state machine has progressed to the point of doing a build,
* in which case all outputs will be produced, so extensions to
* `wantedOutputs` no longer require a restart.
*/
BuildInProgressWillNotNeed,
};
/* Whether to retry substituting the outputs after building the
inputs. This is done in case of an incomplete closure. */
bool retrySubstitution = false;
/**
* Whether additional wanted outputs have been added.
*/
NeedRestartForMoreOutputs needRestart = NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed;
/* Whether we've retried substitution, in which case we won't try
again. */
bool retriedSubstitution = false;
/**
* See `retrySubstitution`; just for that field.
*/
enum RetrySubstitution {
/**
* No issues have arisen yet; no need to restart.
*/
NoNeed,
/**
* Something failed and there is an incomplete closure. Let's retry
* substituting.
*/
YesNeed,
/**
* We are currently retrying, or have already retried, substitution; whether
* or not something goes wrong, we will not retry again.
*/
AlreadyRetried,
};
/* The derivation stored at drvPath. */
/**
* Whether to retry substituting the outputs after building the
* inputs. This is done in case of an incomplete closure.
*/
RetrySubstitution retrySubstitution = RetrySubstitution::NoNeed;
/**
* The derivation stored at drvPath.
*/
std::unique_ptr<Derivation> drv;
std::unique_ptr<ParsedDerivation> parsedDrv;
/* The remainder is state held during the build. */
/**
* The remainder is state held during the build.
*/
/* Locks on (fixed) output paths. */
/**
* Locks on (fixed) output paths.
*/
PathLocks outputLocks;
/* All input paths (that is, the union of FS closures of the
immediate input paths). */
/**
* All input paths (that is, the union of FS closures of the
* immediate input paths).
*/
StorePathSet inputPaths;
std::map<std::string, InitialOutput> initialOutputs;
/* File descriptor for the log file. */
/**
* File descriptor for the log file.
*/
AutoCloseFD fdLogFile;
std::shared_ptr<BufferedSink> logFileSink, logSink;
/* Number of bytes received from the builder's stdout/stderr. */
/**
* Number of bytes received from the builder's stdout/stderr.
*/
unsigned long logSize;
/* The most recent log lines. */
/**
* The most recent log lines.
*/
std::list<std::string> logTail;
std::string currentLogLine;
@ -104,14 +182,17 @@ struct DerivationGoal : public Goal
std::string currentHookLine;
/* The build hook. */
#ifndef _WIN32 // TODO enable build hook on Windows
/**
* The build hook.
*/
std::unique_ptr<HookInstance> hook;
#endif
/* The sort of derivation we are building. */
DerivationType derivationType;
typedef void (DerivationGoal::*GoalState)();
GoalState state;
/**
* The sort of derivation we are building.
*/
std::optional<DerivationType> derivationType;
BuildMode buildMode;
@ -119,19 +200,23 @@ struct DerivationGoal : public Goal
std::unique_ptr<Activity> act;
/* Activity that denotes waiting for a lock. */
/**
* Activity that denotes waiting for a lock.
*/
std::unique_ptr<Activity> actLock;
std::map<ActivityId, Activity> builderActivities;
/* The remote machine on which we're building. */
/**
* The remote machine on which we're building.
*/
std::string machineName;
DerivationGoal(const StorePath & drvPath,
const StringSet & wantedOutputs, Worker & worker,
const OutputsSpec & wantedOutputs, Worker & worker,
BuildMode buildMode = bmNormal);
DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
const StringSet & wantedOutputs, Worker & worker,
const OutputsSpec & wantedOutputs, Worker & worker,
BuildMode buildMode = bmNormal);
virtual ~DerivationGoal();
@ -139,47 +224,64 @@ struct DerivationGoal : public Goal
std::string key() override;
void work() override;
/**
* Add wanted outputs to an already existing derivation goal.
*/
void addWantedOutputs(const OutputsSpec & outputs);
/* Add wanted outputs to an already existing derivation goal. */
void addWantedOutputs(const StringSet & outputs);
/**
* The states.
*/
Co init() override;
Co getDerivation();
Co loadDerivation();
Co haveDerivation();
Co outputsSubstitutionTried();
Co gaveUpOnSubstitution();
Co closureRepaired();
Co inputsRealised();
Co tryToBuild();
virtual Co tryLocalBuild();
Co buildDone();
/* The states. */
void getDerivation();
void loadDerivation();
void haveDerivation();
void outputsSubstitutionTried();
void gaveUpOnSubstitution();
void closureRepaired();
void inputsRealised();
void tryToBuild();
virtual void tryLocalBuild();
void buildDone();
Co resolvedFinished();
void resolvedFinished();
/* Is the build hook willing to perform the build? */
/**
* Is the build hook willing to perform the build?
*/
HookReply tryBuildHook();
virtual int getChildStatus();
/* Check that the derivation outputs all exist and register them
as valid. */
virtual DrvOutputs registerOutputs();
/**
* Check that the derivation outputs all exist and register them
* as valid.
*/
virtual SingleDrvOutputs registerOutputs();
/* Open a log file and a pipe to it. */
/**
* Open a log file and a pipe to it.
*/
Path openLogFile();
/* Sign the newly built realisation if the store allows it */
/**
* Sign the newly built realisation if the store allows it
*/
virtual void signRealisation(Realisation&) {}
/* Close the log file. */
/**
* Close the log file.
*/
void closeLogFile();
/* Close the read side of the logger pipe. */
/**
* Close the read side of the logger pipe.
*/
virtual void closeReadPipes();
/* Cleanup hooks for buildDone() */
/**
* Cleanup hooks for buildDone()
*/
virtual void cleanupHookFinally();
virtual void cleanupPreChildKill();
virtual void cleanupPostChildKill();
@ -187,46 +289,58 @@ struct DerivationGoal : public Goal
virtual void cleanupPostOutputsRegisteredModeCheck();
virtual void cleanupPostOutputsRegisteredModeNonCheck();
virtual bool isReadDesc(int fd);
virtual bool isReadDesc(Descriptor fd);
/* Callback used by the worker to write to the log. */
void handleChildOutput(int fd, std::string_view data) override;
void handleEOF(int fd) override;
/**
* Callback used by the worker to write to the log.
*/
void handleChildOutput(Descriptor fd, std::string_view data) override;
void handleEOF(Descriptor fd) override;
void flushLine();
/* Wrappers around the corresponding Store methods that first consult the
derivation. This is currently needed because when there is no drv file
there also is no DB entry. */
/**
* Wrappers around the corresponding Store methods that first consult the
* derivation. This is currently needed because when there is no drv file
* there also is no DB entry.
*/
std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap();
OutputPathMap queryDerivationOutputMap();
/* Update 'initialOutputs' to determine the current status of the
outputs of the derivation. Also returns a Boolean denoting
whether all outputs are valid and non-corrupt, and a
'DrvOutputs' structure containing the valid and wanted
outputs. */
std::pair<bool, DrvOutputs> checkPathValidity();
/**
* Update 'initialOutputs' to determine the current status of the
* outputs of the derivation. Also returns a Boolean denoting
* whether all outputs are valid and non-corrupt, and a
* 'SingleDrvOutputs' structure containing the valid outputs.
*/
std::pair<bool, SingleDrvOutputs> checkPathValidity();
/* Aborts if any output is not valid or corrupt, and otherwise
returns a 'DrvOutputs' structure containing the wanted
outputs. */
DrvOutputs assertPathValidity();
/**
* Aborts if any output is not valid or corrupt, and otherwise
* returns a 'SingleDrvOutputs' structure containing all outputs.
*/
SingleDrvOutputs assertPathValidity();
/* Forcibly kill the child process, if any. */
/**
* Forcibly kill the child process, if any.
*/
virtual void killChild();
void repairClosure();
Co repairClosure();
void started();
void done(
Done done(
BuildResult::Status status,
DrvOutputs builtOutputs = {},
SingleDrvOutputs builtOutputs = {},
std::optional<Error> ex = {});
void waiteeDone(GoalPtr waitee, ExitCode result) override;
StorePathSet exportReferences(const StorePathSet & storePaths);
JobCategory jobCategory() const override {
return JobCategory::Build;
};
};
MakeError(NotDeterministic, BuildError);


@ -14,131 +14,135 @@ DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(
: Goal(worker, DerivedPath::Opaque { StorePath::dummy })
, id(id)
{
state = &DrvOutputSubstitutionGoal::init;
name = fmt("substitution of '%s'", id.to_string());
trace("created");
}
void DrvOutputSubstitutionGoal::init()
Goal::Co DrvOutputSubstitutionGoal::init()
{
trace("init");
/* If the derivation already exists, we're done */
if (worker.store.queryRealisation(id)) {
amDone(ecSuccess);
return;
co_return amDone(ecSuccess);
}
subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
tryNext();
}
auto subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
void DrvOutputSubstitutionGoal::tryNext()
{
trace("trying next substituter");
bool substituterFailed = false;
if (subs.size() == 0) {
/* None left. Terminate this goal and let someone else deal
with it. */
debug("derivation output '%s' is required, but there is no substituter that can provide it", id.to_string());
for (auto sub : subs) {
trace("trying next substituter");
/* Hack: don't indicate failure if there were no substituters.
In that case the calling derivation should just do a
build. */
amDone(substituterFailed ? ecFailed : ecNoSubstituters);
/* The callback of the curl download below can outlive `this` (if
some other error occurs), so it must not touch `this`. So put
the shared state in a separate refcounted object. */
auto outPipe = std::make_shared<MuxablePipe>();
#ifndef _WIN32
outPipe->create();
#else
outPipe->createAsyncPipe(worker.ioport.get());
#endif
if (substituterFailed) {
worker.failedSubstitutions++;
worker.updateProgress();
auto promise = std::make_shared<std::promise<std::shared_ptr<const Realisation>>>();
sub->queryRealisation(
id,
{ [outPipe(outPipe), promise(promise)](std::future<std::shared_ptr<const Realisation>> res) {
try {
Finally updateStats([&]() { outPipe->writeSide.close(); });
promise->set_value(res.get());
} catch (...) {
promise->set_exception(std::current_exception());
}
} });
worker.childStarted(shared_from_this(), {
#ifndef _WIN32
outPipe->readSide.get()
#else
&*outPipe
#endif
}, true, false);
co_await Suspend{};
worker.childTerminated(this);
/*
* The realisation corresponding to the given output id.
* Will be filled once we can get it.
*/
std::shared_ptr<const Realisation> outputInfo;
try {
outputInfo = promise->get_future().get();
} catch (std::exception & e) {
printError(e.what());
substituterFailed = true;
}
return;
if (!outputInfo) continue;
bool failed = false;
for (const auto & [depId, depPath] : outputInfo->dependentRealisations) {
if (depId != id) {
if (auto localOutputInfo = worker.store.queryRealisation(depId);
localOutputInfo && localOutputInfo->outPath != depPath) {
warn(
"substituter '%s' has an incompatible realisation for '%s', ignoring.\n"
"Local: %s\n"
"Remote: %s",
sub->getUri(),
depId.to_string(),
worker.store.printStorePath(localOutputInfo->outPath),
worker.store.printStorePath(depPath)
);
failed = true;
break;
}
addWaitee(worker.makeDrvOutputSubstitutionGoal(depId));
}
}
if (failed) continue;
co_return realisationFetched(outputInfo, sub);
}
sub = subs.front();
subs.pop_front();
/* None left. Terminate this goal and let someone else deal
with it. */
debug("derivation output '%s' is required, but there is no substituter that can provide it", id.to_string());
// FIXME: Make async
// outputInfo = sub->queryRealisation(id);
outPipe.create();
promise = decltype(promise)();
if (substituterFailed) {
worker.failedSubstitutions++;
worker.updateProgress();
}
sub->queryRealisation(
id, { [&](std::future<std::shared_ptr<const Realisation>> res) {
try {
Finally updateStats([this]() { outPipe.writeSide.close(); });
promise.set_value(res.get());
} catch (...) {
promise.set_exception(std::current_exception());
}
} });
worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
state = &DrvOutputSubstitutionGoal::realisationFetched;
/* Hack: don't indicate failure if there were no substituters.
In that case the calling derivation should just do a
build. */
co_return amDone(substituterFailed ? ecFailed : ecNoSubstituters);
}
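The loop above hands the result of the asynchronous queryRealisation callback back to the goal through a shared std::promise, so the callback never has to touch `this` after the goal may have gone away. A generic standalone sketch of that handoff pattern; it is not Nix-specific, and the thread merely stands in for the substituter's callback.

#include <cstdio>
#include <future>
#include <memory>
#include <thread>

int main()
{
    auto promise = std::make_shared<std::promise<int>>();

    // Stand-in for the substituter callback: it only captures the shared
    // promise (never `this`) and fulfils it exactly once.
    std::thread callbackSource([promise] {
        try {
            promise->set_value(42);
        } catch (...) {
            promise->set_exception(std::current_exception());
        }
    });

    // Later, once the goal has been woken up again, the result is collected;
    // get() rethrows any exception stored by the callback.
    int result = promise->get_future().get();
    std::printf("got %d\n", result);

    callbackSource.join();
}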
void DrvOutputSubstitutionGoal::realisationFetched()
{
worker.childTerminated(this);
try {
outputInfo = promise.get_future().get();
} catch (std::exception & e) {
printError(e.what());
substituterFailed = true;
}
if (!outputInfo) {
return tryNext();
}
for (const auto & [depId, depPath] : outputInfo->dependentRealisations) {
if (depId != id) {
if (auto localOutputInfo = worker.store.queryRealisation(depId);
localOutputInfo && localOutputInfo->outPath != depPath) {
warn(
"substituter '%s' has an incompatible realisation for '%s', ignoring.\n"
"Local: %s\n"
"Remote: %s",
sub->getUri(),
depId.to_string(),
worker.store.printStorePath(localOutputInfo->outPath),
worker.store.printStorePath(depPath)
);
tryNext();
return;
}
addWaitee(worker.makeDrvOutputSubstitutionGoal(depId));
}
}
Goal::Co DrvOutputSubstitutionGoal::realisationFetched(std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub) {
addWaitee(worker.makePathSubstitutionGoal(outputInfo->outPath));
if (waitees.empty()) outPathValid();
else state = &DrvOutputSubstitutionGoal::outPathValid;
}
if (!waitees.empty()) co_await Suspend{};
void DrvOutputSubstitutionGoal::outPathValid()
{
assert(outputInfo);
trace("output path substituted");
if (nrFailed > 0) {
debug("The output path of the derivation output '%s' could not be substituted", id.to_string());
amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
return;
co_return amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
}
worker.store.registerDrvOutput(*outputInfo);
finished();
}
void DrvOutputSubstitutionGoal::finished()
{
trace("finished");
amDone(ecSuccess);
co_return amDone(ecSuccess);
}
std::string DrvOutputSubstitutionGoal::key()
@ -148,14 +152,9 @@ std::string DrvOutputSubstitutionGoal::key()
return "a$" + std::string(id.to_string());
}
void DrvOutputSubstitutionGoal::work()
void DrvOutputSubstitutionGoal::handleEOF(Descriptor fd)
{
(this->*state)();
}
void DrvOutputSubstitutionGoal::handleEOF(int fd)
{
if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
worker.wakeUp(shared_from_this());
}


@ -1,60 +1,50 @@
#pragma once
///@file
#include <thread>
#include <future>
#include "store-api.hh"
#include "goal.hh"
#include "realisation.hh"
#include <thread>
#include <future>
#include "muxable-pipe.hh"
namespace nix {
class Worker;
// Substitution of a derivation output.
// This is done in three steps:
// 1. Fetch the output info from a substituter
// 2. Substitute the corresponding output path
// 3. Register the output info
/**
* Substitution of a derivation output.
* This is done in three steps:
* 1. Fetch the output info from a substituter
* 2. Substitute the corresponding output path
* 3. Register the output info
*/
class DrvOutputSubstitutionGoal : public Goal {
private:
// The drv output we're trying to substitue
/**
* The drv output we're trying to substitute
*/
DrvOutput id;
// The realisation corresponding to the given output id.
// Will be filled once we can get it.
std::shared_ptr<const Realisation> outputInfo;
/* The remaining substituters. */
std::list<ref<Store>> subs;
/* The current substituter. */
std::shared_ptr<Store> sub;
Pipe outPipe;
std::thread thr;
std::promise<std::shared_ptr<const Realisation>> promise;
/* Whether a substituter failed. */
bool substituterFailed = false;
public:
DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
typedef void (DrvOutputSubstitutionGoal::*GoalState)();
GoalState state;
void init();
void tryNext();
void realisationFetched();
void outPathValid();
void finished();
Co init() override;
Co realisationFetched(std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub);
void timedOut(Error && ex) override { abort(); };
void timedOut(Error && ex) override { unreachable(); };
std::string key() override;
void work() override;
void handleEOF(int fd) override;
void handleEOF(Descriptor fd) override;
JobCategory jobCategory() const override {
return JobCategory::Substitution;
};
};
}


@ -1,7 +1,10 @@
#include "worker.hh"
#include "substitution-goal.hh"
#include "derivation-goal.hh"
#ifndef _WIN32 // TODO Enable building on Windows
# include "derivation-goal.hh"
#endif
#include "local-store.hh"
#include "strings.hh"
namespace nix {
@ -10,20 +13,12 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
Worker worker(*this, evalStore ? *evalStore : *this);
Goals goals;
for (const auto & br : reqs) {
std::visit(overloaded {
[&](const DerivedPath::Built & bfd) {
goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
},
[&](const DerivedPath::Opaque & bo) {
goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
},
}, br.raw());
}
for (auto & br : reqs)
goals.insert(worker.makeGoal(br, buildMode));
worker.run(goals);
StorePathSet failed;
StringSet failed;
std::optional<Error> ex;
for (auto & i : goals) {
if (i->ex) {
@ -33,21 +28,26 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
ex = std::move(i->ex);
}
if (i->exitCode != Goal::ecSuccess) {
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
else if (auto i2 = dynamic_cast<PathSubstitutionGoal *>(i.get())) failed.insert(i2->storePath);
#ifndef _WIN32 // TODO Enable building on Windows
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get()))
failed.insert(printStorePath(i2->drvPath));
else
#endif
if (auto i2 = dynamic_cast<PathSubstitutionGoal *>(i.get()))
failed.insert(printStorePath(i2->storePath));
}
}
if (failed.size() == 1 && ex) {
ex->status = worker.exitStatus();
ex->withExitStatus(worker.failingExitStatus());
throw std::move(*ex);
} else if (!failed.empty()) {
if (ex) logError(ex->info());
throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
throw Error(worker.failingExitStatus(), "build of %s failed", concatStringsSep(", ", quoteStrings(failed)));
}
}
std::vector<BuildResult> Store::buildPathsWithResults(
std::vector<KeyedBuildResult> Store::buildPathsWithResults(
const std::vector<DerivedPath> & reqs,
BuildMode buildMode,
std::shared_ptr<Store> evalStore)
@ -55,23 +55,23 @@ std::vector<BuildResult> Store::buildPathsWithResults(
Worker worker(*this, evalStore ? *evalStore : *this);
Goals goals;
for (const auto & br : reqs) {
std::visit(overloaded {
[&](const DerivedPath::Built & bfd) {
goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
},
[&](const DerivedPath::Opaque & bo) {
goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
},
}, br.raw());
std::vector<std::pair<const DerivedPath &, GoalPtr>> state;
for (const auto & req : reqs) {
auto goal = worker.makeGoal(req, buildMode);
goals.insert(goal);
state.push_back({req, goal});
}
worker.run(goals);
std::vector<BuildResult> results;
std::vector<KeyedBuildResult> results;
for (auto & i : goals)
results.push_back(i->buildResult);
for (auto & [req, goalPtr] : state)
results.emplace_back(KeyedBuildResult {
goalPtr->getBuildResult(req),
/* .path = */ req,
});
return results;
}
@ -80,16 +80,23 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat
BuildMode buildMode)
{
Worker worker(*this, *this);
auto goal = worker.makeBasicDerivationGoal(drvPath, drv, {}, buildMode);
#ifndef _WIN32 // TODO Enable building on Windows
auto goal = worker.makeBasicDerivationGoal(drvPath, drv, OutputsSpec::All {}, buildMode);
#else
std::shared_ptr<Goal> goal;
throw UnimplementedError("Building derivations not yet implemented on windows.");
#endif
try {
worker.run(Goals{goal});
return goal->buildResult;
return goal->getBuildResult(DerivedPath::Built {
.drvPath = makeConstantStorePathRef(drvPath),
.outputs = OutputsSpec::All {},
});
} catch (Error & e) {
return BuildResult {
.status = BuildResult::MiscFailure,
.errorMsg = e.msg(),
.path = DerivedPath::Built { .drvPath = drvPath },
};
};
}
@ -108,15 +115,15 @@ void Store::ensurePath(const StorePath & path)
if (goal->exitCode != Goal::ecSuccess) {
if (goal->ex) {
goal->ex->status = worker.exitStatus();
goal->ex->withExitStatus(worker.failingExitStatus());
throw std::move(*goal->ex);
} else
throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
throw Error(worker.failingExitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
}
}
void LocalStore::repairPath(const StorePath & path)
void Store::repairPath(const StorePath & path)
{
Worker worker(*this, *this);
GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
@ -130,10 +137,14 @@ void LocalStore::repairPath(const StorePath & path)
auto info = queryPathInfo(path);
if (info->deriver && isValidPath(*info->deriver)) {
goals.clear();
goals.insert(worker.makeDerivationGoal(*info->deriver, StringSet(), bmRepair));
goals.insert(worker.makeGoal(DerivedPath::Built {
.drvPath = makeConstantStorePathRef(*info->deriver),
// FIXME: Should just build the specific output we need.
.outputs = OutputsSpec::All { },
}, bmRepair));
worker.run(goals);
} else
throw Error(worker.exitStatus(), "cannot repair path '%s'", printStorePath(path));
throw Error(worker.failingExitStatus(), "cannot repair path '%s'", printStorePath(path));
}
}


@ -3,6 +3,97 @@
namespace nix {
using Co = nix::Goal::Co;
using promise_type = nix::Goal::promise_type;
using handle_type = nix::Goal::handle_type;
using Suspend = nix::Goal::Suspend;
Co::Co(Co&& rhs) {
this->handle = rhs.handle;
rhs.handle = nullptr;
}
void Co::operator=(Co&& rhs) {
this->handle = rhs.handle;
rhs.handle = nullptr;
}
Co::~Co() {
if (handle) {
handle.promise().alive = false;
handle.destroy();
}
}
Co promise_type::get_return_object() {
auto handle = handle_type::from_promise(*this);
return Co{handle};
};
std::coroutine_handle<> promise_type::final_awaiter::await_suspend(handle_type h) noexcept {
auto& p = h.promise();
auto goal = p.goal;
assert(goal);
goal->trace("in final_awaiter");
auto c = std::move(p.continuation);
if (c) {
// We still have a continuation, i.e. work to do.
// We assert that the goal is still busy.
assert(goal->exitCode == ecBusy);
assert(goal->top_co); // Goal must have an active coroutine.
assert(goal->top_co->handle == h); // The active coroutine must be us.
assert(p.alive); // We must not have been destructed.
// we move continuation to the top,
// note: previous top_co is actually h, so by moving into it,
// we're calling the destructor on h, DON'T use h and p after this!
// We move our continuation into `top_co`, i.e. the marker for the active continuation.
// By doing this we destruct the old `top_co`, i.e. us, so `h` can't be used anymore.
// Be careful not to access freed memory!
goal->top_co = std::move(c);
// We resume `top_co`.
return goal->top_co->handle;
} else {
// We have no continuation, i.e. no more work to do,
// so the goal must not be busy anymore.
assert(goal->exitCode != ecBusy);
// We reset `top_co` for good measure.
p.goal->top_co = {};
// We jump to the noop coroutine, which doesn't do anything and immediately suspends.
// This passes control back to the caller of goal.work().
return std::noop_coroutine();
}
}
void promise_type::return_value(Co&& next) {
goal->trace("return_value(Co&&)");
// Save old continuation.
auto old_continuation = std::move(continuation);
// We set next as our continuation.
continuation = std::move(next);
// We set next's goal, and thus it must not have one already.
assert(!continuation->handle.promise().goal);
continuation->handle.promise().goal = goal;
// Nor can next have a continuation, as we set it to our old one.
assert(!continuation->handle.promise().continuation);
continuation->handle.promise().continuation = std::move(old_continuation);
}
std::coroutine_handle<> nix::Goal::Co::await_suspend(handle_type caller) {
assert(handle); // we must be a valid coroutine
auto& p = handle.promise();
assert(!p.continuation); // we must have no continuation
assert(!p.goal); // we must not have a goal yet
auto goal = caller.promise().goal;
assert(goal);
p.goal = goal;
p.continuation = std::move(goal->top_co); // we set our continuation to be top_co (i.e. caller)
goal->top_co = std::move(*this); // we set top_co to ourselves, don't use this anymore after this!
return p.goal->top_co->handle; // we execute ourselves
}
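The plumbing above relies on symmetric transfer: instead of resuming the next coroutine recursively, await_suspend returns its handle and control jumps to it directly, so chains of tail calls do not grow the stack. A minimal self-contained illustration of the pattern in plain C++20; it is generic, not the Goal machinery itself.

#include <coroutine>
#include <cstdio>

struct Task {
    struct promise_type {
        // Who to jump to when this coroutine finishes (defaults to a no-op).
        std::coroutine_handle<> continuation = std::noop_coroutine();

        Task get_return_object() {
            return Task{std::coroutine_handle<promise_type>::from_promise(*this)};
        }
        std::suspend_always initial_suspend() noexcept { return {}; }
        struct final_awaiter {
            bool await_ready() noexcept { return false; }
            std::coroutine_handle<> await_suspend(std::coroutine_handle<promise_type> h) noexcept {
                // Symmetric transfer: hand control straight to the continuation.
                return h.promise().continuation;
            }
            void await_resume() noexcept {}
        };
        final_awaiter final_suspend() noexcept { return {}; }
        void return_void() {}
        void unhandled_exception() { std::terminate(); }
    };

    std::coroutine_handle<promise_type> handle;

    // Awaiting a Task records the awaiter as the continuation and jumps into the callee.
    bool await_ready() { return false; }
    std::coroutine_handle<> await_suspend(std::coroutine_handle<> caller) {
        handle.promise().continuation = caller;
        return handle;
    }
    void await_resume() {}

    ~Task() { if (handle) handle.destroy(); }
};

Task leaf() { std::puts("leaf ran"); co_return; }

Task root() {
    co_await leaf();            // jumps into leaf(), then comes back here
    std::puts("root resumed");
}

int main()
{
    auto t = root();
    t.handle.resume();          // prints "leaf ran" then "root resumed"
}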
bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const {
std::string s1 = a->key();
@ -11,6 +102,29 @@ bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const {
}
BuildResult Goal::getBuildResult(const DerivedPath & req) const {
BuildResult res { buildResult };
if (auto pbp = std::get_if<DerivedPath::Built>(&req)) {
auto & bp = *pbp;
/* Because goals are in general shared between derived paths
that share the same derivation, we need to filter their
results to get back just the results we care about.
*/
for (auto it = res.builtOutputs.begin(); it != res.builtOutputs.end();) {
if (bp.outputs.contains(it->first))
++it;
else
it = res.builtOutputs.erase(it);
}
}
return res;
}
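The erase-while-iterating loop above is the pre-C++20 idiom for filtering a map in place; std::erase_if expresses the same thing directly. A generic sketch with hypothetical stand-ins for SingleDrvOutputs and the requested output names; it is illustrative, not part of the diff.

#include <map>
#include <set>
#include <string>

// Hypothetical stand-in types, not the real Nix ones.
using Outputs = std::map<std::string, std::string>;

static void keepOnlyRequested(Outputs & builtOutputs, const std::set<std::string> & wanted)
{
    std::erase_if(builtOutputs, [&](const auto & entry) {
        return !wanted.contains(entry.first);   // drop outputs that were not asked for
    });
}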
void addToWeakGoals(WeakGoals & goals, GoalPtr p)
{
if (goals.find(p) != goals.end())
@ -52,10 +166,10 @@ void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
}
}
void Goal::amDone(ExitCode result, std::optional<Error> ex)
Goal::Done Goal::amDone(ExitCode result, std::optional<Error> ex)
{
trace("done");
assert(top_co);
assert(exitCode == ecBusy);
assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure);
exitCode = result;
@ -75,12 +189,31 @@ void Goal::amDone(ExitCode result, std::optional<Error> ex)
worker.removeGoal(shared_from_this());
cleanup();
// We drop the continuation.
// In `final_awaiter` this will signal that there is no more work to be done.
top_co->handle.promise().continuation = {};
// won't return to caller because of logic in final_awaiter
return Done{};
}
void Goal::trace(const FormatOrString & fs)
void Goal::trace(std::string_view s)
{
debug("%1%: %2%", name, fs.s);
debug("%1%: %2%", name, s);
}
void Goal::work()
{
assert(top_co);
assert(top_co->handle);
assert(top_co->handle.promise().alive);
top_co->handle.resume();
// We either should be in a state where we can be work()-ed again,
// or we should be done.
assert(top_co || exitCode != ecBusy);
}
}


@ -1,16 +1,22 @@
#pragma once
///@file
#include "types.hh"
#include "store-api.hh"
#include "build-result.hh"
#include <coroutine>
namespace nix {
/* Forward definition. */
/**
* Forward definition.
*/
struct Goal;
class Worker;
/* A pointer to a goal. */
/**
* A pointer to a goal.
*/
typedef std::shared_ptr<Goal> GoalPtr;
typedef std::weak_ptr<Goal> WeakGoalPtr;
@ -18,95 +24,422 @@ struct CompareGoalPtrs {
bool operator() (const GoalPtr & a, const GoalPtr & b) const;
};
/* Set of goals. */
/**
* Set of goals.
*/
typedef std::set<GoalPtr, CompareGoalPtrs> Goals;
typedef std::set<WeakGoalPtr, std::owner_less<WeakGoalPtr>> WeakGoals;
/* A map of paths to goals (and the other way around). */
/**
* A map of paths to goals (and the other way around).
*/
typedef std::map<StorePath, WeakGoalPtr> WeakGoalMap;
/**
* Used as a hint to the worker on how to schedule a particular goal. For example,
* builds are typically CPU- and memory-bound, while substitutions are I/O bound.
* Using this information, the worker might decide to schedule more or fewer goals
* of each category in parallel.
*/
enum struct JobCategory {
/**
* A build of a derivation; it will use CPU and disk resources.
*/
Build,
/**
* A substitution of an arbitrary store object; it will use network resources.
*/
Substitution,
};
struct Goal : public std::enable_shared_from_this<Goal>
{
typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;
/* Backlink to the worker. */
/**
* Backlink to the worker.
*/
Worker & worker;
/* Goals that this goal is waiting for. */
/**
* Goals that this goal is waiting for.
*/
Goals waitees;
/* Goals waiting for this one to finish. Must use weak pointers
here to prevent cycles. */
/**
* Goals waiting for this one to finish. Must use weak pointers
* here to prevent cycles.
*/
WeakGoals waiters;
/* Number of goals we are/were waiting for that have failed. */
/**
* Number of goals we are/were waiting for that have failed.
*/
size_t nrFailed = 0;
/* Number of substitution goals we are/were waiting for that
failed because there are no substituters. */
/**
* Number of substitution goals we are/were waiting for that
* failed because there are no substituters.
*/
size_t nrNoSubstituters = 0;
/* Number of substitution goals we are/were waiting for that
failed because they had unsubstitutable references. */
/**
* Number of substitution goals we are/were waiting for that
* failed because they had unsubstitutable references.
*/
size_t nrIncompleteClosure = 0;
/* Name of this goal for debugging purposes. */
/**
* Name of this goal for debugging purposes.
*/
std::string name;
/* Whether the goal is finished. */
/**
* Whether the goal is finished.
*/
ExitCode exitCode = ecBusy;
/* Build result. */
protected:
/**
* Build result.
*/
BuildResult buildResult;
public:
/* Exception containing an error message, if any. */
/**
* Suspend our goal and wait until we get @ref work()-ed again.
* `co_await`-able by @ref Co.
*/
struct Suspend {};
/**
* Return from the current coroutine and suspend our goal
* if we're not busy anymore, or jump to the next coroutine
* set to be executed/resumed.
*/
struct Return {};
/**
* `co_return`-ing this will end the goal.
* If you're not inside a coroutine, you can safely discard this.
*/
struct [[nodiscard]] Done {
private:
Done(){}
friend Goal;
};
// forward declaration of promise_type, see below
struct promise_type;
/**
* Handle to coroutine using @ref Co and @ref promise_type.
*/
using handle_type = std::coroutine_handle<promise_type>;
/**
* C++20 coroutine wrapper for use in goal logic.
* Coroutines are functions that use `co_await`/`co_return` (and `co_yield`, which @ref Co does not support).
*
* @ref Co is meant to be used by methods of subclasses of @ref Goal.
* The main functionality provided by `Co` is
* - `co_await Suspend{}`: Suspends the goal.
* - `co_await f()`: Waits until `f()` finishes.
* - `co_return f()`: Tail-calls `f()`.
* - `co_return Return{}`: Ends coroutine.
*
* The idea is that you implement the goal logic using coroutines,
* and perform the core thing a goal can do, namely suspension, when you have
* children you're waiting for.
* Coroutines allow you to resume the work cleanly.
*
* @note Brief explanation of C++20 coroutines:
* When you `Co f()`, a `std::coroutine_handle<promise_type>` is created,
* alongside its @ref promise_type.
* There are suspension points at the beginning of the coroutine,
* at every `co_await`, and at the final (possibly implicit) `co_return`.
* Once suspended, you can resume the `std::coroutine_handle` by doing `coroutine_handle.resume()`.
* Suspension points are implemented by passing a struct to the compiler
* that implements `await_suspend`.
* `await_suspend` can either say "cancel suspension", in which case execution resumes;
* "suspend", in which case control is passed back to the caller of `coroutine_handle.resume()`
* (or, for the initial suspension, to the place where the coroutine function was first called);
* or it can specify another coroutine to jump to, which is
* how tail calls are implemented.
*
* @note Resources:
* - https://lewissbaker.github.io/
* - https://www.chiark.greenend.org.uk/~sgtatham/quasiblog/coroutines-c++20/
* - https://www.scs.stanford.edu/~dm/blog/c++-coroutines.html
*
* @todo Allocate explicitly on the stack, since the HALO optimisation doesn't really work;
* specifically, there's no way to uphold its requirements when trying to do
* tail calls without using a trampoline, AFAICT.
*
* @todo Support returning data natively
*/
struct [[nodiscard]] Co {
/**
* The underlying handle.
*/
handle_type handle;
explicit Co(handle_type handle) : handle(handle) {};
void operator=(Co&&);
Co(Co&& rhs);
~Co();
bool await_ready() { return false; };
/**
* When we `co_await` another @ref Co-returning coroutine,
* we tell the caller of `caller_coroutine.resume()` to switch to our coroutine (@ref handle).
* To make sure we return to the original coroutine, we set it as the continuation of our
* coroutine. In @ref promise_type::final_awaiter we check if it's set and if so we return to it.
*
* To explain in more understandable terms:
* When we `co_await Co_returning_function()`, this function is called on the resultant @ref Co of
* the _called_ function, and C++ automatically passes the caller in.
*
* `goal` field of @ref promise_type is also set here by copying it from the caller.
*/
std::coroutine_handle<> await_suspend(handle_type handle);
void await_resume() {};
};
/**
* Used on initial suspend, does the same as @ref std::suspend_always,
* but asserts that everything has been set correctly.
*/
struct InitialSuspend {
/**
* Handle of coroutine that does the
* initial suspend
*/
handle_type handle;
bool await_ready() { return false; };
void await_suspend(handle_type handle_) {
handle = handle_;
}
void await_resume() {
assert(handle);
assert(handle.promise().goal); // goal must be set
assert(handle.promise().goal->top_co); // top_co of goal must be set
assert(handle.promise().goal->top_co->handle == handle); // top_co of goal must be us
}
};
/**
* Promise type for coroutines defined using @ref Co.
* Attached to coroutine handle.
*/
struct promise_type {
/**
* Either this is who called us, or it is who we will tail-call.
* It is what we "jump" to once we are done.
*/
std::optional<Co> continuation;
/**
* The goal that we're a part of.
* Set either in @ref Co::await_suspend or in constructor of @ref Goal.
*/
Goal* goal = nullptr;
/**
* Set to false on destruction, to ensure we don't accidentally
* use a destroyed coroutine.
*/
bool alive = true;
/**
* The awaiter used by @ref final_suspend.
*/
struct final_awaiter {
bool await_ready() noexcept { return false; };
/**
* Here we execute our continuation by passing it back to the caller.
* The compiler-generated code takes the returned handle and resumes it promptly.
* `h` is the handle of the coroutine that is finishing execution,
* and thus must be destroyed.
*/
std::coroutine_handle<> await_suspend(handle_type h) noexcept;
void await_resume() noexcept { assert(false); };
};
/**
* Called by compiler generated code to construct the @ref Co
* that is returned from a @ref Co-returning coroutine.
*/
Co get_return_object();
/**
* Called by compiler generated code before the body of the coroutine.
* We use this opportunity to set the @ref goal field
* and `top_co` field of @ref Goal.
*/
InitialSuspend initial_suspend() { return {}; };
/**
* Called on `co_return`. Creates @ref final_awaiter which
* either jumps to the continuation or suspends the goal.
*/
final_awaiter final_suspend() noexcept { return {}; };
/**
* Does nothing, but provides an opportunity for
* @ref final_suspend to happen.
*/
void return_value(Return) {}
/**
* Does nothing, but provides an opportunity for
* @ref final_suspend to happen.
*/
void return_value(Done) {}
/**
* When "returning" another coroutine, what happens is that
* we set it as our own continuation, thus once the final suspend
* happens, we transfer control to it.
* The original continuation we had is set as the continuation
* of the coroutine passed in.
* @ref final_suspend is called after this, and @ref final_awaiter will
* pass control off to @ref continuation.
*
* If we already have a continuation, that continuation is set as
* the continuation of the new continuation. Thus, the continuation
* passed to @ref return_value must not have a continuation set.
*/
void return_value(Co&&);
/**
* If an exception is thrown inside a coroutine,
* we re-throw it in the context of the "resumer" of the continuation.
*/
void unhandled_exception() { throw; };
/**
* Allows awaiting a @ref Co.
*/
Co&& await_transform(Co&& co) { return static_cast<Co&&>(co); }
/**
* Allows awaiting a @ref Suspend.
* Always suspends.
*/
std::suspend_always await_transform(Suspend) { return {}; };
};
/**
* The coroutine currently being executed.
* MUST be updated whenever the coroutine being executed changes.
* This is used both for memory management and to resume the last
* coroutine executed.
* Destroying this should destroy all coroutines created for this goal.
*/
std::optional<Co> top_co;
/**
* The entry point for the goal
*/
virtual Co init() = 0;
/**
* Wrapper around @ref init since virtual functions
* can't be used in constructors.
*/
inline Co init_wrapper();
/**
* Signals that the goal is done.
* `co_return` the result. If you're not inside a coroutine, you can ignore
* the return value safely.
*/
Done amDone(ExitCode result, std::optional<Error> ex = {});
virtual void cleanup() { }
/**
* Project a `BuildResult` with just the information that pertains
* to the given request.
*
* In general, goals may be aliased between multiple requests, and
* the stored `BuildResult` has information for the union of all
* requests. We don't want to leak what the other requests are, for the
* sake of both privacy and determinism, and this "safe accessor"
* ensures we don't.
*/
BuildResult getBuildResult(const DerivedPath &) const;
/**
* Exception containing an error message, if any.
*/
std::optional<Error> ex;
Goal(Worker & worker, DerivedPath path)
: worker(worker)
, buildResult { .path = std::move(path) }
{ }
: worker(worker), top_co(init_wrapper())
{
// top_co shouldn't have a goal already, should be nullptr.
assert(!top_co->handle.promise().goal);
// we set it such that top_co can pass it down to its subcoroutines.
top_co->handle.promise().goal = this;
}
virtual ~Goal()
{
trace("goal destroyed");
}
virtual void work() = 0;
void work();
void addWaitee(GoalPtr waitee);
virtual void waiteeDone(GoalPtr waitee, ExitCode result);
virtual void handleChildOutput(int fd, std::string_view data)
virtual void handleChildOutput(Descriptor fd, std::string_view data)
{
abort();
unreachable();
}
virtual void handleEOF(int fd)
virtual void handleEOF(Descriptor fd)
{
abort();
unreachable();
}
void trace(const FormatOrString & fs);
void trace(std::string_view s);
std::string getName()
std::string getName() const
{
return name;
}
/* Callback in case of a timeout. It should wake up its waiters,
get rid of any running child processes that are being monitored
by the worker (important!), etc. */
/**
* Callback in case of a timeout. It should wake up its waiters,
* get rid of any running child processes that are being monitored
* by the worker (important!), etc.
*/
virtual void timedOut(Error && ex) = 0;
virtual std::string key() = 0;
void amDone(ExitCode result, std::optional<Error> ex = {});
virtual void cleanup() { }
/**
* @brief Hint for the scheduler, which concurrency limit applies.
* @see JobCategory
*/
virtual JobCategory jobCategory() const = 0;
};
void addToWeakGoals(WeakGoals & goals, GoalPtr p);
}
template<typename... ArgTypes>
struct std::coroutine_traits<nix::Goal::Co, ArgTypes...> {
using promise_type = nix::Goal::promise_type;
};
nix::Goal::Co nix::Goal::init_wrapper() {
co_return init();
}

View file

@ -1,218 +0,0 @@
#pragma once
#include "derivation-goal.hh"
#include "local-store.hh"
namespace nix {
struct LocalDerivationGoal : public DerivationGoal
{
LocalStore & getLocalStore();
/* User selected for running the builder. */
std::unique_ptr<UserLock> buildUser;
/* The process ID of the builder. */
Pid pid;
/* The cgroup of the builder, if any. */
std::optional<Path> cgroup;
/* The temporary directory. */
Path tmpDir;
/* The path of the temporary directory in the sandbox. */
Path tmpDirInSandbox;
/* Pipe for the builder's standard output/error. */
Pipe builderOut;
/* Pipe for synchronising updates to the builder namespaces. */
Pipe userNamespaceSync;
/* The mount namespace and user namespace of the builder, used to add additional
paths to the sandbox as a result of recursive Nix calls. */
AutoCloseFD sandboxMountNamespace;
AutoCloseFD sandboxUserNamespace;
/* On Linux, whether we're doing the build in its own user
namespace. */
bool usingUserNamespace = true;
/* Whether we're currently doing a chroot build. */
bool useChroot = false;
Path chrootRootDir;
/* RAII object to delete the chroot directory. */
std::shared_ptr<AutoDelete> autoDelChroot;
/* Whether to run the build in a private network namespace. */
bool privateNetwork = false;
/* Stuff we need to pass to initChild(). */
struct ChrootPath {
Path source;
bool optional;
ChrootPath(Path source = "", bool optional = false)
: source(source), optional(optional)
{ }
};
typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
DirsInChroot dirsInChroot;
typedef map<std::string, std::string> Environment;
Environment env;
#if __APPLE__
typedef std::string SandboxProfile;
SandboxProfile additionalSandboxProfile;
#endif
/* Hash rewriting. */
StringMap inputRewrites, outputRewrites;
typedef map<StorePath, StorePath> RedirectedOutputs;
RedirectedOutputs redirectedOutputs;
/* The outputs paths used during the build.
- Input-addressed derivations or fixed content-addressed outputs are
sometimes built when some of their outputs already exist, and can not
be hidden via sandboxing. We use temporary locations instead and
rewrite after the build. Otherwise the regular predetermined paths are
put here.
- Floating content-addressed derivations do not know their final build
output paths until the outputs are hashed, so random locations are
used, and then renamed. The randomness helps guard against hidden
self-references.
*/
OutputPathMap scratchOutputs;
/* Path registration info from the previous round, if we're
building multiple times. Since this contains the hash, it
allows us to compare whether two rounds produced the same
result. */
std::map<Path, ValidPathInfo> prevInfos;
uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 100 : 0) : buildUser->getGID(); }
const static Path homeDir;
/* The recursive Nix daemon socket. */
AutoCloseFD daemonSocket;
/* The daemon main thread. */
std::thread daemonThread;
/* The daemon worker threads. */
std::vector<std::thread> daemonWorkerThreads;
/* Paths that were added via recursive Nix calls. */
StorePathSet addedPaths;
/* Realisations that were added via recursive Nix calls. */
std::set<DrvOutput> addedDrvOutputs;
/* Recursive Nix calls are only allowed to build or realize paths
in the original input closure or added via a recursive Nix call
(so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
/nix/store/<bla> is some arbitrary path in a binary cache). */
bool isAllowed(const StorePath & path)
{
return inputPaths.count(path) || addedPaths.count(path);
}
bool isAllowed(const DrvOutput & id)
{
return addedDrvOutputs.count(id);
}
bool isAllowed(const DerivedPath & req);
friend struct RestrictedStore;
using DerivationGoal::DerivationGoal;
virtual ~LocalDerivationGoal() override;
/* Whether we need to perform hash rewriting if there are valid output paths. */
bool needsHashRewrite();
/* The additional states. */
void tryLocalBuild() override;
/* Start building a derivation. */
void startBuilder();
/* Fill in the environment for the builder. */
void initEnv();
/* Setup tmp dir location. */
void initTmpDir();
/* Write a JSON file containing the derivation attributes. */
void writeStructuredAttrs();
void startDaemon();
void stopDaemon();
/* Add 'path' to the set of paths that may be referenced by the
outputs, and make it appear in the sandbox. */
void addDependency(const StorePath & path);
/* Make a file owned by the builder. */
void chownToBuilder(const Path & path);
int getChildStatus() override;
/* Run the builder's process. */
void runChild();
/* Check that the derivation outputs all exist and register them
as valid. */
DrvOutputs registerOutputs() override;
void signRealisation(Realisation &) override;
/* Check that an output meets the requirements specified by the
'outputChecks' attribute (or the legacy
'{allowed,disallowed}{References,Requisites}' attributes). */
void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
/* Close the read side of the logger pipe. */
void closeReadPipes() override;
/* Cleanup hooks for buildDone() */
void cleanupHookFinally() override;
void cleanupPreChildKill() override;
void cleanupPostChildKill() override;
bool cleanupDecideWhetherDiskFull() override;
void cleanupPostOutputsRegisteredModeCheck() override;
void cleanupPostOutputsRegisteredModeNonCheck() override;
bool isReadDesc(int fd) override;
/* Delete the temporary directory, if we have one. */
void deleteTmpDir(bool force);
/* Forcibly kill the child process, if any. */
void killChild() override;
/* Kill any processes running under the build user UID or in the
cgroup of the build. */
void killSandbox(bool getStats);
/* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths)
that already exist. */
StorePath makeFallbackPath(const StorePath & path);
/* Make a path to another based on the output name along with the
derivation hash. */
/* FIXME add option to randomize, so we can audit whether our
rewrites caught everything */
StorePath makeFallbackPath(std::string_view outputName);
};
}

View file

@ -2,6 +2,8 @@
#include "substitution-goal.hh"
#include "nar-info.hh"
#include "finally.hh"
#include "signals.hh"
#include <coroutine>
namespace nix {
@ -11,7 +13,6 @@ PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker &
, repair(repair)
, ca(ca)
{
state = &PathSubstitutionGoal::init;
name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath));
trace("created");
maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions);
@ -24,7 +25,7 @@ PathSubstitutionGoal::~PathSubstitutionGoal()
}
void PathSubstitutionGoal::done(
Goal::Done PathSubstitutionGoal::done(
ExitCode result,
BuildResult::Status status,
std::optional<std::string> errorMsg)
@ -34,17 +35,11 @@ void PathSubstitutionGoal::done(
debug(*errorMsg);
buildResult.errorMsg = *errorMsg;
}
amDone(result);
return amDone(result);
}
void PathSubstitutionGoal::work()
{
(this->*state)();
}
void PathSubstitutionGoal::init()
Goal::Co PathSubstitutionGoal::init()
{
trace("init");
@ -52,170 +47,162 @@ void PathSubstitutionGoal::init()
/* If the path already exists we're done. */
if (!repair && worker.store.isValidPath(storePath)) {
done(ecSuccess, BuildResult::AlreadyValid);
return;
co_return done(ecSuccess, BuildResult::AlreadyValid);
}
if (settings.readOnlyMode)
throw Error("cannot substitute path '%s' - no write access to the Nix store", worker.store.printStorePath(storePath));
subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
auto subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
tryNext();
bool substituterFailed = false;
for (auto sub : subs) {
trace("trying next substituter");
cleanup();
/* The path the substituter refers to the path as. This will be
* different when the stores have different names. */
std::optional<StorePath> subPath;
/* Path info returned by the substituter's query info operation. */
std::shared_ptr<const ValidPathInfo> info;
if (ca) {
subPath = sub->makeFixedOutputPathFromCA(
std::string { storePath.name() },
ContentAddressWithReferences::withoutRefs(*ca));
if (sub->storeDir == worker.store.storeDir)
assert(subPath == storePath);
} else if (sub->storeDir != worker.store.storeDir) {
continue;
}
try {
// FIXME: make async
info = sub->queryPathInfo(subPath ? *subPath : storePath);
} catch (InvalidPath &) {
continue;
} catch (SubstituterDisabled & e) {
if (settings.tryFallback) continue;
else throw e;
} catch (Error & e) {
if (settings.tryFallback) {
logError(e.info());
continue;
} else throw e;
}
if (info->path != storePath) {
if (info->isContentAddressed(*sub) && info->references.empty()) {
auto info2 = std::make_shared<ValidPathInfo>(*info);
info2->path = storePath;
info = info2;
} else {
printError("asked '%s' for '%s' but got '%s'",
sub->getUri(), worker.store.printStorePath(storePath), sub->printStorePath(info->path));
continue;
}
}
/* Update the total expected download size. */
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info);
maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, info->narSize);
maintainExpectedDownload =
narInfo && narInfo->fileSize
? std::make_unique<MaintainCount<uint64_t>>(worker.expectedDownloadSize, narInfo->fileSize)
: nullptr;
worker.updateProgress();
/* Bail out early if this substituter lacks a valid
signature. LocalStore::addToStore() also checks for this, but
only after we've downloaded the path. */
if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
{
warn("ignoring substitute for '%s' from '%s', as it's not signed by any of the keys in 'trusted-public-keys'",
worker.store.printStorePath(storePath), sub->getUri());
continue;
}
/* To maintain the closure invariant, we first have to realise the
paths referenced by this one. */
for (auto & i : info->references)
if (i != storePath) /* ignore self-references */
addWaitee(worker.makePathSubstitutionGoal(i));
if (!waitees.empty()) co_await Suspend{};
// FIXME: consider returning boolean instead of passing in reference
bool out = false; // is mutated by tryToRun
co_await tryToRun(subPath ? *subPath : storePath, sub, info, out);
substituterFailed = substituterFailed || out;
}
/* None left. Terminate this goal and let someone else deal
with it. */
if (substituterFailed) {
worker.failedSubstitutions++;
worker.updateProgress();
}
/* Hack: don't indicate failure if there were no substituters.
In that case the calling derivation should just do a
build. */
co_return done(
substituterFailed ? ecFailed : ecNoSubstituters,
BuildResult::NoSubstituters,
fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath)));
}
void PathSubstitutionGoal::tryNext()
{
trace("trying next substituter");
cleanup();
if (subs.size() == 0) {
/* None left. Terminate this goal and let someone else deal
with it. */
/* Hack: don't indicate failure if there were no substituters.
In that case the calling derivation should just do a
build. */
done(
substituterFailed ? ecFailed : ecNoSubstituters,
BuildResult::NoSubstituters,
fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath)));
if (substituterFailed) {
worker.failedSubstitutions++;
worker.updateProgress();
}
return;
}
sub = subs.front();
subs.pop_front();
if (ca) {
subPath = sub->makeFixedOutputPathFromCA(storePath.name(), *ca);
if (sub->storeDir == worker.store.storeDir)
assert(subPath == storePath);
} else if (sub->storeDir != worker.store.storeDir) {
tryNext();
return;
}
try {
// FIXME: make async
info = sub->queryPathInfo(subPath ? *subPath : storePath);
} catch (InvalidPath &) {
tryNext();
return;
} catch (SubstituterDisabled &) {
if (settings.tryFallback) {
tryNext();
return;
}
throw;
} catch (Error & e) {
if (settings.tryFallback) {
logError(e.info());
tryNext();
return;
}
throw;
}
if (info->path != storePath) {
if (info->isContentAddressed(*sub) && info->references.empty()) {
auto info2 = std::make_shared<ValidPathInfo>(*info);
info2->path = storePath;
info = info2;
} else {
printError("asked '%s' for '%s' but got '%s'",
sub->getUri(), worker.store.printStorePath(storePath), sub->printStorePath(info->path));
tryNext();
return;
}
}
/* Update the total expected download size. */
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info);
maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, info->narSize);
maintainExpectedDownload =
narInfo && narInfo->fileSize
? std::make_unique<MaintainCount<uint64_t>>(worker.expectedDownloadSize, narInfo->fileSize)
: nullptr;
worker.updateProgress();
/* Bail out early if this substituter lacks a valid
signature. LocalStore::addToStore() also checks for this, but
only after we've downloaded the path. */
if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
{
warn("ignoring substitute for '%s' from '%s', as it's not signed by any of the keys in 'trusted-public-keys'",
worker.store.printStorePath(storePath), sub->getUri());
tryNext();
return;
}
/* To maintain the closure invariant, we first have to realise the
paths referenced by this one. */
for (auto & i : info->references)
if (i != storePath) /* ignore self-references */
addWaitee(worker.makePathSubstitutionGoal(i));
if (waitees.empty()) /* to prevent hang (no wake-up event) */
referencesValid();
else
state = &PathSubstitutionGoal::referencesValid;
}
void PathSubstitutionGoal::referencesValid()
Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed)
{
trace("all references realised");
if (nrFailed > 0) {
done(
co_return done(
nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed,
BuildResult::DependencyFailed,
fmt("some references of path '%s' could not be realised", worker.store.printStorePath(storePath)));
return;
}
for (auto & i : info->references)
if (i != storePath) /* ignore self-references */
assert(worker.store.isValidPath(i));
state = &PathSubstitutionGoal::tryToRun;
worker.wakeUp(shared_from_this());
}
co_await Suspend{};
void PathSubstitutionGoal::tryToRun()
{
trace("trying to run");
/* Make sure that we are allowed to start a build. Note that even
if maxBuildJobs == 0 (no local builds allowed), we still allow
a substituter to run. This is because substitutions cannot be
distributed to another machine via the build hook. */
if (worker.getNrLocalBuilds() >= std::max(1U, (unsigned int) settings.maxBuildJobs)) {
/* Make sure that we are allowed to start a substitution. Note that even
if maxSubstitutionJobs == 0, we still allow a substituter to run. This
prevents infinite waiting. */
if (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
worker.waitForBuildSlot(shared_from_this());
return;
co_await Suspend{};
}
maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions);
auto maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions);
worker.updateProgress();
#ifndef _WIN32
outPipe.create();
#else
outPipe.createAsyncPipe(worker.ioport.get());
#endif
promise = std::promise<void>();
auto promise = std::promise<void>();
thr = std::thread([this]() {
thr = std::thread([this, &promise, &subPath, &sub]() {
try {
ReceiveInterrupts receiveInterrupts;
/* Wake up the worker loop when we're done. */
Finally updateStats([this]() { outPipe.writeSide.close(); });
@ -223,7 +210,7 @@ void PathSubstitutionGoal::tryToRun()
PushActivity pact(act.id);
copyStorePath(*sub, worker.store,
subPath ? *subPath : storePath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs);
subPath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs);
promise.set_value();
} catch (...) {
@ -231,14 +218,16 @@ void PathSubstitutionGoal::tryToRun()
}
});
worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
worker.childStarted(shared_from_this(), {
#ifndef _WIN32
outPipe.readSide.get()
#else
&outPipe
#endif
}, true, false);
state = &PathSubstitutionGoal::finished;
}
co_await Suspend{};
void PathSubstitutionGoal::finished()
{
trace("substitute finished");
thr.join();
@ -260,10 +249,7 @@ void PathSubstitutionGoal::finished()
substituterFailed = true;
}
/* Try the next substitute. */
state = &PathSubstitutionGoal::tryNext;
worker.wakeUp(shared_from_this());
return;
co_return Return{};
}
worker.markContentsGood(storePath);
@ -281,23 +267,19 @@ void PathSubstitutionGoal::finished()
worker.doneDownloadSize += fileSize;
}
assert(maintainExpectedNar);
worker.doneNarSize += maintainExpectedNar->delta;
maintainExpectedNar.reset();
worker.updateProgress();
done(ecSuccess, BuildResult::Substituted);
co_return done(ecSuccess, BuildResult::Substituted);
}
void PathSubstitutionGoal::handleChildOutput(int fd, std::string_view data)
void PathSubstitutionGoal::handleEOF(Descriptor fd)
{
}
void PathSubstitutionGoal::handleEOF(int fd)
{
if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
worker.wakeUp(shared_from_this());
}

View file

@ -1,59 +1,47 @@
#pragma once
///@file
#include "lock.hh"
#include "worker.hh"
#include "store-api.hh"
#include "goal.hh"
#include "muxable-pipe.hh"
#include <coroutine>
#include <future>
#include <source_location>
namespace nix {
class Worker;
struct PathSubstitutionGoal : public Goal
{
/* The store path that should be realised through a substitute. */
/**
* The store path that should be realised through a substitute.
*/
StorePath storePath;
/* The path the substituter refers to the path as. This will be
different when the stores have different names. */
std::optional<StorePath> subPath;
/* The remaining substituters. */
std::list<ref<Store>> subs;
/* The current substituter. */
std::shared_ptr<Store> sub;
/* Whether a substituter failed. */
bool substituterFailed = false;
/* Path info returned by the substituter's query info operation. */
std::shared_ptr<const ValidPathInfo> info;
/* Pipe for the substituter's standard output. */
Pipe outPipe;
/* The substituter thread. */
std::thread thr;
std::promise<void> promise;
/* Whether to try to repair a valid path. */
/**
* Whether to try to repair a valid path.
*/
RepairFlag repair;
/* Location where we're downloading the substitute. Differs from
storePath when doing a repair. */
Path destPath;
/**
* Pipe for the substituter's standard output.
*/
MuxablePipe outPipe;
/**
* The substituter thread.
*/
std::thread thr;
std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload;
typedef void (PathSubstitutionGoal::*GoalState)();
GoalState state;
/* Content address for recomputing store path */
/**
* Content address for recomputing store path
*/
std::optional<ContentAddress> ca;
void done(
Done done(
ExitCode result,
BuildResult::Status status,
std::optional<std::string> errorMsg = {});
@ -62,30 +50,37 @@ public:
PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
~PathSubstitutionGoal();
void timedOut(Error && ex) override { abort(); };
void timedOut(Error && ex) override { unreachable(); };
/**
* We prepend "a$" to the key name to ensure substitution goals
* happen before derivation goals.
*/
std::string key() override
{
/* "a$" ensures substitution goals happen before derivation
goals. */
return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath);
}
void work() override;
/**
* The states.
*/
Co init() override;
Co gotInfo();
Co tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed);
Co finished();
/* The states. */
void init();
void tryNext();
void gotInfo();
void referencesValid();
void tryToRun();
void finished();
/**
* Callback used by the worker to write to the log.
*/
void handleChildOutput(Descriptor fd, std::string_view data) override {};
void handleEOF(Descriptor fd) override;
/* Callback used by the worker to write to the log. */
void handleChildOutput(int fd, std::string_view data) override;
void handleEOF(int fd) override;
/* Called by destructor, can't be overridden */
void cleanup() override final;
void cleanup() override;
JobCategory jobCategory() const override {
return JobCategory::Substitution;
};
};
}

View file

@ -1,11 +1,14 @@
#include "local-store.hh"
#include "machines.hh"
#include "worker.hh"
#include "substitution-goal.hh"
#include "drv-output-substitution-goal.hh"
#include "local-derivation-goal.hh"
#include "hook-instance.hh"
#include <poll.h>
#include "derivation-goal.hh"
#ifndef _WIN32 // TODO Enable building on Windows
# include "local-derivation-goal.hh"
# include "hook-instance.hh"
#endif
#include "signals.hh"
namespace nix {
@ -16,8 +19,8 @@ Worker::Worker(Store & store, Store & evalStore)
, store(store)
, evalStore(evalStore)
{
/* Debugging: prevent recursive workers. */
nrLocalBuilds = 0;
nrSubstitutions = 0;
lastWokenUp = steady_time_point::min();
permanentFailure = false;
timedOut = false;
@ -42,7 +45,7 @@ Worker::~Worker()
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
const StorePath & drvPath,
const StringSet & wantedOutputs,
const OutputsSpec & wantedOutputs,
std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal)
{
std::weak_ptr<DerivationGoal> & goal_weak = derivationGoals[drvPath];
@ -59,23 +62,30 @@ std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
const StringSet & wantedOutputs, BuildMode buildMode)
const OutputsSpec & wantedOutputs, BuildMode buildMode)
{
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
return !dynamic_cast<LocalStore *>(&store)
? std::make_shared</* */DerivationGoal>(drvPath, wantedOutputs, *this, buildMode)
: std::make_shared<LocalDerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
return
#ifndef _WIN32 // TODO Enable building on Windows
dynamic_cast<LocalStore *>(&store)
? std::make_shared<LocalDerivationGoal>(drvPath, wantedOutputs, *this, buildMode)
:
#endif
std::make_shared</* */DerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
});
}
std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
const BasicDerivation & drv, const StringSet & wantedOutputs, BuildMode buildMode)
const BasicDerivation & drv, const OutputsSpec & wantedOutputs, BuildMode buildMode)
{
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
return !dynamic_cast<LocalStore *>(&store)
? std::make_shared</* */DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode)
: std::make_shared<LocalDerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
return
#ifndef _WIN32 // TODO Enable building on Windows
dynamic_cast<LocalStore *>(&store)
? std::make_shared<LocalDerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode)
:
#endif
std::make_shared</* */DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
});
}
@ -92,6 +102,7 @@ std::shared_ptr<PathSubstitutionGoal> Worker::makePathSubstitutionGoal(const Sto
return goal;
}
std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional<ContentAddress> ca)
{
std::weak_ptr<DrvOutputSubstitutionGoal> & goal_weak = drvOutputSubstitutionGoals[id];
@ -104,6 +115,23 @@ std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal
return goal;
}
GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode)
{
return std::visit(overloaded {
[&](const DerivedPath::Built & bfd) -> GoalPtr {
if (auto bop = std::get_if<DerivedPath::Opaque>(&*bfd.drvPath))
return makeDerivationGoal(bop->path, bfd.outputs, buildMode);
else
throw UnimplementedError("Building dynamic derivations in one shot is not yet implemented.");
},
[&](const DerivedPath::Opaque & bo) -> GoalPtr {
return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair);
},
}, req.raw());
}
template<typename K, typename G>
static void removeGoal(std::shared_ptr<G> goal, std::map<K, std::weak_ptr<G>> & goalMap)
{
@ -123,7 +151,8 @@ void Worker::removeGoal(GoalPtr goal)
{
if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
nix::removeGoal(drvGoal, derivationGoals);
else if (auto subGoal = std::dynamic_pointer_cast<PathSubstitutionGoal>(goal))
else
if (auto subGoal = std::dynamic_pointer_cast<PathSubstitutionGoal>(goal))
nix::removeGoal(subGoal, substitutionGoals);
else if (auto subGoal = std::dynamic_pointer_cast<DrvOutputSubstitutionGoal>(goal))
nix::removeGoal(subGoal, drvOutputSubstitutionGoals);
@ -161,18 +190,35 @@ unsigned Worker::getNrLocalBuilds()
}
void Worker::childStarted(GoalPtr goal, const std::set<int> & fds,
unsigned Worker::getNrSubstitutions()
{
return nrSubstitutions;
}
void Worker::childStarted(GoalPtr goal, const std::set<MuxablePipePollState::CommChannel> & channels,
bool inBuildSlot, bool respectTimeouts)
{
Child child;
child.goal = goal;
child.goal2 = goal.get();
child.fds = fds;
child.channels = channels;
child.timeStarted = child.lastOutput = steady_time_point::clock::now();
child.inBuildSlot = inBuildSlot;
child.respectTimeouts = respectTimeouts;
children.emplace_back(child);
if (inBuildSlot) nrLocalBuilds++;
if (inBuildSlot) {
switch (goal->jobCategory()) {
case JobCategory::Substitution:
nrSubstitutions++;
break;
case JobCategory::Build:
nrLocalBuilds++;
break;
default:
unreachable();
}
}
}
@ -183,8 +229,18 @@ void Worker::childTerminated(Goal * goal, bool wakeSleepers)
if (i == children.end()) return;
if (i->inBuildSlot) {
assert(nrLocalBuilds > 0);
nrLocalBuilds--;
switch (goal->jobCategory()) {
case JobCategory::Substitution:
assert(nrSubstitutions > 0);
nrSubstitutions--;
break;
case JobCategory::Build:
assert(nrLocalBuilds > 0);
nrLocalBuilds--;
break;
default:
unreachable();
}
}
children.erase(i);
@ -204,8 +260,10 @@ void Worker::childTerminated(Goal * goal, bool wakeSleepers)
void Worker::waitForBuildSlot(GoalPtr goal)
{
debug("wait for build slot");
if (getNrLocalBuilds() < settings.maxBuildJobs)
goal->trace("wait for build slot");
bool isSubstitutionGoal = goal->jobCategory() == JobCategory::Substitution;
if ((!isSubstitutionGoal && getNrLocalBuilds() < settings.maxBuildJobs) ||
(isSubstitutionGoal && getNrSubstitutions() < settings.maxSubstitutionJobs))
wakeUp(goal); /* we can do it right away */
else
addToWeakGoals(wantingToBuild, goal);
@ -233,8 +291,12 @@ void Worker::run(const Goals & _topGoals)
for (auto & i : _topGoals) {
topGoals.insert(i);
if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) {
topPaths.push_back(DerivedPath::Built{goal->drvPath, goal->wantedOutputs});
} else if (auto goal = dynamic_cast<PathSubstitutionGoal *>(i.get())) {
topPaths.push_back(DerivedPath::Built {
.drvPath = makeConstantStorePathRef(goal->drvPath),
.outputs = goal->wantedOutputs,
});
} else
if (auto goal = dynamic_cast<PathSubstitutionGoal *>(i.get())) {
topPaths.push_back(DerivedPath::Opaque{goal->storePath});
}
}
@ -275,21 +337,27 @@ void Worker::run(const Goals & _topGoals)
/* Wait for input. */
if (!children.empty() || !waitingForAWhile.empty())
waitForInput();
else {
if (awake.empty() && 0 == settings.maxBuildJobs)
{
if (getMachines().empty())
throw Error("unable to start any build; either increase '--max-jobs' "
"or enable remote builds."
"\nhttps://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html");
else
throw Error("unable to start any build; remote machines may not have "
"all required system features."
"\nhttps://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html");
else if (awake.empty() && 0U == settings.maxBuildJobs) {
if (getMachines().empty())
throw Error(
R"(
Unable to start any build;
either increase '--max-jobs' or enable remote builds.
}
assert(!awake.empty());
}
For more information run 'man nix.conf' and search for '/machines'.
)"
);
else
throw Error(
R"(
Unable to start any build;
remote machines may not have all required system features.
For more information run 'man nix.conf' and search for '/machines'.
)"
);
} else assert(!awake.empty());
}
/* If --keep-going is not set, it's possible that the main goal
@ -346,23 +414,25 @@ void Worker::waitForInput()
if (useTimeout)
vomit("sleeping %d seconds", timeout);
MuxablePipePollState state;
#ifndef _WIN32
/* Use select() to wait for the input side of any logger pipe to
become `available'. Note that `available' (i.e., non-blocking)
includes EOF. */
std::vector<struct pollfd> pollStatus;
std::map<int, size_t> fdToPollStatus;
for (auto & i : children) {
for (auto & j : i.fds) {
pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
fdToPollStatus[j] = pollStatus.size() - 1;
for (auto & j : i.channels) {
state.pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
state.fdToPollStatus[j] = state.pollStatus.size() - 1;
}
}
#endif
if (poll(pollStatus.data(), pollStatus.size(),
useTimeout ? timeout * 1000 : -1) == -1) {
if (errno == EINTR) return;
throw SysError("waiting for input");
}
state.poll(
#ifdef _WIN32
ioport.get(),
#endif
useTimeout ? (std::optional { timeout * 1000 }) : std::nullopt);
auto after = steady_time_point::clock::now();
@ -377,32 +447,18 @@ void Worker::waitForInput()
GoalPtr goal = j->goal.lock();
assert(goal);
std::set<int> fds2(j->fds);
std::vector<unsigned char> buffer(4096);
for (auto & k : fds2) {
const auto fdPollStatusId = get(fdToPollStatus, k);
assert(fdPollStatusId);
assert(*fdPollStatusId < pollStatus.size());
if (pollStatus.at(*fdPollStatusId).revents) {
ssize_t rd = ::read(k, buffer.data(), buffer.size());
// FIXME: is there a cleaner way to handle pt close
// than EIO? Is this even standard?
if (rd == 0 || (rd == -1 && errno == EIO)) {
debug("%1%: got EOF", goal->getName());
goal->handleEOF(k);
j->fds.erase(k);
} else if (rd == -1) {
if (errno != EINTR)
throw SysError("%s: read failed", goal->getName());
} else {
printMsg(lvlVomit, "%1%: read %2% bytes",
goal->getName(), rd);
std::string data((char *) buffer.data(), rd);
j->lastOutput = after;
goal->handleChildOutput(k, data);
}
}
}
state.iterate(
j->channels,
[&](Descriptor k, std::string_view data) {
printMsg(lvlVomit, "%1%: read %2% bytes",
goal->getName(), data.size());
j->lastOutput = after;
goal->handleChildOutput(k, data);
},
[&](Descriptor k) {
debug("%1%: got EOF", goal->getName());
goal->handleEOF(k);
});
if (goal->exitCode == Goal::ecBusy &&
0 != settings.maxSilentTime &&
@ -436,16 +492,9 @@ void Worker::waitForInput()
}
unsigned int Worker::exitStatus()
unsigned int Worker::failingExitStatus()
{
/*
* 1100100
* ^^^^
* |||`- timeout
* ||`-- output hash mismatch
* |`--- build failure
* `---- not deterministic
*/
// See API docs in header for explanation
unsigned int mask = 0;
bool buildFailure = permanentFailure || timedOut || hashMismatch;
if (buildFailure)
@ -474,9 +523,11 @@ bool Worker::pathContentsGood(const StorePath & path)
if (!pathExists(store.printStorePath(path)))
res = false;
else {
HashResult current = hashPath(info->narHash.type, store.printStorePath(path));
Hash nullHash(htSHA256);
res = info->narHash == nullHash || info->narHash == current.first;
auto current = hashPath(
{store.getFSAccessor(), CanonPath(store.printStorePath(path))},
FileIngestionMethod::NixArchive, info->narHash.algo).first;
Hash nullHash(HashAlgorithm::SHA256);
res = info->narHash == nullHash || info->narHash == current;
}
pathContentsGoodCache.insert_or_assign(path, res);
if (!res)
@ -491,10 +542,13 @@ void Worker::markContentsGood(const StorePath & path)
}
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal) {
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal)
{
return subGoal;
}
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal) {
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal)
{
return subGoal;
}

View file

@ -1,10 +1,11 @@
#pragma once
///@file
#include "types.hh"
#include "lock.hh"
#include "store-api.hh"
#include "goal.hh"
#include "realisation.hh"
#include "muxable-pipe.hh"
#include <future>
#include <thread>
@ -16,39 +17,50 @@ struct DerivationGoal;
struct PathSubstitutionGoal;
class DrvOutputSubstitutionGoal;
/* Workaround for not being able to declare a something like
class PathSubstitutionGoal : public Goal;
even when Goal is a complete type.
This is still a static cast. The purpose of exporting it is to define it in
a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
is opaque. */
/**
* Workaround for not being able to declare something like
*
* ```c++
* class PathSubstitutionGoal : public Goal;
* ```
* even when Goal is a complete type.
*
* This is still a static cast. The purpose of exporting it is to define it in
* a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
* is opaque.
*/
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal);
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal);
typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
/* A mapping used to remember for each child process to what goal it
belongs, and file descriptors for receiving log data and output
path creation commands. */
/**
* A mapping used to remember for each child process to what goal it
* belongs, and comm channels for receiving log data and output
* path creation commands.
*/
struct Child
{
WeakGoalPtr goal;
Goal * goal2; // ugly hackery
std::set<int> fds;
std::set<MuxablePipePollState::CommChannel> channels;
bool respectTimeouts;
bool inBuildSlot;
steady_time_point lastOutput; /* time we last got output on stdout/stderr */
/**
* Time we last got output on stdout/stderr
*/
steady_time_point lastOutput;
steady_time_point timeStarted;
};
#ifndef _WIN32 // TODO Enable building on Windows
/* Forward definition. */
struct HookInstance;
#endif
/* The worker class. */
/**
* Coordinates one or more realisations and their interdependencies.
*/
class Worker
{
private:
@ -56,38 +68,63 @@ private:
/* Note: the worker should only have strong pointers to the
top-level goals. */
/* The top-level goals of the worker. */
/**
* The top-level goals of the worker.
*/
Goals topGoals;
/* Goals that are ready to do some work. */
/**
* Goals that are ready to do some work.
*/
WeakGoals awake;
/* Goals waiting for a build slot. */
/**
* Goals waiting for a build slot.
*/
WeakGoals wantingToBuild;
/* Child processes currently running. */
/**
* Child processes currently running.
*/
std::list<Child> children;
/* Number of build slots occupied. This includes local builds and
substitutions but not remote builds via the build hook. */
/**
* Number of build slots occupied. This includes local builds but does not
* include substitutions or remote builds via the build hook.
*/
unsigned int nrLocalBuilds;
/* Maps used to prevent multiple instantiations of a goal for the
same derivation / path. */
/**
* Number of substitution slots occupied.
*/
unsigned int nrSubstitutions;
/**
* Maps used to prevent multiple instantiations of a goal for the
* same derivation / path.
*/
std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
std::map<StorePath, std::weak_ptr<PathSubstitutionGoal>> substitutionGoals;
std::map<DrvOutput, std::weak_ptr<DrvOutputSubstitutionGoal>> drvOutputSubstitutionGoals;
/* Goals waiting for busy paths to be unlocked. */
/**
* Goals waiting for busy paths to be unlocked.
*/
WeakGoals waitingForAnyGoal;
/* Goals sleeping for a few seconds (polling a lock). */
/**
* Goals sleeping for a few seconds (polling a lock).
*/
WeakGoals waitingForAWhile;
/* Last time the goals in `waitingForAWhile' where woken up. */
/**
* Last time the goals in `waitingForAWhile` were woken up.
*/
steady_time_point lastWokenUp;
/* Cache for pathContentsGood(). */
/**
* Cache for pathContentsGood().
*/
std::map<StorePath, bool> pathContentsGoodCache;
public:
@ -96,23 +133,37 @@ public:
const Activity actDerivations;
const Activity actSubstitutions;
/* Set if at least one derivation had a BuildError (i.e. permanent
failure). */
/**
* Set if at least one derivation had a BuildError (i.e. permanent
* failure).
*/
bool permanentFailure;
/* Set if at least one derivation had a timeout. */
/**
* Set if at least one derivation had a timeout.
*/
bool timedOut;
/* Set if at least one derivation fails with a hash mismatch. */
/**
* Set if at least one derivation fails with a hash mismatch.
*/
bool hashMismatch;
/* Set if at least one derivation is not deterministic in check mode. */
/**
* Set if at least one derivation is not deterministic in check mode.
*/
bool checkMismatch;
#ifdef _WIN32
AutoCloseFD ioport;
#endif
Store & store;
Store & evalStore;
#ifndef _WIN32 // TODO Enable building on Windows
std::unique_ptr<HookInstance> hook;
#endif
uint64_t expectedBuilds = 0;
uint64_t doneBuilds = 0;
@ -128,78 +179,141 @@ public:
uint64_t expectedNarSize = 0;
uint64_t doneNarSize = 0;
/* Whether to ask the build hook if it can build a derivation. If
it answers with "decline-permanently", we don't try again. */
/**
* Whether to ask the build hook if it can build a derivation. If
* it answers with "decline-permanently", we don't try again.
*/
bool tryBuildHook = true;
Worker(Store & store, Store & evalStore);
~Worker();
/* Make a goal (with caching). */
/**
* Make a goal (with caching).
*/
/* derivation goal */
/**
* @ref DerivationGoal "derivation goal"
*/
private:
std::shared_ptr<DerivationGoal> makeDerivationGoalCommon(
const StorePath & drvPath, const StringSet & wantedOutputs,
const StorePath & drvPath, const OutputsSpec & wantedOutputs,
std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal);
public:
std::shared_ptr<DerivationGoal> makeDerivationGoal(
const StorePath & drvPath,
const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);
std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(
const StorePath & drvPath, const BasicDerivation & drv,
const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);
/* substitution goal */
/**
* @ref SubstitutionGoal "substitution goal"
*/
std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
/* Remove a dead goal. */
/**
* Make a goal corresponding to the `DerivedPath`.
*
* It will be a `DerivationGoal` for a `DerivedPath::Built` or
* a `SubstitutionGoal` for a `DerivedPath::Opaque`.
*/
GoalPtr makeGoal(const DerivedPath & req, BuildMode buildMode = bmNormal);
/**
* Remove a dead goal.
*/
void removeGoal(GoalPtr goal);
/* Wake up a goal (i.e., there is something for it to do). */
/**
* Wake up a goal (i.e., there is something for it to do).
*/
void wakeUp(GoalPtr goal);
/* Return the number of local build and substitution processes
currently running (but not remote builds via the build
hook). */
/**
* Return the number of local build processes currently running (but not
* remote builds via the build hook).
*/
unsigned int getNrLocalBuilds();
/* Registers a running child process. `inBuildSlot' means that
the process counts towards the jobs limit. */
void childStarted(GoalPtr goal, const std::set<int> & fds,
/**
* Return the number of substitution processes currently running.
*/
unsigned int getNrSubstitutions();
/**
* Registers a running child process. `inBuildSlot` means that
* the process counts towards the jobs limit.
*/
void childStarted(GoalPtr goal, const std::set<MuxablePipePollState::CommChannel> & channels,
bool inBuildSlot, bool respectTimeouts);
/* Unregisters a running child process. `wakeSleepers' should be
false if there is no sense in waking up goals that are sleeping
because they can't run yet (e.g., there is no free build slot,
or the hook would still say `postpone'). */
/**
* Unregisters a running child process. `wakeSleepers` should be
* false if there is no sense in waking up goals that are sleeping
* because they can't run yet (e.g., there is no free build slot,
* or the hook would still say `postpone`).
*/
void childTerminated(Goal * goal, bool wakeSleepers = true);
/* Put `goal' to sleep until a build slot becomes available (which
might be right away). */
/**
* Put `goal` to sleep until a build slot becomes available (which
* might be right away).
*/
void waitForBuildSlot(GoalPtr goal);
/* Wait for any goal to finish. Pretty indiscriminate way to
wait for some resource that some other goal is holding. */
/**
* Wait for any goal to finish. Pretty indiscriminate way to
* wait for some resource that some other goal is holding.
*/
void waitForAnyGoal(GoalPtr goal);
/* Wait for a few seconds and then retry this goal. Used when
waiting for a lock held by another process. This kind of
polling is inefficient, but POSIX doesn't really provide a way
to wait for multiple locks in the main select() loop. */
/**
* Wait for a few seconds and then retry this goal. Used when
* waiting for a lock held by another process. This kind of
* polling is inefficient, but POSIX doesn't really provide a way
* to wait for multiple locks in the main select() loop.
*/
void waitForAWhile(GoalPtr goal);
/* Loop until the specified top-level goals have finished. */
/**
* Loop until the specified top-level goals have finished.
*/
void run(const Goals & topGoals);
/* Wait for input to become available. */
/**
* Wait for input to become available.
*/
void waitForInput();
unsigned int exitStatus();
/***
* The exit status in case of failure.
*
* In the case of a build failure, the returned value follows this
* bitmask:
*
* ```
* 0b1100100
* ^^^^
* |||`- timeout
* ||`-- output hash mismatch
* |`--- build failure
* `---- not deterministic
* ```
*
* In other words, the failure code is at least 100 (0b1100100), but
* might also be greater; for example, a build failure that also timed
* out would yield 0b1100101 = 101.
*
* Otherwise (no build failure, but some other sort of failure by
* assumption), the returned value is 1.
*/
unsigned int failingExitStatus();
/* Check whether the given valid path exists and has the right
contents. */
/**
* Check whether the given valid path exists and has the right
* contents.
*/
bool pathContentsGood(const StorePath & path);
void markContentsGood(const StorePath & path);

View file

@ -1,11 +1,18 @@
#pragma once
///@file
#include "derivations.hh"
namespace nix {
// TODO: make pluggable.
void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
void builtinUnpackChannel(const BasicDerivation & drv);
void builtinFetchurl(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs,
const std::string & netrcData);
void builtinUnpackChannel(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs);
}

View file

@ -1,4 +1,6 @@
#include "buildenv.hh"
#include "derivations.hh"
#include "signals.hh"
#include <sys/stat.h>
#include <sys/types.h>
@ -16,12 +18,12 @@ struct State
/* For each activated package, create symlinks */
static void createLinks(State & state, const Path & srcDir, const Path & dstDir, int priority)
{
DirEntries srcFiles;
std::filesystem::directory_iterator srcFiles;
try {
srcFiles = readDirectory(srcDir);
} catch (SysError & e) {
if (e.errNo == ENOTDIR) {
srcFiles = std::filesystem::directory_iterator{srcDir};
} catch (std::filesystem::filesystem_error & e) {
if (e.code() == std::errc::not_a_directory) {
warn("not including '%s' in the user environment because it's not a directory", srcDir);
return;
}
@ -29,11 +31,13 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
}
for (const auto & ent : srcFiles) {
if (ent.name[0] == '.')
checkInterrupt();
auto name = ent.path().filename();
if (name.string()[0] == '.')
/* not matched by glob */
continue;
auto srcFile = srcDir + "/" + ent.name;
auto dstFile = dstDir + "/" + ent.name;
auto srcFile = (std::filesystem::path{srcDir} / name).string();
auto dstFile = (std::filesystem::path{dstDir} / name).string();
struct stat srcSt;
try {
@ -63,9 +67,9 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
continue;
else if (S_ISDIR(srcSt.st_mode)) {
struct stat dstSt;
auto res = lstat(dstFile.c_str(), &dstSt);
if (res == 0) {
auto dstStOpt = maybeLstat(dstFile.c_str());
if (dstStOpt) {
auto & dstSt = *dstStOpt;
if (S_ISDIR(dstSt.st_mode)) {
createLinks(state, srcFile, dstFile, priority);
continue;
@ -75,38 +79,38 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
throw Error("collision between '%1%' and non-directory '%2%'", srcFile, target);
if (unlink(dstFile.c_str()) == -1)
throw SysError("unlinking '%1%'", dstFile);
if (mkdir(dstFile.c_str(), 0755) == -1)
if (mkdir(dstFile.c_str()
#ifndef _WIN32 // TODO abstract mkdir perms for Windows
, 0755
#endif
) == -1)
throw SysError("creating directory '%1%'", dstFile);
createLinks(state, target, dstFile, state.priorities[dstFile]);
createLinks(state, srcFile, dstFile, priority);
continue;
}
} else if (errno != ENOENT)
throw SysError("getting status of '%1%'", dstFile);
}
}
else {
struct stat dstSt;
auto res = lstat(dstFile.c_str(), &dstSt);
if (res == 0) {
auto dstStOpt = maybeLstat(dstFile.c_str());
if (dstStOpt) {
auto & dstSt = *dstStOpt;
if (S_ISLNK(dstSt.st_mode)) {
auto prevPriority = state.priorities[dstFile];
if (prevPriority == priority)
throw Error(
"files '%1%' and '%2%' have the same priority %3%; "
"use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' "
"or type 'nix profile install --help' if using 'nix profile' to find out how"
"to change the priority of one of the conflicting packages"
" (0 being the highest priority)",
srcFile, readLink(dstFile), priority);
throw BuildEnvFileConflictError(
readLink(dstFile),
srcFile,
priority
);
if (prevPriority < priority)
continue;
if (unlink(dstFile.c_str()) == -1)
throw SysError("unlinking '%1%'", dstFile);
} else if (S_ISDIR(dstSt.st_mode))
throw Error("collision between non-directory '%1%' and directory '%2%'", srcFile, dstFile);
} else if (errno != ENOENT)
throw SysError("getting status of '%1%'", dstFile);
}
}
createSymlink(srcFile, dstFile);
@ -162,7 +166,9 @@ void buildProfile(const Path & out, Packages && pkgs)
debug("created %d symlinks in user environment", state.symlinks);
}
void builtinBuildenv(const BasicDerivation & drv)
void builtinBuildenv(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs)
{
auto getAttr = [&](const std::string & name) {
auto i = drv.env.find(name);
@ -170,21 +176,25 @@ void builtinBuildenv(const BasicDerivation & drv)
return i->second;
};
Path out = getAttr("out");
auto out = outputs.at("out");
createDirs(out);
/* Convert the stuff we get from the environment back into a
* coherent data type. */
Packages pkgs;
auto derivations = tokenizeString<Strings>(getAttr("derivations"));
while (!derivations.empty()) {
/* !!! We're trusting the caller to structure derivations env var correctly */
auto active = derivations.front(); derivations.pop_front();
auto priority = stoi(derivations.front()); derivations.pop_front();
auto outputs = stoi(derivations.front()); derivations.pop_front();
for (auto n = 0; n < outputs; n++) {
auto path = derivations.front(); derivations.pop_front();
pkgs.emplace_back(path, active != "false", priority);
{
auto derivations = tokenizeString<Strings>(getAttr("derivations"));
auto itemIt = derivations.begin();
while (itemIt != derivations.end()) {
/* !!! We're trusting the caller to structure derivations env var correctly */
const bool active = "false" != *itemIt++;
const int priority = stoi(*itemIt++);
const size_t outputs = stoul(*itemIt++);
for (size_t n {0}; n < outputs; n++) {
pkgs.emplace_back(std::move(*itemIt++), active, priority);
}
}
}

View file

@ -1,10 +1,13 @@
#pragma once
///@file
#include "derivations.hh"
#include "store-api.hh"
namespace nix {
/**
* Think of this as a "store level package attrset", but stripped down to no more than the needs of buildenv.
*/
struct Package {
Path path;
bool active;
@ -12,10 +15,38 @@ struct Package {
Package(const Path & path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
};
class BuildEnvFileConflictError : public Error
{
public:
const Path fileA;
const Path fileB;
int priority;
BuildEnvFileConflictError(
const Path fileA,
const Path fileB,
int priority
)
: Error(
"Unable to build profile. There is a conflict for the following files:\n"
"\n"
" %1%\n"
" %2%",
fileA,
fileB
)
, fileA(fileA)
, fileB(fileB)
, priority(priority)
{}
};
typedef std::vector<Package> Packages;
void buildProfile(const Path & out, Packages && pkgs);
void builtinBuildenv(const BasicDerivation & drv);
void builtinBuildenv(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs);
}

View file

@ -6,7 +6,10 @@
namespace nix {
void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
void builtinFetchurl(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs,
const std::string & netrcData)
{
/* Make the host's netrc data available. Too bad curl requires
this to be stored in a file. It would be nice if we could just
@ -16,14 +19,15 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
writeFile(settings.netrcFile, netrcData, 0600);
}
auto getAttr = [&](const std::string & name) {
auto i = drv.env.find(name);
if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
return i->second;
};
auto out = get(drv.outputs, "out");
if (!out)
throw Error("'builtin:fetchurl' requires an 'out' output");
Path storePath = getAttr("out");
auto mainUrl = getAttr("url");
if (!(drv.type().isFixed() || drv.type().isImpure()))
throw Error("'builtin:fetchurl' must be a fixed-output or impure derivation");
auto storePath = outputs.at("out");
auto mainUrl = drv.env.at("url");
bool unpack = getOr(drv.env, "unpack", "") == "1";
/* Note: have to use a fresh fileTransfer here because we're in
@ -59,13 +63,12 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
};
/* Try the hashed mirrors first. */
if (getAttr("outputHashMode") == "flat")
auto dof = std::get_if<DerivationOutput::CAFixed>(&out->raw);
if (dof && dof->ca.method.getFileIngestionMethod() == FileIngestionMethod::Flat)
for (auto hashedMirror : settings.hashedMirrors.get())
try {
if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
std::optional<HashType> ht = parseHashTypeOpt(getAttr("outputHashAlgo"));
Hash h = newHashAllowEmpty(getAttr("outputHash"), ht);
fetch(hashedMirror + printHashType(h.type) + "/" + h.to_string(Base16, false));
fetch(hashedMirror + printHashAlgo(dof->ca.hash.algo) + "/" + dof->ca.hash.to_string(HashFormat::Base16, false));
return;
} catch (Error & e) {
debug(e.what());
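/* Hedged illustration of the request formed above: for a flat fixed
   output with a SHA-256 hash, each configured hashed mirror is tried
   with a URL of the shape

       <hashed-mirror>/sha256/<base16 digest of the expected output>

   before the derivation's own `url` is tried. */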

View file

@ -3,7 +3,9 @@
namespace nix {
void builtinUnpackChannel(const BasicDerivation & drv)
void builtinUnpackChannel(
const BasicDerivation & drv,
const std::map<std::string, Path> & outputs)
{
auto getAttr = [&](const std::string & name) {
auto i = drv.env.find(name);
@ -11,7 +13,7 @@ void builtinUnpackChannel(const BasicDerivation & drv)
return i->second;
};
Path out = getAttr("out");
auto out = outputs.at("out");
auto channelName = getAttr("channelName");
auto src = getAttr("src");
@ -19,10 +21,13 @@ void builtinUnpackChannel(const BasicDerivation & drv)
unpackTarfile(src, out);
auto entries = readDirectory(out);
if (entries.size() != 1)
auto entries = std::filesystem::directory_iterator{out};
auto fileName = entries->path().string();
auto fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries));
if (fileCount != 1)
throw Error("channel tarball '%s' contains more than one file", src);
renameFile((out + "/" + entries[0].name), (out + "/" + channelName));
std::filesystem::rename(fileName, (out + "/" + channelName));
}
}

View file

@ -0,0 +1,41 @@
#pragma once
/**
* @file
*
* Template implementations (as opposed to mere declarations).
*
* This file is an example of the "impl.hh" pattern. See the
* contributing guide.
*/
#include "common-protocol.hh"
#include "length-prefixed-protocol-helper.hh"
namespace nix {
/* protocol-agnostic templates */
#define COMMON_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \
TEMPLATE T CommonProto::Serialise< T >::read(const StoreDirConfig & store, CommonProto::ReadConn conn) \
{ \
return LengthPrefixedProtoHelper<CommonProto, T >::read(store, conn); \
} \
TEMPLATE void CommonProto::Serialise< T >::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const T & t) \
{ \
LengthPrefixedProtoHelper<CommonProto, T >::write(store, conn, t); \
}
COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>)
COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::set<T>)
COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename... Ts>, std::tuple<Ts...>)
#define COMMA_ ,
COMMON_USE_LENGTH_PREFIX_SERIALISER(
template<typename K COMMA_ typename V>,
std::map<K COMMA_ V>)
#undef COMMA_
/* protocol-specific templates */
}
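/* Hedged illustration (not part of the header): written out by hand,
   the `std::vector` invocation of the macro above expands to roughly
   the following; it is kept under `#if 0` because the macro already
   emits these definitions. */
#if 0
template<typename T>
std::vector<T> CommonProto::Serialise<std::vector<T>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
    return LengthPrefixedProtoHelper<CommonProto, std::vector<T>>::read(store, conn);
}
template<typename T>
void CommonProto::Serialise<std::vector<T>>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::vector<T> & t)
{
    LengthPrefixedProtoHelper<CommonProto, std::vector<T>>::write(store, conn, t);
}
#endif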

View file

@ -0,0 +1,97 @@
#include "serialise.hh"
#include "path-with-outputs.hh"
#include "store-api.hh"
#include "build-result.hh"
#include "common-protocol.hh"
#include "common-protocol-impl.hh"
#include "archive.hh"
#include "derivations.hh"
#include <nlohmann/json.hpp>
namespace nix {
/* protocol-agnostic definitions */
std::string CommonProto::Serialise<std::string>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
return readString(conn.from);
}
void CommonProto::Serialise<std::string>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::string & str)
{
conn.to << str;
}
StorePath CommonProto::Serialise<StorePath>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
return store.parseStorePath(readString(conn.from));
}
void CommonProto::Serialise<StorePath>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const StorePath & storePath)
{
conn.to << store.printStorePath(storePath);
}
ContentAddress CommonProto::Serialise<ContentAddress>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
return ContentAddress::parse(readString(conn.from));
}
void CommonProto::Serialise<ContentAddress>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const ContentAddress & ca)
{
conn.to << renderContentAddress(ca);
}
Realisation CommonProto::Serialise<Realisation>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
std::string rawInput = readString(conn.from);
return Realisation::fromJSON(
nlohmann::json::parse(rawInput),
"remote-protocol"
);
}
void CommonProto::Serialise<Realisation>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const Realisation & realisation)
{
conn.to << realisation.toJSON().dump();
}
DrvOutput CommonProto::Serialise<DrvOutput>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
return DrvOutput::parse(readString(conn.from));
}
void CommonProto::Serialise<DrvOutput>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const DrvOutput & drvOutput)
{
conn.to << drvOutput.to_string();
}
std::optional<StorePath> CommonProto::Serialise<std::optional<StorePath>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
auto s = readString(conn.from);
return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
}
void CommonProto::Serialise<std::optional<StorePath>>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional<StorePath> & storePathOpt)
{
conn.to << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
}
std::optional<ContentAddress> CommonProto::Serialise<std::optional<ContentAddress>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
return ContentAddress::parseOpt(readString(conn.from));
}
void CommonProto::Serialise<std::optional<ContentAddress>>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional<ContentAddress> & caOpt)
{
conn.to << (caOpt ? renderContentAddress(*caOpt) : "");
}
}

View file

@ -0,0 +1,106 @@
#pragma once
///@file
#include "serialise.hh"
namespace nix {
struct StoreDirConfig;
struct Source;
// items being serialized
class StorePath;
struct ContentAddress;
struct DrvOutput;
struct Realisation;
/**
* Shared serializers between the worker protocol, serve protocol, and a
* few others.
*
* This `struct` is basically just a `namespace`; We use a type rather
* than a namespace just so we can use it as a template argument.
*/
struct CommonProto
{
/**
* A unidirectional read connection, to be used by the read half of the
* canonical serializers below.
*/
struct ReadConn {
Source & from;
};
/**
* A unidirectional write connection, to be used by the write half of the
* canonical serializers below.
*/
struct WriteConn {
Sink & to;
};
template<typename T>
struct Serialise;
/**
* Wrapper function around `CommonProto::Serialise<T>::write` that allows us to
* infer the type instead of having to write it down explicitly.
*/
template<typename T>
static void write(const StoreDirConfig & store, WriteConn conn, const T & t)
{
CommonProto::Serialise<T>::write(store, conn, t);
}
};
#define DECLARE_COMMON_SERIALISER(T) \
struct CommonProto::Serialise< T > \
{ \
static T read(const StoreDirConfig & store, CommonProto::ReadConn conn); \
static void write(const StoreDirConfig & store, CommonProto::WriteConn conn, const T & str); \
}
template<>
DECLARE_COMMON_SERIALISER(std::string);
template<>
DECLARE_COMMON_SERIALISER(StorePath);
template<>
DECLARE_COMMON_SERIALISER(ContentAddress);
template<>
DECLARE_COMMON_SERIALISER(DrvOutput);
template<>
DECLARE_COMMON_SERIALISER(Realisation);
template<typename T>
DECLARE_COMMON_SERIALISER(std::vector<T>);
template<typename T>
DECLARE_COMMON_SERIALISER(std::set<T>);
template<typename... Ts>
DECLARE_COMMON_SERIALISER(std::tuple<Ts...>);
#define COMMA_ ,
template<typename K, typename V>
DECLARE_COMMON_SERIALISER(std::map<K COMMA_ V>);
#undef COMMA_
/**
* These use the empty string for the null case, relying on the fact
* that the underlying types never serialize to the empty string.
*
* We do this instead of a generic std::optional<T> instance because
* ordinal tags (0 or 1, here) are a bit of a compatibility hazard. For
* the same reason, we don't have std::variant<T...> instances (ordinal
* tags 0...n).
*
* We could have the generic instances and then these as specializations
* for compatibility, but that's proven a bit finicky, and also makes the
* worker protocol harder to implement in other languages where such
* specializations may not be allowed.
*/
template<>
DECLARE_COMMON_SERIALISER(std::optional<StorePath>);
template<>
DECLARE_COMMON_SERIALISER(std::optional<ContentAddress>);
}
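/* Hedged usage sketch (illustration only, assuming this header is
   included): writing a StorePath with the shared serialisers declared
   above. `store` and `sink` are whatever StoreDirConfig and Sink the
   caller already has. */
inline void exampleWriteStorePath(
    const nix::StoreDirConfig & store,
    nix::Sink & sink,
    const nix::StorePath & path)
{
    nix::CommonProto::write(store, nix::CommonProto::WriteConn { .to = sink }, path);
}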

View file

@ -0,0 +1,43 @@
#include <regex>
#include "common-ssh-store-config.hh"
#include "ssh.hh"
namespace nix {
static std::string extractConnStr(std::string_view scheme, std::string_view _connStr)
{
if (_connStr.empty())
throw UsageError("`%s` store requires a valid SSH host as the authority part in Store URI", scheme);
std::string connStr{_connStr};
std::smatch result;
static std::regex v6AddrRegex("^((.*)@)?\\[(.*)\\]$");
if (std::regex_match(connStr, result, v6AddrRegex)) {
connStr = result[1].matched ? result.str(1) + result.str(3) : result.str(3);
}
return connStr;
}
CommonSSHStoreConfig::CommonSSHStoreConfig(std::string_view scheme, std::string_view host, const Params & params)
: StoreConfig(params)
, host(extractConnStr(scheme, host))
{
}
SSHMaster CommonSSHStoreConfig::createSSHMaster(bool useMaster, Descriptor logFD)
{
return {
host,
sshKey.get(),
sshPublicHostKey.get(),
useMaster,
compress,
logFD,
};
}
}
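/* A minimal, self-contained sketch of the same bracket-stripping rule
   as extractConnStr above, using only the standard library; the host
   strings are hypothetical examples. */
#include <cassert>
#include <regex>
#include <string>

static std::string stripV6Brackets(std::string connStr)
{
    static const std::regex v6AddrRegex("^((.*)@)?\\[(.*)\\]$");
    std::smatch result;
    if (std::regex_match(connStr, result, v6AddrRegex))
        connStr = result[1].matched ? result.str(1) + result.str(3) : result.str(3);
    return connStr;
}

int main()
{
    assert(stripV6Brackets("root@[::1]") == "root@::1");
    assert(stripV6Brackets("[::1]") == "::1");
    assert(stripV6Brackets("root@::1") == "root@::1"); // no brackets: left as-is
    return 0;
}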

View file

@ -0,0 +1,62 @@
#pragma once
///@file
#include "store-api.hh"
namespace nix {
class SSHMaster;
struct CommonSSHStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
CommonSSHStoreConfig(std::string_view scheme, std::string_view host, const Params & params);
const Setting<Path> sshKey{this, "", "ssh-key",
"Path to the SSH private key used to authenticate to the remote machine."};
const Setting<std::string> sshPublicHostKey{this, "", "base64-ssh-public-host-key",
"The public host key of the remote machine."};
const Setting<bool> compress{this, false, "compress",
"Whether to enable SSH compression."};
const Setting<std::string> remoteStore{this, "", "remote-store",
R"(
[Store URL](@docroot@/store/types/index.md#store-url-format)
to be used on the remote machine. The default is `auto`
(i.e. use the Nix daemon or `/nix/store` directly).
)"};
/**
* The `parseURL` function supports both IPv6 URIs as defined in
* RFC2732 and plain addresses. The latter is needed here to
* connect to a remote store via SSH (it's possible to do e.g. `ssh root@::1`).
*
* When initialized, the following adjustments are made:
*
* - If the URL looks like `root@[::1]` (which is allowed by the URL parser and probably
* needed to pass further flags), it
* will be transformed into `root@::1` for SSH (same for `[::1]` -> `::1`).
*
* - If the URL looks like `root@::1` it will be left as-is.
*
* - In any other case, the string will be left as-is.
*
* Will throw an error if `connStr` is empty too.
*/
std::string host;
/**
* Small wrapper around `SSHMaster::SSHMaster` that gets most
* arguments from this configuration.
*
* See that constructor for details on the remaining two arguments.
*/
SSHMaster createSSHMaster(
bool useMaster,
Descriptor logFD = INVALID_DESCRIPTOR);
};
}

View file

@ -4,58 +4,146 @@
namespace nix {
std::string FixedOutputHash::printMethodAlgo() const
{
return makeFileIngestionPrefix(method) + printHashType(hash.type);
}
std::string makeFileIngestionPrefix(const FileIngestionMethod m)
std::string_view makeFileIngestionPrefix(FileIngestionMethod m)
{
switch (m) {
case FileIngestionMethod::Flat:
// Not prefixed for back compat
return "";
case FileIngestionMethod::Recursive:
case FileIngestionMethod::NixArchive:
return "r:";
case FileIngestionMethod::Git:
experimentalFeatureSettings.require(Xp::GitHashing);
return "git:";
default:
throw Error("impossible, caught both cases");
assert(false);
}
}
std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash)
std::string_view ContentAddressMethod::render() const
{
return "fixed:"
+ makeFileIngestionPrefix(method)
+ hash.to_string(Base32, true);
switch (raw) {
case ContentAddressMethod::Raw::Text:
return "text";
case ContentAddressMethod::Raw::Flat:
case ContentAddressMethod::Raw::NixArchive:
case ContentAddressMethod::Raw::Git:
return renderFileIngestionMethod(getFileIngestionMethod());
default:
assert(false);
}
}
std::string renderContentAddress(ContentAddress ca)
{
return std::visit(overloaded {
[](TextHash & th) {
return "text:" + th.hash.to_string(Base32, true);
},
[](FixedOutputHash & fsh) {
return makeFixedOutputCA(fsh.method, fsh.hash);
}
}, ca);
}
std::string renderContentAddressMethod(ContentAddressMethod cam)
{
return std::visit(overloaded {
[](TextHashMethod & th) {
return std::string{"text:"} + printHashType(htSHA256);
},
[](FixedOutputHashMethod & fshm) {
return "fixed:" + makeFileIngestionPrefix(fshm.fileIngestionMethod) + printHashType(fshm.hashType);
}
}, cam);
}
/*
Parses content address strings up to the hash.
/**
* **Not surjective**
*
* This is not exposed because `FileIngestionMethod::Flat` maps to
* `ContentAddressMethod::Raw::Flat` and
* `ContentAddressMethod::Raw::Text` alike. We can thus only safely use
* this when the latter is ruled out (e.g. because it is already
* handled).
*/
static ContentAddressMethod parseContentAddressMethodPrefix(std::string_view & rest)
static ContentAddressMethod fileIngestionMethodToContentAddressMethod(FileIngestionMethod m)
{
switch (m) {
case FileIngestionMethod::Flat:
return ContentAddressMethod::Raw::Flat;
case FileIngestionMethod::NixArchive:
return ContentAddressMethod::Raw::NixArchive;
case FileIngestionMethod::Git:
return ContentAddressMethod::Raw::Git;
default:
assert(false);
}
}
ContentAddressMethod ContentAddressMethod::parse(std::string_view m)
{
if (m == "text")
return ContentAddressMethod::Raw::Text;
else
return fileIngestionMethodToContentAddressMethod(
parseFileIngestionMethod(m));
}
std::string_view ContentAddressMethod::renderPrefix() const
{
switch (raw) {
case ContentAddressMethod::Raw::Text:
return "text:";
case ContentAddressMethod::Raw::Flat:
case ContentAddressMethod::Raw::NixArchive:
case ContentAddressMethod::Raw::Git:
return makeFileIngestionPrefix(getFileIngestionMethod());
default:
assert(false);
}
}
ContentAddressMethod ContentAddressMethod::parsePrefix(std::string_view & m)
{
if (splitPrefix(m, "r:")) {
return ContentAddressMethod::Raw::NixArchive;
}
else if (splitPrefix(m, "git:")) {
experimentalFeatureSettings.require(Xp::GitHashing);
return ContentAddressMethod::Raw::Git;
}
else if (splitPrefix(m, "text:")) {
return ContentAddressMethod::Raw::Text;
}
return ContentAddressMethod::Raw::Flat;
}
/**
* This is slightly more mindful of forward compat in that it uses `fixed:`
* rather than just doing a raw empty prefix or `r:`, which doesn't "save room"
* for future changes very well.
*/
static std::string renderPrefixModern(const ContentAddressMethod & ca)
{
switch (ca.raw) {
case ContentAddressMethod::Raw::Text:
return "text:";
case ContentAddressMethod::Raw::Flat:
case ContentAddressMethod::Raw::NixArchive:
case ContentAddressMethod::Raw::Git:
return "fixed:" + makeFileIngestionPrefix(ca.getFileIngestionMethod());
default:
assert(false);
}
}
std::string ContentAddressMethod::renderWithAlgo(HashAlgorithm ha) const
{
return renderPrefixModern(*this) + printHashAlgo(ha);
}
FileIngestionMethod ContentAddressMethod::getFileIngestionMethod() const
{
switch (raw) {
case ContentAddressMethod::Raw::Flat:
return FileIngestionMethod::Flat;
case ContentAddressMethod::Raw::NixArchive:
return FileIngestionMethod::NixArchive;
case ContentAddressMethod::Raw::Git:
return FileIngestionMethod::Git;
case ContentAddressMethod::Raw::Text:
return FileIngestionMethod::Flat;
default:
assert(false);
}
}
std::string ContentAddress::render() const
{
return renderPrefixModern(method) + this->hash.to_string(HashFormat::Nix32, true);
}
/**
* Parses content address strings up to the hash.
*/
static std::pair<ContentAddressMethod, HashAlgorithm> parseContentAddressMethodPrefix(std::string_view & rest)
{
std::string_view wholeInput { rest };
@ -67,58 +155,53 @@ static ContentAddressMethod parseContentAddressMethodPrefix(std::string_view & r
prefix = *optPrefix;
}
auto parseHashType_ = [&](){
auto hashTypeRaw = splitPrefixTo(rest, ':');
if (!hashTypeRaw)
auto parseHashAlgorithm_ = [&](){
auto hashAlgoRaw = splitPrefixTo(rest, ':');
if (!hashAlgoRaw)
throw UsageError("content address hash must be in form '<algo>:<hash>', but found: %s", wholeInput);
HashType hashType = parseHashType(*hashTypeRaw);
return std::move(hashType);
HashAlgorithm hashAlgo = parseHashAlgo(*hashAlgoRaw);
return hashAlgo;
};
// Switch on prefix
if (prefix == "text") {
// No parsing of the ingestion method, "text" only support flat.
HashType hashType = parseHashType_();
if (hashType != htSHA256)
throw Error("text content address hash should use %s, but instead uses %s",
printHashType(htSHA256), printHashType(hashType));
return TextHashMethod {};
HashAlgorithm hashAlgo = parseHashAlgorithm_();
return {
ContentAddressMethod::Raw::Text,
std::move(hashAlgo),
};
} else if (prefix == "fixed") {
// Parse method
auto method = FileIngestionMethod::Flat;
auto method = ContentAddressMethod::Raw::Flat;
if (splitPrefix(rest, "r:"))
method = FileIngestionMethod::Recursive;
HashType hashType = parseHashType_();
return FixedOutputHashMethod {
.fileIngestionMethod = method,
.hashType = std::move(hashType),
method = ContentAddressMethod::Raw::NixArchive;
else if (splitPrefix(rest, "git:")) {
experimentalFeatureSettings.require(Xp::GitHashing);
method = ContentAddressMethod::Raw::Git;
}
HashAlgorithm hashAlgo = parseHashAlgorithm_();
return {
std::move(method),
std::move(hashAlgo),
};
} else
throw UsageError("content address prefix '%s' is unrecognized. Recogonized prefixes are 'text' or 'fixed'", prefix);
}
ContentAddress parseContentAddress(std::string_view rawCa) {
ContentAddress ContentAddress::parse(std::string_view rawCa)
{
auto rest = rawCa;
ContentAddressMethod caMethod = parseContentAddressMethodPrefix(rest);
auto [caMethod, hashAlgo] = parseContentAddressMethodPrefix(rest);
return std::visit(
overloaded {
[&](TextHashMethod & thm) {
return ContentAddress(TextHash {
.hash = Hash::parseNonSRIUnprefixed(rest, htSHA256)
});
},
[&](FixedOutputHashMethod & fohMethod) {
return ContentAddress(FixedOutputHash {
.method = fohMethod.fileIngestionMethod,
.hash = Hash::parseNonSRIUnprefixed(rest, std::move(fohMethod.hashType)),
});
},
}, caMethod);
return ContentAddress {
.method = std::move(caMethod),
.hash = Hash::parseNonSRIUnprefixed(rest, hashAlgo),
};
}
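/* Hedged usage sketch (illustration only): render() and parse() round-trip.
   The hash is computed on the spot via hashString() (assumed available
   through hash.hh) rather than hard-coded. */
static void exampleContentAddressRoundTrip()
{
    ContentAddress ca {
        .method = ContentAddressMethod::Raw::NixArchive,
        .hash = hashString(HashAlgorithm::SHA256, "example contents"),
    };
    auto rendered = ca.render();              // "fixed:r:sha256:<nix32 digest>"
    auto parsed = ContentAddress::parse(rendered);
    // `parsed == ca` holds, since parse() is the inverse of render().
    (void) parsed;
}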
ContentAddressMethod parseContentAddressMethod(std::string_view caMethod)
std::pair<ContentAddressMethod, HashAlgorithm> ContentAddressMethod::parseWithAlgo(std::string_view caMethod)
{
std::string asPrefix = std::string{caMethod} + ":";
// parseContentAddressMethodPrefix takes its argument by reference
@ -126,26 +209,102 @@ ContentAddressMethod parseContentAddressMethod(std::string_view caMethod)
return parseContentAddressMethodPrefix(asPrefixView);
}
std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt)
std::optional<ContentAddress> ContentAddress::parseOpt(std::string_view rawCaOpt)
{
return rawCaOpt == "" ? std::optional<ContentAddress>() : parseContentAddress(rawCaOpt);
return rawCaOpt == ""
? std::nullopt
: std::optional { ContentAddress::parse(rawCaOpt) };
};
std::string renderContentAddress(std::optional<ContentAddress> ca)
{
return ca ? renderContentAddress(*ca) : "";
return ca ? ca->render() : "";
}
Hash getContentAddressHash(const ContentAddress & ca)
std::string ContentAddress::printMethodAlgo() const
{
return std::string { method.renderPrefix() }
+ printHashAlgo(hash.algo);
}
bool StoreReferences::empty() const
{
return !self && others.empty();
}
size_t StoreReferences::size() const
{
return (self ? 1 : 0) + others.size();
}
ContentAddressWithReferences ContentAddressWithReferences::withoutRefs(const ContentAddress & ca) noexcept
{
switch (ca.method.raw) {
case ContentAddressMethod::Raw::Text:
return TextInfo {
.hash = ca.hash,
.references = {},
};
case ContentAddressMethod::Raw::Flat:
case ContentAddressMethod::Raw::NixArchive:
case ContentAddressMethod::Raw::Git:
return FixedOutputInfo {
.method = ca.method.getFileIngestionMethod(),
.hash = ca.hash,
.references = {},
};
default:
assert(false);
}
}
ContentAddressWithReferences ContentAddressWithReferences::fromParts(
ContentAddressMethod method, Hash hash, StoreReferences refs)
{
switch (method.raw) {
case ContentAddressMethod::Raw::Text:
if (refs.self)
throw Error("self-reference not allowed with text hashing");
return TextInfo {
.hash = std::move(hash),
.references = std::move(refs.others),
};
case ContentAddressMethod::Raw::Flat:
case ContentAddressMethod::Raw::NixArchive:
case ContentAddressMethod::Raw::Git:
return FixedOutputInfo {
.method = method.getFileIngestionMethod(),
.hash = std::move(hash),
.references = std::move(refs),
};
default:
assert(false);
}
}
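/* Hedged usage sketch (illustration only): a fixed-output method accepts
   a self-reference, while the same call with ContentAddressMethod::Raw::Text
   would throw, per fromParts above. `someHash` and `somePath` stand in
   for real values. */
static ContentAddressWithReferences exampleFixedWithSelfRef(Hash someHash, StorePath somePath)
{
    StoreReferences refs {
        .others = StorePathSet { somePath },
        .self = true, // fine for NixArchive; rejected for Text
    };
    return ContentAddressWithReferences::fromParts(
        ContentAddressMethod::Raw::NixArchive, std::move(someHash), std::move(refs));
}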
ContentAddressMethod ContentAddressWithReferences::getMethod() const
{
return std::visit(overloaded {
[](const TextHash & th) {
[](const TextInfo & th) -> ContentAddressMethod {
return ContentAddressMethod::Raw::Text;
},
[](const FixedOutputInfo & fsh) -> ContentAddressMethod {
return fileIngestionMethodToContentAddressMethod(
fsh.method);
},
}, raw);
}
Hash ContentAddressWithReferences::getHash() const
{
return std::visit(overloaded {
[](const TextInfo & th) {
return th.hash;
},
[](const FixedOutputHash & fsh) {
[](const FixedOutputInfo & fsh) {
return fsh.hash;
}
}, ca);
},
}, raw);
}
}

View file

@ -1,77 +1,316 @@
#pragma once
///@file
#include <variant>
#include "hash.hh"
#include "path.hh"
#include "file-content-address.hh"
#include "variant-wrapper.hh"
namespace nix {
enum struct FileIngestionMethod : uint8_t {
Flat = false,
Recursive = true
/*
* Content addressing method
*/
/**
* Compute the prefix to the hash algorithm which indicates how the
* files were ingested.
*/
std::string_view makeFileIngestionPrefix(FileIngestionMethod m);
/**
* An enumeration of all the ways we can content-address store objects.
*
* Just the type of a content address. Combine with the hash itself, and
* we have a `ContentAddress` as defined below. Combine that, in turn,
* with info on references, and we have `ContentAddressWithReferences`,
* as defined further below.
*/
struct ContentAddressMethod
{
enum struct Raw {
/**
* Calculate a store path using the `FileIngestionMethod::Flat`
* hash of the file system objects, and references.
*
* See `store-object/content-address.md#method-flat` in the
* manual.
*/
Flat,
/**
* Calculate a store path using the
* `FileIngestionMethod::NixArchive` hash of the file system
* objects, and references.
*
* See `store-object/content-address.md#method-nix-archive` in the
* manual.
*/
NixArchive,
/**
* Calculate a store path using the `FileIngestionMethod::Git`
* hash of the file system objects, and references.
*
* Part of `ExperimentalFeature::GitHashing`.
*
* See `store-object/content-address.md#method-git` in the
* manual.
*/
Git,
/**
* Calculate a store path using the `FileIngestionMethod::Flat`
* hash of the file system objects, and references, but in a
* different way than `ContentAddressMethod::Raw::Flat`.
*
* See `store-object/content-address.md#method-text` in the
* manual.
*/
Text,
};
Raw raw;
bool operator ==(const ContentAddressMethod &) const = default;
auto operator <=>(const ContentAddressMethod &) const = default;
MAKE_WRAPPER_CONSTRUCTOR(ContentAddressMethod);
/**
* Parse a content addressing method (name).
*
* The inverse of `render`.
*/
static ContentAddressMethod parse(std::string_view rawCaMethod);
/**
* Render a content addressing method (name).
*
* The inverse of `parse`.
*/
std::string_view render() const;
/**
* Parse the prefix tag which indicates how the files
* were ingested, with the fixed output case not prefixed for back
* compat.
*
* @param [in] m A string that should begin with the prefix.
* @param [out] m The remainder of the string after the prefix.
*/
static ContentAddressMethod parsePrefix(std::string_view & m);
/**
* Render the prefix tag which indicates how the files were ingested.
*
* The rough inverse of `parsePrefix()`.
*/
std::string_view renderPrefix() const;
/**
* Parse a content addressing method and hash algorithm.
*/
static std::pair<ContentAddressMethod, HashAlgorithm> parseWithAlgo(std::string_view rawCaMethod);
/**
* Render a content addressing method and hash algorithm in a
* nicer way, prefixing both cases.
*
* The rough inverse of `parse()`.
*/
std::string renderWithAlgo(HashAlgorithm ha) const;
/**
* Get the underlying way to content-address file system objects.
*
* Different ways of hashing store objects may use the same method
* for hashing file system objects.
*/
FileIngestionMethod getFileIngestionMethod() const;
};
struct TextHash {
Hash hash;
};
/// Pair of a hash, and how the file system was ingested
struct FixedOutputHash {
FileIngestionMethod method;
/*
* Mini content address
*/
/**
* We've accumulated several types of content-addressed paths over the
* years; fixed-output derivations support multiple hash algorithms and
* serialisation methods (flat file vs NAR). Thus, ca has one of the
* following forms:
*
* - `TextIngestionMethod`:
* text:sha256:<sha256 hash of file contents>
*
* - `FixedIngestionMethod`:
* fixed:<r?>:<hash algorithm>:<hash of file contents>
*/
struct ContentAddress
{
/**
* How the file system objects are serialized
*/
ContentAddressMethod method;
/**
* Hash of that serialization
*/
Hash hash;
bool operator ==(const ContentAddress &) const = default;
auto operator <=>(const ContentAddress &) const = default;
/**
* Compute the content-addressability assertion
* (`ValidPathInfo::ca`) for paths created by
* `Store::makeFixedOutputPath()` / `Store::addToStore()`.
*/
std::string render() const;
static ContentAddress parse(std::string_view rawCa);
static std::optional<ContentAddress> parseOpt(std::string_view rawCaOpt);
std::string printMethodAlgo() const;
};
/*
We've accumulated several types of content-addressed paths over the years;
fixed-output derivations support multiple hash algorithms and serialisation
methods (flat file vs NAR). Thus, ca has one of the following forms:
* text:sha256:<sha256 hash of file contents>: For paths
computed by makeTextPath() / addTextToStore().
* fixed:<r?>:<ht>:<h>: For paths computed by
makeFixedOutputPath() / addToStore().
*/
typedef std::variant<
TextHash, // for paths computed by makeTextPath() / addTextToStore
FixedOutputHash // for path computed by makeFixedOutputPath
> ContentAddress;
/* Compute the prefix to the hash algorithm which indicates how the files were
ingested. */
std::string makeFileIngestionPrefix(const FileIngestionMethod m);
/* Compute the content-addressability assertion (ValidPathInfo::ca)
for paths created by makeFixedOutputPath() / addToStore(). */
std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash);
std::string renderContentAddress(ContentAddress ca);
/**
* Render the `ContentAddress` if it exists to a string, return empty
* string otherwise.
*/
std::string renderContentAddress(std::optional<ContentAddress> ca);
ContentAddress parseContentAddress(std::string_view rawCa);
std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt);
Hash getContentAddressHash(const ContentAddress & ca);
/*
We only have one way to hash text with references, so this is single-value
type is only useful in std::variant.
*/
struct TextHashMethod { };
struct FixedOutputHashMethod {
FileIngestionMethod fileIngestionMethod;
HashType hashType;
* Full content address
*
* See the schema for store paths in store-api.cc
*/
/**
* A set of references to other store objects.
*
* References to other store objects are tracked with store paths, self
* references however are tracked with a boolean.
*/
struct StoreReferences
{
/**
* References to other store objects
*/
StorePathSet others;
/**
* Reference to this store object
*/
bool self = false;
/**
* @return true iff no references, i.e. others is empty and self is
* false.
*/
bool empty() const;
/**
* Returns the numbers of references, i.e. the size of others + 1
* iff self is true.
*/
size_t size() const;
bool operator ==(const StoreReferences &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=>(const StoreReferences &) const = default;
};
typedef std::variant<
TextHashMethod,
FixedOutputHashMethod
> ContentAddressMethod;
// This matches the additional info that we need for makeTextPath
struct TextInfo
{
/**
* Hash of the contents of the text/file.
*/
Hash hash;
ContentAddressMethod parseContentAddressMethod(std::string_view rawCaMethod);
/**
* References to other store objects only; self references
* disallowed
*/
StorePathSet references;
std::string renderContentAddressMethod(ContentAddressMethod caMethod);
bool operator ==(const TextInfo &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=>(const TextInfo &) const = default;
};
struct FixedOutputInfo
{
/**
* How the file system objects are serialized
*/
FileIngestionMethod method;
/**
* Hash of that serialization
*/
Hash hash;
/**
* References to other store objects or this one.
*/
StoreReferences references;
bool operator ==(const FixedOutputInfo &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=>(const FixedOutputInfo &) const = default;
};
/**
* Ways of content addressing but not a complete ContentAddress.
*
* A ContentAddress without a Hash.
*/
struct ContentAddressWithReferences
{
typedef std::variant<
TextInfo,
FixedOutputInfo
> Raw;
Raw raw;
bool operator ==(const ContentAddressWithReferences &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=>(const ContentAddressWithReferences &) const = default;
MAKE_WRAPPER_CONSTRUCTOR(ContentAddressWithReferences);
/**
* Create a `ContentAddressWithReferences` from a mere
* `ContentAddress`, by claiming no references.
*/
static ContentAddressWithReferences withoutRefs(const ContentAddress &) noexcept;
/**
* Create a `ContentAddressWithReferences` from 3 parts:
*
* @param method Way ingesting the file system data.
*
* @param hash Hash of ingested file system data.
*
* @param refs References to other store objects or oneself.
*
* @note Not all combinations are supported. This is a
* *partial function* and exceptions will be thrown for invalid
* combinations.
*/
static ContentAddressWithReferences fromParts(
ContentAddressMethod method, Hash hash, StoreReferences refs);
ContentAddressMethod getMethod() const;
Hash getHash() const;
};
}

View file

@ -1,116 +0,0 @@
#include "crypto.hh"
#include "util.hh"
#include "globals.hh"
#include <sodium.h>
namespace nix {
static std::pair<std::string_view, std::string_view> split(std::string_view s)
{
size_t colon = s.find(':');
if (colon == std::string::npos || colon == 0)
return {"", ""};
return {s.substr(0, colon), s.substr(colon + 1)};
}
Key::Key(std::string_view s)
{
auto ss = split(s);
name = ss.first;
key = ss.second;
if (name == "" || key == "")
throw Error("secret key is corrupt");
key = base64Decode(key);
}
std::string Key::to_string() const
{
return name + ":" + base64Encode(key);
}
SecretKey::SecretKey(std::string_view s)
: Key(s)
{
if (key.size() != crypto_sign_SECRETKEYBYTES)
throw Error("secret key is not valid");
}
std::string SecretKey::signDetached(std::string_view data) const
{
unsigned char sig[crypto_sign_BYTES];
unsigned long long sigLen;
crypto_sign_detached(sig, &sigLen, (unsigned char *) data.data(), data.size(),
(unsigned char *) key.data());
return name + ":" + base64Encode(std::string((char *) sig, sigLen));
}
PublicKey SecretKey::toPublicKey() const
{
unsigned char pk[crypto_sign_PUBLICKEYBYTES];
crypto_sign_ed25519_sk_to_pk(pk, (unsigned char *) key.data());
return PublicKey(name, std::string((char *) pk, crypto_sign_PUBLICKEYBYTES));
}
SecretKey SecretKey::generate(std::string_view name)
{
unsigned char pk[crypto_sign_PUBLICKEYBYTES];
unsigned char sk[crypto_sign_SECRETKEYBYTES];
if (crypto_sign_keypair(pk, sk) != 0)
throw Error("key generation failed");
return SecretKey(name, std::string((char *) sk, crypto_sign_SECRETKEYBYTES));
}
PublicKey::PublicKey(std::string_view s)
: Key(s)
{
if (key.size() != crypto_sign_PUBLICKEYBYTES)
throw Error("public key is not valid");
}
bool verifyDetached(const std::string & data, const std::string & sig,
const PublicKeys & publicKeys)
{
auto ss = split(sig);
auto key = publicKeys.find(std::string(ss.first));
if (key == publicKeys.end()) return false;
auto sig2 = base64Decode(ss.second);
if (sig2.size() != crypto_sign_BYTES)
throw Error("signature is not valid");
return crypto_sign_verify_detached((unsigned char *) sig2.data(),
(unsigned char *) data.data(), data.size(),
(unsigned char *) key->second.key.data()) == 0;
}
PublicKeys getDefaultPublicKeys()
{
PublicKeys publicKeys;
// FIXME: filter duplicates
for (auto s : settings.trustedPublicKeys.get()) {
PublicKey key(s);
publicKeys.emplace(key.name, key);
}
for (auto secretKeyFile : settings.secretKeyFiles.get()) {
try {
SecretKey secretKey(readFile(secretKeyFile));
publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
} catch (SysError & e) {
/* Ignore unreadable key files. That's normal in a
multi-user installation. */
}
}
return publicKeys;
}
}

View file

@ -1,62 +0,0 @@
#pragma once
#include "types.hh"
#include <map>
namespace nix {
struct Key
{
std::string name;
std::string key;
/* Construct Key from a string in the format
<name>:<key-in-base64>. */
Key(std::string_view s);
std::string to_string() const;
protected:
Key(std::string_view name, std::string && key)
: name(name), key(std::move(key)) { }
};
struct PublicKey;
struct SecretKey : Key
{
SecretKey(std::string_view s);
/* Return a detached signature of the given string. */
std::string signDetached(std::string_view s) const;
PublicKey toPublicKey() const;
static SecretKey generate(std::string_view name);
private:
SecretKey(std::string_view name, std::string && key)
: Key(name, std::move(key)) { }
};
struct PublicKey : Key
{
PublicKey(std::string_view data);
private:
PublicKey(std::string_view name, std::string && key)
: Key(name, std::move(key)) { }
friend struct SecretKey;
};
typedef std::map<std::string, PublicKey> PublicKeys;
/* Return true iff sig is a correct signature over data using one
of the given public keys. */
bool verifyDetached(const std::string & data, const std::string & sig,
const PublicKeys & publicKeys);
PublicKeys getDefaultPublicKeys();
}

File diff suppressed because it is too large Load diff

View file

@ -1,23 +1,18 @@
#pragma once
///@file
#include "serialise.hh"
#include "store-api.hh"
namespace nix::daemon {
enum TrustedFlag : bool { NotTrusted = false, Trusted = true };
enum RecursiveFlag : bool { NotRecursive = false, Recursive = true };
void processConnection(
ref<Store> store,
FdSource & from,
FdSink & to,
FdSource && from,
FdSink && to,
TrustedFlag trusted,
RecursiveFlag recursive,
/* Arbitrary hook to check authorization / initialize user data / whatever
after the protocol has been negotiated. The idea is that this function
and everything it calls doesn't know about this stuff, and the
`nix-daemon` handles that instead. */
std::function<void(Store &)> authHook);
RecursiveFlag recursive);
}

File diff suppressed because it is too large Load diff

View file

@ -1,163 +1,296 @@
#pragma once
///@file
#include "path.hh"
#include "types.hh"
#include "hash.hh"
#include "content-address.hh"
#include "repair-flag.hh"
#include "derived-path-map.hh"
#include "sync.hh"
#include "variant-wrapper.hh"
#include <map>
#include <variant>
namespace nix {
struct StoreDirConfig;
/* Abstract syntax of derivations. */
/* The traditional non-fixed-output derivation type. */
struct DerivationOutputInputAddressed
{
StorePath path;
};
/* Fixed-output derivations, whose output paths are content addressed
according to that fixed output. */
struct DerivationOutputCAFixed
{
FixedOutputHash hash; /* hash used for expected hash computation */
StorePath path(const Store & store, std::string_view drvName, std::string_view outputName) const;
};
/* Floating-output derivations, whose output paths are content addressed, but
not fixed, and so are dynamically calculated from whatever the output ends
up being. */
struct DerivationOutputCAFloating
{
/* information used for expected hash computation */
FileIngestionMethod method;
HashType hashType;
};
/* Input-addressed output which depends on a (CA) derivation whose hash isn't
* known yet.
/**
* A single output of a BasicDerivation (and Derivation).
*/
struct DerivationOutputDeferred {};
/* Impure output which is moved to a content-addressed location (like
CAFloating) but isn't registered as a realization.
*/
struct DerivationOutputImpure
struct DerivationOutput
{
/* information used for expected hash computation */
FileIngestionMethod method;
HashType hashType;
};
/**
* The traditional non-fixed-output derivation type.
*/
struct InputAddressed
{
StorePath path;
typedef std::variant<
DerivationOutputInputAddressed,
DerivationOutputCAFixed,
DerivationOutputCAFloating,
DerivationOutputDeferred,
DerivationOutputImpure
> _DerivationOutputRaw;
bool operator == (const InputAddressed &) const = default;
auto operator <=> (const InputAddressed &) const = default;
};
struct DerivationOutput : _DerivationOutputRaw
{
using Raw = _DerivationOutputRaw;
using Raw::Raw;
/**
* Fixed-output derivations, whose output paths are content
* addressed according to that fixed output.
*/
struct CAFixed
{
/**
* Method and hash used for expected hash computation.
*
* References are not allowed by fiat.
*/
ContentAddress ca;
using InputAddressed = DerivationOutputInputAddressed;
using CAFixed = DerivationOutputCAFixed;
using CAFloating = DerivationOutputCAFloating;
using Deferred = DerivationOutputDeferred;
using Impure = DerivationOutputImpure;
/**
* Return the \ref StorePath "store path" corresponding to this output
*
* @param drvName The name of the derivation this is an output of, without the `.drv`.
* @param outputName The name of this output.
*/
StorePath path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const;
/* Note, when you use this function you should make sure that you're passing
the right derivation name. When in doubt, you should use the safer
interface provided by BasicDerivation::outputsAndOptPaths */
std::optional<StorePath> path(const Store & store, std::string_view drvName, std::string_view outputName) const;
bool operator == (const CAFixed &) const = default;
auto operator <=> (const CAFixed &) const = default;
};
inline const Raw & raw() const {
return static_cast<const Raw &>(*this);
}
/**
* Floating-output derivations, whose output paths are content
* addressed, but not fixed, and so are dynamically calculated from
* whatever the output ends up being.
*/
struct CAFloating
{
/**
* How the file system objects will be serialized for hashing
*/
ContentAddressMethod method;
/**
* How the serialization will be hashed
*/
HashAlgorithm hashAlgo;
bool operator == (const CAFloating &) const = default;
auto operator <=> (const CAFloating &) const = default;
};
/**
* Input-addressed output which depends on a (CA) derivation whose hash
* isn't known yet.
*/
struct Deferred {
bool operator == (const Deferred &) const = default;
auto operator <=> (const Deferred &) const = default;
};
/**
* Impure output which is moved to a content-addressed location (like
* CAFloating) but isn't registered as a realization.
*/
struct Impure
{
/**
* How the file system objects will be serialized for hashing
*/
ContentAddressMethod method;
/**
* How the serialization will be hashed
*/
HashAlgorithm hashAlgo;
bool operator == (const Impure &) const = default;
auto operator <=> (const Impure &) const = default;
};
typedef std::variant<
InputAddressed,
CAFixed,
CAFloating,
Deferred,
Impure
> Raw;
Raw raw;
bool operator == (const DerivationOutput &) const = default;
auto operator <=> (const DerivationOutput &) const = default;
MAKE_WRAPPER_CONSTRUCTOR(DerivationOutput);
/**
* Force choosing a variant
*/
DerivationOutput() = delete;
/**
* \note when you use this function you should make sure that you're
* passing the right derivation name. When in doubt, you should use
* the safer interface provided by
* BasicDerivation::outputsAndOptPaths
*/
std::optional<StorePath> path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const;
nlohmann::json toJSON(
const StoreDirConfig & store,
std::string_view drvName,
OutputNameView outputName) const;
/**
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static DerivationOutput fromJSON(
const StoreDirConfig & store,
std::string_view drvName,
OutputNameView outputName,
const nlohmann::json & json,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
};
typedef std::map<std::string, DerivationOutput> DerivationOutputs;
/* These are analogues to the previous DerivationOutputs data type, but they
also contains, for each output, the (optional) store path in which it would
be written. To calculate values of these types, see the corresponding
functions in BasicDerivation */
/**
* These are analogues to the previous DerivationOutputs data type,
* but they also contain, for each output, the (optional) store
* path in which it would be written. To calculate values of these
* types, see the corresponding functions in BasicDerivation.
*/
typedef std::map<std::string, std::pair<DerivationOutput, std::optional<StorePath>>>
DerivationOutputsAndOptPaths;
/* For inputs that are sub-derivations, we specify exactly which
output IDs we are interested in. */
/**
* For inputs that are sub-derivations, we specify exactly which
* output IDs we are interested in.
*/
typedef std::map<StorePath, StringSet> DerivationInputs;
struct DerivationType_InputAddressed {
bool deferred;
};
struct DerivationType {
/**
* Input-addressed derivation types
*/
struct InputAddressed {
/**
* True iff the derivation type can't be determined statically,
* for instance because it (transitively) depends on a content-addressed
* derivation.
*/
bool deferred;
struct DerivationType_ContentAddressed {
bool sandboxed;
bool fixed;
};
bool operator == (const InputAddressed &) const = default;
auto operator <=> (const InputAddressed &) const = default;
};
struct DerivationType_Impure {
};
/**
* Content-addressed derivation types
*/
struct ContentAddressed {
/**
* Whether the derivation should be built safely inside a sandbox.
*/
bool sandboxed;
/**
* Whether the derivation's outputs' content-addresses are "fixed"
* or "floating".
*
* - Fixed: content-addresses are written down as part of the
* derivation itself. If the outputs don't end up matching, the
* build fails.
*
* - Floating: content-addresses are not written down, we do not
* know them until we perform the build.
*/
bool fixed;
typedef std::variant<
DerivationType_InputAddressed,
DerivationType_ContentAddressed,
DerivationType_Impure
> _DerivationTypeRaw;
bool operator == (const ContentAddressed &) const = default;
auto operator <=> (const ContentAddressed &) const = default;
};
struct DerivationType : _DerivationTypeRaw {
using Raw = _DerivationTypeRaw;
using Raw::Raw;
using InputAddressed = DerivationType_InputAddressed;
using ContentAddressed = DerivationType_ContentAddressed;
using Impure = DerivationType_Impure;
/**
* Impure derivation type
*
* This is similar at build time to the content-addressed, not sandboxed, not fixed
* type, but has some restrictions on its usage.
*/
struct Impure {
bool operator == (const Impure &) const = default;
auto operator <=> (const Impure &) const = default;
};
/* Do the outputs of the derivation have paths calculated from their content,
or from the derivation itself? */
typedef std::variant<
InputAddressed,
ContentAddressed,
Impure
> Raw;
Raw raw;
bool operator == (const DerivationType &) const = default;
auto operator <=> (const DerivationType &) const = default;
MAKE_WRAPPER_CONSTRUCTOR(DerivationType);
/**
* Force choosing a variant
*/
DerivationType() = delete;
/**
* Do the outputs of the derivation have paths calculated from their
* content, or from the derivation itself?
*/
bool isCA() const;
/* Is the content of the outputs fixed a-priori via a hash? Never true for
non-CA derivations. */
/**
* Is the content of the outputs fixed <em>a priori</em> via a hash?
* Never true for non-CA derivations.
*/
bool isFixed() const;
/* Whether the derivation is fully sandboxed. If false, the
sandbox is opened up, e.g. the derivation has access to the
network. Note that whether or not we actually sandbox the
derivation is controlled separately. Always true for non-CA
derivations. */
/**
* Whether the derivation is fully sandboxed. If false, the sandbox
* is opened up, e.g. the derivation has access to the network. Note
* that whether or not we actually sandbox the derivation is
* controlled separately. Always true for non-CA derivations.
*/
bool isSandboxed() const;
/* Whether the derivation is expected to produce the same result
every time, and therefore it only needs to be built once. This
is only false for derivations that have the attribute '__impure
= true'. */
bool isPure() const;
/**
* Whether the derivation is expected to produce a different result
* every time, and therefore it needs to be rebuilt every time. This is
* only true for derivations that have the attribute '__impure =
* true'.
*
* Non-impure derivations can still behave impurely, to the degree permitted
* by the sandbox. Hence why this method isn't `isPure`: impure derivations
* are not the negation of pure derivations. Purity can not be ascertained
* except by rather heavy tools.
*/
bool isImpure() const;
/* Does the derivation knows its own output paths?
Only true when there's no floating-ca derivation involved in the
closure, or if fixed output.
/**
* Does the derivation know its own output paths?
* Only true when there's no floating-ca derivation involved in the
* closure, or if fixed output.
*/
bool hasKnownOutputPaths() const;
inline const Raw & raw() const {
return static_cast<const Raw &>(*this);
}
};
struct BasicDerivation
{
DerivationOutputs outputs; /* keyed on symbolic IDs */
StorePathSet inputSrcs; /* inputs that are sources */
/**
* keyed on symbolic IDs
*/
DerivationOutputs outputs;
/**
* inputs that are sources
*/
StorePathSet inputSrcs;
std::string platform;
Path builder;
Strings args;
@ -169,155 +302,219 @@ struct BasicDerivation
bool isBuiltin() const;
/* Return true iff this is a fixed-output derivation. */
/**
* Return true iff this is a fixed-output derivation.
*/
DerivationType type() const;
/* Return the output names of a derivation. */
/**
* Return the output names of a derivation.
*/
StringSet outputNames() const;
/* Calculates the maps that contains all the DerivationOutputs, but
augmented with knowledge of the Store paths they would be written
into. */
DerivationOutputsAndOptPaths outputsAndOptPaths(const Store & store) const;
/**
* Calculates the map that contains all the DerivationOutputs,
* augmented with knowledge of the Store paths they would be written
* into.
*/
DerivationOutputsAndOptPaths outputsAndOptPaths(const StoreDirConfig & store) const;
static std::string_view nameFromPath(const StorePath & storePath);
bool operator == (const BasicDerivation &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=> (const BasicDerivation &) const = default;
};
class Store;
struct Derivation : BasicDerivation
{
DerivationInputs inputDrvs; /* inputs that are sub-derivations */
/**
* inputs that are sub-derivations
*/
DerivedPathMap<std::set<OutputName>> inputDrvs;
/* Print a derivation. */
std::string unparse(const Store & store, bool maskOutputs,
std::map<std::string, StringSet> * actualInputs = nullptr) const;
/**
* Print a derivation.
*/
std::string unparse(const StoreDirConfig & store, bool maskOutputs,
DerivedPathMap<StringSet>::ChildNode::Map * actualInputs = nullptr) const;
/* Return the underlying basic derivation but with these changes:
/**
* Return the underlying basic derivation but with these changes:
*
* 1. Input drvs are emptied, but the outputs of them that were used
* are added directly to input sources.
*
* 2. Input placeholders are replaced with realized input store
* paths.
*/
std::optional<BasicDerivation> tryResolve(Store & store, Store * evalStore = nullptr) const;
1. Input drvs are emptied, but the outputs of them that were used are
added directly to input sources.
2. Input placeholders are replaced with realized input store paths. */
std::optional<BasicDerivation> tryResolve(Store & store) const;
/* Like the above, but instead of querying the Nix database for
realisations, uses a given mapping from input derivation paths
+ output names to actual output store paths. */
/**
* Like the above, but instead of querying the Nix database for
* realisations, uses a given mapping from input derivation paths +
* output names to actual output store paths.
*/
std::optional<BasicDerivation> tryResolve(
Store & store,
const std::map<std::pair<StorePath, std::string>, StorePath> & inputDrvOutputs) const;
/**
* Check that the derivation is valid and does not present any
* illegal states.
*
* This is mainly a matter of checking the outputs, where our C++
* representation supports all sorts of combinations we do not yet
* allow.
*/
void checkInvariants(Store & store, const StorePath & drvPath) const;
Derivation() = default;
Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { }
Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { }
nlohmann::json toJSON(const StoreDirConfig & store) const;
static Derivation fromJSON(
const StoreDirConfig & store,
const nlohmann::json & json,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
bool operator == (const Derivation &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=> (const Derivation &) const = default;
};
class Store;
/* Write a derivation to the Nix store, and return its path. */
/**
* Write a derivation to the Nix store, and return its path.
*/
StorePath writeDerivation(Store & store,
const Derivation & drv,
RepairFlag repair = NoRepair,
bool readOnly = false);
/* Read a derivation from a file. */
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
/**
* Read a derivation from a file.
*/
Derivation parseDerivation(
const StoreDirConfig & store,
std::string && s,
std::string_view name,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
// FIXME: remove
/**
* \todo Remove.
*
* Use Path::isDerivation instead.
*/
bool isDerivation(std::string_view fileName);
/* Calculate the name that will be used for the store path for this
output.
This is usually <drv-name>-<output-name>, but is just <drv-name> when
the output name is "out". */
std::string outputPathName(std::string_view drvName, std::string_view outputName);
/**
* Calculate the name that will be used for the store path for this
* output.
*
* This is usually <drv-name>-<output-name>, but is just <drv-name> when
* the output name is "out".
*/
std::string outputPathName(std::string_view drvName, OutputNameView outputName);
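/* Hedged examples (the derivation name is hypothetical):
     outputPathName("hello-2.12", "out") == "hello-2.12"
     outputPathName("hello-2.12", "doc") == "hello-2.12-doc"
*/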
// The hashes modulo of a derivation.
//
// Each output is given a hash, although in practice only the content-addressed
// derivations (fixed-output or not) will have a different hash for each
// output.
/**
* The hashes modulo of a derivation.
*
* Each output is given a hash, although in practice only the content-addressed
* derivations (fixed-output or not) will have a different hash for each
* output.
*/
struct DrvHash {
/**
* Map from output names to hashes
*/
std::map<std::string, Hash> hashes;
enum struct Kind : bool {
// Statically determined derivations.
// This hash will be directly used to compute the output paths
/**
* Statically determined derivations.
* This hash will be directly used to compute the output paths
*/
Regular,
// Floating-output derivations (and their reverse dependencies).
/**
* Floating-output derivations (and their reverse dependencies).
*/
Deferred,
};
/**
* The kind of derivation this is, simplified for just "derivation hash
* modulo" purposes.
*/
Kind kind;
};
void operator |= (DrvHash::Kind & self, const DrvHash::Kind & other) noexcept;
/* Returns hashes with the details of fixed-output subderivations
expunged.
A fixed-output derivation is a derivation whose outputs have a
specified content hash and hash algorithm. (Currently they must have
exactly one output (`out'), which is specified using the `outputHash'
and `outputHashAlgo' attributes, but the algorithm doesn't assume
this.) We don't want changes to such derivations to propagate upwards
through the dependency graph, changing output paths everywhere.
For instance, if we change the url in a call to the `fetchurl'
function, we do not want to rebuild everything depending on it---after
all, (the hash of) the file being downloaded is unchanged. So the
*output paths* should not change. On the other hand, the *derivation
paths* should change to reflect the new dependency graph.
For fixed-output derivations, this returns a map from the name of
each output to its hash, unique up to the output's contents.
For regular derivations, it returns a single hash of the derivation
ATerm, after subderivations have been likewise expunged from that
derivation.
/**
* Returns hashes with the details of fixed-output subderivations
* expunged.
*
* A fixed-output derivation is a derivation whose outputs have a
* specified content hash and hash algorithm. (Currently they must have
* exactly one output (`out`), which is specified using the `outputHash`
* and `outputHashAlgo` attributes, but the algorithm doesn't assume
* this.) We don't want changes to such derivations to propagate upwards
* through the dependency graph, changing output paths everywhere.
*
* For instance, if we change the url in a call to the `fetchurl`
* function, we do not want to rebuild everything depending on it---after
* all, (the hash of) the file being downloaded is unchanged. So the
* *output paths* should not change. On the other hand, the *derivation
* paths* should change to reflect the new dependency graph.
*
* For fixed-output derivations, this returns a map from the name of
* each output to its hash, unique up to the output's contents.
*
* For regular derivations, it returns a single hash of the derivation
* ATerm, after subderivations have been likewise expunged from that
* derivation.
*/
DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs);
/*
Return a map associating each output to a hash that uniquely identifies its
derivation (modulo the self-references).
FIXME: what is the Hash in this map?
/**
* Return a map associating each output to a hash that uniquely identifies its
* derivation (modulo the self-references).
*
* \todo What is the Hash in this map?
*/
std::map<std::string, Hash> staticOutputHashes(Store & store, const Derivation & drv);
/* Memoisation of hashDerivationModulo(). */
/**
* Memoisation of hashDerivationModulo().
*/
typedef std::map<StorePath, DrvHash> DrvHashes;
// FIXME: global, though at least thread-safe.
extern Sync<DrvHashes> drvHashes;
bool wantOutput(const std::string & output, const std::set<std::string> & wanted);
struct Source;
struct Sink;
Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv, std::string_view name);
void writeDerivation(Sink & out, const Store & store, const BasicDerivation & drv);
Source & readDerivation(Source & in, const StoreDirConfig & store, BasicDerivation & drv, std::string_view name);
void writeDerivation(Sink & out, const StoreDirConfig & store, const BasicDerivation & drv);
/* This creates an opaque and almost certainly unique string
deterministically from the output name.
It is used as a placeholder to allow derivations to refer to their
own outputs without needing to use the hash of a derivation in
itself, making the hash near-impossible to calculate. */
std::string hashPlaceholder(const std::string_view outputName);
/* This creates an opaque and almost certainly unique string
deterministically from a derivation path and output name.
It is used as a placeholder to allow derivations to refer to
content-addressed paths whose content --- and thus the path
themselves --- isn't yet known. This occurs when a derivation has a
dependency which is a CA derivation. */
std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName);
/**
* This creates an opaque and almost certainly unique string
* deterministically from the output name.
*
* It is used as a placeholder to allow derivations to refer to their
* own outputs without needing to use the hash of a derivation in
* itself, making the hash near-impossible to calculate.
*/
std::string hashPlaceholder(const OutputNameView outputName);
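For reference, a hypothetical use of `hashPlaceholder` (again, not from this changeset): the result is an opaque string that is deterministic for a given output name, so a derivation can embed it to stand in for its own, not-yet-known output path.

```cpp
// Sketch: placeholder a derivation can embed for its own "out" path.
std::string outPlaceholder = hashPlaceholder("out");
```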
extern const Hash impureOutputHash;

View file

@ -0,0 +1,71 @@
#include "derived-path-map.hh"
#include "util.hh"
namespace nix {
template<typename V>
typename DerivedPathMap<V>::ChildNode & DerivedPathMap<V>::ensureSlot(const SingleDerivedPath & k)
{
std::function<ChildNode &(const SingleDerivedPath & )> initIter;
initIter = [&](const auto & k) -> auto & {
return std::visit(overloaded {
[&](const SingleDerivedPath::Opaque & bo) -> auto & {
// will not overwrite if already there
return map[bo.path];
},
[&](const SingleDerivedPath::Built & bfd) -> auto & {
auto & n = initIter(*bfd.drvPath);
return n.childMap[bfd.output];
},
}, k.raw());
};
return initIter(k);
}
template<typename V>
typename DerivedPathMap<V>::ChildNode * DerivedPathMap<V>::findSlot(const SingleDerivedPath & k)
{
std::function<ChildNode *(const SingleDerivedPath & )> initIter;
initIter = [&](const auto & k) {
return std::visit(overloaded {
[&](const SingleDerivedPath::Opaque & bo) {
auto it = map.find(bo.path);
return it != map.end()
? &it->second
: nullptr;
},
[&](const SingleDerivedPath::Built & bfd) {
auto * n = initIter(*bfd.drvPath);
if (!n) return (ChildNode *)nullptr;
auto it = n->childMap.find(bfd.output);
return it != n->childMap.end()
? &it->second
: nullptr;
},
}, k.raw());
};
return initIter(k);
}
}
// instantiations
namespace nix {
template<>
bool DerivedPathMap<std::set<std::string>>::ChildNode::operator == (
const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
#if 0
template<>
std::strong_ordering DerivedPathMap<std::set<std::string>>::ChildNode::operator <=> (
const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept = default;
#endif
template struct DerivedPathMap<std::set<std::string>>::ChildNode;
template struct DerivedPathMap<std::set<std::string>>;
};

View file

@ -0,0 +1,110 @@
#pragma once
///@file
#include "types.hh"
#include "derived-path.hh"
namespace nix {
/**
* A simple Trie, of sorts. Conceptually a map of `SingleDerivedPath` to
* values.
*
* Concretely, an n-ary tree, as described below. A
* `SingleDerivedPath::Opaque` maps to the value of an immediate child
* of the root node. A `SingleDerivedPath::Built` maps to a deeper child
* node: the `SingleDerivedPath::Built::drvPath` is first mapped to a
* child node (inductively), and then the
* `SingleDerivedPath::Built::output` is used to look up that child's
* child via its map. In this manner, every `SingleDerivedPath` is
* mapped to a child node.
*
* @param V A type to instantiate for each output. It should probably
* be an "optional" type so not every interior node has to have a
* value. `* const Something` or `std::optional<Something>` would be
* good choices for "optional" types.
*/
template<typename V>
struct DerivedPathMap {
/**
* A child node (non-root node).
*/
struct ChildNode {
/**
* Value of this child node.
*
* @see DerivedPathMap for what `V` should be.
*/
V value;
/**
* The map type for this node's children.
*/
using Map = std::map<OutputName, ChildNode>;
/**
* The map of this node's children.
*/
Map childMap;
bool operator == (const ChildNode &) const noexcept;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
// decltype(std::declval<V>() <=> std::declval<V>())
// operator <=> (const ChildNode &) const noexcept;
};
/**
* The map type for the root node.
*/
using Map = std::map<StorePath, ChildNode>;
/**
* The map of the root node.
*/
Map map;
bool operator == (const DerivedPathMap &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
// auto operator <=> (const DerivedPathMap &) const noexcept;
/**
* Find the node for `k`, creating it if needed.
*
* The node is referred to as a "slot" on the assumption that `V` is
* some sort of optional type, so the given key can be set or unset
* by changing this node.
*/
ChildNode & ensureSlot(const SingleDerivedPath & k);
/**
* Like `ensureSlot` but does not create the slot if it doesn't exist.
*
* Read the entire description of `ensureSlot` to understand an
* important caveat here that "have slot" does *not* imply "key is
* set in map". To ensure a key is set one would need to get the
* child node (with `findSlot` or `ensureSlot`) *and* check the
* `ChildNode::value`.
*/
ChildNode * findSlot(const SingleDerivedPath & k);
};
template<>
bool DerivedPathMap<std::set<std::string>>::ChildNode::operator == (
const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
#if 0
template<>
std::strong_ordering DerivedPathMap<std::set<std::string>>::ChildNode::operator <=> (
const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept;
template<>
inline auto DerivedPathMap<std::set<std::string>>::operator <=> (const DerivedPathMap<std::set<std::string>> &) const noexcept = default;
#endif
extern template struct DerivedPathMap<std::set<std::string>>::ChildNode;
extern template struct DerivedPathMap<std::set<std::string>>;
}
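The trie shape described in the comment above can be seen in a small usage sketch (not part of this changeset; `fooDrvPath` is an assumed `StorePath` for some `foo.drv`):

```cpp
// Sketch: inserting and looking up slots in a DerivedPathMap.
DerivedPathMap<std::set<std::string>> wanted;

// An opaque key maps to an immediate child of the root node.
SingleDerivedPath opaque = SingleDerivedPath::Opaque { fooDrvPath };
wanted.ensureSlot(opaque).value.insert("out");

// A built key first resolves its drvPath (inductively), then uses the
// output name to index that node's childMap.
SingleDerivedPath built = SingleDerivedPath::Built {
    .drvPath = make_ref<SingleDerivedPath>(opaque),
    .output = "out",
};
wanted.ensureSlot(built).value.insert("dev");

// findSlot does not create anything; it returns nullptr for unknown keys.
assert(wanted.findSlot(built) != nullptr);
```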

View file

@ -1,6 +1,7 @@
#include "derived-path.hh"
#include "derivations.hh"
#include "store-api.hh"
#include "comparator.hh"
#include <nlohmann/json.hpp>
@ -8,127 +9,302 @@
namespace nix {
nlohmann::json DerivedPath::Opaque::toJSON(ref<Store> store) const {
// Custom implementation to avoid `ref` ptr equality
GENERATE_CMP_EXT(
,
std::strong_ordering,
SingleDerivedPathBuilt,
*me->drvPath,
me->output);
// Custom implementation to avoid `ref` ptr equality
// TODO no `GENERATE_CMP_EXT` because no `std::set::operator<=>` on
// Darwin, per header.
GENERATE_EQUAL(
,
DerivedPathBuilt ::,
DerivedPathBuilt,
*me->drvPath,
me->outputs);
GENERATE_ONE_CMP(
,
bool,
DerivedPathBuilt ::,
<,
DerivedPathBuilt,
*me->drvPath,
me->outputs);
nlohmann::json DerivedPath::Opaque::toJSON(const StoreDirConfig & store) const
{
return store.printStorePath(path);
}
nlohmann::json SingleDerivedPath::Built::toJSON(Store & store) const {
nlohmann::json res;
res["path"] = store->printStorePath(path);
res["drvPath"] = drvPath->toJSON(store);
// Fallback for the input-addressed derivation case: We expect to always be
// able to print the output paths, so let's do it
// FIXME try-resolve on drvPath
const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *drvPath));
res["output"] = output;
auto outputPathIter = outputMap.find(output);
if (outputPathIter == outputMap.end())
res["outputPath"] = nullptr;
else if (std::optional p = outputPathIter->second)
res["outputPath"] = store.printStorePath(*p);
else
res["outputPath"] = nullptr;
return res;
}
nlohmann::json DerivedPath::Built::toJSON(ref<Store> store) const {
nlohmann::json DerivedPath::Built::toJSON(Store & store) const {
nlohmann::json res;
res["drvPath"] = store->printStorePath(drvPath);
res["drvPath"] = drvPath->toJSON(store);
// Fallback for the input-addressed derivation case: We expect to always be
// able to print the output paths, so let's do it
const auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath);
for (const auto & output : outputs) {
auto knownOutput = get(knownOutputs, output);
if (knownOutput && *knownOutput)
res["outputs"][output] = store->printStorePath(**knownOutput);
// FIXME try-resolve on drvPath
const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *drvPath));
for (const auto & [output, outputPathOpt] : outputMap) {
if (!outputs.contains(output)) continue;
if (outputPathOpt)
res["outputs"][output] = store.printStorePath(*outputPathOpt);
else
res["outputs"][output] = nullptr;
}
return res;
}
nlohmann::json BuiltPath::Built::toJSON(ref<Store> store) const {
nlohmann::json res;
res["drvPath"] = store->printStorePath(drvPath);
for (const auto& [output, path] : outputs) {
res["outputs"][output] = store->printStorePath(path);
}
return res;
}
StorePathSet BuiltPath::outPaths() const
nlohmann::json SingleDerivedPath::toJSON(Store & store) const
{
return std::visit(
overloaded{
[](const BuiltPath::Opaque & p) { return StorePathSet{p.path}; },
[](const BuiltPath::Built & b) {
StorePathSet res;
for (auto & [_, path] : b.outputs)
res.insert(path);
return res;
},
}, raw()
);
return std::visit([&](const auto & buildable) {
return buildable.toJSON(store);
}, raw());
}
std::string DerivedPath::Opaque::to_string(const Store & store) const
nlohmann::json DerivedPath::toJSON(Store & store) const
{
return std::visit([&](const auto & buildable) {
return buildable.toJSON(store);
}, raw());
}
std::string DerivedPath::Opaque::to_string(const StoreDirConfig & store) const
{
return store.printStorePath(path);
}
std::string DerivedPath::Built::to_string(const Store & store) const
std::string SingleDerivedPath::Built::to_string(const StoreDirConfig & store) const
{
return store.printStorePath(drvPath)
+ "!"
+ (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));
return drvPath->to_string(store) + "^" + output;
}
std::string DerivedPath::to_string(const Store & store) const
std::string SingleDerivedPath::Built::to_string_legacy(const StoreDirConfig & store) const
{
return drvPath->to_string(store) + "!" + output;
}
std::string DerivedPath::Built::to_string(const StoreDirConfig & store) const
{
return drvPath->to_string(store)
+ '^'
+ outputs.to_string();
}
std::string DerivedPath::Built::to_string_legacy(const StoreDirConfig & store) const
{
return drvPath->to_string_legacy(store)
+ "!"
+ outputs.to_string();
}
std::string SingleDerivedPath::to_string(const StoreDirConfig & store) const
{
return std::visit(
[&](const auto & req) { return req.to_string(store); },
this->raw());
raw());
}
std::string DerivedPath::to_string(const StoreDirConfig & store) const
{
return std::visit(
[&](const auto & req) { return req.to_string(store); },
raw());
}
std::string SingleDerivedPath::to_string_legacy(const StoreDirConfig & store) const
{
return std::visit(overloaded {
[&](const SingleDerivedPath::Built & req) { return req.to_string_legacy(store); },
[&](const SingleDerivedPath::Opaque & req) { return req.to_string(store); },
}, this->raw());
}
std::string DerivedPath::to_string_legacy(const StoreDirConfig & store) const
{
return std::visit(overloaded {
[&](const DerivedPath::Built & req) { return req.to_string_legacy(store); },
[&](const DerivedPath::Opaque & req) { return req.to_string(store); },
}, this->raw());
}
DerivedPath::Opaque DerivedPath::Opaque::parse(const Store & store, std::string_view s)
DerivedPath::Opaque DerivedPath::Opaque::parse(const StoreDirConfig & store, std::string_view s)
{
return {store.parseStorePath(s)};
}
DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view drvS, std::string_view outputsS)
void drvRequireExperiment(
const SingleDerivedPath & drv,
const ExperimentalFeatureSettings & xpSettings)
{
auto drvPath = store.parseStorePath(drvS);
std::set<std::string> outputs;
if (outputsS != "*") {
outputs = tokenizeString<std::set<std::string>>(outputsS, ",");
if (outputs.empty())
throw Error(
"Explicit list of wanted outputs '%s' must not be empty. Consider using '*' as a wildcard meaning all outputs if no output in particular is wanted.", outputsS);
}
return {drvPath, outputs};
std::visit(overloaded {
[&](const SingleDerivedPath::Opaque &) {
// plain drv path; no experimental features required.
},
[&](const SingleDerivedPath::Built &) {
xpSettings.require(Xp::DynamicDerivations);
},
}, drv.raw());
}
DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
SingleDerivedPath::Built SingleDerivedPath::Built::parse(
const StoreDirConfig & store, ref<SingleDerivedPath> drv,
OutputNameView output,
const ExperimentalFeatureSettings & xpSettings)
{
size_t n = s.find("!");
drvRequireExperiment(*drv, xpSettings);
return {
.drvPath = drv,
.output = std::string { output },
};
}
DerivedPath::Built DerivedPath::Built::parse(
const StoreDirConfig & store, ref<SingleDerivedPath> drv,
OutputNameView outputsS,
const ExperimentalFeatureSettings & xpSettings)
{
drvRequireExperiment(*drv, xpSettings);
return {
.drvPath = drv,
.outputs = OutputsSpec::parse(outputsS),
};
}
static SingleDerivedPath parseWithSingle(
const StoreDirConfig & store, std::string_view s, std::string_view separator,
const ExperimentalFeatureSettings & xpSettings)
{
size_t n = s.rfind(separator);
return n == s.npos
? (SingleDerivedPath) SingleDerivedPath::Opaque::parse(store, s)
: (SingleDerivedPath) SingleDerivedPath::Built::parse(store,
make_ref<SingleDerivedPath>(parseWithSingle(
store,
s.substr(0, n),
separator,
xpSettings)),
s.substr(n + 1),
xpSettings);
}
SingleDerivedPath SingleDerivedPath::parse(
const StoreDirConfig & store,
std::string_view s,
const ExperimentalFeatureSettings & xpSettings)
{
return parseWithSingle(store, s, "^", xpSettings);
}
SingleDerivedPath SingleDerivedPath::parseLegacy(
const StoreDirConfig & store,
std::string_view s,
const ExperimentalFeatureSettings & xpSettings)
{
return parseWithSingle(store, s, "!", xpSettings);
}
static DerivedPath parseWith(
const StoreDirConfig & store, std::string_view s, std::string_view separator,
const ExperimentalFeatureSettings & xpSettings)
{
size_t n = s.rfind(separator);
return n == s.npos
? (DerivedPath) DerivedPath::Opaque::parse(store, s)
: (DerivedPath) DerivedPath::Built::parse(store, s.substr(0, n), s.substr(n + 1));
: (DerivedPath) DerivedPath::Built::parse(store,
make_ref<SingleDerivedPath>(parseWithSingle(
store,
s.substr(0, n),
separator,
xpSettings)),
s.substr(n + 1),
xpSettings);
}
RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
DerivedPath DerivedPath::parse(
const StoreDirConfig & store,
std::string_view s,
const ExperimentalFeatureSettings & xpSettings)
{
RealisedPath::Set res;
std::visit(
overloaded{
[&](const BuiltPath::Opaque & p) { res.insert(p.path); },
[&](const BuiltPath::Built & p) {
auto drvHashes =
staticOutputHashes(store, store.readDerivation(p.drvPath));
for (auto& [outputName, outputPath] : p.outputs) {
if (settings.isExperimentalFeatureEnabled(
Xp::CaDerivations)) {
auto drvOutput = get(drvHashes, outputName);
if (!drvOutput)
throw Error(
"the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)",
store.printStorePath(p.drvPath), outputName);
auto thisRealisation = store.queryRealisation(
DrvOutput{*drvOutput, outputName});
assert(thisRealisation); // We've built it, so we must
// have the realisation
res.insert(*thisRealisation);
} else {
res.insert(outputPath);
}
}
},
return parseWith(store, s, "^", xpSettings);
}
DerivedPath DerivedPath::parseLegacy(
const StoreDirConfig & store,
std::string_view s,
const ExperimentalFeatureSettings & xpSettings)
{
return parseWith(store, s, "!", xpSettings);
}
DerivedPath DerivedPath::fromSingle(const SingleDerivedPath & req)
{
return std::visit(overloaded {
[&](const SingleDerivedPath::Opaque & o) -> DerivedPath {
return o;
},
raw());
return res;
[&](const SingleDerivedPath::Built & b) -> DerivedPath {
return DerivedPath::Built {
.drvPath = b.drvPath,
.outputs = OutputsSpec::Names { b.output },
};
},
}, req.raw());
}
const StorePath & SingleDerivedPath::Built::getBaseStorePath() const
{
return drvPath->getBaseStorePath();
}
const StorePath & DerivedPath::Built::getBaseStorePath() const
{
return drvPath->getBaseStorePath();
}
template<typename DP>
static inline const StorePath & getBaseStorePath_(const DP & derivedPath)
{
return std::visit(overloaded {
[&](const typename DP::Built & bfd) -> auto & {
return bfd.drvPath->getBaseStorePath();
},
[&](const typename DP::Opaque & bo) -> auto & {
return bo.path;
},
}, derivedPath.raw());
}
const StorePath & SingleDerivedPath::getBaseStorePath() const
{
return getBaseStorePath_(*this);
}
const StorePath & DerivedPath::getBaseStorePath() const
{
return getBaseStorePath_(*this);
}
}
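To make the `^`/`!` grammar above concrete, here is a hedged sketch (not from this changeset) of parsing and printing, assuming a usable `StoreDirConfig & store`, a `std::string drvS` holding a derivation's store path, and the relevant experimental features enabled where dynamic derivations are involved:

```cpp
// "<drv>^out" parses to Built { Opaque(<drv>), outputs = {"out"} };
// with dynamic derivations, "<drv>^foo.drv^out" nests another Built inside.
DerivedPath p = DerivedPath::parse(store, drvS + "^out");

std::visit(overloaded {
    [&](const DerivedPath::Opaque & o) {
        // Just a concrete store path.
        std::cout << o.to_string(store) << "\n";
    },
    [&](const DerivedPath::Built & b) {
        // b.drvPath is itself a SingleDerivedPath; b.outputs is an OutputsSpec.
        std::cout << b.to_string(store) << "\n";         // '^' separator
        std::cout << b.to_string_legacy(store) << "\n";  // '!' separator
    },
}, p.raw());
```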

View file

@ -1,15 +1,20 @@
#pragma once
///@file
#include "util.hh"
#include "path.hh"
#include "realisation.hh"
#include "outputs-spec.hh"
#include "config.hh"
#include "ref.hh"
#include <optional>
#include <variant>
#include <nlohmann/json_fwd.hpp>
namespace nix {
struct StoreDirConfig;
// TODO stop needing this, `toJSON` below should be pure
class Store;
/**
@ -22,16 +27,140 @@ class Store;
struct DerivedPathOpaque {
StorePath path;
nlohmann::json toJSON(ref<Store> store) const;
std::string to_string(const Store & store) const;
static DerivedPathOpaque parse(const Store & store, std::string_view);
std::string to_string(const StoreDirConfig & store) const;
static DerivedPathOpaque parse(const StoreDirConfig & store, std::string_view);
nlohmann::json toJSON(const StoreDirConfig & store) const;
bool operator < (const DerivedPathOpaque & b) const
{ return path < b.path; }
bool operator == (const DerivedPathOpaque &) const = default;
auto operator <=> (const DerivedPathOpaque &) const = default;
};
struct SingleDerivedPath;
/**
* A derived path that is built from a derivation
* A single derived path that is built from a derivation
*
* Built derived paths are a pair of a derivation and an output name. They are
* evaluated by building the derivation, and then taking the resulting output
* path of the given output name.
*/
struct SingleDerivedPathBuilt {
ref<SingleDerivedPath> drvPath;
OutputName output;
/**
* Get the store path this is ultimately derived from (by realising
* and projecting outputs).
*
* Note that this is *not* a property of the store object being
* referred to, but just of this path --- how we happened to be
* referring to that store object. In other words, this means this
* function breaks "referential transparency". It should therefore
* be used only with great care.
*/
const StorePath & getBaseStorePath() const;
/**
* Uses `^` as the separator
*/
std::string to_string(const StoreDirConfig & store) const;
/**
* Uses `!` as the separator
*/
std::string to_string_legacy(const StoreDirConfig & store) const;
/**
* The caller splits on the separator, so it works for both variants.
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static SingleDerivedPathBuilt parse(
const StoreDirConfig & store, ref<SingleDerivedPath> drvPath,
OutputNameView outputs,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
nlohmann::json toJSON(Store & store) const;
bool operator == (const SingleDerivedPathBuilt &) const noexcept;
std::strong_ordering operator <=> (const SingleDerivedPathBuilt &) const noexcept;
};
using _SingleDerivedPathRaw = std::variant<
DerivedPathOpaque,
SingleDerivedPathBuilt
>;
/**
* A "derived path" is a very simple sort of expression (not a Nix
* language expression! But an expression in the general sense) that
* evaluates to a (concrete) store path. It is either:
*
* - opaque, in which case it is just a concrete store path with
* possibly no known derivation
*
* - built, in which case it is a pair of a derivation path and an
* output name.
*/
struct SingleDerivedPath : _SingleDerivedPathRaw {
using Raw = _SingleDerivedPathRaw;
using Raw::Raw;
using Opaque = DerivedPathOpaque;
using Built = SingleDerivedPathBuilt;
inline const Raw & raw() const {
return static_cast<const Raw &>(*this);
}
bool operator == (const SingleDerivedPath &) const = default;
auto operator <=> (const SingleDerivedPath &) const = default;
/**
* Get the store path this is ultimately derived from (by realising
* and projecting outputs).
*
* Note that this is *not* a property of the store object being
* referred to, but just of this path --- how we happened to be
* referring to that store object. In other words, this means this
* function breaks "referential transparency". It should therefore
* be used only with great care.
*/
const StorePath & getBaseStorePath() const;
/**
* Uses `^` as the separator
*/
std::string to_string(const StoreDirConfig & store) const;
/**
* Uses `!` as the separator
*/
std::string to_string_legacy(const StoreDirConfig & store) const;
/**
* Uses `^` as the separator
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static SingleDerivedPath parse(
const StoreDirConfig & store,
std::string_view,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Uses `!` as the separator
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static SingleDerivedPath parseLegacy(
const StoreDirConfig & store,
std::string_view,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
nlohmann::json toJSON(Store & store) const;
};
static inline ref<SingleDerivedPath> makeConstantStorePathRef(StorePath drvPath)
{
return make_ref<SingleDerivedPath>(SingleDerivedPath::Opaque { drvPath });
}
/**
* A set of derived paths that are built from a derivation
*
* Built derived paths are a pair of a derivation and some output names.
* They are evaluated by building the derivation, and then replacing the
@ -43,15 +172,43 @@ struct DerivedPathOpaque {
* output name.
*/
struct DerivedPathBuilt {
StorePath drvPath;
std::set<std::string> outputs;
ref<SingleDerivedPath> drvPath;
OutputsSpec outputs;
std::string to_string(const Store & store) const;
static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view);
nlohmann::json toJSON(ref<Store> store) const;
/**
* Get the store path this is ultimately derived from (by realising
* and projecting outputs).
*
* Note that this is *not* a property of the store object being
* referred to, but just of this path --- how we happened to be
* referring to that store object. In other words, this means this
* function breaks "referential transparency". It should therefore
* be used only with great care.
*/
const StorePath & getBaseStorePath() const;
bool operator < (const DerivedPathBuilt & b) const
{ return std::make_pair(drvPath, outputs) < std::make_pair(b.drvPath, b.outputs); }
/**
* Uses `^` as the separator
*/
std::string to_string(const StoreDirConfig & store) const;
/**
* Uses `!` as the separator
*/
std::string to_string_legacy(const StoreDirConfig & store) const;
/**
* The caller splits on the separator, so it works for both variants.
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static DerivedPathBuilt parse(
const StoreDirConfig & store, ref<SingleDerivedPath>,
std::string_view,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
nlohmann::json toJSON(Store & store) const;
bool operator == (const DerivedPathBuilt &) const noexcept;
// TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet.
bool operator < (const DerivedPathBuilt &) const noexcept;
};
using _DerivedPathRaw = std::variant<
@ -61,13 +218,13 @@ using _DerivedPathRaw = std::variant<
/**
* A "derived path" is a very simple sort of expression that evaluates
* to (concrete) store path. It is either:
* to one or more (concrete) store paths. It is either:
*
* - opaque, in which case it is just a concrete store path with
* - opaque, in which case it is just a single concrete store path with
* possibly no known derivation
*
* - built, in which case it is a pair of a derivation path and an
* output name.
* - built, in which case it is a pair of a derivation path and some
* output names.
*/
struct DerivedPath : _DerivedPathRaw {
using Raw = _DerivedPathRaw;
@ -80,49 +237,69 @@ struct DerivedPath : _DerivedPathRaw {
return static_cast<const Raw &>(*this);
}
std::string to_string(const Store & store) const;
static DerivedPath parse(const Store & store, std::string_view);
};
bool operator == (const DerivedPath &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet.
//auto operator <=> (const DerivedPath &) const = default;
/**
* A built derived path with hints in the form of optional concrete output paths.
*
* See 'BuiltPath' for an explanation.
*/
struct BuiltPathBuilt {
StorePath drvPath;
std::map<std::string, StorePath> outputs;
/**
* Get the store path this is ultimately derived from (by realising
* and projecting outputs).
*
* Note that this is *not* a property of the store object being
* referred to, but just of this path --- how we happened to be
* referring to that store object. In other words, this means this
* function breaks "referential transparency". It should therefore
* be used only with great care.
*/
const StorePath & getBaseStorePath() const;
nlohmann::json toJSON(ref<Store> store) const;
static BuiltPathBuilt parse(const Store & store, std::string_view);
};
/**
* Uses `^` as the separator
*/
std::string to_string(const StoreDirConfig & store) const;
/**
* Uses `!` as the separator
*/
std::string to_string_legacy(const StoreDirConfig & store) const;
/**
* Uses `^` as the separator
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static DerivedPath parse(
const StoreDirConfig & store,
std::string_view,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Uses `!` as the separator
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static DerivedPath parseLegacy(
const StoreDirConfig & store,
std::string_view,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
using _BuiltPathRaw = std::variant<
DerivedPath::Opaque,
BuiltPathBuilt
>;
/**
* A built path. Similar to a `DerivedPath`, but enriched with the corresponding
* output path(s).
*/
struct BuiltPath : _BuiltPathRaw {
using Raw = _BuiltPathRaw;
using Raw::Raw;
using Opaque = DerivedPathOpaque;
using Built = BuiltPathBuilt;
inline const Raw & raw() const {
return static_cast<const Raw &>(*this);
}
StorePathSet outPaths() const;
RealisedPath::Set toRealisedPaths(Store & store) const;
/**
* Convert a `SingleDerivedPath` to a `DerivedPath`.
*/
static DerivedPath fromSingle(const SingleDerivedPath &);
nlohmann::json toJSON(Store & store) const;
};
typedef std::vector<DerivedPath> DerivedPaths;
typedef std::vector<BuiltPath> BuiltPaths;
/**
* Used by various parser functions to require experimental features as
* needed.
*
* Somewhat unfortunately, this cannot just be an implementation detail for
* this module.
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
void drvRequireExperiment(
const SingleDerivedPath & drv,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
}
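A small sketch (not part of this diff) of the helpers declared above: `makeConstantStorePathRef` wraps a plain store path, and `DerivedPath::fromSingle` widens a single-output request into the multi-output form; `drvPath` is an assumed `StorePath`:

```cpp
// Sketch: build a single-output request and widen it to a DerivedPath.
SingleDerivedPath single = SingleDerivedPath::Built {
    .drvPath = makeConstantStorePathRef(drvPath),
    .output = "out",
};

DerivedPath wanted = DerivedPath::fromSingle(single);
// `wanted` is now DerivedPath::Built with an outputs spec naming just "out".
```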

View file

@ -0,0 +1,58 @@
#include "downstream-placeholder.hh"
#include "derivations.hh"
namespace nix {
std::string DownstreamPlaceholder::render() const
{
return "/" + hash.to_string(HashFormat::Nix32, false);
}
DownstreamPlaceholder DownstreamPlaceholder::unknownCaOutput(
const StorePath & drvPath,
OutputNameView outputName,
const ExperimentalFeatureSettings & xpSettings)
{
xpSettings.require(Xp::CaDerivations);
auto drvNameWithExtension = drvPath.name();
auto drvName = drvNameWithExtension.substr(0, drvNameWithExtension.size() - 4);
auto clearText = "nix-upstream-output:" + std::string { drvPath.hashPart() } + ":" + outputPathName(drvName, outputName);
return DownstreamPlaceholder {
hashString(HashAlgorithm::SHA256, clearText)
};
}
DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation(
const DownstreamPlaceholder & placeholder,
OutputNameView outputName,
const ExperimentalFeatureSettings & xpSettings)
{
xpSettings.require(Xp::DynamicDerivations);
auto compressed = compressHash(placeholder.hash, 20);
auto clearText = "nix-computed-output:"
+ compressed.to_string(HashFormat::Nix32, false)
+ ":" + std::string { outputName };
return DownstreamPlaceholder {
hashString(HashAlgorithm::SHA256, clearText)
};
}
DownstreamPlaceholder DownstreamPlaceholder::fromSingleDerivedPathBuilt(
const SingleDerivedPath::Built & b,
const ExperimentalFeatureSettings & xpSettings)
{
return std::visit(overloaded {
[&](const SingleDerivedPath::Opaque & o) {
return DownstreamPlaceholder::unknownCaOutput(o.path, b.output, xpSettings);
},
[&](const SingleDerivedPath::Built & b2) {
return DownstreamPlaceholder::unknownDerivation(
DownstreamPlaceholder::fromSingleDerivedPathBuilt(b2, xpSettings),
b.output,
xpSettings);
},
}, b.drvPath->raw());
}
}

View file

@ -0,0 +1,91 @@
#pragma once
///@file
#include "hash.hh"
#include "path.hh"
#include "derived-path.hh"
namespace nix {
/**
* Downstream Placeholders are opaque and almost certainly unique values
* used to allow derivations to refer to store objects which are yet to
* be built and for which we do not yet have store paths.
*
* They correspond to `DerivedPaths` that are not `DerivedPath::Opaque`,
* except for the cases involving input addressing or fixed outputs
* where we do know a store path for the derivation output in advance.
*
* Unlike `DerivedPath`, however, `DownstreamPlaceholder` is
* purposefully opaque and obfuscated. This is so they are hard to
* create by accident, and so substituting them (once we know what the
* path to store object is) is unlikely to capture other stuff it
* shouldn't.
*
* We use them with `Derivation`: the `render()` method is called to
* render an opaque string which can be used in the derivation, and the
* resolving logic can substitute those strings for store paths when
* resolving `Derivation.inputDrvs` to `BasicDerivation.inputSrcs`.
*/
class DownstreamPlaceholder
{
/**
* `DownstreamPlaceholder` is just a newtype of `Hash`.
* This is its only field.
*/
Hash hash;
/**
* Newtype constructor
*/
DownstreamPlaceholder(Hash hash) : hash(hash) { }
public:
/**
* This creates an opaque and almost certainly unique string
* deterministically from the placeholder.
*/
std::string render() const;
/**
* Create a placeholder for an unknown output of a content-addressed
* derivation.
*
* The derivation itself is known (we have a store path for it), but
* the output doesn't yet have a known store path.
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static DownstreamPlaceholder unknownCaOutput(
const StorePath & drvPath,
OutputNameView outputName,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Create a placeholder for the output of an unknown derivation.
*
* The derivation is not yet known because it is a dynamic
* derivation --- it is itself an output of another derivation ---
* and we just have (another) placeholder for it.
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static DownstreamPlaceholder unknownDerivation(
const DownstreamPlaceholder & drvPlaceholder,
OutputNameView outputName,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Convenience constructor that handles both cases (unknown
* content-addressed output and unknown derivation), delegating as
* needed to `unknownCaOutput` and `unknownDerivation`.
*
* Recursively builds up a placeholder from a
* `SingleDerivedPath::Built.drvPath` chain.
*/
static DownstreamPlaceholder fromSingleDerivedPathBuilt(
const SingleDerivedPath::Built & built,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
};
}
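As a rough usage sketch (not from this changeset), with `caDrvPath` an assumed `StorePath` of a content-addressed derivation and the `ca-derivations` feature enabled:

```cpp
// Sketch: a placeholder for the not-yet-known "out" path of a CA derivation.
auto placeholder = DownstreamPlaceholder::unknownCaOutput(caDrvPath, "out");
std::string rendered = placeholder.render();
// `rendered` is the opaque string that gets substituted for the real store
// path once the output is actually built and resolved.
```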

View file

@ -6,19 +6,37 @@ namespace nix {
struct DummyStoreConfig : virtual StoreConfig {
using StoreConfig::StoreConfig;
DummyStoreConfig(std::string_view scheme, std::string_view authority, const Params & params)
: StoreConfig(params)
{
if (!authority.empty())
throw UsageError("`%s` store URIs must not contain an authority part %s", scheme, authority);
}
const std::string name() override { return "Dummy Store"; }
std::string doc() override
{
return
#include "dummy-store.md"
;
}
static std::set<std::string> uriSchemes() {
return {"dummy"};
}
};
struct DummyStore : public virtual DummyStoreConfig, public virtual Store
{
DummyStore(const std::string scheme, const std::string uri, const Params & params)
: DummyStore(params)
DummyStore(std::string_view scheme, std::string_view authority, const Params & params)
: StoreConfig(params)
, DummyStoreConfig(scheme, authority, params)
, Store(params)
{ }
DummyStore(const Params & params)
: StoreConfig(params)
, DummyStoreConfig(params)
, Store(params)
: DummyStore("dummy", "", params)
{ }
std::string getUri() override
@ -32,8 +50,12 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
callback(nullptr);
}
static std::set<std::string> uriSchemes() {
return {"dummy"};
/**
* The dummy store is incapable of *not* trusting! :)
*/
virtual std::optional<TrustedFlag> isTrustedClient() override
{
return Trusted;
}
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
@ -43,12 +65,15 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
RepairFlag repair, CheckSigsFlag checkSigs) override
{ unsupported("addToStore"); }
StorePath addTextToStore(
virtual StorePath addToStoreFromDump(
Source & dump,
std::string_view name,
std::string_view s,
const StorePathSet & references,
RepairFlag repair) override
{ unsupported("addTextToStore"); }
FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive,
ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive,
HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
const StorePathSet & references = StorePathSet(),
RepairFlag repair = NoRepair) override
{ unsupported("addToStore"); }
void narFromPath(const StorePath & path, Sink & sink) override
{ unsupported("narFromPath"); }
@ -56,6 +81,9 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override
{ callback(nullptr); }
virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath) override
{ unsupported("getFSAccessor"); }
};
static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regDummyStore;

View file

@ -0,0 +1,13 @@
R"(
**Store URL format**: `dummy://`
This store type represents a store that contains no store paths and
cannot be written to. It's useful when you want to use the Nix
evaluator when no actual Nix store exists, e.g.
```console
# nix eval --store dummy:// --expr '1 + 2'
```
)"

View file

@ -1,7 +1,8 @@
#include "serialise.hh"
#include "store-api.hh"
#include "archive.hh"
#include "worker-protocol.hh"
#include "common-protocol.hh"
#include "common-protocol-impl.hh"
#include <algorithm>
@ -16,7 +17,7 @@ void Store::exportPaths(const StorePathSet & paths, Sink & sink)
//logger->incExpected(doneLabel, sorted.size());
for (auto & path : sorted) {
//Activity act(*logger, lvlInfo, format("exporting path '%s'") % path);
//Activity act(*logger, lvlInfo, "exporting path '%s'", path);
sink << 1;
exportPath(path, sink);
//logger->incProgress(doneLabel);
@ -29,7 +30,7 @@ void Store::exportPath(const StorePath & path, Sink & sink)
{
auto info = queryPathInfo(path);
HashSink hashSink(htSHA256);
HashSink hashSink(HashAlgorithm::SHA256);
TeeSink teeSink(sink, hashSink);
narFromPath(path, teeSink);
@ -38,14 +39,16 @@ void Store::exportPath(const StorePath & path, Sink & sink)
filesystem corruption from spreading to other machines.
Don't complain if the stored hash is zero (unknown). */
Hash hash = hashSink.currentHash().first;
if (hash != info->narHash && info->narHash != Hash(info->narHash.type))
if (hash != info->narHash && info->narHash != Hash(info->narHash.algo))
throw Error("hash of path '%s' has changed from '%s' to '%s'!",
printStorePath(path), info->narHash.to_string(Base32, true), hash.to_string(Base32, true));
printStorePath(path), info->narHash.to_string(HashFormat::Nix32, true), hash.to_string(HashFormat::Nix32, true));
teeSink
<< exportMagic
<< printStorePath(path);
worker_proto::write(*this, teeSink, info->references);
CommonProto::write(*this,
CommonProto::WriteConn { .to = teeSink },
info->references);
teeSink
<< (info->deriver ? printStorePath(*info->deriver) : "")
<< 0;
@ -62,7 +65,7 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
/* Extract the NAR from the source. */
StringSink saved;
TeeSource tee { source, saved };
ParseSink ether;
NullFileSystemObjectSink ether;
parseDump(ether, tee);
uint32_t magic = readInt(source);
@ -71,11 +74,12 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
auto path = parseStorePath(readString(source));
//Activity act(*logger, lvlInfo, format("importing path '%s'") % info.path);
//Activity act(*logger, lvlInfo, "importing path '%s'", info.path);
auto references = worker_proto::read(*this, source, Phantom<StorePathSet> {});
auto references = CommonProto::Serialise<StorePathSet>::read(*this,
CommonProto::ReadConn { .from = source });
auto deriver = readString(source);
auto narHash = hashString(htSHA256, saved.s);
auto narHash = hashString(HashAlgorithm::SHA256, saved.s);
ValidPathInfo info { path, narHash };
if (deriver != "")

View file

@ -1,16 +1,21 @@
#include "filetransfer.hh"
#include "util.hh"
#include "globals.hh"
#include "config-global.hh"
#include "store-api.hh"
#include "s3.hh"
#include "compression.hh"
#include "finally.hh"
#include "callback.hh"
#include "signals.hh"
#if ENABLE_S3
#include <aws/core/client/ClientConfiguration.h>
#endif
#if __linux__
# include "namespaces.hh"
#endif
#include <unistd.h>
#include <fcntl.h>
@ -49,6 +54,8 @@ struct curlFileTransfer : public FileTransfer
bool done = false; // whether either the success or failure function has been called
Callback<FileTransferResult> callback;
CURL * req = 0;
// buffer to accompany the `req` above
char errbuf[CURL_ERROR_SIZE];
bool active = false; // whether the handle has been added to the multi object
std::string statusMsg;
@ -66,7 +73,10 @@ struct curlFileTransfer : public FileTransfer
curl_off_t writtenToSink = 0;
std::chrono::steady_clock::time_point startTime = std::chrono::steady_clock::now();
inline static const std::set<long> successfulStatuses {200, 201, 204, 206, 304, 0 /* other protocol */};
/* Get the HTTP status code, or 0 for other protocols. */
long getHTTPStatus()
{
@ -88,6 +98,10 @@ struct curlFileTransfer : public FileTransfer
{request.uri}, request.parentAct)
, callback(std::move(callback))
, finalSink([this](std::string_view data) {
if (errorSink) {
(*errorSink)(data);
}
if (this->request.dataCallback) {
auto httpStatus = getHTTPStatus();
@ -101,6 +115,9 @@ struct curlFileTransfer : public FileTransfer
this->result.data.append(data);
})
{
result.urls.push_back(request.uri);
requestHeaders = curl_slist_append(requestHeaders, "Accept-Encoding: zstd, br, gzip, deflate, bzip2, xz");
if (!request.expectedETag.empty())
requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
if (!request.mimeType.empty())
@ -162,8 +179,6 @@ struct curlFileTransfer : public FileTransfer
}
}
if (errorSink)
(*errorSink)({(char *) contents, realSize});
(*decompressionSink)({(char *) contents, realSize});
return realSize;
@ -178,24 +193,35 @@ struct curlFileTransfer : public FileTransfer
return ((TransferItem *) userp)->writeCallback(contents, size, nmemb);
}
void appendCurrentUrl()
{
char * effectiveUriCStr = nullptr;
curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
if (effectiveUriCStr && *result.urls.rbegin() != effectiveUriCStr)
result.urls.push_back(effectiveUriCStr);
}
size_t headerCallback(void * contents, size_t size, size_t nmemb)
{
size_t realSize = size * nmemb;
std::string line((char *) contents, realSize);
printMsg(lvlVomit, format("got header for '%s': %s") % request.uri % trim(line));
printMsg(lvlVomit, "got header for '%s': %s", request.uri, trim(line));
static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase);
std::smatch match;
if (std::regex_match(line, match, statusLine)) {
if (std::smatch match; std::regex_match(line, match, statusLine)) {
result.etag = "";
result.data.clear();
result.bodySize = 0;
statusMsg = trim(match.str(1));
acceptRanges = false;
encoding = "";
appendCurrentUrl();
} else {
auto i = line.find(':');
if (i != std::string::npos) {
std::string name = toLower(trim(line.substr(0, i)));
if (name == "etag") {
result.etag = trim(line.substr(i + 1));
/* Hack to work around a GitHub bug: it sends
@ -206,13 +232,25 @@ struct curlFileTransfer : public FileTransfer
long httpStatus = 0;
curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
if (result.etag == request.expectedETag && httpStatus == 200) {
debug(format("shutting down on 200 HTTP response with expected ETag"));
debug("shutting down on 200 HTTP response with expected ETag");
return 0;
}
} else if (name == "content-encoding")
}
else if (name == "content-encoding")
encoding = trim(line.substr(i + 1));
else if (name == "accept-ranges" && toLower(trim(line.substr(i + 1))) == "bytes")
acceptRanges = true;
else if (name == "link" || name == "x-amz-meta-link") {
auto value = trim(line.substr(i + 1));
static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase);
if (std::smatch match; std::regex_match(value, match, linkRegex))
result.immutableUrl = match.str(1);
else
debug("got invalid link header '%s'", value);
}
}
}
return realSize;
@ -226,11 +264,11 @@ struct curlFileTransfer : public FileTransfer
int progressCallback(double dltotal, double dlnow)
{
try {
act.progress(dlnow, dltotal);
act.progress(dlnow, dltotal);
} catch (nix::Interrupted &) {
assert(_isInterrupted);
assert(getInterrupted());
}
return _isInterrupted;
return getInterrupted();
}
static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
@ -315,7 +353,7 @@ struct curlFileTransfer : public FileTransfer
if (request.verifyTLS) {
if (settings.caFile != "")
curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.get().c_str());
} else {
curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
@ -334,21 +372,25 @@ struct curlFileTransfer : public FileTransfer
if (writtenToSink)
curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink);
curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf);
errbuf[0] = 0;
result.data.clear();
result.bodySize = 0;
}
void finish(CURLcode code)
{
auto finishTime = std::chrono::steady_clock::now();
auto httpStatus = getHTTPStatus();
char * effectiveUriCStr;
curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
if (effectiveUriCStr)
result.effectiveUri = effectiveUriCStr;
debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes, duration = %.2f s",
request.verb(), request.uri, code, httpStatus, result.bodySize,
std::chrono::duration_cast<std::chrono::milliseconds>(finishTime - startTime).count() / 1000.0f
);
debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes",
request.verb(), request.uri, code, httpStatus, result.bodySize);
appendCurrentUrl();
if (decompressionSink) {
try {
@ -404,6 +446,10 @@ struct curlFileTransfer : public FileTransfer
err = Misc;
} else {
// Don't bother retrying on certain cURL errors either
// Allow selecting a subset of enum values
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch-enum"
switch (code) {
case CURLE_FAILED_INIT:
case CURLE_URL_MALFORMAT:
@ -424,6 +470,7 @@ struct curlFileTransfer : public FileTransfer
default: // Shut up warnings
break;
}
#pragma GCC diagnostic pop
}
attempt++;
@ -432,7 +479,7 @@ struct curlFileTransfer : public FileTransfer
if (errorSink)
response = std::move(errorSink->s);
auto exc =
code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
code == CURLE_ABORTED_BY_CALLBACK && getInterrupted()
? FileTransferError(Interrupted, std::move(response), "%s of '%s' was interrupted", request.verb(), request.uri)
: httpStatus != 0
? FileTransferError(err,
@ -442,8 +489,8 @@ struct curlFileTransfer : public FileTransfer
code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
: FileTransferError(err,
std::move(response),
"unable to %s '%s': %s (%d)",
request.verb(), request.uri, curl_easy_strerror(code), code);
"unable to %s '%s': %s (%d) %s",
request.verb(), request.uri, curl_easy_strerror(code), code, errbuf);
/* If this is a transient error, then maybe retry the
download after a while. If we're writing to a
@ -482,10 +529,12 @@ struct curlFileTransfer : public FileTransfer
Sync<State> state_;
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
/* We can't use a std::condition_variable to wake up the curl
thread, because it only monitors file descriptors. So use a
pipe instead. */
Pipe wakeupPipe;
#endif
std::thread workerThread;
@ -505,8 +554,10 @@ struct curlFileTransfer : public FileTransfer
fileTransferSettings.httpConnections.get());
#endif
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
wakeupPipe.create();
fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);
#endif
workerThread = std::thread([&]() { workerThreadEntry(); });
}
@ -527,17 +578,28 @@ struct curlFileTransfer : public FileTransfer
auto state(state_.lock());
state->quit = true;
}
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
writeFull(wakeupPipe.writeSide.get(), " ", false);
#endif
}
void workerThreadMain()
{
/* Cause this thread to be notified on SIGINT. */
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
auto callback = createInterruptCallback([&]() {
stopWorkerThread();
});
#endif
unshareFilesystem();
#if __linux__
try {
tryUnshareFilesystem();
} catch (nix::Error & e) {
e.addTrace({}, "in download thread");
throw;
}
#endif
std::map<CURL *, std::shared_ptr<TransferItem>> items;
@ -571,9 +633,11 @@ struct curlFileTransfer : public FileTransfer
/* Wait for activity, including wakeup events. */
int numfds = 0;
struct curl_waitfd extraFDs[1];
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
extraFDs[0].fd = wakeupPipe.readSide.get();
extraFDs[0].events = CURL_WAIT_POLLIN;
extraFDs[0].revents = 0;
#endif
long maxSleepTimeMs = items.empty() ? 10000 : 100;
auto sleepTimeMs =
nextWakeup != std::chrono::steady_clock::time_point()
@ -657,7 +721,9 @@ struct curlFileTransfer : public FileTransfer
throw nix::Error("cannot enqueue download request because the download thread is shutting down");
state->incoming.push(item);
}
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
writeFull(wakeupPipe.writeSide.get(), " ");
#endif
}
#if ENABLE_S3
@ -756,7 +822,10 @@ FileTransferResult FileTransfer::upload(const FileTransferRequest & request)
return enqueueFileTransfer(request).get();
}
void FileTransfer::download(FileTransferRequest && request, Sink & sink)
void FileTransfer::download(
FileTransferRequest && request,
Sink & sink,
std::function<void(FileTransferResult)> resultCallback)
{
/* Note: we can't call 'sink' via request.dataCallback, because
that would cause the sink to execute on the fileTransfer
@ -794,8 +863,10 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)
buffer). We don't wait forever to prevent stalling the
download thread. (Hopefully sleeping will throttle the
sender.) */
if (state->data.size() > 1024 * 1024) {
if (state->data.size() > fileTransferSettings.downloadBufferSize) {
debug("download buffer is full; going to sleep");
static bool haveWarned = false;
warnOnce(haveWarned, "download buffer is full; consider increasing the 'download-buffer-size' setting");
state.wait_for(state->request, std::chrono::seconds(10));
}
@ -806,11 +877,13 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)
};
enqueueFileTransfer(request,
{[_state](std::future<FileTransferResult> fut) {
{[_state, resultCallback{std::move(resultCallback)}](std::future<FileTransferResult> fut) {
auto state(_state->lock());
state->quit = true;
try {
fut.get();
auto res = fut.get();
if (resultCallback)
resultCallback(std::move(res));
} catch (...) {
state->exc = std::current_exception();
}
@ -828,7 +901,7 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)
{
auto state(_state->lock());
while (state->data.empty()) {
if (state->data.empty()) {
if (state->quit) {
if (state->exc) std::rethrow_exception(state->exc);
@ -836,9 +909,13 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)
}
state.wait(state->avail);
if (state->data.empty()) continue;
}
chunk = std::move(state->data);
/* Reset state->data after the move, since we check data.empty() */
state->data = "";
state->request.notify_one();
}
@ -855,12 +932,12 @@ template<typename... Args>
FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args)
: Error(args...), error(error), response(response)
{
const auto hf = hintfmt(args...);
const auto hf = HintFmt(args...);
// FIXME: Due to https://github.com/NixOS/nix/issues/3841 we don't know how
// to print different messages for different verbosity levels. For now
// we add some heuristics for detecting when we want to show the response.
if (response && (response->size() < 1024 || response->find("<html>") != std::string::npos))
err.msg = hintfmt("%1%\n\nresponse body:\n\n%2%", normaltxt(hf.str()), chomp(*response));
err.msg = HintFmt("%1%\n\nresponse body:\n\n%2%", Uncolored(hf.str()), chomp(*response));
else
err.msg = hf;
}

View file

@ -1,12 +1,15 @@
#pragma once
#include "types.hh"
#include "hash.hh"
#include "config.hh"
///@file
#include <string>
#include <future>
#include "logging.hh"
#include "types.hh"
#include "ref.hh"
#include "config.hh"
#include "serialise.hh"
namespace nix {
struct FileTransferSettings : Config
@ -44,6 +47,12 @@ struct FileTransferSettings : Config
Setting<unsigned int> tries{this, 5, "download-attempts",
"How often Nix will attempt to download a file before giving up."};
Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size",
R"(
The size of Nix's internal download buffer during `curl` transfers. If data is
not processed quickly enough to exceed the size of this buffer, downloads may stall.
)"};
};
extern FileTransferSettings fileTransferSettings;
@ -74,11 +83,35 @@ struct FileTransferRequest
struct FileTransferResult
{
/**
* Whether this is a cache hit (i.e. the ETag supplied in the
* request is still valid). If so, `data` is empty.
*/
bool cached = false;
/**
* The ETag of the object.
*/
std::string etag;
std::string effectiveUri;
/**
* All URLs visited in the redirect chain.
*/
std::vector<std::string> urls;
/**
* The response body.
*/
std::string data;
uint64_t bodySize = 0;
/**
* An "immutable" URL for this resource (i.e. one whose contents
* will never change), as returned by the `Link: <url>;
* rel="immutable"` header.
*/
std::optional<std::string> immutableUrl;
};
class Store;
@ -87,39 +120,59 @@ struct FileTransfer
{
virtual ~FileTransfer() { }
/* Enqueue a data transfer request, returning a future to the result of
the download. The future may throw a FileTransferError
exception. */
/**
* Enqueue a data transfer request, returning a future to the result of
* the download. The future may throw a FileTransferError
* exception.
*/
virtual void enqueueFileTransfer(const FileTransferRequest & request,
Callback<FileTransferResult> callback) = 0;
std::future<FileTransferResult> enqueueFileTransfer(const FileTransferRequest & request);
/* Synchronously download a file. */
/**
* Synchronously download a file.
*/
FileTransferResult download(const FileTransferRequest & request);
/* Synchronously upload a file. */
/**
* Synchronously upload a file.
*/
FileTransferResult upload(const FileTransferRequest & request);
/* Download a file, writing its data to a sink. The sink will be
invoked on the thread of the caller. */
void download(FileTransferRequest && request, Sink & sink);
/**
* Download a file, writing its data to a sink. The sink will be
* invoked on the thread of the caller.
*/
void download(
FileTransferRequest && request,
Sink & sink,
std::function<void(FileTransferResult)> resultCallback = {});
enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
};
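A hedged sketch (not from this changeset) of the sink-based download path declared above, using the new result callback; the URL and the one-argument `FileTransferRequest` constructor are assumptions:

```cpp
// Sketch: stream a download into a sink and inspect the redirect chain.
auto fileTransfer = getFileTransfer();

FileTransferRequest request("https://cache.example.org/nix-cache-info");
StringSink sink;

fileTransfer->download(std::move(request), sink,
    [](FileTransferResult result) {
        for (auto & url : result.urls)
            debug("fetched via %s", url);
    });

// `sink.s` now holds the response body (the sink runs on the caller's thread).
```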
/* Return a shared FileTransfer object. Using this object is preferred
because it enables connection reuse and HTTP/2 multiplexing. */
/**
* @return a shared FileTransfer object.
*
* Using this object is preferred because it enables connection reuse
* and HTTP/2 multiplexing.
*/
ref<FileTransfer> getFileTransfer();
/* Return a new FileTransfer object. */
/**
* @return a new FileTransfer object
*
* Prefer getFileTransfer() to this; see its docs for why.
*/
ref<FileTransfer> makeFileTransfer();
class FileTransferError : public Error
{
public:
FileTransfer::Error error;
std::optional<std::string> response; // intentionally optional
/// intentionally optional
std::optional<std::string> response;
template<typename... Args>
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);

View file

@ -1,40 +0,0 @@
#pragma once
#include "types.hh"
namespace nix {
/* An abstract class for accessing a filesystem-like structure, such
as a (possibly remote) Nix store or the contents of a NAR file. */
class FSAccessor
{
public:
enum Type { tMissing, tRegular, tSymlink, tDirectory };
struct Stat
{
Type type = tMissing;
uint64_t fileSize = 0; // regular files only
bool isExecutable = false; // regular files only
uint64_t narOffset = 0; // regular files only
};
virtual ~FSAccessor() { }
virtual Stat stat(const Path & path) = 0;
virtual StringSet readDirectory(const Path & path) = 0;
/**
* Read a file inside the store.
*
* If `requireValidPath` is set to `true` (the default), the path must be
* inside a valid store path, otherwise it just needs to be physically
* present (but not necessarily properly registered)
*/
virtual std::string readFile(const Path & path, bool requireValidPath = true) = 0;
virtual std::string readLink(const Path & path) = 0;
};
}

View file

@ -1,8 +1,10 @@
#pragma once
///@file
#include <unordered_set>
#include "store-api.hh"
namespace nix {
@ -11,19 +13,20 @@ typedef std::unordered_map<StorePath, std::unordered_set<std::string>> Roots;
struct GCOptions
{
/* Garbage collector operation:
- `gcReturnLive': return the set of paths reachable from
(i.e. in the closure of) the roots.
- `gcReturnDead': return the set of paths not reachable from
the roots.
- `gcDeleteDead': actually delete the latter set.
- `gcDeleteSpecific': delete the paths listed in
`pathsToDelete', insofar as they are not reachable.
*/
/**
* Garbage collector operation:
*
* - `gcReturnLive`: return the set of paths reachable from
* (i.e. in the closure of) the roots.
*
* - `gcReturnDead`: return the set of paths not reachable from
* the roots.
*
* - `gcDeleteDead`: actually delete the latter set.
*
* - `gcDeleteSpecific`: delete the paths listed in
* `pathsToDelete`, insofar as they are not reachable.
*/
typedef enum {
gcReturnLive,
gcReturnDead,
@ -33,51 +36,84 @@ struct GCOptions
GCAction action{gcDeleteDead};
/* If `ignoreLiveness' is set, then reachability from the roots is
ignored (dangerous!). However, the paths must still be
unreferenced *within* the store (i.e., there can be no other
store paths that depend on them). */
/**
* If `ignoreLiveness` is set, then reachability from the roots is
* ignored (dangerous!). However, the paths must still be
* unreferenced *within* the store (i.e., there can be no other
* store paths that depend on them).
*/
bool ignoreLiveness{false};
/* For `gcDeleteSpecific', the paths to delete. */
/**
* For `gcDeleteSpecific`, the paths to delete.
*/
StorePathSet pathsToDelete;
/* Stop after at least `maxFreed' bytes have been freed. */
/**
* Stop after at least `maxFreed` bytes have been freed.
*/
uint64_t maxFreed{std::numeric_limits<uint64_t>::max()};
};
struct GCResults
{
/* Depending on the action, the GC roots, or the paths that would
be or have been deleted. */
/**
* Depending on the action, the GC roots, or the paths that would
* be or have been deleted.
*/
PathSet paths;
/* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
number of bytes that would be or was freed. */
/**
* For `gcReturnDead`, `gcDeleteDead` and `gcDeleteSpecific`, the
* number of bytes that would be or was freed.
*/
uint64_t bytesFreed = 0;
};
/**
* Mix-in class for \ref Store "stores" which expose a notion of garbage
* collection.
*
* Garbage collection will allow deleting paths which are not
* transitively "rooted".
*
* The notion of GC roots is actually not part of this class.
*
* - The base `Store` class has `Store::addTempRoot()` because for a store
* that doesn't support garbage collection at all, a temporary GC root is
* safely implementable as a no-op.
*
* @todo actually this is not so good because stores are *views*.
* Some views have only no-op temp roots even though other views of the
* same store allow triggering GC. For instance one can't add a root
* over ssh, but that doesn't prevent someone from gc-ing that store
* accessed via SSH locally.
*
* - The derived `LocalFSStore` class has `LocalFSStore::addPermRoot`,
* which is not part of this class because it relies on the notion of
* an ambient file system. There are stores (`ssh-ng://`, for one),
* that *do* support garbage collection but *don't* expose any file
* system, and `LocalFSStore::addPermRoot` thus does not make sense
* for them.
*/
struct GcStore : public virtual Store
{
inline static std::string operationName = "Garbage collection";
/* Add an indirect root, which is merely a symlink to `path' from
/nix/var/nix/gcroots/auto/<hash of `path'>. `path' is supposed
to be a symlink to a store path. The garbage collector will
automatically remove the indirect root when it finds that
`path' has disappeared. */
virtual void addIndirectRoot(const Path & path) = 0;
/* Find the roots of the garbage collector. Each root is a pair
(link, storepath) where `link' is the path of the symlink
outside of the Nix store that point to `storePath'. If
'censor' is true, privacy-sensitive information about roots
found in /proc is censored. */
/**
* Find the roots of the garbage collector. Each root is a pair
* `(link, storepath)` where `link` is the path of the symlink
* outside of the Nix store that point to `storePath`. If
* `censor` is true, privacy-sensitive information about roots
* found in `/proc` is censored.
*/
virtual Roots findRoots(bool censor) = 0;
/* Perform a garbage collection. */
/**
* Perform a garbage collection.
*/
virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
};
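
The interface above is small but complete: a caller picks a `GCAction`, fills in `GCOptions`, and reads the outcome back from `GCResults`. The sketch below is not part of this commit; it only illustrates how a consumer might drive `GcStore::collectGarbage` to delete one specific path, assuming a `nix::Store &` whose concrete type implements `GcStore` (the header names, the `dynamic_cast`, and the calling function are illustrative assumptions).

```cpp
// Hypothetical caller, for illustration only (not from this commit).
#include "gc-store.hh"   // header defining GcStore (name assumed)
#include "store-api.hh"

void deleteOneStorePath(nix::Store & store, const nix::StorePath & path)
{
    using namespace nix;

    // GcStore is a mix-in; only some store implementations provide it.
    auto * gcStore = dynamic_cast<GcStore *>(&store);
    if (!gcStore)
        throw Error("store does not support garbage collection");

    GCOptions options;
    options.action = GCOptions::gcDeleteSpecific; // delete only pathsToDelete
    options.pathsToDelete = StorePathSet{path};   // skipped if still reachable

    GCResults results;
    gcStore->collectGarbage(options, results);

    // results.paths lists what was deleted; results.bytesFreed the total size.
}
```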

View file

@ -1,8 +1,14 @@
#include "derivations.hh"
#include "globals.hh"
#include "local-store.hh"
#include "local-fs-store.hh"
#include "finally.hh"
#include "unix-domain-socket.hh"
#include "signals.hh"
#if !defined(__linux__)
// For shelling out to lsof
# include "processes.hh"
#endif
#include <functional>
#include <queue>
@ -13,124 +19,105 @@
#include <climits>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#if HAVE_STATVFS
# include <sys/statvfs.h>
#endif
#ifndef _WIN32
# include <poll.h>
# include <sys/socket.h>
# include <sys/un.h>
#endif
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>
namespace nix {
static std::string gcSocketPath = "/gc-socket/socket";
static std::string gcRootsDir = "gcroots";
static void makeSymlink(const Path & link, const Path & target)
{
/* Create directories up to `gcRoot'. */
createDirs(dirOf(link));
/* Create the new symlink. */
Path tempLink = (format("%1%.tmp-%2%-%3%")
% link % getpid() % random()).str();
createSymlink(target, tempLink);
/* Atomically replace the old one. */
renameFile(tempLink, link);
}
void LocalStore::addIndirectRoot(const Path & path)
{
std::string hash = hashString(htSHA1, path).to_string(Base32, false);
std::string hash = hashString(HashAlgorithm::SHA1, path).to_string(HashFormat::Nix32, false);
Path realRoot = canonPath(fmt("%1%/%2%/auto/%3%", stateDir, gcRootsDir, hash));
makeSymlink(realRoot, path);
}
Path LocalFSStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot)
void LocalStore::createTempRootsFile()
{
Path gcRoot(canonPath(_gcRoot));
auto fdTempRoots(_fdTempRoots.lock());
if (isInStore(gcRoot))
throw Error(
"creating a garbage collector root (%1%) in the Nix store is forbidden "
"(are you running nix-build inside the store?)", gcRoot);
/* Create the temporary roots file for this process. */
if (*fdTempRoots) return;
/* Register this root with the garbage collector, if it's
running. This should be superfluous since the caller should
have registered this root yet, but let's be on the safe
side. */
addTempRoot(storePath);
while (1) {
if (pathExists(fnTempRoots))
/* It *must* be stale, since there can be no two
processes with the same pid. */
unlink(fnTempRoots.c_str());
/* Don't clobber the link if it already exists and doesn't
point to the Nix store. */
if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
throw Error("cannot create symlink '%1%'; already exists", gcRoot);
makeSymlink(gcRoot, printStorePath(storePath));
addIndirectRoot(gcRoot);
*fdTempRoots = openLockFile(fnTempRoots, true);
return gcRoot;
debug("acquiring write lock on '%s'", fnTempRoots);
lockFile(fdTempRoots->get(), ltWrite, true);
/* Check whether the garbage collector didn't get in our
way. */
struct stat st;
if (fstat(fromDescriptorReadOnly(fdTempRoots->get()), &st) == -1)
throw SysError("statting '%1%'", fnTempRoots);
if (st.st_size == 0) break;
/* The garbage collector deleted this file before we could get
a lock. (It won't delete the file after we get a lock.)
Try again. */
}
}
void LocalStore::addTempRoot(const StorePath & path)
{
auto state(_state.lock());
/* Create the temporary roots file for this process. */
if (!state->fdTempRoots) {
while (1) {
if (pathExists(fnTempRoots))
/* It *must* be stale, since there can be no two
processes with the same pid. */
unlink(fnTempRoots.c_str());
state->fdTempRoots = openLockFile(fnTempRoots, true);
debug("acquiring write lock on '%s'", fnTempRoots);
lockFile(state->fdTempRoots.get(), ltWrite, true);
/* Check whether the garbage collector didn't get in our
way. */
struct stat st;
if (fstat(state->fdTempRoots.get(), &st) == -1)
throw SysError("statting '%1%'", fnTempRoots);
if (st.st_size == 0) break;
/* The garbage collector deleted this file before we could
get a lock. (It won't delete the file after we get a
lock.) Try again. */
}
if (readOnly) {
debug("Read-only store doesn't support creating lock files for temp roots, but nothing can be deleted anyways.");
return;
}
if (!state->fdGCLock)
state->fdGCLock = openGCLock();
createTempRootsFile();
/* Open/create the global GC lock file. */
{
auto fdGCLock(_fdGCLock.lock());
if (!*fdGCLock)
*fdGCLock = openGCLock();
}
restart:
FdLock gcLock(state->fdGCLock.get(), ltRead, false, "");
/* Try to acquire a shared global GC lock (non-blocking). This
only succeeds if the garbage collector is not currently
running. */
FdLock gcLock(_fdGCLock.lock()->get(), ltRead, false, "");
if (!gcLock.acquired) {
/* We couldn't get a shared global GC lock, so the garbage
collector is running. So we have to connect to the garbage
collector and inform it about our root. */
if (!state->fdRootsSocket) {
auto fdRootsSocket(_fdRootsSocket.lock());
if (!*fdRootsSocket) {
auto socketPath = stateDir.get() + gcSocketPath;
debug("connecting to '%s'", socketPath);
state->fdRootsSocket = createUnixDomainSocket();
*fdRootsSocket = createUnixDomainSocket();
try {
nix::connect(state->fdRootsSocket.get(), socketPath);
nix::connect(toSocket(fdRootsSocket->get()), socketPath);
} catch (SysError & e) {
/* The garbage collector may have exited, so we need to
restart. */
if (e.errNo == ECONNREFUSED) {
debug("GC socket connection refused");
state->fdRootsSocket.close();
/* The garbage collector may have exited or not
created the socket yet, so we need to restart. */
if (e.errNo == ECONNREFUSED || e.errNo == ENOENT) {
debug("GC socket connection refused: %s", e.msg());
fdRootsSocket->close();
std::this_thread::sleep_for(std::chrono::milliseconds(100));
goto restart;
}
throw;
@ -139,9 +126,9 @@ void LocalStore::addTempRoot(const StorePath & path)
try {
debug("sending GC root '%s'", printStorePath(path));
writeFull(state->fdRootsSocket.get(), printStorePath(path) + "\n", false);
writeFull(fdRootsSocket->get(), printStorePath(path) + "\n", false);
char c;
readFull(state->fdRootsSocket.get(), &c, 1);
readFull(fdRootsSocket->get(), &c, 1);
assert(c == '1');
debug("got ack for GC root '%s'", printStorePath(path));
} catch (SysError & e) {
@ -149,20 +136,21 @@ void LocalStore::addTempRoot(const StorePath & path)
restart. */
if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
debug("GC socket disconnected");
state->fdRootsSocket.close();
fdRootsSocket->close();
goto restart;
}
throw;
} catch (EndOfFile & e) {
debug("GC socket disconnected");
state->fdRootsSocket.close();
fdRootsSocket->close();
goto restart;
}
}
/* Append the store path to the temporary roots file. */
/* Record the store path in the temporary roots file so it will be
seen by a future run of the garbage collector. */
auto s = printStorePath(path) + '\0';
writeFull(state->fdTempRoots.get(), s);
writeFull(_fdTempRoots.lock()->get(), s);
}
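
For readers following the socket handshake above: the protocol between `addTempRoot()` and the collector's root server is simply one store path per line, acknowledged by a single `'1'` byte per root. The standalone POSIX client below illustrates that wire format only; it is not code from this commit, and the socket path handling and error recovery are deliberately simplified.

```cpp
// Illustration of the GC root-registration protocol (not from this commit).
#include <cstring>
#include <stdexcept>
#include <string>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

void registerRootWithRunningGC(const std::string & socketPath, const std::string & storePath)
{
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0) throw std::runtime_error("cannot create socket");

    sockaddr_un addr{};
    addr.sun_family = AF_UNIX;
    std::strncpy(addr.sun_path, socketPath.c_str(), sizeof(addr.sun_path) - 1);

    if (connect(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) < 0) {
        close(fd);
        // ECONNREFUSED / ENOENT here means no collector is running; the real
        // client above retries after re-checking the global GC lock.
        throw std::runtime_error("GC root server not reachable");
    }

    std::string msg = storePath + "\n";   // one root per line
    write(fd, msg.data(), msg.size());    // the real client handles partial writes
    char ack = 0;
    read(fd, &ack, 1);                    // collector acknowledges each root with '1'
    close(fd);

    if (ack != '1') throw std::runtime_error("no ack from garbage collector");
}
```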
@ -173,18 +161,24 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
{
/* Read the `temproots' directory for per-process temporary root
files. */
for (auto & i : readDirectory(tempRootsDir)) {
if (i.name[0] == '.') {
for (auto & i : std::filesystem::directory_iterator{tempRootsDir}) {
checkInterrupt();
auto name = i.path().filename().string();
if (name[0] == '.') {
// Ignore hidden files. Some package managers (notably portage) create
// those to keep the directory alive.
continue;
}
Path path = tempRootsDir + "/" + i.name;
Path path = i.path().string();
pid_t pid = std::stoi(i.name);
pid_t pid = std::stoi(name);
debug(format("reading temporary root file '%1%'") % path);
AutoCloseFD fd(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666));
debug("reading temporary root file '%1%'", path);
AutoCloseFD fd(toDescriptor(open(path.c_str(),
#ifndef _WIN32
O_CLOEXEC |
#endif
O_RDWR, 0666)));
if (!fd) {
/* It's okay if the file has disappeared. */
if (errno == ENOENT) continue;
@ -217,7 +211,7 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
}
void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, Roots & roots)
{
auto foundRoot = [&](const Path & path, const Path & target) {
try {
@ -231,15 +225,17 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
try {
if (type == DT_UNKNOWN)
type = getFileType(path);
if (type == std::filesystem::file_type::unknown)
type = std::filesystem::symlink_status(path).type();
if (type == DT_DIR) {
for (auto & i : readDirectory(path))
findRoots(path + "/" + i.name, i.type, roots);
if (type == std::filesystem::file_type::directory) {
for (auto & i : std::filesystem::directory_iterator{path}) {
checkInterrupt();
findRoots(i.path().string(), i.symlink_status().type(), roots);
}
}
else if (type == DT_LNK) {
else if (type == std::filesystem::file_type::symlink) {
Path target = readLink(path);
if (isInStore(target))
foundRoot(path, target);
@ -249,19 +245,18 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
target = absPath(target, dirOf(path));
if (!pathExists(target)) {
if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
printInfo(format("removing stale link from '%1%' to '%2%'") % path % target);
printInfo("removing stale link from '%1%' to '%2%'", path, target);
unlink(path.c_str());
}
} else {
struct stat st2 = lstat(target);
if (!S_ISLNK(st2.st_mode)) return;
if (!std::filesystem::is_symlink(target)) return;
Path target2 = readLink(target);
if (isInStore(target2)) foundRoot(target, target2);
}
}
}
else if (type == DT_REG) {
else if (type == std::filesystem::file_type::regular) {
auto storePath = maybeParseStorePath(storeDir + "/" + std::string(baseNameOf(path)));
if (storePath && isValidPath(*storePath))
roots[std::move(*storePath)].emplace(path);
@ -269,6 +264,14 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
}
catch (std::filesystem::filesystem_error & e) {
/* We only ignore permanent failures. */
if (e.code() == std::errc::permission_denied || e.code() == std::errc::no_such_file_or_directory || e.code() == std::errc::not_a_directory)
printInfo("cannot read potential root '%1%'", path);
else
throw;
}
catch (SysError & e) {
/* We only ignore permanent failures. */
if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
@ -282,8 +285,8 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
void LocalStore::findRootsNoTemp(Roots & roots, bool censor)
{
/* Process direct roots in {gcroots,profiles}. */
findRoots(stateDir + "/" + gcRootsDir, DT_UNKNOWN, roots);
findRoots(stateDir + "/profiles", DT_UNKNOWN, roots);
findRoots(stateDir + "/" + gcRootsDir, std::filesystem::file_type::unknown, roots);
findRoots(stateDir + "/profiles", std::filesystem::file_type::unknown, roots);
/* Add additional roots returned by different platform-specific
heuristics. This is typically used to add running programs to
@ -302,29 +305,25 @@ Roots LocalStore::findRoots(bool censor)
return roots;
}
typedef std::unordered_map<Path, std::unordered_set<std::string>> UncheckedRoots;
/**
* Key is a mere string because `std::filesystem::path` cannot be hashed with macOS's libc++
*/
typedef std::unordered_map<std::string, std::unordered_set<std::string>> UncheckedRoots;
static void readProcLink(const std::string & file, UncheckedRoots & roots)
static void readProcLink(const std::filesystem::path & file, UncheckedRoots & roots)
{
/* 64 is the starting buffer size gnu readlink uses... */
auto bufsiz = ssize_t{64};
try_again:
char buf[bufsiz];
auto res = readlink(file.c_str(), buf, bufsiz);
if (res == -1) {
if (errno == ENOENT || errno == EACCES || errno == ESRCH)
std::filesystem::path buf;
try {
buf = std::filesystem::read_symlink(file);
} catch (std::filesystem::filesystem_error & e) {
if (e.code() == std::errc::no_such_file_or_directory
|| e.code() == std::errc::permission_denied
|| e.code() == std::errc::no_such_process)
return;
throw SysError("reading symlink");
throw;
}
if (res == bufsiz) {
if (SSIZE_MAX / 2 < bufsiz)
throw Error("stupidly long symlink");
bufsiz *= 2;
goto try_again;
}
if (res > 0 && buf[0] == '/')
roots[std::string(static_cast<char *>(buf), res)]
.emplace(file);
if (buf.is_absolute())
roots[buf.string()].emplace(file.string());
}
static std::string quoteRegexChars(const std::string & raw)
@ -358,35 +357,35 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
while (errno = 0, ent = readdir(procDir.get())) {
checkInterrupt();
if (std::regex_match(ent->d_name, digitsRegex)) {
readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked);
readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);
auto fdStr = fmt("/proc/%s/fd", ent->d_name);
auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
if (!fdDir) {
if (errno == ENOENT || errno == EACCES)
continue;
throw SysError("opening %1%", fdStr);
}
struct dirent * fd_ent;
while (errno = 0, fd_ent = readdir(fdDir.get())) {
if (fd_ent->d_name[0] != '.')
readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked);
}
if (errno) {
if (errno == ESRCH)
continue;
throw SysError("iterating /proc/%1%/fd", ent->d_name);
}
fdDir.reset();
try {
auto mapFile = fmt("/proc/%s/maps", ent->d_name);
auto mapLines = tokenizeString<std::vector<std::string>>(readFile(mapFile), "\n");
readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked);
readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);
auto fdStr = fmt("/proc/%s/fd", ent->d_name);
auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
if (!fdDir) {
if (errno == ENOENT || errno == EACCES)
continue;
throw SysError("opening %1%", fdStr);
}
struct dirent * fd_ent;
while (errno = 0, fd_ent = readdir(fdDir.get())) {
if (fd_ent->d_name[0] != '.')
readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked);
}
if (errno) {
if (errno == ESRCH)
continue;
throw SysError("iterating /proc/%1%/fd", ent->d_name);
}
fdDir.reset();
std::filesystem::path mapFile = fmt("/proc/%s/maps", ent->d_name);
auto mapLines = tokenizeString<std::vector<std::string>>(readFile(mapFile.string()), "\n");
for (const auto & line : mapLines) {
auto match = std::smatch{};
if (std::regex_match(line, match, mapRegex))
unchecked[match[1]].emplace(mapFile);
unchecked[match[1]].emplace(mapFile.string());
}
auto envFile = fmt("/proc/%s/environ", ent->d_name);
@ -394,7 +393,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
auto env_end = std::sregex_iterator{};
for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
unchecked[i->str()].emplace(envFile);
} catch (SysError & e) {
} catch (SystemError & e) {
if (errno == ENOENT || errno == EACCES || errno == ESRCH)
continue;
throw;
@ -417,7 +416,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
for (const auto & line : lsofLines) {
std::smatch match;
if (std::regex_match(line, match, lsofRegex))
unchecked[match[1]].emplace("{lsof}");
unchecked[match[1].str()].emplace("{lsof}");
}
} catch (ExecError & e) {
/* lsof not installed, lsof failed */
@ -490,11 +489,20 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
auto fdGCLock = openGCLock();
FdLock gcLock(fdGCLock.get(), ltWrite, true, "waiting for the big garbage collector lock...");
/* Synchronisation point to test ENOENT handling in
addTempRoot(), see tests/gc-non-blocking.sh. */
if (auto p = getEnv("_NIX_TEST_GC_SYNC_1"))
readFile(*p);
/* Start the server for receiving new roots. */
auto socketPath = stateDir.get() + gcSocketPath;
createDirs(dirOf(socketPath));
auto fdServer = createUnixDomainSocket(socketPath, 0666);
// TODO nonblocking socket on windows?
#ifdef _WIN32
throw UnimplementedError("External GC client not implemented yet");
#else
if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) | O_NONBLOCK) == -1)
throw SysError("making socket '%1%' non-blocking", socketPath);
@ -550,8 +558,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
/* On macOS, accepted sockets inherit the
non-blocking flag from the server socket, so
explicitly make it blocking. */
if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) & ~O_NONBLOCK) == -1)
abort();
if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
panic("Could not set non-blocking flag on client socket");
while (true) {
try {
@ -595,6 +603,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
if (serverThread.joinable()) serverThread.join();
});
#endif
/* Find the roots. Since we've grabbed the GC lock, the set of
permanent roots cannot increase now. */
printInfo("finding garbage collector roots...");
@ -613,6 +623,10 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
roots.insert(root.first);
}
/* Synchronisation point for testing, see tests/functional/gc-non-blocking.sh. */
if (auto p = getEnv("_NIX_TEST_GC_SYNC_2"))
readFile(*p);
/* Helper function that deletes a path from the store and throws
GCLimitReached if we've deleted enough garbage. */
auto deleteFromStore = [&](std::string_view baseName)
@ -624,8 +638,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
by another process. We need to be sure that we can acquire an
exclusive lock before deleting them. */
if (baseName.find("tmp-", 0) == 0) {
AutoCloseFD tmpDirFd = open(realPath.c_str(), O_RDONLY | O_DIRECTORY);
if (tmpDirFd.get() == -1 || !lockFile(tmpDirFd.get(), ltWrite, false)) {
AutoCloseFD tmpDirFd = openDirectory(realPath);
if (!tmpDirFd || !lockFile(tmpDirFd.get(), ltWrite, false)) {
debug("skipping locked tempdir '%s'", realPath);
return;
}
@ -636,7 +650,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
results.paths.insert(path);
uint64_t bytesFreed;
deletePath(realPath, bytesFreed);
deleteStorePath(realPath, bytesFreed);
results.bytesFreed += bytesFreed;
if (results.bytesFreed > options.maxFreed) {
@ -723,7 +738,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
auto i = referrersCache.find(*path);
if (i == referrersCache.end()) {
StorePathSet referrers;
queryReferrers(*path, referrers);
queryGCReferrers(*path, referrers);
referrersCache.emplace(*path, std::move(referrers));
i = referrersCache.find(*path);
}
@ -759,10 +774,6 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
}
};
/* Synchronisation point for testing, see tests/gc-concurrent.sh. */
if (auto p = getEnv("_NIX_TEST_GC_SYNC"))
readFile(*p);
/* Either delete all garbage paths, or just the specified
paths (for gcDeleteSpecific). */
if (options.action == GCOptions::gcDeleteSpecific) {
@ -773,7 +784,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
throw Error(
"Cannot delete path '%1%' since it is still alive. "
"To find out why, use: "
"nix-store --query --roots",
"nix-store --query --roots and nix-store --query --referrers",
printStorePath(i));
}
@ -849,19 +860,25 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
continue;
}
printMsg(lvlTalkative, format("deleting unused link '%1%'") % path);
printMsg(lvlTalkative, "deleting unused link '%1%'", path);
if (unlink(path.c_str()) == -1)
throw SysError("deleting '%1%'", path);
/* Do not accound for deleted file here. Rely on deletePath()
/* Do not account for deleted file here. Rely on deletePath()
accounting. */
}
struct stat st;
if (stat(linksDir.c_str(), &st) == -1)
throw SysError("statting '%1%'", linksDir);
int64_t overhead = st.st_blocks * 512ULL;
int64_t overhead =
#ifdef _WIN32
0
#else
st.st_blocks * 512ULL
#endif
;
printInfo("note: currently hard linking saves %.2f MiB",
((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
@ -874,6 +891,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
void LocalStore::autoGC(bool sync)
{
#if HAVE_STATVFS
static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE");
auto getAvail = [this]() -> uint64_t {
@ -950,6 +968,7 @@ void LocalStore::autoGC(bool sync)
sync:
// Wait for the future outside of the state lock.
if (sync) future.get();
#endif
}

View file

@ -1,18 +1,40 @@
#include "globals.hh"
#include "util.hh"
#include "config-global.hh"
#include "current-process.hh"
#include "archive.hh"
#include "args.hh"
#include "abstract-setting-to-json.hh"
#include "compute-levels.hh"
#include "signals.hh"
#include <algorithm>
#include <map>
#include <mutex>
#include <thread>
#include <dlfcn.h>
#include <sys/utsname.h>
#include <nlohmann/json.hpp>
#ifndef _WIN32
# include <sys/utsname.h>
#endif
#ifdef __GLIBC__
# include <gnu/lib-names.h>
# include <nss.h>
# include <dlfcn.h>
#endif
#if __APPLE__
# include "processes.hh"
#endif
#include "config-impl.hh"
#ifdef __APPLE__
#include <sys/sysctl.h>
#endif
#include "strings.hh"
namespace nix {
@ -30,28 +52,29 @@ static GlobalConfig::Register rSettings(&settings);
Settings::Settings()
: nixPrefix(NIX_PREFIX)
, nixStore(canonPath(getEnv("NIX_STORE_DIR").value_or(getEnv("NIX_STORE").value_or(NIX_STORE_DIR))))
, nixDataDir(canonPath(getEnv("NIX_DATA_DIR").value_or(NIX_DATA_DIR)))
, nixLogDir(canonPath(getEnv("NIX_LOG_DIR").value_or(NIX_LOG_DIR)))
, nixStateDir(canonPath(getEnv("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
, nixConfDir(canonPath(getEnv("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
, nixStore(
#ifndef _WIN32
// On Windows `/nix/store` is not a canonical path, but we don't
// want to deal with that yet.
canonPath
#endif
(getEnvNonEmpty("NIX_STORE_DIR").value_or(getEnvNonEmpty("NIX_STORE").value_or(NIX_STORE_DIR))))
, nixDataDir(canonPath(getEnvNonEmpty("NIX_DATA_DIR").value_or(NIX_DATA_DIR)))
, nixLogDir(canonPath(getEnvNonEmpty("NIX_LOG_DIR").value_or(NIX_LOG_DIR)))
, nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
, nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
, nixUserConfFiles(getUserConfigFiles())
, nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
, nixManDir(canonPath(NIX_MAN_DIR))
, nixDaemonSocketFile(canonPath(getEnv("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
, nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
{
buildUsersGroup = getuid() == 0 ? "nixbld" : "";
lockCPU = getEnv("NIX_AFFINITY_HACK") == "1";
#ifndef _WIN32
buildUsersGroup = isRootUser() ? "nixbld" : "";
#endif
allowSymlinkedStore = getEnv("NIX_IGNORE_SYMLINK_STORE") == "1";
caFile = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or(""));
if (caFile == "") {
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
if (pathExists(fn)) {
caFile = fn;
break;
}
}
auto sslOverride = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or(""));
if (sslOverride != "")
caFile = sslOverride;
/* Backwards compatibility. */
auto s = getEnv("NIX_REMOTE_SYSTEMS");
@ -59,7 +82,7 @@ Settings::Settings()
Strings ss;
for (auto & p : tokenizeString<Strings>(*s, ":"))
ss.push_back("@" + p);
builders = concatStringsSep(" ", ss);
builders = concatStringsSep("\n", ss);
}
#if defined(__linux__) && defined(SANDBOX_SHELL)
@ -71,26 +94,31 @@ Settings::Settings()
sandboxPaths = tokenizeString<StringSet>("/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
#endif
buildHook = getSelfExe().value_or("nix") + " __build-remote";
}
void loadConfFile()
void loadConfFile(AbstractConfig & config)
{
globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf");
auto applyConfigFile = [&](const Path & path) {
try {
std::string contents = readFile(path);
config.applyConfig(contents, path);
} catch (SystemError &) { }
};
applyConfigFile(settings.nixConfDir + "/nix.conf");
/* We only want to send overrides to the daemon, i.e. stuff from
~/.nix/nix.conf or the command line. */
globalConfig.resetOverridden();
config.resetOverridden();
auto files = settings.nixUserConfFiles;
for (auto file = files.rbegin(); file != files.rend(); file++) {
globalConfig.applyConfigFile(*file);
applyConfigFile(*file);
}
auto nixConfEnv = getEnv("NIX_CONFIG");
if (nixConfEnv.has_value()) {
globalConfig.applyConfig(nixConfEnv.value(), "NIX_CONFIG");
config.applyConfig(nixConfEnv.value(), "NIX_CONFIG");
}
}
@ -123,6 +151,29 @@ unsigned int Settings::getDefaultCores()
return concurrency;
}
#if __APPLE__
static bool hasVirt() {
int hasVMM;
int hvSupport;
size_t size;
size = sizeof(hasVMM);
if (sysctlbyname("kern.hv_vmm_present", &hasVMM, &size, NULL, 0) == 0) {
if (hasVMM)
return false;
}
// whether the kernel and hardware support virt
size = sizeof(hvSupport);
if (sysctlbyname("kern.hv_support", &hvSupport, &size, NULL, 0) == 0) {
return hvSupport == 1;
} else {
return false;
}
}
#endif
StringSet Settings::getDefaultSystemFeatures()
{
/* For backwards compatibility, accept some "features" that are
@ -139,6 +190,11 @@ StringSet Settings::getDefaultSystemFeatures()
features.insert("kvm");
#endif
#if __APPLE__
if (hasVirt())
features.insert("apple-virt");
#endif
return features;
}
@ -166,25 +222,24 @@ StringSet Settings::getDefaultExtraPlatforms()
return extraPlatforms;
}
bool Settings::isExperimentalFeatureEnabled(const ExperimentalFeature & feature)
{
auto & f = experimentalFeatures.get();
return std::find(f.begin(), f.end(), feature) != f.end();
}
void Settings::requireExperimentalFeature(const ExperimentalFeature & feature)
{
if (!isExperimentalFeatureEnabled(feature))
throw MissingExperimentalFeature(feature);
}
bool Settings::isWSL1()
{
#if __linux__
struct utsname utsbuf;
uname(&utsbuf);
// WSL1 uses -Microsoft suffix
// WSL2 uses -microsoft-standard suffix
return hasSuffix(utsbuf.release, "-Microsoft");
#else
return false;
#endif
}
Path Settings::getDefaultSSLCertFile()
{
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
if (pathAccessible(fn)) return fn;
return "";
}
const std::string nixVersion = PACKAGE_VERSION;
@ -195,100 +250,129 @@ NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
{SandboxMode::smDisabled, false},
});
template<> void BaseSetting<SandboxMode>::set(const std::string & str, bool append)
template<> SandboxMode BaseSetting<SandboxMode>::parse(const std::string & str) const
{
if (str == "true") value = smEnabled;
else if (str == "relaxed") value = smRelaxed;
else if (str == "false") value = smDisabled;
if (str == "true") return smEnabled;
else if (str == "relaxed") return smRelaxed;
else if (str == "false") return smDisabled;
else throw UsageError("option '%s' has invalid value '%s'", name, str);
}
template<> bool BaseSetting<SandboxMode>::isAppendable()
template<> struct BaseSetting<SandboxMode>::trait
{
return false;
}
static constexpr bool appendable = false;
};
template<> std::string BaseSetting<SandboxMode>::to_string() const
{
if (value == smEnabled) return "true";
else if (value == smRelaxed) return "relaxed";
else if (value == smDisabled) return "false";
else abort();
else unreachable();
}
template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
{
args.addFlag({
.longName = name,
.aliases = aliases,
.description = "Enable sandboxing.",
.category = category,
.handler = {[=]() { override(smEnabled); }}
.handler = {[this]() { override(smEnabled); }}
});
args.addFlag({
.longName = "no-" + name,
.aliases = aliases,
.description = "Disable sandboxing.",
.category = category,
.handler = {[=]() { override(smDisabled); }}
.handler = {[this]() { override(smDisabled); }}
});
args.addFlag({
.longName = "relaxed-" + name,
.aliases = aliases,
.description = "Enable sandboxing, but allow builds to disable it.",
.category = category,
.handler = {[=]() { override(smRelaxed); }}
.handler = {[this]() { override(smRelaxed); }}
});
}
void MaxBuildJobsSetting::set(const std::string & str, bool append)
unsigned int MaxBuildJobsSetting::parse(const std::string & str) const
{
if (str == "auto") value = std::max(1U, std::thread::hardware_concurrency());
if (str == "auto") return std::max(1U, std::thread::hardware_concurrency());
else {
if (auto n = string2Int<decltype(value)>(str))
value = *n;
return *n;
else
throw UsageError("configuration setting '%s' should be 'auto' or an integer", name);
}
}
void PluginFilesSetting::set(const std::string & str, bool append)
static void preloadNSS()
{
if (pluginsLoaded)
throw UsageError("plugin-files set after plugins were loaded, you may need to move the flag before the subcommand");
BaseSetting<Paths>::set(str, append);
/* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
load its lookup libraries in the parent before any child gets a chance to. */
static std::once_flag dns_resolve_flag;
std::call_once(dns_resolve_flag, []() {
#ifdef __GLIBC__
/* On linux, glibc will run every lookup through the nss layer.
* That means every lookup goes, by default, through nscd, which acts as a local
* cache.
* Because we run builds in a sandbox, we also remove access to nscd otherwise
* lookups would leak into the sandbox.
*
* But now we have a new problem, we need to make sure the nss_dns backend that
* does the dns lookups when nscd is not available is loaded or available.
*
* We can't make it available without leaking nix's environment, so instead we'll
* load the backend, and configure nss so it does not try to run dns lookups
* through nscd.
*
* This is technically only used for builtins:fetch* functions so we only care
* about dns.
*
* All other platforms are unaffected.
*/
if (!dlopen(LIBNSS_DNS_SO, RTLD_NOW))
warn("unable to load nss_dns backend");
// FIXME: get hosts entry from nsswitch.conf.
__nss_configure_lookup("hosts", "files dns");
#endif
});
}
static bool initLibStoreDone = false;
void assertLibStoreInitialized() {
if (!initLibStoreDone) {
printError("The program must call nix::initNix() before calling any libstore library functions.");
abort();
};
}
void initLibStore(bool loadConfig) {
if (initLibStoreDone) return;
initLibUtil();
if (loadConfig)
loadConfFile(globalConfig);
preloadNSS();
/* On macOS, don't use the per-session TMPDIR (as set e.g. by
sshd). This breaks build users because they don't have access
to the TMPDIR, in particular in nix-store --serve. */
#if __APPLE__
if (hasPrefix(defaultTempDir(), "/var/folders/"))
unsetenv("TMPDIR");
#endif
initLibStoreDone = true;
}
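
As the error message in `assertLibStoreInitialized()` spells out, library consumers must run the initialization routine before touching any store. The minimal sketch below shows that ordering; it is not from this commit, and it assumes `openStore()` from `store-api.hh` and that calling `initLibStore` directly is appropriate when the higher-level `initNix()` from libmain is not linked.

```cpp
// Minimal sketch of the required initialization order (assumptions noted above).
#include "globals.hh"
#include "store-api.hh"

int main()
{
    nix::initLibStore(true);        // load nix.conf, preload NSS, fix TMPDIR on macOS
    auto store = nix::openStore();  // safe now: assertLibStoreInitialized() passes
    // ... use the store ...
    return 0;
}
```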
void initPlugins()
{
assert(!settings.pluginFiles.pluginsLoaded);
for (const auto & pluginFile : settings.pluginFiles.get()) {
Paths pluginFiles;
try {
auto ents = readDirectory(pluginFile);
for (const auto & ent : ents)
pluginFiles.emplace_back(pluginFile + "/" + ent.name);
} catch (SysError & e) {
if (e.errNo != ENOTDIR)
throw;
pluginFiles.emplace_back(pluginFile);
}
for (const auto & file : pluginFiles) {
/* handle is purposefully leaked as there may be state in the
DSO needed by the action of the plugin. */
void *handle =
dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL);
if (!handle)
throw Error("could not dynamically open plugin file '%s': %s", file, dlerror());
}
}
/* Since plugins can add settings, try to re-apply previously
unknown settings. */
globalConfig.reapplyUnknownSettings();
globalConfig.warnUnknownSettings();
/* Tell the user if they try to set plugin-files after we've already loaded */
settings.pluginFiles.pluginsLoaded = true;
}
}

View file

@ -1,9 +1,11 @@
#pragma once
///@file
#include "types.hh"
#include "config.hh"
#include "util.hh"
#include "environment-variables.hh"
#include "experimental-features.hh"
#include "users.hh"
#include <map>
#include <limits>
@ -26,24 +28,7 @@ struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
options->addSetting(this);
}
void set(const std::string & str, bool append = false) override;
};
struct PluginFilesSetting : public BaseSetting<Paths>
{
bool pluginsLoaded = false;
PluginFilesSetting(Config * options,
const Paths & def,
const std::string & name,
const std::string & description,
const std::set<std::string> & aliases = {})
: BaseSetting<Paths>(def, true, name, description, aliases)
{
options->addSetting(this);
}
void set(const std::string & str, bool append = false) override;
unsigned int parse(const std::string & str) const override;
};
const uint32_t maxIdsPerBuild =
@ -64,40 +49,59 @@ class Settings : public Config {
bool isWSL1();
Path getDefaultSSLCertFile();
public:
Settings();
Path nixPrefix;
/* The directory where we store sources and derived files. */
/**
* The directory where we store sources and derived files.
*/
Path nixStore;
Path nixDataDir; /* !!! fix */
/* The directory where we log various operations. */
/**
* The directory where we log various operations.
*/
Path nixLogDir;
/* The directory where state is stored. */
/**
* The directory where state is stored.
*/
Path nixStateDir;
/* The directory where system configuration files are stored. */
/**
* The directory where system configuration files are stored.
*/
Path nixConfDir;
/* A list of user configuration files to load. */
/**
* A list of user configuration files to load.
*/
std::vector<Path> nixUserConfFiles;
/* The directory where the main programs are stored. */
Path nixBinDir;
/* The directory where the man pages are stored. */
/**
* The directory where the man pages are stored.
*/
Path nixManDir;
/* File name of the socket the daemon listens to. */
/**
* File name of the socket the daemon listens to.
*/
Path nixDaemonSocketFile;
Setting<std::string> storeUri{this, getEnv("NIX_REMOTE").value_or("auto"), "store",
"The default Nix store to use."};
R"(
The [URL of the Nix store](@docroot@/store/types/index.md#store-url-format)
to use for most operations.
See the
[Store Types](@docroot@/store/types/index.md)
section of the manual for supported store types and settings.
)"};
Setting<bool> keepFailed{this, false, "keep-failed",
"Whether to keep temporary directories of failed builds."};
@ -114,61 +118,98 @@ public:
)",
{"build-fallback"}};
/* Whether to show build log output in real time. */
/**
* Whether to show build log output in real time.
*/
bool verboseBuild = true;
Setting<size_t> logLines{this, 10, "log-lines",
Setting<size_t> logLines{this, 25, "log-lines",
"The number of lines of the tail of "
"the log to show if a build fails."};
MaxBuildJobsSetting maxBuildJobs{
this, 1, "max-jobs",
R"(
This option defines the maximum number of jobs that Nix will try to
build in parallel. The default is `1`. The special value `auto`
causes Nix to use the number of CPUs in your system. `0` is useful
when using remote builders to prevent any local builds (except for
`preferLocalBuild` derivation attribute which executes locally
regardless). It can be overridden using the `--max-jobs` (`-j`)
command line switch.
Maximum number of jobs that Nix will try to build locally in parallel.
The special value `auto` causes Nix to use the number of CPUs in your system.
Use `0` to disable local builds and directly use the remote machines specified in [`builders`](#conf-builders).
This will not affect derivations that have [`preferLocalBuild = true`](@docroot@/language/advanced-attributes.md#adv-attr-preferLocalBuild), which are always built locally.
> **Note**
>
> The number of CPU cores to use for each build job is independently determined by the [`cores`](#conf-cores) setting.
<!-- TODO(@fricklerhandwerk): would be good to have those shorthands for common options as part of the specification -->
The setting can be overridden using the `--max-jobs` (`-j`) command line switch.
)",
{"build-max-jobs"}};
Setting<unsigned int> maxSubstitutionJobs{
this, 16, "max-substitution-jobs",
R"(
This option defines the maximum number of substitution jobs that Nix
will try to run in parallel. The default is `16`. The minimum value
one can choose is `1` and lower values will be interpreted as `1`.
)",
{"substitution-max-jobs"}};
Setting<unsigned int> buildCores{
this,
getDefaultCores(),
"cores",
R"(
Sets the value of the `NIX_BUILD_CORES` environment variable in the
invocation of builders. Builders can use this variable at their
discretion to control the maximum amount of parallelism. For
instance, in Nixpkgs, if the derivation attribute
`enableParallelBuilding` is set to `true`, the builder passes the
`-jN` flag to GNU Make. It can be overridden using the `--cores`
command line switch and defaults to `1`. The value `0` means that
the builder should use all available CPU cores in the system.
)",
{"build-cores"}, false};
Sets the value of the `NIX_BUILD_CORES` environment variable in the [invocation of the `builder` executable](@docroot@/language/derivations.md#builder-execution) of a derivation.
The `builder` executable can use this variable to control its own maximum amount of parallelism.
/* Read-only mode. Don't copy stuff to the store, don't change
the database. */
<!--
FIXME(@fricklerhandwerk): I don't think this should even be mentioned here.
A very generic example using `derivation` and `xargs` may be more appropriate to explain the mechanism.
Using `mkDerivation` as an example requires being aware of that there are multiple independent layers that are completely opaque here.
-->
For instance, in Nixpkgs, if the attribute `enableParallelBuilding` for the `mkDerivation` build helper is set to `true`, it will pass the `-j${NIX_BUILD_CORES}` flag to GNU Make.
The value `0` means that the `builder` should use all available CPU cores in the system.
> **Note**
>
> The number of parallel local Nix build jobs is independently controlled with the [`max-jobs`](#conf-max-jobs) setting.
)",
{"build-cores"},
// Don't document the machine-specific default value
false};
/**
* Read-only mode. Don't copy stuff to the store, don't change
* the database.
*/
bool readOnlyMode = false;
Setting<std::string> thisSystem{
this, SYSTEM, "system",
R"(
This option specifies the canonical Nix system name of the current
installation, such as `i686-linux` or `x86_64-darwin`. Nix can only
build derivations whose `system` attribute equals the value
specified here. In general, it never makes sense to modify this
value from its default, since you can use it to lie about the
platform you are building on (e.g., perform a Mac OS build on a
Linux machine; the result would obviously be wrong). It only makes
sense if the Nix binaries can run on multiple platforms, e.g.,
universal binaries that run on `x86_64-linux` and `i686-linux`.
The system type of the current Nix installation.
Nix will only build a given [derivation](@docroot@/language/derivations.md) locally when its `system` attribute equals any of the values specified here or in [`extra-platforms`](#conf-extra-platforms).
It defaults to the canonical Nix system name detected by `configure`
at build time.
The default value is set when Nix itself is compiled for the system it will run on.
The following system types are widely used, as Nix is actively supported on these platforms:
- `x86_64-linux`
- `x86_64-darwin`
- `i686-linux`
- `aarch64-linux`
- `aarch64-darwin`
- `armv6l-linux`
- `armv7l-linux`
In general, you do not have to modify this setting.
While you can force Nix to run a Darwin-specific `builder` executable on a Linux machine, the result would obviously be wrong.
This value is available in the Nix language as
[`builtins.currentSystem`](@docroot@/language/builtins.md#builtins-currentSystem)
if the
[`eval-system`](#conf-eval-system)
configuration option is set as the empty string.
)"};
Setting<time_t> maxSilentTime{
@ -200,26 +241,150 @@ public:
)",
{"build-timeout"}};
PathSetting buildHook{this, true, "", "build-hook",
"The path of the helper program that executes builds to remote machines."};
Setting<Strings> buildHook{this, {"nix", "__build-remote"}, "build-hook",
R"(
The path to the helper program that executes remote builds.
Nix communicates with the build hook over `stdio` using a custom protocol to request builds that cannot be performed directly by the Nix daemon.
The default value is the internal Nix binary that implements remote building.
> **Important**
>
> Change this setting only if you really know what you're doing.
)"};
Setting<std::string> builders{
this, "@" + nixConfDir + "/machines", "builders",
R"(
A semicolon-separated list of build machines.
For the exact format and examples, see [the manual chapter on remote builds](../advanced-topics/distributed-builds.md)
A semicolon- or newline-separated list of build machines.
In addition to the [usual ways of setting configuration options](@docroot@/command-ref/conf-file.md), the value can be read from a file by prefixing its absolute path with `@`.
> **Example**
>
> This is the default setting:
>
> ```
> builders = @/etc/nix/machines
> ```
Each machine specification consists of the following elements, separated by spaces.
Only the first element is required.
To leave a field at its default, set it to `-`.
1. The URI of the remote store in the format `ssh://[username@]hostname`.
> **Example**
>
> `ssh://nix@mac`
For backward compatibility, `ssh://` may be omitted.
The hostname may be an alias defined in `~/.ssh/config`.
2. A comma-separated list of [Nix system types](@docroot@/development/building.md#system-type).
If omitted, this defaults to the local platform type.
> **Example**
>
> `aarch64-darwin`
It is possible for a machine to support multiple platform types.
> **Example**
>
> `i686-linux,x86_64-linux`
3. The SSH identity file to be used to log in to the remote machine.
If omitted, SSH will use its regular identities.
> **Example**
>
> `/home/user/.ssh/id_mac`
4. The maximum number of builds that Nix will execute in parallel on the machine.
Typically this should be equal to the number of CPU cores.
5. The speed factor, indicating the relative speed of the machine as a positive integer.
If there are multiple machines of the right type, Nix will prefer the fastest, taking load into account.
6. A comma-separated list of supported [system features](#conf-system-features).
A machine will only be used to build a derivation if all the features in the derivation's [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute are supported by that machine.
7. A comma-separated list of required [system features](#conf-system-features).
A machine will only be used to build a derivation if all of the machine's required features appear in the derivation's [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute.
8. The (base64-encoded) public host key of the remote machine.
If omitted, SSH will use its regular `known_hosts` file.
The value for this field can be obtained via `base64 -w0`.
> **Example**
>
> Multiple builders specified on the command line:
>
> ```console
> --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd'
> ```
> **Example**
>
> This specifies several machines that can perform `i686-linux` builds:
>
> ```
> nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy 8 1 kvm
> nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy 8 2
> nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy 1 2 kvm benchmark
> ```
>
> However, `poochie` will only build derivations that have the attribute
>
> ```nix
> requiredSystemFeatures = [ "benchmark" ];
> ```
>
> or
>
> ```nix
> requiredSystemFeatures = [ "benchmark" "kvm" ];
> ```
>
> `itchy` cannot do builds that require `kvm`, but `scratchy` does support such builds.
> For regular builds, `itchy` will be preferred over `scratchy` because it has a higher speed factor.
For Nix to use substituters, the calling user must be in the [`trusted-users`](#conf-trusted-users) list.
> **Note**
>
> A build machine must be accessible via SSH and have Nix installed.
> `nix` must be available in `$PATH` for the user connecting over SSH.
> **Warning**
>
> If you are building via the Nix daemon (default), the Nix daemon user account on the local machine (that is, `root`) requires access to a user account on the remote machine (not necessarily `root`).
>
> If you can't or don't want to configure `root` to be able to access the remote machine, set [`store`](#conf-store) to any [local store](@docroot@/store/types/local-store.html), e.g. by passing `--store /tmp` to the command on the local machine.
To build only on remote machines and disable local builds, set [`max-jobs`](#conf-max-jobs) to 0.
If you want the remote machines to use substituters, set [`builders-use-substitutes`](#conf-builders-use-substituters) to `true`.
)",
{}, false};
Setting<bool> alwaysAllowSubstitutes{
this, false, "always-allow-substitutes",
R"(
If set to `true`, Nix will ignore the [`allowSubstitutes`](@docroot@/language/advanced-attributes.md) attribute in derivations and always attempt to use [available substituters](#conf-substituters).
)"};
Setting<bool> buildersUseSubstitutes{
this, false, "builders-use-substitutes",
R"(
If set to `true`, Nix will instruct remote build machines to use
their own binary substitutes if available. In practical terms, this
means that remote hosts will fetch as many build dependencies as
possible from their own substitutes (e.g, from `cache.nixos.org`),
instead of waiting for this host to upload them all. This can
drastically reduce build times if the network connection between
this computer and the remote build host is slow.
If set to `true`, Nix will instruct [remote build machines](#conf-builders) to use their own [`substituters`](#conf-substituters) if available.
It means that remote build hosts will fetch as many dependencies as possible from their own substituters (e.g, from `cache.nixos.org`) instead of waiting for the local machine to upload them all.
This can drastically reduce build times if the network connection between the local machine and the remote build host is slow.
)"};
Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space",
@ -244,8 +409,10 @@ public:
Setting<bool> useSQLiteWAL{this, !isWSL1(), "use-sqlite-wal",
"Whether SQLite should use WAL mode."};
#ifndef _WIN32
Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
"Whether to call `sync()` before registering a path as valid."};
#endif
Setting<bool> useSubstitutes{
this, true, "substitute",
@ -286,8 +453,8 @@ public:
If the build users group is empty, builds will be performed under
the uid of the Nix process (that is, the uid of the caller if
`NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
`NIX_REMOTE` is `daemon`). Obviously, this should not be used in
multi-user settings with untrusted users.
`NIX_REMOTE` is `daemon`). Obviously, this should not be used
with a nix daemon accessible to untrusted clients.
Defaults to `nixbld` when running as root, *empty* otherwise.
)",
@ -299,17 +466,7 @@ public:
users in `build-users-group`.
UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
> **Warning**
> This is an experimental feature.
To enable it, add the following to [`nix.conf`](#):
```
extra-experimental-features = auto-allocate-uids
auto-allocate-uids = true
```
)"};
)", {}, true, Xp::AutoAllocateUids};
Setting<uint32_t> startId{this,
#if __linux__
@ -336,18 +493,8 @@ public:
Whether to execute builds inside cgroups.
This is only supported on Linux.
Cgroups are required and enabled automatically for derivations
Cgroups are required and enabled automatically for derivations
that require the `uid-range` system feature.
> **Warning**
> This is an experimental feature.
To enable it, add the following to [`nix.conf`](#):
```
extra-experimental-features = cgroups
use-cgroups = true
```
)"};
#endif
@ -449,9 +596,6 @@ public:
)",
{"env-keep-derivations"}};
/* Whether to lock the Nix client and worker to the same CPU. */
bool lockCPU;
Setting<SandboxMode> sandboxMode{
this,
#if __linux__
@ -498,6 +642,9 @@ public:
for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl` will
only be mounted in the sandbox if it exists in the host filesystem.
If the source is in the Nix store, then its closure will be added to
the sandbox as well.
Depending on how Nix was built, the default value for this option
may be empty or provide `/bin/sh` as a bind-mount of `bash`.
)",
@ -506,20 +653,60 @@ public:
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
#ifndef _WIN32
Setting<bool> requireDropSupplementaryGroups{this, isRootUser(), "require-drop-supplementary-groups",
R"(
Following the principle of least privilege,
Nix will attempt to drop supplementary groups when building with sandboxing.
However this can fail under some circumstances.
For example, if the user lacks the `CAP_SETGID` capability.
Search `setgroups(2)` for `EPERM` to find more detailed information on this.
If you encounter such a failure, setting this option to `false` will let you ignore it and continue.
But before doing so, you should consider the security implications carefully.
Not dropping supplementary groups means the build sandbox will be less restricted than intended.
This option defaults to `true` when the user is root
(since `root` usually has permissions to call setgroups)
and `false` otherwise.
)"};
#endif
#if __linux__
Setting<std::string> sandboxShmSize{
this, "50%", "sandbox-dev-shm-size",
R"(
This option determines the maximum size of the `tmpfs` filesystem
mounted on `/dev/shm` in Linux sandboxes. For the format, see the
description of the `size` option of `tmpfs` in mount8. The default
is `50%`.
*Linux only*
This option determines the maximum size of the `tmpfs` filesystem
mounted on `/dev/shm` in Linux sandboxes. For the format, see the
description of the `size` option of `tmpfs` in mount(8). The default
is `50%`.
)"};
Setting<Path> sandboxBuildDir{this, "/build", "sandbox-build-dir",
"The build directory inside the sandbox."};
R"(
*Linux only*
The build directory inside the sandbox.
This directory is backed by [`build-dir`](#conf-build-dir) on the host.
)"};
#endif
Setting<std::optional<Path>> buildDir{this, std::nullopt, "build-dir",
R"(
The directory on the host, in which derivations' temporary build directories are created.
If not set, Nix will use the system temporary directory indicated by the `TMPDIR` environment variable.
Note that builds are often performed by the Nix daemon, so its `TMPDIR` is used, and not that of the Nix command line interface.
This is also the location where [`--keep-failed`](@docroot@/command-ref/opt-common.md#opt-keep-failed) leaves its files.
If Nix runs without sandbox, or if the platform does not support sandboxing with bind mounts (e.g. macOS), then the [`builder`](@docroot@/language/derivations.md#attr-builder)'s environment will contain this directory, instead of the virtual location [`sandbox-build-dir`](#conf-sandbox-build-dir).
)"};
Setting<PathSet> allowedImpureHostPrefixes{this, {}, "allowed-impure-host-deps",
"Which prefixes to allow derivations to ask for access to (primarily for Darwin)."};
@ -538,8 +725,8 @@ public:
line.
)"};
PathSetting diffHook{
this, true, "", "diff-hook",
OptionalPathSetting diffHook{
this, std::nullopt, "diff-hook",
R"(
Absolute path to an executable capable of diffing build
results. The hook is executed if `run-diff-hook` is true, and the
@ -574,11 +761,16 @@ public:
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
"trusted-public-keys",
R"(
A whitespace-separated list of public keys. When paths are copied
from another Nix store (such as a binary cache), they must be
signed with one of these keys. For example:
`cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=`.
A whitespace-separated list of public keys.
At least one of the following conditions must be met
for Nix to accept copying a store object from another
Nix store (such as a [substituter](#conf-substituters)):
- the store object has been signed using a key in the trusted keys list
- the [`require-sigs`](#conf-require-sigs) option has been set to `false`
- the store URL is configured with `trusted=true`
- the store object is [content-addressed](@docroot@/glossary.md#gloss-content-addressed-store-object)
)",
{"binary-cache-public-keys"}};
@ -630,96 +822,118 @@ public:
getDefaultExtraPlatforms(),
"extra-platforms",
R"(
Platforms other than the native one which this machine is capable of
building for. This can be useful for supporting additional
architectures on compatible machines: i686-linux can be built on
x86\_64-linux machines (and the default for this setting reflects
this); armv7 is backwards-compatible with armv6 and armv5tel; some
aarch64 machines can also natively run 32-bit ARM code; and
qemu-user may be used to support non-native platforms (though this
may be slow and buggy). Most values for this are not enabled by
default because build systems will often misdetect the target
platform and generate incompatible code, so you may wish to
cross-check the results of using this option against proper
natively-built versions of your derivations.
)", {}, false};
System types of executables that can be run on this machine.
Nix will only build a given [derivation](@docroot@/language/derivations.md) locally when its `system` attribute equals any of the values specified here or in the [`system` option](#conf-system).
Setting this can be useful to build derivations locally on compatible machines:
- `i686-linux` executables can be run on `x86_64-linux` machines (set by default)
- `x86_64-darwin` executables can be run on macOS `aarch64-darwin` with Rosetta 2 (set by default where applicable)
- `armv6` and `armv5tel` executables can be run on `armv7`
- some `aarch64` machines can also natively run 32-bit ARM code
- `qemu-user` may be used to support non-native platforms (though this
may be slow and buggy)
Build systems will usually detect the target platform to be the current physical system and therefore produce machine code incompatible with what may be intended in the derivation.
You should design your derivation's `builder` accordingly and cross-check the results when using this option against natively-built versions of your derivation.
)",
{},
// Don't document the machine-specific default value
false};
Setting<StringSet> systemFeatures{
this,
getDefaultSystemFeatures(),
"system-features",
R"(
A set of system features supported by this machine, e.g. `kvm`.
Derivations can express a dependency on such features through the
derivation attribute `requiredSystemFeatures`. For example, the
attribute
A set of system features supported by this machine.
requiredSystemFeatures = [ "kvm" ];
This complements the [`system`](#conf-system) and [`extra-platforms`](#conf-extra-platforms) configuration options and the corresponding [`system`](@docroot@/language/derivations.md#attr-system) attribute on derivations.
ensures that the derivation can only be built on a machine with the
`kvm` feature.
A derivation can require system features in the [`requiredSystemFeatures` attribute](@docroot@/language/advanced-attributes.md#adv-attr-requiredSystemFeatures), and the machine to build the derivation must have them.
This setting by default includes `kvm` if `/dev/kvm` is accessible,
and the pseudo-features `nixos-test`, `benchmark` and `big-parallel`
that are used in Nixpkgs to route builds to specific machines.
)", {}, false};
System features are user-defined, but Nix sets the following defaults:
- `apple-virt`
Included on Darwin if virtualization is available.
- `kvm`
Included on Linux if `/dev/kvm` is accessible.
- `nixos-test`, `benchmark`, `big-parallel`
These historical pseudo-features are always enabled for backwards compatibility, as they are used in Nixpkgs to route Hydra builds to specific machines.
- `ca-derivations`
Included by default if the [`ca-derivations` experimental feature](@docroot@/development/experimental-features.md#xp-feature-ca-derivations) is enabled.
This system feature is implicitly required by derivations with the [`__contentAddressed` attribute](@docroot@/language/advanced-attributes.md#adv-attr-__contentAddressed).
- `recursive-nix`
Included by default if the [`recursive-nix` experimental feature](@docroot@/development/experimental-features.md#xp-feature-recursive-nix) is enabled.
- `uid-range`
On Linux, Nix can run builds in a user namespace where they run as root (UID 0) and have 65,536 UIDs available.
This is primarily useful for running containers such as `systemd-nspawn` inside a Nix build. For an example, see [`tests/systemd-nspawn/nix`][nspawn].
[nspawn]: https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix.
Included by default on Linux if the [`auto-allocate-uids`](#conf-auto-allocate-uids) setting is enabled.
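For example, to advertise only KVM support together with the historical pseudo-features (an illustrative value; the actual default is machine-specific):
```
system-features = kvm nixos-test benchmark big-parallel
```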
)",
{},
// Don't document the machine-specific default value
false};
Setting<Strings> substituters{
this,
Strings{"https://cache.nixos.org/"},
"substituters",
R"(
A list of URLs of substituters, separated by whitespace. Substituters
are tried based on their Priority value, which each substituter can set
independently. Lower value means higher priority.
The default is `https://cache.nixos.org`, with a Priority of 40.
A list of [URLs of Nix stores](@docroot@/store/types/index.md#store-url-format) to be used as substituters, separated by whitespace.
A substituter is an additional [store](@docroot@/glossary.md#gloss-store) from which Nix can obtain [store objects](@docroot@/store/store-object.md) instead of building them.
Nix will copy a store path from a remote store only if one
of the following is true:
Substituters are tried based on their priority value, which each substituter can set independently.
Lower value means higher priority.
The default is `https://cache.nixos.org`, which has a priority of 40.
- the store object is signed by one of the [`trusted-public-keys`](#conf-trusted-public-keys)
- the substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
- the [`require-sigs`](#conf-require-sigs) option has been set to `false`
- the store object is [output-addressed](glossary.md#gloss-output-addressed-store-object)
At least one of the following conditions must be met for Nix to use a substituter:
- The substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
- The user calling Nix is in the [`trusted-users`](#conf-trusted-users) list
In addition, each store path should be trusted as described in [`trusted-public-keys`](#conf-trusted-public-keys)
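For example, to consult an additional private cache alongside the default (the second URL is a placeholder):
```
substituters = https://cache.nixos.org/ https://cache.example.org
```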
)",
{"binary-caches"}};
Setting<StringSet> trustedSubstituters{
this, {}, "trusted-substituters",
R"(
A list of URLs of substituters, separated by whitespace. These are
not used by default, but can be enabled by users of the Nix daemon
by specifying `--option substituters urls` on the command
line. Unprivileged users are only allowed to pass a subset of the
URLs listed in `substituters` and `trusted-substituters`.
A list of [Nix store URLs](@docroot@/store/types/index.md#store-url-format), separated by whitespace.
These are not used by default, but users of the Nix daemon can enable them by specifying [`substituters`](#conf-substituters).
Unprivileged users (those set in only [`allowed-users`](#conf-allowed-users) but not [`trusted-users`](#conf-trusted-users)) can pass as `substituters` only those URLs listed in `trusted-substituters`.
)",
{"trusted-binary-caches"}};
Setting<Strings> trustedUsers{
this, {"root"}, "trusted-users",
R"(
A list of names of users (separated by whitespace) that have
additional rights when connecting to the Nix daemon, such as the
ability to specify additional binary caches, or to import unsigned
NARs. You can also specify groups by prefixing them with `@`; for
instance, `@wheel` means all users in the `wheel` group. The default
is `root`.
> **Warning**
>
> Adding a user to `trusted-users` is essentially equivalent to
> giving that user root access to the system. For example, the user
> can set `sandbox-paths` and thereby obtain read access to
> directories that are otherwise inaccessible to them.
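For example (illustrative), to trust `root` and all members of the `wheel` group:
```
trusted-users = root @wheel
```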
)"};
Setting<unsigned int> ttlNegativeNarInfoCache{
this, 3600, "narinfo-cache-negative-ttl",
R"(
The TTL in seconds for negative lookups. If a store path is queried
from a substituter but was not found, there will be a negative
lookup cached in the local disk cache database for the specified
duration.
The TTL in seconds for negative lookups.
If a store path is queried from a [substituter](#conf-substituters) but was not found, there will be a negative lookup cached in the local disk cache database for the specified duration.
Set to `0` to force updating the lookup cache.
To wipe the lookup cache completely:
```shell-session
$ rm $HOME/.cache/nix/binary-cache-v*.sqlite*
# rm /root/.cache/nix/binary-cache-v*.sqlite*
```
)"};
Setting<unsigned int> ttlPositiveNarInfoCache{
@ -735,18 +949,6 @@ public:
mismatch if the build isn't reproducible.
)"};
    /* Who we trust to use the daemon in safe ways */
Setting<Strings> allowedUsers{
this, {"*"}, "allowed-users",
R"(
A list of names of users (separated by whitespace) that are allowed
to connect to the Nix daemon. As with the `trusted-users` option,
you can specify groups by prefixing them with `@`. Also, you can
allow all users by specifying `*`. The default is `*`.
Note that trusted users are always allowed to connect.
)"};
Setting<bool> printMissing{this, true, "print-missing",
"Whether to print what paths need to be built or downloaded."};
@ -846,8 +1048,22 @@ public:
> `.netrc`.
)"};
/* Path to the SSL CA file used */
Path caFile;
Setting<Path> caFile{
this, getDefaultSSLCertFile(), "ssl-cert-file",
R"(
The path of a file containing CA certificates used to
authenticate `https://` downloads. Nix by default will use
the first of the following files that exists:
1. `/etc/ssl/certs/ca-certificates.crt`
2. `/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt`
The path can be overridden by the following environment
variables, in order of precedence:
1. `NIX_SSL_CERT_FILE`
2. `SSL_CERT_FILE`
)"};
#if __linux__
Setting<bool> filterSyscalls{
@ -872,7 +1088,9 @@ public:
may be useful in certain scenarios (e.g. to spin up containers or
set up userspace network interfaces in tests).
)"};
#endif
#if HAVE_ACL_SUPPORT
Setting<StringSet> ignoredAcls{
this, {"security.selinux", "system.nfs4_acl", "security.csm"}, "ignored-acls",
R"(
@ -887,12 +1105,11 @@ public:
this, {}, "hashed-mirrors",
R"(
A list of web servers used by `builtins.fetchurl` to obtain files by
hash. The default is `http://tarballs.nixos.org/`. Given a hash type
*ht* and a base-16 hash *h*, Nix will try to download the file from
*hashed-mirror*/*ht*/*h*. This allows files to be downloaded even if
they have disappeared from their original URI. For example, given
the default mirror `http://tarballs.nixos.org/`, when building the
derivation
hash. Given a hash algorithm *ha* and a base-16 hash *h*, Nix will try to
download the file from *hashed-mirror*/*ha*/*h*. This allows files to
be downloaded even if they have disappeared from their original URI.
For example, given an example mirror `http://tarballs.nixos.org/`,
when building the derivation
```nix
builtins.fetchurl {
@ -916,7 +1133,10 @@ public:
)"};
Setting<uint64_t> maxFree{
this, std::numeric_limits<uint64_t>::max(), "max-free",
// n.b. this is deliberately int64 max rather than uint64 max because
// this goes through the Nix language JSON parser and thus needs to be
// representable in Nix language integers.
this, std::numeric_limits<int64_t>::max(), "max-free",
R"(
When a garbage collection is triggered by the `min-free` option, it
stops as soon as `max-free` bytes are available. The default is
@ -926,39 +1146,6 @@ public:
Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval",
"Number of seconds between checking free disk space."};
PluginFilesSetting pluginFiles{
this, {}, "plugin-files",
R"(
A list of plugin files to be loaded by Nix. Each of these files will
be dlopened by Nix, allowing them to affect execution through static
initialization. In particular, these plugins may construct static
instances of RegisterPrimOp to add new primops or constants to the
expression language, RegisterStoreImplementation to add new store
implementations, RegisterCommand to add new subcommands to the `nix`
command, and RegisterSetting to add new nix config settings. See the
constructors for those types for more details.
Warning! These APIs are inherently unstable and may change from
release to release.
Since these files are loaded into the same address space as Nix
itself, they must be DSOs compatible with the instance of Nix
running at the time (i.e. compiled against the same headers, not
linked to any incompatible libraries). They should not be linked to
any Nix libs directly, as those will be available already at load
time.
If an entry in the list is a directory, all files in the directory
are loaded as plugins (non-recursively).
)"};
Setting<std::set<ExperimentalFeature>> experimentalFeatures{this, {}, "experimental-features",
"Experimental Nix features to enable."};
bool isExperimentalFeatureEnabled(const ExperimentalFeature &);
void requireExperimentalFeature(const ExperimentalFeature &);
Setting<size_t> narBufferSize{this, 32 * 1024 * 1024, "nar-buffer-size",
"Maximum size of NARs before spilling them to disk."};
@ -974,21 +1161,111 @@ public:
resolves to a different location from that of the build machine. You
can enable this setting if you are sure you're not going to do that.
)"};
Setting<bool> useXDGBaseDirectories{
this, false, "use-xdg-base-directories",
R"(
If set to `true`, Nix will conform to the [XDG Base Directory Specification] for files in `$HOME`.
The environment variables used to implement this are documented in the [Environment Variables section](@docroot@/command-ref/env-common.md).
[XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
> **Warning**
> This changes the location of some well-known symlinks that Nix creates, which might break tools that rely on the old, non-XDG-conformant locations.
In particular, the following locations change:
| Old | New |
|-------------------|--------------------------------|
| `~/.nix-profile` | `$XDG_STATE_HOME/nix/profile` |
| `~/.nix-defexpr` | `$XDG_STATE_HOME/nix/defexpr` |
| `~/.nix-channels` | `$XDG_STATE_HOME/nix/channels` |
If you already have Nix installed and are using [profiles](@docroot@/package-management/profiles.md) or [channels](@docroot@/command-ref/nix-channel.md), you should migrate manually when you enable this option.
If `$XDG_STATE_HOME` is not set, use `$HOME/.local/state/nix` instead of `$XDG_STATE_HOME/nix`.
This can be achieved with the following shell commands:
```sh
nix_state_home=${XDG_STATE_HOME-$HOME/.local/state}/nix
mkdir -p $nix_state_home
mv $HOME/.nix-profile $nix_state_home/profile
mv $HOME/.nix-defexpr $nix_state_home/defexpr
mv $HOME/.nix-channels $nix_state_home/channels
```
)"
};
Setting<StringMap> impureEnv {this, {}, "impure-env",
R"(
A list of items, each in the format of:
- `name=value`: Set environment variable `name` to `value`.
If the user is trusted (see `trusted-users` option), when building
a fixed-output derivation, environment variables set in this option
will be passed to the builder if they are listed in [`impureEnvVars`](@docroot@/language/advanced-attributes.md#adv-attr-impureEnvVars).
This option is useful for, e.g., setting `https_proxy` for
fixed-output derivations and in a multi-user Nix installation, or
setting private access tokens when fetching a private repository.
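For example (with a placeholder proxy address), to make `https_proxy` available to fixed-output derivations that list it in their `impureEnvVars`:
```
impure-env = https_proxy=http://proxy.example.com:3128
```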
)",
{}, // aliases
true, // document default
Xp::ConfigurableImpureEnv
};
Setting<std::string> upgradeNixStorePathUrl{
this,
"https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix",
"upgrade-nix-store-path-url",
R"(
Used by `nix upgrade-nix`, the URL of the file that contains the
store paths of the latest Nix release.
)"
};
Setting<uint64_t> warnLargePathThreshold{
this,
// n.b. this is deliberately int64 max rather than uint64 max because
// this goes through the Nix language JSON parser and thus needs to be
// representable in Nix language integers.
std::numeric_limits<int64_t>::max(),
"warn-large-path-threshold",
R"(
Warn when copying a path larger than this number of bytes to the Nix store
(as determined by its NAR serialisation).
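For example, to warn when a path of roughly 1 GiB or more is copied (an illustrative value, given in bytes):
```
warn-large-path-threshold = 1073741824
```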
)"
};
};
// FIXME: don't use a global variable.
extern Settings settings;
/* This should be called after settings are initialized, but before
anything else */
void initPlugins();
void loadConfFile();
/**
* Load the configuration (from `nix.conf`, `NIX_CONFIG`, etc.) into the
* given configuration object.
*
* Usually called with `globalConfig`.
*/
void loadConfFile(AbstractConfig & config);
// Used by the Settings constructor
std::vector<Path> getUserConfigFiles();
extern const std::string nixVersion;
/**
* @param loadConfig Whether to load configuration from `nix.conf`, `NIX_CONFIG`, etc. May be disabled for unit tests.
 * @note When using libexpr and/or libmain, this is not sufficient. See initNix().
*/
void initLibStore(bool loadConfig = true);
/**
* It's important to initialize before doing _anything_, which is why we
* call upon the programmer to handle this correctly. However, we only add
 * this in a few key locations, so as not to litter the code.
*/
void assertLibStoreInitialized();
}

@ -1,4 +1,4 @@
#include "binary-cache-store.hh"
#include "http-binary-cache-store.hh"
#include "filetransfer.hh"
#include "globals.hh"
#include "nar-info-disk-cache.hh"
@ -8,19 +8,37 @@ namespace nix {
MakeError(UploadToHTTP, Error);
struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
const std::string name() override { return "Http Binary Cache Store"; }
};
HttpBinaryCacheStoreConfig::HttpBinaryCacheStoreConfig(
std::string_view scheme,
std::string_view _cacheUri,
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, cacheUri(
std::string { scheme }
+ "://"
+ (!_cacheUri.empty()
? _cacheUri
: throw UsageError("`%s` Store requires a non-empty authority in Store URL", scheme)))
{
while (!cacheUri.empty() && cacheUri.back() == '/')
cacheUri.pop_back();
}
std::string HttpBinaryCacheStoreConfig::doc()
{
return
#include "http-binary-cache-store.md"
;
}
class HttpBinaryCacheStore : public virtual HttpBinaryCacheStoreConfig, public virtual BinaryCacheStore
{
private:
Path cacheUri;
struct State
{
bool enabled = true;
@ -32,19 +50,15 @@ private:
public:
HttpBinaryCacheStore(
const std::string & scheme,
const Path & _cacheUri,
std::string_view scheme,
PathView cacheUri,
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, HttpBinaryCacheStoreConfig(params)
, HttpBinaryCacheStoreConfig(scheme, cacheUri, params)
, Store(params)
, BinaryCacheStore(params)
, cacheUri(scheme + "://" + _cacheUri)
{
if (cacheUri.back() == '/')
cacheUri.pop_back();
diskCache = getNarInfoDiskCache();
}
@ -56,7 +70,7 @@ public:
void init() override
{
// FIXME: do this lazily?
if (auto cacheInfo = diskCache->cacheExists(cacheUri)) {
if (auto cacheInfo = diskCache->upToDateCacheExists(cacheUri)) {
wantMassQuery.setDefault(cacheInfo->wantMassQuery);
priority.setDefault(cacheInfo->priority);
} else {
@ -69,14 +83,6 @@ public:
}
}
static std::set<std::string> uriSchemes()
{
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
auto ret = std::set<std::string>({"http", "https"});
if (forceHttp) ret.insert("file");
return ret;
}
protected:
void maybeDisable()
@ -187,6 +193,18 @@ protected:
}});
}
/**
     * This isn't necessarily read-only. We support "upsert" now, so we
* have a notion of authentication via HTTP POST/PUT.
*
* For now, we conservatively say we don't know.
*
* \todo try to expose our HTTP authentication status.
*/
std::optional<TrustedFlag> isTrustedClient() override
{
return std::nullopt;
}
};
static RegisterStoreImplementation<HttpBinaryCacheStore, HttpBinaryCacheStoreConfig> regHttpBinaryCacheStore;

@ -0,0 +1,30 @@
#include "binary-cache-store.hh"
namespace nix {
struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
HttpBinaryCacheStoreConfig(std::string_view scheme, std::string_view _cacheUri, const Params & params);
Path cacheUri;
const std::string name() override
{
return "HTTP Binary Cache Store";
}
static std::set<std::string> uriSchemes()
{
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
auto ret = std::set<std::string>({"http", "https"});
if (forceHttp)
ret.insert("file");
return ret;
}
std::string doc() override;
};
}

@ -0,0 +1,8 @@
R"(
**Store URL format**: `http://...`, `https://...`
This store allows a binary cache to be accessed via the HTTP
protocol.
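For example (with a placeholder cache URL), the following builds or substitutes `nixpkgs#hello` and copies it to an HTTPS binary cache, assuming the server accepts uploads:
```
# nix copy --to https://cache.example.org nixpkgs#hello
```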
)"

@ -0,0 +1,45 @@
#include "indirect-root-store.hh"
namespace nix {
void IndirectRootStore::makeSymlink(const Path & link, const Path & target)
{
/* Create directories up to `gcRoot'. */
createDirs(dirOf(link));
/* Create the new symlink. */
Path tempLink = fmt("%1%.tmp-%2%-%3%", link, getpid(), rand());
createSymlink(target, tempLink);
/* Atomically replace the old one. */
std::filesystem::rename(tempLink, link);
}
Path IndirectRootStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot)
{
Path gcRoot(canonPath(_gcRoot));
if (isInStore(gcRoot))
throw Error(
"creating a garbage collector root (%1%) in the Nix store is forbidden "
"(are you running nix-build inside the store?)",
gcRoot);
/* Register this root with the garbage collector, if it's
running. This should be superfluous since the caller should
       have already registered this root, but let's be on the safe
side. */
addTempRoot(storePath);
/* Don't clobber the link if it already exists and doesn't
point to the Nix store. */
if (pathExists(gcRoot) && (!std::filesystem::is_symlink(gcRoot) || !isInStore(readLink(gcRoot))))
throw Error("cannot create symlink '%1%'; already exists", gcRoot);
makeSymlink(gcRoot, printStorePath(storePath));
addIndirectRoot(gcRoot);
return gcRoot;
}
}

@ -0,0 +1,75 @@
#pragma once
///@file
#include "local-fs-store.hh"
namespace nix {
/**
* Mix-in class for implementing permanent roots as a pair of a direct
* (strong) reference and indirect weak reference to the first
* reference.
*
* See methods for details on the operations it represents.
*
* @note
* To understand the purpose of this class, it might help to do some
* "closed-world" rather than "open-world" reasoning, and consider the
* problem it solved for us. This class was factored out from
* `LocalFSStore` in order to support the following table, which
* contains 4 concrete store types (non-abstract classes, exposed to the
* user), and how they implemented the two GC root methods:
*
* @note
* | | `addPermRoot()` | `addIndirectRoot()` |
* |-------------------|-----------------|---------------------|
* | `LocalStore` | local | local |
* | `UDSRemoteStore` | local | remote |
* | `SSHStore` | doesn't have | doesn't have |
* | `MountedSSHStore` | remote | doesn't have |
*
* @note
* Note how only the local implementations of `addPermRoot()` need
* `addIndirectRoot()`; that is what this class enforces. Without it,
* and with `addPermRoot()` and `addIndirectRoot()` both `virtual`, we
* would accidentally be allowing for a combinatorial explosion of
* possible implementations many of which make no sense. Having this and
* that invariant enforced cuts down that space.
*/
struct IndirectRootStore : public virtual LocalFSStore
{
inline static std::string operationName = "Indirect GC roots registration";
/**
* Implementation of `LocalFSStore::addPermRoot` where the permanent
* root is a pair of
*
* - The user-facing symlink which all implementations must create
*
* - An additional weak reference known as the "indirect root" that
* points to that symlink.
*
* The garbage collector will automatically remove the indirect root
* when it finds that the symlink has disappeared.
*
* The implementation of this method is concrete, but it delegates
* to `addIndirectRoot()` which is abstract.
*/
Path addPermRoot(const StorePath & storePath, const Path & gcRoot) override final;
/**
* Add an indirect root, which is a weak reference to the
* user-facing symlink created by `addPermRoot()`.
*
* @param path user-facing and user-controlled symlink to a store
* path.
*
* The form this weak-reference takes is implementation-specific.
*/
virtual void addIndirectRoot(const Path & path) = 0;
protected:
void makeSymlink(const Path & link, const Path & target);
};
}

src/libstore/keys.cc Normal file
@ -0,0 +1,31 @@
#include "file-system.hh"
#include "globals.hh"
#include "keys.hh"
namespace nix {
PublicKeys getDefaultPublicKeys()
{
PublicKeys publicKeys;
// FIXME: filter duplicates
for (auto s : settings.trustedPublicKeys.get()) {
PublicKey key(s);
publicKeys.emplace(key.name, key);
}
for (auto secretKeyFile : settings.secretKeyFiles.get()) {
try {
SecretKey secretKey(readFile(secretKeyFile));
publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
} catch (SystemError & e) {
/* Ignore unreadable key files. That's normal in a
multi-user installation. */
}
}
return publicKeys;
}
}

src/libstore/keys.hh Normal file
@ -0,0 +1,10 @@
#pragma once
///@file
#include "signature/local-keys.hh"
namespace nix {
PublicKeys getDefaultPublicKeys();
}

@ -1,386 +1,321 @@
#include "legacy-ssh-store.hh"
#include "common-ssh-store-config.hh"
#include "archive.hh"
#include "pool.hh"
#include "remote-store.hh"
#include "serve-protocol.hh"
#include "serve-protocol-connection.hh"
#include "serve-protocol-impl.hh"
#include "build-result.hh"
#include "store-api.hh"
#include "path-with-outputs.hh"
#include "worker-protocol.hh"
#include "ssh.hh"
#include "derivations.hh"
#include "callback.hh"
namespace nix {
struct LegacySSHStoreConfig : virtual StoreConfig
LegacySSHStoreConfig::LegacySSHStoreConfig(
std::string_view scheme,
std::string_view authority,
const Params & params)
: StoreConfig(params)
, CommonSSHStoreConfig(scheme, authority, params)
{
using StoreConfig::StoreConfig;
const Setting<int> maxConnections{(StoreConfig*) this, 1, "max-connections", "maximum number of concurrent SSH connections"};
const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"};
const Setting<std::string> sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key", "The public half of the host's SSH key"};
const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"};
}
const std::string name() override { return "Legacy SSH Store"; }
std::string LegacySSHStoreConfig::doc()
{
return
#include "legacy-ssh-store.md"
;
}
struct LegacySSHStore::Connection : public ServeProto::BasicClientConnection
{
std::unique_ptr<SSHMaster::Connection> sshConn;
bool good = true;
};
struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Store
LegacySSHStore::LegacySSHStore(
std::string_view scheme,
std::string_view host,
const Params & params)
: StoreConfig(params)
, CommonSSHStoreConfig(scheme, host, params)
, LegacySSHStoreConfig(scheme, host, params)
, Store(params)
, connections(make_ref<Pool<Connection>>(
std::max(1, (int) maxConnections),
[this]() { return openConnection(); },
[](const ref<Connection> & r) { return r->good; }
))
, master(createSSHMaster(
// Use SSH master only if using more than 1 connection.
connections->capacity() > 1,
logFD))
{
// Hack for getting remote build log output.
// Intentionally not in `LegacySSHStoreConfig` so that it doesn't appear in
// the documentation
const Setting<int> logFD{(StoreConfig*) this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"};
}
struct Connection
{
std::unique_ptr<SSHMaster::Connection> sshConn;
FdSink to;
FdSource from;
int remoteVersion;
bool good = true;
};
std::string host;
ref<Pool<Connection>> connections;
SSHMaster master;
static std::set<std::string> uriSchemes() { return {"ssh"}; }
LegacySSHStore(const std::string & scheme, const std::string & host, const Params & params)
: StoreConfig(params)
, LegacySSHStoreConfig(params)
, Store(params)
, host(host)
, connections(make_ref<Pool<Connection>>(
std::max(1, (int) maxConnections),
[this]() { return openConnection(); },
[](const ref<Connection> & r) { return r->good; }
))
, master(
host,
sshKey,
sshPublicHostKey,
// Use SSH master only if using more than 1 connection.
connections->capacity() > 1,
compress,
logFD)
{
ref<LegacySSHStore::Connection> LegacySSHStore::openConnection()
{
auto conn = make_ref<Connection>();
Strings command = remoteProgram.get();
command.push_back("--serve");
command.push_back("--write");
if (remoteStore.get() != "") {
command.push_back("--store");
command.push_back(remoteStore.get());
}
conn->sshConn = master.startCommand(std::move(command));
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
ref<Connection> openConnection()
{
auto conn = make_ref<Connection>();
conn->sshConn = master.startCommand(
fmt("%s --serve --write", remoteProgram)
+ (remoteStore.get() == "" ? "" : " --store " + shellEscape(remoteStore.get())));
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
try {
conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
conn->to.flush();
StringSink saved;
try {
TeeSource tee(conn->from, saved);
unsigned int magic = readInt(tee);
if (magic != SERVE_MAGIC_2)
throw Error("'nix-store --serve' protocol mismatch from '%s'", host);
} catch (SerialisationError & e) {
/* In case the other side is waiting for our input,
close it. */
conn->sshConn->in.close();
auto msg = conn->from.drain();
throw Error("'nix-store --serve' protocol mismatch from '%s', got '%s'",
host, chomp(saved.s + msg));
}
conn->remoteVersion = readInt(conn->from);
if (GET_PROTOCOL_MAJOR(conn->remoteVersion) != 0x200)
throw Error("unsupported 'nix-store --serve' protocol version on '%s'", host);
} catch (EndOfFile & e) {
throw Error("cannot connect to '%1%'", host);
StringSink saved;
TeeSource tee(conn->from, saved);
try {
conn->remoteVersion = ServeProto::BasicClientConnection::handshake(
conn->to, tee, SERVE_PROTOCOL_VERSION, host);
} catch (SerialisationError & e) {
// in.close(): Don't let the remote block on us not writing.
conn->sshConn->in.close();
{
NullSink nullSink;
tee.drainInto(nullSink);
}
return conn;
};
std::string getUri() override
{
return *uriSchemes().begin() + "://" + host;
throw Error("'nix-store --serve' protocol mismatch from '%s', got '%s'",
host, chomp(saved.s));
} catch (EndOfFile & e) {
throw Error("cannot connect to '%1%'", host);
}
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
{
try {
auto conn(connections->get());
return conn;
};
/* No longer support missing NAR hash */
assert(GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4);
debug("querying remote host '%s' for info on '%s'", host, printStorePath(path));
std::string LegacySSHStore::getUri()
{
return *uriSchemes().begin() + "://" + host;
}
conn->to << cmdQueryPathInfos << PathSet{printStorePath(path)};
conn->to.flush();
auto p = readString(conn->from);
if (p.empty()) return callback(nullptr);
auto path2 = parseStorePath(p);
assert(path == path2);
/* Hash will be set below. FIXME construct ValidPathInfo at end. */
auto info = std::make_shared<ValidPathInfo>(path, Hash::dummy);
PathSet references;
auto deriver = readString(conn->from);
if (deriver != "")
info->deriver = parseStorePath(deriver);
info->references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
readLongLong(conn->from); // download size
info->narSize = readLongLong(conn->from);
{
auto s = readString(conn->from);
if (s == "")
throw Error("NAR hash is now mandatory");
info->narHash = Hash::parseAnyPrefixed(s);
}
info->ca = parseContentAddressOpt(readString(conn->from));
info->sigs = readStrings<StringSet>(conn->from);
auto s = readString(conn->from);
assert(s == "");
callback(std::move(info));
} catch (...) { callback.rethrow(); }
}
void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) override
{
debug("adding path '%s' to remote host '%s'", printStorePath(info.path), host);
void LegacySSHStore::queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
try {
auto conn(connections->get());
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
/* No longer support missing NAR hash */
assert(GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4);
conn->to
<< cmdAddToStoreNar
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
worker_proto::write(*this, conn->to, info.references);
conn->to
<< info.registrationTime
<< info.narSize
<< info.ultimate
<< info.sigs
<< renderContentAddress(info.ca);
try {
copyNAR(source, conn->to);
} catch (...) {
conn->good = false;
throw;
}
conn->to.flush();
debug("querying remote host '%s' for info on '%s'", host, printStorePath(path));
} else {
auto infos = conn->queryPathInfos(*this, {path});
conn->to
<< cmdImportPaths
<< 1;
try {
copyNAR(source, conn->to);
} catch (...) {
conn->good = false;
throw;
}
conn->to
<< exportMagic
<< printStorePath(info.path);
worker_proto::write(*this, conn->to, info.references);
conn->to
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0
<< 0;
conn->to.flush();
switch (infos.size()) {
case 0:
return callback(nullptr);
case 1: {
auto & [path2, info] = *infos.begin();
if (info.narHash == Hash::dummy)
throw Error("NAR hash is now mandatory");
assert(path == path2);
return callback(std::make_shared<ValidPathInfo>(
std::move(path),
std::move(info)
));
}
default:
throw Error("More path infos returned than queried");
}
} catch (...) { callback.rethrow(); }
}
void LegacySSHStore::addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs)
{
debug("adding path '%s' to remote host '%s'", printStorePath(info.path), host);
auto conn(connections->get());
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
conn->to
<< ServeProto::Command::AddToStoreNar
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(HashFormat::Base16, false);
ServeProto::write(*this, *conn, info.references);
conn->to
<< info.registrationTime
<< info.narSize
<< info.ultimate
<< info.sigs
<< renderContentAddress(info.ca);
try {
copyNAR(source, conn->to);
} catch (...) {
conn->good = false;
throw;
}
conn->to.flush();
if (readInt(conn->from) != 1)
throw Error("failed to add path '%s' to remote host '%s'", printStorePath(info.path), host);
}
void narFromPath(const StorePath & path, Sink & sink) override
{
auto conn(connections->get());
} else {
conn->to << cmdDumpStorePath << printStorePath(path);
conn->to.flush();
copyNAR(conn->from, sink);
}
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
StorePath addToStore(
std::string_view name,
const Path & srcPath,
FileIngestionMethod method,
HashType hashAlgo,
PathFilter & filter,
RepairFlag repair,
const StorePathSet & references) override
{ unsupported("addToStore"); }
StorePath addTextToStore(
std::string_view name,
std::string_view s,
const StorePathSet & references,
RepairFlag repair) override
{ unsupported("addTextToStore"); }
private:
void putBuildSettings(Connection & conn)
{
conn.to
<< settings.maxSilentTime
<< settings.buildTimeout;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 2)
conn.to
<< settings.maxLogSize;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3)
conn.to
<< 0 // buildRepeat hasn't worked for ages anyway
conn->importPaths(*this, [&](Sink & sink) {
try {
copyNAR(source, sink);
} catch (...) {
conn->good = false;
throw;
}
sink
<< exportMagic
<< printStorePath(info.path);
ServeProto::write(*this, *conn, info.references);
sink
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0
<< 0;
});
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
conn.to << ((int) settings.keepFailed);
}
}
}
void LegacySSHStore::narFromPath(const StorePath & path, Sink & sink)
{
auto conn(connections->get());
conn->narFromPath(*this, path, [&](auto & source) {
copyNAR(source, sink);
});
}
static ServeProto::BuildOptions buildSettings()
{
return {
.maxSilentTime = settings.maxSilentTime,
.buildTimeout = settings.buildTimeout,
.maxLogSize = settings.maxLogSize,
.nrRepeats = 0, // buildRepeat hasn't worked for ages anyway
.enforceDeterminism = 0,
.keepFailed = settings.keepFailed,
};
}
BuildResult LegacySSHStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode)
{
auto conn(connections->get());
conn->putBuildDerivationRequest(*this, drvPath, drv, buildSettings());
return conn->getBuildDerivationResponse(*this);
}
void LegacySSHStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
{
if (evalStore && evalStore.get() != this)
throw Error("building on an SSH store is incompatible with '--eval-store'");
auto conn(connections->get());
conn->to << ServeProto::Command::BuildPaths;
Strings ss;
for (auto & p : drvPaths) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
std::visit(overloaded {
[&](const StorePathWithOutputs & s) {
ss.push_back(s.to_string(*this));
},
[&](const StorePath & drvPath) {
throw Error("wanted to fetch '%s' but the legacy ssh protocol doesn't support merely substituting drv files via the build paths command. It would build them instead. Try using ssh-ng://", printStorePath(drvPath));
},
[&](std::monostate) {
throw Error("wanted build derivation that is itself a build product, but the legacy ssh protocol doesn't support that. Try using ssh-ng://");
},
}, sOrDrvPath);
}
conn->to << ss;
ServeProto::write(*this, *conn, buildSettings());
conn->to.flush();
BuildResult result;
result.status = (BuildResult::Status) readInt(conn->from);
if (!result.success()) {
conn->from >> result.errorMsg;
throw Error(result.status, result.errorMsg);
}
}
void LegacySSHStore::computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection,
bool includeOutputs, bool includeDerivers)
{
if (flipDirection || includeDerivers) {
Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
return;
}
public:
auto conn(connections->get());
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override
{
auto conn(connections->get());
conn->to
<< ServeProto::Command::QueryClosure
<< includeOutputs;
ServeProto::write(*this, *conn, paths);
conn->to.flush();
conn->to
<< cmdBuildDerivation
<< printStorePath(drvPath);
writeDerivation(conn->to, *this, drv);
for (auto & i : ServeProto::Serialise<StorePathSet>::read(*this, *conn))
out.insert(i);
}
putBuildSettings(*conn);
conn->to.flush();
StorePathSet LegacySSHStore::queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute)
{
auto conn(connections->get());
return conn->queryValidPaths(*this,
false, paths, maybeSubstitute);
}
BuildResult status { .path = DerivedPath::Built { .drvPath = drvPath } };
status.status = (BuildResult::Status) readInt(conn->from);
conn->from >> status.errorMsg;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 6) {
status.builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
}
return status;
}
void LegacySSHStore::connect()
{
auto conn(connections->get());
}
void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override
{
if (evalStore && evalStore.get() != this)
throw Error("building on an SSH store is incompatible with '--eval-store'");
auto conn(connections->get());
unsigned int LegacySSHStore::getProtocol()
{
auto conn(connections->get());
return conn->remoteVersion;
}
conn->to << cmdBuildPaths;
Strings ss;
for (auto & p : drvPaths) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
std::visit(overloaded {
[&](const StorePathWithOutputs & s) {
ss.push_back(s.to_string(*this));
},
[&](const StorePath & drvPath) {
throw Error("wanted to fetch '%s' but the legacy ssh protocol doesn't support merely substituting drv files via the build paths command. It would build them instead. Try using ssh-ng://", printStorePath(drvPath));
},
}, sOrDrvPath);
}
conn->to << ss;
putBuildSettings(*conn);
/**
* The legacy ssh protocol doesn't support checking for trusted-user.
* Try using ssh-ng:// instead if you want to know.
*/
std::optional<TrustedFlag> isTrustedClient()
{
return std::nullopt;
}
conn->to.flush();
BuildResult result { .path = DerivedPath::Opaque { StorePath::dummy } };
result.status = (BuildResult::Status) readInt(conn->from);
if (!result.success()) {
conn->from >> result.errorMsg;
throw Error(result.status, result.errorMsg);
}
}
void ensurePath(const StorePath & path) override
{ unsupported("ensurePath"); }
void computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false) override
{
if (flipDirection || includeDerivers) {
Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
return;
}
auto conn(connections->get());
conn->to
<< cmdQueryClosure
<< includeOutputs;
worker_proto::write(*this, conn->to, paths);
conn->to.flush();
for (auto & i : worker_proto::read(*this, conn->from, Phantom<StorePathSet> {}))
out.insert(i);
}
StorePathSet queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override
{
auto conn(connections->get());
conn->to
<< cmdQueryValidPaths
<< false // lock
<< maybeSubstitute;
worker_proto::write(*this, conn->to, paths);
conn->to.flush();
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
}
void connect() override
{
auto conn(connections->get());
}
unsigned int getProtocol() override
{
auto conn(connections->get());
return conn->remoteVersion;
}
void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// TODO: Implement
{ unsupported("queryRealisation"); }
};
static RegisterStoreImplementation<LegacySSHStore, LegacySSHStoreConfig> regLegacySSHStore;

@ -0,0 +1,141 @@
#pragma once
///@file
#include "common-ssh-store-config.hh"
#include "store-api.hh"
#include "ssh.hh"
#include "callback.hh"
#include "pool.hh"
namespace nix {
struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig
{
using CommonSSHStoreConfig::CommonSSHStoreConfig;
LegacySSHStoreConfig(
std::string_view scheme,
std::string_view authority,
const Params & params);
const Setting<Strings> remoteProgram{this, {"nix-store"}, "remote-program",
"Path to the `nix-store` executable on the remote machine."};
const Setting<int> maxConnections{this, 1, "max-connections",
"Maximum number of concurrent SSH connections."};
const std::string name() override { return "SSH Store"; }
static std::set<std::string> uriSchemes() { return {"ssh"}; }
std::string doc() override;
};
struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Store
{
#ifndef _WIN32
// Hack for getting remote build log output.
// Intentionally not in `LegacySSHStoreConfig` so that it doesn't appear in
// the documentation
const Setting<int> logFD{this, INVALID_DESCRIPTOR, "log-fd", "file descriptor to which SSH's stderr is connected"};
#else
Descriptor logFD = INVALID_DESCRIPTOR;
#endif
struct Connection;
ref<Pool<Connection>> connections;
SSHMaster master;
LegacySSHStore(
std::string_view scheme,
std::string_view host,
const Params & params);
ref<Connection> openConnection();
std::string getUri() override;
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) override;
void narFromPath(const StorePath & path, Sink & sink) override;
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
StorePath addToStore(
std::string_view name,
const SourcePath & path,
ContentAddressMethod method,
HashAlgorithm hashAlgo,
const StorePathSet & references,
PathFilter & filter,
RepairFlag repair) override
{ unsupported("addToStore"); }
virtual StorePath addToStoreFromDump(
Source & dump,
std::string_view name,
FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive,
ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive,
HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
const StorePathSet & references = StorePathSet(),
RepairFlag repair = NoRepair) override
{ unsupported("addToStore"); }
public:
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override;
void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
void ensurePath(const StorePath & path) override
{ unsupported("ensurePath"); }
virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath) override
{ unsupported("getFSAccessor"); }
/**
* The default instance would schedule the work on the client side, but
* for consistency with `buildPaths` and `buildDerivation` it should happen
* on the remote side.
*
     * We make this fail for now so we can implement this properly later
* without it being a breaking change.
*/
void repairPath(const StorePath & path) override
{ unsupported("repairPath"); }
void computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false) override;
StorePathSet queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override;
void connect() override;
unsigned int getProtocol() override;
/**
* The legacy ssh protocol doesn't support checking for trusted-user.
* Try using ssh-ng:// instead if you want to know.
*/
std::optional<TrustedFlag> isTrustedClient() override
{
return std::nullopt;
}
void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// TODO: Implement
{ unsupported("queryRealisation"); }
};
}

@ -0,0 +1,8 @@
R"(
**Store URL format**: `ssh://[username@]hostname`
This store type allows limited access to a remote store on another
machine via SSH.
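For example (with a placeholder host), assuming Nix is installed on the remote machine and reachable over SSH:
```
# nix copy --to ssh://alice@remote.example.org nixpkgs#hello
```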
)"

@ -0,0 +1,162 @@
#pragma once
/**
 * @file Reusable serialisers for serialising container types in a
* length-prefixed manner.
*
* Used by both the Worker and Serve protocols.
*/
#include "types.hh"
namespace nix {
struct StoreDirConfig;
/**
 * Reusable serialisers for serialising container types in a
* length-prefixed manner.
*
* @param T The type of the collection being serialised
*
 * @param Inner This is the most important parameter; this is the "inner"
 * protocol. The user of this will substitute `MyProtocol` or similar
 * when making a `MyProtocol::Serialise<Collection<T>>`. Note that the
 * inside is allowed to call `Inner::Serialise` on different
* types. This is especially important for `std::map` which doesn't have
* a single `T` but one `K` and one `V`.
*/
template<class Inner, typename T>
struct LengthPrefixedProtoHelper;
/*!
* \typedef LengthPrefixedProtoHelper::S
*
* Read this as simply `using S = Inner::Serialise;`.
*
* It would be nice to use that directly, but C++ doesn't seem to allow
 * it. The `typename` keyword needed to refer to `Inner` seems too greedy
* (low precedence), and then C++ complains that `Serialise` is not a
* type parameter but a real type.
*
* Making this `S` alias seems to be the only way to avoid these issues.
*/
#define LENGTH_PREFIXED_PROTO_HELPER(Inner, T) \
struct LengthPrefixedProtoHelper< Inner, T > \
{ \
static T read(const StoreDirConfig & store, typename Inner::ReadConn conn); \
static void write(const StoreDirConfig & store, typename Inner::WriteConn conn, const T & str); \
private: \
template<typename U> using S = typename Inner::template Serialise<U>; \
}
template<class Inner, typename T>
LENGTH_PREFIXED_PROTO_HELPER(Inner, std::vector<T>);
template<class Inner, typename T>
LENGTH_PREFIXED_PROTO_HELPER(Inner, std::set<T>);
template<class Inner, typename... Ts>
LENGTH_PREFIXED_PROTO_HELPER(Inner, std::tuple<Ts...>);
template<class Inner, typename K, typename V>
#define _X std::map<K, V>
LENGTH_PREFIXED_PROTO_HELPER(Inner, _X);
#undef _X
template<class Inner, typename T>
std::vector<T>
LengthPrefixedProtoHelper<Inner, std::vector<T>>::read(
const StoreDirConfig & store, typename Inner::ReadConn conn)
{
std::vector<T> resSet;
auto size = readNum<size_t>(conn.from);
while (size--) {
resSet.push_back(S<T>::read(store, conn));
}
return resSet;
}
template<class Inner, typename T>
void
LengthPrefixedProtoHelper<Inner, std::vector<T>>::write(
const StoreDirConfig & store, typename Inner::WriteConn conn, const std::vector<T> & resSet)
{
conn.to << resSet.size();
for (auto & key : resSet) {
S<T>::write(store, conn, key);
}
}
template<class Inner, typename T>
std::set<T>
LengthPrefixedProtoHelper<Inner, std::set<T>>::read(
const StoreDirConfig & store, typename Inner::ReadConn conn)
{
std::set<T> resSet;
auto size = readNum<size_t>(conn.from);
while (size--) {
resSet.insert(S<T>::read(store, conn));
}
return resSet;
}
template<class Inner, typename T>
void
LengthPrefixedProtoHelper<Inner, std::set<T>>::write(
const StoreDirConfig & store, typename Inner::WriteConn conn, const std::set<T> & resSet)
{
conn.to << resSet.size();
for (auto & key : resSet) {
S<T>::write(store, conn, key);
}
}
template<class Inner, typename K, typename V>
std::map<K, V>
LengthPrefixedProtoHelper<Inner, std::map<K, V>>::read(
const StoreDirConfig & store, typename Inner::ReadConn conn)
{
std::map<K, V> resMap;
auto size = readNum<size_t>(conn.from);
while (size--) {
auto k = S<K>::read(store, conn);
auto v = S<V>::read(store, conn);
resMap.insert_or_assign(std::move(k), std::move(v));
}
return resMap;
}
template<class Inner, typename K, typename V>
void
LengthPrefixedProtoHelper<Inner, std::map<K, V>>::write(
const StoreDirConfig & store, typename Inner::WriteConn conn, const std::map<K, V> & resMap)
{
conn.to << resMap.size();
for (auto & i : resMap) {
S<K>::write(store, conn, i.first);
S<V>::write(store, conn, i.second);
}
}
template<class Inner, typename... Ts>
std::tuple<Ts...>
LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::read(
const StoreDirConfig & store, typename Inner::ReadConn conn)
{
return std::tuple<Ts...> {
S<Ts>::read(store, conn)...,
};
}
template<class Inner, typename... Ts>
void
LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::write(
const StoreDirConfig & store, typename Inner::WriteConn conn, const std::tuple<Ts...> & res)
{
std::apply([&]<typename... Us>(const Us &... args) {
(S<Us>::write(store, conn, args), ...);
}, res);
}
}

@ -0,0 +1,34 @@
/*
* Determine the syscall number for `fchmodat2`.
*
* On most platforms this is 452. Exceptions can be found on
* a glibc git checkout via `rg --pcre2 'define __NR_fchmodat2 (?!452)'`.
*
* The problem is that glibc 2.39 and libseccomp 2.5.5 are needed to
* get the syscall number. However, a Nix built against nixpkgs 23.11
* (glibc 2.38) should still have the issue fixed without depending
* on the build environment.
*
* To achieve that, the macros below try to determine the platform and
* set the syscall number which is platform-specific, but
* in most cases 452.
*
* TODO: remove this when 23.11 is EOL and the entire (supported) ecosystem
* is on glibc 2.39.
*/
#if HAVE_SECCOMP
# if defined(__alpha__)
# define NIX_SYSCALL_FCHMODAT2 562
# elif defined(__x86_64__) && SIZE_MAX == 0xFFFFFFFF // x32
# define NIX_SYSCALL_FCHMODAT2 1073742276
# elif defined(__mips__) && defined(__mips64) && defined(_ABIN64) // mips64/n64
# define NIX_SYSCALL_FCHMODAT2 5452
# elif defined(__mips__) && defined(__mips64) && defined(_ABIN32) // mips64/n32
# define NIX_SYSCALL_FCHMODAT2 6452
# elif defined(__mips__) && defined(_ABIO32) // mips32
# define NIX_SYSCALL_FCHMODAT2 4452
# else
# define NIX_SYSCALL_FCHMODAT2 452
# endif
#endif // HAVE_SECCOMP

@ -0,0 +1,10 @@
sources += files(
'personality.cc',
)
include_dirs += include_directories('.')
headers += files(
'fchmodat2-compat.hh',
'personality.hh',
)

@ -0,0 +1,41 @@
#include "personality.hh"
#include "globals.hh"
#include <sys/utsname.h>
#include <sys/personality.h>
#include <cstring>
namespace nix::linux {
void setPersonality(std::string_view system)
{
/* Change the personality to 32-bit if we're doing an
i686-linux build on an x86_64-linux machine. */
struct utsname utsbuf;
uname(&utsbuf);
if ((system == "i686-linux"
&& (std::string_view(SYSTEM) == "x86_64-linux"
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|| system == "armv7l-linux"
|| system == "armv6l-linux"
|| system == "armv5tel-linux")
{
if (personality(PER_LINUX32) == -1)
throw SysError("cannot set 32-bit personality");
}
/* Impersonate a Linux 2.6 machine to get some determinism in
builds that depend on the kernel version. */
if ((system == "i686-linux" || system == "x86_64-linux") && settings.impersonateLinux26) {
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
}
/* Disable address space randomization for improved
determinism. */
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
}
}

@ -0,0 +1,12 @@
#pragma once
///@file
#include <string>
namespace nix::linux {
void setPersonality(std::string_view system);
}

@ -1,36 +1,46 @@
#include "binary-cache-store.hh"
#include "local-binary-cache-store.hh"
#include "globals.hh"
#include "nar-info-disk-cache.hh"
#include "signals.hh"
#include <atomic>
namespace nix {
struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
LocalBinaryCacheStoreConfig::LocalBinaryCacheStoreConfig(
std::string_view scheme,
PathView binaryCacheDir,
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, binaryCacheDir(binaryCacheDir)
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
}
const std::string name() override { return "Local Binary Cache Store"; }
};
class LocalBinaryCacheStore : public virtual LocalBinaryCacheStoreConfig, public virtual BinaryCacheStore
std::string LocalBinaryCacheStoreConfig::doc()
{
private:
return
#include "local-binary-cache-store.md"
;
}
Path binaryCacheDir;
public:
struct LocalBinaryCacheStore : virtual LocalBinaryCacheStoreConfig, virtual BinaryCacheStore
{
/**
* @param binaryCacheDir `file://` is a short-hand for `file:///`
* for now.
*/
LocalBinaryCacheStore(
const std::string scheme,
const Path & binaryCacheDir,
std::string_view scheme,
PathView binaryCacheDir,
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, LocalBinaryCacheStoreConfig(params)
, LocalBinaryCacheStoreConfig(scheme, binaryCacheDir, params)
, Store(params)
, BinaryCacheStore(params)
, binaryCacheDir(binaryCacheDir)
{
}
@ -41,8 +51,6 @@ public:
return "file://" + binaryCacheDir;
}
static std::set<std::string> uriSchemes();
protected:
bool fileExists(const std::string & path) override;
@ -57,7 +65,7 @@ protected:
AutoDelete del(tmp, false);
StreamToSourceAdapter source(istream);
writeFile(tmp, source);
renameFile(tmp, path2);
std::filesystem::rename(tmp, path2);
del.cancel();
}
@ -76,18 +84,24 @@ protected:
{
StorePathSet paths;
for (auto & entry : readDirectory(binaryCacheDir)) {
if (entry.name.size() != 40 ||
!hasSuffix(entry.name, ".narinfo"))
for (auto & entry : std::filesystem::directory_iterator{binaryCacheDir}) {
checkInterrupt();
auto name = entry.path().filename().string();
if (name.size() != 40 ||
!hasSuffix(name, ".narinfo"))
continue;
paths.insert(parseStorePath(
storeDir + "/" + entry.name.substr(0, entry.name.size() - 8)
storeDir + "/" + name.substr(0, name.size() - 8)
+ "-" + MissingName));
}
return paths;
}
std::optional<TrustedFlag> isTrustedClient() override
{
return Trusted;
}
};
void LocalBinaryCacheStore::init()
@ -105,7 +119,7 @@ bool LocalBinaryCacheStore::fileExists(const std::string & path)
return pathExists(binaryCacheDir + "/" + path);
}
std::set<std::string> LocalBinaryCacheStore::uriSchemes()
std::set<std::string> LocalBinaryCacheStoreConfig::uriSchemes()
{
if (getEnv("_NIX_FORCE_HTTP") == "1")
return {};

@ -0,0 +1,23 @@
#include "binary-cache-store.hh"
namespace nix {
struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
LocalBinaryCacheStoreConfig(std::string_view scheme, PathView binaryCacheDir, const Params & params);
Path binaryCacheDir;
const std::string name() override
{
return "Local Binary Cache Store";
}
static std::set<std::string> uriSchemes();
std::string doc() override;
};
}

@ -0,0 +1,16 @@
R"(
**Store URL format**: `file://`*path*
This store allows reading and writing a binary cache stored in *path*
in the local filesystem. If *path* does not exist, it will be created.
For example, the following builds or downloads `nixpkgs#hello` into
the local store and then copies it to the binary cache in
`/tmp/binary-cache`:
```
# nix copy --to file:///tmp/binary-cache nixpkgs#hello
```
)"

@ -1,5 +1,5 @@
#include "archive.hh"
#include "fs-accessor.hh"
#include "posix-source-accessor.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include "globals.hh"
@ -8,74 +8,76 @@
namespace nix {
LocalFSStoreConfig::LocalFSStoreConfig(PathView rootDir, const Params & params)
: StoreConfig(params)
    // Default `?root` from `rootDir` if not set
// FIXME don't duplicate description once we don't have root setting
, rootDir{
this,
!rootDir.empty() && params.count("root") == 0
? (std::optional<Path>{rootDir})
: std::nullopt,
"root",
"Directory prefixed to all other paths."}
{
}
LocalFSStore::LocalFSStore(const Params & params)
: Store(params)
{
}
struct LocalStoreAccessor : public FSAccessor
struct LocalStoreAccessor : PosixSourceAccessor
{
ref<LocalFSStore> store;
bool requireValidPath;
LocalStoreAccessor(ref<LocalFSStore> store) : store(store) { }
LocalStoreAccessor(ref<LocalFSStore> store, bool requireValidPath)
: store(store)
, requireValidPath(requireValidPath)
{ }
Path toRealPath(const Path & path, bool requireValidPath = true)
CanonPath toRealPath(const CanonPath & path)
{
auto storePath = store->toStorePath(path).first;
auto [storePath, rest] = store->toStorePath(path.abs());
if (requireValidPath && !store->isValidPath(storePath))
throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath));
return store->getRealStoreDir() + std::string(path, store->storeDir.size());
return CanonPath(store->getRealStoreDir()) / storePath.to_string() / CanonPath(rest);
}
FSAccessor::Stat stat(const Path & path) override
std::optional<Stat> maybeLstat(const CanonPath & path) override
{
auto realPath = toRealPath(path);
/* Handle the case where `path` is (a parent of) the store. */
if (isDirOrInDir(store->storeDir, path.abs()))
return Stat{ .type = tDirectory };
struct stat st;
if (lstat(realPath.c_str(), &st)) {
if (errno == ENOENT || errno == ENOTDIR) return {Type::tMissing, 0, false};
throw SysError("getting status of '%1%'", path);
}
if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode))
throw Error("file '%1%' has unsupported type", path);
return {
S_ISREG(st.st_mode) ? Type::tRegular :
S_ISLNK(st.st_mode) ? Type::tSymlink :
Type::tDirectory,
S_ISREG(st.st_mode) ? (uint64_t) st.st_size : 0,
S_ISREG(st.st_mode) && st.st_mode & S_IXUSR};
return PosixSourceAccessor::maybeLstat(toRealPath(path));
}
StringSet readDirectory(const Path & path) override
DirEntries readDirectory(const CanonPath & path) override
{
auto realPath = toRealPath(path);
auto entries = nix::readDirectory(realPath);
StringSet res;
for (auto & entry : entries)
res.insert(entry.name);
return res;
return PosixSourceAccessor::readDirectory(toRealPath(path));
}
std::string readFile(const Path & path, bool requireValidPath = true) override
void readFile(
const CanonPath & path,
Sink & sink,
std::function<void(uint64_t)> sizeCallback) override
{
return nix::readFile(toRealPath(path, requireValidPath));
return PosixSourceAccessor::readFile(toRealPath(path), sink, sizeCallback);
}
std::string readLink(const Path & path) override
std::string readLink(const CanonPath & path) override
{
return nix::readLink(toRealPath(path));
return PosixSourceAccessor::readLink(toRealPath(path));
}
};
ref<FSAccessor> LocalFSStore::getFSAccessor()
ref<SourceAccessor> LocalFSStore::getFSAccessor(bool requireValidPath)
{
return make_ref<LocalStoreAccessor>(ref<LocalFSStore>(
std::dynamic_pointer_cast<LocalFSStore>(shared_from_this())));
std::dynamic_pointer_cast<LocalFSStore>(shared_from_this())),
requireValidPath);
}
void LocalFSStore::narFromPath(const StorePath & path, Sink & sink)
@ -87,20 +89,8 @@ void LocalFSStore::narFromPath(const StorePath & path, Sink & sink)
const std::string LocalFSStore::drvsLogDir = "drvs";
std::optional<std::string> LocalFSStore::getBuildLog(const StorePath & path_)
std::optional<std::string> LocalFSStore::getBuildLogExact(const StorePath & path)
{
auto path = path_;
if (!path.isDerivation()) {
try {
auto info = queryPathInfo(path);
if (!info->deriver) return std::nullopt;
path = *info->deriver;
} catch (InvalidPath &) {
return std::nullopt;
}
}
auto baseName = path.to_string();
for (int j = 0; j < 2; j++) {


@ -1,4 +1,5 @@
#pragma once
///@file
#include "store-api.hh"
#include "gc-store.hh"
@ -9,20 +10,33 @@ namespace nix {
struct LocalFSStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
// FIXME: the (StoreConfig*) cast works around a bug in gcc that causes
// it to omit the call to the Setting constructor. Clang works fine
// either way.
const PathSetting rootDir{(StoreConfig*) this, true, "",
"root", "directory prefixed to all other paths"};
const PathSetting stateDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
"state", "directory where Nix will store state"};
const PathSetting logDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
"log", "directory where Nix will store state"};
const PathSetting realStoreDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
"physical path to the Nix store"};
/**
* Used to override the `root` setting. Can't be done via modifying
* `params` reliably because this parameter is unused except for
* passing to base class constructors.
*
* @todo Make this less error-prone with new store settings system.
*/
LocalFSStoreConfig(PathView path, const Params & params);
const OptionalPathSetting rootDir{this, std::nullopt,
"root",
"Directory prefixed to all other paths."};
const PathSetting stateDir{this,
rootDir.get() ? *rootDir.get() + "/nix/var/nix" : settings.nixStateDir,
"state",
"Directory where Nix will store state."};
const PathSetting logDir{this,
rootDir.get() ? *rootDir.get() + "/nix/var/log/nix" : settings.nixLogDir,
"log",
"directory where Nix will store log files."};
const PathSetting realStoreDir{this,
rootDir.get() ? *rootDir.get() + "/nix/store" : storeDir, "real",
"Physical path of the Nix store."};
};
class LocalFSStore : public virtual LocalFSStoreConfig,
@ -31,16 +45,30 @@ class LocalFSStore : public virtual LocalFSStoreConfig,
public virtual LogStore
{
public:
inline static std::string operationName = "Local Filesystem Store";
const static std::string drvsLogDir;
LocalFSStore(const Params & params);
void narFromPath(const StorePath & path, Sink & sink) override;
ref<FSAccessor> getFSAccessor() override;
ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override;
/* Register a permanent GC root. */
Path addPermRoot(const StorePath & storePath, const Path & gcRoot);
/**
* Creates symlink from the `gcRoot` to the `storePath` and
* registers the `gcRoot` as a permanent GC root. The `gcRoot`
* symlink lives outside the store and is created and owned by the
* user.
*
* @param gcRoot The location of the symlink.
*
* @param storePath The store object being rooted. The symlink will
* point to `toRealPath(store.printStorePath(storePath))`.
*
* How the permanent GC root corresponding to this symlink is
* managed is implementation-specific.
*/
virtual Path addPermRoot(const StorePath & storePath, const Path & gcRoot) = 0;
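For context, out-links created by `nix build` appear to be registered through this mechanism (a hedged example; the chroot root and flake reference are illustrative):

```console
# nix build --store /tmp/root --out-link ./result nixpkgs#hello
```

Here `./result` plays the role of `gcRoot` and ends up pointing at the store object's real path under `/tmp/root`.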
virtual Path getRealStoreDir() { return realStoreDir; }
@ -50,7 +78,7 @@ public:
return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1);
}
std::optional<std::string> getBuildLog(const StorePath & path) override;
std::optional<std::string> getBuildLogExact(const StorePath & path) override;
};


@ -0,0 +1,292 @@
#include "local-overlay-store.hh"
#include "callback.hh"
#include "realisation.hh"
#include "processes.hh"
#include "url.hh"
#include <regex>
namespace nix {
std::string LocalOverlayStoreConfig::doc()
{
return
#include "local-overlay-store.md"
;
}
Path LocalOverlayStoreConfig::toUpperPath(const StorePath & path) {
return upperLayer + "/" + path.to_string();
}
LocalOverlayStore::LocalOverlayStore(std::string_view scheme, PathView path, const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(path, params)
, LocalStoreConfig(params)
, LocalOverlayStoreConfig(scheme, path, params)
, Store(params)
, LocalFSStore(params)
, LocalStore(params)
, lowerStore(openStore(percentDecode(lowerStoreUri.get())).dynamic_pointer_cast<LocalFSStore>())
{
if (checkMount.get()) {
std::smatch match;
std::string mountInfo;
auto mounts = readFile("/proc/self/mounts");
auto regex = std::regex(R"((^|\n)overlay )" + realStoreDir.get() + R"( .*(\n|$))");
// Mount points can be stacked, so there might be multiple matching entries.
// Loop until the last match, which will be the current state of the mount point.
while (std::regex_search(mounts, match, regex)) {
mountInfo = match.str();
mounts = match.suffix();
}
auto checkOption = [&](std::string option, std::string value) {
return std::regex_search(mountInfo, std::regex("\\b" + option + "=" + value + "( |,)"));
};
auto expectedLowerDir = lowerStore->realStoreDir.get();
if (!checkOption("lowerdir", expectedLowerDir) || !checkOption("upperdir", upperLayer)) {
debug("expected lowerdir: %s", expectedLowerDir);
debug("expected upperdir: %s", upperLayer);
debug("actual mount: %s", mountInfo);
throw Error("overlay filesystem '%s' mounted incorrectly",
realStoreDir.get());
}
}
}
void LocalOverlayStore::registerDrvOutput(const Realisation & info)
{
// First do queryRealisation on lower layer to populate DB
auto res = lowerStore->queryRealisation(info.id);
if (res)
LocalStore::registerDrvOutput(*res);
LocalStore::registerDrvOutput(info);
}
void LocalOverlayStore::queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
LocalStore::queryPathInfoUncached(path,
{[this, path, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
try {
auto info = fut.get();
if (info)
return (*callbackPtr)(std::move(info));
} catch (...) {
return callbackPtr->rethrow();
}
// If we don't have it, check lower store
lowerStore->queryPathInfo(path,
{[path, callbackPtr](std::future<ref<const ValidPathInfo>> fut) {
try {
(*callbackPtr)(fut.get().get_ptr());
} catch (...) {
return callbackPtr->rethrow();
}
}});
}});
}
void LocalOverlayStore::queryRealisationUncached(const DrvOutput & drvOutput,
Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
LocalStore::queryRealisationUncached(drvOutput,
{[this, drvOutput, callbackPtr](std::future<std::shared_ptr<const Realisation>> fut) {
try {
auto info = fut.get();
if (info)
return (*callbackPtr)(std::move(info));
} catch (...) {
return callbackPtr->rethrow();
}
// If we don't have it, check lower store
lowerStore->queryRealisation(drvOutput,
{[callbackPtr](std::future<std::shared_ptr<const Realisation>> fut) {
try {
(*callbackPtr)(fut.get());
} catch (...) {
return callbackPtr->rethrow();
}
}});
}});
}
bool LocalOverlayStore::isValidPathUncached(const StorePath & path)
{
auto res = LocalStore::isValidPathUncached(path);
if (res) return res;
res = lowerStore->isValidPath(path);
if (res) {
// Get path info from lower store so upper DB genuinely has it.
auto p = lowerStore->queryPathInfo(path);
// Recurse on references, syncing the entire closure.
for (auto & r : p->references)
if (r != path)
isValidPath(r);
LocalStore::registerValidPath(*p);
}
return res;
}
void LocalOverlayStore::queryReferrers(const StorePath & path, StorePathSet & referrers)
{
LocalStore::queryReferrers(path, referrers);
lowerStore->queryReferrers(path, referrers);
}
void LocalOverlayStore::queryGCReferrers(const StorePath & path, StorePathSet & referrers)
{
LocalStore::queryReferrers(path, referrers);
}
StorePathSet LocalOverlayStore::queryValidDerivers(const StorePath & path)
{
auto res = LocalStore::queryValidDerivers(path);
for (auto p : lowerStore->queryValidDerivers(path))
res.insert(p);
return res;
}
std::optional<StorePath> LocalOverlayStore::queryPathFromHashPart(const std::string & hashPart)
{
auto res = LocalStore::queryPathFromHashPart(hashPart);
if (res)
return res;
else
return lowerStore->queryPathFromHashPart(hashPart);
}
void LocalOverlayStore::registerValidPaths(const ValidPathInfos & infos)
{
// First, get any from lower store so we merge
{
StorePathSet notInUpper;
for (auto & [p, _] : infos)
if (!LocalStore::isValidPathUncached(p)) // avoid divergence
notInUpper.insert(p);
auto pathsInLower = lowerStore->queryValidPaths(notInUpper);
ValidPathInfos inLower;
for (auto & p : pathsInLower)
inLower.insert_or_assign(p, *lowerStore->queryPathInfo(p));
LocalStore::registerValidPaths(inLower);
}
// Then do original request
LocalStore::registerValidPaths(infos);
}
void LocalOverlayStore::collectGarbage(const GCOptions & options, GCResults & results)
{
LocalStore::collectGarbage(options, results);
remountIfNecessary();
}
void LocalOverlayStore::deleteStorePath(const Path & path, uint64_t & bytesFreed)
{
auto mergedDir = realStoreDir.get() + "/";
if (path.substr(0, mergedDir.length()) != mergedDir) {
warn("local-overlay: unexpected gc path '%s' ", path);
return;
}
StorePath storePath = {path.substr(mergedDir.length())};
auto upperPath = toUpperPath(storePath);
if (pathExists(upperPath)) {
debug("upper exists: %s", path);
if (lowerStore->isValidPath(storePath)) {
debug("lower exists: %s", storePath.to_string());
// Path also exists in lower store.
// We must delete via upper layer to avoid creating a whiteout.
deletePath(upperPath, bytesFreed);
_remountRequired = true;
} else {
// Path does not exist in lower store.
// So we can delete via overlayfs and not need to remount.
LocalStore::deleteStorePath(path, bytesFreed);
}
}
}
void LocalOverlayStore::optimiseStore()
{
Activity act(*logger, actOptimiseStore);
// Note for LocalOverlayStore, queryAllValidPaths only returns paths in upper layer
auto paths = queryAllValidPaths();
act.progress(0, paths.size());
uint64_t done = 0;
for (auto & path : paths) {
if (lowerStore->isValidPath(path)) {
uint64_t bytesFreed = 0;
// Deduplicate store path
deleteStorePath(Store::toRealPath(path), bytesFreed);
}
done++;
act.progress(done, paths.size());
}
remountIfNecessary();
}
LocalStore::VerificationResult LocalOverlayStore::verifyAllValidPaths(RepairFlag repair)
{
StorePathSet done;
auto existsInStoreDir = [&](const StorePath & storePath) {
return pathExists(realStoreDir.get() + "/" + storePath.to_string());
};
bool errors = false;
StorePathSet validPaths;
for (auto & i : queryAllValidPaths())
verifyPath(i, existsInStoreDir, done, validPaths, repair, errors);
return {
.errors = errors,
.validPaths = validPaths,
};
}
void LocalOverlayStore::remountIfNecessary()
{
if (!_remountRequired) return;
if (remountHook.get().empty()) {
warn("'%s' needs remounting, set remount-hook to do this automatically", realStoreDir.get());
} else {
runProgram(remountHook, false, {realStoreDir});
}
_remountRequired = false;
}
static RegisterStoreImplementation<LocalOverlayStore, LocalOverlayStoreConfig> regLocalOverlayStore;
}


@ -0,0 +1,218 @@
#include "local-store.hh"
namespace nix {
/**
* Configuration for `LocalOverlayStore`.
*/
struct LocalOverlayStoreConfig : virtual LocalStoreConfig
{
LocalOverlayStoreConfig(const StringMap & params)
: LocalOverlayStoreConfig("local-overlay", "", params)
{ }
LocalOverlayStoreConfig(std::string_view scheme, PathView path, const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(path, params)
, LocalStoreConfig(scheme, path, params)
{
}
const Setting<std::string> lowerStoreUri{(StoreConfig*) this, "", "lower-store",
R"(
[Store URL](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
for the lower store. The default is `auto` (i.e. use the Nix daemon or `/nix/store` directly).
Must be a store with a store dir on the file system.
Must be used as OverlayFS lower layer for this store's store dir.
)"};
const PathSetting upperLayer{(StoreConfig*) this, "", "upper-layer",
R"(
Directory containing the OverlayFS upper layer for this store's store dir.
)"};
Setting<bool> checkMount{(StoreConfig*) this, true, "check-mount",
R"(
Check that the overlay filesystem is correctly mounted.
Nix does not manage the overlayfs mount point itself, but the correct
functioning of the overlay store does depend on this mount point being set up
correctly. Rather than just assume this is the case, check that the lowerdir
and upperdir options are what we expect them to be. This check is on by
default, but can be disabled if needed.
)"};
const PathSetting remountHook{(StoreConfig*) this, "", "remount-hook",
R"(
Script or other executable to run when overlay filesystem needs remounting.
This is occasionally necessary when deleting a store path that exists in both upper and lower layers.
In such a situation, bypassing OverlayFS and deleting the path in the upper layer directly
is the only way to perform the deletion without creating a "whiteout".
However this causes the OverlayFS kernel data structures to get out-of-sync,
and can lead to 'stale file handle' errors; remounting solves the problem.
The store directory is passed as an argument to the invoked executable.
)"};
const std::string name() override { return "Experimental Local Overlay Store"; }
std::optional<ExperimentalFeature> experimentalFeature() const override
{
return ExperimentalFeature::LocalOverlayStore;
}
static std::set<std::string> uriSchemes()
{
return { "local-overlay" };
}
std::string doc() override;
protected:
/**
* @return The host OS path corresponding to the store path for the
* upper layer.
*
* @note There is no guarantee a store object is actually stored
* at that file path. It might be stored in the lower layer instead,
* or it might not be part of this store at all.
*/
Path toUpperPath(const StorePath & path);
};
/**
* Variation of local store using OverlayFS for the store directory.
*
* Documentation on overridden methods states how they differ from their
* `LocalStore` counterparts.
*/
class LocalOverlayStore : public virtual LocalOverlayStoreConfig, public virtual LocalStore
{
/**
* The store beneath us.
*
* Our store dir should be an overlay fs where the lower layer
* is that store's store dir, and the upper layer is some
* scratch storage just for us.
*/
ref<LocalFSStore> lowerStore;
public:
LocalOverlayStore(const Params & params)
: LocalOverlayStore("local-overlay", "", params)
{
}
LocalOverlayStore(std::string_view scheme, PathView path, const Params & params);
std::string getUri() override
{
return "local-overlay://";
}
private:
/**
* First copy up any lower store realisation with the same key, so we
* merge rather than mask it.
*/
void registerDrvOutput(const Realisation & info) override;
/**
* Check lower store if upper DB does not have.
*/
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
/**
* Check lower store if upper DB does not have.
*
* In addition, copy up metadata for lower store objects (and their
* closure). (I.e. Optimistically cache in the upper DB.)
*/
bool isValidPathUncached(const StorePath & path) override;
/**
* Check the lower store and upper DB.
*/
void queryReferrers(const StorePath & path, StorePathSet & referrers) override;
/**
* Check the lower store and upper DB.
*/
StorePathSet queryValidDerivers(const StorePath & path) override;
/**
* Check lower store if upper DB does not have.
*/
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
/**
* First copy up any lower store realisation with the same key, so we
* merge rather than mask it.
*/
void registerValidPaths(const ValidPathInfos & infos) override;
/**
* Check lower store if upper DB does not have.
*/
void queryRealisationUncached(const DrvOutput&,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
/**
* Call `remountIfNecessary` after collecting garbage normally.
*/
void collectGarbage(const GCOptions & options, GCResults & results) override;
/**
* Check which layers the store object exists in to try to avoid
* needing to remount.
*/
void deleteStorePath(const Path & path, uint64_t & bytesFreed) override;
/**
* Deduplicate by removing store objects from the upper layer that
* are now in the lower layer.
*
* Operations on a layered store will not cause duplications, but addition of
* new store objects to the lower layer can still induce them
* (there is no way to prevent that). This cleans up those
* duplications.
*
* @note We do not yet optimise the upper layer in the normal way
* (hardlink) yet. We would like to, but it requires more
* refactoring of existing code to support this sustainably.
*/
void optimiseStore() override;
/**
* Check all paths registered in the upper DB.
*
* Note that this includes store objects that reside in either overlayfs layer;
* just enumerating the contents of the upper layer would skip them.
*
* We don't verify the contents of both layers on the assumption that the lower layer is far bigger,
* and also the observation that anything not in the upper DB is something the overlayfs doesn't yet care about.
*/
VerificationResult verifyAllValidPaths(RepairFlag repair) override;
/**
* Deletion only affects the upper layer, so we ignore lower-layer referrers.
*/
void queryGCReferrers(const StorePath & path, StorePathSet & referrers) override;
/**
* Call the `remountHook` if we have done something such that the
* OverlayFS needed to be remounted. See that hook's user-facing
* documentation for further details.
*/
void remountIfNecessary();
/**
* State for `remountIfNecessary`
*/
std::atomic_bool _remountRequired = false;
};
}


@ -0,0 +1,131 @@
R"(
**Store URL format**: `local-overlay`
This store type is a variation of the [local store] designed to leverage Linux's [Overlay Filesystem](https://docs.kernel.org/filesystems/overlayfs.html) (OverlayFS for short).
Just as OverlayFS combines a lower and upper filesystem by treating the upper one as a patch against the lower, the local overlay store combines a lower store with an upper almost-[local store].
("almost" because while the upper fileystems for OverlayFS is valid on its own, the upper almost-store is not a valid local store on its own because some references will dangle.)
To use this store, you will first need to configure an OverlayFS mountpoint [appropriately](#example-filesystem-layout) as Nix will not do this for you (though it will verify the mountpoint is configured correctly).
### Conceptual parts of a local overlay store
*This is a more abstract/conceptual description of the parts of a layered store, an authoritative reference.
For more "practical" instructions, see the worked-out example in the next subsection.*
The parts of a local overlay store are as follows:
- **Lower store**:
> Specified with the [`lower-store`](#store-experimental-local-overlay-store-lower-store) setting.
This is any store implementation that includes a store directory as part of the native operating system filesystem.
For example, this could be a [local store], [local daemon store], or even another local overlay store.
The local overlay store never tries to modify the lower store in any way.
Something else could modify the lower store, but there are restrictions on this:
Nix itself requires that this store only grow, and not change in other ways.
For example, new store objects can be added, but deleting or modifying store objects is not allowed in general, because that will confuse and corrupt any local overlay store using those objects.
(In addition, the underlying filesystem overlay mechanism may impose additional restrictions, see below.)
The lower store must not change while it is mounted as part of an overlay store.
To ensure it does not, you might want to mount the store directory read-only (which then requires the [read-only] parameter to be set to `true`).
- **Lower store directory**:
> Specified with `lower-store.real` setting.
This is the directory used/exposed by the lower store.
As specified above, Nix requires that the lower store only grow, and not change in other ways.
Linux's OverlayFS in addition imposes the further requirement that this directory cannot change at all.
That means that, while any local overlay store exists that is using this store as a lower store, this directory must not change.
- **Lower metadata source**:
> Not directly specified.
> A consequence of the `lower-store` setting, depending on the type of lower store chosen.
This is abstract, just some way to read the metadata of lower store [store objects][store object].
For example it could be a SQLite database (for the [local store]), or a socket connection (for the [local daemon store]).
This need not be writable.
As stated above a local overlay store never tries to modify its lower store.
The lower store's metadata is considered part of the lower store, just as the store's [file system objects][file system object] that appear in the store directory are.
- **Upper almost-store**:
> Not directly specified.
> Instead the constituent parts are independently specified as described below.
This is almost but not quite just a [local store].
That is because, taken in isolation rather than as part of a local overlay store, it would appear corrupted.
But combined with everything else as part of an overlay local store, it is valid.
- **Upper layer directory**:
> Specified with [`upper-layer`](#store-experimental-local-overlay-store-upper-layer) setting.
This contains additional [store objects][store object]
(or, strictly speaking, their [file system objects][file system object] that the local overlay store will extend the lower store with).
- **Upper store directory**:
> Specified with the [`real`](#store-experimental-local-overlay-store-real) setting.
> This is the same as the base local store setting, and can also be indirectly specified with the [`root`](#store-experimental-local-overlay-store-root) setting.
This contains all the store objects from each of the two directories.
The lower store directory and upper layer directory are combined via OverlayFS to create this directory.
Nix doesn't do this itself, because it typically wouldn't have the permissions to do so, so it is the responsibility of the user to set this up first.
Nix can, however, optionally check that the OverlayFS mount settings appear as expected, matching Nix's own settings.
- **Upper SQLite database**:
> Not directly specified.
> The location of the database instead depends on the [`state`](#store-experimental-local-overlay-store-state) setting.
> It is always `${state}/db`.
This contains the metadata of all of the upper layer [store objects][store object] (everything beyond their file system objects), and also duplicate copies of some lower layer store objects' metadata.
The duplication is so the metadata for the [closure](@docroot@/glossary.md#gloss-closure) of upper layer [store objects][store object] can be found entirely within the upper layer.
(This allows us to use the same SQL Schema as the [local store]'s SQLite database, as foreign keys in that schema enforce closure metadata to be self-contained in this way.)
[file system object]: @docroot@/store/file-system-object.md
[store object]: @docroot@/store/store-object.md
### Example filesystem layout
Here is a worked out example of usage, following the concepts in the previous section.
Say we have the following paths:
- `/mnt/example/merged-store/nix/store`
- `/mnt/example/store-a/nix/store`
- `/mnt/example/store-b`
Then the following store URI can be used to access a local-overlay store at `/mnt/example/merged-store`:
```
local-overlay://?root=/mnt/example/merged-store&lower-store=/mnt/example/store-a&upper-layer=/mnt/example/store-b
```
The lower store directory is located at `/mnt/example/store-a/nix/store`, while the upper layer is at `/mnt/example/store-b`.
Before accessing the overlay store you will need to ensure the OverlayFS mount is set up correctly:
```shell
mount -t overlay overlay \
-o lowerdir="/mnt/example/store-a/nix/store" \
-o upperdir="/mnt/example/store-b" \
-o workdir="/mnt/example/workdir" \
"/mnt/example/merged-store/nix/store"
```
Note that OverlayFS requires `/mnt/example/workdir` to be on the same volume as the `upperdir`.
By default, Nix will check that the mountpoint has been set up correctly and fail with an error if it has not.
You can override this behaviour by passing [`check-mount=false`](#store-experimental-local-overlay-store-check-mount) if you need to.
)"

File diff suppressed because it is too large


@ -1,13 +1,12 @@
#pragma once
///@file
#include "sqlite.hh"
#include "pathlocks.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include "gc-store.hh"
#include "indirect-root-store.hh"
#include "sync.hh"
#include "util.hh"
#include <chrono>
#include <future>
@ -18,10 +17,14 @@
namespace nix {
/* Nix store and database schema version. Version 1 (or 0) was Nix <=
0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. */
/**
* Nix store and database schema version.
*
* Version 1 (or 0) was Nix <=
* 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
* Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
* Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0.
*/
const int nixSchemaVersion = 10;
@ -29,58 +32,87 @@ struct OptimiseStats
{
unsigned long filesLinked = 0;
uint64_t bytesFreed = 0;
uint64_t blocksFreed = 0;
};
struct LocalStoreConfig : virtual LocalFSStoreConfig
{
using LocalFSStoreConfig::LocalFSStoreConfig;
Setting<bool> requireSigs{(StoreConfig*) this,
LocalStoreConfig(
std::string_view scheme,
std::string_view authority,
const Params & params);
Setting<bool> requireSigs{this,
settings.requireSigs,
"require-sigs", "whether store paths should have a trusted signature on import"};
"require-sigs",
"Whether store paths copied into this store should have a trusted signature."};
Setting<bool> readOnly{this,
false,
"read-only",
R"(
Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.
Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.
Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.
> **Warning**
> Do not use this unless the filesystem is read-only.
>
> Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
> While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
)"};
const std::string name() override { return "Local Store"; }
static std::set<std::string> uriSchemes()
{ return {"local"}; }
std::string doc() override;
};
class LocalStore : public virtual LocalStoreConfig, public virtual LocalFSStore, public virtual GcStore
class LocalStore : public virtual LocalStoreConfig
, public virtual IndirectRootStore
, public virtual GcStore
{
private:
/* Lock file used for upgrading. */
/**
* Lock file used for upgrading.
*/
AutoCloseFD globalLock;
struct State
{
/* The SQLite database object. */
/**
* The SQLite database object.
*/
SQLite db;
struct Stmts;
std::unique_ptr<Stmts> stmts;
/* The global GC lock */
AutoCloseFD fdGCLock;
/* The file to which we write our temporary roots. */
AutoCloseFD fdTempRoots;
/* Connection to the garbage collector. */
AutoCloseFD fdRootsSocket;
/* The last time we checked whether to do an auto-GC, or an
auto-GC finished. */
/**
* The last time we checked whether to do an auto-GC, or an
* auto-GC finished.
*/
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
/* Whether auto-GC is running. If so, get gcFuture to wait for
the GC to finish. */
/**
* Whether auto-GC is running. If so, get gcFuture to wait for
* the GC to finish.
*/
bool gcRunning = false;
std::shared_future<void> gcFuture;
/* How much disk space was available after the previous
auto-GC. If the current available disk space is below
minFree but not much below availAfterGC, then there is no
point in starting a new GC. */
/**
* How much disk space was available after the previous
* auto-GC. If the current available disk space is below
* minFree but not much below availAfterGC, then there is no
* point in starting a new GC.
*/
uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();
std::unique_ptr<PublicKeys> publicKeys;
@ -103,16 +135,26 @@ private:
public:
// Hack for build-remote.cc.
/**
* Hack for build-remote.cc.
*/
PathSet locksHeld;
/* Initialise the local store, upgrading the schema if
necessary. */
/**
* Initialise the local store, upgrading the schema if
* necessary.
*/
LocalStore(const Params & params);
LocalStore(
std::string_view scheme,
PathView path,
const Params & params);
~LocalStore();
/* Implementations of abstract store API methods. */
/**
* Implementations of abstract store API methods.
*/
std::string getUri() override;
@ -130,32 +172,56 @@ public:
StorePathSet queryValidDerivers(const StorePath & path) override;
std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path) override;
std::map<std::string, std::optional<StorePath>> queryStaticPartialDerivationOutputMap(const StorePath & path) override;
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;
void querySubstitutablePathInfos(const StorePathCAMap & paths,
SubstitutablePathInfos & infos) override;
bool pathInfoIsUntrusted(const ValidPathInfo &) override;
bool realisationIsUntrusted(const Realisation & ) override;
void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, std::string_view name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references) override;
StorePath addTextToStore(
StorePath addToStoreFromDump(
Source & dump,
std::string_view name,
std::string_view s,
FileSerialisationMethod dumpMethod,
ContentAddressMethod hashMethod,
HashAlgorithm hashAlgo,
const StorePathSet & references,
RepairFlag repair) override;
void addTempRoot(const StorePath & path) override;
private:
void createTempRootsFile();
/**
* The file to which we write our temporary roots.
*/
Sync<AutoCloseFD> _fdTempRoots;
/**
* The global GC lock.
*/
Sync<AutoCloseFD> _fdGCLock;
/**
* Connection to the garbage collector.
*/
Sync<AutoCloseFD> _fdRootsSocket;
public:
/**
* Implementation of IndirectRootStore::addIndirectRoot().
*
* The weak reference is merely a symlink to `path' from
* /nix/var/nix/gcroots/auto/<hash of `path'>.
*/
void addIndirectRoot(const Path & path) override;
private:
@ -170,42 +236,96 @@ public:
void collectGarbage(const GCOptions & options, GCResults & results) override;
/* Optimise the disk space usage of the Nix store by hard-linking
files with the same contents. */
/**
* Called by `collectGarbage` to trace in reverse.
*
* Using this rather than `queryReferrers` directly allows us to
* fine-tune which referrers we consider for garbage collection;
* some store implementations take advantage of this.
*/
virtual void queryGCReferrers(const StorePath & path, StorePathSet & referrers)
{
return queryReferrers(path, referrers);
}
/**
* Called by `collectGarbage` to recursively delete a path.
* The default implementation simply calls `deletePath`, but it can be
* overridden by stores that wish to provide their own deletion behaviour.
*/
virtual void deleteStorePath(const Path & path, uint64_t & bytesFreed);
/**
* Optimise the disk space usage of the Nix store by hard-linking
* files with the same contents.
*/
void optimiseStore(OptimiseStats & stats);
void optimiseStore() override;
/* Optimise a single store path. Optionally, test the encountered
symlinks for corruption. */
/**
* Optimise a single store path. Optionally, test the encountered
* symlinks for corruption.
*/
void optimisePath(const Path & path, RepairFlag repair);
bool verifyStore(bool checkContents, RepairFlag repair) override;
/* Register the validity of a path, i.e., that `path' exists, that
the paths referenced by it exists, and in the case of an output
path of a derivation, that it has been produced by a successful
execution of the derivation (or something equivalent). Also
register the hash of the file system contents of the path. The
hash must be a SHA-256 hash. */
protected:
/**
* Result of `verifyAllValidPaths`
*/
struct VerificationResult {
/**
* Whether any errors were encountered
*/
bool errors;
/**
* A set of so-far valid paths. The store objects pointed to by
* those paths are suitable for further validation checking.
*/
StorePathSet validPaths;
};
/**
* First, unconditional step of `verifyStore`
*/
virtual VerificationResult verifyAllValidPaths(RepairFlag repair);
public:
/**
* Register the validity of a path, i.e., that `path` exists, that
* the paths referenced by it exist, and in the case of an output
* path of a derivation, that it has been produced by a successful
* execution of the derivation (or something equivalent). Also
* register the hash of the file system contents of the path. The
* hash must be a SHA-256 hash.
*/
void registerValidPath(const ValidPathInfo & info);
void registerValidPaths(const ValidPathInfos & infos);
virtual void registerValidPaths(const ValidPathInfos & infos);
unsigned int getProtocol() override;
void vacuumDB();
std::optional<TrustedFlag> isTrustedClient() override;
void repairPath(const StorePath & path) override;
void vacuumDB();
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
/* If free disk space in /nix/store if below minFree, delete
garbage until it exceeds maxFree. */
/**
* If free disk space in /nix/store is below minFree, delete
* garbage until it exceeds maxFree.
*/
void autoGC(bool sync = true);
/* Register the store path 'output' as the output named 'outputName' of
derivation 'deriver'. */
/**
* Register the store path 'output' as the output named 'outputName' of
* derivation 'deriver'.
*/
void registerDrvOutput(const Realisation & info) override;
void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
void cacheDrvOutputMapping(
@ -221,8 +341,17 @@ public:
std::optional<std::string> getVersion() override;
protected:
void verifyPath(const StorePath & path, std::function<bool(const StorePath &)> existsInStoreDir,
StorePathSet & done, StorePathSet & validPaths, RepairFlag repair, bool & errors);
private:
/**
* Retrieve the current version of the database schema.
* If the database does not exist yet, the version returned will be 0.
*/
int getSchema();
void openDB(State & state, bool create);
@ -235,12 +364,11 @@ private:
void invalidatePath(State & state, const StorePath & path);
/* Delete a path from the Nix store. */
/**
* Delete a path from the Nix store.
*/
void invalidatePathChecked(const StorePath & path);
void verifyPath(const Path & path, const StringSet & store,
PathSet & done, StorePathSet & validPaths, RepairFlag repair, bool & errors);
std::shared_ptr<const ValidPathInfo> queryPathInfoInternal(State & state, const StorePath & path);
void updatePathInfo(State & state, const ValidPathInfo & info);
@ -250,15 +378,13 @@ private:
PathSet queryValidPathsOld();
ValidPathInfo queryPathInfoOld(const Path & path);
void findRoots(const Path & path, unsigned char type, Roots & roots);
void findRoots(const Path & path, std::filesystem::file_type type, Roots & roots);
void findRootsNoTemp(Roots & roots, bool censor);
void findRuntimeRoots(Roots & roots, bool censor);
std::pair<Path, AutoCloseFD> createTempDirInStore();
void checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv);
std::pair<std::filesystem::path, AutoCloseFD> createTempDirInStore();
typedef std::unordered_set<ino_t> InodeHash;
@ -270,26 +396,13 @@ private:
bool isValidPath_(State & state, const StorePath & path);
void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);
/* Add signatures to a ValidPathInfo or Realisation using the secret keys
specified by the secret-key-files option. */
/**
* Add signatures to a ValidPathInfo or Realisation using the secret keys
* specified by the secret-key-files option.
*/
void signPathInfo(ValidPathInfo & info);
void signRealisation(Realisation &);
void createUser(const std::string & userName, uid_t userId) override;
// XXX: Make a generic `Store` method
FixedOutputHash hashCAPath(
const FileIngestionMethod & method,
const HashType & hashType,
const StorePath & path);
FixedOutputHash hashCAPath(
const FileIngestionMethod & method,
const HashType & hashType,
const Path & path,
const std::string_view pathHash
);
void addBuildLog(const StorePath & drvPath, std::string_view log) override;
friend struct LocalDerivationGoal;
@ -298,33 +411,4 @@ private:
friend struct DerivationGoal;
};
typedef std::pair<dev_t, ino_t> Inode;
typedef std::set<Inode> InodesSeen;
/* "Fix", or canonicalise, the meta-data of the files in a store path
after it has been built. In particular:
- the last modification date on each file is set to 1 (i.e.,
00:00:01 1/1/1970 UTC)
- the permissions are set of 444 or 555 (i.e., read-only with or
without execute permission; setuid bits etc. are cleared)
- the owner and group are set to the Nix user and group, if we're
running as root.
If uidRange is not empty, this function will throw an error if it
encounters files owned by a user outside of the closed interval
[uidRange->first, uidRange->second].
*/
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen);
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange);
void canonicaliseTimestampAndPermissions(const Path & path);
MakeError(PathInUse, Error);
}


@ -0,0 +1,39 @@
R"(
**Store URL format**: `local`, *root*
This store type accesses a Nix store in the local filesystem directly
(i.e. not via the Nix daemon). *root* is an absolute path that is
prefixed to other directories such as the Nix store directory. The
store pseudo-URL `local` denotes a store that uses `/` as its root
directory.
A store that uses a *root* other than `/` is called a *chroot
store*. With such stores, the store directory is "logically" still
`/nix/store`, so programs stored in them can only be built and
executed by `chroot`-ing into *root*. Chroot stores only support
building and running on Linux when [`mount namespaces`](https://man7.org/linux/man-pages/man7/mount_namespaces.7.html) and [`user namespaces`](https://man7.org/linux/man-pages/man7/user_namespaces.7.html) are
enabled.
For example, the following uses `/tmp/root` as the chroot environment
to build or download `nixpkgs#hello` and then execute it:
```console
# nix run --store /tmp/root nixpkgs#hello
Hello, world!
```
Here, the "physical" store location is `/tmp/root/nix/store`, and
Nix's store metadata is in `/tmp/root/nix/var/nix/db`.
It is also possible, but not recommended, to change the "logical"
location of the Nix store from its default of `/nix/store`. This makes
it impossible to use default substituters such as
`https://cache.nixos.org/`, and thus you may have to build everything
locally. Here is an example:
```console
# nix build --store 'local?store=/tmp/my-nix/store&state=/tmp/my-nix/state&log=/tmp/my-nix/log' nixpkgs#hello
```
)"


@ -5,71 +5,96 @@ libstore_NAME = libnixstore
libstore_DIR := $(d)
libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc)
ifdef HOST_UNIX
libstore_SOURCES += $(wildcard $(d)/unix/*.cc $(d)/unix/build/*.cc)
endif
ifdef HOST_LINUX
libstore_SOURCES += $(wildcard $(d)/linux/*.cc)
endif
ifdef HOST_WINDOWS
libstore_SOURCES += $(wildcard $(d)/windows/*.cc)
endif
libstore_LIBS = libutil
libstore_LDFLAGS += $(SQLITE3_LIBS) $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
libstore_LDFLAGS += $(SQLITE3_LIBS) $(LIBCURL_LIBS) $(THREAD_LDFLAGS)
ifdef HOST_LINUX
libstore_LDFLAGS += -ldl
libstore_LDFLAGS += -ldl
endif
ifdef HOST_DARWIN
libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb
ifdef HOST_WINDOWS
libstore_LDFLAGS += -lws2_32
endif
$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))
ifeq ($(ENABLE_S3), 1)
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
endif
ifdef HOST_SOLARIS
libstore_LDFLAGS += -lsocket
libstore_LDFLAGS += -lsocket
endif
ifeq ($(HAVE_SECCOMP), 1)
libstore_LDFLAGS += $(LIBSECCOMP_LIBS)
libstore_LDFLAGS += $(LIBSECCOMP_LIBS)
endif
# Not just for this library itself, but also for downstream libraries using this library
INCLUDE_libstore := -I $(d) -I $(d)/build
ifdef HOST_UNIX
INCLUDE_libstore += -I $(d)/unix -I $(d)/unix/build
endif
ifdef HOST_LINUX
INCLUDE_libstore += -I $(d)/linux
endif
ifdef HOST_WINDOWS
INCLUDE_libstore += -I $(d)/windows
endif
ifdef HOST_WINDOWS
NIX_ROOT = N:\\\\
else
NIX_ROOT =
endif
# Prefix all but `NIX_STORE_DIR`, since we aren't doing a local store
# yet, so a "logical" store dir that is the same as on Unix is preferred.
#
# Also, it keeps the unit tests working.
libstore_CXXFLAGS += \
-I src/libutil -I src/libstore -I src/libstore/build \
-DNIX_PREFIX=\"$(prefix)\" \
$(INCLUDE_libutil) $(INCLUDE_libstore) $(INCLUDE_libstore) \
-DNIX_PREFIX=\"$(NIX_ROOT)$(prefix)\" \
-DNIX_STORE_DIR=\"$(storedir)\" \
-DNIX_DATA_DIR=\"$(datadir)\" \
-DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
-DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
-DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
-DNIX_BIN_DIR=\"$(bindir)\" \
-DNIX_MAN_DIR=\"$(mandir)\" \
-DLSOF=\"$(lsof)\"
-DNIX_DATA_DIR=\"$(NIX_ROOT)$(datadir)\" \
-DNIX_STATE_DIR=\"$(NIX_ROOT)$(localstatedir)/nix\" \
-DNIX_LOG_DIR=\"$(NIX_ROOT)$(localstatedir)/log/nix\" \
-DNIX_CONF_DIR=\"$(NIX_ROOT)$(sysconfdir)/nix\" \
-DNIX_MAN_DIR=\"$(NIX_ROOT)$(mandir)\" \
-DLSOF=\"$(NIX_ROOT)$(lsof)\"
ifeq ($(embedded_sandbox_shell),yes)
libstore_CXXFLAGS += -DSANDBOX_SHELL=\"__embedded_sandbox_shell__\"
$(d)/build/local-derivation-goal.cc: $(d)/embedded-sandbox-shell.gen.hh
$(d)/unix/build/local-derivation-goal.cc: $(d)/unix/embedded-sandbox-shell.gen.hh
$(d)/embedded-sandbox-shell.gen.hh: $(sandbox_shell)
$(d)/unix/embedded-sandbox-shell.gen.hh: $(sandbox_shell)
$(trace-gen) hexdump -v -e '1/1 "0x%x," "\n"' < $< > $@.tmp
@mv $@.tmp $@
else
ifneq ($(sandbox_shell),)
libstore_CXXFLAGS += -DSANDBOX_SHELL="\"$(sandbox_shell)\""
endif
ifneq ($(sandbox_shell),)
libstore_CXXFLAGS += -DSANDBOX_SHELL="\"$(sandbox_shell)\""
endif
endif
$(d)/local-store.cc: $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
$(d)/build.cc:
%.gen.hh: %
@echo 'R"foo(' >> $@.tmp
$(trace-gen) cat $< >> $@.tmp
@echo ')foo"' >> $@.tmp
@mv $@.tmp $@
$(d)/unix/build.cc:
clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
$(eval $(call install-file-in, $(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))
$(eval $(call install-file-in, $(buildprefix)$(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))
$(foreach i, $(wildcard src/libstore/builtins/*.hh), \
$(eval $(call install-file-in, $(i), $(includedir)/nix/builtins, 0644)))

src/libstore/log-store.cc

@ -0,0 +1,12 @@
#include "log-store.hh"
namespace nix {
std::optional<std::string> LogStore::getBuildLog(const StorePath & path) {
auto maybePath = getBuildDerivationPath(path);
if (!maybePath)
return std::nullopt;
return getBuildLogExact(maybePath.value());
}
}
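For orientation, this appears to be the lookup behind commands such as `nix log`, which accept an output path or installable and fall back to its deriver (a hedged example):

```console
# nix log nixpkgs#hello
```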


@ -1,4 +1,5 @@
#pragma once
///@file
#include "store-api.hh"
@ -9,9 +10,13 @@ struct LogStore : public virtual Store
{
inline static std::string operationName = "Build log storage and retrieval";
/* Return the build log of the specified store path, if available,
or null otherwise. */
virtual std::optional<std::string> getBuildLog(const StorePath & path) = 0;
/**
* Return the build log of the specified store path, if available,
* or null otherwise.
*/
std::optional<std::string> getBuildLog(const StorePath & path);
virtual std::optional<std::string> getBuildLogExact(const StorePath & path) = 0;
virtual void addBuildLog(const StorePath & path, std::string_view log) = 0;


@ -1,5 +1,4 @@
#include "machines.hh"
#include "util.hh"
#include "globals.hh"
#include "store-api.hh"
@ -7,7 +6,8 @@
namespace nix {
Machine::Machine(decltype(storeUri) storeUri,
Machine::Machine(
const std::string & storeUri,
decltype(systemTypes) systemTypes,
decltype(sshKey) sshKey,
decltype(maxJobs) maxJobs,
@ -15,7 +15,7 @@ Machine::Machine(decltype(storeUri) storeUri,
decltype(supportedFeatures) supportedFeatures,
decltype(mandatoryFeatures) mandatoryFeatures,
decltype(sshPublicHostKey) sshPublicHostKey) :
storeUri(
storeUri(StoreReference::parse(
// Backwards compatibility: if the URI is schemeless, is not a path,
// and is not one of the special store connection words, prepend
// ssh://.
@ -29,15 +29,23 @@ Machine::Machine(decltype(storeUri) storeUri,
|| hasPrefix(storeUri, "local?")
|| hasPrefix(storeUri, "?")
? storeUri
: "ssh://" + storeUri),
: "ssh://" + storeUri)),
systemTypes(systemTypes),
sshKey(sshKey),
maxJobs(maxJobs),
speedFactor(std::max(1U, speedFactor)),
speedFactor(speedFactor == 0.0f ? 1.0f : std::move(speedFactor)),
supportedFeatures(supportedFeatures),
mandatoryFeatures(mandatoryFeatures),
sshPublicHostKey(sshPublicHostKey)
{}
{
if (speedFactor < 0.0)
throw UsageError("speed factor must be >= 0");
}
bool Machine::systemSupported(const std::string & system) const
{
return system == "builtin" || (systemTypes.count(system) > 0);
}
bool Machine::allSupported(const std::set<std::string> & features) const
{
@ -56,23 +64,26 @@ bool Machine::mandatoryMet(const std::set<std::string> & features) const
});
}
ref<Store> Machine::openStore() const
StoreReference Machine::completeStoreReference() const
{
Store::Params storeParams;
if (hasPrefix(storeUri, "ssh://")) {
storeParams["max-connections"] = "1";
storeParams["log-fd"] = "4";
auto storeUri = this->storeUri;
auto * generic = std::get_if<StoreReference::Specified>(&storeUri.variant);
if (generic && generic->scheme == "ssh") {
storeUri.params["max-connections"] = "1";
storeUri.params["log-fd"] = "4";
}
if (hasPrefix(storeUri, "ssh://") || hasPrefix(storeUri, "ssh-ng://")) {
if (generic && (generic->scheme == "ssh" || generic->scheme == "ssh-ng")) {
if (sshKey != "")
storeParams["ssh-key"] = sshKey;
storeUri.params["ssh-key"] = sshKey;
if (sshPublicHostKey != "")
storeParams["base64-ssh-public-host-key"] = sshPublicHostKey;
storeUri.params["base64-ssh-public-host-key"] = sshPublicHostKey;
}
{
auto & fs = storeParams["system-features"];
auto & fs = storeUri.params["system-features"];
auto append = [&](auto feats) {
for (auto & f : feats) {
if (fs.size() > 0) fs += ' ';
@ -83,7 +94,12 @@ ref<Store> Machine::openStore() const
append(mandatoryFeatures);
}
return nix::openStore(storeUri, storeParams);
return storeUri;
}
ref<Store> Machine::openStore() const
{
return nix::openStore(completeStoreReference());
}
static std::vector<std::string> expandBuilderLines(const std::string & builders)
@ -115,7 +131,7 @@ static std::vector<std::string> expandBuilderLines(const std::string & builders)
return result;
}
static Machine parseBuilderLine(const std::string & line)
static Machine parseBuilderLine(const std::set<std::string> & defaultSystems, const std::string & line)
{
const auto tokens = tokenizeString<std::vector<std::string>>(line);
@ -131,6 +147,14 @@ static Machine parseBuilderLine(const std::string & line)
return result.value();
};
auto parseFloatField = [&](size_t fieldIndex) {
const auto result = string2Float<float>(tokens[fieldIndex]);
if (!result) {
throw FormatError("bad machine specification: failed to convert column #%lu in a row: '%s' to 'float'", fieldIndex, line);
}
return result.value();
};
auto ensureBase64 = [&](size_t fieldIndex) {
const auto & str = tokens[fieldIndex];
try {
@ -144,29 +168,46 @@ static Machine parseBuilderLine(const std::string & line)
if (!isSet(0))
throw FormatError("bad machine specification: store URL was not found at the first column of a row: '%s'", line);
// TODO use designated initializers, once C++ supports those with
// custom constructors.
return {
// `storeUri`
tokens[0],
isSet(1) ? tokenizeString<std::vector<std::string>>(tokens[1], ",") : std::vector<std::string>{settings.thisSystem},
// `systemTypes`
isSet(1) ? tokenizeString<std::set<std::string>>(tokens[1], ",") : defaultSystems,
// `sshKey`
isSet(2) ? tokens[2] : "",
// `maxJobs`
isSet(3) ? parseUnsignedIntField(3) : 1U,
isSet(4) ? parseUnsignedIntField(4) : 1U,
// `speedFactor`
isSet(4) ? parseFloatField(4) : 1.0f,
// `supportedFeatures`
isSet(5) ? tokenizeString<std::set<std::string>>(tokens[5], ",") : std::set<std::string>{},
// `mandatoryFeatures`
isSet(6) ? tokenizeString<std::set<std::string>>(tokens[6], ",") : std::set<std::string>{},
// `sshPublicHostKey`
isSet(7) ? ensureBase64(7) : ""
};
}
static Machines parseBuilderLines(const std::vector<std::string> & builders)
static Machines parseBuilderLines(const std::set<std::string> & defaultSystems, const std::vector<std::string> & builders)
{
Machines result;
std::transform(builders.begin(), builders.end(), std::back_inserter(result), parseBuilderLine);
std::transform(
builders.begin(), builders.end(), std::back_inserter(result),
[&](auto && line) { return parseBuilderLine(defaultSystems, line); });
return result;
}
Machines Machine::parseConfig(const std::set<std::string> & defaultSystems, const std::string & s)
{
const auto builderLines = expandBuilderLines(s);
return parseBuilderLines(defaultSystems, builderLines);
}
Machines getMachines()
{
const auto builderLines = expandBuilderLines(settings.builders);
return parseBuilderLines(builderLines);
return Machine::parseConfig({settings.thisSystem}, settings.builders);
}
}


@ -1,28 +1,48 @@
#pragma once
///@file
#include "types.hh"
#include "ref.hh"
#include "store-reference.hh"
namespace nix {
class Store;
struct Machine;
typedef std::vector<Machine> Machines;
struct Machine {
const std::string storeUri;
const std::vector<std::string> systemTypes;
const StoreReference storeUri;
const std::set<std::string> systemTypes;
const std::string sshKey;
const unsigned int maxJobs;
const unsigned int speedFactor;
const float speedFactor;
const std::set<std::string> supportedFeatures;
const std::set<std::string> mandatoryFeatures;
const std::string sshPublicHostKey;
bool enabled = true;
/**
* @return Whether `system` is either `"builtin"` or in
* `systemTypes`.
*/
bool systemSupported(const std::string & system) const;
/**
* @return Whether `features` is a subset of the union of `supportedFeatures` and
* `mandatoryFeatures`
*/
bool allSupported(const std::set<std::string> & features) const;
/**
* @return Whether `mandatoryFeatures` is a subset of `features`
*/
bool mandatoryMet(const std::set<std::string> & features) const;
Machine(decltype(storeUri) storeUri,
Machine(
const std::string & storeUri,
decltype(systemTypes) systemTypes,
decltype(sshKey) sshKey,
decltype(maxJobs) maxJobs,
@ -31,13 +51,38 @@ struct Machine {
decltype(mandatoryFeatures) mandatoryFeatures,
decltype(sshPublicHostKey) sshPublicHostKey);
/**
* Elaborate `storeUri` into a complete store reference,
* incorporating information from the other fields of the `Machine`
* as applicable.
*/
StoreReference completeStoreReference() const;
/**
* Open a `Store` for this machine.
*
* Just a simple function composition:
* ```c++
* nix::openStore(completeStoreReference())
* ```
*/
ref<Store> openStore() const;
/**
* Parse a machine configuration.
*
* Every machine is specified on its own line, and lines beginning
* with `@` are interpreted as paths to other configuration files in
* the same format.
*/
static Machines parseConfig(const std::set<std::string> & defaultSystems, const std::string & config);
};
typedef std::vector<Machine> Machines;
void parseMachines(const std::string & s, Machines & machines);
/**
* Parse machines from the global config
*
* @todo Remove, globals are bad.
*/
Machines getMachines();
}


@ -27,46 +27,47 @@ std::map<StorePath, StorePath> makeContentAddressed(
StringMap rewrites;
StorePathSet references;
bool hasSelfReference = false;
StoreReferences refs;
for (auto & ref : oldInfo->references) {
if (ref == path)
hasSelfReference = true;
refs.self = true;
else {
auto i = remappings.find(ref);
auto replacement = i != remappings.end() ? i->second : ref;
// FIXME: warn about unremapped paths?
if (replacement != ref)
rewrites.insert_or_assign(srcStore.printStorePath(ref), srcStore.printStorePath(replacement));
references.insert(std::move(replacement));
refs.others.insert(std::move(replacement));
}
}
sink.s = rewriteStrings(sink.s, rewrites);
HashModuloSink hashModuloSink(htSHA256, oldHashPart);
HashModuloSink hashModuloSink(HashAlgorithm::SHA256, oldHashPart);
hashModuloSink(sink.s);
auto narModuloHash = hashModuloSink.finish().first;
auto dstPath = dstStore.makeFixedOutputPath(
FileIngestionMethod::Recursive, narModuloHash, path.name(), references, hasSelfReference);
ValidPathInfo info {
dstStore,
path.name(),
FixedOutputInfo {
.method = FileIngestionMethod::NixArchive,
.hash = narModuloHash,
.references = std::move(refs),
},
Hash::dummy,
};
printInfo("rewriting '%s' to '%s'", pathS, srcStore.printStorePath(dstPath));
printInfo("rewriting '%s' to '%s'", pathS, dstStore.printStorePath(info.path));
StringSink sink2;
RewritingSink rsink2(oldHashPart, std::string(dstPath.hashPart()), sink2);
RewritingSink rsink2(oldHashPart, std::string(info.path.hashPart()), sink2);
rsink2(sink.s);
rsink2.flush();
ValidPathInfo info { dstPath, hashString(htSHA256, sink2.s) };
info.references = std::move(references);
if (hasSelfReference) info.references.insert(info.path);
info.narHash = hashString(HashAlgorithm::SHA256, sink2.s);
info.narSize = sink.s.size();
info.ca = FixedOutputHash {
.method = FileIngestionMethod::Recursive,
.hash = narModuloHash,
};
StringSource source(sink2.s);
dstStore.addToStore(info, source);
@ -77,4 +78,15 @@ std::map<StorePath, StorePath> makeContentAddressed(
return remappings;
}
StorePath makeContentAddressed(
Store & srcStore,
Store & dstStore,
const StorePath & fromPath)
{
auto remappings = makeContentAddressed(srcStore, dstStore, StorePathSet { fromPath });
auto i = remappings.find(fromPath);
assert(i != remappings.end());
return i->second;
}
}


@ -1,12 +1,24 @@
#pragma once
///@file
#include "store-api.hh"
namespace nix {
/** Rewrite a closure of store paths to be completely content addressed.
*/
std::map<StorePath, StorePath> makeContentAddressed(
Store & srcStore,
Store & dstStore,
const StorePathSet & storePaths);
const StorePathSet & rootPaths);
/** Rewrite a closure of a store path to be completely content addressed.
*
* This is a convenience function for the case where you only have one root path.
*/
StorePath makeContentAddressed(
Store & srcStore,
Store & dstStore,
const StorePath & rootPath);
}
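As a rough illustration of the convenience overload declared above, the following sketch rewrites the closure of a single (made-up) path within the default store, using the same store as source and destination:

```c++
#include <iostream>

#include "make-content-addressed.hh" // assumed install locations
#include "store-api.hh"

int main()
{
    using namespace nix;

    auto store = openStore(); // the configured default store

    // Made-up input-addressed path, purely for illustration.
    auto path = store->parseStorePath(
        "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-example");

    // Rewrite the closure of `path` so every member is content addressed.
    StorePath caPath = makeContentAddressed(*store, *store, path);
    std::cout << store->printStorePath(caPath) << "\n";
}
```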

424
src/libstore/meson.build Normal file
View file

@ -0,0 +1,424 @@
project('nix-store', 'cpp',
version : files('.version'),
default_options : [
'cpp_std=c++2a',
# TODO(Qyriad): increase the warning level
'warning_level=1',
'debug=true',
'optimization=2',
'errorlogs=true', # Please print logs for tests that fail
],
meson_version : '>= 1.1',
license : 'LGPL-2.1-or-later',
)
cxx = meson.get_compiler('cpp')
subdir('build-utils-meson/deps-lists')
configdata = configuration_data()
# TODO rename, because it will conflict with downstream projects
configdata.set_quoted('PACKAGE_VERSION', meson.project_version())
configdata.set_quoted('SYSTEM', host_machine.cpu_family() + '-' + host_machine.system())
deps_private_maybe_subproject = [
]
deps_public_maybe_subproject = [
dependency('nix-util'),
]
subdir('build-utils-meson/subprojects')
run_command('ln', '-s',
meson.project_build_root() / '__nothing_link_target',
meson.project_build_root() / '__nothing_symlink',
check : true,
)
can_link_symlink = run_command('ln',
meson.project_build_root() / '__nothing_symlink',
meson.project_build_root() / '__nothing_hardlink',
check : false,
).returncode() == 0
run_command('rm', '-f',
meson.project_build_root() / '__nothing_symlink',
meson.project_build_root() / '__nothing_hardlink',
check : true,
)
summary('can hardlink to symlink', can_link_symlink, bool_yn : true)
configdata.set('CAN_LINK_SYMLINK', can_link_symlink.to_int())
# Check for each of these functions, and create a define like `#define HAVE_LCHOWN 1`.
#
# Only need to check for functions that deps (like `libnixutil`) didn't
# already check for.
check_funcs = [
# Optionally used for canonicalising files from the build
'lchown',
'statvfs',
]
foreach funcspec : check_funcs
define_name = 'HAVE_' + funcspec.underscorify().to_upper()
define_value = cxx.has_function(funcspec).to_int()
configdata.set(define_name, define_value)
endforeach
has_acl_support = cxx.has_header('sys/xattr.h') \
and cxx.has_function('llistxattr') \
and cxx.has_function('lremovexattr')
configdata.set('HAVE_ACL_SUPPORT', has_acl_support.to_int())
subdir('build-utils-meson/threads')
boost = dependency(
'boost',
modules : ['container'],
include_type: 'system',
)
# boost is a public dependency, but unfortunately not a pkg-config dependency, so we
# put it in `deps_other`.
deps_other += boost
curl = dependency('libcurl', 'curl')
deps_private += curl
# seccomp only makes sense on Linux
is_linux = host_machine.system() == 'linux'
seccomp_required = get_option('seccomp-sandboxing')
if not is_linux and seccomp_required.enabled()
warning('Force-enabling seccomp on non-Linux does not make sense')
endif
seccomp = dependency('libseccomp', 'seccomp', required : seccomp_required, version : '>=2.5.5')
if is_linux and not seccomp.found()
warning('Sandbox security is reduced because libseccomp has not been found! Please provide libseccomp if it supports your CPU architecture.')
endif
configdata.set('HAVE_SECCOMP', seccomp.found().to_int())
deps_private += seccomp
nlohmann_json = dependency('nlohmann_json', version : '>= 3.9')
deps_public += nlohmann_json
sqlite = dependency('sqlite3', 'sqlite', version : '>=3.6.19')
deps_private += sqlite
# AWS C++ SDK has bad pkg-config
aws_s3 = dependency('aws-cpp-sdk-s3', required : false)
configdata.set('ENABLE_S3', aws_s3.found().to_int())
if aws_s3.found()
aws_s3 = declare_dependency(
include_directories: include_directories(aws_s3.get_variable('includedir')),
link_args: [
'-L' + aws_s3.get_variable('libdir'),
'-laws-cpp-sdk-transfer',
'-laws-cpp-sdk-s3',
'-laws-cpp-sdk-core',
'-laws-crt-cpp',
],
).as_system('system')
endif
deps_other += aws_s3
subdir('build-utils-meson/generate-header')
generated_headers = []
foreach header : [
'schema.sql',
'ca-specific-schema.sql',
]
generated_headers += gen_header.process(header)
endforeach
busybox = find_program(get_option('sandbox-shell'), required : false)
if get_option('embedded-sandbox-shell')
# This one goes in config.h
# The path to busybox is passed as a -D flag when compiling this_library.
# This solution is inherited from the old make buildsystem
# TODO: do this differently?
configdata.set('HAVE_EMBEDDED_SANDBOX_SHELL', 1)
hexdump = find_program('hexdump', native : true)
embedded_sandbox_shell_gen = custom_target(
'embedded-sandbox-shell.gen.hh',
command : [
hexdump,
'-v',
'-e',
'1/1 "0x%x," "\n"'
],
input : busybox.full_path(),
output : 'embedded-sandbox-shell.gen.hh',
capture : true,
feed : true,
)
generated_headers += embedded_sandbox_shell_gen
endif
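For context, a header produced by the hexdump rule above (one `0x..,` token per byte) is usually consumed by splicing it into an array initializer. The snippet below is a sketch of that pattern, not necessarily the exact consuming code in this tree:

```c++
#include <iostream>

// The guard matches the HAVE_EMBEDDED_SANDBOX_SHELL define set in configdata
// above; when enabled, the generated header must contain only comma-separated
// byte literals so it can sit inside an initializer list.
#if HAVE_EMBEDDED_SANDBOX_SHELL
static const unsigned char embeddedSandboxShell[] = {
#include "embedded-sandbox-shell.gen.hh"
};
#else
static const unsigned char embeddedSandboxShell[] = { 0 }; // fallback for this sketch
#endif

int main()
{
    std::cout << "embedded shell size: " << sizeof(embeddedSandboxShell) << " bytes\n";
}
```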
config_h = configure_file(
configuration : configdata,
output : 'config-store.hh',
)
add_project_arguments(
# TODO(Qyriad): Yes this is how the autoconf+Make system did it.
# It would be nice for our headers to be idempotent instead.
'-include', 'config-util.hh',
'-include', 'config-store.hh',
language : 'cpp',
)
subdir('build-utils-meson/diagnostics')
sources = files(
'binary-cache-store.cc',
'build-result.cc',
'build/derivation-goal.cc',
'build/drv-output-substitution-goal.cc',
'build/entry-points.cc',
'build/goal.cc',
'build/substitution-goal.cc',
'build/worker.cc',
'builtins/buildenv.cc',
'builtins/fetchurl.cc',
'builtins/unpack-channel.cc',
'common-protocol.cc',
'common-ssh-store-config.cc',
'content-address.cc',
'daemon.cc',
'derivations.cc',
'derived-path-map.cc',
'derived-path.cc',
'downstream-placeholder.cc',
'dummy-store.cc',
'export-import.cc',
'filetransfer.cc',
'gc.cc',
'globals.cc',
'http-binary-cache-store.cc',
'indirect-root-store.cc',
'keys.cc',
'legacy-ssh-store.cc',
'local-binary-cache-store.cc',
'local-fs-store.cc',
'local-overlay-store.cc',
'local-store.cc',
'log-store.cc',
'machines.cc',
'make-content-addressed.cc',
'misc.cc',
'names.cc',
'nar-accessor.cc',
'nar-info-disk-cache.cc',
'nar-info.cc',
'optimise-store.cc',
'outputs-spec.cc',
'parsed-derivations.cc',
'path-info.cc',
'path-references.cc',
'path-with-outputs.cc',
'path.cc',
'pathlocks.cc',
'posix-fs-canonicalise.cc',
'profiles.cc',
'realisation.cc',
'remote-fs-accessor.cc',
'remote-store.cc',
's3-binary-cache-store.cc',
'serve-protocol-connection.cc',
'serve-protocol.cc',
'sqlite.cc',
'ssh-store.cc',
'ssh.cc',
'store-api.cc',
'store-reference.cc',
'uds-remote-store.cc',
'worker-protocol-connection.cc',
'worker-protocol.cc',
)
include_dirs = [
include_directories('.'),
include_directories('build'),
]
headers = [config_h] + files(
'binary-cache-store.hh',
'build-result.hh',
'build/derivation-goal.hh',
'build/drv-output-substitution-goal.hh',
'build/goal.hh',
'build/substitution-goal.hh',
'build/worker.hh',
'builtins.hh',
'builtins/buildenv.hh',
'common-protocol-impl.hh',
'common-protocol.hh',
'common-ssh-store-config.hh',
'content-address.hh',
'daemon.hh',
'derivations.hh',
'derived-path-map.hh',
'derived-path.hh',
'downstream-placeholder.hh',
'filetransfer.hh',
'gc-store.hh',
'globals.hh',
'http-binary-cache-store.hh',
'indirect-root-store.hh',
'keys.hh',
'legacy-ssh-store.hh',
'length-prefixed-protocol-helper.hh',
'local-binary-cache-store.hh',
'local-fs-store.hh',
'local-overlay-store.hh',
'local-store.hh',
'log-store.hh',
'machines.hh',
'make-content-addressed.hh',
'names.hh',
'nar-accessor.hh',
'nar-info-disk-cache.hh',
'nar-info.hh',
'outputs-spec.hh',
'parsed-derivations.hh',
'path-info.hh',
'path-references.hh',
'path-regex.hh',
'path-with-outputs.hh',
'path.hh',
'pathlocks.hh',
'posix-fs-canonicalise.hh',
'profiles.hh',
'realisation.hh',
'remote-fs-accessor.hh',
'remote-store-connection.hh',
'remote-store.hh',
's3-binary-cache-store.hh',
's3.hh',
'ssh-store.hh',
'serve-protocol-connection.hh',
'serve-protocol-impl.hh',
'serve-protocol.hh',
'sqlite.hh',
'ssh.hh',
'store-api.hh',
'store-cast.hh',
'store-dir-config.hh',
'store-reference.hh',
'uds-remote-store.hh',
'worker-protocol-connection.hh',
'worker-protocol-impl.hh',
'worker-protocol.hh',
)
if host_machine.system() == 'linux'
subdir('linux')
endif
if host_machine.system() == 'windows'
subdir('windows')
else
subdir('unix')
endif
fs = import('fs')
prefix = get_option('prefix')
# For each of these paths, assume that it is relative to the prefix unless
# it is already an absolute path (which is the default for store-dir, state-dir, and log-dir).
path_opts = [
# Meson built-ins.
'datadir',
'mandir',
'libdir',
'includedir',
'libexecdir',
# Homecooked Nix directories.
'store-dir',
'state-dir',
'log-dir',
]
# For your grepping pleasure, this loop sets the following variables that aren't mentioned
# literally above:
# store_dir
# state_dir
# log_dir
# profile_dir
foreach optname : path_opts
varname = optname.replace('-', '_')
path = get_option(optname)
if fs.is_absolute(path)
set_variable(varname, path)
else
set_variable(varname, prefix / path)
endif
endforeach
# Nothing is installed directly into sysconfdir; it is only used to tell Nix
# where to look for nix.conf, so it is not appended to prefix.
sysconfdir = get_option('sysconfdir')
if not fs.is_absolute(sysconfdir)
sysconfdir = '/' / sysconfdir
endif
lsof = find_program('lsof', required : false)
# Aside from prefix itself, each of these was made into an absolute path
# by joining it with prefix, unless it was already an absolute path
# (which is the default for store-dir, state-dir, and log-dir).
cpp_str_defines = {
'NIX_PREFIX': prefix,
'NIX_STORE_DIR': store_dir,
'NIX_DATA_DIR': datadir,
'NIX_STATE_DIR': state_dir / 'nix',
'NIX_LOG_DIR': log_dir,
'NIX_CONF_DIR': sysconfdir / 'nix',
'NIX_MAN_DIR': mandir,
}
if lsof.found()
lsof_path = lsof.full_path()
else
# Just look it up on the PATH
lsof_path = 'lsof'
endif
cpp_str_defines += {
'LSOF': lsof_path
}
if get_option('embedded-sandbox-shell')
cpp_str_defines += {
'SANDBOX_SHELL': '__embedded_sandbox_shell__'
}
elif busybox.found()
cpp_str_defines += {
'SANDBOX_SHELL': busybox.full_path()
}
endif
cpp_args = []
foreach name, value : cpp_str_defines
cpp_args += [
'-D' + name + '=' + '"' + value + '"'
]
endforeach
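As a side note, the `-DNAME="value"` string defines assembled above arrive in C++ as ordinary string literals. A minimal, self-contained sketch (the fallback define exists only so the snippet compiles on its own):

```c++
#include <iostream>
#include <string>

// Fallback so this sketch builds outside the Meson setup above; in the real
// build the value is injected via -DNIX_STORE_DIR="...".
#ifndef NIX_STORE_DIR
#define NIX_STORE_DIR "/nix/store"
#endif

int main()
{
    std::string storeDir = NIX_STORE_DIR;
    std::cout << "store directory baked in at build time: " << storeDir << "\n";
}
```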
subdir('build-utils-meson/export-all-symbols')
this_library = library(
'nixstore',
generated_headers,
sources,
dependencies : deps_public + deps_private + deps_other,
include_directories : include_dirs,
cpp_args : cpp_args,
link_args: linker_export_flags,
prelink : true, # For C++ static initializers
install : true,
)
install_headers(headers, subdir : 'nix', preserve_path : true)
libraries_private = []
subdir('build-utils-meson/export')

View file

@ -0,0 +1,25 @@
# vim: filetype=meson
option('embedded-sandbox-shell', type : 'boolean', value : false,
description : 'include the sandbox shell in the Nix binary',
)
option('seccomp-sandboxing', type : 'feature',
description : 'build support for seccomp sandboxing (recommended unless your arch doesn\'t support libseccomp, only relevant on Linux)',
)
option('sandbox-shell', type : 'string', value : 'busybox',
description : 'path to a statically-linked shell to use as /bin/sh in sandboxes (usually busybox)',
)
option('store-dir', type : 'string', value : '/nix/store',
description : 'path of the Nix store',
)
option('state-dir', type : 'string', value : '/nix/var',
description : 'path to store state in for Nix',
)
option('log-dir', type : 'string', value : '/nix/var/log/nix',
description : 'path to store logs in for Nix',
)

View file

@ -1,13 +1,16 @@
#include <unordered_set>
#include "derivations.hh"
#include "parsed-derivations.hh"
#include "globals.hh"
#include "local-store.hh"
#include "store-api.hh"
#include "thread-pool.hh"
#include "realisation.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "closure.hh"
#include "filetransfer.hh"
#include "strings.hh"
namespace nix {
@ -83,14 +86,15 @@ void Store::computeFSClosure(const StorePath & startPath,
}
std::optional<ContentAddress> getDerivationCA(const BasicDerivation & drv)
const ContentAddress * getDerivationCA(const BasicDerivation & drv)
{
auto out = drv.outputs.find("out");
if (out != drv.outputs.end()) {
if (const auto * v = std::get_if<DerivationOutput::CAFixed>(&out->second.raw()))
return v->hash;
if (out == drv.outputs.end())
return nullptr;
if (auto dof = std::get_if<DerivationOutput::CAFixed>(&out->second.raw)) {
return &dof->ca;
}
return std::nullopt;
return nullptr;
}
void Store::queryMissing(const std::vector<DerivedPath> & targets,
@ -124,14 +128,26 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
std::function<void(DerivedPath)> doPath;
std::function<void(ref<SingleDerivedPath>, const DerivedPathMap<StringSet>::ChildNode &)> enqueueDerivedPaths;
enqueueDerivedPaths = [&](ref<SingleDerivedPath> inputDrv, const DerivedPathMap<StringSet>::ChildNode & inputNode) {
if (!inputNode.value.empty())
pool.enqueue(std::bind(doPath, DerivedPath::Built { inputDrv, inputNode.value }));
for (const auto & [outputName, childNode] : inputNode.childMap)
enqueueDerivedPaths(
make_ref<SingleDerivedPath>(SingleDerivedPath::Built { inputDrv, outputName }),
childNode);
};
auto mustBuildDrv = [&](const StorePath & drvPath, const Derivation & drv) {
{
auto state(state_.lock());
state->willBuild.insert(drvPath);
}
for (auto & i : drv.inputDrvs)
pool.enqueue(std::bind(doPath, DerivedPath::Built { i.first, i.second }));
for (const auto & [inputDrv, inputNode] : drv.inputDrvs.map) {
enqueueDerivedPaths(makeConstantStorePathRef(inputDrv), inputNode);
}
};
auto checkOutput = [&](
@ -140,7 +156,13 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
if (drvState_->lock()->done) return;
SubstitutablePathInfos infos;
querySubstitutablePathInfos({{outPath, getDerivationCA(*drv)}}, infos);
auto * cap = getDerivationCA(*drv);
querySubstitutablePathInfos({
{
outPath,
cap ? std::optional { *cap } : std::nullopt,
},
}, infos);
if (infos.empty()) {
drvState_->lock()->done = true;
@ -169,10 +191,18 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
std::visit(overloaded {
[&](const DerivedPath::Built & bfd) {
if (!isValidPath(bfd.drvPath)) {
auto drvPathP = std::get_if<DerivedPath::Opaque>(&*bfd.drvPath);
if (!drvPathP) {
// TODO make work in this case.
warn("Ignoring dynamic derivation %s while querying missing paths; not yet implemented", bfd.drvPath->to_string(*this));
return;
}
auto & drvPath = drvPathP->path;
if (!isValidPath(drvPath)) {
// FIXME: we could try to substitute the derivation.
auto state(state_.lock());
state->unknown.insert(bfd.drvPath);
state->unknown.insert(drvPath);
return;
}
@ -180,25 +210,55 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
/* true for regular derivations, and CA derivations for which we
have a trust mapping for all wanted outputs. */
auto knownOutputPaths = true;
for (auto & [outputName, pathOpt] : queryPartialDerivationOutputMap(bfd.drvPath)) {
for (auto & [outputName, pathOpt] : queryPartialDerivationOutputMap(drvPath)) {
if (!pathOpt) {
knownOutputPaths = false;
break;
}
if (wantOutput(outputName, bfd.outputs) && !isValidPath(*pathOpt))
if (bfd.outputs.contains(outputName) && !isValidPath(*pathOpt))
invalid.insert(*pathOpt);
}
if (knownOutputPaths && invalid.empty()) return;
auto drv = make_ref<Derivation>(derivationFromPath(bfd.drvPath));
ParsedDerivation parsedDrv(StorePath(bfd.drvPath), *drv);
auto drv = make_ref<Derivation>(derivationFromPath(drvPath));
ParsedDerivation parsedDrv(StorePath(drvPath), *drv);
if (!knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
experimentalFeatureSettings.require(Xp::CaDerivations);
// If there are unknown output paths, try to find out whether they are
// known to substituters through a realisation.
auto outputHashes = staticOutputHashes(*this, *drv);
knownOutputPaths = true;
for (auto [outputName, hash] : outputHashes) {
if (!bfd.outputs.contains(outputName))
continue;
bool found = false;
for (auto &sub : getDefaultSubstituters()) {
auto realisation = sub->queryRealisation({hash, outputName});
if (!realisation)
continue;
found = true;
if (!isValidPath(realisation->outPath))
invalid.insert(realisation->outPath);
break;
}
if (!found) {
// Some paths did not have a realisation, this must be built.
knownOutputPaths = false;
break;
}
}
}
if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
for (auto & output : invalid)
pool.enqueue(std::bind(checkOutput, bfd.drvPath, drv, output, drvState));
pool.enqueue(std::bind(checkOutput, drvPath, drv, output, drvState));
} else
mustBuildDrv(bfd.drvPath, *drv);
mustBuildDrv(drvPath, *drv);
},
[&](const DerivedPath::Opaque & bo) {
@ -273,32 +333,140 @@ std::map<DrvOutput, StorePath> drvOutputReferences(
std::map<DrvOutput, StorePath> drvOutputReferences(
Store & store,
const Derivation & drv,
const StorePath & outputPath)
const StorePath & outputPath,
Store * evalStore_)
{
auto & evalStore = evalStore_ ? *evalStore_ : store;
std::set<Realisation> inputRealisations;
for (const auto & [inputDrv, outputNames] : drv.inputDrvs) {
const auto outputHashes =
staticOutputHashes(store, store.readDerivation(inputDrv));
for (const auto & outputName : outputNames) {
auto outputHash = get(outputHashes, outputName);
if (!outputHash)
throw Error(
"output '%s' of derivation '%s' isn't realised", outputName,
store.printStorePath(inputDrv));
auto thisRealisation = store.queryRealisation(
DrvOutput{*outputHash, outputName});
if (!thisRealisation)
throw Error(
"output '%s' of derivation '%s' isn't built", outputName,
store.printStorePath(inputDrv));
inputRealisations.insert(*thisRealisation);
std::function<void(const StorePath &, const DerivedPathMap<StringSet>::ChildNode &)> accumRealisations;
accumRealisations = [&](const StorePath & inputDrv, const DerivedPathMap<StringSet>::ChildNode & inputNode) {
if (!inputNode.value.empty()) {
auto outputHashes =
staticOutputHashes(evalStore, evalStore.readDerivation(inputDrv));
for (const auto & outputName : inputNode.value) {
auto outputHash = get(outputHashes, outputName);
if (!outputHash)
throw Error(
"output '%s' of derivation '%s' isn't realised", outputName,
store.printStorePath(inputDrv));
auto thisRealisation = store.queryRealisation(
DrvOutput{*outputHash, outputName});
if (!thisRealisation)
throw Error(
"output '%s' of derivation '%s' isnt built", outputName,
store.printStorePath(inputDrv));
inputRealisations.insert(*thisRealisation);
}
}
}
if (!inputNode.value.empty()) {
auto d = makeConstantStorePathRef(inputDrv);
for (const auto & [outputName, childNode] : inputNode.childMap) {
SingleDerivedPath next = SingleDerivedPath::Built { d, outputName };
accumRealisations(
// TODO deep resolutions for dynamic derivations, issue #8947, would go here.
resolveDerivedPath(store, next, evalStore_),
childNode);
}
}
};
for (const auto & [inputDrv, inputNode] : drv.inputDrvs.map)
accumRealisations(inputDrv, inputNode);
auto info = store.queryPathInfo(outputPath);
return drvOutputReferences(Realisation::closure(store, inputRealisations), info->references);
}
OutputPathMap resolveDerivedPath(Store & store, const DerivedPath::Built & bfd, Store * evalStore_)
{
auto drvPath = resolveDerivedPath(store, *bfd.drvPath, evalStore_);
auto outputsOpt_ = store.queryPartialDerivationOutputMap(drvPath, evalStore_);
auto outputsOpt = std::visit(overloaded {
[&](const OutputsSpec::All &) {
// Keep all outputs
return std::move(outputsOpt_);
},
[&](const OutputsSpec::Names & names) {
// Get just those mentioned by name
std::map<std::string, std::optional<StorePath>> outputsOpt;
for (auto & output : names) {
auto * pOutputPathOpt = get(outputsOpt_, output);
if (!pOutputPathOpt)
throw Error(
"the derivation '%s' doesn't have an output named '%s'",
bfd.drvPath->to_string(store), output);
outputsOpt.insert_or_assign(output, std::move(*pOutputPathOpt));
}
return outputsOpt;
},
}, bfd.outputs.raw);
OutputPathMap outputs;
for (auto & [outputName, outputPathOpt] : outputsOpt) {
if (!outputPathOpt)
throw MissingRealisation(bfd.drvPath->to_string(store), outputName);
auto & outputPath = *outputPathOpt;
outputs.insert_or_assign(outputName, outputPath);
}
return outputs;
}
StorePath resolveDerivedPath(Store & store, const SingleDerivedPath & req, Store * evalStore_)
{
auto & evalStore = evalStore_ ? *evalStore_ : store;
return std::visit(overloaded {
[&](const SingleDerivedPath::Opaque & bo) {
return bo.path;
},
[&](const SingleDerivedPath::Built & bfd) {
auto drvPath = resolveDerivedPath(store, *bfd.drvPath, evalStore_);
auto outputPaths = evalStore.queryPartialDerivationOutputMap(drvPath, evalStore_);
if (outputPaths.count(bfd.output) == 0)
throw Error("derivation '%s' does not have an output named '%s'",
store.printStorePath(drvPath), bfd.output);
auto & optPath = outputPaths.at(bfd.output);
if (!optPath)
throw MissingRealisation(bfd.drvPath->to_string(store), bfd.output);
return *optPath;
},
}, req.raw());
}
OutputPathMap resolveDerivedPath(Store & store, const DerivedPath::Built & bfd)
{
auto drvPath = resolveDerivedPath(store, *bfd.drvPath);
auto outputMap = store.queryDerivationOutputMap(drvPath);
auto outputsLeft = std::visit(overloaded {
[&](const OutputsSpec::All &) {
return StringSet {};
},
[&](const OutputsSpec::Names & names) {
return static_cast<StringSet>(names);
},
}, bfd.outputs.raw);
for (auto iter = outputMap.begin(); iter != outputMap.end();) {
auto & outputName = iter->first;
if (bfd.outputs.contains(outputName)) {
outputsLeft.erase(outputName);
++iter;
} else {
iter = outputMap.erase(iter);
}
}
if (!outputsLeft.empty())
throw Error("derivation '%s' does not have an outputs %s",
store.printStorePath(drvPath),
concatStringsSep(", ", quoteStrings(std::get<OutputsSpec::Names>(bfd.outputs.raw))));
return outputMap;
}
}
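To illustrate the `resolveDerivedPath` helpers defined above, here is a hedged sketch that asks for the `out` output of a made-up derivation; it assumes the eval-store parameter is defaulted in the header:

```c++
#include <iostream>

#include "store-api.hh"    // assumed install locations
#include "derived-path.hh"

int main()
{
    using namespace nix;

    auto store = openStore();

    // Made-up .drv path, purely for illustration.
    auto drvPath = store->parseStorePath(
        "/nix/store/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-example.drv");

    // "Give me the 'out' output of this derivation."
    SingleDerivedPath req = SingleDerivedPath::Built {
        makeConstantStorePathRef(drvPath), "out" };

    // Resolves to a concrete store path, or throws MissingRealisation if the
    // output has not been built yet.
    StorePath outPath = resolveDerivedPath(*store, req);
    std::cout << store->printStorePath(outPath) << "\n";
}
```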

View file

@ -0,0 +1,18 @@
R"(
**Store URL format**: `mounted-ssh-ng://[username@]hostname`
Experimental store type that allows full access to a Nix store on a remote machine,
and additionally requires that store be mounted in the local file system.
The mounting of that store is not managed by Nix, and must be managed manually.
It could be accomplished with SSHFS or NFS, for example.
The local file system is used to optimize certain operations.
For example, rather than serializing Nix archives and sending them over the SSH connection,
we can directly access the file system data via the mount-point.
The local file system is also used to make certain operations possible that wouldn't otherwise be.
For example, persistent GC roots can be created if they reside on the same file system as the remote store:
the remote side will create the symlinks necessary to avoid race conditions.
)"

View file

@ -94,7 +94,7 @@ static bool componentsLT(const std::string_view c1, const std::string_view c2)
}
int compareVersions(const std::string_view v1, const std::string_view v2)
std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2)
{
auto p1 = v1.begin();
auto p2 = v2.begin();
@ -102,11 +102,11 @@ int compareVersions(const std::string_view v1, const std::string_view v2)
while (p1 != v1.end() || p2 != v2.end()) {
auto c1 = nextComponent(p1, v1.end());
auto c2 = nextComponent(p2, v2.end());
if (componentsLT(c1, c2)) return -1;
else if (componentsLT(c2, c1)) return 1;
if (componentsLT(c1, c2)) return std::strong_ordering::less;
else if (componentsLT(c2, c1)) return std::strong_ordering::greater;
}
return 0;
return std::strong_ordering::equal;
}

View file

@ -1,4 +1,5 @@
#pragma once
///@file
#include <memory>
@ -29,7 +30,7 @@ typedef std::list<DrvName> DrvNames;
std::string_view nextComponent(std::string_view::const_iterator & p,
const std::string_view::const_iterator end);
int compareVersions(const std::string_view v1, const std::string_view v2);
std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2);
DrvNames drvNamesFromArgs(const Strings & opArgs);
}
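A small illustration of the new three-way result; the version strings are arbitrary examples:

```c++
#include <cassert>
#include <compare>

#include "names.hh" // assumed install location of the header above

int main()
{
    using namespace nix;

    // Numeric components are compared as numbers, so "2.10" is newer than "2.3".
    assert(compareVersions("1.0", "2.3") == std::strong_ordering::less);
    assert(compareVersions("2.3", "2.3") == std::strong_ordering::equal);
    assert(compareVersions("2.10", "2.3") == std::strong_ordering::greater);
}
```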

View file

@ -3,7 +3,6 @@
#include <map>
#include <stack>
#include <algorithm>
#include <nlohmann/json.hpp>
@ -11,13 +10,7 @@ namespace nix {
struct NarMember
{
FSAccessor::Type type = FSAccessor::Type::tMissing;
bool isExecutable = false;
/* If this is a regular file, position of the contents of this
file in the NAR. */
uint64_t start = 0, size = 0;
SourceAccessor::Stat stat;
std::string target;
@ -25,7 +18,36 @@ struct NarMember
std::map<std::string, NarMember> children;
};
struct NarAccessor : public FSAccessor
struct NarMemberConstructor : CreateRegularFileSink
{
private:
NarMember & narMember;
uint64_t & pos;
public:
NarMemberConstructor(NarMember & nm, uint64_t & pos)
: narMember(nm), pos(pos)
{ }
void isExecutable() override
{
narMember.stat.isExecutable = true;
}
void preallocateContents(uint64_t size) override
{
narMember.stat.fileSize = size;
narMember.stat.narOffset = pos;
}
void operator () (std::string_view data) override
{ }
};
struct NarAccessor : public SourceAccessor
{
std::optional<const std::string> nar;
@ -33,7 +55,7 @@ struct NarAccessor : public FSAccessor
NarMember root;
struct NarIndexer : ParseSink, Source
struct NarIndexer : FileSystemObjectSink, Source
{
NarAccessor & acc;
Source & source;
@ -48,54 +70,58 @@ struct NarAccessor : public FSAccessor
: acc(acc), source(source)
{ }
void createMember(const Path & path, NarMember member)
NarMember & createMember(const CanonPath & path, NarMember member)
{
size_t level = std::count(path.begin(), path.end(), '/');
size_t level = 0;
for (auto _ : path) {
(void)_;
++level;
}
while (parents.size() > level) parents.pop();
if (parents.empty()) {
acc.root = std::move(member);
parents.push(&acc.root);
return acc.root;
} else {
if (parents.top()->type != FSAccessor::Type::tDirectory)
if (parents.top()->stat.type != Type::tDirectory)
throw Error("NAR file missing parent directory of path '%s'", path);
auto result = parents.top()->children.emplace(baseNameOf(path), std::move(member));
parents.push(&result.first->second);
auto result = parents.top()->children.emplace(*path.baseName(), std::move(member));
auto & ref = result.first->second;
parents.push(&ref);
return ref;
}
}
void createDirectory(const Path & path) override
void createDirectory(const CanonPath & path) override
{
createMember(path, {FSAccessor::Type::tDirectory, false, 0, 0});
createMember(path, NarMember{ .stat = {
.type = Type::tDirectory,
.fileSize = 0,
.isExecutable = false,
.narOffset = 0
} });
}
void createRegularFile(const Path & path) override
void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
{
createMember(path, {FSAccessor::Type::tRegular, false, 0, 0});
auto & nm = createMember(path, NarMember{ .stat = {
.type = Type::tRegular,
.fileSize = 0,
.isExecutable = false,
.narOffset = 0
} });
NarMemberConstructor nmc { nm, pos };
func(nmc);
}
void closeRegularFile() override
{ }
void isExecutable() override
{
parents.top()->isExecutable = true;
}
void preallocateContents(uint64_t size) override
{
assert(size <= std::numeric_limits<uint64_t>::max());
parents.top()->size = (uint64_t) size;
parents.top()->start = pos;
}
void receiveContents(std::string_view data) override
{ }
void createSymlink(const Path & path, const std::string & target) override
void createSymlink(const CanonPath & path, const std::string & target) override
{
createMember(path,
NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target});
NarMember{
.stat = {.type = Type::tSymlink},
.target = target});
}
size_t read(char * data, size_t len) override
@ -130,18 +156,19 @@ struct NarAccessor : public FSAccessor
std::string type = v["type"];
if (type == "directory") {
member.type = FSAccessor::Type::tDirectory;
for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) {
std::string name = i.key();
recurse(member.children[name], i.value());
member.stat = {.type = Type::tDirectory};
for (const auto &[name, function] : v["entries"].items()) {
recurse(member.children[name], function);
}
} else if (type == "regular") {
member.type = FSAccessor::Type::tRegular;
member.size = v["size"];
member.isExecutable = v.value("executable", false);
member.start = v["narOffset"];
member.stat = {
.type = Type::tRegular,
.fileSize = v["size"],
.isExecutable = v.value("executable", false),
.narOffset = v["narOffset"]
};
} else if (type == "symlink") {
member.type = FSAccessor::Type::tSymlink;
member.stat = {.type = Type::tSymlink};
member.target = v.value("target", "");
} else return;
};
@ -150,133 +177,122 @@ struct NarAccessor : public FSAccessor
recurse(root, v);
}
NarMember * find(const Path & path)
NarMember * find(const CanonPath & path)
{
Path canon = path == "" ? "" : canonPath(path);
NarMember * current = &root;
auto end = path.end();
for (auto it = path.begin(); it != end; ) {
// because it != end, the remaining component is non-empty so we need
// a directory
if (current->type != FSAccessor::Type::tDirectory) return nullptr;
// skip slash (canonPath above ensures that this is always a slash)
assert(*it == '/');
it += 1;
// lookup current component
auto next = std::find(it, end, '/');
auto child = current->children.find(std::string(it, next));
for (const auto & i : path) {
if (current->stat.type != Type::tDirectory) return nullptr;
auto child = current->children.find(std::string(i));
if (child == current->children.end()) return nullptr;
current = &child->second;
it = next;
}
return current;
}
NarMember & get(const Path & path) {
NarMember & get(const CanonPath & path) {
auto result = find(path);
if (result == nullptr)
if (!result)
throw Error("NAR file does not contain path '%1%'", path);
return *result;
}
Stat stat(const Path & path) override
std::optional<Stat> maybeLstat(const CanonPath & path) override
{
auto i = find(path);
if (i == nullptr)
return {FSAccessor::Type::tMissing, 0, false};
return {i->type, i->size, i->isExecutable, i->start};
if (!i)
return std::nullopt;
return i->stat;
}
StringSet readDirectory(const Path & path) override
DirEntries readDirectory(const CanonPath & path) override
{
auto i = get(path);
if (i.type != FSAccessor::Type::tDirectory)
if (i.stat.type != Type::tDirectory)
throw Error("path '%1%' inside NAR file is not a directory", path);
StringSet res;
for (auto & child : i.children)
res.insert(child.first);
DirEntries res;
for (const auto & child : i.children)
res.insert_or_assign(child.first, std::nullopt);
return res;
}
std::string readFile(const Path & path, bool requireValidPath = true) override
std::string readFile(const CanonPath & path) override
{
auto i = get(path);
if (i.type != FSAccessor::Type::tRegular)
if (i.stat.type != Type::tRegular)
throw Error("path '%1%' inside NAR file is not a regular file", path);
if (getNarBytes) return getNarBytes(i.start, i.size);
if (getNarBytes) return getNarBytes(*i.stat.narOffset, *i.stat.fileSize);
assert(nar);
return std::string(*nar, i.start, i.size);
return std::string(*nar, *i.stat.narOffset, *i.stat.fileSize);
}
std::string readLink(const Path & path) override
std::string readLink(const CanonPath & path) override
{
auto i = get(path);
if (i.type != FSAccessor::Type::tSymlink)
if (i.stat.type != Type::tSymlink)
throw Error("path '%1%' inside NAR file is not a symlink", path);
return i.target;
}
};
ref<FSAccessor> makeNarAccessor(std::string && nar)
ref<SourceAccessor> makeNarAccessor(std::string && nar)
{
return make_ref<NarAccessor>(std::move(nar));
}
ref<FSAccessor> makeNarAccessor(Source & source)
ref<SourceAccessor> makeNarAccessor(Source & source)
{
return make_ref<NarAccessor>(source);
}
ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
ref<SourceAccessor> makeLazyNarAccessor(const std::string & listing,
GetNarBytes getNarBytes)
{
return make_ref<NarAccessor>(listing, getNarBytes);
}
using nlohmann::json;
json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse)
json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse)
{
auto st = accessor->stat(path);
auto st = accessor->lstat(path);
json obj = json::object();
switch (st.type) {
case FSAccessor::Type::tRegular:
case SourceAccessor::Type::tRegular:
obj["type"] = "regular";
obj["size"] = st.fileSize;
if (st.fileSize)
obj["size"] = *st.fileSize;
if (st.isExecutable)
obj["executable"] = true;
if (st.narOffset)
obj["narOffset"] = st.narOffset;
if (st.narOffset && *st.narOffset)
obj["narOffset"] = *st.narOffset;
break;
case FSAccessor::Type::tDirectory:
case SourceAccessor::Type::tDirectory:
obj["type"] = "directory";
{
obj["entries"] = json::object();
json &res2 = obj["entries"];
for (auto & name : accessor->readDirectory(path)) {
for (const auto & [name, type] : accessor->readDirectory(path)) {
if (recurse) {
res2[name] = listNar(accessor, path + "/" + name, true);
res2[name] = listNar(accessor, path / name, true);
} else
res2[name] = json::object();
}
}
break;
case FSAccessor::Type::tSymlink:
case SourceAccessor::Type::tSymlink:
obj["type"] = "symlink";
obj["target"] = accessor->readLink(path);
break;
default:
throw Error("path '%s' does not exist in NAR", path);
case SourceAccessor::Type::tMisc:
assert(false); // cannot happen for NARs
}
return obj;
}

View file

@ -1,32 +1,40 @@
#pragma once
///@file
#include "source-accessor.hh"
#include <functional>
#include <nlohmann/json_fwd.hpp>
#include "fs-accessor.hh"
namespace nix {
struct Source;
/* Return an object that provides access to the contents of a NAR
file. */
ref<FSAccessor> makeNarAccessor(std::string && nar);
/**
* Return an object that provides access to the contents of a NAR
* file.
*/
ref<SourceAccessor> makeNarAccessor(std::string && nar);
ref<FSAccessor> makeNarAccessor(Source & source);
ref<SourceAccessor> makeNarAccessor(Source & source);
/* Create a NAR accessor from a NAR listing (in the format produced by
listNar()). The callback getNarBytes(offset, length) is used by the
readFile() method of the accessor to get the contents of files
inside the NAR. */
typedef std::function<std::string(uint64_t, uint64_t)> GetNarBytes;
/**
* Create a NAR accessor from a NAR listing (in the format produced by
* listNar()). The callback getNarBytes(offset, length) is used by the
* readFile() method of the accessor to get the contents of files
* inside the NAR.
*/
using GetNarBytes = std::function<std::string(uint64_t, uint64_t)>;
ref<FSAccessor> makeLazyNarAccessor(
ref<SourceAccessor> makeLazyNarAccessor(
const std::string & listing,
GetNarBytes getNarBytes);
/* Write a JSON representation of the contents of a NAR (except file
contents). */
nlohmann::json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse);
/**
* Write a JSON representation of the contents of a NAR (except file
* contents).
*/
nlohmann::json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse);
}
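A minimal sketch of using the accessors declared above: read a NAR into memory and dump its recursive listing as JSON (the file name is hypothetical):

```c++
#include <fstream>
#include <iostream>
#include <sstream>
#include <nlohmann/json.hpp>

#include "nar-accessor.hh" // assumed install locations
#include "canon-path.hh"

int main()
{
    using namespace nix;

    // Hypothetical NAR file on disk.
    std::ifstream in("example.nar", std::ios::binary);
    std::stringstream buf;
    buf << in.rdbuf();

    auto accessor = makeNarAccessor(buf.str());

    // Recursively list the whole NAR, in the same JSON format the binary
    // cache store writes to .ls files.
    std::cout << listNar(accessor, CanonPath::root, true).dump(2) << "\n";
}
```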

View file

@ -1,4 +1,5 @@
#include "nar-info-disk-cache.hh"
#include "users.hh"
#include "sync.hh"
#include "sqlite.hh"
#include "globals.hh"
@ -6,6 +7,8 @@
#include <sqlite3.h>
#include <nlohmann/json.hpp>
#include "strings.hh"
namespace nix {
static const char * schema = R"sql(
@ -84,11 +87,10 @@ public:
Sync<State> _state;
NarInfoDiskCacheImpl()
NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite")
{
auto state(_state.lock());
Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite";
createDirs(dirOf(dbPath));
state->db = SQLite(dbPath);
@ -98,7 +100,7 @@ public:
state->db.exec(schema);
state->insertCache.create(state->db,
"insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
"insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?1, ?2, ?3, ?4, ?5) on conflict (url) do update set timestamp = ?2, storeDir = ?3, wantMassQuery = ?4, priority = ?5 returning id;");
state->queryCache.create(state->db,
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
@ -162,42 +164,75 @@ public:
Cache & getCache(State & state, const std::string & uri)
{
auto i = state.caches.find(uri);
if (i == state.caches.end()) abort();
if (i == state.caches.end()) unreachable();
return i->second;
}
void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
private:
std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
{
retrySQLite<void>([&]() {
auto i = state.caches.find(uri);
if (i == state.caches.end()) {
auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
if (!queryCache.next())
return std::nullopt;
auto cache = Cache {
.id = (int) queryCache.getInt(0),
.storeDir = queryCache.getStr(1),
.wantMassQuery = queryCache.getInt(2) != 0,
.priority = (int) queryCache.getInt(3),
};
state.caches.emplace(uri, cache);
}
return getCache(state, uri);
}
public:
int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
{
return retrySQLite<int>([&]() {
auto state(_state.lock());
SQLiteTxn txn(state->db);
// FIXME: race
// To avoid the race, check whether another process has created the cache
// for this URI in the meantime.
auto cache(queryCacheRaw(*state, uri));
state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
assert(sqlite3_changes(state->db) == 1);
state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
if (cache)
return cache->id;
Cache ret {
.id = -1, // set below
.storeDir = storeDir,
.wantMassQuery = wantMassQuery,
.priority = priority,
};
{
auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
if (!r.next()) { unreachable(); }
ret.id = (int) r.getInt(0);
}
state->caches[uri] = ret;
txn.commit();
return ret.id;
});
}
std::optional<CacheInfo> cacheExists(const std::string & uri) override
std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) override
{
return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
auto state(_state.lock());
auto i = state->caches.find(uri);
if (i == state->caches.end()) {
auto queryCache(state->queryCache.use()(uri)(time(0) - cacheInfoTtl));
if (!queryCache.next())
return std::nullopt;
state->caches.emplace(uri,
Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
}
auto & cache(getCache(*state, uri));
auto cache(queryCacheRaw(*state, uri));
if (!cache)
return std::nullopt;
return CacheInfo {
.wantMassQuery = cache.wantMassQuery,
.priority = cache.priority
.id = cache->id,
.wantMassQuery = cache->wantMassQuery,
.priority = cache->priority
};
});
}
@ -241,7 +276,7 @@ public:
narInfo->deriver = StorePath(queryNAR.getStr(9));
for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(10), " "))
narInfo->sigs.insert(sig);
narInfo->ca = parseContentAddressOpt(queryNAR.getStr(11));
narInfo->ca = ContentAddress::parseOpt(queryNAR.getStr(11));
return {oValid, narInfo};
});
@ -300,9 +335,9 @@ public:
(std::string(info->path.name()))
(narInfo ? narInfo->url : "", narInfo != 0)
(narInfo ? narInfo->compression : "", narInfo != 0)
(narInfo && narInfo->fileHash ? narInfo->fileHash->to_string(Base32, true) : "", narInfo && narInfo->fileHash)
(narInfo && narInfo->fileHash ? narInfo->fileHash->to_string(HashFormat::Nix32, true) : "", narInfo && narInfo->fileHash)
(narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
(info->narHash.to_string(Base32, true))
(info->narHash.to_string(HashFormat::Nix32, true))
(info->narSize)
(concatStringsSep(" ", info->shortRefs()))
(info->deriver ? std::string(info->deriver->to_string()) : "", (bool) info->deriver)
@ -359,4 +394,9 @@ ref<NarInfoDiskCache> getNarInfoDiskCache()
return cache;
}
ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath)
{
return make_ref<NarInfoDiskCacheImpl>(dbPath);
}
}

View file

@ -1,4 +1,5 @@
#pragma once
///@file
#include "ref.hh"
#include "nar-info.hh"
@ -13,16 +14,17 @@ public:
virtual ~NarInfoDiskCache() { }
virtual void createCache(const std::string & uri, const Path & storeDir,
virtual int createCache(const std::string & uri, const Path & storeDir,
bool wantMassQuery, int priority) = 0;
struct CacheInfo
{
int id;
bool wantMassQuery;
int priority;
};
virtual std::optional<CacheInfo> cacheExists(const std::string & uri) = 0;
virtual std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) = 0;
virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
const std::string & uri, const std::string & hashPart) = 0;
@ -41,8 +43,12 @@ public:
const std::string & uri, const DrvOutput & id) = 0;
};
/* Return a singleton cache object that can be used concurrently by
multiple threads. */
/**
* Return a singleton cache object that can be used concurrently by
* multiple threads.
*/
ref<NarInfoDiskCache> getNarInfoDiskCache();
ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath);
}
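A sketch of the registration flow using the test helper declared above; the database path and cache URL are made up:

```c++
#include <iostream>

#include "nar-info-disk-cache.hh" // assumed install location

int main()
{
    using namespace nix;

    // Point the cache at a throwaway database (path made up for this sketch).
    auto cache = getTestNarInfoDiskCache("/tmp/test-binary-cache.sqlite");

    // createCache now returns the row id; if another process registered the
    // same URI first, the existing id is returned instead.
    int id = cache->createCache("https://cache.example.org", "/nix/store",
        /* wantMassQuery */ true, /* priority */ 50);
    std::cout << "registered cache with id " << id << "\n";

    if (auto info = cache->upToDateCacheExists("https://cache.example.org"))
        std::cout << "priority " << info->priority << "\n";
}
```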

View file

@ -1,21 +1,25 @@
#include "globals.hh"
#include "nar-info.hh"
#include "store-api.hh"
#include "strings.hh"
namespace nix {
NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence)
: ValidPathInfo(StorePath(StorePath::dummy), Hash(Hash::dummy)) // FIXME: hack
{
auto corrupt = [&]() {
return Error("NAR info file '%1%' is corrupt", whence);
unsigned line = 1;
auto corrupt = [&](const char * reason) {
return Error("NAR info file '%1%' is corrupt: %2%", whence,
std::string(reason) + (line > 0 ? " at line " + std::to_string(line) : ""));
};
auto parseHashField = [&](const std::string & s) {
try {
return Hash::parseAnyPrefixed(s);
} catch (BadHash &) {
throw corrupt();
throw corrupt("bad hash");
}
};
@ -26,12 +30,12 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
while (pos < s.size()) {
size_t colon = s.find(':', pos);
if (colon == std::string::npos) throw corrupt();
if (colon == s.npos) throw corrupt("expecting ':'");
std::string name(s, pos, colon - pos);
size_t eol = s.find('\n', colon + 2);
if (eol == std::string::npos) throw corrupt();
if (eol == s.npos) throw corrupt("expecting '\\n'");
std::string value(s, colon + 2, eol - colon - 2);
@ -47,7 +51,7 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
fileHash = parseHashField(value);
else if (name == "FileSize") {
auto n = string2Int<decltype(fileSize)>(value);
if (!n) throw corrupt();
if (!n) throw corrupt("invalid FileSize");
fileSize = *n;
}
else if (name == "NarHash") {
@ -56,12 +60,12 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
}
else if (name == "NarSize") {
auto n = string2Int<decltype(narSize)>(value);
if (!n) throw corrupt();
if (!n) throw corrupt("invalid NarSize");
narSize = *n;
}
else if (name == "References") {
auto refs = tokenizeString<Strings>(value, " ");
if (!references.empty()) throw corrupt();
if (!references.empty()) throw corrupt("extra References");
for (auto & r : refs)
references.insert(StorePath(r));
}
@ -72,17 +76,26 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
else if (name == "Sig")
sigs.insert(value);
else if (name == "CA") {
if (ca) throw corrupt();
if (ca) throw corrupt("extra CA");
// FIXME: allow blank ca or require skipping field?
ca = parseContentAddressOpt(value);
ca = ContentAddress::parseOpt(value);
}
pos = eol + 1;
line += 1;
}
if (compression == "") compression = "bzip2";
if (!havePath || !haveNarHash || url.empty() || narSize == 0) throw corrupt();
if (!havePath || !haveNarHash || url.empty() || narSize == 0) {
line = 0; // don't include line information in the error
throw corrupt(
!havePath ? "StorePath missing" :
!haveNarHash ? "NarHash missing" :
url.empty() ? "URL missing" :
narSize == 0 ? "NarSize missing or zero"
: "?");
}
}
std::string NarInfo::to_string(const Store & store) const
@ -92,11 +105,11 @@ std::string NarInfo::to_string(const Store & store) const
res += "URL: " + url + "\n";
assert(compression != "");
res += "Compression: " + compression + "\n";
assert(fileHash && fileHash->type == htSHA256);
res += "FileHash: " + fileHash->to_string(Base32, true) + "\n";
assert(fileHash && fileHash->algo == HashAlgorithm::SHA256);
res += "FileHash: " + fileHash->to_string(HashFormat::Nix32, true) + "\n";
res += "FileSize: " + std::to_string(fileSize) + "\n";
assert(narHash.type == htSHA256);
res += "NarHash: " + narHash.to_string(Base32, true) + "\n";
assert(narHash.algo == HashAlgorithm::SHA256);
res += "NarHash: " + narHash.to_string(HashFormat::Nix32, true) + "\n";
res += "NarSize: " + std::to_string(narSize) + "\n";
res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";
@ -113,4 +126,58 @@ std::string NarInfo::to_string(const Store & store) const
return res;
}
nlohmann::json NarInfo::toJSON(
const Store & store,
bool includeImpureInfo,
HashFormat hashFormat) const
{
using nlohmann::json;
auto jsonObject = ValidPathInfo::toJSON(store, includeImpureInfo, hashFormat);
if (includeImpureInfo) {
if (!url.empty())
jsonObject["url"] = url;
if (!compression.empty())
jsonObject["compression"] = compression;
if (fileHash)
jsonObject["downloadHash"] = fileHash->to_string(hashFormat, true);
if (fileSize)
jsonObject["downloadSize"] = fileSize;
}
return jsonObject;
}
NarInfo NarInfo::fromJSON(
const Store & store,
const StorePath & path,
const nlohmann::json & json)
{
using nlohmann::detail::value_t;
NarInfo res {
ValidPathInfo {
path,
UnkeyedValidPathInfo::fromJSON(store, json),
}
};
if (json.contains("url"))
res.url = getString(valueAt(json, "url"));
if (json.contains("compression"))
res.compression = getString(valueAt(json, "compression"));
if (json.contains("downloadHash"))
res.fileHash = Hash::parseAny(
getString(valueAt(json, "downloadHash")),
std::nullopt);
if (json.contains("downloadSize"))
res.fileSize = getInteger(valueAt(json, "downloadSize"));
return res;
}
}
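To show the parser above in action, here is a sketch that feeds it a minimal `.narinfo` document; every value is a made-up placeholder chosen only to be syntactically acceptable (the store path hash part must be 32 base-32 characters and the SHA-256 must be 52), and the default `/nix/store` store directory is assumed:

```c++
#include <iostream>

#include "nar-info.hh"  // assumed install locations
#include "store-api.hh"

int main()
{
    using namespace nix;

    auto store = openStore();

    // Placeholder data only; a real .narinfo would carry genuine hashes.
    std::string text =
        "StorePath: /nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-example\n"
        "URL: nar/example.nar.xz\n"
        "Compression: xz\n"
        "NarHash: sha256:0000000000000000000000000000000000000000000000000000\n"
        "NarSize: 1234\n";

    NarInfo info(*store, text, "example.narinfo");

    // Round-trip back to the textual format.
    std::cout << info.to_string(*store);
}
```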

View file

@ -1,4 +1,5 @@
#pragma once
///@file
#include "types.hh"
#include "hash.hh"
@ -16,11 +17,27 @@ struct NarInfo : ValidPathInfo
uint64_t fileSize = 0;
NarInfo() = delete;
NarInfo(StorePath && path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { }
NarInfo(const Store & store, std::string name, ContentAddressWithReferences ca, Hash narHash)
: ValidPathInfo(store, std::move(name), std::move(ca), narHash)
{ }
NarInfo(StorePath path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { }
NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
NarInfo(const Store & store, const std::string & s, const std::string & whence);
bool operator ==(const NarInfo &) const = default;
// TODO: libc++ 16 (used by Darwin) is missing `std::optional::operator <=>`, so this can't be defaulted yet
//auto operator <=>(const NarInfo &) const = default;
std::string to_string(const Store & store) const;
nlohmann::json toJSON(
const Store & store,
bool includeImpureInfo,
HashFormat hashFormat) const override;
static NarInfo fromJSON(
const Store & store,
const StorePath & path,
const nlohmann::json & json);
};
}

View file

@ -5,5 +5,6 @@ includedir=@includedir@
Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Libs: -L${libdir} -lnixstore -lnixutil
Cflags: -I${includedir}/nix -std=c++17
Requires: nix-util
Libs: -L${libdir} -lnixstore
Cflags: -I${includedir}/nix -std=c++2a

View file

@ -1,6 +1,8 @@
#include "util.hh"
#include "local-store.hh"
#include "globals.hh"
#include "signals.hh"
#include "posix-fs-canonicalise.hh"
#include "posix-source-accessor.hh"
#include <cstdlib>
#include <cstring>
@ -55,7 +57,7 @@ LocalStore::InodeHash LocalStore::loadInodeHash()
}
if (errno) throw SysError("reading directory '%1%'", linksDir);
printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size());
printMsg(lvlTalkative, "loaded %1% hash inodes", inodeHash.size());
return inodeHash;
}
@ -73,7 +75,7 @@ Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHa
checkInterrupt();
if (inodeHash.count(dirent->d_ino)) {
debug(format("'%1%' is already linked") % dirent->d_name);
debug("'%1%' is already linked", dirent->d_name);
continue;
}
@ -96,13 +98,14 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
#if __APPLE__
/* HFS/macOS has some undocumented security feature disabling hardlinking for
special files within .app dirs. *.app/Contents/PkgInfo and
*.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
https://github.com/NixOS/nix/issues/1443 for more discussion. */
special files within .app dirs. Known affected paths include
*.app/Contents/{PkgInfo,Resources/\*.lproj,_CodeSignature} and .DS_Store.
See https://github.com/NixOS/nix/issues/1443 and
https://github.com/NixOS/nix/pull/2230 for more discussion. */
if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
{
debug(format("'%1%' is not allowed to be linked in macOS") % path);
debug("'%1%' is not allowed to be linked in macOS", path);
return;
}
#endif
@ -145,62 +148,68 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
Also note that if `path' is a symlink, then we're hashing the
contents of the symlink (i.e. the result of readlink()), not
the contents of the target (which may not even exist). */
Hash hash = hashPath(htSHA256, path).first;
debug(format("'%1%' has hash '%2%'") % path % hash.to_string(Base32, true));
Hash hash = ({
hashPath(
{make_ref<PosixSourceAccessor>(), CanonPath(path)},
FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256).first;
});
debug("'%1%' has hash '%2%'", path, hash.to_string(HashFormat::Nix32, true));
/* Check if this is a known hash. */
Path linkPath = linksDir + "/" + hash.to_string(Base32, false);
std::filesystem::path linkPath = std::filesystem::path{linksDir} / hash.to_string(HashFormat::Nix32, false);
/* Maybe delete the link, if it has been corrupted. */
if (pathExists(linkPath)) {
auto stLink = lstat(linkPath);
if (std::filesystem::exists(std::filesystem::symlink_status(linkPath))) {
auto stLink = lstat(linkPath.string());
if (st.st_size != stLink.st_size
|| (repair && hash != hashPath(htSHA256, linkPath).first))
|| (repair && hash != ({
hashPath(
PosixSourceAccessor::createAtRoot(linkPath),
FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256).first;
})))
{
// XXX: Consider overwriting linkPath with our valid version.
warn("removing corrupted link '%s'", linkPath);
warn("removing corrupted link %s", linkPath);
warn("There may be more corrupted paths."
"\nYou should run `nix-store --verify --check-contents --repair` to fix them all");
unlink(linkPath.c_str());
std::filesystem::remove(linkPath);
}
}
if (!pathExists(linkPath)) {
if (!std::filesystem::exists(std::filesystem::symlink_status(linkPath))) {
/* Nope, create a hard link in the links directory. */
if (link(path.c_str(), linkPath.c_str()) == 0) {
try {
std::filesystem::create_hard_link(path, linkPath);
inodeHash.insert(st.st_ino);
return;
}
} catch (std::filesystem::filesystem_error & e) {
if (e.code() == std::errc::file_exists) {
/* Fall through if another process created linkPath before
we did. */
}
switch (errno) {
case EEXIST:
/* Fall through if another process created linkPath before
we did. */
break;
else if (e.code() == std::errc::no_space_on_device) {
/* On ext4, that probably means the directory index is
full. When that happens, it's fine to ignore it: we
just effectively disable deduplication of this
file. */
printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno));
return;
}
case ENOSPC:
/* On ext4, that probably means the directory index is
full. When that happens, it's fine to ignore it: we
just effectively disable deduplication of this
file. */
printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno));
return;
default:
throw SysError("cannot link '%1%' to '%2%'", linkPath, path);
else throw;
}
}
/* Yes! We've seen a file with the same contents. Replace the
current file with a hard link to that file. */
auto stLink = lstat(linkPath);
auto stLink = lstat(linkPath.string());
if (st.st_ino == stLink.st_ino) {
debug(format("'%1%' is already linked to '%2%'") % path % linkPath);
debug("'%1%' is already linked to '%2%'", path, linkPath);
return;
}
printMsg(lvlTalkative, format("linking '%1%' to '%2%'") % path % linkPath);
printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath);
/* Make the containing directory writable, but only if it's not
the store itself (we don't want or need to mess with its
@ -213,28 +222,30 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
its timestamp back to 0. */
MakeReadOnly makeReadOnly(mustToggle ? dirOfPath : "");
Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
% realStoreDir % getpid() % random()).str();
std::filesystem::path tempLink = fmt("%1%/.tmp-link-%2%-%3%", realStoreDir, getpid(), rand());
if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
if (errno == EMLINK) {
try {
std::filesystem::create_hard_link(linkPath, tempLink);
inodeHash.insert(st.st_ino);
} catch (std::filesystem::filesystem_error & e) {
if (e.code() == std::errc::too_many_links) {
/* Too many links to the same file (>= 32000 on most file
systems). This is likely to happen with empty files.
Just shrug and ignore. */
if (st.st_size)
printInfo(format("'%1%' has maximum number of links") % linkPath);
printInfo("'%1%' has maximum number of links", linkPath);
return;
}
throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath);
throw;
}
/* Atomically replace the old file with the new hard link. */
try {
renameFile(tempLink, path);
} catch (SysError & e) {
if (unlink(tempLink.c_str()) == -1)
std::filesystem::rename(tempLink, path);
} catch (std::filesystem::filesystem_error & e) {
std::filesystem::remove(tempLink);
printError("unable to unlink '%1%'", tempLink);
if (errno == EMLINK) {
if (e.code() == std::errc::too_many_links) {
/* Some filesystems generate too many links on the rename,
rather than on the original link. (Probably it
temporarily increases the st_nlink field before
@ -247,10 +258,13 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
stats.filesLinked++;
stats.bytesFreed += st.st_size;
stats.blocksFreed += st.st_blocks;
if (act)
act->result(resFileLinked, st.st_size, st.st_blocks);
act->result(resFileLinked, st.st_size
#ifndef _WIN32
, st.st_blocks
#endif
);
}
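The hunk above boils down to: hash the file, keep one canonical copy per hash under the links directory, and replace duplicates with hard links. Below is a compressed sketch of that flow that reuses the same helpers but deliberately elides locking, permission toggling and the EEXIST/ENOSPC/EMLINK recovery of the real code; the header names are assumed:

```c++
#include <filesystem>
#include <string>
#include <unistd.h>

#include "hash.hh"                   // assumed install locations of the
#include "posix-source-accessor.hh"  // headers used by the hunk above
#include "file-content-address.hh"
#include "canon-path.hh"

namespace fs = std::filesystem;
using namespace nix;

// Hash `path`, keep one copy per hash under `linksDir`, and replace `path`
// with a hard link to that copy.
void deduplicateOne(const fs::path & linksDir, const std::string & path)
{
    Hash hash = hashPath(
        {make_ref<PosixSourceAccessor>(), CanonPath(path)},
        FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256).first;

    fs::path linkPath = linksDir / hash.to_string(HashFormat::Nix32, false);

    if (!fs::exists(fs::symlink_status(linkPath))) {
        // First file with these contents: give it a second name in the links
        // directory and stop.
        fs::create_hard_link(path, linkPath);
        return;
    }

    // Duplicate contents: atomically replace `path` with a hard link to the
    // canonical copy, going through a temporary name first.
    fs::path tempLink = fs::path(path).parent_path()
        / (".tmp-link-" + std::to_string(getpid()));
    fs::create_hard_link(linkPath, tempLink);
    fs::rename(tempLink, path);
}

int main(int argc, char * * argv)
{
    if (argc != 3) return 1;
    deduplicateOne(argv[1], argv[2]); // links dir, file to deduplicate
}
```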

View file

@ -0,0 +1,196 @@
#include <regex>
#include <nlohmann/json.hpp>
#include "util.hh"
#include "regex-combinators.hh"
#include "outputs-spec.hh"
#include "path-regex.hh"
#include "strings-inline.hh"
namespace nix {
bool OutputsSpec::contains(const std::string & outputName) const
{
return std::visit(overloaded {
[&](const OutputsSpec::All &) {
return true;
},
[&](const OutputsSpec::Names & outputNames) {
return outputNames.count(outputName) > 0;
},
}, raw);
}
static std::string outputSpecRegexStr =
regex::either(
regex::group(R"(\*)"),
regex::group(regex::list(nameRegexStr)));
std::optional<OutputsSpec> OutputsSpec::parseOpt(std::string_view s)
{
static std::regex regex(std::string { outputSpecRegexStr });
std::smatch match;
std::string s2 { s }; // until someone improves std::regex
if (!std::regex_match(s2, match, regex))
return std::nullopt;
if (match[1].matched)
return { OutputsSpec::All {} };
if (match[2].matched)
return OutputsSpec::Names { tokenizeString<StringSet>(match[2].str(), ",") };
assert(false);
}
OutputsSpec OutputsSpec::parse(std::string_view s)
{
std::optional spec = parseOpt(s);
if (!spec)
throw Error("invalid outputs specifier '%s'", s);
return std::move(*spec);
}
std::optional<std::pair<std::string_view, ExtendedOutputsSpec>> ExtendedOutputsSpec::parseOpt(std::string_view s)
{
auto found = s.rfind('^');
if (found == std::string::npos)
return std::pair { s, ExtendedOutputsSpec::Default {} };
auto specOpt = OutputsSpec::parseOpt(s.substr(found + 1));
if (!specOpt)
return std::nullopt;
return std::pair { s.substr(0, found), ExtendedOutputsSpec::Explicit { std::move(*specOpt) } };
}
std::pair<std::string_view, ExtendedOutputsSpec> ExtendedOutputsSpec::parse(std::string_view s)
{
std::optional spec = parseOpt(s);
if (!spec)
throw Error("invalid extended outputs specifier '%s'", s);
return *spec;
}
std::string OutputsSpec::to_string() const
{
return std::visit(overloaded {
[&](const OutputsSpec::All &) -> std::string {
return "*";
},
[&](const OutputsSpec::Names & outputNames) -> std::string {
return concatStringsSep(",", outputNames);
},
}, raw);
}
std::string ExtendedOutputsSpec::to_string() const
{
return std::visit(overloaded {
[&](const ExtendedOutputsSpec::Default &) -> std::string {
return "";
},
[&](const ExtendedOutputsSpec::Explicit & outputSpec) -> std::string {
return "^" + outputSpec.to_string();
},
}, raw);
}
OutputsSpec OutputsSpec::union_(const OutputsSpec & that) const
{
return std::visit(overloaded {
[&](const OutputsSpec::All &) -> OutputsSpec {
return OutputsSpec::All { };
},
[&](const OutputsSpec::Names & theseNames) -> OutputsSpec {
return std::visit(overloaded {
[&](const OutputsSpec::All &) -> OutputsSpec {
return OutputsSpec::All {};
},
[&](const OutputsSpec::Names & thoseNames) -> OutputsSpec {
OutputsSpec::Names ret = theseNames;
ret.insert(thoseNames.begin(), thoseNames.end());
return ret;
},
}, that.raw);
},
}, raw);
}
bool OutputsSpec::isSubsetOf(const OutputsSpec & that) const
{
return std::visit(overloaded {
[&](const OutputsSpec::All &) {
return true;
},
[&](const OutputsSpec::Names & thoseNames) {
return std::visit(overloaded {
[&](const OutputsSpec::All &) {
return false;
},
[&](const OutputsSpec::Names & theseNames) {
bool ret = true;
for (auto & o : theseNames)
if (thoseNames.count(o) == 0)
ret = false;
return ret;
},
}, raw);
},
}, that.raw);
}
}
namespace nlohmann {
using namespace nix;
OutputsSpec adl_serializer<OutputsSpec>::from_json(const json & json) {
auto names = json.get<StringSet>();
if (names == StringSet({"*"}))
return OutputsSpec::All {};
else
return OutputsSpec::Names { std::move(names) };
}
void adl_serializer<OutputsSpec>::to_json(json & json, OutputsSpec t) {
std::visit(overloaded {
[&](const OutputsSpec::All &) {
json = std::vector<std::string>({"*"});
},
[&](const OutputsSpec::Names & names) {
json = names;
},
}, t.raw);
}
ExtendedOutputsSpec adl_serializer<ExtendedOutputsSpec>::from_json(const json & json) {
if (json.is_null())
return ExtendedOutputsSpec::Default {};
else {
return ExtendedOutputsSpec::Explicit { json.get<OutputsSpec>() };
}
}
void adl_serializer<ExtendedOutputsSpec>::to_json(json & json, ExtendedOutputsSpec t) {
std::visit(overloaded {
[&](const ExtendedOutputsSpec::Default &) {
json = nullptr;
},
[&](const ExtendedOutputsSpec::Explicit & e) {
adl_serializer<OutputsSpec>::to_json(json, e);
},
}, t.raw);
}
}
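A short sketch of the parsing entry points defined above; the installable-style string is made up:

```c++
#include <cassert>
#include <iostream>

#include "outputs-spec.hh" // assumed install location

int main()
{
    using namespace nix;

    // "*" selects all outputs; otherwise a comma-separated list of names.
    auto spec = OutputsSpec::parse("out,dev");
    assert(spec.contains("dev"));
    assert(!spec.contains("doc"));

    // '^' splits an installable-style string into a prefix and an outputs spec.
    auto [prefix, extended] = ExtendedOutputsSpec::parse("nixpkgs#hello^out,dev");
    std::cout << prefix << " -> " << extended.to_string() << "\n"; // prints "^out,dev"
}
```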

Some files were not shown because too many files have changed in this diff