Mirror of https://github.com/NixOS/nix, synced 2025-06-27 16:51:15 +02:00

Commit fefd6c9e5f: Merge branch 'master' of github.com:NixOS/nix into allow-relative-paths-in-store-option

175 changed files with 9588 additions and 2639 deletions
@@ -15,6 +15,7 @@
 #include <chrono>
 #include <future>
 #include <regex>
+#include <fstream>

 #include <nlohmann/json.hpp>

@@ -57,6 +58,13 @@ void BinaryCacheStore::init()
     }
 }

+void BinaryCacheStore::upsertFile(const std::string & path,
+    std::string && data,
+    const std::string & mimeType)
+{
+    upsertFile(path, std::make_shared<std::stringstream>(std::move(data)), mimeType);
+}
+
 void BinaryCacheStore::getFile(const std::string & path,
     Callback<std::shared_ptr<std::string>> callback) noexcept
 {

@@ -113,13 +121,74 @@ void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
         diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
 }

-void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
-    RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
+AutoCloseFD openFile(const Path & path)
 {
-    // FIXME: See if we can use the original source to reduce memory usage.
-    auto nar = make_ref<std::string>(narSource.drain());
+    auto fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
+    if (!fd)
+        throw SysError("opening file '%1%'", path);
+    return fd;
+}

-    if (!repair && isValidPath(info.path)) return;
+struct FileSource : FdSource
+{
+    AutoCloseFD fd2;
+
+    FileSource(const Path & path)
+        : fd2(openFile(path))
+    {
+        fd = fd2.get();
+    }
+};
+
+void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
+    RepairFlag repair, CheckSigsFlag checkSigs)
+{
+    assert(info.narHash && info.narSize);
+
+    if (!repair && isValidPath(info.path)) {
+        // FIXME: copyNAR -> null sink
+        narSource.drain();
+        return;
+    }
+
+    auto [fdTemp, fnTemp] = createTempFile();
+
+    auto now1 = std::chrono::steady_clock::now();
+
+    /* Read the NAR simultaneously into a CompressionSink+FileSink (to
+       write the compressed NAR to disk), into a HashSink (to get the
+       NAR hash), and into a NarAccessor (to get the NAR listing). */
+    HashSink fileHashSink(htSHA256);
+    std::shared_ptr<FSAccessor> narAccessor;
+    {
+        FdSink fileSink(fdTemp.get());
+        TeeSink teeSink(fileSink, fileHashSink);
+        auto compressionSink = makeCompressionSink(compression, teeSink);
+        TeeSource teeSource(narSource, *compressionSink);
+        narAccessor = makeNarAccessor(teeSource);
+        compressionSink->finish();
+    }
+
+    auto now2 = std::chrono::steady_clock::now();
+
+    auto narInfo = make_ref<NarInfo>(info);
+    narInfo->narSize = info.narSize;
+    narInfo->narHash = info.narHash;
+    narInfo->compression = compression;
+    auto [fileHash, fileSize] = fileHashSink.finish();
+    narInfo->fileHash = fileHash;
+    narInfo->fileSize = fileSize;
+    narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar"
+        + (compression == "xz" ? ".xz" :
+           compression == "bzip2" ? ".bz2" :
+           compression == "br" ? ".br" :
+           "");
+
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+    printMsg(lvlTalkative, "copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache",
+        printStorePath(narInfo->path), info.narSize,
+        ((1.0 - (double) fileSize / info.narSize) * 100.0),
+        duration);

     /* Verify that all references are valid. This may do some .narinfo
        reads, but typically they'll already be cached. */
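The rewritten addToStore() above pushes a single pass over the NAR through several consumers at once (a compressing file sink, a hash sink, and a NAR indexer) via TeeSink/TeeSource, instead of buffering the whole NAR in memory. A minimal standalone sketch of that tee pattern, not part of this commit and using only the standard library (an FNV-1a hash stands in for Nix's HashSink; names are illustrative):

#include <cstddef>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <sstream>

// A sink consumes a chunk of bytes, mirroring Nix's Sink interface.
struct Sink {
    virtual ~Sink() = default;
    virtual void operator()(const char * data, size_t len) = 0;
};

struct FileSink : Sink {
    std::ofstream out;
    FileSink(const char * path) : out(path, std::ios::binary) {}
    void operator()(const char * data, size_t len) override { out.write(data, len); }
};

struct Fnv1aSink : Sink {               // stand-in for HashSink
    uint64_t h = 1469598103934665603ull;
    void operator()(const char * data, size_t len) override {
        for (size_t i = 0; i < len; ++i) { h ^= (unsigned char) data[i]; h *= 1099511628211ull; }
    }
};

// TeeSink forwards every chunk to two sinks, like Nix's TeeSink.
struct TeeSink : Sink {
    Sink & a, & b;
    TeeSink(Sink & a, Sink & b) : a(a), b(b) {}
    void operator()(const char * data, size_t len) override { a(data, len); b(data, len); }
};

int main()
{
    std::istringstream in("example NAR payload");
    FileSink file("/tmp/example.nar");
    Fnv1aSink hash;
    TeeSink tee(file, hash);
    char buf[4096];
    while (in.read(buf, sizeof(buf)) || in.gcount())
        tee(buf, in.gcount());          // one read feeds both consumers
    std::cout << std::hex << hash.h << "\n";
}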
@@ -132,23 +201,6 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
                 printStorePath(info.path), printStorePath(ref));
         }

-    assert(nar->compare(0, narMagic.size(), narMagic) == 0);
-
-    auto narInfo = make_ref<NarInfo>(info);
-
-    narInfo->narSize = nar->size();
-    narInfo->narHash = hashString(htSHA256, *nar);
-
-    if (info.narHash && info.narHash != narInfo->narHash)
-        throw Error("refusing to copy corrupted path '%1%' to binary cache", printStorePath(info.path));
-
-    auto accessor_ = std::dynamic_pointer_cast<RemoteFSAccessor>(accessor);
-
-    auto narAccessor = makeNarAccessor(nar);
-
-    if (accessor_)
-        accessor_->addToCache(printStorePath(info.path), *nar, narAccessor);
-
     /* Optionally write a JSON file containing a listing of the
        contents of the NAR. */
     if (writeNARListing) {

@@ -160,33 +212,13 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource

         {
             auto res = jsonRoot.placeholder("root");
-            listNar(res, narAccessor, "", true);
+            listNar(res, ref<FSAccessor>(narAccessor), "", true);
         }
     }

     upsertFile(std::string(info.path.to_string()) + ".ls", jsonOut.str(), "application/json");
 }

-    /* Compress the NAR. */
-    narInfo->compression = compression;
-    auto now1 = std::chrono::steady_clock::now();
-    auto narCompressed = compress(compression, *nar, parallelCompression);
-    auto now2 = std::chrono::steady_clock::now();
-    narInfo->fileHash = hashString(htSHA256, *narCompressed);
-    narInfo->fileSize = narCompressed->size();
-
-    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
-    printMsg(lvlTalkative, "copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache",
-        printStorePath(narInfo->path), narInfo->narSize,
-        ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0),
-        duration);
-
-    narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar"
-        + (compression == "xz" ? ".xz" :
-           compression == "bzip2" ? ".bz2" :
-           compression == "br" ? ".br" :
-           "");
-
     /* Optionally maintain an index of DWARF debug info files
        consisting of JSON files named 'debuginfo/<build-id>' that
        specify the NAR file and member containing the debug info. */

@@ -247,12 +279,14 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
     /* Atomically write the NAR file. */
     if (repair || !fileExists(narInfo->url)) {
         stats.narWrite++;
-        upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
+        upsertFile(narInfo->url,
+            std::make_shared<std::fstream>(fnTemp, std::ios_base::in),
+            "application/x-nix-nar");
     } else
         stats.narWriteAverted++;

-    stats.narWriteBytes += nar->size();
-    stats.narWriteCompressedBytes += narCompressed->size();
+    stats.narWriteBytes += info.narSize;
+    stats.narWriteCompressedBytes += fileSize;
     stats.narWriteCompressionTimeMs += duration;

     /* Atomically write the NAR info file.*/

@@ -351,7 +385,7 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
     ValidPathInfo info(makeFixedOutputPath(method, h, name));

     auto source = StringSource { *sink.s };
-    addToStore(info, source, repair, CheckSigs, nullptr);
+    addToStore(info, source, repair, CheckSigs);

     return std::move(info.path);
 }

@@ -366,7 +400,7 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
         StringSink sink;
         dumpString(s, sink);
         auto source = StringSource { *sink.s };
-        addToStore(info, source, repair, CheckSigs, nullptr);
+        addToStore(info, source, repair, CheckSigs);
     }

     return std::move(info.path);
@@ -36,9 +36,13 @@ public:
     virtual bool fileExists(const std::string & path) = 0;

     virtual void upsertFile(const std::string & path,
-        const std::string & data,
+        std::shared_ptr<std::basic_iostream<char>> istream,
         const std::string & mimeType) = 0;

+    void upsertFile(const std::string & path,
+        std::string && data,
+        const std::string & mimeType);
+
     /* Note: subclasses must implement at least one of the two
        following getFile() methods. */

@@ -75,8 +79,7 @@ public:
     { unsupported("queryPathFromHashPart"); }

     void addToStore(const ValidPathInfo & info, Source & narSource,
-        RepairFlag repair, CheckSigsFlag checkSigs,
-        std::shared_ptr<FSAccessor> accessor) override;
+        RepairFlag repair, CheckSigsFlag checkSigs) override;

     StorePath addToStore(const string & name, const Path & srcPath,
         FileIngestionMethod method, HashType hashAlgo,
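The header change above makes the virtual upsertFile() hook take a stream, so subclasses can upload without materialising the payload, while a non-virtual string overload keeps small callers convenient. A hedged standalone sketch of that overload pattern (illustrative class names, not the commit's code; note the using-declaration needed because an override hides base-class overloads):

#include <iostream>
#include <memory>
#include <sstream>
#include <string>

struct Cache {
    virtual ~Cache() = default;

    // Virtual hook: subclasses consume a stream.
    virtual void upsertFile(const std::string & path,
        std::shared_ptr<std::basic_iostream<char>> istream,
        const std::string & mimeType) = 0;

    // Convenience overload: wrap a string in a stringstream.
    void upsertFile(const std::string & path,
        std::string && data,
        const std::string & mimeType)
    {
        upsertFile(path, std::make_shared<std::stringstream>(std::move(data)), mimeType);
    }
};

struct PrintingCache : Cache {
    using Cache::upsertFile;   // keep the string overload visible
    void upsertFile(const std::string & path,
        std::shared_ptr<std::basic_iostream<char>> istream,
        const std::string & mimeType) override
    {
        std::cout << path << " (" << mimeType << "): " << istream->rdbuf() << "\n";
    }
};

int main()
{
    PrintingCache cache;
    cache.upsertFile("nar/abc.nar", "payload", "application/x-nix-nar");
}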
@@ -1950,8 +1950,11 @@ void linkOrCopy(const Path & from, const Path & to)
         /* Hard-linking fails if we exceed the maximum link count on a
            file (e.g. 32000 of ext3), which is quite possible after a
            'nix-store --optimise'. FIXME: actually, why don't we just
-           bind-mount in this case? */
-        if (errno != EMLINK)
+           bind-mount in this case?
+
+           It can also fail with EPERM in BeegFS v7 and earlier versions
+           which don't allow hard-links to other directories */
+        if (errno != EMLINK && errno != EPERM)
             throw SysError("linking '%s' to '%s'", to, from);
         copyPath(from, to);
     }
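The hunk above widens the fallback condition: a copy replaces the hard link when link() fails with EMLINK or EPERM, and any other errno is still fatal. A small standalone sketch of the same link-or-copy fallback under those assumptions (POSIX link() plus std::filesystem; not the commit's implementation):

#include <cerrno>
#include <filesystem>
#include <unistd.h>

// Hard-link `from` to `to`; on EMLINK (link-count limit) or EPERM
// (filesystems that forbid certain hard links), fall back to copying.
void linkOrCopy(const std::filesystem::path & from, const std::filesystem::path & to)
{
    if (link(from.c_str(), to.c_str()) == 0) return;
    if (errno != EMLINK && errno != EPERM)
        throw std::filesystem::filesystem_error("link failed", from, to,
            std::error_code(errno, std::generic_category()));
    std::filesystem::copy(from, to, std::filesystem::copy_options::recursive);
}

int main()
{
    // usage sketch: linkOrCopy("/tmp/a", "/tmp/b");
}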
@@ -2038,7 +2041,10 @@ void DerivationGoal::startBuilder()
             if (!std::regex_match(fileName, regex))
                 throw Error("invalid file name '%s' in 'exportReferencesGraph'", fileName);

-            auto storePath = worker.store.parseStorePath(*i++);
+            auto storePathS = *i++;
+            if (!worker.store.isInStore(storePathS))
+                throw BuildError("'exportReferencesGraph' contains a non-store path '%1%'", storePathS);
+            auto storePath = worker.store.toStorePath(storePathS).first;

             /* Write closure info to <fileName>. */
             writeFile(tmpDir + "/" + fileName,

@@ -2077,7 +2083,7 @@ void DerivationGoal::startBuilder()
         for (auto & i : dirsInChroot)
             try {
                 if (worker.store.isInStore(i.second.source))
-                    worker.store.computeFSClosure(worker.store.parseStorePath(worker.store.toStorePath(i.second.source)), closure);
-            } catch (InvalidPath & e) {
+                    worker.store.computeFSClosure(worker.store.toStorePath(i.second.source).first, closure);
+            } catch (Error & e) {
                 throw Error("while processing 'sandbox-paths': %s", e.what());

@@ -2750,8 +2756,8 @@ struct RestrictedStore : public LocalFSStore
     void queryReferrers(const StorePath & path, StorePathSet & referrers) override
     { }

-    StorePathSet queryDerivationOutputs(const StorePath & path) override
-    { throw Error("queryDerivationOutputs"); }
+    OutputPathMap queryDerivationOutputMap(const StorePath & path) override
+    { throw Error("queryDerivationOutputMap"); }

     std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
     { throw Error("queryPathFromHashPart"); }

@@ -2762,10 +2768,9 @@ struct RestrictedStore : public LocalFSStore
     { throw Error("addToStore"); }

     void addToStore(const ValidPathInfo & info, Source & narSource,
-        RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs,
-        std::shared_ptr<FSAccessor> accessor = 0) override
+        RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) override
     {
-        next->addToStore(info, narSource, repair, checkSigs, accessor);
+        next->addToStore(info, narSource, repair, checkSigs);
         goal.addDependency(info.path);
     }
@@ -9,7 +9,7 @@ struct Package {
     Path path;
     bool active;
     int priority;
-    Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
+    Package(const Path & path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
 };

 typedef std::vector<Package> Packages;
@@ -58,13 +58,16 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
         }
     };

+    /* We always have one output, and if it's a fixed-output derivation (as
+       checked below) it must be the only output */
+    auto & output = drv.outputs.begin()->second;
+
     /* Try the hashed mirrors first. */
-    if (getAttr("outputHashMode") == "flat")
+    if (output.hash && output.hash->method == FileIngestionMethod::Flat)
         for (auto hashedMirror : settings.hashedMirrors.get())
             try {
                 if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
-                auto ht = parseHashTypeOpt(getAttr("outputHashAlgo"));
-                auto h = Hash(getAttr("outputHash"), ht);
+                auto & h = output.hash->hash;
                 fetch(hashedMirror + printHashType(*h.type) + "/" + h.to_string(Base16, false));
                 return;
             } catch (Error & e) {
@@ -82,4 +82,16 @@ std::string renderContentAddress(std::optional<ContentAddress> ca) {
     return ca ? renderContentAddress(*ca) : "";
 }

+Hash getContentAddressHash(const ContentAddress & ca)
+{
+    return std::visit(overloaded {
+        [](TextHash th) {
+            return th.hash;
+        },
+        [](FixedOutputHash fsh) {
+            return fsh.hash;
+        }
+    }, ca);
+}
+
 }
@@ -53,4 +53,6 @@ ContentAddress parseContentAddress(std::string_view rawCa);

 std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt);

+Hash getContentAddressHash(const ContentAddress & ca);
+
 }
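getContentAddressHash() above dispatches on a std::variant with the common `overloaded` lambda-fusing helper. A self-contained sketch of that idiom, with simplified stand-ins for TextHash/FixedOutputHash (the real structs hold a Hash, not a string):

#include <iostream>
#include <string>
#include <variant>

// Fuse several lambdas into one visitor for std::visit.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;  // C++17 deduction guide

struct TextHash { std::string hash; };
struct FixedOutputHash { std::string hash; };
using ContentAddress = std::variant<TextHash, FixedOutputHash>;

std::string getHash(const ContentAddress & ca)
{
    return std::visit(overloaded {
        [](const TextHash & th) { return th.hash; },
        [](const FixedOutputHash & fsh) { return fsh.hash; }
    }, ca);
}

int main()
{
    std::cout << getHash(TextHash{"sha256:aaaa"}) << "\n";
}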
@@ -78,10 +78,10 @@ struct TunnelLogger : public Logger
         if (ei.level > verbosity) return;

         std::stringstream oss;
-        oss << ei;
+        showErrorInfo(oss, ei, false);

         StringSink buf;
-        buf << STDERR_NEXT << oss.str() << "\n"; // (fs.s + "\n");
+        buf << STDERR_NEXT << oss.str();
         enqueueMsg(*buf.s);
     }

@@ -347,6 +347,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         break;
     }

+    case wopQueryDerivationOutputMap: {
+        auto path = store->parseStorePath(readString(from));
+        logger->startWork();
+        OutputPathMap outputs = store->queryDerivationOutputMap(path);
+        logger->stopWork();
+        writeOutputPathMap(*store, to, outputs);
+        break;
+    }
+
     case wopQueryDeriver: {
         auto path = store->parseStorePath(readString(from));
         logger->startWork();

@@ -382,7 +391,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         }
         HashType hashAlgo = parseHashType(s);

-        TeeSource savedNAR(from);
+        StringSink savedNAR;
+        TeeSource savedNARSource(from, savedNAR);
         RetrieveRegularNARSink savedRegular;

         if (method == FileIngestionMethod::Recursive) {

@@ -390,7 +400,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
                a string so that we can pass it to
                addToStoreFromDump(). */
             ParseSink sink; /* null sink; just parse the NAR */
-            parseDump(sink, savedNAR);
+            parseDump(sink, savedNARSource);
         } else
             parseDump(savedRegular, from);

@@ -398,7 +408,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         if (!savedRegular.regular) throw Error("regular file expected");

         auto path = store->addToStoreFromDump(
-            method == FileIngestionMethod::Recursive ? *savedNAR.data : savedRegular.s,
+            method == FileIngestionMethod::Recursive ? *savedNAR.s : savedRegular.s,
             baseName,
             method,
             hashAlgo);

@@ -433,7 +443,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     case wopImportPaths: {
         logger->startWork();
         TunnelSource source(from, to);
-        auto paths = store->importPaths(source, nullptr,
+        auto paths = store->importPaths(source,
             trusted ? NoCheckSigs : CheckSigs);
         logger->stopWork();
         Strings paths2;

@@ -722,9 +732,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         if (GET_PROTOCOL_MINOR(clientVersion) >= 21)
             source = std::make_unique<TunnelSource>(from, to);
         else {
-            TeeSink tee(from);
+            TeeParseSink tee(from);
             parseDump(tee, tee.source);
-            saved = std::move(*tee.source.data);
+            saved = std::move(*tee.saved.s);
             source = std::make_unique<StringSource>(saved);
         }

@@ -732,7 +742,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,

         // FIXME: race if addToStore doesn't read source?
         store->addToStore(info, *source, (RepairFlag) repair,
-            dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr);
+            dontCheckSigs ? NoCheckSigs : CheckSigs);

         logger->stopWork();
         break;
@@ -4,7 +4,6 @@
 #include "util.hh"
 #include "worker-protocol.hh"
 #include "fs-accessor.hh"
-#include "istringstream_nocopy.hh"

 namespace nix {

@@ -101,7 +100,7 @@ static StringSet parseStrings(std::istream & str, bool arePaths)
 }

-static DerivationOutput parseDerivationOutput(const Store & store, istringstream_nocopy & str)
+static DerivationOutput parseDerivationOutput(const Store & store, std::istringstream & str)
 {
     expect(str, ","); auto path = store.parseStorePath(parsePath(str));
     expect(str, ","); auto hashAlgo = parseString(str);

@@ -129,10 +128,10 @@ static DerivationOutput parseDerivationOutput(const Store & store, istringstream
 }

-static Derivation parseDerivation(const Store & store, const string & s)
+static Derivation parseDerivation(const Store & store, std::string && s)
 {
     Derivation drv;
-    istringstream_nocopy str(s);
+    std::istringstream str(std::move(s));
     expect(str, "Derive([");

     /* Parse the list of outputs. */
@@ -7,24 +7,6 @@

 namespace nix {

-struct HashAndWriteSink : Sink
-{
-    Sink & writeSink;
-    HashSink hashSink;
-    HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
-    {
-    }
-    virtual void operator () (const unsigned char * data, size_t len)
-    {
-        writeSink(data, len);
-        hashSink(data, len);
-    }
-    Hash currentHash()
-    {
-        return hashSink.currentHash().first;
-    }
-};
-
 void Store::exportPaths(const StorePathSet & paths, Sink & sink)
 {
     auto sorted = topoSortPaths(paths);

@@ -47,28 +29,29 @@ void Store::exportPath(const StorePath & path, Sink & sink)
 {
     auto info = queryPathInfo(path);

-    HashAndWriteSink hashAndWriteSink(sink);
+    HashSink hashSink(htSHA256);
+    TeeSink teeSink(sink, hashSink);

-    narFromPath(path, hashAndWriteSink);
+    narFromPath(path, teeSink);

     /* Refuse to export paths that have changed. This prevents
        filesystem corruption from spreading to other machines.
        Don't complain if the stored hash is zero (unknown). */
-    Hash hash = hashAndWriteSink.currentHash();
+    Hash hash = hashSink.currentHash().first;
     if (hash != info->narHash && info->narHash != Hash(*info->narHash.type))
         throw Error("hash of path '%s' has changed from '%s' to '%s'!",
             printStorePath(path), info->narHash.to_string(Base32, true), hash.to_string(Base32, true));

-    hashAndWriteSink
+    teeSink
         << exportMagic
         << printStorePath(path);
-    writeStorePaths(*this, hashAndWriteSink, info->references);
-    hashAndWriteSink
+    writeStorePaths(*this, teeSink, info->references);
+    teeSink
         << (info->deriver ? printStorePath(*info->deriver) : "")
         << 0;
 }

-StorePaths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, CheckSigsFlag checkSigs)
+StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
 {
     StorePaths res;
     while (true) {

@@ -77,7 +60,7 @@ StorePaths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> acces
         if (n != 1) throw Error("input doesn't look like something created by 'nix-store --export'");

         /* Extract the NAR from the source. */
-        TeeSink tee(source);
+        TeeParseSink tee(source);
         parseDump(tee, tee.source);

         uint32_t magic = readInt(source);

@@ -94,16 +77,16 @@ StorePaths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> acces
         if (deriver != "")
             info.deriver = parseStorePath(deriver);

-        info.narHash = hashString(htSHA256, *tee.source.data);
-        info.narSize = tee.source.data->size();
+        info.narHash = hashString(htSHA256, *tee.saved.s);
+        info.narSize = tee.saved.s->size();

         // Ignore optional legacy signature.
         if (readInt(source) == 1)
             readString(source);

         // Can't use underlying source, which would have been exhausted
-        auto source = StringSource { *tee.source.data };
-        addToStore(info, source, NoRepair, checkSigs, accessor);
+        auto source = StringSource { *tee.saved.s };
+        addToStore(info, source, NoRepair, checkSigs);

         res.push_back(info.path);
     }
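importPaths() above relies on a TeeParseSink: the NAR is parsed straight off the wire while a copy of every byte read is saved, so it can later be hashed and replayed into addToStore(). A hedged standalone sketch of that save-while-reading wrapper (illustrative names, standard library only):

#include <algorithm>
#include <cstring>
#include <iostream>
#include <string>

// Minimal Source interface, mirroring Nix's.
struct Source {
    virtual ~Source() = default;
    virtual size_t read(char * data, size_t len) = 0;
};

struct StringSource : Source {
    std::string s; size_t pos = 0;
    StringSource(std::string s) : s(std::move(s)) {}
    size_t read(char * data, size_t len) override {
        size_t n = std::min(len, s.size() - pos);
        std::memcpy(data, s.data() + pos, n);
        pos += n;
        return n;
    }
};

// Everything read through this wrapper is also appended to `saved`,
// which can be replayed later (hashing, re-feeding a store).
struct SavingSource : Source {
    Source & inner;
    std::string saved;
    SavingSource(Source & inner) : inner(inner) {}
    size_t read(char * data, size_t len) override {
        size_t n = inner.read(data, len);
        saved.append(data, n);
        return n;
    }
};

int main()
{
    StringSource src("nar bytes...");
    SavingSource tee(src);
    char buf[4];
    while (tee.read(buf, sizeof(buf)) > 0) { /* a parser would consume here */ }
    std::cout << tee.saved << "\n";   // replayable copy
}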
@@ -22,6 +22,7 @@
 #include <queue>
 #include <random>
 #include <thread>
+#include <regex>

 using namespace std::string_literals;

@@ -56,7 +57,7 @@ struct curlFileTransfer : public FileTransfer
         Callback<FileTransferResult> callback;
         CURL * req = 0;
         bool active = false; // whether the handle has been added to the multi object
-        std::string status;
+        std::string statusMsg;

         unsigned int attempt = 0;

@@ -175,12 +176,13 @@ struct curlFileTransfer : public FileTransfer
             size_t realSize = size * nmemb;
             std::string line((char *) contents, realSize);
             printMsg(lvlVomit, format("got header for '%s': %s") % request.uri % trim(line));
-            if (line.compare(0, 5, "HTTP/") == 0) { // new response starts
+            static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase);
+            std::smatch match;
+            if (std::regex_match(line, match, statusLine)) {
                 result.etag = "";
-                auto ss = tokenizeString<vector<string>>(line, " ");
-                status = ss.size() >= 2 ? ss[1] : "";
                 result.data = std::make_shared<std::string>();
                 result.bodySize = 0;
+                statusMsg = trim(match[1]);
                 acceptRanges = false;
                 encoding = "";
             } else {

@@ -194,7 +196,9 @@ struct curlFileTransfer : public FileTransfer
                the expected ETag on a 200 response, then shut
                down the connection because we already have the
                data. */
-            if (result.etag == request.expectedETag && status == "200") {
+            long httpStatus = 0;
+            curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
+            if (result.etag == request.expectedETag && httpStatus == 200) {
                 debug(format("shutting down on 200 HTTP response with expected ETag"));
                 return 0;
             }

@@ -413,8 +417,8 @@ struct curlFileTransfer : public FileTransfer
                 ? FileTransferError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri))
                 : httpStatus != 0
                 ? FileTransferError(err,
-                    fmt("unable to %s '%s': HTTP error %d",
-                        request.verb(), request.uri, httpStatus)
+                    fmt("unable to %s '%s': HTTP error %d ('%s')",
+                        request.verb(), request.uri, httpStatus, statusMsg)
                     + (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
                     )
                 : FileTransferError(err,
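The new header callback above matches the whole HTTP status line with one regex and captures the reason phrase for error messages. The same parse, runnable in isolation (the capture keeps a leading space, which the diff's trim() removes):

#include <iostream>
#include <regex>
#include <string>

int main()
{
    // POSIX-extended, case-insensitive; group 1 is the reason phrase.
    static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)",
        std::regex::extended | std::regex::icase);

    std::string line = "HTTP/1.1 404 Not Found";
    std::smatch match;
    if (std::regex_match(line, match, statusLine))
        std::cout << "reason:" << match[1] << "\n";   // prints " Not Found"
}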
@@ -262,11 +262,13 @@ void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
 void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
 {
     auto foundRoot = [&](const Path & path, const Path & target) {
-        auto storePath = maybeParseStorePath(toStorePath(target));
-        if (storePath && isValidPath(*storePath))
-            roots[std::move(*storePath)].emplace(path);
-        else
-            printInfo("skipping invalid root from '%1%' to '%2%'", path, target);
+        try {
+            auto storePath = toStorePath(target).first;
+            if (isValidPath(storePath))
+                roots[std::move(storePath)].emplace(path);
+            else
+                printInfo("skipping invalid root from '%1%' to '%2%'", path, target);
+        } catch (BadStorePath &) { }
     };

     try {

@@ -472,15 +474,15 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)

     for (auto & [target, links] : unchecked) {
         if (!isInStore(target)) continue;
-        Path pathS = toStorePath(target);
-        if (!isStorePath(pathS)) continue;
-        auto path = parseStorePath(pathS);
-        if (!isValidPath(path)) continue;
-        debug("got additional root '%1%'", pathS);
-        if (censor)
-            roots[path].insert(censored);
-        else
-            roots[path].insert(links.begin(), links.end());
+        try {
+            auto path = toStorePath(target).first;
+            if (!isValidPath(path)) continue;
+            debug("got additional root '%1%'", printStorePath(path));
+            if (censor)
+                roots[path].insert(censored);
+            else
+                roots[path].insert(links.begin(), links.end());
+        } catch (BadStorePath &) { }
     }
 }
@@ -35,7 +35,7 @@ Settings::Settings()
     , nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR").value_or(NIX_LIBEXEC_DIR)))
     , nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
     , nixManDir(canonPath(NIX_MAN_DIR))
-    , nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH))
+    , nixDaemonSocketFile(canonPath(getEnv("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
 {
     buildUsersGroup = getuid() == 0 ? "nixbld" : "";
     lockCPU = getEnv("NIX_AFFINITY_HACK") == "1";
@@ -196,10 +196,6 @@ public:
     /* Whether to lock the Nix client and worker to the same CPU. */
     bool lockCPU;

-    /* Whether to show a stack trace if Nix evaluation fails. */
-    Setting<bool> showTrace{this, false, "show-trace",
-        "Whether to show a stack trace on evaluation errors."};
-
     Setting<SandboxMode> sandboxMode{this,
 #if __linux__
         smEnabled

@@ -369,6 +365,12 @@ public:
     Setting<bool> warnDirty{this, true, "warn-dirty",
         "Whether to warn about dirty Git/Mercurial trees."};

+    Setting<size_t> narBufferSize{this, 32 * 1024 * 1024, "nar-buffer-size",
+        "Maximum size of NARs before spilling them to disk."};
+
     Setting<std::string> flakeRegistry{this, "https://github.com/NixOS/flake-registry/raw/master/flake-registry.json", "flake-registry",
         "Path or URI of the global flake registry."};
 };
@@ -100,11 +100,11 @@ protected:
     }

     void upsertFile(const std::string & path,
-        const std::string & data,
+        std::shared_ptr<std::basic_iostream<char>> istream,
         const std::string & mimeType) override
     {
         auto req = FileTransferRequest(cacheUri + "/" + path);
-        req.data = std::make_shared<string>(data); // FIXME: inefficient
+        req.data = std::make_shared<string>(StreamToSourceAdapter(istream).drain());
         req.mimeType = mimeType;
         try {
             getFileTransfer()->upload(req);
@@ -126,8 +126,7 @@ struct LegacySSHStore : public Store
     }

     void addToStore(const ValidPathInfo & info, Source & source,
-        RepairFlag repair, CheckSigsFlag checkSigs,
-        std::shared_ptr<FSAccessor> accessor) override
+        RepairFlag repair, CheckSigsFlag checkSigs) override
     {
         debug("adding path '%s' to remote host '%s'", printStorePath(info.path), host);
@@ -31,8 +31,18 @@ protected:
     bool fileExists(const std::string & path) override;

     void upsertFile(const std::string & path,
-        const std::string & data,
-        const std::string & mimeType) override;
+        std::shared_ptr<std::basic_iostream<char>> istream,
+        const std::string & mimeType) override
+    {
+        auto path2 = binaryCacheDir + "/" + path;
+        Path tmp = path2 + ".tmp." + std::to_string(getpid());
+        AutoDelete del(tmp, false);
+        StreamToSourceAdapter source(istream);
+        writeFile(tmp, source);
+        if (rename(tmp.c_str(), path2.c_str()))
+            throw SysError("renaming '%1%' to '%2%'", tmp, path2);
+        del.cancel();
+    }

     void getFile(const std::string & path, Sink & sink) override
     {

@@ -52,7 +62,9 @@ protected:
             if (entry.name.size() != 40 ||
                 !hasSuffix(entry.name, ".narinfo"))
                 continue;
-            paths.insert(parseStorePath(storeDir + "/" + entry.name.substr(0, entry.name.size() - 8)));
+            paths.insert(parseStorePath(
+                storeDir + "/" + entry.name.substr(0, entry.name.size() - 8)
+                + "-" + MissingName));
         }

         return paths;

@@ -68,28 +80,11 @@ void LocalBinaryCacheStore::init()
     BinaryCacheStore::init();
 }

-static void atomicWrite(const Path & path, const std::string & s)
-{
-    Path tmp = path + ".tmp." + std::to_string(getpid());
-    AutoDelete del(tmp, false);
-    writeFile(tmp, s);
-    if (rename(tmp.c_str(), path.c_str()))
-        throw SysError("renaming '%1%' to '%2%'", tmp, path);
-    del.cancel();
-}
-
 bool LocalBinaryCacheStore::fileExists(const std::string & path)
 {
     return pathExists(binaryCacheDir + "/" + path);
 }

-void LocalBinaryCacheStore::upsertFile(const std::string & path,
-    const std::string & data,
-    const std::string & mimeType)
-{
-    atomicWrite(binaryCacheDir + "/" + path, data);
-}
-
 static RegisterStoreImplementation regStore([](
     const std::string & uri, const Store::Params & params)
     -> std::shared_ptr<Store>
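The inlined upsertFile() above keeps the old atomicWrite() behaviour: write to a temp file next to the destination, then rename(). Within one filesystem rename() is atomic, so readers see either the old file or the complete new one, never a partial write. A standalone sketch of the pattern (illustrative paths, standard library plus getpid):

#include <cstdio>      // std::rename, std::remove
#include <fstream>
#include <stdexcept>
#include <string>
#include <unistd.h>    // getpid

void atomicWrite(const std::string & path, const std::string & data)
{
    // Temp file on the same filesystem so the final rename() is atomic.
    std::string tmp = path + ".tmp." + std::to_string(getpid());
    {
        std::ofstream out(tmp, std::ios::binary);
        out.write(data.data(), data.size());
        if (!out) { std::remove(tmp.c_str()); throw std::runtime_error("writing " + tmp); }
    }
    if (std::rename(tmp.c_str(), path.c_str()) != 0) {
        std::remove(tmp.c_str());
        throw std::runtime_error("renaming " + tmp + " to " + path);
    }
}

int main()
{
    atomicWrite("/tmp/example.narinfo", "StorePath: ...\n");
}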
@@ -20,9 +20,9 @@ struct LocalStoreAccessor : public FSAccessor

     Path toRealPath(const Path & path)
     {
-        Path storePath = store->toStorePath(path);
-        if (!store->isValidPath(store->parseStorePath(storePath)))
-            throw InvalidPath("path '%1%' is not a valid store path", storePath);
+        auto storePath = store->toStorePath(path).first;
+        if (!store->isValidPath(storePath))
+            throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath));
         return store->getRealStoreDir() + std::string(path, store->storeDir.size());
     }
@@ -594,7 +594,7 @@ uint64_t LocalStore::addValidPath(State & state,
         (concatStringsSep(" ", info.sigs), !info.sigs.empty())
         (renderContentAddress(info.ca), (bool) info.ca)
         .exec();
-    uint64_t id = sqlite3_last_insert_rowid(state.db);
+    uint64_t id = state.db.getLastInsertedRowId();

     /* If this is a derivation, then store the derivation outputs in
        the database. This is useful for the garbage collector: it can

@@ -774,17 +774,20 @@ StorePathSet LocalStore::queryValidDerivers(const StorePath & path)
 }

-StorePathSet LocalStore::queryDerivationOutputs(const StorePath & path)
+OutputPathMap LocalStore::queryDerivationOutputMap(const StorePath & path)
 {
-    return retrySQLite<StorePathSet>([&]() {
+    return retrySQLite<OutputPathMap>([&]() {
         auto state(_state.lock());

         auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
             (queryValidPathId(*state, path)));

-        StorePathSet outputs;
+        OutputPathMap outputs;
         while (useQueryDerivationOutputs.next())
-            outputs.insert(parseStorePath(useQueryDerivationOutputs.getStr(1)));
+            outputs.emplace(
+                useQueryDerivationOutputs.getStr(0),
+                parseStorePath(useQueryDerivationOutputs.getStr(1))
+            );

         return outputs;
     });

@@ -959,7 +962,7 @@ const PublicKeys & LocalStore::getPublicKeys()

 void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
-    RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
+    RepairFlag repair, CheckSigsFlag checkSigs)
 {
     if (!info.narHash)
         throw Error("cannot add path '%s' because it lacks a hash", printStorePath(info.path));

@@ -973,7 +976,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,

     PathLocks outputLock;

-    Path realPath = realStoreDir + "/" + std::string(info.path.to_string());
+    auto realPath = Store::toRealPath(info.path);

     /* Lock the output path. But don't lock if we're being called
        from a build hook (whose parent process already acquired a

@@ -1044,8 +1047,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & nam
     /* The first check above is an optimisation to prevent
        unnecessary lock acquisition. */

-    Path realPath = realStoreDir + "/";
-    realPath += dstPath.to_string();
+    auto realPath = Store::toRealPath(dstPath);

     PathLocks outputLock({realPath});
@@ -1095,16 +1097,119 @@ StorePath LocalStore::addToStore(const string & name, const Path & _srcPath,
 {
     Path srcPath(absPath(_srcPath));

-    /* Read the whole path into memory. This is not a very scalable
-       method for very large paths, but `copyPath' is mainly used for
-       small files. */
-    StringSink sink;
-    if (method == FileIngestionMethod::Recursive)
-        dumpPath(srcPath, sink, filter);
-    else
-        sink.s = make_ref<std::string>(readFile(srcPath));
+    if (method != FileIngestionMethod::Recursive)
+        return addToStoreFromDump(readFile(srcPath), name, method, hashAlgo, repair);

-    return addToStoreFromDump(*sink.s, name, method, hashAlgo, repair);
+    /* For computing the NAR hash. */
+    auto sha256Sink = std::make_unique<HashSink>(htSHA256);
+
+    /* For computing the store path. In recursive SHA-256 mode, this
+       is the same as the NAR hash, so no need to do it again. */
+    std::unique_ptr<HashSink> hashSink =
+        hashAlgo == htSHA256
+        ? nullptr
+        : std::make_unique<HashSink>(hashAlgo);
+
+    /* Read the source path into memory, but only if it's up to
+       narBufferSize bytes. If it's larger, write it to a temporary
+       location in the Nix store. If the subsequently computed
+       destination store path is already valid, we just delete the
+       temporary path. Otherwise, we move it to the destination store
+       path. */
+    bool inMemory = true;
+    std::string nar;
+
+    auto source = sinkToSource([&](Sink & sink) {
+
+        LambdaSink sink2([&](const unsigned char * buf, size_t len) {
+            (*sha256Sink)(buf, len);
+            if (hashSink) (*hashSink)(buf, len);
+
+            if (inMemory) {
+                if (nar.size() + len > settings.narBufferSize) {
+                    inMemory = false;
+                    sink << 1;
+                    sink((const unsigned char *) nar.data(), nar.size());
+                    nar.clear();
+                } else {
+                    nar.append((const char *) buf, len);
+                }
+            }
+
+            if (!inMemory) sink(buf, len);
+        });
+
+        dumpPath(srcPath, sink2, filter);
+    });
+
+    std::unique_ptr<AutoDelete> delTempDir;
+    Path tempPath;
+
+    try {
+        /* Wait for the source coroutine to give us some dummy
+           data. This is so that we don't create the temporary
+           directory if the NAR fits in memory. */
+        readInt(*source);
+
+        auto tempDir = createTempDir(realStoreDir, "add");
+        delTempDir = std::make_unique<AutoDelete>(tempDir);
+        tempPath = tempDir + "/x";
+
+        restorePath(tempPath, *source);
+
+    } catch (EndOfFile &) {
+        if (!inMemory) throw;
+        /* The NAR fits in memory, so we didn't do restorePath(). */
+    }
+
+    auto sha256 = sha256Sink->finish();
+
+    Hash hash = hashSink ? hashSink->finish().first : sha256.first;
+
+    auto dstPath = makeFixedOutputPath(method, hash, name);
+
+    addTempRoot(dstPath);
+
+    if (repair || !isValidPath(dstPath)) {
+
+        /* The first check above is an optimisation to prevent
+           unnecessary lock acquisition. */
+
+        auto realPath = Store::toRealPath(dstPath);
+
+        PathLocks outputLock({realPath});
+
+        if (repair || !isValidPath(dstPath)) {
+
+            deletePath(realPath);
+
+            autoGC();
+
+            if (inMemory) {
+                /* Restore from the NAR in memory. */
+                StringSource source(nar);
+                restorePath(realPath, source);
+            } else {
+                /* Move the temporary path we restored above. */
+                if (rename(tempPath.c_str(), realPath.c_str()))
+                    throw Error("renaming '%s' to '%s'", tempPath, realPath);
+            }
+
+            canonicalisePathMetaData(realPath, -1); // FIXME: merge into restorePath
+
+            optimisePath(realPath);
+
+            ValidPathInfo info(dstPath);
+            info.narHash = sha256.first;
+            info.narSize = sha256.second;
+            info.ca = FixedOutputHash { .method = method, .hash = hash };
+            registerValidPath(info);
+        }
+
+        outputLock.setDeletion(true);
+    }
+
+    return dstPath;
 }
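The core idea of the new addToStore() above is the narBufferSize spill: accumulate the NAR in memory until it crosses a threshold, then switch to a temporary file and stream the rest, so small inputs never touch the disk. A reduced, self-contained sketch of just that buffering decision (threshold and file name are illustrative, not the commit's values):

#include <fstream>
#include <iostream>
#include <string>

struct SpillBuffer {
    size_t threshold;
    std::string mem;        // in-memory buffer while small
    std::ofstream file;     // spill target once large
    bool inMemory = true;

    SpillBuffer(size_t threshold) : threshold(threshold) {}

    void append(const char * buf, size_t len) {
        if (inMemory && mem.size() + len > threshold) {
            inMemory = false;
            file.open("/tmp/spill.tmp", std::ios::binary);
            file.write(mem.data(), mem.size());   // flush what we buffered
            mem.clear();
        }
        if (inMemory) mem.append(buf, len);
        else file.write(buf, len);
    }
};

int main()
{
    SpillBuffer buf(16);
    std::string chunk(10, 'x');
    buf.append(chunk.data(), chunk.size());   // stays in memory
    buf.append(chunk.data(), chunk.size());   // crosses 16 bytes: spills
    std::cout << (buf.inMemory ? "in memory" : "spilled") << "\n";
}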
@@ -1118,8 +1223,7 @@ StorePath LocalStore::addTextToStore(const string & name, const string & s,

     if (repair || !isValidPath(dstPath)) {

-        Path realPath = realStoreDir + "/";
-        realPath += dstPath.to_string();
+        auto realPath = Store::toRealPath(dstPath);

         PathLocks outputLock({realPath});
@@ -133,7 +133,7 @@ public:

     StorePathSet queryValidDerivers(const StorePath & path) override;

-    StorePathSet queryDerivationOutputs(const StorePath & path) override;
+    OutputPathMap queryDerivationOutputMap(const StorePath & path) override;

     std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

@@ -143,8 +143,7 @@ public:
         SubstitutablePathInfos & infos) override;

     void addToStore(const ValidPathInfo & info, Source & source,
-        RepairFlag repair, CheckSigsFlag checkSigs,
-        std::shared_ptr<FSAccessor> accessor) override;
+        RepairFlag repair, CheckSigsFlag checkSigs) override;

     StorePath addToStore(const string & name, const Path & srcPath,
         FileIngestionMethod method, HashType hashAlgo,
@@ -61,3 +61,6 @@ $(d)/build.cc:
 clean-files += $(d)/schema.sql.gen.hh

 $(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644))
+
+$(foreach i, $(wildcard src/libstore/builtins/*.hh), \
+  $(eval $(call install-file-in, $(i), $(includedir)/nix/builtins, 0644)))
@@ -18,7 +18,7 @@ struct NarMember

     /* If this is a regular file, position of the contents of this
        file in the NAR. */
-    size_t start = 0, size = 0;
+    uint64_t start = 0, size = 0;

     std::string target;

@@ -34,17 +34,19 @@ struct NarAccessor : public FSAccessor

     NarMember root;

-    struct NarIndexer : ParseSink, StringSource
+    struct NarIndexer : ParseSink, Source
     {
         NarAccessor & acc;
+        Source & source;

         std::stack<NarMember *> parents;

-        std::string currentStart;
         bool isExec = false;

-        NarIndexer(NarAccessor & acc, const std::string & nar)
-            : StringSource(nar), acc(acc)
+        uint64_t pos = 0;
+
+        NarIndexer(NarAccessor & acc, Source & source)
+            : acc(acc), source(source)
         { }

         void createMember(const Path & path, NarMember member) {

@@ -79,31 +81,38 @@ struct NarAccessor : public FSAccessor

         void preallocateContents(unsigned long long size) override
         {
-            currentStart = string(s, pos, 16);
-            assert(size <= std::numeric_limits<size_t>::max());
-            parents.top()->size = (size_t)size;
+            assert(size <= std::numeric_limits<uint64_t>::max());
+            parents.top()->size = (uint64_t) size;
             parents.top()->start = pos;
         }

         void receiveContents(unsigned char * data, unsigned int len) override
-        {
-            // Sanity check
-            if (!currentStart.empty()) {
-                assert(len < 16 || currentStart == string((char *) data, 16));
-                currentStart.clear();
-            }
-        }
+        { }

         void createSymlink(const Path & path, const string & target) override
         {
             createMember(path,
                 NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target});
         }

+        size_t read(unsigned char * data, size_t len) override
+        {
+            auto n = source.read(data, len);
+            pos += n;
+            return n;
+        }
     };

     NarAccessor(ref<const std::string> nar) : nar(nar)
     {
-        NarIndexer indexer(*this, *nar);
+        StringSource source(*nar);
+        NarIndexer indexer(*this, source);
         parseDump(indexer, indexer);
     }

+    NarAccessor(Source & source)
+    {
+        NarIndexer indexer(*this, source);
+        parseDump(indexer, indexer);
+    }
+

@@ -219,6 +228,11 @@ ref<FSAccessor> makeNarAccessor(ref<const std::string> nar)
     return make_ref<NarAccessor>(nar);
 }

+ref<FSAccessor> makeNarAccessor(Source & source)
+{
+    return make_ref<NarAccessor>(source);
+}
+
 ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
     GetNarBytes getNarBytes)
 {
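The NarIndexer rework above stops indexing into an in-memory string: it wraps the underlying Source and counts bytes as they pass through read(), so file offsets inside the archive can be recorded while parsing a stream of any size. A hedged standalone sketch of such a counting wrapper over std::istream (illustrative, not Nix's Source API):

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

struct CountingStream {
    std::istream & inner;
    uint64_t pos = 0;                  // bytes consumed so far

    size_t read(char * data, size_t len) {
        inner.read(data, len);
        size_t n = inner.gcount();
        pos += n;                      // the parser can record `pos` as an offset
        return n;
    }
};

int main()
{
    std::istringstream in("0123456789");
    CountingStream cs{in};
    char buf[4];
    while (cs.read(buf, sizeof(buf)) > 0)
        std::cout << "offset now " << cs.pos << "\n";
}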
@@ -6,10 +6,14 @@

 namespace nix {

+struct Source;
+
 /* Return an object that provides access to the contents of a NAR
    file. */
 ref<FSAccessor> makeNarAccessor(ref<const std::string> nar);

+ref<FSAccessor> makeNarAccessor(Source & source);
+
 /* Create a NAR accessor from a NAR listing (in the format produced by
    listNar()). The callback getNarBytes(offset, length) is used by the
    readFile() method of the accessor to get the contents of files
@@ -2,8 +2,6 @@

 namespace nix {

-MakeError(BadStorePath, Error);
-
 static void checkName(std::string_view path, std::string_view name)
 {
     if (name.empty())
@@ -62,6 +62,7 @@ public:

 typedef std::set<StorePath> StorePathSet;
 typedef std::vector<StorePath> StorePaths;
+typedef std::map<string, StorePath> OutputPathMap;

 /* Extension of derivations in the Nix store. */
 const std::string drvExtension = ".drv";
@@ -12,30 +12,24 @@
 namespace nix {

-static bool cmpGensByNumber(const Generation & a, const Generation & b)
-{
-    return a.number < b.number;
-}
-
 /* Parse a generation name of the format
    `<profilename>-<number>-link'. */
-static int parseName(const string & profileName, const string & name)
+static std::optional<GenerationNumber> parseName(const string & profileName, const string & name)
 {
-    if (string(name, 0, profileName.size() + 1) != profileName + "-") return -1;
+    if (string(name, 0, profileName.size() + 1) != profileName + "-") return {};
     string s = string(name, profileName.size() + 1);
     string::size_type p = s.find("-link");
-    if (p == string::npos) return -1;
-    int n;
+    if (p == string::npos) return {};
+    unsigned int n;
     if (string2Int(string(s, 0, p), n) && n >= 0)
         return n;
     else
-        return -1;
+        return {};
 }

-Generations findGenerations(Path profile, int & curGen)
+std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile)
 {
     Generations gens;

@@ -43,30 +37,34 @@ Generations findGenerations(Path profile, int & curGen)
     auto profileName = std::string(baseNameOf(profile));

     for (auto & i : readDirectory(profileDir)) {
-        int n;
-        if ((n = parseName(profileName, i.name)) != -1) {
-            Generation gen;
-            gen.path = profileDir + "/" + i.name;
-            gen.number = n;
+        if (auto n = parseName(profileName, i.name)) {
+            auto path = profileDir + "/" + i.name;
             struct stat st;
-            if (lstat(gen.path.c_str(), &st) != 0)
-                throw SysError("statting '%1%'", gen.path);
-            gen.creationTime = st.st_mtime;
-            gens.push_back(gen);
+            if (lstat(path.c_str(), &st) != 0)
+                throw SysError("statting '%1%'", path);
+            gens.push_back({
+                .number = *n,
+                .path = path,
+                .creationTime = st.st_mtime
+            });
         }
     }

-    gens.sort(cmpGensByNumber);
+    gens.sort([](const Generation & a, const Generation & b)
+    {
+        return a.number < b.number;
+    });

-    curGen = pathExists(profile)
+    return {
+        gens,
+        pathExists(profile)
         ? parseName(profileName, readLink(profile))
-        : -1;
-
-    return gens;
+        : std::nullopt
+    };
 }

-static void makeName(const Path & profile, unsigned int num,
+static void makeName(const Path & profile, GenerationNumber num,
     Path & outLink)
 {
     Path prefix = (format("%1%-%2%") % profile % num).str();

@@ -78,10 +76,9 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
 {
     /* The new generation number should be higher than old the
        previous ones. */
-    int dummy;
-    Generations gens = findGenerations(profile, dummy);
+    auto [gens, dummy] = findGenerations(profile);

-    unsigned int num;
+    GenerationNumber num;
     if (gens.size() > 0) {
         Generation last = gens.back();

@@ -121,7 +118,7 @@ static void removeFile(const Path & path)
 }

-void deleteGeneration(const Path & profile, unsigned int gen)
+void deleteGeneration(const Path & profile, GenerationNumber gen)
 {
     Path generation;
     makeName(profile, gen, generation);

@@ -129,7 +126,7 @@ void deleteGeneration(const Path & profile, unsigned int gen)
 }

-static void deleteGeneration2(const Path & profile, unsigned int gen, bool dryRun)
+static void deleteGeneration2(const Path & profile, GenerationNumber gen, bool dryRun)
 {
     if (dryRun)
         printInfo(format("would remove generation %1%") % gen);

@@ -140,31 +137,29 @@ static void deleteGeneration2(const Path & profile, unsigned int gen, bool dryRu
 }

-void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun)
+void deleteGenerations(const Path & profile, const std::set<GenerationNumber> & gensToDelete, bool dryRun)
 {
     PathLocks lock;
     lockProfile(lock, profile);

-    int curGen;
-    Generations gens = findGenerations(profile, curGen);
+    auto [gens, curGen] = findGenerations(profile);

-    if (gensToDelete.find(curGen) != gensToDelete.end())
+    if (gensToDelete.count(*curGen))
         throw Error("cannot delete current generation of profile %1%'", profile);

     for (auto & i : gens) {
-        if (gensToDelete.find(i.number) == gensToDelete.end()) continue;
+        if (!gensToDelete.count(i.number)) continue;
         deleteGeneration2(profile, i.number, dryRun);
     }
 }

-void deleteGenerationsGreaterThan(const Path & profile, int max, bool dryRun)
+void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun)
 {
     PathLocks lock;
     lockProfile(lock, profile);

-    int curGen;
     bool fromCurGen = false;
-    Generations gens = findGenerations(profile, curGen);
+    auto [gens, curGen] = findGenerations(profile);
     for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
         if (i->number == curGen) {
             fromCurGen = true;

@@ -186,8 +181,7 @@ void deleteOldGenerations(const Path & profile, bool dryRun)
     PathLocks lock;
     lockProfile(lock, profile);

-    int curGen;
-    Generations gens = findGenerations(profile, curGen);
+    auto [gens, curGen] = findGenerations(profile);

     for (auto & i : gens)
         if (i.number != curGen)

@@ -200,8 +194,7 @@ void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
     PathLocks lock;
     lockProfile(lock, profile);

-    int curGen;
-    Generations gens = findGenerations(profile, curGen);
+    auto [gens, curGen] = findGenerations(profile);

     bool canDelete = false;
     for (auto i = gens.rbegin(); i != gens.rend(); ++i)
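The profiles refactor above replaces the old -1 sentinel with std::optional<GenerationNumber>, so "no generation" can no longer be confused with a real number. A self-contained sketch of the parse under that design (std::stoul stands in for Nix's string2Int; not the commit's exact code):

#include <iostream>
#include <optional>
#include <string>

using GenerationNumber = unsigned int;

// Parse "<profilename>-<number>-link"; empty optional means "not a generation".
std::optional<GenerationNumber> parseName(const std::string & profileName,
    const std::string & name)
{
    if (name.rfind(profileName + "-", 0) != 0) return std::nullopt;
    std::string s = name.substr(profileName.size() + 1);
    auto p = s.find("-link");
    if (p == std::string::npos) return std::nullopt;
    try {
        size_t idx;
        unsigned long n = std::stoul(s.substr(0, p), &idx);
        if (idx != p) return std::nullopt;   // trailing junk before "-link"
        return (GenerationNumber) n;
    } catch (...) { return std::nullopt; }
}

int main()
{
    if (auto n = parseName("profile", "profile-42-link"))
        std::cout << "generation " << *n << "\n";
    if (!parseName("profile", "profile-x-link"))
        std::cout << "not a generation\n";
}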
@@ -9,37 +9,32 @@
 namespace nix {

+typedef unsigned int GenerationNumber;
+
 struct Generation
 {
-    int number;
+    GenerationNumber number;
     Path path;
     time_t creationTime;
-    Generation()
-    {
-        number = -1;
-    }
-    operator bool() const
-    {
-        return number != -1;
-    }
 };

-typedef list<Generation> Generations;
+typedef std::list<Generation> Generations;

 /* Returns the list of currently present generations for the specified
-   profile, sorted by generation number. */
-Generations findGenerations(Path profile, int & curGen);
+   profile, sorted by generation number. Also returns the number of
+   the current generation. */
+std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile);

 class LocalFSStore;

 Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath);

-void deleteGeneration(const Path & profile, unsigned int gen);
+void deleteGeneration(const Path & profile, GenerationNumber gen);

-void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun);
+void deleteGenerations(const Path & profile, const std::set<GenerationNumber> & gensToDelete, bool dryRun);

-void deleteGenerationsGreaterThan(const Path & profile, const int max, bool dryRun);
+void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun);

 void deleteOldGenerations(const Path & profile, bool dryRun);
@@ -16,26 +16,26 @@ RemoteFSAccessor::RemoteFSAccessor(ref<Store> store, const Path & cacheDir)
     createDirs(cacheDir);
 }

-Path RemoteFSAccessor::makeCacheFile(const Path & storePath, const std::string & ext)
+Path RemoteFSAccessor::makeCacheFile(std::string_view hashPart, const std::string & ext)
 {
     assert(cacheDir != "");
-    return fmt("%s/%s.%s", cacheDir, store->parseStorePath(storePath).hashPart(), ext);
+    return fmt("%s/%s.%s", cacheDir, hashPart, ext);
 }

-void RemoteFSAccessor::addToCache(const Path & storePath, const std::string & nar,
+void RemoteFSAccessor::addToCache(std::string_view hashPart, const std::string & nar,
     ref<FSAccessor> narAccessor)
 {
-    nars.emplace(storePath, narAccessor);
+    nars.emplace(hashPart, narAccessor);

     if (cacheDir != "") {
         try {
             std::ostringstream str;
             JSONPlaceholder jsonRoot(str);
             listNar(jsonRoot, narAccessor, "", true);
-            writeFile(makeCacheFile(storePath, "ls"), str.str());
+            writeFile(makeCacheFile(hashPart, "ls"), str.str());

             /* FIXME: do this asynchronously. */
-            writeFile(makeCacheFile(storePath, "nar"), nar);
+            writeFile(makeCacheFile(hashPart, "nar"), nar);

         } catch (...) {
             ignoreException();

@@ -47,23 +47,22 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
 {
     auto path = canonPath(path_);

-    auto storePath = store->toStorePath(path);
-    std::string restPath = std::string(path, storePath.size());
+    auto [storePath, restPath] = store->toStorePath(path);

-    if (!store->isValidPath(store->parseStorePath(storePath)))
-        throw InvalidPath("path '%1%' is not a valid store path", storePath);
+    if (!store->isValidPath(storePath))
+        throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath));

-    auto i = nars.find(storePath);
+    auto i = nars.find(std::string(storePath.hashPart()));
     if (i != nars.end()) return {i->second, restPath};

     StringSink sink;
     std::string listing;
     Path cacheFile;

-    if (cacheDir != "" && pathExists(cacheFile = makeCacheFile(storePath, "nar"))) {
+    if (cacheDir != "" && pathExists(cacheFile = makeCacheFile(storePath.hashPart(), "nar"))) {

         try {
-            listing = nix::readFile(makeCacheFile(storePath, "ls"));
+            listing = nix::readFile(makeCacheFile(storePath.hashPart(), "ls"));

             auto narAccessor = makeLazyNarAccessor(listing,
                 [cacheFile](uint64_t offset, uint64_t length) {

@@ -81,7 +80,7 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
                     return buf;
                 });

-            nars.emplace(storePath, narAccessor);
+            nars.emplace(storePath.hashPart(), narAccessor);
             return {narAccessor, restPath};

         } catch (SysError &) { }

@@ -90,15 +89,15 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
             *sink.s = nix::readFile(cacheFile);

             auto narAccessor = makeNarAccessor(sink.s);
-            nars.emplace(storePath, narAccessor);
+            nars.emplace(storePath.hashPart(), narAccessor);
             return {narAccessor, restPath};

         } catch (SysError &) { }
     }

-    store->narFromPath(store->parseStorePath(storePath), sink);
+    store->narFromPath(storePath, sink);
     auto narAccessor = makeNarAccessor(sink.s);
-    addToCache(storePath, *sink.s, narAccessor);
+    addToCache(storePath.hashPart(), *sink.s, narAccessor);
     return {narAccessor, restPath};
 }
@@ -10,7 +10,7 @@ class RemoteFSAccessor : public FSAccessor
 {
     ref<Store> store;

-    std::map<Path, ref<FSAccessor>> nars;
+    std::map<std::string, ref<FSAccessor>> nars;

     Path cacheDir;

@@ -18,9 +18,9 @@ class RemoteFSAccessor : public FSAccessor

     friend class BinaryCacheStore;

-    Path makeCacheFile(const Path & storePath, const std::string & ext);
+    Path makeCacheFile(std::string_view hashPart, const std::string & ext);

-    void addToCache(const Path & storePath, const std::string & nar,
+    void addToCache(std::string_view hashPart, const std::string & nar,
         ref<FSAccessor> narAccessor);

 public:
@@ -8,6 +8,7 @@
 #include "derivations.hh"
 #include "pool.hh"
 #include "finally.hh"
+#include "logging.hh"

 #include <sys/types.h>
 #include <sys/stat.h>

@@ -38,6 +39,29 @@ void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths
         out << store.printStorePath(i);
 }

+std::map<string, StorePath> readOutputPathMap(const Store & store, Source & from)
+{
+    std::map<string, StorePath> pathMap;
+    auto rawInput = readStrings<Strings>(from);
+    if (rawInput.size() % 2)
+        throw Error("got an odd number of elements from the daemon when trying to read an output path map");
+    auto curInput = rawInput.begin();
+    while (curInput != rawInput.end()) {
+        auto thisKey = *curInput++;
+        auto thisValue = *curInput++;
+        pathMap.emplace(thisKey, store.parseStorePath(thisValue));
+    }
+    return pathMap;
+}
+
+void writeOutputPathMap(const Store & store, Sink & out, const std::map<string, StorePath> & pathMap)
+{
+    out << 2*pathMap.size();
+    for (auto & i : pathMap) {
+        out << i.first;
+        out << store.printStorePath(i.second);
+    }
+}
+
 /* TODO: Separate these store impls into different files, give them better names */
 RemoteStore::RemoteStore(const Params & params)
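readOutputPathMap and writeOutputPathMap flatten the outputName-to-StorePath map into one length-prefixed list of alternating keys and values, which is why an odd element count can only mean a framing error. A standalone round-trip sketch of that framing (std::vector<std::string> stands in for Nix's length-prefixed string list, and plain strings stand in for StorePath):

// Standalone sketch of the alternating key/value framing used above.
#include <cassert>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

std::vector<std::string> flatten(const std::map<std::string, std::string> & m)
{
    std::vector<std::string> out; // a real Sink would first write 2*m.size()
    for (auto & [k, v] : m) {
        out.push_back(k);
        out.push_back(v);
    }
    return out;
}

std::map<std::string, std::string> unflatten(const std::vector<std::string> & raw)
{
    if (raw.size() % 2)
        throw std::runtime_error("odd number of elements");
    std::map<std::string, std::string> m;
    for (size_t i = 0; i < raw.size(); i += 2)
        m.emplace(raw[i], raw[i + 1]);
    return m;
}

int main()
{
    std::map<std::string, std::string> outputs{
        {"bin", "/nix/store/...-hello-bin"}, // illustrative paths
        {"out", "/nix/store/...-hello"},
    };
    assert(unflatten(flatten(outputs)) == outputs);
}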
@@ -197,7 +221,7 @@ void RemoteStore::setOptions(Connection & conn)
         overrides.erase(settings.maxSilentTime.name);
         overrides.erase(settings.buildCores.name);
         overrides.erase(settings.useSubstitutes.name);
-        overrides.erase(settings.showTrace.name);
+        overrides.erase(loggerSettings.showTrace.name);
         conn.to << overrides.size();
         for (auto & i : overrides)
             conn.to << i.first << i.second.value;

@@ -412,12 +436,24 @@ StorePathSet RemoteStore::queryValidDerivers(const StorePath & path)
 StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
 {
     auto conn(getConnection());
+    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 0x16) {
+        return Store::queryDerivationOutputs(path);
+    }
     conn->to << wopQueryDerivationOutputs << printStorePath(path);
     conn.processStderr();
     return readStorePaths<StorePathSet>(*this, conn->from);
 }

+OutputPathMap RemoteStore::queryDerivationOutputMap(const StorePath & path)
+{
+    auto conn(getConnection());
+    conn->to << wopQueryDerivationOutputMap << printStorePath(path);
+    conn.processStderr();
+    return readOutputPathMap(*this, conn->from);
+}
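RemoteStore::queryDerivationOutputs now prefers the generic Store implementation (backed by the new wopQueryDerivationOutputMap opcode) whenever the daemon's protocol minor version is at least 0x16, keeping the old wopQueryDerivationOutputs wire call only as a fallback for older daemons. A standalone sketch of that gating pattern (the query functions are hypothetical stand-ins):

// Standalone sketch of gating a feature on the daemon's protocol minor
// version (the macro mirrors GET_PROTOCOL_MINOR from worker-protocol.hh).
#include <cstdio>

#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

int queryNewWay() { return 1; } // hypothetical modern code path
int queryOldWay() { return 2; } // hypothetical legacy opcode path

int query(unsigned daemonVersion)
{
    // 0x16 is the minor version that introduced wopQueryDerivationOutputMap.
    if (GET_PROTOCOL_MINOR(daemonVersion) >= 0x16)
        return queryNewWay();
    return queryOldWay();
}

int main()
{
    std::printf("%d\n", query(0x115)); // old daemon -> legacy path (prints 2)
    std::printf("%d\n", query(0x116)); // new daemon -> modern path (prints 1)
}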
 std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string & hashPart)
 {
     auto conn(getConnection());

@@ -430,7 +466,7 @@ std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string &

 void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
-    RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
+    RepairFlag repair, CheckSigsFlag checkSigs)
 {
     auto conn(getConnection());
@@ -51,6 +51,7 @@ public:

     StorePathSet queryDerivationOutputs(const StorePath & path) override;

+    OutputPathMap queryDerivationOutputMap(const StorePath & path) override;
     std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

     StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;

@@ -59,8 +60,7 @@ public:
         SubstitutablePathInfos & infos) override;

     void addToStore(const ValidPathInfo & info, Source & nar,
-        RepairFlag repair, CheckSigsFlag checkSigs,
-        std::shared_ptr<FSAccessor> accessor) override;
+        RepairFlag repair, CheckSigsFlag checkSigs) override;

     StorePath addToStore(const string & name, const Path & srcPath,
         FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
@@ -7,7 +7,6 @@
 #include "globals.hh"
 #include "compression.hh"
 #include "filetransfer.hh"
-#include "istringstream_nocopy.hh"

 #include <aws/core/Aws.h>
 #include <aws/core/VersionConfig.h>

@@ -262,12 +261,11 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
     std::shared_ptr<TransferManager> transferManager;
     std::once_flag transferManagerCreated;

-    void uploadFile(const std::string & path, const std::string & data,
+    void uploadFile(const std::string & path,
+        std::shared_ptr<std::basic_iostream<char>> istream,
         const std::string & mimeType,
         const std::string & contentEncoding)
     {
-        auto stream = std::make_shared<istringstream_nocopy>(data);
-
         auto maxThreads = std::thread::hardware_concurrency();

         static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>

@@ -307,7 +305,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore

             std::shared_ptr<TransferHandle> transferHandle =
                 transferManager->UploadFile(
-                    stream, bucketName, path, mimeType,
+                    istream, bucketName, path, mimeType,
                     Aws::Map<Aws::String, Aws::String>(),
                     nullptr /*, contentEncoding */);

@@ -333,9 +331,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             if (contentEncoding != "")
                 request.SetContentEncoding(contentEncoding);

-            auto stream = std::make_shared<istringstream_nocopy>(data);
-
-            request.SetBody(stream);
+            request.SetBody(istream);

             auto result = checkAws(fmt("AWS error uploading '%s'", path),
                 s3Helper.client->PutObject(request));

@@ -347,25 +343,34 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1)
                 .count();

-        printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") %
-            bucketName % path % data.size() % duration);
+        auto size = istream->tellg();
+
+        printInfo("uploaded 's3://%s/%s' (%d bytes) in %d ms",
+            bucketName, path, size, duration);

         stats.putTimeMs += duration;
-        stats.putBytes += data.size();
+        stats.putBytes += size;
         stats.put++;
     }
-    void upsertFile(const std::string & path, const std::string & data,
+    void upsertFile(const std::string & path,
+        std::shared_ptr<std::basic_iostream<char>> istream,
         const std::string & mimeType) override
     {
+        auto compress = [&](std::string compression)
+        {
+            auto compressed = nix::compress(compression, StreamToSourceAdapter(istream).drain());
+            return std::make_shared<std::stringstream>(std::move(*compressed));
+        };
+
         if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
-            uploadFile(path, *compress(narinfoCompression, data), mimeType, narinfoCompression);
+            uploadFile(path, compress(narinfoCompression), mimeType, narinfoCompression);
         else if (lsCompression != "" && hasSuffix(path, ".ls"))
-            uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression);
+            uploadFile(path, compress(lsCompression), mimeType, lsCompression);
         else if (logCompression != "" && hasPrefix(path, "log/"))
-            uploadFile(path, *compress(logCompression, data), mimeType, logCompression);
+            uploadFile(path, compress(logCompression), mimeType, logCompression);
         else
-            uploadFile(path, data, mimeType, "");
+            uploadFile(path, istream, mimeType, "");
     }

     void getFile(const std::string & path, Sink & sink) override

@@ -410,7 +415,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         for (auto object : contents) {
             auto & key = object.GetKey();
             if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue;
-            paths.insert(parseStorePath(storeDir + "/" + key.substr(0, key.size() - 8) + "-unknown"));
+            paths.insert(parseStorePath(storeDir + "/" + key.substr(0, key.size() - 8) + "-" + MissingName));
         }

         marker = res.GetNextMarker();
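With upsertFile and uploadFile now taking a std::basic_iostream instead of a std::string, the uncompressed upload path no longer copies the payload into an istringstream_nocopy; only the compressed variants materialize a stringstream, and the byte count for the stats is recovered from the stream itself. A standalone sketch of consuming a shared stream and reading the size back with tellg() (the clear() is needed here because reading to EOF sets the fail state, under which tellg() returns -1):

// Standalone sketch: drain a shared iostream and measure the bytes consumed.
#include <iostream>
#include <memory>
#include <sstream>
#include <string>

void upload(std::shared_ptr<std::basic_iostream<char>> istream)
{
    // Stand-in for the AWS transfer: drain the stream chunk by chunk.
    char buf[4096];
    while (istream->read(buf, sizeof(buf)) || istream->gcount())
        ; // a real uploader would send buf here

    istream->clear();             // clear eof/fail so tellg() is valid
    auto size = istream->tellg(); // bytes consumed, as used for stats.putBytes
    std::cout << "uploaded " << size << " bytes\n";
}

int main()
{
    auto data = std::make_shared<std::stringstream>(std::string(10000, 'x'));
    upload(data); // prints "uploaded 10000 bytes"
}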
@@ -61,6 +61,11 @@ void SQLite::exec(const std::string & stmt)
     });
 }

+uint64_t SQLite::getLastInsertedRowId()
+{
+    return sqlite3_last_insert_rowid(db);
+}
+
 void SQLiteStmt::create(sqlite3 * db, const string & sql)
 {
     checkInterrupt();

@@ -95,10 +100,10 @@ SQLiteStmt::Use::~Use()
     sqlite3_reset(stmt);
 }

-SQLiteStmt::Use & SQLiteStmt::Use::operator () (const std::string & value, bool notNull)
+SQLiteStmt::Use & SQLiteStmt::Use::operator () (std::string_view value, bool notNull)
 {
     if (notNull) {
-        if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
+        if (sqlite3_bind_text(stmt, curArg++, value.data(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
             throwSQLiteError(stmt.db, "binding argument");
     } else
         bind();
@@ -26,6 +26,8 @@ struct SQLite
     void isCache();

     void exec(const std::string & stmt);
+
+    uint64_t getLastInsertedRowId();
 };

 /* RAII wrapper to create and destroy SQLite prepared statements. */

@@ -54,7 +56,7 @@ struct SQLiteStmt
     ~Use();

     /* Bind the next parameter. */
-    Use & operator () (const std::string & value, bool notNull = true);
+    Use & operator () (std::string_view value, bool notNull = true);
     Use & operator () (const unsigned char * data, size_t len, bool notNull = true);
     Use & operator () (int64_t value, bool notNull = true);
     Use & bind(); // null
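getLastInsertedRowId is a thin wrapper around sqlite3_last_insert_rowid(), which reports the rowid of the most recent completed INSERT on that connection. A standalone sketch against the raw SQLite C API (the table and values are made up; link with -lsqlite3):

// Standalone sketch of sqlite3_last_insert_rowid(), the call wrapped by
// SQLite::getLastInsertedRowId() above.
#include <cstdio>
#include <sqlite3.h>

int main()
{
    sqlite3 * db;
    if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;

    sqlite3_exec(db, "CREATE TABLE t (id INTEGER PRIMARY KEY, v TEXT)", nullptr, nullptr, nullptr);
    sqlite3_exec(db, "INSERT INTO t (v) VALUES ('hello')", nullptr, nullptr, nullptr);

    // Rowid of the INSERT we just executed on this connection, e.g. the id
    // of a freshly inserted row that other tables need to reference.
    std::printf("last rowid: %lld\n", (long long) sqlite3_last_insert_rowid(db));

    sqlite3_close(db);
}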
@@ -7,6 +7,7 @@
 #include "json.hh"
 #include "derivations.hh"
 #include "url.hh"
+#include "archive.hh"

 #include <future>

@@ -20,15 +21,15 @@ bool Store::isInStore(const Path & path) const
 }

-Path Store::toStorePath(const Path & path) const
+std::pair<StorePath, Path> Store::toStorePath(const Path & path) const
 {
     if (!isInStore(path))
         throw Error("path '%1%' is not in the Nix store", path);
     Path::size_type slash = path.find('/', storeDir.size() + 1);
     if (slash == Path::npos)
-        return path;
+        return {parseStorePath(path), ""};
     else
-        return Path(path, 0, slash);
+        return {parseStorePath(std::string_view(path).substr(0, slash)), path.substr(slash)};
 }

@@ -41,14 +42,14 @@ Path Store::followLinksToStore(std::string_view _path) const
         path = absPath(target, dirOf(path));
     }
     if (!isInStore(path))
-        throw NotInStore("path '%1%' is not in the Nix store", path);
+        throw BadStorePath("path '%1%' is not in the Nix store", path);
     return path;
 }

 StorePath Store::followLinksToStorePath(std::string_view path) const
 {
-    return parseStorePath(toStorePath(followLinksToStore(path)));
+    return toStorePath(followLinksToStore(path)).first;
 }
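Store::toStorePath now returns both halves of the split, the parsed StorePath and the remainder below it, rather than a bare Path prefix; this is what lets callers such as RemoteFSAccessor::fetch above obtain storePath and restPath in one call. A standalone string-level sketch of the split (the store dir and names are illustrative):

// Standalone sketch of splitting "/nix/store/<hash>-<name>/<rest>" into
// its store-path prefix and in-path remainder, as toStorePath() now does.
#include <cassert>
#include <string>
#include <utility>

std::pair<std::string, std::string> toStorePath(const std::string & path)
{
    const std::string storeDir = "/nix/store";
    auto slash = path.find('/', storeDir.size() + 1);
    if (slash == std::string::npos)
        return {path, ""};                              // no remainder
    return {path.substr(0, slash), path.substr(slash)}; // prefix + "/rest"
}

int main()
{
    auto [storePath, rest] = toStorePath("/nix/store/abcd-foo/bin/sh");
    assert(storePath == "/nix/store/abcd-foo");
    assert(rest == "/bin/sh");
}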
@@ -221,6 +222,40 @@ StorePath Store::computeStorePathForText(const string & name, const string & s,
 }

+ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath,
+    FileIngestionMethod method, HashType hashAlgo,
+    std::optional<Hash> expectedCAHash)
+{
+    /* FIXME: inefficient: we're reading/hashing 'tmpFile' three
+       times. */
+
+    auto [narHash, narSize] = hashPath(htSHA256, srcPath);
+
+    auto hash = method == FileIngestionMethod::Recursive
+        ? hashAlgo == htSHA256
+          ? narHash
+          : hashPath(hashAlgo, srcPath).first
+        : hashFile(hashAlgo, srcPath);
+
+    if (expectedCAHash && expectedCAHash != hash)
+        throw Error("hash mismatch for '%s'", srcPath);
+
+    ValidPathInfo info(makeFixedOutputPath(method, hash, name));
+    info.narHash = narHash;
+    info.narSize = narSize;
+    info.ca = FixedOutputHash { .method = method, .hash = hash };
+
+    if (!isValidPath(info.path)) {
+        auto source = sinkToSource([&](Sink & sink) {
+            dumpPath(srcPath, sink);
+        });
+        addToStore(info, *source);
+    }
+
+    return info;
+}
+
 Store::Store(const Params & params)
     : Config(params)
     , state({(size_t) pathInfoCacheSize})
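addToStoreSlow trades CPU for memory: it may hash the source several times (hence the FIXME), but it never holds the whole NAR in memory, because sinkToSource turns the push-style dumpPath writer into a pull-style Source for addToStore. A standalone sketch of the push side of that idea, with a minimal stand-in for Nix's Sink type (the chunking is illustrative, not the NAR format):

// Standalone sketch: produce data through a writer callback so the consumer
// sees a stream of chunks instead of one fully materialized buffer.
#include <functional>
#include <iostream>
#include <string>

// Minimal stand-in for Nix's Sink abstraction.
struct Sink { std::function<void(const std::string &)> write; };

void dumpPath(const std::string & src, Sink & sink)
{
    // A real implementation would serialize the file tree as a NAR;
    // here we just emit a few chunks.
    for (int i = 0; i < 3; i++)
        sink.write("chunk from " + src + "\n");
}

int main()
{
    // The consumer handles each chunk as it is produced; at no point does
    // the full dump live in memory at once.
    Sink sink{[](const std::string & chunk) { std::cout << chunk; }};
    dumpPath("/some/source/path", sink);
}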
@@ -242,6 +277,16 @@ bool Store::PathInfoCacheValue::isKnownNow()
     return std::chrono::steady_clock::now() < time_point + ttl;
 }

+StorePathSet Store::queryDerivationOutputs(const StorePath & path)
+{
+    auto outputMap = this->queryDerivationOutputMap(path);
+    StorePathSet outputPaths;
+    for (auto & i: outputMap) {
+        outputPaths.emplace(std::move(i.second));
+    }
+    return outputPaths;
+}
+
 bool Store::isValidPath(const StorePath & storePath)
 {
     std::string hashPart(storePath.hashPart());

@@ -306,6 +351,14 @@ ref<const ValidPathInfo> Store::queryPathInfo(const StorePath & storePath)
 }

+static bool goodStorePath(const StorePath & expected, const StorePath & actual)
+{
+    return
+        expected.hashPart() == actual.hashPart()
+        && (expected.name() == Store::MissingName || expected.name() == actual.name());
+}
+
 void Store::queryPathInfo(const StorePath & storePath,
     Callback<ref<const ValidPathInfo>> callback) noexcept
 {

@@ -333,7 +386,7 @@ void Store::queryPathInfo(const StorePath & storePath,
                 state_->pathInfoCache.upsert(hashPart,
                     res.first == NarInfoDiskCache::oInvalid ? PathInfoCacheValue{} : PathInfoCacheValue{ .value = res.second });
                 if (res.first == NarInfoDiskCache::oInvalid ||
-                    res.second->path != storePath)
+                    !goodStorePath(storePath, res.second->path))
                     throw InvalidPath("path '%s' is not valid", printStorePath(storePath));
             }
             return callback(ref<const ValidPathInfo>(res.second));

@@ -345,7 +398,7 @@ void Store::queryPathInfo(const StorePath & storePath,
     auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

     queryPathInfoUncached(storePath,
-        {[this, storePath{printStorePath(storePath)}, hashPart, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
+        {[this, storePathS{printStorePath(storePath)}, hashPart, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {

             try {
                 auto info = fut.get();

@@ -358,9 +411,11 @@ void Store::queryPathInfo(const StorePath & storePath,
                     state_->pathInfoCache.upsert(hashPart, PathInfoCacheValue { .value = info });
                 }

-                if (!info || info->path != parseStorePath(storePath)) {
+                auto storePath = parseStorePath(storePathS);
+
+                if (!info || !goodStorePath(storePath, info->path)) {
                     stats.narInfoMissing++;
-                    throw InvalidPath("path '%s' is not valid", storePath);
+                    throw InvalidPath("path '%s' is not valid", storePathS);
                 }

                 (*callbackPtr)(ref<const ValidPathInfo>(info));
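In the async queryPathInfo path, the lambda now captures the printed store path as a string (storePathS) and re-parses it inside the callback, since the callback may run long after the caller's StorePath is gone. A standalone sketch of that capture-by-value pattern (the functions here are stand-ins, not Nix's Callback machinery):

// Standalone sketch: an async callback owns its own copy of the path.
#include <functional>
#include <iostream>
#include <string>
#include <thread>

void queryPathInfoUncached(const std::string & path,
    std::function<void(std::string)> callback)
{
    // Simulate completion on another thread.
    std::thread([callback, path]() { callback("info for " + path); }).join();
}

int main()
{
    std::string printed = "/nix/store/abcd-foo"; // illustrative
    // Capture the printed form by value, as the lambda above does with
    // storePathS, so the callback stays valid after the caller returns.
    queryPathInfoUncached(printed, [printed](std::string info) {
        std::cout << info << " (requested " << printed << ")\n";
    });
}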
@@ -505,7 +560,7 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & store
             if (!narInfo->url.empty())
                 jsonPath.attr("url", narInfo->url);
             if (narInfo->fileHash)
-                jsonPath.attr("downloadHash", narInfo->fileHash.to_string(Base32, true));
+                jsonPath.attr("downloadHash", narInfo->fileHash.to_string(hashBase, true));
             if (narInfo->fileSize)
                 jsonPath.attr("downloadSize", narInfo->fileSize);
             if (showClosureSize)
@@ -31,7 +31,7 @@ MakeError(InvalidPath, Error);
 MakeError(Unsupported, Error);
 MakeError(SubstituteGone, Error);
 MakeError(SubstituterDisabled, Error);
-MakeError(NotInStore, Error);
+MakeError(BadStorePath, Error);

 class FSAccessor;

@@ -317,9 +317,9 @@ public:
        the Nix store. */
     bool isStorePath(std::string_view path) const;

-    /* Chop off the parts after the top-level store name, e.g.,
-       /nix/store/abcd-foo/bar => /nix/store/abcd-foo. */
-    Path toStorePath(const Path & path) const;
+    /* Split a path like /nix/store/<hash>-<name>/<bla> into
+       /nix/store/<hash>-<name> and /<bla>. */
+    std::pair<StorePath, Path> toStorePath(const Path & path) const;

     /* Follow symlinks until we end up with a path in the Nix store. */
     Path followLinksToStore(std::string_view path) const;
@@ -384,13 +384,16 @@ public:
         SubstituteFlag maybeSubstitute = NoSubstitute);

     /* Query the set of all valid paths. Note that for some store
-       backends, the name part of store paths may be omitted
-       (i.e. you'll get /nix/store/<hash> rather than
+       backends, the name part of store paths may be replaced by 'x'
+       (i.e. you'll get /nix/store/<hash>-x rather than
        /nix/store/<hash>-<name>). Use queryPathInfo() to obtain the
-       full store path. */
+       full store path. FIXME: should return a set of
+       std::variant<StorePath, HashPart> to get rid of this hack. */
     virtual StorePathSet queryAllValidPaths()
     { unsupported("queryAllValidPaths"); }

+    constexpr static const char * MissingName = "x";
+
     /* Query information about a valid path. It is permitted to omit
        the name part of the store path. */
     ref<const ValidPathInfo> queryPathInfo(const StorePath & path);
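This is where the 'x' placeholder used by goodStorePath in store-api.cc above is defined: MissingName. A queried path whose name part is the placeholder matches any concrete name with the same hash part. A standalone sketch of the comparison (the StorePath stand-in and the values are hypothetical):

// Standalone sketch of goodStorePath(): a placeholder name "x" matches any
// actual name, so lookups keyed only on the hash still succeed.
#include <cassert>
#include <string>

constexpr const char * MissingName = "x";

struct FakeStorePath { // hypothetical stand-in for StorePath
    std::string hash, name;
};

bool goodStorePath(const FakeStorePath & expected, const FakeStorePath & actual)
{
    return expected.hash == actual.hash
        && (expected.name == MissingName || expected.name == actual.name);
}

int main()
{
    FakeStorePath real{"1lxka4ly", "hello-2.10"}; // illustrative values
    FakeStorePath unnamed{"1lxka4ly", "x"};
    FakeStorePath other{"1lxka4ly", "goodbye-1.0"};

    assert(goodStorePath(unnamed, real)); // 'x' matches any name
    assert(!goodStorePath(other, real));  // a concrete mismatch still fails
}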
@@ -418,8 +421,11 @@ public:
     virtual StorePathSet queryValidDerivers(const StorePath & path) { return {}; };

     /* Query the outputs of the derivation denoted by `path'. */
-    virtual StorePathSet queryDerivationOutputs(const StorePath & path)
-    { unsupported("queryDerivationOutputs"); }
+    virtual StorePathSet queryDerivationOutputs(const StorePath & path);

+    /* Query the mapping outputName=>outputPath for the given derivation. */
+    virtual OutputPathMap queryDerivationOutputMap(const StorePath & path)
+    { unsupported("queryDerivationOutputMap"); }
+
     /* Query the full store path given the hash part of a valid store
        path, or empty if the path doesn't exist. */

@@ -436,8 +442,7 @@ public:

     /* Import a path into the store. */
     virtual void addToStore(const ValidPathInfo & info, Source & narSource,
-        RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs,
-        std::shared_ptr<FSAccessor> accessor = 0) = 0;
+        RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) = 0;

     /* Copy the contents of a path to the store and register the
        validity of the resulting path. The resulting path is returned.

@@ -447,6 +452,13 @@ public:
         FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
         PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) = 0;

+    /* Copy the contents of a path to the store and register the
+       validity of the resulting path, using a constant amount of
+       memory. */
+    ValidPathInfo addToStoreSlow(std::string_view name, const Path & srcPath,
+        FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
+        std::optional<Hash> expectedCAHash = {});
+
     // FIXME: remove?
     virtual StorePath addToStoreFromDump(const string & dump, const string & name,
         FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)

@@ -613,8 +625,7 @@ public:
        the Nix store. Optionally, the contents of the NARs are
        preloaded into the specified FS accessor to speed up subsequent
        access. */
-    StorePaths importPaths(Source & source, std::shared_ptr<FSAccessor> accessor,
-        CheckSigsFlag checkSigs = CheckSigs);
+    StorePaths importPaths(Source & source, CheckSigsFlag checkSigs = CheckSigs);

     struct Stats
     {
@@ -6,7 +6,7 @@ namespace nix {
 #define WORKER_MAGIC_1 0x6e697863
 #define WORKER_MAGIC_2 0x6478696f

-#define PROTOCOL_VERSION 0x115
+#define PROTOCOL_VERSION 0x116
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

@@ -30,7 +30,7 @@ typedef enum {
     wopSetOptions = 19,
     wopCollectGarbage = 20,
     wopQuerySubstitutablePathInfo = 21,
-    wopQueryDerivationOutputs = 22,
+    wopQueryDerivationOutputs = 22, // obsolete
     wopQueryAllValidPaths = 23,
     wopQueryFailedPaths = 24,
     wopClearFailedPaths = 25,

@@ -49,6 +49,7 @@ typedef enum {
     wopNarFromPath = 38,
     wopAddToStoreNar = 39,
     wopQueryMissing = 40,
+    wopQueryDerivationOutputMap = 41,
 } WorkerOp;

@@ -69,5 +70,6 @@ template<class T> T readStorePaths(const Store & store, Source & from);

 void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths);

+void writeOutputPathMap(const Store & store, Sink & out, const OutputPathMap & paths);

 }
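The version bump from 0x115 to 0x116 touches only the low byte: the GET_PROTOCOL_* macros split the version word into a major high byte and a minor low byte, and 0x16 is the minor version that RemoteStore::queryDerivationOutputs checks for above. A quick standalone check of that encoding:

// Standalone check of the version encoding used by the worker protocol.
#include <cassert>

#define PROTOCOL_VERSION 0x116
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

int main()
{
    assert(GET_PROTOCOL_MAJOR(PROTOCOL_VERSION) == 0x100);
    assert(GET_PROTOCOL_MINOR(PROTOCOL_VERSION) == 0x16); // gates the new opcode
}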