
Fix 'error 9 while decompressing xz file'

Once we've started writing data to a Sink, we can't restart a download
request, because then we end up writing duplicate data to the
Sink. Therefore we shouldn't handle retries in Downloader but at a
higher level (in particular, in copyStorePath()).

Fixes #2952.

(cherry picked from commit a67cf5a358)
Eelco Dolstra 2019-06-24 21:48:52 +02:00
parent 2fef4dd296
commit 78fa47a7f0
7 changed files with 156 additions and 119 deletions
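As the commit message explains, retrying is moved out of the Downloader and up into copyStorePath(), which re-runs the whole copy (including opening a fresh Sink) on a transient failure instead of resuming into a half-written one. Below is a minimal, self-contained sketch of that retry-at-a-higher-level pattern; it is not the helper behind the new #include "retry.hh", and TransientError, the 3-attempt limit and the linear backoff are illustrative assumptions only.

#include <chrono>
#include <functional>
#include <iostream>
#include <stdexcept>
#include <thread>

// Hypothetical stand-in for a transient network failure; the real code
// distinguishes recoverable download errors from fatal ones.
struct TransientError : std::runtime_error
{
    using std::runtime_error::runtime_error;
};

// Run 'f' up to 'attempts' times, backing off between attempts.  Each retry
// re-runs the whole operation from scratch, so nothing written by a failed
// attempt is ever reused.
template<typename T>
T retry(unsigned int attempts, std::function<T()> f)
{
    unsigned int attempt = 0;
    while (true) {
        try {
            return f();
        } catch (TransientError & e) {
            if (++attempt >= attempts) throw;
            std::cerr << e.what() << "; retrying\n";
            std::this_thread::sleep_for(std::chrono::milliseconds(250 * attempt));
        }
    }
}

int main()
{
    // Mirrors the shape of the new copyStorePath(): the sink would be created
    // inside the lambda, so every attempt starts with a fresh sink instead of
    // appending duplicate data to the one a failed attempt already wrote to.
    retry<void>(3, [&]() {
        // ... open a fresh sink and stream the NAR into it here; throw
        // TransientError on a recoverable failure to trigger another attempt.
    });
}

The key design point is that everything touching the sink lives inside the retried lambda, so a retry never appends to data written by a previous attempt.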


@@ -6,10 +6,11 @@
 #include "thread-pool.hh"
 #include "json.hh"
 #include "derivations.hh"
+#include "retry.hh"
+#include "download.hh"
 
 #include <future>
 
 namespace nix {
@@ -572,54 +573,57 @@ void Store::buildPaths(const PathSet & paths, BuildMode buildMode)
 void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
     const Path & storePath, RepairFlag repair, CheckSigsFlag checkSigs)
 {
-    auto srcUri = srcStore->getUri();
-    auto dstUri = dstStore->getUri();
-
-    Activity act(*logger, lvlInfo, actCopyPath,
-        srcUri == "local" || srcUri == "daemon"
-          ? fmt("copying path '%s' to '%s'", storePath, dstUri)
-          : dstUri == "local" || dstUri == "daemon"
-            ? fmt("copying path '%s' from '%s'", storePath, srcUri)
-            : fmt("copying path '%s' from '%s' to '%s'", storePath, srcUri, dstUri),
-        {storePath, srcUri, dstUri});
-    PushActivity pact(act.id);
+    retry<void>(downloadSettings.tries, [&]() {
 
-    auto info = srcStore->queryPathInfo(storePath);
+        auto srcUri = srcStore->getUri();
+        auto dstUri = dstStore->getUri();
 
-    uint64_t total = 0;
+        Activity act(*logger, lvlInfo, actCopyPath,
+            srcUri == "local" || srcUri == "daemon"
+              ? fmt("copying path '%s' to '%s'", storePath, dstUri)
+              : dstUri == "local" || dstUri == "daemon"
+                ? fmt("copying path '%s' from '%s'", storePath, srcUri)
+                : fmt("copying path '%s' from '%s' to '%s'", storePath, srcUri, dstUri),
+            {storePath, srcUri, dstUri});
+        PushActivity pact(act.id);
 
-    if (!info->narHash) {
-        StringSink sink;
-        srcStore->narFromPath({storePath}, sink);
-        auto info2 = make_ref<ValidPathInfo>(*info);
-        info2->narHash = hashString(htSHA256, *sink.s);
-        if (!info->narSize) info2->narSize = sink.s->size();
-        if (info->ultimate) info2->ultimate = false;
-        info = info2;
+        auto info = srcStore->queryPathInfo(storePath);
 
-        StringSource source(*sink.s);
-        dstStore->addToStore(*info, source, repair, checkSigs);
-        return;
-    }
+        uint64_t total = 0;
 
-    if (info->ultimate) {
-        auto info2 = make_ref<ValidPathInfo>(*info);
-        info2->ultimate = false;
-        info = info2;
-    }
+        if (!info->narHash) {
+            StringSink sink;
+            srcStore->narFromPath({storePath}, sink);
+            auto info2 = make_ref<ValidPathInfo>(*info);
+            info2->narHash = hashString(htSHA256, *sink.s);
+            if (!info->narSize) info2->narSize = sink.s->size();
+            if (info->ultimate) info2->ultimate = false;
+            info = info2;
 
-    auto source = sinkToSource([&](Sink & sink) {
-        LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
-            sink(data, len);
-            total += len;
-            act.progress(total, info->narSize);
+            StringSource source(*sink.s);
+            dstStore->addToStore(*info, source, repair, checkSigs);
+            return;
+        }
+
+        if (info->ultimate) {
+            auto info2 = make_ref<ValidPathInfo>(*info);
+            info2->ultimate = false;
+            info = info2;
+        }
+
+        auto source = sinkToSource([&](Sink & sink) {
+            LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
+                sink(data, len);
+                total += len;
+                act.progress(total, info->narSize);
+            });
+            srcStore->narFromPath({storePath}, wrapperSink);
+        }, [&]() {
+            throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri());
         });
-        srcStore->narFromPath({storePath}, wrapperSink);
-    }, [&]() {
-        throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri());
-    });
 
-    dstStore->addToStore(*info, *source, repair, checkSigs);
+        dstStore->addToStore(*info, *source, repair, checkSigs);
+    });
 }