Mirror of https://github.com/NixOS/nix, synced 2025-07-07 01:51:47 +02:00
Tagging release 2.24.9

-----BEGIN PGP SIGNATURE-----

iQFHBAABCAAxFiEEtUHVUwEnDgvPFcpdgXC0cm1xmN4FAmb3K78THGVkb2xzdHJh
QGdtYWlsLmNvbQAKCRCBcLRybXGY3t5xB/4mKlFd8hka45CuQrGN6lJrIs76kvn5
mXDLWpHTOyipUZN1ZKACUPlKD/8cP8sHwd3/fILlwKAOurgWCd/+QwAPltT01r/w
T02E4haXGLmWwdZ+uPcm/lBdZVq8IZ1oU/9+EFKsbaYpa4O4kZPHe3joPr4ebVlO
zXndiR5FDSSEg05qAXr62KndgydTf/xtjEEv6jONzMaO1MCK6OAHIKCZg2ybsV/S
5ayfUESRFwGg4/BbzSEkEO0wl8mgwo6PbD0BI83FSC9W1gaR2ImadjA9GPKBkS1o
8Rj/KrP55JZkQExEQWquptEMlKoDdruQUelXXBBeqnXErG2bORV+Z7xG
=SUre
-----END PGP SIGNATURE-----

Merge tag '2.24.9' into sync-2.24.9
This commit is contained in commit d8d38c4257.

58 changed files with 605 additions and 415 deletions
@@ -170,7 +170,9 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas
 {
     if (EvalSettings::isPseudoUrl(s)) {
         auto accessor = fetchers::downloadTarball(
-            EvalSettings::resolvePseudoUrl(s)).accessor;
+            state.store,
+            state.fetchSettings,
+            EvalSettings::resolvePseudoUrl(s));
         auto storePath = fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy);
         return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
     }

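The new call shape above is a small pipeline: resolve the pseudo-URL, download the tarball as a SourceAccessor, copy it into the store, and hand back a real path. A minimal sketch of the same pipeline as one helper (the helper name is hypothetical; the calls are exactly those shown in the hunk):

    // Sketch only: combines the steps from the hunk above into one helper.
    // `fetchTarballToSourcePath` is a made-up name, not part of the patch.
    SourcePath fetchTarballToSourcePath(EvalState & state, std::string_view s)
    {
        auto accessor = fetchers::downloadTarball(
            state.store,
            state.fetchSettings,
            EvalSettings::resolvePseudoUrl(s));
        auto storePath = fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy);
        return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
    }
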
@@ -32,122 +32,6 @@ static void * oomHandler(size_t requested)
     throw std::bad_alloc();
 }

-class BoehmGCStackAllocator : public StackAllocator
-{
-    boost::coroutines2::protected_fixedsize_stack stack{
-        // We allocate 8 MB, the default max stack size on NixOS.
-        // A smaller stack might be quicker to allocate but reduces the stack
-        // depth available for source filter expressions etc.
-        std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))};
-
-    // This is specific to boost::coroutines2::protected_fixedsize_stack.
-    // The stack protection page is included in sctx.size, so we have to
-    // subtract one page size from the stack size.
-    std::size_t pfss_usable_stack_size(boost::context::stack_context & sctx)
-    {
-        return sctx.size - boost::context::stack_traits::page_size();
-    }
-
-public:
-    boost::context::stack_context allocate() override
-    {
-        auto sctx = stack.allocate();
-
-        // Stacks generally start at a high address and grow to lower addresses.
-        // Architectures that do the opposite are rare; in fact so rare that
-        // boost_routine does not implement it.
-        // So we subtract the stack size.
-        GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
-        return sctx;
-    }
-
-    void deallocate(boost::context::stack_context sctx) override
-    {
-        GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
-        stack.deallocate(sctx);
-    }
-};
-
-static BoehmGCStackAllocator boehmGCStackAllocator;
-
-/**
- * When a thread goes into a coroutine, we lose its original sp until
- * control flow returns to the thread.
- * While in the coroutine, the sp points outside the thread stack,
- * so we can detect this and push the entire thread stack instead,
- * as an approximation.
- * The coroutine's stack is covered by `BoehmGCStackAllocator`.
- * This is not an optimal solution, because the garbage is scanned when a
- * coroutine is active, for both the coroutine and the original thread stack.
- * However, the implementation is quite lean, and usually we don't have active
- * coroutines during evaluation, so this is acceptable.
- */
-void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id)
-{
-    void *& sp = *sp_ptr;
-    auto pthread_id = reinterpret_cast<pthread_t>(_pthread_id);
-# ifndef __APPLE__
-    pthread_attr_t pattr;
-# endif
-    size_t osStackSize;
-    // The low address of the stack, which grows down.
-    void * osStackLimit;
-    void * osStackBase;
-
-# ifdef __APPLE__
-    osStackSize = pthread_get_stacksize_np(pthread_id);
-    osStackLimit = pthread_get_stackaddr_np(pthread_id);
-# else
-    if (pthread_attr_init(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
-    }
-# ifdef HAVE_PTHREAD_GETATTR_NP
-    if (pthread_getattr_np(pthread_id, &pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_getattr_np failed");
-    }
-# elif HAVE_PTHREAD_ATTR_GET_NP
-    if (!pthread_attr_init(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
-    }
-    if (!pthread_attr_get_np(pthread_id, &pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_get_np failed");
-    }
-# else
-#   error "Need one of `pthread_attr_get_np` or `pthread_getattr_np`"
-# endif
-    if (pthread_attr_getstack(&pattr, &osStackLimit, &osStackSize)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed");
-    }
-    if (pthread_attr_destroy(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed");
-    }
-# endif
-    osStackBase = (char *) osStackLimit + osStackSize;
-    // NOTE: We assume the stack grows down, as it does on all architectures we support.
-    // Architectures that grow the stack up are rare.
-    if (sp >= osStackBase || sp < osStackLimit) { // sp is outside the os stack
-        sp = osStackLimit;
-    }
-}
-
-/* Disable GC while this object lives. Used by CoroutineContext.
- *
- * Boehm keeps a count of GC_disable() and GC_enable() calls,
- * and only enables GC when the count matches.
- */
-class BoehmDisableGC
-{
-public:
-    BoehmDisableGC()
-    {
-        GC_disable();
-    };
-    ~BoehmDisableGC()
-    {
-        GC_enable();
-    };
-};
-
 static inline void initGCReal()
 {
     /* Initialise the Boehm garbage collector. */

@@ -168,24 +52,6 @@ static inline void initGCReal()

     GC_set_oom_fn(oomHandler);

-    StackAllocator::defaultAllocator = &boehmGCStackAllocator;
-
-    // TODO: Remove __APPLE__ condition.
-    // Comment suggests an implementation that works on darwin and windows
-    // https://github.com/ivmai/bdwgc/issues/362#issuecomment-1936672196
-# if GC_VERSION_MAJOR >= 8 && GC_VERSION_MINOR >= 2 && GC_VERSION_MICRO >= 4 && !defined(__APPLE__)
-    GC_set_sp_corrector(&fixupBoehmStackPointer);
-
-    if (!GC_get_sp_corrector()) {
-        printTalkative("BoehmGC on this platform does not support sp_corrector; will disable GC inside coroutines");
-        /* Used to disable GC when entering coroutines on macOS */
-        create_coro_gc_hook = []() -> std::shared_ptr<void> { return std::make_shared<BoehmDisableGC>(); };
-    }
-# else
-#   warning \
-        "BoehmGC version does not support GC while coroutine exists. GC will be disabled inside coroutines. Consider updating bdw-gc to 8.2.4 or later."
-# endif
-
     /* Set the initial heap size to something fairly big (25% of
        physical RAM, up to a maximum of 384 MiB) so that in most cases
        we don't need to garbage collect at all. (Collection has a

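The removed create_coro_gc_hook relied on std::shared_ptr<void> type erasure: the shared_ptr remembers the concrete deleter even though the static type is erased, so the hook could hand back an arbitrary RAII object. A minimal sketch of that trick, using the BoehmDisableGC class shown in the hunk above:

    // GC stays disabled for exactly as long as the type-erased guard lives.
    std::shared_ptr<void> guard = std::make_shared<BoehmDisableGC>(); // ctor calls GC_disable()
    // ... run coroutine code that BoehmGC must not scan mid-switch ...
    guard.reset(); // ~BoehmDisableGC() still runs despite the erased type, calling GC_enable()
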
@@ -3083,7 +3083,9 @@ std::optional<std::string> EvalState::resolveLookupPathPath(const LookupPath::Pa
     if (EvalSettings::isPseudoUrl(value)) {
         try {
             auto accessor = fetchers::downloadTarball(
-                EvalSettings::resolvePseudoUrl(value)).accessor;
+                store,
+                fetchSettings,
+                EvalSettings::resolvePseudoUrl(value));
             auto storePath = fetchToStore(*store, SourcePath(accessor), FetchMode::Copy);
             return finish(store->toRealPath(storePath));
         } catch (Error & e) {

@@ -3136,7 +3136,11 @@ static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value * * arg
         std::optional<ListBuilder> list;
     };

+#if HAVE_BOEHMGC
+    std::map<Symbol, Item, std::less<Symbol>, traceable_allocator<std::pair<const Symbol, Item>>> attrsSeen;
+#else
     std::map<Symbol, Item> attrsSeen;
+#endif

     state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.zipAttrsWith");
     state.forceList(*args[1], pos, "while evaluating the second argument passed to builtins.zipAttrsWith");

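Why the allocator matters: Boehm GC only scans memory it knows about, and std::map nodes from the default allocator are invisible to it, so values reachable only through the map could be collected prematurely. bdwgc's traceable_allocator allocates node memory that the collector scans but does not itself collect. A self-contained sketch of the same pattern, assuming the bdwgc C++ headers:

    #include <gc/gc_allocator.h>  // provides traceable_allocator
    #include <map>

    struct Value;  // stand-in for a GC-allocated object

    // Map nodes are scanned by the collector, so Value pointers stored in
    // them keep their referents alive.
    using GcAwareMap = std::map<
        int, Value *,
        std::less<int>,
        traceable_allocator<std::pair<const int, Value *>>>;
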
@@ -495,7 +495,11 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
         // https://github.com/NixOS/nix/issues/4313
         auto storePath =
             unpack
-            ? fetchToStore(*state.store, fetchers::downloadTarball(*url).accessor, FetchMode::Copy, name)
+            ? fetchToStore(
+                *state.store,
+                fetchers::downloadTarball(state.store, state.fetchSettings, *url),
+                FetchMode::Copy,
+                name)
             : fetchers::downloadFile(state.store, *url, name).storePath;

         if (expectedHash) {

@@ -460,7 +460,13 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
         std::string re = R"(Good "git" signature for \* with .* key SHA256:[)";
         for (const fetchers::PublicKey & k : publicKeys){
             // Calculate sha256 fingerprint from public key and escape the regex symbol '+' to match the key literally
-            auto fingerprint = trim(hashString(HashAlgorithm::SHA256, base64Decode(k.key)).to_string(nix::HashFormat::Base64, false), "=");
+            std::string keyDecoded;
+            try {
+                keyDecoded = base64Decode(k.key);
+            } catch (Error & e) {
+                e.addTrace({}, "while decoding public key '%s' used for git signature", k.key);
+            }
+            auto fingerprint = trim(hashString(HashAlgorithm::SHA256, keyDecoded).to_string(nix::HashFormat::Base64, false), "=");
             auto escaped_fingerprint = std::regex_replace(fingerprint, std::regex("\\+"), "\\+" );
             re += "(" + escaped_fingerprint + ")";
         }

@@ -601,12 +607,16 @@ struct GitSourceAccessor : SourceAccessor
         return readBlob(path, true);
     }

-    Hash getSubmoduleRev(const CanonPath & path)
+    /**
+     * If `path` exists and is a submodule, return its
+     * revision. Otherwise return nothing.
+     */
+    std::optional<Hash> getSubmoduleRev(const CanonPath & path)
     {
-        auto entry = need(path);
+        auto entry = lookup(path);

-        if (git_tree_entry_type(entry) != GIT_OBJECT_COMMIT)
-            throw Error("'%s' is not a submodule", showPath(path));
+        if (!entry || git_tree_entry_type(entry) != GIT_OBJECT_COMMIT)
+            return std::nullopt;

         return toHash(*git_tree_entry_id(entry));
     }

@@ -1074,8 +1084,10 @@ std::vector<std::tuple<GitRepoImpl::Submodule, Hash>> GitRepoImpl::getSubmodules
     auto rawAccessor = getRawAccessor(rev);

     for (auto & submodule : parseSubmodules(pathTemp)) {
-        auto rev = rawAccessor->getSubmoduleRev(submodule.path);
-        result.push_back({std::move(submodule), rev});
+        /* Filter out .gitmodules entries that don't exist or are not
+           submodules. */
+        if (auto rev = rawAccessor->getSubmoduleRev(submodule.path))
+            result.push_back({std::move(submodule), *rev});
     }

     return result;

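The loop body above uses the C++17 if-with-initializer idiom over a std::optional: the binding lives only inside the if, and a disengaged optional simply skips the entry. A generic sketch of the same idiom (items, lookup, and results are placeholders):

    // Keep only the entries for which lookup() yields a value.
    for (auto & item : items)
        if (auto v = lookup(item))   // std::optional<T>; false when empty
            results.push_back(*v);
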
@@ -584,9 +584,10 @@ struct GitInputScheme : InputScheme
             }

             try {
-                setWriteTime(localRefFile, now, now);
+                if (!input.getRev())
+                    setWriteTime(localRefFile, now, now);
             } catch (Error & e) {
-                warn("could not update mtime for file '%s': %s", localRefFile, e.msg());
+                warn("could not update mtime for file '%s': %s", localRefFile, e.info().msg);
             }
             if (!originalRef && !storeCachedHead(repoInfo.url, ref))
                 warn("could not update cached head '%s' for '%s'", ref, repoInfo.url);

@@ -102,7 +102,7 @@ DownloadFileResult downloadFile(
     };
 }

-DownloadTarballResult downloadTarball(
+static DownloadTarballResult downloadTarball_(
     const std::string & url,
     const Headers & headers)
 {

@@ -202,6 +202,22 @@ DownloadTarballResult downloadTarball(
     return attrsToResult(infoAttrs);
 }

+ref<SourceAccessor> downloadTarball(
+    ref<Store> store,
+    const Settings & settings,
+    const std::string & url)
+{
+    /* Go through Input::getAccessor() to ensure that the resulting
+       accessor has a fingerprint. */
+    fetchers::Attrs attrs;
+    attrs.insert_or_assign("type", "tarball");
+    attrs.insert_or_assign("url", url);
+
+    auto input = Input::fromAttrs(settings, std::move(attrs));
+
+    return input.getAccessor(store).first;
+}
+
 // An input scheme corresponding to a curl-downloadable resource.
 struct CurlInputScheme : InputScheme
 {

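A possible call site for the new wrapper, using only names introduced in this hunk (the URL is a placeholder). Routing through Input::fromAttrs, as the comment in the hunk explains, gives the accessor a fingerprint so downstream caching can identify the tarball:

    // Placeholder URL; `store` and `fetchSettings` come from the caller's context.
    ref<SourceAccessor> accessor = fetchers::downloadTarball(
        store, fetchSettings, "https://example.org/src.tar.gz");
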
@@ -353,7 +369,7 @@ struct TarballInputScheme : CurlInputScheme
     {
         auto input(_input);

-        auto result = downloadTarball(getStrAttr(input.attrs, "url"), {});
+        auto result = downloadTarball_(getStrAttr(input.attrs, "url"), {});

         result.accessor->setPathDisplay("«" + input.to_string() + "»");

@@ -14,6 +14,8 @@ struct SourceAccessor;

 namespace nix::fetchers {

+struct Settings;
+
 struct DownloadFileResult
 {
     StorePath storePath;

@@ -40,8 +42,9 @@ struct DownloadTarballResult
  * Download and import a tarball into the Git cache. The result is the
  * Git tree hash of the root directory.
  */
-DownloadTarballResult downloadTarball(
-    const std::string & url,
-    const Headers & headers = {});
+ref<SourceAccessor> downloadTarball(
+    ref<Store> store,
+    const Settings & settings,
+    const std::string & url);

 }

@@ -183,7 +183,7 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub,
     /* Make sure that we are allowed to start a substitution. Note that even
        if maxSubstitutionJobs == 0, we still allow a substituter to run. This
        prevents infinite waiting. */
-    if (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
+    while (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
         worker.waitForBuildSlot(shared_from_this());
         co_await Suspend{};
     }

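The if-to-while change is the classic guarded-wait fix: a woken goal must re-check the slot count, because another goal may have taken the freed slot between the wake-up and this goal actually running. Schematically (busy, limit, and wait are placeholders):

    // if (busy >= limit) { wait(); }   // wrong: assumes the condition still
    //                                  // holds after waking
    while (busy >= limit)               // right: re-check on every wake-up
        wait();
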
@@ -184,13 +184,13 @@ void Worker::wakeUp(GoalPtr goal)
 }


-unsigned Worker::getNrLocalBuilds()
+size_t Worker::getNrLocalBuilds()
 {
     return nrLocalBuilds;
 }


-unsigned Worker::getNrSubstitutions()
+size_t Worker::getNrSubstitutions()
 {
     return nrSubstitutions;
 }

@@ -92,12 +92,12 @@ private:
      * Number of build slots occupied. This includes local builds but does not
      * include substitutions or remote builds via the build hook.
      */
-    unsigned int nrLocalBuilds;
+    size_t nrLocalBuilds;

    /**
     * Number of substitution slots occupied.
     */
-    unsigned int nrSubstitutions;
+    size_t nrSubstitutions;

    /**
     * Maps used to prevent multiple instantiations of a goal for the

@@ -235,12 +235,12 @@ public:
      * Return the number of local build processes currently running (but not
      * remote builds via the build hook).
      */
-    unsigned int getNrLocalBuilds();
+    size_t getNrLocalBuilds();

    /**
     * Return the number of substitution processes currently running.
     */
-    unsigned int getNrSubstitutions();
+    size_t getNrSubstitutions();

    /**
     * Registers a running child process. `inBuildSlot` means that

@@ -38,10 +38,7 @@ void builtinFetchurl(

     auto source = sinkToSource([&](Sink & sink) {

-        /* No need to do TLS verification, because we check the hash of
-           the result anyway. */
         FileTransferRequest request(url);
-        request.verifyTLS = false;
         request.decompress = false;

         auto decompressor = makeDecompressionSink(

@@ -3,31 +3,53 @@

 namespace nix {

+namespace fs { using namespace std::filesystem; }
+
 void builtinUnpackChannel(
     const BasicDerivation & drv,
     const std::map<std::string, Path> & outputs)
 {
-    auto getAttr = [&](const std::string & name) {
+    auto getAttr = [&](const std::string & name) -> const std::string & {
         auto i = drv.env.find(name);
         if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
         return i->second;
     };

-    auto out = outputs.at("out");
-    auto channelName = getAttr("channelName");
-    auto src = getAttr("src");
+    fs::path out{outputs.at("out")};
+    auto & channelName = getAttr("channelName");
+    auto & src = getAttr("src");

-    createDirs(out);
+    if (fs::path{channelName}.filename().string() != channelName) {
+        throw Error("channelName is not allowed to contain filesystem seperators, got %1%", channelName);
+    }
+
+    try {
+        fs::create_directories(out);
+    } catch (fs::filesystem_error &) {
+        throw SysError("creating directory '%1%'", out.string());
+    }

     unpackTarfile(src, out);

-    auto entries = std::filesystem::directory_iterator{out};
-    auto fileName = entries->path().string();
-    auto fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries));
+    size_t fileCount;
+    std::string fileName;
+    try {
+        auto entries = fs::directory_iterator{out};
+        fileName = entries->path().string();
+        fileCount = std::distance(fs::begin(entries), fs::end(entries));
+    } catch (fs::filesystem_error &) {
+        throw SysError("failed to read directory %1%", out.string());
+    }

     if (fileCount != 1)
         throw Error("channel tarball '%s' contains more than one file", src);
-    std::filesystem::rename(fileName, (out + "/" + channelName));
+
+    auto target = out / channelName;
+    try {
+        fs::rename(fileName, target);
+    } catch (fs::filesystem_error &) {
+        throw SysError("failed to rename %1% to %2%", fileName, target.string());
+    }
 }

 }

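The filename() round-trip above is a compact separator check: for any string containing a path separator, std::filesystem::path::filename() returns only the final component, so the comparison fails. A quick illustration (sample values are hypothetical):

    namespace fs = std::filesystem;
    fs::path{"nixos-unstable"}.filename() == "nixos-unstable";  // true:  accepted
    fs::path{"a/b"}.filename()            == "a/b";             // false: rejected
    fs::path{"../etc"}.filename()         == "../etc";          // false: rejected
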
@@ -169,28 +169,29 @@ protected:
     {
         try {
             checkEnabled();
+
+            auto request(makeRequest(path));
+
+            auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
+
+            getFileTransfer()->enqueueFileTransfer(request,
+                {[callbackPtr, this](std::future<FileTransferResult> result) {
+                    try {
+                        (*callbackPtr)(std::move(result.get().data));
+                    } catch (FileTransferError & e) {
+                        if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
+                            return (*callbackPtr)({});
+                        maybeDisable();
+                        callbackPtr->rethrow();
+                    } catch (...) {
+                        callbackPtr->rethrow();
+                    }
+                }});
+
         } catch (...) {
             callback.rethrow();
             return;
         }
-
-        auto request(makeRequest(path));
-
-        auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
-
-        getFileTransfer()->enqueueFileTransfer(request,
-            {[callbackPtr, this](std::future<FileTransferResult> result) {
-                try {
-                    (*callbackPtr)(std::move(result.get().data));
-                } catch (FileTransferError & e) {
-                    if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
-                        return (*callbackPtr)({});
-                    maybeDisable();
-                    callbackPtr->rethrow();
-                } catch (...) {
-                    callbackPtr->rethrow();
-                }
-            }});
     }

     /**

@@ -159,8 +159,9 @@ static Machine parseBuilderLine(const std::set<std::string> & defaultSystems, co
         const auto & str = tokens[fieldIndex];
         try {
             base64Decode(str);
-        } catch (const Error & e) {
-            throw FormatError("bad machine specification: a column #%lu in a row: '%s' is not valid base64 string: %s", fieldIndex, line, e.what());
+        } catch (FormatError & e) {
+            e.addTrace({}, "while parsing machine specification at a column #%lu in a row: '%s'", fieldIndex, line);
+            throw;
         }
         return str;
     };

@@ -6,6 +6,16 @@

 namespace nix {

+static std::string parsePublicHostKey(std::string_view host, std::string_view sshPublicHostKey)
+{
+    try {
+        return base64Decode(sshPublicHostKey);
+    } catch (Error & e) {
+        e.addTrace({}, "while decoding ssh public host key for host '%s'", host);
+        throw;
+    }
+}
+
 SSHMaster::SSHMaster(
     std::string_view host,
     std::string_view keyFile,

@@ -14,7 +24,7 @@ SSHMaster::SSHMaster(
     : host(host)
     , fakeSSH(host == "localhost")
     , keyFile(keyFile)
-    , sshPublicHostKey(sshPublicHostKey)
+    , sshPublicHostKey(parsePublicHostKey(host, sshPublicHostKey))
     , useMaster(useMaster && !fakeSSH)
     , compress(compress)
     , logFD(logFD)

@@ -38,7 +48,7 @@ void SSHMaster::addCommonSSHOpts(Strings & args)
         std::filesystem::path fileName = state->tmpDir->path() / "host-key";
         auto p = host.rfind("@");
         std::string thost = p != std::string::npos ? std::string(host, p + 1) : host;
-        writeFile(fileName.string(), thost + " " + base64Decode(sshPublicHostKey) + "\n");
+        writeFile(fileName.string(), thost + " " + sshPublicHostKey + "\n");
         args.insert(args.end(), {"-oUserKnownHostsFile=" + fileName.string()});
     }
     if (compress)

@@ -14,6 +14,9 @@ private:
     const std::string host;
     bool fakeSSH;
     const std::string keyFile;
+    /**
+     * Raw bytes, not Base64 encoding.
+     */
     const std::string sshPublicHostKey;
     const bool useMaster;
     const bool compress;

@@ -210,14 +210,16 @@ StorePath Store::addToStore(
         fsm = FileSerialisationMethod::NixArchive;
         break;
     }
-    auto source = sinkToSource([&](Sink & sink) {
-        dumpPath(path, sink, fsm, filter);
+    std::optional<StorePath> storePath;
+    auto sink = sourceToSink([&](Source & source) {
+        LengthSource lengthSource(source);
+        storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
+        if (lengthSource.total >= settings.warnLargePathThreshold)
+            warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
     });
-    LengthSource lengthSource(*source);
-    auto storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
-    if (lengthSource.total >= settings.warnLargePathThreshold)
-        warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
-    return storePath;
+    dumpPath(path, *sink, fsm, filter);
+    sink->finish();
+    return storePath.value();
 }

 void Store::addMultipleToStore(

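For orientation, the two adapters invert control in opposite directions: sinkToSource turns a push-style producer into a pull-style Source, while sourceToSink turns a pull-style consumer into a push-style sink, each by running the wrapped function in a coroutine. A sketch of the new shape used above (consume and writeSomeBytes are placeholders):

    auto sink = sourceToSink([&](Source & source) {
        // Runs in a coroutine; pulls bytes as the caller pushes them.
        consume(source);                   // placeholder consumer
    });
    writeSomeBytes(*sink);                 // placeholder producer driving the sink
    sink->finish();                        // signals end-of-stream to the consumer
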
@@ -3000,6 +3000,7 @@ void LocalDerivationGoal::deleteTmpDir(bool force)
        might have privileged stuff (like a copy of netrc). */
     if (settings.keepFailed && !force && !drv->isBuiltin()) {
         printError("note: keeping build directory '%s'", tmpDir);
+        chmod(topTmpDir.c_str(), 0755);
         chmod(tmpDir.c_str(), 0755);
     }
     else

@@ -49,6 +49,7 @@ R""(
   (if (param "_ALLOW_LOCAL_NETWORKING")
     (begin
       (allow network* (remote ip "localhost:*"))
+      (allow network-inbound (local ip "*:*")) ; required to bind and listen

       ; Allow access to /etc/resolv.conf (which is a symlink to
       ; /private/var/run/resolv.conf).

@@ -23,7 +23,7 @@ struct ArchiveSettings : Config
         false,
 #endif
         "use-case-hack",
-        "Whether to enable a Darwin-specific hack for dealing with file name collisions."};
+        "Whether to enable a macOS-specific hack for dealing with file name case collisions."};
 };

 static ArchiveSettings archiveSettings;

@@ -214,11 +214,13 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath
     else if (t == "directory") {
         sink.createDirectory(path);

+        std::string prevName;
+
         while (1) {
             s = getString();

             if (s == "entry") {
-                std::string name, prevName;
+                std::string name;

                 s = getString();
                 if (s != "(") throw badArchive("expected open tag");

@@ -241,6 +243,9 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath
                     debug("case collision between '%1%' and '%2%'", i->first, name);
                     name += caseHackSuffix;
                     name += std::to_string(++i->second);
+                    auto j = names.find(name);
+                    if (j != names.end())
+                        throw Error("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first);
                 } else
                     names[name] = 0;
             }

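For context, a sketch of the behaviour this guards (assuming the case-hack suffix is the usual "~nix~case~hack~" plus a counter): with use-case-hack enabled, a NAR containing both foo and Foo unpacks as foo plus a renamed entry, and the new check rejects a NAR that already contains such a renamed name literally, since it would collide with the entry produced by the renaming.

    NAR entries            unpacked as
    -----------            -----------
    foo                    foo
    Foo                    Foo~nix~case~hack~1   (renamed to dodge the case collision)
    Foo~nix~case~hack~1    error: collides with the renamed entry above
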
@@ -68,10 +68,19 @@ static RestoreSinkSettings restoreSinkSettings;

 static GlobalConfig::Register r1(&restoreSinkSettings);

+static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path)
+{
+    auto dst = src;
+    if (!path.rel().empty())
+        dst /= path.rel();
+    return dst;
+}
+
 void RestoreSink::createDirectory(const CanonPath & path)
 {
-    std::filesystem::create_directory(dstPath / path.rel());
+    auto p = append(dstPath, path);
+    if (!std::filesystem::create_directory(p))
+        throw Error("path '%s' already exists", p.string());
 };

 struct RestoreRegularFile : CreateRegularFileSink {

@@ -82,14 +91,6 @@ struct RestoreRegularFile : CreateRegularFileSink {
     void preallocateContents(uint64_t size) override;
 };

-static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path)
-{
-    auto dst = src;
-    if (!path.rel().empty())
-        dst /= path.rel();
-    return dst;
-}
-
 void RestoreSink::createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func)
 {
     auto p = append(dstPath, path);

@@ -245,7 +245,12 @@ Hash::Hash(std::string_view rest, HashAlgorithm algo, bool isSRI)
     }

     else if (isSRI || rest.size() == base64Len()) {
-        auto d = base64Decode(rest);
+        std::string d;
+        try {
+            d = base64Decode(rest);
+        } catch (Error & e) {
+            e.addTrace({}, "While decoding hash '%s'", rest);
+        }
         if (d.size() != hashSize)
             throw BadHash("invalid %s hash '%s'", isSRI ? "SRI" : "base-64", rest);
         assert(hashSize);

@@ -132,23 +132,24 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath &
 {
     assertNoSymlinks(path);
     DirEntries res;
-    for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) {
-        checkInterrupt();
-        auto type = [&]() -> std::optional<Type> {
-            std::filesystem::file_type nativeType;
-            try {
-                nativeType = entry.symlink_status().type();
-            } catch (std::filesystem::filesystem_error & e) {
-                // We cannot always stat the child. (Ideally there is no
-                // stat because the native directory entry has the type
-                // already, but this isn't always the case.)
-                if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted)
-                    return std::nullopt;
-                else throw;
-            }
+    try {
+        for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) {
+            checkInterrupt();
+            auto type = [&]() -> std::optional<Type> {
+                std::filesystem::file_type nativeType;
+                try {
+                    nativeType = entry.symlink_status().type();
+                } catch (std::filesystem::filesystem_error & e) {
+                    // We cannot always stat the child. (Ideally there is no
+                    // stat because the native directory entry has the type
+                    // already, but this isn't always the case.)
+                    if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted)
+                        return std::nullopt;
+                    else throw;
+                }

-            // cannot exhaustively enumerate because implementation-specific
-            // additional file types are allowed.
+                // cannot exhaustively enumerate because implementation-specific
+                // additional file types are allowed.
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wswitch-enum"
                 switch (nativeType) {

@@ -158,8 +159,11 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath &
                 default: return tMisc;
                 }
 #pragma GCC diagnostic pop
-        }();
-        res.emplace(entry.path().filename().string(), type);
+            }();
+            res.emplace(entry.path().filename().string(), type);
+        }
+    } catch (std::filesystem::filesystem_error & e) {
+        throw SysError("reading directory %1%", showPath(path));
+    }
     return res;
 }

@@ -171,55 +171,6 @@ size_t StringSource::read(char * data, size_t len)
 #error Coroutines are broken in this version of Boost!
 #endif

-/* A concrete datatype allow virtual dispatch of stack allocation methods. */
-struct VirtualStackAllocator {
-    StackAllocator *allocator = StackAllocator::defaultAllocator;
-
-    boost::context::stack_context allocate() {
-        return allocator->allocate();
-    }
-
-    void deallocate(boost::context::stack_context sctx) {
-        allocator->deallocate(sctx);
-    }
-};
-
-
-/* This class reifies the default boost coroutine stack allocation strategy with
-   a virtual interface. */
-class DefaultStackAllocator : public StackAllocator {
-    boost::coroutines2::default_stack stack;
-
-    boost::context::stack_context allocate() override {
-        return stack.allocate();
-    }
-
-    void deallocate(boost::context::stack_context sctx) override {
-        stack.deallocate(sctx);
-    }
-};
-
-static DefaultStackAllocator defaultAllocatorSingleton;
-
-StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton;
-
-
-std::shared_ptr<void> (*create_coro_gc_hook)() = []() -> std::shared_ptr<void> {
-    return {};
-};
-
-/* This class is used for entry and exit hooks on coroutines */
-class CoroutineContext {
-    /* Disable GC when entering the coroutine without the boehm patch,
-     * since it doesn't find the main thread stack in this case.
-     * std::shared_ptr<void> performs type-erasure, so it will call the right
-     * deleter. */
-    const std::shared_ptr<void> coro_gc_hook = create_coro_gc_hook();
-public:
-    CoroutineContext() {};
-    ~CoroutineContext() {};
-};
-
 std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
 {
     struct SourceToSink : FinishSink

@@ -241,14 +192,12 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
             cur = in;

             if (!coro) {
-                CoroutineContext ctx;
-                coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) {
-                    LambdaSource source([&](char *out, size_t out_len) {
+                coro = coro_t::push_type([&](coro_t::pull_type & yield) {
+                    LambdaSource source([&](char * out, size_t out_len) {
                         if (cur.empty()) {
                             yield();
-                            if (yield.get()) {
-                                return (size_t)0;
-                            }
+                            if (yield.get())
+                                throw EndOfFile("coroutine has finished");
                         }

                         size_t n = std::min(cur.size(), out_len);

@@ -263,20 +212,14 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
             if (!*coro) { unreachable(); }

             if (!cur.empty()) {
-                CoroutineContext ctx;
                 (*coro)(false);
             }
         }

         void finish() override
         {
-            if (!coro) return;
-            if (!*coro) unreachable();
-            {
-                CoroutineContext ctx;
+            if (coro && *coro)
                 (*coro)(true);
-            }
-            if (*coro) unreachable();
         }
     };

@@ -307,8 +250,7 @@ std::unique_ptr<Source> sinkToSource(
         size_t read(char * data, size_t len) override
         {
             if (!coro) {
-                CoroutineContext ctx;
-                coro = coro_t::pull_type(VirtualStackAllocator{}, [&](coro_t::push_type & yield) {
+                coro = coro_t::pull_type([&](coro_t::push_type & yield) {
                     LambdaSink sink([&](std::string_view data) {
                         if (!data.empty()) yield(std::string(data));
                     });

@@ -320,7 +262,6 @@ std::unique_ptr<Source> sinkToSource(

             if (pos == cur.size()) {
                 if (!cur.empty()) {
-                    CoroutineContext ctx;
                     (*coro)();
                 }
                 cur = coro->get();

@@ -557,27 +557,4 @@ struct FramedSink : nix::BufferedSink
     };
 };

-/**
- * Stack allocation strategy for sinkToSource.
- * Mutable to avoid a boehm gc dependency in libutil.
- *
- * boost::context doesn't provide a virtual class, so we define our own.
- */
-struct StackAllocator {
-    virtual boost::context::stack_context allocate() = 0;
-    virtual void deallocate(boost::context::stack_context sctx) = 0;
-
-    /**
-     * The stack allocator to use in sinkToSource and potentially elsewhere.
-     * It is reassigned by the initGC() method in libexpr.
-     */
-    static StackAllocator *defaultAllocator;
-};
-
-/* Disabling GC when entering a coroutine (without the boehm patch).
-   mutable to avoid boehm gc dependency in libutil.
- */
-extern std::shared_ptr<void> (*create_coro_gc_hook)();
-
-
 }

@@ -14,17 +14,25 @@ BorrowedCryptoValue BorrowedCryptoValue::parse(std::string_view s)
     return {s.substr(0, colon), s.substr(colon + 1)};
 }

-Key::Key(std::string_view s)
+Key::Key(std::string_view s, bool sensitiveValue)
 {
     auto ss = BorrowedCryptoValue::parse(s);

     name = ss.name;
     key = ss.payload;

-    if (name == "" || key == "")
-        throw Error("secret key is corrupt");
+    try {
+        if (name == "" || key == "")
+            throw FormatError("key is corrupt");

-    key = base64Decode(key);
+        key = base64Decode(key);
+    } catch (Error & e) {
+        std::string extra;
+        if (!sensitiveValue)
+            extra = fmt(" with raw value '%s'", key);
+        e.addTrace({}, "while decoding key named '%s'%s", name, extra);
+        throw;
+    }
 }

 std::string Key::to_string() const

@@ -33,7 +41,7 @@ std::string Key::to_string() const
 }

 SecretKey::SecretKey(std::string_view s)
-    : Key(s)
+    : Key{s, true}
 {
     if (key.size() != crypto_sign_SECRETKEYBYTES)
         throw Error("secret key is not valid");

@@ -66,7 +74,7 @@ SecretKey SecretKey::generate(std::string_view name)
 }

 PublicKey::PublicKey(std::string_view s)
-    : Key(s)
+    : Key{s, false}
 {
     if (key.size() != crypto_sign_PUBLICKEYBYTES)
         throw Error("public key is not valid");

@@ -83,7 +91,12 @@ bool PublicKey::verifyDetached(std::string_view data, std::string_view sig) cons

 bool PublicKey::verifyDetachedAnon(std::string_view data, std::string_view sig) const
 {
-    auto sig2 = base64Decode(sig);
+    std::string sig2;
+    try {
+        sig2 = base64Decode(sig);
+    } catch (Error & e) {
+        e.addTrace({}, "while decoding signature '%s'", sig);
+    }
     if (sig2.size() != crypto_sign_BYTES)
         throw Error("signature is not valid");

@@ -31,15 +31,19 @@ struct Key
     std::string name;
     std::string key;

-    /**
-     * Construct Key from a string in the format
-     * ‘<name>:<key-in-base64>’.
-     */
-    Key(std::string_view s);
-
     std::string to_string() const;

 protected:

+    /**
+     * Construct Key from a string in the format
+     * ‘<name>:<key-in-base64>’.
+     *
+     * @param sensitiveValue Avoid displaying the raw Base64 in error
+     * messages to avoid leaking private keys.
+     */
+    Key(std::string_view s, bool sensitiveValue);
+
     Key(std::string_view name, std::string && key)
         : name(name), key(std::move(key)) { }
 };

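A hedged usage sketch of the changed error behaviour (key name and payload below are placeholders, not real keys): PublicKey passes sensitiveValue = false, so a malformed payload's raw value may appear in the error trace, while SecretKey passes true and withholds it.

    try {
        PublicKey pk("example-cache-1:n0t/valid#base64");   // '#' is not Base64
    } catch (Error & e) {
        // trace now includes: while decoding key named 'example-cache-1'
        // with raw value 'n0t/valid#base64'
    }
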
@@ -8,6 +8,10 @@

 namespace nix {

+namespace fs {
+using namespace std::filesystem;
+}
+
 namespace {

 int callback_open(struct archive *, void * self)

@@ -102,14 +106,14 @@ TarArchive::TarArchive(Source & source, bool raw, std::optional<std::string> com
         "Failed to open archive (%s)");
 }

-TarArchive::TarArchive(const Path & path)
+TarArchive::TarArchive(const fs::path & path)
     : archive{archive_read_new()}
     , buffer(defaultBufferSize)
 {
     archive_read_support_filter_all(archive);
     enableSupportedFormats(archive);
     archive_read_set_option(archive, NULL, "mac-ext", NULL);
-    check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s");
+    check(archive_read_open_filename(archive, path.string().c_str(), 16384), "failed to open archive: %s");
 }

 void TarArchive::close()

@@ -123,7 +127,7 @@ TarArchive::~TarArchive()
     archive_read_free(this->archive);
 }

-static void extract_archive(TarArchive & archive, const Path & destDir)
+static void extract_archive(TarArchive & archive, const fs::path & destDir)
 {
     int flags = ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_SECURE_SYMLINKS | ARCHIVE_EXTRACT_SECURE_NODOTDOT;

@@ -140,7 +144,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
         else
             archive.check(r);

-        archive_entry_copy_pathname(entry, (destDir + "/" + name).c_str());
+        archive_entry_copy_pathname(entry, (destDir / name).string().c_str());

         // sources can and do contain dirs with no rx bits
         if (archive_entry_filetype(entry) == AE_IFDIR && (archive_entry_mode(entry) & 0500) != 0500)

@@ -149,7 +153,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
         // Patch hardlink path
         const char * original_hardlink = archive_entry_hardlink(entry);
         if (original_hardlink) {
-            archive_entry_copy_hardlink(entry, (destDir + "/" + original_hardlink).c_str());
+            archive_entry_copy_hardlink(entry, (destDir / original_hardlink).string().c_str());
         }

         archive.check(archive_read_extract(archive.archive, entry, flags));

@@ -158,19 +162,19 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
     archive.close();
 }

-void unpackTarfile(Source & source, const Path & destDir)
+void unpackTarfile(Source & source, const fs::path & destDir)
 {
     auto archive = TarArchive(source);

-    createDirs(destDir);
+    fs::create_directories(destDir);
     extract_archive(archive, destDir);
 }

-void unpackTarfile(const Path & tarFile, const Path & destDir)
+void unpackTarfile(const fs::path & tarFile, const fs::path & destDir)
 {
     auto archive = TarArchive(tarFile);

-    createDirs(destDir);
+    fs::create_directories(destDir);
     extract_archive(archive, destDir);
 }

@@ -15,7 +15,7 @@ struct TarArchive

     void check(int err, const std::string & reason = "failed to extract archive (%s)");

-    explicit TarArchive(const Path & path);
+    explicit TarArchive(const std::filesystem::path & path);

     /// @brief Create a generic archive from source.
     /// @param source - Input byte stream.

@@ -37,9 +37,9 @@ struct TarArchive

 int getArchiveFilterCodeByName(const std::string & method);

-void unpackTarfile(Source & source, const Path & destDir);
+void unpackTarfile(Source & source, const std::filesystem::path & destDir);

-void unpackTarfile(const Path & tarFile, const Path & destDir);
+void unpackTarfile(const std::filesystem::path & tarFile, const std::filesystem::path & destDir);

 time_t unpackTarfileToSink(TarArchive & archive, ExtendedFileSystemObjectSink & parseSink);

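Usage after the signature change is the same apart from the types; std::filesystem::path parameters also accept plain string literals. A sketch (paths are placeholders):

    std::filesystem::path tarFile = "/tmp/src.tar.gz";   // placeholder
    std::filesystem::path destDir = "/tmp/unpacked";     // placeholder
    unpackTarfile(tarFile, destDir);   // creates destDir, then extracts into it
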
@@ -261,7 +261,7 @@ std::string base64Decode(std::string_view s)

         char digit = base64DecodeChars[(unsigned char) c];
         if (digit == npos)
-            throw Error("invalid character in Base64 string: '%c'", c);
+            throw FormatError("invalid character in Base64 string: '%c'", c);

         bits += 6;
         d = d << 6 | digit;

@@ -210,9 +210,13 @@ constexpr char treeNull[] = " ";

 /**
- * Base64 encoding/decoding.
+ * Encode arbitrary bytes as Base64.
  */
 std::string base64Encode(std::string_view s);

+/**
+ * Decode arbitrary bytes to Base64.
+ */
 std::string base64Decode(std::string_view s);

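Round-trip behaviour of the pair, plus the new exception type from the previous hunk (values are placeholders):

    auto enc = base64Encode("any bytes");        // "YW55IGJ5dGVz"
    assert(base64Decode(enc) == "any bytes");    // decoding inverts encoding

    try {
        base64Decode("!!!");                     // '!' is not a Base64 character
    } catch (FormatError & e) {
        // thrown since the previous hunk; was a plain Error before
    }
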
@@ -163,7 +163,7 @@ static void main_nix_build(int argc, char * * argv)
             script = argv[1];
             try {
                 auto lines = tokenizeString<Strings>(readFile(script), "\n");
-                if (std::regex_search(lines.front(), std::regex("^#!"))) {
+                if (!lines.empty() && std::regex_search(lines.front(), std::regex("^#!"))) {
                     lines.pop_front();
                     inShebang = true;
                     for (int i = 2; i < argc; ++i)

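The added guard closes a crash path: for an empty or whitespace-only script, tokenizeString returns an empty list, so calling lines.front() was undefined behaviour. A minimal reproduction of the fixed logic (script is a placeholder path):

    auto lines = tokenizeString<Strings>(readFile(script), "\n");
    // An empty script yields no tokens, so check before touching front():
    if (!lines.empty() && std::regex_search(lines.front(), std::regex("^#!"))) {
        // shebang handling
    }
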