Mirror of https://github.com/NixOS/nix (synced 2025-07-07 06:01:48 +02:00)

Commit 91e7d493ce: Merge remote-tracking branch 'origin/master' into relative-flakes

51 changed files with 1577 additions and 188 deletions
@@ -127,14 +127,9 @@ ref<EvalState> EvalCommand::getEvalState()
 {
     if (!evalState) {
         evalState =
-#if HAVE_BOEHMGC
             std::allocate_shared<EvalState>(
                 traceable_allocator<EvalState>(),
-#else
-            std::make_shared<EvalState>(
-#endif
-                lookupPath, getEvalStore(), fetchSettings, evalSettings, getStore())
-            ;
+                lookupPath, getEvalStore(), fetchSettings, evalSettings, getStore());
 
         evalState->repair = repair;
@@ -29,11 +29,6 @@
 #include "ref.hh"
 #include "value.hh"
 
-#if HAVE_BOEHMGC
-#define GC_INCLUDE_NEW
-#include <gc/gc_cpp.h>
-#endif
-
 #include "strings.hh"
 
 namespace nix {

@@ -62,9 +57,7 @@ enum class ProcessLineResult {
 struct NixRepl
     : AbstractNixRepl
     , detail::ReplCompleterMixin
-#if HAVE_BOEHMGC
     , gc
-#endif
 {
     size_t debugTraceIndex;
@@ -16,8 +16,6 @@
 
 #if HAVE_BOEHMGC
 # include <mutex>
-# define GC_INCLUDE_NEW 1
-# include "gc_cpp.h"
 #endif
 
 nix_err nix_libexpr_init(nix_c_context * context)

@@ -14,12 +14,6 @@
 
 #include <nlohmann/json.hpp>
 
-#if HAVE_BOEHMGC
-# include "gc/gc.h"
-# define GC_INCLUDE_NEW 1
-# include "gc_cpp.h"
-#endif
-
 void nix_set_string_return(nix_string_return * str, const char * c)
 {
     str->str = c;

@@ -14,12 +14,6 @@
 #include "nix_api_value.h"
 #include "value/context.hh"
 
-#if HAVE_BOEHMGC
-# include "gc/gc.h"
-# define GC_INCLUDE_NEW 1
-# include "gc_cpp.h"
-#endif
-
 // Internal helper functions to check [in] and [out] `Value *` parameters
 static const nix::Value & check_value_not_null(const nix_value * value)
 {
@@ -3,6 +3,33 @@
 
 #include <cstddef>
 
+#if HAVE_BOEHMGC
+
+# define GC_INCLUDE_NEW
+
+# include <gc/gc.h>
+# include <gc/gc_cpp.h>
+# include <gc/gc_allocator.h>
+
+#else
+
+/* Some dummy aliases for Boehm GC definitions to reduce the number of
+   #ifdefs. */
+
+template<typename T>
+using traceable_allocator = std::allocator<T>;
+
+template<typename T>
+using gc_allocator = std::allocator<T>;
+
+# define GC_MALLOC_ATOMIC std::malloc
+# define GC_STRDUP strdup
+
+struct gc
+{};
+
+#endif
+
 namespace nix {
 
 /**
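With these fallback definitions in place, downstream code can name `traceable_allocator` and the `GC_*` macros unconditionally. A minimal sketch of what that enables (illustration only; nothing below is part of the diff, and it assumes the header above is included):

```cpp
// Illustration only: with the fallback aliases above, allocator-aware
// containers and the GC_* macros need no #if HAVE_BOEHMGC guard. In a
// non-GC build, traceable_allocator<T> is plain std::allocator<T> and
// GC_STRDUP is just strdup.
#include <list>
#include <string>
#include <cstring>

using TraceableStrings = std::list<std::string, traceable_allocator<std::string>>;

inline char * duplicate(const char * s)
{
    return GC_STRDUP(s);   // Boehm's GC_strdup with GC enabled, strdup otherwise
}
```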
@@ -1,5 +1,4 @@
 #include "eval.hh"
-#include "eval-gc.hh"
 #include "eval-settings.hh"
 #include "primops.hh"
 #include "print-options.hh"

@@ -39,16 +38,6 @@
 # include <sys/resource.h>
 #endif
 
-#if HAVE_BOEHMGC
-
-# define GC_INCLUDE_NEW
-
-# include <gc/gc.h>
-# include <gc/gc_cpp.h>
-# include <gc/gc_allocator.h>
-
-#endif
-
 #include "strings-inline.hh"
 
 using json = nlohmann::json;
@@ -58,11 +47,7 @@ namespace nix {
 static char * allocString(size_t size)
 {
     char * t;
-#if HAVE_BOEHMGC
     t = (char *) GC_MALLOC_ATOMIC(size);
-#else
-    t = (char *) malloc(size);
-#endif
     if (!t) throw std::bad_alloc();
     return t;
 }

@@ -71,11 +56,7 @@ static char * allocString(size_t size)
 static char * dupString(const char * s)
 {
     char * t;
-#if HAVE_BOEHMGC
     t = GC_STRDUP(s);
-#else
-    t = strdup(s);
-#endif
     if (!t) throw std::bad_alloc();
     return t;
 }

@@ -99,11 +80,7 @@ static const char * makeImmutableString(std::string_view s)
 
 RootValue allocRootValue(Value * v)
 {
-#if HAVE_BOEHMGC
     return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v);
-#else
-    return std::make_shared<Value *>(v);
-#endif
 }
 
 // Pretty print types for assertion errors
@@ -139,11 +139,7 @@ struct Constant
     bool impureOnly = false;
 };
 
-#if HAVE_BOEHMGC
-typedef std::map<std::string, Value *, std::less<std::string>, traceable_allocator<std::pair<const std::string, Value *> > > ValMap;
-#else
-typedef std::map<std::string, Value *> ValMap;
-#endif
+typedef std::map<std::string, Value *, std::less<std::string>, traceable_allocator<std::pair<const std::string, Value *> > > ValMap;
 
 typedef std::unordered_map<PosIdx, DocComment> DocCommentMap;
 

@@ -329,21 +325,13 @@ private:
     /**
     * A cache from path names to parse trees.
     */
-#if HAVE_BOEHMGC
    typedef std::unordered_map<SourcePath, Expr *, std::hash<SourcePath>, std::equal_to<SourcePath>, traceable_allocator<std::pair<const SourcePath, Expr *>>> FileParseCache;
-#else
-    typedef std::unordered_map<SourcePath, Expr *> FileParseCache;
-#endif
     FileParseCache fileParseCache;
 
     /**
     * A cache from path names to values.
     */
-#if HAVE_BOEHMGC
    typedef std::unordered_map<SourcePath, Value, std::hash<SourcePath>, std::equal_to<SourcePath>, traceable_allocator<std::pair<const SourcePath, Value>>> FileEvalCache;
-#else
-    typedef std::unordered_map<SourcePath, Value> FileEvalCache;
-#endif
     FileEvalCache fileEvalCache;
 
     /**
@@ -2,28 +2,15 @@
 
 #include <boost/container/small_vector.hpp>
 
-#if HAVE_BOEHMGC
-
-#include <gc/gc.h>
-#include <gc/gc_cpp.h>
-#include <gc/gc_allocator.h>
-
-#endif
+#include "value.hh"
 
 namespace nix {
 
-struct Value;
-
 /**
  * A GC compatible vector that may used a reserved portion of `nItems` on the stack instead of allocating on the heap.
  */
-#if HAVE_BOEHMGC
 template <typename T, size_t nItems>
 using SmallVector = boost::container::small_vector<T, nItems, traceable_allocator<T>>;
-#else
-template <typename T, size_t nItems>
-using SmallVector = boost::container::small_vector<T, nItems>;
-#endif
 
 /**
  * A vector of value pointers. See `SmallVector`.

@@ -39,4 +26,4 @@ using SmallValueVector = SmallVector<Value *, nItems>;
 template <size_t nItems>
 using SmallTemporaryValueVector = SmallVector<Value, nItems>;
 
 }
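A short usage sketch of the alias defined above (illustration only; the buffer size and names are hypothetical, not part of the diff):

```cpp
// Illustration only, assuming the SmallVector alias above is in scope: up to
// 8 elements live on the stack, larger sizes spill to a heap allocation that
// is GC-traceable when Boehm GC is enabled.
void collectSomeValues(nix::Value * v)
{
    nix::SmallVector<nix::Value *, 8> scratch;
    scratch.push_back(v);
    // ... pass scratch.data() / scratch.size() to an evaluation helper ...
}
```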
@@ -83,11 +83,7 @@ public:
 };
 
 
-#if HAVE_BOEHMGC
 typedef std::list<PackageInfo, traceable_allocator<PackageInfo>> PackageInfos;
-#else
-typedef std::list<PackageInfo> PackageInfos;
-#endif
 
 
 /**
@@ -631,11 +631,7 @@ struct CompareValues
 };
 
 
-#if HAVE_BOEHMGC
 typedef std::list<Value *, gc_allocator<Value *>> ValueList;
-#else
-typedef std::list<Value *> ValueList;
-#endif
 
 
 static Bindings::const_iterator getAttr(

@@ -3136,7 +3132,7 @@ static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value * * arg
         std::optional<ListBuilder> list;
     };
 
-    std::map<Symbol, Item> attrsSeen;
+    std::map<Symbol, Item, std::less<Symbol>, traceable_allocator<std::pair<const Symbol, Item>>> attrsSeen;
 
     state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.zipAttrsWith");
     state.forceList(*args[1], pos, "while evaluating the second argument passed to builtins.zipAttrsWith");
@@ -4,15 +4,13 @@
 #include <cassert>
 #include <span>
 
+#include "eval-gc.hh"
 #include "symbol-table.hh"
 #include "value/context.hh"
 #include "source-path.hh"
 #include "print-options.hh"
 #include "checked-arithmetic.hh"
 
-#if HAVE_BOEHMGC
-#include <gc/gc_allocator.h>
-#endif
-
 #include <nlohmann/json_fwd.hpp>
 
 namespace nix {

@@ -498,15 +496,9 @@ void Value::mkBlackhole()
 }
 
 
-#if HAVE_BOEHMGC
 typedef std::vector<Value *, traceable_allocator<Value *>> ValueVector;
 typedef std::unordered_map<Symbol, Value *, std::hash<Symbol>, std::equal_to<Symbol>, traceable_allocator<std::pair<const Symbol, Value *>>> ValueMap;
 typedef std::map<Symbol, ValueVector, std::less<Symbol>, traceable_allocator<std::pair<const Symbol, ValueVector>>> ValueVectorMap;
-#else
-typedef std::vector<Value *> ValueVector;
-typedef std::unordered_map<Symbol, Value *> ValueMap;
-typedef std::map<Symbol, ValueVector> ValueVectorMap;
-#endif
 
 
 /**
@@ -13,13 +13,17 @@
 #include <git2/describe.h>
 #include <git2/errors.h>
 #include <git2/global.h>
+#include <git2/indexer.h>
 #include <git2/object.h>
+#include <git2/odb.h>
 #include <git2/refs.h>
 #include <git2/remote.h>
 #include <git2/repository.h>
 #include <git2/revparse.h>
 #include <git2/status.h>
 #include <git2/submodule.h>
+#include <git2/sys/odb_backend.h>
+#include <git2/sys/mempack.h>
 #include <git2/tree.h>
 
 #include <iostream>

@@ -76,6 +80,9 @@ typedef std::unique_ptr<git_status_list, Deleter<git_status_list_free>> StatusList;
 typedef std::unique_ptr<git_remote, Deleter<git_remote_free>> Remote;
 typedef std::unique_ptr<git_config, Deleter<git_config_free>> GitConfig;
 typedef std::unique_ptr<git_config_iterator, Deleter<git_config_iterator_free>> ConfigIterator;
+typedef std::unique_ptr<git_odb, Deleter<git_odb_free>> ObjectDb;
+typedef std::unique_ptr<git_packbuilder, Deleter<git_packbuilder_free>> PackBuilder;
+typedef std::unique_ptr<git_indexer, Deleter<git_indexer_free>> Indexer;
 
 // A helper to ensure that we don't leak objects returned by libgit2.
 template<typename T>
@@ -159,11 +166,60 @@ static Object peelToTreeOrBlob(git_object * obj)
     return peelObject<Object>(obj, GIT_OBJECT_TREE);
 }
 
+struct PackBuilderContext {
+    std::exception_ptr exception;
+
+    void handleException(const char * activity, int errCode)
+    {
+        switch (errCode) {
+        case GIT_OK:
+            break;
+        case GIT_EUSER:
+            if (!exception)
+                panic("PackBuilderContext::handleException: user error, but exception was not set");
+
+            std::rethrow_exception(exception);
+        default:
+            throw Error("%s: %i, %s", Uncolored(activity), errCode, git_error_last()->message);
+        }
+    }
+};
+
+extern "C" {
+
+/**
+ * A `git_packbuilder_progress` implementation that aborts the pack building if needed.
+ */
+static int packBuilderProgressCheckInterrupt(int stage, uint32_t current, uint32_t total, void *payload)
+{
+    PackBuilderContext & args = * (PackBuilderContext *) payload;
+    try {
+        checkInterrupt();
+        return GIT_OK;
+    } catch (const std::exception & e) {
+        args.exception = std::current_exception();
+        return GIT_EUSER;
+    }
+};
+static git_packbuilder_progress PACKBUILDER_PROGRESS_CHECK_INTERRUPT = &packBuilderProgressCheckInterrupt;
+
+} // extern "C"
+
 struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
 {
     /** Location of the repository on disk. */
     std::filesystem::path path;
+    /**
+     * libgit2 repository. Note that new objects are not written to disk,
+     * because we are using a mempack backend. For writing to disk, see
+     * `flush()`, which is also called by `GitFileSystemObjectSink::sync()`.
+     */
     Repository repo;
+    /**
+     * In-memory object store for efficient batched writing to packfiles.
+     * Owned by `repo`.
+     */
+    git_odb_backend * mempack_backend;
 
     GitRepoImpl(std::filesystem::path _path, bool create, bool bare)
         : path(std::move(_path))
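The `PackBuilderContext` above exists because a C++ exception must not unwind through a C callback driven by libgit2: the callback stores `std::current_exception()` and returns `GIT_EUSER`, and `handleException` rethrows it once control is back on the C++ side. A stripped-down, self-contained sketch of the same pattern against a toy C-style API (all names below are hypothetical, not from the diff):

```cpp
// Toy illustration of the pattern: the callback catches everything, stashes
// std::current_exception(), and returns an error code; the C++ caller checks
// the code and rethrows after the C call has returned.
#include <exception>
#include <stdexcept>
#include <cstdio>

struct CallbackContext { std::exception_ptr exception; };

static void mayThrow() { throw std::runtime_error("interrupted"); } // stand-in for checkInterrupt()

extern "C" int progressCallback(void * payload)
{
    auto & ctx = *static_cast<CallbackContext *>(payload);
    try {
        mayThrow();
        return 0;    // keep going (like GIT_OK)
    } catch (...) {
        ctx.exception = std::current_exception();
        return -1;   // ask the C library to abort (like GIT_EUSER)
    }
}

int main()
{
    CallbackContext ctx;
    if (progressCallback(&ctx) != 0 && ctx.exception) {
        try { std::rethrow_exception(ctx.exception); }
        catch (const std::exception & e) { std::printf("rethrown: %s\n", e.what()); }
    }
}
```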
@@ -177,6 +233,17 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
             if (git_repository_init(Setter(repo), path.string().c_str(), bare))
                 throw Error("creating Git repository '%s': %s", path, git_error_last()->message);
         }
+
+        ObjectDb odb;
+        if (git_repository_odb(Setter(odb), repo.get()))
+            throw Error("getting Git object database: %s", git_error_last()->message);
+
+        // mempack_backend will be owned by the repository, so we are not expected to free it ourselves.
+        if (git_mempack_new(&mempack_backend))
+            throw Error("creating mempack backend: %s", git_error_last()->message);
+
+        if (git_odb_add_backend(odb.get(), mempack_backend, 999))
+            throw Error("adding mempack backend to Git object database: %s", git_error_last()->message);
     }
 
     operator git_repository * ()

@@ -184,6 +251,62 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
         return repo.get();
     }
 
+    void flush() override {
+        checkInterrupt();
+
+        git_buf buf = GIT_BUF_INIT;
+        Finally _disposeBuf { [&] { git_buf_dispose(&buf); } };
+        PackBuilder packBuilder;
+        PackBuilderContext packBuilderContext;
+        git_packbuilder_new(Setter(packBuilder), *this);
+        git_packbuilder_set_callbacks(packBuilder.get(), PACKBUILDER_PROGRESS_CHECK_INTERRUPT, &packBuilderContext);
+        git_packbuilder_set_threads(packBuilder.get(), 0 /* autodetect */);
+
+        packBuilderContext.handleException(
+            "preparing packfile",
+            git_mempack_write_thin_pack(mempack_backend, packBuilder.get())
+        );
+        checkInterrupt();
+        packBuilderContext.handleException(
+            "writing packfile",
+            git_packbuilder_write_buf(&buf, packBuilder.get())
+        );
+        checkInterrupt();
+
+        std::string repo_path = std::string(git_repository_path(repo.get()));
+        while (!repo_path.empty() && repo_path.back() == '/')
+            repo_path.pop_back();
+        std::string pack_dir_path = repo_path + "/objects/pack";
+
+        // TODO (performance): could the indexing be done in a separate thread?
+        // we'd need a more streaming variation of
+        // git_packbuilder_write_buf, or incur the cost of
+        // copying parts of the buffer to a separate thread.
+        // (synchronously on the git_packbuilder_write_buf thread)
+        Indexer indexer;
+        git_indexer_progress stats;
+        if (git_indexer_new(Setter(indexer), pack_dir_path.c_str(), 0, nullptr, nullptr))
+            throw Error("creating git packfile indexer: %s", git_error_last()->message);
+
+        // TODO: provide index callback for checkInterrupt() termination
+        // though this is about an order of magnitude faster than the packbuilder
+        // expect up to 1 sec latency due to uninterruptible git_indexer_append.
+        constexpr size_t chunkSize = 128 * 1024;
+        for (size_t offset = 0; offset < buf.size; offset += chunkSize) {
+            if (git_indexer_append(indexer.get(), buf.ptr + offset, std::min(chunkSize, buf.size - offset), &stats))
+                throw Error("appending to git packfile index: %s", git_error_last()->message);
+            checkInterrupt();
+        }
+
+        if (git_indexer_commit(indexer.get(), &stats))
+            throw Error("committing git packfile index: %s", git_error_last()->message);
+
+        if (git_mempack_reset(mempack_backend))
+            throw Error("resetting git mempack backend: %s", git_error_last()->message);
+
+        checkInterrupt();
+    }
+
     uint64_t getRevCount(const Hash & rev) override
     {
         std::unordered_set<git_oid> done;
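The indexing loop above feeds the pack buffer to `git_indexer_append` in 128 KiB chunks so that `checkInterrupt()` can run between calls rather than only after the whole buffer has been consumed. The general shape of that loop, detached from libgit2 (illustration only; names are hypothetical):

```cpp
// Generic chunked-consumption loop: process a large buffer slice by slice
// and poll for interruption between slices instead of only at the very end.
#include <algorithm>
#include <cstddef>
#include <functional>

void consumeInChunks(
    const char * data, size_t size, size_t chunkSize,
    const std::function<void(const char *, size_t)> & consume,
    const std::function<void()> & checkInterrupt)
{
    for (size_t offset = 0; offset < size; offset += chunkSize) {
        consume(data + offset, std::min(chunkSize, size - offset));
        checkInterrupt();   // may throw to abort the whole operation
    }
}
```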
@@ -1006,12 +1129,14 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink
             git_tree_entry_filemode(entry));
     }
 
-    Hash sync() override
+    Hash flush() override
     {
         updateBuilders({});
 
         auto [oid, _name] = popBuilder();
 
+        repo->flush();
+
         return toHash(oid);
     }
 };
@@ -7,12 +7,16 @@ namespace nix {
 
 namespace fetchers { struct PublicKey; }
 
+/**
+ * A sink that writes into a Git repository. Note that nothing may be written
+ * until `flush()` is called.
+ */
 struct GitFileSystemObjectSink : ExtendedFileSystemObjectSink
 {
     /**
      * Flush builder and return a final Git hash.
      */
-    virtual Hash sync() = 0;
+    virtual Hash flush() = 0;
 };
 
 struct GitRepo

@@ -80,6 +84,8 @@ struct GitRepo
 
     virtual ref<GitFileSystemObjectSink> getFileSystemObjectSink() = 0;
 
+    virtual void flush() = 0;
+
     virtual void fetch(
         const std::string & url,
         const std::string & refspec,
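A hedged sketch of the call sequence this contract implies; the caller below is hypothetical, and the real call sites are updated in the hunks that follow:

```cpp
// Hypothetical caller, using only the declarations above: feed file system
// objects into the sink, then call flush() exactly once; nothing reaches the
// repository before that call.
Hash importToGit(ref<GitRepo> repo)
{
    auto sink = repo->getFileSystemObjectSink();
    // ... stream files and directories into *sink here ...
    return sink->flush();   // writes the accumulated objects, returns the tree hash
}
```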
@@ -261,11 +261,12 @@ struct GitArchiveInputScheme : InputScheme
         auto tarballCache = getTarballCache();
         auto parseSink = tarballCache->getFileSystemObjectSink();
         auto lastModified = unpackTarfileToSink(archive, *parseSink);
+        auto tree = parseSink->flush();
 
         act.reset();
 
         TarballInfo tarballInfo {
-            .treeHash = tarballCache->dereferenceSingletonDirectory(parseSink->sync()),
+            .treeHash = tarballCache->dereferenceSingletonDirectory(tree),
             .lastModified = lastModified
         };
 

@@ -170,6 +170,7 @@ static DownloadTarballResult downloadTarball_(
     auto tarballCache = getTarballCache();
     auto parseSink = tarballCache->getFileSystemObjectSink();
     auto lastModified = unpackTarfileToSink(archive, *parseSink);
+    auto tree = parseSink->flush();
 
     act.reset();
 

@@ -184,7 +185,7 @@
     } else {
         infoAttrs.insert_or_assign("etag", res->etag);
         infoAttrs.insert_or_assign("treeHash",
-            tarballCache->dereferenceSingletonDirectory(parseSink->sync()).gitRev());
+            tarballCache->dereferenceSingletonDirectory(tree).gitRev());
         infoAttrs.insert_or_assign("lastModified", uint64_t(lastModified));
         if (res->immutableUrl)
             infoAttrs.insert_or_assign("immutableUrl", *res->immutableUrl);
@@ -12,6 +12,7 @@
 #include <mutex>
 #include <thread>
 
+#include <curl/curl.h>
 #include <nlohmann/json.hpp>
 
 #ifndef _WIN32

@@ -363,10 +364,21 @@ void initLibStore(bool loadConfig) {
 
     preloadNSS();
 
+    /* Because of an objc quirk[1], calling curl_global_init for the first time
+       after fork() will always result in a crash.
+       Up until now the solution has been to set OBJC_DISABLE_INITIALIZE_FORK_SAFETY
+       for every nix process to ignore that error.
+       Instead of working around that error we address it at the core -
+       by calling curl_global_init here, which should mean curl will already
+       have been initialized by the time we try to do so in a forked process.
+
+       [1] https://github.com/apple-oss-distributions/objc4/blob/01edf1705fbc3ff78a423cd21e03dfc21eb4d780/runtime/objc-initialize.mm#L614-L636
+    */
+    curl_global_init(CURL_GLOBAL_ALL);
+#if __APPLE__
     /* On macOS, don't use the per-session TMPDIR (as set e.g. by
        sshd). This breaks build users because they don't have access
        to the TMPDIR, in particular in ‘nix-store --serve’. */
-#if __APPLE__
     if (hasPrefix(defaultTempDir(), "/var/folders/"))
         unsetenv("TMPDIR");
 #endif
@@ -427,7 +427,7 @@ extra_pkg_config_variables = {
 }
 
 # Working around https://github.com/mesonbuild/meson/issues/13584
-if host_machine.system() != 'macos'
+if host_machine.system() != 'darwin'
   extra_pkg_config_variables += {
     'localstatedir' : get_option('localstatedir'),
   }

@@ -28,6 +28,7 @@ subdir('build-utils-meson/subprojects')
 # HAVE_LUTIMES 1`. The `#define` is unconditional, 0 for not found and 1
 # for found. One therefore uses it with `#if` not `#ifdef`.
 check_funcs = [
+  'close_range',
   # Optionally used for changing the mtime of symlinks.
   'lutimes',
   # Optionally used for creating pipes on Unix

@@ -50,6 +51,14 @@ endforeach
 
 subdir('build-utils-meson/threads')
 
+# Check if -latomic is needed
+# This is needed for std::atomic on some platforms
+# We did not manage to test this reliably on all platforms, so we hardcode
+# it for now.
+if host_machine.cpu_family() == 'arm'
+  deps_other += cxx.find_library('atomic')
+endif
+
 if host_machine.system() == 'windows'
   socket = cxx.find_library('ws2_32')
   deps_other += socket
@@ -121,10 +121,13 @@ void Pipe::create()
 //////////////////////////////////////////////////////////////////////
 
 #if __linux__ || __FreeBSD__
+// In future we can use a syscall wrapper, but at the moment musl and older glibc version don't support it.
 static int unix_close_range(unsigned int first, unsigned int last, int flags)
 {
+#if !HAVE_CLOSE_RANGE
     return syscall(SYS_close_range, first, last, (unsigned int)flags);
+#else
+    return close_range(first, last, flags);
+#endif
 }
 #endif
 
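A hedged sketch of how such a wrapper is typically used; the helper below is hypothetical and not part of the commit:

```cpp
// Hypothetical helper: close every descriptor from startFd upwards, e.g.
// before exec'ing a child. On Linux/FreeBSD it uses the wrapper defined
// above; elsewhere it falls back to closing a fixed range one by one.
static void closeFdsFrom(unsigned int startFd)
{
#if __linux__ || __FreeBSD__
    unix_close_range(startFd, ~0U, 0);
#else
    for (unsigned int fd = startFd; fd < 1024; ++fd)
        close(fd);   // assumes <unistd.h> is already included in this file
#endif
}
```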
@@ -84,6 +84,12 @@ static inline bool getInterrupted()
     return unix::_isInterrupted;
 }
 
+/**
+ * Throw `Interrupted` exception if the process has been interrupted.
+ *
+ * Call this in long-running loops and between slow operations to terminate
+ * them as needed.
+ */
 void inline checkInterrupt()
 {
     using namespace unix;
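A short usage sketch matching the new doc comment; the surrounding types and the slow operation are placeholders, not from the diff:

```cpp
// Hypothetical long-running loop: poll for interruption once per iteration so
// a SIGINT surfaces promptly as an Interrupted exception instead of only
// after the whole batch. Item and processOne() are placeholders;
// checkInterrupt() is the function documented above.
#include <vector>

struct Item { /* ... */ };
void processOne(const Item &);   // placeholder for a slow operation

void processAll(const std::vector<Item> & items)
{
    for (auto & item : items) {
        checkInterrupt();        // throws if the user interrupted the process
        processOne(item);
    }
}
```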
@@ -243,8 +243,9 @@ std::string base64Decode(std::string_view s)
         if (c == '\n') continue;
 
         char digit = base64DecodeChars[(unsigned char) c];
-        if (digit == npos)
-            throw Error("invalid character in Base64 string: '%c'", c);
+        if (digit == npos) {
+            throw Error("invalid character in Base64 string: '%c' in '%s'", c, s.data());
+        }
 
         bits += 6;
         d = d << 6 | digit;
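For context on the accumulator these lines sit next to: each Base64 character contributes 6 bits, and a byte is emitted whenever at least 8 bits are buffered. A small self-contained illustration (not from the diff):

```cpp
// Decodes the two Base64 digits for "Tw" (19 and 48, the alphabet positions
// of 'T' and 'w') into the single byte 'O' using the same accumulation as in
// base64Decode above.
#include <cstdio>
#include <initializer_list>

int main()
{
    unsigned int d = 0, bits = 0;
    for (unsigned int digit : {19u, 48u}) {
        bits += 6;
        d = d << 6 | digit;               // shift in 6 more bits
    }
    // 12 bits are buffered; the top 8 form the decoded byte.
    char byte = (char) (d >> (bits - 8) & 0xff);
    std::printf("%c\n", byte);            // prints 'O'
}
```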
@@ -3,7 +3,7 @@ R""(
 # Description
 
 This command reads from standard input a JSON representation of a
-[store derivation] to which an [*installable*](./nix.md#installables) evaluates.
+[store derivation].
 
 Store derivations are used internally by Nix. They are store paths with
 extension `.drv` that represent the build-time dependency graph to which
@@ -22,8 +22,20 @@ R""(
   # nix flake archive --json --dry-run nixops
   ```
 
+* Upload all flake inputs to a different machine for remote evaluation
+
+  ```
+  # nix flake archive --to ssh://some-machine
+  ```
+
+  On the remote machine the flake can then be accessed via its store path. That's computed like this:
+
+  ```
+  # nix flake metadata --json | jq -r '.path'
+  ```
+
 # Description
 
-FIXME
+Copy a flake and all its inputs to a store. This is useful i.e. to evaluate flakes on a different host.
 
 )""

@@ -25,6 +25,19 @@ R""(
     → 'github:NixOS/nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293' (2023-07-05)
   ```
 
+* Update multiple inputs:
+
+  ```console
+  # nix flake update nixpkgs nixpkgs-unstable
+  warning: updating lock file '/home/myself/repos/testflake/flake.lock':
+  • Updated input 'nixpkgs':
+      'github:nixos/nixpkgs/8f7492cce28977fbf8bd12c72af08b1f6c7c3e49' (2024-09-14)
+    → 'github:nixos/nixpkgs/086b448a5d54fd117f4dc2dee55c9f0ff461bdc1' (2024-09-16)
+  • Updated input 'nixpkgs-unstable':
+      'github:nixos/nixpkgs/345c263f2f53a3710abe117f28a5cb86d0ba4059' (2024-09-13)
+    → 'github:nixos/nixpkgs/99dc8785f6a0adac95f5e2ab05cc2e1bf666d172' (2024-09-16)
+  ```
+
 * Update only a single input of a flake in a different directory:
 
   ```console
@@ -2,7 +2,6 @@
 #include "current-process.hh"
 #include "command.hh"
 #include "common-args.hh"
-#include "eval-gc.hh"
 #include "eval.hh"
 #include "eval-settings.hh"
 #include "globals.hh"