Mirror of https://github.com/NixOS/nix (synced 2025-06-29 06:21:14 +02:00)

Merge remote-tracking branch 'origin/master' into flip-coroutines

Commit 257470b58d
342 changed files with 6839 additions and 2721 deletions
@@ -36,7 +36,7 @@ public:
     Co init() override;
     Co realisationFetched(std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub);

-    void timedOut(Error && ex) override { abort(); };
+    void timedOut(Error && ex) override { unreachable(); };

     std::string key() override;

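A recurring change throughout this merge replaces bare abort() calls in "can't happen" branches with unreachable(). The sketch below is not the repository's actual definition (which lives in its error/util headers and may differ); it only illustrates the general shape such a helper can take: a [[noreturn]] function that records where the supposedly dead branch was hit before terminating.

    #include <cstdio>
    #include <cstdlib>

    // Hedged sketch only: unlike a bare abort(), this reports the source
    // location of the branch that was assumed unreachable.
    [[noreturn]] inline void unreachable(
        const char * file = __builtin_FILE(),
        int line = __builtin_LINE())
    {
        std::fprintf(stderr, "unreachable code reached at %s:%d\n", file, line);
        std::abort();
    }

The benefit over abort() is purely diagnostic: the crash message points at the branch that was assumed dead instead of leaving only a core dump.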
@@ -400,12 +400,12 @@ public:

    virtual void handleChildOutput(Descriptor fd, std::string_view data)
    {
-        abort();
+        unreachable();
    }

    virtual void handleEOF(Descriptor fd)
    {
-        abort();
+        unreachable();
    }

    void trace(std::string_view s);
@@ -145,8 +145,10 @@ Goal::Co PathSubstitutionGoal::init()
    /* None left. Terminate this goal and let someone else deal
       with it. */

-    worker.failedSubstitutions++;
-    worker.updateProgress();
+    if (substituterFailed) {
+        worker.failedSubstitutions++;
+        worker.updateProgress();
+    }

    /* Hack: don't indicate failure if there were no substituters.
       In that case the calling derivation should just do a
@@ -158,7 +160,7 @@ Goal::Co PathSubstitutionGoal::init()
 }


-Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool& substituterFailed)
+Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed)
 {
     trace("all references realised");

@@ -50,7 +50,7 @@ public:
     PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
     ~PathSubstitutionGoal();

-    void timedOut(Error && ex) override { abort(); };
+    void timedOut(Error && ex) override { unreachable(); };

    /**
     * We prepend "a$" to the key name to ensure substitution goals
@@ -66,7 +66,7 @@ public:
     */
    Co init() override;
    Co gotInfo();
-   Co tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool& substituterFailed);
+   Co tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed);
    Co finished();

    /**
@@ -216,7 +216,7 @@ void Worker::childStarted(GoalPtr goal, const std::set<MuxablePipePollState::Com
            nrLocalBuilds++;
            break;
        default:
-           abort();
+           unreachable();
        }
    }
 }
@@ -239,7 +239,7 @@ void Worker::childTerminated(Goal * goal, bool wakeSleepers)
            nrLocalBuilds--;
            break;
        default:
-           abort();
+           unreachable();
        }
    }

@@ -33,7 +33,7 @@ Sink & operator << (Sink & sink, const Logger::Fields & fields)
            sink << f.i;
        else if (f.type == Logger::Field::tString)
            sink << f.s;
-       else abort();
+       else unreachable();
    }
    return sink;
 }
@@ -167,7 +167,7 @@ struct TunnelSink : Sink
 {
    Sink & to;
    TunnelSink(Sink & to) : to(to) { }
-   void operator () (std::string_view data)
+   void operator () (std::string_view data) override
    {
        to << STDERR_WRITE;
        writeString(data, to);
@@ -1025,19 +1025,20 @@ void processConnection(
    #endif

    /* Exchange the greeting. */
-   WorkerProto::Version clientVersion =
+   auto [protoVersion, features] =
        WorkerProto::BasicServerConnection::handshake(
-           to, from, PROTOCOL_VERSION);
+           to, from, PROTOCOL_VERSION, WorkerProto::allFeatures);

-   if (clientVersion < 0x10a)
+   if (protoVersion < 0x10a)
        throw Error("the Nix client version is too old");

    WorkerProto::BasicServerConnection conn;
    conn.to = std::move(to);
    conn.from = std::move(from);
-   conn.protoVersion = clientVersion;
+   conn.protoVersion = protoVersion;
+   conn.features = features;

-   auto tunnelLogger = new TunnelLogger(conn.to, clientVersion);
+   auto tunnelLogger = new TunnelLogger(conn.to, protoVersion);
    auto prevLogger = nix::logger;
    // FIXME
    if (!recursive)
@@ -54,6 +54,8 @@ struct curlFileTransfer : public FileTransfer
        bool done = false; // whether either the success or failure function has been called
        Callback<FileTransferResult> callback;
        CURL * req = 0;
+       // buffer to accompany the `req` above
+       char errbuf[CURL_ERROR_SIZE];
        bool active = false; // whether the handle has been added to the multi object
        std::string statusMsg;

@@ -71,7 +73,10 @@ struct curlFileTransfer : public FileTransfer

        curl_off_t writtenToSink = 0;

+       std::chrono::steady_clock::time_point startTime = std::chrono::steady_clock::now();
+
        inline static const std::set<long> successfulStatuses {200, 201, 204, 206, 304, 0 /* other protocol */};
+
        /* Get the HTTP status code, or 0 for other protocols. */
        long getHTTPStatus()
        {
@@ -367,16 +372,23 @@ struct curlFileTransfer : public FileTransfer
            if (writtenToSink)
                curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink);

+           curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf);
+           errbuf[0] = 0;
+
            result.data.clear();
            result.bodySize = 0;
        }

        void finish(CURLcode code)
        {
+           auto finishTime = std::chrono::steady_clock::now();
+
            auto httpStatus = getHTTPStatus();

-           debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes",
-               request.verb(), request.uri, code, httpStatus, result.bodySize);
+           debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes, duration = %.2f s",
+               request.verb(), request.uri, code, httpStatus, result.bodySize,
+               std::chrono::duration_cast<std::chrono::milliseconds>(finishTime - startTime).count() / 1000.0f
+               );

            appendCurrentUrl();

@@ -477,8 +489,8 @@ struct curlFileTransfer : public FileTransfer
                    code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
                : FileTransferError(err,
                    std::move(response),
-                   "unable to %s '%s': %s (%d)",
-                   request.verb(), request.uri, curl_easy_strerror(code), code);
+                   "unable to %s '%s': %s (%d) %s",
+                   request.verb(), request.uri, curl_easy_strerror(code), code, errbuf);

            /* If this is a transient error, then maybe retry the
               download after a while. If we're writing to a
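The errbuf changes in the hunks above follow the standard libcurl pattern: register a caller-owned CURL_ERROR_SIZE buffer via CURLOPT_ERRORBUFFER before performing the transfer, clear its first byte, and read it after a failure for a more specific message than curl_easy_strerror() alone gives. A minimal standalone sketch (not Nix code; the URL is a placeholder):

    #include <curl/curl.h>
    #include <cstdio>

    int main()
    {
        CURL * req = curl_easy_init();
        if (!req) return 1;

        char errbuf[CURL_ERROR_SIZE];
        errbuf[0] = 0; // libcurl only writes to it on error, so start empty

        curl_easy_setopt(req, CURLOPT_URL, "https://example.invalid/"); // placeholder URL
        curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf);

        CURLcode code = curl_easy_perform(req);
        if (code != CURLE_OK)
            std::fprintf(stderr, "unable to download: %s (%d) %s\n",
                curl_easy_strerror(code), code, errbuf);

        curl_easy_cleanup(req);
        return code == CURLE_OK ? 0 : 1;
    }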
@@ -851,8 +863,10 @@ void FileTransfer::download(
           buffer). We don't wait forever to prevent stalling the
           download thread. (Hopefully sleeping will throttle the
           sender.) */
-       if (state->data.size() > 1024 * 1024) {
+       if (state->data.size() > fileTransferSettings.downloadBufferSize) {
            debug("download buffer is full; going to sleep");
+           static bool haveWarned = false;
+           warnOnce(haveWarned, "download buffer is full; consider increasing the 'download-buffer-size' setting");
            state.wait_for(state->request, std::chrono::seconds(10));
        }

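The warnOnce(haveWarned, ...) call added above is the usual "emit this warning at most once" idiom built around a flag that outlives the call site (here a function-local static). A hedged sketch of the idea; the repository's own helper may differ in signature, formatting and thread-safety:

    #include <cstdio>

    // Sketch: print a warning only the first time; later calls become no-ops
    // because the caller passes the same flag each time.
    inline void warnOnce(bool & haveWarned, const char * msg)
    {
        if (!haveWarned) {
            haveWarned = true;
            std::fprintf(stderr, "warning: %s\n", msg);
        }
    }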
@@ -47,6 +47,12 @@ struct FileTransferSettings : Config

    Setting<unsigned int> tries{this, 5, "download-attempts",
        "How often Nix will attempt to download a file before giving up."};
+
+   Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size",
+       R"(
+         The size of Nix's internal download buffer during `curl` transfers. If data is
+         not processed quickly enough to exceed the size of this buffer, downloads may stall.
+       )"};
 };

 extern FileTransferSettings fileTransferSettings;
@@ -559,7 +559,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
                   non-blocking flag from the server socket, so
                   explicitly make it blocking. */
                if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
-                   abort();
+                   panic("Could not set non-blocking flag on client socket");

                while (true) {
                    try {
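The fcntl() line kept as context here is the usual POSIX way to clear O_NONBLOCK: read the current flag word with F_GETFL, mask the bit out, and write the result back with F_SETFL. A standalone sketch of that pattern (fd is whatever descriptor you want to make blocking):

    #include <fcntl.h>
    #include <stdio.h>

    // Clear O_NONBLOCK on `fd`; returns 0 on success, -1 on failure.
    int makeBlocking(int fd)
    {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1) return -1;
        if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) return -1;
        return 0;
    }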
@@ -891,7 +891,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)

 void LocalStore::autoGC(bool sync)
 {
-#ifdef HAVE_STATVFS
+#if HAVE_STATVFS
    static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE");

    auto getAvail = [this]() -> uint64_t {
@@ -64,7 +64,6 @@ Settings::Settings()
    , nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
    , nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
    , nixUserConfFiles(getUserConfigFiles())
-   , nixBinDir(canonPath(getEnvNonEmpty("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
    , nixManDir(canonPath(NIX_MAN_DIR))
    , nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
 {
@@ -95,34 +94,6 @@ Settings::Settings()
    sandboxPaths = tokenizeString<StringSet>("/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
    allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
 #endif
-
-   /* Set the build hook location
-
-      For builds we perform a self-invocation, so Nix has to be self-aware.
-      That is, it has to know where it is installed. We don't think it's sentient.
-
-      Normally, nix is installed according to `nixBinDir`, which is set at compile time,
-      but can be overridden. This makes for a great default that works even if this
-      code is linked as a library into some other program whose main is not aware
-      that it might need to be a build remote hook.
-
-      However, it may not have been installed at all. For example, if it's a static build,
-      there's a good chance that it has been moved out of its installation directory.
-      That makes `nixBinDir` useless. Instead, we'll query the OS for the path to the
-      current executable, using `getSelfExe()`.
-
-      As a last resort, we resort to `PATH`. Hopefully we find a `nix` there that's compatible.
-      If you're porting Nix to a new platform, that might be good enough for a while, but
-      you'll want to improve `getSelfExe()` to work on your platform.
-   */
-   std::string nixExePath = nixBinDir + "/nix";
-   if (!pathExists(nixExePath)) {
-       nixExePath = getSelfExe().value_or("nix");
-   }
-   buildHook = {
-       nixExePath,
-       "__build-remote",
-   };
 }

 void loadConfFile(AbstractConfig & config)
@@ -297,7 +268,7 @@ template<> std::string BaseSetting<SandboxMode>::to_string() const
    if (value == smEnabled) return "true";
    else if (value == smRelaxed) return "relaxed";
    else if (value == smDisabled) return "false";
-   else abort();
+   else unreachable();
 }

 template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
@@ -84,11 +84,6 @@ public:
     */
    std::vector<Path> nixUserConfFiles;

-   /**
-    * The directory where the main programs are stored.
-    */
-   Path nixBinDir;
-
    /**
     * The directory where the man pages are stored.
     */
@@ -246,7 +241,7 @@ public:
        )",
        {"build-timeout"}};

-   Setting<Strings> buildHook{this, {}, "build-hook",
+   Setting<Strings> buildHook{this, {"nix", "__build-remote"}, "build-hook",
        R"(
          The path to the helper program that executes remote builds.

@@ -286,7 +281,7 @@ public:
          For backward compatibility, `ssh://` may be omitted.
          The hostname may be an alias defined in `~/.ssh/config`.

-         2. A comma-separated list of [Nix system types](@docroot@/contributing/hacking.md#system-type).
+         2. A comma-separated list of [Nix system types](@docroot@/development/building.md#system-type).
          If omitted, this defaults to the local platform type.

          > **Example**
@@ -866,13 +861,13 @@ public:

          - `ca-derivations`

-           Included by default if the [`ca-derivations` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-ca-derivations) is enabled.
+           Included by default if the [`ca-derivations` experimental feature](@docroot@/development/experimental-features.md#xp-feature-ca-derivations) is enabled.

            This system feature is implicitly required by derivations with the [`__contentAddressed` attribute](@docroot@/language/advanced-attributes.md#adv-attr-__contentAddressed).

          - `recursive-nix`

-           Included by default if the [`recursive-nix` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-recursive-nix) is enabled.
+           Included by default if the [`recursive-nix` experimental feature](@docroot@/development/experimental-features.md#xp-feature-recursive-nix) is enabled.

          - `uid-range`

@@ -1131,7 +1126,10 @@ public:
        )"};

    Setting<uint64_t> maxFree{
-       this, std::numeric_limits<uint64_t>::max(), "max-free",
+       // n.b. this is deliberately int64 max rather than uint64 max because
+       // this goes through the Nix language JSON parser and thus needs to be
+       // representable in Nix language integers.
+       this, std::numeric_limits<int64_t>::max(), "max-free",
        R"(
          When a garbage collection is triggered by the `min-free` option, it
          stops as soon as `max-free` bytes are available. The default is
@@ -1221,7 +1219,10 @@ public:

    Setting<uint64_t> warnLargePathThreshold{
        this,
-       std::numeric_limits<uint64_t>::max(),
+       // n.b. this is deliberately int64 max rather than uint64 max because
+       // this goes through the Nix language JSON parser and thus needs to be
+       // representable in Nix language integers.
+       std::numeric_limits<int64_t>::max(),
        "warn-large-path-threshold",
        R"(
          Warn when copying a path larger than this number of bytes to the Nix store
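The comments added in these two hunks rest on a representability argument: Nix language integers are signed 64-bit, so a default of 2^64 - 1 would not survive a round trip through the JSON/Nix-language layer, while 2^63 - 1 (9223372036854775807) does. A small check of that arithmetic:

    #include <cstdint>
    #include <limits>

    // int64 max fits in a signed 64-bit Nix language integer; uint64 max does not.
    static_assert(std::numeric_limits<int64_t>::max() == 9223372036854775807LL);
    static_assert(std::numeric_limits<uint64_t>::max() >
                  static_cast<uint64_t>(std::numeric_limits<int64_t>::max()));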
@@ -71,7 +71,6 @@ libstore_CXXFLAGS += \
 -DNIX_STATE_DIR=\"$(NIX_ROOT)$(localstatedir)/nix\" \
 -DNIX_LOG_DIR=\"$(NIX_ROOT)$(localstatedir)/log/nix\" \
 -DNIX_CONF_DIR=\"$(NIX_ROOT)$(sysconfdir)/nix\" \
- -DNIX_BIN_DIR=\"$(NIX_ROOT)$(bindir)\" \
 -DNIX_MAN_DIR=\"$(NIX_ROOT)$(mandir)\" \
 -DLSOF=\"$(NIX_ROOT)$(lsof)\"

@@ -21,7 +21,7 @@ configdata = configuration_data()
 # TODO rename, because it will conflict with downstream projects
 configdata.set_quoted('PACKAGE_VERSION', meson.project_version())

-configdata.set_quoted('SYSTEM', host_machine.system())
+configdata.set_quoted('SYSTEM', host_machine.cpu_family() + '-' + host_machine.system())

 deps_private_maybe_subproject = [
 ]
@@ -73,6 +73,7 @@ subdir('build-utils-meson/threads')
 boost = dependency(
   'boost',
   modules : ['container'],
+  include_type: 'system',
 )
 # boost is a public dependency, but not a pkg-config dependency unfortunately, so we
 # put in `deps_other`.
@@ -113,7 +114,7 @@ if aws_s3.found()
      '-laws-cpp-sdk-core',
      '-laws-crt-cpp',
    ],
-  )
+  ).as_system('system')
 endif
 deps_other += aws_s3

|
|||
path_opts = [
|
||||
# Meson built-ins.
|
||||
'datadir',
|
||||
'bindir',
|
||||
'mandir',
|
||||
'libdir',
|
||||
'includedir',
|
||||
|
@@ -372,7 +372,6 @@ cpp_str_defines = {
   'NIX_STATE_DIR': state_dir / 'nix',
   'NIX_LOG_DIR': log_dir,
   'NIX_CONF_DIR': sysconfdir / 'nix',
-  'NIX_BIN_DIR': bindir,
   'NIX_MAN_DIR': mandir,
 }

@@ -94,7 +94,7 @@ static bool componentsLT(const std::string_view c1, const std::string_view c2)
 }


-int compareVersions(const std::string_view v1, const std::string_view v2)
+std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2)
 {
    auto p1 = v1.begin();
    auto p2 = v2.begin();
@@ -102,11 +102,11 @@ int compareVersions(const std::string_view v1, const std::string_view v2)
    while (p1 != v1.end() || p2 != v2.end()) {
        auto c1 = nextComponent(p1, v1.end());
        auto c2 = nextComponent(p2, v2.end());
-       if (componentsLT(c1, c2)) return -1;
-       else if (componentsLT(c2, c1)) return 1;
+       if (componentsLT(c1, c2)) return std::strong_ordering::less;
+       else if (componentsLT(c2, c1)) return std::strong_ordering::greater;
    }

-   return 0;
+   return std::strong_ordering::equal;
 }


@@ -30,7 +30,7 @@ typedef std::list<DrvName> DrvNames;

 std::string_view nextComponent(std::string_view::const_iterator & p,
    const std::string_view::const_iterator end);
-int compareVersions(const std::string_view v1, const std::string_view v2);
+std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2);
 DrvNames drvNamesFromArgs(const Strings & opArgs);

 }
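Switching compareVersions() from int to std::strong_ordering keeps existing call sites readable because a strong_ordering value still compares against 0. A hedged usage sketch; the stand-in body below is not the real component-wise implementation, it only has the same return type so the call-site pattern compiles:

    #include <compare>
    #include <string_view>

    // Stand-in with the same signature as the compareVersions() in this diff;
    // the real implementation compares dotted version components, not raw strings.
    std::strong_ordering compareVersions(std::string_view v1, std::string_view v2)
    {
        return v1.compare(v2) <=> 0;
    }

    // Call sites keep their familiar shape: a strong_ordering compares against 0.
    bool isNewer(std::string_view candidate, std::string_view installed)
    {
        return compareVersions(candidate, installed) > 0;
    }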
@@ -164,7 +164,7 @@ public:
    Cache & getCache(State & state, const std::string & uri)
    {
        auto i = state.caches.find(uri);
-       if (i == state.caches.end()) abort();
+       if (i == state.caches.end()) unreachable();
        return i->second;
    }

@@ -211,7 +211,7 @@ public:

        {
            auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
-           if (!r.next()) { abort(); }
+           if (!r.next()) { unreachable(); }
            ret.id = (int) r.getInt(0);
        }

@@ -66,10 +66,7 @@ mkMesonDerivation (finalAttrs: {
  ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp
    # There have been issues building these dependencies
    ++ lib.optional (stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin))
-     (aws-sdk-cpp.override {
-       apis = ["s3" "transfer"];
-       customMemoryManagement = false;
-     })
+     aws-sdk-cpp
  ;

  propagatedBuildInputs = [
@@ -101,12 +98,8 @@ mkMesonDerivation (finalAttrs: {
    LDFLAGS = "-fuse-ld=gold";
  };

- enableParallelBuilding = true;
-
- separateDebugInfo = !stdenv.hostPlatform.isStatic;
-
  strictDeps = true;

  hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";

  meta = {
@@ -73,8 +73,11 @@ void RemoteStore::initConnection(Connection & conn)
        StringSink saved;
        TeeSource tee(conn.from, saved);
        try {
-           conn.protoVersion = WorkerProto::BasicClientConnection::handshake(
-               conn.to, tee, PROTOCOL_VERSION);
+           auto [protoVersion, features] = WorkerProto::BasicClientConnection::handshake(
+               conn.to, tee, PROTOCOL_VERSION,
+               WorkerProto::allFeatures);
+           conn.protoVersion = protoVersion;
+           conn.features = features;
        } catch (SerialisationError & e) {
            /* In case the other side is waiting for our input, close
               it. */
@@ -88,6 +91,9 @@ void RemoteStore::initConnection(Connection & conn)

        static_cast<WorkerProto::ClientHandshakeInfo &>(conn) = conn.postHandshake(*this);

+       for (auto & feature : conn.features)
+           debug("negotiated feature '%s'", feature);
+
        auto ex = conn.processStderrReturn();
        if (ex) std::rethrow_exception(ex);
    }
@@ -220,8 +220,6 @@ std::string S3BinaryCacheStoreConfig::doc()

 struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual S3BinaryCacheStore
 {
-   std::string bucketName;
-
    Stats stats;

    S3Helper s3Helper;
@@ -8,7 +8,7 @@
 #include <optional>
 #include <string>

-namespace Aws { namespace Client { class ClientConfiguration; } }
+namespace Aws { namespace Client { struct ClientConfiguration; } }
 namespace Aws { namespace S3 { class S3Client; } }

 namespace nix {
|
@ -922,7 +922,7 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor
|
|||
const Store::Stats & Store::getStats()
|
||||
{
|
||||
{
|
||||
auto state_(state.lock());
|
||||
auto state_(state.readLock());
|
||||
stats.pathInfoCacheSize = state_->pathInfoCache.size();
|
||||
}
|
||||
return stats;
|
||||
|
|
|
@@ -201,7 +201,7 @@ protected:
        LRUCache<std::string, PathInfoCacheValue> pathInfoCache;
    };

-   Sync<State> state;
+   SharedSync<State> state;

    std::shared_ptr<NarInfoDiskCache> diskCache;

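Moving `state` from Sync to SharedSync is what lets a read-only path such as getStats() take a shared (reader) lock via readLock() instead of an exclusive one. The general idea, sketched with the standard library rather than Nix's own wrappers:

    #include <map>
    #include <mutex>
    #include <shared_mutex>
    #include <string>

    class StoreLike
    {
        mutable std::shared_mutex mutex;
        std::map<std::string, int> pathInfoCache;

    public:
        // Readers share the lock, so they never block each other.
        size_t cacheSize() const
        {
            std::shared_lock lock(mutex);
            return pathInfoCache.size();
        }

        // Writers still take the lock exclusively.
        void insert(const std::string & path, int info)
        {
            std::unique_lock lock(mutex);
            pathInfoCache[path] = info;
        }
    };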
@@ -4,6 +4,7 @@
 #include "file-system.hh"
 #include "child.hh"
 #include "strings.hh"
+#include "executable-path.hh"

 namespace nix {

@@ -16,11 +17,18 @@ HookInstance::HookInstance()
    if (buildHookArgs.empty())
        throw Error("'build-hook' setting is empty");

-   auto buildHook = canonPath(buildHookArgs.front());
+   std::filesystem::path buildHook = buildHookArgs.front();
    buildHookArgs.pop_front();

+   try {
+       buildHook = ExecutablePath::load().findPath(buildHook);
+   } catch (ExecutableLookupError & e) {
+       e.addTrace(nullptr, "while resolving the 'build-hook' setting'");
+       throw;
+   }
+
    Strings args;
-   args.push_back(std::string(baseNameOf(buildHook)));
+   args.push_back(buildHook.filename().string());

    for (auto & arg : buildHookArgs)
        args.push_back(arg);
@@ -59,7 +67,7 @@ HookInstance::HookInstance()
        if (dup2(builderOut.readSide.get(), 5) == -1)
            throw SysError("dupping builder's stdout/stderr");

-       execv(buildHook.c_str(), stringsToCharPtrs(args).data());
+       execv(buildHook.native().c_str(), stringsToCharPtrs(args).data());

        throw SysError("executing '%s'", buildHook);
    });
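With nixBinDir gone, the build hook is resolved like any other executable: take the configured name and, if it is not an absolute path, search the PATH entries for it (that is what ExecutablePath::load().findPath() does above). A hedged sketch of that kind of lookup using only the standard library; the helper name here is made up and the real ExecutablePath class behaves differently in details:

    #include <cstdlib>
    #include <filesystem>
    #include <optional>
    #include <sstream>
    #include <string>

    namespace fs = std::filesystem;

    // Hypothetical helper: resolve `name` against $PATH. Returns nullopt if
    // nothing matches; absolute paths are returned unchanged.
    std::optional<fs::path> findInPath(const fs::path & name)
    {
        if (name.is_absolute()) return name;

        const char * pathVar = std::getenv("PATH");
        if (!pathVar) return std::nullopt;

        std::stringstream ss(pathVar);
        std::string dir;
        while (std::getline(ss, dir, ':')) {   // ':' separator, Unix convention
            fs::path candidate = fs::path(dir) / name;
            std::error_code ec;
            if (fs::exists(candidate, ec)) return candidate;
        }
        return std::nullopt;
    }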
@@ -102,7 +102,14 @@ void handleDiffHook(
    }
 }

+// We want $HOME to be un-creatable in the sandbox. On Linux,
+// you can't create anything inside /proc since it's a virtual filesystem.
+// On Darwin it seems that `/homeless-shelter` is good enough.
+#if __linux__
+const Path LocalDerivationGoal::homeDir = "/proc/homeless-shelter";
+#else
 const Path LocalDerivationGoal::homeDir = "/homeless-shelter";
+#endif


 LocalDerivationGoal::~LocalDerivationGoal()
@@ -165,7 +172,7 @@ void LocalDerivationGoal::killSandbox(bool getStats)
        buildResult.cpuSystem = stats.cpuSystem;
    }
 #else
-   abort();
+   unreachable();
 #endif
 }

@@ -1258,7 +1265,7 @@ bool LocalDerivationGoal::isAllowed(const DerivedPath & req)
 struct RestrictedStoreConfig : virtual LocalFSStoreConfig
 {
    using LocalFSStoreConfig::LocalFSStoreConfig;
-   const std::string name() { return "Restricted Store"; }
+   const std::string name() override { return "Restricted Store"; }
 };

 /* A wrapper around LocalStore that only allows building/querying of
@@ -1702,10 +1709,13 @@ void setupSeccomp()
            throw SysError("unable to add seccomp rule");
    }

-   /* Prevent builders from creating EAs or ACLs. Not all filesystems
+   /* Prevent builders from using EAs or ACLs. Not all filesystems
       support these, and they're not allowed in the Nix store because
       they're not representable in the NAR serialisation. */
-   if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(setxattr), 0) != 0 ||
+   if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(getxattr), 0) != 0 ||
+       seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(lgetxattr), 0) != 0 ||
+       seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(fgetxattr), 0) != 0 ||
+       seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(setxattr), 0) != 0 ||
        seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(lsetxattr), 0) != 0 ||
        seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(fsetxattr), 0) != 0)
        throw SysError("unable to add seccomp rule");
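The hunk above widens the xattr filter from the set* syscalls to the get* ones as well, all returning ENOTSUP instead of killing the builder. For reference, the minimal libseccomp flow those rules plug into looks roughly like this (a hedged sketch with only two rules, not the full setupSeccomp()):

    #include <errno.h>
    #include <seccomp.h>

    // Deny xattr reads/writes with ENOTSUP while allowing everything else.
    int installXattrFilter()
    {
        scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
        if (!ctx) return -1;

        int r = 0;
        r |= seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(getxattr), 0);
        r |= seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(setxattr), 0);
        if (r == 0) r = seccomp_load(ctx);

        seccomp_release(ctx);
        return r;
    }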
@@ -49,6 +49,7 @@ R""(
 (if (param "_ALLOW_LOCAL_NETWORKING")
   (begin
     (allow network* (remote ip "localhost:*"))
+    (allow network-inbound (local ip "*:*")) ; required to bind and listen

     ; Allow access to /etc/resolv.conf (which is a symlink to
     ; /private/var/run/resolv.conf).
@@ -45,7 +45,7 @@ bool lockFile(Descriptor desc, LockType lockType, bool wait)
    if (lockType == ltRead) type = LOCK_SH;
    else if (lockType == ltWrite) type = LOCK_EX;
    else if (lockType == ltNone) type = LOCK_UN;
-   else abort();
+   else unreachable();

    if (wait) {
        while (flock(desc, type) != 0) {
@@ -5,6 +5,8 @@

 namespace nix {

+const std::set<WorkerProto::Feature> WorkerProto::allFeatures{};
+
 WorkerProto::BasicClientConnection::~BasicClientConnection()
 {
    try {
@@ -137,8 +139,21 @@ void WorkerProto::BasicClientConnection::processStderr(bool * daemonException, S
    }
 }

-WorkerProto::Version
-WorkerProto::BasicClientConnection::handshake(BufferedSink & to, Source & from, WorkerProto::Version localVersion)
+static std::set<WorkerProto::Feature>
+intersectFeatures(const std::set<WorkerProto::Feature> & a, const std::set<WorkerProto::Feature> & b)
+{
+    std::set<WorkerProto::Feature> res;
+    for (auto & x : a)
+        if (b.contains(x))
+            res.insert(x);
+    return res;
+}
+
+std::tuple<WorkerProto::Version, std::set<WorkerProto::Feature>> WorkerProto::BasicClientConnection::handshake(
+    BufferedSink & to,
+    Source & from,
+    WorkerProto::Version localVersion,
+    const std::set<WorkerProto::Feature> & supportedFeatures)
 {
    to << WORKER_MAGIC_1 << localVersion;
    to.flush();
@@ -153,11 +168,24 @@ WorkerProto::BasicClientConnection::handshake(BufferedSink & to, Source & from,
    if (GET_PROTOCOL_MINOR(daemonVersion) < 10)
        throw Error("the Nix daemon version is too old");

-   return std::min(daemonVersion, localVersion);
+   auto protoVersion = std::min(daemonVersion, localVersion);
+
+   /* Exchange features. */
+   std::set<WorkerProto::Feature> daemonFeatures;
+   if (GET_PROTOCOL_MINOR(protoVersion) >= 38) {
+       to << supportedFeatures;
+       to.flush();
+       daemonFeatures = readStrings<std::set<WorkerProto::Feature>>(from);
+   }
+
+   return {protoVersion, intersectFeatures(daemonFeatures, supportedFeatures)};
 }

-WorkerProto::Version
-WorkerProto::BasicServerConnection::handshake(BufferedSink & to, Source & from, WorkerProto::Version localVersion)
+std::tuple<WorkerProto::Version, std::set<WorkerProto::Feature>> WorkerProto::BasicServerConnection::handshake(
+    BufferedSink & to,
+    Source & from,
+    WorkerProto::Version localVersion,
+    const std::set<WorkerProto::Feature> & supportedFeatures)
 {
    unsigned int magic = readInt(from);
    if (magic != WORKER_MAGIC_1)
@@ -165,7 +193,18 @@ WorkerProto::BasicServerConnection::handshake(BufferedSink & to, Source & from,
    to << WORKER_MAGIC_2 << localVersion;
    to.flush();
    auto clientVersion = readInt(from);
-   return std::min(clientVersion, localVersion);
+
+   auto protoVersion = std::min(clientVersion, localVersion);
+
+   /* Exchange features. */
+   std::set<WorkerProto::Feature> clientFeatures;
+   if (GET_PROTOCOL_MINOR(protoVersion) >= 38) {
+       clientFeatures = readStrings<std::set<WorkerProto::Feature>>(from);
+       to << supportedFeatures;
+       to.flush();
+   }
+
+   return {protoVersion, intersectFeatures(clientFeatures, supportedFeatures)};
 }

 WorkerProto::ClientHandshakeInfo WorkerProto::BasicClientConnection::postHandshake(const StoreDirConfig & store)
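Both handshakes end the same way: the negotiated protocol version is the minimum of the two announced versions, and the negotiated feature set is the intersection of what each side said it supports (the feature exchange only happens at minor version 38 or later). A toy illustration of that negotiation, using plain std::set in place of the worker-protocol types and made-up feature names:

    #include <algorithm>
    #include <cassert>
    #include <iterator>
    #include <set>
    #include <string>

    int main()
    {
        using Feature = std::string;

        unsigned clientVersion = (1 << 8) | 38;
        unsigned daemonVersion = (1 << 8) | 39;        // hypothetical newer daemon
        std::set<Feature> clientFeatures{"a", "b"};    // made-up feature names
        std::set<Feature> daemonFeatures{"b", "c"};

        unsigned protoVersion = std::min(clientVersion, daemonVersion);

        std::set<Feature> negotiated;
        std::set_intersection(
            clientFeatures.begin(), clientFeatures.end(),
            daemonFeatures.begin(), daemonFeatures.end(),
            std::inserter(negotiated, negotiated.end()));

        assert(protoVersion == ((1 << 8) | 38));       // older side wins
        assert(negotiated == std::set<Feature>{"b"});  // only shared features survive
        return 0;
    }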
@@ -23,6 +23,11 @@ struct WorkerProto::BasicConnection
     */
    WorkerProto::Version protoVersion;

+   /**
+    * The set of features that both sides support.
+    */
+   std::set<Feature> features;
+
    /**
     * Coercion to `WorkerProto::ReadConn`. This makes it easy to use the
     * factored out serve protocol serializers with a
@@ -72,8 +77,8 @@ struct WorkerProto::BasicClientConnection : WorkerProto::BasicConnection
    /**
     * Establishes connection, negotiating version.
     *
-    * @return the version provided by the other side of the
-    * connection.
+    * @return the minimum version supported by both sides and the set
+    * of protocol features supported by both sides.
     *
     * @param to Taken by reference to allow for various error handling
     * mechanisms.
@@ -82,8 +87,15 @@ struct WorkerProto::BasicClientConnection : WorkerProto::BasicConnection
     * handling mechanisms.
     *
     * @param localVersion Our version which is sent over
+    *
+    * @param features The protocol features that we support
     */
-   static Version handshake(BufferedSink & to, Source & from, WorkerProto::Version localVersion);
+   // FIXME: this should probably be a constructor.
+   static std::tuple<Version, std::set<Feature>> handshake(
+       BufferedSink & to,
+       Source & from,
+       WorkerProto::Version localVersion,
+       const std::set<Feature> & supportedFeatures);

    /**
     * After calling handshake, must call this to exchange some basic
@@ -138,8 +150,15 @@ struct WorkerProto::BasicServerConnection : WorkerProto::BasicConnection
     * handling mechanisms.
     *
     * @param localVersion Our version which is sent over
+    *
+    * @param features The protocol features that we support
     */
-   static WorkerProto::Version handshake(BufferedSink & to, Source & from, WorkerProto::Version localVersion);
+   // FIXME: this should probably be a constructor.
+   static std::tuple<Version, std::set<Feature>> handshake(
+       BufferedSink & to,
+       Source & from,
+       WorkerProto::Version localVersion,
+       const std::set<Feature> & supportedFeatures);

    /**
     * After calling handshake, must call this to exchange some basic
@@ -11,7 +11,9 @@ namespace nix {
 #define WORKER_MAGIC_1 0x6e697863
 #define WORKER_MAGIC_2 0x6478696f

-#define PROTOCOL_VERSION (1 << 8 | 37)
+/* Note: you generally shouldn't change the protocol version. Define a
+   new `WorkerProto::Feature` instead. */
+#define PROTOCOL_VERSION (1 << 8 | 38)
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

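PROTOCOL_VERSION packs the major version into the high byte and the minor into the low byte, so (1 << 8 | 38) is 0x126: GET_PROTOCOL_MAJOR yields 0x100 and GET_PROTOCOL_MINOR yields 38, which is exactly the threshold the new feature exchange checks against. Spelled out:

    // (1 << 8 | 38) packs major 1 into the high byte and minor 38 into the low byte.
    static_assert((1 << 8 | 38) == 0x126);
    // Mirrors GET_PROTOCOL_MAJOR / GET_PROTOCOL_MINOR from this header.
    static_assert(((1 << 8 | 38) & 0xff00) == 0x100);
    static_assert(((1 << 8 | 38) & 0x00ff) == 38);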
@@ -131,6 +133,10 @@ struct WorkerProto
    {
        WorkerProto::Serialise<T>::write(store, conn, t);
    }
+
+   using Feature = std::string;
+
+   static const std::set<Feature> allFeatures;
 };

 enum struct WorkerProto::Op : uint64_t