mirror of https://github.com/NixOS/nix
nix/src/libutil/thread-pool.cc
John Ericson cc24766fa6 Expose the nix component in header include paths
For example, instead of doing

    #include "nix/store-config.hh"
    #include "nix/derived-path.hh"

Now do

    #include "nix/store/config.hh"
    #include "nix/store/derived-path.hh"

This was originally planned in the issue, and also recently requested by
Eelco.

Most of the change is purely mechanical. There is just one small
additional issue. See how, in the example above, we took this
opportunity to also turn `<comp>-config.hh` into `<comp>/config.hh`.
Well, there was already a `nix/util/config.{cc,hh}`. Even though there
is no public configuration header for libutil (which would also be
called `nix/util/config.{cc,hh}`), that's still confusing. To avoid any
such confusion, we renamed that to `nix/util/configuration.{cc,hh}`.
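
For example, code that includes that header would presumably now write
(illustrative; not taken from the commit itself):

    #include "nix/util/configuration.hh"   // formerly nix/util/config.hh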

Finally, note that the libflake headers already did this, so we didn't
need to do anything to them. We wouldn't want to mistakenly get
`nix/flake/flake/flake.hh`!

Progress on #7876
2025-04-01 11:40:42 -04:00


#include "nix/util/thread-pool.hh"
#include "nix/util/signals.hh"
#include "nix/util/util.hh"
namespace nix {
ThreadPool::ThreadPool(size_t _maxThreads)
: maxThreads(_maxThreads)
{
if (!maxThreads) {
maxThreads = std::thread::hardware_concurrency();
if (!maxThreads) maxThreads = 1;
}
debug("starting pool of %d threads", maxThreads - 1);
}
ThreadPool::~ThreadPool()
{
    shutdown();
}

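/* Tell the workers to quit and join them. Called from the destructor
   and from process() on error; a second call is a no-op once the
   workers are gone. */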
void ThreadPool::shutdown()
{
    std::vector<std::thread> workers;
    {
        auto state(state_.lock());
        quit = true;
        std::swap(workers, state->workers);
    }

    if (workers.empty()) return;

    debug("reaping %d worker threads", workers.size());

    work.notify_all();

    for (auto & thr : workers)
        thr.join();
}

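/* Queue a work item. Spawns an extra worker thread if the backlog
   exceeds the current number of workers (counting the process()
   caller) and maxThreads has not been reached. Throws
   ThreadPoolShutDown if the pool is already shutting down. */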
void ThreadPool::enqueue(const work_t & t)
{
    auto state(state_.lock());
    if (quit)
        throw ThreadPoolShutDown("cannot enqueue a work item while the thread pool is shutting down");
    state->pending.push(t);
    /* Note: process() also executes items, so count it as a worker. */
    if (state->pending.size() > state->workers.size() + 1 && state->workers.size() + 1 < maxThreads)
        state->workers.emplace_back(&ThreadPool::doWork, this, false);
    work.notify_one();
}

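/* Drain the queue on the calling thread: run work items until nothing
   is pending or active, then rethrow the first exception recorded by
   any worker. */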
void ThreadPool::process()
{
    state_.lock()->draining = true;

    /* Do work until no more work is pending or active. */
    try {
        doWork(true);

        auto state(state_.lock());

        assert(quit);

        if (state->exception)
            std::rethrow_exception(state->exception);

    } catch (...) {
        /* In the exceptional case, some workers may still be
           active. They may be referencing the stack frame of the
           caller. So wait for them to finish. (~ThreadPool also does
           this, but it might be destroyed after objects referenced by
           the work item lambdas.) */
        shutdown();
        throw;
    }
}

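/* The worker loop, run by every pool thread and, with mainThread =
   true, by process() on the calling thread. On failure it records the
   first exception and tells the other workers to quit. */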
void ThreadPool::doWork(bool mainThread)
{
    ReceiveInterrupts receiveInterrupts;

#ifndef _WIN32 // Does Windows need anything similar for async exit handling?
    if (!mainThread)
        unix::interruptCheck = [&]() { return (bool) quit; };
#endif

    bool didWork = false;
    std::exception_ptr exc;

    while (true) {
        work_t w;
        {
            auto state(state_.lock());

            if (didWork) {
                assert(state->active);
                state->active--;

                if (exc) {
                    if (!state->exception) {
                        state->exception = exc;
                        // Tell the other workers to quit.
                        quit = true;
                        work.notify_all();
                    } else {
                        /* Print the exception, since we can't
                           propagate it. */
                        try {
                            std::rethrow_exception(exc);
                        } catch (const Interrupted &) {
                            // The interrupted state may be picked up by multiple
                            // workers, which is expected, so we should ignore
                            // it silently and let the first one bubble up,
                            // rethrown via the original state->exception.
                        } catch (const ThreadPoolShutDown &) {
                            // Similarly expected.
                        } catch (std::exception & e) {
                            ignoreExceptionExceptInterrupt();
                        }
                    }
                }
            }

            /* Wait until a work item is available or we're asked to
               quit. */
            while (true) {
                if (quit) return;
                if (!state->pending.empty()) break;
                /* If there are no active or pending items, and the
                   main thread is running process(), then no new items
                   can be added. So exit. */
                if (!state->active && state->draining) {
                    quit = true;
                    work.notify_all();
                    return;
                }
                state.wait(work);
            }

            w = std::move(state->pending.front());
            state->pending.pop();
            state->active++;
        }

        try {
            w();
        } catch (...) {
            exc = std::current_exception();
        }

        didWork = true;
    }
}

}
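
A minimal usage sketch (not part of thread-pool.cc), based on the
enqueue()/process() behaviour above and assuming `work_t` is the nullary
`std::function<void()>` declared in `nix/util/thread-pool.hh`:

    // Hypothetical caller; names other than ThreadPool, enqueue and
    // process are illustrative.
    #include "nix/util/thread-pool.hh"

    #include <atomic>
    #include <cstdio>

    void runExample()
    {
        nix::ThreadPool pool(0); // 0 => use std::thread::hardware_concurrency(), per the constructor above

        std::atomic<size_t> done{0};

        for (size_t i = 0; i < 100; ++i)
            pool.enqueue([&done]() { done++; });

        /* process() also runs items on the calling thread, waits for the
           queue to drain, and rethrows the first exception thrown by any
           work item. */
        pool.process();

        printf("completed %zu items\n", done.load());
    }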