Mirror of https://github.com/NixOS/nix
Enable HTTP/2 support
The binary cache store can now use HTTP/2 to do lookups. This is much more efficient than HTTP/1.1 due to multiplexing: we can issue many requests in parallel over a single TCP connection. Thus it's no longer necessary to use a bunch of concurrent TCP connections (25 by default).

For example, downloading 802 .narinfo files from https://cache.nixos.org/, using a single TCP connection, takes 11.8s with HTTP/1.1, but only 0.61s with HTTP/2.

This did require a fairly substantial rewrite of the Downloader class to use the curl multi interface, because otherwise curl wouldn't be able to do multiplexing for us. As a bonus, we get connection reuse even with HTTP/1.1. All downloads are now handled by a single worker thread. Clients call Downloader::enqueueDownload() to tell the worker thread to start the download, getting a std::future to the result.
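The enqueue-and-wait hand-off described in the last paragraph can be sketched roughly as follows. This is a minimal illustration of the worker-thread/std::future pattern, not the actual Downloader implementation from this commit: the DownloadSketch and DownloadSketchResult names are invented for the example, and the real worker loop drives libcurl's multi interface rather than fabricating a result.

#include <condition_variable>
#include <future>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <utility>

// Illustrative result type; the real Downloader returns richer metadata.
struct DownloadSketchResult { std::string data; };

class DownloadSketch
{
    struct Item {
        std::string url;
        std::promise<DownloadSketchResult> promise;
    };

    std::queue<Item> pending;
    std::mutex mutex;
    std::condition_variable wakeup;
    bool quit = false;
    std::thread worker;

    void workerLoop()
    {
        for (;;) {
            Item item;
            {
                std::unique_lock<std::mutex> lock(mutex);
                wakeup.wait(lock, [&] { return quit || !pending.empty(); });
                if (quit && pending.empty()) return;
                item = std::move(pending.front());
                pending.pop();
            }
            /* In the real code, this is where the curl multi interface
               drives all outstanding transfers over shared connections;
               here we just fulfil the promise with a dummy payload. */
            item.promise.set_value({"<contents of " + item.url + ">"});
        }
    }

public:
    DownloadSketch() : worker([this] { workerLoop(); }) { }

    ~DownloadSketch()
    {
        {
            std::lock_guard<std::mutex> lock(mutex);
            quit = true;
        }
        wakeup.notify_all();
        worker.join();
    }

    /* Hand a URL to the worker thread; the caller gets a future it can
       wait on whenever it actually needs the data. */
    std::future<DownloadSketchResult> enqueueDownload(const std::string & url)
    {
        Item item{url, {}};
        auto result = item.promise.get_future();
        {
            std::lock_guard<std::mutex> lock(mutex);
            pending.push(std::move(item));
        }
        wakeup.notify_one();
        return result;
    }
};

A caller would then write something like auto data = sketch.enqueueDownload(url).get(), blocking only on its own request while the single worker thread keeps every outstanding transfer moving.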
parent a75d11a7e6
commit 90ad02bf62
9 changed files with 433 additions and 210 deletions
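On the libcurl side, the multiplexing mentioned above is what the curl multi interface makes possible: many easy handles can share one TCP connection once HTTP/2 is negotiated. The stand-alone sketch below shows the relevant knobs (CURLMOPT_PIPELINING with CURLPIPE_MULTIPLEX, CURLMOPT_MAX_HOST_CONNECTIONS, CURL_HTTP_VERSION_2TLS); it is not the Downloader rewrite from this commit, it assumes libcurl was built with HTTP/2 (nghttp2) support, and the nix-cache-info URLs stand in for the .narinfo lookups. The diff that follows then switches HttpBinaryCacheStore from its per-store Pool<Downloader> to the shared downloader and the new DownloadRequest API.

#include <curl/curl.h>
#include <cstdio>
#include <string>
#include <vector>

// Discard response bodies; for cache lookups we only care that they arrive.
static size_t discard(char *, size_t size, size_t nmemb, void *)
{
    return size * nmemb;
}

int main()
{
    curl_global_init(CURL_GLOBAL_ALL);
    CURLM * multi = curl_multi_init();

    // Let requests share one connection as separate HTTP/2 streams.
    curl_multi_setopt(multi, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
    curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, 1L);

    std::vector<std::string> urls = {
        "https://cache.nixos.org/nix-cache-info", // placeholder lookup
        "https://cache.nixos.org/nix-cache-info", // placeholder lookup
    };

    std::vector<CURL *> handles;
    for (auto & url : urls) {
        CURL * h = curl_easy_init();
        curl_easy_setopt(h, CURLOPT_URL, url.c_str());
        curl_easy_setopt(h, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
        curl_easy_setopt(h, CURLOPT_WRITEFUNCTION, discard);
        curl_multi_add_handle(multi, h);
        handles.push_back(h);
    }

    // Drive all transfers to completion over the shared connection.
    int running = 1;
    while (running) {
        curl_multi_perform(multi, &running);
        if (running)
            curl_multi_wait(multi, nullptr, 0, 1000, nullptr);
    }

    for (auto h : handles) {
        curl_multi_remove_handle(multi, h);
        curl_easy_cleanup(h);
    }
    curl_multi_cleanup(multi);
    curl_global_cleanup();
    printf("done\n");
    return 0;
}

Capping CURLMOPT_MAX_HOST_CONNECTIONS at 1 makes the multiplexing visible: every request has to ride the same connection as a separate HTTP/2 stream instead of opening its own socket.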
@@ -13,17 +13,12 @@ private:
 
     Path cacheUri;
 
-    Pool<Downloader> downloaders;
-
 public:
 
     HttpBinaryCacheStore(
         const Params & params, const Path & _cacheUri)
         : BinaryCacheStore(params)
         , cacheUri(_cacheUri)
-        , downloaders(
-            std::numeric_limits<size_t>::max(),
-            []() { return makeDownloader(); })
     {
         if (cacheUri.back() == '/')
             cacheUri.pop_back();
@@ -54,12 +49,11 @@ protected:
     bool fileExists(const std::string & path) override
     {
         try {
-            auto downloader(downloaders.get());
-            DownloadOptions options;
-            options.showProgress = DownloadOptions::no;
-            options.head = true;
-            options.tries = 5;
-            downloader->download(cacheUri + "/" + path, options);
+            DownloadRequest request(cacheUri + "/" + path);
+            request.showProgress = DownloadRequest::no;
+            request.head = true;
+            request.tries = 5;
+            getDownloader()->download(request);
             return true;
         } catch (DownloadError & e) {
             /* S3 buckets return 403 if a file doesn't exist and the
@@ -77,13 +71,11 @@ protected:
 
     std::shared_ptr<std::string> getFile(const std::string & path) override
     {
-        auto downloader(downloaders.get());
-        DownloadOptions options;
-        options.showProgress = DownloadOptions::no;
-        options.tries = 5;
-        options.baseRetryTimeMs = 1000;
+        DownloadRequest request(cacheUri + "/" + path);
+        request.showProgress = DownloadRequest::no;
+        request.tries = 8;
         try {
-            return downloader->download(cacheUri + "/" + path, options).data;
+            return getDownloader()->download(request).data;
         } catch (DownloadError & e) {
             if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                 return 0;