
Refactor unix domain socket store config (#11109)

Following what is outlined in #10766, refactor the uds-remote-store so that
the member variables (state) live in the config object rather than in the
store itself.
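
As a minimal sketch of that split (hypothetical names only; the real classes, presumably UDSRemoteStoreConfig / UDSRemoteStore, are more involved, but the store inherits virtually from its config, as the analogous S3 diff below also shows):

```cpp
// Sketch only: the state (e.g. a socket path) is owned by the config object,
// and the store reaches it through the config it inherits from, rather than
// keeping its own copy of that member.
#include <string>

struct ConfigSketch
{
    std::string path; // state lives here, on the config
};

struct StoreSketch : virtual ConfigSketch
{
    // No duplicate `path` member on the store; it reads the config's.
    std::string renderUri() const { return "unix://" + path; }
};
```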

Additionally, the config object gains a new, required constructor that takes
a scheme & authority.
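
A rough sketch of what such a constructor could look like (simplified and hypothetical: the type name, the `path` member, the fallback socket path, and the error type are illustrative, modelled loosely on the analogous S3BinaryCacheStoreConfig change in the diff below):

```cpp
#include <stdexcept>
#include <string>
#include <string_view>

// Sketch only: a store config constructed from the URI scheme and authority.
struct UdsConfigSketch
{
    std::string path; // the socket path, i.e. the URI authority

    UdsConfigSketch(std::string_view scheme, std::string_view authority)
        // An empty authority falls back to a default socket location
        // (illustrative value, not necessarily the real default).
        : path(authority.empty() ? "/nix/var/nix/daemon-socket/socket"
                                 : std::string(authority))
    {
        if (scheme != "unix")
            throw std::invalid_argument("this store only supports the 'unix' scheme");
    }
};
```

The point of taking the scheme & authority in the constructor is that validation (and any defaulting) happens when the config is built, rather than later inside the store, which is exactly what the S3 hunks below do with the bucket-name check.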

Tests are commented out because of linking errors with the current config system;
once there is a new config system we can re-enable them.

Co-authored-by: John Ericson <John.Ericson@Obsidian.Systems>
Farid Zakaria 2024-07-17 23:32:27 -04:00 committed by GitHub
parent 17051ca80a
commit 57399bfc0e
26 changed files with 571 additions and 215 deletions

@@ -1,5 +1,7 @@
 #if ENABLE_S3
+#include <assert.h>
 #include "s3.hh"
 #include "s3-binary-cache-store.hh"
 #include "nar-info.hh"
@@ -190,76 +192,31 @@ S3BinaryCacheStore::S3BinaryCacheStore(const Params & params)
     , BinaryCacheStore(params)
 { }
-struct S3BinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
+S3BinaryCacheStoreConfig::S3BinaryCacheStoreConfig(
+    std::string_view uriScheme,
+    std::string_view bucketName,
+    const Params & params)
+    : StoreConfig(params)
+    , BinaryCacheStoreConfig(params)
+    , bucketName(bucketName)
 {
-    using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
+    // Don't want to use the AWS SDK in the header, so we check the default
+    // here. TODO do this better after we overhaul the store settings
+    // system.
+    assert(std::string{defaultRegion} == std::string{Aws::Region::US_EAST_1});
-    const Setting<std::string> profile{this, "", "profile",
-        R"(
-          The name of the AWS configuration profile to use. By default
-          Nix will use the `default` profile.
-        )"};
+    if (bucketName.empty())
+        throw UsageError("`%s` store requires a bucket name in its Store URI", uriScheme);
+}
-    const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region",
-        R"(
-          The region of the S3 bucket. If your bucket is not in
-          `us-east-1`, you should always explicitly specify the region
-          parameter.
-        )"};
+std::string S3BinaryCacheStoreConfig::doc()
+{
+    return
+      #include "s3-binary-cache-store.md"
+      ;
+}
-    const Setting<std::string> scheme{this, "", "scheme",
-        R"(
-          The scheme used for S3 requests, `https` (default) or `http`. This
-          option allows you to disable HTTPS for binary caches which don't
-          support it.
-          > **Note**
-          >
-          > HTTPS should be used if the cache might contain sensitive
-          > information.
-        )"};
-    const Setting<std::string> endpoint{this, "", "endpoint",
-        R"(
-          The URL of the endpoint of an S3-compatible service such as MinIO.
-          Do not specify this setting if you're using Amazon S3.
-          > **Note**
-          >
-          > This endpoint must support HTTPS and will use path-based
-          > addressing instead of virtual host based addressing.
-        )"};
-    const Setting<std::string> narinfoCompression{this, "", "narinfo-compression",
-        "Compression method for `.narinfo` files."};
-    const Setting<std::string> lsCompression{this, "", "ls-compression",
-        "Compression method for `.ls` files."};
-    const Setting<std::string> logCompression{this, "", "log-compression",
-        R"(
-          Compression method for `log/*` files. It is recommended to
-          use a compression method supported by most web browsers
-          (e.g. `brotli`).
-        )"};
-    const Setting<bool> multipartUpload{
-        this, false, "multipart-upload",
-        "Whether to use multi-part uploads."};
-    const Setting<uint64_t> bufferSize{
-        this, 5 * 1024 * 1024, "buffer-size",
-        "Size (in bytes) of each part in multi-part uploads."};
-    const std::string name() override { return "S3 Binary Cache Store"; }
-    std::string doc() override
-    {
-        return
-          #include "s3-binary-cache-store.md"
-          ;
-    }
-};
 struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual S3BinaryCacheStore
 {
@@ -275,15 +232,12 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual S3BinaryCacheStore
         const Params & params)
         : StoreConfig(params)
         , BinaryCacheStoreConfig(params)
-        , S3BinaryCacheStoreConfig(params)
+        , S3BinaryCacheStoreConfig(uriScheme, bucketName, params)
         , Store(params)
         , BinaryCacheStore(params)
         , S3BinaryCacheStore(params)
-        , bucketName(bucketName)
         , s3Helper(profile, region, scheme, endpoint)
     {
-        if (bucketName.empty())
-            throw UsageError("`%s` store requires a bucket name in its Store URI", uriScheme);
         diskCache = getNarInfoDiskCache();
     }