
Merge remote-tracking branch 'origin/master' into profile-names-instead-of-index

Eelco Dolstra 2023-12-21 16:21:26 +01:00
commit 9d9d9ff0de
501 changed files with 10352 additions and 4635 deletions


@ -6,6 +6,9 @@
#include "users.hh"
#include "json-utils.hh"
#include <fstream>
#include <string>
#include <regex>
#include <glob.h>
namespace nix {
@ -77,7 +80,178 @@ std::optional<std::string> RootArgs::needsCompletion(std::string_view s)
return {};
}
void RootArgs::parseCmdline(const Strings & _cmdline)
/**
* Basically this is `typedef std::optional<Parser> Parser(std::string_view s, Strings & r);`
*
* Except we can't recursively reference the Parser typedef, so we have to write a class.
*/
struct Parser {
std::string_view remaining;
/**
 * @brief Parse the next character(s)
 *
 * @param state Set to the parser for the next step, or to nullptr when parsing is complete
 * @param r Output list that finished words are appended to
 */
virtual void operator()(std::shared_ptr<Parser> & state, Strings & r) = 0;
Parser(std::string_view s) : remaining(s) {};
virtual ~Parser() { };
};
struct ParseQuoted : public Parser {
/**
* @brief Accumulated string
*
* Parsed argument up to this point.
*/
std::string acc;
ParseQuoted(std::string_view s) : Parser(s) {};
virtual void operator()(std::shared_ptr<Parser> & state, Strings & r) override;
};
struct ParseUnquoted : public Parser {
/**
* @brief Accumulated string
*
* Parsed argument up to this point. Empty string is not representable in
* unquoted syntax, so we use it for the initial state.
*/
std::string acc;
ParseUnquoted(std::string_view s) : Parser(s) {};
virtual void operator()(std::shared_ptr<Parser> & state, Strings & r) override {
if (remaining.empty()) {
if (!acc.empty())
r.push_back(acc);
state = nullptr; // done
return;
}
switch (remaining[0]) {
case ' ': case '\t': case '\n': case '\r':
if (!acc.empty())
r.push_back(acc);
state = std::make_shared<ParseUnquoted>(ParseUnquoted(remaining.substr(1)));
return;
case '`':
if (remaining.size() > 1 && remaining[1] == '`') {
state = std::make_shared<ParseQuoted>(ParseQuoted(remaining.substr(2)));
return;
}
else
throw Error("single backtick is not a supported syntax in the nix shebang.");
// reserved characters
// meaning to be determined, or may be reserved indefinitely so that
// #!nix syntax looks unambiguous
case '$':
case '*':
case '~':
case '<':
case '>':
case '|':
case ';':
case '(':
case ')':
case '[':
case ']':
case '{':
case '}':
case '\'':
case '"':
case '\\':
throw Error("unsupported unquoted character in nix shebang: " + std::string(1, remaining[0]) + ". Use double backticks to escape?");
case '#':
if (acc.empty()) {
throw Error ("unquoted nix shebang argument cannot start with #. Use double backticks to escape?");
} else {
acc += remaining[0];
remaining = remaining.substr(1);
return;
}
default:
acc += remaining[0];
remaining = remaining.substr(1);
return;
}
assert(false);
}
};
void ParseQuoted::operator()(std::shared_ptr<Parser> &state, Strings & r) {
if (remaining.empty()) {
throw Error("unterminated quoted string in nix shebang");
}
switch (remaining[0]) {
case ' ':
if ((remaining.size() == 3 && remaining[1] == '`' && remaining[2] == '`')
|| (remaining.size() > 3 && remaining[1] == '`' && remaining[2] == '`' && remaining[3] != '`')) {
// exactly two backticks mark the end of a quoted string, but a preceding space is ignored if present.
state = std::make_shared<ParseUnquoted>(ParseUnquoted(remaining.substr(3)));
r.push_back(acc);
return;
}
else {
// just a normal space
acc += remaining[0];
remaining = remaining.substr(1);
return;
}
case '`':
// exactly two backticks mark the end of a quoted string
if ((remaining.size() == 2 && remaining[1] == '`')
|| (remaining.size() > 2 && remaining[1] == '`' && remaining[2] != '`')) {
state = std::make_shared<ParseUnquoted>(ParseUnquoted(remaining.substr(2)));
r.push_back(acc);
return;
}
// a sequence of at least 3 backticks: the first is an escape backtick and is dropped; the remaining backticks are kept verbatim
else if (remaining.size() >= 3 && remaining[1] == '`' && remaining[2] == '`') {
// ignore "escape" backtick
remaining = remaining.substr(1);
// add the rest
while (remaining.size() > 0 && remaining[0] == '`') {
acc += '`';
remaining = remaining.substr(1);
}
return;
}
else {
acc += remaining[0];
remaining = remaining.substr(1);
return;
}
default:
acc += remaining[0];
remaining = remaining.substr(1);
return;
}
assert(false);
}
Strings parseShebangContent(std::string_view s) {
Strings result;
std::shared_ptr<Parser> parserState(std::make_shared<ParseUnquoted>(ParseUnquoted(s)));
// trampoline == iterated strategy pattern
while (parserState) {
auto currentState = parserState;
(*currentState)(parserState, result);
}
return result;
}
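// Illustrative usage sketch (not part of this change): how the parser above
// splits shebang text. Unquoted words are separated by whitespace, a pair of
// backticks quotes a single argument, and a run of three or more backticks
// inside a quoted string drops the leading escape backtick and keeps the
// remaining backticks verbatim.
[[maybe_unused]] static void exampleShebangParsing()
{
    Strings words = parseShebangContent("--expr ``hello world`` x");
    // words now holds { "--expr", "hello world", "x" }
    (void) words;
}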
void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang)
{
Strings pendingArgs;
bool dashDash = false;
@ -93,6 +267,45 @@ void RootArgs::parseCmdline(const Strings & _cmdline)
}
bool argsSeen = false;
// Heuristic to see if we're invoked as a shebang script, namely:
// we have at least one argument, it names an existing executable
// file, and that file's contents start with "#!".
Strings savedArgs;
if (allowShebang){
auto script = *cmdline.begin();
try {
std::ifstream stream(script);
char shebang[3]={0,0,0};
stream.get(shebang,3);
if (strncmp(shebang,"#!",2) == 0){
for (auto pos = std::next(cmdline.begin()); pos != cmdline.end();pos++)
savedArgs.push_back(*pos);
cmdline.clear();
std::string line;
std::getline(stream,line);
static const std::string commentChars("#/\\%@*-");
std::string shebangContent;
while (std::getline(stream,line) && !line.empty() && commentChars.find(line[0]) != std::string::npos){
line = chomp(line);
std::smatch match;
// We match one space after `nix` so that we preserve indentation.
// No space is necessary for an empty line. An empty line has basically no effect.
if (std::regex_match(line, match, std::regex("^#!\\s*nix(:? |$)(.*)$")))
shebangContent += match[2].str() + "\n";
}
for (const auto & word : parseShebangContent(shebangContent)) {
cmdline.push_back(word);
}
cmdline.push_back(script);
commandBaseDir = dirOf(script);
for (auto pos = savedArgs.begin(); pos != savedArgs.end();pos++)
cmdline.push_back(*pos);
}
} catch (SysError &) { }
}
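// Example of the script layout handled above (illustrative, not from this
// change): the first line is the OS interpreter line; the following
// "#! nix" lines are collected, run through parseShebangContent(), and
// spliced into the command line ahead of the script path and the original
// arguments.
//
//     #!/usr/bin/env nix
//     #! nix shell nixpkgs#bash
//     #! nix --command bash
//     echo hello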
for (auto pos = cmdline.begin(); pos != cmdline.end(); ) {
auto arg = *pos;
@ -148,6 +361,17 @@ void RootArgs::parseCmdline(const Strings & _cmdline)
d.completer(*completions, d.n, d.prefix);
}
Path Args::getCommandBaseDir() const
{
assert(parent);
return parent->getCommandBaseDir();
}
Path RootArgs::getCommandBaseDir() const
{
return commandBaseDir;
}
bool Args::processFlag(Strings::iterator & pos, Strings::iterator end)
{
assert(pos != end);
@ -259,7 +483,7 @@ bool Args::processArgs(const Strings & args, bool finish)
if (!anyCompleted)
exp.handler.fun(ss);
/* Move the list element to the processedArgs. This is almost the same as
/* Move the list element to the processedArgs. This is almost the same as
`processedArgs.push_back(expectedArgs.front()); expectedArgs.pop_front()`,
except that it will only adjust the next and prev pointers of the list
elements, meaning the actual contents don't move in memory. This is
@ -320,36 +544,70 @@ nlohmann::json Args::toJSON()
return res;
}
static void hashTypeCompleter(AddCompletions & completions, size_t index, std::string_view prefix)
static void hashFormatCompleter(AddCompletions & completions, size_t index, std::string_view prefix)
{
for (auto & type : hashTypes)
if (hasPrefix(type, prefix))
completions.add(type);
for (auto & format : hashFormats) {
if (hasPrefix(format, prefix)) {
completions.add(format);
}
}
}
Args::Flag Args::Flag::mkHashTypeFlag(std::string && longName, HashType * ht)
{
return Flag {
.longName = std::move(longName),
.description = "hash algorithm ('md5', 'sha1', 'sha256', or 'sha512')",
.labels = {"hash-algo"},
.handler = {[ht](std::string s) {
*ht = parseHashType(s);
}},
.completer = hashTypeCompleter,
Args::Flag Args::Flag::mkHashFormatFlagWithDefault(std::string &&longName, HashFormat * hf) {
assert(*hf == nix::HashFormat::SRI);
return Flag{
.longName = std::move(longName),
.description = "hash format ('base16', 'nix32', 'base64', 'sri'). Default: 'sri'",
.labels = {"hash-format"},
.handler = {[hf](std::string s) {
*hf = parseHashFormat(s);
}},
.completer = hashFormatCompleter,
};
}
Args::Flag Args::Flag::mkHashTypeOptFlag(std::string && longName, std::optional<HashType> * oht)
Args::Flag Args::Flag::mkHashFormatOptFlag(std::string && longName, std::optional<HashFormat> * ohf) {
return Flag{
.longName = std::move(longName),
.description = "hash format ('base16', 'nix32', 'base64', 'sri').",
.labels = {"hash-format"},
.handler = {[ohf](std::string s) {
*ohf = std::optional<HashFormat>{parseHashFormat(s)};
}},
.completer = hashFormatCompleter,
};
}
static void hashAlgoCompleter(AddCompletions & completions, size_t index, std::string_view prefix)
{
return Flag {
.longName = std::move(longName),
.description = "hash algorithm ('md5', 'sha1', 'sha256', or 'sha512'). Optional as can also be gotten from SRI hash itself.",
.labels = {"hash-algo"},
.handler = {[oht](std::string s) {
*oht = std::optional<HashType> { parseHashType(s) };
}},
.completer = hashTypeCompleter,
for (auto & algo : hashAlgorithms)
if (hasPrefix(algo, prefix))
completions.add(algo);
}
Args::Flag Args::Flag::mkHashAlgoFlag(std::string && longName, HashAlgorithm * ha)
{
return Flag{
.longName = std::move(longName),
.description = "hash algorithm ('md5', 'sha1', 'sha256', or 'sha512')",
.labels = {"hash-algo"},
.handler = {[ha](std::string s) {
*ha = parseHashAlgo(s);
}},
.completer = hashAlgoCompleter,
};
}
Args::Flag Args::Flag::mkHashAlgoOptFlag(std::string && longName, std::optional<HashAlgorithm> * oha)
{
return Flag{
.longName = std::move(longName),
.description = "hash algorithm ('md5', 'sha1', 'sha256', or 'sha512'). Optional as can also be gotten from SRI hash itself.",
.labels = {"hash-algo"},
.handler = {[oha](std::string s) {
*oha = std::optional<HashAlgorithm>{parseHashAlgo(s)};
}},
.completer = hashAlgoCompleter,
};
}
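// Hypothetical sketch of how a command can use the flag builders above; the
// struct and flag names here are illustrative, not part of this change.
struct ExampleHashArgs : Args
{
    HashAlgorithm hashAlgo = HashAlgorithm::SHA256;
    HashFormat hashFormat = HashFormat::SRI;

    ExampleHashArgs()
    {
        addFlag(Flag::mkHashAlgoFlag("hash-algo", &hashAlgo));
        addFlag(Flag::mkHashFormatFlagWithDefault("to", &hashFormat));
    }
};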
@ -398,8 +656,9 @@ std::optional<ExperimentalFeature> Command::experimentalFeature ()
return { Xp::NixCommand };
}
MultiCommand::MultiCommand(const Commands & commands_)
MultiCommand::MultiCommand(std::string_view commandName, const Commands & commands_)
: commands(commands_)
, commandName(commandName)
{
expectArgs({
.label = "subcommand",


@ -14,7 +14,8 @@
namespace nix {
enum HashType : char;
enum struct HashAlgorithm : char;
enum struct HashFormat : int;
class MultiCommand;
@ -24,11 +25,12 @@ class AddCompletions;
class Args
{
public:
/**
* Return a short one-line description of the command.
*/
*/
virtual std::string description() { return ""; }
virtual bool forceImpureByDefault() { return false; }
@ -38,6 +40,16 @@ public:
*/
virtual std::string doc() { return ""; }
/**
* @brief Get the base directory for the command.
*
* @return Generally the working directory, but in case of a shebang
* interpreter, returns the directory of the script.
*
* This only returns the correct value after parseCmdline() has run.
*/
virtual Path getCommandBaseDir() const;
protected:
/**
@ -164,8 +176,10 @@ protected:
std::optional<ExperimentalFeature> experimentalFeature;
static Flag mkHashTypeFlag(std::string && longName, HashType * ht);
static Flag mkHashTypeOptFlag(std::string && longName, std::optional<HashType> * oht);
static Flag mkHashAlgoFlag(std::string && longName, HashAlgorithm * ha);
static Flag mkHashAlgoOptFlag(std::string && longName, std::optional<HashAlgorithm> * oha);
static Flag mkHashFormatFlagWithDefault(std::string && longName, HashFormat * hf);
static Flag mkHashFormatOptFlag(std::string && longName, std::optional<HashFormat> * ohf);
};
/**
@ -212,11 +226,11 @@ protected:
std::list<ExpectedArg> expectedArgs;
/**
* List of processed positional argument forms.
*
*
* All items removed from `expectedArgs` are added here. After all
* arguments were processed, this list should be exactly the same as
* `expectedArgs` was before.
*
*
* This list is used to extend the lifetime of the argument forms.
* If this is not done, some closures that reference the command
* itself will segfault.
@ -345,13 +359,16 @@ public:
*/
std::optional<std::pair<std::string, ref<Command>>> command;
MultiCommand(const Commands & commands);
MultiCommand(std::string_view commandName, const Commands & commands);
bool processFlag(Strings::iterator & pos, Strings::iterator end) override;
bool processArgs(const Strings & args, bool finish) override;
nlohmann::json toJSON() override;
protected:
std::string commandName = "";
};
Strings argvToStrings(int argc, char * * argv);
@ -398,4 +415,6 @@ public:
virtual void add(std::string completion, std::string description = "") = 0;
};
Strings parseShebangContent(std::string_view s);
}


@ -29,14 +29,26 @@ struct Completions final : AddCompletions
*/
class RootArgs : virtual public Args
{
/**
* @brief The command's "working directory", but only set when top level.
*
* Use getCommandBaseDir() to get the directory regardless of whether this
* is a top-level command or subcommand.
*
* @see getCommandBaseDir()
*/
Path commandBaseDir = ".";
public:
/** Parse the command line, throwing a UsageError if something goes
* wrong.
*/
void parseCmdline(const Strings & cmdline);
void parseCmdline(const Strings & cmdline, bool allowShebang = false);
std::shared_ptr<Completions> completions;
Path getCommandBaseDir() const override;
protected:
friend class Args;


@ -13,6 +13,13 @@ CanonPath::CanonPath(std::string_view raw, const CanonPath & root)
: path(absPath((Path) raw, root.abs()))
{ }
CanonPath::CanonPath(const std::vector<std::string> & elems)
: path("/")
{
for (auto & s : elems)
push(s);
}
CanonPath CanonPath::fromCwd(std::string_view path)
{
return CanonPath(unchecked_t(), absPath((Path) path));


@ -6,6 +6,7 @@
#include <cassert>
#include <iostream>
#include <set>
#include <vector>
namespace nix {
@ -46,6 +47,11 @@ public:
: path(std::move(path))
{ }
/**
* Construct a canon path from a vector of elements.
*/
CanonPath(const std::vector<std::string> & elems);
static CanonPath fromCwd(std::string_view path = ".");
static CanonPath root;
@ -199,8 +205,19 @@ public:
* `CanonPath(this.makeRelative(x), this) == path`.
*/
std::string makeRelative(const CanonPath & path) const;
friend class std::hash<CanonPath>;
};
std::ostream & operator << (std::ostream & stream, const CanonPath & path);
}
template<>
struct std::hash<nix::CanonPath>
{
std::size_t operator ()(const nix::CanonPath & s) const noexcept
{
return std::hash<std::string>{}(s.path);
}
};
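// What the std::hash specialisation above enables (illustrative): CanonPath
// can now be used as a key in unordered containers, as the lstat cache in
// PosixSourceAccessor does elsewhere in this change. For example:
//
//     std::unordered_map<nix::CanonPath, int> seen;
//     seen.emplace(nix::CanonPath("/nix/store"), 1);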


@ -88,10 +88,9 @@ void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overridd
res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description});
}
void AbstractConfig::applyConfig(const std::string & contents, const std::string & path) {
unsigned int pos = 0;
std::vector<std::pair<std::string, std::string>> parsedContents;
static void applyConfigInner(const std::string & contents, const std::string & path, std::vector<std::pair<std::string, std::string>> & parsedContents) {
unsigned int pos = 0;
while (pos < contents.size()) {
std::string line;
@ -122,7 +121,12 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string
throw UsageError("illegal configuration line '%1%' in '%2%'", line, path);
auto p = absPath(tokens[1], dirOf(path));
if (pathExists(p)) {
applyConfigFile(p);
try {
std::string includedContents = readFile(p);
applyConfigInner(includedContents, p, parsedContents);
} catch (SysError &) {
// TODO: Do we actually want to ignore this? Or is it better to fail?
}
} else if (!ignoreMissing) {
throw Error("file '%1%' included from '%2%' not found", p, path);
}
@ -142,6 +146,12 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string
concatStringsSep(" ", Strings(i, tokens.end())),
});
};
}
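// Illustrative nix.conf fragment exercised by the include handling above
// (file paths are placeholders):
//
//     include /etc/nix/extra.conf       # error if the file is missing
//     !include /etc/nix/optional.conf   # silently skipped if missing
//     experimental-features = nix-command flakes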
void AbstractConfig::applyConfig(const std::string & contents, const std::string & path) {
std::vector<std::pair<std::string, std::string>> parsedContents;
applyConfigInner(contents, path, parsedContents);
// First apply experimental-feature related settings
for (const auto & [name, value] : parsedContents)
@ -154,14 +164,6 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string
set(name, value);
}
void AbstractConfig::applyConfigFile(const Path & path)
{
try {
std::string contents = readFile(path);
applyConfig(contents, path);
} catch (SysError &) { }
}
void Config::resetOverridden()
{
for (auto & s : _settings)
@ -330,9 +332,11 @@ template<> std::set<ExperimentalFeature> BaseSetting<std::set<ExperimentalFeatur
{
std::set<ExperimentalFeature> res;
for (auto & s : tokenizeString<StringSet>(str)) {
if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature)
if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) {
res.insert(thisXpFeature.value());
else
if (thisXpFeature.value() == Xp::Flakes)
res.insert(Xp::FetchTree);
} else
warn("unknown experimental feature '%s'", s);
}
return res;


@ -82,12 +82,6 @@ public:
*/
void applyConfig(const std::string & contents, const std::string & path = "<unknown>");
/**
* Applies a nix configuration file
* - path: the location of the config file to apply
*/
void applyConfigFile(const Path & path);
/**
* Resets the `overridden` flag of all Settings
*/


@ -159,11 +159,11 @@ static std::string indent(std::string_view indentFirst, std::string_view indentR
/**
* A development aid for finding missing positions, to improve error messages. Example use:
*
* NIX_DEVELOPER_SHOW_UNKNOWN_LOCATIONS=1 _NIX_TEST_ACCEPT=1 make tests/lang.sh.test
* _NIX_EVAL_SHOW_UNKNOWN_LOCATIONS=1 _NIX_TEST_ACCEPT=1 make tests/lang.sh.test
* git diff -U20 tests
*
*/
static bool printUnknownLocations = getEnv("_NIX_DEVELOPER_SHOW_UNKNOWN_LOCATIONS").has_value();
static bool printUnknownLocations = getEnv("_NIX_EVAL_SHOW_UNKNOWN_LOCATIONS").has_value();
/**
* Print a position, if it is known.
@ -173,10 +173,9 @@ static bool printUnknownLocations = getEnv("_NIX_DEVELOPER_SHOW_UNKNOWN_LOCATION
static bool printPosMaybe(std::ostream & oss, std::string_view indent, const std::shared_ptr<AbstractPos> & pos) {
bool hasPos = pos && *pos;
if (hasPos) {
oss << "\n" << indent << ANSI_BLUE << "at " ANSI_WARNING << *pos << ANSI_NORMAL << ":";
oss << indent << ANSI_BLUE << "at " ANSI_WARNING << *pos << ANSI_NORMAL << ":";
if (auto loc = pos->getCodeLines()) {
oss << "\n";
printCodeLines(oss, "", *pos, *loc);
oss << "\n";
}


@ -12,7 +12,19 @@ struct ExperimentalFeatureDetails
std::string_view description;
};
constexpr std::array<ExperimentalFeatureDetails, 16> xpFeatureDetails = {{
/**
* If two different PRs both add an experimental feature, and we just
* used a number for this, we *wouldn't* get a merge conflict and the
* counter would be incremented only once instead of twice, causing a
* build failure.
*
* By instead defining this as 1 + the last experimental feature, we
* either have no issue at all if the new features are not added at the
* end of the list, or a proper merge conflict if they are.
*/
constexpr size_t numXpFeatures = 1 + static_cast<size_t>(Xp::VerifiedFetches);
constexpr std::array<ExperimentalFeatureDetails, numXpFeatures> xpFeatureDetails = {{
{
.tag = Xp::CaDerivations,
.name = "ca-derivations",
@ -62,6 +74,19 @@ constexpr std::array<ExperimentalFeatureDetails, 16> xpFeatureDetails = {{
flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details.
)",
},
{
.tag = Xp::FetchTree,
.name = "fetch-tree",
.description = R"(
Enable the use of the [`fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) built-in function in the Nix language.
`fetchTree` exposes a generic interface for fetching remote file system trees from different types of remote sources.
The [`flakes`](#xp-feature-flakes) feature flag always enables `fetch-tree`.
This built-in was previously guarded by the `flakes` experimental feature because of that overlap.
Enabling just this feature serves as a "release candidate", allowing users to try it out in isolation.
)",
},
{
.tag = Xp::NixCommand,
.name = "nix-command",
@ -70,6 +95,14 @@ constexpr std::array<ExperimentalFeatureDetails, 16> xpFeatureDetails = {{
[`nix`](@docroot@/command-ref/new-cli/nix.md) for details.
)",
},
{
.tag = Xp::GitHashing,
.name = "git-hashing",
.description = R"(
Allow creating (content-addressed) store objects which are hashed via Git's hashing algorithm.
These store objects will not be understandable by older versions of Nix.
)",
},
{
.tag = Xp::RecursiveNix,
.name = "recursive-nix",
@ -218,7 +251,7 @@ constexpr std::array<ExperimentalFeatureDetails, 16> xpFeatureDetails = {{
.tag = Xp::ReadOnlyLocalStore,
.name = "read-only-local-store",
.description = R"(
Allow the use of the `read-only` parameter in [local store](@docroot@/command-ref/new-cli/nix3-help-stores.md#local-store) URIs.
Allow the use of the `read-only` parameter in [local store](@docroot@/store/types/local-store.md) URIs.
)",
},
{
@ -228,6 +261,13 @@ constexpr std::array<ExperimentalFeatureDetails, 16> xpFeatureDetails = {{
Allow the use of the [impure-env](@docroot@/command-ref/conf-file.md#conf-impure-env) setting.
)",
},
{
.tag = Xp::MountedSSHStore,
.name = "mounted-ssh-store",
.description = R"(
Allow the use of the [`mounted SSH store`](@docroot@/command-ref/new-cli/nix3-help-stores.html#experimental-ssh-store-with-filesytem-mounted).
)",
},
{
.tag = Xp::VerifiedFetches,
.name = "verified-fetches",


@ -20,7 +20,9 @@ enum struct ExperimentalFeature
CaDerivations,
ImpureDerivations,
Flakes,
FetchTree,
NixCommand,
GitHashing,
RecursiveNix,
NoUrlLiterals,
FetchClosure,
@ -32,6 +34,7 @@ enum struct ExperimentalFeature
ParseTomlTimestamps,
ReadOnlyLocalStore,
ConfigurableImpureEnv,
MountedSSHStore,
VerifiedFetches,
};


@ -0,0 +1,49 @@
#include "file-content-address.hh"
#include "archive.hh"
namespace nix {
void dumpPath(
SourceAccessor & accessor, const CanonPath & path,
Sink & sink,
FileIngestionMethod method,
PathFilter & filter)
{
switch (method) {
case FileIngestionMethod::Flat:
accessor.readFile(path, sink);
break;
case FileIngestionMethod::Recursive:
accessor.dumpPath(path, sink, filter);
break;
}
}
void restorePath(
const Path & path,
Source & source,
FileIngestionMethod method)
{
switch (method) {
case FileIngestionMethod::Flat:
writeFile(path, source);
break;
case FileIngestionMethod::Recursive:
restorePath(path, source);
break;
}
}
HashResult hashPath(
SourceAccessor & accessor, const CanonPath & path,
FileIngestionMethod method, HashAlgorithm ht,
PathFilter & filter)
{
HashSink sink { ht };
dumpPath(accessor, path, sink, method, filter);
return sink.finish();
}
}
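// Usage sketch (illustrative, not part of this change): NAR-hashing a path
// with the helpers above, via the PosixSourceAccessor introduced elsewhere
// in this change; "./some-path" is a placeholder.
//
//     PosixSourceAccessor accessor;
//     auto [narHash, narSize] = hashPath(
//         accessor, CanonPath::fromCwd("./some-path"),
//         FileIngestionMethod::Recursive, HashAlgorithm::SHA256);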


@ -0,0 +1,56 @@
#pragma once
///@file
#include "source-accessor.hh"
#include "fs-sink.hh"
#include "util.hh"
namespace nix {
/**
* An enumeration of the main ways we can serialize file system
* objects.
*/
enum struct FileIngestionMethod : uint8_t {
/**
* Flat-file hashing. Directly ingest the contents of a single file
*/
Flat = 0,
/**
* Recursive (or NAR) hashing. Serializes the file-system object in
* Nix Archive format and ingest that.
*/
Recursive = 1,
};
/**
* Dump a serialization of the given file system object.
*/
void dumpPath(
SourceAccessor & accessor, const CanonPath & path,
Sink & sink,
FileIngestionMethod method,
PathFilter & filter = defaultPathFilter);
/**
* Restore a serialization of the given file system object.
*
* @TODO use an arbitrary `ParseSink`.
*/
void restorePath(
const Path & path,
Source & source,
FileIngestionMethod method);
/**
* Compute the hash of the given file system object according to the
* given method.
*
* The hash is defined as (essentially) hashString(ht, dumpPath(path)).
*/
HashResult hashPath(
SourceAccessor & accessor, const CanonPath & path,
FileIngestionMethod method, HashAlgorithm ht,
PathFilter & filter = defaultPathFilter);
}


@ -1,9 +1,263 @@
#include "git.hh"
#include <cerrno>
#include <algorithm>
#include <vector>
#include <map>
#include <regex>
#include <strings.h> // for strcasecmp
#include "signals.hh"
#include "config.hh"
#include "hash.hh"
#include "posix-source-accessor.hh"
#include "git.hh"
#include "serialise.hh"
namespace nix::git {
using namespace nix;
using namespace std::string_literals;
std::optional<Mode> decodeMode(RawMode m) {
switch (m) {
case (RawMode) Mode::Directory:
case (RawMode) Mode::Executable:
case (RawMode) Mode::Regular:
case (RawMode) Mode::Symlink:
return (Mode) m;
default:
return std::nullopt;
}
}
static std::string getStringUntil(Source & source, char byte)
{
std::string s;
char n[1];
source(std::string_view { n, 1 });
while (*n != byte) {
s += *n;
source(std::string_view { n, 1 });
}
return s;
}
static std::string getString(Source & source, int n)
{
std::string v;
v.resize(n);
source(v);
return v;
}
void parse(
ParseSink & sink,
const Path & sinkPath,
Source & source,
std::function<SinkHook> hook,
const ExperimentalFeatureSettings & xpSettings)
{
xpSettings.require(Xp::GitHashing);
auto type = getString(source, 5);
if (type == "blob ") {
sink.createRegularFile(sinkPath);
unsigned long long size = std::stoi(getStringUntil(source, 0));
sink.preallocateContents(size);
unsigned long long left = size;
std::string buf;
buf.reserve(65536);
while (left) {
checkInterrupt();
buf.resize(std::min((unsigned long long)buf.capacity(), left));
source(buf);
sink.receiveContents(buf);
left -= buf.size();
}
} else if (type == "tree ") {
unsigned long long size = std::stoi(getStringUntil(source, 0));
unsigned long long left = size;
sink.createDirectory(sinkPath);
while (left) {
std::string perms = getStringUntil(source, ' ');
left -= perms.size();
left -= 1;
RawMode rawMode = std::stoi(perms, 0, 8);
auto modeOpt = decodeMode(rawMode);
if (!modeOpt)
throw Error("Unknown Git permission: %o", perms);
auto mode = std::move(*modeOpt);
std::string name = getStringUntil(source, '\0');
left -= name.size();
left -= 1;
std::string hashs = getString(source, 20);
left -= 20;
Hash hash(HashAlgorithm::SHA1);
std::copy(hashs.begin(), hashs.end(), hash.hash);
hook(name, TreeEntry {
.mode = mode,
.hash = hash,
});
if (mode == Mode::Executable)
sink.isExecutable();
}
} else throw Error("input doesn't look like a Git object");
}
std::optional<Mode> convertMode(SourceAccessor::Type type)
{
switch (type) {
case SourceAccessor::tSymlink: return Mode::Symlink;
case SourceAccessor::tRegular: return Mode::Regular;
case SourceAccessor::tDirectory: return Mode::Directory;
case SourceAccessor::tMisc: return std::nullopt;
default: abort();
}
}
void restore(ParseSink & sink, Source & source, std::function<RestoreHook> hook)
{
parse(sink, "", source, [&](Path name, TreeEntry entry) {
auto [accessor, from] = hook(entry.hash);
auto stat = accessor->lstat(from);
auto gotOpt = convertMode(stat.type);
if (!gotOpt)
throw Error("file '%s' (git hash %s) has an unsupported type",
from,
entry.hash.to_string(HashFormat::Base16, false));
auto & got = *gotOpt;
if (got != entry.mode)
throw Error("git mode of file '%s' (git hash %s) is %o but expected %o",
from,
entry.hash.to_string(HashFormat::Base16, false),
(RawMode) got,
(RawMode) entry.mode);
copyRecursive(
*accessor, from,
sink, name);
});
}
void dumpBlobPrefix(
uint64_t size, Sink & sink,
const ExperimentalFeatureSettings & xpSettings)
{
xpSettings.require(Xp::GitHashing);
auto s = fmt("blob %d\0"s, std::to_string(size));
sink(s);
}
void dumpTree(const Tree & entries, Sink & sink,
const ExperimentalFeatureSettings & xpSettings)
{
xpSettings.require(Xp::GitHashing);
std::string v1;
for (auto & [name, entry] : entries) {
auto name2 = name;
if (entry.mode == Mode::Directory) {
assert(name2.back() == '/');
name2.pop_back();
}
v1 += fmt("%o %s\0"s, static_cast<RawMode>(entry.mode), name2);
std::copy(entry.hash.hash, entry.hash.hash + entry.hash.hashSize, std::back_inserter(v1));
}
{
auto s = fmt("tree %d\0"s, v1.size());
sink(s);
}
sink(v1);
}
Mode dump(
SourceAccessor & accessor, const CanonPath & path,
Sink & sink,
std::function<DumpHook> hook,
PathFilter & filter,
const ExperimentalFeatureSettings & xpSettings)
{
auto st = accessor.lstat(path);
switch (st.type) {
case SourceAccessor::tRegular:
{
accessor.readFile(path, sink, [&](uint64_t size) {
dumpBlobPrefix(size, sink, xpSettings);
});
return st.isExecutable
? Mode::Executable
: Mode::Regular;
}
case SourceAccessor::tDirectory:
{
Tree entries;
for (auto & [name, _] : accessor.readDirectory(path)) {
auto child = path + name;
if (!filter(child.abs())) continue;
auto entry = hook(child);
auto name2 = name;
if (entry.mode == Mode::Directory)
name2 += "/";
entries.insert_or_assign(std::move(name2), std::move(entry));
}
dumpTree(entries, sink, xpSettings);
return Mode::Directory;
}
case SourceAccessor::tSymlink:
case SourceAccessor::tMisc:
default:
throw Error("file '%1%' has an unsupported type", path);
}
}
TreeEntry dumpHash(
HashAlgorithm ha,
SourceAccessor & accessor, const CanonPath & path, PathFilter & filter)
{
std::function<DumpHook> hook;
hook = [&](const CanonPath & path) -> TreeEntry {
auto hashSink = HashSink(ha);
auto mode = dump(accessor, path, hashSink, hook, filter);
auto hash = hashSink.finish().first;
return {
.mode = mode,
.hash = hash,
};
};
return hook(path);
}
namespace nix {
namespace git {
std::optional<LsRemoteRefLine> parseLsRemoteLine(std::string_view line)
{
@ -22,4 +276,3 @@ std::optional<LsRemoteRefLine> parseLsRemoteLine(std::string_view line)
}
}
}


@ -5,9 +5,127 @@
#include <string_view>
#include <optional>
namespace nix {
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"
#include "source-accessor.hh"
#include "fs-sink.hh"
namespace git {
namespace nix::git {
using RawMode = uint32_t;
enum struct Mode : RawMode {
Directory = 0040000,
Executable = 0100755,
Regular = 0100644,
Symlink = 0120000,
};
std::optional<Mode> decodeMode(RawMode m);
/**
* An anonymous Git tree object entry (no name part).
*/
struct TreeEntry
{
Mode mode;
Hash hash;
GENERATE_CMP(TreeEntry, me->mode, me->hash);
};
/**
* A Git tree object, fully decoded and stored in memory.
*
* Directory names must end in a `/` for the sake of sorting. See
* https://github.com/mirage/irmin/issues/352
*/
using Tree = std::map<std::string, TreeEntry>;
/**
* Callback for processing a child hash with `parse`
*
* The function should
*
* 1. Obtain the file system objects denoted by `gitHash`
*
* 2. Ensure they match `mode`
*
* 3. Feed them into the same sink `parse` was called with
*
* Implementations may wish to memoize the result for a given Git hash,
* to save resources (bandwidth, storage, etc.).
*/
using SinkHook = void(const Path & name, TreeEntry entry);
void parse(
ParseSink & sink, const Path & sinkPath,
Source & source,
std::function<SinkHook> hook,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Assists with writing a `SinkHook` step (2).
*/
std::optional<Mode> convertMode(SourceAccessor::Type type);
/**
* Simplified version of `SinkHook` for `restore`.
*
* Given a `Hash`, return a `SourceAccessor` and `CanonPath` pointing to
* the file system object with that path.
*/
using RestoreHook = std::pair<SourceAccessor *, CanonPath>(Hash);
/**
* Wrapper around `parse` and `RestoreSink`
*/
void restore(ParseSink & sink, Source & source, std::function<RestoreHook> hook);
/**
* Dumps a single file to a sink
*
* @param xpSettings for testing purposes
*/
void dumpBlobPrefix(
uint64_t size, Sink & sink,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Dumps a representation of a git tree to a sink
*/
void dumpTree(
const Tree & entries, Sink & sink,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Callback for processing a child with `dump`
*
* The function should return the Git hash and mode of the file at the
* given path in the accessor passed to `dump`.
*
* Note that if the child is a directory, its children must also be
* processed in order to compute this information.
*/
using DumpHook = TreeEntry(const CanonPath & path);
Mode dump(
SourceAccessor & accessor, const CanonPath & path,
Sink & sink,
std::function<DumpHook> hook,
PathFilter & filter = defaultPathFilter,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Recursively dumps path, hashing as we go.
*
* A smaller wrapper around `dump`.
*/
TreeEntry dumpHash(
HashAlgorithm ha,
SourceAccessor & accessor, const CanonPath & path,
PathFilter & filter = defaultPathFilter);
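// Usage sketch (illustrative): computing the Git object id of a path with
// `dumpHash`, assuming a PosixSourceAccessor and the `git-hashing`
// experimental feature enabled; "./src" is a placeholder.
//
//     PosixSourceAccessor accessor;
//     auto entry = dumpHash(HashAlgorithm::SHA1, accessor, CanonPath::fromCwd("./src"));
//     // entry.hash is the Git object id, entry.mode its tree mode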
/**
* A line from the output of `git ls-remote --symref`.
@ -16,15 +134,17 @@ namespace git {
*
* - Symbolic references of the form
*
* ref: {target} {reference}
*
* where {target} is itself a reference and {reference} is optional
* ```
* ref: {target} {reference}
* ```
* where {target} is itself a reference and {reference} is optional
*
* - Object references of the form
*
* {target} {reference}
*
* where {target} is a commit id and {reference} is mandatory
* ```
* {target} {reference}
* ```
* where {target} is a commit id and {reference} is mandatory
*/
struct LsRemoteRefLine {
enum struct Kind {
@ -36,8 +156,9 @@ struct LsRemoteRefLine {
std::optional<std::string> reference;
};
/**
* Parse an `LsRemoteRefLine`
*/
std::optional<LsRemoteRefLine> parseLsRemoteLine(std::string_view line);
}
}


@ -16,23 +16,24 @@
namespace nix {
static size_t regularHashSize(HashType type) {
static size_t regularHashSize(HashAlgorithm type) {
switch (type) {
case htMD5: return md5HashSize;
case htSHA1: return sha1HashSize;
case htSHA256: return sha256HashSize;
case htSHA512: return sha512HashSize;
case HashAlgorithm::MD5: return md5HashSize;
case HashAlgorithm::SHA1: return sha1HashSize;
case HashAlgorithm::SHA256: return sha256HashSize;
case HashAlgorithm::SHA512: return sha512HashSize;
}
abort();
}
std::set<std::string> hashTypes = { "md5", "sha1", "sha256", "sha512" };
const std::set<std::string> hashAlgorithms = {"md5", "sha1", "sha256", "sha512" };
const std::set<std::string> hashFormats = {"base64", "nix32", "base16", "sri" };
Hash::Hash(HashType type) : type(type)
Hash::Hash(HashAlgorithm algo) : algo(algo)
{
hashSize = regularHashSize(type);
hashSize = regularHashSize(algo);
assert(hashSize <= maxHashSize);
memset(hash, 0, maxHashSize);
}
@ -81,7 +82,7 @@ static std::string printHash16(const Hash & hash)
// omitted: E O U T
const std::string base32Chars = "0123456789abcdfghijklmnpqrsvwxyz";
const std::string nix32Chars = "0123456789abcdfghijklmnpqrsvwxyz";
static std::string printHash32(const Hash & hash)
@ -100,7 +101,7 @@ static std::string printHash32(const Hash & hash)
unsigned char c =
(hash.hash[i] >> j)
| (i >= hash.hashSize - 1 ? 0 : hash.hash[i + 1] << (8 - j));
s.push_back(base32Chars[c & 0x1f]);
s.push_back(nix32Chars[c & 0x1f]);
}
return s;
@ -109,23 +110,23 @@ static std::string printHash32(const Hash & hash)
std::string printHash16or32(const Hash & hash)
{
assert(hash.type);
return hash.to_string(hash.type == htMD5 ? HashFormat::Base16 : HashFormat::Base32, false);
assert(static_cast<char>(hash.algo));
return hash.to_string(hash.algo == HashAlgorithm::MD5 ? HashFormat::Base16 : HashFormat::Nix32, false);
}
std::string Hash::to_string(HashFormat hashFormat, bool includeType) const
std::string Hash::to_string(HashFormat hashFormat, bool includeAlgo) const
{
std::string s;
if (hashFormat == HashFormat::SRI || includeType) {
s += printHashType(type);
if (hashFormat == HashFormat::SRI || includeAlgo) {
s += printHashAlgo(algo);
s += hashFormat == HashFormat::SRI ? '-' : ':';
}
switch (hashFormat) {
case HashFormat::Base16:
s += printHash16(*this);
break;
case HashFormat::Base32:
case HashFormat::Nix32:
s += printHash32(*this);
break;
case HashFormat::Base64:
@ -136,7 +137,7 @@ std::string Hash::to_string(HashFormat hashFormat, bool includeType) const
return s;
}
Hash Hash::dummy(htSHA256);
Hash Hash::dummy(HashAlgorithm::SHA256);
Hash Hash::parseSRI(std::string_view original) {
auto rest = original;
@ -145,18 +146,18 @@ Hash Hash::parseSRI(std::string_view original) {
auto hashRaw = splitPrefixTo(rest, '-');
if (!hashRaw)
throw BadHash("hash '%s' is not SRI", original);
HashType parsedType = parseHashType(*hashRaw);
HashAlgorithm parsedType = parseHashAlgo(*hashRaw);
return Hash(rest, parsedType, true);
}
// Mutates the string to eliminate the prefixes when found
static std::pair<std::optional<HashType>, bool> getParsedTypeAndSRI(std::string_view & rest)
static std::pair<std::optional<HashAlgorithm>, bool> getParsedTypeAndSRI(std::string_view & rest)
{
bool isSRI = false;
// Parse the hash type before the separator, if there was one.
std::optional<HashType> optParsedType;
std::optional<HashAlgorithm> optParsedType;
{
auto hashRaw = splitPrefixTo(rest, ':');
@ -166,7 +167,7 @@ static std::pair<std::optional<HashType>, bool> getParsedTypeAndSRI(std::string_
isSRI = true;
}
if (hashRaw)
optParsedType = parseHashType(*hashRaw);
optParsedType = parseHashAlgo(*hashRaw);
}
return {optParsedType, isSRI};
@ -185,29 +186,29 @@ Hash Hash::parseAnyPrefixed(std::string_view original)
return Hash(rest, *optParsedType, isSRI);
}
Hash Hash::parseAny(std::string_view original, std::optional<HashType> optType)
Hash Hash::parseAny(std::string_view original, std::optional<HashAlgorithm> optAlgo)
{
auto rest = original;
auto [optParsedType, isSRI] = getParsedTypeAndSRI(rest);
// Either the string or user must provide the type, if they both do they
// must agree.
if (!optParsedType && !optType)
if (!optParsedType && !optAlgo)
throw BadHash("hash '%s' does not include a type, nor is the type otherwise known from context", rest);
else if (optParsedType && optType && *optParsedType != *optType)
throw BadHash("hash '%s' should have type '%s'", original, printHashType(*optType));
else if (optParsedType && optAlgo && *optParsedType != *optAlgo)
throw BadHash("hash '%s' should have type '%s'", original, printHashAlgo(*optAlgo));
HashType hashType = optParsedType ? *optParsedType : *optType;
return Hash(rest, hashType, isSRI);
HashAlgorithm hashAlgo = optParsedType ? *optParsedType : *optAlgo;
return Hash(rest, hashAlgo, isSRI);
}
Hash Hash::parseNonSRIUnprefixed(std::string_view s, HashType type)
Hash Hash::parseNonSRIUnprefixed(std::string_view s, HashAlgorithm algo)
{
return Hash(s, type, false);
return Hash(s, algo, false);
}
Hash::Hash(std::string_view rest, HashType type, bool isSRI)
: Hash(type)
Hash::Hash(std::string_view rest, HashAlgorithm algo, bool isSRI)
: Hash(algo)
{
if (!isSRI && rest.size() == base16Len()) {
@ -230,8 +231,8 @@ Hash::Hash(std::string_view rest, HashType type, bool isSRI)
for (unsigned int n = 0; n < rest.size(); ++n) {
char c = rest[rest.size() - n - 1];
unsigned char digit;
for (digit = 0; digit < base32Chars.size(); ++digit) /* !!! slow */
if (base32Chars[digit] == c) break;
for (digit = 0; digit < nix32Chars.size(); ++digit) /* !!! slow */
if (nix32Chars[digit] == c) break;
if (digit >= 32)
throw BadHash("invalid base-32 hash '%s'", rest);
unsigned int b = n * 5;
@ -257,19 +258,19 @@ Hash::Hash(std::string_view rest, HashType type, bool isSRI)
}
else
throw BadHash("hash '%s' has wrong length for hash type '%s'", rest, printHashType(this->type));
throw BadHash("hash '%s' has wrong length for hash algorithm '%s'", rest, printHashAlgo(this->algo));
}
Hash newHashAllowEmpty(std::string_view hashStr, std::optional<HashType> ht)
Hash newHashAllowEmpty(std::string_view hashStr, std::optional<HashAlgorithm> ha)
{
if (hashStr.empty()) {
if (!ht)
if (!ha)
throw BadHash("empty hash requires explicit hash type");
Hash h(*ht);
Hash h(*ha);
warn("found empty hash, assuming '%s'", h.to_string(HashFormat::SRI, true));
return h;
} else
return Hash::parseAny(hashStr, ht);
return Hash::parseAny(hashStr, ha);
}
@ -282,58 +283,58 @@ union Ctx
};
static void start(HashType ht, Ctx & ctx)
static void start(HashAlgorithm ha, Ctx & ctx)
{
if (ht == htMD5) MD5_Init(&ctx.md5);
else if (ht == htSHA1) SHA1_Init(&ctx.sha1);
else if (ht == htSHA256) SHA256_Init(&ctx.sha256);
else if (ht == htSHA512) SHA512_Init(&ctx.sha512);
if (ha == HashAlgorithm::MD5) MD5_Init(&ctx.md5);
else if (ha == HashAlgorithm::SHA1) SHA1_Init(&ctx.sha1);
else if (ha == HashAlgorithm::SHA256) SHA256_Init(&ctx.sha256);
else if (ha == HashAlgorithm::SHA512) SHA512_Init(&ctx.sha512);
}
static void update(HashType ht, Ctx & ctx,
std::string_view data)
static void update(HashAlgorithm ha, Ctx & ctx,
std::string_view data)
{
if (ht == htMD5) MD5_Update(&ctx.md5, data.data(), data.size());
else if (ht == htSHA1) SHA1_Update(&ctx.sha1, data.data(), data.size());
else if (ht == htSHA256) SHA256_Update(&ctx.sha256, data.data(), data.size());
else if (ht == htSHA512) SHA512_Update(&ctx.sha512, data.data(), data.size());
if (ha == HashAlgorithm::MD5) MD5_Update(&ctx.md5, data.data(), data.size());
else if (ha == HashAlgorithm::SHA1) SHA1_Update(&ctx.sha1, data.data(), data.size());
else if (ha == HashAlgorithm::SHA256) SHA256_Update(&ctx.sha256, data.data(), data.size());
else if (ha == HashAlgorithm::SHA512) SHA512_Update(&ctx.sha512, data.data(), data.size());
}
static void finish(HashType ht, Ctx & ctx, unsigned char * hash)
static void finish(HashAlgorithm ha, Ctx & ctx, unsigned char * hash)
{
if (ht == htMD5) MD5_Final(hash, &ctx.md5);
else if (ht == htSHA1) SHA1_Final(hash, &ctx.sha1);
else if (ht == htSHA256) SHA256_Final(hash, &ctx.sha256);
else if (ht == htSHA512) SHA512_Final(hash, &ctx.sha512);
if (ha == HashAlgorithm::MD5) MD5_Final(hash, &ctx.md5);
else if (ha == HashAlgorithm::SHA1) SHA1_Final(hash, &ctx.sha1);
else if (ha == HashAlgorithm::SHA256) SHA256_Final(hash, &ctx.sha256);
else if (ha == HashAlgorithm::SHA512) SHA512_Final(hash, &ctx.sha512);
}
Hash hashString(HashType ht, std::string_view s)
Hash hashString(HashAlgorithm ha, std::string_view s)
{
Ctx ctx;
Hash hash(ht);
start(ht, ctx);
update(ht, ctx, s);
finish(ht, ctx, hash.hash);
Hash hash(ha);
start(ha, ctx);
update(ha, ctx, s);
finish(ha, ctx, hash.hash);
return hash;
}
Hash hashFile(HashType ht, const Path & path)
Hash hashFile(HashAlgorithm ha, const Path & path)
{
HashSink sink(ht);
HashSink sink(ha);
readFile(path, sink);
return sink.finish().first;
}
HashSink::HashSink(HashType ht) : ht(ht)
HashSink::HashSink(HashAlgorithm ha) : ha(ha)
{
ctx = new Ctx;
bytes = 0;
start(ht, *ctx);
start(ha, *ctx);
}
HashSink::~HashSink()
@ -345,14 +346,14 @@ HashSink::~HashSink()
void HashSink::writeUnbuffered(std::string_view data)
{
bytes += data.size();
update(ht, *ctx, data);
update(ha, *ctx, data);
}
HashResult HashSink::finish()
{
flush();
Hash hash(ht);
nix::finish(ht, *ctx, hash.hash);
Hash hash(ha);
nix::finish(ha, *ctx, hash.hash);
return HashResult(hash, bytes);
}
@ -360,24 +361,15 @@ HashResult HashSink::currentHash()
{
flush();
Ctx ctx2 = *ctx;
Hash hash(ht);
nix::finish(ht, ctx2, hash.hash);
Hash hash(ha);
nix::finish(ha, ctx2, hash.hash);
return HashResult(hash, bytes);
}
HashResult hashPath(
HashType ht, const Path & path, PathFilter & filter)
{
HashSink sink(ht);
dumpPath(path, sink, filter);
return sink.finish();
}
Hash compressHash(const Hash & hash, unsigned int newSize)
{
Hash h(hash.type);
Hash h(hash.algo);
h.hashSize = newSize;
for (unsigned int i = 0; i < hash.hashSize; ++i)
h.hash[i % newSize] ^= hash.hash[i];
@ -388,7 +380,11 @@ Hash compressHash(const Hash & hash, unsigned int newSize)
std::optional<HashFormat> parseHashFormatOpt(std::string_view hashFormatName)
{
if (hashFormatName == "base16") return HashFormat::Base16;
if (hashFormatName == "base32") return HashFormat::Base32;
if (hashFormatName == "nix32") return HashFormat::Nix32;
if (hashFormatName == "base32") {
warn(R"("base32" is a deprecated alias for hash format "nix32".)");
return HashFormat::Nix32;
}
if (hashFormatName == "base64") return HashFormat::Base64;
if (hashFormatName == "sri") return HashFormat::SRI;
return std::nullopt;
@ -407,8 +403,8 @@ std::string_view printHashFormat(HashFormat HashFormat)
switch (HashFormat) {
case HashFormat::Base64:
return "base64";
case HashFormat::Base32:
return "base32";
case HashFormat::Nix32:
return "nix32";
case HashFormat::Base16:
return "base16";
case HashFormat::SRI:
@ -420,31 +416,31 @@ std::string_view printHashFormat(HashFormat HashFormat)
}
}
std::optional<HashType> parseHashTypeOpt(std::string_view s)
std::optional<HashAlgorithm> parseHashAlgoOpt(std::string_view s)
{
if (s == "md5") return htMD5;
if (s == "sha1") return htSHA1;
if (s == "sha256") return htSHA256;
if (s == "sha512") return htSHA512;
if (s == "md5") return HashAlgorithm::MD5;
if (s == "sha1") return HashAlgorithm::SHA1;
if (s == "sha256") return HashAlgorithm::SHA256;
if (s == "sha512") return HashAlgorithm::SHA512;
return std::nullopt;
}
HashType parseHashType(std::string_view s)
HashAlgorithm parseHashAlgo(std::string_view s)
{
auto opt_h = parseHashTypeOpt(s);
auto opt_h = parseHashAlgoOpt(s);
if (opt_h)
return *opt_h;
else
throw UsageError("unknown hash algorithm '%1%', expect 'md5', 'sha1', 'sha256', or 'sha512'", s);
}
std::string_view printHashType(HashType ht)
std::string_view printHashAlgo(HashAlgorithm ha)
{
switch (ht) {
case htMD5: return "md5";
case htSHA1: return "sha1";
case htSHA256: return "sha256";
case htSHA512: return "sha512";
switch (ha) {
case HashAlgorithm::MD5: return "md5";
case HashAlgorithm::SHA1: return "sha1";
case HashAlgorithm::SHA256: return "sha256";
case HashAlgorithm::SHA512: return "sha512";
default:
// illegal hash type enum value internally, as opposed to external input
// which should be validated with nice error message.


@ -12,7 +12,7 @@ namespace nix {
MakeError(BadHash, Error);
enum HashType : char { htMD5 = 42, htSHA1, htSHA256, htSHA512 };
enum struct HashAlgorithm : char { MD5 = 42, SHA1, SHA256, SHA512 };
const int md5HashSize = 16;
@ -20,9 +20,9 @@ const int sha1HashSize = 20;
const int sha256HashSize = 32;
const int sha512HashSize = 64;
extern std::set<std::string> hashTypes;
extern const std::set<std::string> hashAlgorithms;
extern const std::string base32Chars;
extern const std::string nix32Chars;
/**
* @brief Enumeration representing the hash formats.
@ -31,8 +31,8 @@ enum struct HashFormat : int {
/// @brief Base 64 encoding.
/// @see [IETF RFC 4648, section 4](https://datatracker.ietf.org/doc/html/rfc4648#section-4).
Base64,
/// @brief Nix-specific base-32 encoding. @see base32Chars
Base32,
/// @brief Nix-specific base-32 encoding. @see nix32Chars
Nix32,
/// @brief Lowercase hexadecimal encoding. @see base16Chars
Base16,
/// @brief "<hash algo>:<Base 64 hash>", format of the SRI integrity attribute.
@ -40,6 +40,7 @@ enum struct HashFormat : int {
SRI
};
extern const std::set<std::string> hashFormats;
struct Hash
{
@ -47,12 +48,12 @@ struct Hash
size_t hashSize = 0;
uint8_t hash[maxHashSize] = {};
HashType type;
HashAlgorithm algo;
/**
* Create a zero-filled hash object.
*/
Hash(HashType type);
explicit Hash(HashAlgorithm algo);
/**
* Parse the hash from a string representation in the format
@ -61,7 +62,7 @@ struct Hash
* is not present, then the hash type must be specified in the
* string.
*/
static Hash parseAny(std::string_view s, std::optional<HashType> type);
static Hash parseAny(std::string_view s, std::optional<HashAlgorithm> optAlgo);
/**
* Parse a hash from a string representation like the above, except the
@ -73,7 +74,7 @@ struct Hash
* Parse a plain hash that must not have any prefix indicating the type.
* The type is passed in to disambiguate.
*/
static Hash parseNonSRIUnprefixed(std::string_view s, HashType type);
static Hash parseNonSRIUnprefixed(std::string_view s, HashAlgorithm algo);
static Hash parseSRI(std::string_view original);
@ -82,7 +83,7 @@ private:
* The type must be provided, the string view must not include <type>
* prefix. `isSRI` helps disambiguate the various base-* encodings.
*/
Hash(std::string_view s, HashType type, bool isSRI);
Hash(std::string_view s, HashAlgorithm algo, bool isSRI);
public:
/**
@ -103,7 +104,7 @@ public:
/**
* Returns the length of a base-16 representation of this hash.
*/
size_t base16Len() const
[[nodiscard]] size_t base16Len() const
{
return hashSize * 2;
}
@ -111,7 +112,7 @@ public:
/**
* Returns the length of a base-32 representation of this hash.
*/
size_t base32Len() const
[[nodiscard]] size_t base32Len() const
{
return (hashSize * 8 - 1) / 5 + 1;
}
@ -119,24 +120,24 @@ public:
/**
* Returns the length of a base-64 representation of this hash.
*/
size_t base64Len() const
[[nodiscard]] size_t base64Len() const
{
return ((4 * hashSize / 3) + 3) & ~3;
}
/**
* Return a string representation of the hash, in base-16, base-32
* or base-64. By default, this is prefixed by the hash type
* or base-64. By default, this is prefixed by the hash algo
* (e.g. "sha256:").
*/
std::string to_string(HashFormat hashFormat, bool includeType) const;
[[nodiscard]] std::string to_string(HashFormat hashFormat, bool includeAlgo) const;
std::string gitRev() const
[[nodiscard]] std::string gitRev() const
{
return to_string(HashFormat::Base16, false);
}
std::string gitShortRev() const
[[nodiscard]] std::string gitShortRev() const
{
return std::string(to_string(HashFormat::Base16, false), 0, 7);
}
@ -147,7 +148,7 @@ public:
/**
* Helper that defaults empty hashes to the 0 hash.
*/
Hash newHashAllowEmpty(std::string_view hashStr, std::optional<HashType> ht);
Hash newHashAllowEmpty(std::string_view hashStr, std::optional<HashAlgorithm> ha);
/**
* Print a hash in base-16 if it's MD5, or base-32 otherwise.
@ -157,24 +158,21 @@ std::string printHash16or32(const Hash & hash);
/**
* Compute the hash of the given string.
*/
Hash hashString(HashType ht, std::string_view s);
Hash hashString(HashAlgorithm ha, std::string_view s);
/**
* Compute the hash of the given file, hashing its contents directly.
*
* (Metadata, such as the executable permission bit, is ignored.)
*/
Hash hashFile(HashType ht, const Path & path);
Hash hashFile(HashAlgorithm ha, const Path & path);
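// Usage sketch for the renamed API (illustrative):
//
//     auto h = hashString(HashAlgorithm::SHA256, "hello");
//     auto s = h.to_string(HashFormat::Nix32, true);   // "sha256:..."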
/**
* Compute the hash of the given path, serializing as a Nix Archive and
* then hashing that.
* The final hash and the number of bytes digested.
*
* The hash is defined as (essentially) hashString(ht, dumpPath(path)).
* @todo Convert to proper struct
*/
typedef std::pair<Hash, uint64_t> HashResult;
HashResult hashPath(HashType ht, const Path & path,
PathFilter & filter = defaultPathFilter);
/**
* Compress a hash to the specified number of bytes by cyclically
@ -200,17 +198,17 @@ std::string_view printHashFormat(HashFormat hashFormat);
/**
* Parse a string representing a hash type.
*/
HashType parseHashType(std::string_view s);
HashAlgorithm parseHashAlgo(std::string_view s);
/**
* Will return nothing on parse error
*/
std::optional<HashType> parseHashTypeOpt(std::string_view s);
std::optional<HashAlgorithm> parseHashAlgoOpt(std::string_view s);
/**
* And the reverse.
*/
std::string_view printHashType(HashType ht);
std::string_view printHashAlgo(HashAlgorithm ha);
union Ctx;
@ -223,12 +221,12 @@ struct AbstractHashSink : virtual Sink
class HashSink : public BufferedSink, public AbstractHashSink
{
private:
HashType ht;
HashAlgorithm ha;
Ctx * ctx;
uint64_t bytes;
public:
HashSink(HashType ht);
HashSink(HashAlgorithm ha);
HashSink(const HashSink & h);
~HashSink();
void writeUnbuffered(std::string_view data) override;


@ -78,20 +78,29 @@ namespace nlohmann {
*/
template<typename T>
struct adl_serializer<std::optional<T>> {
static std::optional<T> from_json(const json & json) {
/**
* @brief Convert a JSON type to an `optional<T>` treating
* `null` as `std::nullopt`.
*/
static void from_json(const json & json, std::optional<T> & t) {
static_assert(
nix::json_avoids_null<T>::value,
"null is already in use for underlying type's JSON");
return json.is_null()
t = json.is_null()
? std::nullopt
: std::optional { adl_serializer<T>::from_json(json) };
: std::make_optional(json.template get<T>());
}
static void to_json(json & json, std::optional<T> t) {
/**
* @brief Convert an optional type to a JSON type treating `std::nullopt`
* as `null`.
*/
static void to_json(json & json, const std::optional<T> & t) {
static_assert(
nix::json_avoids_null<T>::value,
"null is already in use for underlying type's JSON");
if (t)
adl_serializer<T>::to_json(json, *t);
json = *t;
else
json = nullptr;
}
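// Round-trip sketch (illustrative): with the specialisation above,
// std::nullopt maps to JSON null and back.
//
//     nlohmann::json j = std::optional<std::string>{};   // -> null
//     auto back = j.get<std::optional<std::string>>();   // -> std::nullopt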


@ -23,6 +23,7 @@ typedef enum {
actQueryPathInfo = 109,
actPostBuildHook = 110,
actBuildWaiting = 111,
actFetchTree = 112,
} ActivityType;
typedef enum {
@ -34,6 +35,7 @@ typedef enum {
resProgress = 105,
resSetExpected = 106,
resPostBuildLogLine = 107,
resFetchStatus = 108,
} ResultType;
typedef uint64_t ActivityId;


@ -121,4 +121,60 @@ CanonPath MemorySourceAccessor::addFile(CanonPath path, std::string && contents)
return path;
}
using File = MemorySourceAccessor::File;
void MemorySink::createDirectory(const Path & path)
{
auto * f = dst.open(CanonPath{path}, File { File::Directory { } });
if (!f)
throw Error("file '%s' cannot be made because some parent file is not a directory", path);
if (!std::holds_alternative<File::Directory>(f->raw))
throw Error("file '%s' is not a directory", path);
};
void MemorySink::createRegularFile(const Path & path)
{
auto * f = dst.open(CanonPath{path}, File { File::Regular {} });
if (!f)
throw Error("file '%s' cannot be made because some parent file is not a directory", path);
if (!(r = std::get_if<File::Regular>(&f->raw)))
throw Error("file '%s' is not a regular file", path);
}
void MemorySink::closeRegularFile()
{
r = nullptr;
}
void MemorySink::isExecutable()
{
assert(r);
r->executable = true;
}
void MemorySink::preallocateContents(uint64_t len)
{
assert(r);
r->contents.reserve(len);
}
void MemorySink::receiveContents(std::string_view data)
{
assert(r);
r->contents += data;
}
void MemorySink::createSymlink(const Path & path, const std::string & target)
{
auto * f = dst.open(CanonPath{path}, File { File::Symlink { } });
if (!f)
throw Error("file '%s' cannot be made because some parent file is not a directory", path);
if (auto * s = std::get_if<File::Symlink>(&f->raw))
s->target = target;
else
throw Error("file '%s' is not a symbolic link", path);
}
}
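// Usage sketch (illustrative, not part of this change): unpacking a NAR
// stream into the in-memory filesystem via the sink above; `narSource` is
// assumed to be any nix::Source producing NAR data.
//
//     MemorySourceAccessor accessor;
//     MemorySink sink { accessor };
//     parseDump(sink, narSource);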


@ -1,4 +1,5 @@
#include "source-accessor.hh"
#include "fs-sink.hh"
#include "variant-wrapper.hh"
namespace nix {
@ -71,4 +72,28 @@ struct MemorySourceAccessor : virtual SourceAccessor
CanonPath addFile(CanonPath path, std::string && contents);
};
/**
* Write to a `MemorySourceAccessor` at the given path
*/
struct MemorySink : ParseSink
{
MemorySourceAccessor & dst;
MemorySink(MemorySourceAccessor & dst) : dst(dst) { }
void createDirectory(const Path & path) override;
void createRegularFile(const Path & path) override;
void receiveContents(std::string_view data) override;
void isExecutable() override;
void closeRegularFile() override;
void createSymlink(const Path & path, const std::string & target) override;
void preallocateContents(uint64_t size) override;
private:
MemorySourceAccessor::File::Regular * r;
};
}


@ -1,5 +1,8 @@
#include "posix-source-accessor.hh"
#include "signals.hh"
#include "sync.hh"
#include <unordered_map>
namespace nix {
@ -8,9 +11,9 @@ void PosixSourceAccessor::readFile(
Sink & sink,
std::function<void(uint64_t)> sizeCallback)
{
// FIXME: add O_NOFOLLOW since symlinks should be resolved by the
// caller?
AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
assertNoSymlinks(path);
AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC | O_NOFOLLOW);
if (!fd)
throw SysError("opening file '%1%'", path);
@ -42,30 +45,55 @@ void PosixSourceAccessor::readFile(
bool PosixSourceAccessor::pathExists(const CanonPath & path)
{
if (auto parent = path.parent()) assertNoSymlinks(*parent);
return nix::pathExists(path.abs());
}
std::optional<struct stat> PosixSourceAccessor::cachedLstat(const CanonPath & path)
{
static Sync<std::unordered_map<CanonPath, std::optional<struct stat>>> _cache;
{
auto cache(_cache.lock());
auto i = cache->find(path);
if (i != cache->end()) return i->second;
}
std::optional<struct stat> st{std::in_place};
if (::lstat(path.c_str(), &*st)) {
if (errno == ENOENT || errno == ENOTDIR)
st.reset();
else
throw SysError("getting status of '%s'", showPath(path));
}
auto cache(_cache.lock());
if (cache->size() >= 16384) cache->clear();
cache->emplace(path, st);
return st;
}
std::optional<SourceAccessor::Stat> PosixSourceAccessor::maybeLstat(const CanonPath & path)
{
struct stat st;
if (::lstat(path.c_str(), &st)) {
if (errno == ENOENT) return std::nullopt;
throw SysError("getting status of '%s'", showPath(path));
}
mtime = std::max(mtime, st.st_mtime);
if (auto parent = path.parent()) assertNoSymlinks(*parent);
auto st = cachedLstat(path);
if (!st) return std::nullopt;
mtime = std::max(mtime, st->st_mtime);
return Stat {
.type =
S_ISREG(st.st_mode) ? tRegular :
S_ISDIR(st.st_mode) ? tDirectory :
S_ISLNK(st.st_mode) ? tSymlink :
S_ISREG(st->st_mode) ? tRegular :
S_ISDIR(st->st_mode) ? tDirectory :
S_ISLNK(st->st_mode) ? tSymlink :
tMisc,
.fileSize = S_ISREG(st.st_mode) ? std::optional<uint64_t>(st.st_size) : std::nullopt,
.isExecutable = S_ISREG(st.st_mode) && st.st_mode & S_IXUSR,
.fileSize = S_ISREG(st->st_mode) ? std::optional<uint64_t>(st->st_size) : std::nullopt,
.isExecutable = S_ISREG(st->st_mode) && st->st_mode & S_IXUSR,
};
}
SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath & path)
{
assertNoSymlinks(path);
DirEntries res;
for (auto & entry : nix::readDirectory(path.abs())) {
std::optional<Type> type;
@ -81,6 +109,7 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath &
std::string PosixSourceAccessor::readLink(const CanonPath & path)
{
if (auto parent = path.parent()) assertNoSymlinks(*parent);
return nix::readLink(path.abs());
}
@ -89,4 +118,14 @@ std::optional<CanonPath> PosixSourceAccessor::getPhysicalPath(const CanonPath &
return path;
}
void PosixSourceAccessor::assertNoSymlinks(CanonPath path)
{
while (!path.isRoot()) {
auto st = cachedLstat(path);
if (st && S_ISLNK(st->st_mode))
throw Error("path '%s' is a symlink", showPath(path));
path.pop();
}
}
}
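For orientation only (not part of this diff), the new symlink checks behave roughly as follows; the paths are hypothetical and includes are abbreviated:
#include "posix-source-accessor.hh"
#include <string>
void example()
{
    nix::PosixSourceAccessor accessor;
    // Suppose /tmp/link -> /tmp/real and /tmp/real/file.txt exists (hypothetical).
    try {
        // The parent /tmp/link is a symlink, so assertNoSymlinks() rejects it.
        accessor.readFile(nix::CanonPath("/tmp/link/file.txt"));
    } catch (nix::Error & e) {
        // error: path '/tmp/link' is a symlink
    }
    // The symlink-free path is still fine (and is now opened with O_NOFOLLOW).
    std::string contents = accessor.readFile(nix::CanonPath("/tmp/real/file.txt"));
}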

View file

@ -7,7 +7,7 @@ namespace nix {
/**
* A source accessor that uses the Unix filesystem.
*/
struct PosixSourceAccessor : SourceAccessor
struct PosixSourceAccessor : virtual SourceAccessor
{
/**
* The most recent mtime seen by lstat(). This is a hack to
@ -29,6 +29,15 @@ struct PosixSourceAccessor : SourceAccessor
std::string readLink(const CanonPath & path) override;
std::optional<CanonPath> getPhysicalPath(const CanonPath & path) override;
private:
/**
* Throw an error if `path` or any of its ancestors are symlinks.
*/
void assertNoSymlinks(CanonPath path);
std::optional<struct stat> cachedLstat(const CanonPath & path);
};
}

View file

@ -23,8 +23,8 @@ static void search(
static bool isBase32[256];
std::call_once(initialised, [](){
for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
for (unsigned int i = 0; i < base32Chars.size(); ++i)
isBase32[(unsigned char) base32Chars[i]] = true;
for (unsigned int i = 0; i < nix32Chars.size(); ++i)
isBase32[(unsigned char) nix32Chars[i]] = true;
});
for (size_t i = 0; i + refLength <= s.size(); ) {
@ -110,8 +110,8 @@ void RewritingSink::flush()
prev.clear();
}
HashModuloSink::HashModuloSink(HashType ht, const std::string & modulus)
: hashSink(ht)
HashModuloSink::HashModuloSink(HashAlgorithm ha, const std::string & modulus)
: hashSink(ha)
, rewritingSink(modulus, std::string(modulus.size(), 0), hashSink)
{
}

View file

@ -46,7 +46,7 @@ struct HashModuloSink : AbstractHashSink
HashSink hashSink;
RewritingSink rewritingSink;
HashModuloSink(HashType ht, const std::string & modulus);
HashModuloSink(HashAlgorithm ha, const std::string & modulus);
void operator () (std::string_view data) override;

View file

@ -74,6 +74,10 @@ void Source::operator () (char * data, size_t len)
}
}
void Source::operator () (std::string_view data)
{
(*this)((char *)data.data(), data.size());
}
void Source::drainInto(Sink & sink)
{
@ -444,7 +448,7 @@ Error readError(Source & source)
auto msg = readString(source);
ErrorInfo info {
.level = level,
.msg = hintformat(fmt("%s", msg)),
.msg = hintfmt(msg),
};
auto havePos = readNum<size_t>(source);
assert(havePos == 0);
@ -453,7 +457,7 @@ Error readError(Source & source)
havePos = readNum<size_t>(source);
assert(havePos == 0);
info.traces.push_back(Trace {
.hint = hintformat(fmt("%s", readString(source)))
.hint = hintfmt(readString(source))
});
}
return Error(std::move(info));

View file

@ -73,6 +73,7 @@ struct Source
* an error if it is not going to be available.
*/
void operator () (char * data, size_t len);
void operator () (std::string_view data);
/**
* Store up to len in the buffer pointed to by data, and

View file

@ -7,6 +7,7 @@ static std::atomic<size_t> nextNumber{0};
SourceAccessor::SourceAccessor()
: number(++nextNumber)
, displayPrefix{"«unknown»"}
{
}
@ -38,11 +39,11 @@ void SourceAccessor::readFile(
}
Hash SourceAccessor::hashPath(
const CanonPath & path,
PathFilter & filter,
HashType ht)
const CanonPath & path,
PathFilter & filter,
HashAlgorithm ha)
{
HashSink sink(ht);
HashSink sink(ha);
dumpPath(path, sink, filter);
return sink.finish().first;
}
@ -55,9 +56,15 @@ SourceAccessor::Stat SourceAccessor::lstat(const CanonPath & path)
throw Error("path '%s' does not exist", showPath(path));
}
void SourceAccessor::setPathDisplay(std::string displayPrefix, std::string displaySuffix)
{
this->displayPrefix = std::move(displayPrefix);
this->displaySuffix = std::move(displaySuffix);
}
std::string SourceAccessor::showPath(const CanonPath & path)
{
return path.abs();
return displayPrefix + path.abs() + displaySuffix;
}
}
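An illustrative aside (not in the diff): with the new `setPathDisplay`, messages can show a friendlier origin than a bare canonical path. The prefix below is made up:
#include "posix-source-accessor.hh"
#include <string>
void example()
{
    nix::PosixSourceAccessor accessor;
    // Decorate paths shown to the user; the default prefix is "«unknown»".
    accessor.setPathDisplay("«example-source»");
    // showPath() now yields "«example-source»/default.nix".
    std::string shown = accessor.showPath(nix::CanonPath("/default.nix"));
}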

View file

@ -17,6 +17,8 @@ struct SourceAccessor
{
const size_t number;
std::string displayPrefix, displaySuffix;
SourceAccessor();
virtual ~SourceAccessor()
@ -24,6 +26,13 @@ struct SourceAccessor
/**
* Return the contents of a file as a string.
*
* @note Unlike Unix, this method should *not* follow symlinks. Nix
* by default wants to manipulate symlinks explicitly, and not
* implicitly follow them, as they are frequently untrusted user data
* and thus may point to arbitrary locations. Acting on the targets
* of symlinks should only occasionally be done, and only
* with care.
*/
virtual std::string readFile(const CanonPath & path);
@ -32,7 +41,10 @@ struct SourceAccessor
* called with the size of the file before any data is written to
* the sink.
*
* Note: subclasses of `SourceAccessor` need to implement at least
* @note Like the other `readFile`, this method should *not* follow
* symlinks.
*
* @note subclasses of `SourceAccessor` need to implement at least
* one of the `readFile()` variants.
*/
virtual void readFile(
@ -85,6 +97,9 @@ struct SourceAccessor
typedef std::map<std::string, DirEntry> DirEntries;
/**
* @note Like `readFile`, this method should *not* follow symlinks.
*/
virtual DirEntries readDirectory(const CanonPath & path) = 0;
virtual std::string readLink(const CanonPath & path) = 0;
@ -95,9 +110,9 @@ struct SourceAccessor
PathFilter & filter = defaultPathFilter);
Hash hashPath(
const CanonPath & path,
PathFilter & filter = defaultPathFilter,
HashType ht = htSHA256);
const CanonPath & path,
PathFilter & filter = defaultPathFilter,
HashAlgorithm ha = HashAlgorithm::SHA256);
/**
* Return a corresponding path in the root filesystem, if
@ -117,6 +132,8 @@ struct SourceAccessor
return number < x.number;
}
void setPathDisplay(std::string displayPrefix, std::string displaySuffix = "");
virtual std::string showPath(const CanonPath & path);
};
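A minimal sketch (not part of the diff) of the renamed `hashPath` parameters, using the defaults declared above; the path is hypothetical:
#include "posix-source-accessor.hh"
#include "hash.hh"
void example()
{
    nix::PosixSourceAccessor accessor;
    // Hash a tree with the new HashAlgorithm enum (SHA-256 is also the default).
    nix::Hash h = accessor.hashPath(
        nix::CanonPath("/some/checkout"),
        nix::defaultPathFilter,
        nix::HashAlgorithm::SHA256);
    // e.g. print it with h.to_string(nix::HashFormat::Base16, true)
}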

View file

@ -53,6 +53,7 @@ TarArchive::TarArchive(Source & source, bool raw) : buffer(65536)
archive_read_support_format_raw(archive);
archive_read_support_format_empty(archive);
}
archive_read_set_option(archive, NULL, "mac-ext", NULL);
check(archive_read_open(archive, (void *)this, callback_open, callback_read, callback_close), "Failed to open archive (%s)");
}
@ -63,6 +64,7 @@ TarArchive::TarArchive(const Path & path)
archive_read_support_filter_all(archive);
archive_read_support_format_all(archive);
archive_read_set_option(archive, NULL, "mac-ext", NULL);
check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s");
}

View file

@ -1,162 +0,0 @@
#include "canon-path.hh"
#include <gtest/gtest.h>
namespace nix {
TEST(CanonPath, basic) {
{
CanonPath p("/");
ASSERT_EQ(p.abs(), "/");
ASSERT_EQ(p.rel(), "");
ASSERT_EQ(p.baseName(), std::nullopt);
ASSERT_EQ(p.dirOf(), std::nullopt);
ASSERT_FALSE(p.parent());
}
{
CanonPath p("/foo//");
ASSERT_EQ(p.abs(), "/foo");
ASSERT_EQ(p.rel(), "foo");
ASSERT_EQ(*p.baseName(), "foo");
ASSERT_EQ(*p.dirOf(), ""); // FIXME: do we want this?
ASSERT_EQ(p.parent()->abs(), "/");
}
{
CanonPath p("foo/bar");
ASSERT_EQ(p.abs(), "/foo/bar");
ASSERT_EQ(p.rel(), "foo/bar");
ASSERT_EQ(*p.baseName(), "bar");
ASSERT_EQ(*p.dirOf(), "/foo");
ASSERT_EQ(p.parent()->abs(), "/foo");
}
{
CanonPath p("foo//bar/");
ASSERT_EQ(p.abs(), "/foo/bar");
ASSERT_EQ(p.rel(), "foo/bar");
ASSERT_EQ(*p.baseName(), "bar");
ASSERT_EQ(*p.dirOf(), "/foo");
}
}
TEST(CanonPath, pop) {
CanonPath p("foo/bar/x");
ASSERT_EQ(p.abs(), "/foo/bar/x");
p.pop();
ASSERT_EQ(p.abs(), "/foo/bar");
p.pop();
ASSERT_EQ(p.abs(), "/foo");
p.pop();
ASSERT_EQ(p.abs(), "/");
}
TEST(CanonPath, removePrefix) {
CanonPath p1("foo/bar");
CanonPath p2("foo/bar/a/b/c");
ASSERT_EQ(p2.removePrefix(p1).abs(), "/a/b/c");
ASSERT_EQ(p1.removePrefix(p1).abs(), "/");
ASSERT_EQ(p1.removePrefix(CanonPath("/")).abs(), "/foo/bar");
}
TEST(CanonPath, iter) {
{
CanonPath p("a//foo/bar//");
std::vector<std::string_view> ss;
for (auto & c : p) ss.push_back(c);
ASSERT_EQ(ss, std::vector<std::string_view>({"a", "foo", "bar"}));
}
{
CanonPath p("/");
std::vector<std::string_view> ss;
for (auto & c : p) ss.push_back(c);
ASSERT_EQ(ss, std::vector<std::string_view>());
}
}
TEST(CanonPath, concat) {
{
CanonPath p1("a//foo/bar//");
CanonPath p2("xyzzy/bla");
ASSERT_EQ((p1 + p2).abs(), "/a/foo/bar/xyzzy/bla");
}
{
CanonPath p1("/");
CanonPath p2("/a/b");
ASSERT_EQ((p1 + p2).abs(), "/a/b");
}
{
CanonPath p1("/a/b");
CanonPath p2("/");
ASSERT_EQ((p1 + p2).abs(), "/a/b");
}
{
CanonPath p("/foo/bar");
ASSERT_EQ((p + "x").abs(), "/foo/bar/x");
}
{
CanonPath p("/");
ASSERT_EQ((p + "foo" + "bar").abs(), "/foo/bar");
}
}
TEST(CanonPath, within) {
ASSERT_TRUE(CanonPath("foo").isWithin(CanonPath("foo")));
ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("bar")));
ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("fo")));
ASSERT_TRUE(CanonPath("foo/bar").isWithin(CanonPath("foo")));
ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("foo/bar")));
ASSERT_TRUE(CanonPath("/foo/bar/default.nix").isWithin(CanonPath("/")));
ASSERT_TRUE(CanonPath("/").isWithin(CanonPath("/")));
}
TEST(CanonPath, sort) {
ASSERT_FALSE(CanonPath("foo") < CanonPath("foo"));
ASSERT_TRUE (CanonPath("foo") < CanonPath("foo/bar"));
ASSERT_TRUE (CanonPath("foo/bar") < CanonPath("foo!"));
ASSERT_FALSE(CanonPath("foo!") < CanonPath("foo"));
ASSERT_TRUE (CanonPath("foo") < CanonPath("foo!"));
}
TEST(CanonPath, allowed) {
std::set<CanonPath> allowed {
CanonPath("foo/bar"),
CanonPath("foo!"),
CanonPath("xyzzy"),
CanonPath("a/b/c"),
};
ASSERT_TRUE (CanonPath("foo/bar").isAllowed(allowed));
ASSERT_TRUE (CanonPath("foo/bar/bla").isAllowed(allowed));
ASSERT_TRUE (CanonPath("foo").isAllowed(allowed));
ASSERT_FALSE(CanonPath("bar").isAllowed(allowed));
ASSERT_FALSE(CanonPath("bar/a").isAllowed(allowed));
ASSERT_TRUE (CanonPath("a").isAllowed(allowed));
ASSERT_TRUE (CanonPath("a/b").isAllowed(allowed));
ASSERT_TRUE (CanonPath("a/b/c").isAllowed(allowed));
ASSERT_TRUE (CanonPath("a/b/c/d").isAllowed(allowed));
ASSERT_TRUE (CanonPath("a/b/c/d/e").isAllowed(allowed));
ASSERT_FALSE(CanonPath("a/b/a").isAllowed(allowed));
ASSERT_FALSE(CanonPath("a/b/d").isAllowed(allowed));
ASSERT_FALSE(CanonPath("aaa").isAllowed(allowed));
ASSERT_FALSE(CanonPath("zzz").isAllowed(allowed));
ASSERT_TRUE (CanonPath("/").isAllowed(allowed));
}
TEST(CanonPath, makeRelative) {
CanonPath d("/foo/bar");
ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar")), ".");
ASSERT_EQ(d.makeRelative(CanonPath("/foo")), "..");
ASSERT_EQ(d.makeRelative(CanonPath("/")), "../..");
ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar/xyzzy")), "xyzzy");
ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar/xyzzy/bla")), "xyzzy/bla");
ASSERT_EQ(d.makeRelative(CanonPath("/foo/xyzzy/bla")), "../xyzzy/bla");
ASSERT_EQ(d.makeRelative(CanonPath("/xyzzy/bla")), "../../xyzzy/bla");
}
}

View file

@ -1,108 +0,0 @@
#pragma once
///@file
#include <gtest/gtest.h>
#include "types.hh"
#include "environment-variables.hh"
namespace nix {
/**
* The path to the `unit-test-data` directory. See the contributing
* guide in the manual for further details.
*/
static Path getUnitTestData() {
return getEnv("_NIX_TEST_UNIT_DATA").value();
}
/**
* Whether we should update "golden masters" instead of running tests
* against them. See the contributing guide in the manual for further
* details.
*/
static bool testAccept() {
return getEnv("_NIX_TEST_ACCEPT") == "1";
}
/**
* Mixin class for writing characterization tests
*/
class CharacterizationTest : public virtual ::testing::Test
{
protected:
/**
* Where the "golden master" for this characterization test is
* located. It should not be shared with any other test.
*/
virtual Path goldenMaster(PathView testStem) const = 0;
public:
/**
* Golden test for reading
*
* @param test hook that takes the contents of the file and does the
* actual work
*/
void readTest(PathView testStem, auto && test)
{
auto file = goldenMaster(testStem);
if (testAccept())
{
GTEST_SKIP()
<< "Cannot read golden master "
<< file
<< "because another test is also updating it";
}
else
{
test(readFile(file));
}
}
/**
* Golden test for writing
*
* @param test hook that produces contents of the file and does the
* actual work
*/
void writeTest(
PathView testStem, auto && test, auto && readFile2, auto && writeFile2)
{
auto file = goldenMaster(testStem);
auto got = test();
if (testAccept())
{
createDirs(dirOf(file));
writeFile2(file, got);
GTEST_SKIP()
<< "Updating golden master "
<< file;
}
else
{
decltype(got) expected = readFile2(file);
ASSERT_EQ(got, expected);
}
}
/**
* Specialize to `std::string`
*/
void writeTest(PathView testStem, auto && test)
{
writeTest(
testStem, test,
[](const Path & f) -> std::string {
return readFile(f);
},
[](const Path & f, const std::string & c) {
return writeFile(f, c);
});
}
};
}
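For orientation (this helper is being moved, not redesigned), a test built on it might look roughly like this; the fixture name, data directory, and file name are invented:
#include "characterization.hh"
namespace nix {
class ExampleCharacterizationTest : public CharacterizationTest
{
    Path unitTestData = getUnitTestData() + "/example";
    Path goldenMaster(PathView testStem) const override
    {
        // Golden files live under $_NIX_TEST_UNIT_DATA/example/.
        return unitTestData + "/" + std::string(testStem);
    }
};
TEST_F(ExampleCharacterizationTest, roundTrip)
{
    // Compare the produced string against example/hello.txt, or regenerate
    // the golden file when _NIX_TEST_ACCEPT=1 is set.
    writeTest("hello.txt", []() -> std::string {
        return "hello\n";
    });
}
}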

View file

@ -1,54 +0,0 @@
#include "chunked-vector.hh"
#include <gtest/gtest.h>
namespace nix {
TEST(ChunkedVector, InitEmpty) {
auto v = ChunkedVector<int, 2>(100);
ASSERT_EQ(v.size(), 0);
}
TEST(ChunkedVector, GrowsCorrectly) {
auto v = ChunkedVector<int, 2>(100);
for (auto i = 1; i < 20; i++) {
v.add(i);
ASSERT_EQ(v.size(), i);
}
}
TEST(ChunkedVector, AddAndGet) {
auto v = ChunkedVector<int, 2>(100);
for (auto i = 1; i < 20; i++) {
auto [i2, idx] = v.add(i);
auto & i3 = v[idx];
ASSERT_EQ(i, i2);
ASSERT_EQ(&i2, &i3);
}
}
TEST(ChunkedVector, ForEach) {
auto v = ChunkedVector<int, 2>(100);
for (auto i = 1; i < 20; i++) {
v.add(i);
}
int count = 0;
v.forEach([&count](int elt) {
count++;
});
ASSERT_EQ(count, v.size());
}
TEST(ChunkedVector, OverflowOK) {
// Similar to AddAndGet, but intentionally use a small
// initial ChunkedVector to force it to overflow
auto v = ChunkedVector<int, 2>(2);
for (auto i = 1; i < 20; i++) {
auto [i2, idx] = v.add(i);
auto & i3 = v[idx];
ASSERT_EQ(i, i2);
ASSERT_EQ(&i2, &i3);
}
}
}

View file

@ -1,70 +0,0 @@
#include "closure.hh"
#include <gtest/gtest.h>
namespace nix {
using namespace std;
map<string, set<string>> testGraph = {
{ "A", { "B", "C", "G" } },
{ "B", { "A" } }, // Loops back to A
{ "C", { "F" } }, // Indirect reference
{ "D", { "A" } }, // Not reachable, but has backreferences
{ "E", {} }, // Just not reachable
{ "F", {} },
{ "G", { "G" } }, // Self reference
};
TEST(closure, correctClosure) {
set<string> aClosure;
set<string> expectedClosure = {"A", "B", "C", "F", "G"};
computeClosure<string>(
{"A"},
aClosure,
[&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
promise<set<string>> promisedNodes;
promisedNodes.set_value(testGraph[currentNode]);
processEdges(promisedNodes);
}
);
ASSERT_EQ(aClosure, expectedClosure);
}
TEST(closure, properlyHandlesDirectExceptions) {
struct TestExn {};
set<string> aClosure;
EXPECT_THROW(
computeClosure<string>(
{"A"},
aClosure,
[&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
throw TestExn();
}
),
TestExn
);
}
TEST(closure, properlyHandlesExceptionsInPromise) {
struct TestExn {};
set<string> aClosure;
EXPECT_THROW(
computeClosure<string>(
{"A"},
aClosure,
[&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
promise<set<string>> promise;
try {
throw TestExn();
} catch (...) {
promise.set_exception(std::current_exception());
}
processEdges(promise);
}
),
TestExn
);
}
}

View file

@ -1,96 +0,0 @@
#include "compression.hh"
#include <gtest/gtest.h>
namespace nix {
/* ----------------------------------------------------------------------------
* compress / decompress
* --------------------------------------------------------------------------*/
TEST(compress, compressWithUnknownMethod) {
ASSERT_THROW(compress("invalid-method", "something-to-compress"), UnknownCompressionMethod);
}
TEST(compress, noneMethodDoesNothingToTheInput) {
auto o = compress("none", "this-is-a-test");
ASSERT_EQ(o, "this-is-a-test");
}
TEST(decompress, decompressNoneCompressed) {
auto method = "none";
auto str = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
auto o = decompress(method, str);
ASSERT_EQ(o, str);
}
TEST(decompress, decompressEmptyCompressed) {
// Empty-method decompression used e.g. by S3 store
// (Content-Encoding == "").
auto method = "";
auto str = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
auto o = decompress(method, str);
ASSERT_EQ(o, str);
}
TEST(decompress, decompressXzCompressed) {
auto method = "xz";
auto str = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
auto o = decompress(method, compress(method, str));
ASSERT_EQ(o, str);
}
TEST(decompress, decompressBzip2Compressed) {
auto method = "bzip2";
auto str = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
auto o = decompress(method, compress(method, str));
ASSERT_EQ(o, str);
}
TEST(decompress, decompressBrCompressed) {
auto method = "br";
auto str = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
auto o = decompress(method, compress(method, str));
ASSERT_EQ(o, str);
}
TEST(decompress, decompressInvalidInputThrowsCompressionError) {
auto method = "bzip2";
auto str = "this is a string that does not qualify as valid bzip2 data";
ASSERT_THROW(decompress(method, str), CompressionError);
}
/* ----------------------------------------------------------------------------
* compression sinks
* --------------------------------------------------------------------------*/
TEST(makeCompressionSink, noneSinkDoesNothingToInput) {
StringSink strSink;
auto inputString = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
auto sink = makeCompressionSink("none", strSink);
(*sink)(inputString);
sink->finish();
ASSERT_STREQ(strSink.s.c_str(), inputString);
}
TEST(makeCompressionSink, compressAndDecompress) {
StringSink strSink;
auto inputString = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
auto decompressionSink = makeDecompressionSink("bzip2", strSink);
auto sink = makeCompressionSink("bzip2", *decompressionSink);
(*sink)(inputString);
sink->finish();
decompressionSink->finish();
ASSERT_STREQ(strSink.s.c_str(), inputString);
}
}

View file

@ -1,295 +0,0 @@
#include "config.hh"
#include "args.hh"
#include <sstream>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace nix {
/* ----------------------------------------------------------------------------
* Config
* --------------------------------------------------------------------------*/
TEST(Config, setUndefinedSetting) {
Config config;
ASSERT_EQ(config.set("undefined-key", "value"), false);
}
TEST(Config, setDefinedSetting) {
Config config;
std::string value;
Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
ASSERT_EQ(config.set("name-of-the-setting", "value"), true);
}
TEST(Config, getDefinedSetting) {
Config config;
std::string value;
std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
config.getSettings(settings, /* overriddenOnly = */ false);
const auto iter = settings.find("name-of-the-setting");
ASSERT_NE(iter, settings.end());
ASSERT_EQ(iter->second.value, "");
ASSERT_EQ(iter->second.description, "description\n");
}
TEST(Config, getDefinedOverriddenSettingNotSet) {
Config config;
std::string value;
std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
config.getSettings(settings, /* overriddenOnly = */ true);
const auto e = settings.find("name-of-the-setting");
ASSERT_EQ(e, settings.end());
}
TEST(Config, getDefinedSettingSet1) {
Config config;
std::string value;
std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> setting{&config, value, "name-of-the-setting", "description"};
setting.assign("value");
config.getSettings(settings, /* overriddenOnly = */ false);
const auto iter = settings.find("name-of-the-setting");
ASSERT_NE(iter, settings.end());
ASSERT_EQ(iter->second.value, "value");
ASSERT_EQ(iter->second.description, "description\n");
}
TEST(Config, getDefinedSettingSet2) {
Config config;
std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
ASSERT_TRUE(config.set("name-of-the-setting", "value"));
config.getSettings(settings, /* overriddenOnly = */ false);
const auto e = settings.find("name-of-the-setting");
ASSERT_NE(e, settings.end());
ASSERT_EQ(e->second.value, "value");
ASSERT_EQ(e->second.description, "description\n");
}
TEST(Config, addSetting) {
class TestSetting : public AbstractSetting {
public:
TestSetting() : AbstractSetting("test", "test", {}) {}
void set(const std::string & value, bool append) override {}
std::string to_string() const override { return {}; }
bool isAppendable() override { return false; }
};
Config config;
TestSetting setting;
ASSERT_FALSE(config.set("test", "value"));
config.addSetting(&setting);
ASSERT_TRUE(config.set("test", "value"));
ASSERT_FALSE(config.set("extra-test", "value"));
}
TEST(Config, withInitialValue) {
const StringMap initials = {
{ "key", "value" },
};
Config config(initials);
{
std::map<std::string, Config::SettingInfo> settings;
config.getSettings(settings, /* overriddenOnly = */ false);
ASSERT_EQ(settings.find("key"), settings.end());
}
Setting<std::string> setting{&config, "default-value", "key", "description"};
{
std::map<std::string, Config::SettingInfo> settings;
config.getSettings(settings, /* overriddenOnly = */ false);
ASSERT_EQ(settings["key"].value, "value");
}
}
TEST(Config, resetOverridden) {
Config config;
config.resetOverridden();
}
TEST(Config, resetOverriddenWithSetting) {
Config config;
Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
{
std::map<std::string, Config::SettingInfo> settings;
setting.set("foo");
ASSERT_EQ(setting.get(), "foo");
config.getSettings(settings, /* overriddenOnly = */ true);
ASSERT_TRUE(settings.empty());
}
{
std::map<std::string, Config::SettingInfo> settings;
setting.override("bar");
ASSERT_TRUE(setting.overridden);
ASSERT_EQ(setting.get(), "bar");
config.getSettings(settings, /* overriddenOnly = */ true);
ASSERT_FALSE(settings.empty());
}
{
std::map<std::string, Config::SettingInfo> settings;
config.resetOverridden();
ASSERT_FALSE(setting.overridden);
config.getSettings(settings, /* overriddenOnly = */ true);
ASSERT_TRUE(settings.empty());
}
}
TEST(Config, toJSONOnEmptyConfig) {
ASSERT_EQ(Config().toJSON().dump(), "{}");
}
TEST(Config, toJSONOnNonEmptyConfig) {
using nlohmann::literals::operator "" _json;
Config config;
Setting<std::string> setting{
&config,
"",
"name-of-the-setting",
"description",
};
setting.assign("value");
ASSERT_EQ(config.toJSON(),
R"#({
"name-of-the-setting": {
"aliases": [],
"defaultValue": "",
"description": "description\n",
"documentDefault": true,
"value": "value",
"experimentalFeature": null
}
})#"_json);
}
TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) {
using nlohmann::literals::operator "" _json;
Config config;
Setting<std::string> setting{
&config,
"",
"name-of-the-setting",
"description",
{},
true,
Xp::Flakes,
};
setting.assign("value");
ASSERT_EQ(config.toJSON(),
R"#({
"name-of-the-setting": {
"aliases": [],
"defaultValue": "",
"description": "description\n",
"documentDefault": true,
"value": "value",
"experimentalFeature": "flakes"
}
})#"_json);
}
TEST(Config, setSettingAlias) {
Config config;
Setting<std::string> setting{&config, "", "some-int", "best number", { "another-int" }};
ASSERT_TRUE(config.set("some-int", "1"));
ASSERT_EQ(setting.get(), "1");
ASSERT_TRUE(config.set("another-int", "2"));
ASSERT_EQ(setting.get(), "2");
ASSERT_TRUE(config.set("some-int", "3"));
ASSERT_EQ(setting.get(), "3");
}
/* FIXME: The reapplyUnknownSettings method doesn't seem to do anything
* useful (these days). Whenever we add a new setting to Config the
* unknown settings are always considered. In which case is this function
* actually useful? Is there some way to register a Setting without calling
* addSetting? */
TEST(Config, DISABLED_reapplyUnknownSettings) {
Config config;
ASSERT_FALSE(config.set("name-of-the-setting", "unknownvalue"));
Setting<std::string> setting{&config, "default", "name-of-the-setting", "description"};
ASSERT_EQ(setting.get(), "default");
config.reapplyUnknownSettings();
ASSERT_EQ(setting.get(), "unknownvalue");
}
TEST(Config, applyConfigEmpty) {
Config config;
std::map<std::string, Config::SettingInfo> settings;
config.applyConfig("");
config.getSettings(settings);
ASSERT_TRUE(settings.empty());
}
TEST(Config, applyConfigEmptyWithComment) {
Config config;
std::map<std::string, Config::SettingInfo> settings;
config.applyConfig("# just a comment");
config.getSettings(settings);
ASSERT_TRUE(settings.empty());
}
TEST(Config, applyConfigAssignment) {
Config config;
std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
config.applyConfig(
"name-of-the-setting = value-from-file #useful comment\n"
"# name-of-the-setting = foo\n"
);
config.getSettings(settings);
ASSERT_FALSE(settings.empty());
ASSERT_EQ(settings["name-of-the-setting"].value, "value-from-file");
}
TEST(Config, applyConfigWithReassignedSetting) {
Config config;
std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
config.applyConfig(
"name-of-the-setting = first-value\n"
"name-of-the-setting = second-value\n"
);
config.getSettings(settings);
ASSERT_FALSE(settings.empty());
ASSERT_EQ(settings["name-of-the-setting"].value, "second-value");
}
TEST(Config, applyConfigFailsOnMissingIncludes) {
Config config;
std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
ASSERT_THROW(config.applyConfig(
"name-of-the-setting = value-from-file\n"
"# name-of-the-setting = foo\n"
"include /nix/store/does/not/exist.nix"
), Error);
}
TEST(Config, applyConfigInvalidThrows) {
Config config;
ASSERT_THROW(config.applyConfig("value == key"), UsageError);
ASSERT_THROW(config.applyConfig("value "), UsageError);
}
}

View file

@ -1,33 +0,0 @@
#include "git.hh"
#include <gtest/gtest.h>
namespace nix {
TEST(GitLsRemote, parseSymrefLineWithReference) {
auto line = "ref: refs/head/main HEAD";
auto res = git::parseLsRemoteLine(line);
ASSERT_TRUE(res.has_value());
ASSERT_EQ(res->kind, git::LsRemoteRefLine::Kind::Symbolic);
ASSERT_EQ(res->target, "refs/head/main");
ASSERT_EQ(res->reference, "HEAD");
}
TEST(GitLsRemote, parseSymrefLineWithNoReference) {
auto line = "ref: refs/head/main";
auto res = git::parseLsRemoteLine(line);
ASSERT_TRUE(res.has_value());
ASSERT_EQ(res->kind, git::LsRemoteRefLine::Kind::Symbolic);
ASSERT_EQ(res->target, "refs/head/main");
ASSERT_EQ(res->reference, std::nullopt);
}
TEST(GitLsRemote, parseObjectRefLine) {
auto line = "abc123 refs/head/main";
auto res = git::parseLsRemoteLine(line);
ASSERT_TRUE(res.has_value());
ASSERT_EQ(res->kind, git::LsRemoteRefLine::Kind::Object);
ASSERT_EQ(res->target, "abc123");
ASSERT_EQ(res->reference, "refs/head/main");
}
}

View file

@ -1,110 +0,0 @@
#include <regex>
#include <nlohmann/json.hpp>
#include <gtest/gtest.h>
#include <rapidcheck/gtest.h>
#include <hash.hh>
#include "tests/hash.hh"
namespace nix {
/* ----------------------------------------------------------------------------
* hashString
* --------------------------------------------------------------------------*/
TEST(hashString, testKnownMD5Hashes1) {
// values taken from: https://tools.ietf.org/html/rfc1321
auto s1 = "";
auto hash = hashString(HashType::htMD5, s1);
ASSERT_EQ(hash.to_string(HashFormat::Base16, true), "md5:d41d8cd98f00b204e9800998ecf8427e");
}
TEST(hashString, testKnownMD5Hashes2) {
// values taken from: https://tools.ietf.org/html/rfc1321
auto s2 = "abc";
auto hash = hashString(HashType::htMD5, s2);
ASSERT_EQ(hash.to_string(HashFormat::Base16, true), "md5:900150983cd24fb0d6963f7d28e17f72");
}
TEST(hashString, testKnownSHA1Hashes1) {
// values taken from: https://tools.ietf.org/html/rfc3174
auto s = "abc";
auto hash = hashString(HashType::htSHA1, s);
ASSERT_EQ(hash.to_string(HashFormat::Base16, true),"sha1:a9993e364706816aba3e25717850c26c9cd0d89d");
}
TEST(hashString, testKnownSHA1Hashes2) {
// values taken from: https://tools.ietf.org/html/rfc3174
auto s = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
auto hash = hashString(HashType::htSHA1, s);
ASSERT_EQ(hash.to_string(HashFormat::Base16, true),"sha1:84983e441c3bd26ebaae4aa1f95129e5e54670f1");
}
TEST(hashString, testKnownSHA256Hashes1) {
// values taken from: https://tools.ietf.org/html/rfc4634
auto s = "abc";
auto hash = hashString(HashType::htSHA256, s);
ASSERT_EQ(hash.to_string(HashFormat::Base16, true),
"sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad");
}
TEST(hashString, testKnownSHA256Hashes2) {
// values taken from: https://tools.ietf.org/html/rfc4634
auto s = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
auto hash = hashString(HashType::htSHA256, s);
ASSERT_EQ(hash.to_string(HashFormat::Base16, true),
"sha256:248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1");
}
TEST(hashString, testKnownSHA512Hashes1) {
// values taken from: https://tools.ietf.org/html/rfc4634
auto s = "abc";
auto hash = hashString(HashType::htSHA512, s);
ASSERT_EQ(hash.to_string(HashFormat::Base16, true),
"sha512:ddaf35a193617abacc417349ae20413112e6fa4e89a9"
"7ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd"
"454d4423643ce80e2a9ac94fa54ca49f");
}
TEST(hashString, testKnownSHA512Hashes2) {
// values taken from: https://tools.ietf.org/html/rfc4634
auto s = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu";
auto hash = hashString(HashType::htSHA512, s);
ASSERT_EQ(hash.to_string(HashFormat::Base16, true),
"sha512:8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa1"
"7299aeadb6889018501d289e4900f7e4331b99dec4b5433a"
"c7d329eeb6dd26545e96e55b874be909");
}
/* ----------------------------------------------------------------------------
* parseHashFormat, parseHashFormatOpt, printHashFormat
* --------------------------------------------------------------------------*/
TEST(hashFormat, testRoundTripPrintParse) {
for (const HashFormat hashFormat: { HashFormat::Base64, HashFormat::Base32, HashFormat::Base16, HashFormat::SRI}) {
ASSERT_EQ(parseHashFormat(printHashFormat(hashFormat)), hashFormat);
ASSERT_EQ(*parseHashFormatOpt(printHashFormat(hashFormat)), hashFormat);
}
}
TEST(hashFormat, testParseHashFormatOptException) {
ASSERT_EQ(parseHashFormatOpt("sha0042"), std::nullopt);
}
}
namespace rc {
using namespace nix;
Gen<Hash> Arbitrary<Hash>::arbitrary()
{
Hash hash(htSHA1);
for (size_t i = 0; i < hash.hashSize; ++i)
hash.hash[i] = *gen::arbitrary<uint8_t>();
return gen::just(hash);
}
}

View file

@ -1,16 +0,0 @@
#pragma once
///@file
#include <rapidcheck/gen/Arbitrary.h>
#include <hash.hh>
namespace rc {
using namespace nix;
template<>
struct Arbitrary<Hash> {
static Gen<Hash> arbitrary();
};
}

View file

@ -1,66 +0,0 @@
#include "hilite.hh"
#include <gtest/gtest.h>
namespace nix {
/* ----------- tests for fmt.hh -------------------------------------------------*/
TEST(hiliteMatches, noHighlight) {
ASSERT_STREQ(hiliteMatches("Hello, world!", std::vector<std::smatch>(), "(", ")").c_str(), "Hello, world!");
}
TEST(hiliteMatches, simpleHighlight) {
std::string str = "Hello, world!";
std::regex re = std::regex("world");
auto matches = std::vector(std::sregex_iterator(str.begin(), str.end(), re), std::sregex_iterator());
ASSERT_STREQ(
hiliteMatches(str, matches, "(", ")").c_str(),
"Hello, (world)!"
);
}
TEST(hiliteMatches, multipleMatches) {
std::string str = "Hello, world, world, world, world, world, world, Hello!";
std::regex re = std::regex("world");
auto matches = std::vector(std::sregex_iterator(str.begin(), str.end(), re), std::sregex_iterator());
ASSERT_STREQ(
hiliteMatches(str, matches, "(", ")").c_str(),
"Hello, (world), (world), (world), (world), (world), (world), Hello!"
);
}
TEST(hiliteMatches, overlappingMatches) {
std::string str = "world, Hello, world, Hello, world, Hello, world, Hello, world!";
std::regex re = std::regex("Hello, world");
std::regex re2 = std::regex("world, Hello");
auto v = std::vector(std::sregex_iterator(str.begin(), str.end(), re), std::sregex_iterator());
for(auto it = std::sregex_iterator(str.begin(), str.end(), re2); it != std::sregex_iterator(); ++it) {
v.push_back(*it);
}
ASSERT_STREQ(
hiliteMatches(str, v, "(", ")").c_str(),
"(world, Hello, world, Hello, world, Hello, world, Hello, world)!"
);
}
TEST(hiliteMatches, complexOverlappingMatches) {
std::string str = "legacyPackages.x86_64-linux.git-crypt";
std::vector regexes = {
std::regex("t-cry"),
std::regex("ux\\.git-cry"),
std::regex("git-c"),
std::regex("pt"),
};
std::vector<std::smatch> matches;
for(auto regex : regexes)
{
for(auto it = std::sregex_iterator(str.begin(), str.end(), regex); it != std::sregex_iterator(); ++it) {
matches.push_back(*it);
}
}
ASSERT_STREQ(
hiliteMatches(str, matches, "(", ")").c_str(),
"legacyPackages.x86_64-lin(ux.git-crypt)"
);
}
}

View file

@ -1,29 +0,0 @@
check: libutil-tests_RUN
programs += libutil-tests
libutil-tests-exe_NAME = libnixutil-tests
libutil-tests-exe_DIR := $(d)
libutil-tests-exe_INSTALL_DIR :=
libutil-tests-exe_LIBS = libutil-tests
libutil-tests-exe_LDFLAGS := $(GTEST_LIBS)
libraries += libutil-tests
libutil-tests_NAME = libnixutil-tests
libutil-tests_DIR := $(d)
libutil-tests_INSTALL_DIR :=
libutil-tests_SOURCES := $(wildcard $(d)/*.cc)
libutil-tests_CXXFLAGS += -I src/libutil
libutil-tests_LIBS = libutil
libutil-tests_LDFLAGS := -lrapidcheck $(GTEST_LIBS)

View file

@ -1,369 +0,0 @@
#if 0
#include "logging.hh"
#include "nixexpr.hh"
#include <fstream>
#include <gtest/gtest.h>
namespace nix {
/* ----------------------------------------------------------------------------
* logEI
* --------------------------------------------------------------------------*/
const char *test_file =
"previous line of code\n"
"this is the problem line of code\n"
"next line of code\n";
const char *one_liner =
"this is the other problem line of code";
TEST(logEI, catpuresBasicProperties) {
MakeError(TestError, Error);
ErrorInfo::programName = std::optional("error-unit-test");
try {
throw TestError("an error for testing purposes");
} catch (Error &e) {
testing::internal::CaptureStderr();
logger->logEI(e.info());
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(),"\x1B[31;1merror:\x1B[0m\x1B[34;1m --- TestError --- error-unit-test\x1B[0m\nan error for testing purposes\n");
}
}
TEST(logEI, jsonOutput) {
SymbolTable testTable;
auto problem_file = testTable.create("random.nix");
testing::internal::CaptureStderr();
makeJSONLogger(*logger)->logEI({
.name = "error name",
.msg = hintfmt("this hint has %1% templated %2%!!",
"yellow",
"values"),
.errPos = Pos(foFile, problem_file, 02, 13)
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "@nix {\"action\":\"msg\",\"column\":13,\"file\":\"random.nix\",\"level\":0,\"line\":2,\"msg\":\"\\u001b[31;1merror:\\u001b[0m\\u001b[34;1m --- error name --- error-unit-test\\u001b[0m\\n\\u001b[34;1mat: \\u001b[33;1m(2:13)\\u001b[34;1m in file: \\u001b[0mrandom.nix\\n\\nerror without any code lines.\\n\\nthis hint has \\u001b[33;1myellow\\u001b[0m templated \\u001b[33;1mvalues\\u001b[0m!!\",\"raw_msg\":\"this hint has \\u001b[33;1myellow\\u001b[0m templated \\u001b[33;1mvalues\\u001b[0m!!\"}\n");
}
TEST(logEI, appendingHintsToPreviousError) {
MakeError(TestError, Error);
ErrorInfo::programName = std::optional("error-unit-test");
try {
auto e = Error("initial error");
throw TestError(e.info());
} catch (Error &e) {
ErrorInfo ei = e.info();
ei.msg = hintfmt("%s; subsequent error message.", normaltxt(e.info().msg.str()));
testing::internal::CaptureStderr();
logger->logEI(ei);
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- TestError --- error-unit-test\x1B[0m\ninitial error; subsequent error message.\n");
}
}
TEST(logEI, picksUpSysErrorExitCode) {
MakeError(TestError, Error);
ErrorInfo::programName = std::optional("error-unit-test");
try {
auto x = readFile(-1);
}
catch (SysError &e) {
testing::internal::CaptureStderr();
logError(e.info());
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- SysError --- error-unit-test\x1B[0m\nstatting file: \x1B[33;1mBad file descriptor\x1B[0m\n");
}
}
TEST(logEI, loggingErrorOnInfoLevel) {
testing::internal::CaptureStderr();
logger->logEI({ .level = lvlInfo,
.name = "Info name",
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[32;1minfo:\x1B[0m\x1B[34;1m --- Info name --- error-unit-test\x1B[0m\nInfo description\n");
}
TEST(logEI, loggingErrorOnTalkativeLevel) {
verbosity = lvlTalkative;
testing::internal::CaptureStderr();
logger->logEI({ .level = lvlTalkative,
.name = "Talkative name",
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[32;1mtalk:\x1B[0m\x1B[34;1m --- Talkative name --- error-unit-test\x1B[0m\nTalkative description\n");
}
TEST(logEI, loggingErrorOnChattyLevel) {
verbosity = lvlChatty;
testing::internal::CaptureStderr();
logger->logEI({ .level = lvlChatty,
.name = "Chatty name",
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[32;1mchat:\x1B[0m\x1B[34;1m --- Chatty name --- error-unit-test\x1B[0m\nTalkative description\n");
}
TEST(logEI, loggingErrorOnDebugLevel) {
verbosity = lvlDebug;
testing::internal::CaptureStderr();
logger->logEI({ .level = lvlDebug,
.name = "Debug name",
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[33;1mdebug:\x1B[0m\x1B[34;1m --- Debug name --- error-unit-test\x1B[0m\nDebug description\n");
}
TEST(logEI, loggingErrorOnVomitLevel) {
verbosity = lvlVomit;
testing::internal::CaptureStderr();
logger->logEI({ .level = lvlVomit,
.name = "Vomit name",
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[32;1mvomit:\x1B[0m\x1B[34;1m --- Vomit name --- error-unit-test\x1B[0m\nVomit description\n");
}
/* ----------------------------------------------------------------------------
* logError
* --------------------------------------------------------------------------*/
TEST(logError, logErrorWithoutHintOrCode) {
testing::internal::CaptureStderr();
logError({
.name = "name",
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- name --- error-unit-test\x1B[0m\nerror description\n");
}
TEST(logError, logErrorWithPreviousAndNextLinesOfCode) {
SymbolTable testTable;
auto problem_file = testTable.create(test_file);
testing::internal::CaptureStderr();
logError({
.name = "error name",
.msg = hintfmt("this hint has %1% templated %2%!!",
"yellow",
"values"),
.errPos = Pos(foString, problem_file, 02, 13),
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- error name --- error-unit-test\x1B[0m\n\x1B[34;1mat: \x1B[33;1m(2:13)\x1B[34;1m from string\x1B[0m\n\nerror with code lines\n\n 1| previous line of code\n 2| this is the problem line of code\n | \x1B[31;1m^\x1B[0m\n 3| next line of code\n\nthis hint has \x1B[33;1myellow\x1B[0m templated \x1B[33;1mvalues\x1B[0m!!\n");
}
TEST(logError, logErrorWithInvalidFile) {
SymbolTable testTable;
auto problem_file = testTable.create("invalid filename");
testing::internal::CaptureStderr();
logError({
.name = "error name",
.msg = hintfmt("this hint has %1% templated %2%!!",
"yellow",
"values"),
.errPos = Pos(foFile, problem_file, 02, 13)
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- error name --- error-unit-test\x1B[0m\n\x1B[34;1mat: \x1B[33;1m(2:13)\x1B[34;1m in file: \x1B[0minvalid filename\n\nerror without any code lines.\n\nthis hint has \x1B[33;1myellow\x1B[0m templated \x1B[33;1mvalues\x1B[0m!!\n");
}
TEST(logError, logErrorWithOnlyHintAndName) {
testing::internal::CaptureStderr();
logError({
.name = "error name",
.msg = hintfmt("hint %1%", "only"),
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- error name --- error-unit-test\x1B[0m\nhint \x1B[33;1monly\x1B[0m\n");
}
/* ----------------------------------------------------------------------------
* logWarning
* --------------------------------------------------------------------------*/
TEST(logWarning, logWarningWithNameDescriptionAndHint) {
testing::internal::CaptureStderr();
logWarning({
.name = "name",
.msg = hintfmt("there was a %1%", "warning"),
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[33;1mwarning:\x1B[0m\x1B[34;1m --- name --- error-unit-test\x1B[0m\nwarning description\n\nthere was a \x1B[33;1mwarning\x1B[0m\n");
}
TEST(logWarning, logWarningWithFileLineNumAndCode) {
SymbolTable testTable;
auto problem_file = testTable.create(test_file);
testing::internal::CaptureStderr();
logWarning({
.name = "warning name",
.msg = hintfmt("this hint has %1% templated %2%!!",
"yellow",
"values"),
.errPos = Pos(foStdin, problem_file, 2, 13),
});
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[33;1mwarning:\x1B[0m\x1B[34;1m --- warning name --- error-unit-test\x1B[0m\n\x1B[34;1mat: \x1B[33;1m(2:13)\x1B[34;1m from stdin\x1B[0m\n\nwarning description\n\n 1| previous line of code\n 2| this is the problem line of code\n | \x1B[31;1m^\x1B[0m\n 3| next line of code\n\nthis hint has \x1B[33;1myellow\x1B[0m templated \x1B[33;1mvalues\x1B[0m!!\n");
}
/* ----------------------------------------------------------------------------
* traces
* --------------------------------------------------------------------------*/
TEST(addTrace, showTracesWithShowTrace) {
SymbolTable testTable;
auto problem_file = testTable.create(test_file);
auto oneliner_file = testTable.create(one_liner);
auto invalidfilename = testTable.create("invalid filename");
auto e = AssertionError(ErrorInfo {
.name = "wat",
.msg = hintfmt("it has been %1% days since our last error", "zero"),
.errPos = Pos(foString, problem_file, 2, 13),
});
e.addTrace(Pos(foStdin, oneliner_file, 1, 19), "while trying to compute %1%", 42);
e.addTrace(std::nullopt, "while doing something without a %1%", "pos");
e.addTrace(Pos(foFile, invalidfilename, 100, 1), "missing %s", "nix file");
testing::internal::CaptureStderr();
loggerSettings.showTrace.assign(true);
logError(e.info());
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- AssertionError --- error-unit-test\x1B[0m\n\x1B[34;1mat: \x1B[33;1m(2:13)\x1B[34;1m from string\x1B[0m\n\nshow-traces\n\n 1| previous line of code\n 2| this is the problem line of code\n | \x1B[31;1m^\x1B[0m\n 3| next line of code\n\nit has been \x1B[33;1mzero\x1B[0m days since our last error\n\x1B[34;1m---- show-trace ----\x1B[0m\n\x1B[34;1mtrace: \x1B[0mwhile trying to compute \x1B[33;1m42\x1B[0m\n\x1B[34;1mat: \x1B[33;1m(1:19)\x1B[34;1m from stdin\x1B[0m\n\n 1| this is the other problem line of code\n | \x1B[31;1m^\x1B[0m\n\n\x1B[34;1mtrace: \x1B[0mwhile doing something without a \x1B[33;1mpos\x1B[0m\n\x1B[34;1mtrace: \x1B[0mmissing \x1B[33;1mnix file\x1B[0m\n\x1B[34;1mat: \x1B[33;1m(100:1)\x1B[34;1m in file: \x1B[0minvalid filename\n");
}
TEST(addTrace, hideTracesWithoutShowTrace) {
SymbolTable testTable;
auto problem_file = testTable.create(test_file);
auto oneliner_file = testTable.create(one_liner);
auto invalidfilename = testTable.create("invalid filename");
auto e = AssertionError(ErrorInfo {
.name = "wat",
.msg = hintfmt("it has been %1% days since our last error", "zero"),
.errPos = Pos(foString, problem_file, 2, 13),
});
e.addTrace(Pos(foStdin, oneliner_file, 1, 19), "while trying to compute %1%", 42);
e.addTrace(std::nullopt, "while doing something without a %1%", "pos");
e.addTrace(Pos(foFile, invalidfilename, 100, 1), "missing %s", "nix file");
testing::internal::CaptureStderr();
loggerSettings.showTrace.assign(false);
logError(e.info());
auto str = testing::internal::GetCapturedStderr();
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- AssertionError --- error-unit-test\x1B[0m\n\x1B[34;1mat: \x1B[33;1m(2:13)\x1B[34;1m from string\x1B[0m\n\nhide traces\n\n 1| previous line of code\n 2| this is the problem line of code\n | \x1B[31;1m^\x1B[0m\n 3| next line of code\n\nit has been \x1B[33;1mzero\x1B[0m days since our last error\n");
}
/* ----------------------------------------------------------------------------
* hintfmt
* --------------------------------------------------------------------------*/
TEST(hintfmt, percentStringWithoutArgs) {
const char *teststr = "this is 100%s correct!";
ASSERT_STREQ(
hintfmt(teststr).str().c_str(),
teststr);
}
TEST(hintfmt, fmtToHintfmt) {
ASSERT_STREQ(
hintfmt(fmt("the color of this this text is %1%", "not yellow")).str().c_str(),
"the color of this this text is not yellow");
}
TEST(hintfmt, tooFewArguments) {
ASSERT_STREQ(
hintfmt("only one arg %1% %2%", "fulfilled").str().c_str(),
"only one arg " ANSI_WARNING "fulfilled" ANSI_NORMAL " ");
}
TEST(hintfmt, tooManyArguments) {
ASSERT_STREQ(
hintfmt("what about this %1% %2%", "%3%", "one", "two").str().c_str(),
"what about this " ANSI_WARNING "%3%" ANSI_NORMAL " " ANSI_YELLOW "one" ANSI_NORMAL);
}
/* ----------------------------------------------------------------------------
* ErrPos
* --------------------------------------------------------------------------*/
TEST(errpos, invalidPos) {
// contains an invalid symbol, which we should not dereference!
Pos invalid;
// constructing without access violation.
ErrPos ep(invalid);
// assignment without access violation.
ep = invalid;
}
}
#endif

View file

@ -1,130 +0,0 @@
#include "lru-cache.hh"
#include <gtest/gtest.h>
namespace nix {
/* ----------------------------------------------------------------------------
* size
* --------------------------------------------------------------------------*/
TEST(LRUCache, sizeOfEmptyCacheIsZero) {
LRUCache<std::string, std::string> c(10);
ASSERT_EQ(c.size(), 0);
}
TEST(LRUCache, sizeOfSingleElementCacheIsOne) {
LRUCache<std::string, std::string> c(10);
c.upsert("foo", "bar");
ASSERT_EQ(c.size(), 1);
}
/* ----------------------------------------------------------------------------
* upsert / get
* --------------------------------------------------------------------------*/
TEST(LRUCache, getFromEmptyCache) {
LRUCache<std::string, std::string> c(10);
auto val = c.get("x");
ASSERT_EQ(val.has_value(), false);
}
TEST(LRUCache, getExistingValue) {
LRUCache<std::string, std::string> c(10);
c.upsert("foo", "bar");
auto val = c.get("foo");
ASSERT_EQ(val, "bar");
}
TEST(LRUCache, getNonExistingValueFromNonEmptyCache) {
LRUCache<std::string, std::string> c(10);
c.upsert("foo", "bar");
auto val = c.get("another");
ASSERT_EQ(val.has_value(), false);
}
TEST(LRUCache, upsertOnZeroCapacityCache) {
LRUCache<std::string, std::string> c(0);
c.upsert("foo", "bar");
auto val = c.get("foo");
ASSERT_EQ(val.has_value(), false);
}
TEST(LRUCache, updateExistingValue) {
LRUCache<std::string, std::string> c(1);
c.upsert("foo", "bar");
auto val = c.get("foo");
ASSERT_EQ(val.value_or("error"), "bar");
ASSERT_EQ(c.size(), 1);
c.upsert("foo", "changed");
val = c.get("foo");
ASSERT_EQ(val.value_or("error"), "changed");
ASSERT_EQ(c.size(), 1);
}
TEST(LRUCache, overwriteOldestWhenCapacityIsReached) {
LRUCache<std::string, std::string> c(3);
c.upsert("one", "eins");
c.upsert("two", "zwei");
c.upsert("three", "drei");
ASSERT_EQ(c.size(), 3);
ASSERT_EQ(c.get("one").value_or("error"), "eins");
// exceed capacity
c.upsert("another", "whatever");
ASSERT_EQ(c.size(), 3);
// Retrieving "one" makes it the most recent element, so
// "two" will be the oldest one and thus replaced.
ASSERT_EQ(c.get("two").has_value(), false);
ASSERT_EQ(c.get("another").value(), "whatever");
}
/* ----------------------------------------------------------------------------
* clear
* --------------------------------------------------------------------------*/
TEST(LRUCache, clearEmptyCache) {
LRUCache<std::string, std::string> c(10);
c.clear();
ASSERT_EQ(c.size(), 0);
}
TEST(LRUCache, clearNonEmptyCache) {
LRUCache<std::string, std::string> c(10);
c.upsert("one", "eins");
c.upsert("two", "zwei");
c.upsert("three", "drei");
ASSERT_EQ(c.size(), 3);
c.clear();
ASSERT_EQ(c.size(), 0);
}
/* ----------------------------------------------------------------------------
* erase
* --------------------------------------------------------------------------*/
TEST(LRUCache, eraseFromEmptyCache) {
LRUCache<std::string, std::string> c(10);
ASSERT_EQ(c.erase("foo"), false);
ASSERT_EQ(c.size(), 0);
}
TEST(LRUCache, eraseMissingFromNonEmptyCache) {
LRUCache<std::string, std::string> c(10);
c.upsert("one", "eins");
ASSERT_EQ(c.erase("foo"), false);
ASSERT_EQ(c.size(), 1);
ASSERT_EQ(c.get("one").value_or("error"), "eins");
}
TEST(LRUCache, eraseFromNonEmptyCache) {
LRUCache<std::string, std::string> c(10);
c.upsert("one", "eins");
ASSERT_EQ(c.erase("one"), true);
ASSERT_EQ(c.size(), 0);
ASSERT_EQ(c.get("one").value_or("empty"), "empty");
}
}

View file

@ -1,127 +0,0 @@
#include "pool.hh"
#include <gtest/gtest.h>
namespace nix {
struct TestResource
{
TestResource() {
static int counter = 0;
num = counter++;
}
int dummyValue = 1;
bool good = true;
int num;
};
/* ----------------------------------------------------------------------------
* Pool
* --------------------------------------------------------------------------*/
TEST(Pool, freshPoolHasZeroCountAndSpecifiedCapacity) {
auto isGood = [](const ref<TestResource> & r) { return r->good; };
auto createResource = []() { return make_ref<TestResource>(); };
Pool<TestResource> pool = Pool<TestResource>((size_t)1, createResource, isGood);
ASSERT_EQ(pool.count(), 0);
ASSERT_EQ(pool.capacity(), 1);
}
TEST(Pool, freshPoolCanGetAResource) {
auto isGood = [](const ref<TestResource> & r) { return r->good; };
auto createResource = []() { return make_ref<TestResource>(); };
Pool<TestResource> pool = Pool<TestResource>((size_t)1, createResource, isGood);
ASSERT_EQ(pool.count(), 0);
TestResource r = *(pool.get());
ASSERT_EQ(pool.count(), 1);
ASSERT_EQ(pool.capacity(), 1);
ASSERT_EQ(r.dummyValue, 1);
ASSERT_EQ(r.good, true);
}
TEST(Pool, capacityCanBeIncremented) {
auto isGood = [](const ref<TestResource> & r) { return r->good; };
auto createResource = []() { return make_ref<TestResource>(); };
Pool<TestResource> pool = Pool<TestResource>((size_t)1, createResource, isGood);
ASSERT_EQ(pool.capacity(), 1);
pool.incCapacity();
ASSERT_EQ(pool.capacity(), 2);
}
TEST(Pool, capacityCanBeDecremented) {
auto isGood = [](const ref<TestResource> & r) { return r->good; };
auto createResource = []() { return make_ref<TestResource>(); };
Pool<TestResource> pool = Pool<TestResource>((size_t)1, createResource, isGood);
ASSERT_EQ(pool.capacity(), 1);
pool.decCapacity();
ASSERT_EQ(pool.capacity(), 0);
}
TEST(Pool, flushBadDropsOutOfScopeResources) {
auto isGood = [](const ref<TestResource> & r) { return false; };
auto createResource = []() { return make_ref<TestResource>(); };
Pool<TestResource> pool = Pool<TestResource>((size_t)1, createResource, isGood);
{
auto _r = pool.get();
ASSERT_EQ(pool.count(), 1);
}
pool.flushBad();
ASSERT_EQ(pool.count(), 0);
}
// Test that the resources we allocate are being reused when they are still good.
TEST(Pool, reuseResource) {
auto isGood = [](const ref<TestResource> & r) { return true; };
auto createResource = []() { return make_ref<TestResource>(); };
Pool<TestResource> pool = Pool<TestResource>((size_t)1, createResource, isGood);
// Compare the instance counter between the two handles. We expect them to be equal
// as the pool should hand out the same (still) good one again.
int counter = -1;
{
Pool<TestResource>::Handle h = pool.get();
counter = h->num;
} // the first handle goes out of scope
{ // the second handle should contain the same resource (with the same counter value)
Pool<TestResource>::Handle h = pool.get();
ASSERT_EQ(h->num, counter);
}
}
// Test that the resources we allocate are being thrown away when they are no longer good.
TEST(Pool, badResourceIsNotReused) {
auto isGood = [](const ref<TestResource> & r) { return false; };
auto createResource = []() { return make_ref<TestResource>(); };
Pool<TestResource> pool = Pool<TestResource>((size_t)1, createResource, isGood);
// Compare the instance counter between the two handles. We expect them
// to *not* be equal as the pool should hand out a new instance after
// the first one was returned.
int counter = -1;
{
Pool<TestResource>::Handle h = pool.get();
counter = h->num;
} // the first handle goes out of scope
{
// the second handle should contain a different resource (with a
// different counter value)
Pool<TestResource>::Handle h = pool.get();
ASSERT_NE(h->num, counter);
}
}
}

View file

@ -1,46 +0,0 @@
#include "references.hh"
#include <gtest/gtest.h>
namespace nix {
using std::string;
struct RewriteParams {
string originalString, finalString;
StringMap rewrites;
friend std::ostream& operator<<(std::ostream& os, const RewriteParams& bar) {
StringSet strRewrites;
for (auto & [from, to] : bar.rewrites)
strRewrites.insert(from + "->" + to);
return os <<
"OriginalString: " << bar.originalString << std::endl <<
"Rewrites: " << concatStringsSep(",", strRewrites) << std::endl <<
"Expected result: " << bar.finalString;
}
};
class RewriteTest : public ::testing::TestWithParam<RewriteParams> {
};
TEST_P(RewriteTest, IdentityRewriteIsIdentity) {
RewriteParams param = GetParam();
StringSink rewritten;
auto rewriter = RewritingSink(param.rewrites, rewritten);
rewriter(param.originalString);
rewriter.flush();
ASSERT_EQ(rewritten.s, param.finalString);
}
INSTANTIATE_TEST_CASE_P(
references,
RewriteTest,
::testing::Values(
RewriteParams{ "foooo", "baroo", {{"foo", "bar"}, {"bar", "baz"}}},
RewriteParams{ "foooo", "bazoo", {{"fou", "bar"}, {"foo", "baz"}}},
RewriteParams{ "foooo", "foooo", {}}
)
);
}

View file

@ -1,43 +0,0 @@
#include "suggestions.hh"
#include <gtest/gtest.h>
namespace nix {
struct LevenshteinDistanceParam {
std::string s1, s2;
int distance;
};
class LevenshteinDistanceTest :
public testing::TestWithParam<LevenshteinDistanceParam> {
};
TEST_P(LevenshteinDistanceTest, CorrectlyComputed) {
auto params = GetParam();
ASSERT_EQ(levenshteinDistance(params.s1, params.s2), params.distance);
ASSERT_EQ(levenshteinDistance(params.s2, params.s1), params.distance);
}
INSTANTIATE_TEST_SUITE_P(LevenshteinDistance, LevenshteinDistanceTest,
testing::Values(
LevenshteinDistanceParam{"foo", "foo", 0},
LevenshteinDistanceParam{"foo", "", 3},
LevenshteinDistanceParam{"", "", 0},
LevenshteinDistanceParam{"foo", "fo", 1},
LevenshteinDistanceParam{"foo", "oo", 1},
LevenshteinDistanceParam{"foo", "fao", 1},
LevenshteinDistanceParam{"foo", "abc", 3}
)
);
TEST(Suggestions, Trim) {
auto suggestions = Suggestions::bestMatches({"foooo", "bar", "fo", "gao"}, "foo");
auto onlyOne = suggestions.trim(1);
ASSERT_EQ(onlyOne.suggestions.size(), 1);
ASSERT_TRUE(onlyOne.suggestions.begin()->suggestion == "fo");
auto closest = suggestions.trim(999, 2);
ASSERT_EQ(closest.suggestions.size(), 3);
}
}

View file

@ -1,662 +0,0 @@
#include "util.hh"
#include "types.hh"
#include "file-system.hh"
#include "processes.hh"
#include "terminal.hh"
#include <limits.h>
#include <gtest/gtest.h>
#include <numeric>
namespace nix {
/* ----------- tests for util.hh ------------------------------------------------*/
/* ----------------------------------------------------------------------------
* absPath
* --------------------------------------------------------------------------*/
TEST(absPath, doesntChangeRoot) {
auto p = absPath("/");
ASSERT_EQ(p, "/");
}
TEST(absPath, turnsEmptyPathIntoCWD) {
char cwd[PATH_MAX+1];
auto p = absPath("");
ASSERT_EQ(p, getcwd((char*)&cwd, PATH_MAX));
}
TEST(absPath, usesOptionalBasePathWhenGiven) {
char _cwd[PATH_MAX+1];
char* cwd = getcwd((char*)&_cwd, PATH_MAX);
auto p = absPath("", cwd);
ASSERT_EQ(p, cwd);
}
TEST(absPath, isIdempotent) {
char _cwd[PATH_MAX+1];
char* cwd = getcwd((char*)&_cwd, PATH_MAX);
auto p1 = absPath(cwd);
auto p2 = absPath(p1);
ASSERT_EQ(p1, p2);
}
TEST(absPath, pathIsCanonicalised) {
auto path = "/some/path/with/trailing/dot/.";
auto p1 = absPath(path);
auto p2 = absPath(p1);
ASSERT_EQ(p1, "/some/path/with/trailing/dot");
ASSERT_EQ(p1, p2);
}
/* ----------------------------------------------------------------------------
* canonPath
* --------------------------------------------------------------------------*/
TEST(canonPath, removesTrailingSlashes) {
auto path = "/this/is/a/path//";
auto p = canonPath(path);
ASSERT_EQ(p, "/this/is/a/path");
}
TEST(canonPath, removesDots) {
auto path = "/this/./is/a/path/./";
auto p = canonPath(path);
ASSERT_EQ(p, "/this/is/a/path");
}
TEST(canonPath, removesDots2) {
auto path = "/this/a/../is/a////path/foo/..";
auto p = canonPath(path);
ASSERT_EQ(p, "/this/is/a/path");
}
TEST(canonPath, requiresAbsolutePath) {
ASSERT_ANY_THROW(canonPath("."));
ASSERT_ANY_THROW(canonPath(".."));
ASSERT_ANY_THROW(canonPath("../"));
ASSERT_DEATH({ canonPath(""); }, "path != \"\"");
}
/* ----------------------------------------------------------------------------
* dirOf
* --------------------------------------------------------------------------*/
TEST(dirOf, returnsEmptyStringForRoot) {
auto p = dirOf("/");
ASSERT_EQ(p, "/");
}
TEST(dirOf, returnsFirstPathComponent) {
auto p1 = dirOf("/dir/");
ASSERT_EQ(p1, "/dir");
auto p2 = dirOf("/dir");
ASSERT_EQ(p2, "/");
auto p3 = dirOf("/dir/..");
ASSERT_EQ(p3, "/dir");
auto p4 = dirOf("/dir/../");
ASSERT_EQ(p4, "/dir/..");
}
/* ----------------------------------------------------------------------------
* baseNameOf
* --------------------------------------------------------------------------*/
TEST(baseNameOf, emptyPath) {
auto p1 = baseNameOf("");
ASSERT_EQ(p1, "");
}
TEST(baseNameOf, pathOnRoot) {
auto p1 = baseNameOf("/dir");
ASSERT_EQ(p1, "dir");
}
TEST(baseNameOf, relativePath) {
auto p1 = baseNameOf("dir/foo");
ASSERT_EQ(p1, "foo");
}
TEST(baseNameOf, pathWithTrailingSlashRoot) {
auto p1 = baseNameOf("/");
ASSERT_EQ(p1, "");
}
TEST(baseNameOf, trailingSlash) {
auto p1 = baseNameOf("/dir/");
ASSERT_EQ(p1, "dir");
}
/* ----------------------------------------------------------------------------
* isInDir
* --------------------------------------------------------------------------*/
TEST(isInDir, trivialCase) {
auto p1 = isInDir("/foo/bar", "/foo");
ASSERT_EQ(p1, true);
}
TEST(isInDir, notInDir) {
auto p1 = isInDir("/zes/foo/bar", "/foo");
ASSERT_EQ(p1, false);
}
// XXX: hm, bug or feature? :) Looking at the implementation
// this might be problematic.
TEST(isInDir, emptyDir) {
auto p1 = isInDir("/zes/foo/bar", "");
ASSERT_EQ(p1, true);
}
/* ----------------------------------------------------------------------------
* isDirOrInDir
* --------------------------------------------------------------------------*/
TEST(isDirOrInDir, trueForSameDirectory) {
ASSERT_EQ(isDirOrInDir("/nix", "/nix"), true);
ASSERT_EQ(isDirOrInDir("/", "/"), true);
}
TEST(isDirOrInDir, trueForEmptyPaths) {
ASSERT_EQ(isDirOrInDir("", ""), true);
}
TEST(isDirOrInDir, falseForDisjunctPaths) {
ASSERT_EQ(isDirOrInDir("/foo", "/bar"), false);
}
TEST(isDirOrInDir, relativePaths) {
ASSERT_EQ(isDirOrInDir("/foo/..", "/foo"), true);
}
// XXX: while it is possible to use "." or ".." in the
// first argument this doesn't seem to work in the second.
TEST(isDirOrInDir, DISABLED_shouldWork) {
ASSERT_EQ(isDirOrInDir("/foo/..", "/foo/."), true);
}
/* ----------------------------------------------------------------------------
* pathExists
* --------------------------------------------------------------------------*/
TEST(pathExists, rootExists) {
ASSERT_TRUE(pathExists("/"));
}
TEST(pathExists, cwdExists) {
ASSERT_TRUE(pathExists("."));
}
TEST(pathExists, bogusPathDoesNotExist) {
ASSERT_FALSE(pathExists("/schnitzel/darmstadt/pommes"));
}
/* ----------------------------------------------------------------------------
* concatStringsSep
* --------------------------------------------------------------------------*/
TEST(concatStringsSep, buildCommaSeparatedString) {
Strings strings;
strings.push_back("this");
strings.push_back("is");
strings.push_back("great");
ASSERT_EQ(concatStringsSep(",", strings), "this,is,great");
}
TEST(concatStringsSep, buildStringWithEmptySeparator) {
Strings strings;
strings.push_back("this");
strings.push_back("is");
strings.push_back("great");
ASSERT_EQ(concatStringsSep("", strings), "thisisgreat");
}
TEST(concatStringsSep, buildSingleString) {
Strings strings;
strings.push_back("this");
ASSERT_EQ(concatStringsSep(",", strings), "this");
}
/* ----------------------------------------------------------------------------
* hasPrefix
* --------------------------------------------------------------------------*/
TEST(hasPrefix, emptyStringHasNoPrefix) {
ASSERT_FALSE(hasPrefix("", "foo"));
}
TEST(hasPrefix, emptyStringIsAlwaysPrefix) {
ASSERT_TRUE(hasPrefix("foo", ""));
ASSERT_TRUE(hasPrefix("jshjkfhsadf", ""));
}
TEST(hasPrefix, trivialCase) {
ASSERT_TRUE(hasPrefix("foobar", "foo"));
}
/* ----------------------------------------------------------------------------
* hasSuffix
* --------------------------------------------------------------------------*/
TEST(hasSuffix, emptyStringHasNoSuffix) {
ASSERT_FALSE(hasSuffix("", "foo"));
}
TEST(hasSuffix, trivialCase) {
ASSERT_TRUE(hasSuffix("foo", "foo"));
ASSERT_TRUE(hasSuffix("foobar", "bar"));
}
/* ----------------------------------------------------------------------------
* base64Encode
* --------------------------------------------------------------------------*/
TEST(base64Encode, emptyString) {
ASSERT_EQ(base64Encode(""), "");
}
TEST(base64Encode, encodesAString) {
ASSERT_EQ(base64Encode("quod erat demonstrandum"), "cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0=");
}
TEST(base64Encode, encodeAndDecode) {
auto s = "quod erat demonstrandum";
auto encoded = base64Encode(s);
auto decoded = base64Decode(encoded);
ASSERT_EQ(decoded, s);
}
TEST(base64Encode, encodeAndDecodeNonPrintable) {
char s[256];
std::iota(std::rbegin(s), std::rend(s), 0);
auto encoded = base64Encode(s);
auto decoded = base64Decode(encoded);
EXPECT_EQ(decoded.length(), 255);
ASSERT_EQ(decoded, s);
}
/* ----------------------------------------------------------------------------
* base64Decode
* --------------------------------------------------------------------------*/
TEST(base64Decode, emptyString) {
ASSERT_EQ(base64Decode(""), "");
}
TEST(base64Decode, decodeAString) {
ASSERT_EQ(base64Decode("cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0="), "quod erat demonstrandum");
}
TEST(base64Decode, decodeThrowsOnInvalidChar) {
ASSERT_THROW(base64Decode("cXVvZCBlcm_0IGRlbW9uc3RyYW5kdW0="), Error);
}
/* ----------------------------------------------------------------------------
* getLine
* --------------------------------------------------------------------------*/
TEST(getLine, all) {
{
auto [line, rest] = getLine("foo\nbar\nxyzzy");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "bar\nxyzzy");
}
{
auto [line, rest] = getLine("foo\r\nbar\r\nxyzzy");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "bar\r\nxyzzy");
}
{
auto [line, rest] = getLine("foo\n");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "");
}
{
auto [line, rest] = getLine("foo");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "");
}
{
auto [line, rest] = getLine("");
ASSERT_EQ(line, "");
ASSERT_EQ(rest, "");
}
}
/* ----------------------------------------------------------------------------
* toLower
* --------------------------------------------------------------------------*/
TEST(toLower, emptyString) {
ASSERT_EQ(toLower(""), "");
}
TEST(toLower, nonLetters) {
auto s = "!@(*$#)(@#=\\234_";
ASSERT_EQ(toLower(s), s);
}
// std::tolower() doesn't handle unicode characters. In the context of
// store paths this isn't relevant but doesn't hurt to record this behavior
// here.
TEST(toLower, umlauts) {
auto s = "ÄÖÜ";
ASSERT_EQ(toLower(s), "ÄÖÜ");
}
/* ----------------------------------------------------------------------------
* string2Float
* --------------------------------------------------------------------------*/
TEST(string2Float, emptyString) {
ASSERT_EQ(string2Float<double>(""), std::nullopt);
}
TEST(string2Float, trivialConversions) {
ASSERT_EQ(string2Float<double>("1.0"), 1.0);
ASSERT_EQ(string2Float<double>("0.0"), 0.0);
ASSERT_EQ(string2Float<double>("-100.25"), -100.25);
}
/* ----------------------------------------------------------------------------
* string2Int
* --------------------------------------------------------------------------*/
TEST(string2Int, emptyString) {
ASSERT_EQ(string2Int<int>(""), std::nullopt);
}
TEST(string2Int, trivialConversions) {
ASSERT_EQ(string2Int<int>("1"), 1);
ASSERT_EQ(string2Int<int>("0"), 0);
ASSERT_EQ(string2Int<int>("-100"), -100);
}
/* ----------------------------------------------------------------------------
* statusOk
* --------------------------------------------------------------------------*/
TEST(statusOk, zeroIsOk) {
ASSERT_EQ(statusOk(0), true);
ASSERT_EQ(statusOk(1), false);
}
/* ----------------------------------------------------------------------------
* rewriteStrings
* --------------------------------------------------------------------------*/
TEST(rewriteStrings, emptyString) {
StringMap rewrites;
rewrites["this"] = "that";
ASSERT_EQ(rewriteStrings("", rewrites), "");
}
TEST(rewriteStrings, emptyRewrites) {
StringMap rewrites;
ASSERT_EQ(rewriteStrings("this and that", rewrites), "this and that");
}
TEST(rewriteStrings, successfulRewrite) {
StringMap rewrites;
rewrites["this"] = "that";
ASSERT_EQ(rewriteStrings("this and that", rewrites), "that and that");
}
TEST(rewriteStrings, doesntOccur) {
StringMap rewrites;
rewrites["foo"] = "bar";
ASSERT_EQ(rewriteStrings("this and that", rewrites), "this and that");
}
/* ----------------------------------------------------------------------------
* replaceStrings
* --------------------------------------------------------------------------*/
TEST(replaceStrings, emptyString) {
ASSERT_EQ(replaceStrings("", "this", "that"), "");
ASSERT_EQ(replaceStrings("this and that", "", ""), "this and that");
}
TEST(replaceStrings, successfulReplace) {
ASSERT_EQ(replaceStrings("this and that", "this", "that"), "that and that");
}
TEST(replaceStrings, doesntOccur) {
ASSERT_EQ(replaceStrings("this and that", "foo", "bar"), "this and that");
}
/* ----------------------------------------------------------------------------
* trim
* --------------------------------------------------------------------------*/
TEST(trim, emptyString) {
ASSERT_EQ(trim(""), "");
}
TEST(trim, removesWhitespace) {
ASSERT_EQ(trim("foo"), "foo");
ASSERT_EQ(trim(" foo "), "foo");
ASSERT_EQ(trim(" foo bar baz"), "foo bar baz");
ASSERT_EQ(trim(" \t foo bar baz\n"), "foo bar baz");
}
/* ----------------------------------------------------------------------------
* chomp
* --------------------------------------------------------------------------*/
TEST(chomp, emptyString) {
ASSERT_EQ(chomp(""), "");
}
TEST(chomp, removesWhitespace) {
ASSERT_EQ(chomp("foo"), "foo");
ASSERT_EQ(chomp("foo "), "foo");
ASSERT_EQ(chomp(" foo "), " foo");
ASSERT_EQ(chomp(" foo bar baz "), " foo bar baz");
ASSERT_EQ(chomp("\t foo bar baz\n"), "\t foo bar baz");
}
/* ----------------------------------------------------------------------------
* quoteStrings
* --------------------------------------------------------------------------*/
TEST(quoteStrings, empty) {
Strings s = { };
Strings expected = { };
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, emptyStrings) {
Strings s = { "", "", "" };
Strings expected = { "''", "''", "''" };
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, trivialQuote) {
Strings s = { "foo", "bar", "baz" };
Strings expected = { "'foo'", "'bar'", "'baz'" };
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, quotedStrings) {
Strings s = { "'foo'", "'bar'", "'baz'" };
Strings expected = { "''foo''", "''bar''", "''baz''" };
ASSERT_EQ(quoteStrings(s), expected);
}
/* ----------------------------------------------------------------------------
* tokenizeString
* --------------------------------------------------------------------------*/
TEST(tokenizeString, empty) {
Strings expected = { };
ASSERT_EQ(tokenizeString<Strings>(""), expected);
}
TEST(tokenizeString, tokenizeSpacesWithDefaults) {
auto s = "foo bar baz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsWithDefaults) {
auto s = "foo\tbar\tbaz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesWithDefaults) {
auto s = "foo\t bar\t baz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesNewlineWithDefaults) {
auto s = "foo\t\n bar\t\n baz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesNewlineRetWithDefaults) {
auto s = "foo\t\n\r bar\t\n\r baz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
auto s2 = "foo \t\n\r bar \t\n\r baz";
Strings expected2 = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s2), expected2);
}
TEST(tokenizeString, tokenizeWithCustomSep) {
auto s = "foo\n,bar\n,baz\n";
Strings expected = { "foo\n", "bar\n", "baz\n" };
ASSERT_EQ(tokenizeString<Strings>(s, ","), expected);
}
/* ----------------------------------------------------------------------------
* get
* --------------------------------------------------------------------------*/
TEST(get, emptyContainer) {
StringMap s = { };
auto expected = nullptr;
ASSERT_EQ(get(s, "one"), expected);
}
TEST(get, getFromContainer) {
StringMap s;
s["one"] = "yi";
s["two"] = "er";
auto expected = "yi";
ASSERT_EQ(*get(s, "one"), expected);
}
TEST(getOr, emptyContainer) {
StringMap s = { };
auto expected = "yi";
ASSERT_EQ(getOr(s, "one", "yi"), expected);
}
TEST(getOr, getFromContainer) {
StringMap s;
s["one"] = "yi";
s["two"] = "er";
auto expected = "yi";
ASSERT_EQ(getOr(s, "one", "nope"), expected);
}
/* ----------------------------------------------------------------------------
* filterANSIEscapes
* --------------------------------------------------------------------------*/
TEST(filterANSIEscapes, emptyString) {
auto s = "";
auto expected = "";
ASSERT_EQ(filterANSIEscapes(s), expected);
}
TEST(filterANSIEscapes, doesntChangePrintableChars) {
auto s = "09 2q304ruyhr slk2-19024 kjsadh sar f";
ASSERT_EQ(filterANSIEscapes(s), s);
}
TEST(filterANSIEscapes, filtersColorCodes) {
auto s = "\u001b[30m A \u001b[31m B \u001b[32m C \u001b[33m D \u001b[0m";
ASSERT_EQ(filterANSIEscapes(s, true, 2), " A" );
ASSERT_EQ(filterANSIEscapes(s, true, 3), " A " );
ASSERT_EQ(filterANSIEscapes(s, true, 4), " A " );
ASSERT_EQ(filterANSIEscapes(s, true, 5), " A B" );
ASSERT_EQ(filterANSIEscapes(s, true, 8), " A B C" );
}
TEST(filterANSIEscapes, expandsTabs) {
auto s = "foo\tbar\tbaz";
ASSERT_EQ(filterANSIEscapes(s, true), "foo bar baz" );
}
TEST(filterANSIEscapes, utf8) {
ASSERT_EQ(filterANSIEscapes("foobar", true, 5), "fooba");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 6), "fóóbär");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 5), "fóóbä");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 3), "fóó");
ASSERT_EQ(filterANSIEscapes("f€€bär", true, 4), "f€€b");
ASSERT_EQ(filterANSIEscapes("f𐍈𐍈bär", true, 4), "f𐍈𐍈b");
}
}

View file

@ -1,67 +0,0 @@
#include "url-name.hh"
#include <gtest/gtest.h>
namespace nix {
/* ----------- tests for url-name.hh --------------------------------------------------*/
TEST(getNameFromURL, getsNameFromURL) {
ASSERT_EQ(getNameFromURL(parseURL("path:/home/user/project")), "project");
ASSERT_EQ(getNameFromURL(parseURL("path:~/repos/nixpkgs#packages.x86_64-linux.hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("path:.#nonStandardAttr.mylaptop")), "nonStandardAttr.mylaptop");
ASSERT_EQ(getNameFromURL(parseURL("path:./repos/myflake#nonStandardAttr.mylaptop")), "nonStandardAttr.mylaptop");
ASSERT_EQ(getNameFromURL(parseURL("path:./nixpkgs#packages.x86_64-linux.complex^bin,man")), "complex");
ASSERT_EQ(getNameFromURL(parseURL("path:./myproj#packages.x86_64-linux.default^*")), "myproj");
ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nixpkgs#packages.x86_64-linux.hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nixpkgs#hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nix#packages.x86_64-linux.default")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nix#")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("github:NixOS/nix")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("github:cachix/devenv/main#packages.x86_64-linux.default")), "devenv");
ASSERT_EQ(getNameFromURL(parseURL("github:edolstra/nix-warez?rev=1234&dir=blender&ref=master")), "blender");
ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nixpkgs#packages.x86_64-linux.hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nixpkgs#hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nix#packages.x86_64-linux.default")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nix#")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("gitlab:NixOS/nix")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("gitlab:cachix/devenv/main#packages.x86_64-linux.default")), "devenv");
ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nixpkgs#packages.x86_64-linux.hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nixpkgs#hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nix#packages.x86_64-linux.default")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nix#")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("sourcehut:NixOS/nix")), "nix");
ASSERT_EQ(getNameFromURL(parseURL("sourcehut:cachix/devenv/main#packages.x86_64-linux.default")), "devenv");
ASSERT_EQ(getNameFromURL(parseURL("git://github.com/edolstra/dwarffs")), "dwarffs");
ASSERT_EQ(getNameFromURL(parseURL("git://github.com/edolstra/nix-warez?dir=blender")), "blender");
ASSERT_EQ(getNameFromURL(parseURL("git+file:///home/user/project")), "project");
ASSERT_EQ(getNameFromURL(parseURL("git+file:///home/user/project?ref=fa1e2d23a22")), "project");
ASSERT_EQ(getNameFromURL(parseURL("git+ssh://git@github.com/someuser/my-repo#")), "my-repo");
ASSERT_EQ(getNameFromURL(parseURL("git+git://github.com/someuser/my-repo?rev=v1.2.3")), "my-repo");
ASSERT_EQ(getNameFromURL(parseURL("git+ssh:///home/user/project?dir=subproject&rev=v2.4")), "subproject");
ASSERT_EQ(getNameFromURL(parseURL("git+http://not-even-real#packages.x86_64-linux.hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("git+https://not-even-real#packages.aarch64-darwin.hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("tarball+http://github.com/NixOS/nix/archive/refs/tags/2.18.1#packages.x86_64-linux.jq")), "jq");
ASSERT_EQ(getNameFromURL(parseURL("tarball+https://github.com/NixOS/nix/archive/refs/tags/2.18.1#packages.x86_64-linux.hg")), "hg");
ASSERT_EQ(getNameFromURL(parseURL("tarball+file:///home/user/Downloads/nixpkgs-2.18.1#packages.aarch64-darwin.ripgrep")), "ripgrep");
ASSERT_EQ(getNameFromURL(parseURL("https://github.com/NixOS/nix/archive/refs/tags/2.18.1.tar.gz#packages.x86_64-linux.pv")), "pv");
ASSERT_EQ(getNameFromURL(parseURL("http://github.com/NixOS/nix/archive/refs/tags/2.18.1.tar.gz#packages.x86_64-linux.pv")), "pv");
ASSERT_EQ(getNameFromURL(parseURL("file:///home/user/project?ref=fa1e2d23a22")), "project");
ASSERT_EQ(getNameFromURL(parseURL("file+file:///home/user/project?ref=fa1e2d23a22")), "project");
ASSERT_EQ(getNameFromURL(parseURL("file+http://not-even-real#packages.x86_64-linux.hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("file+http://gitfantasy.com/org/user/notaflake")), "notaflake");
ASSERT_EQ(getNameFromURL(parseURL("file+https://not-even-real#packages.aarch64-darwin.hello")), "hello");
ASSERT_EQ(getNameFromURL(parseURL("https://www.github.com/")), std::nullopt);
ASSERT_EQ(getNameFromURL(parseURL("path:.")), std::nullopt);
ASSERT_EQ(getNameFromURL(parseURL("file:.#")), std::nullopt);
ASSERT_EQ(getNameFromURL(parseURL("path:.#packages.x86_64-linux.default")), std::nullopt);
ASSERT_EQ(getNameFromURL(parseURL("path:.#packages.x86_64-linux.default^*")), std::nullopt);
}
}

View file

@ -1,347 +0,0 @@
#include "url.hh"
#include <gtest/gtest.h>
namespace nix {
/* ----------- tests for url.hh --------------------------------------------------*/
std::string print_map(std::map<std::string, std::string> m) {
std::map<std::string, std::string>::iterator it;
std::string s = "{ ";
for (it = m.begin(); it != m.end(); ++it) {
s += "{ ";
s += it->first;
s += " = ";
s += it->second;
s += " } ";
}
s += "}";
return s;
}
std::ostream& operator<<(std::ostream& os, const ParsedURL& p) {
return os << "\n"
<< "url: " << p.url << "\n"
<< "base: " << p.base << "\n"
<< "scheme: " << p.scheme << "\n"
<< "authority: " << p.authority.value() << "\n"
<< "path: " << p.path << "\n"
<< "query: " << print_map(p.query) << "\n"
<< "fragment: " << p.fragment << "\n";
}
TEST(parseURL, parsesSimpleHttpUrl) {
auto s = "http://www.example.org/file.tar.gz";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "http://www.example.org/file.tar.gz",
.base = "http://www.example.org/file.tar.gz",
.scheme = "http",
.authority = "www.example.org",
.path = "/file.tar.gz",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parsesSimpleHttpsUrl) {
auto s = "https://www.example.org/file.tar.gz";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "https://www.example.org/file.tar.gz",
.base = "https://www.example.org/file.tar.gz",
.scheme = "https",
.authority = "www.example.org",
.path = "/file.tar.gz",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parsesSimpleHttpUrlWithQueryAndFragment) {
auto s = "https://www.example.org/file.tar.gz?download=fast&when=now#hello";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "https://www.example.org/file.tar.gz",
.base = "https://www.example.org/file.tar.gz",
.scheme = "https",
.authority = "www.example.org",
.path = "/file.tar.gz",
.query = (StringMap) { { "download", "fast" }, { "when", "now" } },
.fragment = "hello",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parsesSimpleHttpUrlWithComplexFragment) {
auto s = "http://www.example.org/file.tar.gz?field=value#?foo=bar%23";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "http://www.example.org/file.tar.gz",
.base = "http://www.example.org/file.tar.gz",
.scheme = "http",
.authority = "www.example.org",
.path = "/file.tar.gz",
.query = (StringMap) { { "field", "value" } },
.fragment = "?foo=bar#",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parsesFilePlusHttpsUrl) {
auto s = "file+https://www.example.org/video.mp4";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "file+https://www.example.org/video.mp4",
.base = "https://www.example.org/video.mp4",
.scheme = "file+https",
.authority = "www.example.org",
.path = "/video.mp4",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, rejectsAuthorityInUrlsWithFileTransportation) {
auto s = "file://www.example.org/video.mp4";
ASSERT_THROW(parseURL(s), Error);
}
TEST(parseURL, parseIPv4Address) {
auto s = "http://127.0.0.1:8080/file.tar.gz?download=fast&when=now#hello";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "http://127.0.0.1:8080/file.tar.gz",
.base = "https://127.0.0.1:8080/file.tar.gz",
.scheme = "http",
.authority = "127.0.0.1:8080",
.path = "/file.tar.gz",
.query = (StringMap) { { "download", "fast" }, { "when", "now" } },
.fragment = "hello",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parseScopedRFC4007IPv6Address) {
auto s = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
.base = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
.scheme = "http",
.authority = "[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
.path = "",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parseIPv6Address) {
auto s = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080",
.base = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080",
.scheme = "http",
.authority = "[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080",
.path = "",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parseEmptyQueryParams) {
auto s = "http://127.0.0.1:8080/file.tar.gz?&&&&&";
auto parsed = parseURL(s);
ASSERT_EQ(parsed.query, (StringMap) { });
}
TEST(parseURL, parseUserPassword) {
auto s = "http://user:pass@www.example.org:8080/file.tar.gz";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "http://user:pass@www.example.org/file.tar.gz",
.base = "http://user:pass@www.example.org/file.tar.gz",
.scheme = "http",
.authority = "user:pass@www.example.org:8080",
.path = "/file.tar.gz",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parseFileURLWithQueryAndFragment) {
auto s = "file:///none/of//your/business";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "",
.base = "",
.scheme = "file",
.authority = "",
.path = "/none/of//your/business",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parsedUrlsIsEqualToItself) {
auto s = "http://www.example.org/file.tar.gz";
auto url = parseURL(s);
ASSERT_TRUE(url == url);
}
TEST(parseURL, parseFTPUrl) {
auto s = "ftp://ftp.nixos.org/downloads/nixos.iso";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "ftp://ftp.nixos.org/downloads/nixos.iso",
.base = "ftp://ftp.nixos.org/downloads/nixos.iso",
.scheme = "ftp",
.authority = "ftp.nixos.org",
.path = "/downloads/nixos.iso",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parsesAnythingInUriFormat) {
auto s = "whatever://github.com/NixOS/nixpkgs.git";
auto parsed = parseURL(s);
}
TEST(parseURL, parsesAnythingInUriFormatWithoutDoubleSlash) {
auto s = "whatever:github.com/NixOS/nixpkgs.git";
auto parsed = parseURL(s);
}
TEST(parseURL, emptyStringIsInvalidURL) {
ASSERT_THROW(parseURL(""), Error);
}
/* ----------------------------------------------------------------------------
* decodeQuery
* --------------------------------------------------------------------------*/
TEST(decodeQuery, emptyStringYieldsEmptyMap) {
auto d = decodeQuery("");
ASSERT_EQ(d, (StringMap) { });
}
TEST(decodeQuery, simpleDecode) {
auto d = decodeQuery("yi=one&er=two");
ASSERT_EQ(d, ((StringMap) { { "yi", "one" }, { "er", "two" } }));
}
TEST(decodeQuery, decodeUrlEncodedArgs) {
auto d = decodeQuery("arg=%3D%3D%40%3D%3D");
ASSERT_EQ(d, ((StringMap) { { "arg", "==@==" } }));
}
TEST(decodeQuery, decodeArgWithEmptyValue) {
auto d = decodeQuery("arg=");
ASSERT_EQ(d, ((StringMap) { { "arg", ""} }));
}
/* ----------------------------------------------------------------------------
* percentDecode
* --------------------------------------------------------------------------*/
TEST(percentDecode, decodesUrlEncodedString) {
std::string s = "==@==";
std::string d = percentDecode("%3D%3D%40%3D%3D");
ASSERT_EQ(d, s);
}
TEST(percentDecode, multipleDecodesAreIdempotent) {
std::string once = percentDecode("%3D%3D%40%3D%3D");
std::string twice = percentDecode(once);
ASSERT_EQ(once, twice);
}
TEST(percentDecode, trailingPercent) {
std::string s = "==@==%";
std::string d = percentDecode("%3D%3D%40%3D%3D%25");
ASSERT_EQ(d, s);
}
/* ----------------------------------------------------------------------------
* percentEncode
* --------------------------------------------------------------------------*/
TEST(percentEncode, encodesUrlEncodedString) {
std::string s = percentEncode("==@==");
std::string d = "%3D%3D%40%3D%3D";
ASSERT_EQ(d, s);
}
TEST(percentEncode, keepArgument) {
std::string a = percentEncode("abd / def");
std::string b = percentEncode("abd / def", "/");
ASSERT_EQ(a, "abd%20%2F%20def");
ASSERT_EQ(b, "abd%20/%20def");
}
TEST(percentEncode, inverseOfDecode) {
std::string original = "%3D%3D%40%3D%3D";
std::string once = percentEncode(original);
std::string back = percentDecode(once);
ASSERT_EQ(back, original);
}
TEST(percentEncode, trailingPercent) {
std::string s = percentEncode("==@==%");
std::string d = "%3D%3D%40%3D%3D%25";
ASSERT_EQ(d, s);
}
TEST(percentEncode, yen) {
// https://en.wikipedia.org/wiki/Percent-encoding#Character_data
std::string s = reinterpret_cast<const char*>(u8"円");
std::string e = "%E5%86%86";
ASSERT_EQ(percentEncode(s), e);
ASSERT_EQ(percentDecode(e), s);
}
}

View file

@ -1,105 +0,0 @@
#include "xml-writer.hh"
#include <gtest/gtest.h>
#include <sstream>
namespace nix {
/* ----------------------------------------------------------------------------
* XMLWriter
* --------------------------------------------------------------------------*/
TEST(XMLWriter, emptyObject) {
std::stringstream out;
{
XMLWriter t(false, out);
}
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n");
}
TEST(XMLWriter, objectWithEmptyElement) {
std::stringstream out;
{
XMLWriter t(false, out);
t.openElement("foobar");
}
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar></foobar>");
}
TEST(XMLWriter, objectWithElementWithAttrs) {
std::stringstream out;
{
XMLWriter t(false, out);
XMLAttrs attrs = {
{ "foo", "bar" }
};
t.openElement("foobar", attrs);
}
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\"></foobar>");
}
TEST(XMLWriter, objectWithElementWithEmptyAttrs) {
std::stringstream out;
{
XMLWriter t(false, out);
XMLAttrs attrs = {};
t.openElement("foobar", attrs);
}
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar></foobar>");
}
TEST(XMLWriter, objectWithElementWithAttrsEscaping) {
std::stringstream out;
{
XMLWriter t(false, out);
XMLAttrs attrs = {
{ "<key>", "<value>" }
};
t.openElement("foobar", attrs);
}
// XXX: While "<value>" is escaped, "<key>" isn't which I think is a bug.
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar <key>=\"&lt;value&gt;\"></foobar>");
}
TEST(XMLWriter, objectWithElementWithAttrsIndented) {
std::stringstream out;
{
XMLWriter t(true, out);
XMLAttrs attrs = {
{ "foo", "bar" }
};
t.openElement("foobar", attrs);
}
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\">\n</foobar>\n");
}
TEST(XMLWriter, writeEmptyElement) {
std::stringstream out;
{
XMLWriter t(false, out);
t.writeEmptyElement("foobar");
}
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar />");
}
TEST(XMLWriter, writeEmptyElementWithAttributes) {
std::stringstream out;
{
XMLWriter t(false, out);
XMLAttrs attrs = {
{ "foo", "bar" }
};
t.writeEmptyElement("foobar", attrs);
}
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\" />");
}
}

View file

@ -8,7 +8,7 @@ namespace nix {
// URI stuff.
const static std::string pctEncoded = "(?:%[0-9a-fA-F][0-9a-fA-F])";
const static std::string schemeRegex = "(?:[a-z][a-z0-9+.-]*)";
const static std::string schemeNameRegex = "(?:[a-z][a-z0-9+.-]*)";
const static std::string ipv6AddressSegmentRegex = "[0-9a-fA-F:]+(?:%\\w+)?";
const static std::string ipv6AddressRegex = "(?:\\[" + ipv6AddressSegmentRegex + "\\]|" + ipv6AddressSegmentRegex + ")";
const static std::string unreservedRegex = "(?:[a-zA-Z0-9-._~])";

View file

@ -2,6 +2,7 @@
#include "url-parts.hh"
#include "util.hh"
#include "split.hh"
#include "canon-path.hh"
namespace nix {
@ -12,7 +13,7 @@ std::regex revRegex(revRegexS, std::regex::ECMAScript);
ParsedURL parseURL(const std::string & url)
{
static std::regex uriRegex(
"((" + schemeRegex + "):"
"((" + schemeNameRegex + "):"
+ "(?:(?://(" + authorityRegex + ")(" + absPathRegex + "))|(/?" + pathRegex + ")))"
+ "(?:\\?(" + queryRegex + "))?"
+ "(?:#(" + fragmentRegex + "))?",
@ -141,6 +142,13 @@ bool ParsedURL::operator ==(const ParsedURL & other) const
&& fragment == other.fragment;
}
ParsedURL ParsedURL::canonicalise()
{
ParsedURL res(*this);
res.path = CanonPath(res.path).abs();
return res;
}
/**
* Parse a URL scheme of the form '(applicationScheme\+)?transportScheme'
* into a tuple '(applicationScheme, transportScheme)'
@ -175,4 +183,12 @@ std::string fixGitURL(const std::string & url)
}
}
// https://www.rfc-editor.org/rfc/rfc3986#section-3.1
bool isValidSchemeName(std::string_view s)
{
static std::regex regex(schemeNameRegex, std::regex::ECMAScript);
return std::regex_match(s.begin(), s.end(), regex, std::regex_constants::match_default);
}
}
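
As a rough, hedged illustration of the helpers touched in this hunk (not part of the diff itself), the sketch below shows how a caller might combine parseURL, ParsedURL::canonicalise() and parseUrlScheme(). The example URL, the printed results and the ParsedUrlScheme field names are assumptions based on the CanonPath semantics and declarations referenced above.

#include "url.hh"
#include <iostream>

int main()
{
    using namespace nix;

    // canonicalise() is documented to collapse `.` and `..` path elements.
    auto u = parseURL("http://example.org/a/b/../c/./d.tar.gz");
    std::cout << u.canonicalise().to_string() << std::endl;
    // assumed output: http://example.org/a/c/d.tar.gz

    // parseUrlScheme() splits '(applicationScheme+)?transportScheme'.
    auto ps = parseUrlScheme("git+https");
    std::cout << ps.transport << std::endl; // assumed to print: https

    return 0;
}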

View file

@ -19,6 +19,11 @@ struct ParsedURL
std::string to_string() const;
bool operator ==(const ParsedURL & other) const;
/**
* Remove `.` and `..` path elements.
*/
ParsedURL canonicalise();
};
MakeError(BadURL, Error);
@ -50,4 +55,13 @@ ParsedUrlScheme parseUrlScheme(std::string_view scheme);
changes absolute paths into file:// URLs. */
std::string fixGitURL(const std::string & url);
/**
* Whether a string is a valid RFC 3986 scheme name.
* Colon `:` is part of the URI, not the scheme name, and is therefore rejected.
* See https://www.rfc-editor.org/rfc/rfc3986#section-3.1
*
* Does not check whether the scheme is understood, as that's context-dependent.
*/
bool isValidSchemeName(std::string_view scheme);
}
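
A minimal sketch of the behaviour the comment above describes (added here for illustration only, not part of the diff); the specific inputs are assumptions chosen to match the RFC 3986 grammar.

#include "url.hh"
#include <cassert>

int main()
{
    using namespace nix;

    // A scheme name is a letter followed by letters, digits, '+', '-' or '.'.
    assert(isValidSchemeName("https"));
    assert(isValidSchemeName("git+ssh"));
    assert(isValidSchemeName("tarball+file"));

    // The colon belongs to the URI syntax, not to the scheme name.
    assert(!isValidSchemeName("https:"));
    // Must start with a letter; whitespace is rejected as well.
    assert(!isValidSchemeName("2fa"));
    assert(!isValidSchemeName("git ssh"));

    return 0;
}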

View file

@ -5,6 +5,8 @@
#include <cctype>
#include <iostream>
#include <grp.h>
#include <regex>
namespace nix {

View file

@ -189,10 +189,8 @@ std::string toLower(const std::string & s);
std::string shellEscape(const std::string_view s);
/**
* Exception handling in destructors: print an error message, then
* ignore the exception.
*/
/* Exception handling in destructors: print an error message, then
ignore the exception. */
void ignoreException(Verbosity lvl = lvlError);
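
For illustration of the documented convention (not part of this diff), a destructor that must not let exceptions escape could follow the pattern sketched below. The Cleaner type and its use of deletePath() are hypothetical, and the header that declares ignoreException() is assumed from the context shown above.

#include "types.hh"       // Path
#include "file-system.hh" // deletePath()
#include "util.hh"        // assumed to declare ignoreException() in this tree

namespace nix {

/* Hypothetical RAII helper, shown only to illustrate the convention. */
struct Cleaner
{
    Path path;

    ~Cleaner()
    {
        /* Per the comment above: print an error message, then ignore the
           exception, so nothing propagates out of the destructor. */
        try {
            deletePath(path);
        } catch (...) {
            ignoreException();
        }
    }
};

}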