Mirror of https://github.com/NixOS/nix (synced 2025-06-29 14:53:16 +02:00)
Merge branch 'lazy-flake-commands' into lazy-trees-v2
Commit c891554999
10 changed files with 93 additions and 31 deletions
@@ -2461,7 +2461,7 @@ StorePath EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringCon
     auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned();
     if (auto storePath = store->maybeParseStorePath(path))
         return *storePath;
-    error<EvalError>("path '%1%' is not in the Nix store", path).withTrace(pos, errorCtx).debugThrow();
+    error<EvalError>("cannot coerce '%s' to a store path because it does not denote a subpath of the Nix store", path).withTrace(pos, errorCtx).debugThrow();
 }

@@ -332,6 +332,13 @@ struct GitArchiveInputScheme : InputScheme
             false,
             "«" + input.to_string() + "»");

+        if (!input.settings->trustTarballsFromGitForges)
+            // FIXME: computing the NAR hash here is wasteful if
+            // copyInputToStore() is just going to hash/copy it as
+            // well.
+            input.attrs.insert_or_assign("narHash",
+                accessor->hashPath(CanonPath::root).to_string(HashFormat::SRI, true));
+
         return {accessor, input};
     }

@@ -104,6 +104,19 @@ static StorePath copyInputToStore(
     return storePath;
 }

+static SourcePath maybeCopyInputToStore(
+    EvalState & state,
+    fetchers::Input & input,
+    const fetchers::Input & originalInput,
+    ref<SourceAccessor> accessor,
+    CopyMode copyMode)
+{
+    return copyMode == CopyMode::Lazy || (copyMode == CopyMode::RequireLockable && (input.isLocked() || input.getNarHash()))
+        ? SourcePath(accessor)
+        : state.storePath(
+            copyInputToStore(state, input, originalInput, accessor));
+}
+
 static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos)
 {
     if (value.isThunk() && value.isTrivial())

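Aside, not part of the commit: the copy-mode decision that maybeCopyInputToStore makes above can be read as a small standalone function. The sketch below uses made-up stand-in types (Input, the returned string) purely to stay self-contained; the real code operates on EvalState, fetchers::Input and SourceAccessor.

    // Standalone sketch; only the decision logic mirrors maybeCopyInputToStore above.
    #include <iostream>
    #include <string>

    enum struct CopyMode { RequireStorePath, RequireLockable, Lazy };

    struct Input {
        bool locked = false;      // stand-in for input.isLocked()
        bool hasNarHash = false;  // stand-in for input.getNarHash()
    };

    std::string resolve(const Input & input, CopyMode copyMode)
    {
        bool lockable = input.locked || input.hasNarHash;
        // Stay lazy when allowed; otherwise fall back to copying to the store.
        if (copyMode == CopyMode::Lazy || (copyMode == CopyMode::RequireLockable && lockable))
            return "use lazy accessor";
        return "copy to store";
    }

    int main()
    {
        std::cout << resolve(Input{true, false}, CopyMode::RequireLockable) << "\n";  // use lazy accessor
        std::cout << resolve(Input{false, false}, CopyMode::RequireLockable) << "\n"; // copy to store
        std::cout << resolve(Input{true, true}, CopyMode::RequireStorePath) << "\n";  // copy to store
    }
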
@@ -401,7 +414,8 @@ static Flake getFlake(
     const FlakeRef & originalRef,
     bool useRegistries,
     FlakeCache & flakeCache,
-    const InputAttrPath & lockRootAttrPath)
+    const InputAttrPath & lockRootAttrPath,
+    CopyMode copyMode)
 {
     // Fetch a lazy tree first.
     auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree(

@@ -423,17 +437,17 @@ static Flake getFlake(
         lockedRef = lockedRef2;
     }

-    // Copy the tree to the store.
-    auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, accessor);
-
     // Re-parse flake.nix from the store.
-    return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath);
+    return readFlake(
+        state, originalRef, resolvedRef, lockedRef,
+        maybeCopyInputToStore(state, lockedRef.input, originalRef.input, accessor, copyMode),
+        lockRootAttrPath);
 }

-Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries)
+Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries, CopyMode copyMode)
 {
     FlakeCache flakeCache;
-    return getFlake(state, originalRef, useRegistries, flakeCache, {});
+    return getFlake(state, originalRef, useRegistries, flakeCache, {}, copyMode);
 }

 static LockFile readLockFile(

@@ -457,7 +471,7 @@ LockedFlake lockFlake(

     auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);

-    auto flake = getFlake(state, topRef, useRegistries, flakeCache, {});
+    auto flake = getFlake(state, topRef, useRegistries, flakeCache, {}, lockFlags.copyMode);

     if (lockFlags.applyNixConfig) {
         flake.config.apply(settings);

@@ -502,6 +516,13 @@ LockedFlake lockFlake(
             explicitCliOverrides.insert(i.first);
     }

+    /* For locking of inputs, we require at least a NAR
+       hash. I.e. we can't be fully lazy. */
+    auto inputCopyMode =
+        lockFlags.copyMode == CopyMode::Lazy
+        ? CopyMode::RequireLockable
+        : lockFlags.copyMode;
+
     LockFile newLockFile;

     std::vector<FlakeRef> parents;

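Aside, not part of the commit: the downgrade above expresses that locking an input needs at least a NAR hash, so a fully lazy request is tightened to RequireLockable while the other modes pass through. A minimal sketch of that mapping (the enum is redeclared here only to keep the snippet self-contained):

    enum struct CopyMode { RequireStorePath, RequireLockable, Lazy };

    // Inputs that get locked must be at least lockable (NAR hash available),
    // so Lazy is tightened; RequireLockable and RequireStorePath are unchanged.
    CopyMode inputCopyModeFor(CopyMode requested)
    {
        return requested == CopyMode::Lazy ? CopyMode::RequireLockable : requested;
    }
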
@@ -632,7 +653,7 @@ LockedFlake lockFlake(
                 if (auto resolvedPath = resolveRelativePath()) {
                     return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath);
                 } else {
-                    return getFlake(state, ref, useRegistries, flakeCache, inputAttrPath);
+                    return getFlake(state, ref, useRegistries, flakeCache, inputAttrPath, inputCopyMode);
                 }
             };

@@ -783,10 +804,10 @@ LockedFlake lockFlake(
                     auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree(
                         state, *input.ref, useRegistries, flakeCache);

-                    // FIXME: allow input to be lazy.
-                    auto storePath = copyInputToStore(state, lockedRef.input, input.ref->input, accessor);
-
-                    return {state.storePath(storePath), lockedRef};
+                    return {
+                        maybeCopyInputToStore(state, lockedRef.input, input.ref->input, accessor, inputCopyMode),
+                        lockedRef
+                    };
                 }
             }();

@@ -896,7 +917,7 @@ LockedFlake lockFlake(
                repo, so we should re-read it. FIXME: we could
                also just clear the 'rev' field... */
             auto prevLockedRef = flake.lockedRef;
-            flake = getFlake(state, topRef, useRegistries);
+            flake = getFlake(state, topRef, useRegistries, lockFlags.copyMode);

             if (lockFlags.commitLockFile &&
                 flake.lockedRef.input.getRev() &&

@@ -115,7 +115,20 @@ struct Flake
     }
 };

-Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool useRegistries);
+enum struct CopyMode {
+    //! Copy the input to the store.
+    RequireStorePath,
+    //! Ensure that the input is locked or has a NAR hash.
+    RequireLockable,
+    //! Just return a lazy source accessor.
+    Lazy,
+};
+
+Flake getFlake(
+    EvalState & state,
+    const FlakeRef & flakeRef,
+    bool useRegistries,
+    CopyMode copyMode = CopyMode::RequireStorePath);

 /**
  * Fingerprint of a locked flake; used as a cache key.

@@ -213,6 +226,11 @@ struct LockFlags
      * for those inputs will be ignored.
      */
     std::set<InputAttrPath> inputUpdates;
+
+    /**
+     * If set, do not copy the flake to the Nix store.
+     */
+    CopyMode copyMode = CopyMode::RequireStorePath;
 };

 LockedFlake lockFlake(

@@ -214,8 +214,12 @@ StorePath Store::addToStore(
     auto sink = sourceToSink([&](Source & source) {
         LengthSource lengthSource(source);
         storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
-        if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold)
-            warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
+        if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) {
+            static bool failOnLargePath = getEnv("_NIX_TEST_FAIL_ON_LARGE_PATH").value_or("") == "1";
+            if (failOnLargePath)
+                throw Error("won't copy large path '%s' to the store (%d)", path, renderSize(lengthSource.total));
+            warn("copied large path '%s' to the store (%d)", path, renderSize(lengthSource.total));
+        }
     });
     dumpPath(path, *sink, fsm, filter);
     sink->finish();

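Aside, not part of the commit: the added branch turns the "large path" warning into a hard error when _NIX_TEST_FAIL_ON_LARGE_PATH is set to 1, which the test changes further down combine with --warn-large-path-threshold 1 to assert that no store copy happened. A standalone sketch of that behaviour using only the standard library (the real code uses Nix's settings, getEnv, Error and warn):

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    void checkLargePath(const std::string & path, std::uint64_t total, std::uint64_t warnThreshold)
    {
        if (warnThreshold && total >= warnThreshold) {
            // In tests, _NIX_TEST_FAIL_ON_LARGE_PATH=1 makes a large copy fatal
            // instead of merely warning.
            const char * env = std::getenv("_NIX_TEST_FAIL_ON_LARGE_PATH");
            if (env && std::string(env) == "1")
                throw std::runtime_error("won't copy large path '" + path + "' to the store");
            std::cerr << "warning: copied large path '" << path << "' to the store\n";
        }
    }
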
@@ -134,6 +134,7 @@ public:
         lockFlags.recreateLockFile = updateAll;
         lockFlags.writeLockFile = true;
         lockFlags.applyNixConfig = true;
+        lockFlags.copyMode = CopyMode::Lazy;

         lockFlake();
     }

@@ -166,6 +167,7 @@ struct CmdFlakeLock : FlakeCommand
         lockFlags.writeLockFile = true;
         lockFlags.failOnUnlocked = true;
         lockFlags.applyNixConfig = true;
+        lockFlags.copyMode = CopyMode::Lazy;

         lockFlake();
     }

@@ -212,11 +214,13 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON

     void run(nix::ref<nix::Store> store) override
     {
+        lockFlags.copyMode = CopyMode::Lazy;
         auto lockedFlake = lockFlake();
         auto & flake = lockedFlake.flake;

-        // Currently, all flakes are in the Nix store via the rootFS accessor.
-        auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first);
+        std::optional<StorePath> storePath;
+        if (flake.lockedRef.input.getNarHash())
+            storePath = flake.lockedRef.input.computeStorePath(*store);

         if (json) {
             nlohmann::json j;

@@ -238,7 +242,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
                 j["revCount"] = *revCount;
             if (auto lastModified = flake.lockedRef.input.getLastModified())
                 j["lastModified"] = *lastModified;
-            j["path"] = storePath;
+            if (storePath)
+                j["path"] = store->printStorePath(*storePath);
             j["locks"] = lockedFlake.lockFile.toJSON().first;
             if (auto fingerprint = lockedFlake.getFingerprint(store, fetchSettings))
                 j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false);

@@ -255,9 +260,10 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
             logger->cout(
                 ANSI_BOLD "Description:" ANSI_NORMAL " %s",
                 *flake.description);
+        if (storePath)
             logger->cout(
                 ANSI_BOLD "Path:" ANSI_NORMAL " %s",
-                storePath);
+                store->printStorePath(*storePath));
         if (auto rev = flake.lockedRef.input.getRev())
             logger->cout(
                 ANSI_BOLD "Revision:" ANSI_NORMAL " %s",

@@ -69,14 +69,13 @@ nix flake metadata "$flake1Dir" | grepQuiet 'URL:.*flake1.*'
 # Test 'nix flake metadata --json'.
 json=$(nix flake metadata flake1 --json | jq .)
 [[ $(echo "$json" | jq -r .description) = 'Bla bla' ]]
-[[ -d $(echo "$json" | jq -r .path) ]]
 [[ $(echo "$json" | jq -r .lastModified) = $(git -C "$flake1Dir" log -n1 --format=%ct) ]]
 hash1=$(echo "$json" | jq -r .revision)
-[[ -n $(echo "$json" | jq -r .fingerprint) ]]

 echo foo > "$flake1Dir/foo"
 git -C "$flake1Dir" add $flake1Dir/foo
-[[ $(nix flake metadata flake1 --json --refresh | jq -r .dirtyRevision) == "$hash1-dirty" ]]
+[[ $(_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake metadata flake1 --json --refresh --warn-large-path-threshold 1 | jq -r .dirtyRevision) == "$hash1-dirty" ]]
+[[ "$(nix flake metadata flake1 --json | jq -r .fingerprint)" != null ]]

 echo -n '# foo' >> "$flake1Dir/flake.nix"

@@ -118,20 +118,23 @@ nix flake lock $flakeFollowsA
 jq -r -c '.nodes | keys | .[]' $flakeFollowsA/flake.lock | grep "^foobar$"

 # Check that path: inputs cannot escape from their root.
+# FIXME: this test is wonky because with lazy trees, ../flakeB at the root is equivalent to /flakeB and not an error.
 cat > $flakeFollowsA/flake.nix <<EOF
 {
   description = "Flake A";
   inputs = {
     B.url = "path:../flakeB";
   };
-  outputs = { ... }: {};
+  outputs = { ... }: {
+    x = 123;
+  };
 }
 EOF

 git -C $flakeFollowsA add flake.nix

-expect 1 nix flake lock $flakeFollowsA 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode'
-expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep '/flakeB.*does not exist'
+expect 1 nix eval $flakeFollowsA#x 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode'
+expect 1 nix eval --impure $flakeFollowsA#x 2>&1 | grep '/flakeB.*does not exist'

 # Test relative non-flake inputs.
 cat > $flakeFollowsA/flake.nix <<EOF

@@ -35,7 +35,7 @@ echo 456 > "$flake1Dir"/x.nix
 expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" |
     grepQuiet "Will not write lock file.*because it has an unlocked input"

-nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks
+_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks --warn-large-path-threshold 1

 # Using a lock file with a dirty lock does not require --allow-dirty-locks, but should print a warning.
 expectStderr 0 nix eval "$flake2Dir#x" |

@@ -204,7 +204,7 @@ in
       cat_log()

       # ... otherwise it should use the API
-      out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")
+      out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0 --no-trust-tarballs-from-git-forges")
       print(out)
       info = json.loads(out)
       assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}"

@@ -223,6 +223,10 @@ in
      hash = client.succeed(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree {info['url']}).narHash'")
      assert hash == info['locked']['narHash']

+     # Fetching with an incorrect NAR hash should fail.
+     out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree \"github:fancy-enterprise/private-flake/{info['revision']}?narHash=sha256-HsrRFZYg69qaVe/wDyWBYLeS6ca7ACEJg2Z%2BGpEFw4A%3D\").narHash' 2>&1")
+     assert "NAR hash mismatch in input" in out, "NAR hash check did not fail with the expected error"
+
      # Fetching without a narHash should succeed if trust-github is set and fail otherwise.
      client.succeed(f"nix eval --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}'")
      out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}' 2>&1")