
Remove failed build caching

This feature was implemented for Hydra, but Hydra no longer uses it.
Eelco Dolstra 2016-04-08 18:16:53 +02:00
parent f398949b40
commit 8cffec8485
17 changed files with 12 additions and 335 deletions
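For context, the interface removed by this commit, all of it visible in the hunks below, consisted of the build-cache-failure setting, the Store::queryFailedPaths/clearFailedPaths API, and two nix-store operations. A rough sketch of the old usage, reconstructed from the diff rather than from documentation:

    # nix.conf (the setting defaulted to false):
    build-cache-failure = true

    # list paths whose builds were negatively cached as failed
    nix-store --query-failed-paths

    # clear cached failures; the special value '*' clears all of them
    nix-store --clear-failed-paths '*'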


@@ -156,12 +156,6 @@ public:
void collectGarbage(const GCOptions & options, GCResults & results) override
{ notImpl(); }
PathSet queryFailedPaths() override
{ return {}; }
void clearFailedPaths(const PathSet & paths) override
{ }
void optimiseStore() override
{ }


@@ -1047,11 +1047,6 @@ void DerivationGoal::haveDerivation()
return;
}
/* Check whether any output previously failed to build. If so,
don't bother. */
for (auto & i : invalidOutputs)
if (pathFailed(i)) return;
/* Reject doing a hash build of anything other than a fixed-output
derivation. */
if (buildMode == bmHash) {
@@ -1322,12 +1317,6 @@ void DerivationGoal::tryToBuild()
deletePath(path);
}
/* Check again whether any output previously failed to build,
because some other process may have tried and failed before we
acquired the lock. */
for (auto & i : drv->outputs)
if (pathFailed(i.second.path)) return;
/* Don't do a remote build if the derivation has the attribute
`preferLocalBuild' set. Also, check and repair modes are only
supported for local builds. */
@@ -1549,17 +1538,6 @@ void DerivationGoal::buildDone()
statusOk(status) ? BuildResult::OutputRejected :
fixedOutput || diskFull ? BuildResult::TransientFailure :
BuildResult::PermanentFailure;
/* Register the outputs of this build as "failed" so we
won't try to build them again (negative caching).
However, don't do this for fixed-output derivations,
since they're likely to fail for transient reasons
(e.g., fetchurl not being able to access the network).
Hook errors (like communication problems with the
remote machine) shouldn't be cached either. */
if (settings.cacheFailure && !fixedOutput && !diskFull)
for (auto & i : drv->outputs)
worker.store.registerFailedPath(i.second.path);
}
done(st, e.msg());
@@ -2993,23 +2971,6 @@ PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash)
}
bool DerivationGoal::pathFailed(const Path & path)
{
if (!settings.cacheFailure) return false;
if (!worker.store.hasPathFailed(path)) return false;
printMsg(lvlError, format("builder for %1% failed previously (cached)") % path);
if (settings.printBuildTrace)
printMsg(lvlError, format("@ build-failed %1% - cached") % drvPath);
done(BuildResult::CachedFailure);
return true;
}
Path DerivationGoal::addHashRewrite(const Path & path)
{
string h1 = string(path, settings.nixStore.size() + 1, 32);
@@ -3031,7 +2992,7 @@ void DerivationGoal::done(BuildResult::Status status, const string & msg)
amDone(result.success() ? ecSuccess : ecFailed);
if (result.status == BuildResult::TimedOut)
worker.timedOut = true;
if (result.status == BuildResult::PermanentFailure || result.status == BuildResult::CachedFailure)
if (result.status == BuildResult::PermanentFailure)
worker.permanentFailure = true;
}


@@ -52,7 +52,6 @@ Settings::Settings()
keepLog = true;
compressLog = true;
maxLogSize = 0;
cacheFailure = false;
pollInterval = 5;
checkRootReachability = false;
gcKeepOutputs = false;
@@ -175,7 +174,6 @@ void Settings::update()
_get(keepLog, "build-keep-log");
_get(compressLog, "build-compress-log");
_get(maxLogSize, "build-max-log-size");
_get(cacheFailure, "build-cache-failure");
_get(pollInterval, "build-poll-interval");
_get(checkRootReachability, "gc-check-reachability");
_get(gcKeepOutputs, "gc-keep-outputs");


@@ -168,9 +168,6 @@ struct Settings {
before being killed (0 means no limit). */
unsigned long maxLogSize;
/* Whether to cache build failures. */
bool cacheFailure;
/* How often (in seconds) to poll for locks. */
unsigned int pollInterval;


@@ -198,6 +198,13 @@ LocalStore::LocalStore()
txn.commit();
}
if (curSchema < 9) {
SQLiteTxn txn(state->db);
if (sqlite3_exec(state->db, "drop table FailedPaths", 0, 0, 0) != SQLITE_OK)
throwSQLiteError(state->db, "upgrading database schema");
txn.commit();
}
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
lockFile(globalLock, ltRead, true);
@@ -327,16 +334,6 @@ void LocalStore::openDB(State & state, bool create)
"select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
state.stmtInvalidatePath.create(db,
"delete from ValidPaths where path = ?;");
state.stmtRegisterFailedPath.create(db,
"insert or ignore into FailedPaths (path, time) values (?, ?);");
state.stmtHasPathFailed.create(db,
"select time from FailedPaths where path = ?;");
state.stmtQueryFailedPaths.create(db,
"select path from FailedPaths;");
// If the path is a derivation, then clear its outputs.
state.stmtClearFailedPath.create(db,
"delete from FailedPaths where ?1 = '*' or path = ?1 "
"or path in (select d.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where v.path = ?1);");
state.stmtAddDerivationOutput.create(db,
"insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
state.stmtQueryValidDerivers.create(db,
@@ -583,55 +580,6 @@ uint64_t LocalStore::addValidPath(State & state,
}
void LocalStore::registerFailedPath(const Path & path)
{
retrySQLite<void>([&]() {
auto state(_state.lock());
state->stmtRegisterFailedPath.use()(path)(time(0)).step();
});
}
bool LocalStore::hasPathFailed(const Path & path)
{
return retrySQLite<bool>([&]() {
auto state(_state.lock());
return state->stmtHasPathFailed.use()(path).next();
});
}
PathSet LocalStore::queryFailedPaths()
{
return retrySQLite<PathSet>([&]() {
auto state(_state.lock());
auto useQueryFailedPaths(state->stmtQueryFailedPaths.use());
PathSet res;
while (useQueryFailedPaths.next())
res.insert(useQueryFailedPaths.getStr(0));
return res;
});
}
void LocalStore::clearFailedPaths(const PathSet & paths)
{
retrySQLite<void>([&]() {
auto state(_state.lock());
SQLiteTxn txn(state->db);
for (auto & path : paths)
state->stmtClearFailedPath.use()(path).exec();
txn.commit();
});
}
Hash parseHashField(const Path & path, const string & s)
{
string::size_type colon = s.find(':');


@@ -17,8 +17,8 @@ namespace nix {
/* Nix store and database schema version. Version 1 (or 0) was Nix <=
0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
Nix 1.0. Version 7 is Nix 1.3. Version 8 is 1.12. */
const int nixSchemaVersion = 8;
Nix 1.0. Version 7 is Nix 1.3. Version 9 is 1.12. */
const int nixSchemaVersion = 9;
extern string drvsLogDir;
@@ -71,10 +71,6 @@ private:
SQLiteStmt stmtQueryReferences;
SQLiteStmt stmtQueryReferrers;
SQLiteStmt stmtInvalidatePath;
SQLiteStmt stmtRegisterFailedPath;
SQLiteStmt stmtHasPathFailed;
SQLiteStmt stmtQueryFailedPaths;
SQLiteStmt stmtClearFailedPath;
SQLiteStmt stmtAddDerivationOutput;
SQLiteStmt stmtQueryValidDerivers;
SQLiteStmt stmtQueryDerivationOutputs;
@@ -194,17 +190,6 @@ public:
void registerValidPaths(const ValidPathInfos & infos);
/* Register that the build of a derivation with output `path' has
failed. */
void registerFailedPath(const Path & path);
/* Query whether `path' previously failed to build. */
bool hasPathFailed(const Path & path);
PathSet queryFailedPaths() override;
void clearFailedPaths(const PathSet & paths) override;
void vacuumDB();
/* Repair the contents of the given path by redownloading it using


@@ -520,23 +520,6 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
}
PathSet RemoteStore::queryFailedPaths()
{
auto conn(connections->get());
conn->to << wopQueryFailedPaths;
conn->processStderr();
return readStorePaths<PathSet>(conn->from);
}
void RemoteStore::clearFailedPaths(const PathSet & paths)
{
auto conn(connections->get());
conn->to << wopClearFailedPaths << paths;
conn->processStderr();
readInt(conn->from);
}
void RemoteStore::optimiseStore()
{
auto conn(connections->get());
@@ -545,6 +528,7 @@ void RemoteStore::optimiseStore()
readInt(conn->from);
}
bool RemoteStore::verifyStore(bool checkContents, bool repair)
{
auto conn(connections->get());


@@ -85,10 +85,6 @@ public:
void collectGarbage(const GCOptions & options, GCResults & results) override;
PathSet queryFailedPaths() override;
void clearFailedPaths(const PathSet & paths) override;
void optimiseStore() override;
bool verifyStore(bool checkContents, bool repair) override;


@@ -39,8 +39,3 @@ create table if not exists DerivationOutputs (
);
create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
create table if not exists FailedPaths (
path text primary key not null,
time integer not null
);


@@ -148,7 +148,6 @@ struct BuildResult
InputRejected,
OutputRejected,
TransientFailure, // possibly transient
CachedFailure,
TimedOut,
MiscFailure,
DependencyFailed,
@@ -325,13 +324,6 @@ public:
/* Perform a garbage collection. */
virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
/* Return the set of paths that have failed to build.*/
virtual PathSet queryFailedPaths() = 0;
/* Clear the "failed" status of the given paths. The special
value `*' causes all failed paths to be cleared. */
virtual void clearFailedPaths(const PathSet & paths) = 0;
/* Return a string representing information about the path that
can be loaded into the database using `nix-store --load-db' or
`nix-store --register-validity'. */


@@ -493,23 +493,6 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
break;
}
case wopQueryFailedPaths: {
startWork();
PathSet paths = store->queryFailedPaths();
stopWork();
to << paths;
break;
}
case wopClearFailedPaths: {
PathSet paths = readStrings<PathSet>(from);
startWork();
store->clearFailedPaths(paths);
stopWork();
to << 1;
break;
}
case wopQueryPathInfo: {
Path path = readStorePath(from);
startWork();


@@ -821,24 +821,6 @@ static void opOptimise(Strings opFlags, Strings opArgs)
store->optimiseStore();
}
static void opQueryFailedPaths(Strings opFlags, Strings opArgs)
{
if (!opArgs.empty() || !opFlags.empty())
throw UsageError("no arguments expected");
PathSet failed = store->queryFailedPaths();
for (auto & i : failed)
cout << format("%1%\n") % i;
}
static void opClearFailedPaths(Strings opFlags, Strings opArgs)
{
if (!opFlags.empty())
throw UsageError("no flags expected");
store->clearFailedPaths(PathSet(opArgs.begin(), opArgs.end()));
}
/* Serve the nix store in a way usable by a restricted ssh user. */
static void opServe(Strings opFlags, Strings opArgs)
{
@@ -1102,10 +1084,6 @@ int main(int argc, char * * argv)
op = opRepairPath;
else if (*arg == "--optimise" || *arg == "--optimize")
op = opOptimise;
else if (*arg == "--query-failed-paths")
op = opQueryFailedPaths;
else if (*arg == "--clear-failed-paths")
op = opClearFailedPaths;
else if (*arg == "--serve")
op = opServe;
else if (*arg == "--generate-binary-cache-key")