diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 000000000..d4668bc2a --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,76 @@ +name: "Test" + +on: + pull_request: + push: + +jobs: + + tests: + needs: [check_cachix] + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + runs-on: ${{ matrix.os }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v2.4.0 + with: + fetch-depth: 0 + - uses: cachix/install-nix-action@v16 + - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV + - uses: cachix/cachix-action@v10 + if: needs.check_cachix.outputs.secret == 'true' + with: + name: '${{ env.CACHIX_NAME }}' + signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' + authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' + - run: nix-build release.nix -A build.$(nix-instantiate --eval -E '(builtins.currentSystem)') + + check_cachix: + name: Cachix secret present for installer tests + runs-on: ubuntu-latest + outputs: + secret: ${{ steps.secret.outputs.secret }} + steps: + - name: Check for Cachix secret + id: secret + env: + _CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }} + run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}" + + installer: + needs: [tests, check_cachix] + if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true' + runs-on: ubuntu-latest + outputs: + installerURL: ${{ steps.prepare-installer.outputs.installerURL }} + steps: + - uses: actions/checkout@v2.4.0 + with: + fetch-depth: 0 + - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV + - uses: cachix/install-nix-action@v16 + - uses: cachix/cachix-action@v10 + with: + name: '${{ env.CACHIX_NAME }}' + signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' + authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' + - id: prepare-installer + run: scripts/prepare-installer-for-github-actions + + installer_test: + needs: [installer, check_cachix] + if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true' + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v2.4.0 + - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV + - uses: cachix/install-nix-action@v16 + with: + install_url: '${{needs.installer.outputs.installerURL}}' + install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve" + - run: nix-instantiate -E 'builtins.currentTime' --eval diff --git a/.version b/.version index 506c62f67..c790ace11 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.3.10 \ No newline at end of file +2.3.17 \ No newline at end of file diff --git a/Makefile.config.in b/Makefile.config.in index 7e3b35b98..5f0244176 100644 --- a/Makefile.config.in +++ b/Makefile.config.in @@ -1,3 +1,4 @@ +HOST_OS = @host_os@ AR = @AR@ BDW_GC_LIBS = @BDW_GC_LIBS@ BUILD_SHARED_LIBS = @BUILD_SHARED_LIBS@ @@ -18,6 +19,7 @@ SODIUM_LIBS = @SODIUM_LIBS@ LIBLZMA_LIBS = @LIBLZMA_LIBS@ SQLITE3_LIBS = @SQLITE3_LIBS@ LIBBROTLI_LIBS = @LIBBROTLI_LIBS@ +LIBZSTD_LIBS = @LIBZSTD_LIBS@ EDITLINE_LIBS = @EDITLINE_LIBS@ bash = @bash@ bindir = @bindir@ diff --git a/configure.ac b/configure.ac index a52830b38..b0b620a08 100644 --- a/configure.ac +++ b/configure.ac @@ -33,14 +33,6 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";; esac]) 
-sys_name=$(uname -s | tr 'A-Z ' 'a-z_') - -case $sys_name in - cygwin*) - sys_name=cygwin - ;; -esac - AC_MSG_RESULT($system) AC_SUBST(system) AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier ('cpu-os')]) @@ -67,10 +59,12 @@ AC_SYS_LARGEFILE # Solaris-specific stuff. AC_STRUCT_DIRENT_D_TYPE -if test "$sys_name" = sunos; then +case "$host_os" in + solaris*) # Solaris requires -lsocket -lnsl for network functions - LIBS="-lsocket -lnsl $LIBS" -fi + LDFLAGS="-lsocket -lnsl $LDFLAGS" + ;; +esac # Check for pubsetbuf. @@ -157,6 +151,30 @@ AX_BOOST_BASE([1.66], [CXXFLAGS="$BOOST_CPPFLAGS $CXXFLAGS"], [AC_MSG_ERROR([Nix # ends up with LDFLAGS being empty, so we set it afterwards. LDFLAGS="$BOOST_LDFLAGS $LDFLAGS" +# On some platforms, new-style atomics need a helper library +AC_MSG_CHECKING(whether -latomic is needed) +AC_LINK_IFELSE([AC_LANG_SOURCE([[ +#include +uint64_t v; +int main() { + return (int)__atomic_load_n(&v, __ATOMIC_ACQUIRE); +}]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes) +AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC) +if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then + LDFLAGS="-latomic $LDFLAGS" +fi + +PKG_PROG_PKG_CONFIG + +AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared], + [Build shared libraries for Nix [default=yes]]), + shared=$enableval, shared=yes) +if test "$shared" = yes; then + AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.]) +else + AC_SUBST(BUILD_SHARED_LIBS, 0, [Whether to build shared libraries.]) + PKG_CONFIG="$PKG_CONFIG --static" +fi # Look for OpenSSL, a required dependency. PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"]) @@ -205,24 +223,29 @@ AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt], # Look for libbrotli{enc,dec}. PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"]) +# Look for libzstd. +PKG_CHECK_MODULES([LIBZSTD], [libzstd], [CXXFLAGS="$LIBZSTD_CFLAGS $CXXFLAGS"]) # Look for libseccomp, required for Linux sandboxing. -if test "$sys_name" = linux; then - AC_ARG_ENABLE([seccomp-sandboxing], - AC_HELP_STRING([--disable-seccomp-sandboxing], - [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)] - )) - if test "x$enable_seccomp_sandboxing" != "xno"; then - PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp], - [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"]) - have_seccomp=1 - AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.]) - else +case "$host_os" in + linux*) + AC_ARG_ENABLE([seccomp-sandboxing], + AC_HELP_STRING([--disable-seccomp-sandboxing], + [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)] + )) + if test "x$enable_seccomp_sandboxing" != "xno"; then + PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp], + [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"]) + have_seccomp=1 + AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.]) + else + have_seccomp= + fi + ;; + *) have_seccomp= - fi -else - have_seccomp= -fi + ;; +esac AC_SUBST(HAVE_SECCOMP, [$have_seccomp]) @@ -238,6 +261,7 @@ if test -n "$enable_s3"; then declare -a aws_version_tokens=($(printf '#include \nAWS_SDK_VERSION_STRING' | $CPP $CPPFLAGS - | grep -v '^#.*' | sed 's/"//g' | tr '.' 
' ')) AC_DEFINE_UNQUOTED([AWS_VERSION_MAJOR], ${aws_version_tokens@<:@0@:>@}, [Major version of aws-sdk-cpp.]) AC_DEFINE_UNQUOTED([AWS_VERSION_MINOR], ${aws_version_tokens@<:@1@:>@}, [Minor version of aws-sdk-cpp.]) + AC_DEFINE_UNQUOTED([AWS_VERSION_PATCH], ${aws_version_tokens@<:@2@:>@}, [Patch version of aws-sdk-cpp.]) fi @@ -269,9 +293,11 @@ AC_CHECK_FUNCS([strsignal posix_fallocate sysconf]) # This is needed if bzip2 is a static library, and the Nix libraries # are dynamic. -if test "$(uname)" = "Darwin"; then +case "${host_os}" in + darwin*) LDFLAGS="-all_load $LDFLAGS" -fi + ;; +esac # Do we have GNU tar? @@ -290,16 +316,6 @@ AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH], sandbox_shell=$withval) AC_SUBST(sandbox_shell) -AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared], - [Build shared libraries for Nix [default=yes]]), - shared=$enableval, shared=yes) -if test "$shared" = yes; then - AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.]) -else - AC_SUBST(BUILD_SHARED_LIBS, 0, [Whether to build shared libraries.]) -fi - - # Expand all variables in config.status. test "$prefix" = NONE && prefix=$ac_default_prefix test "$exec_prefix" = NONE && exec_prefix='${prefix}' diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl index 1cdf5ed16..6c5707921 100755 --- a/maintainers/upload-release.pl +++ b/maintainers/upload-release.pl @@ -89,6 +89,7 @@ downloadFile("binaryTarball.i686-linux", "1"); downloadFile("binaryTarball.x86_64-linux", "1"); downloadFile("binaryTarball.aarch64-linux", "1"); downloadFile("binaryTarball.x86_64-darwin", "1"); +downloadFile("binaryTarball.aarch64-darwin", "1"); downloadFile("installerScript", "1"); exit if $version =~ /pre/; @@ -121,6 +122,7 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix", " i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" . " aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" . " x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" . + " aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" . 
"}\n"); system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die; diff --git a/misc/launchd/local.mk b/misc/launchd/local.mk index 0ba722efb..a39188fe6 100644 --- a/misc/launchd/local.mk +++ b/misc/launchd/local.mk @@ -1,4 +1,4 @@ -ifeq ($(OS), Darwin) +ifdef HOST_DARWIN $(eval $(call install-data-in, $(d)/org.nixos.nix-daemon.plist, $(prefix)/Library/LaunchDaemons)) diff --git a/misc/systemd/local.mk b/misc/systemd/local.mk index 004549fd2..7b8d0b4b8 100644 --- a/misc/systemd/local.mk +++ b/misc/systemd/local.mk @@ -1,4 +1,4 @@ -ifeq ($(OS), Linux) +ifdef HOST_LINUX $(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644))) diff --git a/misc/upstart/local.mk b/misc/upstart/local.mk index a73dc061e..b08e05fe2 100644 --- a/misc/upstart/local.mk +++ b/misc/upstart/local.mk @@ -1,4 +1,4 @@ -ifeq ($(OS), Linux) +ifdef HOST_LINUX $(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644))) diff --git a/mk/lib.mk b/mk/lib.mk index 1da51d879..064d37629 100644 --- a/mk/lib.mk +++ b/mk/lib.mk @@ -11,8 +11,25 @@ noinst-scripts := man-pages := install-tests := dist-files := -OS = $(shell uname -s) +ifdef HOST_OS + HOST_KERNEL = $(firstword $(subst -, ,$(HOST_OS))) + ifeq ($(HOST_KERNEL), cygwin) + HOST_CYGWIN = 1 + endif + ifeq ($(patsubst darwin%,,$(HOST_KERNEL)),) + HOST_DARWIN = 1 + endif + ifeq ($(patsubst freebsd%,,$(HOST_KERNEL)),) + HOST_FREEBSD = 1 + endif + ifeq ($(HOST_KERNEL), linux) + HOST_LINUX = 1 + endif + ifeq ($(patsubst solaris%,,$(HOST_KERNEL)),) + HOST_SOLARIS = 1 + endif +endif # Hack to define a literal space. space := @@ -52,16 +69,16 @@ endif BUILD_SHARED_LIBS ?= 1 ifeq ($(BUILD_SHARED_LIBS), 1) - ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) + ifdef HOST_CYGWIN GLOBAL_CFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE GLOBAL_CXXFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE else GLOBAL_CFLAGS += -fPIC GLOBAL_CXXFLAGS += -fPIC endif - ifneq ($(OS), Darwin) - ifneq ($(OS), SunOS) - ifneq ($(OS), FreeBSD) + ifndef HOST_DARWIN + ifndef HOST_SOLARIS + ifndef HOST_FREEBSD GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries endif endif diff --git a/mk/libraries.mk b/mk/libraries.mk index 307e29b9d..b8a518853 100644 --- a/mk/libraries.mk +++ b/mk/libraries.mk @@ -1,9 +1,9 @@ libs-list := -ifeq ($(OS), Darwin) +ifdef HOST_DARWIN SO_EXT = dylib else - ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) + ifdef HOST_CYGWIN SO_EXT = dll else SO_EXT = so @@ -59,7 +59,7 @@ define build-library $(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs)))) _libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH)) - ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) + ifdef HOST_CYGWIN $(1)_INSTALL_DIR ?= $$(bindir) else $(1)_INSTALL_DIR ?= $$(libdir) @@ -73,18 +73,18 @@ define build-library ifeq ($(BUILD_SHARED_LIBS), 1) ifdef $(1)_ALLOW_UNDEFINED - ifeq ($(OS), Darwin) + ifdef HOST_DARWIN $(1)_LDFLAGS += -undefined suppress -flat_namespace endif else - ifneq ($(OS), Darwin) - ifneq (CYGWIN,$(findstring CYGWIN,$(OS))) + ifndef HOST_DARWIN + ifndef HOST_CYGWIN $(1)_LDFLAGS += -Wl,-z,defs endif endif endif - ifneq ($(OS), Darwin) + ifndef HOST_DARWIN $(1)_LDFLAGS += -Wl,-soname=$$($(1)_NAME).$(SO_EXT) endif @@ -93,7 +93,7 @@ define build-library $$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/ $$(trace-ld) $(CXX) -o $$(abspath $$@) -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), 
$$($$(lib)_LDFLAGS_USE)) $$($(1)_LDFLAGS_UNINSTALLED) - ifneq ($(OS), Darwin) + ifndef HOST_DARWIN $(1)_LDFLAGS_USE += -Wl,-rpath,$$(abspath $$(_d)) endif $(1)_LDFLAGS_USE += -L$$(_d) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME))) @@ -108,7 +108,7 @@ define build-library $$(trace-ld) $(CXX) -o $$@ -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED)) $(1)_LDFLAGS_USE_INSTALLED += -L$$(DESTDIR)$$($(1)_INSTALL_DIR) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME))) - ifneq ($(OS), Darwin) + ifndef HOST_DARWIN ifeq ($(SET_RPATH_TO_LIBS), 1) $(1)_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$$($(1)_INSTALL_DIR) else diff --git a/mk/run_test.sh b/mk/run_test.sh new file mode 100755 index 000000000..6af5b070a --- /dev/null +++ b/mk/run_test.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +set -u + +red="" +green="" +yellow="" +normal="" + +post_run_msg="ran test $1..." +if [ -t 1 ]; then + red="" + green="" + yellow="" + normal="" +fi +(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} init.sh 2>/dev/null > /dev/null) +log="$(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} $(basename $1) 2>&1)" +status=$? +if [ $status -eq 0 ]; then + echo "$post_run_msg [${green}PASS$normal]" +elif [ $status -eq 99 ]; then + echo "$post_run_msg [${yellow}SKIP$normal]" +else + echo "$post_run_msg [${red}FAIL$normal]" + echo "$log" | sed 's/^/ /' + exit "$status" +fi diff --git a/mk/tests.mk b/mk/tests.mk index 70c30661b..c1e140bac 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -1,45 +1,15 @@ # Run program $1 as part of ‘make installcheck’. + +test-deps = + define run-install-test - installcheck: $1 + installcheck: $1.test - _installcheck-list += $1 + .PHONY: $1.test + $1.test: $1 $(test-deps) + @env TEST_NAME=$(notdir $(basename $1)) TESTS_ENVIRONMENT="$(tests-environment)" mk/run_test.sh $1 < /dev/null endef -# Color code from https://unix.stackexchange.com/a/10065 -installcheck: - @total=0; failed=0; \ - red=""; \ - green=""; \ - yellow=""; \ - normal=""; \ - if [ -t 1 ]; then \ - red=""; \ - green=""; \ - yellow=""; \ - normal=""; \ - fi; \ - for i in $(_installcheck-list); do \ - total=$$((total + 1)); \ - printf "running test $$i..."; \ - log="$$(cd $$(dirname $$i) && $(tests-environment) $$(basename $$i) 2>&1)"; \ - status=$$?; \ - if [ $$status -eq 0 ]; then \ - echo " [$${green}PASS$$normal]"; \ - elif [ $$status -eq 99 ]; then \ - echo " [$${yellow}SKIP$$normal]"; \ - else \ - echo " [$${red}FAIL$$normal]"; \ - echo "$$log" | sed 's/^/ /'; \ - failed=$$((failed + 1)); \ - fi; \ - done; \ - if [ "$$failed" != 0 ]; then \ - echo "$${red}$$failed out of $$total tests failed $$normal"; \ - exit 1; \ - else \ - echo "$${green}All tests succeeded$$normal"; \ - fi - .PHONY: check installcheck diff --git a/nix.spec.in b/nix.spec.in index 6b9e37637..1aeafa174 100644 --- a/nix.spec.in +++ b/nix.spec.in @@ -28,6 +28,7 @@ Requires: curl Requires: bzip2 Requires: gzip Requires: xz +Requires: zstd BuildRequires: bison BuildRequires: boost-devel >= 1.60 BuildRequires: bzip2-devel diff --git a/perl/Makefile.config.in b/perl/Makefile.config.in index c87d4817e..d08ee65db 100644 --- a/perl/Makefile.config.in +++ b/perl/Makefile.config.in @@ -1,3 +1,4 @@ +HOST_OS = @host_os@ CC = @CC@ CFLAGS = @CFLAGS@ CXX = @CXX@ diff --git a/perl/configure.ac b/perl/configure.ac index e8e3610a8..4ee6923a4 100644 --- a/perl/configure.ac +++ b/perl/configure.ac @@ -10,6 +10,8 @@ AC_PROG_CC AC_PROG_CXX AX_CXX_COMPILE_STDCXX_11 +AC_CANONICAL_HOST + # Use 64-bit file 
system calls so that we can support files > 2 GiB. AC_SYS_LARGEFILE diff --git a/perl/local.mk b/perl/local.mk index b13d4c0d6..0eae651d8 100644 --- a/perl/local.mk +++ b/perl/local.mk @@ -28,7 +28,7 @@ Store_CXXFLAGS = \ Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS) -ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) +ifdef HOST_CYGWIN archlib = $(shell perl -E 'use Config; print $$Config{archlib};') libperl = $(shell perl -E 'use Config; print $$Config{libperl};') Store_LDFLAGS += $(shell find ${archlib} -name ${libperl}) diff --git a/release-common.nix b/release-common.nix index d66bbafa8..26355ba0b 100644 --- a/release-common.nix +++ b/release-common.nix @@ -49,7 +49,7 @@ rec { buildDeps = [ curl - bzip2 xz brotli editline + bzip2 xz brotli zstd editline openssl pkgconfig sqlite boost @@ -57,7 +57,7 @@ rec { git mercurial ] - ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal] + ++ lib.optionals stdenv.isLinux [libseccomp (pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ((aws-sdk-cpp.override { diff --git a/release.nix b/release.nix index 2f7573ccf..3c975c51e 100644 --- a/release.nix +++ b/release.nix @@ -1,7 +1,7 @@ { nix ? builtins.fetchGit ./. -, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz +, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs/archive/nixos-21.05-small.tar.gz , officialRelease ? false -, systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ] +, systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ] }: let @@ -108,7 +108,8 @@ let buildInputs = [ jobs.build.${system} curl bzip2 xz pkgconfig pkgs.perl boost ] - ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium + ++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security; configureFlags = '' --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} @@ -223,16 +224,6 @@ let }; - #rpm_fedora27x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora27x86_64) [ ]; - - - #deb_debian8i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian8i386) [ "libsodium-dev" ] [ "libsodium13" ]; - #deb_debian8x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian8x86_64) [ "libsodium-dev" ] [ "libsodium13" ]; - - #deb_ubuntu1710i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1710i386) [ ] [ "libsodium18" ]; - #deb_ubuntu1710x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1710x86_64) [ ] [ "libsodium18" "libboost-context1.62.0" ]; - - # System tests. tests.remoteBuilds = (import ./tests/remote-builds.nix rec { inherit nixpkgs; @@ -252,37 +243,6 @@ let nix = build.${system}; inherit system; }); - tests.binaryTarball = - with import nixpkgs { system = "x86_64-linux"; }; - vmTools.runInLinuxImage (runCommand "nix-binary-tarball-test" - { diskImage = vmTools.diskImages.ubuntu1204x86_64; - } - '' - set -x - useradd -m alice - su - alice -c 'tar xf ${binaryTarball.x86_64-linux}/*.tar.*' - mkdir /dest-nix - mount -o bind /dest-nix /nix # Provide a writable /nix. - chown alice /nix - su - alice -c '_NIX_INSTALLER_TEST=1 ./nix-*/install' - su - alice -c 'nix-store --verify' - su - alice -c 'PAGER= nix-store -qR ${build.x86_64-linux}' - - # Check whether 'nix upgrade-nix' works. 
- cat > /tmp/paths.nix </dev/null +} + +change_nixbld_names_and_ids(){ + local name uid next_id + ((next_id=NEW_NIX_FIRST_BUILD_UID)) + echo "Attempting to migrate nixbld users." + echo "Each user should change from nixbld# to _nixbld#" + echo "and their IDs relocated to $next_id+" + while read -r name uid; do + echo " Checking $name (uid: $uid)" + # iterate for a clean ID + while id_available "$next_id"; do + ((next_id++)) + if ((next_id >= 400)); then + echo "We've hit UID 400 without placing all of your users :(" + echo "You should use the commands in this script as a starting" + echo "point to review your UID-space and manually move the" + echo "remaining users (or delete them, if you don't need them)." + exit 1 + fi + done + + if [[ $name == _* ]]; then + echo " It looks like $name has already been renamed--skipping." + else + # first 3 are cleanup, it's OK if they aren't here + sudo dscl . delete /Users/$name dsAttrTypeNative:_writers_passwd &>/dev/null || true + sudo dscl . change /Users/$name NFSHomeDirectory "/private/var/empty 1" "/var/empty" &>/dev/null || true + # remove existing user from group + sudo dseditgroup -o edit -t user -d $name nixbld || true + sudo dscl . change /Users/$name UniqueID $uid $next_id + sudo dscl . change /Users/$name RecordName $name _$name + # add renamed user to group + sudo dseditgroup -o edit -t user -a _$name nixbld + echo " $name migrated to _$name (uid: $next_id)" + fi + done < <(dscl . list /Users UniqueID | grep nixbld | sort -n -k2) +} + +change_nixbld_names_and_ids diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh index 49076bd5c..77b81ee50 100644 --- a/scripts/install-darwin-multi-user.sh +++ b/scripts/install-darwin-multi-user.sh @@ -4,6 +4,8 @@ set -eu set -o pipefail readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist +NIX_FIRST_BUILD_UID="301" +NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d" dsclattr() { /usr/bin/dscl . -read "$1" \ diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index a3613cb28..5db97cd2a 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -25,7 +25,9 @@ readonly RED='\033[31m' readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32} readonly NIX_BUILD_GROUP_ID="30000" readonly NIX_BUILD_GROUP_NAME="nixbld" -readonly NIX_FIRST_BUILD_UID="30001" +# darwin installer needs to override these +NIX_FIRST_BUILD_UID="30001" +NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d" # Please don't change this. We don't support it, because the # default shell profile that comes with Nix doesn't support it. readonly NIX_ROOT="/nix" @@ -61,8 +63,10 @@ contactme() { echo "If you can, open an issue at https://github.com/nixos/nix/issues" echo "" echo "Or feel free to contact the team," - echo " - on IRC #nixos on irc.freenode.net" + echo " - on Matrix #nix:nixos.org" + echo " - on IRC #nixos on irc.libera.chat" echo " - on twitter @nixos_org" + echo " - on our forum https://discourse.nixos.org/" } uninstall_directions() { @@ -102,7 +106,7 @@ EOF } nix_user_for_core() { - printf "nixbld%d" "$1" + printf "$NIX_BUILD_USER_NAME_TEMPLATE" "$1" } nix_uid_for_core() { diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh index 90b7a8948..b736a2d69 100644 --- a/scripts/install-nix-from-closure.sh +++ b/scripts/install-nix-from-closure.sh @@ -154,9 +154,15 @@ fi mkdir -p $dest/store printf "copying Nix to %s..." "${dest}/store" >&2 +# Insert a newline if no progress is shown. +if [ ! 
-t 0 ]; then + echo "" +fi for i in $(cd "$self/store" >/dev/null && echo ./*); do - printf "." >&2 + if [ -t 0 ]; then + printf "." >&2 + fi i_tmp="$dest/store/$i.$$" if [ -e "$i_tmp" ]; then rm -rf "$i_tmp" diff --git a/scripts/install.in b/scripts/install.in index 9a281d776..5c7d1028c 100644 --- a/scripts/install.in +++ b/scripts/install.in @@ -29,8 +29,7 @@ case "$(uname -s).$(uname -m)" in Linux.i?86) system=i686-linux; hash=@binaryTarball_i686-linux@;; Linux.aarch64) system=aarch64-linux; hash=@binaryTarball_aarch64-linux@;; Darwin.x86_64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;; - # eventually maybe: system=arm64-darwin; hash=@binaryTarball_arm64-darwin@;; - Darwin.arm64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;; + Darwin.arm64) system=aarch64-darwin; hash=@binaryTarball_aarch64-darwin@;; *) oops "sorry, there is no binary distribution of Nix for your platform";; esac diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in index e0c24954a..31fe4f949 100644 --- a/scripts/nix-profile-daemon.sh.in +++ b/scripts/nix-profile-daemon.sh.in @@ -17,11 +17,21 @@ elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt else # Fall back to what is in the nix profiles, favouring whatever is defined last. - for i in $NIX_PROFILES; do - if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then - export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt + check_nix_profiles() { + if [ "$ZSH_VERSION" ]; then + # Zsh by default doesn't split words in unquoted parameter expansion. + # Set local_options for these options to be reverted at the end of the function + # and shwordsplit to force splitting words in $NIX_PROFILES below. + setopt local_options shwordsplit fi - done + for i in $NIX_PROFILES; do + if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then + export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt + fi + done + } + check_nix_profiles + unset -f check_nix_profiles fi export NIX_PATH="nixpkgs=@localstatedir@/nix/profiles/per-user/root/channels/nixpkgs:@localstatedir@/nix/profiles/per-user/root/channels" diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk index ccd5293e4..e190022ff 100644 --- a/src/libexpr/local.mk +++ b/src/libexpr/local.mk @@ -9,7 +9,7 @@ libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexe libexpr_LIBS = libutil libstore libexpr_LDFLAGS = -ifneq ($(OS), FreeBSD) +ifdef HOST_LINUX libexpr_LDFLAGS += -ldl endif diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 63cbef1dd..00bdbd47a 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -105,7 +105,7 @@ void ExprAttrs::show(std::ostream & str) const str << "{ "; for (auto & i : attrs) if (i.second.inherited) - str << "inherit " << i.first << " " << "; "; + str << "inherit " << i.first << "; "; else str << i.first << " = " << *i.second.e << "; "; for (auto & i : dynamicAttrs) @@ -211,7 +211,7 @@ string showAttrPath(const AttrPath & attrPath) if (i.symbol.set()) out << i.symbol; else - out << "\"${" << *i.expr << "}\""; + out << "${" << *i.expr << "}"; } return out.str(); } diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index d4c60f870..b65433d45 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -937,10 +937,16 @@ static void prim_hashFile(EvalState & state, const Pos & pos, Value * * args, Va if (ht == htUnknown) throw Error(format("unknown hash type '%1%', at %2%") % type % pos); - PathSet context; // discarded - Path p = 
state.coerceToPath(pos, *args[1], context); + PathSet context; + Path path = state.coerceToPath(pos, *args[1], context); + try { + state.realiseContext(context); + } catch (InvalidPathError & e) { + throw EvalError(format("cannot read '%1%', since path '%2%' is not valid, at %3%") + % path % e.path % pos); + } - mkString(v, hashFile(ht, state.checkSourcePath(p)).to_string(Base16, false), context); + mkString(v, hashFile(ht, state.checkSourcePath(state.toRealPath(path, context))).to_string(Base16, false)); } /* Read a directory (without . or ..) */ @@ -1350,6 +1356,10 @@ static void prim_catAttrs(EvalState & state, const Pos & pos, Value * * args, Va static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args, Value & v) { state.forceValue(*args[0]); + if (args[0]->type == tPrimOpApp || args[0]->type == tPrimOp) { + state.mkAttrs(v, 0); + return; + } if (args[0]->type != tLambda) throw TypeError(format("'functionArgs' requires a function, at %1%") % pos); @@ -1817,7 +1827,7 @@ static void prim_hashString(EvalState & state, const Pos & pos, Value * * args, PathSet context; // discarded string s = state.forceString(*args[1], context, pos); - mkString(v, hashString(ht, s).to_string(Base16, false), context); + mkString(v, hashString(ht, s).to_string(Base16, false)); } @@ -2075,7 +2085,7 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, else if (n == "name") request.name = state.forceStringNoCtx(*attr.value, *attr.pos); else - throw EvalError(format("unsupported argument '%1%' to '%2%', at %3%") % attr.name % who % attr.pos); + throw EvalError(format("unsupported argument '%1%' to '%2%', at %3%") % attr.name % who % *attr.pos); } if (request.uri.empty()) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 90f600284..ca5af7492 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -6,6 +6,7 @@ #include "hash.hh" #include +#include #include @@ -173,7 +174,7 @@ GitInfo exportGit(ref store, const std::string & uri, Path tmpDir = createTempDir(); AutoDelete delTmpDir(tmpDir, true); - runProgram("tar", true, { "x", "-C", tmpDir }, tar); + runProgram("tar", true, { "-x", "-f", "-", "-C", tmpDir }, tar); gitInfo.storePath = store->addToStore(name, tmpDir); diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index a14f9006a..39ed8961b 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -14,6 +14,14 @@ #include #include #include +#ifdef __linux__ +#include +#endif +#ifdef __GLIBC__ +#include +#include +#include +#endif #include @@ -95,6 +103,40 @@ static void opensslLockCallback(int mode, int type, const char * file, int line) } #endif +static std::once_flag dns_resolve_flag; + +static void preloadNSS() { + /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of + one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already + been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to + load its lookup libraries in the parent before any child gets a chance to. */ + std::call_once(dns_resolve_flag, []() { +#ifdef __GLIBC__ + /* On linux, glibc will run every lookup through the nss layer. + * That means every lookup goes, by default, through nscd, which acts as a local + * cache. + * Because we run builds in a sandbox, we also remove access to nscd otherwise + * lookups would leak into the sandbox. 
+ * + * But now we have a new problem, we need to make sure the nss_dns backend that + * does the dns lookups when nscd is not available is loaded or available. + * + * We can't make it available without leaking nix's environment, so instead we'll + * load the backend, and configure nss so it does not try to run dns lookups + * through nscd. + * + * This is technically only used for builtins:fetch* functions so we only care + * about dns. + * + * All other platforms are unaffected. + */ + if (dlopen (LIBNSS_DNS_SO, RTLD_NOW) == NULL) { + printMsg(Verbosity::lvlWarn, fmt("Unable to load nss_dns backend")); + } + __nss_configure_lookup ("hosts", "dns"); +#endif + }); +} static void sigHandler(int signo) { } @@ -158,6 +200,8 @@ void initNix() if (hasPrefix(getEnv("TMPDIR"), "/var/folders/")) unsetenv("TMPDIR"); #endif + + preloadNSS(); } diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 4ba9a5385..4b345fe37 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -45,7 +44,6 @@ /* Includes required for chroot support. */ #if __linux__ -#include #include #include #include @@ -181,6 +179,8 @@ public: virtual string key() = 0; + virtual void cleanup() { } + protected: virtual void amDone(ExitCode result); @@ -426,6 +426,8 @@ void Goal::amDone(ExitCode result) } waiters.clear(); worker.removeGoal(shared_from_this()); + + cleanup(); } @@ -1222,8 +1224,13 @@ void DerivationGoal::outputsSubstituted() /* If the substitutes form an incomplete closure, then we should build the dependencies of this derivation, but after that, we - can still use the substitutes for this derivation itself. */ - if (nrIncompleteClosure > 0) retrySubstitution = true; + can still use the substitutes for this derivation itself. + + If the nrIncompleteClosure != nrFailed, we have another issue as well. + In particular, it may be the case that the hole in the closure is + an output of the current derivation, which causes a loop if retried. + */ + if (nrIncompleteClosure > 0 && nrIncompleteClosure == nrFailed) retrySubstitution = true; nrFailed = nrNoSubstituters = nrIncompleteClosure = 0; @@ -1881,22 +1888,6 @@ PathSet DerivationGoal::exportReferences(PathSet storePaths) return paths; } -static std::once_flag dns_resolve_flag; - -static void preloadNSS() { - /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of - one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already - been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to - load its lookup libraries in the parent before any child gets a chance to. */ - std::call_once(dns_resolve_flag, []() { - struct addrinfo *res = NULL; - - if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) { - if (res) freeaddrinfo(res); - } - }); -} - void DerivationGoal::startBuilder() { /* Right platform? 
*/ @@ -1908,9 +1899,6 @@ void DerivationGoal::startBuilder() settings.thisSystem, concatStringsSep(", ", settings.systemFeatures)); - if (drv->isBuiltin()) - preloadNSS(); - #if __APPLE__ additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or(""); #endif @@ -2060,7 +2048,9 @@ void DerivationGoal::startBuilder() if (!found) throw Error(format("derivation '%1%' requested impure path '%2%', but it was not in allowed-impure-host-deps") % drvPath % i); - dirsInChroot[i] = i; + /* Allow files in __impureHostDeps to be missing; e.g. + macOS 11+ has no /usr/lib/libSystem*.dylib */ + dirsInChroot[i] = {i, true}; } #if __linux__ @@ -2834,8 +2824,6 @@ void DerivationGoal::runChild() ss.push_back("/etc/services"); ss.push_back("/etc/hosts"); - if (pathExists("/var/run/nscd/socket")) - ss.push_back("/var/run/nscd/socket"); } for (auto & i : ss) dirsInChroot.emplace(i, i); @@ -3911,6 +3899,8 @@ public: void handleChildOutput(int fd, const string & data) override; void handleEOF(int fd) override; + void cleanup() override; + Path getStorePath() { return storePath; } void amDone(ExitCode result) override @@ -3934,15 +3924,7 @@ SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, Repa SubstitutionGoal::~SubstitutionGoal() { - try { - if (thr.joinable()) { - // FIXME: signal worker thread to quit. - thr.join(); - worker.childTerminated(this); - } - } catch (...) { - ignoreException(); - } + cleanup(); } @@ -3977,6 +3959,8 @@ void SubstitutionGoal::tryNext() { trace("trying next substituter"); + cleanup(); + if (subs.size() == 0) { /* None left. Terminate this goal and let someone else deal with it. */ @@ -4104,7 +4088,7 @@ void SubstitutionGoal::tryToRun() thr = std::thread([this]() { try { /* Wake up the worker loop when we're done. */ - Finally updateStats([this]() { outPipe.writeSide = -1; }); + Finally updateStats([this]() { outPipe.writeSide.close(); }); Activity act(*logger, actSubstitute, Logger::Fields{storePath, sub->getUri()}); PushActivity pact(act.id); @@ -4188,6 +4172,20 @@ void SubstitutionGoal::handleEOF(int fd) if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this()); } +void SubstitutionGoal::cleanup() +{ + try { + if (thr.joinable()) { + // FIXME: signal worker thread to quit. + thr.join(); + worker.childTerminated(this); + } + + outPipe.close(); + } catch (...) { + ignoreException(); + } +} ////////////////////////////////////////////////////////////////////// diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 80674a9e7..b439ddf0c 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -349,6 +349,13 @@ struct CurlDownloader : public Downloader (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 206 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */)) { result.cached = httpStatus == 304; + + // In 2021, GitHub responds to If-None-Match with 304, + // but omits ETag. We just use the If-None-Match etag + // since 304 implies they are the same. 
+ if (httpStatus == 304 && result.etag == "") + result.etag = request.expectedETag; + act.progress(result.bodySize, result.bodySize); done = true; callback(std::move(result)); diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 2c50ea9df..787a320d6 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -34,7 +34,7 @@ Settings::Settings() , nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR))) , nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR))) , nixManDir(canonPath(NIX_MAN_DIR)) - , nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH)) + , nixDaemonSocketFile(canonPath(getEnv("NIX_DAEMON_SOCKET_PATH", nixStateDir + DEFAULT_SOCKET_PATH))) { buildUsersGroup = getuid() == 0 ? "nixbld" : ""; lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1"; diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index f5092151a..31a6cc46f 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1029,6 +1029,40 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, throw Error("size mismatch importing path '%s';\n wanted: %s\n got: %s", info.path, info.narSize, hashResult.second); + if (!info.ca.empty()) { + auto ca = info.ca; + if (hasPrefix(ca, "fixed:")) { + bool recursive = ca.compare(6, 2, "r:") == 0; + Hash expectedHash(std::string(ca, recursive ? 8 : 6)); + if (info.references.empty()) { + auto actualFoHash = hashCAPath( + recursive, + expectedHash.type, + realPath + ); + if (ca != actualFoHash) { + throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s", + info.path, + ca, + actualFoHash); + } + } else { + throw Error("path '%s' claims to be content-addressed, but has references. This isn’t allowed", + info.path); + } + + } else if (hasPrefix(ca, "text:")) { + Hash textHash(std::string(ca, 5)); + auto actualTextHash = hashString(htSHA256, readFile(realPath)); + if (textHash != actualTextHash) { + throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s", + info.path, + textHash.to_string(Base32, true), + actualTextHash.to_string(Base32, true)); + } + } + } + autoGC(); canonicalisePathMetaData(realPath, -1); @@ -1450,4 +1484,20 @@ void LocalStore::createUser(const std::string & userName, uid_t userId) } +std::string LocalStore::hashCAPath( + bool recursive, + const HashType & hashType, + const Path & path +) +{ + HashSink caSink(hashType); + if (recursive) { + dumpPath(path, caSink); + } else { + readFile(path, caSink); + } + auto hash = caSink.finish().first; + return makeFixedOutputCA(recursive, hash); +} + } diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 379a06af8..0185d0ebf 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -295,8 +295,14 @@ private: void createUser(const std::string & userName, uid_t userId) override; - friend class DerivationGoal; - friend class SubstitutionGoal; + std::string hashCAPath( + bool recursive, + const HashType & hashType, + const Path & path + ); + + friend struct DerivationGoal; + friend struct SubstitutionGoal; }; diff --git a/src/libstore/local.mk b/src/libstore/local.mk index d690fea28..e9a9e804b 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -9,7 +9,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc) libstore_LIBS = libutil libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread -ifneq ($(OS), FreeBSD) +ifdef HOST_LINUX libstore_LDFLAGS += -ldl endif @@ -21,7 +21,7 @@ ifeq ($(ENABLE_S3), 
1) libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core endif -ifeq ($(OS), SunOS) +ifdef HOST_SOLARIS libstore_LDFLAGS += -lsocket endif diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index cd547a964..1e5e264fc 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -56,6 +56,10 @@ class AwsLogger : public Aws::Utils::Logging::FormattedLogSystem { debug("AWS: %s", chomp(statement)); } + +#if !(AWS_VERSION_MAJOR <= 1 && AWS_VERSION_MINOR <= 7 && AWS_VERSION_PATCH <= 115) + void Flush() override {} +#endif }; static void initAWS() diff --git a/src/libstore/sandbox-defaults.sb b/src/libstore/sandbox-defaults.sb index 351037822..2bb1ea130 100644 --- a/src/libstore/sandbox-defaults.sb +++ b/src/libstore/sandbox-defaults.sb @@ -32,7 +32,9 @@ (literal "/tmp") (subpath TMPDIR)) ; Some packages like to read the system version. -(allow file-read* (literal "/System/Library/CoreServices/SystemVersion.plist")) +(allow file-read* + (literal "/System/Library/CoreServices/SystemVersion.plist") + (literal "/System/Library/CoreServices/SystemVersionCompat.plist")) ; Without this line clang cannot write to /dev/null, breaking some configure tests. (allow file-read-metadata (literal "/dev")) diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index a061d64f3..f1dd4a659 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -1,4 +1,5 @@ #include "sqlite.hh" +#include "globals.hh" #include "util.hh" #include @@ -27,8 +28,12 @@ namespace nix { SQLite::SQLite(const Path & path) { + // useSQLiteWAL also indicates what virtual file system we need. Using + // `unix-dotfile` is needed on NFS file systems and on Windows' Subsystem + // for Linux (WSL) where useSQLiteWAL should be false by default. + const char *vfs = settings.useSQLiteWAL ? 
0 : "unix-dotfile"; if (sqlite3_open_v2(path.c_str(), &db, - SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK) + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, vfs) != SQLITE_OK) throw Error(format("cannot open SQLite database '%s'") % path); } diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index 0dd84e320..c168e8ac5 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -5,8 +5,10 @@ #include #include +#include #include #include +#include #include #include @@ -198,6 +200,78 @@ struct BrotliDecompressionSink : ChunkedCompressionSink } }; +struct ZstdDecompressionSink : CompressionSink +{ + Sink & nextSink; + ZSTD_DStream *strm; + + std::vector inbuf; + size_t outbuf_size = ZSTD_DStreamOutSize(); + uint8_t *outbuf = new uint8_t[outbuf_size]; + + ZstdDecompressionSink(Sink & nextSink) : nextSink(nextSink) + { + strm = ZSTD_createDStream(); + if (!strm) + throw CompressionError("unable to initialise zstd decoder"); + + ZSTD_initDStream(strm); + } + + ~ZstdDecompressionSink() + { + delete[] outbuf; + ZSTD_freeDStream(strm); + } + + void finish() override + { + // this call doesn't make any sense, but it's here for consistency with the other compression sinks + // CompressionSink inherits from BufferedSink, but none of the subclasses appear to ever make use of the buffer + flush(); + + // if we still have undecoded data in the input buffer, we can't signal EOF to libzstd + // if we don't, then we're done here anyway + if (inbuf.size()) + throw CompressionError("received unexpected EOF while decompressing zstd file"); + + nextSink(nullptr, 0); + } + + void write(const unsigned char * data, size_t len) override + { + inbuf.insert(inbuf.end(), data, data + len); + + ZSTD_inBuffer in = { + .src = inbuf.data(), + .size = inbuf.size(), + .pos = 0 + }; + + ZSTD_outBuffer out = { + .dst = outbuf, + .size = outbuf_size, + .pos = 0 + }; + + while (in.pos < in.size) { + out.pos = 0; + + size_t ret = ZSTD_decompressStream(strm, &out, &in); + if (ZSTD_isError(ret)) + throw CompressionError("error %s while decompressing zstd file", ZSTD_getErrorName(ret)); + + if (out.pos) + nextSink(outbuf, out.pos); + else + break; + } + + // drop consumed input + inbuf.erase(inbuf.begin(), inbuf.begin() + in.pos); + } +}; + ref decompress(const std::string & method, const std::string & in) { StringSink ssink; @@ -217,6 +291,8 @@ ref makeDecompressionSink(const std::string & method, Sink & ne return make_ref(nextSink); else if (method == "br") return make_ref(nextSink); + else if (method == "zstd") + return make_ref(nextSink); else throw UnknownCompressionMethod("unknown compression method '%s'", method); } diff --git a/src/libutil/json.cc b/src/libutil/json.cc index 0a6fb65f0..f80726d26 100644 --- a/src/libutil/json.cc +++ b/src/libutil/json.cc @@ -1,6 +1,7 @@ #include "json.hh" #include +#include #include namespace nix { diff --git a/src/libutil/local.mk b/src/libutil/local.mk index e41a67d1f..ebcfddad2 100644 --- a/src/libutil/local.mk +++ b/src/libutil/local.mk @@ -6,4 +6,4 @@ libutil_DIR := $(d) libutil_SOURCES := $(wildcard $(d)/*.cc) -libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(BOOST_LDFLAGS) -lboost_context +libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(LIBZSTD_LIBS) $(BOOST_LDFLAGS) -lboost_context diff --git a/src/libutil/util.cc b/src/libutil/util.cc index ad8cc1894..b38eac9c6 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -753,6 +753,7 @@ void AutoCloseFD::close() if 
(::close(fd) == -1) /* This should never happen. */ throw SysError(format("closing file descriptor %1%") % fd); + fd = -1; } } @@ -770,6 +771,12 @@ int AutoCloseFD::release() return oldFD; } +void Pipe::close() +{ + readSide.close(); + writeSide.close(); +} + void Pipe::create() { @@ -1080,7 +1087,7 @@ void runProgram2(const RunOptions & options) throw SysError("executing '%1%'", options.program); }, processOptions); - out.writeSide = -1; + out.writeSide.close(); std::thread writerThread; @@ -1093,7 +1100,7 @@ void runProgram2(const RunOptions & options) if (source) { - in.readSide = -1; + in.readSide.close(); writerThread = std::thread([&]() { try { std::vector buf(8 * 1024); @@ -1110,7 +1117,7 @@ void runProgram2(const RunOptions & options) } catch (...) { promise.set_exception(std::current_exception()); } - in.writeSide = -1; + in.writeSide.close(); }); } diff --git a/src/libutil/util.hh b/src/libutil/util.hh index f057fdb2c..e59af288b 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -190,7 +190,6 @@ public: class AutoCloseFD { int fd; - void close(); public: AutoCloseFD(); AutoCloseFD(int fd); @@ -202,6 +201,7 @@ public: int get() const; explicit operator bool() const; int release(); + void close(); }; @@ -210,6 +210,7 @@ class Pipe public: AutoCloseFD readSide, writeSide; void create(); + void close(); }; diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 33ad28704..0bb963121 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -106,7 +106,7 @@ static void _main(int argc, char * * argv) // Heuristic to see if we're invoked as a shebang script, namely, // if we have at least one argument, it's the name of an // executable file, and it starts with "#!". - if (runEnv && argc > 1 && !std::regex_search(argv[1], std::regex("nix-shell"))) { + if (runEnv && argc > 1) { script = argv[1]; try { auto lines = tokenizeString(readFile(script), "\n"); @@ -425,6 +425,7 @@ static void _main(int argc, char * * argv) "unset NIX_ENFORCE_PURITY; " "shopt -u nullglob; " "unset TZ; %6%" + "shopt -s execfail;" "%7%", (Path) tmpDir, (pure ? "" : "p=$PATH; "), diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc index 7a5cb9a37..87ad4e949 100644 --- a/src/nix-daemon/nix-daemon.cc +++ b/src/nix-daemon/nix-daemon.cc @@ -901,7 +901,11 @@ static PeerInfo getPeerInfo(int remote) #if defined(SO_PEERCRED) +#if defined(__OpenBSD__) + struct sockpeercred cred; +#else ucred cred; +#endif socklen_t credLen = sizeof(cred); if (getsockopt(remote, SOL_SOCKET, SO_PEERCRED, &cred, &credLen) == -1) throw SysError("getting peer credentials"); diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc index 7b9a88281..97dfc7a29 100644 --- a/src/nix-env/user-env.cc +++ b/src/nix-env/user-env.cc @@ -50,10 +50,12 @@ bool createUserEnv(EvalState & state, DrvInfos & elems, output paths, and optionally the derivation path, as well as the meta attributes. */ Path drvPath = keepDerivations ? i.queryDrvPath() : ""; + DrvInfo::Outputs outputs = i.queryOutputs(true); + StringSet metaNames = i.queryMetaNames(); Value & v(*state.allocValue()); manifest.listElems()[n++] = &v; - state.mkAttrs(v, 16); + state.mkAttrs(v, 7 + outputs.size()); mkString(*state.allocAttr(v, state.sType), "derivation"); mkString(*state.allocAttr(v, state.sName), i.queryName()); @@ -65,7 +67,6 @@ bool createUserEnv(EvalState & state, DrvInfos & elems, mkString(*state.allocAttr(v, state.sDrvPath), i.queryDrvPath()); // Copy each output meant for installation. 
- DrvInfo::Outputs outputs = i.queryOutputs(true); Value & vOutputs = *state.allocAttr(v, state.sOutputs); state.mkList(vOutputs, outputs.size()); unsigned int m = 0; @@ -85,8 +86,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems, // Copy the meta attributes. Value & vMeta = *state.allocAttr(v, state.sMeta); - state.mkAttrs(vMeta, 16); - StringSet metaNames = i.queryMetaNames(); + state.mkAttrs(vMeta, metaNames.size()); for (auto & j : metaNames) { Value * v = i.queryMeta(j); if (!v) continue; diff --git a/src/resolve-system-dependencies/local.mk b/src/resolve-system-dependencies/local.mk index f9db16268..dbc041b23 100644 --- a/src/resolve-system-dependencies/local.mk +++ b/src/resolve-system-dependencies/local.mk @@ -1,4 +1,4 @@ -ifeq ($(OS), Darwin) +ifdef HOST_DARWIN programs += resolve-system-dependencies endif diff --git a/tests/common.sh.in b/tests/common.sh.in index 15d7b1ef9..e8233bf72 100644 --- a/tests/common.sh.in +++ b/tests/common.sh.in @@ -1,6 +1,6 @@ set -e -export TEST_ROOT=$(realpath ${TMPDIR:-/tmp}/nix-test) +export TEST_ROOT=$(realpath ${TMPDIR:-/tmp}/nix-test)/${TEST_NAME:-default} export NIX_STORE_DIR if ! NIX_STORE_DIR=$(readlink -f $TEST_ROOT/store 2> /dev/null); then # Maybe the build directory is symlinked. @@ -11,6 +11,7 @@ export NIX_LOCALSTATE_DIR=$TEST_ROOT/var export NIX_LOG_DIR=$TEST_ROOT/var/log/nix export NIX_STATE_DIR=$TEST_ROOT/var/nix export NIX_CONF_DIR=$TEST_ROOT/etc +export NIX_DAEMON_SOCKET_PATH=$TEST_ROOT/daemon-socket export _NIX_TEST_SHARED=$TEST_ROOT/shared if [[ -n $NIX_STORE ]]; then export _NIX_TEST_NO_SANDBOX=1 @@ -72,7 +73,7 @@ startDaemon() { rm -f $NIX_STATE_DIR/daemon-socket/socket nix-daemon & for ((i = 0; i < 30; i++)); do - if [ -e $NIX_STATE_DIR/daemon-socket/socket ]; then break; fi + if [ -e $NIX_DAEMON_SOCKET_PATH ]; then break; fi sleep 1 done pidDaemon=$! diff --git a/tests/local.mk b/tests/local.mk index 187f96ea2..83e93b123 100644 --- a/tests/local.mk +++ b/tests/local.mk @@ -12,6 +12,7 @@ nix_tests = \ timeout.sh secure-drv-outputs.sh nix-channel.sh \ multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \ binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh \ + substitute-with-invalid-ca.sh \ check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \ placeholders.sh nix-shell.sh \ linux-sandbox.sh \ @@ -39,4 +40,4 @@ tests-environment = NIX_REMOTE= $(bash) -e clean-files += $(d)/common.sh -installcheck: $(d)/common.sh $(d)/plugins/libplugintest.$(SO_EXT) +test-deps += tests/common.sh tests/plugins/libplugintest.$(SO_EXT) diff --git a/tests/nix-copy-closure.nix b/tests/nix-copy-closure.nix index 0dc147fb3..662fae60b 100644 --- a/tests/nix-copy-closure.nix +++ b/tests/nix-copy-closure.nix @@ -2,7 +2,9 @@ { nixpkgs, system, nix }: -with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing-python.nix") { + inherit system; +}; makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { @@ -24,41 +26,46 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { }; }; - testScript = { nodes }: - '' - startAll; + testScript = { nodes }: '' + # fmt: off + import subprocess - # Create an SSH key on the client. - my $key = `${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f key -N ""`; - $client->succeed("mkdir -m 700 /root/.ssh"); - $client->copyFileFromHost("key", "/root/.ssh/id_ed25519"); - $client->succeed("chmod 600 /root/.ssh/id_ed25519"); + start_all() - # Install the SSH key on the server. 
- $server->succeed("mkdir -m 700 /root/.ssh"); - $server->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys"); - $server->waitForUnit("sshd"); - $client->waitForUnit("network.target"); - $client->succeed("ssh -o StrictHostKeyChecking=no " . $server->name() . " 'echo hello world'"); + # Create an SSH key on the client. + subprocess.run([ + "${pkgs.openssh}/bin/ssh-keygen", "-t", "ed25519", "-f", "key", "-N", "" + ], capture_output=True, check=True) - # Copy the closure of package A from the client to the server. - $server->fail("nix-store --check-validity ${pkgA}"); - $client->succeed("nix-copy-closure --to server --gzip ${pkgA} >&2"); - $server->succeed("nix-store --check-validity ${pkgA}"); + client.succeed("mkdir -m 700 /root/.ssh") + client.copy_from_host("key", "/root/.ssh/id_ed25519") + client.succeed("chmod 600 /root/.ssh/id_ed25519") - # Copy the closure of package B from the server to the client. - $client->fail("nix-store --check-validity ${pkgB}"); - $client->succeed("nix-copy-closure --from server --gzip ${pkgB} >&2"); - $client->succeed("nix-store --check-validity ${pkgB}"); + # Install the SSH key on the server. + server.succeed("mkdir -m 700 /root/.ssh") + server.copy_from_host("key.pub", "/root/.ssh/authorized_keys") + server.wait_for_unit("sshd") + client.wait_for_unit("network.target") + client.succeed(f"ssh -o StrictHostKeyChecking=no {server.name} 'echo hello world'") - # Copy the closure of package C via the SSH substituter. - $client->fail("nix-store -r ${pkgC}"); - # FIXME - #$client->succeed( - # "nix-store --option use-ssh-substituter true" - # . " --option ssh-substituter-hosts root\@server" - # . " -r ${pkgC} >&2"); - #$client->succeed("nix-store --check-validity ${pkgC}"); - ''; + # Copy the closure of package A from the client to the server. + server.fail("nix-store --check-validity ${pkgA}") + client.succeed("nix-copy-closure --to server --gzip ${pkgA} >&2") + server.succeed("nix-store --check-validity ${pkgA}") + # Copy the closure of package B from the server to the client. + client.fail("nix-store --check-validity ${pkgB}") + client.succeed("nix-copy-closure --from server --gzip ${pkgB} >&2") + client.succeed("nix-store --check-validity ${pkgB}") + + # Copy the closure of package C via the SSH substituter. + client.fail("nix-store -r ${pkgC}") + # FIXME + # client.succeed( + # "nix-store --option use-ssh-substituter true" + # " --option ssh-substituter-hosts root\@server" + # " -r ${pkgC} >&2" + # ) + # client.succeed("nix-store --check-validity ${pkgC}") + ''; }) diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix index b867f13b4..2cf51a1df 100644 --- a/tests/remote-builds.nix +++ b/tests/remote-builds.nix @@ -2,7 +2,9 @@ { nixpkgs, system, nix }: -with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing-python.nix") { + inherit system; +}; makeTest ( @@ -65,44 +67,46 @@ in }; }; - testScript = { nodes }: - '' - startAll; + testScript = { nodes }: '' + # fmt: off + import subprocess - # Create an SSH key on the client. - my $key = `${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f key -N ""`; - $client->succeed("mkdir -p -m 700 /root/.ssh"); - $client->copyFileFromHost("key", "/root/.ssh/id_ed25519"); - $client->succeed("chmod 600 /root/.ssh/id_ed25519"); + start_all() - # Install the SSH key on the builders. 
- $client->waitForUnit("network.target"); - foreach my $builder ($builder1, $builder2) { - $builder->succeed("mkdir -p -m 700 /root/.ssh"); - $builder->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys"); - $builder->waitForUnit("sshd"); - $client->succeed("ssh -o StrictHostKeyChecking=no " . $builder->name() . " 'echo hello world'"); - } + # Create an SSH key on the client. + subprocess.run([ + "${pkgs.openssh}/bin/ssh-keygen", "-t", "ed25519", "-f", "key", "-N", "" + ], capture_output=True, check=True) + client.succeed("mkdir -p -m 700 /root/.ssh") + client.copy_from_host("key", "/root/.ssh/id_ed25519") + client.succeed("chmod 600 /root/.ssh/id_ed25519") - # Perform a build and check that it was performed on the builder. - my $out = $client->succeed( - "nix-build ${expr nodes.client.config 1} 2> build-output", - "grep -q Hello build-output" - ); - $builder1->succeed("test -e $out"); + # Install the SSH key on the builders. + client.wait_for_unit("network.target") + for builder in [builder1, builder2]: + builder.succeed("mkdir -p -m 700 /root/.ssh") + builder.copy_from_host("key.pub", "/root/.ssh/authorized_keys") + builder.wait_for_unit("sshd") + client.succeed(f"ssh -o StrictHostKeyChecking=no {builder.name} 'echo hello world'") - # And a parallel build. - my ($out1, $out2) = split /\s/, - $client->succeed('nix-store -r $(nix-instantiate ${expr nodes.client.config 2})\!out $(nix-instantiate ${expr nodes.client.config 3})\!out'); - $builder1->succeed("test -e $out1 -o -e $out2"); - $builder2->succeed("test -e $out1 -o -e $out2"); + # Perform a build and check that it was performed on the builder. + out = client.succeed( + "nix-build ${expr nodes.client.config 1} 2> build-output", + "grep -q Hello build-output" + ) + builder1.succeed(f"test -e {out}") - # And a failing build. - $client->fail("nix-build ${expr nodes.client.config 5}"); + # And a parallel build. + paths = client.succeed(r'nix-store -r $(nix-instantiate ${expr nodes.client.config 2})\!out $(nix-instantiate ${expr nodes.client.config 3})\!out') + out1, out2 = paths.split() + builder1.succeed(f"test -e {out1} -o -e {out2}") + builder2.succeed(f"test -e {out1} -o -e {out2}") - # Test whether the build hook automatically skips unavailable builders. - $builder1->block; - $client->succeed("nix-build ${expr nodes.client.config 4}"); - ''; + # And a failing build. + client.fail("nix-build ${expr nodes.client.config 5}") + # Test whether the build hook automatically skips unavailable builders. + builder1.block() + client.succeed("nix-build ${expr nodes.client.config 4}") + ''; }) diff --git a/tests/setuid.nix b/tests/setuid.nix index 77e83c8d6..b3b8fe9f1 100644 --- a/tests/setuid.nix +++ b/tests/setuid.nix @@ -2,7 +2,7 @@ { nixpkgs, system, nix }: -with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }; makeTest { @@ -15,94 +15,109 @@ makeTest { virtualisation.pathsInNixDB = [ pkgs.stdenv pkgs.pkgsi686Linux.stdenv ]; }; - testScript = { nodes }: - '' - startAll; + testScript = { nodes }: '' + # fmt: off + start_all() - # Copying to /tmp should succeed. - $machine->succeed('nix-build --no-sandbox -E \'(with import {}; runCommand "foo" {} " - mkdir -p $out - cp ${pkgs.coreutils}/bin/id /tmp/id - ")\' '); + # Copying to /tmp should succeed. 
+    machine.succeed(r"""
+      nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" {} "
+        mkdir -p $out
+        cp ${pkgs.coreutils}/bin/id /tmp/id
+      ")'
+    """.strip())
 
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
 
-      $machine->succeed("rm /tmp/id");
+    machine.succeed("rm /tmp/id")
 
-      # Creating a setuid binary should fail.
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" {} "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        chmod 4755 /tmp/id
-      ")\' ');
+    # Creating a setuid binary should fail.
+    machine.fail(r"""
+      nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" {} "
+        mkdir -p $out
+        cp ${pkgs.coreutils}/bin/id /tmp/id
+        chmod 4755 /tmp/id
+      ")'
+    """.strip())
 
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
 
-      $machine->succeed("rm /tmp/id");
+    machine.succeed("rm /tmp/id")
 
-      # Creating a setgid binary should fail.
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" {} "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        chmod 2755 /tmp/id
-      ")\' ');
+    # Creating a setgid binary should fail.
+    machine.fail(r"""
+      nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" {} "
+        mkdir -p $out
+        cp ${pkgs.coreutils}/bin/id /tmp/id
+        chmod 2755 /tmp/id
+      ")'
+    """.strip())
 
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
 
-      $machine->succeed("rm /tmp/id");
+    machine.succeed("rm /tmp/id")
 
-      # The checks should also work on 32-bit binaries.
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> { system = "i686-linux"; }; runCommand "foo" {} "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        chmod 2755 /tmp/id
-      ")\' ');
+    # The checks should also work on 32-bit binaries.
+    machine.fail(r"""
+      nix-build --no-sandbox -E '(with import <nixpkgs> { system = "i686-linux"; }; runCommand "foo" {} "
+        mkdir -p $out
+        cp ${pkgs.coreutils}/bin/id /tmp/id
+        chmod 2755 /tmp/id
+      ")'
+    """.strip())
 
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
 
-      $machine->succeed("rm /tmp/id");
+    machine.succeed("rm /tmp/id")
 
-      # The tests above use fchmodat(). Test chmod() as well.
-      $machine->succeed('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        perl -e \"chmod 0666, qw(/tmp/id) or die\"
-      ")\' ');
+    # The tests above use fchmodat(). Test chmod() as well.
+    machine.succeed(r"""
+      nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
+        mkdir -p $out
+        cp ${pkgs.coreutils}/bin/id /tmp/id
+        perl -e \"chmod 0666, qw(/tmp/id) or die\"
+      ")'
+    """.strip())
 
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 666 ]]');
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 666 ]]')
 
-      $machine->succeed("rm /tmp/id");
+    machine.succeed("rm /tmp/id")
 
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        perl -e \"chmod 04755, qw(/tmp/id) or die\"
-      ")\' ');
+    machine.fail(r"""
+      nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
+        mkdir -p $out
+        cp ${pkgs.coreutils}/bin/id /tmp/id
+        perl -e \"chmod 04755, qw(/tmp/id) or die\"
+      ")'
+    """.strip())
 
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
 
-      $machine->succeed("rm /tmp/id");
+    machine.succeed("rm /tmp/id")
 
-      # And test fchmod().
-      $machine->succeed('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 01750, \\\$x or die\"
-      ")\' ');
+    # And test fchmod().
+    machine.succeed(r"""
+      nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
+        mkdir -p $out
+        cp ${pkgs.coreutils}/bin/id /tmp/id
+        perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 01750, \\\$x or die\"
+      ")'
+    """.strip())
 
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 1750 ]]');
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 1750 ]]')
 
-      $machine->succeed("rm /tmp/id");
+    machine.succeed("rm /tmp/id")
 
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 04777, \\\$x or die\"
-      ")\' ');
+    machine.fail(r"""
+      nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
+        mkdir -p $out
+        cp ${pkgs.coreutils}/bin/id /tmp/id
+        perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 04777, \\\$x or die\"
+      ")'
+    """.strip())
 
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
-
-      $machine->succeed("rm /tmp/id");
-    '';
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
+    machine.succeed("rm /tmp/id")
+  '';
 }
diff --git a/tests/substitute-with-invalid-ca.sh b/tests/substitute-with-invalid-ca.sh
new file mode 100644
index 000000000..4d0b01e0f
--- /dev/null
+++ b/tests/substitute-with-invalid-ca.sh
@@ -0,0 +1,38 @@
+source common.sh
+
+BINARY_CACHE=file://$cacheDir
+
+getHash() {
+    basename "$1" | cut -d '-' -f 1
+}
+getRemoteNarInfo () {
+    echo "$cacheDir/$(getHash "$1").narinfo"
+}
+
+cat <<EOF > $TEST_HOME/good.txt
+I’m a good path
+EOF
+
+cat <<EOF > $TEST_HOME/bad.txt
+I’m a bad path
+EOF
+
+good=$(nix-store --add $TEST_HOME/good.txt)
+bad=$(nix-store --add $TEST_HOME/bad.txt)
+nix copy --to "$BINARY_CACHE" "$good"
+nix copy --to "$BINARY_CACHE" "$bad"
+nix-collect-garbage >/dev/null 2>&1
+
+# Falsifying the narinfo file for '$good'
+goodPathNarInfo=$(getRemoteNarInfo "$good")
+badPathNarInfo=$(getRemoteNarInfo "$bad")
+for fieldName in URL FileHash FileSize NarHash NarSize; do
+    sed -i "/^$fieldName/d" "$goodPathNarInfo"
+    grep -E "^$fieldName" "$badPathNarInfo" >> "$goodPathNarInfo"
+done
+
+# Copying back '$good' from the binary cache. This should fail as it is
+# corrupted
+if nix copy --from "$BINARY_CACHE" "$good"; then
+    fail "Importing a path with a wrong CA field should fail"
+fi
diff --git a/tests/tarball.sh b/tests/tarball.sh
index ba534c626..b2acb9eb9 100644
--- a/tests/tarball.sh
+++ b/tests/tarball.sh
@@ -11,7 +11,7 @@ cp dependencies.nix $tarroot/default.nix
 cp config.nix dependencies.builder*.sh $tarroot/
 
 tarball=$TEST_ROOT/tarball.tar.xz
-(cd $TEST_ROOT && tar c tarball) | xz > $tarball
+(cd $TEST_ROOT && tar cf - tarball) | xz > $tarball
 
 nix-env -f file://$tarball -qa --out-path | grep -q dependencies