From be1fa7a3955f9db7f0d9649b0f370f4616951037 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 15:41:52 +0200 Subject: [PATCH 01/22] Whitespace fixes --- BUILD.md | 4 ++-- Makefile | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/BUILD.md b/BUILD.md index 167d969..bd1b377 100644 --- a/BUILD.md +++ b/BUILD.md @@ -487,9 +487,9 @@ Install the WiX UI extension. You may need to open a new terminal in order to us wix extension add --global WixToolset.UI.wixext/5.x.y ``` -> [!IMPORTANT] +> [!IMPORTANT] > To avoid compatibility issues, ensure the WiX UI extension version matches the version of the WiX Toolset. -> +> > Run `wix --version` to check the version of WiX Toolset you have installed and replace `5.x.y` with the full version number without the Git revision. ### If you want to sign binaries with Authenticode diff --git a/Makefile b/Makefile index 5a8ea40..a8a714c 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ lint: ## Check the code for linting, formatting, and typing issues with ruff and .PHONY: fix fix: ## apply all the suggestions from ruff ruff check --fix - ruff format + ruff format .PHONY: test test: From 8568b4bb9de43758189f38eb5674d6dd02169e0c Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 16:09:07 +0200 Subject: [PATCH 02/22] Move container-only build context to `dangerzone/container` Move container-only build context (currently just the entrypoint script) from `dangerzone/gvisor_wrapper` to `dangerzone/container_helpers`. Update the rest of the scripts to use this location as well. --- .github/workflows/build.yml | 2 +- .github/workflows/ci.yml | 8 ++++---- Dockerfile | 2 +- .../{gvisor_wrapper => container_helpers}/entrypoint.py | 0 dodo.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) rename dangerzone/{gvisor_wrapper => container_helpers}/entrypoint.py (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 35f9597..a8b7ab7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -85,7 +85,7 @@ jobs: id: cache-container-image uses: actions/cache@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }} + key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} path: | share/container.tar.gz share/image-id.txt diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dc2bb27..306de24 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,7 +59,7 @@ jobs: id: cache-container-image uses: actions/cache@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }} + key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} path: |- share/container.tar.gz share/image-id.txt @@ -227,7 +227,7 @@ jobs: - name: Restore container cache uses: actions/cache/restore@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 
'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }} + key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} path: |- share/container.tar.gz share/image-id.txt @@ -334,7 +334,7 @@ jobs: - name: Restore container image uses: actions/cache/restore@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }} + key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} path: |- share/container.tar.gz share/image-id.txt @@ -429,7 +429,7 @@ jobs: - name: Restore container image uses: actions/cache/restore@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }} + key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} path: |- share/container.tar.gz share/image-id.txt diff --git a/Dockerfile b/Dockerfile index c89c5a4..330933d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -97,6 +97,6 @@ COPY --from=dangerzone-image / /home/dangerzone/dangerzone-image/rootfs # store the state of its containers. RUN mkdir /home/dangerzone/.containers -COPY gvisor_wrapper/entrypoint.py / +COPY container_helpers/entrypoint.py / ENTRYPOINT ["/entrypoint.py"] diff --git a/dangerzone/gvisor_wrapper/entrypoint.py b/dangerzone/container_helpers/entrypoint.py similarity index 100% rename from dangerzone/gvisor_wrapper/entrypoint.py rename to dangerzone/container_helpers/entrypoint.py diff --git a/dodo.py b/dodo.py index 2dd3d21..54a85af 100644 --- a/dodo.py +++ b/dodo.py @@ -65,7 +65,7 @@ IMAGE_DEPS = [ "Dockerfile", "poetry.lock", *list_files("dangerzone/conversion"), - "dangerzone/gvisor_wrapper/entrypoint.py", + *list_files("dangerzone/container_helpers"), "install/common/build-image.py", ] IMAGE_TARGETS = ["share/container.tar.gz", "share/image-id.txt"] From e29837cb436e428a633766a42b60e947907a9441 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 16:45:02 +0200 Subject: [PATCH 03/22] Copy gVisor public key and a helper script in container helpers Download and copy the following artifacts that will be used for building a Debian-based Dangerzone container image in the subsequent commits: * The APT key for the gVisor repo [1] * A helper script for building reproducible Debian images [2] [1] https://gvisor.dev/archive.key [2] https://github.com/reproducible-containers/repro-sources-list.sh/blob/d15cf12b26395b857b24fba223b108aff1c91b26/repro-sources-list.sh --- THIRD_PARTY_NOTICE | 14 +++ dangerzone/container_helpers/gvisor.key | 29 +++++ .../container_helpers/repro-sources-list.sh | 103 ++++++++++++++++++ 3 files changed, 146 insertions(+) create mode 100644 THIRD_PARTY_NOTICE create mode 100644 dangerzone/container_helpers/gvisor.key create mode 100755 dangerzone/container_helpers/repro-sources-list.sh diff --git a/THIRD_PARTY_NOTICE b/THIRD_PARTY_NOTICE new file mode 100644 index 0000000..0de1b22 --- /dev/null +++ 
b/THIRD_PARTY_NOTICE @@ -0,0 +1,14 @@ +This project includes third-party components as follows: + +1. gVisor APT Key + - URL: https://gvisor.dev/archive.key + - Last updated: 2025-01-21 + - Description: This is the public key used for verifying packages from the gVisor repository. + +2. Reproducible Containers Helper Script + - URL: https://github.com/reproducible-containers/repro-sources-list.sh/blob/d15cf12b26395b857b24fba223b108aff1c91b26/repro-sources-list.sh + - Last updated: 2025-01-21 + - Description: This script is used for building reproducible Debian images. + +Please refer to the respective sources for licensing information and further details regarding the use of these components. + diff --git a/dangerzone/container_helpers/gvisor.key b/dangerzone/container_helpers/gvisor.key new file mode 100644 index 0000000..8946884 --- /dev/null +++ b/dangerzone/container_helpers/gvisor.key @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBF0meAYBEACcBYPOSBiKtid+qTQlbgKGPxUYt0cNZiQqWXylhYUT4PuNlNx5 +s+sBLFvNTpdTrXMmZ8NkekyjD1HardWvebvJT4u+Ho/9jUr4rP71cNwNtocz/w8G +DsUXSLgH8SDkq6xw0L+5eGc78BBg9cOeBeFBm3UPgxTBXS9Zevoi2w1lzSxkXvjx +cGzltzMZfPXERljgLzp9AAfhg/2ouqVQm37fY+P/NDzFMJ1XHPIIp9KJl/prBVud +jJJteFZ5sgL6MwjBQq2kw+q2Jb8Zfjl0BeXDgGMN5M5lGhX2wTfiMbfo7KWyzRnB +RpSP3BxlLqYeQUuLG5Yx8z3oA3uBkuKaFOKvXtiScxmGM/+Ri2YM3m66imwDhtmP +AKwTPI3Re4gWWOffglMVSv2sUAY32XZ74yXjY1VhK3bN3WFUPGrgQx4X7GP0A1Te +lzqkT3VSMXieImTASosK5L5Q8rryvgCeI9tQLn9EpYFCtU3LXvVgTreGNEEjMOnL +dR7yOU+Fs775stn6ucqmdYarx7CvKUrNAhgEeHMonLe1cjYScF7NfLO1GIrQKJR2 +DE0f+uJZ52inOkO8ufh3WVQJSYszuS3HCY7w5oj1aP38k/y9zZdZvVvwAWZaiqBQ +iwjVs6Kub76VVZZhRDf4iYs8k1Zh64nXdfQt250d8U5yMPF3wIJ+c1yhxwARAQAB +tCpUaGUgZ1Zpc29yIEF1dGhvcnMgPGd2aXNvci1ib3RAZ29vZ2xlLmNvbT6JAk4E +EwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQRvHfheOnHCSRjnJ9Vv +xtVU4yvZQwUCYO4TxQAKCRBvxtVU4yvZQ9UoEACLPV7CnEA2bjCPi0NCWB/Mo1WL +evqv7Wv7vmXzI1K9DrqOhxuamQW75SVXg1df0hTJWbKFmDAip6NEC2Rg5P+A8hHj +nW/VG+q4ZFT662jDhnXQiO9L7EZzjyqNF4yWYzzgnqEu/SmGkDLDYiUCcGBqS2oE +EQfk7RHJSLMJXAnNDH7OUDgrirSssg/dlQ5uAHA9Au80VvC5fsTKza8b3Aydw3SV +iB8/Yuikbl8wKbpSGiXtR4viElXjNips0+mBqaUk2xpqSBrsfN+FezcInVXaXFeq +xtpq2/3M3DYbqCRjqeyd9wNi92FHdOusNrK4MYe0pAYbGjc65BwH+F0T4oJ8ZSJV +lIt+FZ0MqM1T97XadybYFsJh8qvajQpZEPL+zzNncc4f1d80e7+lwIZV/al0FZWW +Zlp7TpbeO/uW+lHs5W14YKwaQVh1whapKXTrATipNOOSCw2hnfrT8V7Hy55QWaGZ +f4/kfy929EeCP16d/LqOClv0j0RBr6NhRBQ0l/BE/mXjJwIk6nKwi+Yi4ek1ARi6 +AlCMLn9AZF7aTGpvCiftzIrlyDfVZT5IX03TayxRHZ4b1Rj8eyJaHcjI49u83gkr +4LGX08lEawn9nxFSx4RCg2swGiYw5F436wwwAIozqJuDASeTa3QND3au5v0oYWnl +umDySUl5wPaAaALgzA== +=5/8T +-----END PGP PUBLIC KEY BLOCK----- diff --git a/dangerzone/container_helpers/repro-sources-list.sh b/dangerzone/container_helpers/repro-sources-list.sh new file mode 100755 index 0000000..ea97e47 --- /dev/null +++ b/dangerzone/container_helpers/repro-sources-list.sh @@ -0,0 +1,103 @@ +#!/bin/bash +# +# Copyright The repro-sources-list.sh Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# ----------------------------------------------------------------------------- +# repro-sources-list.sh: +# configures /etc/apt/sources.list and similar files for installing packages from a snapshot. +# +# This script is expected to be executed inside Dockerfile. +# +# The following distributions are supported: +# - debian:11 (/etc/apt/sources.list) +# - debian:12 (/etc/apt/sources.list.d/debian.sources) +# - ubuntu:22.04 (/etc/apt/sources.list) +# - ubuntu:24.04 (/etc/apt/sources.listd/ubuntu.sources) +# - archlinux (/etc/pacman.d/mirrorlist) +# +# For the further information, see https://github.com/reproducible-containers/repro-sources-list.sh +# ----------------------------------------------------------------------------- + +set -eux -o pipefail + +. /etc/os-release + +: "${KEEP_CACHE:=1}" + +keep_apt_cache() { + rm -f /etc/apt/apt.conf.d/docker-clean + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache +} + +case "${ID}" in +"debian") + : "${SNAPSHOT_ARCHIVE_BASE:=http://snapshot.debian.org/archive/}" + : "${BACKPORTS:=}" + if [ -e /etc/apt/sources.list.d/debian.sources ]; then + : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list.d/debian.sources)}" + rm -f /etc/apt/sources.list.d/debian.sources + else + : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list)}" + fi + snapshot="$(printf "%(%Y%m%dT%H%M%SZ)T\n" "${SOURCE_DATE_EPOCH}")" + # TODO: use the new format for Debian >= 12 + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME} main" >/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian-security/${snapshot} ${VERSION_CODENAME}-security main" >>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME}-updates main" >>/etc/apt/sources.list + if [ "${BACKPORTS}" = 1 ]; then echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME}-backports main" >>/etc/apt/sources.list; fi + if [ "${KEEP_CACHE}" = 1 ]; then keep_apt_cache; fi + ;; +"ubuntu") + : "${SNAPSHOT_ARCHIVE_BASE:=http://snapshot.ubuntu.com/}" + if [ -e /etc/apt/sources.list.d/ubuntu.sources ]; then + : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list.d/ubuntu.sources)}" + rm -f /etc/apt/sources.list.d/ubuntu.sources + else + : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list)}" + fi + snapshot="$(printf "%(%Y%m%dT%H%M%SZ)T\n" "${SOURCE_DATE_EPOCH}")" + # TODO: use the new format for Ubuntu >= 24.04 + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} main restricted" >/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates main restricted" >>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} universe" >>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates universe" >>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} multiverse" >>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates multiverse" >>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-backports main restricted universe multiverse" 
>>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security main restricted" >>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security universe" >>/etc/apt/sources.list + echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security multiverse" >>/etc/apt/sources.list + if [ "${KEEP_CACHE}" = 1 ]; then keep_apt_cache; fi + # http://snapshot.ubuntu.com is redirected to https, so we have to install ca-certificates + export DEBIAN_FRONTEND=noninteractive + apt-get -o Acquire::https::Verify-Peer=false update >&2 + apt-get -o Acquire::https::Verify-Peer=false install -y ca-certificates >&2 + ;; +"arch") + : "${SNAPSHOT_ARCHIVE_BASE:=http://archive.archlinux.org/}" + : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /var/log/pacman.log)}" + export SOURCE_DATE_EPOCH + # shellcheck disable=SC2016 + date -d "@${SOURCE_DATE_EPOCH}" "+Server = ${SNAPSHOT_ARCHIVE_BASE}repos/%Y/%m/%d/\$repo/os/\$arch" >/etc/pacman.d/mirrorlist + ;; +*) + echo >&2 "Unsupported distribution: ${ID}" + exit 1 + ;; +esac + +: "${WRITE_SOURCE_DATE_EPOCH:=/dev/null}" +echo "${SOURCE_DATE_EPOCH}" >"${WRITE_SOURCE_DATE_EPOCH}" +echo "SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH}" From 935396565c725186a6d52cb9de8c61b9e85cf5f9 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 16:29:33 +0200 Subject: [PATCH 04/22] Reuse the same rootfs for the inner and outer container Remove the need to copy the Dangerzone container image (used by the inner container) within a wrapper gVisor image (used by the outer container). Instead, use the root of the container filesystem for both containers. We can do this safely because we don't mount any secrets to the container, and because gVisor offers a read-only view of the underlying filesystem Fixes #1048 --- Dockerfile | 20 +++----------------- dangerzone/container_helpers/entrypoint.py | 6 +++--- docs/developer/gvisor.md | 6 ++++++ 3 files changed, 12 insertions(+), 20 deletions(-) diff --git a/Dockerfile b/Dockerfile index 330933d..2177583 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,7 +36,7 @@ RUN mkdir /libreoffice_ext && cd libreoffice_ext \ ########################################### # Dangerzone image -FROM alpine:latest AS dangerzone-image +FROM alpine:latest # Install dependencies RUN apk --no-cache -U upgrade && \ @@ -66,14 +66,6 @@ COPY conversion /opt/dangerzone/dangerzone/conversion RUN addgroup -g 1000 dangerzone && \ adduser -u 1000 -s /bin/true -G dangerzone -h /home/dangerzone -D dangerzone -########################################### -# gVisor wrapper image - -FROM alpine:latest - -RUN apk --no-cache -U upgrade && \ - apk --no-cache add python3 - RUN GVISOR_URL="https://storage.googleapis.com/gvisor/releases/release/latest/$(uname -m)"; \ wget "${GVISOR_URL}/runsc" "${GVISOR_URL}/runsc.sha512" && \ sha512sum -c runsc.sha512 && \ @@ -81,18 +73,12 @@ RUN GVISOR_URL="https://storage.googleapis.com/gvisor/releases/release/latest/$( chmod 555 runsc && \ mv runsc /usr/bin/ -# Add the unprivileged `dangerzone` user. -RUN addgroup dangerzone && \ - adduser -s /bin/true -G dangerzone -h /home/dangerzone -D dangerzone +RUN touch /config.json +RUN chown dangerzone:dangerzone /config.json # Switch to the dangerzone user for the rest of the script. USER dangerzone -# Copy the Dangerzone image, as created by the previous steps, into the home -# directory of the `dangerzone` user. 
-RUN mkdir /home/dangerzone/dangerzone-image -COPY --from=dangerzone-image / /home/dangerzone/dangerzone-image/rootfs - # Create a directory that will be used by gVisor as the place where it will # store the state of its containers. RUN mkdir /home/dangerzone/.containers diff --git a/dangerzone/container_helpers/entrypoint.py b/dangerzone/container_helpers/entrypoint.py index 8d09eb2..80a6455 100755 --- a/dangerzone/container_helpers/entrypoint.py +++ b/dangerzone/container_helpers/entrypoint.py @@ -56,7 +56,7 @@ oci_config: dict[str, typing.Any] = { {"type": "RLIMIT_NOFILE", "hard": 4096, "soft": 4096}, ], }, - "root": {"path": "rootfs", "readonly": True}, + "root": {"path": "/", "readonly": True}, "hostname": "dangerzone", "mounts": [ { @@ -133,7 +133,7 @@ if os.environ.get("RUNSC_DEBUG"): json.dump(oci_config, sys.stderr, indent=2, sort_keys=True) # json.dump doesn't print a trailing newline, so print one here: log("") -with open("/home/dangerzone/dangerzone-image/config.json", "w") as oci_config_out: +with open("/config.json", "w") as oci_config_out: json.dump(oci_config, oci_config_out, indent=2, sort_keys=True) # Run gVisor. @@ -150,7 +150,7 @@ if os.environ.get("RUNSC_DEBUG"): runsc_argv += ["--debug=true", "--alsologtostderr=true"] if os.environ.get("RUNSC_FLAGS"): runsc_argv += [x for x in shlex.split(os.environ.get("RUNSC_FLAGS", "")) if x] -runsc_argv += ["run", "--bundle=/home/dangerzone/dangerzone-image", "dangerzone"] +runsc_argv += ["run", "--bundle=/", "dangerzone"] log( "Running gVisor with command line: {}", " ".join(shlex.quote(s) for s in runsc_argv) ) diff --git a/docs/developer/gvisor.md b/docs/developer/gvisor.md index e85f84c..6898fcd 100644 --- a/docs/developer/gvisor.md +++ b/docs/developer/gvisor.md @@ -1,5 +1,11 @@ # gVisor integration +> [!NOTE] +> **Update on 2025-01-13:** There is no longer a copied container image under +> `/home/dangerzone/dangerzone-image/rootfs`. We now reuse the same container +> image both for the inner and outer container. See +> [#1048](https://github.com/freedomofpress/dangerzone/issues/1048). + Dangerzone has relied on the container runtime available in each supported operating system (Docker Desktop on Windows / macOS, Podman on Linux) to isolate the host from the sanitization process. The problem with this type of isolation From 033ce0986d6a71bb5eaf1e742572b3da52b0ab47 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 17:26:58 +0200 Subject: [PATCH 05/22] Switch base image to Debian Stable Switch base image from Alpine Linux to Debian Stable, in order to reduce our image footprint, improve our security posture, and build our container image reproducibly. Fixes #1046 Refs #1047 --- Dockerfile | 113 ++++++++++++------------- dangerzone/conversion/doc_to_pixels.py | 4 + 2 files changed, 57 insertions(+), 60 deletions(-) diff --git a/Dockerfile b/Dockerfile index 2177583..c5eaf7b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,78 +1,71 @@ -########################################### -# Build PyMuPDF +# NOTE: Updating the packages to their latest versions requires bumping the +# Dockerfile args below. For more info about this file, read +# docs/developer/reproducibility.md. -FROM alpine:latest as pymupdf-build -ARG ARCH -ARG REQUIREMENTS_TXT +ARG DEBIAN_IMAGE_DATE=20250113 -# Install PyMuPDF via hash-checked requirements file -COPY ${REQUIREMENTS_TXT} /tmp/requirements.txt +FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim -# PyMuPDF provides non-arm musl wheels only. 
-# Only install build-dependencies if we are actually building the wheel -RUN case "$ARCH" in \ - "arm64") \ - # This is required for copying later, but is created only in the pre-built wheels - mkdir -p /usr/lib/python3.12/site-packages/PyMuPDF.libs/ \ - && apk --no-cache add linux-headers g++ linux-headers gcc make python3-dev py3-pip clang-dev ;; \ - *) \ - apk --no-cache add py3-pip ;; \ - esac -RUN pip install -vv --break-system-packages --require-hashes -r /tmp/requirements.txt +ARG GVISOR_ARCHIVE_DATE=20250113 +ARG DEBIAN_ARCHIVE_DATE=20250120 +ARG H2ORESTART_CHECKSUM=7760dc2963332c50d15eee285933ec4b48d6a1de9e0c0f6082946f93090bd132 +ARG H2ORESTART_VERSION=v0.7.0 +ENV DEBIAN_FRONTEND=noninteractive -########################################### -# Download H2ORestart -FROM alpine:latest as h2orestart-dl -ARG H2ORESTART_CHECKSUM=d09bc5c93fe2483a7e4a57985d2a8d0e4efae2efb04375fe4b59a68afd7241e2 +# The following way of installing packages is taken from +# https://github.com/reproducible-containers/repro-sources-list.sh/blob/master/Dockerfile.debian-12, +# and adapted to allow installing gVisor from each own repo as well. +RUN \ + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + --mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \ + --mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \ + : "Hacky way to set a date for the Debian snapshot repos" && \ + touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list.d/debian.sources && \ + touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list && \ + repro-sources-list.sh && \ + : "Setup APT to install gVisor from its separate APT repo" && \ + apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y --no-install-recommends apt-transport-https ca-certificates gnupg && \ + gpg -o /usr/share/keyrings/gvisor-archive-keyring.gpg --dearmor /tmp/gvisor.key && \ + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/gvisor-archive-keyring.gpg] https://storage.googleapis.com/gvisor/releases ${GVISOR_ARCHIVE_DATE} main" > /etc/apt/sources.list.d/gvisor.list && \ + : "Install the necessary gVisor and Dangerzone dependencies" && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + python3 python3-fitz libreoffice-nogui libreoffice-java-common \ + python3 python3-magic default-jre-headless fonts-noto-cjk fonts-dejavu \ + runsc unzip wget && \ + : "Clean up for improving reproducibility (optional)" && \ + rm -rf /var/cache/fontconfig/ && \ + rm -rf /etc/ssl/certs/java/cacerts && \ + rm -rf /var/log/* /var/cache/ldconfig/aux-cache + +# Download H2ORestart from GitHub using a pinned version and hash. Note that +# it's available in Debian repos, but not in Bookworm yet. RUN mkdir /libreoffice_ext && cd libreoffice_ext \ && H2ORESTART_FILENAME=h2orestart.oxt \ - && H2ORESTART_VERSION="v0.6.6" \ && wget https://github.com/ebandal/H2Orestart/releases/download/$H2ORESTART_VERSION/$H2ORESTART_FILENAME \ && echo "$H2ORESTART_CHECKSUM $H2ORESTART_FILENAME" | sha256sum -c \ - && install -dm777 "/usr/lib/libreoffice/share/extensions/" + && install -dm777 "/usr/lib/libreoffice/share/extensions/" \ + && rm /root/.wget-hsts +# Create an unprivileged user both for gVisor and for running Dangerzone. 
+RUN addgroup --gid 1000 dangerzone +RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \ + --disabled-password --home /home/dangerzone dangerzone -########################################### -# Dangerzone image - -FROM alpine:latest - -# Install dependencies -RUN apk --no-cache -U upgrade && \ - apk --no-cache add \ - libreoffice \ - openjdk8 \ - python3 \ - py3-magic \ - font-noto-cjk - -COPY --from=pymupdf-build /usr/lib/python3.12/site-packages/fitz/ /usr/lib/python3.12/site-packages/fitz -COPY --from=pymupdf-build /usr/lib/python3.12/site-packages/pymupdf/ /usr/lib/python3.12/site-packages/pymupdf -COPY --from=pymupdf-build /usr/lib/python3.12/site-packages/PyMuPDF.libs/ /usr/lib/python3.12/site-packages/PyMuPDF.libs -COPY --from=h2orestart-dl /libreoffice_ext/ /libreoffice_ext - -RUN install -dm777 "/usr/lib/libreoffice/share/extensions/" - +# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to +# import it. RUN mkdir -p /opt/dangerzone/dangerzone RUN touch /opt/dangerzone/dangerzone/__init__.py -COPY conversion /opt/dangerzone/dangerzone/conversion -# Add the unprivileged user. Set the UID/GID of the dangerzone user/group to -# 1000, since we will point to it from the OCI config. -# -# NOTE: A tmpfs will be mounted over /home/dangerzone directory, -# so nothing within it from the image will be persisted. -RUN addgroup -g 1000 dangerzone && \ - adduser -u 1000 -s /bin/true -G dangerzone -h /home/dangerzone -D dangerzone - -RUN GVISOR_URL="https://storage.googleapis.com/gvisor/releases/release/latest/$(uname -m)"; \ - wget "${GVISOR_URL}/runsc" "${GVISOR_URL}/runsc.sha512" && \ - sha512sum -c runsc.sha512 && \ - rm -f runsc.sha512 && \ - chmod 555 runsc && \ - mv runsc /usr/bin/ +# Copy only the Python code, and not any produced .pyc files. +COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/ +# Let the entrypoint script write the OCI config for the inner container under +# /config.json. RUN touch /config.json RUN chown dangerzone:dangerzone /config.json diff --git a/dangerzone/conversion/doc_to_pixels.py b/dangerzone/conversion/doc_to_pixels.py index 6737607..3a07377 100644 --- a/dangerzone/conversion/doc_to_pixels.py +++ b/dangerzone/conversion/doc_to_pixels.py @@ -129,6 +129,10 @@ class DocumentToPixels(DangerzoneConverter): # At least .odt, .docx, .odg, .odp, .ods, and .pptx "application/zip": { "type": "libreoffice", + # NOTE: `file` command < 5.45 cannot detect hwpx files properly, so we + # enable the extension in any case. See also: + # https://github.com/freedomofpress/dangerzone/pull/460#issuecomment-1654166465 + "libreoffice_ext": "h2orestart.oxt", }, # At least .doc, .docx, .odg, .odp, .odt, .pdf, .ppt, .pptx, .xls, and .xlsx "application/octet-stream": { From 14bb6c0e39b4c335ebb78b6bc7a31a11b8704cc2 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 17:37:19 +0200 Subject: [PATCH 06/22] Do not use poetry.lock when building the container image Remove all the scaffolding in our `build-image.py` script for using the `poetry.lock` file, now that we install PyMuPDF from the Debian repos. 
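For context, the build logic that survives in `install/common/build-image.py` reduces to a plain container build followed by a gzip-compressed image save. Below is a minimal Python sketch of that flow; the build context, image tag, and output path are illustrative stand-ins rather than the exact values the script computes.

```python
#!/usr/bin/env python3
# Minimal sketch of the simplified flow: build the image straight from the
# Dockerfile (PyMuPDF now comes from the Debian repos, so there is no
# `poetry export` step), then pipe `save` through gzip. Paths and the tag
# below are illustrative stand-ins, not build-image.py's computed values.
import gzip
import subprocess

RUNTIME = "podman"  # "docker" on macOS/Windows
IMAGE = "dangerzone.rocks/dangerzone:latest"  # hypothetical tag


def build_and_save(tarball: str = "share/container.tar.gz") -> None:
    # Plain `build` invocation: no --build-arg for a generated requirements file.
    subprocess.run(
        [RUNTIME, "build", "dangerzone/", "-f", "Dockerfile", "--tag", IMAGE],
        check=True,
    )
    # Stream the saved image through gzip in 4 MiB chunks.
    save = subprocess.Popen([RUNTIME, "save", IMAGE], stdout=subprocess.PIPE)
    with gzip.open(tarball, "wb") as f:
        while chunk := save.stdout.read(4 << 20):
            f.write(chunk)
    save.wait(5)


if __name__ == "__main__":
    build_and_save()
```

The cache-key change in the workflow diffs below reflects the same simplification: `poetry.lock` no longer feeds into the image, so it drops out of `hashFiles(...)` and the key prefix is bumped from `v3` to `v4`.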
--- .github/workflows/build.yml | 2 +- .github/workflows/ci.yml | 9 ++- dodo.py | 1 - install/common/build-image.py | 117 ++++++++++---------------------- install/linux/vendor-pymupdf.py | 2 +- poetry.lock | 14 +++- pyproject.toml | 2 +- 7 files changed, 56 insertions(+), 91 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a8b7ab7..108cb37 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -85,7 +85,7 @@ jobs: id: cache-container-image uses: actions/cache@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} + key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }} path: | share/container.tar.gz share/image-id.txt diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 306de24..f4e7f40 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,7 +59,7 @@ jobs: id: cache-container-image uses: actions/cache@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} + key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }} path: |- share/container.tar.gz share/image-id.txt @@ -67,7 +67,6 @@ jobs: - name: Build Dangerzone container image if: ${{ steps.cache-container-image.outputs.cache-hit != 'true' }} run: | - sudo apt-get install -y python3-poetry python3 ./install/common/build-image.py - name: Upload container image @@ -227,7 +226,7 @@ jobs: - name: Restore container cache uses: actions/cache/restore@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} + key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }} path: |- share/container.tar.gz share/image-id.txt @@ -334,7 +333,7 @@ jobs: - name: Restore container image uses: actions/cache/restore@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} + key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }} path: |- share/container.tar.gz share/image-id.txt @@ -429,7 +428,7 @@ jobs: - name: Restore container image uses: actions/cache/restore@v4 with: - key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py', 'poetry.lock') }} + key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }} path: |- share/container.tar.gz share/image-id.txt diff --git a/dodo.py b/dodo.py index 54a85af..d2f3ad1 100644 --- a/dodo.py +++ b/dodo.py @@ -63,7 +63,6 @@ TESSDATA_TARGETS = list_language_data() IMAGE_DEPS = [ "Dockerfile", - "poetry.lock", 
*list_files("dangerzone/conversion"), *list_files("dangerzone/container_helpers"), "install/common/build-image.py", diff --git a/install/common/build-image.py b/install/common/build-image.py index 6d99877..e2e3b01 100644 --- a/install/common/build-image.py +++ b/install/common/build-image.py @@ -1,6 +1,5 @@ import argparse import gzip -import os import platform import secrets import subprocess @@ -9,7 +8,6 @@ from pathlib import Path BUILD_CONTEXT = "dangerzone/" IMAGE_NAME = "dangerzone.rocks/dangerzone" -REQUIREMENTS_TXT = "container-pip-requirements.txt" if platform.system() in ["Darwin", "Windows"]: CONTAINER_RUNTIME = "docker" elif platform.system() == "Linux": @@ -84,91 +82,48 @@ def main(): with open(image_id_path, "w") as f: f.write(tag) - print("Exporting container pip dependencies") - with ContainerPipDependencies(): - if not args.use_cache: - print("Pulling base image") - subprocess.run( - [ - args.runtime, - "pull", - "alpine:latest", - ], - check=True, - ) + # Build the container image, and tag it with the calculated tag + print("Building container image") + cache_args = [] if args.use_cache else ["--no-cache"] + subprocess.run( + [ + args.runtime, + "build", + BUILD_CONTEXT, + *cache_args, + "-f", + "Dockerfile", + "--tag", + image_name_tagged, + ], + check=True, + ) - # Build the container image, and tag it with the calculated tag - print("Building container image") - cache_args = [] if args.use_cache else ["--no-cache"] - subprocess.run( + if not args.no_save: + print("Saving container image") + cmd = subprocess.Popen( [ - args.runtime, - "build", - BUILD_CONTEXT, - *cache_args, - "--build-arg", - f"REQUIREMENTS_TXT={REQUIREMENTS_TXT}", - "--build-arg", - f"ARCH={ARCH}", - "-f", - "Dockerfile", - "--tag", + CONTAINER_RUNTIME, + "save", image_name_tagged, ], - check=True, + stdout=subprocess.PIPE, ) - if not args.no_save: - print("Saving container image") - cmd = subprocess.Popen( - [ - CONTAINER_RUNTIME, - "save", - image_name_tagged, - ], - stdout=subprocess.PIPE, - ) - - print("Compressing container image") - chunk_size = 4 << 20 - with gzip.open( - tarball_path, - "wb", - compresslevel=args.compress_level, - ) as gzip_f: - while True: - chunk = cmd.stdout.read(chunk_size) - if len(chunk) > 0: - gzip_f.write(chunk) - else: - break - cmd.wait(5) - - -class ContainerPipDependencies: - """Generates PIP dependencies within container""" - - def __enter__(self): - try: - container_requirements_txt = subprocess.check_output( - ["poetry", "export", "--only", "container"], universal_newlines=True - ) - except subprocess.CalledProcessError as e: - print("FAILURE", e.returncode, e.output) - print(f"REQUIREMENTS: {container_requirements_txt}") - # XXX Export container dependencies and exclude pymupdfb since it is not needed in container - req_txt_pymupdfb_stripped = container_requirements_txt.split("pymupdfb")[0] - with open(Path(BUILD_CONTEXT) / REQUIREMENTS_TXT, "w") as f: - if ARCH == "arm64": - # PyMuPDF needs to be built on ARM64 machines - # But is already provided as a prebuilt-wheel on other architectures - f.write(req_txt_pymupdfb_stripped) - else: - f.write(container_requirements_txt) - - def __exit__(self, exc_type, exc_value, exc_tb): - print("Leaving the context...") - os.remove(Path(BUILD_CONTEXT) / REQUIREMENTS_TXT) + print("Compressing container image") + chunk_size = 4 << 20 + with gzip.open( + tarball_path, + "wb", + compresslevel=args.compress_level, + ) as gzip_f: + while True: + chunk = cmd.stdout.read(chunk_size) + if len(chunk) > 0: + gzip_f.write(chunk) + 
else: + break + cmd.wait(5) if __name__ == "__main__": diff --git a/install/linux/vendor-pymupdf.py b/install/linux/vendor-pymupdf.py index 0c49720..9cb5ccc 100755 --- a/install/linux/vendor-pymupdf.py +++ b/install/linux/vendor-pymupdf.py @@ -28,7 +28,7 @@ def main(): ) logger.info("Getting PyMuPDF deps as requirements.txt") - cmd = ["poetry", "export", "--only", "container"] + cmd = ["poetry", "export", "--only", "debian"] container_requirements_txt = subprocess.check_output(cmd) # XXX: Hack for Ubuntu Focal. diff --git a/poetry.lock b/poetry.lock index 543f210..1acd2a7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -767,6 +767,9 @@ optional = false python-versions = "<3.14,>=3.9" files = [ {file = "PySide6-6.8.1.1-cp39-abi3-macosx_12_0_universal2.whl", hash = "sha256:805728a7ed58352a02689b953ddbe29af1c8944f8c7f2c28312dc0b69f64b85e"}, + {file = "PySide6-6.8.1.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:70f8c4745d981ebb5bb93d7b825222532d553373b68b9db7a42cfcee25cafc9a"}, + {file = "PySide6-6.8.1.1-cp39-abi3-manylinux_2_39_aarch64.whl", hash = "sha256:de80ac62087a716b2bada2e3ddd739c5d176dc4be819abef91274d53d75f4e58"}, + {file = "PySide6-6.8.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:60a2551053fa69845b893fb821507e2cc89d3a8a8b43726d568acd1250ad44fb"}, ] [package.dependencies] @@ -799,6 +802,9 @@ optional = false python-versions = "<3.14,>=3.9" files = [ {file = "PySide6_Addons-6.8.1.1-cp39-abi3-macosx_12_0_universal2.whl", hash = "sha256:83d35d7a1a7dbd1a16b4040a26ad4d5cc030a2aed4d439241babee1225d6e58a"}, + {file = "PySide6_Addons-6.8.1.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5ef45aeadca37d658e44a41e11f2b2e43dfc34c780a6be1cd09d96a7696e6cc6"}, + {file = "PySide6_Addons-6.8.1.1-cp39-abi3-manylinux_2_39_aarch64.whl", hash = "sha256:e1b4a20b0bcbc2e440faba62e0d164223b8fd6f041d749543bc3812979116c4c"}, + {file = "PySide6_Addons-6.8.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:d8ae86944ac48cc9891666cf71565acebd403a953d0e050be4d41ac490788d0a"}, ] [package.dependencies] @@ -829,6 +835,9 @@ optional = false python-versions = "<3.14,>=3.9" files = [ {file = "PySide6_Essentials-6.8.1.1-cp39-abi3-macosx_12_0_universal2.whl", hash = "sha256:25f3fdb281ac3b442f08250e3284d3b1944f7c64c62ed93b57678a62c199cf46"}, + {file = "PySide6_Essentials-6.8.1.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62b64842a91114c224c41eeb6a8c8f255ba60268bc5ac19724f944d60e2277c6"}, + {file = "PySide6_Essentials-6.8.1.1-cp39-abi3-manylinux_2_39_aarch64.whl", hash = "sha256:e0c1cc3cfb2ea5eea70748da7d22032a59ea641e24988f543d5b274c0adab065"}, + {file = "PySide6_Essentials-6.8.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:085f12e16db31eb0e802b21c64eabf582f54db6c44463a1f5e1814d897b1f2c0"}, ] [package.dependencies] @@ -1071,6 +1080,9 @@ optional = false python-versions = "<3.14,>=3.9" files = [ {file = "shiboken6-6.8.1.1-cp39-abi3-macosx_12_0_universal2.whl", hash = "sha256:42fbb173a772c4e059dbeafb302e96f6ea8e1c9bacf05fab71ea7eb0d8f97b01"}, + {file = "shiboken6-6.8.1.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:d672df0f29dc5f44de7205c1acae4d0471ba8371bb1d68fdacbf1686f4d22a96"}, + {file = "shiboken6-6.8.1.1-cp39-abi3-manylinux_2_39_aarch64.whl", hash = "sha256:ff1b22a66476b042d3dc09870edca353fdac1c1f517a4cdc364b24e296213ecd"}, + {file = "shiboken6-6.8.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:26f7041c77058a8ecfb9345caa187250b199de79cfb37e33936e5fbd468a7780"}, ] [[package]] @@ -1270,4 +1282,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = 
"30751b4a27cebd020b5222de2abef4e38fb5f6676fdf936693060e42cd35c25f" +content-hash = "035acbf0d18e9eac2f2c338c56b55ea84cfb0c59db64ac014c55607c2ea7cfb2" diff --git a/pyproject.toml b/pyproject.toml index d021422..05a57b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,7 +64,7 @@ strip-ansi = "*" pytest-subprocess = "^1.5.2" pytest-rerunfailures = "^14.0" -[tool.poetry.group.container.dependencies] +[tool.poetry.group.debian.dependencies] pymupdf = "1.24.11" # Last version to support python 3.8 (needed for Ubuntu Focal support) [tool.poetry.group.dev.dependencies] From 270cae1bc0e1f738913c27de177d63230c2b17c4 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 20 Jan 2025 12:36:57 +0200 Subject: [PATCH 07/22] Rename vendor-pymupdf.py to debian-vendor-pymupdf.py Rename the `vendor-pymupdf.py` script to `debian-vendor-pymupdf.py`, since it's used only when building Debian packages. --- debian/rules | 2 +- install/linux/{vendor-pymupdf.py => debian-vendor-pymupdf.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename install/linux/{vendor-pymupdf.py => debian-vendor-pymupdf.py} (100%) diff --git a/debian/rules b/debian/rules index fbcb611..3c65403 100755 --- a/debian/rules +++ b/debian/rules @@ -9,5 +9,5 @@ export DH_VERBOSE=1 dh $@ --with python3 --buildsystem=pybuild override_dh_builddeb: - ./install/linux/vendor-pymupdf.py --dest debian/dangerzone/usr/lib/python3/dist-packages/dangerzone/vendor/ + ./install/linux/debian-vendor-pymupdf.py --dest debian/dangerzone/usr/lib/python3/dist-packages/dangerzone/vendor/ dh_builddeb $@ diff --git a/install/linux/vendor-pymupdf.py b/install/linux/debian-vendor-pymupdf.py similarity index 100% rename from install/linux/vendor-pymupdf.py rename to install/linux/debian-vendor-pymupdf.py From 8e8a515b64bb50660a102b5fcda20000a7de03aa Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 17:43:15 +0200 Subject: [PATCH 08/22] Allow using the container engine cache when building our image Remove our suggestions for not using the container cache, which stemmed from the fact that our Dangerzone image was not reproducible. Now that we have switched to Debian Stable and the Dockerfile is all we need to reproducibly build the exact same container image, we can just use the cache to speed up builds. --- docs/developer/doit.md | 14 -------------- dodo.py | 13 +------------ install/common/build-image.py | 4 ++-- pyproject.toml | 7 +------ 4 files changed, 4 insertions(+), 34 deletions(-) diff --git a/docs/developer/doit.md b/docs/developer/doit.md index a461d96..bf8fb16 100644 --- a/docs/developer/doit.md +++ b/docs/developer/doit.md @@ -44,20 +44,6 @@ doit * You can run `doit list --all -s` to see the full list of tasks, their dependencies, and whether they are up to date. * You can run `doit info ` to see which dependencies are missing. -* You can change this line in `pyproject.toml` to `true`, to allow using the - Docker/Podman build cache: - - ``` - use_cache = true - ``` - - > [!WARNING] - > Using caching may speed up image builds, but is not suitable for release - > artifacts. The ID of our base container image (Alpine Linux) does not change - > that often, but its APK package index does. So, if we use caching, we risk - > skipping the `apk upgrade` layer and end up with packages that are days - > behind. - * You can pass the following environment variables to the script, in order to affect some global parameters: - `CONTAINER_RUNTIME`: The container runtime to use. 
Either `podman` (default) diff --git a/dodo.py b/dodo.py index d2f3ad1..2022ffa 100644 --- a/dodo.py +++ b/dodo.py @@ -27,16 +27,6 @@ PARAM_APPLE_ID = { "help": "The Apple developer ID that will be used to sign the .dmg", } -PARAM_USE_CACHE = { - "name": "use_cache", - "long": "use-cache", - "help": ( - "Whether to use cached results or not. For reproducibility reasons," - " it's best to leave it to false" - ), - "default": False, -} - ### File dependencies # # Define all the file dependencies for our tasks in a single place, since some file @@ -205,11 +195,10 @@ def task_build_image(): return { "actions": [ - f"python install/common/build-image.py --use-cache=%(use_cache)s --runtime={CONTAINER_RUNTIME}", + f"python install/common/build-image.py --runtime={CONTAINER_RUNTIME}", ["cp", img_src, img_dst], ["cp", img_id_src, img_id_dst], ], - "params": [PARAM_USE_CACHE], "file_dep": IMAGE_DEPS, "targets": [img_src, img_dst, img_id_src, img_id_dst], "task_dep": ["init_release_dir", "check_container_runtime"], diff --git a/install/common/build-image.py b/install/common/build-image.py index e2e3b01..fb1b55f 100644 --- a/install/common/build-image.py +++ b/install/common/build-image.py @@ -51,9 +51,9 @@ def main(): "--use-cache", type=str2bool, nargs="?", - default=False, + default=True, const=True, - help="Use the builder's cache to speed up the builds (not suitable for release builds)", + help="Use the builder's cache to speed up the builds", ) args = parser.parse_args() diff --git a/pyproject.toml b/pyproject.toml index 05a57b8..d56c6c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,4 +1,4 @@ - [tool.poetry] +[tool.poetry] name = "dangerzone" version = "0.8.1" description = "Take potentially dangerous PDFs, office documents, or images and convert them to safe PDFs" @@ -73,11 +73,6 @@ httpx = "^0.27.2" [tool.doit] verbosity = 3 -[tool.doit.tasks.build_image] -# DO NOT change this to 'true' for release artifacts, else we risk building -# images that are a few days behind. See also: docs/developer/doit.md -use_cache = false - [tool.ruff.lint] select = [ # isort From fa27f4b063773c1d21a6ff069356ed31e01f02d9 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 18:10:44 +0200 Subject: [PATCH 09/22] Add jinja2-cli package dependency Add jinja2-cli as a package dependency, since it will be used to create the Dockerfile from some user parameters and a template. --- poetry.lock | 147 ++++++++++++++++++++++++++++++++++++++++++------- pyproject.toml | 1 + 2 files changed, 128 insertions(+), 20 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1acd2a7..0d8fd02 100644 --- a/poetry.lock +++ b/poetry.lock @@ -480,6 +480,43 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "jinja2" +version = "3.1.5" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"}, + {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jinja2-cli" +version = "0.8.2" +description = "A CLI interface to Jinja2" +optional = false +python-versions = "*" +files = [ + {file = "jinja2-cli-0.8.2.tar.gz", hash = "sha256:a16bb1454111128e206f568c95938cdef5b5a139929378f72bb8cf6179e18e50"}, + {file = "jinja2_cli-0.8.2-py2.py3-none-any.whl", hash = "sha256:b91715c79496beaddad790171e7258a87db21c1a0b6d2b15bca3ba44b74aac5d"}, +] + +[package.dependencies] +jinja2 = "*" + +[package.extras] +tests = ["flake8", "jinja2", "pytest"] +toml = ["jinja2", "toml"] +xml = ["jinja2", "xmltodict"] +yaml = ["jinja2", "pyyaml"] + [[package]] name = "lief" version = "0.16.2" @@ -563,6 +600,76 @@ importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] testing = ["coverage", "pyyaml"] +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.9" +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, 
+ {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + [[package]] name = "mypy" version = "1.14.1" @@ -1014,29 +1121,29 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.9.2" +version = "0.9.3" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.9.2-py3-none-linux_armv6l.whl", hash = "sha256:80605a039ba1454d002b32139e4970becf84b5fee3a3c3bf1c2af6f61a784347"}, - {file = "ruff-0.9.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b9aab82bb20afd5f596527045c01e6ae25a718ff1784cb92947bff1f83068b00"}, - {file = "ruff-0.9.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fbd337bac1cfa96be615f6efcd4bc4d077edbc127ef30e2b8ba2a27e18c054d4"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b35259b0cbf8daa22a498018e300b9bb0174c2bbb7bcba593935158a78054d"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b6a9701d1e371bf41dca22015c3f89769da7576884d2add7317ec1ec8cb9c3c"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9cc53e68b3c5ae41e8faf83a3b89f4a5d7b2cb666dff4b366bb86ed2a85b481f"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8efd9da7a1ee314b910da155ca7e8953094a7c10d0c0a39bfde3fcfd2a015684"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3292c5a22ea9a5f9a185e2d131dc7f98f8534a32fb6d2ee7b9944569239c648d"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a605fdcf6e8b2d39f9436d343d1f0ff70c365a1e681546de0104bef81ce88df"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c547f7f256aa366834829a08375c297fa63386cbe5f1459efaf174086b564247"}, - {file = "ruff-0.9.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d18bba3d3353ed916e882521bc3e0af403949dbada344c20c16ea78f47af965e"}, - {file = "ruff-0.9.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b338edc4610142355ccf6b87bd356729b62bf1bc152a2fad5b0c7dc04af77bfe"}, - {file = "ruff-0.9.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:492a5e44ad9b22a0ea98cf72e40305cbdaf27fac0d927f8bc9e1df316dcc96eb"}, - {file = "ruff-0.9.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:af1e9e9fe7b1f767264d26b1075ac4ad831c7db976911fa362d09b2d0356426a"}, - {file = "ruff-0.9.2-py3-none-win32.whl", hash = "sha256:71cbe22e178c5da20e1514e1e01029c73dc09288a8028a5d3446e6bba87a5145"}, - {file = "ruff-0.9.2-py3-none-win_amd64.whl", hash = "sha256:c5e1d6abc798419cf46eed03f54f2e0c3adb1ad4b801119dedf23fcaf69b55b5"}, - {file = "ruff-0.9.2-py3-none-win_arm64.whl", hash = "sha256:a1b63fa24149918f8b37cef2ee6fff81f24f0d74b6f0bdc37bc3e1f2143e41c6"}, - {file = "ruff-0.9.2.tar.gz", hash = "sha256:b5eceb334d55fae5f316f783437392642ae18e16dcf4f1858d55d3c2a0f8f5d0"}, + {file = "ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624"}, + {file = 
"ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c"}, + {file = "ruff-0.9.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c59ab92f8e92d6725b7ded9d4a31be3ef42688a115c6d3da9457a5bda140e2b4"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc153c25e715be41bb228bc651c1e9b1a88d5c6e5ed0194fa0dfea02b026439"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:646909a1e25e0dc28fbc529eab8eb7bb583079628e8cbe738192853dbbe43af5"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a5a46e09355695fbdbb30ed9889d6cf1c61b77b700a9fafc21b41f097bfbba4"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c4bb09d2bbb394e3730d0918c00276e79b2de70ec2a5231cd4ebb51a57df9ba1"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96a87ec31dc1044d8c2da2ebbed1c456d9b561e7d087734336518181b26b3aa5"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb7554aca6f842645022fe2d301c264e6925baa708b392867b7a62645304df4"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabc332b7075a914ecea912cd1f3d4370489c8018f2c945a30bcc934e3bc06a6"}, + {file = "ruff-0.9.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:33866c3cc2a575cbd546f2cd02bdd466fed65118e4365ee538a3deffd6fcb730"}, + {file = "ruff-0.9.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:006e5de2621304c8810bcd2ee101587712fa93b4f955ed0985907a36c427e0c2"}, + {file = "ruff-0.9.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ba6eea4459dbd6b1be4e6bfc766079fb9b8dd2e5a35aff6baee4d9b1514ea519"}, + {file = "ruff-0.9.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90230a6b8055ad47d3325e9ee8f8a9ae7e273078a66401ac66df68943ced029b"}, + {file = "ruff-0.9.3-py3-none-win32.whl", hash = "sha256:eabe5eb2c19a42f4808c03b82bd313fc84d4e395133fb3fc1b1516170a31213c"}, + {file = "ruff-0.9.3-py3-none-win_amd64.whl", hash = "sha256:040ceb7f20791dfa0e78b4230ee9dce23da3b64dd5848e40e3bf3ab76468dcf4"}, + {file = "ruff-0.9.3-py3-none-win_arm64.whl", hash = "sha256:800d773f6d4d33b0a3c60e2c6ae8f4c202ea2de056365acfa519aa48acf28e0b"}, + {file = "ruff-0.9.3.tar.gz", hash = "sha256:8293f89985a090ebc3ed1064df31f3b4b56320cdfcec8b60d3295bddb955c22a"}, ] [[package]] @@ -1282,4 +1389,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "035acbf0d18e9eac2f2c338c56b55ea84cfb0c59db64ac014c55607c2ea7cfb2" +content-hash = "9c77a647be7cd12ecb7e893ef3102554eb78faf761e99bafdb1d2424d6123c50" diff --git a/pyproject.toml b/pyproject.toml index d56c6c6..457f1b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,7 @@ cx_freeze = {version = "^7.2.5", platform = "win32"} pywin32 = {version = "*", platform = "win32"} pyinstaller = {version = "*", platform = "darwin"} doit = "^0.36.0" +jinja2-cli = "^0.8.2" # Dependencies required for linting the code. [tool.poetry.group.lint.dependencies] From 20af68f7f2e8fac72d95d66fea9165bf59c6fbf3 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 18:11:57 +0200 Subject: [PATCH 10/22] Render the Dockerfile from a template and some params Allow updating the Dockerfile from a template and some envs, so that it's easier to bump the dates in it. 
--- BUILD.md | 6 ++++ Dockerfile.env | 9 ++++++ Dockerfile.in | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++ Makefile | 3 ++ RELEASE.md | 1 + 5 files changed, 100 insertions(+) create mode 100644 Dockerfile.env create mode 100644 Dockerfile.in diff --git a/BUILD.md b/BUILD.md index bd1b377..7a47da3 100644 --- a/BUILD.md +++ b/BUILD.md @@ -515,3 +515,9 @@ poetry run .\install\windows\build-app.bat ``` When you're done you will have `dist\Dangerzone.msi`. + +## Updating the container image + +The Dangerzone container image is reproducible. This means that every time we +build it, the result will be bit-for-bit the same, with some minor exceptions. +Read more on how you can update it in `docs/developer/reproducibility.md`. diff --git a/Dockerfile.env b/Dockerfile.env new file mode 100644 index 0000000..25ff6ff --- /dev/null +++ b/Dockerfile.env @@ -0,0 +1,9 @@ +# Can be bumped to the latest date in https://hub.docker.com/_/debian/tags?name=bookworm- +DEBIAN_IMAGE_DATE=20250113 +# Can be bumped to today's date +DEBIAN_ARCHIVE_DATE=20250120 +# Can be bumped to the latest date in https://github.com/google/gvisor/tags +GVISOR_ARCHIVE_DATE=20250113 +# Can be bumped to the latest version and checksum from https://github.com/ebandal/H2Orestart/releases +H2ORESTART_CHECKSUM=7760dc2963332c50d15eee285933ec4b48d6a1de9e0c0f6082946f93090bd132 +H2ORESTART_VERSION=v0.7.0 diff --git a/Dockerfile.in b/Dockerfile.in new file mode 100644 index 0000000..2824cf1 --- /dev/null +++ b/Dockerfile.in @@ -0,0 +1,81 @@ +# NOTE: Updating the packages to their latest versions requires bumping the +# Dockerfile args below. For more info about this file, read +# docs/developer/reproducibility.md. + +ARG DEBIAN_IMAGE_DATE={{DEBIAN_IMAGE_DATE}} + +FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim + +ARG GVISOR_ARCHIVE_DATE={{GVISOR_ARCHIVE_DATE}} +ARG DEBIAN_ARCHIVE_DATE={{DEBIAN_ARCHIVE_DATE}} +ARG H2ORESTART_CHECKSUM={{H2ORESTART_CHECKSUM}} +ARG H2ORESTART_VERSION={{H2ORESTART_VERSION}} + +ENV DEBIAN_FRONTEND=noninteractive + +# The following way of installing packages is taken from +# https://github.com/reproducible-containers/repro-sources-list.sh/blob/master/Dockerfile.debian-12, +# and adapted to allow installing gVisor from its own repo as well.
+RUN \ + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + --mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \ + --mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \ + : "Hacky way to set a date for the Debian snapshot repos" && \ + touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list.d/debian.sources && \ + touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list && \ + repro-sources-list.sh && \ + : "Setup APT to install gVisor from its separate APT repo" && \ + apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y --no-install-recommends apt-transport-https ca-certificates gnupg && \ + gpg -o /usr/share/keyrings/gvisor-archive-keyring.gpg --dearmor /tmp/gvisor.key && \ + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/gvisor-archive-keyring.gpg] https://storage.googleapis.com/gvisor/releases ${GVISOR_ARCHIVE_DATE} main" > /etc/apt/sources.list.d/gvisor.list && \ + : "Install the necessary gVisor and Dangerzone dependencies" && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + python3 python3-fitz libreoffice-nogui libreoffice-java-common \ + python3 python3-magic default-jre-headless fonts-noto-cjk fonts-dejavu \ + runsc unzip wget && \ + : "Clean up for improving reproducibility (optional)" && \ + rm -rf /var/cache/fontconfig/ && \ + rm -rf /etc/ssl/certs/java/cacerts && \ + rm -rf /var/log/* /var/cache/ldconfig/aux-cache + +# Download H2ORestart from GitHub using a pinned version and hash. Note that +# it's available in Debian repos, but not in Bookworm yet. +RUN mkdir /libreoffice_ext && cd libreoffice_ext \ + && H2ORESTART_FILENAME=h2orestart.oxt \ + && wget https://github.com/ebandal/H2Orestart/releases/download/$H2ORESTART_VERSION/$H2ORESTART_FILENAME \ + && echo "$H2ORESTART_CHECKSUM $H2ORESTART_FILENAME" | sha256sum -c \ + && install -dm777 "/usr/lib/libreoffice/share/extensions/" \ + && rm /root/.wget-hsts + +# Create an unprivileged user both for gVisor and for running Dangerzone. +RUN addgroup --gid 1000 dangerzone +RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \ + --disabled-password --home /home/dangerzone dangerzone + +# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to +# import it. +RUN mkdir -p /opt/dangerzone/dangerzone +RUN touch /opt/dangerzone/dangerzone/__init__.py + +# Copy only the Python code, and not any produced .pyc files. +COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/ + +# Let the entrypoint script write the OCI config for the inner container under +# /config.json. +RUN touch /config.json +RUN chown dangerzone:dangerzone /config.json + +# Switch to the dangerzone user for the rest of the script. +USER dangerzone + +# Create a directory that will be used by gVisor as the place where it will +# store the state of its containers. 
+RUN mkdir /home/dangerzone/.containers + +COPY container_helpers/entrypoint.py / + +ENTRYPOINT ["/entrypoint.py"] diff --git a/Makefile b/Makefile index a8a714c..17a35d3 100644 --- a/Makefile +++ b/Makefile @@ -47,6 +47,9 @@ test-large: test-large-init ## Run large test set python -m pytest --tb=no tests/test_large_set.py::TestLargeSet -v $(JUNIT_FLAGS) --junitxml=$(TEST_LARGE_RESULTS) python $(TEST_LARGE_RESULTS)/report.py $(TEST_LARGE_RESULTS) +Dockerfile: Dockerfile.env Dockerfile.in + poetry run jinja2 Dockerfile.in Dockerfile.env > Dockerfile + .PHONY: build-clean build-clean: doit clean diff --git a/RELEASE.md b/RELEASE.md index 75642d2..21f092c 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -15,6 +15,7 @@ Here is a list of tasks that should be done before issuing the release: - [ ] Update the "Version" field in `install/linux/dangerzone.spec` - [ ] Bump the Debian version by adding a new changelog entry in `debian/changelog` - [ ] [Bump the minimum Docker Desktop versions](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#bump-the-minimum-docker-desktop-version) in `isolation_provider/container.py` +- [ ] Bump the dates in the `Dockerfile` - [ ] Update screenshot in `README.md`, if necessary - [ ] CHANGELOG.md should be updated to include a list of all major changes since the last release - [ ] A draft release should be created. Copy the release notes text from the template at [`docs/templates/release-notes`](https://github.com/freedomofpress/dangerzone/tree/main/docs/templates/) From 3ebc454b615802b3f111a24424870f78ee194995 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Tue, 21 Jan 2025 21:01:04 +0200 Subject: [PATCH 11/22] ci: Scan the latest image for CVEs Update the Debian snapshot date to the current one, so that we always scan the latest image for CVEs. Refs #1057 --- .github/workflows/scan.yml | 10 +++++++--- .grype.yaml | 38 +++++++++++++++++++++++++++++++++----- 2 files changed, 40 insertions(+), 8 deletions(-) diff --git a/.github/workflows/scan.yml b/.github/workflows/scan.yml index c4bd6a3..e08dcef 100644 --- a/.github/workflows/scan.yml +++ b/.github/workflows/scan.yml @@ -21,13 +21,17 @@ jobs: sudo apt install pipx pipx install poetry pipx inject poetry poetry-plugin-export + poetry install --only package + - name: Bump date of Debian snapshot archive + run: | + date=$(date "+%Y%m%d") + sed -i "s/DEBIAN_ARCHIVE_DATE=[0-9]\+/DEBIAN_ARCHIVE_DATE=${date}/" Dockerfile.env + make Dockerfile - name: Build container image run: python3 ./install/common/build-image.py --runtime docker --no-save - name: Get image tag id: tag - run: | - tag=$(docker images dangerzone.rocks/dangerzone --format '{{ .Tag }}') - echo "tag=$tag" >> $GITHUB_OUTPUT + run: echo "tag=$(cat share/image-id.txt)" >> $GITHUB_OUTPUT # NOTE: Scan first without failing, else we won't be able to read the scan # report. - name: Scan container image (no fail) diff --git a/.grype.yaml b/.grype.yaml index 457f6ec..40200e9 100644 --- a/.grype.yaml +++ b/.grype.yaml @@ -2,10 +2,38 @@ # latest release of Dangerzone, and offer our analysis. ignore: - # CVE-2024-11053 + # CVE-2023-45853 # ============== # - # NVD Entry: https://nvd.nist.gov/vuln/detail/CVE-2024-11053 - # Verdict: Dangerzone is not affected because libcurl is an HTTP client, and - # the Dangerzone container does not make any network calls. 
- - vulnerability: CVE-2024-11053 + # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2023-45853 + # Verdict: Dangerzone is not affected because the zlib library in Debian is + # built in a way that is not vulnerable. + - vulnerability: CVE-2023-45853 + # CVE-2024-38428 + # ============== + # + # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2024-38428 + # Verdict: Dangerzone is not affected because it doesn't use wget in the + # container image (which also has no network connectivity). + - vulnerability: CVE-2024-38428 + # CVE-2024-57823 + # ============== + # + # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2024-57823 + # Verdict: Dangerzone is not affected. First things first, LibreOffice is + # using this library for parsing RDF metadata in a document [1], and has + # issued a fix for the vendored raptor2 package they have for other distros + # [2]. + # + # On the other hand, the Debian security team has stated that this is a minor + # issue [3], and there's no fix from the developers yet. It seems that the + # Debian package is not affected somehow by this CVE, probably due to the way + # it's packaged. + # + # [1] https://wiki.documentfoundation.org/Documentation/DevGuide/Office_Development#RDF_metadata + # [2] https://cgit.freedesktop.org/libreoffice/core/commit/?id=2b50dc0e4482ac0ad27d69147b4175e05af4fba4 + # [2] From https://security-tracker.debian.org/tracker/CVE-2024-57823: + # + # [bookworm] - raptor2 (Minor issue, revisit when fixed upstream) + # + - vulnerability: CVE-2024-57823 From ab10d5b6dd5f6fda5769e87fc3352771eb7b8767 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Tue, 14 Jan 2025 10:41:09 +0200 Subject: [PATCH 12/22] Allow setting a tag for the container image Allow setting a tag for the container image, when building it with the `build-image.py` script. This should be used for development purposes only, since the proper image name should be dictated by the script. --- install/common/build-image.py | 43 +++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/install/common/build-image.py b/install/common/build-image.py index fb1b55f..91fe79c 100644 --- a/install/common/build-image.py +++ b/install/common/build-image.py @@ -27,6 +27,29 @@ def str2bool(v): raise argparse.ArgumentTypeError("Boolean value expected.") +def determine_git_tag(): + # Designate a unique tag for this image, depending on the Git commit it was created + # from: + # 1. If created from a Git tag (e.g., 0.8.0), the image tag will be `0.8.0`. + # 2. If created from a commit, it will be something like `0.8.0-31-g6bdaa7a`. + # 3. If the contents of the Git repo are dirty, we will append a unique identifier + # for this run, something like `0.8.0-31-g6bdaa7a-fdcb` or `0.8.0-fdcb`. + dirty_ident = secrets.token_hex(2) + return ( + subprocess.check_output( + [ + "git", + "describe", + "--long", + "--first-parent", + f"--dirty=-{dirty_ident}", + ], + ) + .decode() + .strip()[1:] # remove the "v" prefix of the tag. 
+ ) + + def main(): parser = argparse.ArgumentParser() parser.add_argument( @@ -55,6 +78,11 @@ def main(): const=True, help="Use the builder's cache to speed up the builds", ) + parser.add_argument( + "--tag", + default=None, + help="Provide a custom tag for the image (for development only)", + ) args = parser.parse_args() tarball_path = Path("share") / "container.tar.gz" @@ -62,20 +90,7 @@ def main(): print(f"Building for architecture '{ARCH}'") - # Designate a unique tag for this image, depending on the Git commit it was created - # from: - # 1. If created from a Git tag (e.g., 0.8.0), the image tag will be `0.8.0`. - # 2. If created from a commit, it will be something like `0.8.0-31-g6bdaa7a`. - # 3. If the contents of the Git repo are dirty, we will append a unique identifier - # for this run, something like `0.8.0-31-g6bdaa7a-fdcb` or `0.8.0-fdcb`. - dirty_ident = secrets.token_hex(2) - tag = ( - subprocess.check_output( - ["git", "describe", "--long", "--first-parent", f"--dirty=-{dirty_ident}"], - ) - .decode() - .strip()[1:] # remove the "v" prefix of the tag. - ) + tag = args.tag or determine_git_tag() image_name_tagged = IMAGE_NAME + ":" + tag print(f"Will tag the container image as '{image_name_tagged}'") From 94a57f997ecc56a701246ce23e3526f191ca2991 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Tue, 14 Jan 2025 10:49:59 +0200 Subject: [PATCH 13/22] dev_scripts: Add script for enforcing image reproducibility Add a dev script for Linux platforms that verifies that a source image can be reproducibly built from the current Git commit. The reproducibility check is enforced by the `diffoci` tool, which is downloaded as part of running the script. --- dev_scripts/reproduce-image.py | 180 +++++++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100755 dev_scripts/reproduce-image.py diff --git a/dev_scripts/reproduce-image.py b/dev_scripts/reproduce-image.py new file mode 100755 index 0000000..8af46a6 --- /dev/null +++ b/dev_scripts/reproduce-image.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 + +import argparse +import hashlib +import logging +import pathlib +import stat +import subprocess +import sys +import urllib.request + +logger = logging.getLogger(__name__) + +DIFFOCI_URL = "https://github.com/reproducible-containers/diffoci/releases/download/v0.1.5/diffoci-v0.1.5.linux-amd64" +DIFFOCI_CHECKSUM = "01d25fe690196945a6bd510d30559338aa489c034d3a1b895a0d82a4b860698f" +DIFFOCI_PATH = ( + pathlib.Path.home() / ".local" / "share" / "dangerzone-dev" / "helpers" / "diffoci" +) +IMAGE_NAME = "dangerzone.rocks/dangerzone" + + +def run(*args): + """Simple function that runs a command, validates it, and returns the output""" + logger.debug(f"Running command: {" ".join(args)}") + return subprocess.run( + args, + check=True, + stdout=subprocess.PIPE, + ).stdout + + +def git_commit_get(): + return run("git", "rev-parse", "--short", "HEAD").decode().strip() + + +def git_determine_tag(): + return run("git", "describe", "--long", "--first-parent").decode().strip() + + +def git_verify(commit, source): + if not commit in source: + raise RuntimeError( + f"Image '{source}' does not seem to be built from commit '{commit}'" + ) + + +def diffoci_hash_matches(diffoci): + """Check if the hash of the downloaded diffoci bin matches the expected one.""" + m = hashlib.sha256() + m.update(diffoci) + diffoci_checksum = m.hexdigest() + return diffoci_checksum == DIFFOCI_CHECKSUM + + +def diffoci_is_installed(): + """Determine if diffoci has been installed. 
+ + Determine if diffoci has been installed, by checking if the binary exists, and if + its hash is the expected one. If the binary exists but the hash is different, then + this is a sign that we need to update the local diffoci binary. + """ + if not DIFFOCI_PATH.exists(): + return False + return diffoci_hash_matches(DIFFOCI_PATH.open("rb").read()) + + +def diffoci_download(): + """Download the diffoci tool, based on a URL and its checksum.""" + with urllib.request.urlopen(DIFFOCI_URL) as f: + diffoci_bin = f.read() + + if not diffoci_hash_matches(diffoci_bin): + raise ValueError( + "Unexpected checksum for downloaded diffoci binary:" + f" {hashlib.sha256(diffoci_bin).hexdigest()} != {DIFFOCI_CHECKSUM}" + ) + + DIFFOCI_PATH.parent.mkdir(parents=True, exist_ok=True) + DIFFOCI_PATH.open("wb+").write(diffoci_bin) + DIFFOCI_PATH.chmod(DIFFOCI_PATH.stat().st_mode | stat.S_IEXEC) + + +def diffoci_diff(source, local_target): + """Diff the source image against the recently built target image using diffoci.""" + target = f"podman://{local_target}" + try: + return run( + str(DIFFOCI_PATH), + "diff", + source, + target, + "--semantic", + "--verbose", + ) + except subprocess.CalledProcessError as e: + error = e.stdout.decode() + raise RuntimeError( + f"Could not rebuild an identical image to {source}. Diffoci report:\n{error}" + ) + + +def build_image(tag, use_cache=False): + """Build the Dangerzone container image with a special tag.""" + run( + "python3", + "./install/common/build-image.py", + "--no-save", + "--use-cache", + str(use_cache), + "--tag", + tag, + ) + + +def parse_args(): + image_tag = git_determine_tag() + # TODO: Remove the local "podman://" prefix once we have started pushing images to a + # remote.
If the image resides in" + " the local Docker / Podman engine, you can prefix it with podman:// or" + f" docker:// accordingly (default: {default_image_name})" + ), + ) + parser.add_argument( + "--use-cache", + default=False, + action="store_true", + help="Whether to reuse the build cache (off by default for better reproducibility)", + ) + return parser.parse_args() + + +def main(): + logging.basicConfig( + level=logging.DEBUG, + format="%(asctime)s - %(levelname)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + args = parse_args() + + logger.info(f"Ensuring that current Git commit matches image '{args.source}'") + commit = git_commit_get() + git_verify(commit, args.source) + + if not diffoci_is_installed(): + logger.info(f"Downloading diffoci helper from {DIFFOCI_URL}") + diffoci_download() + + tag = f"reproduce-{commit}" + target = f"dangerzone.rocks/dangerzone:{tag}" + logger.info(f"Building container image and tagging it as '{target}'") + build_image(tag, args.use_cache) + + logger.info( + f"Ensuring that source image '{args.source}' is semantically identical with" + f" built image '{target}'" + ) + try: + diffoci_diff(args.source, target) + except subprocess.CalledProcessError as e: + raise RuntimeError( + f"Could not reproduce image {args.source} for commit {commit}" + ) + breakpoint() + + logger.info(f"Successfully reproduced image '{args.source}' from commit '{commit}'") + + +if __name__ == "__main__": + sys.exit(main()) From a1383fa0165f5440dd6fb3a5a51345a8d4014fa8 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Tue, 14 Jan 2025 10:51:33 +0200 Subject: [PATCH 14/22] ci: Add a CI job that enforces image reproducibility Add a CI job that uses the `reproduce.py` dev script to enforce image reproducibility, for every PR that we send to the repo. Fixes #1047 --- .github/workflows/ci.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f4e7f40..0c32c9c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -471,3 +471,30 @@ jobs: # file successfully. xvfb-run -s '-ac' ./dev_scripts/env.py --distro ${{ matrix.distro }} --version ${{ matrix.version }} run --dev \ bash -c 'cd dangerzone; poetry run make test' + + check-reproducibility: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install dev. 
dependencies + run: |- + sudo apt-get update + sudo apt-get install -y git python3-poetry --no-install-recommends + poetry install --only package + + - name: Verify that the Dockerfile matches the committed template and params + run: |- + cp Dockerfile Dockerfile.orig + make Dockerfile + diff Dockerfile.orig Dockerfile + + - name: Build Dangerzone container image + run: | + python3 ./install/common/build-image.py --no-save + + - name: Reproduce the same container image + run: | + ./dev_scripts/reproduce-image.py From 7e1f4bca6c0ccd724022cc8d89e14035f48f6a0c Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Tue, 14 Jan 2025 15:04:31 +0200 Subject: [PATCH 15/22] Update RELEASE.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Alexis Métaireau --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 21f092c..b2b490b 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -15,7 +15,7 @@ Here is a list of tasks that should be done before issuing the release: - [ ] Update the "Version" field in `install/linux/dangerzone.spec` - [ ] Bump the Debian version by adding a new changelog entry in `debian/changelog` - [ ] [Bump the minimum Docker Desktop versions](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#bump-the-minimum-docker-desktop-version) in `isolation_provider/container.py` -- [ ] Bump the dates in the `Dockerfile` +- [ ] Bump the dates and versions in the `Dockerfile` - [ ] Update screenshot in `README.md`, if necessary - [ ] CHANGELOG.md should be updated to include a list of all major changes since the last release - [ ] A draft release should be created. Copy the release notes text from the template at [`docs/templates/release-notes`](https://github.com/freedomofpress/dangerzone/tree/main/docs/templates/) From 9e23802142b89c87dc84eb705735c5260b88ea0d Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 13 Jan 2025 16:43:17 +0200 Subject: [PATCH 16/22] docs: Add design document for artifact reproducibility Refs #1047 --- docs/developer/reproducibility.md | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 docs/developer/reproducibility.md diff --git a/docs/developer/reproducibility.md b/docs/developer/reproducibility.md new file mode 100644 index 0000000..6d37087 --- /dev/null +++ b/docs/developer/reproducibility.md @@ -0,0 +1,67 @@ +# Reproducible builds + +We want to improve the transparency and auditability of our build artifacts, and +a way to achieve this is via reproducible builds. For a broader understanding of +what reproducible builds entail, check out https://reproducible-builds.org/. + +Our build artifacts consist of: +* Container images (`amd64` and `arm64` architectures) +* macOS installers (for Intel and Apple Silicon CPUs) +* Windows installer +* Fedora packages (for regular Fedora distros and Qubes) +* Debian packages (for Debian and Ubuntu) + +As of this writing, only the following artifacts are reproducible: +* Container images (see [#1047](https://github.com/freedomofpress/dangerzone/issues/1047)) + +In the following sections, we'll mention some specifics about enforcing +reproducibility for each artifact type. + +## Container image + +### Updating the image + +The fact that our image is reproducible also means that it's frozen in time. +This means that rebuilding the image without updating our Dockerfile will +**not** pick up any new security updates.
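In practice, updating the image means bumping the pinned values in `Dockerfile.env` and re-rendering the `Dockerfile`. As a rough illustration (not project tooling), refreshing the Debian snapshot pin could look like the following sketch, which mirrors the `sed` one-liner that the scan workflow uses; remember to run `make Dockerfile` afterwards:

```python
# Illustrative sketch: bump DEBIAN_ARCHIVE_DATE in Dockerfile.env to today's
# date, the same thing the scan workflow does with sed. Re-render the
# Dockerfile afterwards (make Dockerfile) so the two stay in sync.
import datetime
import re
from pathlib import Path

today = datetime.date.today().strftime("%Y%m%d")
env_path = Path("Dockerfile.env")
contents = re.sub(
    r"DEBIAN_ARCHIVE_DATE=\d+", f"DEBIAN_ARCHIVE_DATE={today}", env_path.read_text()
)
env_path.write_text(contents)
```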
+ +Here are the necessary variables that make up our image in the `Dockerfile.env` +file: +* `DEBIAN_IMAGE_DATE`: The date that the Debian container image was released +* `DEBIAN_ARCHIVE_DATE`: The Debian snapshot repo that we want to use +* `GVISOR_ARCHIVE_DATE`: The gVisor APT repo that we want to use +* `H2ORESTART_CHECKSUM`: The SHA-256 checksum of the H2ORestart plugin +* `H2ORESTART_VERSION`: The version of the H2ORestart plugin + +If you update these values in `Dockerfile.env`, you must also create a new +Dockerfile with: + +``` +make Dockerfile +``` + +Updating `Dockerfile` directly, without re-rendering it from `Dockerfile.in`, is +detected and should trigger a CI error. + +### Reproducing the image + +For a simple way to reproduce a Dangerzone container image, you can check out the +commit this image was built from (you can find it in the image tag, right after +the `g` character), and run the following command in a Linux environment: + +``` +./dev_scripts/reproduce-image.py --source <image> +``` + +This command will download the `diffoci` helper, build a container image from +the current Git commit, and ensure that the built image matches the source one, +with the exception of image names and file timestamps. + +> [!TIP] +> If the source image is not pushed to a registry, and is local instead, you +> can prefix it with `docker://` or `podman://` accordingly, so that `diffoci` +> can load it from the local Docker / Podman container engine. For example: +> +> ``` +> ./dev_scripts/reproduce-image.py --source podman://dangerzone.rocks/dangerzone:0.8.0-125-g725ce3b +> ``` From df1ec758bc8b1b0a63a0a5cac5a68b248a07d912 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Tue, 21 Jan 2025 23:23:45 +0200 Subject: [PATCH 17/22] Mask some extra paths in gVisor's OCI config Mask some paths of the outer container in the OCI config of the inner container. This is done to avoid leaking any sensitive information from Podman / Docker / gVisor, since we reuse the same rootfs. Refs #1048 --- dangerzone/container_helpers/entrypoint.py | 92 +++++++++++++++++++++- 1 file changed, 89 insertions(+), 3 deletions(-) diff --git a/dangerzone/container_helpers/entrypoint.py b/dangerzone/container_helpers/entrypoint.py index 80a6455..35248b3 100755 --- a/dangerzone/container_helpers/entrypoint.py +++ b/dangerzone/container_helpers/entrypoint.py @@ -59,10 +59,28 @@ oci_config: dict[str, typing.Any] = { "root": {"path": "/", "readonly": True}, "hostname": "dangerzone", "mounts": [ + # Mask almost every system directory of the outer container, by mounting tmpfs + # on top of them. This is done to avoid leaking any sensitive information, + # either mounted by Podman/Docker, or when gVisor runs, since we reuse the same + # rootfs. We basically mask everything except for `/usr`, `/bin`, `/lib`, + # and `/etc`. + # + # Note that we set `--root /home/dangerzone/.containers` for the directory where + # gVisor will create files at runtime, which means that in principle, we are + # covered by the masking of `/home/dangerzone` that follows below. + # + # Finally, note that the following list has been taken from the dirs in our + # container image, and double-checked against the top-level dirs listed in the + # Filesystem Hierarchy Standard (FHS) [1]. It would be nice to have an allowlist + # approach instead of a denylist, but FHS is such an old standard that we don't + # expect any new top-level dirs to pop up any time soon.
+ # + # [1] https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard { - "destination": "/proc", - "type": "proc", - "source": "proc", + "destination": "/boot", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev", "ro"], }, { "destination": "/dev", @@ -70,6 +88,53 @@ oci_config: dict[str, typing.Any] = { "source": "tmpfs", "options": ["nosuid", "noexec", "nodev"], }, + { + "destination": "/home", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev", "ro"], + }, + { + "destination": "/media", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev", "ro"], + }, + { + "destination": "/mnt", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev", "ro"], + }, + { + "destination": "/proc", + "type": "proc", + "source": "proc", + }, + { + "destination": "/root", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev", "ro"], + }, + { + "destination": "/run", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev"], + }, + { + "destination": "/sbin", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev", "ro"], + }, + { + "destination": "/srv", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev", "ro"], + }, { "destination": "/sys", "type": "tmpfs", @@ -82,6 +147,27 @@ oci_config: dict[str, typing.Any] = { "source": "tmpfs", "options": ["nosuid", "noexec", "nodev"], }, + { + "destination": "/var", + "type": "tmpfs", + "source": "tmpfs", + "options": ["nosuid", "noexec", "nodev"], + }, + # Also mask some files that are usually mounted by Docker / Podman. These files + # should not contain any sensitive information, since we use the `--network + # none` flag, but we want to make sure in any case. + { + "destination": "/etc/hostname", + "type": "bind", + "source": "/dev/null", + "options": ["rbind", "ro"], + }, + { + "destination": "/etc/hosts", + "type": "bind", + "source": "/dev/null", + "options": ["rbind", "ro"], + }, # LibreOffice needs a writable home directory, so just mount a tmpfs # over it. { From 6d6ac923713face2239f9cad71eae03a8eb7d599 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Tue, 21 Jan 2025 23:50:52 +0200 Subject: [PATCH 18/22] FIXUP: Strip 'v' from image tag --- dev_scripts/reproduce-image.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dev_scripts/reproduce-image.py b/dev_scripts/reproduce-image.py index 8af46a6..970fa34 100755 --- a/dev_scripts/reproduce-image.py +++ b/dev_scripts/reproduce-image.py @@ -34,7 +34,7 @@ def git_commit_get(): def git_determine_tag(): - return run("git", "describe", "--long", "--first-parent").decode().strip() + return run("git", "describe", "--long", "--first-parent").decode().strip()[1:] def git_verify(commit, source): @@ -116,7 +116,7 @@ def parse_args(): image_tag = git_determine_tag() # TODO: Remove the local "podman://" prefix once we have started pushing images to a # remote. 
- default_image_name = "podman://" + IMAGE_NAME + ":" + image_tag + default_image_name = f"podman://{IMAGE_NAME}:{image_tag}" parser = argparse.ArgumentParser( prog=sys.argv[0], @@ -157,7 +157,7 @@ def main(): diffoci_download() tag = f"reproduce-{commit}" - target = f"dangerzone.rocks/dangerzone:{tag}" + target = f"{IMAGE_NAME}:{tag}" logger.info(f"Building container image and tagging it as '{target}'") build_image(tag, args.use_cache) From 414efb629feab21fc42886e64fce804a6d9effe6 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Thu, 23 Jan 2025 17:40:55 +0200 Subject: [PATCH 19/22] WIP: Symlink /usr --- Dockerfile | 73 +++++++++++++++++++--- Dockerfile.in | 73 +++++++++++++++++++--- dangerzone/container_helpers/entrypoint.py | 23 ++----- dangerzone/conversion/doc_to_pixels.py | 2 +- 4 files changed, 131 insertions(+), 40 deletions(-) diff --git a/Dockerfile b/Dockerfile index c5eaf7b..2dd195f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ ARG DEBIAN_IMAGE_DATE=20250113 -FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim +FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as dangerzone-image ARG GVISOR_ARCHIVE_DATE=20250113 ARG DEBIAN_ARCHIVE_DATE=20250120 @@ -44,7 +44,7 @@ RUN \ # Download H2ORestart from GitHub using a pinned version and hash. Note that # it's available in Debian repos, but not in Bookworm yet. -RUN mkdir /libreoffice_ext && cd libreoffice_ext \ +RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \ && H2ORESTART_FILENAME=h2orestart.oxt \ && wget https://github.com/ebandal/H2Orestart/releases/download/$H2ORESTART_VERSION/$H2ORESTART_FILENAME \ && echo "$H2ORESTART_CHECKSUM $H2ORESTART_FILENAME" | sha256sum -c \ @@ -64,18 +64,71 @@ RUN touch /opt/dangerzone/dangerzone/__init__.py # Copy only the Python code, and not any produced .pyc files. COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/ -# Let the entrypoint script write the OCI config for the inner container under -# /config.json. -RUN touch /config.json -RUN chown dangerzone:dangerzone /config.json - -# Switch to the dangerzone user for the rest of the script. -USER dangerzone - # Create a directory that will be used by gVisor as the place where it will # store the state of its containers. RUN mkdir /home/dangerzone/.containers +# XXX: Create a new root hierarchy, that will be used in the final container +# image: +# +# /bin -> usr/bin +# /lib -> usr/lib +# /lib64 -> usr/lib64 +# /root +# /run +# /tmp +# /usr -> /home/dangerzone/dangerzone-image/rootfs/usr/ +# +# We have to create this hierarchy beforehand because we want to use the same +# /usr for both the inner and outer container. The problem though is that /usr +# is very sensitive, and you can't manipulate in a live system. That is, I +# haven't found a way to do the following, or something equivalent: +# +# rm -r /usr && ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /usr +# +# So, we prefer to create the symlinks here instead, and create the image +# manually in the next steps. +RUN mkdir /new_root +RUN mkdir /new_root/root /new_root/run /new_root/tmp +RUN chmod 777 /new_root/tmp +RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /new_root/usr +RUN ln -s usr/bin /new_root/bin +RUN ln -s usr/lib /new_root/lib +RUN ln -s usr/lib64 /new_root/lib64 +RUN ln -s usr/sbin /new_root/sbin + +# Intermediate layer + +FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as debian-utils + +## Final image + +FROM scratch + +# Copy the filesystem hierarchy that we created in the previous layer, so that +# /usr can be a symlink. 
+COPY --from=dangerzone-image /new_root/ / + +# Copy some files that are necessary to use the outer container image, e.g., in +# order to run `apt`. We _could_ avoid doing this, but the space cost is very +# small. +COPY --from=dangerzone-image /etc/ /etc/ +COPY --from=debian-utils /var/ /var/ + +# Copy the bare minimum to run Dangerzone in the inner container image. +COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/ +COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/ +COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/ +RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin +RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib +RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64 + +# Allow our entrypoint script to make changes in the following folders. +RUN chown dangerzone:dangerzone /home/dangerzone /home/dangerzone/dangerzone-image/ + +# Switch to the dangerzone user for the rest of the script. +USER dangerzone + COPY container_helpers/entrypoint.py / ENTRYPOINT ["/entrypoint.py"] diff --git a/Dockerfile.in b/Dockerfile.in index 2824cf1..eb75eed 100644 --- a/Dockerfile.in +++ b/Dockerfile.in @@ -4,7 +4,7 @@ ARG DEBIAN_IMAGE_DATE={{DEBIAN_IMAGE_DATE}} -FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim +FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as dangerzone-image ARG GVISOR_ARCHIVE_DATE={{GVISOR_ARCHIVE_DATE}} ARG DEBIAN_ARCHIVE_DATE={{DEBIAN_ARCHIVE_DATE}} @@ -44,7 +44,7 @@ RUN \ # Download H2ORestart from GitHub using a pinned version and hash. Note that # it's available in Debian repos, but not in Bookworm yet. -RUN mkdir /libreoffice_ext && cd libreoffice_ext \ +RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \ && H2ORESTART_FILENAME=h2orestart.oxt \ && wget https://github.com/ebandal/H2Orestart/releases/download/$H2ORESTART_VERSION/$H2ORESTART_FILENAME \ && echo "$H2ORESTART_CHECKSUM $H2ORESTART_FILENAME" | sha256sum -c \ @@ -64,18 +64,71 @@ RUN touch /opt/dangerzone/dangerzone/__init__.py # Copy only the Python code, and not any produced .pyc files. COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/ -# Let the entrypoint script write the OCI config for the inner container under -# /config.json. -RUN touch /config.json -RUN chown dangerzone:dangerzone /config.json - -# Switch to the dangerzone user for the rest of the script. -USER dangerzone - # Create a directory that will be used by gVisor as the place where it will # store the state of its containers. RUN mkdir /home/dangerzone/.containers +# XXX: Create a new root hierarchy, that will be used in the final container +# image: +# +# /bin -> usr/bin +# /lib -> usr/lib +# /lib64 -> usr/lib64 +# /root +# /run +# /tmp +# /usr -> /home/dangerzone/dangerzone-image/rootfs/usr/ +# +# We have to create this hierarchy beforehand because we want to use the same +# /usr for both the inner and outer container. The problem though is that /usr +# is very sensitive, and you can't manipulate in a live system. That is, I +# haven't found a way to do the following, or something equivalent: +# +# rm -r /usr && ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /usr +# +# So, we prefer to create the symlinks here instead, and create the image +# manually in the next steps. 
+RUN mkdir /new_root +RUN mkdir /new_root/root /new_root/run /new_root/tmp +RUN chmod 777 /new_root/tmp +RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /new_root/usr +RUN ln -s usr/bin /new_root/bin +RUN ln -s usr/lib /new_root/lib +RUN ln -s usr/lib64 /new_root/lib64 +RUN ln -s usr/sbin /new_root/sbin + +# Intermediate layer + +FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as debian-utils + +## Final image + +FROM scratch + +# Copy the filesystem hierarchy that we created in the previous layer, so that +# /usr can be a symlink. +COPY --from=dangerzone-image /new_root/ / + +# Copy some files that are necessary to use the outer container image, e.g., in +# order to run `apt`. We _could_ avoid doing this, but the space cost is very +# small. +COPY --from=dangerzone-image /etc/ /etc/ +COPY --from=debian-utils /var/ /var/ + +# Copy the bare minimum to run Dangerzone in the inner container image. +COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/ +COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/ +COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/ +RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin +RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib +RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64 + +# Allow our entrypoint script to make changes in the following folders. +RUN chown dangerzone:dangerzone /home/dangerzone /home/dangerzone/dangerzone-image/ + +# Switch to the dangerzone user for the rest of the script. +USER dangerzone + COPY container_helpers/entrypoint.py / ENTRYPOINT ["/entrypoint.py"] diff --git a/dangerzone/container_helpers/entrypoint.py b/dangerzone/container_helpers/entrypoint.py index 35248b3..479b268 100755 --- a/dangerzone/container_helpers/entrypoint.py +++ b/dangerzone/container_helpers/entrypoint.py @@ -56,14 +56,14 @@ oci_config: dict[str, typing.Any] = { {"type": "RLIMIT_NOFILE", "hard": 4096, "soft": 4096}, ], }, - "root": {"path": "/", "readonly": True}, + "root": {"path": "rootfs", "readonly": True}, "hostname": "dangerzone", "mounts": [ # Mask almost every system directory of the outer container, by mounting tmpfs # on top of them. This is done to avoid leaking any sensitive information, # either mounted by Podman/Docker, or when gVisor runs, since we reuse the same # rootfs. We basically mask everything except for `/usr`, `/bin`, `/lib`, - # and `/etc`. + # `/etc`, and `/opt`. # # Note that we set `--root /home/dangerzone/.containers` for the directory where # gVisor will create files at runtime, which means that in principle, we are @@ -153,21 +153,6 @@ oci_config: dict[str, typing.Any] = { "source": "tmpfs", "options": ["nosuid", "noexec", "nodev"], }, - # Also mask some files that are usually mounted by Docker / Podman. These files - # should not contain any sensitive information, since we use the `--network - # none` flag, but we want to make sure in any case. - { - "destination": "/etc/hostname", - "type": "bind", - "source": "/dev/null", - "options": ["rbind", "ro"], - }, - { - "destination": "/etc/hosts", - "type": "bind", - "source": "/dev/null", - "options": ["rbind", "ro"], - }, # LibreOffice needs a writable home directory, so just mount a tmpfs # over it. 
{ @@ -219,7 +204,7 @@ if os.environ.get("RUNSC_DEBUG"): json.dump(oci_config, sys.stderr, indent=2, sort_keys=True) # json.dump doesn't print a trailing newline, so print one here: log("") -with open("/config.json", "w") as oci_config_out: +with open("/home/dangerzone/dangerzone-image/config.json", "w") as oci_config_out: json.dump(oci_config, oci_config_out, indent=2, sort_keys=True) # Run gVisor. @@ -236,7 +221,7 @@ if os.environ.get("RUNSC_DEBUG"): runsc_argv += ["--debug=true", "--alsologtostderr=true"] if os.environ.get("RUNSC_FLAGS"): runsc_argv += [x for x in shlex.split(os.environ.get("RUNSC_FLAGS", "")) if x] -runsc_argv += ["run", "--bundle=/", "dangerzone"] +runsc_argv += ["run", "--bundle=/home/dangerzone/dangerzone-image", "dangerzone"] log( "Running gVisor with command line: {}", " ".join(shlex.quote(s) for s in runsc_argv) ) diff --git a/dangerzone/conversion/doc_to_pixels.py b/dangerzone/conversion/doc_to_pixels.py index 3a07377..b59e8ca 100644 --- a/dangerzone/conversion/doc_to_pixels.py +++ b/dangerzone/conversion/doc_to_pixels.py @@ -253,7 +253,7 @@ class DocumentToPixels(DangerzoneConverter): "unzip", "-d", f"/usr/lib/libreoffice/share/extensions/{libreoffice_ext}/", - f"/libreoffice_ext/{libreoffice_ext}", + f"/opt/libreoffice_ext/{libreoffice_ext}", ] await self.run_command( unzip_args, From 3cf34e6182ba91e4c02484467bc343e9665f33b4 Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Fri, 24 Jan 2025 10:13:19 +0200 Subject: [PATCH 20/22] Ruff fixes --- dev_scripts/reproduce-image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_scripts/reproduce-image.py b/dev_scripts/reproduce-image.py index 970fa34..0f757ae 100755 --- a/dev_scripts/reproduce-image.py +++ b/dev_scripts/reproduce-image.py @@ -21,7 +21,7 @@ IMAGE_NAME = "dangerzone.rocks/dangerzone" def run(*args): """Simple function that runs a command, validates it, and returns the output""" - logger.debug(f"Running command: {" ".join(args)}") + logger.debug(f"Running command: {' '.join(args)}") return subprocess.run( args, check=True, From 8b1e4c25e78e8e49bf4a81644d978519c54a655c Mon Sep 17 00:00:00 2001 From: Alex Pyrgiotis Date: Mon, 27 Jan 2025 13:06:46 +0200 Subject: [PATCH 21/22] WIP: Allow security scanning --- Dockerfile | 133 ++++++++++++++++++++++++++++++++++++++----------- Dockerfile.env | 4 +- Dockerfile.in | 129 +++++++++++++++++++++++++++++++++++++---------- 3 files changed, 210 insertions(+), 56 deletions(-) diff --git a/Dockerfile b/Dockerfile index 2dd195f..62f56f8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,8 +6,8 @@ ARG DEBIAN_IMAGE_DATE=20250113 FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as dangerzone-image -ARG GVISOR_ARCHIVE_DATE=20250113 -ARG DEBIAN_ARCHIVE_DATE=20250120 +ARG GVISOR_ARCHIVE_DATE=20250120 +ARG DEBIAN_ARCHIVE_DATE=20250127 ARG H2ORESTART_CHECKSUM=7760dc2963332c50d15eee285933ec4b48d6a1de9e0c0f6082946f93090bd132 ARG H2ORESTART_VERSION=v0.7.0 @@ -68,61 +68,138 @@ COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/ # store the state of its containers. 
RUN mkdir /home/dangerzone/.containers -# XXX: Create a new root hierarchy, that will be used in the final container +############################################################################### +# +# REUSING CONTAINER IMAGES: +# Anatomy of a hack +# ======================== +# +# The rest of the Dockerfile aims to do one thing: allow the final container +# image to actually contain two container images; one for the outer container +# (spawned by Podman/Docker Desktop), and one for the inner container (spawned +# by gVisor). +# +# This has already been done in the past, and we explain why and how in the +# design document for gVisor integration (should be in +# `docs/developer/gvisor.md`). In this iteration, we want to also +# achieve the following: +# +# 1. Have a small final image, by sharing some system paths between the inner +# and outer container image using symlinks. +# 2. Allow our security scanning tool to see the contents of the inner +# container image. +# 3. Make the outer container image operational, in the sense that you can use +# `apt` commands and perform a conversion with Dangerzone, outside the +# gVisor sandbox. This is helpful for debugging purposes. +# +# Below we'll explain how our design choices are informed by the above +# sub-goals. +# +# First, to achieve a small container image, we basically need to copy `/etc`, +# `/usr` and `/opt` from the original Dangerzone image to the **inner** +# container image (under `/home/dangerzone/dangerzone-image/rootfs/`) +# +# That's all we need. The rest of the files play no role, and we can actually +# mask them in gVisor's OCI config. +# +# Second, in order to let our security scanner find the installed packages, +# we need to copy the following dirs to the root of the **outer** container # image: +# * `/etc`, so that the security scanner can detect the image type and its +# sources +# * `/var`, so that the security scanner can have access to the APT database. # -# /bin -> usr/bin -# /lib -> usr/lib -# /lib64 -> usr/lib64 -# /root -# /run -# /tmp -# /usr -> /home/dangerzone/dangerzone-image/rootfs/usr/ +# IMPORTANT: We don't symlink the `/etc` of the **outer** container image to +# the **inner** one, in order to avoid leaking files like +# `/etc/{hostname,hosts,resolv.conf}` that Podman/Docker mounts when running +# the **outer** container image. # -# We have to create this hierarchy beforehand because we want to use the same -# /usr for both the inner and outer container. The problem though is that /usr -# is very sensitive, and you can't manipulate in a live system. That is, I +# Third, in order to have an operational Debian image, we are _mostly_ covered +# by the dirs we have copied. There's a _rare_ case where during debugging, we +# may want to install a system package that has components in `/etc` and +# `/var`, which will not be available in the **inner** container image. In that +# case, the developer can do the necessary symlinks in the live container. 
+# +# FILESYSTEM HIERARCHY +# ==================== +# +# The above plan leads to the following filesystem hierarchy: +# +# Outer container image: +# +# # ls -l / +# lrwxrwxrwx 1 root root 7 Jan 27 10:46 bin -> usr/bin +# -rwxr-xr-x 1 root root 7764 Jan 24 08:14 entrypoint.py +# drwxr-xr-x 1 root root 4096 Jan 27 10:47 etc +# drwxr-xr-x 1 root root 4096 Jan 27 10:46 home +# lrwxrwxrwx 1 root root 7 Jan 27 10:46 lib -> usr/lib +# lrwxrwxrwx 1 root root 9 Jan 27 10:46 lib64 -> usr/lib64 +# drwxr-xr-x 2 root root 4096 Jan 27 10:46 root +# drwxr-xr-x 1 root root 4096 Jan 27 10:47 run +# lrwxrwxrwx 1 root root 8 Jan 27 10:46 sbin -> usr/sbin +# drwxrwxrwx 2 root root 4096 Jan 27 10:46 tmp +# lrwxrwxrwx 1 root root 44 Jan 27 10:46 usr -> /home/dangerzone/dangerzone-image/rootfs/usr +# drwxr-xr-x 11 root root 4096 Jan 27 10:47 var +# +# Inner container image: +# +# # ls -l /home/dangerzone/dangerzone-image/rootfs/ +# total 12 +# lrwxrwxrwx 1 root root 7 Jan 27 10:47 bin -> usr/bin +# drwxr-xr-x 43 root root 4096 Jan 27 10:46 etc +# lrwxrwxrwx 1 root root 7 Jan 27 10:47 lib -> usr/lib +# lrwxrwxrwx 1 root root 9 Jan 27 10:47 lib64 -> usr/lib64 +# drwxr-xr-x 4 root root 4096 Jan 27 10:47 opt +# drwxr-xr-x 12 root root 4096 Jan 27 10:47 usr +# +# SYMLINKING /USR +# =============== +# +# It's surprisingly difficult (maybe even borderline impossible), to symlink +# `/usr` to a different path during image build. The problem is that /usr +# is very sensitive, and you can't manipulate it in a live system. That is, I # haven't found a way to do the following, or something equivalent: # # rm -r /usr && ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /usr # -# So, we prefer to create the symlinks here instead, and create the image -# manually in the next steps. +# The `ln` binary, even if you specify it by its full path, cannot run +# (probably because `ld-linux.so` can't be found). For this reason, we have +# to create the symlinks beforehand, in a previous build stage. Then, in an +# empty contianer image (scratch images), we can copy these symlinks and the +# /usr, and stich everything together. +############################################################################### + +# Create the filesystem hierarchy that will be used to symlink /usr. + RUN mkdir /new_root RUN mkdir /new_root/root /new_root/run /new_root/tmp RUN chmod 777 /new_root/tmp -RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /new_root/usr +RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr RUN ln -s usr/bin /new_root/bin RUN ln -s usr/lib /new_root/lib RUN ln -s usr/lib64 /new_root/lib64 RUN ln -s usr/sbin /new_root/sbin -# Intermediate layer - -FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as debian-utils - ## Final image FROM scratch -# Copy the filesystem hierarchy that we created in the previous layer, so that +# Copy the filesystem hierarchy that we created in the previous stage, so that # /usr can be a symlink. COPY --from=dangerzone-image /new_root/ / -# Copy some files that are necessary to use the outer container image, e.g., in -# order to run `apt`. We _could_ avoid doing this, but the space cost is very -# small. -COPY --from=dangerzone-image /etc/ /etc/ -COPY --from=debian-utils /var/ /var/ - # Copy the bare minimum to run Dangerzone in the inner container image. 
COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/ -COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/ COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/ +COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/ RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64 +# Copy the bare minimum to let the security scanner find vulnerabilities. +COPY --from=dangerzone-image /etc/ /etc/ +COPY --from=dangerzone-image /var/ /var/ + # Allow our entrypoint script to make changes in the following folders. RUN chown dangerzone:dangerzone /home/dangerzone /home/dangerzone/dangerzone-image/ diff --git a/Dockerfile.env b/Dockerfile.env index 25ff6ff..2ab94bd 100644 --- a/Dockerfile.env +++ b/Dockerfile.env @@ -1,9 +1,9 @@ # Can be bumped to the latest date in https://hub.docker.com/_/debian/tags?name=bookworm- DEBIAN_IMAGE_DATE=20250113 # Can be bumped to today's date -DEBIAN_ARCHIVE_DATE=20250120 +DEBIAN_ARCHIVE_DATE=20250127 # Can be bumped to the latest date in https://github.com/google/gvisor/tags -GVISOR_ARCHIVE_DATE=20250113 +GVISOR_ARCHIVE_DATE=20250120 # Can be bumped to the latest version and checksum from https://github.com/ebandal/H2Orestart/releases H2ORESTART_CHECKSUM=7760dc2963332c50d15eee285933ec4b48d6a1de9e0c0f6082946f93090bd132 H2ORESTART_VERSION=v0.7.0 diff --git a/Dockerfile.in b/Dockerfile.in index eb75eed..af03c89 100644 --- a/Dockerfile.in +++ b/Dockerfile.in @@ -68,61 +68,138 @@ COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/ # store the state of its containers. RUN mkdir /home/dangerzone/.containers -# XXX: Create a new root hierarchy, that will be used in the final container +############################################################################### +# +# REUSING CONTAINER IMAGES: +# Anatomy of a hack +# ======================== +# +# The rest of the Dockerfile aims to do one thing: allow the final container +# image to actually contain two container images; one for the outer container +# (spawned by Podman/Docker Desktop), and one for the inner container (spawned +# by gVisor). +# +# This has already been done in the past, and we explain why and how in the +# design document for gVisor integration (should be in +# `docs/developer/gvisor.md`). In this iteration, we want to also +# achieve the following: +# +# 1. Have a small final image, by sharing some system paths between the inner +# and outer container image using symlinks. +# 2. Allow our security scanning tool to see the contents of the inner +# container image. +# 3. Make the outer container image operational, in the sense that you can use +# `apt` commands and perform a conversion with Dangerzone, outside the +# gVisor sandbox. This is helpful for debugging purposes. +# +# Below we'll explain how our design choices are informed by the above +# sub-goals. +# +# First, to achieve a small container image, we basically need to copy `/etc`, +# `/usr` and `/opt` from the original Dangerzone image to the **inner** +# container image (under `/home/dangerzone/dangerzone-image/rootfs/`) +# +# That's all we need. The rest of the files play no role, and we can actually +# mask them in gVisor's OCI config. 
+# +# Second, in order to let our security scanner find the installed packages, +# we need to copy the following dirs to the root of the **outer** container # image: +# * `/etc`, so that the security scanner can detect the image type and its +# sources +# * `/var`, so that the security scanner can have access to the APT database. # -# /bin -> usr/bin -# /lib -> usr/lib -# /lib64 -> usr/lib64 -# /root -# /run -# /tmp -# /usr -> /home/dangerzone/dangerzone-image/rootfs/usr/ +# IMPORTANT: We don't symlink the `/etc` of the **outer** container image to +# the **inner** one, in order to avoid leaking files like +# `/etc/{hostname,hosts,resolv.conf}` that Podman/Docker mounts when running +# the **outer** container image. # -# We have to create this hierarchy beforehand because we want to use the same -# /usr for both the inner and outer container. The problem though is that /usr -# is very sensitive, and you can't manipulate in a live system. That is, I +# Third, in order to have an operational Debian image, we are _mostly_ covered +# by the dirs we have copied. There's a _rare_ case where during debugging, we +# may want to install a system package that has components in `/etc` and +# `/var`, which will not be available in the **inner** container image. In that +# case, the developer can do the necessary symlinks in the live container. +# +# FILESYSTEM HIERARCHY +# ==================== +# +# The above plan leads to the following filesystem hierarchy: +# +# Outer container image: +# +# # ls -l / +# lrwxrwxrwx 1 root root 7 Jan 27 10:46 bin -> usr/bin +# -rwxr-xr-x 1 root root 7764 Jan 24 08:14 entrypoint.py +# drwxr-xr-x 1 root root 4096 Jan 27 10:47 etc +# drwxr-xr-x 1 root root 4096 Jan 27 10:46 home +# lrwxrwxrwx 1 root root 7 Jan 27 10:46 lib -> usr/lib +# lrwxrwxrwx 1 root root 9 Jan 27 10:46 lib64 -> usr/lib64 +# drwxr-xr-x 2 root root 4096 Jan 27 10:46 root +# drwxr-xr-x 1 root root 4096 Jan 27 10:47 run +# lrwxrwxrwx 1 root root 8 Jan 27 10:46 sbin -> usr/sbin +# drwxrwxrwx 2 root root 4096 Jan 27 10:46 tmp +# lrwxrwxrwx 1 root root 44 Jan 27 10:46 usr -> /home/dangerzone/dangerzone-image/rootfs/usr +# drwxr-xr-x 11 root root 4096 Jan 27 10:47 var +# +# Inner container image: +# +# # ls -l /home/dangerzone/dangerzone-image/rootfs/ +# total 12 +# lrwxrwxrwx 1 root root 7 Jan 27 10:47 bin -> usr/bin +# drwxr-xr-x 43 root root 4096 Jan 27 10:46 etc +# lrwxrwxrwx 1 root root 7 Jan 27 10:47 lib -> usr/lib +# lrwxrwxrwx 1 root root 9 Jan 27 10:47 lib64 -> usr/lib64 +# drwxr-xr-x 4 root root 4096 Jan 27 10:47 opt +# drwxr-xr-x 12 root root 4096 Jan 27 10:47 usr +# +# SYMLINKING /USR +# =============== +# +# It's surprisingly difficult (maybe even borderline impossible), to symlink +# `/usr` to a different path during image build. The problem is that /usr +# is very sensitive, and you can't manipulate it in a live system. That is, I # haven't found a way to do the following, or something equivalent: # # rm -r /usr && ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /usr # -# So, we prefer to create the symlinks here instead, and create the image -# manually in the next steps. +# The `ln` binary, even if you specify it by its full path, cannot run +# (probably because `ld-linux.so` can't be found). For this reason, we have +# to create the symlinks beforehand, in a previous build stage. Then, in an +# empty contianer image (scratch images), we can copy these symlinks and the +# /usr, and stich everything together. 
+###############################################################################
+
+# Create the filesystem hierarchy that will be used to symlink /usr.
+
 RUN mkdir /new_root
 RUN mkdir /new_root/root /new_root/run /new_root/tmp
 RUN chmod 777 /new_root/tmp
-RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /new_root/usr
+RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
 RUN ln -s usr/bin /new_root/bin
 RUN ln -s usr/lib /new_root/lib
 RUN ln -s usr/lib64 /new_root/lib64
 RUN ln -s usr/sbin /new_root/sbin

-# Intermediate layer
-
-FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as debian-utils
-
 ## Final image
 FROM scratch

-# Copy the filesystem hierarchy that we created in the previous layer, so that
+# Copy the filesystem hierarchy that we created in the previous stage, so that
 # /usr can be a symlink.
 COPY --from=dangerzone-image /new_root/ /

-# Copy some files that are necessary to use the outer container image, e.g., in
-# order to run `apt`. We _could_ avoid doing this, but the space cost is very
-# small.
-COPY --from=dangerzone-image /etc/ /etc/
-COPY --from=debian-utils /var/ /var/
-
 # Copy the bare minimum to run Dangerzone in the inner container image.
 COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/
-COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/
 COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/
+COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/
 RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin
 RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib
 RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64

+# Copy the bare minimum to let the security scanner find vulnerabilities.
+COPY --from=dangerzone-image /etc/ /etc/
+COPY --from=dangerzone-image /var/ /var/
+
 # Allow our entrypoint script to make changes in the following folders.
 RUN chown dangerzone:dangerzone /home/dangerzone /home/dangerzone/dangerzone-image/

From e388fe609094941c0d8f2f28230be6c6c145c077 Mon Sep 17 00:00:00 2001
From: Alex Pyrgiotis
Date: Mon, 27 Jan 2025 13:37:42 +0200
Subject: [PATCH 22/22] WIP: Bump Python upper version

---
 install/linux/dangerzone.spec | 10 ----------
 poetry.lock                   | 16 ++--------------
 pyproject.toml                |  2 +-
 3 files changed, 3 insertions(+), 25 deletions(-)

diff --git a/install/linux/dangerzone.spec b/install/linux/dangerzone.spec
index 5fb17d7..409f56e 100644
--- a/install/linux/dangerzone.spec
+++ b/install/linux/dangerzone.spec
@@ -216,16 +216,6 @@ convert the documents within a secure sandbox.
 %prep
 %autosetup -p1 -n dangerzone-%{version}

-# XXX: Bump the Python requirement in pyproject.toml from <3.13 to <3.14. Fedora
-# 41 comes with Python 3.13 installed, but our pyproject.toml does not support
-# it because PySide6 in PyPI works with Python 3.12 or earlier.
-#
-# This hack sidesteps this issue, and we haven't noticed any paticular problem
-# with the package that is built from that.
-%if 0%{?fedora} == 41
-sed -i 's/<3.13/<3.14/' pyproject.toml
-%endif
-
 # Bypass the version pin for Fedora as the 6.8.1.1 package is causing trouble
 # A 6.8.1.1 package was only released with a wheel for macOS, but was picked by
 # Fedora packagers. We cannot use "*" when PyPI is involved as it will fail to download the latest version.
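As an aside on the Dockerfile.in changes earlier in this series: the whole arrangement boils down to creating the /usr symlink and its siblings in a build stage where `ln` can still execute, and then assembling the final image from scratch by copying the pre-made hierarchy. A minimal standalone sketch of that technique follows; the stage name, the /rootfs path, and the missing entrypoint are illustrative simplifications, not what Dangerzone actually ships.

    # Stage 1: prepare the inner rootfs and the outer hierarchy while a normal
    # shell and dynamic loader are still available.
    FROM debian:bookworm-slim AS builder
    RUN mkdir -p /rootfs && cp -a /usr /rootfs/usr
    RUN mkdir -p /new_root/root /new_root/run /new_root/tmp && \
        chmod 777 /new_root/tmp && \
        ln -s /rootfs/usr /new_root/usr && \
        ln -s usr/bin /new_root/bin && \
        ln -s usr/lib /new_root/lib && \
        ln -s usr/lib64 /new_root/lib64 && \
        ln -s usr/sbin /new_root/sbin

    # Stage 2: start from an empty image and copy the pre-made symlinks and the
    # copied /usr into place; nothing needs to execute in this stage.
    FROM scratch
    COPY --from=builder /new_root/ /
    COPY --from=builder /rootfs/ /rootfs/

The point is exactly the one the comment block makes: every `ln -s` runs in a stage that still has a working loader, while the scratch stage only copies files and symlinks into place.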
diff --git a/poetry.lock b/poetry.lock
index 0d8fd02..b775da4 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -874,9 +874,6 @@ optional = false
 python-versions = "<3.14,>=3.9"
 files = [
     {file = "PySide6-6.8.1.1-cp39-abi3-macosx_12_0_universal2.whl", hash = "sha256:805728a7ed58352a02689b953ddbe29af1c8944f8c7f2c28312dc0b69f64b85e"},
-    {file = "PySide6-6.8.1.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:70f8c4745d981ebb5bb93d7b825222532d553373b68b9db7a42cfcee25cafc9a"},
-    {file = "PySide6-6.8.1.1-cp39-abi3-manylinux_2_39_aarch64.whl", hash = "sha256:de80ac62087a716b2bada2e3ddd739c5d176dc4be819abef91274d53d75f4e58"},
-    {file = "PySide6-6.8.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:60a2551053fa69845b893fb821507e2cc89d3a8a8b43726d568acd1250ad44fb"},
 ]

 [package.dependencies]
@@ -909,9 +906,6 @@ optional = false
 python-versions = "<3.14,>=3.9"
 files = [
     {file = "PySide6_Addons-6.8.1.1-cp39-abi3-macosx_12_0_universal2.whl", hash = "sha256:83d35d7a1a7dbd1a16b4040a26ad4d5cc030a2aed4d439241babee1225d6e58a"},
-    {file = "PySide6_Addons-6.8.1.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5ef45aeadca37d658e44a41e11f2b2e43dfc34c780a6be1cd09d96a7696e6cc6"},
-    {file = "PySide6_Addons-6.8.1.1-cp39-abi3-manylinux_2_39_aarch64.whl", hash = "sha256:e1b4a20b0bcbc2e440faba62e0d164223b8fd6f041d749543bc3812979116c4c"},
-    {file = "PySide6_Addons-6.8.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:d8ae86944ac48cc9891666cf71565acebd403a953d0e050be4d41ac490788d0a"},
 ]

 [package.dependencies]
@@ -942,9 +936,6 @@ optional = false
 python-versions = "<3.14,>=3.9"
 files = [
     {file = "PySide6_Essentials-6.8.1.1-cp39-abi3-macosx_12_0_universal2.whl", hash = "sha256:25f3fdb281ac3b442f08250e3284d3b1944f7c64c62ed93b57678a62c199cf46"},
-    {file = "PySide6_Essentials-6.8.1.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62b64842a91114c224c41eeb6a8c8f255ba60268bc5ac19724f944d60e2277c6"},
-    {file = "PySide6_Essentials-6.8.1.1-cp39-abi3-manylinux_2_39_aarch64.whl", hash = "sha256:e0c1cc3cfb2ea5eea70748da7d22032a59ea641e24988f543d5b274c0adab065"},
-    {file = "PySide6_Essentials-6.8.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:085f12e16db31eb0e802b21c64eabf582f54db6c44463a1f5e1814d897b1f2c0"},
 ]

 [package.dependencies]
@@ -1187,9 +1178,6 @@ optional = false
 python-versions = "<3.14,>=3.9"
 files = [
     {file = "shiboken6-6.8.1.1-cp39-abi3-macosx_12_0_universal2.whl", hash = "sha256:42fbb173a772c4e059dbeafb302e96f6ea8e1c9bacf05fab71ea7eb0d8f97b01"},
-    {file = "shiboken6-6.8.1.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:d672df0f29dc5f44de7205c1acae4d0471ba8371bb1d68fdacbf1686f4d22a96"},
-    {file = "shiboken6-6.8.1.1-cp39-abi3-manylinux_2_39_aarch64.whl", hash = "sha256:ff1b22a66476b042d3dc09870edca353fdac1c1f517a4cdc364b24e296213ecd"},
-    {file = "shiboken6-6.8.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:26f7041c77058a8ecfb9345caa187250b199de79cfb37e33936e5fbd468a7780"},
 ]

 [[package]]
@@ -1388,5 +1376,5 @@ type = ["pytest-mypy"]

 [metadata]
 lock-version = "2.0"
-python-versions = ">=3.9,<3.13"
-content-hash = "9c77a647be7cd12ecb7e893ef3102554eb78faf761e99bafdb1d2424d6123c50"
+python-versions = ">=3.9,<3.14"
+content-hash = "c6395d63523761d272dfc5fe6eef7822a081b3b0fb0b739a82efec7de5346d57"
diff --git a/pyproject.toml b/pyproject.toml
index 457f1b4..4bb4bb4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,7 +13,7 @@ include = [
 ]

 [tool.poetry.dependencies]
-python = ">=3.9,<3.13"
+python = ">=3.9,<3.14"
 click = "*"
 platformdirs = "*"
 PySide6 = "^6.7.1"
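On the lock-file side of this final WIP patch, the new `content-hash` and the `python-versions = ">=3.9,<3.14"` marker are the kind of changes produced by re-locking after widening the constraint in pyproject.toml. A rough sketch of that workflow, assuming Poetry 1.x (the patch itself does not record which commands were run):

    # Re-resolve metadata and the content hash against the widened Python
    # constraint, without bumping any locked package versions.
    poetry lock --no-update

    # Verify that the project still resolves and installs into a clean
    # environment.
    poetry install --sync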