Compare commits

...

40 commits

Author SHA1 Message Date
Alexis Métaireau
cbd4795bf6
Verify podman/docker images against locally stored signatures
2025-01-28 16:21:29 +01:00
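For context, verifying an image against a signature with a locally stored public key can be done with Sigstore's `cosign`; the sketch below is illustrative only (the key path and image reference are assumptions, and the commit's actual offline-verification flow may differ):

```bash
# Hypothetical check: verify a container image signature with cosign,
# using a public key stored on disk. Paths and image name are placeholders.
cosign verify \
  --key ./share/signing-key.pub \
  ghcr.io/freedomofpress/dangerzone/dangerzone:latest
```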
Alexis Métaireau
47252cc31d
Automate the verification of image signatures 2025-01-28 16:21:29 +01:00
Alexis Métaireau
bcd1ec2173
Add a utility to retrieve manifest info 2025-01-28 16:21:29 +01:00
Alexis Métaireau
5817650633
Add a script to verify GitHub attestations 2025-01-28 16:21:29 +01:00
Alexis Métaireau
8f49cd99eb
FIXUP: test 2025-01-28 16:21:29 +01:00
Alexis Métaireau
f0ac1f885f
Add logs 2025-01-28 16:21:29 +01:00
Alexis Métaireau
554736cab3
Remove the tag from the attestation; what we attest is the hash, so there is no need for it 2025-01-28 16:21:29 +01:00
Alexis Métaireau
891ffe4fec
Add the tag to the subject 2025-01-28 16:21:29 +01:00
Alexis Métaireau
2a80bf0c26
Get the tag from git before retagging it 2025-01-28 16:21:29 +01:00
Alexis Métaireau
ac62a153dc
Checkout with depth:0, otherwise git commands aren't functional 2025-01-28 16:21:29 +01:00
Alexis Métaireau
13449641ca
Build: Use GitHub runners to build and sign container images on new tags 2025-01-28 16:21:28 +01:00
Alex Pyrgiotis
88a6b37770
Add support for Python 3.13
Bump our max supported Python version to 3.13, now that PySide6 supports
it.

Fixes #992
2025-01-27 21:40:27 +02:00
Alex Pyrgiotis
fb90243668
Symlink /usr in Debian container image
Update our Dockerfile and entrypoint script in order to reuse the /usr
dir in the inner and outer container image.

Refs #1048
2025-01-27 21:40:27 +02:00
Alex Pyrgiotis
9724a16d81
Mask some extra paths in gVisor's OCI config
Mask some paths of the outer container in the OCI config of the inner
container. This is done to avoid leaking any sensitive information from
Podman / Docker / gVisor, since we reuse the same rootfs.

Refs #1048
2025-01-27 21:40:27 +02:00
Alex Pyrgiotis
cf43a7a0c4
docs: Add design document for artifact reproducibility
Refs #1047
2025-01-27 21:40:27 +02:00
Alex Pyrgiotis
cae4187550
Update RELEASE.md
Co-authored-by: Alexis Métaireau <alexis@freedom.press>
2025-01-27 21:40:27 +02:00
Alex Pyrgiotis
cfa4478ace
ci: Add a CI job that enforces image reproducibility
Add a CI job that uses the `reproduce.py` dev script to enforce image
reproducibility, for every PR that we send to the repo.

Fixes #1047
2025-01-27 21:40:27 +02:00
Alex Pyrgiotis
2557be9bc0
dev_scripts: Add script for enforcing image reproducibility
Add a dev script for Linux platforms that verifies that a source image
can be reproducibly built from the current Git commit. The
reproducibility check is enforced by the `diffoci` tool, which is
downloaded as part of running the script.
2025-01-27 21:40:27 +02:00
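As a rough sketch of what such a check can look like with `diffoci` (the image references are illustrative, and the script's actual flags may differ):

```bash
# Build the image from the current commit, then compare it semantically
# against a previously published image. --semantic ignores differences
# that don't affect the image's behavior (e.g., metadata timestamps).
python3 ./install/common/build-image.py
diffoci diff \
  podman://dangerzone.rocks/dangerzone:latest \
  podman://ghcr.io/freedomofpress/dangerzone/dangerzone:latest \
  --semantic
```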
Alex Pyrgiotis
235d71354a
Allow setting a tag for the container image
Allow setting a tag for the container image, when building it with the
`build-image.py` script. This should be used for development purposes
only, since the proper image name should be dictated by the script.
2025-01-27 21:40:27 +02:00
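A plausible invocation, assuming the option is exposed as a `--tag` flag (the flag name is inferred from the commit message, not confirmed):

```bash
# Build the container image under a custom, development-only tag.
python3 ./install/common/build-image.py --tag my-dev-tag
```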
Alex Pyrgiotis
5d49f5abdb
ci: Scan the latest image for CVEs
Update the Debian snapshot date to the current one, so that we always
scan the latest image for CVEs.

Refs #1057
2025-01-27 21:40:27 +02:00
Alex Pyrgiotis
0ce7773ca1
Render the Dockerfile from a template and some params
Allow updating the Dockerfile from a template and some envs, so that
it's easier to bump the dates in it.
2025-01-27 21:40:27 +02:00
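The Makefile rule later in this diff shows the rendering step; regenerating the Dockerfile by hand amounts to:

```bash
# Render Dockerfile from the Dockerfile.in template, with the variables
# defined in Dockerfile.env (this mirrors the `Dockerfile` Makefile target).
poetry run jinja2 Dockerfile.in Dockerfile.env > Dockerfile
```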
Alex Pyrgiotis
fa27f4b063
Add jinja2-cli package dependency
Add jinja2-cli as a package dependency, since it will be used to create
the Dockerfile from some user parameters and a template.
2025-01-23 23:26:56 +02:00
Alex Pyrgiotis
8e8a515b64
Allow using the container engine cache when building our image
Remove our suggestions for not using the container cache, which stemmed
from the fact that our Dangerzone image was not reproducible. Now that
we have switched to Debian Stable and the Dockerfile is all we need to
reproducibly build the exact same container image, we can just use the
cache to speed up builds.
2025-01-23 23:25:43 +02:00
Alex Pyrgiotis
270cae1bc0
Rename vendor-pymupdf.py to debian-vendor-pymupdf.py
Rename the `vendor-pymupdf.py` script to `debian-vendor-pymupdf.py`,
since it's used only when building Debian packages.
2025-01-23 23:25:43 +02:00
Alex Pyrgiotis
14bb6c0e39
Do not use poetry.lock when building the container image
Remove all the scaffolding in our `build-image.py` script for using the
`poetry.lock` file, now that we install PyMuPDF from the Debian repos.
2025-01-23 23:25:39 +02:00
Alex Pyrgiotis
033ce0986d
Switch base image to Debian Stable
Switch base image from Alpine Linux to Debian Stable, in order to reduce
our image footprint, improve our security posture, and build our
container image reproducibly.

Fixes #1046
Refs #1047
2025-01-23 23:24:48 +02:00
Alex Pyrgiotis
935396565c
Reuse the same rootfs for the inner and outer container
Remove the need to copy the Dangerzone container image (used by the
inner container) within a wrapper gVisor image (used by the outer
container). Instead, use the root of the container filesystem for both
containers. We can do this safely because we don't mount any secrets to
the container, and because gVisor offers a read-only view of the
underlying filesystem.

Fixes #1048
2025-01-23 23:24:48 +02:00
Alex Pyrgiotis
e29837cb43
Copy gVisor public key and a helper script in container helpers
Download and copy the following artifacts that will be used for building
a Debian-based Dangerzone container image in the subsequent commits:
* The APT key for the gVisor repo [1]
* A helper script for building reproducible Debian images [2]

[1] https://gvisor.dev/archive.key
[2] d15cf12b26/repro-sources-list.sh
2025-01-23 23:24:48 +02:00
Alex Pyrgiotis
8568b4bb9d
Move container-only build context to dangerzone/container
Move container-only build context (currently just the entrypoint script)
from `dangerzone/gvisor_wrapper` to `dangerzone/container_helpers`.
Update the rest of the scripts to use this location as well.
2025-01-23 23:24:48 +02:00
Alex Pyrgiotis
be1fa7a395
Whitespace fixes 2025-01-23 23:24:47 +02:00
Alexis Métaireau
b2f4e2d523
Bump poetry.lock
2025-01-23 16:26:07 +01:00
Alexis Métaireau
7409966253
Remove ${Python3:Depends} as it's not used at the moment. 2025-01-23 16:26:06 +01:00
Alexis Métaireau
40fb6579f6
Alternatives for debian/control 2025-01-23 16:26:06 +01:00
Alexis Métaireau
6ae91b024e
Use platformdirs to find user configuration files
The previous library we were using for this (`appdirs`) is dead upstream
and not supported anymore in debian testing.

Fixes #1058
2025-01-23 16:26:06 +01:00
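For illustration, this is roughly what `platformdirs` resolves (the `dangerzone` app name here is an assumption):

```bash
# Print the per-user config directory that platformdirs resolves for an
# app named "dangerzone", e.g. ~/.config/dangerzone on Linux.
python3 -c 'import platformdirs; print(platformdirs.user_config_dir("dangerzone"))'
```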
Alexis Métaireau
c2841dcc08
Run ruff format
2025-01-23 14:48:33 +01:00
Alexis Métaireau
df5ccb3f75
Fedora: bypass the shiboken specific version. 2025-01-23 14:39:50 +01:00
Alexis Métaireau
9c6c2e1051
build: pin shiboken6 to specific versions 2025-01-23 12:52:48 +01:00
Alexis Métaireau
23f3ad1f46
doc: bump the Docker Desktop version as part of the RELEASE procedure
2025-01-21 10:21:24 +01:00
Alexis Métaireau
970a82f432
Bind Alert instances to the main window alert property 2025-01-21 10:21:24 +01:00
Alexis Métaireau
3d5cacfffb
Warn users if the minimum version of Docker Desktop is not met
This only happens on Windows and macOS.

Fixes #693
2025-01-21 10:21:24 +01:00
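Conceptually, the check compares the version reported by the container runtime against a hardcoded minimum; a rough shell analogue follows (the minimum below is a placeholder, and Dangerzone's actual implementation in `isolation_provider/container.py` may look up the Docker Desktop version differently):

```bash
# Warn if the reported Docker version is older than a required minimum,
# using sort -V for version-aware comparison.
MINIMUM="4.36.0"  # placeholder, not the real enforced minimum
CURRENT="$(docker version --format '{{.Server.Version}}')"
if [ "$(printf '%s\n' "$MINIMUM" "$CURRENT" | sort -V | head -n1)" != "$MINIMUM" ]; then
  echo "Warning: Docker version $CURRENT is older than minimum $MINIMUM" >&2
fi
```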
38 changed files with 2420 additions and 652 deletions


@@ -85,7 +85,7 @@ jobs:
id: cache-container-image
uses: actions/cache@v4
with:
- key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }}
+ key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
path: |
share/container.tar.gz
share/image-id.txt


@@ -59,7 +59,7 @@ jobs:
id: cache-container-image
uses: actions/cache@v4
with:
- key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }}
+ key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
path: |-
share/container.tar.gz
share/image-id.txt
@@ -67,7 +67,6 @@ jobs:
- name: Build Dangerzone container image
if: ${{ steps.cache-container-image.outputs.cache-hit != 'true' }}
run: |
- sudo apt-get install -y python3-poetry
python3 ./install/common/build-image.py
- name: Upload container image
@@ -227,7 +226,7 @@ jobs:
- name: Restore container cache
uses: actions/cache/restore@v4
with:
- key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }}
+ key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
path: |-
share/container.tar.gz
share/image-id.txt
@@ -334,7 +333,7 @@ jobs:
- name: Restore container image
uses: actions/cache/restore@v4
with:
- key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }}
+ key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
path: |-
share/container.tar.gz
share/image-id.txt
@@ -429,7 +428,7 @@ jobs:
- name: Restore container image
uses: actions/cache/restore@v4
with:
- key: v3-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/common.py', 'dangerzone/conversion/doc_to_pixels.py', 'dangerzone/conversion/pixels_to_pdf.py', 'poetry.lock', 'gvisor_wrapper/entrypoint.py') }}
+ key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
path: |-
share/container.tar.gz
share/image-id.txt
@@ -472,3 +471,30 @@ jobs:
# file successfully.
xvfb-run -s '-ac' ./dev_scripts/env.py --distro ${{ matrix.distro }} --version ${{ matrix.version }} run --dev \
bash -c 'cd dangerzone; poetry run make test'
check-reproducibility:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install dev. dependencies
run: |-
sudo apt-get update
sudo apt-get install -y git python3-poetry --no-install-recommends
poetry install --only package
- name: Verify that the Dockerfile matches the committed template and params
run: |-
cp Dockerfile Dockerfile.orig
make Dockerfile
diff Dockerfile.orig Dockerfile
- name: Build Dangerzone container image
run: |
python3 ./install/common/build-image.py --no-save
- name: Reproduce the same container image
run: |
./dev_scripts/reproduce-image.py


@@ -0,0 +1,57 @@
name: Release container image
on:
push:
tags:
- "container-image/**"
branches:
- "test/image-**"
workflow_dispatch:
permissions:
id-token: write
packages: write
contents: read
attestations: write
env:
REGISTRY: ghcr.io/${{ github.repository_owner }}
REGISTRY_USER: ${{ github.actor }}
REGISTRY_PASSWORD: ${{ github.token }}
IMAGE_NAME: dangerzone/dangerzone
jobs:
build-container-image:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: USERNAME
password: ${{ github.token }}
- name: Build and push the dangerzone image
id: build-image
run: |
sudo apt-get install -y python3-poetry
python3 ./install/common/build-image.py
echo ${{ github.token }} | podman login ghcr.io -u USERNAME --password-stdin
# Load the image with the final name directly
gunzip -c share/container.tar.gz | podman load
FINAL_IMAGE_NAME="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}"
TAG=$(git describe --long --first-parent | tail -c +2)
podman tag dangerzone.rocks/dangerzone:$TAG "$FINAL_IMAGE_NAME"
podman push "$FINAL_IMAGE_NAME" --digestfile=digest
echo "digest=$(cat digest)" >> "$GITHUB_OUTPUT"
- name: Generate artifact attestation
uses: actions/attest-build-provenance@v1
with:
subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
subject-digest: "${{ steps.build-image.outputs.digest }}"
push-to-registry: true
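Once pushed, the provenance can be checked from any workstation with a recent GitHub CLI; a minimal sketch (the image reference is illustrative):

```bash
# Verify the build-provenance attestation of a published image,
# checking that it was attested by a workflow in the owner's org.
gh attestation verify \
  oci://ghcr.io/freedomofpress/dangerzone/dangerzone:latest \
  --owner freedomofpress
```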


@@ -21,13 +21,17 @@ jobs:
          sudo apt install pipx
          pipx install poetry
          pipx inject poetry poetry-plugin-export
+         poetry install --only package
+     - name: Bump date of Debian snapshot archive
+       run: |
+         date=$(date "+%Y%m%d")
+         sed -i "s/DEBIAN_ARCHIVE_DATE=[0-9]\+/DEBIAN_ARCHIVE_DATE=${date}/" Dockerfile.env
+         make Dockerfile
      - name: Build container image
        run: python3 ./install/common/build-image.py --runtime docker --no-save
      - name: Get image tag
        id: tag
-       run: |
-         tag=$(docker images dangerzone.rocks/dangerzone --format '{{ .Tag }}')
-         echo "tag=$tag" >> $GITHUB_OUTPUT
+       run: echo "tag=$(cat share/image-id.txt)" >> $GITHUB_OUTPUT
      # NOTE: Scan first without failing, else we won't be able to read the scan
      # report.
      - name: Scan container image (no fail)


@@ -2,10 +2,38 @@
# latest release of Dangerzone, and offer our analysis.
ignore:
-  # CVE-2024-11053
+  # CVE-2023-45853
  # ==============
  #
-  # NVD Entry: https://nvd.nist.gov/vuln/detail/CVE-2024-11053
-  # Verdict: Dangerzone is not affected because libcurl is an HTTP client, and
-  # the Dangerzone container does not make any network calls.
-  - vulnerability: CVE-2024-11053
+  # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2023-45853
+  # Verdict: Dangerzone is not affected because the zlib library in Debian is
+  # built in a way that is not vulnerable.
+  - vulnerability: CVE-2023-45853
+  # CVE-2024-38428
+  # ==============
+  #
+  # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2024-38428
+  # Verdict: Dangerzone is not affected because it doesn't use wget in the
+  # container image (which also has no network connectivity).
+  - vulnerability: CVE-2024-38428
+  # CVE-2024-57823
+  # ==============
+  #
+  # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2024-57823
+  # Verdict: Dangerzone is not affected. First things first, LibreOffice is
+  # using this library for parsing RDF metadata in a document [1], and has
+  # issued a fix for the vendored raptor2 package they have for other distros
+  # [2].
+  #
+  # On the other hand, the Debian security team has stated that this is a minor
+  # issue [3], and there's no fix from the developers yet. It seems that the
+  # Debian package is not affected somehow by this CVE, probably due to the way
+  # it's packaged.
+  #
+  # [1] https://wiki.documentfoundation.org/Documentation/DevGuide/Office_Development#RDF_metadata
+  # [2] https://cgit.freedesktop.org/libreoffice/core/commit/?id=2b50dc0e4482ac0ad27d69147b4175e05af4fba4
+  # [3] From https://security-tracker.debian.org/tracker/CVE-2024-57823:
+  #
+  #     [bookworm] - raptor2 <postponed> (Minor issue, revisit when fixed upstream)
+  #
+  - vulnerability: CVE-2024-57823
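This file is a Grype ignore list; assuming the scanner in use is Grype, a scan that honors these verdicts looks roughly like this (the image tag is illustrative):

```bash
# Scan the built image; grype picks up .grype.yaml from the current
# directory and suppresses the CVEs ignored above.
grype dangerzone.rocks/dangerzone:latest
```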


@@ -515,3 +515,9 @@ poetry run .\install\windows\build-app.bat
```
When you're done you will have `dist\Dangerzone.msi`.
## Updating the container image
The Dangerzone container image is reproducible. This means that every time we
build it, the result will be bit-for-bit the same, with some minor exceptions.
Read more on how you can update it in `docs/developer/reproducibility.md`.


@@ -1,102 +1,211 @@
- ###########################################
- # Build PyMuPDF
+ # NOTE: Updating the packages to their latest versions requires bumping the
+ # Dockerfile args below. For more info about this file, read
+ # docs/developer/reproducibility.md.
- FROM alpine:latest as pymupdf-build
- ARG ARCH
- ARG REQUIREMENTS_TXT
+ ARG DEBIAN_IMAGE_DATE=20250113
- # Install PyMuPDF via hash-checked requirements file
- COPY ${REQUIREMENTS_TXT} /tmp/requirements.txt
+ FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as dangerzone-image
- # PyMuPDF provides non-arm musl wheels only.
- # Only install build-dependencies if we are actually building the wheel
- RUN case "$ARCH" in \
- "arm64") \
- # This is required for copying later, but is created only in the pre-built wheels
- mkdir -p /usr/lib/python3.12/site-packages/PyMuPDF.libs/ \
- && apk --no-cache add linux-headers g++ linux-headers gcc make python3-dev py3-pip clang-dev ;; \
- *) \
- apk --no-cache add py3-pip ;; \
- esac
- RUN pip install -vv --break-system-packages --require-hashes -r /tmp/requirements.txt
+ ARG GVISOR_ARCHIVE_DATE=20250120
+ ARG DEBIAN_ARCHIVE_DATE=20250127
+ ARG H2ORESTART_CHECKSUM=7760dc2963332c50d15eee285933ec4b48d6a1de9e0c0f6082946f93090bd132
+ ARG H2ORESTART_VERSION=v0.7.0
+ ENV DEBIAN_FRONTEND=noninteractive
- ###########################################
- # Download H2ORestart
- FROM alpine:latest as h2orestart-dl
- ARG H2ORESTART_CHECKSUM=d09bc5c93fe2483a7e4a57985d2a8d0e4efae2efb04375fe4b59a68afd7241e2
- RUN mkdir /libreoffice_ext && cd libreoffice_ext \
+ # The following way of installing packages is taken from
+ # https://github.com/reproducible-containers/repro-sources-list.sh/blob/master/Dockerfile.debian-12,
+ # and adapted to allow installing gVisor from its own repo as well.
+ RUN \
+ --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ --mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \
+ --mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \
+ : "Hacky way to set a date for the Debian snapshot repos" && \
+ touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list.d/debian.sources && \
+ touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list && \
+ repro-sources-list.sh && \
+ : "Setup APT to install gVisor from its separate APT repo" && \
+ apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y --no-install-recommends apt-transport-https ca-certificates gnupg && \
+ gpg -o /usr/share/keyrings/gvisor-archive-keyring.gpg --dearmor /tmp/gvisor.key && \
+ echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/gvisor-archive-keyring.gpg] https://storage.googleapis.com/gvisor/releases ${GVISOR_ARCHIVE_DATE} main" > /etc/apt/sources.list.d/gvisor.list && \
+ : "Install the necessary gVisor and Dangerzone dependencies" && \
+ apt-get update && \
+ apt-get install -y --no-install-recommends \
+ python3 python3-fitz libreoffice-nogui libreoffice-java-common \
+ python3 python3-magic default-jre-headless fonts-noto-cjk fonts-dejavu \
+ runsc unzip wget && \
+ : "Clean up for improving reproducibility (optional)" && \
+ rm -rf /var/cache/fontconfig/ && \
+ rm -rf /etc/ssl/certs/java/cacerts && \
+ rm -rf /var/log/* /var/cache/ldconfig/aux-cache
+ # Download H2ORestart from GitHub using a pinned version and hash. Note that
+ # it's available in Debian repos, but not in Bookworm yet.
+ RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \
&& H2ORESTART_FILENAME=h2orestart.oxt \
- && H2ORESTART_VERSION="v0.6.6" \
&& wget https://github.com/ebandal/H2Orestart/releases/download/$H2ORESTART_VERSION/$H2ORESTART_FILENAME \
&& echo "$H2ORESTART_CHECKSUM $H2ORESTART_FILENAME" | sha256sum -c \
- && install -dm777 "/usr/lib/libreoffice/share/extensions/"
+ && install -dm777 "/usr/lib/libreoffice/share/extensions/" \
+ && rm /root/.wget-hsts
+ # Create an unprivileged user both for gVisor and for running Dangerzone.
+ RUN addgroup --gid 1000 dangerzone
+ RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
+ --disabled-password --home /home/dangerzone dangerzone
- ###########################################
- # Dangerzone image
- FROM alpine:latest AS dangerzone-image
- # Install dependencies
- RUN apk --no-cache -U upgrade && \
- apk --no-cache add \
- libreoffice \
- openjdk8 \
- python3 \
- py3-magic \
- font-noto-cjk
- COPY --from=pymupdf-build /usr/lib/python3.12/site-packages/fitz/ /usr/lib/python3.12/site-packages/fitz
- COPY --from=pymupdf-build /usr/lib/python3.12/site-packages/pymupdf/ /usr/lib/python3.12/site-packages/pymupdf
- COPY --from=pymupdf-build /usr/lib/python3.12/site-packages/PyMuPDF.libs/ /usr/lib/python3.12/site-packages/PyMuPDF.libs
- COPY --from=h2orestart-dl /libreoffice_ext/ /libreoffice_ext
- RUN install -dm777 "/usr/lib/libreoffice/share/extensions/"
# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
# import it.
RUN mkdir -p /opt/dangerzone/dangerzone
RUN touch /opt/dangerzone/dangerzone/__init__.py
- COPY conversion /opt/dangerzone/dangerzone/conversion
- # Add the unprivileged user. Set the UID/GID of the dangerzone user/group to
- # 1000, since we will point to it from the OCI config.
- #
- # NOTE: A tmpfs will be mounted over /home/dangerzone directory,
- # so nothing within it from the image will be persisted.
- RUN addgroup -g 1000 dangerzone && \
- adduser -u 1000 -s /bin/true -G dangerzone -h /home/dangerzone -D dangerzone
- ###########################################
- # gVisor wrapper image
- FROM alpine:latest
- RUN apk --no-cache -U upgrade && \
- apk --no-cache add python3
- RUN GVISOR_URL="https://storage.googleapis.com/gvisor/releases/release/latest/$(uname -m)"; \
- wget "${GVISOR_URL}/runsc" "${GVISOR_URL}/runsc.sha512" && \
- sha512sum -c runsc.sha512 && \
- rm -f runsc.sha512 && \
- chmod 555 runsc && \
- mv runsc /usr/bin/
- # Add the unprivileged `dangerzone` user.
- RUN addgroup dangerzone && \
- adduser -s /bin/true -G dangerzone -h /home/dangerzone -D dangerzone
- # Switch to the dangerzone user for the rest of the script.
- USER dangerzone
- # Copy the Dangerzone image, as created by the previous steps, into the home
- # directory of the `dangerzone` user.
- RUN mkdir /home/dangerzone/dangerzone-image
- COPY --from=dangerzone-image / /home/dangerzone/dangerzone-image/rootfs
+ # Copy only the Python code, and not any produced .pyc files.
+ COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/
+ # Create a directory that will be used by gVisor as the place where it will
+ # store the state of its containers.
+ RUN mkdir /home/dangerzone/.containers
- COPY gvisor_wrapper/entrypoint.py /
###############################################################################
#
# REUSING CONTAINER IMAGES:
# Anatomy of a hack
# ========================
#
# The rest of the Dockerfile aims to do one thing: allow the final container
# image to actually contain two container images; one for the outer container
# (spawned by Podman/Docker Desktop), and one for the inner container (spawned
# by gVisor).
#
# This has already been done in the past, and we explain why and how in the
# design document for gVisor integration (should be in
# `docs/developer/gvisor.md`). In this iteration, we want to also
# achieve the following:
#
# 1. Have a small final image, by sharing some system paths between the inner
# and outer container image using symlinks.
# 2. Allow our security scanning tool to see the contents of the inner
# container image.
# 3. Make the outer container image operational, in the sense that you can use
# `apt` commands and perform a conversion with Dangerzone, outside the
# gVisor sandbox. This is helpful for debugging purposes.
#
# Below we'll explain how our design choices are informed by the above
# sub-goals.
#
# First, to achieve a small container image, we basically need to copy `/etc`,
# `/usr` and `/opt` from the original Dangerzone image to the **inner**
# container image (under `/home/dangerzone/dangerzone-image/rootfs/`)
#
# That's all we need. The rest of the files play no role, and we can actually
# mask them in gVisor's OCI config.
#
# Second, in order to let our security scanner find the installed packages,
# we need to copy the following dirs to the root of the **outer** container
# image:
# * `/etc`, so that the security scanner can detect the image type and its
# sources
# * `/var`, so that the security scanner can have access to the APT database.
#
# IMPORTANT: We don't symlink the `/etc` of the **outer** container image to
# the **inner** one, in order to avoid leaking files like
# `/etc/{hostname,hosts,resolv.conf}` that Podman/Docker mounts when running
# the **outer** container image.
#
# Third, in order to have an operational Debian image, we are _mostly_ covered
# by the dirs we have copied. There's a _rare_ case where during debugging, we
# may want to install a system package that has components in `/etc` and
# `/var`, which will not be available in the **inner** container image. In that
# case, the developer can do the necessary symlinks in the live container.
#
# FILESYSTEM HIERARCHY
# ====================
#
# The above plan leads to the following filesystem hierarchy:
#
# Outer container image:
#
# # ls -l /
# lrwxrwxrwx 1 root root 7 Jan 27 10:46 bin -> usr/bin
# -rwxr-xr-x 1 root root 7764 Jan 24 08:14 entrypoint.py
# drwxr-xr-x 1 root root 4096 Jan 27 10:47 etc
# drwxr-xr-x 1 root root 4096 Jan 27 10:46 home
# lrwxrwxrwx 1 root root 7 Jan 27 10:46 lib -> usr/lib
# lrwxrwxrwx 1 root root 9 Jan 27 10:46 lib64 -> usr/lib64
# drwxr-xr-x 2 root root 4096 Jan 27 10:46 root
# drwxr-xr-x 1 root root 4096 Jan 27 10:47 run
# lrwxrwxrwx 1 root root 8 Jan 27 10:46 sbin -> usr/sbin
# drwxrwxrwx 2 root root 4096 Jan 27 10:46 tmp
# lrwxrwxrwx 1 root root 44 Jan 27 10:46 usr -> /home/dangerzone/dangerzone-image/rootfs/usr
# drwxr-xr-x 11 root root 4096 Jan 27 10:47 var
#
# Inner container image:
#
# # ls -l /home/dangerzone/dangerzone-image/rootfs/
# total 12
# lrwxrwxrwx 1 root root 7 Jan 27 10:47 bin -> usr/bin
# drwxr-xr-x 43 root root 4096 Jan 27 10:46 etc
# lrwxrwxrwx 1 root root 7 Jan 27 10:47 lib -> usr/lib
# lrwxrwxrwx 1 root root 9 Jan 27 10:47 lib64 -> usr/lib64
# drwxr-xr-x 4 root root 4096 Jan 27 10:47 opt
# drwxr-xr-x 12 root root 4096 Jan 27 10:47 usr
#
# SYMLINKING /USR
# ===============
#
# It's surprisingly difficult (maybe even borderline impossible) to symlink
# `/usr` to a different path during image build. The problem is that /usr
# is very sensitive, and you can't manipulate it in a live system. That is, I
# haven't found a way to do the following, or something equivalent:
#
# rm -r /usr && ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /usr
#
# The `ln` binary, even if you specify it by its full path, cannot run
# (probably because `ld-linux.so` can't be found). For this reason, we have
# to create the symlinks beforehand, in a previous build stage. Then, in an
# empty container image (a scratch image), we can copy these symlinks and the
# /usr, and stitch everything together.
###############################################################################
# Create the filesystem hierarchy that will be used to symlink /usr.
RUN mkdir /new_root
RUN mkdir /new_root/root /new_root/run /new_root/tmp
RUN chmod 777 /new_root/tmp
RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
RUN ln -s usr/bin /new_root/bin
RUN ln -s usr/lib /new_root/lib
RUN ln -s usr/lib64 /new_root/lib64
RUN ln -s usr/sbin /new_root/sbin
## Final image
FROM scratch
# Copy the filesystem hierarchy that we created in the previous stage, so that
# /usr can be a symlink.
COPY --from=dangerzone-image /new_root/ /
# Copy the bare minimum to run Dangerzone in the inner container image.
COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/
COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/
COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/
RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin
RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib
RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64
# Copy the bare minimum to let the security scanner find vulnerabilities.
COPY --from=dangerzone-image /etc/ /etc/
COPY --from=dangerzone-image /var/ /var/
# Allow our entrypoint script to make changes in the following folders.
RUN chown dangerzone:dangerzone /home/dangerzone /home/dangerzone/dangerzone-image/
# Switch to the dangerzone user for the rest of the script.
USER dangerzone
COPY container_helpers/entrypoint.py /
ENTRYPOINT ["/entrypoint.py"]

Dockerfile.env (new file, 9 lines)

@@ -0,0 +1,9 @@
# Can be bumped to the latest date in https://hub.docker.com/_/debian/tags?name=bookworm-
DEBIAN_IMAGE_DATE=20250113
# Can be bumped to today's date
DEBIAN_ARCHIVE_DATE=20250127
# Can be bumped to the latest date in https://github.com/google/gvisor/tags
GVISOR_ARCHIVE_DATE=20250120
# Can be bumped to the latest version and checksum from https://github.com/ebandal/H2Orestart/releases
H2ORESTART_CHECKSUM=7760dc2963332c50d15eee285933ec4b48d6a1de9e0c0f6082946f93090bd132
H2ORESTART_VERSION=v0.7.0
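Bumping one of these pins and regenerating the Dockerfile then reduces to the following, mirroring the snapshot-date bump in the scan workflow above:

```bash
# Refresh the Debian snapshot date to today and re-render the Dockerfile
# from its template.
sed -i "s/DEBIAN_ARCHIVE_DATE=[0-9]\+/DEBIAN_ARCHIVE_DATE=$(date '+%Y%m%d')/" Dockerfile.env
make Dockerfile
```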

Dockerfile.in (new file, 211 lines)

@@ -0,0 +1,211 @@
# NOTE: Updating the packages to their latest versions requires bumping the
# Dockerfile args below. For more info about this file, read
# docs/developer/reproducibility.md.
ARG DEBIAN_IMAGE_DATE={{DEBIAN_IMAGE_DATE}}
FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as dangerzone-image
ARG GVISOR_ARCHIVE_DATE={{GVISOR_ARCHIVE_DATE}}
ARG DEBIAN_ARCHIVE_DATE={{DEBIAN_ARCHIVE_DATE}}
ARG H2ORESTART_CHECKSUM={{H2ORESTART_CHECKSUM}}
ARG H2ORESTART_VERSION={{H2ORESTART_VERSION}}
ENV DEBIAN_FRONTEND=noninteractive
# The following way of installing packages is taken from
# https://github.com/reproducible-containers/repro-sources-list.sh/blob/master/Dockerfile.debian-12,
# and adapted to allow installing gVisor from its own repo as well.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
--mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \
--mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \
: "Hacky way to set a date for the Debian snapshot repos" && \
touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list.d/debian.sources && \
touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list && \
repro-sources-list.sh && \
: "Setup APT to install gVisor from its separate APT repo" && \
apt-get update && \
apt-get upgrade -y && \
apt-get install -y --no-install-recommends apt-transport-https ca-certificates gnupg && \
gpg -o /usr/share/keyrings/gvisor-archive-keyring.gpg --dearmor /tmp/gvisor.key && \
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/gvisor-archive-keyring.gpg] https://storage.googleapis.com/gvisor/releases ${GVISOR_ARCHIVE_DATE} main" > /etc/apt/sources.list.d/gvisor.list && \
: "Install the necessary gVisor and Dangerzone dependencies" && \
apt-get update && \
apt-get install -y --no-install-recommends \
python3 python3-fitz libreoffice-nogui libreoffice-java-common \
python3 python3-magic default-jre-headless fonts-noto-cjk fonts-dejavu \
runsc unzip wget && \
: "Clean up for improving reproducibility (optional)" && \
rm -rf /var/cache/fontconfig/ && \
rm -rf /etc/ssl/certs/java/cacerts && \
rm -rf /var/log/* /var/cache/ldconfig/aux-cache
# Download H2ORestart from GitHub using a pinned version and hash. Note that
# it's available in Debian repos, but not in Bookworm yet.
RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \
&& H2ORESTART_FILENAME=h2orestart.oxt \
&& wget https://github.com/ebandal/H2Orestart/releases/download/$H2ORESTART_VERSION/$H2ORESTART_FILENAME \
&& echo "$H2ORESTART_CHECKSUM $H2ORESTART_FILENAME" | sha256sum -c \
&& install -dm777 "/usr/lib/libreoffice/share/extensions/" \
&& rm /root/.wget-hsts
# Create an unprivileged user both for gVisor and for running Dangerzone.
RUN addgroup --gid 1000 dangerzone
RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
--disabled-password --home /home/dangerzone dangerzone
# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
# import it.
RUN mkdir -p /opt/dangerzone/dangerzone
RUN touch /opt/dangerzone/dangerzone/__init__.py
# Copy only the Python code, and not any produced .pyc files.
COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/
# Create a directory that will be used by gVisor as the place where it will
# store the state of its containers.
RUN mkdir /home/dangerzone/.containers
###############################################################################
#
# REUSING CONTAINER IMAGES:
# Anatomy of a hack
# ========================
#
# The rest of the Dockerfile aims to do one thing: allow the final container
# image to actually contain two container images; one for the outer container
# (spawned by Podman/Docker Desktop), and one for the inner container (spawned
# by gVisor).
#
# This has already been done in the past, and we explain why and how in the
# design document for gVisor integration (should be in
# `docs/developer/gvisor.md`). In this iteration, we want to also
# achieve the following:
#
# 1. Have a small final image, by sharing some system paths between the inner
# and outer container image using symlinks.
# 2. Allow our security scanning tool to see the contents of the inner
# container image.
# 3. Make the outer container image operational, in the sense that you can use
# `apt` commands and perform a conversion with Dangerzone, outside the
# gVisor sandbox. This is helpful for debugging purposes.
#
# Below we'll explain how our design choices are informed by the above
# sub-goals.
#
# First, to achieve a small container image, we basically need to copy `/etc`,
# `/usr` and `/opt` from the original Dangerzone image to the **inner**
# container image (under `/home/dangerzone/dangerzone-image/rootfs/`)
#
# That's all we need. The rest of the files play no role, and we can actually
# mask them in gVisor's OCI config.
#
# Second, in order to let our security scanner find the installed packages,
# we need to copy the following dirs to the root of the **outer** container
# image:
# * `/etc`, so that the security scanner can detect the image type and its
# sources
# * `/var`, so that the security scanner can have access to the APT database.
#
# IMPORTANT: We don't symlink the `/etc` of the **outer** container image to
# the **inner** one, in order to avoid leaking files like
# `/etc/{hostname,hosts,resolv.conf}` that Podman/Docker mounts when running
# the **outer** container image.
#
# Third, in order to have an operational Debian image, we are _mostly_ covered
# by the dirs we have copied. There's a _rare_ case where during debugging, we
# may want to install a system package that has components in `/etc` and
# `/var`, which will not be available in the **inner** container image. In that
# case, the developer can do the necessary symlinks in the live container.
#
# FILESYSTEM HIERARCHY
# ====================
#
# The above plan leads to the following filesystem hierarchy:
#
# Outer container image:
#
# # ls -l /
# lrwxrwxrwx 1 root root 7 Jan 27 10:46 bin -> usr/bin
# -rwxr-xr-x 1 root root 7764 Jan 24 08:14 entrypoint.py
# drwxr-xr-x 1 root root 4096 Jan 27 10:47 etc
# drwxr-xr-x 1 root root 4096 Jan 27 10:46 home
# lrwxrwxrwx 1 root root 7 Jan 27 10:46 lib -> usr/lib
# lrwxrwxrwx 1 root root 9 Jan 27 10:46 lib64 -> usr/lib64
# drwxr-xr-x 2 root root 4096 Jan 27 10:46 root
# drwxr-xr-x 1 root root 4096 Jan 27 10:47 run
# lrwxrwxrwx 1 root root 8 Jan 27 10:46 sbin -> usr/sbin
# drwxrwxrwx 2 root root 4096 Jan 27 10:46 tmp
# lrwxrwxrwx 1 root root 44 Jan 27 10:46 usr -> /home/dangerzone/dangerzone-image/rootfs/usr
# drwxr-xr-x 11 root root 4096 Jan 27 10:47 var
#
# Inner container image:
#
# # ls -l /home/dangerzone/dangerzone-image/rootfs/
# total 12
# lrwxrwxrwx 1 root root 7 Jan 27 10:47 bin -> usr/bin
# drwxr-xr-x 43 root root 4096 Jan 27 10:46 etc
# lrwxrwxrwx 1 root root 7 Jan 27 10:47 lib -> usr/lib
# lrwxrwxrwx 1 root root 9 Jan 27 10:47 lib64 -> usr/lib64
# drwxr-xr-x 4 root root 4096 Jan 27 10:47 opt
# drwxr-xr-x 12 root root 4096 Jan 27 10:47 usr
#
# SYMLINKING /USR
# ===============
#
# It's surprisingly difficult (maybe even borderline impossible) to symlink
# `/usr` to a different path during image build. The problem is that /usr
# is very sensitive, and you can't manipulate it in a live system. That is, I
# haven't found a way to do the following, or something equivalent:
#
# rm -r /usr && ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /usr
#
# The `ln` binary, even if you specify it by its full path, cannot run
# (probably because `ld-linux.so` can't be found). For this reason, we have
# to create the symlinks beforehand, in a previous build stage. Then, in an
# empty container image (a scratch image), we can copy these symlinks and the
# /usr, and stitch everything together.
###############################################################################
# Create the filesystem hierarchy that will be used to symlink /usr.
RUN mkdir /new_root
RUN mkdir /new_root/root /new_root/run /new_root/tmp
RUN chmod 777 /new_root/tmp
RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
RUN ln -s usr/bin /new_root/bin
RUN ln -s usr/lib /new_root/lib
RUN ln -s usr/lib64 /new_root/lib64
RUN ln -s usr/sbin /new_root/sbin
## Final image
FROM scratch
# Copy the filesystem hierarchy that we created in the previous stage, so that
# /usr can be a symlink.
COPY --from=dangerzone-image /new_root/ /
# Copy the bare minimum to run Dangerzone in the inner container image.
COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/
COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/
COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/
RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin
RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib
RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64
# Copy the bare minimum to let the security scanner find vulnerabilities.
COPY --from=dangerzone-image /etc/ /etc/
COPY --from=dangerzone-image /var/ /var/
# Allow our entrypoint script to make changes in the following folders.
RUN chown dangerzone:dangerzone /home/dangerzone /home/dangerzone/dangerzone-image/
# Switch to the dangerzone user for the rest of the script.
USER dangerzone
COPY container_helpers/entrypoint.py /
ENTRYPOINT ["/entrypoint.py"]


@@ -47,6 +47,9 @@ test-large: test-large-init ## Run large test set
python -m pytest --tb=no tests/test_large_set.py::TestLargeSet -v $(JUNIT_FLAGS) --junitxml=$(TEST_LARGE_RESULTS)
python $(TEST_LARGE_RESULTS)/report.py $(TEST_LARGE_RESULTS)
Dockerfile: Dockerfile.env Dockerfile.in
poetry run jinja2 Dockerfile.in Dockerfile.env > Dockerfile
.PHONY: build-clean
build-clean:
doit clean


@@ -8,12 +8,14 @@ Here is a list of tasks that should be done before issuing the release:
- [ ] Create a new issue named **QA and Release for version \<VERSION\>**, to track the general progress.
You can generate its content with the `poetry run ./dev_scripts/generate-release-tasks.py` command.
- - [ ] [Add new Linux platforms and remove obsolete ones](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#add-new-platforms-and-remove-obsolete-ones)
+ - [ ] [Add new Linux platforms and remove obsolete ones](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#add-new-linux-platforms-and-remove-obsolete-ones)
- [ ] Bump the Python dependencies using `poetry lock`
- [ ] Update `version` in `pyproject.toml`
- [ ] Update `share/version.txt`
- [ ] Update the "Version" field in `install/linux/dangerzone.spec`
- [ ] Bump the Debian version by adding a new changelog entry in `debian/changelog`
- [ ] [Bump the minimum Docker Desktop versions](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#bump-the-minimum-docker-desktop-version) in `isolation_provider/container.py`
- [ ] Bump the dates and versions in the `Dockerfile`
- [ ] Update screenshot in `README.md`, if necessary
- [ ] CHANGELOG.md should be updated to include a list of all major changes since the last release
- [ ] A draft release should be created. Copy the release notes text from the template at [`docs/templates/release-notes`](https://github.com/freedomofpress/dangerzone/tree/main/docs/templates/)
@@ -46,6 +48,12 @@ In case of the removal of a version:
* Consult the previous paragraph, but also `grep` your way around.
2. Add a notice in our `CHANGELOG.md` about the version removal.
## Bump the minimum Docker Desktop version
We embed the minimum Docker Desktop version inside Dangerzone, as an incentive for our macOS and Windows users to upgrade to the latest version.
You can find the latest version at the time of the release by looking at [their release notes](https://docs.docker.com/desktop/release-notes/).
## Large Document Testing
Parallel to the QA process, the release candidate should be put through the large document tests in a dedicated machine to run overnight.

THIRD_PARTY_NOTICE (new file, 14 lines)

@@ -0,0 +1,14 @@
This project includes third-party components as follows:
1. gVisor APT Key
- URL: https://gvisor.dev/archive.key
- Last updated: 2025-01-21
- Description: This is the public key used for verifying packages from the gVisor repository.
2. Reproducible Containers Helper Script
- URL: https://github.com/reproducible-containers/repro-sources-list.sh/blob/d15cf12b26395b857b24fba223b108aff1c91b26/repro-sources-list.sh
- Last updated: 2025-01-21
- Description: This script is used for building reproducible Debian images.
Please refer to the respective sources for licensing information and further details regarding the use of these components.


@@ -302,7 +302,7 @@ def display_banner() -> None:
+ Back.BLACK
+ Fore.LIGHTWHITE_EX
+ Style.BRIGHT
- + f"{' '*left_spaces}Dangerzone v{get_version()}{' '*right_spaces}"
+ + f"{' ' * left_spaces}Dangerzone v{get_version()}{' ' * right_spaces}"
+ Fore.YELLOW
+ Style.DIM
+ ""


@@ -59,10 +59,28 @@ oci_config: dict[str, typing.Any] = {
"root": {"path": "rootfs", "readonly": True},
"hostname": "dangerzone",
"mounts": [
# Mask almost every system directory of the outer container, by mounting tmpfs
# on top of them. This is done to avoid leaking any sensitive information,
# either mounted by Podman/Docker, or when gVisor runs, since we reuse the same
# rootfs. We basically mask everything except for `/usr`, `/bin`, `/lib`,
# `/etc`, and `/opt`.
#
# Note that we set `--root /home/dangerzone/.containers` for the directory where
# gVisor will create files at runtime, which means that in principle, we are
# covered by the masking of `/home/dangerzone` that follows below.
#
# Finally, note that the following list has been taken from the dirs in our
# container image, and double-checked against the top-level dirs listed in the
# Filesystem Hierarchy Standard (FHS) [1]. It would be nice to have an allowlist
# approach instead of a denylist, but FHS is such an old standard that we don't
# expect any new top-level dirs to pop up any time soon.
#
# [1] https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard
{
"destination": "/proc",
"type": "proc",
"source": "proc",
"destination": "/boot",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev", "ro"],
},
{
"destination": "/dev",
@@ -70,6 +88,53 @@ oci_config: dict[str, typing.Any] = {
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev"],
},
{
"destination": "/home",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev", "ro"],
},
{
"destination": "/media",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev", "ro"],
},
{
"destination": "/mnt",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev", "ro"],
},
{
"destination": "/proc",
"type": "proc",
"source": "proc",
},
{
"destination": "/root",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev", "ro"],
},
{
"destination": "/run",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev"],
},
{
"destination": "/sbin",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev", "ro"],
},
{
"destination": "/srv",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev", "ro"],
},
{
"destination": "/sys",
"type": "tmpfs",
@@ -82,6 +147,12 @@ oci_config: dict[str, typing.Any] = {
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev"],
},
{
"destination": "/var",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "noexec", "nodev"],
},
# LibreOffice needs a writable home directory, so just mount a tmpfs
# over it.
{


@@ -0,0 +1,29 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBF0meAYBEACcBYPOSBiKtid+qTQlbgKGPxUYt0cNZiQqWXylhYUT4PuNlNx5
s+sBLFvNTpdTrXMmZ8NkekyjD1HardWvebvJT4u+Ho/9jUr4rP71cNwNtocz/w8G
DsUXSLgH8SDkq6xw0L+5eGc78BBg9cOeBeFBm3UPgxTBXS9Zevoi2w1lzSxkXvjx
cGzltzMZfPXERljgLzp9AAfhg/2ouqVQm37fY+P/NDzFMJ1XHPIIp9KJl/prBVud
jJJteFZ5sgL6MwjBQq2kw+q2Jb8Zfjl0BeXDgGMN5M5lGhX2wTfiMbfo7KWyzRnB
RpSP3BxlLqYeQUuLG5Yx8z3oA3uBkuKaFOKvXtiScxmGM/+Ri2YM3m66imwDhtmP
AKwTPI3Re4gWWOffglMVSv2sUAY32XZ74yXjY1VhK3bN3WFUPGrgQx4X7GP0A1Te
lzqkT3VSMXieImTASosK5L5Q8rryvgCeI9tQLn9EpYFCtU3LXvVgTreGNEEjMOnL
dR7yOU+Fs775stn6ucqmdYarx7CvKUrNAhgEeHMonLe1cjYScF7NfLO1GIrQKJR2
DE0f+uJZ52inOkO8ufh3WVQJSYszuS3HCY7w5oj1aP38k/y9zZdZvVvwAWZaiqBQ
iwjVs6Kub76VVZZhRDf4iYs8k1Zh64nXdfQt250d8U5yMPF3wIJ+c1yhxwARAQAB
tCpUaGUgZ1Zpc29yIEF1dGhvcnMgPGd2aXNvci1ib3RAZ29vZ2xlLmNvbT6JAk4E
EwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQRvHfheOnHCSRjnJ9Vv
xtVU4yvZQwUCYO4TxQAKCRBvxtVU4yvZQ9UoEACLPV7CnEA2bjCPi0NCWB/Mo1WL
evqv7Wv7vmXzI1K9DrqOhxuamQW75SVXg1df0hTJWbKFmDAip6NEC2Rg5P+A8hHj
nW/VG+q4ZFT662jDhnXQiO9L7EZzjyqNF4yWYzzgnqEu/SmGkDLDYiUCcGBqS2oE
EQfk7RHJSLMJXAnNDH7OUDgrirSssg/dlQ5uAHA9Au80VvC5fsTKza8b3Aydw3SV
iB8/Yuikbl8wKbpSGiXtR4viElXjNips0+mBqaUk2xpqSBrsfN+FezcInVXaXFeq
xtpq2/3M3DYbqCRjqeyd9wNi92FHdOusNrK4MYe0pAYbGjc65BwH+F0T4oJ8ZSJV
lIt+FZ0MqM1T97XadybYFsJh8qvajQpZEPL+zzNncc4f1d80e7+lwIZV/al0FZWW
Zlp7TpbeO/uW+lHs5W14YKwaQVh1whapKXTrATipNOOSCw2hnfrT8V7Hy55QWaGZ
f4/kfy929EeCP16d/LqOClv0j0RBr6NhRBQ0l/BE/mXjJwIk6nKwi+Yi4ek1ARi6
AlCMLn9AZF7aTGpvCiftzIrlyDfVZT5IX03TayxRHZ4b1Rj8eyJaHcjI49u83gkr
4LGX08lEawn9nxFSx4RCg2swGiYw5F436wwwAIozqJuDASeTa3QND3au5v0oYWnl
umDySUl5wPaAaALgzA==
=5/8T
-----END PGP PUBLIC KEY BLOCK-----


@@ -0,0 +1,103 @@
#!/bin/bash
#
# Copyright The repro-sources-list.sh Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# repro-sources-list.sh:
# configures /etc/apt/sources.list and similar files for installing packages from a snapshot.
#
# This script is expected to be executed inside Dockerfile.
#
# The following distributions are supported:
# - debian:11 (/etc/apt/sources.list)
# - debian:12 (/etc/apt/sources.list.d/debian.sources)
# - ubuntu:22.04 (/etc/apt/sources.list)
# - ubuntu:24.04 (/etc/apt/sources.list.d/ubuntu.sources)
# - archlinux (/etc/pacman.d/mirrorlist)
#
# For further information, see https://github.com/reproducible-containers/repro-sources-list.sh
# -----------------------------------------------------------------------------
set -eux -o pipefail
. /etc/os-release
: "${KEEP_CACHE:=1}"
keep_apt_cache() {
rm -f /etc/apt/apt.conf.d/docker-clean
echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
}
case "${ID}" in
"debian")
: "${SNAPSHOT_ARCHIVE_BASE:=http://snapshot.debian.org/archive/}"
: "${BACKPORTS:=}"
if [ -e /etc/apt/sources.list.d/debian.sources ]; then
: "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list.d/debian.sources)}"
rm -f /etc/apt/sources.list.d/debian.sources
else
: "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list)}"
fi
snapshot="$(printf "%(%Y%m%dT%H%M%SZ)T\n" "${SOURCE_DATE_EPOCH}")"
# TODO: use the new format for Debian >= 12
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME} main" >/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian-security/${snapshot} ${VERSION_CODENAME}-security main" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME}-updates main" >>/etc/apt/sources.list
if [ "${BACKPORTS}" = 1 ]; then echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME}-backports main" >>/etc/apt/sources.list; fi
if [ "${KEEP_CACHE}" = 1 ]; then keep_apt_cache; fi
;;
"ubuntu")
: "${SNAPSHOT_ARCHIVE_BASE:=http://snapshot.ubuntu.com/}"
if [ -e /etc/apt/sources.list.d/ubuntu.sources ]; then
: "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list.d/ubuntu.sources)}"
rm -f /etc/apt/sources.list.d/ubuntu.sources
else
: "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list)}"
fi
snapshot="$(printf "%(%Y%m%dT%H%M%SZ)T\n" "${SOURCE_DATE_EPOCH}")"
# TODO: use the new format for Ubuntu >= 24.04
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} main restricted" >/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates main restricted" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} universe" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates universe" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} multiverse" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates multiverse" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-backports main restricted universe multiverse" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security main restricted" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security universe" >>/etc/apt/sources.list
echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security multiverse" >>/etc/apt/sources.list
if [ "${KEEP_CACHE}" = 1 ]; then keep_apt_cache; fi
# http://snapshot.ubuntu.com is redirected to https, so we have to install ca-certificates
export DEBIAN_FRONTEND=noninteractive
apt-get -o Acquire::https::Verify-Peer=false update >&2
apt-get -o Acquire::https::Verify-Peer=false install -y ca-certificates >&2
;;
"arch")
: "${SNAPSHOT_ARCHIVE_BASE:=http://archive.archlinux.org/}"
: "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /var/log/pacman.log)}"
export SOURCE_DATE_EPOCH
# shellcheck disable=SC2016
date -d "@${SOURCE_DATE_EPOCH}" "+Server = ${SNAPSHOT_ARCHIVE_BASE}repos/%Y/%m/%d/\$repo/os/\$arch" >/etc/pacman.d/mirrorlist
;;
*)
echo >&2 "Unsupported distribution: ${ID}"
exit 1
;;
esac
: "${WRITE_SOURCE_DATE_EPOCH:=/dev/null}"
echo "${SOURCE_DATE_EPOCH}" >"${WRITE_SOURCE_DATE_EPOCH}"
echo "SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH}"

View file

@ -129,6 +129,10 @@ class DocumentToPixels(DangerzoneConverter):
# At least .odt, .docx, .odg, .odp, .ods, and .pptx
"application/zip": {
"type": "libreoffice",
# NOTE: `file` command < 5.45 cannot detect hwpx files properly, so we
# enable the extension in any case. See also:
# https://github.com/freedomofpress/dangerzone/pull/460#issuecomment-1654166465
"libreoffice_ext": "h2orestart.oxt",
},
# At least .doc, .docx, .odg, .odp, .odt, .pdf, .ppt, .pptx, .xls, and .xlsx
"application/octet-stream": {
@ -249,7 +253,7 @@ class DocumentToPixels(DangerzoneConverter):
"unzip",
"-d",
f"/usr/lib/libreoffice/share/extensions/{libreoffice_ext}/",
f"/libreoffice_ext/{libreoffice_ext}",
f"/opt/libreoffice_ext/{libreoffice_ext}",
]
await self.run_command(
unzip_args,

View file

@ -124,6 +124,7 @@ class MainWindow(QtWidgets.QMainWindow):
self.setWindowTitle("Dangerzone")
self.setWindowIcon(self.dangerzone.get_window_icon())
self.alert: Optional[Alert] = None
self.setMinimumWidth(600)
if platform.system() == "Darwin":
@ -226,6 +227,13 @@ class MainWindow(QtWidgets.QMainWindow):
# This allows us to make QSS rules conditional on the OS color mode.
self.setProperty("OSColorMode", self.dangerzone.app.os_color_mode.value)
if hasattr(self.dangerzone.isolation_provider, "check_docker_desktop_version"):
is_version_valid, version = (
self.dangerzone.isolation_provider.check_docker_desktop_version()
)
if not is_version_valid:
self.handle_docker_desktop_version_check(is_version_valid, version)
self.show()
def show_update_success(self) -> None:
@ -279,6 +287,46 @@ class MainWindow(QtWidgets.QMainWindow):
self.dangerzone.settings.set("updater_check", check)
self.dangerzone.settings.save()
def handle_docker_desktop_version_check(
self, is_version_valid: bool, version: str
) -> None:
hamburger_menu = self.hamburger_button.menu()
sep = hamburger_menu.insertSeparator(hamburger_menu.actions()[0])
upgrade_action = QAction("Docker Desktop should be upgraded", hamburger_menu)
upgrade_action.setIcon(
QtGui.QIcon(
load_svg_image(
"hamburger_menu_update_dot_error.svg", width=64, height=64
)
)
)
message = """
<p>A new version of Docker Desktop is available. Please upgrade your system.</p>
<p>Visit the <a href="https://www.docker.com/products/docker-desktop">Docker Desktop website</a> to download the latest version.</p>
<em>Keeping Docker Desktop up to date allows you to have more confidence that your documents are processed safely.</em>
"""
self.alert = Alert(
self.dangerzone,
title="Upgrade Docker Desktop",
message=message,
ok_text="Ok",
has_cancel=False,
)
def _launch_alert() -> None:
if self.alert:
self.alert.launch()
upgrade_action.triggered.connect(_launch_alert)
hamburger_menu.insertAction(sep, upgrade_action)
self.hamburger_button.setIcon(
QtGui.QIcon(
load_svg_image("hamburger_menu_update_error.svg", width=64, height=64)
)
)
def handle_updates(self, report: UpdateReport) -> None:
"""Handle update reports from the update checker thread.
@ -365,7 +413,7 @@ class MainWindow(QtWidgets.QMainWindow):
self.content_widget.show()
def closeEvent(self, e: QtGui.QCloseEvent) -> None:
alert_widget = Alert(
self.alert = Alert(
self.dangerzone,
message="Some documents are still being converted.\n Are you sure you want to quit?",
ok_text="Abort conversions",
@ -379,7 +427,7 @@ class MainWindow(QtWidgets.QMainWindow):
else:
self.dangerzone.app.exit(0)
else:
accept_exit = alert_widget.launch()
accept_exit = self.alert.launch()
if not accept_exit:
e.ignore()
return
@ -623,7 +671,7 @@ class ContentWidget(QtWidgets.QWidget):
def documents_selected(self, docs: List[Document]) -> None:
if self.conversion_started:
Alert(
self.alert = Alert(
self.dangerzone,
message="Dangerzone does not support adding documents after the conversion has started.",
has_cancel=False,
@ -633,7 +681,7 @@ class ContentWidget(QtWidgets.QWidget):
# Ensure all files in batch are in the same directory
dirnames = {os.path.dirname(doc.input_filename) for doc in docs}
if len(dirnames) > 1:
Alert(
self.alert = Alert(
self.dangerzone,
message="Dangerzone does not support adding documents from multiple locations.\n\n The newly added documents were ignored.",
has_cancel=False,
@ -802,14 +850,14 @@ class DocSelectionDropFrame(QtWidgets.QFrame):
text = f"{num_unsupported_docs} files are not supported."
ok_text = "Continue without these files"
alert_widget = Alert(
self.alert = Alert(
self.dangerzone,
message=f"{text}\nThe supported extensions are: "
+ ", ".join(get_supported_extensions()),
ok_text=ok_text,
)
return alert_widget.exec_()
return self.alert.exec_()
class SettingsWidget(QtWidgets.QWidget):

View file

@ -335,9 +335,9 @@ class IsolationProvider(ABC):
stderr_thread = self.start_stderr_thread(p, stderr)
if platform.system() != "Windows":
assert os.getpgid(p.pid) != os.getpgid(
os.getpid()
), "Parent shares same PGID with child"
assert os.getpgid(p.pid) != os.getpgid(os.getpid()), (
"Parent shares same PGID with child"
)
try:
yield p

View file

@ -3,7 +3,7 @@ import os
import platform
import shlex
import subprocess
from typing import List
from typing import List, Tuple
from packaging.version import Version
from .. import container_utils, errors
from ..document import Document
@ -11,7 +11,10 @@ from ..util import get_resource_path, get_subprocess_startupinfo
from .base import IsolationProvider, terminate_process_group
TIMEOUT_KILL = 5 # Timeout in seconds until the kill command returns.
MINIMUM_DOCKER_DESKTOP = {
"Darwin": "4.36.0",
"Windows": "4.36.0",
}
# Define startupinfo for subprocesses
if platform.system() == "Windows":
@ -121,6 +124,7 @@ class Container(IsolationProvider):
def is_available() -> bool:
container_runtime = container_utils.get_runtime()
runtime_name = container_utils.get_runtime_name()
# Can we run `docker/podman image ls` without an error
with subprocess.Popen(
[container_runtime, "image", "ls"],
@ -135,6 +139,28 @@ class Container(IsolationProvider):
)
return True
def check_docker_desktop_version(self) -> Tuple[bool, str]:
# On windows and darwin, check that the minimum version is met
version = ""
if platform.system() != "Linux":
with subprocess.Popen(
["docker", "version", "--format", "{{.Server.Platform.Name}}"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=get_subprocess_startupinfo(),
) as p:
stdout, stderr = p.communicate()
if p.returncode != 0:
# When an error occurs, consider that the check went
# through, as we're checking for installation compatibility
# somewhere else already
return True, version
# The output is like "Docker Desktop 4.35.1 (173168)"
version = stdout.decode().replace("Docker Desktop", "").split()[0]
if Version(version) < Version(MINIMUM_DOCKER_DESKTOP[platform.system()]):
return False, version
return True, version
def doc_to_pixels_container_name(self, document: Document) -> str:
"""Unique container name for the doc-to-pixels phase."""
return f"dangerzone-doc-to-pixels-{document.id}"

View file

@ -5,11 +5,14 @@ import sys
import traceback
import unicodedata
import appdirs
try:
import platformdirs
except ImportError:
import appdirs as platformdirs
def get_config_dir() -> str:
return appdirs.user_config_dir("dangerzone")
return platformdirs.user_config_dir("dangerzone")
def get_resource_path(filename: str) -> str:

2
debian/control vendored
View file

@ -9,7 +9,7 @@ Rules-Requires-Root: no
Package: dangerzone
Architecture: any
Depends: ${misc:Depends}, ${python3:Depends}, podman, python3, python3-pyside2.qtcore, python3-pyside2.qtgui, python3-pyside2.qtwidgets, python3-pyside2.qtsvg, python3-appdirs, python3-click, python3-xdg, python3-colorama, python3-requests, python3-markdown, python3-packaging, tesseract-ocr-all
Depends: ${misc:Depends}, podman, python3, python3-pyside2.qtcore, python3-pyside2.qtgui, python3-pyside2.qtwidgets, python3-pyside2.qtsvg, python3-platformdirs | python3-appdirs, python3-click, python3-xdg, python3-colorama, python3-requests, python3-markdown, python3-packaging, tesseract-ocr-all
Description: Take potentially dangerous PDFs, office documents, or images
Dangerzone is an open source desktop application that takes potentially dangerous PDFs, office documents, or images and converts them to safe PDFs. It uses disposable VMs on Qubes OS, or container technology in other OSes, to convert the documents within a secure sandbox.
.

2
debian/rules vendored
View file

@ -9,5 +9,5 @@ export DH_VERBOSE=1
dh $@ --with python3 --buildsystem=pybuild
override_dh_builddeb:
./install/linux/vendor-pymupdf.py --dest debian/dangerzone/usr/lib/python3/dist-packages/dangerzone/vendor/
./install/linux/debian-vendor-pymupdf.py --dest debian/dangerzone/usr/lib/python3/dist-packages/dangerzone/vendor/
dh_builddeb $@

517
dev_scripts/registry.py Executable file
View file

@ -0,0 +1,517 @@
#!/usr/bin/python
import hashlib
import json
import platform
import re
import shutil
import subprocess
from base64 import b64decode
from pathlib import Path
from tempfile import NamedTemporaryFile
import click
import requests
try:
import platformdirs
except ImportError:
import appdirs as platformdirs
def get_config_dir() -> Path:
return Path(platformdirs.user_config_dir("dangerzone"))
SIGNATURES_PATH = get_config_dir() / "signatures"
DEFAULT_REPO = "freedomofpress/dangerzone"
SIGSTORE_BUNDLE = "application/vnd.dev.sigstore.bundle.v0.3+json"
DOCKER_MANIFEST_DISTRIBUTION = "application/vnd.docker.distribution.manifest.v2+json"
DOCKER_MANIFEST_INDEX = "application/vnd.oci.image.index.v1+json"
OCI_IMAGE_MANIFEST = "application/vnd.oci.image.manifest.v1+json"
class RegistryClient:
def __init__(self, registry, org, image):
self._registry = registry
self._org = org
self._image = image
self._auth_token = None
self._base_url = f"https://{registry}"
self._image_url = f"{self._base_url}/v2/{self._org}/{self._image}"
@property
def image(self):
return f"{self._registry}/{self._org}/{self._image}"
def get_auth_token(self):
if not self._auth_token:
auth_url = f"{self._base_url}/token"
response = requests.get(
auth_url,
params={
"service": f"{self._registry}",
"scope": f"repository:{self._org}/{self._image}:pull",
},
)
response.raise_for_status()
self._auth_token = response.json()["token"]
return self._auth_token
def get_auth_header(self):
return {"Authorization": f"Bearer {self.get_auth_token()}"}
def list_tags(self):
url = f"{self._image_url}/tags/list"
response = requests.get(url, headers=self.get_auth_header())
response.raise_for_status()
tags = response.json().get("tags", [])
return tags
def get_manifest(self, tag, extra_headers=None):
"""Get manifest information for a specific tag"""
manifest_url = f"{self._image_url}/manifests/{tag}"
headers = {
"Accept": DOCKER_MANIFEST_DISTRIBUTION,
"Authorization": f"Bearer {self.get_auth_token()}",
}
if extra_headers:
headers.update(extra_headers)
response = requests.get(manifest_url, headers=headers)
response.raise_for_status()
return response
def list_manifests(self, tag):
return (
self.get_manifest(
tag,
{
"Accept": DOCKER_MANIFEST_INDEX,
},
)
.json()
.get("manifests")
)
def get_blob(self, hash):
url = f"{self._image_url}/blobs/{hash}"
response = requests.get(
url,
headers={
"Authorization": f"Bearer {self.get_auth_token()}",
},
)
response.raise_for_status()
return response
def get_manifest_hash(self, tag, tag_manifest_content=None):
if not tag_manifest_content:
tag_manifest_content = self.get_manifest(tag).content
return hashlib.sha256(tag_manifest_content).hexdigest()
def get_attestation(self, tag):
"""
Retrieve an attestation from a given tag.
The attestation needs to be attached using the Cosign Bundle
Specification defined at:
https://github.com/sigstore/cosign/blob/main/specs/BUNDLE_SPEC.md
"""
def _find_sigstore_bundle_manifest(manifests):
for manifest in manifests:
if manifest["artifactType"] == SIGSTORE_BUNDLE:
return manifest["mediaType"], manifest["digest"]
def _get_bundle_blob_digest(layers):
for layer in layers:
if layer.get("mediaType") == SIGSTORE_BUNDLE:
return layer["digest"]
tag_manifest_content = self.get_manifest(tag).content
# The attestation is available on the same container registry, with a
# specific tag named "sha256-{sha256(manifest)}"
tag_manifest_hash = self.get_manifest_hash(tag, tag_manifest_content)
# This will get us a "list" of manifests...
manifests = self.list_manifests(f"sha256-{tag_manifest_hash}")
# ... from which we want the sigstore bundle
bundle_manifest_mediatype, bundle_manifest_digest = (
_find_sigstore_bundle_manifest(manifests)
)
if not bundle_manifest_digest:
raise Exception("Unable to find the sigstore bundle manifest info")
bundle_manifest = self.get_manifest(
bundle_manifest_digest, extra_headers={"Accept": bundle_manifest_mediatype}
).json()
# From there, we will get the attestation in a blob.
# It will be the first layer listed at this manifest hash location
layers = bundle_manifest.get("layers", [])
blob_digest = _get_bundle_blob_digest(layers)
bundle = self.get_blob(blob_digest)
return tag_manifest_content, bundle.content
def _write(file, content):
file.write(content)
file.flush()
def verify_attestation(
registry_client: RegistryClient, image_tag: str, expected_repo: str
):
"""
Look up the image attestation to see if the image has been built
on Github runners, and from a given repository.
"""
manifest, bundle = registry_client.get_attestation(image_tag)
# Put the value in files and verify with cosign
with (
NamedTemporaryFile(mode="wb") as manifest_json,
NamedTemporaryFile(mode="wb") as bundle_json,
):
_write(manifest_json, manifest)
_write(bundle_json, bundle)
# Call cosign with the temporary file paths
cmd = [
"cosign",
"verify-blob-attestation",
"--bundle",
bundle_json.name,
"--new-bundle-format",
"--certificate-oidc-issuer",
"https://token.actions.githubusercontent.com",
"--certificate-identity-regexp",
f"^https://github.com/{expected_repo}/.github/workflows/release-container-image.yml@refs/heads/test/image-publication-cosign",
manifest_json.name,
]
result = subprocess.run(cmd, capture_output=True)
if result.returncode != 0:
raise Exception(f"Attestation cannot be verified. {result.stderr}")
return True
def new_image_release():
# XXX - Implement
return True
def signature_to_bundle(sig):
# Convert cosign-download signatures to the format expected by cosign bundle.
bundle = sig["Bundle"]
payload = bundle["Payload"]
return {
"base64Signature": sig["Base64Signature"],
"Payload": sig["Payload"],
"cert": sig["Cert"],
"chain": sig["Chain"],
"rekorBundle": {
"SignedEntryTimestamp": bundle["SignedEntryTimestamp"],
"Payload": {
"body": payload["body"],
"integratedTime": payload["integratedTime"],
"logIndex": payload["logIndex"],
"logID": payload["logID"],
},
},
"RFC3161Timestamp": sig["RFC3161Timestamp"],
}
def verify_signature(signature, pubkey):
"""Verify a signature against a given public key"""
signature_bundle = signature_to_bundle(signature)
# Put the value in files and verify with cosign
with (
NamedTemporaryFile(mode="w") as signature_file,
NamedTemporaryFile(mode="bw") as payload_file,
):
json.dump(signature_bundle, signature_file)
signature_file.flush()
payload_bytes = b64decode(signature_bundle["Payload"])
_write(payload_file, payload_bytes)
cmd = [
"cosign",
"verify-blob",
"--key",
pubkey,
"--bundle",
signature_file.name,
payload_file.name,
]
result = subprocess.run(cmd, capture_output=True)
if result.returncode != 0:
# XXX Raise instead?
return False
return result.stderr == b"Verified OK\n"
def get_runtime_name() -> str:
if platform.system() == "Linux":
return "podman"
return "docker"
def container_pull(image):
cmd = [get_runtime_name(), "pull", f"{image}"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
process.communicate()
def upgrade_container_image(image, tag, pubkey, registry: RegistryClient):
if not new_image_release():
return
hash = registry.get_manifest_hash(tag)
signatures = get_signatures(image, hash)
if len(signatures) < 1:
raise Exception("Unable to retrieve signatures")
print(f"Found {len(signatures)} signature(s) for {image}")
for signature in signatures:
signature_is_valid = verify_signature(signature, pubkey)
if not signature_is_valid:
raise Exception("Unable to verify signature")
print("✅ Signature is valid")
# At this point, the signatures are verified
# We store the signatures only now, to avoid storing unverified signatures
store_signatures(signatures, hash, pubkey)
# let's upgrade the image
# XXX Use the hash here to avoid race conditions
container_pull(image)
def get_file_hash(file):
with open(file, "rb") as f:
content = f.read()
return hashlib.sha256(content).hexdigest()
def load_signatures(image_hash, pubkey):
pubkey_signatures = SIGNATURES_PATH / get_file_hash(pubkey)
if not pubkey_signatures.exists():
msg = (
f"Cannot find a '{pubkey_signatures}' folder."
"You might need to download the image signatures first."
)
raise Exception(msg)
with open(pubkey_signatures / f"{image_hash}.json") as f:
return json.load(f)
def store_signatures(signatures, image_hash, pubkey):
"""
Store signatures locally in the SIGNATURE_PATH folder, like this:
~/.config/dangerzone/signatures/
<pubkey-hash>
<image-hash>.json
<image-hash>.json
The format used in the `.json` file is the one of `cosign download
signature`, which differs from the "bundle" one used afterwards.
It can be converted to the one expected by cosign verify --bundle with
the `signature_to_bundle()` function.
"""
def _get_digest(sig):
payload = json.loads(b64decode(sig["Payload"]))
return payload["critical"]["image"]["docker-manifest-digest"]
# All the signatures should share the same hash.
hashes = list(map(_get_digest, signatures))
if len(set(hashes)) != 1:
raise Exception("Signatures do not share the same image hash")
if f"sha256:{image_hash}" != hashes[0]:
raise Exception("Signatures do not match the given image hash")
pubkey_signatures = SIGNATURES_PATH / get_file_hash(pubkey)
pubkey_signatures.mkdir(parents=True, exist_ok=True)
with open(pubkey_signatures / f"{image_hash}.json", "w") as f:
json.dump(signatures, f)
def verify_local_image_signature(image, pubkey):
"""
Verifies that a local image has a valid signature
"""
image_hash = get_image_hash(image)
signatures = load_signatures(image_hash, pubkey)
if len(signatures) < 1:
raise Exception("No signatures found")
for signature in signatures:
if not verify_signature(signature, pubkey):
msg = f"Unable to verify signature for {image} with pubkey {pubkey}"
raise Exception(msg)
return True
def get_image_hash(image):
"""
Returns an image hash from a local image name
"""
cmd = [get_runtime_name(), "image", "inspect", image, "-f", "{{.Digest}}"]
result = subprocess.run(cmd, capture_output=True, check=True)
return result.stdout.strip().decode().removeprefix("sha256:")
def get_signatures(image, hash):
"""
Retrieve the signatures from cosign download signature and convert each one to the "cosign bundle" format.
"""
process = subprocess.run(
["cosign", "download", "signature", f"{image}@sha256:{hash}"],
capture_output=True,
check=True,
)
# XXX: Check the output first.
# Remove the last return, split on newlines, convert from JSON
signatures_raw = process.stdout.decode("utf-8").strip().split("\n")
return list(map(json.loads, signatures_raw))
class Image:
def __init__(self, registry, namespace, repository, tag="latest"):
self.registry = registry
self.namespace = namespace
self.repository = repository
self.tag = tag
def properties(self):
return (self.registry, self.namespace, self.repository, self.tag)
@property
def name_without_tag(self):
return f"{self.registry}/{self.namespace}/{self.repository}"
@property
def name_with_tag(self):
return f"{self.name_without_tag}:{self.tag}"
@classmethod
def from_string(cls, input_string):
"""Parses container image location into (registry, namespace, repository, tag)"""
pattern = (
r"^"
r"(?P<registry>[a-zA-Z0-9.-]+)/"
r"(?P<namespace>[a-zA-Z0-9-]+)/"
r"(?P<repository>[^:]+)"
r"(?::(?P<tag>[a-zA-Z0-9.-]+))?"
r"$"
)
match = re.match(pattern, input_string)
if not match:
raise ValueError("Malformed image location")
return cls(
match.group("registry"),
match.group("namespace"),
match.group("repository"),
match.group("tag") or "latest",
)
def parse_image_location(string):
return Image.from_string(string).properties()
@click.group()
def main():
pass
@main.command()
@click.argument("image")
@click.option("--pubkey", default="pub.key")
def upgrade_image(image, pubkey):
registry, namespace, repository, tag = parse_image_location(image)
registry_client = RegistryClient(registry, namespace, repository)
upgrade_container_image(image, tag, pubkey, registry_client)
@main.command()
@click.argument("image")
@click.option("--pubkey", default="pub.key")
def verify_local_image(image, pubkey):
# XXX remove a potential :tag
if verify_local_image_signature(image, pubkey):
click.echo(f"✅ The local image {image} has been signed with {pubkey}")
@main.command()
@click.argument("image")
def list_tags(image):
registry, org, package, _ = parse_image_location(image)
client = RegistryClient(registry, org, package)
tags = client.list_tags()
click.echo(f"Existing tags for {client.image}")
for tag in tags:
click.echo(tag)
@main.command()
@click.argument("image")
@click.argument("tag")
def get_manifest(image, tag):
registry, org, package, _ = parse_image_location(image)
client = RegistryClient(registry, org, package)
resp = client.get_manifest(tag, extra_headers={"Accept": OCI_IMAGE_MANIFEST})
click.echo(resp.content)
@main.command()
@click.argument("image")
@click.option(
"--repo",
default=DEFAULT_REPO,
help="The github repository to check the attestation for",
)
def attest(image: str, repo: str):
"""
Look up the image attestation to see if the image has been built
on Github runners, and from a given repository.
"""
if shutil.which("cosign") is None:
click.echo("The cosign binary is needed but not installed.")
raise click.Abort()
registry, org, package, tag = parse_image_location(image)
tag = tag or "latest"
client = RegistryClient(registry, org, package)
verified = verify_attestation(client, tag, repo)
if verified:
click.echo(
f"🎉 The image available at `{client.image}:{tag}` has been built by Github Runners from the `{repo}` repository"
)
if __name__ == "__main__":
main()
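Beyond these CLI entry points, the `RegistryClient` above can also be exercised directly; a minimal sketch (the import assumes `dev_scripts/` is on `sys.path`, and the image coordinates are illustrative):

```python
from registry import RegistryClient  # assumes dev_scripts/ is on sys.path

client = RegistryClient("ghcr.io", "freedomofpress", "dangerzone/dangerzone")
print(client.list_tags())  # all published tags for the image
print(client.get_manifest_hash("latest"))  # sha256 hex digest of the manifest
```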

180
dev_scripts/reproduce-image.py Executable file
View file

@ -0,0 +1,180 @@
#!/usr/bin/env python3
import argparse
import hashlib
import logging
import pathlib
import stat
import subprocess
import sys
import urllib.request
logger = logging.getLogger(__name__)
DIFFOCI_URL = "https://github.com/reproducible-containers/diffoci/releases/download/v0.1.5/diffoci-v0.1.5.linux-amd64"
DIFFOCI_CHECKSUM = "01d25fe690196945a6bd510d30559338aa489c034d3a1b895a0d82a4b860698f"
DIFFOCI_PATH = (
pathlib.Path.home() / ".local" / "share" / "dangerzone-dev" / "helpers" / "diffoci"
)
IMAGE_NAME = "dangerzone.rocks/dangerzone"
def run(*args):
"""Simple function that runs a command, validates it, and returns the output"""
logger.debug(f"Running command: {' '.join(args)}")
return subprocess.run(
args,
check=True,
stdout=subprocess.PIPE,
).stdout
def git_commit_get():
return run("git", "rev-parse", "--short", "HEAD").decode().strip()
def git_determine_tag():
return run("git", "describe", "--long", "--first-parent").decode().strip()[1:]
def git_verify(commit, source):
if commit not in source:
raise RuntimeError(
f"Image '{source}' does not seem to be built from commit '{commit}'"
)
def diffoci_hash_matches(diffoci):
"""Check if the hash of the downloaded diffoci bin matches the expected one."""
m = hashlib.sha256()
m.update(diffoci)
diffoci_checksum = m.hexdigest()
return diffoci_checksum == DIFFOCI_CHECKSUM
def diffoci_is_installed():
"""Determine if diffoci has been installed.
Determine if diffoci has been installed, by checking if the binary exists, and if
its hash is the expected one. If the binary exists but the hash is different, then
this is a sign that we need to update the local diffoci binary.
"""
if not DIFFOCI_PATH.exists():
return False
return diffoci_hash_matches(DIFFOCI_PATH.open("rb").read())
def diffoci_download():
"""Download the diffoci tool, based on a URL and its checksum."""
with urllib.request.urlopen(DIFFOCI_URL) as f:
diffoci_bin = f.read()
if not diffoci_hash_matches(diffoci_bin):
raise ValueError(
"Unexpected checksum for downloaded diffoci binary:"
f" {diffoci_checksum} !={DIFFOCI_CHECKSUM}"
)
DIFFOCI_PATH.parent.mkdir(parents=True, exist_ok=True)
DIFFOCI_PATH.open("wb+").write(diffoci_bin)
DIFFOCI_PATH.chmod(DIFFOCI_PATH.stat().st_mode | stat.S_IEXEC)
def diffoci_diff(source, local_target):
"""Diff the source image against the recently built target image using diffoci."""
target = f"podman://{local_target}"
try:
return run(
str(DIFFOCI_PATH),
"diff",
source,
target,
"--semantic",
"--verbose",
)
except subprocess.CalledProcessError as e:
error = e.stdout.decode()
raise RuntimeError(
f"Could not rebuild an identical image to {source}. Diffoci report:\n{error}"
)
def build_image(tag, use_cache=False):
"""Build the Dangerzone container image with a special tag."""
run(
"python3",
"./install/common/build-image.py",
"--no-save",
"--use-cache",
str(use_cache),
"--tag",
tag,
)
def parse_args():
image_tag = git_determine_tag()
# TODO: Remove the local "podman://" prefix once we have started pushing images to a
# remote.
default_image_name = f"podman://{IMAGE_NAME}:{image_tag}"
parser = argparse.ArgumentParser(
prog=sys.argv[0],
description="Dev script for verifying container image reproducibility",
)
parser.add_argument(
"--source",
default=default_image_name,
help=(
"The name of the image that you want to reproduce. If the image resides in"
" the local Docker / Podman engine, you can prefix it with podman:// or"
f" docker:// accordingly (default: {default_image_name})"
),
)
parser.add_argument(
"--use-cache",
default=False,
action="store_true",
help="Whether to reuse the build cache (off by default for better reproducibility)",
)
return parser.parse_args()
def main():
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
args = parse_args()
logger.info(f"Ensuring that current Git commit matches image '{args.source}'")
commit = git_commit_get()
git_verify(commit, args.source)
if not diffoci_is_installed():
logger.info(f"Downloading diffoci helper from {DIFFOCI_URL}")
diffoci_download()
tag = f"reproduce-{commit}"
target = f"{IMAGE_NAME}:{tag}"
logger.info(f"Building container image and tagging it as '{target}'")
build_image(tag, args.use_cache)
logger.info(
f"Ensuring that source image '{args.source}' is semantically identical with"
f" built image '{target}'"
)
try:
diffoci_diff(args.source, target)
except subprocess.CalledProcessError as e:
raise RuntimeError(
f"Could not reproduce image {args.source} for commit {commit}"
)
logger.info(f"Successfully reproduced image '{args.source}' from commit '{commit}'")
if __name__ == "__main__":
sys.exit(main())

View file

@ -44,20 +44,6 @@ doit <task>
* You can run `doit list --all -s` to see the full list of tasks, their
dependencies, and whether they are up to date.
* You can run `doit info <task>` to see which dependencies are missing.
* You can change this line in `pyproject.toml` to `true`, to allow using the
Docker/Podman build cache:
```
use_cache = true
```
> [!WARNING]
> Using caching may speed up image builds, but is not suitable for release
> artifacts. The ID of our base container image (Alpine Linux) does not change
> that often, but its APK package index does. So, if we use caching, we risk
> skipping the `apk upgrade` layer and end up with packages that are days
> behind.
* You can pass the following environment variables to the script, in order to
affect some global parameters:
- `CONTAINER_RUNTIME`: The container runtime to use. Either `podman` (default)

View file

@ -1,5 +1,11 @@
# gVisor integration
> [!NOTE]
> **Update on 2025-01-13:** There is no longer a copied container image under
> `/home/dangerzone/dangerzone-image/rootfs`. We now reuse the same container
> image both for the inner and outer container. See
> [#1048](https://github.com/freedomofpress/dangerzone/issues/1048).
Dangerzone has relied on the container runtime available in each supported
operating system (Docker Desktop on Windows / macOS, Podman on Linux) to isolate
the host from the sanitization process. The problem with this type of isolation

View file

@ -0,0 +1,23 @@
# Independent Container Updates
Since version 0.9.0, Dangerzone can ship container images independently
of software releases.
This is useful as images need to be kept updated with the latest security fixes.
## Nightly images and attestations
Each night, new images are built and pushed to our container registry, alongside
a provenance attestation, enabling anyone to verify that the image was built
by Github CI runners, from a defined source repository (in our case `freedomofpress/dangerzone`).
To verify the attestations against our expectations, use the following command:
```bash
poetry run ./dev_scripts/registry.py attest ghcr.io/freedomofpress/dangerzone/dangerzone:latest --repo freedomofpress/dangerzone
```
On success, it reports back:
```
🎉 The image available at `ghcr.io/freedomofpress/dangerzone/dangerzone:latest` has been built by Github runners from the `freedomofpress/dangerzone` repository.
```
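Under the hood, the `attest` command fetches the image manifest and its Sigstore bundle from the registry, writes them to temporary files, and verifies them with `cosign`. A rough sketch of that final verification step (file names and the identity regexp are illustrative):

```python
import subprocess

# manifest.json and bundle.json are assumed to have been fetched beforehand
subprocess.run(
    [
        "cosign", "verify-blob-attestation",
        "--bundle", "bundle.json",
        "--new-bundle-format",
        "--certificate-oidc-issuer", "https://token.actions.githubusercontent.com",
        "--certificate-identity-regexp", "^https://github.com/freedomofpress/dangerzone/",
        "manifest.json",
    ],
    check=True,
)
```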

View file

@ -0,0 +1,67 @@
# Reproducible builds
We want to improve the transparency and auditability of our build artifacts, and
a way to achieve this is via reproducible builds. For a broader understanding of
what reproducible builds entail, check out https://reproducible-builds.org/.
Our build artifacts consist of:
* Container images (`amd64` and `arm64` architectures)
* macOS installers (for Intel and Apple Silicon CPUs)
* Windows installer
* Fedora packages (for regular Fedora distros and Qubes)
* Debian packages (for Debian and Ubuntu)
As of this writing, only the following artifacts are reproducible:
* Container images (see [#1047](https://github.com/freedomofpress/dangerzone/issues/1047))
In the following sections, we'll mention some specifics about enforcing
reproducibility for each artifact type.
## Container image
### Updating the image
The fact that our image is reproducible also means that it's frozen in time.
That is, rebuilding the image without updating our Dockerfile will **not**
pull in security updates.
Here are the necessary variables that make up our image in the `Dockerfile.env`
file:
* `DEBIAN_IMAGE_DATE`: The date that the Debian container image was released
* `DEBIAN_ARCHIVE_DATE`: The Debian snapshot repo that we want to use
* `GVISOR_ARCHIVE_DATE`: The gVisor APT repo that we want to use
* `H2ORESTART_CHECKSUM`: The SHA-256 checksum of the H2ORestart plugin
* `H2ORESTART_VERSION`: The version of the H2ORestart plugin
If you update these values in `Dockerfile.env`, you must also create a new
Dockerfile with:
```
make Dockerfile
```
Updating `Dockerfile` without bumping `Dockerfile.in` is detected and should
trigger a CI error.
### Reproducing the image
For a simple way to reproduce a Dangerzone container image, you can check out the
commit the image was built from (you can find it in the `g<commit>` portion of the
image tag), and run the following command in a Linux environment:
```
./dev_scripts/reproduce-image.py --source <image>
```
This command will download the `diffoci` helper, build a container image from
the current Git commit, and ensure that the built image matches the source one,
with the exception of image names and file timestamps.
> [!TIP]
> If the source image is not pushed to a registry, and is local instead, you
> can prefix it with `docker://` or `podman://` accordingly, so that `diffoci`
> can load it from the local Docker / Podman container engine. For example:
>
> ```
> ./dev_scripts/reproduce-image.py --source podman://dangerzone.rocks/dangerzone:0.8.0-125-g725ce3b
> ```
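Since the `g<commit>` portion encodes the commit, extracting it can be scripted; a small sketch (the tag value is illustrative):

```python
import re

tag = "0.8.0-125-g725ce3b"  # illustrative `git describe`-style image tag
match = re.search(r"-g([0-9a-f]+)(?:-|$)", tag)
commit = match.group(1)  # "725ce3b"
```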

16
dodo.py
View file

@ -27,16 +27,6 @@ PARAM_APPLE_ID = {
"help": "The Apple developer ID that will be used to sign the .dmg",
}
PARAM_USE_CACHE = {
"name": "use_cache",
"long": "use-cache",
"help": (
"Whether to use cached results or not. For reproducibility reasons,"
" it's best to leave it to false"
),
"default": False,
}
### File dependencies
#
# Define all the file dependencies for our tasks in a single place, since some file
@ -63,9 +53,8 @@ TESSDATA_TARGETS = list_language_data()
IMAGE_DEPS = [
"Dockerfile",
"poetry.lock",
*list_files("dangerzone/conversion"),
"dangerzone/gvisor_wrapper/entrypoint.py",
*list_files("dangerzone/container_helpers"),
"install/common/build-image.py",
]
IMAGE_TARGETS = ["share/container.tar.gz", "share/image-id.txt"]
@ -206,11 +195,10 @@ def task_build_image():
return {
"actions": [
f"python install/common/build-image.py --use-cache=%(use_cache)s --runtime={CONTAINER_RUNTIME}",
f"python install/common/build-image.py --runtime={CONTAINER_RUNTIME}",
["cp", img_src, img_dst],
["cp", img_id_src, img_id_dst],
],
"params": [PARAM_USE_CACHE],
"file_dep": IMAGE_DEPS,
"targets": [img_src, img_dst, img_id_src, img_id_dst],
"task_dep": ["init_release_dir", "check_container_runtime"],

View file

@ -1,6 +1,5 @@
import argparse
import gzip
import os
import platform
import secrets
import subprocess
@ -9,7 +8,6 @@ from pathlib import Path
BUILD_CONTEXT = "dangerzone/"
IMAGE_NAME = "dangerzone.rocks/dangerzone"
REQUIREMENTS_TXT = "container-pip-requirements.txt"
if platform.system() in ["Darwin", "Windows"]:
CONTAINER_RUNTIME = "docker"
elif platform.system() == "Linux":
@ -29,6 +27,29 @@ def str2bool(v):
raise argparse.ArgumentTypeError("Boolean value expected.")
def determine_git_tag():
# Designate a unique tag for this image, depending on the Git commit it was created
# from:
# 1. If created from a Git tag (e.g., 0.8.0), the image tag will be `0.8.0`.
# 2. If created from a commit, it will be something like `0.8.0-31-g6bdaa7a`.
# 3. If the contents of the Git repo are dirty, we will append a unique identifier
# for this run, something like `0.8.0-31-g6bdaa7a-fdcb` or `0.8.0-fdcb`.
dirty_ident = secrets.token_hex(2)
return (
subprocess.check_output(
[
"git",
"describe",
"--long",
"--first-parent",
f"--dirty=-{dirty_ident}",
],
)
.decode()
.strip()[1:] # remove the "v" prefix of the tag.
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
@ -53,9 +74,14 @@ def main():
"--use-cache",
type=str2bool,
nargs="?",
default=False,
default=True,
const=True,
help="Use the builder's cache to speed up the builds (not suitable for release builds)",
help="Use the builder's cache to speed up the builds",
)
parser.add_argument(
"--tag",
default=None,
help="Provide a custom tag for the image (for development only)",
)
args = parser.parse_args()
@ -64,39 +90,13 @@ def main():
print(f"Building for architecture '{ARCH}'")
# Designate a unique tag for this image, depending on the Git commit it was created
# from:
# 1. If created from a Git tag (e.g., 0.8.0), the image tag will be `0.8.0`.
# 2. If created from a commit, it will be something like `0.8.0-31-g6bdaa7a`.
# 3. If the contents of the Git repo are dirty, we will append a unique identifier
# for this run, something like `0.8.0-31-g6bdaa7a-fdcb` or `0.8.0-fdcb`.
dirty_ident = secrets.token_hex(2)
tag = (
subprocess.check_output(
["git", "describe", "--long", "--first-parent", f"--dirty=-{dirty_ident}"],
)
.decode()
.strip()[1:] # remove the "v" prefix of the tag.
)
tag = args.tag or determine_git_tag()
image_name_tagged = IMAGE_NAME + ":" + tag
print(f"Will tag the container image as '{image_name_tagged}'")
with open(image_id_path, "w") as f:
f.write(tag)
print("Exporting container pip dependencies")
with ContainerPipDependencies():
if not args.use_cache:
print("Pulling base image")
subprocess.run(
[
args.runtime,
"pull",
"alpine:latest",
],
check=True,
)
# Build the container image, and tag it with the calculated tag
print("Building container image")
cache_args = [] if args.use_cache else ["--no-cache"]
@ -106,10 +106,6 @@ def main():
"build",
BUILD_CONTEXT,
*cache_args,
"--build-arg",
f"REQUIREMENTS_TXT={REQUIREMENTS_TXT}",
"--build-arg",
f"ARCH={ARCH}",
"-f",
"Dockerfile",
"--tag",
@ -145,31 +141,5 @@ def main():
cmd.wait(5)
class ContainerPipDependencies:
"""Generates PIP dependencies within container"""
def __enter__(self):
try:
container_requirements_txt = subprocess.check_output(
["poetry", "export", "--only", "container"], universal_newlines=True
)
except subprocess.CalledProcessError as e:
print("FAILURE", e.returncode, e.output)
print(f"REQUIREMENTS: {container_requirements_txt}")
# XXX Export container dependencies and exclude pymupdfb since it is not needed in container
req_txt_pymupdfb_stripped = container_requirements_txt.split("pymupdfb")[0]
with open(Path(BUILD_CONTEXT) / REQUIREMENTS_TXT, "w") as f:
if ARCH == "arm64":
# PyMuPDF needs to be built on ARM64 machines
# But is already provided as a prebuilt-wheel on other architectures
f.write(req_txt_pymupdfb_stripped)
else:
f.write(container_requirements_txt)
def __exit__(self, exc_type, exc_value, exc_tb):
print("Leaving the context...")
os.remove(Path(BUILD_CONTEXT) / REQUIREMENTS_TXT)
if __name__ == "__main__":
sys.exit(main())
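To illustrate the tagging scheme that `determine_git_tag` documents, here is a hypothetical mapping from `git describe` output to image tags (all values illustrative):

```python
# `git describe --long --first-parent --dirty=-<ident>` output → image tag
# (the leading "v" of the Git tag is stripped)
examples = {
    "v0.8.0-31-g6bdaa7a": "0.8.0-31-g6bdaa7a",            # 31 commits after tag 0.8.0
    "v0.8.0-31-g6bdaa7a-fdcb": "0.8.0-31-g6bdaa7a-fdcb",  # dirty working tree
}
for described, tag in examples.items():
    assert described[1:] == tag
```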

View file

@ -216,16 +216,11 @@ convert the documents within a secure sandbox.
%prep
%autosetup -p1 -n dangerzone-%{version}
# XXX: Bump the Python requirement in pyproject.toml from <3.13 to <3.14. Fedora
# 41 comes with Python 3.13 installed, but our pyproject.toml does not support
# it because PySide6 in PyPI works with Python 3.12 or earlier.
#
# This hack sidesteps this issue, and we haven't noticed any particular problem
# with the package that is built from that.
%if 0%{?fedora} == 41
sed -i 's/<3.13/<3.14/' pyproject.toml
%endif
# Bypass the version pin for Fedora as the 6.8.1.1 package is causing trouble
# A 6.8.1.1 package was only released with a wheel for macOS, but was picked by
# Fedora packagers. We cannot use "*" when PyPI is involved as it will fail to download the latest version.
# For Fedora, we can pick any of the released versions.
sed -i '/shiboken6 = \[/,/\]/c\shiboken6 = "*"' pyproject.toml
%generate_buildrequires
%pyproject_buildrequires -R

View file

@ -28,7 +28,7 @@ def main():
)
logger.info("Getting PyMuPDF deps as requirements.txt")
cmd = ["poetry", "export", "--only", "container"]
cmd = ["poetry", "export", "--only", "debian"]
container_requirements_txt = subprocess.check_output(cmd)
# XXX: Hack for Ubuntu Focal.

901
poetry.lock generated

File diff suppressed because it is too large.

View file

@ -1,4 +1,4 @@
[tool.poetry]
name = "dangerzone"
version = "0.8.1"
description = "Take potentially dangerous PDFs, office documents, or images and convert them to safe PDFs"
@ -13,9 +13,9 @@ include = [
]
[tool.poetry.dependencies]
python = ">=3.9,<3.13"
python = ">=3.9,<3.14"
click = "*"
appdirs = "*"
platformdirs = "*"
PySide6 = "^6.7.1"
PyMuPDF = "^1.23.3" # The version in Fedora 39
colorama = "*"
@ -23,6 +23,13 @@ pyxdg = {version = "*", platform = "linux"}
requests = "*"
markdown = "*"
packaging = "*"
# shiboken6 released a 6.8.1.1 version only for macOS
# and it's getting picked by poetry, so pin it instead.
shiboken6 = [
{version = "*", platform = "darwin"},
{version = "<6.8.1.1", platform = "linux"},
{version = "<6.8.1.1", platform = "win32"},
]
[tool.poetry.scripts]
dangerzone = 'dangerzone:main'
@ -35,6 +42,7 @@ cx_freeze = {version = "^7.2.5", platform = "win32"}
pywin32 = {version = "*", platform = "win32"}
pyinstaller = {version = "*", platform = "darwin"}
doit = "^0.36.0"
jinja2-cli = "^0.8.2"
# Dependencies required for linting the code.
[tool.poetry.group.lint.dependencies]
@ -57,7 +65,7 @@ strip-ansi = "*"
pytest-subprocess = "^1.5.2"
pytest-rerunfailures = "^14.0"
[tool.poetry.group.container.dependencies]
[tool.poetry.group.debian.dependencies]
pymupdf = "1.24.11" # Last version to support python 3.8 (needed for Ubuntu Focal support)
[tool.poetry.group.dev.dependencies]
@ -66,11 +74,6 @@ httpx = "^0.27.2"
[tool.doit]
verbosity = 3
[tool.doit.tasks.build_image]
# DO NOT change this to 'true' for release artifacts, else we risk building
# images that are a few days behind. See also: docs/developer/doit.md
use_cache = false
[tool.ruff.lint]
select = [
# isort

View file

@ -587,3 +587,57 @@ def test_installation_failure_return_false(qtbot: QtBot, mocker: MockerFixture)
assert "the following error occured" in widget.label.text()
assert "The image cannot be found" in widget.traceback.toPlainText()
def test_up_to_date_docker_desktop_does_nothing(
qtbot: QtBot, mocker: MockerFixture
) -> None:
# Set up the version check to pass
mock_app = mocker.MagicMock()
dummy = mocker.MagicMock(spec=Container)
dummy.check_docker_desktop_version.return_value = (True, "1.0.0")
dz = DangerzoneGui(mock_app, dummy)
window = MainWindow(dz)
qtbot.addWidget(window)
menu_actions = window.hamburger_button.menu().actions()
assert "Docker Desktop should be upgraded" not in [
a.toolTip() for a in menu_actions
]
def test_outdated_docker_desktop_displays_warning(
qtbot: QtBot, mocker: MockerFixture
) -> None:
# Set up the version check to fail
mock_app = mocker.MagicMock()
dummy = mocker.MagicMock(spec=Container)
dummy.check_docker_desktop_version.return_value = (False, "1.0.0")
dz = DangerzoneGui(mock_app, dummy)
load_svg_spy = mocker.spy(main_window_module, "load_svg_image")
window = MainWindow(dz)
qtbot.addWidget(window)
menu_actions = window.hamburger_button.menu().actions()
assert menu_actions[0].toolTip() == "Docker Desktop should be upgraded"
# Check that the hamburger icon has changed with the expected SVG image.
assert load_svg_spy.call_count == 4
assert (
load_svg_spy.call_args_list[2].args[0] == "hamburger_menu_update_dot_error.svg"
)
alert_spy = mocker.spy(window.alert, "launch")
# Clicking the menu item should open a warning message
def _check_alert_displayed() -> None:
alert_spy.assert_any_call()
if window.alert:
window.alert.close()
QtCore.QTimer.singleShot(0, _check_alert_displayed)
menu_actions[0].trigger()

View file

@ -1,4 +1,5 @@
import os
import platform
import pytest
from pytest_mock import MockerFixture
@ -108,6 +109,92 @@ class TestContainer(IsolationProviderTest):
with pytest.raises(errors.ImageNotPresentException):
provider.install()
@pytest.mark.skipif(
platform.system() not in ("Windows", "Darwin"),
reason="macOS and Windows specific",
)
def test_old_docker_desktop_version_is_detected(
self, mocker: MockerFixture, provider: Container, fp: FakeProcess
) -> None:
fp.register_subprocess(
[
"docker",
"version",
"--format",
"{{.Server.Platform.Name}}",
],
stdout="Docker Desktop 1.0.0 (173100)",
)
mocker.patch(
"dangerzone.isolation_provider.container.MINIMUM_DOCKER_DESKTOP",
{"Darwin": "1.0.1", "Windows": "1.0.1"},
)
assert (False, "1.0.0") == provider.check_docker_desktop_version()
@pytest.mark.skipif(
platform.system() not in ("Windows", "Darwin"),
reason="macOS and Windows specific",
)
def test_up_to_date_docker_desktop_version_is_detected(
self, mocker: MockerFixture, provider: Container, fp: FakeProcess
) -> None:
fp.register_subprocess(
[
"docker",
"version",
"--format",
"{{.Server.Platform.Name}}",
],
stdout="Docker Desktop 1.0.1 (173100)",
)
# Require version 1.0.1
mocker.patch(
"dangerzone.isolation_provider.container.MINIMUM_DOCKER_DESKTOP",
{"Darwin": "1.0.1", "Windows": "1.0.1"},
)
assert (True, "1.0.1") == provider.check_docker_desktop_version()
fp.register_subprocess(
[
"docker",
"version",
"--format",
"{{.Server.Platform.Name}}",
],
stdout="Docker Desktop 2.0.0 (173100)",
)
assert (True, "2.0.0") == provider.check_docker_desktop_version()
@pytest.mark.skipif(
platform.system() not in ("Windows", "Darwin"),
reason="macOS and Windows specific",
)
def test_docker_desktop_version_failure_returns_true(
self, mocker: MockerFixture, provider: Container, fp: FakeProcess
) -> None:
fp.register_subprocess(
[
"docker",
"version",
"--format",
"{{.Server.Platform.Name}}",
],
stderr="Oopsie",
returncode=1,
)
assert provider.check_docker_desktop_version() == (True, "")
@pytest.mark.skipif(
platform.system() != "Linux",
reason="Linux specific",
)
def test_linux_skips_desktop_version_check_returns_true(
self, mocker: MockerFixture, provider: Container
) -> None:
assert (True, "") == provider.check_docker_desktop_version()
class TestContainerTermination(IsolationProviderTermination):
pass

View file

@ -12,9 +12,9 @@ VERSION_FILE_NAME = "version.txt"
def test_get_resource_path() -> None:
share_dir = Path("share").resolve()
resource_path = Path(util.get_resource_path(VERSION_FILE_NAME)).parent
assert share_dir.samefile(
resource_path
), f"{share_dir} is not the same file as {resource_path}"
assert share_dir.samefile(resource_path), (
f"{share_dir} is not the same file as {resource_path}"
)
@pytest.mark.skipif(platform.system() != "Windows", reason="Windows-specific")