mirror of
https://github.com/freedomofpress/dangerzone.git
synced 2025-05-07 22:11:50 +02:00
Compare commits
13 commits
21921f917e
...
a0bfea441d
Author | SHA1 | Date | |
---|---|---|---|
![]() |
a0bfea441d | ||
![]() |
8d05b5779d | ||
![]() |
e1dbdff1da | ||
![]() |
a1402d5b6b | ||
![]() |
51f432be6b | ||
![]() |
69234507c4 | ||
![]() |
94fad78f94 | ||
![]() |
66600f32dc | ||
![]() |
d41f604969 | ||
![]() |
6d269572ae | ||
![]() |
c7ba9ee75c | ||
![]() |
418b68d4ca | ||
![]() |
294c3aacf0 |
27 changed files with 1319 additions and 422 deletions
248
.github/workflows/build-push-image.yml
vendored
Normal file
248
.github/workflows/build-push-image.yml
vendored
Normal file
|
@ -0,0 +1,248 @@
|
||||||
|
name: Build and push multi-arch container image
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
inputs:
|
||||||
|
registry:
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
registry_user:
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
image_name:
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
reproduce:
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
secrets:
|
||||||
|
registry_token:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
lint:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Install dev. dependencies
|
||||||
|
run: |-
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y git python3-poetry --no-install-recommends
|
||||||
|
poetry install --only package
|
||||||
|
|
||||||
|
- name: Verify that the Dockerfile matches the commited template and params
|
||||||
|
run: |-
|
||||||
|
cp Dockerfile Dockerfile.orig
|
||||||
|
make Dockerfile
|
||||||
|
diff Dockerfile.orig Dockerfile
|
||||||
|
|
||||||
|
prepare:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
outputs:
|
||||||
|
debian_archive_date: ${{ steps.params.outputs.debian_archive_date }}
|
||||||
|
source_date_epoch: ${{ steps.params.outputs.source_date_epoch }}
|
||||||
|
image: ${{ steps.params.outputs.full_image_name }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Compute image parameters
|
||||||
|
id: params
|
||||||
|
run: |
|
||||||
|
source Dockerfile.env
|
||||||
|
DEBIAN_ARCHIVE_DATE=$(date -u +'%Y%m%d')
|
||||||
|
SOURCE_DATE_EPOCH=$(date -u -d ${DEBIAN_ARCHIVE_DATE} +"%s")
|
||||||
|
TAG=${DEBIAN_ARCHIVE_DATE}-$(git describe --long --first-parent | tail -c +2)
|
||||||
|
FULL_IMAGE_NAME=${{ inputs.registry }}/${{ inputs.image_name }}:${TAG}
|
||||||
|
|
||||||
|
echo "debian_archive_date=${DEBIAN_ARCHIVE_DATE}" >> $GITHUB_OUTPUT
|
||||||
|
echo "source_date_epoch=${SOURCE_DATE_EPOCH}" >> $GITHUB_OUTPUT
|
||||||
|
echo "tag=${DEBIAN_ARCHIVE_DATE}-${TAG}" >> $GITHUB_OUTPUT
|
||||||
|
echo "full_image_name=${FULL_IMAGE_NAME}" >> $GITHUB_OUTPUT
|
||||||
|
echo "buildkit_image=${BUILDKIT_IMAGE}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
build:
|
||||||
|
name: Build ${{ matrix.platform.name }} image
|
||||||
|
runs-on: ${{ matrix.platform.runs-on }}
|
||||||
|
needs:
|
||||||
|
- prepare
|
||||||
|
outputs:
|
||||||
|
debian_archive_date: ${{ needs.prepare.outputs.debian_archive_date }}
|
||||||
|
source_date_epoch: ${{ needs.prepare.outputs.source_date_epoch }}
|
||||||
|
image: ${{ needs.prepare.outputs.image }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
platform:
|
||||||
|
- runs-on: "ubuntu-24.04"
|
||||||
|
name: "linux/amd64"
|
||||||
|
- runs-on: "ubuntu-24.04-arm"
|
||||||
|
name: "linux/arm64"
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Prepare
|
||||||
|
run: |
|
||||||
|
platform=${{ matrix.platform.name }}
|
||||||
|
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Login to GHCR
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ inputs.registry_user }}
|
||||||
|
password: ${{ secrets.registry_token }}
|
||||||
|
|
||||||
|
# Instructions for reproducibly building a container image are taken from:
|
||||||
|
# https://github.com/freedomofpress/repro-build?tab=readme-ov-file#build-and-push-a-container-image-on-github-actions
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver-opts: image=${{ needs.prepare.outputs.buildkit_image }}
|
||||||
|
|
||||||
|
- name: Build and push by digest
|
||||||
|
id: build
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
context: ./dangerzone/
|
||||||
|
file: Dockerfile
|
||||||
|
build-args: |
|
||||||
|
DEBIAN_ARCHIVE_DATE=${{ needs.prepare.outputs.debian_archive_date }}
|
||||||
|
SOURCE_DATE_EPOCH=${{ needs.prepare.outputs.source_date_epoch }}
|
||||||
|
provenance: false
|
||||||
|
outputs: type=image,"name=${{ inputs.registry }}/${{ inputs.image_name }}",push-by-digest=true,push=true,rewrite-timestamp=true,name-canonical=true
|
||||||
|
cache-from: type=gha
|
||||||
|
cache-to: type=gha,mode=max
|
||||||
|
|
||||||
|
- name: Export digest
|
||||||
|
run: |
|
||||||
|
mkdir -p ${{ runner.temp }}/digests
|
||||||
|
digest="${{ steps.build.outputs.digest }}"
|
||||||
|
touch "${{ runner.temp }}/digests/${digest#sha256:}"
|
||||||
|
echo "Image digest is: ${digest}"
|
||||||
|
|
||||||
|
- name: Upload digest
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: digests-${{ env.PLATFORM_PAIR }}
|
||||||
|
path: ${{ runner.temp }}/digests/*
|
||||||
|
if-no-files-found: error
|
||||||
|
retention-days: 1
|
||||||
|
|
||||||
|
merge:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs:
|
||||||
|
- build
|
||||||
|
outputs:
|
||||||
|
debian_archive_date: ${{ needs.build.outputs.debian_archive_date }}
|
||||||
|
source_date_epoch: ${{ needs.build.outputs.source_date_epoch }}
|
||||||
|
image: ${{ needs.build.outputs.image }}
|
||||||
|
digest_root: ${{ steps.image.outputs.digest_root }}
|
||||||
|
digest_amd64: ${{ steps.image.outputs.digest_amd64 }}
|
||||||
|
digest_arm64: ${{ steps.image.outputs.digest_arm64 }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Download digests
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
path: ${{ runner.temp }}/digests
|
||||||
|
pattern: digests-*
|
||||||
|
merge-multiple: true
|
||||||
|
|
||||||
|
- name: Login to GHCR
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ inputs.registry_user }}
|
||||||
|
password: ${{ secrets.registry_token }}
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
||||||
|
|
||||||
|
- name: Create manifest list and push
|
||||||
|
working-directory: ${{ runner.temp }}/digests
|
||||||
|
run: |
|
||||||
|
DIGESTS=$(printf '${{ needs.build.outputs.image }}@sha256:%s ' *)
|
||||||
|
docker buildx imagetools create -t ${{ needs.build.outputs.image }} ${DIGESTS}
|
||||||
|
|
||||||
|
- name: Inspect image
|
||||||
|
id: image
|
||||||
|
run: |
|
||||||
|
# Inspect the image
|
||||||
|
docker buildx imagetools inspect ${{ needs.build.outputs.image }}
|
||||||
|
docker buildx imagetools inspect ${{ needs.build.outputs.image }} --format "{{json .Manifest}}" > manifest
|
||||||
|
|
||||||
|
# Calculate and print the digests
|
||||||
|
digest_root=$(jq -r .digest manifest)
|
||||||
|
digest_amd64=$(jq -r '.manifests[] | select(.platform.architecture=="amd64") | .digest' manifest)
|
||||||
|
digest_arm64=$(jq -r '.manifests[] | select(.platform.architecture=="arm64") | .digest' manifest)
|
||||||
|
|
||||||
|
echo "The image digests are:"
|
||||||
|
echo " Root: $digest_root"
|
||||||
|
echo " linux/amd64: $digest_amd64"
|
||||||
|
echo " linux/arm64: $digest_arm64"
|
||||||
|
|
||||||
|
# NOTE: Set the digests as an output because the `env` context is not
|
||||||
|
# available to the inputs of a reusable workflow call.
|
||||||
|
echo "digest_root=$digest_root" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "digest_amd64=$digest_amd64" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "digest_arm64=$digest_arm64" >> "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
|
# This step calls the container workflow to generate provenance and push it to
|
||||||
|
# the container registry.
|
||||||
|
provenance:
|
||||||
|
needs:
|
||||||
|
- merge
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
manifest_type:
|
||||||
|
- root
|
||||||
|
- amd64
|
||||||
|
- arm64
|
||||||
|
permissions:
|
||||||
|
actions: read # for detecting the Github Actions environment.
|
||||||
|
id-token: write # for creating OIDC tokens for signing.
|
||||||
|
packages: write # for uploading attestations.
|
||||||
|
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0
|
||||||
|
with:
|
||||||
|
digest: ${{ needs.merge.outputs[format('digest_{0}', matrix.manifest_type)] }}
|
||||||
|
image: ${{ needs.merge.outputs.image }}
|
||||||
|
registry-username: ${{ inputs.registry_user }}
|
||||||
|
secrets:
|
||||||
|
registry-password: ${{ secrets.registry_token }}
|
||||||
|
|
||||||
|
# This step ensures that the image is reproducible
|
||||||
|
check-reproducibility:
|
||||||
|
if: ${{ inputs.reproduce }}
|
||||||
|
needs:
|
||||||
|
- merge
|
||||||
|
runs-on: ${{ matrix.platform.runs-on }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
platform:
|
||||||
|
- runs-on: "ubuntu-24.04"
|
||||||
|
name: "amd64"
|
||||||
|
- runs-on: "ubuntu-24.04-arm"
|
||||||
|
name: "arm64"
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Reproduce the same container image
|
||||||
|
run: |
|
||||||
|
./dev_scripts/reproduce-image.py \
|
||||||
|
--runtime \
|
||||||
|
docker \
|
||||||
|
--debian-archive-date \
|
||||||
|
${{ needs.merge.outputs.debian_archive_date }} \
|
||||||
|
--platform \
|
||||||
|
linux/${{ matrix.platform.name }} \
|
||||||
|
${{ needs.merge.outputs[format('digest_{0}', matrix.platform.name)] }}
|
17
.github/workflows/build.yml
vendored
17
.github/workflows/build.yml
vendored
|
@ -39,6 +39,8 @@ jobs:
|
||||||
version: "24.04"
|
version: "24.04"
|
||||||
- distro: ubuntu
|
- distro: ubuntu
|
||||||
version: "24.10"
|
version: "24.10"
|
||||||
|
- distro: ubuntu
|
||||||
|
version: "25.04"
|
||||||
- distro: debian
|
- distro: debian
|
||||||
version: bullseye
|
version: bullseye
|
||||||
- distro: debian
|
- distro: debian
|
||||||
|
@ -49,6 +51,8 @@ jobs:
|
||||||
version: "40"
|
version: "40"
|
||||||
- distro: fedora
|
- distro: fedora
|
||||||
version: "41"
|
version: "41"
|
||||||
|
- distro: fedora
|
||||||
|
version: "42"
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
|
@ -83,19 +87,12 @@ jobs:
|
||||||
id: cache-container-image
|
id: cache-container-image
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
||||||
path: |
|
path: |
|
||||||
share/container.tar.gz
|
share/container.tar
|
||||||
share/image-id.txt
|
share/image-id.txt
|
||||||
|
|
||||||
- name: Build and push Dangerzone image
|
- name: Build Dangerzone image
|
||||||
if: ${{ steps.cache-container-image.outputs.cache-hit != 'true' }}
|
if: ${{ steps.cache-container-image.outputs.cache-hit != 'true' }}
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get install -y python3-poetry
|
|
||||||
python3 ./install/common/build-image.py
|
python3 ./install/common/build-image.py
|
||||||
echo ${{ github.token }} | podman login ghcr.io -u USERNAME --password-stdin
|
|
||||||
gunzip -c share/container.tar.gz | podman load
|
|
||||||
tag=$(cat share/image-id.txt)
|
|
||||||
podman push \
|
|
||||||
dangerzone.rocks/dangerzone:$tag \
|
|
||||||
${{ env.IMAGE_REGISTRY }}/dangerzone/dangerzone:tag
|
|
||||||
|
|
57
.github/workflows/ci.yml
vendored
57
.github/workflows/ci.yml
vendored
|
@ -59,9 +59,9 @@ jobs:
|
||||||
id: cache-container-image
|
id: cache-container-image
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
||||||
path: |-
|
path: |-
|
||||||
share/container.tar.gz
|
share/container.tar
|
||||||
share/image-id.txt
|
share/image-id.txt
|
||||||
|
|
||||||
- name: Build Dangerzone container image
|
- name: Build Dangerzone container image
|
||||||
|
@ -72,8 +72,8 @@ jobs:
|
||||||
- name: Upload container image
|
- name: Upload container image
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: container.tar.gz
|
name: container.tar
|
||||||
path: share/container.tar.gz
|
path: share/container.tar
|
||||||
|
|
||||||
download-tessdata:
|
download-tessdata:
|
||||||
name: Download and cache Tesseract data
|
name: Download and cache Tesseract data
|
||||||
|
@ -192,6 +192,8 @@ jobs:
|
||||||
version: "24.04"
|
version: "24.04"
|
||||||
- distro: ubuntu
|
- distro: ubuntu
|
||||||
version: "24.10"
|
version: "24.10"
|
||||||
|
- distro: ubuntu
|
||||||
|
version: "25.04"
|
||||||
- distro: debian
|
- distro: debian
|
||||||
version: bullseye
|
version: bullseye
|
||||||
- distro: debian
|
- distro: debian
|
||||||
|
@ -224,9 +226,9 @@ jobs:
|
||||||
- name: Restore container cache
|
- name: Restore container cache
|
||||||
uses: actions/cache/restore@v4
|
uses: actions/cache/restore@v4
|
||||||
with:
|
with:
|
||||||
key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
||||||
path: |-
|
path: |-
|
||||||
share/container.tar.gz
|
share/container.tar
|
||||||
share/image-id.txt
|
share/image-id.txt
|
||||||
fail-on-cache-miss: true
|
fail-on-cache-miss: true
|
||||||
|
|
||||||
|
@ -259,6 +261,8 @@ jobs:
|
||||||
version: "24.04"
|
version: "24.04"
|
||||||
- distro: ubuntu
|
- distro: ubuntu
|
||||||
version: "24.10"
|
version: "24.10"
|
||||||
|
- distro: ubuntu
|
||||||
|
version: "25.04"
|
||||||
- distro: debian
|
- distro: debian
|
||||||
version: bullseye
|
version: bullseye
|
||||||
- distro: debian
|
- distro: debian
|
||||||
|
@ -306,7 +310,7 @@ jobs:
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
distro: ["fedora"]
|
distro: ["fedora"]
|
||||||
version: ["40", "41"]
|
version: ["40", "41", "42"]
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
@ -329,9 +333,9 @@ jobs:
|
||||||
- name: Restore container image
|
- name: Restore container image
|
||||||
uses: actions/cache/restore@v4
|
uses: actions/cache/restore@v4
|
||||||
with:
|
with:
|
||||||
key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
||||||
path: |-
|
path: |-
|
||||||
share/container.tar.gz
|
share/container.tar
|
||||||
share/image-id.txt
|
share/image-id.txt
|
||||||
fail-on-cache-miss: true
|
fail-on-cache-miss: true
|
||||||
|
|
||||||
|
@ -385,6 +389,8 @@ jobs:
|
||||||
version: "24.04"
|
version: "24.04"
|
||||||
- distro: ubuntu
|
- distro: ubuntu
|
||||||
version: "24.10"
|
version: "24.10"
|
||||||
|
- distro: ubuntu
|
||||||
|
version: "25.04"
|
||||||
- distro: debian
|
- distro: debian
|
||||||
version: bullseye
|
version: bullseye
|
||||||
- distro: debian
|
- distro: debian
|
||||||
|
@ -395,6 +401,8 @@ jobs:
|
||||||
version: "40"
|
version: "40"
|
||||||
- distro: fedora
|
- distro: fedora
|
||||||
version: "41"
|
version: "41"
|
||||||
|
- distro: fedora
|
||||||
|
version: "42"
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
|
@ -422,9 +430,9 @@ jobs:
|
||||||
- name: Restore container image
|
- name: Restore container image
|
||||||
uses: actions/cache/restore@v4
|
uses: actions/cache/restore@v4
|
||||||
with:
|
with:
|
||||||
key: v4-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
||||||
path: |-
|
path: |-
|
||||||
share/container.tar.gz
|
share/container.tar
|
||||||
share/image-id.txt
|
share/image-id.txt
|
||||||
fail-on-cache-miss: true
|
fail-on-cache-miss: true
|
||||||
|
|
||||||
|
@ -465,30 +473,3 @@ jobs:
|
||||||
# file successfully.
|
# file successfully.
|
||||||
xvfb-run -s '-ac' ./dev_scripts/env.py --distro ${{ matrix.distro }} --version ${{ matrix.version }} run --dev \
|
xvfb-run -s '-ac' ./dev_scripts/env.py --distro ${{ matrix.distro }} --version ${{ matrix.version }} run --dev \
|
||||||
bash -c 'cd dangerzone; poetry run make test'
|
bash -c 'cd dangerzone; poetry run make test'
|
||||||
|
|
||||||
check-reproducibility:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Install dev. dependencies
|
|
||||||
run: |-
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y git python3-poetry --no-install-recommends
|
|
||||||
poetry install --only package
|
|
||||||
|
|
||||||
- name: Verify that the Dockerfile matches the commited template and params
|
|
||||||
run: |-
|
|
||||||
cp Dockerfile Dockerfile.orig
|
|
||||||
make Dockerfile
|
|
||||||
diff Dockerfile.orig Dockerfile
|
|
||||||
|
|
||||||
- name: Build Dangerzone container image
|
|
||||||
run: |
|
|
||||||
python3 ./install/common/build-image.py --no-save
|
|
||||||
|
|
||||||
- name: Reproduce the same container image
|
|
||||||
run: |
|
|
||||||
./dev_scripts/reproduce-image.py
|
|
||||||
|
|
22
.github/workflows/release-container-image.yml
vendored
Normal file
22
.github/workflows/release-container-image.yml
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
name: Release multi-arch container image
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
- "test/**"
|
||||||
|
schedule:
|
||||||
|
- cron: "0 0 * * *" # Run every day at 00:00 UTC.
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-push-image:
|
||||||
|
uses: ./.github/workflows/build-push-image.yml
|
||||||
|
with:
|
||||||
|
registry: ghcr.io/${{ github.repository_owner }}
|
||||||
|
registry_user: ${{ github.actor }}
|
||||||
|
image_name: dangerzone/dangerzone
|
||||||
|
reproduce: true
|
||||||
|
secrets:
|
||||||
|
registry_token: ${{ secrets.GITHUB_TOKEN }}
|
17
.github/workflows/scan.yml
vendored
17
.github/workflows/scan.yml
vendored
|
@ -21,19 +21,12 @@ jobs:
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Install container build dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt install pipx
|
|
||||||
pipx install poetry
|
|
||||||
pipx inject poetry poetry-plugin-export
|
|
||||||
poetry install --only package
|
|
||||||
- name: Bump date of Debian snapshot archive
|
|
||||||
run: |
|
|
||||||
date=$(date "+%Y%m%d")
|
|
||||||
sed -i "s/DEBIAN_ARCHIVE_DATE=[0-9]\+/DEBIAN_ARCHIVE_DATE=${date}/" Dockerfile.env
|
|
||||||
make Dockerfile
|
|
||||||
- name: Build container image
|
- name: Build container image
|
||||||
run: python3 ./install/common/build-image.py --runtime docker --no-save
|
run: |
|
||||||
|
python3 ./install/common/build-image.py \
|
||||||
|
--debian-archive-date $(date "+%Y%m%d") \
|
||||||
|
--runtime docker
|
||||||
|
docker load -i share/container.tar
|
||||||
- name: Get image tag
|
- name: Get image tag
|
||||||
id: tag
|
id: tag
|
||||||
run: echo "tag=$(cat share/image-id.txt)" >> $GITHUB_OUTPUT
|
run: echo "tag=$(cat share/image-id.txt)" >> $GITHUB_OUTPUT
|
||||||
|
|
22
BUILD.md
22
BUILD.md
|
@ -109,28 +109,6 @@ sudo dnf install -y rpm-build podman python3 python3-devel python3-poetry-core \
|
||||||
pipx qt6-qtbase-gui
|
pipx qt6-qtbase-gui
|
||||||
```
|
```
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<details>
|
|
||||||
<summary><i>:memo: Expand this section if you are on Fedora 41.</i></summary>
|
|
||||||
</br>
|
|
||||||
|
|
||||||
The default Python version that ships with Fedora 41 (3.13) is not
|
|
||||||
compatible with PySide6, which requires Python 3.12 or earlier.
|
|
||||||
|
|
||||||
You can install Python 3.12 using the `python3.12` package.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
sudo dnf install -y python3.12
|
|
||||||
```
|
|
||||||
|
|
||||||
Poetry will automatically pick up the correct version when running.
|
|
||||||
</details>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
Install Poetry using `pipx`:
|
Install Poetry using `pipx`:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
|
|
|
@ -8,6 +8,8 @@ since 0.4.1, and this project adheres to [Semantic Versioning](https://semver.or
|
||||||
## [Unreleased](https://github.com/freedomofpress/dangerzone/compare/v0.8.1...HEAD)
|
## [Unreleased](https://github.com/freedomofpress/dangerzone/compare/v0.8.1...HEAD)
|
||||||
|
|
||||||
- Platform support: Drop support for Ubuntu Focal, since it's nearing end-of-life ([#1018](https://github.com/freedomofpress/dangerzone/issues/1018))
|
- Platform support: Drop support for Ubuntu Focal, since it's nearing end-of-life ([#1018](https://github.com/freedomofpress/dangerzone/issues/1018))
|
||||||
|
- Platform support: Add support for Fedora 42 ([#1091](https://github.com/freedomofpress/dangerzone/issues/1091))
|
||||||
|
- Platform support: Add support for Ubuntu 25.04 (Plucky Puffin)([#1090](https://github.com/freedomofpress/dangerzone/issues/1090))
|
||||||
|
|
||||||
## [0.8.1](https://github.com/freedomofpress/dangerzone/compare/v0.8.1...0.8.0)
|
## [0.8.1](https://github.com/freedomofpress/dangerzone/compare/v0.8.1...0.8.0)
|
||||||
|
|
||||||
|
|
79
Dockerfile
79
Dockerfile
|
@ -2,14 +2,14 @@
|
||||||
# Dockerfile args below. For more info about this file, read
|
# Dockerfile args below. For more info about this file, read
|
||||||
# docs/developer/reproducibility.md.
|
# docs/developer/reproducibility.md.
|
||||||
|
|
||||||
ARG DEBIAN_IMAGE_DATE=20250113
|
ARG DEBIAN_IMAGE_DATE=20250224
|
||||||
|
|
||||||
FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as dangerzone-image
|
FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim AS dangerzone-image
|
||||||
|
|
||||||
ARG GVISOR_ARCHIVE_DATE=20250120
|
ARG GVISOR_ARCHIVE_DATE=20250217
|
||||||
ARG DEBIAN_ARCHIVE_DATE=20250127
|
ARG DEBIAN_ARCHIVE_DATE=20250226
|
||||||
ARG H2ORESTART_CHECKSUM=7760dc2963332c50d15eee285933ec4b48d6a1de9e0c0f6082946f93090bd132
|
ARG H2ORESTART_CHECKSUM=452331f8603ef456264bd72db6fa8a11ca72b392019a8135c0b2f3095037d7b1
|
||||||
ARG H2ORESTART_VERSION=v0.7.0
|
ARG H2ORESTART_VERSION=v0.7.1
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
|
@ -22,8 +22,8 @@ RUN \
|
||||||
--mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \
|
--mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \
|
||||||
--mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \
|
--mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \
|
||||||
: "Hacky way to set a date for the Debian snapshot repos" && \
|
: "Hacky way to set a date for the Debian snapshot repos" && \
|
||||||
touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list.d/debian.sources && \
|
touch -d ${DEBIAN_ARCHIVE_DATE}Z /etc/apt/sources.list.d/debian.sources && \
|
||||||
touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list && \
|
touch -d ${DEBIAN_ARCHIVE_DATE}Z /etc/apt/sources.list && \
|
||||||
repro-sources-list.sh && \
|
repro-sources-list.sh && \
|
||||||
: "Setup APT to install gVisor from its separate APT repo" && \
|
: "Setup APT to install gVisor from its separate APT repo" && \
|
||||||
apt-get update && \
|
apt-get update && \
|
||||||
|
@ -52,9 +52,13 @@ RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \
|
||||||
&& rm /root/.wget-hsts
|
&& rm /root/.wget-hsts
|
||||||
|
|
||||||
# Create an unprivileged user both for gVisor and for running Dangerzone.
|
# Create an unprivileged user both for gVisor and for running Dangerzone.
|
||||||
|
# XXX: Make the shadow field "date of last password change" a constant
|
||||||
|
# number.
|
||||||
RUN addgroup --gid 1000 dangerzone
|
RUN addgroup --gid 1000 dangerzone
|
||||||
RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
|
RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
|
||||||
--disabled-password --home /home/dangerzone dangerzone
|
--disabled-password --home /home/dangerzone dangerzone \
|
||||||
|
&& chage -d 99999 dangerzone \
|
||||||
|
&& rm /etc/shadow-
|
||||||
|
|
||||||
# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
|
# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
|
||||||
# import it.
|
# import it.
|
||||||
|
@ -165,20 +169,50 @@ RUN mkdir /home/dangerzone/.containers
|
||||||
# The `ln` binary, even if you specify it by its full path, cannot run
|
# The `ln` binary, even if you specify it by its full path, cannot run
|
||||||
# (probably because `ld-linux.so` can't be found). For this reason, we have
|
# (probably because `ld-linux.so` can't be found). For this reason, we have
|
||||||
# to create the symlinks beforehand, in a previous build stage. Then, in an
|
# to create the symlinks beforehand, in a previous build stage. Then, in an
|
||||||
# empty contianer image (scratch images), we can copy these symlinks and the
|
# empty container image (scratch images), we can copy these symlinks and the
|
||||||
# /usr, and stich everything together.
|
# /usr, and stitch everything together.
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|
||||||
# Create the filesystem hierarchy that will be used to symlink /usr.
|
# Create the filesystem hierarchy that will be used to symlink /usr.
|
||||||
|
|
||||||
RUN mkdir /new_root
|
RUN mkdir -p \
|
||||||
RUN mkdir /new_root/root /new_root/run /new_root/tmp
|
/new_root \
|
||||||
RUN chmod 777 /new_root/tmp
|
/new_root/root \
|
||||||
|
/new_root/run \
|
||||||
|
/new_root/tmp \
|
||||||
|
/new_root/home/dangerzone/dangerzone-image/rootfs
|
||||||
|
|
||||||
|
# Copy the /etc and /var directories under the new root directory. Also,
|
||||||
|
# copy /etc/, /opt, and /usr to the Dangerzone image rootfs.
|
||||||
|
#
|
||||||
|
# NOTE: We also have to remove the resolv.conf file, in order to not leak any DNS
|
||||||
|
# servers added there during image build time.
|
||||||
|
RUN cp -r /etc /var /new_root/ \
|
||||||
|
&& rm /new_root/etc/resolv.conf
|
||||||
|
RUN cp -r /etc /opt /usr /new_root/home/dangerzone/dangerzone-image/rootfs \
|
||||||
|
&& rm /new_root/home/dangerzone/dangerzone-image/rootfs/etc/resolv.conf
|
||||||
|
|
||||||
RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
|
RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
|
||||||
RUN ln -s usr/bin /new_root/bin
|
RUN ln -s usr/bin /new_root/bin
|
||||||
RUN ln -s usr/lib /new_root/lib
|
RUN ln -s usr/lib /new_root/lib
|
||||||
RUN ln -s usr/lib64 /new_root/lib64
|
RUN ln -s usr/lib64 /new_root/lib64
|
||||||
RUN ln -s usr/sbin /new_root/sbin
|
RUN ln -s usr/sbin /new_root/sbin
|
||||||
|
RUN ln -s usr/bin /new_root/home/dangerzone/dangerzone-image/rootfs/bin
|
||||||
|
RUN ln -s usr/lib /new_root/home/dangerzone/dangerzone-image/rootfs/lib
|
||||||
|
RUN ln -s usr/lib64 /new_root/home/dangerzone/dangerzone-image/rootfs/lib64
|
||||||
|
|
||||||
|
# Fix permissions in /home/dangerzone, so that our entrypoint script can make
|
||||||
|
# changes in the following folders.
|
||||||
|
RUN chown dangerzone:dangerzone \
|
||||||
|
/new_root/home/dangerzone \
|
||||||
|
/new_root/home/dangerzone/dangerzone-image/
|
||||||
|
# Fix permissions in /tmp, so that it can be used by unprivileged users.
|
||||||
|
RUN chmod 777 /new_root/tmp
|
||||||
|
|
||||||
|
COPY container_helpers/entrypoint.py /new_root
|
||||||
|
# HACK: For reasons that we are not sure yet, we need to explicitly specify the
|
||||||
|
# modification time of this file.
|
||||||
|
RUN touch -d ${DEBIAN_ARCHIVE_DATE}Z /new_root/entrypoint.py
|
||||||
|
|
||||||
## Final image
|
## Final image
|
||||||
|
|
||||||
|
@ -188,24 +222,7 @@ FROM scratch
|
||||||
# /usr can be a symlink.
|
# /usr can be a symlink.
|
||||||
COPY --from=dangerzone-image /new_root/ /
|
COPY --from=dangerzone-image /new_root/ /
|
||||||
|
|
||||||
# Copy the bare minimum to run Dangerzone in the inner container image.
|
|
||||||
COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/
|
|
||||||
COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/
|
|
||||||
COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/
|
|
||||||
RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin
|
|
||||||
RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib
|
|
||||||
RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64
|
|
||||||
|
|
||||||
# Copy the bare minimum to let the security scanner find vulnerabilities.
|
|
||||||
COPY --from=dangerzone-image /etc/ /etc/
|
|
||||||
COPY --from=dangerzone-image /var/ /var/
|
|
||||||
|
|
||||||
# Allow our entrypoint script to make changes in the following folders.
|
|
||||||
RUN chown dangerzone:dangerzone /home/dangerzone /home/dangerzone/dangerzone-image/
|
|
||||||
|
|
||||||
# Switch to the dangerzone user for the rest of the script.
|
# Switch to the dangerzone user for the rest of the script.
|
||||||
USER dangerzone
|
USER dangerzone
|
||||||
|
|
||||||
COPY container_helpers/entrypoint.py /
|
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint.py"]
|
ENTRYPOINT ["/entrypoint.py"]
|
||||||
|
|
|
@ -1,9 +1,13 @@
|
||||||
# Can be bumped to the latest date in https://hub.docker.com/_/debian/tags?name=bookworm-
|
# Can be bumped to the latest date in https://hub.docker.com/_/debian/tags?name=bookworm-
|
||||||
DEBIAN_IMAGE_DATE=20250113
|
DEBIAN_IMAGE_DATE=20250224
|
||||||
# Can be bumped to today's date
|
# Can be bumped to today's date
|
||||||
DEBIAN_ARCHIVE_DATE=20250127
|
DEBIAN_ARCHIVE_DATE=20250226
|
||||||
# Can be bumped to the latest date in https://github.com/google/gvisor/tags
|
# Can be bumped to the latest date in https://github.com/google/gvisor/tags
|
||||||
GVISOR_ARCHIVE_DATE=20250120
|
GVISOR_ARCHIVE_DATE=20250217
|
||||||
# Can be bumped to the latest version and checksum from https://github.com/ebandal/H2Orestart/releases
|
# Can be bumped to the latest version and checksum from https://github.com/ebandal/H2Orestart/releases
|
||||||
H2ORESTART_CHECKSUM=7760dc2963332c50d15eee285933ec4b48d6a1de9e0c0f6082946f93090bd132
|
H2ORESTART_CHECKSUM=452331f8603ef456264bd72db6fa8a11ca72b392019a8135c0b2f3095037d7b1
|
||||||
H2ORESTART_VERSION=v0.7.0
|
H2ORESTART_VERSION=v0.7.1
|
||||||
|
|
||||||
|
# Buildkit image (taken from freedomofpress/repro-build)
|
||||||
|
BUILDKIT_IMAGE="docker.io/moby/buildkit:v19.0@sha256:14aa1b4dd92ea0a4cd03a54d0c6079046ea98cd0c0ae6176bdd7036ba370cbbe"
|
||||||
|
BUILDKIT_IMAGE_ROOTLESS="docker.io/moby/buildkit:v0.19.0-rootless@sha256:e901cffdad753892a7c3afb8b9972549fca02c73888cf340c91ed801fdd96d71"
|
||||||
|
|
|
@ -4,7 +4,7 @@
|
||||||
|
|
||||||
ARG DEBIAN_IMAGE_DATE={{DEBIAN_IMAGE_DATE}}
|
ARG DEBIAN_IMAGE_DATE={{DEBIAN_IMAGE_DATE}}
|
||||||
|
|
||||||
FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim as dangerzone-image
|
FROM debian:bookworm-${DEBIAN_IMAGE_DATE}-slim AS dangerzone-image
|
||||||
|
|
||||||
ARG GVISOR_ARCHIVE_DATE={{GVISOR_ARCHIVE_DATE}}
|
ARG GVISOR_ARCHIVE_DATE={{GVISOR_ARCHIVE_DATE}}
|
||||||
ARG DEBIAN_ARCHIVE_DATE={{DEBIAN_ARCHIVE_DATE}}
|
ARG DEBIAN_ARCHIVE_DATE={{DEBIAN_ARCHIVE_DATE}}
|
||||||
|
@ -22,8 +22,8 @@ RUN \
|
||||||
--mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \
|
--mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \
|
||||||
--mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \
|
--mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \
|
||||||
: "Hacky way to set a date for the Debian snapshot repos" && \
|
: "Hacky way to set a date for the Debian snapshot repos" && \
|
||||||
touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list.d/debian.sources && \
|
touch -d ${DEBIAN_ARCHIVE_DATE}Z /etc/apt/sources.list.d/debian.sources && \
|
||||||
touch -d ${DEBIAN_ARCHIVE_DATE} /etc/apt/sources.list && \
|
touch -d ${DEBIAN_ARCHIVE_DATE}Z /etc/apt/sources.list && \
|
||||||
repro-sources-list.sh && \
|
repro-sources-list.sh && \
|
||||||
: "Setup APT to install gVisor from its separate APT repo" && \
|
: "Setup APT to install gVisor from its separate APT repo" && \
|
||||||
apt-get update && \
|
apt-get update && \
|
||||||
|
@ -52,9 +52,13 @@ RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \
|
||||||
&& rm /root/.wget-hsts
|
&& rm /root/.wget-hsts
|
||||||
|
|
||||||
# Create an unprivileged user both for gVisor and for running Dangerzone.
|
# Create an unprivileged user both for gVisor and for running Dangerzone.
|
||||||
|
# XXX: Make the shadow field "date of last password change" a constant
|
||||||
|
# number.
|
||||||
RUN addgroup --gid 1000 dangerzone
|
RUN addgroup --gid 1000 dangerzone
|
||||||
RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
|
RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
|
||||||
--disabled-password --home /home/dangerzone dangerzone
|
--disabled-password --home /home/dangerzone dangerzone \
|
||||||
|
&& chage -d 99999 dangerzone \
|
||||||
|
&& rm /etc/shadow-
|
||||||
|
|
||||||
# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
|
# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
|
||||||
# import it.
|
# import it.
|
||||||
|
@ -165,20 +169,50 @@ RUN mkdir /home/dangerzone/.containers
|
||||||
# The `ln` binary, even if you specify it by its full path, cannot run
|
# The `ln` binary, even if you specify it by its full path, cannot run
|
||||||
# (probably because `ld-linux.so` can't be found). For this reason, we have
|
# (probably because `ld-linux.so` can't be found). For this reason, we have
|
||||||
# to create the symlinks beforehand, in a previous build stage. Then, in an
|
# to create the symlinks beforehand, in a previous build stage. Then, in an
|
||||||
# empty contianer image (scratch images), we can copy these symlinks and the
|
# empty container image (scratch images), we can copy these symlinks and the
|
||||||
# /usr, and stich everything together.
|
# /usr, and stitch everything together.
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|
||||||
# Create the filesystem hierarchy that will be used to symlink /usr.
|
# Create the filesystem hierarchy that will be used to symlink /usr.
|
||||||
|
|
||||||
RUN mkdir /new_root
|
RUN mkdir -p \
|
||||||
RUN mkdir /new_root/root /new_root/run /new_root/tmp
|
/new_root \
|
||||||
RUN chmod 777 /new_root/tmp
|
/new_root/root \
|
||||||
|
/new_root/run \
|
||||||
|
/new_root/tmp \
|
||||||
|
/new_root/home/dangerzone/dangerzone-image/rootfs
|
||||||
|
|
||||||
|
# Copy the /etc and /var directories under the new root directory. Also,
|
||||||
|
# copy /etc/, /opt, and /usr to the Dangerzone image rootfs.
|
||||||
|
#
|
||||||
|
# NOTE: We also have to remove the resolv.conf file, in order to not leak any
|
||||||
|
# DNS servers added there during image build time.
|
||||||
|
RUN cp -r /etc /var /new_root/ \
|
||||||
|
&& rm /new_root/etc/resolv.conf
|
||||||
|
RUN cp -r /etc /opt /usr /new_root/home/dangerzone/dangerzone-image/rootfs \
|
||||||
|
&& rm /new_root/home/dangerzone/dangerzone-image/rootfs/etc/resolv.conf
|
||||||
|
|
||||||
RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
|
RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
|
||||||
RUN ln -s usr/bin /new_root/bin
|
RUN ln -s usr/bin /new_root/bin
|
||||||
RUN ln -s usr/lib /new_root/lib
|
RUN ln -s usr/lib /new_root/lib
|
||||||
RUN ln -s usr/lib64 /new_root/lib64
|
RUN ln -s usr/lib64 /new_root/lib64
|
||||||
RUN ln -s usr/sbin /new_root/sbin
|
RUN ln -s usr/sbin /new_root/sbin
|
||||||
|
RUN ln -s usr/bin /new_root/home/dangerzone/dangerzone-image/rootfs/bin
|
||||||
|
RUN ln -s usr/lib /new_root/home/dangerzone/dangerzone-image/rootfs/lib
|
||||||
|
RUN ln -s usr/lib64 /new_root/home/dangerzone/dangerzone-image/rootfs/lib64
|
||||||
|
|
||||||
|
# Fix permissions in /home/dangerzone, so that our entrypoint script can make
|
||||||
|
# changes in the following folders.
|
||||||
|
RUN chown dangerzone:dangerzone \
|
||||||
|
/new_root/home/dangerzone \
|
||||||
|
/new_root/home/dangerzone/dangerzone-image/
|
||||||
|
# Fix permissions in /tmp, so that it can be used by unprivileged users.
|
||||||
|
RUN chmod 777 /new_root/tmp
|
||||||
|
|
||||||
|
COPY container_helpers/entrypoint.py /new_root
|
||||||
|
# HACK: For reasons that we are not sure yet, we need to explicitly specify the
|
||||||
|
# modification time of this file.
|
||||||
|
RUN touch -d ${DEBIAN_ARCHIVE_DATE}Z /new_root/entrypoint.py
|
||||||
|
|
||||||
## Final image
|
## Final image
|
||||||
|
|
||||||
|
@ -188,24 +222,7 @@ FROM scratch
|
||||||
# /usr can be a symlink.
|
# /usr can be a symlink.
|
||||||
COPY --from=dangerzone-image /new_root/ /
|
COPY --from=dangerzone-image /new_root/ /
|
||||||
|
|
||||||
# Copy the bare minimum to run Dangerzone in the inner container image.
|
|
||||||
COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/
|
|
||||||
COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/
|
|
||||||
COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/
|
|
||||||
RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin
|
|
||||||
RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib
|
|
||||||
RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64
|
|
||||||
|
|
||||||
# Copy the bare minimum to let the security scanner find vulnerabilities.
|
|
||||||
COPY --from=dangerzone-image /etc/ /etc/
|
|
||||||
COPY --from=dangerzone-image /var/ /var/
|
|
||||||
|
|
||||||
# Allow our entrypoint script to make changes in the following folders.
|
|
||||||
RUN chown dangerzone:dangerzone /home/dangerzone /home/dangerzone/dangerzone-image/
|
|
||||||
|
|
||||||
# Switch to the dangerzone user for the rest of the script.
|
# Switch to the dangerzone user for the rest of the script.
|
||||||
USER dangerzone
|
USER dangerzone
|
||||||
|
|
||||||
COPY container_helpers/entrypoint.py /
|
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint.py"]
|
ENTRYPOINT ["/entrypoint.py"]
|
||||||
|
|
|
@ -22,6 +22,7 @@ On Linux, Dangerzone uses [Podman](https://podman.io/) instead of Docker Desktop
|
||||||
an isolated environment. It will be installed automatically when installing Dangerzone.
|
an isolated environment. It will be installed automatically when installing Dangerzone.
|
||||||
|
|
||||||
Dangerzone is available for:
|
Dangerzone is available for:
|
||||||
|
- Ubuntu 25.04 (plucky)
|
||||||
- Ubuntu 24.10 (oracular)
|
- Ubuntu 24.10 (oracular)
|
||||||
- Ubuntu 24.04 (noble)
|
- Ubuntu 24.04 (noble)
|
||||||
- Ubuntu 22.04 (jammy)
|
- Ubuntu 22.04 (jammy)
|
||||||
|
@ -268,7 +269,7 @@ Our [GitHub Releases page](https://github.com/freedomofpress/dangerzone/releases
|
||||||
hosts the following files:
|
hosts the following files:
|
||||||
* Windows installer (`Dangerzone-<version>.msi`)
|
* Windows installer (`Dangerzone-<version>.msi`)
|
||||||
* macOS archives (`Dangerzone-<version>-<arch>.dmg`)
|
* macOS archives (`Dangerzone-<version>-<arch>.dmg`)
|
||||||
* Container images (`container-<version>-<arch>.tar.gz`)
|
* Container images (`container-<version>-<arch>.tar`)
|
||||||
* Source package (`dangerzone-<version>.tar.gz`)
|
* Source package (`dangerzone-<version>.tar.gz`)
|
||||||
|
|
||||||
All these files are accompanied by signatures (as `.asc` files). We'll explain
|
All these files are accompanied by signatures (as `.asc` files). We'll explain
|
||||||
|
@ -296,7 +297,7 @@ gpg --verify Dangerzone-0.6.1-i686.dmg.asc Dangerzone-0.6.1-i686.dmg
|
||||||
For the container images:
|
For the container images:
|
||||||
|
|
||||||
```
|
```
|
||||||
gpg --verify container-0.6.1-i686.tar.gz.asc container-0.6.1-i686.tar.gz
|
gpg --verify container-0.6.1-i686.tar.asc container-0.6.1-i686.tar
|
||||||
```
|
```
|
||||||
|
|
||||||
For the source package:
|
For the source package:
|
||||||
|
|
10
RELEASE.md
10
RELEASE.md
|
@ -17,6 +17,7 @@ Here is a list of tasks that should be done before issuing the release:
|
||||||
- [ ] Bump the Debian version by adding a new changelog entry in `debian/changelog`
|
- [ ] Bump the Debian version by adding a new changelog entry in `debian/changelog`
|
||||||
- [ ] [Bump the minimum Docker Desktop versions](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#bump-the-minimum-docker-desktop-version) in `isolation_provider/container.py`
|
- [ ] [Bump the minimum Docker Desktop versions](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#bump-the-minimum-docker-desktop-version) in `isolation_provider/container.py`
|
||||||
- [ ] Bump the dates and versions in the `Dockerfile`
|
- [ ] Bump the dates and versions in the `Dockerfile`
|
||||||
|
- [ ] Update the download links in our `INSTALL.md` page to point to the new version (the download links will be populated after the release)
|
||||||
- [ ] Update screenshot in `README.md`, if necessary
|
- [ ] Update screenshot in `README.md`, if necessary
|
||||||
- [ ] CHANGELOG.md should be updated to include a list of all major changes since the last release
|
- [ ] CHANGELOG.md should be updated to include a list of all major changes since the last release
|
||||||
- [ ] A draft release should be created. Copy the release notes text from the template at [`docs/templates/release-notes`](https://github.com/freedomofpress/dangerzone/tree/main/docs/templates/)
|
- [ ] A draft release should be created. Copy the release notes text from the template at [`docs/templates/release-notes`](https://github.com/freedomofpress/dangerzone/tree/main/docs/templates/)
|
||||||
|
@ -150,7 +151,7 @@ Here is what you need to do:
|
||||||
poetry run ./install/common/download-tessdata.py
|
poetry run ./install/common/download-tessdata.py
|
||||||
|
|
||||||
# Copy the container image to the assets folder
|
# Copy the container image to the assets folder
|
||||||
cp share/container.tar.gz ~dz/release-assets/$VERSION/dangerzone-$VERSION-arm64.tar.gz
|
cp share/container.tar ~dz/release-assets/$VERSION/dangerzone-$VERSION-arm64.tar
|
||||||
cp share/image-id.txt ~dz/release-assets/$VERSION/.
|
cp share/image-id.txt ~dz/release-assets/$VERSION/.
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -227,7 +228,7 @@ The Windows release is performed in a Windows 11 virtual machine (as opposed to
|
||||||
|
|
||||||
- [ ] Copy the container image into the VM
|
- [ ] Copy the container image into the VM
|
||||||
> [!IMPORTANT]
|
> [!IMPORTANT]
|
||||||
> Instead of running `python .\install\windows\build-image.py` in the VM, run the build image script on the host (making sure to build for `linux/amd64`). Copy `share/container.tar.gz` and `share/image-id.txt` from the host into the `share` folder in the VM.
|
> Instead of running `python .\install\windows\build-image.py` in the VM, run the build image script on the host (making sure to build for `linux/amd64`). Copy `share/container.tar` and `share/image-id.txt` from the host into the `share` folder in the VM.
|
||||||
- [ ] Run `poetry run .\install\windows\build-app.bat`
|
- [ ] Run `poetry run .\install\windows\build-app.bat`
|
||||||
- [ ] When you're done you will have `dist\Dangerzone.msi`
|
- [ ] When you're done you will have `dist\Dangerzone.msi`
|
||||||
|
|
||||||
|
@ -318,9 +319,8 @@ To publish the release, you can follow these steps:
|
||||||
|
|
||||||
- [ ] Run container scan on the produced container images (some time may have passed since the artifacts were built)
|
- [ ] Run container scan on the produced container images (some time may have passed since the artifacts were built)
|
||||||
```bash
|
```bash
|
||||||
gunzip --keep -c ./share/container.tar.gz > /tmp/container.tar
|
|
||||||
docker pull anchore/grype:latest
|
docker pull anchore/grype:latest
|
||||||
docker run --rm -v /tmp/container.tar:/container.tar anchore/grype:latest /container.tar
|
docker run --rm -v ./share/container.tar:/container.tar anchore/grype:latest /container.tar
|
||||||
```
|
```
|
||||||
|
|
||||||
- [ ] Collect the assets in a single directory, calculate their SHA-256 hashes, and sign them.
|
- [ ] Collect the assets in a single directory, calculate their SHA-256 hashes, and sign them.
|
||||||
|
@ -341,7 +341,7 @@ To publish the release, you can follow these steps:
|
||||||
|
|
||||||
- [ ] Update the [Dangerzone website](https://github.com/freedomofpress/dangerzone.rocks) to link to the new installers.
|
- [ ] Update the [Dangerzone website](https://github.com/freedomofpress/dangerzone.rocks) to link to the new installers.
|
||||||
- [ ] Update the brew cask release of Dangerzone with a [PR like this one](https://github.com/Homebrew/homebrew-cask/pull/116319)
|
- [ ] Update the brew cask release of Dangerzone with a [PR like this one](https://github.com/Homebrew/homebrew-cask/pull/116319)
|
||||||
- [ ] Update version and download links in `README.md`
|
- [ ] Update version and links to our installation instructions (`INSTALL.md`) in `README.md`
|
||||||
|
|
||||||
## Post-release
|
## Post-release
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
import gzip
|
|
||||||
import logging
|
import logging
|
||||||
import platform
|
import platform
|
||||||
import shutil
|
import shutil
|
||||||
|
@ -96,18 +95,26 @@ def list_image_tags() -> List[str]:
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def add_image_tag(image_id: str, new_tag: str) -> None:
|
||||||
|
"""Add a tag to the Dangerzone image."""
|
||||||
|
log.debug(f"Adding tag '{new_tag}' to image '{image_id}'")
|
||||||
|
subprocess.check_output(
|
||||||
|
[get_runtime(), "tag", image_id, new_tag],
|
||||||
|
startupinfo=get_subprocess_startupinfo(),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def delete_image_tag(tag: str) -> None:
|
def delete_image_tag(tag: str) -> None:
|
||||||
"""Delete a Dangerzone image tag."""
|
"""Delete a Dangerzone image tag."""
|
||||||
name = CONTAINER_NAME + ":" + tag
|
log.warning(f"Deleting old container image: {tag}")
|
||||||
log.warning(f"Deleting old container image: {name}")
|
|
||||||
try:
|
try:
|
||||||
subprocess.check_output(
|
subprocess.check_output(
|
||||||
[get_runtime(), "rmi", "--force", name],
|
[get_runtime(), "rmi", "--force", tag],
|
||||||
startupinfo=get_subprocess_startupinfo(),
|
startupinfo=get_subprocess_startupinfo(),
|
||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log.warning(
|
log.warning(
|
||||||
f"Couldn't delete old container image '{name}', so leaving it there."
|
f"Couldn't delete old container image '{tag}', so leaving it there."
|
||||||
f" Original error: {e}"
|
f" Original error: {e}"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -120,30 +127,44 @@ def get_expected_tag() -> str:
|
||||||
|
|
||||||
def load_image_tarball() -> None:
|
def load_image_tarball() -> None:
|
||||||
log.info("Installing Dangerzone container image...")
|
log.info("Installing Dangerzone container image...")
|
||||||
p = subprocess.Popen(
|
tarball_path = get_resource_path("container.tar")
|
||||||
[get_runtime(), "load"],
|
try:
|
||||||
stdin=subprocess.PIPE,
|
res = subprocess.run(
|
||||||
startupinfo=get_subprocess_startupinfo(),
|
[get_runtime(), "load", "-i", tarball_path],
|
||||||
)
|
startupinfo=get_subprocess_startupinfo(),
|
||||||
|
capture_output=True,
|
||||||
chunk_size = 4 << 20
|
check=True,
|
||||||
compressed_container_path = get_resource_path("container.tar.gz")
|
)
|
||||||
with gzip.open(compressed_container_path) as f:
|
except subprocess.CalledProcessError as e:
|
||||||
while True:
|
if e.stderr:
|
||||||
chunk = f.read(chunk_size)
|
error = e.stderr.decode()
|
||||||
if len(chunk) > 0:
|
|
||||||
if p.stdin:
|
|
||||||
p.stdin.write(chunk)
|
|
||||||
else:
|
|
||||||
break
|
|
||||||
_, err = p.communicate()
|
|
||||||
if p.returncode < 0:
|
|
||||||
if err:
|
|
||||||
error = err.decode()
|
|
||||||
else:
|
else:
|
||||||
error = "No output"
|
error = "No output"
|
||||||
raise errors.ImageInstallationException(
|
raise errors.ImageInstallationException(
|
||||||
f"Could not install container image: {error}"
|
f"Could not install container image: {error}"
|
||||||
)
|
)
|
||||||
|
|
||||||
log.info("Successfully installed container image from")
|
# Loading an image built with Buildkit in Podman 3.4 messes up its name. The tag
|
||||||
|
# somehow becomes the name of the loaded image [1].
|
||||||
|
#
|
||||||
|
# We know that older Podman versions are not generally affected, since Podman v3.0.1
|
||||||
|
# on Debian Bullseye works properly. Also, Podman v4.0 is not affected, so it makes
|
||||||
|
# sense to target only Podman v3.4 for a fix.
|
||||||
|
#
|
||||||
|
# The fix is simple, tag the image properly based on the expected tag from
|
||||||
|
# `share/image-id.txt` and delete the incorrect tag.
|
||||||
|
#
|
||||||
|
# [1] https://github.com/containers/podman/issues/16490
|
||||||
|
if get_runtime_name() == "podman" and get_runtime_version() == (3, 4):
|
||||||
|
expected_tag = get_expected_tag()
|
||||||
|
bad_tag = f"localhost/{expected_tag}:latest"
|
||||||
|
good_tag = f"{CONTAINER_NAME}:{expected_tag}"
|
||||||
|
|
||||||
|
log.debug(
|
||||||
|
f"Dangerzone images loaded in Podman v3.4 usually have an invalid tag."
|
||||||
|
" Fixing it..."
|
||||||
|
)
|
||||||
|
add_image_tag(bad_tag, good_tag)
|
||||||
|
delete_image_tag(bad_tag)
|
||||||
|
|
||||||
|
log.info("Successfully installed container image")
|
||||||
|
|
|
@ -3,6 +3,7 @@ import os
|
||||||
import platform
|
import platform
|
||||||
import tempfile
|
import tempfile
|
||||||
import typing
|
import typing
|
||||||
|
from multiprocessing import freeze_support
|
||||||
from multiprocessing.pool import ThreadPool
|
from multiprocessing.pool import ThreadPool
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import List, Optional
|
from typing import List, Optional
|
||||||
|
@ -1220,6 +1221,9 @@ class DocumentsListWidget(QtWidgets.QListWidget):
|
||||||
def start_conversion(self) -> None:
|
def start_conversion(self) -> None:
|
||||||
if not self.thread_pool_initized:
|
if not self.thread_pool_initized:
|
||||||
max_jobs = self.dangerzone.isolation_provider.get_max_parallel_conversions()
|
max_jobs = self.dangerzone.isolation_provider.get_max_parallel_conversions()
|
||||||
|
# Call freeze_support() to avoid passing unknown options to the subprocess.
|
||||||
|
# See https://github.com/freedomofpress/dangerzone/issues/873
|
||||||
|
freeze_support()
|
||||||
self.thread_pool = ThreadPool(max_jobs)
|
self.thread_pool = ThreadPool(max_jobs)
|
||||||
|
|
||||||
for doc in self.docs_list:
|
for doc in self.docs_list:
|
||||||
|
|
|
@ -97,6 +97,7 @@ class Container(IsolationProvider):
|
||||||
f"Could not find a Dangerzone container image with tag '{expected_tag}'"
|
f"Could not find a Dangerzone container image with tag '{expected_tag}'"
|
||||||
)
|
)
|
||||||
for tag in old_tags:
|
for tag in old_tags:
|
||||||
|
tag = container_utils.CONTAINER_NAME + ":" + tag
|
||||||
container_utils.delete_image_tag(tag)
|
container_utils.delete_image_tag(tag)
|
||||||
else:
|
else:
|
||||||
return True
|
return True
|
||||||
|
|
|
@ -130,7 +130,7 @@ def is_qubes_native_conversion() -> bool:
|
||||||
# This disambiguates if it is running a Qubes targetted build or not
|
# This disambiguates if it is running a Qubes targetted build or not
|
||||||
# (Qubes-specific builds don't ship the container image)
|
# (Qubes-specific builds don't ship the container image)
|
||||||
|
|
||||||
compressed_container_path = get_resource_path("container.tar.gz")
|
container_image_path = get_resource_path("container.tar")
|
||||||
return not os.path.exists(compressed_container_path)
|
return not os.path.exists(container_image_path)
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
|
|
@ -103,14 +103,6 @@ RUN apt-get update \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# NOTE: Fedora 41 comes with Python 3.13 installed. Our Python project is not compatible
|
|
||||||
# yet with Python 3.13, because PySide6 cannot work with this Python version. To
|
|
||||||
# sidestep this, install Python 3.12 *only* in dev environments.
|
|
||||||
DOCKERFILE_BUILD_DEV_FEDORA_41_DEPS = r"""
|
|
||||||
# Install Python 3.12 since our project is not compatible yet with Python 3.13.
|
|
||||||
RUN dnf install -y python3.12
|
|
||||||
"""
|
|
||||||
|
|
||||||
# FIXME: Install Poetry on Fedora via package manager.
|
# FIXME: Install Poetry on Fedora via package manager.
|
||||||
DOCKERFILE_BUILD_DEV_FEDORA_DEPS = r"""
|
DOCKERFILE_BUILD_DEV_FEDORA_DEPS = r"""
|
||||||
RUN dnf install -y git rpm-build podman python3 python3-devel python3-poetry-core \
|
RUN dnf install -y git rpm-build podman python3 python3-devel python3-poetry-core \
|
||||||
|
@ -538,8 +530,6 @@ class Env:
|
||||||
|
|
||||||
if self.distro == "fedora":
|
if self.distro == "fedora":
|
||||||
install_deps = DOCKERFILE_BUILD_DEV_FEDORA_DEPS
|
install_deps = DOCKERFILE_BUILD_DEV_FEDORA_DEPS
|
||||||
if self.version == "41":
|
|
||||||
install_deps += DOCKERFILE_BUILD_DEV_FEDORA_41_DEPS
|
|
||||||
else:
|
else:
|
||||||
# Use Qt6 in all of our Linux dev environments, and add a missing
|
# Use Qt6 in all of our Linux dev environments, and add a missing
|
||||||
# libxcb-cursor0 dependency
|
# libxcb-cursor0 dependency
|
||||||
|
@ -561,6 +551,8 @@ class Env:
|
||||||
"noble",
|
"noble",
|
||||||
"24.10",
|
"24.10",
|
||||||
"ocular",
|
"ocular",
|
||||||
|
"25.04",
|
||||||
|
"plucky",
|
||||||
):
|
):
|
||||||
install_deps = (
|
install_deps = (
|
||||||
DOCKERFILE_UBUNTU_REM_USER + DOCKERFILE_BUILD_DEV_DEBIAN_DEPS
|
DOCKERFILE_UBUNTU_REM_USER + DOCKERFILE_BUILD_DEV_DEBIAN_DEPS
|
||||||
|
@ -620,6 +612,8 @@ class Env:
|
||||||
"noble",
|
"noble",
|
||||||
"24.10",
|
"24.10",
|
||||||
"ocular",
|
"ocular",
|
||||||
|
"25.04",
|
||||||
|
"plucky",
|
||||||
):
|
):
|
||||||
install_deps = DOCKERFILE_UBUNTU_REM_USER + DOCKERFILE_BUILD_DEBIAN_DEPS
|
install_deps = DOCKERFILE_UBUNTU_REM_USER + DOCKERFILE_BUILD_DEBIAN_DEPS
|
||||||
package_pattern = f"dangerzone_{version}-*_*.deb"
|
package_pattern = f"dangerzone_{version}-*_*.deb"
|
||||||
|
|
|
@ -327,28 +327,6 @@ sudo dnf install -y rpm-build podman python3 python3-devel python3-poetry-core \
|
||||||
pipx qt6-qtbase-gui
|
pipx qt6-qtbase-gui
|
||||||
```
|
```
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<details>
|
|
||||||
<summary><i>:memo: Expand this section if you are on Fedora 41.</i></summary>
|
|
||||||
</br>
|
|
||||||
|
|
||||||
The default Python version that ships with Fedora 41 (3.13) is not
|
|
||||||
compatible with PySide6, which requires Python 3.12 or earlier.
|
|
||||||
|
|
||||||
You can install Python 3.12 using the `python3.12` package.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
sudo dnf install -y python3.12
|
|
||||||
```
|
|
||||||
|
|
||||||
Poetry will automatically pick up the correct version when running.
|
|
||||||
</details>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
Install Poetry using `pipx`:
|
Install Poetry using `pipx`:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
|
@ -1027,6 +1005,11 @@ class QAUbuntu2410(QADebianBased):
|
||||||
VERSION = "24.10"
|
VERSION = "24.10"
|
||||||
|
|
||||||
|
|
||||||
|
class QAUbuntu2504(QADebianBased):
|
||||||
|
DISTRO = "ubuntu"
|
||||||
|
VERSION = "25.04"
|
||||||
|
|
||||||
|
|
||||||
class QAFedora(QALinux):
|
class QAFedora(QALinux):
|
||||||
"""Base class for Fedora distros.
|
"""Base class for Fedora distros.
|
||||||
|
|
||||||
|
@ -1044,6 +1027,10 @@ class QAFedora(QALinux):
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class QAFedora42(QAFedora):
|
||||||
|
VERSION = "42"
|
||||||
|
|
||||||
|
|
||||||
class QAFedora41(QAFedora):
|
class QAFedora41(QAFedora):
|
||||||
VERSION = "41"
|
VERSION = "41"
|
||||||
|
|
||||||
|
|
680
dev_scripts/repro-build.py
Executable file
680
dev_scripts/repro-build.py
Executable file
|
@ -0,0 +1,680 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import datetime
|
||||||
|
import hashlib
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import pprint
|
||||||
|
import shlex
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import tarfile
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
MEDIA_TYPE_INDEX_V1_JSON = "application/vnd.oci.image.index.v1+json"
|
||||||
|
MEDIA_TYPE_MANIFEST_V1_JSON = "application/vnd.oci.image.manifest.v1+json"
|
||||||
|
|
||||||
|
ENV_RUNTIME = "REPRO_RUNTIME"
|
||||||
|
ENV_DATETIME = "REPRO_DATETIME"
|
||||||
|
ENV_SDE = "REPRO_SOURCE_DATE_EPOCH"
|
||||||
|
ENV_CACHE = "REPRO_CACHE"
|
||||||
|
ENV_BUILDKIT = "REPRO_BUILDKIT_IMAGE"
|
||||||
|
ENV_ROOTLESS = "REPRO_ROOTLESS"
|
||||||
|
|
||||||
|
DEFAULT_BUILDKIT_IMAGE = "moby/buildkit:v0.19.0@sha256:14aa1b4dd92ea0a4cd03a54d0c6079046ea98cd0c0ae6176bdd7036ba370cbbe"
|
||||||
|
DEFAULT_BUILDKIT_IMAGE_ROOTLESS = "moby/buildkit:v0.19.0-rootless@sha256:e901cffdad753892a7c3afb8b9972549fca02c73888cf340c91ed801fdd96d71"
|
||||||
|
|
||||||
|
MSG_BUILD_CTX = """Build environment:
|
||||||
|
- Container runtime: {runtime}
|
||||||
|
- BuildKit image: {buildkit_image}
|
||||||
|
- Rootless support: {rootless}
|
||||||
|
- Caching enabled: {use_cache}
|
||||||
|
- Build context: {context}
|
||||||
|
- Dockerfile: {dockerfile}
|
||||||
|
- Output: {output}
|
||||||
|
|
||||||
|
Build parameters:
|
||||||
|
- SOURCE_DATE_EPOCH: {sde}
|
||||||
|
- Build args: {build_args}
|
||||||
|
- Tag: {tag}
|
||||||
|
- Platform: {platform}
|
||||||
|
|
||||||
|
Podman-only arguments:
|
||||||
|
- BuildKit arguments: {buildkit_args}
|
||||||
|
|
||||||
|
Docker-only arguments:
|
||||||
|
- Docker Buildx arguments: {buildx_args}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def pretty_error(obj: dict, msg: str):
|
||||||
|
raise Exception(f"{msg}\n{pprint.pprint(obj)}")
|
||||||
|
|
||||||
|
|
||||||
|
def get_key(obj: dict, key: str) -> object:
|
||||||
|
if key not in obj:
|
||||||
|
pretty_error(f"Could not find key '{key}' in the dictionary:", obj)
|
||||||
|
return obj[key]
|
||||||
|
|
||||||
|
|
||||||
|
def run(cmd, dry=False, check=True):
|
||||||
|
action = "Would have run" if dry else "Running"
|
||||||
|
logger.debug(f"{action}: {shlex.join(cmd)}")
|
||||||
|
if not dry:
|
||||||
|
subprocess.run(cmd, check=check)
|
||||||
|
|
||||||
|
|
||||||
|
def snip_contents(contents: str, num: int) -> str:
|
||||||
|
contents = contents.replace("\n", "")
|
||||||
|
if len(contents) > num:
|
||||||
|
return (
|
||||||
|
contents[:num]
|
||||||
|
+ f" [... {len(contents) - num} characters omitted."
|
||||||
|
+ " Pass --show-contents to print them in their entirety]"
|
||||||
|
)
|
||||||
|
return contents
|
||||||
|
|
||||||
|
|
||||||
|
def detect_container_runtime() -> str:
|
||||||
|
"""Auto-detect the installed container runtime in the system."""
|
||||||
|
if shutil.which("docker"):
|
||||||
|
return "docker"
|
||||||
|
elif shutil.which("podman"):
|
||||||
|
return "podman"
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def parse_runtime(args) -> str:
|
||||||
|
if args.runtime is not None:
|
||||||
|
return args.runtime
|
||||||
|
|
||||||
|
runtime = os.environ.get(ENV_RUNTIME)
|
||||||
|
if runtime is None:
|
||||||
|
raise RuntimeError("No container runtime detected in your system")
|
||||||
|
if runtime not in ("docker", "podman"):
|
||||||
|
raise RuntimeError(
|
||||||
|
"Only 'docker' or 'podman' container runtimes"
|
||||||
|
" are currently supported by this script"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_use_cache(args) -> bool:
|
||||||
|
if args.no_cache:
|
||||||
|
return False
|
||||||
|
return bool(int(os.environ.get(ENV_CACHE, "1")))
|
||||||
|
|
||||||
|
|
||||||
|
def parse_rootless(args, runtime: str) -> bool:
|
||||||
|
rootless = args.rootless or bool(int(os.environ.get(ENV_ROOTLESS, "0")))
|
||||||
|
if runtime != "podman" and rootless:
|
||||||
|
raise RuntimeError("Rootless mode is only supported with Podman runtime")
|
||||||
|
return rootless
|
||||||
|
|
||||||
|
|
||||||
|
def parse_sde(args) -> str:
|
||||||
|
sde = os.environ.get(ENV_SDE, args.source_date_epoch)
|
||||||
|
dt = os.environ.get(ENV_DATETIME, args.datetime)
|
||||||
|
|
||||||
|
if (sde is not None and dt is not None) or (sde is None and dt is None):
|
||||||
|
raise RuntimeError("You need to pass either a source date epoch or a datetime")
|
||||||
|
|
||||||
|
if sde is not None:
|
||||||
|
return str(sde)
|
||||||
|
|
||||||
|
if dt is not None:
|
||||||
|
d = datetime.datetime.fromisoformat(dt)
|
||||||
|
# If the datetime is naive, assume its timezone is UTC. The check is
|
||||||
|
# taken from:
|
||||||
|
# https://docs.python.org/3/library/datetime.html#determining-if-an-object-is-aware-or-naive
|
||||||
|
if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
|
||||||
|
d = d.replace(tzinfo=datetime.timezone.utc)
|
||||||
|
return int(d.timestamp())
|
||||||
|
|
||||||
|
|
||||||
|
def parse_buildkit_image(args, rootless: bool, runtime: str) -> str:
    """Pick the BuildKit image: CLI flag, then env var, then the default.

    The default differs for rootless mode. Podman requires a fully-qualified
    image name, so a bare name gets the "docker.io/" registry prefix.
    """
    fallback = DEFAULT_BUILDKIT_IMAGE_ROOTLESS if rootless else DEFAULT_BUILDKIT_IMAGE
    image = args.buildkit_image or os.environ.get(ENV_BUILDKIT, fallback)

    if runtime == "podman" and not image.startswith("docker.io/"):
        image = "docker.io/" + image

    return image
def parse_build_args(args) -> list:
    """Return the list of ARG=VALUE build arguments (empty list if none).

    Annotation fixed: the function returns a list, not a str.
    """
    return args.build_arg or []
def parse_buildkit_args(args, runtime: str) -> list:
    """Split the --buildkit-args string into a token list (Podman only).

    Raises RuntimeError when BuildKit args are given with a non-Podman
    runtime. Annotation fixed: the function returns a list, not a str.
    """
    if not args.buildkit_args:
        return []

    if runtime != "podman":
        # BUG FIX: this branch fires for non-Podman (i.e. Docker) runtimes,
        # but the message previously blamed the Podman runtime.
        raise RuntimeError("Cannot specify BuildKit arguments using the Docker runtime")

    return shlex.split(args.buildkit_args)
def parse_buildx_args(args, runtime: str) -> list:
    """Split the --buildx-args string into a token list (Docker only).

    Raises RuntimeError when Buildx args are given with a non-Docker runtime.
    Annotation fixed: the function returns a list, not a str.
    """
    if not args.buildx_args:
        return []

    if runtime != "docker":
        raise RuntimeError(
            "Cannot specify Docker Buildx arguments using the Podman runtime"
        )

    return shlex.split(args.buildx_args)
def parse_image_digest(args) -> str | None:
|
||||||
|
if not args.expected_image_digest:
|
||||||
|
return None
|
||||||
|
parsed = args.expected_image_digest.split(":", 1)
|
||||||
|
if len(parsed) == 1:
|
||||||
|
return parsed[0]
|
||||||
|
else:
|
||||||
|
return parsed[1]
|
||||||
|
|
||||||
|
|
||||||
|
def parse_path(path: str | None) -> str | None:
|
||||||
|
return path and str(Path(path).absolute())
|
||||||
|
|
||||||
|
|
||||||
|
##########################
|
||||||
|
# OCI parsing logic
|
||||||
|
#
|
||||||
|
# Compatible with:
|
||||||
|
# * https://github.com/opencontainers/image-spec/blob/main/image-layout.md
|
||||||
|
|
||||||
|
|
||||||
|
def oci_print_info(parsed: list, full: bool) -> None:
    """Pretty-print the parsed OCI index and manifests to stdout.

    *parsed* is the list produced by oci_parse_tarball(): entry 0 is the
    index, the rest are manifests in DFS order. When *full* is False the
    contents of each manifest are snipped to 600 characters.

    Annotation fixed: *parsed* is a list (it is indexed and enumerated), not
    a dict.
    """
    print(f"The OCI tarball contains an index and {len(parsed) - 1} manifest(s):")
    print()
    # By DFS construction, entry 1 is the first (top-level) image manifest.
    print(f"Image digest: {parsed[1]['digest']}")
    for i, info in enumerate(parsed):
        print()
        if i == 0:
            print(f"Index ({info['path']}):")
        else:
            print(f"Manifest {i} ({info['path']}):")
        print(f"  Digest: {info['digest']}")
        print(f"  Media type: {info['media_type']}")
        print(f"  Platform: {info['platform'] or '-'}")
        contents = info["contents"] if full else snip_contents(info["contents"], 600)
        print(f"  Contents: {contents}")
        print()
def oci_normalize_path(path):
    """Map a "sha256:<hex>" digest reference to its blob path in the layout.

    Paths that are not digest references are returned unchanged.
    """
    if not path.startswith("sha256:"):
        return path
    hash_algo, checksum = path.split(":")
    return f"blobs/{hash_algo}/{checksum}"
def oci_get_file_from_tarball(tar: tarfile.TarFile, path: str) -> str:
    """Get file from an OCI tarball, decoded as text.

    If the filename cannot be found, search again by prefixing it with "./", since we
    have encountered path names in OCI tarballs prefixed with "./".

    Annotation fixed: the function returns the decoded file contents (str),
    not a dict.
    """
    try:
        return tar.extractfile(path).read().decode()
    except KeyError:
        if not path.startswith("./") and not path.startswith("/"):
            path = "./" + path
            try:
                return tar.extractfile(path).read().decode()
            except KeyError:
                # Do not raise here, so that we can raise the original exception below.
                pass
        raise
def oci_parse_manifest(tar: tarfile.TarFile, path: str, platform: dict | None) -> dict:
    """Parse manifest information in JSON format.

    Interestingly, the platform info for a manifest is not included in the
    manifest itself, but in the descriptor that points to it. So, we have to
    carry it from the previous manifest and include in the info here.
    """
    path = oci_normalize_path(path)
    contents = oci_get_file_from_tarball(tar, path)
    digest = "sha256:" + hashlib.sha256(contents.encode()).hexdigest()
    contents_dict = json.loads(contents)
    media_type = get_key(contents_dict, "mediaType")
    manifests = contents_dict.get("manifests", [])

    if platform:
        # FIX: the local name was previously `os`, shadowing the `os` module.
        os_name = get_key(platform, "os")
        arch = get_key(platform, "architecture")
        platform = f"{os_name}/{arch}"

    return {
        "path": path,
        "contents": contents,
        "digest": digest,
        "media_type": media_type,
        "platform": platform,
        "manifests": manifests,
    }
def oci_parse_manifests_dfs(
    tar: tarfile.TarFile, path: str, parsed: list, platform: dict | None = None
) -> None:
    """Walk the manifest tree depth-first, appending each parsed node to *parsed*.

    The platform descriptor of each child is forwarded, since a manifest's
    platform lives in the descriptor that references it.
    """
    node = oci_parse_manifest(tar, path, platform)
    parsed.append(node)
    for child in node["manifests"]:
        oci_parse_manifests_dfs(tar, child["digest"], parsed, child.get("platform"))
def oci_parse_tarball(path: Path) -> list:
    """Parse an OCI image tarball starting from its index.json.

    Returns the DFS-ordered list of parsed index/manifest entries.
    Annotation fixed: the function returns a list, not a dict.
    """
    parsed = []
    with tarfile.TarFile.open(path) as tar:
        oci_parse_manifests_dfs(tar, "index.json", parsed)
    return parsed
##########################
|
||||||
|
# Image building logic
|
||||||
|
|
||||||
|
|
||||||
|
def podman_build(
    context: str,
    dockerfile: str | None,
    tag: str | None,
    buildkit_image: str,
    sde: int,
    rootless: bool,
    use_cache: bool,
    output: Path,
    build_args: list,
    platform: str,
    buildkit_args: list,
    dry: bool,
):
    """Build a reproducible image by running BuildKit inside a Podman container.

    Mounts the build context, an optional Dockerfile, and a persistent cache
    volume into the BuildKit container, then invokes buildctl-daemonless.sh
    with SOURCE_DATE_EPOCH pinned and timestamps rewritten.
    """
    # Rootless mode needs a mapped user namespace and relaxed confinement;
    # rootful mode runs the BuildKit container privileged instead.
    rootless_args = []
    rootful_args = []
    if rootless:
        rootless_args = [
            "--userns",
            "keep-id:uid=1000,gid=1000",
            "--security-opt",
            "seccomp=unconfined",
            "--security-opt",
            "apparmor=unconfined",
            "-e",
            "BUILDKITD_FLAGS=--oci-worker-no-process-sandbox",
        ]
    else:
        rootful_args = ["--privileged"]

    # An explicit Dockerfile is mounted at /tmp/Dockerfile; otherwise BuildKit
    # reads the Dockerfile from the context directory itself.
    dockerfile_args_podman = []
    if dockerfile:
        dockerfile_args_podman = ["-v", f"{dockerfile}:/tmp/Dockerfile"]
        dockerfile_args_buildkit = ["--local", "dockerfile=/tmp"]
    else:
        dockerfile_args_buildkit = ["--local", "dockerfile=/tmp/work"]

    tag_args = f",name={tag}" if tag else ""

    cache_args = []
    if use_cache:
        cache_args = [
            "--export-cache",
            "type=local,mode=max,dest=/tmp/cache",
            "--import-cache",
            "type=local,src=/tmp/cache",
        ]

    # Each ARG=VALUE becomes a "--opt build-arg:ARG=VALUE" pair.
    _build_args = [
        token for arg in build_args for token in ("--opt", f"build-arg:{arg}")
    ]
    platform_args = ["--opt", f"platform={platform}"] if platform else []

    cmd = [
        "podman",
        "run",
        "-it",
        "--rm",
        "-v",
        "buildkit_cache:/tmp/cache",
        "-v",
        f"{output.parent}:/tmp/image",
        "-v",
        f"{context}:/tmp/work",
        "--entrypoint",
        "buildctl-daemonless.sh",
        *rootless_args,
        *rootful_args,
        *dockerfile_args_podman,
        buildkit_image,
        "build",
        "--frontend",
        "dockerfile.v0",
        "--local",
        "context=/tmp/work",
        "--opt",
        f"build-arg:SOURCE_DATE_EPOCH={sde}",
        *_build_args,
        "--output",
        f"type=docker,dest=/tmp/image/{output.name},rewrite-timestamp=true{tag_args}",
        *cache_args,
        *dockerfile_args_buildkit,
        *platform_args,
        *buildkit_args,
    ]

    run(cmd, dry)
def docker_build(
    context: str,
    dockerfile: str | None,
    tag: str | None,
    buildkit_image: str,
    sde: int,
    use_cache: bool,
    output: Path,
    build_args: list,
    platform: str,
    buildx_args: list,
    dry: bool,
):
    """Build a reproducible image with Docker Buildx.

    Creates (idempotently) a dedicated builder whose name is derived from the
    BuildKit image, then runs the build with SOURCE_DATE_EPOCH pinned,
    provenance disabled, and timestamps rewritten in the output tarball.
    """
    # One builder per BuildKit image: hash the image ref into the name so
    # switching images yields a fresh builder.
    builder_id = hashlib.sha256(buildkit_image.encode()).hexdigest()
    builder_name = f"repro-build-{builder_id}"
    tag_args = ["-t", tag] if tag else []
    cache_args = [] if use_cache else ["--no-cache", "--pull"]

    # check=False: creation fails harmlessly when the builder already exists.
    create_cmd = [
        "docker",
        "buildx",
        "create",
        "--name",
        builder_name,
        "--driver-opt",
        f"image={buildkit_image}",
    ]
    run(create_cmd, dry, check=False)

    dockerfile_args = ["-f", dockerfile] if dockerfile else []
    _build_args = [token for arg in build_args for token in ("--build-arg", arg)]
    platform_args = ["--platform", platform] if platform else []

    build_cmd = [
        "docker",
        "buildx",
        "--builder",
        builder_name,
        "build",
        "--build-arg",
        f"SOURCE_DATE_EPOCH={sde}",
        *_build_args,
        "--provenance",
        "false",
        "--output",
        f"type=docker,dest={output},rewrite-timestamp=true",
        *cache_args,
        *tag_args,
        *dockerfile_args,
        *platform_args,
        *buildx_args,
        context,
    ]
    run(build_cmd, dry)
##########################
|
||||||
|
# Command logic
|
||||||
|
|
||||||
|
|
||||||
|
def build(args):
    """Entry point of the "build" subcommand.

    Resolves every build parameter from CLI args / environment, logs the
    effective build context, and dispatches to the Docker or Podman backend.
    Exits the process with the child's return code on build failure.
    """
    runtime = parse_runtime(args)
    use_cache = parse_use_cache(args)
    sde = parse_sde(args)
    rootless = parse_rootless(args, runtime)
    buildkit_image = parse_buildkit_image(args, rootless, runtime)
    build_args = parse_build_args(args)
    platform = args.platform
    buildkit_args = parse_buildkit_args(args, runtime)
    buildx_args = parse_buildx_args(args, runtime)
    tag = args.tag
    dockerfile = parse_path(args.file)
    output = Path(parse_path(args.output))
    dry = args.dry
    context = parse_path(args.context)

    logger.info(
        MSG_BUILD_CTX.format(
            runtime=runtime,
            buildkit_image=buildkit_image,
            sde=sde,
            rootless=rootless,
            use_cache=use_cache,
            context=context,
            dockerfile=dockerfile or "(not provided)",
            tag=tag or "(not provided)",
            output=output,
            build_args=",".join(build_args) or "(not provided)",
            platform=platform or "(default)",
            buildkit_args=" ".join(buildkit_args) or "(not provided)",
            buildx_args=" ".join(buildx_args) or "(not provided)",
        )
    )

    try:
        if runtime == "docker":
            docker_build(
                context,
                dockerfile,
                tag,
                buildkit_image,
                sde,
                use_cache,
                output,
                build_args,
                platform,
                buildx_args,
                dry,
            )
        else:
            podman_build(
                context,
                dockerfile,
                tag,
                buildkit_image,
                sde,
                rootless,
                use_cache,
                output,
                build_args,
                platform,
                buildkit_args,
                dry,
            )
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed with {e.returncode}")
        sys.exit(e.returncode)
def analyze(args) -> None:
    """Entry point of the "analyze" subcommand.

    Parses and prints the OCI tarball structure; when an expected digest was
    given, verifies that the image manifest's digest matches it.
    """
    expected_image_digest = parse_image_digest(args)
    tarball_path = Path(args.tarball)

    parsed = oci_parse_tarball(tarball_path)
    oci_print_info(parsed, args.show_contents)

    if expected_image_digest:
        # Entry 1 is the image manifest; strip the "sha256:" prefix for the
        # comparison, matching the (prefix-less) parsed expectation.
        cur_digest = parsed[1]["digest"].split(":")[1]
        if cur_digest != expected_image_digest:
            raise Exception(
                f"The image does not have the expected digest: {cur_digest} != {expected_image_digest}"
            )
        print(f"✅ Image digest matches {expected_image_digest}")
def define_build_cmd_args(parser: argparse.ArgumentParser) -> None:
    """Register all options of the "build" subcommand on *parser*."""
    parser.add_argument(
        "--runtime",
        choices=["docker", "podman"],
        default=detect_container_runtime(),
        help="The container runtime for building the image (default: %(default)s)",
    )
    parser.add_argument(
        "--datetime",
        metavar="YYYY-MM-DD",
        default=None,
        help=(
            "Provide a date and (optionally) a time in ISO format, which will"
            " be used as the timestamp of the image layers"
        ),
    )
    parser.add_argument(
        "--buildkit-image",
        metavar="NAME:TAG@DIGEST",
        default=None,
        help=(
            "The BuildKit container image which will be used for building the"
            " reproducible container image. Make sure to pass the '-rootless'"
            " variant if you are using rootless Podman"
            " (default: docker.io/moby/buildkit:v0.19.0)"
        ),
    )
    parser.add_argument(
        "--source-date-epoch",
        "--sde",
        metavar="SECONDS",
        type=int,
        default=None,
        help="Provide a Unix timestamp for the image layers",
    )
    parser.add_argument(
        "--no-cache",
        default=False,
        action="store_true",
        help="Do not use existing cached images for the container build. Build from the start with a new set of cached layers.",
    )
    parser.add_argument(
        "--rootless",
        default=False,
        action="store_true",
        help="Run BuildKit in rootless mode (Podman only)",
    )
    parser.add_argument(
        "-f",
        "--file",
        metavar="FILE",
        default=None,
        help="Pathname of a Dockerfile",
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="FILE",
        default=Path.cwd() / "image.tar",
        help="Path to save OCI tarball (default: %(default)s)",
    )
    parser.add_argument(
        "-t",
        "--tag",
        metavar="TAG",
        default=None,
        help="Tag the built image with the name %(metavar)s",
    )
    parser.add_argument(
        "--build-arg",
        metavar="ARG=VALUE",
        action="append",
        default=None,
        help="Set build-time variables",
    )
    parser.add_argument(
        "--platform",
        metavar="PLAT1,PLAT2",
        default=None,
        help="Set platform for the image",
    )
    parser.add_argument(
        "--buildkit-args",
        metavar="'ARG1 ARG2'",
        default=None,
        help="Extra arguments for BuildKit (Podman only)",
    )
    parser.add_argument(
        "--buildx-args",
        metavar="'ARG1 ARG2'",
        default=None,
        help="Extra arguments for Docker Buildx (Docker only)",
    )
    parser.add_argument(
        "--dry",
        default=False,
        action="store_true",
        help="Do not run any commands, just print what would happen",
    )
    parser.add_argument(
        "context",
        metavar="CONTEXT",
        help="Path to the build context",
    )
def parse_args() -> argparse.Namespace:
    """Build the CLI parser (build / analyze subcommands) and parse argv.

    Annotation fixed: argparse returns a Namespace, not a dict.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    build_parser = subparsers.add_parser("build", help="Perform a build operation")
    build_parser.set_defaults(func=build)
    define_build_cmd_args(build_parser)

    analyze_parser = subparsers.add_parser("analyze", help="Analyze an OCI tarball")
    analyze_parser.set_defaults(func=analyze)
    analyze_parser.add_argument(
        "tarball",
        metavar="FILE",
        help="Path to OCI image in .tar format",
    )
    analyze_parser.add_argument(
        "--expected-image-digest",
        metavar="DIGEST",
        default=None,
        help="The expected digest for the provided image",
    )
    analyze_parser.add_argument(
        "--show-contents",
        default=False,
        action="store_true",
        help="Show full file contents",
    )

    return parser.parse_args()
def main() -> None:
    """Configure logging, parse arguments, and dispatch to the subcommand.

    When no subcommand is given, default to the build command for
    backward-compatible invocation.
    """
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    args = parse_args()

    if not hasattr(args, "func"):
        args.func = build
    args.func(args)
if __name__ == "__main__":
    # main() returns None, so sys.exit() reports success (exit status 0).
    sys.exit(main())
@ -4,6 +4,7 @@ import argparse
|
||||||
import hashlib
|
import hashlib
|
||||||
import logging
|
import logging
|
||||||
import pathlib
|
import pathlib
|
||||||
|
import platform
|
||||||
import stat
|
import stat
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
|
@ -11,131 +12,72 @@ import urllib.request
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
DIFFOCI_URL = "https://github.com/reproducible-containers/diffoci/releases/download/v0.1.5/diffoci-v0.1.5.linux-amd64"
|
if platform.system() in ["Darwin", "Windows"]:
|
||||||
DIFFOCI_CHECKSUM = "01d25fe690196945a6bd510d30559338aa489c034d3a1b895a0d82a4b860698f"
|
CONTAINER_RUNTIME = "docker"
|
||||||
DIFFOCI_PATH = (
|
elif platform.system() == "Linux":
|
||||||
pathlib.Path.home() / ".local" / "share" / "dangerzone-dev" / "helpers" / "diffoci"
|
CONTAINER_RUNTIME = "podman"
|
||||||
)
|
|
||||||
IMAGE_NAME = "dangerzone.rocks/dangerzone"
|
|
||||||
|
|
||||||
|
|
||||||
def run(*args):
|
def run(*args):
|
||||||
"""Simple function that runs a command, validates it, and returns the output"""
|
"""Simple function that runs a command and checks the result."""
|
||||||
logger.debug(f"Running command: {' '.join(args)}")
|
logger.debug(f"Running command: {' '.join(args)}")
|
||||||
return subprocess.run(
|
return subprocess.run(args, check=True)
|
||||||
args,
|
|
||||||
check=True,
|
|
||||||
stdout=subprocess.PIPE,
|
|
||||||
).stdout
|
|
||||||
|
|
||||||
|
|
||||||
def git_commit_get():
|
def build_image(
|
||||||
return run("git", "rev-parse", "--short", "HEAD").decode().strip()
|
platform=None,
|
||||||
|
runtime=None,
|
||||||
|
cache=True,
|
||||||
def git_determine_tag():
|
date=None,
|
||||||
return run("git", "describe", "--long", "--first-parent").decode().strip()[1:]
|
):
|
||||||
|
|
||||||
|
|
||||||
def git_verify(commit, source):
|
|
||||||
if not commit in source:
|
|
||||||
raise RuntimeError(
|
|
||||||
f"Image '{source}' does not seem to be built from commit '{commit}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def diffoci_hash_matches(diffoci):
|
|
||||||
"""Check if the hash of the downloaded diffoci bin matches the expected one."""
|
|
||||||
m = hashlib.sha256()
|
|
||||||
m.update(diffoci)
|
|
||||||
diffoci_checksum = m.hexdigest()
|
|
||||||
return diffoci_checksum == DIFFOCI_CHECKSUM
|
|
||||||
|
|
||||||
|
|
||||||
def diffoci_is_installed():
|
|
||||||
"""Determine if diffoci has been installed.
|
|
||||||
|
|
||||||
Determine if diffoci has been installed, by checking if the binary exists, and if
|
|
||||||
its hash is the expected one. If the binary exists but the hash is different, then
|
|
||||||
this is a sign that we need to update the local diffoci binary.
|
|
||||||
"""
|
|
||||||
if not DIFFOCI_PATH.exists():
|
|
||||||
return False
|
|
||||||
return diffoci_hash_matches(DIFFOCI_PATH.open("rb").read())
|
|
||||||
|
|
||||||
|
|
||||||
def diffoci_download():
|
|
||||||
"""Download the diffoci tool, based on a URL and its checksum."""
|
|
||||||
with urllib.request.urlopen(DIFFOCI_URL) as f:
|
|
||||||
diffoci_bin = f.read()
|
|
||||||
|
|
||||||
if not diffoci_hash_matches(diffoci_bin):
|
|
||||||
raise ValueError(
|
|
||||||
"Unexpected checksum for downloaded diffoci binary:"
|
|
||||||
f" {diffoci_checksum} !={DIFFOCI_CHECKSUM}"
|
|
||||||
)
|
|
||||||
|
|
||||||
DIFFOCI_PATH.parent.mkdir(parents=True, exist_ok=True)
|
|
||||||
DIFFOCI_PATH.open("wb+").write(diffoci_bin)
|
|
||||||
DIFFOCI_PATH.chmod(DIFFOCI_PATH.stat().st_mode | stat.S_IEXEC)
|
|
||||||
|
|
||||||
|
|
||||||
def diffoci_diff(source, local_target):
|
|
||||||
"""Diff the source image against the recently built target image using diffoci."""
|
|
||||||
target = f"podman://{local_target}"
|
|
||||||
try:
|
|
||||||
return run(
|
|
||||||
str(DIFFOCI_PATH),
|
|
||||||
"diff",
|
|
||||||
source,
|
|
||||||
target,
|
|
||||||
"--semantic",
|
|
||||||
"--verbose",
|
|
||||||
)
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
error = e.stdout.decode()
|
|
||||||
raise RuntimeError(
|
|
||||||
f"Could not rebuild an identical image to {source}. Diffoci report:\n{error}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def build_image(tag, use_cache=False):
|
|
||||||
"""Build the Dangerzone container image with a special tag."""
|
"""Build the Dangerzone container image with a special tag."""
|
||||||
|
platform_args = [] if not platform else ["--platform", platform]
|
||||||
|
runtime_args = [] if not runtime else ["--runtime", runtime]
|
||||||
|
cache_args = [] if cache else ["--use-cache", "no"]
|
||||||
|
date_args = [] if not date else ["--debian-archive-date", date]
|
||||||
run(
|
run(
|
||||||
"python3",
|
"python3",
|
||||||
"./install/common/build-image.py",
|
"./install/common/build-image.py",
|
||||||
"--no-save",
|
*platform_args,
|
||||||
"--use-cache",
|
*runtime_args,
|
||||||
str(use_cache),
|
*cache_args,
|
||||||
"--tag",
|
*date_args,
|
||||||
tag,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
|
def parse_args():
|
||||||
image_tag = git_determine_tag()
|
|
||||||
# TODO: Remove the local "podman://" prefix once we have started pushing images to a
|
|
||||||
# remote.
|
|
||||||
default_image_name = f"podman://{IMAGE_NAME}:{image_tag}"
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
prog=sys.argv[0],
|
prog=sys.argv[0],
|
||||||
description="Dev script for verifying container image reproducibility",
|
description="Dev script for verifying container image reproducibility",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--source",
|
"--platform",
|
||||||
default=default_image_name,
|
default=None,
|
||||||
|
help=f"The platform for building the image (default: current platform)",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--runtime",
|
||||||
|
choices=["docker", "podman"],
|
||||||
|
default=CONTAINER_RUNTIME,
|
||||||
|
help=f"The container runtime for building the image (default: {CONTAINER_RUNTIME})",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--no-cache",
|
||||||
|
default=False,
|
||||||
|
action="store_true",
|
||||||
help=(
|
help=(
|
||||||
"The name of the image that you want to reproduce. If the image resides in"
|
"Do not use existing cached images for the container build."
|
||||||
" the local Docker / Podman engine, you can prefix it with podman:// or"
|
" Build from the start with a new set of cached layers."
|
||||||
f" docker:// accordingly (default: {default_image_name})"
|
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--use-cache",
|
"--debian-archive-date",
|
||||||
default=False,
|
default=None,
|
||||||
action="store_true",
|
help="Use a specific Debian snapshot archive, by its date",
|
||||||
help="Whether to reuse the build cache (off by default for better reproducibility)",
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"digest",
|
||||||
|
help="The digest of the image that you want to reproduce",
|
||||||
)
|
)
|
||||||
return parser.parse_args()
|
return parser.parse_args()
|
||||||
|
|
||||||
|
@ -148,32 +90,25 @@ def main():
|
||||||
)
|
)
|
||||||
args = parse_args()
|
args = parse_args()
|
||||||
|
|
||||||
logger.info(f"Ensuring that current Git commit matches image '{args.source}'")
|
logger.info(f"Building container image")
|
||||||
commit = git_commit_get()
|
build_image(
|
||||||
git_verify(commit, args.source)
|
args.platform,
|
||||||
|
args.runtime,
|
||||||
if not diffoci_is_installed():
|
not args.no_cache,
|
||||||
logger.info(f"Downloading diffoci helper from {DIFFOCI_URL}")
|
args.debian_archive_date,
|
||||||
diffoci_download()
|
)
|
||||||
|
|
||||||
tag = f"reproduce-{commit}"
|
|
||||||
target = f"{IMAGE_NAME}:{tag}"
|
|
||||||
logger.info(f"Building container image and tagging it as '{target}'")
|
|
||||||
build_image(tag, args.use_cache)
|
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Ensuring that source image '{args.source}' is semantically identical with"
|
f"Check that the reproduced image has the expected digest: {args.digest}"
|
||||||
f" built image '{target}'"
|
)
|
||||||
|
run(
|
||||||
|
"./dev_scripts/repro-build.py",
|
||||||
|
"analyze",
|
||||||
|
"--show-contents",
|
||||||
|
"share/container.tar",
|
||||||
|
"--expected-image-digest",
|
||||||
|
args.digest,
|
||||||
)
|
)
|
||||||
try:
|
|
||||||
diffoci_diff(args.source, target)
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
raise RuntimeError(
|
|
||||||
f"Could not reproduce image {args.source} for commit {commit}"
|
|
||||||
)
|
|
||||||
breakpoint()
|
|
||||||
|
|
||||||
logger.info(f"Successfully reproduced image '{args.source}' from commit '{commit}'")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|
|
@ -11,8 +11,8 @@ log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
DZ_ASSETS = [
|
DZ_ASSETS = [
|
||||||
"container-{version}-i686.tar.gz",
|
"container-{version}-i686.tar",
|
||||||
"container-{version}-arm64.tar.gz",
|
"container-{version}-arm64.tar",
|
||||||
"Dangerzone-{version}.msi",
|
"Dangerzone-{version}.msi",
|
||||||
"Dangerzone-{version}-arm64.dmg",
|
"Dangerzone-{version}-arm64.dmg",
|
||||||
"Dangerzone-{version}-i686.dmg",
|
"Dangerzone-{version}-i686.dmg",
|
||||||
|
|
|
@ -47,21 +47,21 @@ trigger a CI error.
|
||||||
|
|
||||||
For a simple way to reproduce a Dangerzone container image, you can checkout the
|
For a simple way to reproduce a Dangerzone container image, you can checkout the
|
||||||
commit this image was built from (you can find it from the image tag in its
|
commit this image was built from (you can find it from the image tag in its
|
||||||
`g<commit>` portion), and run the following command in a Linux environment:
|
`g<commit>` portion), retrieve the date it was built (also included in the image
|
||||||
|
tag), and run the following command in any environment:
|
||||||
|
|
||||||
```
|
```
|
||||||
./dev_scripts/reproduce-image.py --source <image>
|
./dev_scripts/reproduce-image.py \
|
||||||
|
--debian-archive-date <date> \
|
||||||
|
<digest>
|
||||||
```
|
```
|
||||||
|
|
||||||
This command will download the `diffoci` helper, build a container image from
|
where:
|
||||||
the current Git commit, and ensure that the built image matches the source one,
|
* `<date>` should be given in YYYYMMDD format, e.g, 20250226
|
||||||
with the exception of image names and file timestamps.
|
* `<digest>` is the SHA-256 hash of the image for the **current platform**, with
|
||||||
|
or without the `sha256:` prefix.
|
||||||
|
|
||||||
> [!TIP]
|
This command will build a container image from the current Git commit and the
|
||||||
> If the source image is not pushed to a registry, and is local instead, you
|
provided date for the Debian archives. Then, it will compare the digest of the
|
||||||
> can prefix it with `docker://` or `podman://` accordingly, so that `diffoci`
|
manifest against the provided one. This is a simple way to ensure that the
|
||||||
> can load it from the local Docker / Podman container engine. For example:
|
created image is bit-for-bit reproducible.
|
||||||
>
|
|
||||||
> ```
|
|
||||||
> ./dev_scripts/reproduce.py --source podman://dangerzone.rocks/dangerzone:0.8.0-125-g725ce3b
|
|
||||||
> ```
|
|
||||||
|
|
6
dodo.py
6
dodo.py
|
@ -57,7 +57,7 @@ IMAGE_DEPS = [
|
||||||
*list_files("dangerzone/container_helpers"),
|
*list_files("dangerzone/container_helpers"),
|
||||||
"install/common/build-image.py",
|
"install/common/build-image.py",
|
||||||
]
|
]
|
||||||
IMAGE_TARGETS = ["share/container.tar.gz", "share/image-id.txt"]
|
IMAGE_TARGETS = ["share/container.tar", "share/image-id.txt"]
|
||||||
|
|
||||||
SOURCE_DEPS = [
|
SOURCE_DEPS = [
|
||||||
*list_files("assets"),
|
*list_files("assets"),
|
||||||
|
@ -188,8 +188,8 @@ def task_download_tessdata():
|
||||||
|
|
||||||
def task_build_image():
|
def task_build_image():
|
||||||
"""Build the container image using ./install/common/build-image.py"""
|
"""Build the container image using ./install/common/build-image.py"""
|
||||||
img_src = "share/container.tar.gz"
|
img_src = "share/container.tar"
|
||||||
img_dst = RELEASE_DIR / f"container-{VERSION}-{ARCH}.tar.gz" # FIXME: Add arch
|
img_dst = RELEASE_DIR / f"container-{VERSION}-{ARCH}.tar" # FIXME: Add arch
|
||||||
img_id_src = "share/image-id.txt"
|
img_id_src = "share/image-id.txt"
|
||||||
img_id_dst = RELEASE_DIR / "image-id.txt" # FIXME: Add arch
|
img_id_dst = RELEASE_DIR / "image-id.txt" # FIXME: Add arch
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,4 @@
|
||||||
import argparse
|
import argparse
|
||||||
import gzip
|
|
||||||
import platform
|
import platform
|
||||||
import secrets
|
import secrets
|
||||||
import subprocess
|
import subprocess
|
||||||
|
@ -13,8 +12,6 @@ if platform.system() in ["Darwin", "Windows"]:
|
||||||
elif platform.system() == "Linux":
|
elif platform.system() == "Linux":
|
||||||
CONTAINER_RUNTIME = "podman"
|
CONTAINER_RUNTIME = "podman"
|
||||||
|
|
||||||
ARCH = platform.machine()
|
|
||||||
|
|
||||||
|
|
||||||
def str2bool(v):
|
def str2bool(v):
|
||||||
if isinstance(v, bool):
|
if isinstance(v, bool):
|
||||||
|
@ -50,6 +47,16 @@ def determine_git_tag():
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def determine_debian_archive_date():
|
||||||
|
"""Get the date of the Debian archive from Dockerfile.env."""
|
||||||
|
for env in Path("Dockerfile.env").read_text().split("\n"):
|
||||||
|
if env.startswith("DEBIAN_ARCHIVE_DATE"):
|
||||||
|
return env.split("=")[1]
|
||||||
|
raise Exception(
|
||||||
|
"Could not find 'DEBIAN_ARCHIVE_DATE' build argument in Dockerfile.env"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser()
|
parser = argparse.ArgumentParser()
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
|
@ -59,16 +66,15 @@ def main():
|
||||||
help=f"The container runtime for building the image (default: {CONTAINER_RUNTIME})",
|
help=f"The container runtime for building the image (default: {CONTAINER_RUNTIME})",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--no-save",
|
"--platform",
|
||||||
action="store_true",
|
default=None,
|
||||||
help="Do not save the container image as a tarball in share/container.tar.gz",
|
help=f"The platform for building the image (default: current platform)",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--compress-level",
|
"--output",
|
||||||
type=int,
|
"-o",
|
||||||
choices=range(0, 10),
|
default=str(Path("share") / "container.tar"),
|
||||||
default=9,
|
help="Path to store the container image",
|
||||||
help="The Gzip compression level, from 0 (lowest) to 9 (highest, default)",
|
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--use-cache",
|
"--use-cache",
|
||||||
|
@ -83,63 +89,62 @@ def main():
|
||||||
default=None,
|
default=None,
|
||||||
help="Provide a custom tag for the image (for development only)",
|
help="Provide a custom tag for the image (for development only)",
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--debian-archive-date",
|
||||||
|
"-d",
|
||||||
|
default=determine_debian_archive_date(),
|
||||||
|
help="Use a specific Debian snapshot archive, by its date (default %(default)s)",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--dry",
|
||||||
|
default=False,
|
||||||
|
action="store_true",
|
||||||
|
help="Do not run any commands, just print what would happen",
|
||||||
|
)
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
tarball_path = Path("share") / "container.tar.gz"
|
tag = args.tag or f"{args.debian_archive_date}-{determine_git_tag()}"
|
||||||
image_id_path = Path("share") / "image-id.txt"
|
image_name_tagged = f"{IMAGE_NAME}:{tag}"
|
||||||
|
|
||||||
print(f"Building for architecture '{ARCH}'")
|
|
||||||
|
|
||||||
tag = args.tag or determine_git_tag()
|
|
||||||
image_name_tagged = IMAGE_NAME + ":" + tag
|
|
||||||
|
|
||||||
print(f"Will tag the container image as '{image_name_tagged}'")
|
print(f"Will tag the container image as '{image_name_tagged}'")
|
||||||
with open(image_id_path, "w") as f:
|
image_id_path = Path("share") / "image-id.txt"
|
||||||
f.write(tag)
|
if not args.dry:
|
||||||
|
with open(image_id_path, "w") as f:
|
||||||
|
f.write(tag)
|
||||||
|
|
||||||
# Build the container image, and tag it with the calculated tag
|
# Build the container image, and tag it with the calculated tag
|
||||||
print("Building container image")
|
print("Building container image")
|
||||||
cache_args = [] if args.use_cache else ["--no-cache"]
|
cache_args = [] if args.use_cache else ["--no-cache"]
|
||||||
|
platform_args = [] if not args.platform else ["--platform", args.platform]
|
||||||
|
rootless_args = [] if args.runtime == "docker" else ["--rootless"]
|
||||||
|
rootless_args = []
|
||||||
|
dry_args = [] if not args.dry else ["--dry"]
|
||||||
|
|
||||||
subprocess.run(
|
subprocess.run(
|
||||||
[
|
[
|
||||||
args.runtime,
|
"./dev_scripts/repro-build.py",
|
||||||
"build",
|
"build",
|
||||||
BUILD_CONTEXT,
|
"--runtime",
|
||||||
|
args.runtime,
|
||||||
|
"--build-arg",
|
||||||
|
f"DEBIAN_ARCHIVE_DATE={args.debian_archive_date}",
|
||||||
|
"--datetime",
|
||||||
|
args.debian_archive_date,
|
||||||
|
*dry_args,
|
||||||
*cache_args,
|
*cache_args,
|
||||||
"-f",
|
*platform_args,
|
||||||
"Dockerfile",
|
*rootless_args,
|
||||||
"--tag",
|
"--tag",
|
||||||
image_name_tagged,
|
image_name_tagged,
|
||||||
|
"--output",
|
||||||
|
args.output,
|
||||||
|
"-f",
|
||||||
|
"Dockerfile",
|
||||||
|
BUILD_CONTEXT,
|
||||||
],
|
],
|
||||||
check=True,
|
check=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
if not args.no_save:
|
|
||||||
print("Saving container image")
|
|
||||||
cmd = subprocess.Popen(
|
|
||||||
[
|
|
||||||
CONTAINER_RUNTIME,
|
|
||||||
"save",
|
|
||||||
image_name_tagged,
|
|
||||||
],
|
|
||||||
stdout=subprocess.PIPE,
|
|
||||||
)
|
|
||||||
|
|
||||||
print("Compressing container image")
|
|
||||||
chunk_size = 4 << 20
|
|
||||||
with gzip.open(
|
|
||||||
tarball_path,
|
|
||||||
"wb",
|
|
||||||
compresslevel=args.compress_level,
|
|
||||||
) as gzip_f:
|
|
||||||
while True:
|
|
||||||
chunk = cmd.stdout.read(chunk_size)
|
|
||||||
if len(chunk) > 0:
|
|
||||||
gzip_f.write(chunk)
|
|
||||||
else:
|
|
||||||
break
|
|
||||||
cmd.wait(5)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
|
@ -66,14 +66,14 @@ def build(build_dir, qubes=False):
|
||||||
print("* Creating a Python sdist")
|
print("* Creating a Python sdist")
|
||||||
tessdata = root / "share" / "tessdata"
|
tessdata = root / "share" / "tessdata"
|
||||||
tessdata_bak = root / "tessdata.bak"
|
tessdata_bak = root / "tessdata.bak"
|
||||||
container_tar_gz = root / "share" / "container.tar.gz"
|
container_tar = root / "share" / "container.tar"
|
||||||
container_tar_gz_bak = root / "container.tar.gz.bak"
|
container_tar_bak = root / "container.tar.bak"
|
||||||
|
|
||||||
if tessdata.exists():
|
if tessdata.exists():
|
||||||
tessdata.rename(tessdata_bak)
|
tessdata.rename(tessdata_bak)
|
||||||
stash_container = qubes and container_tar_gz.exists()
|
stash_container = qubes and container_tar.exists()
|
||||||
if stash_container and container_tar_gz.exists():
|
if stash_container and container_tar.exists():
|
||||||
container_tar_gz.rename(container_tar_gz_bak)
|
container_tar.rename(container_tar_bak)
|
||||||
try:
|
try:
|
||||||
subprocess.run(["poetry", "build", "-f", "sdist"], cwd=root, check=True)
|
subprocess.run(["poetry", "build", "-f", "sdist"], cwd=root, check=True)
|
||||||
# Copy and unlink the Dangerzone sdist, instead of just renaming it. If the
|
# Copy and unlink the Dangerzone sdist, instead of just renaming it. If the
|
||||||
|
@ -84,8 +84,8 @@ def build(build_dir, qubes=False):
|
||||||
finally:
|
finally:
|
||||||
if tessdata_bak.exists():
|
if tessdata_bak.exists():
|
||||||
tessdata_bak.rename(tessdata)
|
tessdata_bak.rename(tessdata)
|
||||||
if stash_container and container_tar_gz_bak.exists():
|
if stash_container and container_tar_bak.exists():
|
||||||
container_tar_gz_bak.rename(container_tar_gz)
|
container_tar_bak.rename(container_tar)
|
||||||
|
|
||||||
print("* Building RPM package")
|
print("* Building RPM package")
|
||||||
cmd = [
|
cmd = [
|
||||||
|
|
|
@ -18,7 +18,7 @@
|
||||||
#
|
#
|
||||||
# * Qubes packages include some extra files under /etc/qubes-rpc, whereas
|
# * Qubes packages include some extra files under /etc/qubes-rpc, whereas
|
||||||
# regular RPM packages include the container image under
|
# regular RPM packages include the container image under
|
||||||
# /usr/share/container.tar.gz
|
# /usr/share/container.tar
|
||||||
# * Qubes packages have some extra dependencies.
|
# * Qubes packages have some extra dependencies.
|
||||||
# 3. It is best to consume this SPEC file using the `install/linux/build-rpm.py`
|
# 3. It is best to consume this SPEC file using the `install/linux/build-rpm.py`
|
||||||
# script, which handles the necessary scaffolding for building the package.
|
# script, which handles the necessary scaffolding for building the package.
|
||||||
|
|
|
@ -8,6 +8,7 @@ from pytest_subprocess import FakeProcess
|
||||||
from dangerzone import container_utils, errors
|
from dangerzone import container_utils, errors
|
||||||
from dangerzone.isolation_provider.container import Container
|
from dangerzone.isolation_provider.container import Container
|
||||||
from dangerzone.isolation_provider.qubes import is_qubes_native_conversion
|
from dangerzone.isolation_provider.qubes import is_qubes_native_conversion
|
||||||
|
from dangerzone.util import get_resource_path
|
||||||
|
|
||||||
from .base import IsolationProviderTermination, IsolationProviderTest
|
from .base import IsolationProviderTermination, IsolationProviderTest
|
||||||
|
|
||||||
|
@ -47,7 +48,7 @@ class TestContainer(IsolationProviderTest):
|
||||||
provider.is_available()
|
provider.is_available()
|
||||||
|
|
||||||
def test_install_raise_if_image_cant_be_installed(
|
def test_install_raise_if_image_cant_be_installed(
|
||||||
self, mocker: MockerFixture, provider: Container, fp: FakeProcess
|
self, provider: Container, fp: FakeProcess
|
||||||
) -> None:
|
) -> None:
|
||||||
"""When an image installation fails, an exception should be raised"""
|
"""When an image installation fails, an exception should be raised"""
|
||||||
|
|
||||||
|
@ -68,11 +69,13 @@ class TestContainer(IsolationProviderTest):
|
||||||
occurrences=2,
|
occurrences=2,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Make podman load fail
|
|
||||||
mocker.patch("gzip.open", mocker.mock_open(read_data=""))
|
|
||||||
|
|
||||||
fp.register_subprocess(
|
fp.register_subprocess(
|
||||||
[container_utils.get_runtime(), "load"],
|
[
|
||||||
|
container_utils.get_runtime(),
|
||||||
|
"load",
|
||||||
|
"-i",
|
||||||
|
get_resource_path("container.tar"),
|
||||||
|
],
|
||||||
returncode=-1,
|
returncode=-1,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -80,9 +83,13 @@ class TestContainer(IsolationProviderTest):
|
||||||
provider.install()
|
provider.install()
|
||||||
|
|
||||||
def test_install_raises_if_still_not_installed(
|
def test_install_raises_if_still_not_installed(
|
||||||
self, mocker: MockerFixture, provider: Container, fp: FakeProcess
|
self, provider: Container, fp: FakeProcess
|
||||||
) -> None:
|
) -> None:
|
||||||
"""When an image keep being not installed, it should return False"""
|
"""When an image keep being not installed, it should return False"""
|
||||||
|
fp.register_subprocess(
|
||||||
|
["podman", "version", "-f", "{{.Client.Version}}"],
|
||||||
|
stdout="4.0.0",
|
||||||
|
)
|
||||||
|
|
||||||
fp.register_subprocess(
|
fp.register_subprocess(
|
||||||
[container_utils.get_runtime(), "image", "ls"],
|
[container_utils.get_runtime(), "image", "ls"],
|
||||||
|
@ -101,10 +108,13 @@ class TestContainer(IsolationProviderTest):
|
||||||
occurrences=2,
|
occurrences=2,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Patch gzip.open and podman load so that it works
|
|
||||||
mocker.patch("gzip.open", mocker.mock_open(read_data=""))
|
|
||||||
fp.register_subprocess(
|
fp.register_subprocess(
|
||||||
[container_utils.get_runtime(), "load"],
|
[
|
||||||
|
container_utils.get_runtime(),
|
||||||
|
"load",
|
||||||
|
"-i",
|
||||||
|
get_resource_path("container.tar"),
|
||||||
|
],
|
||||||
)
|
)
|
||||||
with pytest.raises(errors.ImageNotPresentException):
|
with pytest.raises(errors.ImageNotPresentException):
|
||||||
provider.install()
|
provider.install()
|
||||||
|
@ -191,7 +201,7 @@ class TestContainer(IsolationProviderTest):
|
||||||
reason="Linux specific",
|
reason="Linux specific",
|
||||||
)
|
)
|
||||||
def test_linux_skips_desktop_version_check_returns_true(
|
def test_linux_skips_desktop_version_check_returns_true(
|
||||||
self, mocker: MockerFixture, provider: Container
|
self, provider: Container
|
||||||
) -> None:
|
) -> None:
|
||||||
assert (True, "") == provider.check_docker_desktop_version()
|
assert (True, "") == provider.check_docker_desktop_version()
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue