Mirror of https://github.com/freedomofpress/dangerzone.git (synced 2025-05-15 17:51:50 +02:00)

Compare commits: 2bd9535ae3...d7c01e755a (12 commits)
Commits (SHA1; author and date columns were not preserved):
d7c01e755a
cede19ca90
f1702ab560
bb66e9a2e9
094d876dba
5f4f82a7a4
c0ff351a2e
8ca1357a41
ab0bee5688
af8ba74294
93b8bb0444
2e2b6cf308
10 changed files with 301 additions and 249 deletions
.github/workflows/build-push-image.yml (vendored, new file, 244 lines)

@@ -0,0 +1,244 @@
name: Build and push multi-arch container image

on:
  workflow_call:
    inputs:
      registry:
        required: true
        type: string
      registry_user:
        required: true
        type: string
      image_name:
        required: true
        type: string
      reproduce:
        required: true
        type: boolean
    secrets:
      registry_token:
        required: true

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install dev. dependencies
        run: |-
          sudo apt-get update
          sudo apt-get install -y git python3-poetry --no-install-recommends
          poetry install --only package

      - name: Verify that the Dockerfile matches the commited template and params
        run: |-
          cp Dockerfile Dockerfile.orig
          make Dockerfile
          diff Dockerfile.orig Dockerfile

  prepare:
    runs-on: ubuntu-latest
    outputs:
      debian_archive_date: ${{ steps.params.outputs.debian_archive_date }}
      source_date_epoch: ${{ steps.params.outputs.source_date_epoch }}
      image: ${{ steps.params.outputs.full_image_name }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Compute image parameters
        id: params
        run: |
          source Dockerfile.env
          DEBIAN_ARCHIVE_DATE=$(date -u +'%Y%m%d')
          SOURCE_DATE_EPOCH=$(date -u -d ${DEBIAN_ARCHIVE_DATE} +"%s")
          TAG=${DEBIAN_ARCHIVE_DATE}-$(git describe --long --first-parent | tail -c +2)
          FULL_IMAGE_NAME=${{ inputs.registry }}/${{ inputs.image_name }}:${TAG}

          echo "debian_archive_date=${DEBIAN_ARCHIVE_DATE}" >> $GITHUB_OUTPUT
          echo "source_date_epoch=${SOURCE_DATE_EPOCH}" >> $GITHUB_OUTPUT
          echo "tag=${DEBIAN_ARCHIVE_DATE}-${TAG}" >> $GITHUB_OUTPUT
          echo "full_image_name=${FULL_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "buildkit_image=${BUILDKIT_IMAGE}" >> $GITHUB_OUTPUT

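For reference, a rough Python sketch of what the "Compute image parameters" step derives from the current UTC date and git describe. The step itself runs in shell inside the workflow; the function below and its name are illustrative only.

# Illustrative re-implementation of the shell in the "Compute image parameters" step.
import subprocess
from datetime import datetime, timezone

def compute_image_params(registry: str, image_name: str) -> dict:
    debian_archive_date = datetime.now(timezone.utc).strftime("%Y%m%d")
    # Midnight UTC of the archive date, matching `date -u -d <date> +%s`.
    source_date_epoch = int(
        datetime.strptime(debian_archive_date, "%Y%m%d")
        .replace(tzinfo=timezone.utc)
        .timestamp()
    )
    describe = subprocess.run(
        ["git", "describe", "--long", "--first-parent"],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    # Drop the leading "v" from the describe output, like `tail -c +2`.
    tag = f"{debian_archive_date}-{describe[1:]}"
    return {
        "debian_archive_date": debian_archive_date,
        "source_date_epoch": source_date_epoch,
        "tag": tag,
        "full_image_name": f"{registry}/{image_name}:{tag}",
    }
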
  build:
    name: Build ${{ matrix.platform.name }} image
    runs-on: ${{ matrix.platform.runs-on }}
    needs:
      - prepare
    strategy:
      fail-fast: false
      matrix:
        platform:
          - runs-on: "ubuntu-24.04"
            name: "linux/amd64"
          - runs-on: "ubuntu-24.04-arm"
            name: "linux/arm64"
    steps:
      - uses: actions/checkout@v4

      - name: Prepare
        run: |
          platform=${{ matrix.platform.name }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ inputs.registry_user }}
          password: ${{ secrets.registry_token }}

      # Instructions for reproducibly building a container image are taken from:
      # https://github.com/freedomofpress/repro-build?tab=readme-ov-file#build-and-push-a-container-image-on-github-actions
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: image=${{ needs.prepare.outputs.buildkit_image }}

      - name: Build and push by digest
        id: build
        uses: docker/build-push-action@v6
        with:
          context: ./dangerzone/
          file: Dockerfile
          build-args: |
            DEBIAN_ARCHIVE_DATE=${{ needs.prepare.outputs.debian_archive_date }}
            SOURCE_DATE_EPOCH=${{ needs.prepare.outputs.source_date_epoch }}
          provenance: false
          outputs: type=image,"name=${{ inputs.registry }}/${{ inputs.image_name }}",push-by-digest=true,push=true,rewrite-timestamp=true,name-canonical=true
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Export digest
        run: |
          mkdir -p ${{ runner.temp }}/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "${{ runner.temp }}/digests/${digest#sha256:}"
          echo "Image digest is: ${digest}"

      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM_PAIR }}
          path: ${{ runner.temp }}/digests/*
          if-no-files-found: error
          retention-days: 1

  merge:
    runs-on: ubuntu-latest
    needs:
      - prepare # implied by build, but required here to access image params
      - build
    outputs:
      digest_root: ${{ steps.image.outputs.digest_root }}
      digest_amd64: ${{ steps.image.outputs.digest_amd64 }}
      digest_arm64: ${{ steps.image.outputs.digest_arm64 }}
    steps:
      - uses: actions/checkout@v4

      - name: Download digests
        uses: actions/download-artifact@v4
        with:
          path: ${{ runner.temp }}/digests
          pattern: digests-*
          merge-multiple: true

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ inputs.registry_user }}
          password: ${{ secrets.registry_token }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: image=${{ env.BUILDKIT_IMAGE }}

      - name: Create manifest list and push
        working-directory: ${{ runner.temp }}/digests
        run: |
          DIGESTS=$(printf '${{ needs.prepare.outputs.image }}@sha256:%s ' *)
          docker buildx imagetools create -t ${{ needs.prepare.outputs.image }} ${DIGESTS}

      - name: Inspect image
        id: image
        run: |
          # Inspect the image
          docker buildx imagetools inspect ${{ needs.prepare.outputs.image }}
          docker buildx imagetools inspect ${{ needs.prepare.outputs.image }} --format "{{json .Manifest}}" > manifest

          # Calculate and print the digests
          digest_root=$(jq -r .digest manifest)
          digest_amd64=$(jq -r .manifests[0].digest manifest)
          digest_arm64=$(jq -r .manifests[1].digest manifest)

          echo "The image digests are:"
          echo " Root: $digest_root"
          echo " linux/amd64: $digest_amd64"
          echo " linux/arm64: $digest_arm64"

          # NOTE: Set the digests as an output because the `env` context is not
          # available to the inputs of a reusable workflow call.
          echo "digest_root=$digest_root" >> "$GITHUB_OUTPUT"
          echo "digest_amd64=$digest_amd64" >> "$GITHUB_OUTPUT"
          echo "digest_arm64=$digest_arm64" >> "$GITHUB_OUTPUT"

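For reference, a Python sketch of the digest extraction that the "Inspect image" step performs with jq on the manifest JSON written by docker buildx imagetools inspect. Like the step above, it assumes amd64 is the first manifest entry and arm64 the second; the function is illustrative only, not project code.

# Illustrative equivalent of the jq calls in the "Inspect image" step.
import json

def extract_digests(manifest_path: str = "manifest") -> dict:
    with open(manifest_path) as f:
        manifest = json.load(f)
    # Assumes the same manifest ordering as the jq calls above.
    return {
        "root": manifest["digest"],
        "amd64": manifest["manifests"][0]["digest"],
        "arm64": manifest["manifests"][1]["digest"],
    }
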
  # This step calls the container workflow to generate provenance and push it to
  # the container registry.
  provenance:
    needs:
      - prepare # implied by merge, but required here to access image params
      - merge
    strategy:
      matrix:
        manifest_type:
          - root
          - amd64
          - arm64
    permissions:
      actions: read # for detecting the Github Actions environment.
      id-token: write # for creating OIDC tokens for signing.
      packages: write # for uploading attestations.
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0
    with:
      digest: ${{ needs.merge.outputs[format('digest_{0}', matrix.digest)] }}
      image: ${{ needs.prepare.outputs.image }}
      registry-username: ${{ inputs.registry_user }}
    secrets:
      registry-password: ${{ secrets.registry_token }}

  # This step ensures that the image is reproducible
  check-reproducibility:
    if: ${{ inputs.reproduce }}
    needs:
      - prepare # implied by merge, but required here to access image params
      - merge
    runs-on: ${{ matrix.platform.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        platform:
          - runs-on: "ubuntu-24.04"
            name: "amd64"
          - runs-on: "ubuntu-24.04-arm"
            name: "arm64"
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Reproduce the same container image
        run: |
          ./dev_scripts/reproduce-image.py \
            --runtime \
            docker \
            --debian-archive-date \
            ${{ needs.prepare.outputs.debian_archive_date }} \
            --platform \
            linux/${{ matrix.platform.name }} \
            ${{ needs.merge.outputs[format('digest_{0}', matrix.platform.name)] }}
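The same reproducibility check can be run outside CI by calling the script with the arguments the step above passes. A minimal sketch; the archive date and digest below are placeholders, not values from this changeset.

# Hypothetical local invocation mirroring the "Reproduce the same container image" step.
import subprocess

digest = "sha256:<per-platform image digest>"  # placeholder
subprocess.run(
    [
        "./dev_scripts/reproduce-image.py",
        "--runtime", "docker",
        "--debian-archive-date", "20250101",  # placeholder
        "--platform", "linux/amd64",
        digest,
    ],
    check=True,
)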
.github/workflows/release-container-image.yml (vendored, 229 lines changed)

@@ -9,229 +9,14 @@ on:
  schedule:
    - cron: "0 0 * * *" # Run every day at 00:00 UTC.

env:
  REGISTRY: ghcr.io/${{ github.repository_owner }}
  REGISTRY_USER: ${{ github.actor }}
  REGISTRY_PASSWORD: ${{ github.token }}
  IMAGE_NAME: dangerzone/dangerzone
  BUILDKIT_IMAGE: "docker.io/moby/buildkit:v19.0@sha256:14aa1b4dd92ea0a4cd03a54d0c6079046ea98cd0c0ae6176bdd7036ba370cbbe"

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install dev. dependencies
        run: |-
          sudo apt-get update
          sudo apt-get install -y git python3-poetry --no-install-recommends
          poetry install --only package

      - name: Verify that the Dockerfile matches the commited template and params
        run: |-
          cp Dockerfile Dockerfile.orig
          make Dockerfile
          diff Dockerfile.orig Dockerfile

  prepare:
    runs-on: ubuntu-latest
    outputs:
      debian_archive_date: ${{ steps.params.outputs.debian_archive_date }}
      source_date_epoch: ${{ steps.params.outputs.source_date_epoch }}
      image: ${{ steps.params.outputs.full_image_name }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Compute image parameters
        id: params
        run: |
          DEBIAN_ARCHIVE_DATE=$(date -u +'%Y%m%d')
          SOURCE_DATE_EPOCH=$(date -u -d ${DEBIAN_ARCHIVE_DATE} +"%s")
          TAG=${DEBIAN_ARCHIVE_DATE}-$(git describe --long --first-parent | tail -c +2)
          FULL_IMAGE_NAME=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${TAG}

          echo "debian_archive_date=${DEBIAN_ARCHIVE_DATE}" >> $GITHUB_OUTPUT
          echo "source_date_epoch=${SOURCE_DATE_EPOCH}" >> $GITHUB_OUTPUT
          echo "tag=${DEBIAN_ARCHIVE_DATE}-${TAG}" >> $GITHUB_OUTPUT
          echo "full_image_name=${FULL_IMAGE_NAME}" >> $GITHUB_OUTPUT

  build:
    name: Build ${{ matrix.platform.name }} image
    runs-on: ubuntu-24.04${{ matrix.platform.suffix }}
    needs:
      - prepare
    strategy:
      fail-fast: false
      matrix:
        platform:
          - suffix: ""
            name: "linux/amd64"
          - suffix: "-arm"
            name: "linux/arm64"
    steps:
      - uses: actions/checkout@v4

      - name: Prepare
        run: |
          platform=${{ matrix.platform.name }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Instructions for reproducibly building a container image are taken from:
      # https://github.com/freedomofpress/repro-build?tab=readme-ov-file#build-and-push-a-container-image-on-github-actions
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: image=${{ env.BUILDKIT_IMAGE }}

      - name: Build and push by digest
        id: build
        uses: docker/build-push-action@v6
        with:
          context: ./dangerzone/
          file: Dockerfile
          build-args: |
            DEBIAN_ARCHIVE_DATE=${{ needs.prepare.outputs.debian_archive_date }}
            SOURCE_DATE_EPOCH=${{ needs.prepare.outputs.source_date_epoch }}
          provenance: false
          outputs: type=image,"name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}",push-by-digest=true,push=true,rewrite-timestamp=true,name-canonical=true
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Export digest
        run: |
          mkdir -p ${{ runner.temp }}/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "${{ runner.temp }}/digests/${digest#sha256:}"
          echo "Image digest is: ${digest}"

      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM_PAIR }}
          path: ${{ runner.temp }}/digests/*
          if-no-files-found: error
          retention-days: 1

  merge:
    runs-on: ubuntu-latest
    needs:
      - prepare
      - build
    outputs:
      digest_root: ${{ steps.image.outputs.digest_root }}
      digest_amd64: ${{ steps.image.outputs.digest_amd64 }}
      digest_arm64: ${{ steps.image.outputs.digest_arm64 }}
    steps:
      - uses: actions/checkout@v4

      - name: Download digests
        uses: actions/download-artifact@v4
        with:
          path: ${{ runner.temp }}/digests
          pattern: digests-*
          merge-multiple: true

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: image=${{ env.BUILDKIT_IMAGE }}

      - name: Create manifest list and push
        working-directory: ${{ runner.temp }}/digests
        run: |
          DIGESTS=$(printf '${{ needs.prepare.outputs.image }}@sha256:%s ' *)
          docker buildx imagetools create -t ${{ needs.prepare.outputs.image }} ${DIGESTS}

      - name: Inspect image
        id: image
        run: |
          # Inspect the image
          docker buildx imagetools inspect ${{ needs.prepare.outputs.image }}
          docker buildx imagetools inspect ${{ needs.prepare.outputs.image }} --format "{{json .Manifest}}" > manifest

          # Calculate and print the digests
          digest_root=$(jq -r .digest manifest)
          digest_amd64=$(jq -r .manifests[0].digest manifest)
          digest_arm64=$(jq -r .manifests[1].digest manifest)

          echo "The image digests are:"
          echo " Root: $digest_root"
          echo " linux/amd64: $digest_amd64"
          echo " linux/arm64: $digest_arm64"

          # NOTE: Set the digests as an output because the `env` context is not
          # available to the inputs of a reusable workflow call.
          echo "digest_root=$digest_root" >> "$GITHUB_OUTPUT"
          echo "digest_amd64=$digest_amd64" >> "$GITHUB_OUTPUT"
          echo "digest_arm64=$digest_arm64" >> "$GITHUB_OUTPUT"

  # This step calls the container workflow to generate provenance and push it to
  # the container registry.
  provenance:
    needs:
      - prepare
      - merge
    strategy:
      matrix:
        digest:
          - root
          - amd64
          - arm64
    permissions:
      actions: read # for detecting the Github Actions environment.
      id-token: write # for creating OIDC tokens for signing.
      packages: write # for uploading attestations.
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0
  build-push-image:
    uses: ./.github/workflows/build-push-image.yml
    with:
      digest: ${{ needs.merge.outputs[format('digest_{0}', matrix.digest)] }}
      image: ${{ needs.prepare.outputs.image }}
      registry-username: ${{ github.actor }}
      registry: ghcr.io/${{ github.repository_owner }}
      registry_user: ${{ github.actor }}
      image_name: dangerzone/dangerzone
      reproduce: true
    secrets:
      registry-password: ${{ secrets.GITHUB_TOKEN }}

  # This step ensures that the image is reproducible
  check-reproducibility:
    needs:
      - prepare
      - merge
    runs-on: ubuntu-24.04${{ matrix.platform.suffix }}
    strategy:
      fail-fast: false
      matrix:
        platform:
          - suffix: ""
            name: "amd64"
          - suffix: "-arm"
            name: "arm64"
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Reproduce the same container image
        run: |
          ./dev_scripts/reproduce-image.py \
            --runtime \
            docker \
            --debian-archive-date \
            ${{ needs.prepare.outputs.debian_archive_date }} \
            --platform \
            linux/${{ matrix.platform.name }} \
            ${{ needs.merge.outputs[format('digest_{0}', matrix.platform.name)] }}
      registry_token: ${{ secrets.GITHUB_TOKEN }}
@@ -182,8 +182,11 @@ RUN mkdir -p \
    /new_root/tmp \
    /new_root/home/dangerzone/dangerzone-image/rootfs

# XXX: Remove /etc/resolv.conf, so that the network configuration of the host
# does not leak.
# Copy the /etc and /var directories under the new root directory. Also,
# copy /etc/, /opt, and /usr to the Dangerzone image rootfs.
#
# XXX: We also have to remove the resolv.conf file, in order to not leak any DNS
# servers added there during image build time.
RUN cp -r /etc /var /new_root/ \
    && rm /new_root/etc/resolv.conf
RUN cp -r /etc /opt /usr /new_root/home/dangerzone/dangerzone-image/rootfs \
@@ -7,3 +7,7 @@ GVISOR_ARCHIVE_DATE=20250217
# Can be bumped to the latest version and checksum from https://github.com/ebandal/H2Orestart/releases
H2ORESTART_CHECKSUM=452331f8603ef456264bd72db6fa8a11ca72b392019a8135c0b2f3095037d7b1
H2ORESTART_VERSION=v0.7.1

# Buildkit image (taken from freedomofpress/repro-build)
BUILDKIT_IMAGE="docker.io/moby/buildkit:v19.0@sha256:14aa1b4dd92ea0a4cd03a54d0c6079046ea98cd0c0ae6176bdd7036ba370cbbe"
BUILDKIT_IMAGE_ROOTLESS="docker.io/moby/buildkit:v0.19.0-rootless@sha256:e901cffdad753892a7c3afb8b9972549fca02c73888cf340c91ed801fdd96d71"
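The prepare job in build-push-image.yml picks up BUILDKIT_IMAGE by sourcing Dockerfile.env in shell. For illustration only, a sketch of reading the same pinned values from Python; this is not project code.

# Sketch: read KEY=VALUE pairs from Dockerfile.env, ignoring comments.
def read_dockerfile_env(path: str = "Dockerfile.env") -> dict:
    values = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            key, _, value = line.partition("=")
            values[key] = value.strip('"')
    return values

buildkit_image = read_dockerfile_env()["BUILDKIT_IMAGE"]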
@@ -182,8 +182,11 @@ RUN mkdir -p \
    /new_root/tmp \
    /new_root/home/dangerzone/dangerzone-image/rootfs

# XXX: Remove /etc/resolv.conf, so that the network configuration of the host
# does not leak.
# Copy the /etc and /var directories under the new root directory. Also,
# copy /etc/, /opt, and /usr to the Dangerzone image rootfs.
#
# XXX: We also have to remove the resolv.conf file, in order to not leak any DNS
# servers added there during image build time.
RUN cp -r /etc /var /new_root/ \
    && rm /new_root/etc/resolv.conf
RUN cp -r /etc /opt /usr /new_root/home/dangerzone/dangerzone-image/rootfs \
@@ -128,23 +128,21 @@ def get_expected_tag() -> str:
def load_image_tarball() -> None:
    log.info("Installing Dangerzone container image...")
    tarball_path = get_resource_path("container.tar")
    with open(tarball_path) as f:
        try:
            res = subprocess.run(
                [get_runtime(), "load"],
                stdin=f,
                startupinfo=get_subprocess_startupinfo(),
                capture_output=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            if e.stderr:
                error = e.stderr.decode()
            else:
                error = "No output"
            raise errors.ImageInstallationException(
                f"Could not install container image: {error}"
            )
    try:
        res = subprocess.run(
            [get_runtime(), "load", "-i", tarball_path],
            startupinfo=get_subprocess_startupinfo(),
            capture_output=True,
            check=True,
        )
    except subprocess.CalledProcessError as e:
        if e.stderr:
            error = e.stderr.decode()
        else:
            error = "No output"
        raise errors.ImageInstallationException(
            f"Could not install container image: {error}"
        )

    # Loading an image built with Buildkit in Podman 3.4 messes up its name. The tag
    # somehow becomes the name of the loaded image [1].
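The change above switches from piping the tarball over stdin to passing its path with "load -i". A minimal standalone sketch of the new call, with "podman" standing in for get_runtime() and a placeholder tarball path; illustrative only.

# Sketch of loading an image tarball by path instead of via stdin.
import subprocess

tarball_path = "share/container.tar"  # placeholder path
subprocess.run(
    ["podman", "load", "-i", tarball_path],
    capture_output=True,
    check=True,
)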
@@ -130,7 +130,7 @@ def is_qubes_native_conversion() -> bool:
        # This disambiguates if it is running a Qubes targetted build or not
        # (Qubes-specific builds don't ship the container image)

        compressed_container_path = get_resource_path("container.tar")
        return not os.path.exists(compressed_container_path)
        container_image_path = get_resource_path("container.tar")
        return not os.path.exists(container_image_path)
    else:
        return False
@@ -220,7 +220,22 @@ def oci_normalize_path(path):


def oci_get_file_from_tarball(tar: tarfile.TarFile, path: str) -> dict:
    return
    """Get file from an OCI tarball.

    If the filename cannot be found, search again by prefixing it with "./", since we
    have encountered path names in OCI tarballs prefixed with "./".
    """
    try:
        return tar.extractfile(path).read().decode()
    except KeyError:
        if not path.startswith("./") and not path.startswith("/"):
            path = "./" + path
            try:
                return tar.extractfile(path).read().decode()
            except KeyError:
                # Do not raise here, so that we can raise the original exception below.
                pass
        raise


def oci_parse_manifest(tar: tarfile.TarFile, path: str, platform: dict | None) -> dict:
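The helper added above retries the lookup with a "./" prefix because some OCI tarballs store member names that way. A small standalone sketch of the same fallback pattern, using hypothetical tarball and member names; illustrative only, not project code.

# Sketch of the "./"-prefix fallback when reading a member from an OCI tarball.
import tarfile

def read_member(tar_path: str, member: str) -> str:
    with tarfile.open(tar_path) as tar:
        try:
            return tar.extractfile(member).read().decode()
        except KeyError:
            # Some OCI tarballs prefix member names with "./".
            return tar.extractfile("./" + member).read().decode()

contents = read_member("image.tar", "index.json")  # hypothetical names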
@ -231,7 +246,7 @@ def oci_parse_manifest(tar: tarfile.TarFile, path: str, platform: dict | None) -
|
|||
carry it from the previous manifest and include in the info here.
|
||||
"""
|
||||
path = oci_normalize_path(path)
|
||||
contents = tar.extractfile(path).read().decode()
|
||||
contents = oci_get_file_from_tarball(tar, path)
|
||||
digest = "sha256:" + hashlib.sha256(contents.encode()).hexdigest()
|
||||
contents_dict = json.loads(contents)
|
||||
media_type = get_key(contents_dict, "mediaType")
|
|
@@ -102,7 +102,7 @@ def main():
        f"Check that the reproduced image has the expected digest: {args.digest}"
    )
    run(
        "./dev_scripts/repro-build",
        "./dev_scripts/repro-build.py",
        "analyze",
        "--show-contents",
        "share/container.tar",
@@ -122,7 +122,7 @@ def main():

    subprocess.run(
        [
            "./dev_scripts/repro-build",
            "./dev_scripts/repro-build.py",
            "build",
            "--runtime",
            args.runtime,