WIP: Reproduce in CI

Alex Pyrgiotis 2025-02-25 16:17:33 +02:00
parent 8aaebfb108
commit 6fb7d93aed
GPG key ID: B6C15EBA0357C9AA
6 changed files with 804 additions and 59 deletions


@@ -0,0 +1,211 @@
name: Release multi-arch container image
on:
workflow_dispatch:
push:
branches:
- main
- "test/**"
schedule:
- cron: "0 0 * * *" # Run every day at 00:00 UTC.
env:
REGISTRY: ghcr.io/${{ github.repository_owner }}
REGISTRY_USER: ${{ github.actor }}
REGISTRY_PASSWORD: ${{ github.token }}
IMAGE_NAME: dangerzone/dangerzone
BUILDKIT_IMAGE: "docker.io/moby/buildkit:v0.19.0@sha256:14aa1b4dd92ea0a4cd03a54d0c6079046ea98cd0c0ae6176bdd7036ba370cbbe"
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install dev. dependencies
run: |-
sudo apt-get update
sudo apt-get install -y git python3-poetry --no-install-recommends
poetry install --only package
- name: Verify that the Dockerfile matches the committed template and params
run: |-
cp Dockerfile Dockerfile.orig
make Dockerfile
diff Dockerfile.orig Dockerfile
prepare:
runs-on: ubuntu-latest
outputs:
debian_archive_date: ${{ steps.date.outputs.debian_archive_date }}
source_date_epoch: ${{ steps.date.outputs.source_date_epoch }}
steps:
- name: Get current date
id: date
run: |
DEBIAN_ARCHIVE_DATE=$(date -u +'%Y%m%d')
SOURCE_DATE_EPOCH=$(date -u -d ${DEBIAN_ARCHIVE_DATE} +"%s")
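# For example, DEBIAN_ARCHIVE_DATE=20250225 maps to midnight UTC of that
# day, i.e. SOURCE_DATE_EPOCH=1740441600.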
echo "debian_archive_date=${DEBIAN_ARCHIVE_DATE}" >> $GITHUB_OUTPUT
echo "source_date_epoch=${SOURCE_DATE_EPOCH}" >> $GITHUB_OUTPUT
build:
runs-on: ubuntu-24.04${{ matrix.platform.suffix }}
needs:
- prepare
strategy:
fail-fast: false
matrix:
platform:
- suffix: ""
name: "linux/amd64"
- suffix: "-arm"
name: "linux/arm64"
steps:
- uses: actions/checkout@v4
- name: Prepare
run: |
platform=${{ matrix.platform.name }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
- name: Build and push by digest
id: build
uses: docker/build-push-action@v6
with:
context: ./dangerzone/
file: Dockerfile
build-args: |
DEBIAN_ARCHIVE_DATE=${{ needs.prepare.outputs.debian_archive_date }}
SOURCE_DATE_EPOCH=${{ needs.prepare.outputs.source_date_epoch }}
# Remove potentially incorrect Docker provenance that cannot be
# reproduced.
provenance: false
platforms: ${{ matrix.platform.name }}
outputs: type=image,"name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}",rewrite-timestamp=true,push-by-digest=true,name-canonical=true,push=true
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
echo "Image digest is: ${digest}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge:
runs-on: ubuntu-latest
needs:
- prepare
- build
outputs:
digest: ${{ steps.image.outputs.digest }}
image: ${{ steps.image.outputs.image }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Compute image tag
id: tag
run: |
DEBIAN_ARCHIVE_DATE=${{ needs.prepare.outputs.debian_archive_date }}
TAG=$(git describe --long --first-parent | tail -c +2)
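# `git describe --long --first-parent` prints something like
# v0.8.0-149-gbf2f5ac, and `tail -c +2` drops the leading "v". With the
# Debian archive date prepended, the final tag looks like
# 20250129-0.8.0-149-gbf2f5ac.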
echo "tag=${DEBIAN_ARCHIVE_DATE}-${TAG}" >> $GITHUB_OUTPUT
- name: Download digests
uses: actions/download-artifact@v4
with:
path: ${{ runner.temp }}/digests
pattern: digests-*
merge-multiple: true
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
run: |
IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.tag }}
DIGESTS=$(printf '${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@sha256:%s ' *)
docker buildx imagetools create -t ${IMAGE} ${DIGESTS}
- name: Inspect image
id: image
run: |
# NOTE: Set the image as an output because the `env` context is not
# available to the inputs of a reusable workflow call.
image_name="${REGISTRY}/${IMAGE_NAME}"
echo "image=$image_name" >> "$GITHUB_OUTPUT"
docker buildx imagetools inspect ${image_name}:${{ steps.tag.outputs.tag }}
digest=$(docker buildx imagetools inspect ${image_name}:${{ steps.tag.outputs.tag }} --format "{{json .Manifest}}" | jq -r '.digest')
echo "digest=$digest" >> "$GITHUB_OUTPUT"
# This job calls the container workflow to generate provenance and push it to
# the container registry.
provenance:
needs:
- merge
permissions:
actions: read # for detecting the Github Actions environment.
id-token: write # for creating OIDC tokens for signing.
packages: write # for uploading attestations.
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0
with:
digest: ${{ needs.merge.outputs.digest }}
image: ${{ needs.merge.outputs.image }}
registry-username: ${{ github.actor }}
secrets:
registry-password: ${{ secrets.GITHUB_TOKEN }}
# This job ensures that the image is reproducible, by rebuilding it and
# comparing it against the published one.
check-reproducibility:
needs:
- prepare
- merge
runs-on: ubuntu-24.04${{ matrix.platform.suffix }}
strategy:
fail-fast: false
matrix:
platform:
- suffix: ""
name: "linux/amd64"
- suffix: "-arm"
name: "linux/arm64"
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Reproduce the same container image
run: |
./dev_scripts/reproduce-image.py \
--debian-archive-date ${{ needs.prepare.outputs.debian_archive_date }} \
--source ${{ needs.merge.outputs.image }}@${{ needs.merge.outputs.digest }} \
--platform ${{ matrix.platform.name }}
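
NOTE: Assuming `cosign` and `crane` are installed, the provenance that this
workflow pushes can be checked locally along these lines (the tag below is
illustrative; the flags mirror verify_attestation() in dev_scripts/registry.py
further down):

  IMAGE=ghcr.io/<owner>/dangerzone/dangerzone
  TAG=20250129-0.8.0-149-gbf2f5ac
  cosign verify-attestation \
      --type slsaprovenance \
      --certificate-oidc-issuer https://token.actions.githubusercontent.com \
      --certificate-identity-regexp "^https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@refs/tags/v[0-9]+.[0-9]+.[0-9]+$" \
      ${IMAGE?}@$(crane digest ${IMAGE?}:${TAG?})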


@@ -52,7 +52,7 @@ RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \
     && rm /root/.wget-hsts

 # Create an unprivileged user both for gVisor and for running Dangerzone.
-# XXX: Make the shadow filed "date of last password change" a constant
+# XXX: Make the shadow field "date of last password change" a constant
 # number.
 RUN addgroup --gid 1000 dangerzone
 RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \


@@ -52,9 +52,13 @@ RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \
     && rm /root/.wget-hsts

 # Create an unprivileged user both for gVisor and for running Dangerzone.
+# XXX: Make the shadow field "date of last password change" a constant
+# number.
 RUN addgroup --gid 1000 dangerzone
 RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
-    --disabled-password --home /home/dangerzone dangerzone
+    --disabled-password --home /home/dangerzone dangerzone \
+    && chage -d 99999 dangerzone \
+    && rm /etc/shadow-

 # Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
 # import it.
@@ -165,30 +169,34 @@ RUN mkdir /home/dangerzone/.containers
 # The `ln` binary, even if you specify it by its full path, cannot run
 # (probably because `ld-linux.so` can't be found). For this reason, we have
 # to create the symlinks beforehand, in a previous build stage. Then, in an
-# empty contianer image (scratch images), we can copy these symlinks and the
-# /usr, and stich everything together.
+# empty container image (scratch images), we can copy these symlinks and the
+# /usr, and stitch everything together.
 ###############################################################################

 # Create the filesystem hierarchy that will be used to symlink /usr.
 RUN mkdir -p \
     /new_root \
-    /new_root/etc \
     /new_root/root \
     /new_root/run \
     /new_root/tmp \
-    /new_root/var \
-    /new_root/home/dangerzone/dangerzone-image/rootfs \
-    /new_root/home/dangerzone/dangerzone-image/rootfs/etc \
-    /new_root/home/dangerzone/dangerzone-image/rootfs/opt \
-    /new_root/home/dangerzone/dangerzone-image/rootfs/usr
+    /new_root/home/dangerzone/dangerzone-image/rootfs
+
+# XXX: Remove /etc/resolv.conf, so that the network configuration of the host
+# does not leak.
+RUN cp -r /etc /var /new_root/ \
+    && rm /new_root/etc/resolv.conf
+RUN cp -r /etc /opt /usr /new_root/home/dangerzone/dangerzone-image/rootfs \
+    && rm /new_root/home/dangerzone/dangerzone-image/rootfs/etc/resolv.conf

 RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
 RUN ln -s usr/bin /new_root/bin
 RUN ln -s usr/lib /new_root/lib
 RUN ln -s usr/lib64 /new_root/lib64
 RUN ln -s usr/sbin /new_root/sbin
+RUN ln -s usr/bin /new_root/home/dangerzone/dangerzone-image/rootfs/bin
+RUN ln -s usr/lib /new_root/home/dangerzone/dangerzone-image/rootfs/lib
+RUN ln -s usr/lib64 /new_root/home/dangerzone/dangerzone-image/rootfs/lib64

 # Fix permissions in /home/dangerzone, so that our entrypoint script can make
 # changes in the following folders.
@@ -198,43 +206,14 @@ RUN chown dangerzone:dangerzone \
 # Fix permissions in /tmp, so that it can be used by unprivileged users.
 RUN chmod 777 /new_root/tmp

-## Intermediate image
-FROM scratch AS intermediate
+## Final image
+FROM scratch

 # Copy the filesystem hierarchy that we created in the previous stage, so that
 # /usr can be a symlink.
 COPY --from=dangerzone-image /new_root/ /

-# Copy the bare minimum to run Dangerzone in the inner container image.
-COPY --from=dangerzone-image /etc/ /home/dangerzone/dangerzone-image/rootfs/etc/
-COPY --from=dangerzone-image /opt/ /home/dangerzone/dangerzone-image/rootfs/opt/
-COPY --from=dangerzone-image /usr/ /home/dangerzone/dangerzone-image/rootfs/usr/
-RUN ln -s usr/bin /home/dangerzone/dangerzone-image/rootfs/bin
-RUN ln -s usr/lib /home/dangerzone/dangerzone-image/rootfs/lib
-RUN ln -s usr/lib64 /home/dangerzone/dangerzone-image/rootfs/lib64
-
-# Copy the bare minimum to let the security scanner find vulnerabilities.
-COPY --from=dangerzone-image /etc/ /etc/
-COPY --from=dangerzone-image /var/ /var/
-
-RUN chmod g-s \
-    /etc/ \
-    /var/ \
-    /root/ \
-    /run/ \
-    /home/dangerzone/dangerzone-image/rootfs/etc/ \
-    /home/dangerzone/dangerzone-image/rootfs/opt/ \
-    /home/dangerzone/dangerzone-image/rootfs/usr/
-
-### Final image
-#FROM scratch
-## Copy the filesystem hierarchy that we created in the previous stage, so that
-## /usr can be a symlink.
-#COPY --from=intermediate / /

 # Switch to the dangerzone user for the rest of the script.
 USER dangerzone

dev_scripts/registry.py (new file)

@@ -0,0 +1,572 @@
#!/usr/bin/python
import hashlib
import json
import platform
import re
import shutil
import subprocess
from base64 import b64decode
from pathlib import Path
from tempfile import NamedTemporaryFile
import click
import requests
try:
import platformdirs
except ImportError:
import appdirs as platformdirs
def get_config_dir() -> Path:
return Path(platformdirs.user_config_dir("dangerzone"))
SIGNATURES_PATH = get_config_dir() / "signatures"
DEFAULT_REPO = "freedomofpress/dangerzone"
DEFAULT_BRANCH = "main"
SIGSTORE_BUNDLE = "application/vnd.dev.sigstore.bundle.v0.3+json"
DOCKER_MANIFEST_DISTRIBUTION = "application/vnd.docker.distribution.manifest.v2+json"
DOCKER_MANIFEST_INDEX = "application/vnd.oci.image.index.v1+json"
OCI_IMAGE_MANIFEST = "application/vnd.oci.image.manifest.v1+json"
ACCEPT_MANIFESTS_HEADER = "application/vnd.docker.distribution.manifest.v1+json,application/vnd.docker.distribution.manifest.v1+prettyjws,application/vnd.docker.distribution.manifest.v2+json,application/vnd.oci.image.manifest.v1+json,application/vnd.docker.distribution.manifest.list.v2+json,application/vnd.oci.image.index.v1+json"
# NOTE: You can grab the SLSA attestation for an image/tag pair with the following
# commands:
#
# IMAGE=ghcr.io/apyrgio/dangerzone/dangerzone
# TAG=20250129-0.8.0-149-gbf2f5ac
# DIGEST=$(crane digest ${IMAGE?}:${TAG?})
# ATT_MANIFEST=${IMAGE?}:${DIGEST/:/-}.att
# ATT_BLOB=${IMAGE?}@$(crane manifest ${ATT_MANIFEST?} | jq -r '.layers[0].digest')
# crane blob ${ATT_BLOB?} | jq -r '.payload' | base64 -d | jq
CUE_POLICY = r"""
// The predicateType field must match this string
predicateType: "https://slsa.dev/provenance/v0.2"
predicate: {{
// This condition verifies that the builder is the builder we
// expect and trust. The following condition can be used
// unmodified. It verifies that the builder is the container
// workflow.
builder: {{
id: =~"^https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@refs/tags/v[0-9]+.[0-9]+.[0-9]+$"
}}
invocation: {{
configSource: {{
// This condition verifies the entrypoint of the workflow.
// Replace with the relative path to your workflow in your
// repository.
entryPoint: "{workflow}"
// This condition verifies that the image was generated from
// the source repository we expect. Replace this with your
// repository.
uri: =~"^git\\+{repo}@refs/heads/{branch}"
// Add a condition to check for a specific commit hash
digest: {{
sha1: "{commit}"
}}
}}
}}
}}
"""
class RegistryClient:
def __init__(self, registry, org, image):
self._registry = registry
self._org = org
self._image = image
self._auth_token = None
self._base_url = f"https://{registry}"
self._image_url = f"{self._base_url}/v2/{self._org}/{self._image}"
@property
def image(self):
return f"{self._registry}/{self._org}/{self._image}"
def get_auth_token(self):
if not self._auth_token:
auth_url = f"{self._base_url}/token"
response = requests.get(
auth_url,
params={
"service": f"{self._registry}",
"scope": f"repository:{self._org}/{self._image}:pull",
},
)
response.raise_for_status()
self._auth_token = response.json()["token"]
return self._auth_token
def get_auth_header(self):
return {"Authorization": f"Bearer {self.get_auth_token()}"}
def list_tags(self):
url = f"{self._image_url}/tags/list"
response = requests.get(url, headers=self.get_auth_header())
response.raise_for_status()
tags = response.json().get("tags", [])
return tags
def get_manifest(self, tag, extra_headers=None):
"""Get manifest information for a specific tag"""
manifest_url = f"{self._image_url}/manifests/{tag}"
headers = {
"Accept": ACCEPT_MANIFESTS_HEADER,
"Authorization": f"Bearer {self.get_auth_token()}",
}
        if extra_headers:
            headers.update(extra_headers)
response = requests.get(manifest_url, headers=headers)
response.raise_for_status()
return response
def list_manifests(self, tag):
return self.get_manifest(tag).json().get("manifests")
def get_blob(self, hash):
url = f"{self._image_url}/blobs/{hash}"
response = requests.get(
url,
headers={
"Authorization": f"Bearer {self.get_auth_token()}",
},
)
response.raise_for_status()
return response
def get_manifest_hash(self, tag, tag_manifest_content=None):
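        # A manifest digest is the SHA-256 checksum of the manifest's raw
        # bytes, so hashing the returned content reproduces the digest that
        # the registry (and cosign) use to address this image.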
if not tag_manifest_content:
tag_manifest_content = self.get_manifest(tag).content
return hashlib.sha256(tag_manifest_content).hexdigest()
def get_attestation(self, tag):
"""
Retrieve an attestation from a given tag.
The attestation needs to be attached using the Cosign Bundle
Specification defined at:
https://github.com/sigstore/cosign/blob/main/specs/BUNDLE_SPEC.md
"""
def _find_sigstore_bundle_manifest(manifests):
for manifest in manifests:
if manifest["artifactType"] == SIGSTORE_BUNDLE:
return manifest["mediaType"], manifest["digest"]
def _get_bundle_blob_digest(layers):
for layer in layers:
if layer.get("mediaType") == SIGSTORE_BUNDLE:
return layer["digest"]
tag_manifest_content = self.get_manifest(tag).content
# The attestation is available on the same container registry, with a
# specific tag named "sha256-{sha256(manifest)}"
tag_manifest_hash = self.get_manifest_hash(tag, tag_manifest_content)
# This will get us a "list" of manifests...
manifests = self.list_manifests(f"sha256-{tag_manifest_hash}.att")
# ... from which we want the sigstore bundle
bundle_manifest_mediatype, bundle_manifest_digest = (
_find_sigstore_bundle_manifest(manifests)
)
if not bundle_manifest_digest:
            raise Exception("Not able to find sigstore bundle manifest info")
bundle_manifest = self.get_manifest(
bundle_manifest_digest,
# extra_headers={"Accept": bundle_manifest_mediatype}
).json()
# From there, we will get the attestation in a blob.
# It will be the first layer listed at this manifest hash location
layers = bundle_manifest.get("layers", [])
blob_digest = _get_bundle_blob_digest(layers)
bundle = self.get_blob(blob_digest)
return tag_manifest_content, bundle.content
def _write(file, content):
file.write(content)
file.flush()
def verify_attestation(image: str, policy: str):
"""
Look up the image attestation to see if the image has been built
on Github runners, and from a given repository.
"""
# Put the value in files and verify with cosign
with NamedTemporaryFile(mode="w", suffix=".cue") as policy_f:
_write(policy_f, policy)
# Call cosign with the temporary file paths
cmd = [
"cosign",
"verify-attestation",
"--type",
"slsaprovenance",
"--policy",
policy_f.name,
"--certificate-oidc-issuer",
"https://token.actions.githubusercontent.com",
"--certificate-identity-regexp",
"^https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@refs/tags/v[0-9]+.[0-9]+.[0-9]+$",
image,
]
result = subprocess.run(cmd, capture_output=True)
if result.returncode != 0:
raise Exception(f"Attestation cannot be verified. {result.stderr}")
return True
def new_image_release():
# XXX - Implement
return True
def signature_to_bundle(sig):
# Convert cosign-download signatures to the format expected by cosign bundle.
bundle = sig["Bundle"]
payload = bundle["Payload"]
return {
"base64Signature": sig["Base64Signature"],
"Payload": sig["Payload"],
"cert": sig["Cert"],
"chain": sig["Chain"],
"rekorBundle": {
"SignedEntryTimestamp": bundle["SignedEntryTimestamp"],
"Payload": {
"body": payload["body"],
"integratedTime": payload["integratedTime"],
"logIndex": payload["logIndex"],
"logID": payload["logID"],
},
},
"RFC3161Timestamp": sig["RFC3161Timestamp"],
}
def verify_signature(signature, pubkey):
"""Verify a signature against a given public key"""
signature_bundle = signature_to_bundle(signature)
# Put the value in files and verify with cosign
with (
NamedTemporaryFile(mode="w") as signature_file,
NamedTemporaryFile(mode="bw") as payload_file,
):
json.dump(signature_bundle, signature_file)
signature_file.flush()
payload_bytes = b64decode(signature_bundle["Payload"])
_write(payload_file, payload_bytes)
cmd = [
"cosign",
"verify-blob",
"--key",
pubkey,
"--bundle",
signature_file.name,
payload_file.name,
]
result = subprocess.run(cmd, capture_output=True)
if result.returncode != 0:
# XXX Raise instead?
return False
return result.stderr == b"Verified OK\n"
def get_runtime_name() -> str:
if platform.system() == "Linux":
return "podman"
return "docker"
def container_pull(image):
    cmd = [get_runtime_name(), "pull", f"{image}"]
    # Let the runtime's own progress output through, and fail loudly if the
    # pull fails.
    subprocess.run(cmd, check=True)
def upgrade_container_image(image, tag, pubkey, registry: RegistryClient):
if not new_image_release():
return
hash = registry.get_manifest_hash(tag)
signatures = get_signatures(image, hash)
if len(signatures) < 1:
raise Exception("Unable to retrieve signatures")
print(f"Found {len(signatures)} signature(s) for {image}")
for signature in signatures:
signature_is_valid = verify_signature(signature, pubkey)
if not signature_is_valid:
raise Exception("Unable to verify signature")
print("✅ Signature is valid")
    # At this point, the signatures are verified.
    # Store them only now, so that we never persist unverified signatures.
store_signatures(signatures, hash, pubkey)
# let's upgrade the image
# XXX Use the hash here to avoid race conditions
container_pull(image)
def get_file_hash(file):
with open(file, "rb") as f:
content = f.read()
return hashlib.sha256(content).hexdigest()
def load_signatures(image_hash, pubkey):
pubkey_signatures = SIGNATURES_PATH / get_file_hash(pubkey)
if not pubkey_signatures.exists():
        msg = (
            f"Cannot find a '{pubkey_signatures}' folder. "
            "You might need to download the image signatures first."
        )
        raise Exception(msg)
with open(pubkey_signatures / f"{image_hash}.json") as f:
return json.load(f)
def store_signatures(signatures, image_hash, pubkey):
"""
Store signatures locally in the SIGNATURE_PATH folder, like this:
~/.config/dangerzone/signatures/
<pubkey-hash>
<image-hash>.json
<image-hash>.json
The format used in the `.json` file is the one of `cosign download
signature`, which differs from the "bundle" one used afterwards.
It can be converted to the one expected by cosign verify --bundle with
the `signature_to_bundle()` function.
"""
def _get_digest(sig):
payload = json.loads(b64decode(sig["Payload"]))
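        # The decoded payload is a cosign "simple signing" claim, roughly:
        #   {"critical": {"image": {"docker-manifest-digest": "sha256:..."},
        #                 "type": "cosign container image signature", ...}, ...}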
return payload["critical"]["image"]["docker-manifest-digest"]
# All the signatures should share the same hash.
hashes = list(map(_get_digest, signatures))
if len(set(hashes)) != 1:
raise Exception("Signatures do not share the same image hash")
if f"sha256:{image_hash}" != hashes[0]:
raise Exception("Signatures do not match the given image hash")
pubkey_signatures = SIGNATURES_PATH / get_file_hash(pubkey)
    pubkey_signatures.mkdir(parents=True, exist_ok=True)
with open(pubkey_signatures / f"{image_hash}.json", "w") as f:
json.dump(signatures, f)
def verify_local_image_signature(image, pubkey):
"""
Verifies that a local image has a valid signature
"""
image_hash = get_image_hash(image)
signatures = load_signatures(image_hash, pubkey)
if len(signatures) < 1:
raise Exception("No signatures found")
for signature in signatures:
if not verify_signature(signature, pubkey):
msg = f"Unable to verify signature for {image} with pubkey {pubkey}"
raise Exception(msg)
return True
def generate_cue_policy(repo, workflow, commit, branch):
return CUE_POLICY.format(repo=repo, workflow=workflow, commit=commit, branch=branch)
def get_image_hash(image):
"""
    Returns an image hash from a local image name
"""
    cmd = [get_runtime_name(), "image", "inspect", image, "-f", "{{.Digest}}"]
    result = subprocess.run(cmd, capture_output=True, check=True)
    # NOTE: Use removeprefix() and not strip(), since strip() removes
    # *characters* from both ends and would eat valid hex digits off the
    # edges of the digest.
    return result.stdout.strip().decode().removeprefix("sha256:")
def get_signatures(image, hash):
"""
    Retrieve the signatures with `cosign download signature`, and convert each
    one to the "cosign bundle" format.
"""
process = subprocess.run(
["cosign", "download", "signature", f"{image}@sha256:{hash}"],
capture_output=True,
check=True,
)
# XXX: Check the output first.
# Remove the last return, split on newlines, convert from JSON
signatures_raw = process.stdout.decode("utf-8").strip().split("\n")
return list(map(json.loads, signatures_raw))
class Image:
def __init__(self, registry, namespace, repository, tag="latest"):
self.registry = registry
self.namespace = namespace
self.repository = repository
self.tag = tag
@property
def properties(self):
return (self.registry, self.namespace, self.repository, self.tag)
@property
def name_without_tag(self):
return f"{self.registry}/{self.namespace}/{self.repository}"
@property
def name_with_tag(self):
return f"{self.name_without_tag}:{self.tag}"
@classmethod
def from_string(cls, input_string):
"""Parses container image location into (registry, namespace, repository, tag)"""
pattern = (
r"^"
r"(?P<registry>[a-zA-Z0-9.-]+)/"
r"(?P<namespace>[a-zA-Z0-9-]+)/"
r"(?P<repository>[^:]+)"
r"(?::(?P<tag>[a-zA-Z0-9.-]+))?"
r"$"
)
match = re.match(pattern, input_string)
if not match:
raise ValueError("Malformed image location")
return cls(
match.group("registry"),
match.group("namespace"),
match.group("repository"),
match.group("tag") or "latest",
)
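# For instance, "ghcr.io/freedomofpress/dangerzone/dangerzone:20250129-0.8.0"
# parses into registry="ghcr.io", namespace="freedomofpress",
# repository="dangerzone/dangerzone", tag="20250129-0.8.0"; without a tag,
# "latest" is assumed.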
def parse_image_location(string):
return Image.from_string(string).properties
@click.group()
def main():
pass
@main.command()
@click.argument("image")
@click.option("--pubkey", default="pub.key")
def upgrade_image(image, pubkey):
registry, namespace, repository, tag = parse_image_location(image)
registry_client = RegistryClient(registry, namespace, repository)
upgrade_container_image(image, tag, pubkey, registry_client)
@main.command()
@click.argument("image")
@click.option("--pubkey", default="pub.key")
def verify_local_image(image, pubkey):
    # XXX: Remove a potential :tag suffix
if verify_local_image_signature(image, pubkey):
click.echo(f"✅ The local image {image} has been signed with {pubkey}")
@main.command()
@click.argument("image")
def list_tags(image):
registry, org, package, _ = parse_image_location(image)
client = RegistryClient(registry, org, package)
tags = client.list_tags()
click.echo(f"Existing tags for {client.image}")
for tag in tags:
click.echo(tag)
@main.command()
@click.argument("image")
@click.argument("tag")
def get_manifest(image, tag):
registry, org, package, _ = parse_image_location(image)
client = RegistryClient(registry, org, package)
resp = client.get_manifest(tag, extra_headers={"Accept": OCI_IMAGE_MANIFEST})
click.echo(resp.content)
@main.command()
@click.argument("image")
@click.option(
"--commit",
required=True,
help="The Git commit the image was built from",
)
@click.option(
"--workflow",
default=".github/workflows/multi_arch_build.yml",
help="The path of the GitHub actions workflow this image was created from",
)
@click.option(
"--repo",
default=DEFAULT_REPO,
help="The github repository to check the attestation for",
)
@click.option(
"--branch",
default=DEFAULT_BRANCH,
help="The Git branch that the image was built from",
)
def attest(image: str, commit: str, workflow: str, repo: str, branch: str):
"""
Look up the image attestation to see if the image has been built
on Github runners, and from a given repository.
"""
if shutil.which("cosign") is None:
click.echo("The cosign binary is needed but not installed.")
raise click.Abort()
registry, org, package, tag = parse_image_location(image)
tag = tag or "latest"
full_repo = f"https://github.com/{repo}"
policy = generate_cue_policy(full_repo, workflow, commit, branch)
verified = verify_attestation(image, policy)
if verified:
click.echo(
f"🎉 Successfully verified image '{image}' and its associated claims:"
)
click.echo(f"- ✅ SLSA Level 3 provenance")
click.echo(f"- ✅ GitHub repo: {repo}")
click.echo(f"- ✅ GitHub actions workflow: {workflow}")
click.echo(f"- ✅ Git branch: {branch}")
click.echo(f"- ✅ Git commit: {commit}")
if __name__ == "__main__":
main()
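
A few illustrative invocations, assuming the script's dependencies
(click/requests) are installed and cosign is on the PATH (tag and commit
below are examples):

  ./dev_scripts/registry.py list-tags ghcr.io/freedomofpress/dangerzone/dangerzone
  ./dev_scripts/registry.py attest \
      ghcr.io/freedomofpress/dangerzone/dangerzone:20250129-0.8.0-149-gbf2f5ac \
      --commit bf2f5ac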


@@ -95,7 +95,6 @@ def diffoci_diff(runtime, source, local_target, platform=None):
         "diff",
         source,
         target,
-        "--semantic",
         "--verbose",
         *platform_args,
     )
@@ -112,13 +111,11 @@ def build_image(
     platform=None,
     runtime=None,
     date=None,
-    buildx=False
 ):
     """Build the Dangerzone container image with a special tag."""
     platform_args = [] if not platform else ["--platform", platform]
     runtime_args = [] if not runtime else ["--runtime", runtime]
     date_args = [] if not date else ["--debian-archive-date", date]
-    buildx_args = [] if not buildx else ["--buildx"]
     run(
         "python3",
         "./install/common/build-image.py",
@@ -128,7 +125,6 @@ def build_image(
         *date_args,
         *platform_args,
         *runtime_args,
-        *buildx_args,
         "--tag",
         tag,
     )
@@ -144,11 +140,6 @@ def parse_args():
         prog=sys.argv[0],
         description="Dev script for verifying container image reproducibility",
     )
-    parser.add_argument(
-        "--buildx",
-        action="store_true",
-        help="Use the buildx platform of Docker or Podman",
-    )
     parser.add_argument(
         "--platform",
         default=None,
@@ -215,7 +206,6 @@ def main():
         args.platform,
         args.runtime,
         args.debian_archive_date,
-        args.buildx,
     )
     logger.info(


@@ -77,13 +77,6 @@ def main():
         default=str(Path("share") / "container.tar"),
         help="Path to store the container image",
     )
-    # parser.add_argument(
-    #     "--compress-level",
-    #     type=int,
-    #     choices=range(0, 10),
-    #     default=9,
-    #     help="The Gzip compression level, from 0 (lowest) to 9 (highest, default)",
-    # )
     parser.add_argument(
         "--use-cache",
         type=str2bool,
@@ -130,7 +123,7 @@ def main():
     subprocess.run(
         [
-            "./dev_scripts/repro-build",
+            "./dev_scripts/repro-build",  # FIXME: Add issue for un-vendoring this file.
             "build",
             "--runtime",
             args.runtime,