Mirror of https://github.com/freedomofpress/dangerzone.git, synced 2025-04-28 09:52:37 +02:00
Compare commits
313 commits
134 changed files with 7953 additions and 3652 deletions
@@ -1,622 +0,0 @@

```yaml
version: 2.1

aliases:
  - &install-podman
    name: Install Podman in Ubuntu Focal
    command: ./install/linux/install-podman-ubuntu-focal.sh

  # FIXME: Remove the following step once we drop Ubuntu Focal support. The
  # python-all dependency is an artificial requirement due to an stdeb bug
  # prior to v0.9.1. See:
  #
  # * https://github.com/astraw/stdeb/issues/153
  # * https://github.com/freedomofpress/dangerzone/issues/292#issuecomment-1349967888
  - &install-python-all
    name: Install python-all package
    command: |
      export DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true
      apt-get update
      apt-get install -y python-all

  - &install-dependencies-deb
    name: Install dependencies (deb)
    command: |
      export DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true
      apt-get update
      apt-get install -y dh-python python3 python3-stdeb

  - &install-dependencies-rpm
    name: Install dependencies (rpm)
    command: |
      dnf install -y rpm-build python3 python3-devel python3-poetry-core pipx
      pipx install poetry

  - &build-deb
    name: Build the .deb package
    command: |
      ./install/linux/build-deb.py
      ls -lh deb_dist/

  - &build-rpm
    name: Build the .rpm package
    command: |
      PATH=/root/.local/bin:$PATH ./install/linux/build-rpm.py
      ls -lh dist/

  - &build-rpm-qubes
    name: Build the Qubes .rpm package
    command: |
      PATH=/root/.local/bin:$PATH ./install/linux/build-rpm.py --qubes
      ls -lh dist/

  - &calculate-cache-key
    name: Calculating container cache key
    command: |
      mkdir -p /caches/
      cd dangerzone/conversion/
      cat common.py doc_to_pixels.py pixels_to_pdf.py | sha1sum | cut -d' ' -f1 > /caches/cache-id.txt
      cd ../../

  - &restore-cache
    key: v1-{{ checksum "Dockerfile" }}-{{ checksum "/caches/cache-id.txt" }}
    paths:
      - /caches/container.tar.gz
      - /caches/image-id.txt

  - &copy-image
    name: Copy container image into package
    command: |
      cp /caches/container.tar.gz share/
      cp /caches/image-id.txt share/

jobs:
  run-lint:
    docker:
      - image: debian:bookworm
    resource_class: small
    steps:
      - checkout
      - run:
          name: Install dev. dependencies
          # Install only the necessary packages to run our linters.
          #
          # We run poetry with --no-ansi, to sidestep a Poetry bug that
          # currently exists in 1.3. See:
          # https://github.com/freedomofpress/dangerzone/issues/292#issuecomment-1351368122
          command: |
            apt-get update
            apt-get install -y git make python3 python3-poetry --no-install-recommends
            poetry install --no-ansi --only lint,test
      - run:
          name: Run linters to enforce code style
          command: poetry run make lint
      - run:
          name: Check that the QA script is up to date with the docs
          command: ./dev_scripts/qa.py --check-refs

  build-container-image:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman
      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      # setup_remote_docker
      - run:
          name: Build Dangerzone image
          command: |
            if [ -f "/caches/container.tar.gz" ]; then
              echo "Already cached, skipping"
            else
              sudo pip3 install poetry
              python3 ./install/common/build-image.py
            fi
      - run:
          name: Save Dangerzone image and image-id.txt to cache
          command: |
            if [ -f "/caches/container.tar.gz" ]; then
              echo "Already cached, skipping"
            else
              mkdir -p /caches
              podman save -o /caches/container.tar dangerzone.rocks/dangerzone
              gzip -f /caches/container.tar
              podman image ls dangerzone.rocks/dangerzone | grep "dangerzone.rocks/dangerzone" | tr -s ' ' | cut -d' ' -f3 > /caches/image-id.txt
            fi
      - run: *calculate-cache-key
      - save_cache:
          key: v1-{{ checksum "Dockerfile" }}-{{ checksum "/caches/cache-id.txt" }}
          paths:
            - /caches/container.tar.gz
            - /caches/image-id.txt

  convert-test-docs:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman
      - run:
          name: Install poetry dependencies
          command: |
            sudo pip3 install poetry
            # This flag is important, due to an open upstream Poetry issue:
            # https://github.com/python-poetry/poetry/issues/7184
            poetry install --no-ansi
      - run:
          name: Install test dependencies
          command: |
            sudo apt-get install -y libqt5gui5 libxcb-cursor0 --no-install-recommends
      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run:
          name: run automated tests
          command: |
            poetry run make test

  ci-ubuntu-noble:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro ubuntu --version 24.04 build-dev

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro ubuntu --version 24.04 run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  ci-ubuntu-mantic:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro ubuntu --version 23.10 build-dev

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro ubuntu --version 23.10 run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  ci-ubuntu-jammy:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro ubuntu --version 22.04 build-dev

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro ubuntu --version 22.04 run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  ci-ubuntu-focal:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro ubuntu --version 20.04 build-dev

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro ubuntu --version 20.04 run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  ci-fedora-40:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro fedora --version 40 build-dev

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro fedora --version 40 run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  ci-fedora-39:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro fedora --version 39 build-dev

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro fedora --version 39 run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  ci-debian-trixie:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro debian --version trixie build-dev

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro debian --version trixie run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  ci-debian-bookworm:
    machine:
      image: ubuntu-2004:202111-01
    steps:
      - checkout
      - run: *install-podman

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro debian --version bookworm build-dev

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro debian --version bookworm run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  # NOTE: Making CI tests work in Debian Bullseye requires some tip-toeing
  # around certain Podman issues, as you'll see below. Read the following for
  # more details:
  #
  # https://github.com/freedomofpress/dangerzone/issues/388
  ci-debian-bullseye:
    machine:
      image: ubuntu-2204:2023.04.2
    steps:
      - checkout
      - run: *install-podman
      - run:
          name: Configure Podman for Ubuntu 22.04
          command: |
            # This config circumvents the following issues:
            # * https://github.com/containers/podman/issues/6368
            # * https://github.com/containers/podman/issues/10987
            mkdir -p ~/.config/containers
            cat > ~/.config/containers/containers.conf <<EOF
            [engine]
            cgroup_manager="cgroupfs"
            events_logger="file"
            EOF

      - run:
          name: Prepare cache directory
          command: |
            sudo mkdir -p /caches
            sudo chown -R $USER:$USER /caches
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image

      - run:
          name: Prepare Dangerzone environment
          command: |
            ./dev_scripts/env.py --distro debian --version bullseye build-dev

      - run:
          name: Configure Podman for Debian Bullseye
          command: |
            # Copy the Podman config into the container image we created for the
            # Dangerzone environment.
            cp ~/.config/containers/containers.conf containers.conf
            cat > Dockerfile.bullseye <<EOF
            FROM dangerzone.rocks/build/debian:bullseye-backports
            RUN mkdir -p /home/user/.config/containers
            COPY containers.conf /home/user/.config/containers/
            EOF

            # Create a new image from the Dangerzone environment and re-tag it.
            podman build -t dangerzone.rocks/build/debian:bullseye-backports \
                -f Dockerfile.bullseye .

      - run:
          name: Run CI tests
          command: |
            ./dev_scripts/env.py --distro debian --version bullseye run --dev \
                bash -c 'cd dangerzone; poetry run make test'

  build-ubuntu-noble:
    docker:
      - image: ubuntu:24.04
    resource_class: medium+
    steps:
      - run: *install-dependencies-deb
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-deb

  build-ubuntu-mantic:
    docker:
      - image: ubuntu:23.10
    resource_class: medium+
    steps:
      - run: *install-dependencies-deb
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-deb

  build-ubuntu-jammy:
    docker:
      - image: ubuntu:22.04
    resource_class: medium+
    steps:
      - run: *install-dependencies-deb
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-deb

  build-ubuntu-focal:
    docker:
      - image: ubuntu:20.04
    resource_class: medium+
    steps:
      - run: *install-dependencies-deb
      - run: *install-python-all
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-deb

  build-debian-trixie:
    docker:
      - image: debian:trixie
    resource_class: medium+
    steps:
      - run: *install-dependencies-deb
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-deb

  build-debian-bookworm:
    docker:
      - image: debian:bookworm
    resource_class: medium+
    steps:
      - run: *install-dependencies-deb
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-deb

  build-debian-bullseye:
    docker:
      - image: debian:bullseye
    resource_class: medium+
    steps:
      - run: *install-dependencies-deb
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-deb

  build-fedora-40:
    docker:
      - image: fedora:40
    resource_class: medium+
    steps:
      - run: *install-dependencies-rpm
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-rpm
      - run: *build-rpm-qubes

  build-fedora-39:
    docker:
      - image: fedora:39
    resource_class: medium+
    steps:
      - run: *install-dependencies-rpm
      - checkout
      - run: *calculate-cache-key
      - restore_cache: *restore-cache
      - run: *copy-image
      - run: *build-rpm
      - run: *build-rpm-qubes

workflows:
  version: 2

  build:
    jobs:
      - run-lint
      - build-container-image
      - convert-test-docs:
          requires:
            - build-container-image
      - ci-ubuntu-noble:
          requires:
            - build-container-image
      - ci-ubuntu-mantic:
          requires:
            - build-container-image
      - ci-ubuntu-jammy:
          requires:
            - build-container-image
      - ci-ubuntu-focal:
          requires:
            - build-container-image
      - ci-debian-trixie:
          requires:
            - build-container-image
      - ci-debian-bookworm:
          requires:
            - build-container-image
      - ci-debian-bullseye:
          requires:
            - build-container-image
      - ci-fedora-40:
          requires:
            - build-container-image
      - ci-fedora-39:
          requires:
            - build-container-image
      # FIXME: Currently disabled because `stdeb` does not work with Python
      # 3.12, which is the default in Ubuntu Noble. See also:
      # https://github.com/freedomofpress/dangerzone/issues/773
      #
      #- build-ubuntu-noble:
      #    requires:
      #      - build-container-image
      - build-ubuntu-mantic:
          requires:
            - build-container-image
      - build-ubuntu-jammy:
          requires:
            - build-container-image
      - build-ubuntu-focal:
          requires:
            - build-container-image
      - build-debian-bullseye:
          requires:
            - build-container-image
      - build-debian-trixie:
          requires:
            - build-container-image
      - build-debian-bookworm:
          requires:
            - build-container-image
      - build-fedora-40:
          requires:
            - build-container-image
      - build-fedora-39:
          requires:
            - build-container-image
```
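The `&calculate-cache-key` alias above keys the container cache on the content of the conversion scripts rather than on a branch or date. A minimal sketch of reproducing the same id locally from a repository checkout (printing to stdout instead of the CI's /caches directory):

```shell
# Same pipeline as the &calculate-cache-key alias: a SHA-1 over the
# concatenated conversion scripts, i.e. the files that define the
# container's behavior.
cd dangerzone/conversion/
cat common.py doc_to_pixels.py pixels_to_pdf.py | sha1sum | cut -d' ' -f1
cd ../../
# CircleCI then prefixes this with a Dockerfile checksum:
#   v1-{{ checksum "Dockerfile" }}-{{ checksum "/caches/cache-id.txt" }}
```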
1 .gitattributes vendored

@@ -1,4 +1,5 @@

```
* text=auto
*.py text eol=lf
*.jpg -text
*.gif -text
*.png -text
```
67 .github/ISSUE_TEMPLATE/bug_report_linux.yml vendored (new file)

@@ -0,0 +1,67 @@

````yaml
name: Bug Report (Linux)
description: File a bug report for Linux.
labels: ["bug", "triage"]
projects: ["freedomofpress/dangerzone"]
body:
  - type: markdown
    attributes:
      value: |
        Hi, and thanks for taking the time to open this bug report.
  - type: textarea
    id: what-happened
    attributes:
      label: What happened?
      description: What was the expected behaviour, and what was the actual behaviour? Can you specify the steps you followed, so that we can reproduce?
      placeholder: "A bug happened!"
    validations:
      required: true
  - type: textarea
    id: os-version
    attributes:
      label: Linux distribution
      description: |
        What is the name and version of your Linux distribution? You can find it out with `cat /etc/os-release`
      placeholder: Ubuntu 22.04.5 LTS
    validations:
      required: true
  - type: textarea
    id: dangerzone-version
    attributes:
      label: Dangerzone version
      description: Which version of Dangerzone are you using?
    validations:
      required: true
  - type: textarea
    id: podman-info
    attributes:
      label: Podman info
      description: |
        Please copy and paste the following commands in your terminal, and provide us with the output:

        ```shell
        podman version
        podman info -f 'json'
        podman images
        podman run hello-world
        ```

        This will be automatically formatted into code, so no need for backticks.
      render: shell
  - type: textarea
    id: logs
    attributes:
      label: Document conversion logs
      description: |
        If the bug occurs during document conversion, we'd like some logs from this process. Please copy and paste the following commands in your terminal, and provide us with the output (replace `/path/to/file` with the path to your document):

        ```bash
        dangerzone-cli /path/to/file
        ```

      render: shell
  - type: textarea
    id: additional-info
    attributes:
      label: Additional info
      description: |
        Please provide us with any additional info, such as logs, extra content, that may help us debug this issue.
````
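The template above asks reporters to run four Podman commands by hand. A small convenience sketch that collects the same diagnostics into a single file (the output file name is only an example):

```shell
# Gather the diagnostics requested by the Linux bug report template.
{
  podman version
  podman info -f 'json'
  podman images
  podman run hello-world
} > podman-diagnostics.txt 2>&1
```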
82 .github/ISSUE_TEMPLATE/bug_report_macos.yml vendored (new file)

@@ -0,0 +1,82 @@

````yaml
name: Bug Report (MacOS)
description: File a bug report for MacOS.
labels: ["bug", "triage"]
projects: ["freedomofpress/dangerzone"]
body:
  - type: markdown
    attributes:
      value: |
        Hi, and thanks for taking the time to open this bug report.
  - type: textarea
    id: what-happened
    attributes:
      label: What happened?
      description: What was the expected behaviour, and what was the actual behaviour? Can you specify the steps you followed, so that we can reproduce?
      placeholder: "A bug happened!"
    validations:
      required: true
  - type: textarea
    id: os-version
    attributes:
      label: operating system version
      description: Which version of MacOS do you use? You can follow [this link](https://support.apple.com/en-us/109033) to find out more.
      placeholder: macOS Sequoia 15
    validations:
      required: true
  - type: dropdown
    id: proc-architecture
    attributes:
      label: Processor type
      description: |
        Which kind of processor do you use?

        You can follow [this link](https://support.apple.com/en-us/109033) to find out more.
      options:
        - Intel
        - Apple Silicon
    validations:
      required: true
  - type: textarea
    id: dangerzone-version
    attributes:
      label: Dangerzone version
      description: Which version of Dangerzone are you using?
    validations:
      required: true
  - type: textarea
    id: docker-info
    attributes:
      label: Docker info
      description: |
        Please copy and paste the following commands in your
        terminal, and provide us with the output:

        ```shell
        docker version
        docker info -f 'json'
        docker images
        docker run hello-world
        ```

        This will be automatically formatted into code, so no need for backticks.
      render: shell
  - type: textarea
    id: logs
    attributes:
      label: Document conversion logs
      description: |
        If the bug occurs during document conversion, we'd like some logs from this process. Please copy and paste the following commands in your terminal, and provide us with the output (replace `/path/to/file` with the path to your document):

        ```bash
        /Applications/Dangerzone.app/Contents/MacOS/dangerzone-cli /path/to/file
        ```

      render: shell
  - type: textarea
    id: additional-info
    attributes:
      label: Additional info
      description: |
        Please provide us with any additional info, such as logs, extra content, that may help us debug this issue.
````
67 .github/ISSUE_TEMPLATE/bug_report_windows.yml vendored (new file)

@@ -0,0 +1,67 @@

````yaml
name: Bug Report (Windows)
description: File a bug report for Windows.
labels: ["bug", "triage"]
projects: ["freedomofpress/dangerzone"]
body:
  - type: markdown
    attributes:
      value: |
        Hi, and thanks for taking the time to open this bug report.
  - type: textarea
    id: what-happened
    attributes:
      label: What happened?
      description: What was the expected behaviour, and what was the actual behaviour? Can you specify the steps you followed, so that we can reproduce?
      placeholder: "A bug happened!"
    validations:
      required: true
  - type: textarea
    id: os-version
    attributes:
      label: operating system version
      description: |
        Which version of Windows do you use? Follow [this link](https://learn.microsoft.com/en-us/windows/client-management/client-tools/windows-version-search) to find out.
    validations:
      required: true
  - type: textarea
    id: dangerzone-version
    attributes:
      label: Dangerzone version
      description: Which version of Dangerzone are you using?
    validations:
      required: true
  - type: textarea
    id: docker-info
    attributes:
      label: Docker info
      description: |
        Please copy and paste the following commands in your
        terminal, and provide us with the output:

        ```shell
        docker version
        docker info -f 'json'
        docker images
        docker run hello-world
        ```

        This will be automatically formatted into code, so no need for backticks.
      render: shell
  - type: textarea
    id: logs
    attributes:
      label: Document conversion logs
      description: |
        If the bug occurs during document conversion, we'd like some logs from this process. Please copy and paste the following commands in your terminal, and provide us with the output (replace `\path\to\file` with the path to your document):

        ```bash
        'C:\Program Files (x86)\Dangerzone\dangerzone-cli.exe' \path\to\file
        ```

      render: shell
  - type: textarea
    id: additional-info
    attributes:
      label: Additional info
      description: |
        Please provide us with any additional info, such as logs, extra content, that may help us debug this issue.
````
1 .github/ISSUE_TEMPLATE/config.yml vendored (new file)

@@ -0,0 +1 @@

```yaml
blank_issues_enabled: true
```
21 .github/ISSUE_TEMPLATE/feature_request.md vendored (new file)

@@ -0,0 +1,21 @@

```markdown
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---

**What is the feature you think should be a good addition to Dangerzone?**

?

**Is your feature request related to a problem? Please describe.**

It's always useful for us to know more about your context, and why you think
this would be a great addition. Don't hesitate to put some details about your
current workflow and how this could be useful to you.

**Additional context**

Add any other context or screenshots about the feature request here.
```
248 .github/workflows/build-push-image.yml vendored (new file)

@@ -0,0 +1,248 @@

```yaml
name: Build and push multi-arch container image

on:
  workflow_call:
    inputs:
      registry:
        required: true
        type: string
      registry_user:
        required: true
        type: string
      image_name:
        required: true
        type: string
      reproduce:
        required: true
        type: boolean
    secrets:
      registry_token:
        required: true


jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install dev. dependencies
        run: |-
          sudo apt-get update
          sudo apt-get install -y git python3-poetry --no-install-recommends
          poetry install --only package

      - name: Verify that the Dockerfile matches the committed template and params
        run: |-
          cp Dockerfile Dockerfile.orig
          make Dockerfile
          diff Dockerfile.orig Dockerfile

  prepare:
    runs-on: ubuntu-latest
    outputs:
      debian_archive_date: ${{ steps.params.outputs.debian_archive_date }}
      source_date_epoch: ${{ steps.params.outputs.source_date_epoch }}
      image: ${{ steps.params.outputs.full_image_name }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Compute image parameters
        id: params
        run: |
          source Dockerfile.env
          DEBIAN_ARCHIVE_DATE=$(date -u +'%Y%m%d')
          SOURCE_DATE_EPOCH=$(date -u -d ${DEBIAN_ARCHIVE_DATE} +"%s")
          TAG=${DEBIAN_ARCHIVE_DATE}-$(git describe --long --first-parent | tail -c +2)
          FULL_IMAGE_NAME=${{ inputs.registry }}/${{ inputs.image_name }}:${TAG}

          echo "debian_archive_date=${DEBIAN_ARCHIVE_DATE}" >> $GITHUB_OUTPUT
          echo "source_date_epoch=${SOURCE_DATE_EPOCH}" >> $GITHUB_OUTPUT
          echo "tag=${DEBIAN_ARCHIVE_DATE}-${TAG}" >> $GITHUB_OUTPUT
          echo "full_image_name=${FULL_IMAGE_NAME}" >> $GITHUB_OUTPUT
          echo "buildkit_image=${BUILDKIT_IMAGE}" >> $GITHUB_OUTPUT

  build:
    name: Build ${{ matrix.platform.name }} image
    runs-on: ${{ matrix.platform.runs-on }}
    needs:
      - prepare
    outputs:
      debian_archive_date: ${{ needs.prepare.outputs.debian_archive_date }}
      source_date_epoch: ${{ needs.prepare.outputs.source_date_epoch }}
      image: ${{ needs.prepare.outputs.image }}
    strategy:
      fail-fast: false
      matrix:
        platform:
          - runs-on: "ubuntu-24.04"
            name: "linux/amd64"
          - runs-on: "ubuntu-24.04-arm"
            name: "linux/arm64"
    steps:
      - uses: actions/checkout@v4

      - name: Prepare
        run: |
          platform=${{ matrix.platform.name }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ inputs.registry_user }}
          password: ${{ secrets.registry_token }}

      # Instructions for reproducibly building a container image are taken from:
      # https://github.com/freedomofpress/repro-build?tab=readme-ov-file#build-and-push-a-container-image-on-github-actions
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: image=${{ needs.prepare.outputs.buildkit_image }}

      - name: Build and push by digest
        id: build
        uses: docker/build-push-action@v6
        with:
          context: ./dangerzone/
          file: Dockerfile
          build-args: |
            DEBIAN_ARCHIVE_DATE=${{ needs.prepare.outputs.debian_archive_date }}
            SOURCE_DATE_EPOCH=${{ needs.prepare.outputs.source_date_epoch }}
          provenance: false
          outputs: type=image,"name=${{ inputs.registry }}/${{ inputs.image_name }}",push-by-digest=true,push=true,rewrite-timestamp=true,name-canonical=true
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Export digest
        run: |
          mkdir -p ${{ runner.temp }}/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "${{ runner.temp }}/digests/${digest#sha256:}"
          echo "Image digest is: ${digest}"

      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM_PAIR }}
          path: ${{ runner.temp }}/digests/*
          if-no-files-found: error
          retention-days: 1

  merge:
    runs-on: ubuntu-latest
    needs:
      - build
    outputs:
      debian_archive_date: ${{ needs.build.outputs.debian_archive_date }}
      source_date_epoch: ${{ needs.build.outputs.source_date_epoch }}
      image: ${{ needs.build.outputs.image }}
      digest_root: ${{ steps.image.outputs.digest_root }}
      digest_amd64: ${{ steps.image.outputs.digest_amd64 }}
      digest_arm64: ${{ steps.image.outputs.digest_arm64 }}
    steps:
      - uses: actions/checkout@v4

      - name: Download digests
        uses: actions/download-artifact@v4
        with:
          path: ${{ runner.temp }}/digests
          pattern: digests-*
          merge-multiple: true

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ inputs.registry_user }}
          password: ${{ secrets.registry_token }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: image=${{ env.BUILDKIT_IMAGE }}

      - name: Create manifest list and push
        working-directory: ${{ runner.temp }}/digests
        run: |
          DIGESTS=$(printf '${{ needs.build.outputs.image }}@sha256:%s ' *)
          docker buildx imagetools create -t ${{ needs.build.outputs.image }} ${DIGESTS}

      - name: Inspect image
        id: image
        run: |
          # Inspect the image
          docker buildx imagetools inspect ${{ needs.build.outputs.image }}
          docker buildx imagetools inspect ${{ needs.build.outputs.image }} --format "{{json .Manifest}}" > manifest

          # Calculate and print the digests
          digest_root=$(jq -r .digest manifest)
          digest_amd64=$(jq -r '.manifests[] | select(.platform.architecture=="amd64") | .digest' manifest)
          digest_arm64=$(jq -r '.manifests[] | select(.platform.architecture=="arm64") | .digest' manifest)

          echo "The image digests are:"
          echo "  Root: $digest_root"
          echo "  linux/amd64: $digest_amd64"
          echo "  linux/arm64: $digest_arm64"

          # NOTE: Set the digests as an output because the `env` context is not
          # available to the inputs of a reusable workflow call.
          echo "digest_root=$digest_root" >> "$GITHUB_OUTPUT"
          echo "digest_amd64=$digest_amd64" >> "$GITHUB_OUTPUT"
          echo "digest_arm64=$digest_arm64" >> "$GITHUB_OUTPUT"

  # This step calls the container workflow to generate provenance and push it to
  # the container registry.
  provenance:
    needs:
      - merge
    strategy:
      matrix:
        manifest_type:
          - root
          - amd64
          - arm64
    permissions:
      actions: read # for detecting the Github Actions environment.
      id-token: write # for creating OIDC tokens for signing.
      packages: write # for uploading attestations.
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
    with:
      digest: ${{ needs.merge.outputs[format('digest_{0}', matrix.manifest_type)] }}
      image: ${{ needs.merge.outputs.image }}
      registry-username: ${{ inputs.registry_user }}
    secrets:
      registry-password: ${{ secrets.registry_token }}

  # This step ensures that the image is reproducible
  check-reproducibility:
    if: ${{ inputs.reproduce }}
    needs:
      - merge
    runs-on: ${{ matrix.platform.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        platform:
          - runs-on: "ubuntu-24.04"
            name: "amd64"
          - runs-on: "ubuntu-24.04-arm"
            name: "arm64"
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Reproduce the same container image
        run: |
          ./dev_scripts/reproduce-image.py \
              --runtime \
              docker \
              --debian-archive-date \
              ${{ needs.merge.outputs.debian_archive_date }} \
              --platform \
              linux/${{ matrix.platform.name }} \
              ${{ needs.merge.outputs[format('digest_{0}', matrix.platform.name)] }}
```
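The `merge` job's digest extraction can also be run by hand against any pushed tag, which is handy when checking what the `provenance` job will attest. A sketch, assuming `IMAGE` points at a real multi-arch tag (the value below is a placeholder):

```shell
# Inspect a multi-arch manifest list the same way the merge job does.
IMAGE=ghcr.io/example-org/dangerzone:20250101-0.0.1  # placeholder tag
docker buildx imagetools inspect "$IMAGE" --format "{{json .Manifest}}" > manifest

jq -r .digest manifest  # root (manifest list) digest
jq -r '.manifests[] | select(.platform.architecture=="amd64") | .digest' manifest
jq -r '.manifests[] | select(.platform.architecture=="arm64") | .digest' manifest
```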
98 .github/workflows/build.yml vendored (new file)

@@ -0,0 +1,98 @@

```yaml
name: Build dev environments
on:
  pull_request:
  push:
    branches:
      - main
      - "test/**"
  schedule:
    - cron: "0 0 * * *" # Run every day at 00:00 UTC.

permissions:
  packages: write

env:
  IMAGE_REGISTRY: ghcr.io/${{ github.repository_owner }}
  REGISTRY_USER: ${{ github.actor }}
  REGISTRY_PASSWORD: ${{ github.token }}

# Each day, build and publish to ghcr.io:
#
# - the dangerzone/dangerzone container image
# - the dangerzone/build/{debian,ubuntu,fedora}:version
#   dev environments used to run the tests
#
# End-user environments are not published to the GHCR because
# they need .rpm or .deb files to be built, which is what we
# want to test.

jobs:
  build-dev-environment:
    name: "Build dev-env (${{ matrix.distro }}-${{ matrix.version }})"
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - distro: ubuntu
            version: "22.04"
          - distro: ubuntu
            version: "24.04"
          - distro: ubuntu
            version: "24.10"
          - distro: ubuntu
            version: "25.04"
          - distro: debian
            version: bullseye
          - distro: debian
            version: bookworm
          - distro: debian
            version: trixie
          - distro: fedora
            version: "40"
          - distro: fedora
            version: "41"
          - distro: fedora
            version: "42"

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"

      - name: Login to GHCR
        run: |
          echo ${{ github.token }} | podman login ghcr.io -u USERNAME --password-stdin

      - name: Build dev environment
        run: |
          ./dev_scripts/env.py --distro ${{ matrix.distro }} \
              --version ${{ matrix.version }} \
              build-dev --sync

  build-container-image:
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Get current date
        id: date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Cache container image
        id: cache-container-image
        uses: actions/cache@v4
        with:
          key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
          path: |
            share/container.tar
            share/image-id.txt

      - name: Build Dangerzone image
        if: ${{ steps.cache-container-image.outputs.cache-hit != 'true' }}
        run: |
          python3 ./install/common/build-image.py
```
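The cache key above changes daily (the date step) and whenever any build input changes (the `hashFiles()` call). A rough local analogue for predicting cache misses; `sha256sum` over the concatenated files only approximates `hashFiles()`, which hashes each file separately:

```shell
# Approximate the workflow's container-image cache key locally.
DATE=$(date +'%Y-%m-%d')
HASH=$(cat Dockerfile dangerzone/conversion/*.py \
    dangerzone/container_helpers/* install/common/build-image.py \
    | sha256sum | cut -d' ' -f1)
echo "v5-${DATE}-${HASH}"
```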
@@ -1,6 +1,7 @@

```yaml
name: Check branch conformity
on:
  push:
  pull_request:
    types: ["opened", "labeled", "unlabeled", "reopened", "synchronize"]

jobs:
  prevent-fixup-commits:
```

@@ -17,3 +18,13 @@ jobs:

```yaml
          git fetch origin
          git status
          git log --pretty=format:%s origin/main..HEAD | grep -ie '^fixup\|^wip' && exit 1 || true

  check-changelog:
    runs-on: ubuntu-latest
    name: Ensure CHANGELOG.md is populated for user-visible changes
    steps:
      # Pin the GitHub action to a specific commit that we have audited and know
      # how it works.
      - uses: tarides/changelog-check-action@509965da3b8ac786a5e2da30c2ccf9661189121f
        with:
          changelog: CHANGELOG.md
```
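The fixup/WIP check can be mirrored locally so that offending commits never reach CI. A hypothetical `.git/hooks/pre-push` sketch reusing the same `git log | grep` pipeline:

```shell
#!/bin/sh
# Refuse to push if any commit subject since origin/main starts with
# "fixup" or "wip" (same pattern the CI job greps for).
git fetch origin
if git log --pretty=format:%s origin/main..HEAD | grep -qie '^fixup\|^wip'; then
  echo "error: fixup!/WIP commits found; squash them before pushing" >&2
  exit 1
fi
```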
58 .github/workflows/check_repos.yml vendored

@@ -9,6 +9,7 @@ name: Test official instructions for installing Dangerzone

```yaml
on:
  schedule:
    - cron: '0 0 * * *' # Run every day at 00:00 UTC.
  workflow_dispatch:

jobs:
  install-from-apt-repo:
```

@@ -18,14 +19,14 @@

```yaml
    strategy:
      matrix:
        include:
          - distro: ubuntu
            version: "25.04" # plucky
          - distro: ubuntu
            version: "24.10" # oracular
          - distro: ubuntu
            version: "24.04" # noble
          - distro: ubuntu
            version: "23.10" # mantic
          - distro: ubuntu
            version: "22.04" # jammy
          - distro: ubuntu
            version: "20.04" # focal
          - distro: debian
            version: "trixie" # 13
          - distro: debian
```

@@ -33,28 +34,30 @@

```yaml
          - distro: debian
            version: "11" # bullseye
    steps:
      - name: Add Podman repo for Ubuntu Focal
        if: matrix.distro == 'ubuntu' && matrix.version == 20.04
        run: |
          apt-get update && apt-get -y install curl wget gnupg2
          . /etc/os-release
          sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' \
              > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
          wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_${VERSION_ID}/Release.key -O- \
              | apt-key add -
          apt update
          apt-get install python-all -y

      - name: Add GPG key for the packages.freedom.press
      - name: Add packages.freedom.press PGP key (gpg)
        if: matrix.version != 'trixie'
        run: |
          apt-get update && apt-get install -y gnupg2 ca-certificates
          dirmngr # NOTE: This is a command that's necessary only in containers
          # The key needs to be in the GPG keybox database format so the
          # signing subkey is detected by apt-secure.
          gpg --keyserver hkps://keys.openpgp.org \
              --no-default-keyring --keyring ./fpf-apt-tools-archive-keyring.gpg \
              --recv-keys "DE28 AB24 1FA4 8260 FAC9 B8BA A7C9 B385 2260 4281"
          mkdir -p /etc/apt/keyrings/
          mv fpf-apt-tools-archive-keyring.gpg /etc/apt/keyrings
          mv ./fpf-apt-tools-archive-keyring.gpg /etc/apt/keyrings/.

      - name: Add packages.freedom.press PGP key (sq)
        if: matrix.version == 'trixie'
        run: |
          apt-get update && apt-get install -y ca-certificates sq
          mkdir -p /etc/apt/keyrings/
          # On debian trixie, apt-secure uses `sqv` to verify the signatures
          # so we need to retrieve PGP keys and store them using the base64 format.
          sq network keyserver \
              --server hkps://keys.openpgp.org \
              search "DE28 AB24 1FA4 8260 FAC9 B8BA A7C9 B385 2260 4281" \
              --output /etc/apt/keyrings/fpf-apt-tools-archive-keyring.gpg
      - name: Add packages.freedom.press to our APT sources
        run: |
          . /etc/os-release
```

@@ -74,15 +77,28 @@

```yaml
    strategy:
      matrix:
        include:
          - distro: fedora
            version: 39
          - distro: fedora
            version: 40
          - distro: fedora
            version: 41
          - distro: fedora
            version: 42
    steps:
      - name: Add packages.freedom.press to our YUM sources
        run: |
          dnf install -y 'dnf-command(config-manager)'
          dnf config-manager --add-repo=https://packages.freedom.press/yum-tools-prod/dangerzone/dangerzone.repo
          dnf-3 config-manager --add-repo=https://packages.freedom.press/yum-tools-prod/dangerzone/dangerzone.repo

      - name: Replace 'rawhide' string with Fedora version
        # The previous command has created a `dangerzone.repo` file. The
        # config-manager plugin should have substituted the $releasever variable
        # with the Fedora version number. However, for unreleased Fedora
        # versions, this gets translated to "rawhide", even though they do have
        # a number. To fix this, we need to substitute the "rawhide" string
        # with the proper Fedora version.
        run: |
          source /etc/os-release
          sed -i "s/rawhide/${VERSION_ID}/g" /etc/yum.repos.d/dangerzone.repo

      - name: Install Dangerzone
        # FIXME: We add the `-y` flag here, in lieu of a better way to check the
```
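Between fetching the key and moving it into /etc/apt/keyrings/, one can additionally confirm that the keyring really contains the published key. This verification step is not part of the workflow, just a sketch using standard gpg options:

```shell
# Show the fingerprints in the freshly fetched keyring and compare them
# against the published one.
gpg --no-default-keyring \
    --keyring ./fpf-apt-tools-archive-keyring.gpg \
    --fingerprint "DE28 AB24 1FA4 8260 FAC9 B8BA A7C9 B385 2260 4281"
```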
445
.github/workflows/ci.yml
vendored
445
.github/workflows/ci.yml
vendored
|
@ -1,111 +1,273 @@
|
|||
name: Tests
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- "test/**"
|
||||
schedule:
|
||||
- cron: '0 0 * * *' # Run every day at 00:00 UTC.
|
||||
- cron: "2 0 * * *" # Run every day at 02:00 UTC.
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
packages: write
|
||||
|
||||
env:
|
||||
REGISTRY_USER: ${{ github.actor }}
|
||||
REGISTRY_PASSWORD: ${{ github.token }}
|
||||
IMAGE_REGISTRY: ghcr.io/${{ github.repository_owner }}
|
||||
QT_SELECT: "qt6"
|
||||
|
||||
# Disable multiple concurrent runs on the same branch
|
||||
# When a new CI build is triggered, it will cancel the
|
||||
# other in-progress ones (for the same branch)
|
||||
concurrency:
|
||||
group: ${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
run-lint:
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: debian:bookworm
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install dev. dependencies
|
||||
run: |-
|
||||
apt-get update
|
||||
apt-get install -y git make python3 python3-poetry --no-install-recommends
|
||||
poetry install --only lint,test
|
||||
- name: Run linters to enforce code style
|
||||
run: poetry run make lint
|
||||
- name: Check that the QA script is up to date with the docs
|
||||
run: "./dev_scripts/qa.py --check-refs"
|
||||
|
||||
# This is already built daily by the "build.yml" file
|
||||
# But we also want to include this in the checks that run on each push.
|
||||
build-container-image:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Get current date
|
||||
id: date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache container image
|
||||
id: cache-container-image
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
|
||||
path: |-
|
||||
share/container.tar
|
||||
share/image-id.txt
|
||||
|
||||
- name: Build Dangerzone container image
|
||||
if: ${{ steps.cache-container-image.outputs.cache-hit != 'true' }}
|
||||
run: |
|
||||
python3 ./install/common/build-image.py
|
||||
|
||||
- name: Upload container image
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: container.tar
|
||||
path: share/container.tar
|
||||
|
||||
download-tessdata:
|
||||
name: Download and cache Tesseract data
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Cache Tessdata
|
||||
id: cache-tessdata
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: share/tessdata/
|
||||
key: v1-tessdata-${{ hashFiles('./install/common/download-tessdata.py') }}
|
||||
enableCrossOsArchive: true
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
- name: Download Tessdata
|
||||
run: |-
|
||||
if [ -f "share/tessdata" ]; then
|
||||
echo "Already cached, skipping"
|
||||
else
|
||||
python3 ./install/common/download-tessdata.py
|
||||
fi
|
||||
|
||||
windows:
|
||||
runs-on: windows-latest
|
||||
needs:
|
||||
- download-tessdata
|
||||
env:
|
||||
DUMMY_CONVERSION: 1
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.12'
|
||||
python-version: "3.12"
|
||||
- run: pip install poetry
|
||||
- run: poetry install
|
||||
- name: Restore cached tessdata
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: share/tessdata/
|
||||
enableCrossOsArchive: true
|
||||
fail-on-cache-miss: true
|
||||
key: v1-tessdata-${{ hashFiles('./install/common/download-tessdata.py') }}
|
||||
- name: Run CLI tests
|
||||
run: poetry run make test
|
||||
# Taken from: https://github.com/orgs/community/discussions/27149#discussioncomment-3254829
|
||||
- name: Set path for candle and light
|
||||
run: echo "C:\Program Files (x86)\WiX Toolset v3.14\bin" >> $GITHUB_PATH
|
||||
shell: bash
|
||||
- name: Set up .NET CLI environment
|
||||
uses: actions/setup-dotnet@v4
|
||||
with:
|
||||
dotnet-version: "8.x"
|
||||
- name: Install WiX Toolset
|
||||
run: dotnet tool install --global wix --version 5.0.2
|
||||
- name: Add WiX UI extension
|
||||
run: wix extension add --global WixToolset.UI.wixext/5.0.2
|
||||
- name: Build the MSI installer
|
||||
# NOTE: This also builds the .exe internally.
|
||||
run: poetry run .\install\windows\build-app.bat
|
||||
- name: Upload MSI installer
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: Dangerzone.msi
|
||||
path: "dist/Dangerzone.msi"
|
||||
if-no-files-found: error
|
||||
compression-level: 0
|
||||
|
||||
  macOS:
    name: "macOS (${{ matrix.arch }})"
    runs-on: ${{ matrix.runner }}
    needs:
      - download-tessdata
    strategy:
      matrix:
        include:
          - runner: macos-latest # CPU type: Apple Silicon (M1)
            arch: arch64
          - runner: macos-13 # CPU type: Intel x86_64
            arch: x86_64
    env:
      DUMMY_CONVERSION: 1
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - name: Restore cached tessdata
        uses: actions/cache/restore@v4
        with:
          path: share/tessdata/
          enableCrossOsArchive: true
          fail-on-cache-miss: true
          key: v1-tessdata-${{ hashFiles('./install/common/download-tessdata.py') }}
      - run: pip install poetry
      - run: poetry install
      - name: Run CLI tests
        run: poetry run make test

      - name: Build macOS app
        run: poetry run python ./install/macos/build-app.py
      - name: Upload macOS app
        uses: actions/upload-artifact@v4
        with:
          name: Dangerzone-${{ matrix.arch }}.app
          path: "dist/Dangerzone.app"
          if-no-files-found: error
          compression-level: 0

  build-deb:
    needs:
      - build-container-image
    name: "build-deb (${{ matrix.distro }} ${{ matrix.version }})"
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - distro: ubuntu
            version: "22.04"
          - distro: ubuntu
            version: "24.04"
          - distro: ubuntu
            version: "24.10"
          - distro: ubuntu
            version: "25.04"
          - distro: debian
            version: bullseye
          - distro: debian
            version: bookworm
          - distro: debian
            version: trixie
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"

      - name: Login to GHCR
        run: |
          echo ${{ github.token }} | podman login ghcr.io -u USERNAME --password-stdin

      - name: Get the dev environment
        run: |
          ./dev_scripts/env.py \
              --distro ${{ matrix.distro }} \
              --version ${{ matrix.version }} \
              build-dev --sync

      - name: Get current date
        id: date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Restore container cache
        uses: actions/cache/restore@v4
        with:
          key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
          path: |-
            share/container.tar
            share/image-id.txt
          fail-on-cache-miss: true

      - name: Build Dangerzone .deb
        run: |
          ./dev_scripts/env.py --distro ${{ matrix.distro }} \
              --version ${{ matrix.version }} \
              run --dev --no-gui ./dangerzone/install/linux/build-deb.py

      - name: Upload Dangerzone .deb
        if: matrix.distro == 'debian' && matrix.version == 'bookworm'
        uses: actions/upload-artifact@v4
        with:
          name: dangerzone.deb
          path: "deb_dist/dangerzone_*_*.deb"
          if-no-files-found: error
          compression-level: 0

  install-deb:
    name: "install-deb (${{ matrix.distro }} ${{ matrix.version }})"
    runs-on: ubuntu-latest
    needs:
      - build-deb
    strategy:
      matrix:
        include:
          - distro: ubuntu
            version: "22.04"
          - distro: ubuntu
            version: "24.04"
          - distro: ubuntu
            version: "24.10"
          - distro: ubuntu
            version: "25.04"
          - distro: debian
            version: bullseye
          - distro: debian
            version: bookworm
          - distro: debian
            version: trixie

    steps:

@@ -114,7 +276,7 @@ jobs:

      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"

      - name: Download Dangerzone .deb
        uses: actions/download-artifact@v4

@@ -122,41 +284,17 @@ jobs:
          name: dangerzone.deb
          path: "deb_dist/"

      - name: Build end-user environment
        run: |
          ./dev_scripts/env.py --distro ${{ matrix.distro }} \
              --version ${{ matrix.version }} \
              build

      - name: Run a test command
        run: |
          ./dev_scripts/env.py --distro ${{ matrix.distro }} \
              --version ${{ matrix.version }} \
              run dangerzone-cli dangerzone/tests/test_docs/sample-pdf.pdf --ocr-lang eng

      - name: Check that the Dangerzone GUI imports work
        run: |
@@ -165,33 +303,55 @@ jobs:
          run dangerzone --help

  build-install-rpm:
    name: "build-install-rpm (${{ matrix.distro }} ${{ matrix.version }})"
    runs-on: ubuntu-latest
    needs:
      - build-container-image
    strategy:
      matrix:
        distro: ["fedora"]
        version: ["40", "41", "42"]
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Login to GHCR
        run: |
          echo ${{ github.token }} | podman login ghcr.io -u USERNAME --password-stdin

      - name: Get the dev environment
        run: |
          ./dev_scripts/env.py \
              --distro ${{ matrix.distro }} \
              --version ${{ matrix.version }} \
              build-dev --sync

      - name: Get current date
        id: date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Restore container image
        uses: actions/cache/restore@v4
        with:
          key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
          path: |-
            share/container.tar
            share/image-id.txt
          fail-on-cache-miss: true

      - name: Build Dangerzone .rpm
        run: |
          ./dev_scripts/env.py --distro ${{ matrix.distro }} --version ${{ matrix.version }} \
              run --dev --no-gui ./dangerzone/install/linux/build-rpm.py

      - name: Upload Dangerzone .rpm
        uses: actions/upload-artifact@v4
        with:
          name: dangerzone-${{ matrix.distro }}-${{ matrix.version }}.rpm
          path: "dist/dangerzone-*.x86_64.rpm"
          if-no-files-found: error
          compression-level: 0

      # Reclaim some space in this step, now that the dev environment is no
      # longer necessary. Previously, we encountered out-of-space issues while
      # running this CI job.

@@ -200,15 +360,124 @@ jobs:

      - name: Build end-user environment
        run: |
          ./dev_scripts/env.py --distro ${{ matrix.distro }} \
              --version ${{ matrix.version }} \
              build

      - name: Run a test command
        run: |
          ./dev_scripts/env.py --distro ${{ matrix.distro }} --version ${{ matrix.version }} \
              run dangerzone-cli dangerzone/tests/test_docs/sample-pdf.pdf --ocr-lang eng

      - name: Check that the Dangerzone GUI imports work
        run: |
          ./dev_scripts/env.py --distro ${{ matrix.distro }} --version ${{ matrix.version }} \
              run dangerzone --help

  run-tests:
    name: "run tests (${{ matrix.distro }} ${{ matrix.version }})"
    runs-on: ubuntu-latest
    needs:
      - build-container-image
      - download-tessdata
    strategy:
      matrix:
        include:
          - distro: ubuntu
            version: "22.04"
          - distro: ubuntu
            version: "24.04"
          - distro: ubuntu
            version: "24.10"
          - distro: ubuntu
            version: "25.04"
          - distro: debian
            version: bullseye
          - distro: debian
            version: bookworm
          - distro: debian
            version: trixie
          - distro: fedora
            version: "40"
          - distro: fedora
            version: "41"
          - distro: fedora
            version: "42"

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"

      - name: Login to GHCR
        run: |
          echo ${{ github.token }} | podman login ghcr.io -u USERNAME --password-stdin

      - name: Get current date
        id: date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Get the dev environment
        run: |
          ./dev_scripts/env.py \
              --distro ${{ matrix.distro }} \
              --version ${{ matrix.version }} \
              build-dev --sync

      - name: Restore container image
        uses: actions/cache/restore@v4
        with:
          key: v5-${{ steps.date.outputs.date }}-${{ hashFiles('Dockerfile', 'dangerzone/conversion/*.py', 'dangerzone/container_helpers/*', 'install/common/build-image.py') }}
          path: |-
            share/container.tar
            share/image-id.txt
          fail-on-cache-miss: true

      - name: Restore cached tessdata
        uses: actions/cache/restore@v4
        with:
          path: share/tessdata/
          enableCrossOsArchive: true
          fail-on-cache-miss: true
          key: v1-tessdata-${{ hashFiles('./install/common/download-tessdata.py') }}

      - name: Setup xvfb (Linux)
        run: |
          sudo apt update
          # Install Xvfb and the X11/XCB client libraries that Qt needs
          # (list collected from several Stack Overflow posts).
          sudo apt-get install -y xvfb libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xinput0 libxcb-xfixes0 libxcb-shape0 libglib2.0-0 libgl1-mesa-dev '^libxcb.*-dev' libx11-xcb-dev libglu1-mesa-dev libxrender-dev libxi-dev libxkbcommon-dev libxkbcommon-x11-dev

          # Start Xvfb in the background.
          sudo /usr/bin/Xvfb $DISPLAY -screen 0 1280x1024x24 &

      - name: Run CI tests
        run: |-
          # Pass the -ac Xserver flag, to disable host-based access controls.
          # This should be used ONLY for testing [1]. If we don't pass this
          # flag, the Podman container is not authorized [2] to access the Xvfb
          # server.
          #
          # [1] From https://www.x.org/releases/X11R6.7.0/doc/Xserver.1.html#sect4:
          #
          #     disables host-based access control mechanisms. Enables access by
          #     any host, and permits any host to modify the access control
          #     list. Use with extreme caution. This option exists primarily for
          #     running test suites remotely.
          #
          # [2] Fails with "Authorization required, but no authorization
          #     protocol specified". However, we have verified with strace(1)
          #     that the command in the Podman container can read the Xauthority
          #     file successfully.
          xvfb-run -s '-ac' ./dev_scripts/env.py --distro ${{ matrix.distro }} --version ${{ matrix.version }} run --dev \
              bash -c 'cd dangerzone; poetry run make test'

      - name: Upload PDF diffs
        uses: actions/upload-artifact@v4
        with:
          name: pdf-diffs-${{ matrix.distro }}-${{ matrix.version }}
          path: tests/test_docs/diffs/*.jpeg
        # Always run this step to publish test results, even on failures
        if: ${{ always() }}

.github/workflows/close-issues.yml (new file, 22 lines)

@@ -0,0 +1,22 @@
name: Close inactive issues
on:
  schedule:
    - cron: "30 1 * * *"

jobs:
  close-issues:
    runs-on: ubuntu-latest
    permissions:
      issues: write
    steps:
      - uses: actions/stale@v9
        with:
          days-before-issue-stale: 30
          days-before-issue-close: 14
          stale-issue-label: "stale"
          stale-issue-message: "Marking this issue as stale because it has been open for 30 days with no activity. It will be closed in 14 days if there's no activity, or if the `stale` label is not removed. Does anyone want to add something?"
          close-issue-message: "Closing this issue now. Don't hesitate to reopen if you have anything to add :-)"
          days-before-pr-stale: -1
          days-before-pr-close: -1
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          any-of-labels: needs info

.github/workflows/release-container-image.yml (new file, 22 lines)

@@ -0,0 +1,22 @@
name: Release multi-arch container image

on:
  workflow_dispatch:
  push:
    branches:
      - main
      - "test/**"
  schedule:
    - cron: "0 0 * * *" # Run every day at 00:00 UTC.

jobs:
  build-push-image:
    uses: ./.github/workflows/build-push-image.yml
    with:
      registry: ghcr.io/${{ github.repository_owner }}
      registry_user: ${{ github.actor }}
      image_name: dangerzone/dangerzone
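      # NOTE: Judging by the input name, `reproduce: true` likely asks the
      # reusable workflow to rebuild the image and check that the result is
      # bit-for-bit identical (see docs/developer/reproducibility.md); the
      # authoritative definition lives in build-push-image.yml.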
      reproduce: true
    secrets:
      registry_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/scan.yml (43 changed lines)

@@ -1,28 +1,42 @@
name: Scan latest app and container
on:
  push:
    branches:
      - main
  pull_request:
    branches: [ main ]
  schedule:
    - cron: '0 0 * * *' # Run every day at 00:00 UTC.
  workflow_dispatch:

jobs:
  security-scan-container:
    strategy:
      matrix:
        runs-on:
          - ubuntu-24.04
          - ubuntu-24.04-arm
    runs-on: ${{ matrix.runs-on }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build container image
        run: |
          python3 ./install/common/build-image.py \
              --debian-archive-date $(date "+%Y%m%d") \
              --runtime docker
          docker load -i share/container.tar
      - name: Get image tag
        id: tag
        run: echo "tag=$(cat share/image-id.txt)" >> $GITHUB_OUTPUT
      # NOTE: Scan first without failing, else we won't be able to read the scan
      # report.
      - name: Scan container image (no fail)
        uses: anchore/scan-action@v6
        id: scan_container
        with:
          image: "dangerzone.rocks/dangerzone:${{ steps.tag.outputs.tag }}"
          fail-build: false
          only-fixed: false
          severity-cutoff: critical

@@ -34,22 +48,27 @@ jobs:
      - name: Inspect container scan report
        run: cat ${{ steps.scan_container.outputs.sarif }}
      - name: Scan container image
        uses: anchore/scan-action@v6
        with:
          image: "dangerzone.rocks/dangerzone:${{ steps.tag.outputs.tag }}"
          fail-build: true
          only-fixed: false
          severity-cutoff: critical

  security-scan-app:
    strategy:
      matrix:
        runs-on:
          - ubuntu-24.04
          - ubuntu-24.04-arm
    runs-on: ${{ matrix.runs-on }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      # NOTE: Scan first without failing, else we won't be able to read the scan
      # report.
      - name: Scan application (no fail)
        uses: anchore/scan-action@v6
        id: scan_app
        with:
          path: "."

@@ -64,7 +83,7 @@ jobs:
      - name: Inspect application scan report
        run: cat ${{ steps.scan_app.outputs.sarif }}
      - name: Scan application
        uses: anchore/scan-action@v6
        with:
          path: "."
          fail-build: true

.github/workflows/scan_released.yml (46 changed lines)

@@ -2,26 +2,39 @@ name: Scan released app and container
on:
  schedule:
    - cron: '0 0 * * *' # Run every day at 00:00 UTC.
  workflow_dispatch:

jobs:
  security-scan-container:
    strategy:
      matrix:
        include:
          - runs-on: ubuntu-24.04
            arch: i686
          - runs-on: ubuntu-24.04-arm
            arch: arm64
    runs-on: ${{ matrix.runs-on }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Download container image for the latest release and load it
        run: |
          VERSION=$(curl https://api.github.com/repos/freedomofpress/dangerzone/releases/latest | grep "tag_name" | cut -d '"' -f 4)
          CONTAINER_FILENAME=container-${VERSION:1}-${{ matrix.arch }}.tar
          wget https://github.com/freedomofpress/dangerzone/releases/download/${VERSION}/${CONTAINER_FILENAME} -O ${CONTAINER_FILENAME}
          docker load -i ${CONTAINER_FILENAME}
      - name: Get image tag
        id: tag
        run: |
          tag=$(docker images dangerzone.rocks/dangerzone --format '{{ .Tag }}')
          echo "tag=$tag" >> $GITHUB_OUTPUT
      # NOTE: Scan first without failing, else we won't be able to read the scan
      # report.
      - name: Scan container image (no fail)
        uses: anchore/scan-action@v6
        id: scan_container
        with:
          image: "dangerzone.rocks/dangerzone:${{ steps.tag.outputs.tag }}"
          fail-build: false
          only-fixed: false
          severity-cutoff: critical

@@ -29,19 +42,24 @@ jobs:
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: ${{ steps.scan_container.outputs.sarif }}
          category: container-${{ matrix.arch }}
      - name: Inspect container scan report
        run: cat ${{ steps.scan_container.outputs.sarif }}
      - name: Scan container image
        uses: anchore/scan-action@v6
        with:
          image: "dangerzone.rocks/dangerzone:${{ steps.tag.outputs.tag }}"
          fail-build: true
          only-fixed: false
          severity-cutoff: critical

  security-scan-app:
    strategy:
      matrix:
        runs-on:
          - ubuntu-24.04
          - ubuntu-24.04-arm
    runs-on: ${{ matrix.runs-on }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

@@ -54,7 +72,7 @@ jobs:
      # NOTE: Scan first without failing, else we won't be able to read the scan
      # report.
      - name: Scan application (no fail)
        uses: anchore/scan-action@v6
        id: scan_app
        with:
          path: "."

@@ -69,7 +87,7 @@ jobs:
      - name: Inspect application scan report
        run: cat ${{ steps.scan_app.outputs.sarif }}
      - name: Scan application
        uses: anchore/scan-action@v6
        with:
          path: "."
          fail-build: true

.gitignore (13 changed lines)

@@ -22,6 +22,7 @@ var/
wheels/
pip-wheel-metadata/
share/python-wheels/
share/tessdata/
*.egg-info/
.installed.cfg
*.egg

@@ -127,6 +128,15 @@ dmypy.json
# Pyre type checker
.pyre/

# Debian packaging
debian/.debhelper
debian/dangerzone
debian/files
debian/debhelper-build-stamp
debian/dangerzone.*
.pybuild/

# Other
.vscode
*.tar.gz

@@ -138,4 +148,5 @@ install/windows/Dangerzone.wxs
share/container.tar
share/container.tar.gz
share/image-id.txt
container/container-pip-requirements.txt
.doit.db.db

.grype.yaml (57 changed lines)

@@ -2,24 +2,47 @@
# latest release of Dangerzone, and offer our analysis.

ignore:
  # CVE-2023-45853
  # ==============
  #
  # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2023-45853
  # Verdict: Dangerzone is not affected because the zlib library in Debian is
  # built in a way that is not vulnerable.
  - vulnerability: CVE-2023-45853
  # CVE-2024-38428
  # ==============
  #
  # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2024-38428
  # Verdict: Dangerzone is not affected because it doesn't use wget in the
  # container image (which also has no network connectivity).
  - vulnerability: CVE-2024-38428
  # CVE-2024-57823
  # ==============
  #
  # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2024-57823
  # Verdict: Dangerzone is not affected. First things first, LibreOffice is
  # using this library for parsing RDF metadata in a document [1], and has
  # issued a fix for the vendored raptor2 package they have for other distros
  # [2].
  #
  # On the other hand, the Debian security team has stated that this is a minor
  # issue [3], and there's no fix from the developers yet. It seems that the
  # Debian package is not affected somehow by this CVE, probably due to the way
  # it's packaged.
  #
  # [1] https://wiki.documentfoundation.org/Documentation/DevGuide/Office_Development#RDF_metadata
  # [2] https://cgit.freedesktop.org/libreoffice/core/commit/?id=2b50dc0e4482ac0ad27d69147b4175e05af4fba4
  # [3] From https://security-tracker.debian.org/tracker/CVE-2024-57823:
  #
  #     [bookworm] - raptor2 <postponed> (Minor issue, revisit when fixed upstream)
  #
  - vulnerability: CVE-2024-57823
  # CVE-2025-0665
  # ==============
  #
  # Debian tracker: https://security-tracker.debian.org/tracker/CVE-2025-0665
  # Verdict: Dangerzone is not affected because the vulnerable code is not
  # present in Debian Bookworm. Also, libcurl is an HTTP client, and the
  # Dangerzone container does not make any network calls.
  - vulnerability: CVE-2025-0665
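
# NOTE: Grype loads this file automatically when it is named `.grype.yaml` in
# the directory it is invoked from, so a local check would look something like
# `grype dangerzone.rocks/dangerzone:<tag>` (the tag placeholder is
# illustrative; the CI invocations live in .github/workflows/scan.yml and
# scan_released.yml).
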
.well-known/funding-manifest-urls (new file, 1 line)

@@ -0,0 +1 @@
https://dangerzone.rocks/assets/json/funding.json

BUILD.md (122 changed lines)

@@ -34,32 +34,9 @@ Install dependencies:
</table>


```sh
sudo apt install -y podman dh-python build-essential make libqt6gui6 \
    pipx python3 python3-dev
```

Install Poetry using `pipx` (recommended) and add it to your `$PATH`:

@@ -70,6 +47,7 @@ methods](https://python-poetry.org/docs/#installation))_

```sh
pipx ensurepath
pipx install poetry
pipx inject poetry poetry-plugin-export
```

After this, restart the terminal window, for the `poetry` command to be in your

@@ -97,6 +75,12 @@ Build the latest container:
python3 ./install/common/build-image.py
```

Download the OCR language data:

```sh
python3 ./install/common/download-tessdata.py
```

Run from source tree:

```sh

@@ -129,6 +113,7 @@ Install Poetry using `pipx`:

```sh
pipx install poetry
pipx inject poetry poetry-plugin-export
```

Clone this repository:

@@ -152,6 +137,12 @@ Build the latest container:
python3 ./install/common/build-image.py
```

Download the OCR language data:

```sh
python3 ./install/common/download-tessdata.py
```

Run from source tree:

```sh

@@ -196,27 +187,27 @@ Overview of the qubes you'll create:
|--------------|----------|---------|
| dz | app qube | Dangerzone development |
| dz-dvm | app qube | offline disposable template for performing conversions |
| fedora-41-dz | template | template for the other two qubes |

#### In `dom0`:

The following instructions require typing commands in a terminal in dom0.

1. Create a new Fedora **template** (`fedora-41-dz`) for Dangerzone development:

   ```
   qvm-clone fedora-41 fedora-41-dz
   ```

   > :bulb: Alternatively, you can use your base Fedora 41 template in the
   > following instructions. In that case, skip this step and replace
   > `fedora-41-dz` with `fedora-41` in the steps below.

2. Create an offline disposable template (app qube) called `dz-dvm`, based on the `fedora-41-dz`
   template. This will be the qube where the documents will be sanitized:

   ```
   qvm-create --class AppVM --label red --template fedora-41-dz \
       --prop netvm="" --prop template_for_dispvms=True \
       --prop default_dispvm='' dz-dvm
   ```

@@ -225,12 +216,18 @@ The following instructions require typing commands in a terminal in dom0.
   and initiating the sanitization process:

   ```
   qvm-create --class AppVM --label red --template fedora-41-dz dz
   qvm-volume resize dz:private $(numfmt --from=auto 20Gi)
   ```

   > :bulb: Alternatively, you can use a different app qube for Dangerzone
   > development. In that case, replace `dz` with the qube of your choice in the
   > steps below.
   >
   > In the commands above, we also resize the private volume of the `dz` qube
   > to 20GiB, since you may need some extra storage space when developing on
   > Dangerzone (e.g., for container images, Tesseract data, and Python
   > virtualenvs).

4. Add an RPC policy (`/etc/qubes/policy.d/50-dangerzone.policy`) that will
   allow launching a disposable qube (`dz-dvm`) when Dangerzone converts a

@@ -256,10 +253,7 @@ test it.
   cd dangerzone
   ```

2. Follow the Fedora instructions for setting up the development environment.

3. Build a dangerzone `.rpm` for qubes with the command

@@ -267,29 +261,20 @@ test it.
   ./install/linux/build-rpm.py --qubes
   ```

4. Copy the produced `.rpm` file into `fedora-41-dz`
   ```sh
   qvm-copy dist/*.x86_64.rpm
   ```

#### In the `fedora-41-dz` template

1. Install the `.rpm` package you just copied

   ```sh
   sudo dnf install ~/QubesIncoming/dz/*.rpm
   ```

2. Shutdown the `fedora-41-dz` template

### Developing Dangerzone

@@ -320,7 +305,7 @@ For changes in the server side components, you can simply edit them locally,
and they will be mirrored to the disposable qube through the `dz.ConvertDev`
RPC call.

The only reason to build a new Qubes RPM and install it in the `fedora-41-dz`
template for development is if:
1. The project requires new server-side components.
2. The code for `qubes/dz.ConvertDev` needs to be updated.

@@ -357,6 +342,12 @@ Build the dangerzone container image:
python3 ./install/common/build-image.py
```

Download the OCR language data:

```sh
python3 ./install/common/download-tessdata.py
```

Run from source tree:

```sh

@@ -418,6 +409,12 @@ Build the dangerzone container image:
python3 .\install\common\build-image.py
```

Download the OCR language data:

```sh
python3 .\install\common\download-tessdata.py
```

After that you can launch dangerzone during development with:

```

@@ -431,11 +428,24 @@ poetry shell
.\dev_scripts\dangerzone.bat
```

### If you want to build the Windows installer

Install [.NET SDK](https://dotnet.microsoft.com/en-us/download) version 6 or later. Then, open a terminal and install the latest version of [WiX Toolset .NET tool](https://wixtoolset.org/) **v5** with:

```sh
dotnet tool install --global wix --version 5.0.2
```

Install the WiX UI extension. You may need to open a new terminal in order to use the newly installed `wix` .NET tool:

```sh
wix extension add --global WixToolset.UI.wixext/5.0.2
```

> [!IMPORTANT]
> To avoid compatibility issues, ensure the WiX UI extension version matches the version of the WiX Toolset.
>
> Run `wix --version` to check the version of WiX Toolset you have installed and replace `5.x.y` with the full version number without the Git revision.

### If you want to sign binaries with Authenticode

@@ -460,3 +470,9 @@ poetry run .\install\windows\build-app.bat
```

When you're done you will have `dist\Dangerzone.msi`.

## Updating the container image

The Dangerzone container image is reproducible. This means that every time we
build it, the result will be bit-for-bit the same, with some minor exceptions.
Read more on how you can update it in `docs/developer/reproducibility.md`.
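
As a quick local sanity check (a minimal sketch, not the canonical procedure;
see `docs/developer/reproducibility.md` for that), you can build the image
twice and compare checksums of the produced tarball, which should match for a
reproducible build:

```sh
# First build: record the checksum of the produced container tarball.
python3 ./install/common/build-image.py
sha256sum share/container.tar > /tmp/container.sha256

# Second build: verify that the tarball is bit-for-bit identical.
python3 ./install/common/build-image.py
sha256sum -c /tmp/container.sha256
```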
CHANGELOG.md (129 changed lines)

@@ -5,8 +5,137 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
since 0.4.1, and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased](https://github.com/freedomofpress/dangerzone/compare/v0.9.0...HEAD)

## [0.9.0](https://github.com/freedomofpress/dangerzone/compare/v0.9.0...0.8.1)

### Added

- Platform support: Add support for Fedora 42 ([#1091](https://github.com/freedomofpress/dangerzone/issues/1091))
- Platform support: Add support for Ubuntu 25.04 (Plucky Puffin) ([#1090](https://github.com/freedomofpress/dangerzone/issues/1090))
- (experimental): It is now possible to specify a custom container runtime in
  the settings, by using the `container_runtime` key. It should contain the path
  to the container runtime you want to use. Please note that this doesn't mean
  we support more container runtimes than Podman and Docker for the time being,
  but it enables you to choose which one you want to use, independently of your
  platform ([#925](https://github.com/freedomofpress/dangerzone/issues/925)).
  A sketch of such a settings entry is shown after this list.
- Document Operating System support [#986](https://github.com/freedomofpress/dangerzone/issues/986)
- Tests: Look for regressions when converting PDFs [#321](https://github.com/freedomofpress/dangerzone/issues/321)
- Ensure container image reproducibility across different container runtimes and versions ([#1074](https://github.com/freedomofpress/dangerzone/issues/1074))
- Implement container image attestations ([#1035](https://github.com/freedomofpress/dangerzone/issues/1035))
- Inform user of outdated Docker Desktop Version ([#693](https://github.com/freedomofpress/dangerzone/issues/693))
- Add support for Python 3.13 ([#992](https://github.com/freedomofpress/dangerzone/issues/992))
- Publish the built artifacts in our CI pipelines ([#972](https://github.com/freedomofpress/dangerzone/pull/972))
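
A minimal sketch of the `container_runtime` entry described above (assuming the
default settings location on Linux, `~/.config/dangerzone/settings.json`; the
path and the runtime binary below are illustrative):

```json
{
  "container_runtime": "/usr/local/bin/podman"
}
```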

### Fixed

- Fix our Debian Trixie installation instructions using Sequoia PGP ([#1052](https://github.com/freedomofpress/dangerzone/issues/1052))
- Fix the way multiprocessing works on macOS ([#873](https://github.com/freedomofpress/dangerzone/issues/873))
- Update minimum Docker Desktop version to fix a stdout truncation issue ([#1101](https://github.com/freedomofpress/dangerzone/issues/1101))

### Removed

- Platform support: Drop support for Ubuntu Focal, since it's nearing end-of-life ([#1018](https://github.com/freedomofpress/dangerzone/issues/1018))
- Platform support: Drop support for Fedora 39 ([#999](https://github.com/freedomofpress/dangerzone/issues/999))

### Changed

- Switch base image to Debian Stable ([#1046](https://github.com/freedomofpress/dangerzone/issues/1046))
- Track image tags instead of image IDs in `image-id.txt` ([#1020](https://github.com/freedomofpress/dangerzone/issues/1020))
- Migrate to Wix 4 (windows building tool) ([#602](https://github.com/freedomofpress/dangerzone/issues/602)).
  Thanks [@jkarasti](https://github.com/jkarasti) for the contribution.
- Add a `--debug` flag to the CLI to help retrieve more logs ([#941](https://github.com/freedomofpress/dangerzone/pull/941))
- The `debian` base image is now fetched by digest. As a result, your local
  container storage will no longer show a tag for this dependency
  ([#1116](https://github.com/freedomofpress/dangerzone/pull/1116)).
  Thanks [@sudoforge](https://github.com/sudoforge) for the contribution.
- The `debian` base image is now referenced with a fully qualified URI,
  including the registry hostname ([#1118](https://github.com/freedomofpress/dangerzone/pull/1118)).
  Thanks [@sudoforge](https://github.com/sudoforge) for the contribution.
- Update the Dangerzone container image and its dependencies (gVisor, Debian base image, H2Orestart) to the latest versions:
  * Debian image release: `bookworm-20250317-slim@sha256:1209d8fd77def86ceb6663deef7956481cc6c14a25e1e64daec12c0ceffcc19d`
  * Debian snapshots date: `2025-03-31`
  * gVisor release date: `2025-03-26`
  * H2Orestart plugin: `v0.7.2` (`d09bc5c93fe2483a7e4a57985d2a8d0e4efae2efb04375fe4b59a68afd7241e2`)

### Development changes

- Make container image scanning work for Silicon macOS ([#1008](https://github.com/freedomofpress/dangerzone/issues/1008))
- Automate the main bulk of our release tasks ([#1016](https://github.com/freedomofpress/dangerzone/issues/1016))
- CI: Enforce updating the CHANGELOG in the CI ([#1108](https://github.com/freedomofpress/dangerzone/pull/1108))
- Add reference to funding.json (required by floss.fund application) ([#1092](https://github.com/freedomofpress/dangerzone/pull/1092))
- Lint: add ruff for linting and formatting ([#1029](https://github.com/freedomofpress/dangerzone/pull/1029)).
  Thanks [@jkarasti](https://github.com/jkarasti) for the contribution.
- Work around a `cx_freeze` build issue ([#974](https://github.com/freedomofpress/dangerzone/issues/974))
- tests: mark the hancom office suite tests for rerun on failures ([#991](https://github.com/freedomofpress/dangerzone/pull/991))
- Update reference template for Qubes to Fedora 41 ([#1078](https://github.com/freedomofpress/dangerzone/issues/1078))

## [0.8.1](https://github.com/freedomofpress/dangerzone/compare/v0.8.1...0.8.0)

- Update the container image

### Added

- Disable gVisor's DirectFS feature ([#226](https://github.com/freedomofpress/dangerzone/issues/226)).
  Thanks [EtiennePerot](https://github.com/EtiennePerot) for the contribution.

### Removed

- Platform support: Drop support for Fedora 39, since it's end-of-life ([#999](https://github.com/freedomofpress/dangerzone/pull/999))

### Updated

- Bump `slsa-framework/slsa-github-generator` from 2.0.0 to 2.1.0 ([#1109](https://github.com/freedomofpress/dangerzone/pull/1109))

### Development changes

Thanks [@jkarasti](https://github.com/jkarasti) for the contribution.

- Automate a large portion of our release tasks with `doit` ([#1016](https://github.com/freedomofpress/dangerzone/issues/1016))

## [0.8.0](https://github.com/freedomofpress/dangerzone/compare/v0.8.0...0.7.1)

### Added

- Point to the installation instructions that the Tails team maintains for Dangerzone ([announcement](https://tails.net/news/dangerzone/index.en.html))
- Installation and execution errors are now caught and displayed in the interface ([#193](https://github.com/freedomofpress/dangerzone/issues/193))
- Prevent users from using illegal characters in output filename ([#362](https://github.com/freedomofpress/dangerzone/issues/362)). Thanks [@bnewc](https://github.com/bnewc) for the contribution!
- Add support for Fedora 41 ([#947](https://github.com/freedomofpress/dangerzone/issues/947))
- Add support for Ubuntu Oracular (24.10) ([#954](https://github.com/freedomofpress/dangerzone/pull/954))

### Fixed

- Update our macOS entitlements, removing now unneeded privileges ([#638](https://github.com/freedomofpress/dangerzone/issues/638))
- Make Dangerzone work on Linux systems with SELinux in enforcing mode ([#880](https://github.com/freedomofpress/dangerzone/issues/880))
- Process documents with embedded multimedia files without crashing ([#877](https://github.com/freedomofpress/dangerzone/issues/877))
- Search for applications that can read PDF files in a more reliable way on Linux ([#899](https://github.com/freedomofpress/dangerzone/issues/899))
- Handle and report some stray conversion errors ([#776](https://github.com/freedomofpress/dangerzone/issues/776)). Thanks [@amnak613](https://github.com/amnak613) for the contribution!
- Replace occurrences of the word "Docker" in Podman-related error messages in Linux ([#212](https://github.com/freedomofpress/dangerzone/issues/212))

### Changed

- The second phase of the conversion (pixels to PDF) now happens on the host. Instead of first grabbing all of the pixel data from the first container, storing them on disk, and then reconstructing the PDF on a second container, Dangerzone now immediately reconstructs the PDF **on the host**, while the doc to pixels conversion is still running on the first container. The sanitation is no less safe, since the boundaries between the sandbox and the host are still respected ([#625](https://github.com/freedomofpress/dangerzone/issues/625))
- PyMuPDF is now vendorized for Debian packages. This is done because the PyMuPDF package from the Debian repos lacks OCR support ([#940](https://github.com/freedomofpress/dangerzone/pull/940))
- Always use our own seccomp policy as a default ([#908](https://github.com/freedomofpress/dangerzone/issues/908))
- Debian packages are now amd64 only, which removes some warnings in Linux distros with 32-bit repos enabled ([#394](https://github.com/freedomofpress/dangerzone/issues/394))
- Allow choosing installation directory on Windows platforms ([#148](https://github.com/freedomofpress/dangerzone/issues/148)). Thanks [@jkarasti](https://github.com/jkarasti) for the contribution!
- Bumped H2ORestart LibreOffice extension to version 0.6.6 ([#943](https://github.com/freedomofpress/dangerzone/issues/943))
- Platform support: Ubuntu Focal (20.04) is now deprecated, and support will be dropped with the next release ([#965](https://github.com/freedomofpress/dangerzone/issues/965))

### Removed

- Platform support: Drop Ubuntu Mantic (23.10), since it's end-of-life ([#977](https://github.com/freedomofpress/dangerzone/pull/977))

### Development changes

- Build Debian packages with pybuild ([#773](https://github.com/freedomofpress/dangerzone/issues/773))
- Test Dangerzone on Intel macOS machines as well ([#932](https://github.com/freedomofpress/dangerzone/issues/932))
- Switch from CircleCI runners to Github actions ([#674](https://github.com/freedomofpress/dangerzone/issues/674))
- Sign Windows executables and installer with SHA256 rather than SHA1 ([#931](https://github.com/freedomofpress/dangerzone/pull/931)). Thanks [@jkarasti](https://github.com/jkarasti) for the contribution!

## [0.7.1](https://github.com/freedomofpress/dangerzone/compare/v0.7.1...v0.7.0)

### Fixed

- Fix an `image-id.txt` mismatch happening on Docker Desktop >= 4.30.0 ([#933](https://github.com/freedomofpress/dangerzone/issues/933))

## [0.7.0](https://github.com/freedomofpress/dangerzone/compare/v0.7.0...v0.6.1)

304
Dockerfile
304
Dockerfile
|
@ -1,118 +1,228 @@
|
|||
###########################################
|
||||
# Build PyMuPDF
|
||||
# NOTE: Updating the packages to their latest versions requires bumping the
|
||||
# Dockerfile args below. For more info about this file, read
|
||||
# docs/developer/reproducibility.md.
|
||||
|
||||
FROM alpine:latest as pymupdf-build
|
||||
ARG DEBIAN_IMAGE_DIGEST=sha256:1209d8fd77def86ceb6663deef7956481cc6c14a25e1e64daec12c0ceffcc19d
|
||||
|
||||
ARG REQUIREMENTS_TXT
|
||||
FROM docker.io/library/debian@${DEBIAN_IMAGE_DIGEST} AS dangerzone-image
|
||||
|
||||
# Install PyMuPDF via hash-checked requirements file
|
||||
COPY ${REQUIREMENTS_TXT} /tmp/requirements.txt
|
||||
RUN apk --no-cache add linux-headers g++ linux-headers gcc make python3-dev py3-pip clang-dev
|
||||
RUN pip install -vv --break-system-packages --require-hashes -r /tmp/requirements.txt
|
||||
ARG GVISOR_ARCHIVE_DATE=20250326
|
||||
ARG DEBIAN_ARCHIVE_DATE=20250331
|
||||
ARG H2ORESTART_CHECKSUM=935e68671bde4ca63a364128077f1c733349bbcc90b7e6973bc7a2306494ec54
|
||||
ARG H2ORESTART_VERSION=v0.7.2
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
###########################################
|
||||
# Download Tesseract data
|
||||
# The following way of installing packages is taken from
|
||||
# https://github.com/reproducible-containers/repro-sources-list.sh/blob/master/Dockerfile.debian-12,
|
||||
# and adapted to allow installing gVisor from each own repo as well.
|
||||
RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
--mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \
|
||||
--mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \
|
||||
: "Hacky way to set a date for the Debian snapshot repos" && \
|
||||
touch -d ${DEBIAN_ARCHIVE_DATE}Z /etc/apt/sources.list.d/debian.sources && \
|
||||
touch -d ${DEBIAN_ARCHIVE_DATE}Z /etc/apt/sources.list && \
|
||||
repro-sources-list.sh && \
|
||||
: "Setup APT to install gVisor from its separate APT repo" && \
|
||||
apt-get update && \
|
||||
apt-get upgrade -y && \
|
||||
apt-get install -y --no-install-recommends apt-transport-https ca-certificates gnupg && \
|
||||
gpg -o /usr/share/keyrings/gvisor-archive-keyring.gpg --dearmor /tmp/gvisor.key && \
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/gvisor-archive-keyring.gpg] https://storage.googleapis.com/gvisor/releases ${GVISOR_ARCHIVE_DATE} main" > /etc/apt/sources.list.d/gvisor.list && \
|
||||
: "Install the necessary gVisor and Dangerzone dependencies" && \
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
python3 python3-fitz libreoffice-nogui libreoffice-java-common \
|
||||
python3 python3-magic default-jre-headless fonts-noto-cjk fonts-dejavu \
|
||||
runsc unzip wget && \
|
||||
: "Clean up for improving reproducibility (optional)" && \
|
||||
rm -rf /var/cache/fontconfig/ && \
|
||||
rm -rf /etc/ssl/certs/java/cacerts && \
|
||||
rm -rf /var/log/* /var/cache/ldconfig/aux-cache
|
||||
|
||||
FROM alpine:latest as tessdata-dl
|
||||
ARG TESSDATA_CHECKSUM=d0e3bb6f3b4e75748680524a1d116f2bfb145618f8ceed55b279d15098a530f9
|
||||
|
||||
# Download the trained models from the latest GitHub release of Tesseract, and
|
||||
# store them under /usr/share/tessdata. This is basically what distro packages
|
||||
# do under the hood.
|
||||
#
|
||||
# Because the GitHub release contains more files than just the trained models,
|
||||
# we use `find` to fetch only the '*.traineddata' files in the top directory.
|
||||
#
|
||||
# Before we untar the models, we also check if the checksum is the expected one.
|
||||
RUN mkdir /usr/share/tessdata/ && mkdir tessdata && cd tessdata \
|
||||
&& TESSDATA_VERSION=$(wget -O- -nv https://api.github.com/repos/tesseract-ocr/tessdata_fast/releases/latest \
|
||||
| sed -n 's/^.*"tag_name": "\([0-9.]\+\)".*$/\1/p') \
|
||||
&& wget https://github.com/tesseract-ocr/tessdata_fast/archive/$TESSDATA_VERSION/tessdata_fast-$TESSDATA_VERSION.tar.gz \
|
||||
&& echo "$TESSDATA_CHECKSUM tessdata_fast-$TESSDATA_VERSION.tar.gz" | sha256sum -c \
|
||||
&& tar -xzvf tessdata_fast-$TESSDATA_VERSION.tar.gz -C . \
|
||||
&& find . -name '*.traineddata' -maxdepth 2 -exec cp {} /usr/share/tessdata/ \; \
|
||||
&& cd .. && rm -r tessdata
|
||||
|
||||
|
||||
###########################################
|
||||
# Download H2ORestart
|
||||
FROM alpine:latest as h2orestart-dl
|
||||
ARG H2ORESTART_CHECKSUM=5db816a1e57b510456633f55e693cb5ef3675ef8b35df4f31c90ab9d4c66071a
|
||||
RUN mkdir /libreoffice_ext && cd libreoffice_ext \
|
||||
# Download H2ORestart from GitHub using a pinned version and hash. Note that
|
||||
# it's available in Debian repos, but not in Bookworm yet.
|
||||
RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \
|
||||
&& H2ORESTART_FILENAME=h2orestart.oxt \
|
||||
&& H2ORESTART_VERSION="v0.5.7" \
|
||||
&& wget https://github.com/ebandal/H2Orestart/releases/download/$H2ORESTART_VERSION/$H2ORESTART_FILENAME \
|
||||
&& echo "$H2ORESTART_CHECKSUM $H2ORESTART_FILENAME" | sha256sum -c \
|
||||
&& install -dm777 "/usr/lib/libreoffice/share/extensions/"
|
||||
&& install -dm777 "/usr/lib/libreoffice/share/extensions/" \
|
||||
&& rm /root/.wget-hsts
|
||||
|
||||
# Create an unprivileged user both for gVisor and for running Dangerzone.
|
||||
# XXX: Make the shadow field "date of last password change" a constant
|
||||
# number.
|
||||
RUN addgroup --gid 1000 dangerzone
|
||||
RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
|
||||
--disabled-password --home /home/dangerzone dangerzone \
|
||||
&& chage -d 99999 dangerzone \
|
||||
&& rm /etc/shadow-
|
||||
|
||||
###########################################
|
||||
# Dangerzone image
|
||||
|
||||
FROM alpine:latest AS dangerzone-image
|
||||
|
||||
# Install dependencies
|
||||
RUN apk --no-cache -U upgrade && \
|
||||
apk --no-cache add \
|
||||
libreoffice \
|
||||
openjdk8 \
|
||||
python3 \
|
||||
py3-magic \
|
||||
font-noto-cjk
|
||||
|
||||
COPY --from=pymupdf-build /usr/lib/python3.12/site-packages/fitz/ /usr/lib/python3.12/site-packages/fitz
|
||||
COPY --from=pymupdf-build /usr/lib/python3.12/site-packages/pymupdf/ /usr/lib/python3.12/site-packages/pymupdf
|
||||
COPY --from=tessdata-dl /usr/share/tessdata/ /usr/share/tessdata
|
||||
COPY --from=h2orestart-dl /libreoffice_ext/ /libreoffice_ext
|
||||
|
||||
RUN install -dm777 "/usr/lib/libreoffice/share/extensions/"
|
||||
|
||||
# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
|
||||
# import it.
|
||||
RUN mkdir -p /opt/dangerzone/dangerzone
|
||||
RUN touch /opt/dangerzone/dangerzone/__init__.py
|
||||
COPY conversion /opt/dangerzone/dangerzone/conversion
|
||||
|
||||
# Add the unprivileged user. Set the UID/GID of the dangerzone user/group to
|
||||
# 1000, since we will point to it from the OCI config.
|
||||
#
|
||||
# NOTE: A tmpfs will be mounted over /home/dangerzone directory,
|
||||
# so nothing within it from the image will be persisted.
|
||||
RUN addgroup -g 1000 dangerzone && \
|
||||
adduser -u 1000 -s /bin/true -G dangerzone -h /home/dangerzone -D dangerzone
|
||||
|
||||
###########################################
|
||||
# gVisor wrapper image
|
||||
|
||||
FROM alpine:latest
|
||||
|
||||
RUN apk --no-cache -U upgrade && \
|
||||
apk --no-cache add python3
|
||||
|
||||
# Temporarily pin gVisor to the latest working version (release-20240826.0).
|
||||
# See: https://github.com/freedomofpress/dangerzone/issues/928
|
||||
RUN GVISOR_URL="https://storage.googleapis.com/gvisor/releases/release/20240826/$(uname -m)"; \
|
||||
wget "${GVISOR_URL}/runsc" "${GVISOR_URL}/runsc.sha512" && \
|
||||
sha512sum -c runsc.sha512 && \
|
||||
rm -f runsc.sha512 && \
|
||||
chmod 555 runsc && \
|
||||
mv runsc /usr/bin/
|
||||
|
||||
# Add the unprivileged `dangerzone` user.
|
||||
RUN addgroup dangerzone && \
|
||||
adduser -s /bin/true -G dangerzone -h /home/dangerzone -D dangerzone
|
||||
|
||||
# Switch to the dangerzone user for the rest of the script.
|
||||
USER dangerzone
|
||||
|
||||
# Copy the Dangerzone image, as created by the previous steps, into the home
|
||||
# directory of the `dangerzone` user.
|
||||
RUN mkdir /home/dangerzone/dangerzone-image
|
||||
COPY --from=dangerzone-image / /home/dangerzone/dangerzone-image/rootfs
|
||||
# Copy only the Python code, and not any produced .pyc files.
|
||||
COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/
|
||||
|
||||
# Create a directory that will be used by gVisor as the place where it will
|
||||
# store the state of its containers.
|
||||
RUN mkdir /home/dangerzone/.containers
|
||||
|
||||
COPY gvisor_wrapper/entrypoint.py /
###############################################################################
#
# REUSING CONTAINER IMAGES:
# Anatomy of a hack
# ========================
#
# The rest of the Dockerfile aims to do one thing: allow the final container
# image to actually contain two container images; one for the outer container
# (spawned by Podman/Docker Desktop), and one for the inner container (spawned
# by gVisor).
#
# This has already been done in the past, and we explain why and how in the
# design document for gVisor integration (should be in
# `docs/developer/gvisor.md`). In this iteration, we want to also
# achieve the following:
#
# 1. Have a small final image, by sharing some system paths between the inner
#    and outer container image using symlinks.
# 2. Allow our security scanning tool to see the contents of the inner
#    container image.
# 3. Make the outer container image operational, in the sense that you can use
#    `apt` commands and perform a conversion with Dangerzone, outside the
#    gVisor sandbox. This is helpful for debugging purposes.
#
# Below we'll explain how our design choices are informed by the above
# sub-goals.
#
# First, to achieve a small container image, we basically need to copy `/etc`,
# `/usr` and `/opt` from the original Dangerzone image to the **inner**
# container image (under `/home/dangerzone/dangerzone-image/rootfs/`).
#
# That's all we need. The rest of the files play no role, and we can actually
# mask them in gVisor's OCI config.
#
# Second, in order to let our security scanner find the installed packages,
# we need to copy the following dirs to the root of the **outer** container
# image:
# * `/etc`, so that the security scanner can detect the image type and its
#   sources
# * `/var`, so that the security scanner can have access to the APT database.
#
# IMPORTANT: We don't symlink the `/etc` of the **outer** container image to
# the **inner** one, in order to avoid leaking files like
# `/etc/{hostname,hosts,resolv.conf}` that Podman/Docker mounts when running
# the **outer** container image.
#
# Third, in order to have an operational Debian image, we are _mostly_ covered
# by the dirs we have copied. There's a _rare_ case where, during debugging, we
# may want to install a system package that has components in `/etc` and
# `/var`, which will not be available in the **inner** container image. In that
# case, the developer can create the necessary symlinks in the live container.
#
# FILESYSTEM HIERARCHY
# ====================
#
# The above plan leads to the following filesystem hierarchy:
#
# Outer container image:
#
# # ls -l /
# lrwxrwxrwx   1 root root    7 Jan 27 10:46 bin -> usr/bin
# -rwxr-xr-x   1 root root 7764 Jan 24 08:14 entrypoint.py
# drwxr-xr-x   1 root root 4096 Jan 27 10:47 etc
# drwxr-xr-x   1 root root 4096 Jan 27 10:46 home
# lrwxrwxrwx   1 root root    7 Jan 27 10:46 lib -> usr/lib
# lrwxrwxrwx   1 root root    9 Jan 27 10:46 lib64 -> usr/lib64
# drwxr-xr-x   2 root root 4096 Jan 27 10:46 root
# drwxr-xr-x   1 root root 4096 Jan 27 10:47 run
# lrwxrwxrwx   1 root root    8 Jan 27 10:46 sbin -> usr/sbin
# drwxrwxrwx   2 root root 4096 Jan 27 10:46 tmp
# lrwxrwxrwx   1 root root   44 Jan 27 10:46 usr -> /home/dangerzone/dangerzone-image/rootfs/usr
# drwxr-xr-x  11 root root 4096 Jan 27 10:47 var
#
# Inner container image:
#
# # ls -l /home/dangerzone/dangerzone-image/rootfs/
# total 12
# lrwxrwxrwx   1 root root    7 Jan 27 10:47 bin -> usr/bin
# drwxr-xr-x  43 root root 4096 Jan 27 10:46 etc
# lrwxrwxrwx   1 root root    7 Jan 27 10:47 lib -> usr/lib
# lrwxrwxrwx   1 root root    9 Jan 27 10:47 lib64 -> usr/lib64
# drwxr-xr-x   4 root root 4096 Jan 27 10:47 opt
# drwxr-xr-x  12 root root 4096 Jan 27 10:47 usr
#
# SYMLINKING /USR
# ===============
#
# It's surprisingly difficult (maybe even borderline impossible) to symlink
# `/usr` to a different path during image build. The problem is that /usr
# is very sensitive, and you can't manipulate it in a live system. That is, I
# haven't found a way to do the following, or something equivalent:
#
#     rm -r /usr && ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /usr
#
# The `ln` binary, even if you specify it by its full path, cannot run
# (probably because `ld-linux.so` can't be found). For this reason, we have
# to create the symlinks beforehand, in a previous build stage. Then, in an
# empty container image (scratch image), we can copy these symlinks and the
# /usr, and stitch everything together.
###############################################################################

# Create the filesystem hierarchy that will be used to symlink /usr.

RUN mkdir -p \
    /new_root \
    /new_root/root \
    /new_root/run \
    /new_root/tmp \
    /new_root/home/dangerzone/dangerzone-image/rootfs

# Copy the /etc and /var directories under the new root directory. Also,
# copy /etc, /opt, and /usr to the Dangerzone image rootfs.
#
# NOTE: We also have to remove the resolv.conf file, in order to not leak any
# DNS servers added there during image build time.
RUN cp -r /etc /var /new_root/ \
    && rm /new_root/etc/resolv.conf
RUN cp -r /etc /opt /usr /new_root/home/dangerzone/dangerzone-image/rootfs \
    && rm /new_root/home/dangerzone/dangerzone-image/rootfs/etc/resolv.conf

RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
RUN ln -s usr/bin /new_root/bin
RUN ln -s usr/lib /new_root/lib
RUN ln -s usr/lib64 /new_root/lib64
RUN ln -s usr/sbin /new_root/sbin
RUN ln -s usr/bin /new_root/home/dangerzone/dangerzone-image/rootfs/bin
RUN ln -s usr/lib /new_root/home/dangerzone/dangerzone-image/rootfs/lib
RUN ln -s usr/lib64 /new_root/home/dangerzone/dangerzone-image/rootfs/lib64

# Fix permissions in /home/dangerzone, so that our entrypoint script can make
# changes in the following folders.
RUN chown dangerzone:dangerzone \
    /new_root/home/dangerzone \
    /new_root/home/dangerzone/dangerzone-image/
# Fix permissions in /tmp, so that it can be used by unprivileged users.
RUN chmod 777 /new_root/tmp

COPY container_helpers/entrypoint.py /new_root
# HACK: For reasons that we are not sure about yet, we need to explicitly
# specify the modification time of this file.
RUN touch -d ${DEBIAN_ARCHIVE_DATE}Z /new_root/entrypoint.py

## Final image

FROM scratch

# Copy the filesystem hierarchy that we created in the previous stage, so that
# /usr can be a symlink.
COPY --from=dangerzone-image /new_root/ /

# Switch to the dangerzone user for the rest of the script.
USER dangerzone

ENTRYPOINT ["/entrypoint.py"]

16 Dockerfile.env Normal file

@@ -0,0 +1,16 @@
# Should be the INDEX DIGEST from an image tagged `bookworm-<DATE>-slim`:
# https://hub.docker.com/_/debian/tags?name=bookworm-
#
# Tag for this digest: bookworm-20250317-slim
DEBIAN_IMAGE_DIGEST=sha256:1209d8fd77def86ceb6663deef7956481cc6c14a25e1e64daec12c0ceffcc19d
# Can be bumped to today's date
DEBIAN_ARCHIVE_DATE=20250331
# Can be bumped to the latest date in https://github.com/google/gvisor/tags
GVISOR_ARCHIVE_DATE=20250326
# Can be bumped to the latest version and checksum from https://github.com/ebandal/H2Orestart/releases
H2ORESTART_CHECKSUM=935e68671bde4ca63a364128077f1c733349bbcc90b7e6973bc7a2306494ec54
H2ORESTART_VERSION=v0.7.2

# Buildkit image (taken from freedomofpress/repro-build)
BUILDKIT_IMAGE="docker.io/moby/buildkit:v0.19.0@sha256:14aa1b4dd92ea0a4cd03a54d0c6079046ea98cd0c0ae6176bdd7036ba370cbbe"
BUILDKIT_IMAGE_ROOTLESS="docker.io/moby/buildkit:v0.19.0-rootless@sha256:e901cffdad753892a7c3afb8b9972549fca02c73888cf340c91ed801fdd96d71"

228 Dockerfile.in Normal file

@@ -0,0 +1,228 @@
# NOTE: Updating the packages to their latest versions requires bumping the
# Dockerfile args below. For more info about this file, read
# docs/developer/reproducibility.md.

ARG DEBIAN_IMAGE_DIGEST={{DEBIAN_IMAGE_DIGEST}}

FROM docker.io/library/debian@${DEBIAN_IMAGE_DIGEST} AS dangerzone-image

ARG GVISOR_ARCHIVE_DATE={{GVISOR_ARCHIVE_DATE}}
ARG DEBIAN_ARCHIVE_DATE={{DEBIAN_ARCHIVE_DATE}}
ARG H2ORESTART_CHECKSUM={{H2ORESTART_CHECKSUM}}
ARG H2ORESTART_VERSION={{H2ORESTART_VERSION}}

ENV DEBIAN_FRONTEND=noninteractive

# The following way of installing packages is taken from
# https://github.com/reproducible-containers/repro-sources-list.sh/blob/master/Dockerfile.debian-12,
# and adapted to allow installing gVisor from its own repo as well.
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    --mount=type=bind,source=./container_helpers/repro-sources-list.sh,target=/usr/local/bin/repro-sources-list.sh \
    --mount=type=bind,source=./container_helpers/gvisor.key,target=/tmp/gvisor.key \
    : "Hacky way to set a date for the Debian snapshot repos" && \
    touch -d ${DEBIAN_ARCHIVE_DATE}Z /etc/apt/sources.list.d/debian.sources && \
    touch -d ${DEBIAN_ARCHIVE_DATE}Z /etc/apt/sources.list && \
    repro-sources-list.sh && \
    : "Setup APT to install gVisor from its separate APT repo" && \
    apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y --no-install-recommends apt-transport-https ca-certificates gnupg && \
    gpg -o /usr/share/keyrings/gvisor-archive-keyring.gpg --dearmor /tmp/gvisor.key && \
    echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/gvisor-archive-keyring.gpg] https://storage.googleapis.com/gvisor/releases ${GVISOR_ARCHIVE_DATE} main" > /etc/apt/sources.list.d/gvisor.list && \
    : "Install the necessary gVisor and Dangerzone dependencies" && \
    apt-get update && \
    apt-get install -y --no-install-recommends \
        python3 python3-fitz libreoffice-nogui libreoffice-java-common \
        python3 python3-magic default-jre-headless fonts-noto-cjk fonts-dejavu \
        runsc unzip wget && \
    : "Clean up for improving reproducibility (optional)" && \
    rm -rf /var/cache/fontconfig/ && \
    rm -rf /etc/ssl/certs/java/cacerts && \
    rm -rf /var/log/* /var/cache/ldconfig/aux-cache

# Download H2ORestart from GitHub using a pinned version and hash. Note that
# it's available in Debian repos, but not in Bookworm yet.
RUN mkdir /opt/libreoffice_ext && cd /opt/libreoffice_ext \
    && H2ORESTART_FILENAME=h2orestart.oxt \
    && wget https://github.com/ebandal/H2Orestart/releases/download/$H2ORESTART_VERSION/$H2ORESTART_FILENAME \
    && echo "$H2ORESTART_CHECKSUM $H2ORESTART_FILENAME" | sha256sum -c \
    && install -dm777 "/usr/lib/libreoffice/share/extensions/" \
    && rm /root/.wget-hsts

# Create an unprivileged user both for gVisor and for running Dangerzone.
# XXX: Make the shadow field "date of last password change" a constant
# number.
RUN addgroup --gid 1000 dangerzone
RUN adduser --uid 1000 --ingroup dangerzone --shell /bin/true \
    --disabled-password --home /home/dangerzone dangerzone \
    && chage -d 99999 dangerzone \
    && rm /etc/shadow-

# Copy Dangerzone's conversion logic under /opt/dangerzone, and allow Python to
# import it.
RUN mkdir -p /opt/dangerzone/dangerzone
RUN touch /opt/dangerzone/dangerzone/__init__.py

# Copy only the Python code, and not any produced .pyc files.
COPY conversion/*.py /opt/dangerzone/dangerzone/conversion/

# Create a directory that will be used by gVisor as the place where it will
# store the state of its containers.
RUN mkdir /home/dangerzone/.containers

###############################################################################
#
# REUSING CONTAINER IMAGES:
# Anatomy of a hack
# ========================
#
# The rest of the Dockerfile aims to do one thing: allow the final container
# image to actually contain two container images; one for the outer container
# (spawned by Podman/Docker Desktop), and one for the inner container (spawned
# by gVisor).
#
# This has already been done in the past, and we explain why and how in the
# design document for gVisor integration (should be in
# `docs/developer/gvisor.md`). In this iteration, we want to also
# achieve the following:
#
# 1. Have a small final image, by sharing some system paths between the inner
#    and outer container image using symlinks.
# 2. Allow our security scanning tool to see the contents of the inner
#    container image.
# 3. Make the outer container image operational, in the sense that you can use
#    `apt` commands and perform a conversion with Dangerzone, outside the
#    gVisor sandbox. This is helpful for debugging purposes.
#
# Below we'll explain how our design choices are informed by the above
# sub-goals.
#
# First, to achieve a small container image, we basically need to copy `/etc`,
# `/usr` and `/opt` from the original Dangerzone image to the **inner**
# container image (under `/home/dangerzone/dangerzone-image/rootfs/`).
#
# That's all we need. The rest of the files play no role, and we can actually
# mask them in gVisor's OCI config.
#
# Second, in order to let our security scanner find the installed packages,
# we need to copy the following dirs to the root of the **outer** container
# image:
# * `/etc`, so that the security scanner can detect the image type and its
#   sources
# * `/var`, so that the security scanner can have access to the APT database.
#
# IMPORTANT: We don't symlink the `/etc` of the **outer** container image to
# the **inner** one, in order to avoid leaking files like
# `/etc/{hostname,hosts,resolv.conf}` that Podman/Docker mounts when running
# the **outer** container image.
#
# Third, in order to have an operational Debian image, we are _mostly_ covered
# by the dirs we have copied. There's a _rare_ case where, during debugging, we
# may want to install a system package that has components in `/etc` and
# `/var`, which will not be available in the **inner** container image. In that
# case, the developer can create the necessary symlinks in the live container.
#
# FILESYSTEM HIERARCHY
# ====================
#
# The above plan leads to the following filesystem hierarchy:
#
# Outer container image:
#
# # ls -l /
# lrwxrwxrwx   1 root root    7 Jan 27 10:46 bin -> usr/bin
# -rwxr-xr-x   1 root root 7764 Jan 24 08:14 entrypoint.py
# drwxr-xr-x   1 root root 4096 Jan 27 10:47 etc
# drwxr-xr-x   1 root root 4096 Jan 27 10:46 home
# lrwxrwxrwx   1 root root    7 Jan 27 10:46 lib -> usr/lib
# lrwxrwxrwx   1 root root    9 Jan 27 10:46 lib64 -> usr/lib64
# drwxr-xr-x   2 root root 4096 Jan 27 10:46 root
# drwxr-xr-x   1 root root 4096 Jan 27 10:47 run
# lrwxrwxrwx   1 root root    8 Jan 27 10:46 sbin -> usr/sbin
# drwxrwxrwx   2 root root 4096 Jan 27 10:46 tmp
# lrwxrwxrwx   1 root root   44 Jan 27 10:46 usr -> /home/dangerzone/dangerzone-image/rootfs/usr
# drwxr-xr-x  11 root root 4096 Jan 27 10:47 var
#
# Inner container image:
#
# # ls -l /home/dangerzone/dangerzone-image/rootfs/
# total 12
# lrwxrwxrwx   1 root root    7 Jan 27 10:47 bin -> usr/bin
# drwxr-xr-x  43 root root 4096 Jan 27 10:46 etc
# lrwxrwxrwx   1 root root    7 Jan 27 10:47 lib -> usr/lib
# lrwxrwxrwx   1 root root    9 Jan 27 10:47 lib64 -> usr/lib64
# drwxr-xr-x   4 root root 4096 Jan 27 10:47 opt
# drwxr-xr-x  12 root root 4096 Jan 27 10:47 usr
#
# SYMLINKING /USR
# ===============
#
# It's surprisingly difficult (maybe even borderline impossible) to symlink
# `/usr` to a different path during image build. The problem is that /usr
# is very sensitive, and you can't manipulate it in a live system. That is, I
# haven't found a way to do the following, or something equivalent:
#
#     rm -r /usr && ln -s /home/dangerzone/dangerzone-image/rootfs/usr/ /usr
#
# The `ln` binary, even if you specify it by its full path, cannot run
# (probably because `ld-linux.so` can't be found). For this reason, we have
# to create the symlinks beforehand, in a previous build stage. Then, in an
# empty container image (scratch image), we can copy these symlinks and the
# /usr, and stitch everything together.
###############################################################################

# Create the filesystem hierarchy that will be used to symlink /usr.

RUN mkdir -p \
    /new_root \
    /new_root/root \
    /new_root/run \
    /new_root/tmp \
    /new_root/home/dangerzone/dangerzone-image/rootfs

# Copy the /etc and /var directories under the new root directory. Also,
# copy /etc, /opt, and /usr to the Dangerzone image rootfs.
#
# NOTE: We also have to remove the resolv.conf file, in order to not leak any
# DNS servers added there during image build time.
RUN cp -r /etc /var /new_root/ \
    && rm /new_root/etc/resolv.conf
RUN cp -r /etc /opt /usr /new_root/home/dangerzone/dangerzone-image/rootfs \
    && rm /new_root/home/dangerzone/dangerzone-image/rootfs/etc/resolv.conf

RUN ln -s /home/dangerzone/dangerzone-image/rootfs/usr /new_root/usr
RUN ln -s usr/bin /new_root/bin
RUN ln -s usr/lib /new_root/lib
RUN ln -s usr/lib64 /new_root/lib64
RUN ln -s usr/sbin /new_root/sbin
RUN ln -s usr/bin /new_root/home/dangerzone/dangerzone-image/rootfs/bin
RUN ln -s usr/lib /new_root/home/dangerzone/dangerzone-image/rootfs/lib
RUN ln -s usr/lib64 /new_root/home/dangerzone/dangerzone-image/rootfs/lib64

# Fix permissions in /home/dangerzone, so that our entrypoint script can make
# changes in the following folders.
RUN chown dangerzone:dangerzone \
    /new_root/home/dangerzone \
    /new_root/home/dangerzone/dangerzone-image/
# Fix permissions in /tmp, so that it can be used by unprivileged users.
RUN chmod 777 /new_root/tmp

COPY container_helpers/entrypoint.py /new_root
# HACK: For reasons that we are not sure about yet, we need to explicitly
# specify the modification time of this file.
RUN touch -d ${DEBIAN_ARCHIVE_DATE}Z /new_root/entrypoint.py

## Final image

FROM scratch

# Copy the filesystem hierarchy that we created in the previous stage, so that
# /usr can be a symlink.
COPY --from=dangerzone-image /new_root/ /

# Switch to the dangerzone user for the rest of the script.
USER dangerzone

ENTRYPOINT ["/entrypoint.py"]

176 INSTALL.md

@@ -1,23 +1,91 @@
## Operating System support

Dangerzone can run on various Operating Systems (OS), and has automated tests
for most of them.
This section explains which OS we support, how long we support each version, and
how we test Dangerzone against them.

You can find general support information in this table, and more details in the
following sections.

(Unless otherwise specified, the OS architecture is AMD64.)

| Distribution | Supported releases | Automated tests | Manual QA |
| ------------ | ------------------------- | ---------------------- | --------- |
| Windows | 2 last releases | 🗹 (`windows-latest`) ◎ | 🗹 |
| macOS intel | 3 last releases | 🗹 (`macos-13`) ◎ | 🗹 |
| macOS silicon | 3 last releases | 🗹 (`macos-latest`) ◎ | 🗹 |
| Ubuntu | Follow upstream support ✰ | 🗹 | 🗹 |
| Debian | Current stable, Oldstable and LTS releases | 🗹 | 🗹 |
| Fedora | Follow upstream support | 🗹 | 🗹 |
| Qubes OS | [Beta support](https://github.com/freedomofpress/dangerzone/issues/413) ✢ | 🗷 | Latest Fedora template |
| Tails | Only the last release | 🗷 | Last release only |

Notes:

✰ Support for Ubuntu Focal [was dropped](https://github.com/freedomofpress/dangerzone/issues/1018)

✢ Qubes OS support assumes the use of a Fedora template. The supported releases follow our general support for Fedora.

◎ More information about where that points [in the runner-images repository](https://github.com/actions/runner-images/tree/main)

## MacOS
See instructions in [README.md](README.md#macos).

- Download [Dangerzone 0.9.0 for Mac (Apple Silicon CPU)](https://github.com/freedomofpress/dangerzone/releases/download/v0.9.0/Dangerzone-0.9.0-arm64.dmg)
- Download [Dangerzone 0.9.0 for Mac (Intel CPU)](https://github.com/freedomofpress/dangerzone/releases/download/v0.9.0/Dangerzone-0.9.0-i686.dmg)

> [!TIP]
> We support the releases of macOS that are still within Apple's servicing timeline. Apple usually provides security updates for the latest 3 releases, but this isn't consistently applied and security fixes aren't guaranteed for the non-latest releases. We are also dependent on [Docker Desktop macOS support](https://docs.docker.com/desktop/setup/install/mac-install/).

You can also install Dangerzone for Mac using [Homebrew](https://brew.sh/): `brew install --cask dangerzone`

> **Note**: you will also need to install [Docker Desktop](https://www.docker.com/products/docker-desktop/).
> This program needs to run alongside Dangerzone at all times, since it is what allows Dangerzone to
> create the secure environment.

## Windows
See instructions in [README.md](README.md#windows).

- Download [Dangerzone 0.9.0 for Windows](https://github.com/freedomofpress/dangerzone/releases/download/v0.9.0/Dangerzone-0.9.0.msi)

> **Note**: you will also need to install [Docker Desktop](https://www.docker.com/products/docker-desktop/).
> This program needs to run alongside Dangerzone at all times, since it is what allows Dangerzone to
> create the secure environment.

> [!TIP]
> We generally support Windows releases that are still within [Microsoft's servicing timeline](https://support.microsoft.com/en-us/help/13853/windows-lifecycle-fact-sheet).
>
> Docker sets the bottom line:
>
> > Docker only supports Docker Desktop on Windows for those versions of Windows that are still within [Microsoft's servicing timeline](https://support.microsoft.com/en-us/help/13853/windows-lifecycle-fact-sheet). Docker Desktop is not supported on server versions of Windows, such as Windows Server 2019 or Windows Server 2022.


## Linux

On Linux, Dangerzone uses [Podman](https://podman.io/) instead of Docker Desktop for creating
an isolated environment. It will be installed automatically when installing Dangerzone.

> [!TIP]
> We support Ubuntu, Debian, and Fedora releases that are still within
> their respective servicing timelines, with a few twists:
>
> - Ubuntu: We follow upstream support with an extra cutoff date. No support for
>   versions prior to the second oldest LTS release.
> - Fedora: We follow upstream support
> - Debian: current stable, oldstable and LTS releases.

Dangerzone is available for:

- Ubuntu 25.04 (plucky)
- Ubuntu 24.10 (oracular)
- Ubuntu 24.04 (noble)
- Ubuntu 23.10 (mantic)
- Ubuntu 22.04 (jammy)
- Ubuntu 20.04 (focal)
- Debian 13 (trixie)
- Debian 12 (bookworm)
- Debian 11 (bullseye)
- Fedora 42
- Fedora 41
- Fedora 40
- Fedora 39
- Tails
- Qubes OS (beta support)

### Ubuntu, Debian

@@ -26,41 +94,7 @@ Dangerzone is available for:
<tr>
<td>
<details>
<summary><i>:memo: Expand this section if you are on Ubuntu 20.04 (Focal).</i></summary>
</br>

Dangerzone requires [Podman](https://podman.io/), which is not available
through the official Ubuntu Focal repos. To proceed with the Dangerzone
installation, you need to add an extra OpenSUSE repo that provides Podman to
Ubuntu Focal users. You can follow the instructions below, which have been
copied from the [official Podman blog](https://podman.io/new/2021/06/16/new.html):

```bash
sudo apt-get update && sudo apt-get install curl wget gnupg2 -y
. /etc/os-release
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' \
  > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_${VERSION_ID}/Release.key -O- \
  | sudo apt-key add -
sudo apt update
```

Also, you need to install the `python-all` package, due to an `stdeb` bug that
existed before v0.9.1:

```
sudo apt-get install python-all -y
```
</details>
</td>
</tr>
</table>

<table>
<tr>
<td>
<details>
<summary><i>:information_source: Backport notice for Ubuntu 24.04 (Noble) users regarding the <code>conmon</code> package</i></summary>
<summary><i>:information_source: Backport notice for Ubuntu 22.04 (Jammy) users regarding the <code>conmon</code> package</i></summary>
</br>

The `conmon` version that Podman uses and Ubuntu Jammy ships has a bug
@@ -76,9 +110,20 @@ Dangerzone is available for:
</tr>
</table>

Add our repository following these instructions:
First, retrieve the PGP keys.

Download the GPG key for the repo:
Starting with Trixie, follow these instructions to download the PGP keys:

```bash
sudo apt-get update && sudo apt-get install sq -y
mkdir -p /etc/apt/keyrings/
sq network keyserver \
  --server hkps://keys.openpgp.org \
  search "DE28 AB24 1FA4 8260 FAC9 B8BA A7C9 B385 2260 4281" \
  --output /etc/apt/keyrings/fpf-apt-tools-archive-keyring.gpg
```

On other Debian-derivatives:

```sh
sudo apt-get update && sudo apt-get install gnupg2 ca-certificates -y
@@ -86,10 +131,12 @@ gpg --keyserver hkps://keys.openpgp.org \
  --no-default-keyring --keyring ./fpf-apt-tools-archive-keyring.gpg \
  --recv-keys "DE28 AB24 1FA4 8260 FAC9 B8BA A7C9 B385 2260 4281"
sudo mkdir -p /etc/apt/keyrings/
sudo mv fpf-apt-tools-archive-keyring.gpg /etc/apt/keyrings
sudo gpg --no-default-keyring --keyring ./fpf-apt-tools-archive-keyring.gpg \
  --armor --export "DE28 AB24 1FA4 8260 FAC9 B8BA A7C9 B385 2260 4281" \
  > /etc/apt/keyrings/fpf-apt-tools-archive-keyring.gpg
```

Add the URL of the repo in your APT sources:
Then, on all distributions, add the URL of the repo in your APT sources:

```sh
. /etc/os-release
@@ -129,28 +176,11 @@ sudo apt install -y dangerzone

### Fedora

<table>
<tr>
<td>
<details>
<summary><i>:information_source: Backport notice for Fedora users regarding the <code>python3-pyside6</code> package</i></summary>
</br>

Fedora 39 onwards does not provide official Python bindings for Qt. For
this reason, we provide our own `python3-pyside6` package (see
[build instructions](https://github.com/freedomofpress/maint-dangerzone-pyside6))
from our YUM repo. For a deeper dive on this subject, you may read
[this issue](https://github.com/freedomofpress/dangerzone/issues/211#issuecomment-1827777122).
</details>
</td>
</tr>
</table>

Type the following commands in a terminal:

```
sudo dnf install 'dnf-command(config-manager)'
sudo dnf config-manager --add-repo=https://packages.freedom.press/yum-tools-prod/dangerzone/dangerzone.repo
sudo dnf-3 config-manager --add-repo=https://packages.freedom.press/yum-tools-prod/dangerzone/dangerzone.repo
sudo dnf install dangerzone
```

@@ -201,8 +231,8 @@ After confirming that it matches, type `y` (for yes) and the installation should

> [!IMPORTANT]
> This section will install Dangerzone in your **default template**
> (`fedora-40` as of writing this). If you want to install it in a different
> one, make sure to replace `fedora-40` with the template of your choice.
> (`fedora-41` as of writing this). If you want to install it in a different
> one, make sure to replace `fedora-41` with the template of your choice.

The following steps must be completed once. Make sure you run them in the
specified qubes.
@@ -219,7 +249,7 @@ Create a **disposable**, offline app qube (`dz-dvm`), based on your default
template. This will be the qube where the documents will be sanitized:

```
qvm-create --class AppVM --label red --template fedora-40 \
qvm-create --class AppVM --label red --template fedora-41 \
  --prop netvm="" --prop template_for_dispvms=True \
  --prop default_dispvm='' dz-dvm
```

@@ -232,12 +262,12 @@ document, with the following contents:
dz.Convert * @anyvm @dispvm:dz-dvm allow
```

#### In the `fedora-40` template
#### In the `fedora-41` template

Install Dangerzone:

```
sudo dnf config-manager --add-repo=https://packages.freedom.press/yum-tools-prod/dangerzone/dangerzone.repo
sudo dnf-3 config-manager --add-repo=https://packages.freedom.press/yum-tools-prod/dangerzone/dangerzone.repo
sudo dnf install dangerzone-qubes
```

@@ -253,6 +283,12 @@ column to "Selected".
You can now launch Dangerzone from the list of applications for your qube, and
pass it a file to sanitize.

## Tails

Dangerzone is not yet available by default in Tails, but we have collaborated
with the Tails team to offer manual
[installation instructions](https://tails.net/doc/persistent_storage/additional_software/dangerzone/index.en.html)
for Tails users.

## Build from source

@@ -287,7 +323,7 @@ Our [GitHub Releases page](https://github.com/freedomofpress/dangerzone/releases
hosts the following files:
* Windows installer (`Dangerzone-<version>.msi`)
* macOS archives (`Dangerzone-<version>-<arch>.dmg`)
* Container image (`container.tar.gz`)
* Container images (`container-<version>-<arch>.tar`)
* Source package (`dangerzone-<version>.tar.gz`)

All these files are accompanied by signatures (as `.asc` files). We'll explain

@@ -312,10 +348,10 @@ gpg --verify Dangerzone-0.6.1-arm64.dmg.asc Dangerzone-0.6.1-arm64.dmg
gpg --verify Dangerzone-0.6.1-i686.dmg.asc Dangerzone-0.6.1-i686.dmg
```

For the container image:
For the container images:

```
gpg --verify container.tar.gz.asc container.tar.gz
gpg --verify container-0.6.1-i686.tar.asc container-0.6.1-i686.tar
```

For the source package:

65 Makefile

@@ -1,23 +1,6 @@
LARGE_TEST_REPO_DIR:=tests/test_docs_large
GIT_DESC=$$(git describe)
JUNIT_FLAGS := --capture=sys -o junit_logging=all

.PHONY: lint-black
lint-black: ## check python source code formatting issues, with black
	black --check --diff ./

.PHONY: lint-black-apply
lint-black-apply: ## apply black's source code formatting suggestions
	black ./

.PHONY: lint-isort
lint-isort: ## check imports are organized, with isort
	isort --check --diff ./

.PHONY: lint-isort-apply
lint-isort-apply: ## apply isort's imports organization suggestions
	isort ./

MYPY_ARGS := --ignore-missing-imports \
	--disallow-incomplete-defs \
	--disallow-untyped-defs \
@@ -26,26 +9,24 @@ MYPY_ARGS := --ignore-missing-imports \
	--warn-unused-ignores \
	--exclude $(LARGE_TEST_REPO_DIR)/*.py

mypy-host:
.PHONY: lint
lint: ## Check the code for linting, formatting, and typing issues with ruff and mypy
	ruff check
	ruff format --check
	mypy $(MYPY_ARGS) dangerzone

mypy-tests:
	mypy $(MYPY_ARGS) tests

mypy: mypy-host mypy-tests ## check type hints with mypy

.PHONY: lint
lint: lint-black lint-isort mypy ## check the code with various linters

.PHONY: lint-apply
lint-apply: lint-black-apply lint-isort-apply ## apply all the linter's suggestions
.PHONY: fix
fix: ## apply all the suggestions from ruff
	ruff check --fix
	ruff format

.PHONY: test
test:
test: ## Run the tests
	# Make each GUI test run as a separate process, to avoid segfaults due to
	# shared state.
	# See more in https://github.com/freedomofpress/dangerzone/issues/493
	pytest --co -q tests/gui | grep -v ' collected' | xargs -n 1 pytest -v
	pytest --co -q tests/gui | grep -e '^tests/' | xargs -n 1 pytest -v
	pytest -v --cov --ignore dev_scripts --ignore tests/gui --ignore tests/test_large_set.py


@@ -66,6 +47,32 @@ test-large: test-large-init ## Run large test set
	python -m pytest --tb=no tests/test_large_set.py::TestLargeSet -v $(JUNIT_FLAGS) --junitxml=$(TEST_LARGE_RESULTS)
	python $(TEST_LARGE_RESULTS)/report.py $(TEST_LARGE_RESULTS)

Dockerfile: Dockerfile.env Dockerfile.in ## Regenerate the Dockerfile from its template
	poetry run jinja2 Dockerfile.in Dockerfile.env > Dockerfile

.PHONY: poetry-install
poetry-install: ## Install project dependencies
	poetry install

.PHONY: build-clean
build-clean:
	poetry run doit clean

.PHONY: build-macos-intel
build-macos-intel: build-clean poetry-install ## Build macOS intel package (.dmg)
	poetry run doit -n 8

.PHONY: build-macos-arm
build-macos-arm: build-clean poetry-install ## Build macOS Apple Silicon package (.dmg)
	poetry run doit -n 8 macos_build_dmg

.PHONY: build-linux
build-linux: build-clean poetry-install ## Build linux packages (.rpm and .deb)
	poetry run doit -n 8 fedora_rpm debian_deb

.PHONY: regenerate-reference-pdfs
regenerate-reference-pdfs: ## Regenerate the reference PDFs
	pytest tests/test_cli.py -k regenerate --generate-reference-pdfs
# Makefile self-help borrowed from the securedrop-client project
# Explanation of the below shell command should it ever break.
# 1. Set the field separator to ": ##" and any make targets that might appear between : and ##

197 QA.md Normal file

@@ -0,0 +1,197 @@
## QA

To ensure that new releases do not introduce regressions, and support existing
and newer platforms, we have to test that the produced packages work as expected.

Check the following:

- [ ] Make sure that the tip of the `main` branch passes the CI tests.
- [ ] Make sure that the Apple account has a valid application password and has
      agreed to the latest Apple terms (see [macOS release](#macos-release)
      section).

Because it is repetitive, we wrote a script to help with the QA.
It can run the tasks for you, pausing when it needs manual intervention.

You can run it with a command like:

```bash
poetry run ./dev_scripts/qa.py {distro}-{version}
```

### The checklist

- [ ] Create a test build in Windows and make sure it works:
  - [ ] Check if the suggested Python version is still supported.
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Build and run the Dangerzone .exe
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in macOS (Intel CPU) and make sure it works:
  - [ ] Check if the suggested Python version is still supported.
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Create and run an app bundle.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in macOS (M1/2 CPU) and make sure it works:
  - [ ] Check if the suggested Python version is still supported.
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Create and run an app bundle.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in the most recent Ubuntu LTS platform (Ubuntu 24.04
      as of writing this) and make sure it works:
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Create a .deb package and install it system-wide.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in the most recent Fedora platform (Fedora 41 as of
      writing this) and make sure it works:
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Create an .rpm package and install it system-wide.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in the most recent Qubes Fedora template (Fedora 40 as
      of writing this) and make sure it works:
  - [ ] Create a new development environment with Poetry.
  - [ ] Run the Dangerzone tests.
  - [ ] Create a Qubes .rpm package and install it system-wide.
  - [ ] Ensure that the Dangerzone application appears in the "Applications"
        tab.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below) and make sure
        they spawn disposable qubes.

### Scenarios

#### 1. Dangerzone correctly identifies that Docker/Podman is not installed

_(Only for MacOS / Windows)_

Temporarily hide the Docker/Podman binaries, e.g., rename the `docker` /
`podman` binaries to something else. Then run Dangerzone. Dangerzone should
prompt the user to install Docker/Podman.

#### 2. Dangerzone correctly identifies that Docker is not running

_(Only for MacOS / Windows)_

Stop the Docker Desktop application. Then run Dangerzone. Dangerzone should
prompt the user to start Docker Desktop.


#### 3. Updating Dangerzone handles external state correctly.

_(Applies to Windows/MacOS)_

Install the previous version of Dangerzone, downloaded from the website.

Open the Dangerzone application and enable some non-default settings.
**If there are new settings, make sure to change those as well**.

Close the Dangerzone application and get the container image for that
version. For example:

```
$ docker images dangerzone.rocks/dangerzone
REPOSITORY                   TAG         IMAGE ID      CREATED       SIZE
dangerzone.rocks/dangerzone  <tag>       <image ID>    <date>        <size>
```

Then run the version under QA and ensure that the settings remain changed.

Afterwards, check that the new docker image was installed by running the same
command and seeing the following differences:

```
$ docker images dangerzone.rocks/dangerzone
REPOSITORY                   TAG          IMAGE ID        CREATED       SIZE
dangerzone.rocks/dangerzone  <other tag>  <different ID>  <newer date>  <different size>
```
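
If you want to script this comparison, here is a minimal sketch in Python. The only assumptions are that `docker` is on your `PATH` and that the image is named `dangerzone.rocks/dangerzone`:

```python
import subprocess

def image_ids(repo: str = "dangerzone.rocks/dangerzone") -> set[str]:
    # `docker images -q` prints one image ID per local tag of the repository.
    out = subprocess.check_output(["docker", "images", "-q", repo], text=True)
    return set(out.split())

before = image_ids()
input("Update Dangerzone and run a conversion, then press Enter... ")
after = image_ids()
assert after - before, "no new container image was installed"
```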

#### 4. Dangerzone successfully installs the container image

_(Only for Linux)_

Remove the Dangerzone container image from Docker/Podman. Then run Dangerzone.
Dangerzone should install the container image successfully.

#### 5. Dangerzone retains the settings of previous runs

Run Dangerzone and make some changes in the settings (e.g., change the OCR
language, toggle whether to open the document after conversion, etc.). Restart
Dangerzone. Dangerzone should show the settings that the user chose.

#### 6. Dangerzone reports failed conversions

Run Dangerzone and convert the `tests/test_docs/sample_bad_pdf.pdf` document.
Dangerzone should fail gracefully, by reporting that the operation failed, and
showing the following error message:

> The document format is not supported

#### 7. Dangerzone succeeds in converting multiple documents

Run Dangerzone against a list of documents, and tick all options. Ensure that:
* Conversions take place sequentially.
* Attempting to close the window while converting asks the user if they want to
  abort the conversions.
* Conversions are completed successfully.
* Conversions show individual progress in real-time (double-check for Qubes).
* _(Only for Linux)_ The resulting files open with the PDF viewer of our choice.
* OCR seems to have detected characters in the PDF files.
* The resulting files have been saved with the proper suffix, in the proper
  location.
* The original files have been saved in the `unsafe/` directory.

#### 8. Dangerzone is able to handle drag-n-drop

Run Dangerzone against a set of documents that you drag-n-drop. Files should be
added and conversion should run without issue.

> [!TIP]
> On our end-user container environments for Linux, we can start a file manager
> with `thunar &`.

#### 9. Dangerzone CLI succeeds in converting multiple documents

_(Only for Windows and Linux)_

Run Dangerzone CLI against a list of documents. Ensure that conversions happen
sequentially, are completed successfully, and we see their progress.

#### 10. Dangerzone can open a document for conversion via right-click -> "Open With"

_(Only for Windows, MacOS and Qubes)_

Go to a directory with office documents, right-click on one, and click on "Open
With". We should be able to open the file with Dangerzone, and then convert it.

#### 11. Dangerzone shows helpful errors for setup issues on Qubes

_(Only for Qubes)_

Check what errors Dangerzone throws in the following scenarios. The errors
should point the user to the Qubes notifications in the top-right corner:

1. The `dz-dvm` template does not exist. We can trigger this scenario by
   temporarily renaming this template.
2. The Dangerzone RPC policy does not exist. We can trigger this scenario by
   temporarily renaming the `dz.Convert` policy.
3. The `dz-dvm` disposable Qube cannot start due to insufficient resources. We
   can trigger this scenario by temporarily increasing the minimum required RAM
   of the `dz-dvm` template to more than the available amount.

48 README.md

@@ -6,33 +6,23 @@ Take potentially dangerous PDFs, office documents, or images and convert them to
|  | 
|
||||
|--|--|
|
||||
|
||||
Dangerzone works like this: You give it a document that you don't know if you can trust (for example, an email attachment). Inside of a sandbox, Dangerzone converts the document to a PDF (if it isn't already one), and then converts the PDF into raw pixel data: a huge list of RGB color values for each page. Then, in a separate sandbox, Dangerzone takes this pixel data and converts it back into a PDF.
|
||||
Dangerzone works like this: You give it a document that you don't know if you can trust (for example, an email attachment). Inside of a sandbox, Dangerzone converts the document to a PDF (if it isn't already one), and then converts the PDF into raw pixel data: a huge list of RGB color values for each page. Then, outside of the sandbox, Dangerzone takes this pixel data and converts it back into a PDF.
|
||||
|
||||
_Read more about Dangerzone in the [official site](https://dangerzone.rocks/about.html)._
|
||||
_Read more about Dangerzone in the [official site](https://dangerzone.rocks/about/)._
|
||||
|
||||
## Getting started
|
||||
|
||||
### MacOS
|
||||
- Download [Dangerzone 0.6.1 for Mac (Apple Silicon CPU)](https://github.com/freedomofpress/dangerzone/releases/download/v0.6.1/Dangerzone-0.6.1-arm64.dmg)
|
||||
- Download [Dangerzone 0.6.1 for Mac (Intel CPU)](https://github.com/freedomofpress/dangerzone/releases/download/v0.6.1/Dangerzone-0.6.1-i686.dmg)
|
||||
Follow the instructions for each platform:
|
||||
|
||||
You can also install Dangerzone for Mac using [Homebrew](https://brew.sh/): `brew install --cask dangerzone`
|
||||
* [macOS](https://github.com/freedomofpress/dangerzone/blob/v0.9.0/INSTALL.md#macos)
|
||||
* [Windows](https://github.com/freedomofpress/dangerzone/blob/v0.9.0//INSTALL.md#windows)
|
||||
* [Ubuntu Linux](https://github.com/freedomofpress/dangerzone/blob/v0.9.0/INSTALL.md#ubuntu-debian)
|
||||
* [Debian Linux](https://github.com/freedomofpress/dangerzone/blob/v0.9.0/INSTALL.md#ubuntu-debian)
|
||||
* [Fedora Linux](https://github.com/freedomofpress/dangerzone/blob/v0.9.0/INSTALL.md#fedora)
|
||||
* [Qubes OS (beta)](https://github.com/freedomofpress/dangerzone/blob/v0.9.0/INSTALL.md#qubes-os)
|
||||
* [Tails](https://github.com/freedomofpress/dangerzone/blob/v0.9.0/INSTALL.md#tails)
|
||||
|
||||
> **Note**: you will also need to install [Docker Desktop](https://www.docker.com/products/docker-desktop/).
|
||||
> This program needs to run alongside Dangerzone at all times, since it is what allows Dangerzone to
|
||||
> create the secure environment.
|
||||
|
||||
### Windows
|
||||
|
||||
- Download [Dangerzone 0.6.1 for Windows](https://github.com/freedomofpress/dangerzone/releases/download/v0.6.1/Dangerzone-0.6.1.msi)
|
||||
|
||||
> **Note**: you will also need to install [Docker Desktop](https://www.docker.com/products/docker-desktop/).
|
||||
> This program needs to run alongside Dangerzone at all times, since it is what allows Dangerzone to
|
||||
> create the secure environment.
|
||||
|
||||
### Linux
|
||||
|
||||
See [installing Dangerzone](INSTALL.md#linux) for adding the Linux repositories to your system.
|
||||
You can read more about our operating system support [here](https://github.com/freedomofpress/dangerzone/blob/v0.9.0/INSTALL.md#operating-system-support).
|
||||
|
||||
## Some features
|
||||
|
||||
|
@ -93,18 +83,6 @@ Dangerzone gets updates to improve its features _and_ to fix problems. So, updat
|
|||
2. Now find the latest available version of Dangerzone: go to the [download page](https://dangerzone.rocks/#downloads). Look for the version number displayed. The number will be using the same format as in Step 1.
|
||||
3. Is the version on the Dangerzone download page higher than the version of your installed app? Go ahead and update.
|
||||
|
||||
### "I get `invalid json returned from container` on MacOS Big Sur or newer (MacOS 11.x.x or higher)"
|
||||
### Can I use Podman Desktop?
|
||||
|
||||
Are you using the latest version of Dangerzone? See the FAQ for: "I'm experiencing an issue while using Dangerzone."
|
||||
|
||||
You _may_ be attempting to convert a file in a directory to which Docker Desktop does not have access. Dangerzone for Mac requires Docker Desktop for conversion. Docker Desktop, in turn, requires permission from MacOS to access the directory in which your target file is located.
|
||||
|
||||
To grant this permission:
|
||||
|
||||
1. On MacOS 13, choose Apple menu > System Settings. On lower versions, choose System Preferences.
|
||||
2. Tap into Privacy & Security in the sidebar. (You may need to scroll down.)
|
||||
3. In the Privacy section, tap into Files & Folders. (Again, you may need to scroll down.)
|
||||
4. Scroll to the entry for Docker. Tap the > to expand the entry.
|
||||
5. Enable the toggle beside the directory where your file is present. For example, if the file to be converted is in the Downloads folder, enable the toggle beside Downloads.
|
||||
|
||||
(Full Disk Access permission has a similar effect, but it's enough to give Docker access to _only_ the directory containing the intended file(s) to be converted. Full Disk is unnecessary. As of 2023.04.28, granting one of these permissions continues to be required for successful conversion. Apologies for the extra steps. Dangerzone depends on Docker, and the fix for this issue needs to come from upstream. Read more on [#371](https://github.com/freedomofpress/dangerzone/issues/371#issuecomment-1516863056).)
|
||||
Yes! We've introduced [experimental support for Podman Desktop](https://github.com/freedomofpress/dangerzone/blob/main/docs/podman-desktop.md) on Windows and macOS.
|
||||
|
|

461 RELEASE.md

@@ -1,27 +1,28 @@
# Release instructions

This section documents the release process. Unless you're a dangerzone developer making a release, you'll probably never need to follow it.
This section documents how we currently release Dangerzone for the different distributions we support.

## Pre-release

Before making a release, all of these should be complete:
Here is a list of tasks that should be done before issuing the release:

- [ ] Copy the entirety of these instructions onto a new issue and call it **QA and Release version \<VERSION\>**
- [ ] [Add new Linux platforms and remove obsolete ones](#add-new-platforms-and-remove-obsolete-ones)
- [ ] Create a new issue named **QA and Release for version \<VERSION\>**, to track the general progress.
      You can generate its content with the `poetry run ./dev_scripts/generate-release-tasks.py` command.
- [ ] [Add new Linux platforms and remove obsolete ones](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#add-new-linux-platforms-and-remove-obsolete-ones)
- [ ] Bump the Python dependencies using `poetry lock`
- [ ] [Check for official PySide6 versions](#check-for-official-pyside6-versions)
- [ ] Check for new [WiX releases](https://github.com/wixtoolset/wix/releases) and update it if needed
- [ ] Update `version` in `pyproject.toml`
- [ ] Update `share/version.txt`
- [ ] Update the "Version" field in `install/linux/dangerzone.spec`
- [ ] Bump the Debian version by adding a new changelog entry in `debian/changelog`
- [ ] [Bump the minimum Docker Desktop versions](https://github.com/freedomofpress/dangerzone/blob/main/RELEASE.md#bump-the-minimum-docker-desktop-version) in `isolation_provider/container.py`
- [ ] Bump the dates and versions in the `Dockerfile`
- [ ] Update the download links in our `INSTALL.md` page to point to the new version (the download links will be populated after the release)
- [ ] Update screenshot in `README.md`, if necessary
- [ ] CHANGELOG.md should be updated to include a list of all major changes since the last release
- [ ] Create a PGP-signed git tag for the version, e.g., for dangerzone `v0.1.0`:

```
git tag -s v0.1.0
git push origin v0.1.0
```
**Note**: release candidates are suffixed by `-rcX`.
- [ ] A draft release should be created. Copy the release notes text from the template at [`docs/templates/release-notes`](https://github.com/freedomofpress/dangerzone/tree/main/docs/templates/)
- [ ] Send the release notes to editorial for review
- [ ] Do the QA tasks

## Add new Linux platforms and remove obsolete ones

@@ -30,7 +31,7 @@ as a special case of Fedora, release-wise). For each of these platforms, we need
to check if a new version has been added, or if an existing one is now EOL
(https://endoflife.date/ is handy for this purpose).

In case of a new version:
In case of a new version (beta, RC, or official release):

1. Add it in our CI workflows, to test if that version works.
   * See `.circleci/config.yml` and `.github/workflows/ci.yml`, as well as
@@ -44,21 +45,17 @@ In case of a new version:
   `BUILD.md` files where necessary.
4. Send a PR with the above changes.

In case of an EOL version:
In case of the removal of a version:

1. Remove any mention of this version from our repo.
   * Consult the previous paragraph, but also `grep` your way around.
2. Add a notice in our `CHANGELOG.md` about the version removal.

## Check for official PySide6 versions
## Bump the minimum Docker Desktop version

PySide6 6.7.0 is available from the Fedora Rawhide repo, and we expect that a
similar version will be pushed soon to the rest of the stable releases. Prior to
a release, we should check if this has happened already. Once this happens, we
should update our CI tests accordingly, and remove this notice.
We embed the minimum Docker Desktop versions inside Dangerzone, as an incentive for our macOS and Windows users to upgrade to the latest version.

For more info, read:
https://github.com/freedomofpress/maint-dangerzone-pyside6/issues/5
You can find the latest version at the time of the release by looking at [their release notes](https://docs.docker.com/desktop/release-notes/)
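
As a rough illustration of what such a version check can look like (the constant and function names here are hypothetical, not Dangerzone's actual code):

```python
# Hypothetical sketch: compare an installed Docker Desktop version string
# against a pinned minimum, using plain tuple comparison of the components.
MINIMUM_DOCKER_DESKTOP = "4.36.0"  # hypothetical pinned value

def parse(version: str) -> tuple[int, ...]:
    return tuple(int(part) for part in version.split("."))

def is_supported(installed: str) -> bool:
    return parse(installed) >= parse(MINIMUM_DOCKER_DESKTOP)

assert is_supported("4.40.0")
assert not is_supported("4.20.1")
```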
|
||||
## Large Document Testing
|
||||
|
||||
|
@ -68,183 +65,18 @@ Follow the instructions in `docs/developer/TESTING.md` to run the tests.
|
|||
|
||||
These tests will identify any regressions or progression in terms of document coverage.
|
||||
|
||||
## QA

To ensure that new releases do not introduce regressions, and support existing
and newer platforms, we have to do the following:

- [ ] Make sure that the tip of the `main` branch passes the CI tests.
- [ ] Make sure that the Apple account has a valid application password and has
  agreed to the latest Apple terms (see the [macOS release](#macos-release)
  section).
- [ ] Create a test build in Windows and make sure it works:
  - [ ] Check if the suggested Python version is still supported.
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
    the new image.
  - [ ] Run the Dangerzone tests (see the example invocation after this list).
  - [ ] Build and run the Dangerzone .exe
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in macOS (Intel CPU) and make sure it works:
  - [ ] Check if the suggested Python version is still supported.
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
    the new image.
  - [ ] Run the Dangerzone tests.
  - [ ] Create and run an app bundle.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in macOS (M1/M2 CPU) and make sure it works:
  - [ ] Check if the suggested Python version is still supported.
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
    the new image.
  - [ ] Run the Dangerzone tests.
  - [ ] Create and run an app bundle.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in the most recent Ubuntu LTS platform (Ubuntu 24.04
  as of writing this) and make sure it works:
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
    the new image.
  - [ ] Run the Dangerzone tests.
  - [ ] Create a .deb package and install it system-wide.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in the most recent Fedora platform (Fedora 40 as of
  writing this) and make sure it works:
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
    the new image.
  - [ ] Run the Dangerzone tests.
  - [ ] Create an .rpm package and install it system-wide.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in the most recent Qubes Fedora template (Fedora 39 as
  of writing this) and make sure it works:
  - [ ] Create a new development environment with Poetry.
  - [ ] Run the Dangerzone tests.
  - [ ] Create a Qubes .rpm package and install it system-wide.
  - [ ] Ensure that the Dangerzone application appears in the "Applications"
    tab.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below) and make sure
    they spawn disposable qubes.
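
For the "Run the Dangerzone tests" items above, a typical invocation from within the development environment looks like the following sketch; the exact command may differ per platform, so treat `docs/developer/TESTING.md` as authoritative:

```bash
# Run the test suite from the Poetry-managed environment.
poetry run pytest tests
```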

### Scenarios

#### 1. Dangerzone correctly identifies that Docker/Podman is not installed

_(Only for macOS / Windows)_

Temporarily hide the Docker/Podman binaries, e.g., rename the `docker` /
`podman` binaries to something else. Then run Dangerzone. Dangerzone should
prompt the user to install Docker/Podman.
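
For example, on macOS (the path is illustrative; adjust it to wherever the `docker` binary lives on your machine):

```bash
# Temporarily hide the Docker CLI, run Dangerzone, then restore it.
sudo mv /usr/local/bin/docker /usr/local/bin/docker.bak
# ... launch Dangerzone and verify the install prompt appears ...
sudo mv /usr/local/bin/docker.bak /usr/local/bin/docker
```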

#### 2. Dangerzone correctly identifies that Docker is not running

_(Only for macOS / Windows)_

Stop the Docker Desktop application. Then run Dangerzone. Dangerzone should
prompt the user to start Docker Desktop.

#### 3. Updating Dangerzone handles external state correctly

_(Applies to Windows/macOS)_

Install the previous version of Dangerzone, downloaded from the website.

Open the Dangerzone application and enable some non-default settings.
**If there are new settings, make sure to change those as well.**

Close the Dangerzone application and get the container image for that
version. For example:

```
$ docker images dangerzone.rocks/dangerzone:latest
REPOSITORY                   TAG     IMAGE ID    CREATED  SIZE
dangerzone.rocks/dangerzone  latest  <image ID>  <date>   <size>
```

Then run the version under QA and ensure that the settings remain changed.

Afterwards, check that the new Docker image was installed by running the same
command and looking for the following differences:

```
$ docker images dangerzone.rocks/dangerzone:latest
REPOSITORY                   TAG     IMAGE ID        CREATED       SIZE
dangerzone.rocks/dangerzone  latest  <different ID>  <newer date>  <different size>
```

#### 4. Dangerzone successfully installs the container image

_(Only for Linux)_

Remove the Dangerzone container image from Docker/Podman (see the example
below). Then run Dangerzone.
Dangerzone should install the container image successfully.
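
For example, with Podman (the same commands work with `docker`):

```bash
# Delete all local Dangerzone images before launching Dangerzone.
podman rmi --force $(podman images -q dangerzone.rocks/dangerzone)
```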

#### 5. Dangerzone retains the settings of previous runs

Run Dangerzone and make some changes in the settings (e.g., change the OCR
language, toggle whether to open the document after conversion, etc.). Restart
Dangerzone. Dangerzone should show the settings that the user chose.

#### 6. Dangerzone reports failed conversions

Run Dangerzone and convert the `tests/test_docs/sample_bad_pdf.pdf` document.
Dangerzone should fail gracefully, by reporting that the operation failed, and
showing the following error message:

> The document format is not supported

#### 7. Dangerzone succeeds in converting multiple documents

Run Dangerzone against a list of documents, and tick all options. Ensure that:

* Conversions take place sequentially.
* Attempting to close the window while converting asks the user if they want to
  abort the conversions.
* Conversions are completed successfully.
* Conversions show individual progress in real-time (double-check for Qubes).
* _(Only for Linux)_ The resulting files open with the PDF viewer of our choice.
* OCR seems to have detected characters in the PDF files.
* The resulting files have been saved with the proper suffix, in the proper
  location.
* The original files have been saved in the `unsafe/` directory.

#### 8. Dangerzone is able to handle drag-n-drop

Run Dangerzone against a set of documents that you drag-n-drop. Files should be
added and conversion should run without issue.

#### 9. Dangerzone CLI succeeds in converting multiple documents

_(Only for Windows and Linux)_

Run the Dangerzone CLI against a list of documents. Ensure that conversions happen
sequentially, are completed successfully, and that we see their progress.
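
An illustrative invocation (the file names are examples; check `dangerzone-cli --help` for the full interface):

```bash
# Convert several documents sequentially, with OCR enabled for English.
dangerzone-cli --ocr-lang eng invoice.pdf report.docx slides.pptx
```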

#### 10. Dangerzone can open a document for conversion via right-click -> "Open With"

_(Only for Windows, macOS and Qubes)_

Go to a directory with office documents, right-click on one, and click on "Open
With". We should be able to open the file with Dangerzone, and then convert it.

#### 11. Dangerzone shows helpful errors for setup issues on Qubes

_(Only for Qubes)_

Check what errors Dangerzone throws in the following scenarios. The errors
should point the user to the Qubes notifications in the top-right corner:

1. The `dz-dvm` template does not exist. We can trigger this scenario by
   temporarily renaming this template.
2. The Dangerzone RPC policy does not exist. We can trigger this scenario by
   temporarily renaming the `dz.Convert` policy.
3. The `dz-dvm` disposable qube cannot start due to insufficient resources. We
   can trigger this scenario by temporarily increasing the minimum required RAM
   of the `dz-dvm` template to more than the available amount (see the example
   below).
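
For the third trigger, one way to provoke the failure (the value is deliberately absurd; note down and restore the original setting afterwards):

```bash
# Ask the dz-dvm template for more RAM than the host can provide.
qvm-prefs dz-dvm memory 9999999
```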

## Release

Once we are confident that the release will be out shortly, and doesn't need any more changes:

- [ ] Create a PGP-signed git tag for the version, e.g., for dangerzone `v0.1.0`:

```bash
git tag -s v0.1.0
git push origin v0.1.0
```

**Note**: release candidates are suffixed by `-rcX`.

> [!IMPORTANT]
> Because we don't have [reproducible builds](https://github.com/freedomofpress/dangerzone/issues/188)
> yet, building the Dangerzone container image in various platforms would lead

@@ -255,7 +87,19 @@ should point the user to the Qubes notifications in the top-right corner:

### macOS Release

> [!TIP]
> You can automate these steps from your macOS terminal app with:
>
> ```
> export APPLE_ID=<email>
> make build-macos-intel # for Intel macOS
> make build-macos-arm   # for Apple Silicon macOS
> ```

The following needs to happen for both Apple Silicon and Intel chipsets.

#### Initial Setup

- Build machine must have:
  - Apple-trusted `Developer ID Application: Freedom of the Press Foundation (94ZZGGGJ3W)` code-signing certificates installed
- Apple account must have:

@@ -267,38 +111,84 @@ https://developer.apple.com and login with the proper Apple ID.

#### Releasing and Signing

Here is what you need to do:

- [ ] Verify and install the latest supported Python version from
  [python.org](https://www.python.org/downloads/macos/) (do not use the one from
  brew as it is known to [cause issues](https://github.com/freedomofpress/dangerzone/issues/471))

- [ ] Checkout the dependencies, and clean your local copy:

```bash
# In case of a new Python installation or minor version upgrade, e.g., from
# 3.11 to 3.12, reinstall Poetry
python3 -m pip install poetry

# You can verify the correct Python version is used
poetry debug info

# Read the version we are releasing
export VERSION=$(cat share/version.txt)

# Verify and checkout the git tag for this release:
git checkout -f v$VERSION

# Clean the git repository
git clean -df

# Clean up the environment
poetry env remove --all

# Install the dependencies
poetry sync
```

- [ ] Build the container image and the OCR language data

```bash
poetry run ./install/common/build-image.py
poetry run ./install/common/download-tessdata.py

# Copy the container image to the assets folder
cp share/container.tar ~dz/release-assets/$VERSION/dangerzone-$VERSION-arm64.tar
cp share/image-id.txt ~dz/release-assets/$VERSION/.
```

- [ ] Build the app bundle

```bash
poetry run ./install/macos/build-app.py
```

- [ ] Sign the application bundle, and notarize it

  You need to run this command as the account that has access to the code signing certificate.

  This command assumes that you have created, and stored in the Keychain, an
  application password associated with your Apple Developer ID, which will be
  used specifically for `notarytool`.

```bash
# Sign the .App and make it a .dmg
poetry run ./install/macos/build-app.py --only-codesign

# Notarize it. You must run this command from the macOS UI,
# from a terminal application.
xcrun notarytool submit ./dist/Dangerzone.dmg --apple-id $APPLE_ID --keychain-profile "dz-notarytool-release-key" --wait && xcrun stapler staple dist/Dangerzone.dmg

# Copy the .dmg to the assets folder
ARCH=$(uname -m)
if [ "$ARCH" = "x86_64" ]; then
    ARCH="i686"
fi
cp dist/Dangerzone.dmg ~dz/release-assets/$VERSION/Dangerzone-$VERSION-$ARCH.dmg
```
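
To confirm that the notarization ticket was stapled correctly, you can additionally run:

```bash
# Validate the stapled ticket on the final artifact.
xcrun stapler validate dist/Dangerzone.dmg
```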

### Windows Release

The Windows release is performed in a Windows 11 virtual machine (as opposed to a physical one).

#### Initial Setup

@@ -312,14 +202,34 @@ The Windows release is performed in a Windows 11 virtual machine as opposed to a
#### Releasing and Signing

- [ ] Checkout the dependencies, and clean your local copy:

```bash
# In case of a new Python installation or minor version upgrade, e.g., from
# 3.11 to 3.12, reinstall Poetry
python3 -m pip install poetry

# You can verify the correct Python version is used
poetry debug info

# Read the version we are releasing
export VERSION=$(cat share/version.txt)

# Verify and checkout the git tag for this release:
git checkout -f v$VERSION

# Clean the git repository
git clean -df

# Clean up the environment
poetry env remove --all

# Install the dependencies
poetry sync
```

- [ ] Copy the container image into the VM

  > [!IMPORTANT]
  > Instead of running `python .\install\windows\build-image.py` in the VM, run the build image script on the host (making sure to build for `linux/amd64`). Copy `share/container.tar` and `share/image-id.txt` from the host into the `share` folder in the VM.

- [ ] Run `poetry run .\install\windows\build-app.bat`
- [ ] When you're done you will have `dist\Dangerzone.msi`

@@ -327,6 +237,17 @@ Rename `Dangerzone.msi` to `Dangerzone-$VERSION.msi`.

### Linux release

> [!TIP]
> You can automate these steps from any Linux distribution with:
>
> ```
> make build-linux
> ```
>
> You can then add the created artifacts to the appropriate APT/YUM repo.

Below we explain how we build packages for each Linux distribution we support.

#### Debian/Ubuntu

Because the Debian packages do not contain compiled Python code for a specific

@@ -338,21 +259,15 @@ instructions in our build section](https://github.com/freedomofpress/dangerzone/
or create your own locally with:

```sh
# Create and run debian bookworm development environment
./dev_scripts/env.py --distro debian --version bookworm build-dev

# Build the latest container
./dev_scripts/env.py --distro debian --version bookworm run --dev bash -c "cd dangerzone && poetry run ./install/common/build-image.py"

# Create a .deb
./dev_scripts/env.py --distro debian --version bookworm run --dev bash -c "cd dangerzone && ./install/linux/build-deb.py"
```
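
Before publishing, a quick local smoke test of the package can help (the glob assumes the default `deb_dist` output directory):

```bash
# Install the freshly built package system-wide and check it runs.
sudo apt install ./deb_dist/dangerzone_*.deb
dangerzone-cli --version
```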

Publish the .deb under `./deb_dist` to the

@@ -363,28 +278,20 @@ repo, by sending a PR. Follow the instructions in that repo on how to do so.

> **NOTE**: This procedure will have to be done for every supported Fedora version.
>
> In this section, we'll use Fedora 41 as an example.

Create a Fedora development environment. You can [follow the
instructions in our build section](https://github.com/freedomofpress/dangerzone/blob/main/BUILD.md#fedora),
or create your own locally with:

```sh
./dev_scripts/env.py --distro fedora --version 41 build-dev

# Build the latest container (skip if already built):
./dev_scripts/env.py --distro fedora --version 41 run --dev bash -c "cd dangerzone && poetry run ./install/common/build-image.py"

# Create a .rpm:
./dev_scripts/env.py --distro fedora --version 41 run --dev bash -c "cd dangerzone && ./install/linux/build-rpm.py"
```
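
As with the Debian package, a quick local smoke test before publishing (the glob assumes the default `dist` output directory):

```bash
# Install the freshly built RPM system-wide and check it runs.
sudo dnf install ./dist/dangerzone-*.rpm
dangerzone-cli --version
```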

Publish the .rpm under `./dist` to the

@@ -395,7 +302,7 @@ Publish the .rpm under `./dist` to the

Create a .rpm for Qubes:

```sh
./dev_scripts/env.py --distro fedora --version 41 run --dev bash -c "cd dangerzone && ./install/linux/build-rpm.py --qubes"
```

and similarly publish it to the [`freedomofpress/yum-tools-prod`](https://github.com/freedomofpress/yum-tools-prod)

@@ -403,33 +310,39 @@ repo.
## Publishing the Release

To publish the release, you can follow these steps:

- [ ] Create an archive of the Dangerzone source in `tar.gz` format:

```bash
export VERSION=$(cat share/version.txt)
git archive --format=tar.gz -o dangerzone-${VERSION:?}.tar.gz --prefix=dangerzone/ v${VERSION:?}
```

- [ ] Run a container scan on the produced container images (some time may have passed since the artifacts were built):

```bash
docker pull anchore/grype:latest
docker run --rm -v ./share/container.tar:/container.tar anchore/grype:latest /container.tar
```

- [ ] Collect the assets in a single directory, calculate their SHA-256 hashes, and sign them.

  There is an `./dev_scripts/sign-assets.py` script to automate this task.

  **Important:** Before running the script, make sure that they are the same container images as
  the ones that are shipped in other platforms (see our [Pre-release](#Pre-release) section).

```bash
# Sign all the assets
./dev_scripts/sign-assets.py ~/release-assets/$VERSION/github --version $VERSION
```
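
For reference, the manual steps that the script automates look roughly like this (a sketch; the real script may differ in details such as file naming):

```bash
# Compute checksums for every asset and produce a detached, armored signature.
cd ~/release-assets/$VERSION/github
sha256sum * > checksums-$VERSION.txt
gpg --armor --detach-sign checksums-$VERSION.txt
```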

- [ ] Upload all the assets to the draft release on GitHub.

```bash
find ~/release-assets/$VERSION/github | xargs -n1 ./dev_scripts/upload-asset.py --token ~/token --draft
```

- [ ] Update the [Dangerzone website](https://github.com/freedomofpress/dangerzone.rocks) to link to the new installers.
- [ ] Update the brew cask release of Dangerzone with a [PR like this one](https://github.com/Homebrew/homebrew-cask/pull/116319)
- [ ] Update version and links to our installation instructions (`INSTALL.md`) in `README.md`

## Post-release

THIRD_PARTY_NOTICE (new file)

@@ -0,0 +1,14 @@
This project includes third-party components as follows:

1. gVisor APT Key
   - URL: https://gvisor.dev/archive.key
   - Last updated: 2025-01-21
   - Description: This is the public key used for verifying packages from the gVisor repository.

2. Reproducible Containers Helper Script
   - URL: https://github.com/reproducible-containers/repro-sources-list.sh/blob/d15cf12b26395b857b24fba223b108aff1c91b26/repro-sources-list.sh
   - Last updated: 2025-01-21
   - Description: This script is used for building reproducible Debian images.

Please refer to the respective sources for licensing information and further details regarding the use of these components.
@@ -1,6 +1,25 @@
import logging
import os
import sys

logger = logging.getLogger(__name__)

# Call freeze_support() to avoid passing unknown options to the subprocess.
# See https://github.com/freedomofpress/dangerzone/issues/873
import multiprocessing

multiprocessing.freeze_support()


try:
    from . import vendor  # type: ignore [attr-defined]

    vendor_path: str = vendor.__path__[0]
    logger.debug(f"Using vendored PyMuPDF libraries from '{vendor_path}'")
    sys.path.insert(0, vendor_path)
except ImportError:
    pass

if "DANGERZONE_MODE" in os.environ:
    mode = os.environ["DANGERZONE_MODE"]
else:

@@ -11,6 +11,7 @@ from .isolation_provider.container import Container
from .isolation_provider.dummy import Dummy
from .isolation_provider.qubes import Qubes, is_qubes_native_conversion
from .logic import DangerzoneCore
from .settings import Settings
from .util import get_version, replace_control_chars

@@ -37,30 +38,62 @@ def print_header(s: str) -> None:
)
@click.argument(
    "filenames",
    required=False,
    nargs=-1,
    type=click.UNPROCESSED,
    callback=args.validate_input_filenames,
)
@click.option(
    "--debug",
    "debug",
    flag_value=True,
    help="Run Dangerzone in debug mode, to get logs from gVisor.",
)
@click.option(
    "--set-container-runtime",
    required=False,
    help=(
        "The name or full path of the container runtime you want Dangerzone to use."
        " You can specify the value 'default' if you want to take back your choice, and"
        " let Dangerzone use the default runtime for this OS"
    ),
)
@click.version_option(version=get_version(), message="%(version)s")
@errors.handle_document_errors
def cli_main(
    output_filename: Optional[str],
    ocr_lang: Optional[str],
    filenames: Optional[List[str]],
    archive: bool,
    dummy_conversion: bool,
    debug: bool,
    set_container_runtime: Optional[str] = None,
) -> None:
    setup_logging()
    if set_container_runtime:
        settings = Settings()
        if set_container_runtime == "default":
            settings.unset_custom_runtime()
            click.echo(
                "Instructed Dangerzone to use the default container runtime for this OS"
            )
        else:
            container_runtime = settings.set_custom_runtime(
                set_container_runtime, autosave=True
            )
            click.echo(f"Set the settings container_runtime to {container_runtime}")
        sys.exit(0)
    elif not filenames:
        raise click.UsageError("Missing argument 'FILENAMES...'")

    if getattr(sys, "dangerzone_dev", False) and dummy_conversion:
        dangerzone = DangerzoneCore(Dummy())
    elif is_qubes_native_conversion():
        dangerzone = DangerzoneCore(Qubes())
    else:
        dangerzone = DangerzoneCore(Container(debug=debug))

    display_banner()
    if len(filenames) == 1 and output_filename:
        dangerzone.add_document_from_filename(filenames[0], output_filename, archive)
    elif len(filenames) > 1 and output_filename:
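
Based on the option added above, the new flag can be exercised from a shell like so:

```bash
# Point Dangerzone at a specific container runtime, then revert to the OS default.
dangerzone-cli --set-container-runtime podman
dangerzone-cli --set-container-runtime default
```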

@@ -295,7 +328,7 @@ def display_banner() -> None:
        + Back.BLACK
        + Fore.LIGHTWHITE_EX
        + Style.BRIGHT
        + f"{' ' * left_spaces}Dangerzone v{get_version()}{' ' * right_spaces}"
        + Fore.YELLOW
        + Style.DIM
        + "│"

@@ -313,4 +346,10 @@ def display_banner() -> None:
        + Style.DIM
        + "│"
    )
    print(
        Back.BLACK
        + Fore.YELLOW
        + Style.DIM
        + "╰──────────────────────────╯"
        + Style.RESET_ALL
    )

@@ -59,10 +59,28 @@ oci_config: dict[str, typing.Any] = {
    "root": {"path": "rootfs", "readonly": True},
    "hostname": "dangerzone",
    "mounts": [
        # Mask almost every system directory of the outer container, by mounting tmpfs
        # on top of them. This is done to avoid leaking any sensitive information,
        # either mounted by Podman/Docker, or when gVisor runs, since we reuse the same
        # rootfs. We basically mask everything except for `/usr`, `/bin`, `/lib`,
        # `/etc`, and `/opt`.
        #
        # Note that we set `--root /home/dangerzone/.containers` for the directory where
        # gVisor will create files at runtime, which means that in principle, we are
        # covered by the masking of `/home/dangerzone` that follows below.
        #
        # Finally, note that the following list has been taken from the dirs in our
        # container image, and double-checked against the top-level dirs listed in the
        # Filesystem Hierarchy Standard (FHS) [1]. It would be nice to have an allowlist
        # approach instead of a denylist, but FHS is such an old standard that we don't
        # expect any new top-level dirs to pop up any time soon.
        #
        # [1] https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard
        {
            "destination": "/boot",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev", "ro"],
        },
        {
            "destination": "/dev",

@@ -70,6 +88,53 @@ oci_config: dict[str, typing.Any] = {
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev"],
        },
        {
            "destination": "/home",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev", "ro"],
        },
        {
            "destination": "/media",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev", "ro"],
        },
        {
            "destination": "/mnt",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev", "ro"],
        },
        {
            "destination": "/proc",
            "type": "proc",
            "source": "proc",
        },
        {
            "destination": "/root",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev", "ro"],
        },
        {
            "destination": "/run",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev"],
        },
        {
            "destination": "/sbin",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev", "ro"],
        },
        {
            "destination": "/srv",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev", "ro"],
        },
        {
            "destination": "/sys",
            "type": "tmpfs",

@@ -82,6 +147,12 @@ oci_config: dict[str, typing.Any] = {
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev"],
        },
        {
            "destination": "/var",
            "type": "tmpfs",
            "source": "tmpfs",
            "options": ["nosuid", "noexec", "nodev"],
        },
        # LibreOffice needs a writable home directory, so just mount a tmpfs
        # over it.
        {

@@ -142,6 +213,9 @@ runsc_argv = [
    "--rootless=true",
    "--network=none",
    "--root=/home/dangerzone/.containers",
    # Disable DirectFS to make the seccomp filter even stricter,
    # at some performance cost.
    "--directfs=false",
]
if os.environ.get("RUNSC_DEBUG"):
    runsc_argv += ["--debug=true", "--alsologtostderr=true"]

dangerzone/container_helpers/gvisor.key (new file)

@@ -0,0 +1,29 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBF0meAYBEACcBYPOSBiKtid+qTQlbgKGPxUYt0cNZiQqWXylhYUT4PuNlNx5
s+sBLFvNTpdTrXMmZ8NkekyjD1HardWvebvJT4u+Ho/9jUr4rP71cNwNtocz/w8G
DsUXSLgH8SDkq6xw0L+5eGc78BBg9cOeBeFBm3UPgxTBXS9Zevoi2w1lzSxkXvjx
cGzltzMZfPXERljgLzp9AAfhg/2ouqVQm37fY+P/NDzFMJ1XHPIIp9KJl/prBVud
jJJteFZ5sgL6MwjBQq2kw+q2Jb8Zfjl0BeXDgGMN5M5lGhX2wTfiMbfo7KWyzRnB
RpSP3BxlLqYeQUuLG5Yx8z3oA3uBkuKaFOKvXtiScxmGM/+Ri2YM3m66imwDhtmP
AKwTPI3Re4gWWOffglMVSv2sUAY32XZ74yXjY1VhK3bN3WFUPGrgQx4X7GP0A1Te
lzqkT3VSMXieImTASosK5L5Q8rryvgCeI9tQLn9EpYFCtU3LXvVgTreGNEEjMOnL
dR7yOU+Fs775stn6ucqmdYarx7CvKUrNAhgEeHMonLe1cjYScF7NfLO1GIrQKJR2
DE0f+uJZ52inOkO8ufh3WVQJSYszuS3HCY7w5oj1aP38k/y9zZdZvVvwAWZaiqBQ
iwjVs6Kub76VVZZhRDf4iYs8k1Zh64nXdfQt250d8U5yMPF3wIJ+c1yhxwARAQAB
tCpUaGUgZ1Zpc29yIEF1dGhvcnMgPGd2aXNvci1ib3RAZ29vZ2xlLmNvbT6JAk4E
EwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQRvHfheOnHCSRjnJ9Vv
xtVU4yvZQwUCYO4TxQAKCRBvxtVU4yvZQ9UoEACLPV7CnEA2bjCPi0NCWB/Mo1WL
evqv7Wv7vmXzI1K9DrqOhxuamQW75SVXg1df0hTJWbKFmDAip6NEC2Rg5P+A8hHj
nW/VG+q4ZFT662jDhnXQiO9L7EZzjyqNF4yWYzzgnqEu/SmGkDLDYiUCcGBqS2oE
EQfk7RHJSLMJXAnNDH7OUDgrirSssg/dlQ5uAHA9Au80VvC5fsTKza8b3Aydw3SV
iB8/Yuikbl8wKbpSGiXtR4viElXjNips0+mBqaUk2xpqSBrsfN+FezcInVXaXFeq
xtpq2/3M3DYbqCRjqeyd9wNi92FHdOusNrK4MYe0pAYbGjc65BwH+F0T4oJ8ZSJV
lIt+FZ0MqM1T97XadybYFsJh8qvajQpZEPL+zzNncc4f1d80e7+lwIZV/al0FZWW
Zlp7TpbeO/uW+lHs5W14YKwaQVh1whapKXTrATipNOOSCw2hnfrT8V7Hy55QWaGZ
f4/kfy929EeCP16d/LqOClv0j0RBr6NhRBQ0l/BE/mXjJwIk6nKwi+Yi4ek1ARi6
AlCMLn9AZF7aTGpvCiftzIrlyDfVZT5IX03TayxRHZ4b1Rj8eyJaHcjI49u83gkr
4LGX08lEawn9nxFSx4RCg2swGiYw5F436wwwAIozqJuDASeTa3QND3au5v0oYWnl
umDySUl5wPaAaALgzA==
=5/8T
-----END PGP PUBLIC KEY BLOCK-----

dangerzone/container_helpers/repro-sources-list.sh (new executable file)

@@ -0,0 +1,103 @@
#!/bin/bash
#
# Copyright The repro-sources-list.sh Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -----------------------------------------------------------------------------
# repro-sources-list.sh:
# configures /etc/apt/sources.list and similar files for installing packages from a snapshot.
#
# This script is expected to be executed inside a Dockerfile.
#
# The following distributions are supported:
# - debian:11 (/etc/apt/sources.list)
# - debian:12 (/etc/apt/sources.list.d/debian.sources)
# - ubuntu:22.04 (/etc/apt/sources.list)
# - ubuntu:24.04 (/etc/apt/sources.list.d/ubuntu.sources)
# - archlinux (/etc/pacman.d/mirrorlist)
#
# For further information, see https://github.com/reproducible-containers/repro-sources-list.sh
# -----------------------------------------------------------------------------

set -eux -o pipefail

. /etc/os-release

: "${KEEP_CACHE:=1}"

keep_apt_cache() {
  rm -f /etc/apt/apt.conf.d/docker-clean
  echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
}

case "${ID}" in
"debian")
  : "${SNAPSHOT_ARCHIVE_BASE:=http://snapshot.debian.org/archive/}"
  : "${BACKPORTS:=}"
  if [ -e /etc/apt/sources.list.d/debian.sources ]; then
    : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list.d/debian.sources)}"
    rm -f /etc/apt/sources.list.d/debian.sources
  else
    : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list)}"
  fi
  snapshot="$(printf "%(%Y%m%dT%H%M%SZ)T\n" "${SOURCE_DATE_EPOCH}")"
  # TODO: use the new format for Debian >= 12
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME} main" >/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian-security/${snapshot} ${VERSION_CODENAME}-security main" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME}-updates main" >>/etc/apt/sources.list
  if [ "${BACKPORTS}" = 1 ]; then echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}debian/${snapshot} ${VERSION_CODENAME}-backports main" >>/etc/apt/sources.list; fi
  if [ "${KEEP_CACHE}" = 1 ]; then keep_apt_cache; fi
  ;;
"ubuntu")
  : "${SNAPSHOT_ARCHIVE_BASE:=http://snapshot.ubuntu.com/}"
  if [ -e /etc/apt/sources.list.d/ubuntu.sources ]; then
    : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list.d/ubuntu.sources)}"
    rm -f /etc/apt/sources.list.d/ubuntu.sources
  else
    : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /etc/apt/sources.list)}"
  fi
  snapshot="$(printf "%(%Y%m%dT%H%M%SZ)T\n" "${SOURCE_DATE_EPOCH}")"
  # TODO: use the new format for Ubuntu >= 24.04
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} main restricted" >/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates main restricted" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} universe" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates universe" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME} multiverse" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-updates multiverse" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-backports main restricted universe multiverse" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security main restricted" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security universe" >>/etc/apt/sources.list
  echo "deb [check-valid-until=no] ${SNAPSHOT_ARCHIVE_BASE}ubuntu/${snapshot} ${VERSION_CODENAME}-security multiverse" >>/etc/apt/sources.list
  if [ "${KEEP_CACHE}" = 1 ]; then keep_apt_cache; fi
  # http://snapshot.ubuntu.com is redirected to https, so we have to install ca-certificates
  export DEBIAN_FRONTEND=noninteractive
  apt-get -o Acquire::https::Verify-Peer=false update >&2
  apt-get -o Acquire::https::Verify-Peer=false install -y ca-certificates >&2
  ;;
"arch")
  : "${SNAPSHOT_ARCHIVE_BASE:=http://archive.archlinux.org/}"
  : "${SOURCE_DATE_EPOCH:=$(stat --format=%Y /var/log/pacman.log)}"
  export SOURCE_DATE_EPOCH
  # shellcheck disable=SC2016
  date -d "@${SOURCE_DATE_EPOCH}" "+Server = ${SNAPSHOT_ARCHIVE_BASE}repos/%Y/%m/%d/\$repo/os/\$arch" >/etc/pacman.d/mirrorlist
  ;;
*)
  echo >&2 "Unsupported distribution: ${ID}"
  exit 1
  ;;
esac

: "${WRITE_SOURCE_DATE_EPOCH:=/dev/null}"
echo "${SOURCE_DATE_EPOCH}" >"${WRITE_SOURCE_DATE_EPOCH}"
echo "SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH}"

dangerzone/container_utils.py (new file)

@@ -0,0 +1,201 @@
import logging
import os
import platform
import shutil
import subprocess
from pathlib import Path
from typing import List, Optional, Tuple

from . import errors
from .settings import Settings
from .util import get_resource_path, get_subprocess_startupinfo

CONTAINER_NAME = "dangerzone.rocks/dangerzone"

log = logging.getLogger(__name__)


class Runtime(object):
    """Represents the container runtime to use.

    - It can be specified via the settings, using the "container_runtime" key,
      which should point to the full path of the runtime;
    - If the runtime is not specified via the settings, it defaults
      to "podman" on Linux and "docker" on macOS and Windows.
    """

    def __init__(self) -> None:
        settings = Settings()

        if settings.custom_runtime_specified():
            self.path = Path(settings.get("container_runtime"))
            if not self.path.exists():
                raise errors.UnsupportedContainerRuntime(self.path)
            self.name = self.path.stem
        else:
            self.name = self.get_default_runtime_name()
            self.path = Runtime.path_from_name(self.name)

        if self.name not in ("podman", "docker"):
            raise errors.UnsupportedContainerRuntime(self.name)

    @staticmethod
    def path_from_name(name: str) -> Path:
        name_path = Path(name)
        if name_path.is_file():
            return name_path
        else:
            runtime = shutil.which(name_path)
            if runtime is None:
                raise errors.NoContainerTechException(name)
            return Path(runtime)

    @staticmethod
    def get_default_runtime_name() -> str:
        return "podman" if platform.system() == "Linux" else "docker"


def get_runtime_version(runtime: Optional[Runtime] = None) -> Tuple[int, int]:
    """Get the major/minor parts of the Docker/Podman version.

    Some of the operations we perform in this module rely on some Podman features
    that are not available across all of our platforms. In order to have a proper
    fallback, we need to know the Podman version. More specifically, we're fine with
    just knowing the major and minor version, since writing/installing a full-blown
    semver parser is overkill.
    """
    runtime = runtime or Runtime()

    # Get the Docker/Podman version, using a Go template.
    if runtime.name == "podman":
        query = "{{.Client.Version}}"
    else:
        query = "{{.Server.Version}}"

    cmd = [str(runtime.path), "version", "-f", query]
    try:
        version = subprocess.run(
            cmd,
            startupinfo=get_subprocess_startupinfo(),
            capture_output=True,
            check=True,
        ).stdout.decode()
    except Exception as e:
        msg = f"Could not get the version of the {runtime.name.capitalize()} tool: {e}"
        raise RuntimeError(msg) from e

    # Parse this version and return the major/minor parts, since we don't need the
    # rest.
    try:
        major, minor, _ = version.split(".", 3)
        return (int(major), int(minor))
    except Exception as e:
        msg = (
            f"Could not parse the version of the {runtime.name.capitalize()} tool"
            f" (found: '{version}') due to the following error: {e}"
        )
        raise RuntimeError(msg)


def list_image_tags() -> List[str]:
    """Get the tags of all loaded Dangerzone images.

    This can be useful when we want to find which are the local image tags, and
    whether an image with the expected tag is present.
    """
    runtime = Runtime()
    return (
        subprocess.check_output(
            [
                str(runtime.path),
                "image",
                "list",
                "--format",
                "{{ .Tag }}",
                CONTAINER_NAME,
            ],
            text=True,
            startupinfo=get_subprocess_startupinfo(),
        )
        .strip()
        .split()
    )


def add_image_tag(image_id: str, new_tag: str) -> None:
    """Add a tag to the Dangerzone image."""
    runtime = Runtime()
    log.debug(f"Adding tag '{new_tag}' to image '{image_id}'")
    subprocess.check_output(
        [str(runtime.path), "tag", image_id, new_tag],
        startupinfo=get_subprocess_startupinfo(),
    )


def delete_image_tag(tag: str) -> None:
    """Delete a Dangerzone image tag."""
    runtime = Runtime()
    log.warning(f"Deleting old container image: {tag}")
    try:
        subprocess.check_output(
            [str(runtime.path), "rmi", "--force", tag],
            startupinfo=get_subprocess_startupinfo(),
        )
    except Exception as e:
        log.warning(
            f"Couldn't delete old container image '{tag}', so leaving it there."
            f" Original error: {e}"
        )


def get_expected_tag() -> str:
    """Get the tag of the Dangerzone image tarball from the image-id.txt file."""
    with get_resource_path("image-id.txt").open() as f:
        return f.read().strip()


def load_image_tarball() -> None:
    runtime = Runtime()
    log.info("Installing Dangerzone container image...")
    tarball_path = get_resource_path("container.tar")
    try:
        res = subprocess.run(
            [str(runtime.path), "load", "-i", str(tarball_path)],
            startupinfo=get_subprocess_startupinfo(),
            capture_output=True,
            check=True,
        )
    except subprocess.CalledProcessError as e:
        if e.stderr:
            error = e.stderr.decode()
        else:
            error = "No output"
        raise errors.ImageInstallationException(
            f"Could not install container image: {error}"
        )

    # Loading an image built with Buildkit in Podman 3.4 messes up its name. The tag
    # somehow becomes the name of the loaded image [1].
    #
    # We know that older Podman versions are not generally affected, since Podman v3.0.1
    # on Debian Bullseye works properly. Also, Podman v4.0 is not affected, so it makes
    # sense to target only Podman v3.4 for a fix.
    #
    # The fix is simple, tag the image properly based on the expected tag from
    # `share/image-id.txt` and delete the incorrect tag.
    #
    # [1] https://github.com/containers/podman/issues/16490
    if runtime.name == "podman" and get_runtime_version(runtime) == (3, 4):
        expected_tag = get_expected_tag()
        bad_tag = f"localhost/{expected_tag}:latest"
        good_tag = f"{CONTAINER_NAME}:{expected_tag}"

        log.debug(
            f"Dangerzone images loaded in Podman v3.4 usually have an invalid tag."
            " Fixing it..."
        )
        add_image_tag(bad_tag, good_tag)
        delete_image_tag(bad_tag)

    log.info("Successfully installed container image")

@@ -13,15 +13,6 @@ def running_on_qubes() -> bool:
    return os.path.exists("/usr/share/qubes/marker-vm")


def get_tessdata_dir() -> str:
    if os.environ.get("TESSDATA_PREFIX"):
        return os.environ["TESSDATA_PREFIX"]
    elif running_on_qubes():
        return "/usr/share/tesseract/tessdata/"
    else:
        return "/usr/share/tessdata/"


class DangerzoneConverter:
    def __init__(self, progress_callback: Optional[Callable] = None) -> None:
        self.percentage: float = 0.0

@@ -3,6 +3,17 @@ import os
import sys
from typing import Dict, Optional

# XXX: PyMuPDF logs to stdout by default [1]. The PyMuPDF devs provide a way [2] to log to
# stderr, but it's based on environment variables. These envvars are consulted at import
# time [3], so we have to set them here, before we import `fitz`.
#
# [1] https://github.com/freedomofpress/dangerzone/issues/877
# [2] https://github.com/pymupdf/PyMuPDF/issues/3135#issuecomment-1992625724
# [3] https://github.com/pymupdf/PyMuPDF/blob/9717935eeb2d50d15440d62575878214226795f9/src/__init__.py#L62-L63
os.environ["PYMUPDF_MESSAGE"] = "fd:2"
os.environ["PYMUPDF_LOG"] = "fd:2"


import fitz
import magic

@@ -118,6 +129,10 @@ class DocumentToPixels(DangerzoneConverter):
        # At least .odt, .docx, .odg, .odp, .ods, and .pptx
        "application/zip": {
            "type": "libreoffice",
            # NOTE: `file` command < 5.45 cannot detect hwpx files properly, so we
            # enable the extension in any case. See also:
            # https://github.com/freedomofpress/dangerzone/pull/460#issuecomment-1654166465
            "libreoffice_ext": "h2orestart.oxt",
        },
        # At least .doc, .docx, .odg, .odp, .odt, .pdf, .ppt, .pptx, .xls, and .xlsx
        "application/octet-stream": {

@@ -238,7 +253,7 @@ class DocumentToPixels(DangerzoneConverter):
                "unzip",
                "-d",
                f"/usr/lib/libreoffice/share/extensions/{libreoffice_ext}/",
                f"/opt/libreoffice_ext/{libreoffice_ext}",
            ]
            await self.run_command(
                unzip_args,

@@ -1,152 +0,0 @@
"""
Here are the steps, with progress bar percentages:

- 50%-95%: Convert each page of pixels into a PDF (each page takes 45/n%, where n is the number of pages)
- 95%-100%: Compress the final PDF
"""

import asyncio
import contextlib
import glob
import io
import json
import os
import sys
from typing import Optional

from .common import DEFAULT_DPI, DangerzoneConverter, get_tessdata_dir, running_on_qubes


class PixelsToPDF(DangerzoneConverter):
    async def convert(
        self, ocr_lang: Optional[str] = None, tempdir: Optional[str] = None
    ) -> None:
        self.percentage = 50.0
        if tempdir is None:
            tempdir = "/safezone"

        # XXX lazy loading of fitz module to avoid import issues on non-Qubes systems
        import fitz

        num_pages = len(glob.glob(f"{tempdir}/pixels/page-*.rgb"))
        total_size = 0.0

        safe_doc = fitz.Document()

        # Convert RGB files to PDF files
        percentage_per_page = 45.0 / num_pages
        for page_num in range(1, num_pages + 1):
            filename_base = f"{tempdir}/pixels/page-{page_num}"
            rgb_filename = f"{filename_base}.rgb"
            width_filename = f"{filename_base}.width"
            height_filename = f"{filename_base}.height"

            with open(width_filename) as f:
                width = int(f.read().strip())
            with open(height_filename) as f:
                height = int(f.read().strip())
            with open(rgb_filename, "rb") as rgb_f:
                untrusted_rgb_data = rgb_f.read()
            # The first few operations happen on a per-page basis.
            page_size = len(untrusted_rgb_data)
            total_size += page_size
            with contextlib.redirect_stdout(io.StringIO()):
                pixmap = fitz.Pixmap(
                    fitz.Colorspace(fitz.CS_RGB),
                    width,
                    height,
                    untrusted_rgb_data,
                    False,
                )
            pixmap.set_dpi(DEFAULT_DPI, DEFAULT_DPI)
            if ocr_lang:  # OCR the document
                self.update_progress(
                    f"Converting page {page_num}/{num_pages} from pixels to searchable PDF"
                )
                if int(fitz.version[2]) >= 20230621000001:
                    page_pdf_bytes = pixmap.pdfocr_tobytes(
                        compress=True,
                        language=ocr_lang,
                        tessdata=get_tessdata_dir(),
                    )
                else:
                    # XXX: In PyMuPDF v1.22.5, the function signature of
                    # `pdfocr_tobytes()` / `pdfocr_save()` was extended with an argument
                    # to explicitly set the Tesseract data dir [1].
                    #
                    # In earlier versions, the PyMuPDF developers recommend setting this
                    # path via the TESSDATA_PREFIX environment variable. In practice,
                    # this environment variable is read at import time, so subsequent
                    # changes to the environment variable are not tracked [2].
                    #
                    # To make things worse, any attempt to alter the internal attribute
                    # (`fitz.TESSDATA_PREFIX`) makes no difference as well, when using
                    # the OCR functions. That's due to the way imports work in `fitz`,
                    # where somehow the internal `fitz.fitz` module is shadowed.
                    #
                    # A hacky solution is to grab the `fitz.fitz` module from
                    # `sys.modules`, and set there the TESSDATA_PREFIX variable. We can
                    # get away with this hack because we have a proper solution for
                    # subsequent PyMuPDF versions, and we know that nothing will change
                    # in older versions.
                    #
                    # TODO: Remove after oldest distro has PyMuPDF >= v1.22.5
                    #
                    # [1]: https://pymupdf.readthedocs.io/en/latest/pixmap.html#Pixmap.pdfocr_save
                    # [2]: https://github.com/pymupdf/PyMuPDF/blob/0368e56cfa6afb55bcf6c726e7f51a2a16a5ccba/fitz/fitz.i#L308
                    sys.modules["fitz.fitz"].TESSDATA_PREFIX = get_tessdata_dir()  # type: ignore [attr-defined]

                    page_pdf_bytes = pixmap.pdfocr_tobytes(
                        compress=True,
                        language=ocr_lang,
                    )
                ocr_pdf = fitz.open("pdf", page_pdf_bytes)
            else:  # Don't OCR
                self.update_progress(
                    f"Converting page {page_num}/{num_pages} from pixels to PDF"
                )
                page_doc = fitz.Document()
                page_doc.insert_file(pixmap)
                page_pdf_bytes = page_doc.tobytes(deflate_images=True)

            safe_doc.insert_pdf(fitz.open("pdf", page_pdf_bytes))
            self.percentage += percentage_per_page

        self.percentage = 100.0
        self.update_progress("Safe PDF created")

        # Move converted files into /safezone
        if running_on_qubes():
            safe_pdf_path = f"{tempdir}/safe-output-compressed.pdf"
        else:
            safe_pdf_path = "/safezone/safe-output-compressed.pdf"

        safe_doc.save(safe_pdf_path, deflate_images=True)

    def update_progress(self, text: str, *, error: bool = False) -> None:
        if running_on_qubes():
            if self.progress_callback:
                self.progress_callback(error, text, self.percentage)
        else:
            print(
                json.dumps(
                    {"error": error, "text": text, "percentage": self.percentage}
                )
            )
            sys.stdout.flush()


async def main() -> int:
    ocr_lang = os.environ.get("OCR_LANGUAGE") if os.environ.get("OCR") == "1" else None
    converter = PixelsToPDF()

    try:
        await converter.convert(ocr_lang)
        return 0
    except (RuntimeError, ValueError) as e:
        converter.update_progress(str(e), error=True)
        return 1


if __name__ == "__main__":
    sys.exit(asyncio.run(main()))

@@ -1,8 +1,10 @@
import enum
import logging
import os
import platform
import re
import secrets
from pathlib import Path, PurePosixPath, PureWindowsPath
from typing import Optional

from . import errors, util

@@ -68,6 +70,20 @@ class Document:
    def validate_output_filename(filename: str) -> None:
        if not filename.endswith(".pdf"):
            raise errors.NonPDFOutputFileException()

        if platform.system() == "Windows":
            final_filename = PureWindowsPath(filename).name
            illegal_chars_regex = re.compile(r"[\"*/:<>?\\|]")
        else:
            final_filename = PurePosixPath(filename).name
            illegal_chars_regex = re.compile(r"[\\]")

        if platform.system() in ("Windows", "Darwin"):
            match = illegal_chars_regex.search(final_filename)
            if match:
                # filename contains illegal characters
                raise errors.IllegalOutputFilenameException(match.group(0))

        if not os.access(Path(filename).parent, os.W_OK):
            # in unwriteable directory
            raise errors.UnwriteableOutputDirException()

@@ -107,6 +123,10 @@ class Document:
        self.validate_output_filename(filename)
        self._output_filename = filename

    @property
    def sanitized_output_filename(self) -> str:
        return util.replace_control_chars(self.output_filename)

    @property
    def suffix(self) -> str:
        return self._suffix

@@ -42,6 +42,13 @@ class NonPDFOutputFileException(DocumentFilenameException):
        super().__init__("Safe PDF filename must end in '.pdf'")


class IllegalOutputFilenameException(DocumentFilenameException):
    """Exception for when the output file contains illegal characters."""

    def __init__(self, char: str) -> None:
        super().__init__(f"Illegal character: {char}")


class UnwriteableOutputDirException(DocumentFilenameException):
    """Exception for when the output file is not writeable."""
@@ -110,3 +117,30 @@ def handle_document_errors(func: F) -> F:
            sys.exit(1)

    return cast(F, wrapper)


#### Container-related errors


class ImageNotPresentException(Exception):
    pass


class ImageInstallationException(Exception):
    pass


class NoContainerTechException(Exception):
    def __init__(self, container_tech: str) -> None:
        super().__init__(f"{container_tech} is not installed")


class NotAvailableContainerTechException(Exception):
    def __init__(self, container_tech: str, error: str) -> None:
        self.error = error
        self.container_tech = container_tech
        super().__init__(f"{container_tech} is not available")


class UnsupportedContainerRuntime(Exception):
    pass
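These container-related exceptions give callers something specific to catch. A sketch of how calling code might distinguish them, assuming they are importable from `dangerzone.errors`:

from dangerzone import errors


def describe_failure(exc: Exception) -> str:
    if isinstance(exc, errors.NoContainerTechException):
        return "No container runtime found; install Podman or Docker."
    if isinstance(exc, errors.NotAvailableContainerTechException):
        # Carries the runtime name and its raw error output.
        return f"{exc.container_tech} is installed but not working: {exc.error}"
    if isinstance(exc, errors.ImageNotPresentException):
        return "The Dangerzone container image is missing."
    return f"Unexpected error: {exc}"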
@@ -5,7 +5,7 @@ import platform
import signal
import sys
import typing
from typing import Dict, List, Optional
from typing import List, Optional

import click
import colorama
@@ -51,7 +51,7 @@ class Application(QtWidgets.QApplication):
    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        super(Application, self).__init__(*args, **kwargs)
        self.setQuitOnLastWindowClosed(False)
        with open(get_resource_path("dangerzone.css"), "r") as f:
        with get_resource_path("dangerzone.css").open("r") as f:
            style = f.read()
        self.setStyleSheet(style)
@@ -24,7 +24,7 @@ else:
    from PySide2 import QtCore, QtGui, QtWidgets

if platform.system() == "Linux":
    from xdg.DesktopEntry import DesktopEntry
    from xdg.DesktopEntry import DesktopEntry, ParsingError

from ..isolation_provider.base import IsolationProvider
from ..logic import DangerzoneCore
@@ -63,7 +63,7 @@ class DangerzoneGui(DangerzoneCore):
            path = get_resource_path("dangerzone.ico")
        else:
            path = get_resource_path("icon.png")
        return QtGui.QIcon(path)
        return QtGui.QIcon(str(path))

    def open_pdf_viewer(self, filename: str) -> None:
        if platform.system() == "Darwin":
@@ -123,27 +123,37 @@ class DangerzoneGui(DangerzoneCore):
                    full_filename = os.path.join(search_path, filename)
                    if os.path.splitext(filename)[1] == ".desktop":
                        # See which ones can open PDFs
                        desktop_entry = DesktopEntry(full_filename)
                        desktop_entry_name = desktop_entry.getName()
                        if (
                            "application/pdf" in desktop_entry.getMimeTypes()
                            and "dangerzone" not in desktop_entry_name.lower()
                        ):
                            pdf_viewers[desktop_entry_name] = (
                                desktop_entry.getExec()
                        try:
                            desktop_entry = DesktopEntry(full_filename)
                        except ParsingError:
                            # Do not stop when encountering malformed desktop entries
                            continue
                        except Exception:
                            log.exception(
                                "Encountered the following exception while processing desktop entry %s",
                                full_filename,
                            )
                        else:
                            desktop_entry_name = desktop_entry.getName()
                            if (
                                "application/pdf" in desktop_entry.getMimeTypes()
                                and "dangerzone" not in desktop_entry_name.lower()
                            ):
                                pdf_viewers[desktop_entry_name] = (
                                    desktop_entry.getExec()
                                )

                            # Put the default entry first
                            if filename == default_pdf_viewer:
                                try:
                                    pdf_viewers.move_to_end(
                                        desktop_entry_name, last=False
                                    )
                                except KeyError as e:
                                    # Should be unreachable
                                    log.error(
                                        f"Problem reordering applications: {e}"
                                    )
                                # Put the default entry first
                                if filename == default_pdf_viewer:
                                    try:
                                        pdf_viewers.move_to_end(
                                            desktop_entry_name, last=False
                                        )
                                    except KeyError as e:
                                        # Should be unreachable
                                        log.error(
                                            f"Problem reordering applications: {e}"
                                        )
            except FileNotFoundError:
                pass
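The rewrite above leans on Python's `try/except/else`: the `else` branch runs only when `DesktopEntry(...)` parsed cleanly, so one malformed entry skips ahead instead of aborting the whole scan. A generic, self-contained sketch of the same pattern — the `parse` helper and the `.desktop` check are placeholders, not the project's code:

import logging

log = logging.getLogger(__name__)


def parse(path: str) -> str:
    # Placeholder parser that may raise ValueError on malformed input.
    if not path.endswith(".desktop"):
        raise ValueError(path)
    return path


def scan(paths: list) -> list:
    results = []
    for path in paths:
        try:
            parsed = parse(path)
        except ValueError:
            continue  # skip malformed input, keep scanning
        except Exception:
            log.exception("Unexpected error while parsing %s", path)
        else:
            results.append(parsed)  # only runs when parse() succeeded
    return results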
@@ -242,7 +252,7 @@ class Alert(Dialog):
    def create_layout(self) -> QtWidgets.QBoxLayout:
        logo = QtWidgets.QLabel()
        logo.setPixmap(
            QtGui.QPixmap.fromImage(QtGui.QImage(get_resource_path("icon.png")))
            QtGui.QPixmap.fromImage(QtGui.QImage(str(get_resource_path("icon.png"))))
        )

        label = QtWidgets.QLabel()
@@ -1,30 +1,32 @@
import logging
import os
import platform
import subprocess
import tempfile
import typing
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import List, Optional

# FIXME: See https://github.com/freedomofpress/dangerzone/issues/320 for more details.
if typing.TYPE_CHECKING:
    from PySide2 import QtCore, QtGui, QtSvg, QtWidgets
    from PySide2.QtWidgets import QAction
    from PySide2.QtCore import Qt
    from PySide2.QtWidgets import QAction, QTextEdit
else:
    try:
        from PySide6 import QtCore, QtGui, QtSvg, QtWidgets
        from PySide6.QtCore import Qt
        from PySide6.QtGui import QAction
        from PySide6.QtWidgets import QTextEdit
    except ImportError:
        from PySide2 import QtCore, QtGui, QtSvg, QtWidgets
        from PySide2.QtWidgets import QAction
        from PySide2.QtCore import Qt
        from PySide2.QtWidgets import QAction, QTextEdit

from .. import errors
from ..document import SAFE_EXTENSION, Document
from ..isolation_provider.container import Container, NoContainerTechException
from ..isolation_provider.dummy import Dummy
from ..isolation_provider.qubes import Qubes, is_qubes_native_conversion
from ..util import get_resource_path, get_subprocess_startupinfo, get_version
from ..isolation_provider.qubes import is_qubes_native_conversion
from ..util import format_exception, get_resource_path, get_version
from .logic import Alert, CollapsibleBox, DangerzoneGui, UpdateDialog
from .updater import UpdateReport
@@ -59,7 +61,7 @@ def load_svg_image(filename: str, width: int, height: int) -> QtGui.QPixmap:
    This answer is basically taken from: https://stackoverflow.com/a/25689790
    """
    path = get_resource_path(filename)
    svg_renderer = QtSvg.QSvgRenderer(path)
    svg_renderer = QtSvg.QSvgRenderer(str(path))
    image = QtGui.QImage(width, height, QtGui.QImage.Format_ARGB32)
    # Set the ARGB to 0 to prevent rendering artifacts
    image.fill(0x00000000)
@@ -115,6 +117,7 @@ class MainWindow(QtWidgets.QMainWindow):

        self.setWindowTitle("Dangerzone")
        self.setWindowIcon(self.dangerzone.get_window_icon())
        self.alert: Optional[Alert] = None

        self.setMinimumWidth(600)
        if platform.system() == "Darwin":
@@ -126,9 +129,8 @@ class MainWindow(QtWidgets.QMainWindow):

        # Header
        logo = QtWidgets.QLabel()
        logo.setPixmap(
            QtGui.QPixmap.fromImage(QtGui.QImage(get_resource_path("icon.png")))
        )
        icon_path = str(get_resource_path("icon.png"))
        logo.setPixmap(QtGui.QPixmap.fromImage(QtGui.QImage(icon_path)))
        header_label = QtWidgets.QLabel("Dangerzone")
        header_label.setFont(self.dangerzone.fixed_font)
        header_label.setStyleSheet("QLabel { font-weight: bold; font-size: 50px; }")
@@ -182,21 +184,18 @@ class MainWindow(QtWidgets.QMainWindow):
        header_layout.addWidget(self.hamburger_button)
        header_layout.addSpacing(15)

        if isinstance(self.dangerzone.isolation_provider, Container):
        # Content widget, contains all the window content except waiting widget
        self.content_widget = ContentWidget(self.dangerzone)

        if self.dangerzone.isolation_provider.should_wait_install():
            # Waiting widget replaces content widget while container runtime isn't available
            self.waiting_widget: WaitingWidget = WaitingWidgetContainer(self.dangerzone)
            self.waiting_widget.finished.connect(self.waiting_finished)

        elif isinstance(self.dangerzone.isolation_provider, Dummy) or isinstance(
            self.dangerzone.isolation_provider, Qubes
        ):
        else:
            # Don't wait with dummy converter and on Qubes.
            self.waiting_widget = WaitingWidget()
            self.dangerzone.is_waiting_finished = True

        # Content widget, contains all the window content except waiting widget
        self.content_widget = ContentWidget(self.dangerzone)

        # Only use the waiting widget if container runtime isn't available
        if self.dangerzone.is_waiting_finished:
            self.waiting_widget.hide()
@@ -220,6 +219,18 @@ class MainWindow(QtWidgets.QMainWindow):
        # This allows us to make QSS rules conditional on the OS color mode.
        self.setProperty("OSColorMode", self.dangerzone.app.os_color_mode.value)

        if hasattr(self.dangerzone.isolation_provider, "check_docker_desktop_version"):
            try:
                is_version_valid, version = (
                    self.dangerzone.isolation_provider.check_docker_desktop_version()
                )
                if not is_version_valid:
                    self.handle_docker_desktop_version_check(is_version_valid, version)
            except errors.UnsupportedContainerRuntime as e:
                pass  # It's caught later in the flow.
            except errors.NoContainerTechException as e:
                pass  # It's caught later in the flow.

        self.show()

    def show_update_success(self) -> None:
@@ -273,6 +284,46 @@ class MainWindow(QtWidgets.QMainWindow):
        self.dangerzone.settings.set("updater_check", check)
        self.dangerzone.settings.save()

    def handle_docker_desktop_version_check(
        self, is_version_valid: bool, version: str
    ) -> None:
        hamburger_menu = self.hamburger_button.menu()
        sep = hamburger_menu.insertSeparator(hamburger_menu.actions()[0])
        upgrade_action = QAction("Docker Desktop should be upgraded", hamburger_menu)
        upgrade_action.setIcon(
            QtGui.QIcon(
                load_svg_image(
                    "hamburger_menu_update_dot_error.svg", width=64, height=64
                )
            )
        )

        message = """
        <p>A new version of Docker Desktop is available. Please upgrade your system.</p>
        <p>Visit the <a href="https://www.docker.com/products/docker-desktop">Docker Desktop website</a> to download the latest version.</p>
        <em>Keeping Docker Desktop up to date allows you to have more confidence that your documents are processed safely.</em>
        """
        self.alert = Alert(
            self.dangerzone,
            title="Upgrade Docker Desktop",
            message=message,
            ok_text="Ok",
            has_cancel=False,
        )

        def _launch_alert() -> None:
            if self.alert:
                self.alert.launch()

        upgrade_action.triggered.connect(_launch_alert)
        hamburger_menu.insertAction(sep, upgrade_action)

        self.hamburger_button.setIcon(
            QtGui.QIcon(
                load_svg_image("hamburger_menu_update_error.svg", width=64, height=64)
            )
        )

    def handle_updates(self, report: UpdateReport) -> None:
        """Handle update reports from the update checker thread.
@@ -359,7 +410,7 @@ class MainWindow(QtWidgets.QMainWindow):
        self.content_widget.show()

    def closeEvent(self, e: QtGui.QCloseEvent) -> None:
        alert_widget = Alert(
        self.alert = Alert(
            self.dangerzone,
            message="Some documents are still being converted.\n Are you sure you want to quit?",
            ok_text="Abort conversions",
@@ -373,7 +424,7 @@ class MainWindow(QtWidgets.QMainWindow):
            else:
                self.dangerzone.app.exit(0)
        else:
            accept_exit = alert_widget.launch()
            accept_exit = self.alert.launch()
            if not accept_exit:
                e.ignore()
                return
@@ -384,15 +435,24 @@ class MainWindow(QtWidgets.QMainWindow):


class InstallContainerThread(QtCore.QThread):
    finished = QtCore.Signal()
    finished = QtCore.Signal(str)

    def __init__(self, dangerzone: DangerzoneGui) -> None:
        super(InstallContainerThread, self).__init__()
        self.dangerzone = dangerzone

    def run(self) -> None:
        self.dangerzone.isolation_provider.install()
        self.finished.emit()
        error = None
        try:
            installed = self.dangerzone.isolation_provider.install()
        except Exception as e:
            log.error("Container installation problem")
            error = format_exception(e)
        else:
            if not installed:
                error = "The image cannot be found. This can be caused by a faulty container image."
        finally:
            self.finished.emit(error)

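Changing `finished = QtCore.Signal()` to `QtCore.Signal(str)` lets the thread hand an error message to whatever slot is connected, instead of merely announcing completion. A minimal, self-contained sketch of the same idea — PySide6 is assumed, the class and slot names are made up, and an empty string stands in for "no error":

from PySide6 import QtCore


class Worker(QtCore.QThread):
    finished = QtCore.Signal(str)  # payload: error message, empty on success

    def run(self) -> None:
        error = ""
        try:
            pass  # ... do the actual work here ...
        except Exception as e:
            error = str(e)
        finally:
            self.finished.emit(error)


def on_finished(error: str) -> None:
    print("failed:" if error else "succeeded", error)

# Usage: worker = Worker(); worker.finished.connect(on_finished); worker.start()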
class WaitingWidget(QtWidgets.QWidget):
@@ -402,6 +462,29 @@ class WaitingWidget(QtWidgets.QWidget):
        super(WaitingWidget, self).__init__()


class TracebackWidget(QTextEdit):
    """Reusable component to present tracebacks to the user.

    By default, the widget is initialized but does not appear.
    You need to call `.set_content("traceback")` on it so the
    traceback is displayed.
    """

    def __init__(self) -> None:
        super(TracebackWidget, self).__init__()
        # Error
        self.setReadOnly(True)
        self.setVisible(False)
        self.setProperty("style", "traceback")
        # Enable copying
        self.setTextInteractionFlags(Qt.TextSelectableByMouse)

    def set_content(self, error: Optional[str] = None) -> None:
        if error:
            self.setPlainText(error)
            self.setVisible(True)


class WaitingWidgetContainer(WaitingWidget):
    # These are the possible states that the WaitingWidget can show.
    #
@@ -412,7 +495,6 @@ class WaitingWidgetContainer(WaitingWidget):
    #
    # Linux states
    # - "install_container"
    finished = QtCore.Signal()

    def __init__(self, dangerzone: DangerzoneGui) -> None:
        super(WaitingWidgetContainer, self).__init__()
@@ -434,10 +516,13 @@ class WaitingWidgetContainer(WaitingWidget):
        self.buttons = QtWidgets.QWidget()
        self.buttons.setLayout(buttons_layout)

        self.traceback = TracebackWidget()

        # Layout
        layout = QtWidgets.QVBoxLayout()
        layout.addStretch()
        layout.addWidget(self.label)
        layout.addWidget(self.traceback)
        layout.addStretch()
        layout.addWidget(self.buttons)
        layout.addStretch()
@@ -448,53 +533,98 @@ class WaitingWidgetContainer(WaitingWidget):

    def check_state(self) -> None:
        state: Optional[str] = None
        error: Optional[str] = None

        try:
            if isinstance(  # Sanity check
                self.dangerzone.isolation_provider, Container
            ):
                container_runtime = self.dangerzone.isolation_provider.get_runtime()
        except NoContainerTechException as e:
            self.dangerzone.isolation_provider.is_available()
        except errors.NoContainerTechException as e:
            log.error(str(e))
            state = "not_installed"

        except errors.NotAvailableContainerTechException as e:
            log.error(str(e))
            state = "not_running"
            error = e.error
        except Exception as e:
            log.error(str(e))
            state = "not_running"
            error = format_exception(e)
        else:
            # Can we run `docker image ls` without an error
            with subprocess.Popen(
                [container_runtime, "image", "ls"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                startupinfo=get_subprocess_startupinfo(),
            ) as p:
                p.communicate()
                if p.returncode != 0:
                    log.error("Docker is not running")
                    state = "not_running"
                else:
                    # Always try installing the container
                    state = "install_container"
            state = "install_container"

        # Update the state
        self.state_change(state)
        self.state_change(state, error)

    def state_change(self, state: str) -> None:
        if state == "not_installed":
            self.label.setText(
                "<strong>Dangerzone Requires Docker Desktop</strong><br><br><a href='https://www.docker.com/products/docker-desktop'>Download Docker Desktop</a>, install it, and open it."
    def show_error(self, msg: str, details: Optional[str] = None) -> None:
        self.label.setText(msg)
        show_traceback = details is not None
        if show_traceback:
            self.traceback.set_content(details)
        self.traceback.setVisible(show_traceback)
        self.buttons.show()

    def show_message(self, msg: str) -> None:
        self.label.setText(msg)
        self.traceback.setVisible(False)
        self.buttons.hide()

    def installation_finished(self, error: Optional[str] = None) -> None:
        if error:
            msg = (
                "During installation of the dangerzone image, <br>"
                "the following error occurred:"
            )
            self.buttons.show()
        elif state == "not_running":
            self.label.setText(
                "<strong>Dangerzone Requires Docker Desktop</strong><br><br>Docker is installed but isn't running.<br><br>Open Docker and make sure it's running in the background."
            )
            self.buttons.show()
            self.show_error(msg, error)
        else:
            self.label.setText(
                "Installing the Dangerzone container image.<br><br>This might take a few minutes..."
            self.finished.emit()

    def state_change(self, state: str, error: Optional[str] = None) -> None:
        custom_runtime = self.dangerzone.settings.custom_runtime_specified()

        if state == "not_installed":
            if custom_runtime:
                self.show_error(
                    "<strong>We could not find the container runtime defined in your settings</strong><br><br>"
                    "Please check your settings, install it if needed, and retry."
                )
            elif platform.system() == "Linux":
                self.show_error(
                    "<strong>Dangerzone requires Podman</strong><br><br>"
                    "Install it and retry."
                )
            else:
                self.show_error(
                    "<strong>Dangerzone requires Docker Desktop</strong><br><br>"
                    "<a href='https://www.docker.com/products/docker-desktop'>Download Docker Desktop</a>"
                    ", install it, and open it."
                )

        elif state == "not_running":
            if custom_runtime:
                self.show_error(
                    "<strong>We were unable to start the container runtime defined in your settings</strong><br><br>"
                    "Please check your settings, install it if needed, and retry."
                )
            elif platform.system() == "Linux":
                # "not_running" here means that the `podman image ls` command failed.
                self.show_error(
                    "<strong>Dangerzone requires Podman</strong><br><br>"
                    "Podman is installed but cannot run properly. See errors below",
                    error,
                )
            else:
                self.show_error(
                    "<strong>Dangerzone requires Docker Desktop</strong><br><br>"
                    "Docker is installed but isn't running.<br><br>"
                    "Open Docker and make sure it's running in the background.",
                    error,
                )
        else:
            self.show_message(
                "Installing the Dangerzone container image.<br><br>"
                "This might take a few minutes..."
            )
            self.buttons.hide()
            self.install_container_t = InstallContainerThread(self.dangerzone)
            self.install_container_t.finished.connect(self.finished)
            self.install_container_t.finished.connect(self.installation_finished)
            self.install_container_t.start()
@@ -538,7 +668,7 @@ class ContentWidget(QtWidgets.QWidget):

    def documents_selected(self, docs: List[Document]) -> None:
        if self.conversion_started:
            Alert(
            self.alert = Alert(
                self.dangerzone,
                message="Dangerzone does not support adding documents after the conversion has started.",
                has_cancel=False,
@@ -548,7 +678,7 @@ class ContentWidget(QtWidgets.QWidget):
        # Ensure all files in batch are in the same directory
        dirnames = {os.path.dirname(doc.input_filename) for doc in docs}
        if len(dirnames) > 1:
            Alert(
            self.alert = Alert(
                self.dangerzone,
                message="Dangerzone does not support adding documents from multiple locations.\n\n The newly added documents were ignored.",
                has_cancel=False,
@@ -717,14 +847,14 @@ class DocSelectionDropFrame(QtWidgets.QFrame):
            text = f"{num_unsupported_docs} files are not supported."
            ok_text = "Continue without these files"

        alert_widget = Alert(
        self.alert = Alert(
            self.dangerzone,
            message=f"{text}\nThe supported extensions are: "
            + ", ".join(get_supported_extensions()),
            ok_text=ok_text,
        )

        return alert_widget.exec_()
        return self.alert.exec_()


class SettingsWidget(QtWidgets.QWidget):
@@ -754,23 +884,23 @@ class SettingsWidget(QtWidgets.QWidget):
        self.safe_extension = QtWidgets.QLineEdit()
        self.safe_extension.setStyleSheet("margin-left: -6px;")  # no left margin
        self.safe_extension.textChanged.connect(self.update_ui)
        self.safe_extension_invalid = QtWidgets.QLabel("(must end in .pdf)")
        self.safe_extension_invalid = QtWidgets.QLabel("")
        self.safe_extension_invalid.setStyleSheet("color: red")
        self.safe_extension_invalid.hide()
        self.safe_extension_name_layout = QtWidgets.QHBoxLayout()
        self.safe_extension_name_layout.setSpacing(0)
        self.safe_extension_name_layout.addWidget(self.safe_extension_filename)
        self.safe_extension_name_layout.addWidget(self.safe_extension)

        # FIXME: Workaround for https://github.com/freedomofpress/dangerzone/issues/339.
        # We should drop this once we drop Ubuntu Focal support.
        if hasattr(QtGui, "QRegularExpressionValidator"):
            dot_pdf_regex = QtCore.QRegularExpression(r".*\.[Pp][Dd][Ff]")
            validator = QtGui.QRegularExpressionValidator(dot_pdf_regex)
        self.dot_pdf_validator = QtGui.QRegularExpressionValidator(
            QtCore.QRegularExpression(r".*\.[Pp][Dd][Ff]")
        )
        if platform.system() == "Linux":
            illegal_chars_regex = r"[/]"
        elif platform.system() == "Darwin":
            illegal_chars_regex = r"[\\]"
        else:
            dot_pdf_regex = QtCore.QRegExp(r".*\.[Pp][Dd][Ff]")  # type: ignore [assignment]
            validator = QtGui.QRegExpValidator(dot_pdf_regex)  # type: ignore [call-overload]
        self.safe_extension.setValidator(validator)
            illegal_chars_regex = r"[\"*/:<>?\\|]"
        self.illegal_chars_regex = QtCore.QRegularExpression(illegal_chars_regex)
        self.safe_extension_layout = QtWidgets.QHBoxLayout()
        self.safe_extension_layout.addWidget(self.save_checkbox)
        self.safe_extension_layout.addWidget(self.safe_extension_label)
@@ -913,14 +1043,32 @@ class SettingsWidget(QtWidgets.QWidget):
            # ignore validity if not saving file
            self.safe_extension_invalid.hide()
            return True
        return (
            self.check_safe_extension_illegal_chars()
            and self.check_safe_extension_dot_pdf()
        )

        if self.safe_extension.hasAcceptableInput():
            self.safe_extension_invalid.hide()
            return True
        else:
            # prevent starting conversion until correct
            self.safe_extension_invalid.show()
    def check_safe_extension_illegal_chars(self) -> bool:
        match = self.illegal_chars_regex.match(self.safe_extension.text())
        if match.hasMatch():
            self.set_safe_extension_invalid_label(
                f"illegal character: {match.captured()}"
            )
            return False
        self.safe_extension_invalid.hide()
        return True

    def check_safe_extension_dot_pdf(self) -> bool:
        self.safe_extension.setValidator(self.dot_pdf_validator)
        if not self.safe_extension.hasAcceptableInput():
            self.set_safe_extension_invalid_label("must end in .pdf")
            return False
        self.safe_extension_invalid.hide()
        return True

    def set_safe_extension_invalid_label(self, string: str) -> None:
        self.safe_extension_invalid.setText(string)
        self.safe_extension_invalid.show()

    def check_either_save_or_open(self) -> bool:
        return (
@@ -1171,7 +1319,7 @@ class DocumentWidget(QtWidgets.QWidget):

    def load_status_image(self, filename: str) -> QtGui.QPixmap:
        path = get_resource_path(filename)
        img = QtGui.QImage(path)
        img = QtGui.QImage(str(path))
        image = QtGui.QPixmap.fromImage(img)
        return image.scaled(QtCore.QSize(15, 15))
@@ -1,32 +1,62 @@
import contextlib
import logging
import os
import platform
import signal
import subprocess
import sys
import tempfile
import threading
from abc import ABC, abstractmethod
from pathlib import Path
from io import BytesIO
from typing import IO, Callable, Iterator, Optional

import fitz
from colorama import Fore, Style

from ..conversion import errors
from ..conversion.common import INT_BYTES
from ..conversion.common import DEFAULT_DPI, INT_BYTES
from ..document import Document
from ..util import replace_control_chars
from ..util import get_tessdata_dir, replace_control_chars

log = logging.getLogger(__name__)

MAX_CONVERSION_LOG_CHARS = 150 * 50  # up to ~150 lines of 50 characters
DOC_TO_PIXELS_LOG_START = "----- DOC TO PIXELS LOG START -----"
DOC_TO_PIXELS_LOG_END = "----- DOC TO PIXELS LOG END -----"
PIXELS_TO_PDF_LOG_START = "----- PIXELS TO PDF LOG START -----"
PIXELS_TO_PDF_LOG_END = "----- PIXELS TO PDF LOG END -----"

TIMEOUT_EXCEPTION = 15
TIMEOUT_GRACE = 15
TIMEOUT_FORCE = 5


def _signal_process_group(p: subprocess.Popen, signo: int) -> None:
    """Send a signal to a process group."""
    try:
        os.killpg(os.getpgid(p.pid), signo)
    except (ProcessLookupError, PermissionError):
        # If the process no longer exists, we may encounter the above errors, either
        # when looking for the process group (ProcessLookupError), or when trying to
        # kill a process group that no longer exists (PermissionError)
        return
    except Exception:
        log.exception(
            f"Unexpected error while sending signal {signo} to the"
            f"document-to-pixels process group (PID: {p.pid})"
        )


def terminate_process_group(p: subprocess.Popen) -> None:
    """Terminate a process group."""
    if platform.system() == "Windows":
        p.terminate()
    else:
        _signal_process_group(p, signal.SIGTERM)


def kill_process_group(p: subprocess.Popen) -> None:
    """Forcefully kill a process group."""
    if platform.system() == "Windows":
        p.kill()
    else:
        _signal_process_group(p, signal.SIGKILL)


def read_bytes(f: IO[bytes], size: int, exact: bool = True) -> bytes:
    """Read bytes from a file-like object."""
    buf = f.read(size)
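A small usage sketch of the new helpers above: the child must be started with `start_new_session=True` so it gets its own process group, otherwise `os.killpg` would signal the parent as well. The command and import path are illustrative, and the example is POSIX-only.

import subprocess
import time

from dangerzone.isolation_provider.base import (  # assumed import path
    kill_process_group,
    terminate_process_group,
)

p = subprocess.Popen(
    ["sleep", "60"],  # arbitrary long-running child
    start_new_session=True,  # new session => own process group
)
time.sleep(1)
terminate_process_group(p)  # SIGTERM to the whole group on POSIX
try:
    p.wait(timeout=5)
except subprocess.TimeoutExpired:
    kill_process_group(p)  # escalate to SIGKILL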
@@ -43,9 +73,9 @@ def read_int(f: IO[bytes]) -> int:
    return int.from_bytes(untrusted_int, "big", signed=False)


def read_debug_text(f: IO[bytes], size: int) -> str:
    """Read arbitrarily long text (for debug purposes), and sanitize it."""
    untrusted_text = f.read(size).decode("ascii", errors="replace")
def sanitize_debug_text(text: bytes) -> str:
    """Read all the buffer and return a sanitized version"""
    untrusted_text = text.decode("ascii", errors="replace")
    return replace_control_chars(untrusted_text, keep_newlines=True)
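For instance, feeding `sanitize_debug_text` raw bytes containing control characters keeps the newlines but scrubs the rest; the exact replacement characters depend on `replace_control_chars`, so the output comment below is indicative only.

from dangerzone.isolation_provider.base import sanitize_debug_text  # assumed path

raw = b"page 1 done\n\x1b[31malert\x07\x00"
print(sanitize_debug_text(raw))
# The newline survives; the ANSI escape, bell, and NUL bytes are replaced.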
@@ -54,12 +84,16 @@ class IsolationProvider(ABC):
    Abstracts an isolation provider
    """

    def __init__(self) -> None:
        if getattr(sys, "dangerzone_dev", False) is True:
    def __init__(self, debug: bool = False) -> None:
        self.debug = debug
        if self.should_capture_stderr():
            self.proc_stderr = subprocess.PIPE
        else:
            self.proc_stderr = subprocess.DEVNULL

    def should_capture_stderr(self) -> bool:
        return self.debug or getattr(sys, "dangerzone_dev", False)

    @abstractmethod
    def install(self) -> bool:
        pass
@@ -73,11 +107,8 @@ class IsolationProvider(ABC):
        self.progress_callback = progress_callback
        document.mark_as_converting()
        try:
            with tempfile.TemporaryDirectory() as t:
                Path(f"{t}/pixels").mkdir()
                with self.doc_to_pixels_proc(document) as conversion_proc:
                    self.doc_to_pixels(document, t, conversion_proc)
                    self.pixels_to_pdf(document, t, ocr_lang)
            with self.doc_to_pixels_proc(document) as conversion_proc:
                self.convert_with_proc(document, ocr_lang, conversion_proc)
            document.mark_as_safe()
            if document.archive_after_conversion:
                document.archive()
@@ -91,8 +122,45 @@ class IsolationProvider(ABC):
            self.print_progress(document, True, str(e), 0)
            document.mark_as_failed()

    def doc_to_pixels(
        self, document: Document, tempdir: str, p: subprocess.Popen
    def ocr_page(self, pixmap: fitz.Pixmap, ocr_lang: str) -> bytes:
        """Get a single page as pixels, OCR it, and return a PDF as bytes."""
        return pixmap.pdfocr_tobytes(
            compress=True,
            language=ocr_lang,
            tessdata=str(get_tessdata_dir()),
        )

    def pixels_to_pdf_page(
        self,
        untrusted_data: bytes,
        untrusted_width: int,
        untrusted_height: int,
        ocr_lang: Optional[str],
    ) -> fitz.Document:
        """Convert a byte array of RGB pixels into a PDF page, optionally with OCR."""
        pixmap = fitz.Pixmap(
            fitz.Colorspace(fitz.CS_RGB),
            untrusted_width,
            untrusted_height,
            untrusted_data,
            False,
        )
        pixmap.set_dpi(DEFAULT_DPI, DEFAULT_DPI)

        if ocr_lang:  # OCR the document
            page_pdf_bytes = self.ocr_page(pixmap, ocr_lang)
        else:  # Don't OCR
            page_doc = fitz.Document()
            page_doc.insert_file(pixmap)
            page_pdf_bytes = page_doc.tobytes(deflate_images=True)

        return fitz.open("pdf", page_pdf_bytes)

    def convert_with_proc(
        self,
        document: Document,
        ocr_lang: Optional[str],
        p: subprocess.Popen,
    ) -> None:
        percentage = 0.0
        with open(document.input_filename, "rb") as f:
@@ -107,10 +175,15 @@ class IsolationProvider(ABC):
            n_pages = read_int(p.stdout)
            if n_pages == 0 or n_pages > errors.MAX_PAGES:
                raise errors.MaxPagesException()
            percentage_per_page = 49.0 / n_pages
            step = 100 / n_pages

            safe_doc = fitz.Document()

            for page in range(1, n_pages + 1):
                text = f"Converting page {page}/{n_pages} to pixels"
                searchable = "searchable " if ocr_lang else ""
                text = (
                    f"Converting page {page}/{n_pages} from pixels to {searchable}PDF"
                )
                self.print_progress(document, False, text, percentage)

                width = read_int(p.stdout)
@@ -126,39 +199,27 @@ class IsolationProvider(ABC):
                    num_pixels,
                )

                # Wrapper code
                with open(f"{tempdir}/pixels/page-{page}.width", "w") as f_width:
                    f_width.write(str(width))
                with open(f"{tempdir}/pixels/page-{page}.height", "w") as f_height:
                    f_height.write(str(height))
                with open(f"{tempdir}/pixels/page-{page}.rgb", "wb") as f_rgb:
                    f_rgb.write(untrusted_pixels)
                page_pdf = self.pixels_to_pdf_page(
                    untrusted_pixels,
                    width,
                    height,
                    ocr_lang,
                )
                safe_doc.insert_pdf(page_pdf)

                percentage += percentage_per_page
                percentage += step

            # Ensure nothing else is read after all bitmaps are obtained
            p.stdout.close()

            # Saving it with a different name first, because PyMuPDF cannot handle
            # non-Unicode chars.
            safe_doc.save(document.sanitized_output_filename)
            os.replace(document.sanitized_output_filename, document.output_filename)

        # TODO handle leftover code input
        text = "Converted document to pixels"
        self.print_progress(document, False, text, percentage)

        if getattr(sys, "dangerzone_dev", False):
            assert p.stderr
            debug_log = read_debug_text(p.stderr, MAX_CONVERSION_LOG_CHARS)
            p.stderr.close()
            log.info(
                "Conversion output (doc to pixels)\n"
                f"{DOC_TO_PIXELS_LOG_START}\n"
                f"{debug_log}"  # no need for an extra newline here
                f"{DOC_TO_PIXELS_LOG_END}"
            )

    @abstractmethod
    def pixels_to_pdf(
        self, document: Document, tempdir: str, ocr_lang: Optional[str]
    ) -> None:
        pass
        text = "Successfully converted document"
        self.print_progress(document, False, text, 100)

    def print_progress(
        self, document: Document, error: bool, text: str, percentage: float
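Putting the reads above together, the sandboxed side is expected to write a big-endian page count, then for each page a big-endian width and height followed by `width * height * 3` raw RGB bytes. Below is a sketch of a writer that satisfies this framing, assuming the shared integer width is 2 bytes (the real value comes from `dangerzone.conversion.common.INT_BYTES`); the function names and toy data are illustrative.

INT_BYTES = 2  # assumed to match dangerzone.conversion.common.INT_BYTES


def write_int(out, value: int) -> None:
    # Big-endian unsigned int, mirroring read_int() on the host side.
    out.write(value.to_bytes(INT_BYTES, "big", signed=False))


def write_document(out, pages) -> None:
    """pages: iterable of (width, height, rgb_bytes) tuples."""
    pages = list(pages)
    write_int(out, len(pages))
    for width, height, rgb in pages:
        assert len(rgb) == width * height * 3  # raw RGB, 3 bytes per pixel
        write_int(out, width)
        write_int(out, height)
        out.write(rgb)

# Usage sketch: write_document(sys.stdout.buffer, [(1, 1, b"\xff\x00\x00")])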
@@ -194,6 +255,16 @@ class IsolationProvider(ABC):
        )
        return errors.exception_from_error_code(error_code)

    @abstractmethod
    def should_wait_install(self) -> bool:
        """Whether this isolation provider takes a lot of time to install."""
        pass

    @abstractmethod
    def is_available(self) -> bool:
        """Whether the backing implementation of the isolation provider is available."""
        pass

    @abstractmethod
    def get_max_parallel_conversions(self) -> int:
        pass
@@ -240,7 +311,7 @@ class IsolationProvider(ABC):
        )

        # Forcefully kill the running process.
        p.kill()
        kill_process_group(p)
        try:
            p.wait(timeout_force)
        except subprocess.TimeoutExpired:
@@ -258,7 +329,16 @@ class IsolationProvider(ABC):
        timeout_force: int = TIMEOUT_FORCE,
    ) -> Iterator[subprocess.Popen]:
        """Start a conversion process, pass it to the caller, and then clean it up."""
        # Store the proc stderr in memory
        stderr = BytesIO()
        p = self.start_doc_to_pixels_proc(document)
        stderr_thread = self.start_stderr_thread(p, stderr)

        if platform.system() != "Windows":
            assert os.getpgid(p.pid) != os.getpgid(os.getpid()), (
                "Parent shares same PGID with child"
            )

        try:
            yield p
        except errors.ConverterProcException as e:
@@ -269,74 +349,40 @@ class IsolationProvider(ABC):
            document, p, timeout_grace=timeout_grace, timeout_force=timeout_force
        )

        if stderr_thread:
            # Wait for the thread to complete. If it's still alive, mention it in the debug log.
            stderr_thread.join(timeout=1)

        # From global_common:
        debug_bytes = stderr.getvalue()
        debug_log = sanitize_debug_text(debug_bytes)

    # def validate_convert_to_pixel_output(self, common, output):
    #     """
    #     Take the output from the convert to pixels tasks and validate it. Returns
    #     a tuple like: (success (boolean), error_message (str))
    #     """
    #     max_image_width = 10000
    #     max_image_height = 10000
        incomplete = "(incomplete) " if stderr_thread.is_alive() else ""

    #     # Did we hit an error?
    #     for line in output.split("\n"):
    #         if (
    #             "failed:" in line
    #             or "The document format is not supported" in line
    #             or "Error" in line
    #         ):
    #             return False, output
        log.info(
            "Conversion output (doc to pixels)\n"
            f"----- DOC TO PIXELS LOG START {incomplete}-----\n"
            f"{debug_log}"  # no need for an extra newline here
            "----- DOC TO PIXELS LOG END -----"
        )

    #     # How many pages was that?
    #     num_pages = None
    #     for line in output.split("\n"):
    #         if line.startswith("Document has "):
    #             num_pages = line.split(" ")[2]
    #             break
    #     if not num_pages or not num_pages.isdigit() or int(num_pages) <= 0:
    #         return False, "Invalid number of pages returned"
    #     num_pages = int(num_pages)
    def start_stderr_thread(
        self, process: subprocess.Popen, stderr: IO[bytes]
    ) -> Optional[threading.Thread]:
        """Start a thread to read stderr from the process"""

    #     # Make sure we have the files we expect
    #     expected_filenames = []
    #     for i in range(1, num_pages + 1):
    #         expected_filenames += [
    #             f"page-{i}.rgb",
    #             f"page-{i}.width",
    #             f"page-{i}.height",
    #         ]
    #     expected_filenames.sort()
    #     actual_filenames = os.listdir(common.pixel_dir.name)
    #     actual_filenames.sort()
        def _stream_stderr(process_stderr: IO[bytes]) -> None:
            try:
                for line in process_stderr:
                    stderr.write(line)
            except (ValueError, IOError) as e:
                log.debug(f"Stderr stream closed: {e}")

    #     if expected_filenames != actual_filenames:
    #         return (
    #             False,
    #             f"We expected these files:\n{expected_filenames}\n\nBut we got these files:\n{actual_filenames}",
    #         )

    #     # Make sure the files are the correct sizes
    #     for i in range(1, num_pages + 1):
    #         with open(f"{common.pixel_dir.name}/page-{i}.width") as f:
    #             w_str = f.read().strip()
    #         with open(f"{common.pixel_dir.name}/page-{i}.height") as f:
    #             h_str = f.read().strip()
    #         w = int(w_str)
    #         h = int(h_str)
    #         if (
    #             not w_str.isdigit()
    #             or not h_str.isdigit()
    #             or w <= 0
    #             or w > max_image_width
    #             or h <= 0
    #             or h > max_image_height
    #         ):
    #             return False, f"Page {i} has invalid geometry"

    #         # Make sure the RGB file is the correct size
    #         if os.path.getsize(f"{common.pixel_dir.name}/page-{i}.rgb") != w * h * 3:
    #             return False, f"Page {i} has an invalid RGB file size"

    #     return True, True
        if process.stderr:
            stderr_thread = threading.Thread(
                target=_stream_stderr,
                args=(process.stderr,),
                daemon=True,
            )
            stderr_thread.start()
            return stderr_thread
        return None
@@ -1,22 +1,21 @@
import gzip
import json
import logging
import os
import platform
import shlex
import shutil
import subprocess
import sys
from typing import Any, List, Optional, Tuple
from typing import List, Tuple

from ..conversion import errors
from .. import container_utils, errors
from ..container_utils import Runtime
from ..document import Document
from ..util import get_tmp_dir  # NOQA : required for mocking in our tests.
from ..util import get_resource_path, get_subprocess_startupinfo
from .base import PIXELS_TO_PDF_LOG_END, PIXELS_TO_PDF_LOG_START, IsolationProvider
from .base import IsolationProvider, terminate_process_group

TIMEOUT_KILL = 5  # Timeout in seconds until the kill command returns.

MINIMUM_DOCKER_DESKTOP = {
    "Darwin": "4.40.0",
    "Windows": "4.40.0",
}

# Define startupinfo for subprocesses
if platform.system() == "Windows":
@@ -29,73 +28,8 @@ else:
log = logging.getLogger(__name__)


class NoContainerTechException(Exception):
    def __init__(self, container_tech: str) -> None:
        super().__init__(f"{container_tech} is not installed")


class Container(IsolationProvider):
    # Name of the dangerzone container
    CONTAINER_NAME = "dangerzone.rocks/dangerzone"

    @staticmethod
    def get_runtime_name() -> str:
        if platform.system() == "Linux":
            runtime_name = "podman"
        else:
            # Windows, Darwin, and unknown use docker for now, dangerzone-vm eventually
            runtime_name = "docker"
        return runtime_name

    @staticmethod
    def get_runtime_version() -> Tuple[int, int]:
        """Get the major/minor parts of the Docker/Podman version.

        Some of the operations we perform in this module rely on some Podman features
        that are not available across all of our platforms. In order to have a proper
        fallback, we need to know the Podman version. More specifically, we're fine with
        just knowing the major and minor version, since writing/installing a full-blown
        semver parser is an overkill.
        """
        # Get the Docker/Podman version, using a Go template.
        runtime = Container.get_runtime_name()
        if runtime == "podman":
            query = "{{.Client.Version}}"
        else:
            query = "{{.Server.Version}}"

        cmd = [runtime, "version", "-f", query]
        try:
            version = subprocess.run(
                cmd,
                startupinfo=get_subprocess_startupinfo(),
                capture_output=True,
                check=True,
            ).stdout.decode()
        except Exception as e:
            msg = f"Could not get the version of the {runtime.capitalize()} tool: {e}"
            raise RuntimeError(msg) from e

        # Parse this version and return the major/minor parts, since we don't need the
        # rest.
        try:
            major, minor, _ = version.split(".", 3)
            return (int(major), int(minor))
        except Exception as e:
            msg = (
                f"Could not parse the version of the {runtime.capitalize()} tool"
                f" (found: '{version}') due to the following error: {e}"
            )
            raise RuntimeError(msg)

    @staticmethod
    def get_runtime() -> str:
        container_tech = Container.get_runtime_name()
        runtime = shutil.which(container_tech)
        if runtime is None:
            raise NoContainerTechException(container_tech)
        return runtime

    @staticmethod
    def get_runtime_security_args() -> List[str]:
        """Security options applicable to the outer Dangerzone container.
@@ -109,38 +43,46 @@ class Container(IsolationProvider):
        * Set the `container_engine_t` SELinux label, which allows gVisor to work on
          SELinux-enforcing systems
          (see https://github.com/freedomofpress/dangerzone/issues/880).
        * Set a custom seccomp policy for every container engine, since the `ptrace(2)`
          system call is forbidden by some.

        For Podman specifically, where applicable, we also add the following:
        * Do not log the container's output.
        * Use a newer seccomp policy (for Podman 3.x versions only).
        * Do not map the host user to the container, with `--userns nomap` (available
          from Podman 4.1 onwards)
          - This particular argument is specified in `start_doc_to_pixels_proc()`, but
            should move here once #748 is merged.
        """
        # This file has been copied as is [1] from the official Podman repo. See:
        #
        # [1] https://github.com/containers/common/blob/d3283f8401eeeb21f3c59a425b5461f069e199a7/pkg/seccomp/seccomp.json
        seccomp_json_path = get_resource_path("seccomp.gvisor.json")
        custom_seccomp_policy_arg = ["--security-opt", f"seccomp={seccomp_json_path}"]
        if Container.get_runtime_name() == "podman":
        runtime = Runtime()
        if runtime.name == "podman":
            security_args = ["--log-driver", "none"]
            security_args += ["--security-opt", "no-new-privileges"]

            # NOTE: Ubuntu Focal/Jammy have Podman version 3, and their seccomp policy
            # does not include the `ptrace()` syscall. This system call is required for
            # running gVisor, so we enforce a newer seccomp policy file in that case.
            #
            # See also https://github.com/freedomofpress/dangerzone/issues/846
            if Container.get_runtime_version() < (4, 0):
                security_args += custom_seccomp_policy_arg
            if container_utils.get_runtime_version() >= (4, 1):
                # We perform a platform check to avoid the following Podman Desktop
                # error on Windows:
                #
                #   Error: nomap is only supported in rootless mode
                #
                # See also: https://github.com/freedomofpress/dangerzone/issues/1127
                if platform.system() != "Windows":
                    security_args += ["--userns", "nomap"]
        else:
            security_args = ["--security-opt=no-new-privileges:true"]
            # Older Docker Desktop versions may have a seccomp policy that does not
            # allow `ptrace(2)`. In these cases, we specify our own. See:
            # https://github.com/freedomofpress/dangerzone/issues/846
            if Container.get_runtime_version() < (25, 0):
                security_args += custom_seccomp_policy_arg

        # We specify a custom seccomp policy uniformly, because on certain container
        # engines the default policy might not allow the `ptrace(2)` syscall [1]. Our
        # custom seccomp policy has been copied as is [2] from the official Podman repo.
        #
        # [1] https://github.com/freedomofpress/dangerzone/issues/846
        # [2] https://github.com/containers/common/blob/d3283f8401eeeb21f3c59a425b5461f069e199a7/pkg/seccomp/seccomp.json
        seccomp_json_path = str(get_resource_path("seccomp.gvisor.json"))
        # We perform a platform check to avoid the following Podman Desktop
        # error on Windows:
        #
        #   Error: opening seccomp profile failed: open
        #   C:\[...]\dangerzone\share\seccomp.gvisor.json: no such file or directory
        #
        # See also: https://github.com/freedomofpress/dangerzone/issues/1127
        if runtime.name == "podman" and platform.system() != "Windows":
            security_args += ["--security-opt", f"seccomp={seccomp_json_path}"]

        security_args += ["--cap-drop", "all"]
        security_args += ["--cap-add", "SYS_CHROOT"]
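For a concrete sense of the result, here is roughly what an assembled invocation could look like for Podman 4.1+ on Linux. This is only a preview built from the flags shown above — the image tag, container name, and seccomp path are placeholders, and the real assembly happens in `exec_container()` further down.

security_args = [
    "--log-driver", "none",
    "--security-opt", "no-new-privileges",
    "--userns", "nomap",
    "--security-opt", "seccomp=share/seccomp.gvisor.json",
    "--cap-drop", "all",
    "--cap-add", "SYS_CHROOT",
]
cmd = (
    ["podman", "run"]
    + security_args
    + ["--rm", "-i", "--name", "dangerzone-doc-to-pixels-example"]
    + ["dangerzone.rocks/dangerzone:TAG"]
)
print(" ".join(cmd))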
@@ -153,81 +95,92 @@ class Container(IsolationProvider):

    @staticmethod
    def install() -> bool:
        """Install the container image tarball, or verify that it's already installed.

        Perform the following actions:
        1. Get the tags of any locally available images that match Dangerzone's image
           name.
        2. Get the expected image tag from the image-id.txt file.
           - If this tag is present in the local images, then we can return.
           - Else, prune the older container images and continue.
        3. Load the image tarball and make sure it matches the expected tag.
        """
        Make sure the podman container is installed. Linux only.
        """
        if Container.is_container_installed():
        old_tags = container_utils.list_image_tags()
        expected_tag = container_utils.get_expected_tag()

        if expected_tag not in old_tags:
            # Prune older container images.
            log.info(
                f"Could not find a Dangerzone container image with tag '{expected_tag}'"
            )
            for tag in old_tags:
                tag = container_utils.CONTAINER_NAME + ":" + tag
                container_utils.delete_image_tag(tag)
        else:
            return True

        # Load the container into podman
        log.info("Installing Dangerzone container image...")
        # Load the image tarball into the container runtime.
        container_utils.load_image_tarball()

        p = subprocess.Popen(
            [Container.get_runtime(), "load"],
            stdin=subprocess.PIPE,
            startupinfo=get_subprocess_startupinfo(),
        )
        # Check that the container image has the expected image tag.
        # See https://github.com/freedomofpress/dangerzone/issues/988 for an example
        # where this was not the case.
        new_tags = container_utils.list_image_tags()
        if expected_tag not in new_tags:
            raise errors.ImageNotPresentException(
                f"Could not find expected tag '{expected_tag}' after loading the"
                " container image tarball"
            )

        chunk_size = 10240
        compressed_container_path = get_resource_path("container.tar.gz")
        with gzip.open(compressed_container_path) as f:
            while True:
                chunk = f.read(chunk_size)
                if len(chunk) > 0:
                    if p.stdin:
                        p.stdin.write(chunk)
                else:
                    break
        p.communicate()

        if not Container.is_container_installed():
            log.error("Failed to install the container image")
            return False

        log.info("Container image installed")
        return True

    @staticmethod
    def is_container_installed() -> bool:
        """
        See if the podman container is installed. Linux only.
        """
        # Get the image id
        with open(get_resource_path("image-id.txt")) as f:
            expected_image_ids = f.read().strip().split()
    def should_wait_install() -> bool:
        return True

        # See if this image is already installed
        installed = False
        found_image_id = subprocess.check_output(
            [
                Container.get_runtime(),
                "image",
                "list",
                "--format",
                "{{.ID}}",
                Container.CONTAINER_NAME,
            ],
            text=True,
    @staticmethod
    def is_available() -> bool:
        runtime = Runtime()

        # Can we run `docker/podman image ls` without an error
        with subprocess.Popen(
            [str(runtime.path), "image", "ls"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE,
            startupinfo=get_subprocess_startupinfo(),
        )
        found_image_id = found_image_id.strip()

        if found_image_id in expected_image_ids:
            installed = True
        elif found_image_id == "":
            pass
        else:
            log.info("Deleting old dangerzone container image")

            try:
                subprocess.check_output(
                    [Container.get_runtime(), "rmi", "--force", found_image_id],
                    startupinfo=get_subprocess_startupinfo(),
        ) as p:
            _, stderr = p.communicate()
            if p.returncode != 0:
                raise errors.NotAvailableContainerTechException(
                    runtime.name, stderr.decode()
                )
            except Exception:
                log.warning("Couldn't delete old container image, so leaving it there")
            return True

        return installed
    def check_docker_desktop_version(self) -> Tuple[bool, str]:
        # On windows and darwin, check that the minimum version is met
        version = ""
        runtime = Runtime()
        runtime_is_docker = runtime.name == "docker"
        platform_is_not_linux = platform.system() != "Linux"

        if runtime_is_docker and platform_is_not_linux:
            with subprocess.Popen(
                ["docker", "version", "--format", "{{.Server.Platform.Name}}"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                startupinfo=get_subprocess_startupinfo(),
            ) as p:
                stdout, stderr = p.communicate()
                if p.returncode != 0:
                    # When an error occurs, consider that the check went
                    # through, as we're checking for installation compatibility
                    # somewhere else already
                    return True, version
                # The output is like "Docker Desktop 4.35.1 (173168)"
                version = stdout.decode().replace("Docker Desktop", "").split()[0]
                if version < MINIMUM_DOCKER_DESKTOP[platform.system()]:
                    return False, version
        return True, version
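One caveat worth noting in the check above: `version < MINIMUM_DOCKER_DESKTOP[...]` compares strings lexicographically, so "4.9.0" sorts above "4.40.0" even though 9 < 40 numerically, and such a version would slip past the minimum. A tuple-based comparison avoids that; the sketch below is an illustrative alternative, not what the patch ships.

def parse_version(version: str) -> tuple:
    # "4.35.1" -> (4, 35, 1); non-numeric suffixes would need extra care.
    return tuple(int(part) for part in version.split("."))

assert "4.9.0" > "4.40.0"  # lexicographic: misleading
assert parse_version("4.9.0") < parse_version("4.40.0")  # numeric: correct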
    def doc_to_pixels_container_name(self, document: Document) -> str:
        """Unique container name for the doc-to-pixels phase."""
@@ -237,31 +190,6 @@ class Container(IsolationProvider):
        """Unique container name for the pixels-to-pdf phase."""
        return f"dangerzone-pixels-to-pdf-{document.id}"

    def assert_field_type(self, val: Any, _type: object) -> None:
        # XXX: Use a stricter check than isinstance because `bool` is a subclass of
        # `int`.
        #
        # See https://stackoverflow.com/a/37888668
        if not type(val) == _type:
            raise ValueError("Status field has incorrect type")

    def parse_progress_trusted(self, document: Document, line: str) -> None:
        """
        Parses a line returned by the container.
        """
        try:
            status = json.loads(line)
            text = status["text"]
            self.assert_field_type(text, str)
            error = status["error"]
            self.assert_field_type(error, bool)
            percentage = status["percentage"]
            self.assert_field_type(percentage, float)
            self.print_progress(document, error, text, percentage)
        except Exception:
            error_message = f"Invalid JSON returned from container:\n\n\t {line}"
            self.print_progress(document, True, error_message, -1)

    def exec(
        self,
        args: List[str],
@@ -275,31 +203,39 @@ class Container(IsolationProvider):
            stdout=subprocess.PIPE,
            stderr=self.proc_stderr,
            startupinfo=startupinfo,
            # Start the conversion process in a new session, so that we can later on
            # kill the process group, without killing the controlling script.
            start_new_session=True,
        )

    def exec_container(
        self,
        command: List[str],
        name: str,
        extra_args: List[str] = [],
    ) -> subprocess.Popen:
        container_runtime = self.get_runtime()
        runtime = Runtime()
        security_args = self.get_runtime_security_args()
        debug_args = []
        if self.debug:
            debug_args += ["-e", "RUNSC_DEBUG=1"]

        enable_stdin = ["-i"]
        set_name = ["--name", name]
        prevent_leakage_args = ["--rm"]
        image_name = [
            container_utils.CONTAINER_NAME + ":" + container_utils.get_expected_tag()
        ]
        args = (
            ["run"]
            + security_args
            + debug_args
            + prevent_leakage_args
            + enable_stdin
            + set_name
            + extra_args
            + [self.CONTAINER_NAME]
            + image_name
            + command
        )
        args = [container_runtime] + args
        return self.exec(args)
        return self.exec([str(runtime.path)] + args)

    def kill_container(self, name: str) -> None:
        """Terminate a spawned container.
@@ -311,8 +247,8 @@ class Container(IsolationProvider):
        connected to the Docker daemon, and killing it will just close the associated
        standard streams.
        """
        container_runtime = self.get_runtime()
        cmd = [container_runtime, "kill", name]
        runtime = Runtime()
        cmd = [str(runtime.path), "kill", name]
        try:
            # We do not check the exit code of the process here, since the container may
            # have stopped right before invoking this command. In that case, the
@@ -337,84 +273,6 @@ class Container(IsolationProvider):
            f"Unexpected error occurred while killing container '{name}': {str(e)}"
        )

    def pixels_to_pdf(
        self, document: Document, tempdir: str, ocr_lang: Optional[str]
    ) -> None:
        # Convert pixels to safe PDF
        command = [
            "/usr/bin/python3",
            "-m",
            "dangerzone.conversion.pixels_to_pdf",
        ]
        extra_args = [
            "-v",
            f"{tempdir}:/safezone:Z",
            "-e",
            f"OCR={0 if ocr_lang is None else 1}",
            "-e",
            f"OCR_LANGUAGE={ocr_lang}",
        ]
        # XXX: Until #748 gets merged, we have to run our pixels to PDF phase in a
        # container, which involves mounting two temp dirs. This does not bode well with
        # gVisor for two reasons:
        #
        # 1. Our gVisor integration chroot()s into /home/dangerzone/dangerzone-image/rootfs,
        #    meaning that the location of the temp dirs must be relevant to that path.
        # 2. Reading and writing to these temp dirs requires permissions which are not
        #    available to the user within gVisor's user namespace.
        #
        # For these reasons, and because the pixels to PDF phase is more trusted (and
        # will soon stop being containerized), we circumvent gVisor support by doing the
        # following:
        #
        # 1. Override our entrypoint script with a no-op command (/usr/bin/env).
        # 2. Set the PYTHONPATH so that we can import the Python code within
        #    /home/dangerzone/dangerzone-image/rootfs
        # 3. Run the container as the root user, so that it can always write to the
        #    mounted directories. This container is trusted, so running as root has no
        #    impact to the security of Dangerzone.
        img_root = "/home/dangerzone/dangerzone-image/rootfs"
        extra_args += [
            "--entrypoint",
            "/usr/bin/env",
            "-e",
            f"PYTHONPATH={img_root}/opt/dangerzone:{img_root}/usr/lib/python3.12/site-packages",
            "-e",
            f"TESSDATA_PREFIX={img_root}/usr/share/tessdata",
            "-u",
            "root",
        ]

        name = self.pixels_to_pdf_container_name(document)
        pixels_to_pdf_proc = self.exec_container(command, name, extra_args)
        if pixels_to_pdf_proc.stdout:
            for line in pixels_to_pdf_proc.stdout:
                self.parse_progress_trusted(document, line.decode())
        error_code = pixels_to_pdf_proc.wait()

        # In case of a dev run, log everything from the second container.
        if getattr(sys, "dangerzone_dev", False):
            assert pixels_to_pdf_proc.stderr
            out = pixels_to_pdf_proc.stderr.read().decode()
            text = (
                f"Conversion output: (pixels to PDF)\n"
                f"{PIXELS_TO_PDF_LOG_START}\n{out}\n{PIXELS_TO_PDF_LOG_END}"
            )
            log.info(text)

        if error_code != 0:
            log.error("pixels-to-pdf failed")
            raise errors.exception_from_error_code(error_code)
        else:
            # Move the final file to the right place
            if os.path.exists(document.output_filename):
                os.remove(document.output_filename)

            container_output_filename = os.path.join(
                tempdir, "safe-output-compressed.pdf"
            )
            shutil.move(container_output_filename, document.output_filename)

    def start_doc_to_pixels_proc(self, document: Document) -> subprocess.Popen:
        # Convert document to pixels
        command = [
@ -422,15 +280,8 @@ class Container(IsolationProvider):
|
|||
"-m",
|
||||
"dangerzone.conversion.doc_to_pixels",
|
||||
]
|
||||
# NOTE: Using `--userns nomap` is available only on Podman >= 4.1.0.
|
||||
# XXX: Move this under `get_runtime_security_args()` once #748 is merged.
|
||||
extra_args = []
|
||||
if Container.get_runtime_name() == "podman":
|
||||
if Container.get_runtime_version() >= (4, 1):
|
||||
extra_args += ["--userns", "nomap"]
|
||||
|
||||
name = self.doc_to_pixels_container_name(document)
|
||||
return self.exec_container(command, name=name, extra_args=extra_args)
|
||||
return self.exec_container(command, name=name)
|
||||
|
||||
def terminate_doc_to_pixels_proc(
|
||||
self, document: Document, p: subprocess.Popen
|
||||
|
@ -441,7 +292,7 @@ class Container(IsolationProvider):
|
|||
#
|
||||
# See also https://github.com/freedomofpress/dangerzone/issues/791
|
||||
self.kill_container(self.doc_to_pixels_container_name(document))
|
||||
p.terminate()
|
||||
terminate_process_group(p)
|
||||
|
||||
def ensure_stop_doc_to_pixels_proc( # type: ignore [no-untyped-def]
|
||||
self, document: Document, *args, **kwargs
|
||||
|
@ -453,10 +304,10 @@ class Container(IsolationProvider):
|
|||
# after a podman kill / docker kill invocation, this will likely be the case,
|
||||
# else the container runtime (Docker/Podman) has experienced a problem, and we
|
||||
# should report it.
|
||||
container_runtime = self.get_runtime()
|
||||
runtime = Runtime()
|
||||
name = self.doc_to_pixels_container_name(document)
|
||||
all_containers = subprocess.run(
|
||||
[container_runtime, "ps", "-a"],
|
||||
[str(runtime.path), "ps", "-a"],
|
||||
capture_output=True,
|
||||
startupinfo=get_subprocess_startupinfo(),
|
||||
)
|
||||
|
@ -467,19 +318,20 @@ class Container(IsolationProvider):
|
|||
# FIXME hardcoded 1 until length conversions are better handled
|
||||
# https://github.com/freedomofpress/dangerzone/issues/257
|
||||
return 1
|
||||
runtime = Runtime() # type: ignore [unreachable]
|
||||
|
||||
n_cpu = 1 # type: ignore [unreachable]
|
||||
n_cpu = 1
|
||||
if platform.system() == "Linux":
|
||||
# if on linux containers run natively
|
||||
cpu_count = os.cpu_count()
|
||||
if cpu_count is not None:
|
||||
n_cpu = cpu_count
|
||||
|
||||
elif self.get_runtime_name() == "docker":
|
||||
elif runtime.name == "docker":
|
||||
# For Windows and MacOS containers run in VM
|
||||
# So we obtain the CPU count for the VM
|
||||
n_cpu_str = subprocess.check_output(
|
||||
[self.get_runtime(), "info", "--format", "{{.NCPU}}"],
|
||||
[str(runtime.path), "info", "--format", "{{.NCPU}}"],
|
||||
text=True,
|
||||
startupinfo=get_subprocess_startupinfo(),
|
||||
)
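
The `start_new_session=True` flag above is what makes `terminate_process_group(p)` possible: the child becomes a session leader, so its PID doubles as a process-group ID that can be signalled without touching the controlling script. A minimal sketch of such a helper, assuming POSIX semantics (the real `terminate_process_group()` is defined elsewhere in the codebase, outside this diff):

```python
import os
import signal
import subprocess

def terminate_process_group_sketch(p: subprocess.Popen) -> None:
    """Send SIGTERM to the child's whole process group (sketch only)."""
    if os.name == "posix":
        # The group ID equals the child's PID because it was started with
        # start_new_session=True; children it spawned are signalled too.
        os.killpg(os.getpgid(p.pid), signal.SIGTERM)
    else:
        # No POSIX process groups on Windows; fall back to terminate().
        p.terminate()
```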
@@ -1,18 +1,25 @@
import logging
import os
import shutil
import subprocess
import sys
import time
from typing import Callable, Optional

from ..conversion.common import DangerzoneConverter
from ..document import Document
from ..util import get_resource_path
from .base import IsolationProvider
from .base import IsolationProvider, terminate_process_group

log = logging.getLogger(__name__)


def dummy_script() -> None:
    sys.stdin.buffer.read()
    pages = 2
    width = height = 9
    DangerzoneConverter._write_int(pages)
    for page in range(pages):
        DangerzoneConverter._write_int(width)
        DangerzoneConverter._write_int(height)
        DangerzoneConverter._write_bytes(width * height * 3 * b"A")


class Dummy(IsolationProvider):
    """Dummy Isolation Provider (FOR TESTING ONLY)

@@ -27,59 +34,38 @@ class Dummy(IsolationProvider):
                "Dummy isolation provider is UNSAFE and should never be "
                + "called in a non-testing system."
            )
        super().__init__()

    def install(self) -> bool:
        return True

    def convert(
        self,
        document: Document,
        ocr_lang: Optional[str],
        progress_callback: Optional[Callable] = None,
    ) -> None:
        self.progress_callback = None
        log.debug("Dummy converter started:")
        log.debug(
            f" - document: {os.path.basename(document.input_filename)} ({document.id})"
        )
        log.debug(f" - ocr : {ocr_lang}")
        log.debug("\n(simulating conversion)")
        success = True
        progress = [
            [False, "Converting to PDF using GraphicsMagick", 0.0],
            [False, "Separating document into pages", 3.0],
            [False, "Converting page 1/1 to pixels", 5.0],
            [False, "Converted document to pixels", 50.0],
            [False, "Converting page 1/1 from pixels to PDF", 50.0],
            [False, "Merging 1 pages into a single PDF", 95.0],
            [False, "Compressing PDF", 97.0],
            [False, "Safe PDF created", 100.0],
        ]
        for error, text, percentage in progress:
            self.print_progress(document, error, text, percentage)  # type: ignore [arg-type]
            if error:
                success = False
            time.sleep(0.2)
        if success:
            shutil.copy(
                get_resource_path("dummy_document.pdf"), document.output_filename
            )
            document.mark_as_safe()
            if document.archive_after_conversion:
                document.archive()
    @staticmethod
    def is_available() -> bool:
        return True

    def pixels_to_pdf(
        self, document: Document, tempdir: str, ocr_lang: Optional[str]
    ) -> None:
        pass
    @staticmethod
    def should_wait_install() -> bool:
        return False

    def start_doc_to_pixels_proc(self, document: Document) -> subprocess.Popen:
        return subprocess.Popen("True")
        cmd = [
            sys.executable,
            "-c",
            "from dangerzone.isolation_provider.dummy import dummy_script;"
            " dummy_script()",
        ]
        return subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=self.proc_stderr,
            start_new_session=True,
        )

    def terminate_doc_to_pixels_proc(
        self, document: Document, p: subprocess.Popen
    ) -> None:
        pass
        terminate_process_group(p)

    def get_max_parallel_conversions(self) -> int:
        return 1
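
For reference, the framing that `dummy_script()` emits can be parsed back with a small reader. The integer width below is an assumption (`_write_int` is defined in `conversion/common.py`, outside this diff), so treat this as a sketch rather than the canonical protocol:

```python
import io

INT_BYTES = 2  # assumed fixed width of each integer field

def read_int(stream: io.BufferedIOBase) -> int:
    return int.from_bytes(stream.read(INT_BYTES), "big")

def read_pages(stream: io.BufferedIOBase) -> list[bytes]:
    """Read the page count, then (width, height, RGB pixels) per page."""
    pages = []
    for _ in range(read_int(stream)):
        width = read_int(stream)
        height = read_int(stream)
        pages.append(stream.read(width * height * 3))  # 3 bytes per pixel
    return pages
```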
@@ -1,20 +1,16 @@
import asyncio
import io
import logging
import os
import shutil
import subprocess
import sys
import zipfile
from pathlib import Path
from typing import IO, Optional
from typing import IO

from ..conversion import errors
from ..conversion.common import running_on_qubes
from ..conversion.pixels_to_pdf import PixelsToPDF
from ..document import Document
from ..util import get_resource_path
from .base import PIXELS_TO_PDF_LOG_END, PIXELS_TO_PDF_LOG_START, IsolationProvider
from .base import IsolationProvider

log = logging.getLogger(__name__)

@@ -25,27 +21,13 @@ class Qubes(IsolationProvider):
    def install(self) -> bool:
        return True

    def pixels_to_pdf(
        self, document: Document, tempdir: str, ocr_lang: Optional[str]
    ) -> None:
        def print_progress_wrapper(error: bool, text: str, percentage: float) -> None:
            self.print_progress(document, error, text, percentage)
    @staticmethod
    def is_available() -> bool:
        return True

        converter = PixelsToPDF(progress_callback=print_progress_wrapper)
        try:
            asyncio.run(converter.convert(ocr_lang, tempdir))
        except (RuntimeError, ValueError) as e:
            raise errors.UnexpectedConversionError(str(e))
        finally:
            if getattr(sys, "dangerzone_dev", False):
                out = converter.captured_output.decode()
                text = (
                    f"Conversion output: (pixels to PDF)\n"
                    f"{PIXELS_TO_PDF_LOG_START}\n{out}{PIXELS_TO_PDF_LOG_END}"
                )
                log.info(text)

        shutil.move(f"{tempdir}/safe-output-compressed.pdf", document.output_filename)
    @staticmethod
    def should_wait_install() -> bool:
        return False

    def get_max_parallel_conversions(self) -> int:
        return 1

@@ -67,6 +49,9 @@ class Qubes(IsolationProvider):
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=stderr,
            # Start the conversion process in a new session, so that we can later on
            # kill the process group, without killing the controlling script.
            start_new_session=True,
        )

        if dev_mode:

@@ -93,14 +78,18 @@ class Qubes(IsolationProvider):
        standard streams explicitly, so that we can afterwards use `Popen.wait()` to
        learn if the qube terminated.

        Note that we don't close the stderr stream because we want to read debug logs
        from it. In the rare case where a qube cannot terminate because it's stuck
        writing at stderr (this is not the expected behavior), we expect that the
        process will still be forcefully killed after the soft termination timeout
        expires.

        [1]: https://github.com/freedomofpress/dangerzone/issues/563#issuecomment-2034803232
        """
        if p.stdin:
            p.stdin.close()
        if p.stdout:
            p.stdout.close()
        if p.stderr:
            p.stderr.close()

    def teleport_dz_module(self, wpipe: IO[bytes]) -> None:
        """Send the dangerzone module to another qube, as a zipfile."""

@@ -141,7 +130,6 @@ def is_qubes_native_conversion() -> bool:
        # This disambiguates if it is running a Qubes targeted build or not
        # (Qubes-specific builds don't ship the container image)

        compressed_container_path = get_resource_path("container.tar.gz")
        return not os.path.exists(compressed_container_path)
        return not get_resource_path("container.tar").exists()
    else:
        return False
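
The docstring above describes a "close the streams, then wait" termination protocol. A hedged sketch of that pattern in isolation (the timeout value is an arbitrary placeholder, not taken from this diff):

```python
import subprocess

def soft_terminate(p: subprocess.Popen, timeout: float = 5.0) -> None:
    if p.stdin:
        p.stdin.close()  # signal EOF so a well-behaved qube exits on its own
    if p.stdout:
        p.stdout.close()
    # stderr stays open so debug logs remain readable until the very end.
    try:
        p.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        p.kill()  # last resort, mirroring the forceful kill described above
```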
@@ -23,16 +23,13 @@ class DangerzoneCore(object):
        # Initialize terminal colors
        colorama.init(autoreset=True)

        # App data folder
        self.appdata_path = util.get_config_dir()

        # Languages supported by tesseract
        with open(get_resource_path("ocr-languages.json"), "r") as f:
        with get_resource_path("ocr-languages.json").open("r") as f:
            unsorted_ocr_languages = json.load(f)
            self.ocr_languages = dict(sorted(unsorted_ocr_languages.items()))

        # Load settings
        self.settings = Settings(self)
        self.settings = Settings()
        self.documents: List[Document] = []
        self.isolation_provider = isolation_provider

@@ -65,11 +62,18 @@ class DangerzoneCore(object):
        self, ocr_lang: Optional[str], stdout_callback: Optional[Callable] = None
    ) -> None:
        def convert_doc(document: Document) -> None:
            self.isolation_provider.convert(
                document,
                ocr_lang,
                stdout_callback,
            )
            try:
                self.isolation_provider.convert(
                    document,
                    ocr_lang,
                    stdout_callback,
                )
            except Exception:
                log.exception(
                    f"Unexpected error occurred while converting '{document}'"
                )
                document.mark_as_failed()

        max_jobs = self.isolation_provider.get_max_parallel_conversions()
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_jobs) as executor:
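
The hunk ends right where the documents are fanned out to the executor. A sketch of how `convert_doc` is typically submitted, bounded by `get_max_parallel_conversions()` (the submission loop itself is outside this hunk, so the details here are illustrative):

```python
import concurrent.futures
from typing import Callable, List

def convert_all(documents: List, convert_doc: Callable, max_jobs: int) -> None:
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_jobs) as executor:
        futures = [executor.submit(convert_doc, doc) for doc in documents]
        for future in futures:
            future.result()  # convert_doc already catches conversion errors
```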
@@ -1,29 +1,24 @@
import json
import logging
import os
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict

from packaging import version

from .document import SAFE_EXTENSION
from .util import get_version
from .util import get_config_dir, get_version

log = logging.getLogger(__name__)

if TYPE_CHECKING:
    from .logic import DangerzoneCore

SETTINGS_FILENAME: str = "settings.json"


class Settings:
    settings: Dict[str, Any]

    def __init__(self, dangerzone: "DangerzoneCore") -> None:
        self.dangerzone = dangerzone
        self.settings_filename = os.path.join(
            self.dangerzone.appdata_path, SETTINGS_FILENAME
        )
    def __init__(self) -> None:
        self.settings_filename = get_config_dir() / SETTINGS_FILENAME
        self.default_settings: Dict[str, Any] = self.generate_default_settings()
        self.load()

@@ -45,6 +40,22 @@ class Settings:
            "updater_errors": 0,
        }

    def custom_runtime_specified(self) -> bool:
        return "container_runtime" in self.settings

    def set_custom_runtime(self, runtime: str, autosave: bool = False) -> Path:
        from .container_utils import Runtime  # Avoid circular import

        container_runtime = Runtime.path_from_name(runtime)
        self.settings["container_runtime"] = str(container_runtime)
        if autosave:
            self.save()
        return container_runtime

    def unset_custom_runtime(self) -> None:
        self.settings.pop("container_runtime")
        self.save()

    def get(self, key: str) -> Any:
        return self.settings[key]

@@ -91,6 +102,6 @@ class Settings:
        self.save()

    def save(self) -> None:
        os.makedirs(self.dangerzone.appdata_path, exist_ok=True)
        with open(self.settings_filename, "w") as settings_file:
        self.settings_filename.parent.mkdir(parents=True, exist_ok=True)
        with self.settings_filename.open("w") as settings_file:
            json.dump(self.settings, settings_file, indent=4)
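
Taken together, the new methods give callers a small API for overriding the container runtime. An illustrative session (it assumes a `podman` binary is on the PATH, since `Runtime.path_from_name()` resolves the name to a full path):

```python
settings = Settings()
if not settings.custom_runtime_specified():
    # Stores the resolved path, e.g. /usr/bin/podman, and writes settings.json
    settings.set_custom_runtime("podman", autosave=True)
print(settings.get("container_runtime"))
settings.unset_custom_runtime()  # removes the key and saves immediately
```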
@@ -1,53 +1,76 @@
import pathlib
import platform
import subprocess
import sys
import traceback
import unicodedata
from typing import Optional
from pathlib import Path

import appdirs
try:
    import platformdirs
except ImportError:
    import appdirs as platformdirs


def get_config_dir() -> str:
    return appdirs.user_config_dir("dangerzone")
def get_config_dir() -> Path:
    return Path(platformdirs.user_config_dir("dangerzone"))


def get_tmp_dir() -> Optional[str]:
    """Get the parent dir for the Dangerzone temporary dirs.

    This function returns the parent directory where Dangerzone will store its temporary
    directories. The default behavior is to let Python choose for us (e.g., in `/tmp`
    for Linux), which is why we return None. However, we still need to define this
    function in order to be able to set this dir via mocking in our tests.
    """
    return None


def get_resource_path(filename: str) -> str:
def get_resource_path(filename: str) -> Path:
    if getattr(sys, "dangerzone_dev", False):
        # Look for resources directory relative to python file
        project_root = pathlib.Path(__file__).parent.parent
        project_root = Path(__file__).parent.parent
        prefix = project_root / "share"
    else:
        if platform.system() == "Darwin":
            bin_path = pathlib.Path(sys.executable)
            bin_path = Path(sys.executable)
            app_path = bin_path.parent.parent
            prefix = app_path / "Resources" / "share"
        elif platform.system() == "Linux":
            prefix = pathlib.Path(sys.prefix) / "share" / "dangerzone"
            prefix = Path(sys.prefix) / "share" / "dangerzone"
        elif platform.system() == "Windows":
            exe_path = pathlib.Path(sys.executable)
            exe_path = Path(sys.executable)
            dz_install_path = exe_path.parent
            prefix = dz_install_path / "share"
        else:
            raise NotImplementedError(f"Unsupported system {platform.system()}")
    resource_path = prefix / filename
    return str(resource_path)
    return prefix / filename


def get_tessdata_dir() -> Path:
    if getattr(sys, "dangerzone_dev", False) or platform.system() in (
        "Windows",
        "Darwin",
    ):
        # Always use the tessdata path from the Dangerzone ./share directory, for
        # development builds, or in Windows/macOS platforms.
        return get_resource_path("tessdata")

    # In case of Linux systems, grab the Tesseract data from any of the following
    # locations. We have found some of the locations through trial and error, whereas
    # others are taken from the docs:
    #
    #     [...] Possibilities are /usr/share/tesseract-ocr/tessdata or
    #     /usr/share/tessdata or /usr/share/tesseract-ocr/4.00/tessdata. [1]
    #
    # [1] https://tesseract-ocr.github.io/tessdoc/Installation.html
    tessdata_dirs = [
        Path("/usr/share/tessdata/"),  # on some Debian
        Path("/usr/share/tesseract/tessdata/"),  # on Fedora
        Path("/usr/share/tesseract-ocr/tessdata/"),  # ? (documented)
        Path("/usr/share/tesseract-ocr/4.00/tessdata/"),  # on Debian Bullseye
        Path("/usr/share/tesseract-ocr/5/tessdata/"),  # on Debian Trixie
    ]

    for dir in tessdata_dirs:
        if dir.is_dir():
            return dir

    raise RuntimeError("Tesseract language data are not installed in the system")


def get_version() -> str:
    try:
        with open(get_resource_path("version.txt")) as f:
        with get_resource_path("version.txt").open() as f:
            version = f.read().strip()
    except FileNotFoundError:
        # In dev mode, in Windows, get_resource_path doesn't work properly for the container, but luckily

@@ -97,3 +120,13 @@ def replace_control_chars(untrusted_str: str, keep_newlines: bool = False) -> str:
        else:
            sanitized_str += "�"
    return sanitized_str


def format_exception(e: Exception) -> str:
    # The signature of traceback.format_exception has changed in python 3.10
    if sys.version_info < (3, 10):
        output = traceback.format_exception(*sys.exc_info())
    else:
        output = traceback.format_exception(e)

    return "".join(output)
debian/changelog (vendored, new file)
@@ -0,0 +1,29 @@
dangerzone (0.9.0) unstable; urgency=low

  * Released Dangerzone 0.9.0

 -- Freedom of the Press Foundation <info@freedom.press>  Mon, 31 Mar 2025 15:57:18 +0300

dangerzone (0.8.1) unstable; urgency=low

  * Released Dangerzone 0.8.1

 -- Freedom of the Press Foundation <info@freedom.press>  Tue, 22 Dec 2024 22:03:28 +0300

dangerzone (0.8.0) unstable; urgency=low

  * Released Dangerzone 0.8.0

 -- Freedom of the Press Foundation <info@freedom.press>  Tue, 30 Oct 2024 01:56:28 +0300

dangerzone (0.7.1) unstable; urgency=low

  * Released Dangerzone 0.7.1

 -- Freedom of the Press Foundation <info@freedom.press>  Tue, 1 Oct 2024 17:02:28 +0300

dangerzone (0.7.0) unstable; urgency=low

  * Removed stdeb in favor of direct debian packaging tools

 -- Freedom of the Press Foundation <info@freedom.press>  Tue, 27 Aug 2024 14:39:28 +0200

debian/compat (vendored, new file)
@@ -0,0 +1 @@
10

debian/control (vendored, new file)
@@ -0,0 +1,15 @@
Source: dangerzone
Maintainer: Freedom of the Press Foundation <info@freedom.press>
Section: python
Priority: optional
Build-Depends: dh-python, python3-setuptools, python3, dpkg-dev, debhelper (>= 9)
Standards-Version: 4.5.1
Homepage: https://github.com/freedomofpress/dangerzone
Rules-Requires-Root: no

Package: dangerzone
Architecture: any
Depends: ${misc:Depends}, podman, python3, python3-pyside2.qtcore, python3-pyside2.qtgui, python3-pyside2.qtwidgets, python3-pyside2.qtsvg, python3-platformdirs | python3-appdirs, python3-click, python3-xdg, python3-colorama, python3-requests, python3-markdown, python3-packaging, tesseract-ocr-all
Description: Take potentially dangerous PDFs, office documents, or images
 Dangerzone is an open source desktop application that takes potentially dangerous PDFs, office documents, or images and converts them to safe PDFs. It uses disposable VMs on Qubes OS, or container technology in other OSes, to convert the documents within a secure sandbox.
 .

debian/copyright (vendored, new file)
@@ -0,0 +1,8 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: dangerzone
Source: https://github.com/freedomofpress/dangerzone

Files: *
Copyright: 2020-2021 First Look Media
           2022- Freedom of the Press Foundation, and Dangerzone contributors
License: AGPL-3.0-or-later

debian/rules (vendored, new executable file)
@@ -0,0 +1,13 @@
#!/usr/bin/make -f
export PYBUILD_NAME=dangerzone
export DEB_BUILD_OPTIONS=nocheck
export PYBUILD_INSTALL_ARGS=--install-lib=/usr/lib/python3/dist-packages
export PYTHONDONTWRITEBYTECODE=1
export DH_VERBOSE=1

%:
	dh $@ --with python3 --buildsystem=pybuild

override_dh_builddeb:
	./install/linux/debian-vendor-pymupdf.py --dest debian/dangerzone/usr/lib/python3/dist-packages/dangerzone/vendor/
	dh_builddeb $@

debian/source/format (vendored, new file)
@@ -0,0 +1 @@
3.0 (native)

debian/source/options (vendored, new file)
@@ -0,0 +1,7 @@
compression = "gzip"
tar-ignore = "dev_scripts"
tar-ignore = ".*"
tar-ignore = "__pycache__"
# Ignore the 'share/tessdata' dir, since it slows down the process, and we
# install Tesseract data via Debian packages anyway.
tar-ignore = "share/tessdata"

dev_scripts/containers.conf (new file)
@@ -0,0 +1,3 @@
[engine]
cgroup_manager="cgroupfs"
events_logger="file"
@@ -1,62 +1,33 @@
#!/usr/bin/env python3

import argparse
import hashlib
import os
import pathlib
import platform
import shutil
import subprocess
import sys
import urllib.request
from datetime import date

DEFAULT_GUI = True
DEFAULT_USER = "user"
DEFAULT_DRY = False
DEFAULT_DEV = False
DEFAULT_SHOW_DOCKERFILE = False
DEFAULT_DOWNLOAD_PYSIDE6 = False

PYSIDE6_VERSION = "6.7.1"
PYSIDE6_RPM = "python3-pyside6-{pyside6_version}-1.fc{fedora_version}.x86_64.rpm"
PYSIDE6_URL = (
    "https://packages.freedom.press/yum-tools-prod/dangerzone/f{fedora_version}/%s"
    % PYSIDE6_RPM
)

PYSIDE6_DL_MESSAGE = """\
Downloading PySide6 RPM from:

    {pyside6_url}

into the following local path:

    {pyside6_local_path}

The RPM is over 100 MB, so this operation may take a while...
"""

PYSIDE6_NOT_FOUND_ERROR = """\
The following package is not present in your system:

    {pyside6_local_path}

You can build it locally and copy it in the expected path, following the instructions
in:

    https://github.com/freedomofpress/python3-pyside6-rpm

Alternatively, you can rerun the command adding the '--download-pyside6' flag, which
will download it from:

    {pyside6_url}
"""

# The Linux distributions that we currently support.
# FIXME: Add a version mapping to avoid mistakes.
# FIXME: Maybe create an enum for these values.
DISTROS = ["debian", "fedora", "ubuntu"]
CONTAINER_RUNTIMES = ["podman", "docker"]
IMAGE_NAME_BUILD_DEV_FMT = "dangerzone.rocks/build/{distro}:{version}"
IMAGE_NAME_BUILD_FMT = "dangerzone.rocks/{distro}:{version}"
IMAGES_REGISTRY = "ghcr.io/freedomofpress/"
IMAGE_NAME_BUILD_DEV_FMT = (
    IMAGES_REGISTRY + "v2/dangerzone/build-dev/{distro}-{version}:{date}-{hash}"
)
IMAGE_NAME_BUILD_ENDUSER_FMT = (
    IMAGES_REGISTRY + "v2/dangerzone/end-user/{distro}-{version}:{date}-{hash}"
)

EPILOG = """\
Examples:

@@ -89,24 +60,6 @@ Run Dangerzone in the end-user environment:

"""

# NOTE: For Ubuntu 20.04 specifically, we need to install some extra deps, mainly for
# Podman. This needs to take place both in our dev and end-user environment. See the
# corresponding note in our Installation section:
#
# https://github.com/freedomofpress/dangerzone/blob/main/INSTALL.md#ubuntu-debian
DOCKERFILE_UBUNTU_2004_DEPS = r"""
ARG DEBIAN_FRONTEND=noninteractive

RUN apt-get update \
    && apt-get install -y python-all python3.9 curl wget gnupg2 \
    && rm -rf /var/lib/apt/lists/*
RUN . /etc/os-release \
    && sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_$VERSION_ID/ /' \
        > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" \
    && wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_$VERSION_ID/Release.key -O- \
    | apt-key add -
"""

# XXX: overcome the fact that ubuntu images (starting on 23.04) ship with the 'ubuntu'
# user by default https://bugs.launchpad.net/cloud-images/+bug/2005129
# Related issue https://github.com/freedomofpress/dangerzone/pull/461

@@ -138,35 +91,30 @@ RUN apt-get update \
    && apt-get install -y --no-install-recommends podman uidmap slirp4netns \
    && rm -rf /var/lib/apt/lists/*
RUN apt-get update \
    && apt-get install -y --no-install-recommends dh-python make build-essential \
        git fakeroot {qt_deps} pipx python3 python3-dev python3-venv python3-stdeb \
        python3-all \
    && apt-get install -y passt || echo "Skipping installation of passt package" \
    && rm -rf /var/lib/apt/lists/*
# NOTE: `pipx install poetry` fails on Ubuntu Focal, when installed through APT. By
# installing the latest version, we sidestep this issue.
RUN bash -c 'if [[ "$(pipx --version)" < "1" ]]; then \
    apt-get update \
    && apt-get remove -y pipx \
    && apt-get install -y --no-install-recommends python3-pip \
    && pip install pipx \
    && rm -rf /var/lib/apt/lists/*; \
    else true; fi'
RUN apt-get update \
    && apt-get install -y --no-install-recommends mupdf \
    && apt-get install -y --no-install-recommends dh-python make build-essential \
        git {qt_deps} pipx python3 python3-pip python3-venv dpkg-dev debhelper python3-setuptools \
        python3-dev \
    && rm -rf /var/lib/apt/lists/*
RUN pipx install poetry
RUN apt-get update \
    && apt-get install -y --no-install-recommends mupdf thunar \
    && rm -rf /var/lib/apt/lists/*
"""

# FIXME: Install Poetry on Fedora via package manager.
DOCKERFILE_BUILD_DEV_FEDORA_DEPS = r"""
RUN dnf install -y git rpm-build podman python3 python3-devel python3-poetry-core \
    pipx make qt6-qtbase-gui \
    pipx make qt6-qtbase-gui gcc gcc-c++ \
    && dnf clean all

# FIXME: Drop this fix after it's resolved upstream.
# See https://github.com/freedomofpress/dangerzone/issues/286#issuecomment-1347149783
RUN rpm --restore shadow-utils

RUN dnf install -y mupdf && dnf clean all
RUN dnf install -y mupdf thunar && dnf clean all
"""

# The Dockerfile for building a development environment for Dangerzone. Parts of the

@@ -202,6 +150,7 @@ COPY storage.conf /home/user/.config/containers
# FIXME: pipx install poetry does not work for Ubuntu Focal.
ENV PATH="$PATH:/home/user/.local/bin"
RUN pipx install poetry
RUN pipx inject poetry poetry-plugin-export

COPY pyproject.toml poetry.lock /home/user/dangerzone/
RUN cd /home/user/dangerzone && poetry --no-ansi install

@@ -210,17 +159,12 @@ RUN cd /home/user/dangerzone && poetry --no-ansi install
DOCKERFILE_BUILD_DEBIAN_DEPS = r"""
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
    && apt-get install -y --no-install-recommends mupdf \
    && apt-get install -y --no-install-recommends mupdf thunar \
    && rm -rf /var/lib/apt/lists/*
"""

DOCKERFILE_BUILD_FEDORA_39_DEPS = r"""
COPY {pyside6_rpm} /tmp/pyside6.rpm
RUN dnf install -y /tmp/pyside6.rpm
"""

DOCKERFILE_BUILD_FEDORA_DEPS = r"""
RUN dnf install -y mupdf && dnf clean all
RUN dnf install -y mupdf thunar && dnf clean all

# FIXME: Drop this fix after it's resolved upstream.
# See https://github.com/freedomofpress/dangerzone/issues/286#issuecomment-1347149783

@@ -273,9 +217,27 @@ def git_root():
    return pathlib.Path(path)


def user_data():
    """Get the user data dir (which differs across OSes)."""
    home = pathlib.Path.home()
    system = platform.system()

    if system == "Windows":
        return home / "AppData" / "Local"
    elif system == "Linux":
        return home / ".local" / "share"
    elif system == "Darwin":
        return home / "Library" / "Application Support"


def dz_dev_root():
    """Get the directory where we will store dangerzone-dev related files"""
    return user_data() / "dangerzone-dev"


def distro_root(distro, version):
    """Get the root directory for the specific Linux environment."""
    return git_root() / f"dev_scripts/envs/{distro}/{version}"
    return dz_dev_root() / "envs" / distro / version


def distro_state(distro, version):

@@ -288,14 +250,46 @@ def distro_build(distro, version):
    return distro_root(distro, version) / "build"


def image_name_build(distro, version):
def get_current_date():
    return date.today().strftime("%Y-%m-%d")


def get_build_dir_sources(distro, version):
    """Return the files needed to build an image."""
    sources = [
        git_root() / "pyproject.toml",
        git_root() / "poetry.lock",
        git_root() / "dev_scripts" / "env.py",
        git_root() / "dev_scripts" / "storage.conf",
        git_root() / "dev_scripts" / "containers.conf",
    ]

    if distro == "ubuntu" and version in ("22.04", "jammy"):
        sources.extend(
            [
                git_root() / "dev_scripts" / "apt-tools-prod.pref",
                git_root() / "dev_scripts" / "apt-tools-prod.sources",
            ]
        )
    return sources


def image_name_build_dev(distro, version):
    """Get the container image for the dev variant of a Dangerzone environment."""
    return IMAGE_NAME_BUILD_DEV_FMT.format(distro=distro, version=version)
    hash = hash_files(get_build_dir_sources(distro, version))

    return IMAGE_NAME_BUILD_DEV_FMT.format(
        distro=distro, version=version, hash=hash, date=get_current_date()
    )


def image_name_install(distro, version):
    """Get the container image for the Dangerzone environment."""
    return IMAGE_NAME_BUILD_FMT.format(distro=distro, version=version)
def image_name_build_enduser(distro, version):
    """Get the container image for the Dangerzone end-user environment."""

    hash = hash_files(get_files_in("install/linux", "debian"))
    return IMAGE_NAME_BUILD_ENDUSER_FMT.format(
        distro=distro, version=version, hash=hash, date=get_current_date()
    )


def dz_version():

@@ -304,71 +298,23 @@ def dz_version():
        return f.read().strip()


class PySide6Manager:
    """Provision PySide6 RPMs in our Dangerzone environments.
def hash_files(file_paths: list[pathlib.Path]) -> str:
    """Returns the hash value of a list of files using the sha256 hashing algorithm."""
    hash_obj = hashlib.new("sha256")
    for path in file_paths:
        with open(path, "rb") as file:
            file_data = file.read()
            hash_obj.update(file_data)

    This class holds all the logic around checking and downloading PySide RPMs. It can
    check if the required RPM version is present under "/dist", and optionally download
    it.
    """
    return hash_obj.hexdigest()

    def __init__(self, distro_name, distro_version):
        if distro_name != "fedora":
            raise RuntimeError("Managing PySide6 RPMs is available only in Fedora")
        self.distro_name = distro_name
        self.distro_version = distro_version

    @property
    def version(self):
        """The version of the PySide6 RPM."""
        return PYSIDE6_VERSION

    @property
    def rpm_name(self):
        """The name of the PySide6 RPM."""
        return PYSIDE6_RPM.format(
            pyside6_version=self.version, fedora_version=self.distro_version
        )

    @property
    def rpm_url(self):
        """The URL of the PySide6 RPM, as hosted in FPF's RPM repo."""
        return PYSIDE6_URL.format(
            pyside6_version=self.version,
            fedora_version=self.distro_version,
        )

    @property
    def rpm_local_path(self):
        """The local path where this script will look for the PySide6 RPM."""
        return git_root() / "dist" / self.rpm_name

    @property
    def is_rpm_present(self):
        """Check if PySide6 RPM is present in the user's system."""
        return self.rpm_local_path.exists()

    def download_rpm(self):
        """Download PySide6 from FPF's RPM repo."""
        print(
            PYSIDE6_DL_MESSAGE.format(
                pyside6_url=self.rpm_url,
                pyside6_local_path=self.rpm_local_path,
            ),
            file=sys.stderr,
        )
        try:
            with urllib.request.urlopen(self.rpm_url) as r, open(
                self.rpm_local_path, "wb"
            ) as f:
                shutil.copyfileobj(r, f)
        except:
            # NOTE: We purposefully catch all exceptions, since we want to catch Ctrl-C
            # as well.
            print("Download interrupted, removing file", file=sys.stderr)
            self.rpm_local_path.unlink()
            raise
        print("PySide6 was downloaded successfully", file=sys.stderr)
def get_files_in(*folders: list[str]) -> list[pathlib.Path]:
    """Return the list of all files present in the given folders"""
    files = []
    for folder in folders:
        files.extend([p for p in (git_root() / folder).glob("**") if p.is_file()])
    return files
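
These helpers combine into the content-addressed tags used by the new image-name formats: the tag is the build date plus a SHA-256 over the build context, so the image name changes whenever its inputs do. An illustrative use (the distro/version arguments are example values):

```python
sources = get_build_dir_sources("debian", "bookworm")
tag = f"{get_current_date()}-{hash_files(sources)}"
# image_name_build_dev() performs the same computation internally:
print(image_name_build_dev("debian", "bookworm"))
```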


class Env:

@@ -411,6 +357,28 @@ class Env:
        """Create an Env class from CLI arguments"""
        return cls(distro=args.distro, version=args.version, runtime=args.runtime)

    def find_dz_package(self, path, pattern):
        """Get the full path of the Dangerzone package in the specified dir.

        There are times where we don't know the exact name of the Dangerzone package
        that we've built, e.g., because its patch level may have changed.

        Auto-detect the Dangerzone package based on a pattern that a user has provided,
        and fail if there are none, or multiple matches. If there's a single match, then
        return the full path for the package.
        """
        matches = list(path.glob(pattern))
        if len(matches) == 0:
            raise RuntimeError(
                f"Could not find Dangerzone package '{pattern}' in '{path}'"
            )
        elif len(matches) > 1:
            raise RuntimeError(
                f"Found more than one match for Dangerzone package '{pattern}' in"
                f" '{path}'"
            )
        return matches[0]
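
An example of what `find_dz_package()` buys us: a build script no longer needs to hard-code the patch level of the package it just produced. The constructor arguments and glob below are illustrative values only:

```python
env = Env(distro="debian", version="bookworm", runtime="podman")
deb = env.find_dz_package(git_root() / "deb_dist", "dangerzone_*_*.deb")
print(deb.name)  # e.g. dangerzone_0.9.0-1_all.deb; raises on zero or >1 matches
```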

    def runtime_run(self, *args):
        """Run a command for a specific container runtime.

@@ -512,13 +480,13 @@ class Env:
            run_cmd += [
                "--hostname",
                "dangerzone-dev",
                image_name_build(self.distro, self.version),
                image_name_build_dev(self.distro, self.version),
            ]
        else:
            run_cmd += [
                "--hostname",
                "dangerzone",
                image_name_install(self.distro, self.version),
                image_name_build_enduser(self.distro, self.version),
            ]

        run_cmd += cmd

@@ -534,8 +502,33 @@ class Env:
        (dist_state / ".bash_history").touch(exist_ok=True)
        self.runtime_run(*run_cmd)

    def build_dev(self, show_dockerfile=DEFAULT_SHOW_DOCKERFILE):
    def pull_image_from_registry(self, image):
        try:
            subprocess.run(self.runtime_cmd + ["pull", image], check=True)
            return True
        except subprocess.CalledProcessError:
            # Do not log an error here, we are just checking if the image exists
            # on the registry.
            return False

    def push_image_to_registry(self, image):
        try:
            subprocess.run(self.runtime_cmd + ["push", image], check=True)
            return True
        except subprocess.CalledProcessError as e:
            print("An error occurred when pushing the image: ", e)
            return False

    def build_dev(self, show_dockerfile=DEFAULT_SHOW_DOCKERFILE, sync=False):
        """Build a Linux environment and install tools for Dangerzone development."""
        image = image_name_build_dev(self.distro, self.version)

        if sync and self.pull_image_from_registry(image):
            print("Image has been pulled from the registry, no need to build it.")
            return
        elif sync:
            print("Image label not in registry, building it")

        if self.distro == "fedora":
            install_deps = DOCKERFILE_BUILD_DEV_FEDORA_DEPS
        else:

@@ -545,12 +538,7 @@ class Env:
            # See https://github.com/freedomofpress/dangerzone/issues/482
            qt_deps = "libqt6gui6 libxcb-cursor0"
            install_deps = DOCKERFILE_BUILD_DEV_DEBIAN_DEPS
            if self.distro == "ubuntu" and self.version in ("20.04", "focal"):
                qt_deps = "libqt5gui5 libxcb-cursor0"  # Ubuntu Focal has only Qt5.
                install_deps = (
                    DOCKERFILE_UBUNTU_2004_DEPS + DOCKERFILE_BUILD_DEV_DEBIAN_DEPS
                )
            elif self.distro == "ubuntu" and self.version in ("22.04", "jammy"):
            if self.distro == "ubuntu" and self.version in ("22.04", "jammy"):
                # Ubuntu Jammy misses a dependency to `libxkbcommon-x11-0`, which we can
                # install indirectly via `qt6-qpa-plugins`.
                qt_deps += " qt6-qpa-plugins"

@@ -560,10 +548,12 @@ class Env:
                    DOCKERFILE_CONMON_UPDATE + DOCKERFILE_BUILD_DEV_DEBIAN_DEPS
                )
            elif self.distro == "ubuntu" and self.version in (
                "23.10",
                "mantic",
                "24.04",
                "noble",
                "24.10",
                "oracular",
                "25.04",
                "plucky",
            ):
                install_deps = (
                    DOCKERFILE_UBUNTU_REM_USER + DOCKERFILE_BUILD_DEV_DEBIAN_DEPS

@@ -585,24 +575,21 @@ class Env:
        os.makedirs(build_dir, exist_ok=True)

        # Populate the build context.
        shutil.copy(git_root() / "pyproject.toml", build_dir)
        shutil.copy(git_root() / "poetry.lock", build_dir)
        shutil.copy(git_root() / "dev_scripts" / "storage.conf", build_dir)
        if self.distro == "ubuntu" and self.version in ("22.04", "jammy"):
            shutil.copy(git_root() / "dev_scripts" / "apt-tools-prod.pref", build_dir)
            shutil.copy(
                git_root() / "dev_scripts" / "apt-tools-prod.sources", build_dir
            )
        for source in get_build_dir_sources(self.distro, self.version):
            shutil.copy(source, build_dir)

        with open(build_dir / "Dockerfile", mode="w") as f:
            f.write(dockerfile)

        image = image_name_build(self.distro, self.version)
        self.runtime_run("build", "-t", image, build_dir)

        if sync:
            if not self.push_image_to_registry(image):
                print("An error occurred while trying to push to the container registry")

    def build(
        self,
        show_dockerfile=DEFAULT_SHOW_DOCKERFILE,
        download_pyside6=DEFAULT_DOWNLOAD_PYSIDE6,
    ):
        """Build a Linux environment and install Dangerzone in it."""
        build_dir = distro_build(self.distro, self.version)

@@ -610,50 +597,29 @@ class Env:
        version = dz_version()
        if self.distro == "fedora":
            install_deps = DOCKERFILE_BUILD_FEDORA_DEPS
            package = f"dangerzone-{version}-1.fc{self.version}.x86_64.rpm"
            package_src = git_root() / "dist" / package
            package_pattern = f"dangerzone-{version}-*.fc{self.version}.x86_64.rpm"
            package_src = self.find_dz_package(git_root() / "dist", package_pattern)
            package = package_src.name
            package_dst = build_dir / package
            install_cmd = "dnf install -y"

            # NOTE: For Fedora 39+ onward, we check if a PySide6 RPM package exists in
            # the user's system. If not, we either throw an error or download it from
            # FPF's repo, according to the user's choice.
            pyside6 = PySide6Manager(self.distro, self.version)
            if not pyside6.is_rpm_present:
                if download_pyside6:
                    pyside6.download_rpm()
                else:
                    print(
                        PYSIDE6_NOT_FOUND_ERROR.format(
                            pyside6_local_path=pyside6.rpm_local_path,
                            pyside6_url=pyside6.rpm_url,
                        ),
                        file=sys.stderr,
                    )
                    return 1
            shutil.copy(pyside6.rpm_local_path, build_dir / pyside6.rpm_name)
            install_deps = (
                DOCKERFILE_BUILD_FEDORA_DEPS + DOCKERFILE_BUILD_FEDORA_39_DEPS
            ).format(pyside6_rpm=pyside6.rpm_name)
        else:
            install_deps = DOCKERFILE_BUILD_DEBIAN_DEPS
            if self.distro == "ubuntu" and self.version in ("20.04", "focal"):
                install_deps = (
                    DOCKERFILE_UBUNTU_2004_DEPS + DOCKERFILE_BUILD_DEBIAN_DEPS
                )
            elif self.distro == "ubuntu" and self.version in ("22.04", "jammy"):
            if self.distro == "ubuntu" and self.version in ("22.04", "jammy"):
                # Ubuntu Jammy requires a more up-to-date conmon
                # package (see https://github.com/freedomofpress/dangerzone/issues/685)
                install_deps = DOCKERFILE_CONMON_UPDATE + DOCKERFILE_BUILD_DEBIAN_DEPS
            elif self.distro == "ubuntu" and self.version in (
                "23.10",
                "mantic",
                "24.04",
                "noble",
                "24.10",
                "oracular",
                "25.04",
                "plucky",
            ):
                install_deps = DOCKERFILE_UBUNTU_REM_USER + DOCKERFILE_BUILD_DEBIAN_DEPS
            package = f"dangerzone_{version}-1_all.deb"
            package_src = git_root() / "deb_dist" / package
            package_pattern = f"dangerzone_{version}-*_*.deb"
            package_src = self.find_dz_package(git_root() / "deb_dist", package_pattern)
            package = package_src.name
            package_dst = build_dir / package
            install_cmd = "apt-get update && apt-get install -y"

@@ -671,6 +637,7 @@ class Env:
        # Populate the build context.
        shutil.copy(package_src, package_dst)
        shutil.copy(git_root() / "dev_scripts" / "storage.conf", build_dir)
        shutil.copy(git_root() / "dev_scripts" / "containers.conf", build_dir)
        if self.distro == "ubuntu" and self.version in ("22.04", "jammy"):
            shutil.copy(git_root() / "dev_scripts" / "apt-tools-prod.pref", build_dir)
            shutil.copy(

@@ -679,7 +646,7 @@ class Env:
        with open(build_dir / "Dockerfile", mode="w") as f:
            f.write(dockerfile)

        image = image_name_install(self.distro, self.version)
        image = image_name_build_enduser(self.distro, self.version)
        self.runtime_run("build", "-t", image, build_dir)


@@ -698,7 +665,7 @@ def env_run(args):
def env_build_dev(args):
    """Invoke the 'build-dev' command based on the CLI args."""
    env = Env.from_args(args)
    return env.build_dev(show_dockerfile=args.show_dockerfile)
    return env.build_dev(show_dockerfile=args.show_dockerfile, sync=args.sync)


def env_build(args):

@@ -706,7 +673,6 @@ def env_build(args):
    env = Env.from_args(args)
    return env.build(
        show_dockerfile=args.show_dockerfile,
        download_pyside6=args.download_pyside6,
    )


@@ -784,6 +750,12 @@ def parse_args():
        action="store_true",
        help="Do not build, only show the Dockerfile",
    )
    parser_build_dev.add_argument(
        "--sync",
        default=False,
        action="store_true",
        help="Attempt to pull the image, build it if not found and push it to the container registry",
    )

    # Build a development variant of a Dangerzone environment.
    parser_build = subparsers.add_parser(

@@ -797,12 +769,6 @@ def parse_args():
        action="store_true",
        help="Do not build, only show the Dockerfile",
    )
    parser_build.add_argument(
        "--download-pyside6",
        default=DEFAULT_DOWNLOAD_PYSIDE6,
        action="store_true",
        help="Download PySide6 from FPF's RPM repo",
    )

    return parser.parse_args()
dev_scripts/envs/.gitignore (vendored, deleted)
@@ -1,2 +0,0 @@
*
!.gitignore
dev_scripts/generate-release-notes.py (new executable file)
@@ -0,0 +1,254 @@
#!/usr/bin/env python3

import argparse
import asyncio
import re
import sys
from datetime import datetime
from typing import Dict, List, Optional, Tuple

import httpx

REPOSITORY = "https://github.com/freedomofpress/dangerzone/"
TEMPLATE = "- {title} ([#{number}]({url}))"


def parse_version(version: str) -> Tuple[int, int]:
    """Extract major.minor from version string, ignoring patch"""
    match = re.match(r"v?(\d+)\.(\d+)", version)
    if not match:
        raise ValueError(f"Invalid version format: {version}")
    return (int(match.group(1)), int(match.group(2)))


async def get_last_minor_release(
    client: httpx.AsyncClient, owner: str, repo: str
) -> Optional[str]:
    """Get the latest minor release date (ignoring patches)"""
    response = await client.get(f"https://api.github.com/repos/{owner}/{repo}/releases")
    response.raise_for_status()
    releases = response.json()

    if not releases:
        return None

    # Get the latest minor version by comparing major.minor numbers
    current_version = parse_version(releases[0]["tag_name"])
    latest_date = None

    for release in releases:
        try:
            version = parse_version(release["tag_name"])
            if version < current_version:
                latest_date = release["published_at"]
                break
        except ValueError:
            continue

    return latest_date


async def get_issue_details(
    client: httpx.AsyncClient, owner: str, repo: str, issue_number: int
) -> Optional[dict]:
    """Get issue title and number if it exists"""
    response = await client.get(
        f"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}"
    )
    if response.is_success:
        data = response.json()
        return {
            "title": data["title"],
            "number": data["number"],
            "url": data["html_url"],
        }
    return None


def extract_issue_number(pr_body: Optional[str]) -> Optional[int]:
    """Extract issue number from PR body looking for common formats like 'Fixes #123' or 'Closes #123'"""
    if not pr_body:
        return None

    patterns = [
        r"(?:closes|fixes|resolves)\s*#(\d+)",
        r"(?:close|fix|resolve)\s*#(\d+)",
    ]

    for pattern in patterns:
        match = re.search(pattern, pr_body.lower())
        if match:
            return int(match.group(1))

    return None
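
Quick sanity checks for the two helpers above (plain asserts, runnable as-is):

```python
assert parse_version("v0.9.1") == (0, 9)   # patch level is ignored
assert parse_version("0.10.2") == (0, 10)
assert extract_issue_number("Fixes #123 and tidies up") == 123
assert extract_issue_number("closes  #7") == 7  # matching is case-insensitive
assert extract_issue_number(None) is None
```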

async def verify_commit_in_master(
    client: httpx.AsyncClient, owner: str, repo: str, commit_id: str
) -> bool:
    """Verify if a commit exists in master"""
    response = await client.get(
        f"https://api.github.com/repos/{owner}/{repo}/commits/{commit_id}"
    )
    return response.is_success and response.json().get("commit") is not None


async def process_issue_events(
    client: httpx.AsyncClient, owner: str, repo: str, issue: Dict
) -> Optional[Dict]:
    """Process events for a single issue"""
    events_response = await client.get(f"{issue['url']}/events")
    if not events_response.is_success:
        return None

    for event in events_response.json():
        if event["event"] == "closed" and event.get("commit_id"):
            if await verify_commit_in_master(client, owner, repo, event["commit_id"]):
                return {
                    "title": issue["title"],
                    "number": issue["number"],
                    "url": issue["html_url"],
                }
    return None


async def get_closed_issues(
    client: httpx.AsyncClient, owner: str, repo: str, since: str
) -> List[Dict]:
    """Get issues closed by commits to master since the given date"""
    response = await client.get(
        f"https://api.github.com/repos/{owner}/{repo}/issues",
        params={
            "state": "closed",
            "sort": "updated",
            "direction": "desc",
            "since": since,
            "per_page": 100,
        },
    )
    response.raise_for_status()

    tasks = []
    since_date = datetime.strptime(since, "%Y-%m-%dT%H:%M:%SZ")

    for issue in response.json():
        if "pull_request" in issue:
            continue

        closed_at = datetime.strptime(issue["closed_at"], "%Y-%m-%dT%H:%M:%SZ")
        if closed_at <= since_date:
            continue

        tasks.append(process_issue_events(client, owner, repo, issue))

    results = await asyncio.gather(*tasks)
    return [r for r in results if r is not None]


async def process_pull_request(
    client: httpx.AsyncClient,
    owner: str,
    repo: str,
    pr: Dict,
    closed_issues: List[Dict],
) -> Optional[str]:
    """Process a single pull request"""
    issue_number = extract_issue_number(pr.get("body"))
    if issue_number:
        issue = await get_issue_details(client, owner, repo, issue_number)
        if issue:
            if not any(i["number"] == issue["number"] for i in closed_issues):
                return TEMPLATE.format(**issue)
            return None

    return TEMPLATE.format(title=pr["title"], number=pr["number"], url=pr["html_url"])


async def get_changes_since_last_release(
    owner: str, repo: str, token: Optional[str] = None
) -> List[str]:
    headers = {
        "Accept": "application/vnd.github.v3+json",
    }
    if token:
        headers["Authorization"] = f"token {token}"
    else:
        print(
            "Warning: No token provided. API rate limiting may occur.", file=sys.stderr
        )

    async with httpx.AsyncClient(headers=headers, timeout=30.0) as client:
        # Get the date of last minor release
        since = await get_last_minor_release(client, owner, repo)
        if not since:
            return []

        changes = []

        # Get issues closed by commits to master
        closed_issues = await get_closed_issues(client, owner, repo, since)
        changes.extend([TEMPLATE.format(**issue) for issue in closed_issues])

        # Get merged PRs
        response = await client.get(
            f"https://api.github.com/repos/{owner}/{repo}/pulls",
            params={
                "state": "closed",
                "sort": "updated",
                "direction": "desc",
                "per_page": 100,
            },
        )
        response.raise_for_status()

        # Process PRs in parallel
        pr_tasks = []
        for pr in response.json():
            if not pr["merged_at"]:
                continue
            if since and pr["merged_at"] <= since:
                break

            pr_tasks.append(
                process_pull_request(client, owner, repo, pr, closed_issues)
            )

        pr_results = await asyncio.gather(*pr_tasks)
        changes.extend([r for r in pr_results if r is not None])

    return changes


async def main_async():
    parser = argparse.ArgumentParser(description="Generate release notes from GitHub")
    parser.add_argument("--token", "-t", help="the file path to the GitHub API token")
    args = parser.parse_args()

    token = None
    if args.token:
        with open(args.token) as f:
            token = f.read().strip()
    try:
        url_path = REPOSITORY.rstrip("/").split("github.com/")[1]
        owner, repo = url_path.split("/")[-2:]
    except (ValueError, IndexError):
        print("Error: Invalid GitHub URL", file=sys.stderr)
        sys.exit(1)

    try:
        notes = await get_changes_since_last_release(owner, repo, token)
        print("\n".join(notes))
    except httpx.HTTPError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def main():
    asyncio.run(main_async())


if __name__ == "__main__":
    main()
67
dev_scripts/generate-release-tasks.py
Executable file
67
dev_scripts/generate-release-tasks.py
Executable file
|
@ -0,0 +1,67 @@
|
|||
#!/usr/bin/env python3
import pathlib
import subprocess

RELEASE_FILE = "RELEASE.md"
QA_FILE = "QA.md"


def git_root():
    """Get the root directory of the Git repo."""
    # FIXME: Use a Git Python binding for this.
    # FIXME: Make this work if called outside the repo.
    path = (
        subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            check=True,
            stdout=subprocess.PIPE,
        )
        .stdout.decode()
        .strip("\n")
    )
    return pathlib.Path(path)


def extract_checkboxes(filename):
    headers = []
    result = []

    with open(filename, "r") as f:
        lines = f.readlines()

    current_level = 0
    for line in lines:
        line = line.rstrip()

        # If it's a header, store it
        if line.startswith("#"):
            # Count number of # to determine header level
            level = len(line) - len(line.lstrip("#"))
            if level < current_level or not current_level:
                headers.extend(["", line, ""])
                current_level = level
            elif level > current_level:
                continue
            else:
                headers = ["", line, ""]

        # If it's a checkbox
        elif "- [ ]" in line or "- [x]" in line or "- [X]" in line:
            # Print the last header if we haven't already
            if headers:
                result.extend(headers)
                headers = []
                current_level = 0

            # If this is the "Do the QA tasks" line, recursively get QA tasks
            if "Do the QA tasks" in line:
                result.append(line)
                qa_tasks = extract_checkboxes(git_root() / QA_FILE)
                result.append(qa_tasks)
            else:
                result.append(line)
    return "\n".join(result)


if __name__ == "__main__":
    print(extract_checkboxes(git_root() / RELEASE_FILE))
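
The script prints the flattened checklist to stdout, so a typical invocation from the repo root (the output path here is just an example) looks like:

```bash
python3 dev_scripts/generate-release-tasks.py > /tmp/release-checklist.md
```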
@@ -3,28 +3,49 @@
import abc
import argparse
import difflib
import json
import logging
import re
import selectors
import subprocess
import sys
import urllib.request
from pathlib import Path

logger = logging.getLogger(__name__)

PYTHON_VERSION = "3.12"
EOL_PYTHON_URL = "https://endoflife.date/api/python.json"

CONTENT_QA = r"""## QA

To ensure that new releases do not introduce regressions, and support existing
and newer platforms, we have to do the following:
and newer platforms, we have to test that the produced packages work as expected.

Check the following:

- [ ] Make sure that the tip of the `main` branch passes the CI tests.
- [ ] Make sure that the Apple account has a valid application password and has
      agreed to the latest Apple terms (see [macOS release](#macos-release)
      section).

Because it is repetitive, we wrote a script to help with the QA.
It can run the tasks for you, pausing when it needs manual intervention.

You can run it with a command like:

```bash
poetry run ./dev_scripts/qa.py {distro}-{version}
```

### The checklist

- [ ] Create a test build in Windows and make sure it works:
  - [ ] Check if the suggested Python version is still supported.
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Build and run the Dangerzone .exe
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
@@ -33,6 +54,7 @@ and newer platforms, we have to do the following:
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Create and run an app bundle.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
@@ -41,6 +63,7 @@ and newer platforms, we have to do the following:
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Create and run an app bundle.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
@@ -49,18 +72,20 @@ and newer platforms, we have to do the following:
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Create a .deb package and install it system-wide.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in the most recent Fedora platform (Fedora 40 as of
- [ ] Create a test build in the most recent Fedora platform (Fedora 41 as of
      writing this) and make sure it works:
  - [ ] Create a new development environment with Poetry.
  - [ ] Build the container image and ensure the development environment uses
        the new image.
  - [ ] Download the OCR language data using `./install/common/download-tessdata.py`
  - [ ] Run the Dangerzone tests.
  - [ ] Create an .rpm package and install it system-wide.
  - [ ] Test some QA scenarios (see [Scenarios](#Scenarios) below).
- [ ] Create a test build in the most recent Qubes Fedora template (Fedora 39 as
- [ ] Create a test build in the most recent Qubes Fedora template (Fedora 40 as
      of writing this) and make sure it works:
  - [ ] Create a new development environment with Poetry.
  - [ ] Run the Dangerzone tests.
@@ -102,9 +127,9 @@ Close the Dangerzone application and get the container image for that
version. For example:

```
$ docker images dangerzone.rocks/dangerzone:latest
$ docker images dangerzone.rocks/dangerzone
REPOSITORY                   TAG      IMAGE ID      CREATED      SIZE
dangerzone.rocks/dangerzone  latest   <image ID>    <date>       <size>
dangerzone.rocks/dangerzone  <tag>    <image ID>    <date>       <size>
```

Then run the version under QA and ensure that the settings remain changed.
@@ -113,9 +138,9 @@ Afterwards check that new docker image was installed by running the same command
and seeing the following differences:

```
$ docker images dangerzone.rocks/dangerzone:latest
$ docker images dangerzone.rocks/dangerzone
REPOSITORY                   TAG          IMAGE ID        CREATED       SIZE
dangerzone.rocks/dangerzone  latest       <different ID>  <newer date>  <different size>
dangerzone.rocks/dangerzone  <other tag>  <different ID>  <newer date>  <different size>
```

#### 4. Dangerzone successfully installs the container image
@@ -158,6 +183,10 @@ Run Dangerzone against a list of documents, and tick all options. Ensure that:
Run Dangerzone against a set of documents that you drag-n-drop. Files should be
added and conversion should run without issue.

> [!TIP]
> On our end-user container environments for Linux, we can start a file manager
> with `thunar &`.

#### 9. Dangerzone CLI succeeds in converting multiple documents

_(Only for Windows and Linux)_
@@ -222,32 +251,9 @@ Install dependencies:
</table>


<table>
  <tr>
    <td>
<details>
  <summary><i>:memo: Expand this section if you are on Ubuntu 20.04 (Focal).</i></summary>
  </br>

  The default Python version that ships with Ubuntu Focal (3.8) is not
  compatible with PySide6, which requires Python 3.9 or greater.

  You can install Python 3.9 using the `python3.9` package.

  ```bash
  sudo apt install -y python3.9
  ```

  Poetry will automatically pick up the correct version when running.
</details>
    </td>
  </tr>
</table>


```sh
sudo apt install -y podman dh-python build-essential fakeroot make libqt6gui6 \
    pipx python3 python3-dev python3-stdeb python3-all
sudo apt install -y podman dh-python build-essential make libqt6gui6 \
    pipx python3 python3-dev
```

Install Poetry using `pipx` (recommended) and add it to your `$PATH`:
@@ -258,6 +264,7 @@ methods](https://python-poetry.org/docs/#installation))_

```sh
pipx ensurepath
pipx install poetry
pipx inject poetry poetry-plugin-export
```

After this, restart the terminal window, for the `poetry` command to be in your
@@ -285,6 +292,12 @@ Build the latest container:
python3 ./install/common/build-image.py
```

Download the OCR language data:

```sh
python3 ./install/common/download-tessdata.py
```

Run from source tree:

```sh
@@ -318,6 +331,7 @@ Install Poetry using `pipx`:

```sh
pipx install poetry
pipx inject poetry
```

Clone this repository:
@@ -341,6 +355,12 @@ Build the latest container:
python3 ./install/common/build-image.py
```

Download the OCR language data:

```sh
python3 ./install/common/download-tessdata.py
```

Run from source tree:

```sh
@@ -399,6 +419,12 @@ Build the dangerzone container image:
python3 .\install\common\build-image.py
```

Download the OCR language data:

```sh
python3 .\install\common\download-tessdata.py
```

After that you can launch dangerzone during development with:

```
@@ -503,7 +529,7 @@ class Reference:
        # Convert spaces to dashes
        anchor = anchor.replace(" ", "-")
        # Remove non-alphanumeric (except dash and underscore)
        anchor = re.sub("[^a-zA-Z\-_]", "", anchor)
        anchor = re.sub("[^a-zA-Z-_]", "", anchor)

        return anchor
@@ -522,8 +548,8 @@ class QABase(abc.ABC):

    platforms = {}

    REF_QA = Reference("RELEASE.md", content=CONTENT_QA)
    REF_QA_SCENARIOS = Reference("RELEASE.md", content=CONTENT_QA_SCENARIOS)
    REF_QA = Reference("QA.md", content=CONTENT_QA)
    REF_QA_SCENARIOS = Reference("QA.md", content=CONTENT_QA_SCENARIOS)

    # The following class method is available since Python 3.6. For more details, see:
    # https://docs.python.org/3.6/whatsnew/3.6.html#pep-487-simpler-customization-of-class-creation
@@ -732,6 +758,10 @@ class QABase(abc.ABC):
            self.prompt("Does it pass?", choices=["y", "n"])
        logger.info("Successfully completed QA scenarios")

    @task("Download Tesseract data", auto=True)
    def download_tessdata(self):
        self.run("python", str(Path("install", "common", "download-tessdata.py")))

    @classmethod
    @abc.abstractmethod
    def get_id(cls):
@@ -758,6 +788,40 @@ class QAWindows(QABase):
        while msvcrt.kbhit():
            msvcrt.getch()

    def get_latest_python_release(self):
        with urllib.request.urlopen(EOL_PYTHON_URL) as f:
            resp = f.read()
            releases = json.loads(resp)
            for release in releases:
                if release["cycle"] == PYTHON_VERSION:
                    # Transform the Python version string (e.g., "3.12.7") into a list
                    # (e.g., [3, 12, 7]), and return it
                    return [int(num) for num in release["latest"].split(".")]

        raise RuntimeError(
            f"Could not find a Python release for version {PYTHON_VERSION}"
        )

    @QABase.task(
        f"Install the latest version of Python {PYTHON_VERSION}", ref=REF_BUILD
    )
    def install_python(self):
        logger.info("Getting latest Python release")
        try:
            latest_version = self.get_latest_python_release()
        except Exception:
            logger.error("Could not verify that the latest Python version is installed")
            return  # latest_version is unset at this point, so skip the comparison below

        cur_version = list(sys.version_info[:3])
        if latest_version > cur_version:
            self.prompt(
                f"You need to install the latest Python version ({latest_version})"
            )
        elif latest_version == cur_version:
            logger.info(
                f"Verified that the latest Python version ({latest_version}) is installed"
            )

    @QABase.task("Install and Run Docker Desktop", ref=REF_BUILD)
    def install_docker(self):
        logger.info("Checking if Docker Desktop is installed and running")
@@ -772,7 +836,7 @@ class QAWindows(QABase):
    )
    def install_poetry(self):
        self.run("python", "-m", "pip", "install", "poetry")
        self.run("poetry", "install")
        self.run("poetry", "sync")

    @QABase.task("Build Dangerzone container image", ref=REF_BUILD, auto=True)
    def build_image(self):
@@ -794,9 +858,11 @@ class QAWindows(QABase):
        return "windows"

    def start(self):
        self.install_python()
        self.install_docker()
        self.install_poetry()
        self.build_image()
        self.download_tessdata()
        self.run_tests()
        self.build_dangerzone_exe()

@@ -871,7 +937,6 @@ class QALinux(QABase):
            "--version",
            self.VERSION,
            "build",
            "--download-pyside6",
        )

    @classmethod
@@ -889,6 +954,7 @@ class QALinux(QABase):
    def start(self):
        self.build_dev_image()
        self.build_container_image()
        self.download_tessdata()
        self.run_tests()
        self.build_package()
        self.build_qa_image()
@@ -924,26 +990,26 @@ class QADebianTrixie(QADebianBased):
    VERSION = "trixie"


class QAUbuntu2004(QADebianBased):
    DISTRO = "ubuntu"
    VERSION = "20.04"


class QAUbuntu2204(QADebianBased):
    DISTRO = "ubuntu"
    VERSION = "22.04"


class QAUbuntu2310(QADebianBased):
    DISTRO = "ubuntu"
    VERSION = "23.10"


class QAUbuntu2404(QADebianBased):
    DISTRO = "ubuntu"
    VERSION = "24.04"


class QAUbuntu2410(QADebianBased):
    DISTRO = "ubuntu"
    VERSION = "24.10"


class QAUbuntu2504(QADebianBased):
    DISTRO = "ubuntu"
    VERSION = "25.04"


class QAFedora(QALinux):
    """Base class for Fedora distros.

@@ -961,14 +1027,18 @@ class QAFedora(QALinux):
    )


class QAFedora42(QAFedora):
    VERSION = "42"


class QAFedora41(QAFedora):
    VERSION = "41"


class QAFedora40(QAFedora):
    VERSION = "40"


class QAFedora39(QAFedora):
    VERSION = "39"


def parse_args():
    parser = argparse.ArgumentParser(
        prog=sys.argv[0],
680  dev_scripts/repro-build.py  (Executable file)

@@ -0,0 +1,680 @@
#!/usr/bin/env python3

import argparse
import datetime
import hashlib
import json
import logging
import os
import pprint
import shlex
import shutil
import subprocess
import sys
import tarfile
from pathlib import Path

logger = logging.getLogger(__name__)

MEDIA_TYPE_INDEX_V1_JSON = "application/vnd.oci.image.index.v1+json"
MEDIA_TYPE_MANIFEST_V1_JSON = "application/vnd.oci.image.manifest.v1+json"

ENV_RUNTIME = "REPRO_RUNTIME"
ENV_DATETIME = "REPRO_DATETIME"
ENV_SDE = "REPRO_SOURCE_DATE_EPOCH"
ENV_CACHE = "REPRO_CACHE"
ENV_BUILDKIT = "REPRO_BUILDKIT_IMAGE"
ENV_ROOTLESS = "REPRO_ROOTLESS"

DEFAULT_BUILDKIT_IMAGE = "moby/buildkit:v0.19.0@sha256:14aa1b4dd92ea0a4cd03a54d0c6079046ea98cd0c0ae6176bdd7036ba370cbbe"
DEFAULT_BUILDKIT_IMAGE_ROOTLESS = "moby/buildkit:v0.19.0-rootless@sha256:e901cffdad753892a7c3afb8b9972549fca02c73888cf340c91ed801fdd96d71"

MSG_BUILD_CTX = """Build environment:
- Container runtime: {runtime}
- BuildKit image: {buildkit_image}
- Rootless support: {rootless}
- Caching enabled: {use_cache}
- Build context: {context}
- Dockerfile: {dockerfile}
- Output: {output}

Build parameters:
- SOURCE_DATE_EPOCH: {sde}
- Build args: {build_args}
- Tag: {tag}
- Platform: {platform}

Podman-only arguments:
- BuildKit arguments: {buildkit_args}

Docker-only arguments:
- Docker Buildx arguments: {buildx_args}
"""


def pretty_error(obj: dict, msg: str):
    raise Exception(f"{msg}\n{pprint.pformat(obj)}")


def get_key(obj: dict, key: str) -> object:
    if key not in obj:
        pretty_error(obj, f"Could not find key '{key}' in the dictionary:")
    return obj[key]


def run(cmd, dry=False, check=True):
    action = "Would have run" if dry else "Running"
    logger.debug(f"{action}: {shlex.join(cmd)}")
    if not dry:
        subprocess.run(cmd, check=check)


def snip_contents(contents: str, num: int) -> str:
    contents = contents.replace("\n", "")
    if len(contents) > num:
        return (
            contents[:num]
            + f" [... {len(contents) - num} characters omitted."
            + " Pass --show-contents to print them in their entirety]"
        )
    return contents


def detect_container_runtime() -> str:
    """Auto-detect the installed container runtime in the system."""
    if shutil.which("docker"):
        return "docker"
    elif shutil.which("podman"):
        return "podman"
    else:
        return None


def parse_runtime(args) -> str:
    if args.runtime is not None:
        return args.runtime

    runtime = os.environ.get(ENV_RUNTIME)
    if runtime is None:
        raise RuntimeError("No container runtime detected in your system")
    if runtime not in ("docker", "podman"):
        raise RuntimeError(
            "Only 'docker' or 'podman' container runtimes"
            " are currently supported by this script"
        )
    return runtime


def parse_use_cache(args) -> bool:
    if args.no_cache:
        return False
    return bool(int(os.environ.get(ENV_CACHE, "1")))


def parse_rootless(args, runtime: str) -> bool:
    rootless = args.rootless or bool(int(os.environ.get(ENV_ROOTLESS, "0")))
    if runtime != "podman" and rootless:
        raise RuntimeError("Rootless mode is only supported with Podman runtime")
    return rootless


def parse_sde(args) -> str:
    sde = os.environ.get(ENV_SDE, args.source_date_epoch)
    dt = os.environ.get(ENV_DATETIME, args.datetime)

    if (sde is not None and dt is not None) or (sde is None and dt is None):
        raise RuntimeError("You need to pass either a source date epoch or a datetime")

    if sde is not None:
        return str(sde)

    if dt is not None:
        d = datetime.datetime.fromisoformat(dt)
        # If the datetime is naive, assume its timezone is UTC. The check is
        # taken from:
        # https://docs.python.org/3/library/datetime.html#determining-if-an-object-is-aware-or-naive
        if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
            d = d.replace(tzinfo=datetime.timezone.utc)
        return int(d.timestamp())


def parse_buildkit_image(args, rootless: bool, runtime: str) -> str:
    default = DEFAULT_BUILDKIT_IMAGE_ROOTLESS if rootless else DEFAULT_BUILDKIT_IMAGE
    img = args.buildkit_image or os.environ.get(ENV_BUILDKIT, default)

    if runtime == "podman" and not img.startswith("docker.io/"):
        img = "docker.io/" + img

    return img


def parse_build_args(args) -> str:
    return args.build_arg or []


def parse_buildkit_args(args, runtime: str) -> str:
    if not args.buildkit_args:
        return []

    if runtime != "podman":
        raise RuntimeError(
            "Cannot specify BuildKit arguments when not using the Podman runtime"
        )

    return shlex.split(args.buildkit_args)


def parse_buildx_args(args, runtime: str) -> str:
    if not args.buildx_args:
        return []

    if runtime != "docker":
        raise RuntimeError(
            "Cannot specify Docker Buildx arguments using the Podman runtime"
        )

    return shlex.split(args.buildx_args)


def parse_image_digest(args) -> str | None:
    if not args.expected_image_digest:
        return None
    parsed = args.expected_image_digest.split(":", 1)
    if len(parsed) == 1:
        return parsed[0]
    else:
        return parsed[1]


def parse_path(path: str | None) -> str | None:
    return path and str(Path(path).absolute())


##########################
# OCI parsing logic
#
# Compatible with:
# * https://github.com/opencontainers/image-spec/blob/main/image-layout.md


def oci_print_info(parsed: dict, full: bool) -> None:
    print(f"The OCI tarball contains an index and {len(parsed) - 1} manifest(s):")
    print()
    print(f"Image digest: {parsed[1]['digest']}")
    for i, info in enumerate(parsed):
        print()
        if i == 0:
            print(f"Index ({info['path']}):")
        else:
            print(f"Manifest {i} ({info['path']}):")
        print(f"  Digest: {info['digest']}")
        print(f"  Media type: {info['media_type']}")
        print(f"  Platform: {info['platform'] or '-'}")
        contents = info["contents"] if full else snip_contents(info["contents"], 600)
        print(f"  Contents: {contents}")
    print()


def oci_normalize_path(path):
    if path.startswith("sha256:"):
        hash_algo, checksum = path.split(":")
        path = f"blobs/{hash_algo}/{checksum}"
    return path


def oci_get_file_from_tarball(tar: tarfile.TarFile, path: str) -> dict:
    """Get file from an OCI tarball.

    If the filename cannot be found, search again by prefixing it with "./", since we
    have encountered path names in OCI tarballs prefixed with "./".
    """
    try:
        return tar.extractfile(path).read().decode()
    except KeyError:
        if not path.startswith("./") and not path.startswith("/"):
            path = "./" + path
        try:
            return tar.extractfile(path).read().decode()
        except KeyError:
            # Do not raise here, so that we can raise the original exception below.
            pass
        raise


def oci_parse_manifest(tar: tarfile.TarFile, path: str, platform: dict | None) -> dict:
    """Parse manifest information in JSON format.

    Interestingly, the platform info for a manifest is not included in the
    manifest itself, but in the descriptor that points to it. So, we have to
    carry it from the previous manifest and include in the info here.
    """
    path = oci_normalize_path(path)
    contents = oci_get_file_from_tarball(tar, path)
    digest = "sha256:" + hashlib.sha256(contents.encode()).hexdigest()
    contents_dict = json.loads(contents)
    media_type = get_key(contents_dict, "mediaType")
    manifests = contents_dict.get("manifests", [])

    if platform:
        os = get_key(platform, "os")
        arch = get_key(platform, "architecture")
        platform = f"{os}/{arch}"

    return {
        "path": path,
        "contents": contents,
        "digest": digest,
        "media_type": media_type,
        "platform": platform,
        "manifests": manifests,
    }


def oci_parse_manifests_dfs(
    tar: tarfile.TarFile, path: str, parsed: list, platform: dict | None = None
) -> None:
    info = oci_parse_manifest(tar, path, platform)
    parsed.append(info)
    for m in info["manifests"]:
        oci_parse_manifests_dfs(tar, m["digest"], parsed, m.get("platform"))


def oci_parse_tarball(path: Path) -> dict:
    parsed = []
    with tarfile.TarFile.open(path) as tar:
        oci_parse_manifests_dfs(tar, "index.json", parsed)
    return parsed


##########################
# Image building logic


def podman_build(
    context: str,
    dockerfile: str | None,
    tag: str | None,
    buildkit_image: str,
    sde: int,
    rootless: bool,
    use_cache: bool,
    output: Path,
    build_args: list,
    platform: str,
    buildkit_args: list,
    dry: bool,
):
    rootless_args = []
    rootful_args = []
    if rootless:
        rootless_args = [
            "--userns",
            "keep-id:uid=1000,gid=1000",
            "--security-opt",
            "seccomp=unconfined",
            "--security-opt",
            "apparmor=unconfined",
            "-e",
            "BUILDKITD_FLAGS=--oci-worker-no-process-sandbox",
        ]
    else:
        rootful_args = ["--privileged"]

    dockerfile_args_podman = []
    dockerfile_args_buildkit = []
    if dockerfile:
        dockerfile_args_podman = ["-v", f"{dockerfile}:/tmp/Dockerfile"]
        dockerfile_args_buildkit = ["--local", "dockerfile=/tmp"]
    else:
        dockerfile_args_buildkit = ["--local", "dockerfile=/tmp/work"]

    tag_args = f",name={tag}" if tag else ""

    cache_args = []
    if use_cache:
        cache_args = [
            "--export-cache",
            "type=local,mode=max,dest=/tmp/cache",
            "--import-cache",
            "type=local,src=/tmp/cache",
        ]

    _build_args = []
    for arg in build_args:
        _build_args.append("--opt")
        _build_args.append(f"build-arg:{arg}")
    platform_args = ["--opt", f"platform={platform}"] if platform else []

    cmd = [
        "podman",
        "run",
        "-it",
        "--rm",
        "-v",
        "buildkit_cache:/tmp/cache",
        "-v",
        f"{output.parent}:/tmp/image",
        "-v",
        f"{context}:/tmp/work",
        "--entrypoint",
        "buildctl-daemonless.sh",
        *rootless_args,
        *rootful_args,
        *dockerfile_args_podman,
        buildkit_image,
        "build",
        "--frontend",
        "dockerfile.v0",
        "--local",
        "context=/tmp/work",
        "--opt",
        f"build-arg:SOURCE_DATE_EPOCH={sde}",
        *_build_args,
        "--output",
        f"type=docker,dest=/tmp/image/{output.name},rewrite-timestamp=true{tag_args}",
        *cache_args,
        *dockerfile_args_buildkit,
        *platform_args,
        *buildkit_args,
    ]

    run(cmd, dry)


def docker_build(
    context: str,
    dockerfile: str | None,
    tag: str | None,
    buildkit_image: str,
    sde: int,
    use_cache: bool,
    output: Path,
    build_args: list,
    platform: str,
    buildx_args: list,
    dry: bool,
):
    builder_id = hashlib.sha256(buildkit_image.encode()).hexdigest()
    builder_name = f"repro-build-{builder_id}"
    tag_args = ["-t", tag] if tag else []
    cache_args = [] if use_cache else ["--no-cache", "--pull"]

    cmd = [
        "docker",
        "buildx",
        "create",
        "--name",
        builder_name,
        "--driver-opt",
        f"image={buildkit_image}",
    ]
    run(cmd, dry, check=False)

    dockerfile_args = ["-f", dockerfile] if dockerfile else []
    _build_args = []
    for arg in build_args:
        _build_args.append("--build-arg")
        _build_args.append(arg)
    platform_args = ["--platform", platform] if platform else []

    cmd = [
        "docker",
        "buildx",
        "--builder",
        builder_name,
        "build",
        "--build-arg",
        f"SOURCE_DATE_EPOCH={sde}",
        *_build_args,
        "--provenance",
        "false",
        "--output",
        f"type=docker,dest={output},rewrite-timestamp=true",
        *cache_args,
        *tag_args,
        *dockerfile_args,
        *platform_args,
        *buildx_args,
        context,
    ]
    run(cmd, dry)


##########################
# Command logic


def build(args):
    runtime = parse_runtime(args)
    use_cache = parse_use_cache(args)
    sde = parse_sde(args)
    rootless = parse_rootless(args, runtime)
    buildkit_image = parse_buildkit_image(args, rootless, runtime)
    build_args = parse_build_args(args)
    platform = args.platform
    buildkit_args = parse_buildkit_args(args, runtime)
    buildx_args = parse_buildx_args(args, runtime)
    tag = args.tag
    dockerfile = parse_path(args.file)
    output = Path(parse_path(args.output))
    dry = args.dry
    context = parse_path(args.context)

    logger.info(
        MSG_BUILD_CTX.format(
            runtime=runtime,
            buildkit_image=buildkit_image,
            sde=sde,
            rootless=rootless,
            use_cache=use_cache,
            context=context,
            dockerfile=dockerfile or "(not provided)",
            tag=tag or "(not provided)",
            output=output,
            build_args=",".join(build_args) or "(not provided)",
            platform=platform or "(default)",
            buildkit_args=" ".join(buildkit_args) or "(not provided)",
            buildx_args=" ".join(buildx_args) or "(not provided)",
        )
    )

    try:
        if runtime == "docker":
            docker_build(
                context,
                dockerfile,
                tag,
                buildkit_image,
                sde,
                use_cache,
                output,
                build_args,
                platform,
                buildx_args,
                dry,
            )
        else:
            podman_build(
                context,
                dockerfile,
                tag,
                buildkit_image,
                sde,
                rootless,
                use_cache,
                output,
                build_args,
                platform,
                buildkit_args,
                dry,
            )
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed with {e.returncode}")
        sys.exit(e.returncode)


def analyze(args) -> None:
    expected_image_digest = parse_image_digest(args)
    tarball_path = Path(args.tarball)

    parsed = oci_parse_tarball(tarball_path)
    oci_print_info(parsed, args.show_contents)

    if expected_image_digest:
        cur_digest = parsed[1]["digest"].split(":")[1]
        if cur_digest != expected_image_digest:
            raise Exception(
                f"The image does not have the expected digest: {cur_digest} != {expected_image_digest}"
            )
        print(f"✅ Image digest matches {expected_image_digest}")


def define_build_cmd_args(parser: argparse.ArgumentParser) -> None:
    parser.add_argument(
        "--runtime",
        choices=["docker", "podman"],
        default=detect_container_runtime(),
        help="The container runtime for building the image (default: %(default)s)",
    )
    parser.add_argument(
        "--datetime",
        metavar="YYYY-MM-DD",
        default=None,
        help=(
            "Provide a date and (optionally) a time in ISO format, which will"
            " be used as the timestamp of the image layers"
        ),
    )
    parser.add_argument(
        "--buildkit-image",
        metavar="NAME:TAG@DIGEST",
        default=None,
        help=(
            "The BuildKit container image which will be used for building the"
            " reproducible container image. Make sure to pass the '-rootless'"
            " variant if you are using rootless Podman"
            " (default: docker.io/moby/buildkit:v0.19.0)"
        ),
    )
    parser.add_argument(
        "--source-date-epoch",
        "--sde",
        metavar="SECONDS",
        type=int,
        default=None,
        help="Provide a Unix timestamp for the image layers",
    )
    parser.add_argument(
        "--no-cache",
        default=False,
        action="store_true",
        help="Do not use existing cached images for the container build. Build from the start with a new set of cached layers.",
    )
    parser.add_argument(
        "--rootless",
        default=False,
        action="store_true",
        help="Run BuildKit in rootless mode (Podman only)",
    )
    parser.add_argument(
        "-f",
        "--file",
        metavar="FILE",
        default=None,
        help="Pathname of a Dockerfile",
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="FILE",
        default=Path.cwd() / "image.tar",
        help="Path to save OCI tarball (default: %(default)s)",
    )
    parser.add_argument(
        "-t",
        "--tag",
        metavar="TAG",
        default=None,
        help="Tag the built image with the name %(metavar)s",
    )
    parser.add_argument(
        "--build-arg",
        metavar="ARG=VALUE",
        action="append",
        default=None,
        help="Set build-time variables",
    )
    parser.add_argument(
        "--platform",
        metavar="PLAT1,PLAT2",
        default=None,
        help="Set platform for the image",
    )
    parser.add_argument(
        "--buildkit-args",
        metavar="'ARG1 ARG2'",
        default=None,
        help="Extra arguments for BuildKit (Podman only)",
    )
    parser.add_argument(
        "--buildx-args",
        metavar="'ARG1 ARG2'",
        default=None,
        help="Extra arguments for Docker Buildx (Docker only)",
    )
    parser.add_argument(
        "--dry",
        default=False,
        action="store_true",
        help="Do not run any commands, just print what would happen",
    )
    parser.add_argument(
        "context",
        metavar="CONTEXT",
        help="Path to the build context",
    )


def parse_args() -> dict:
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    build_parser = subparsers.add_parser("build", help="Perform a build operation")
    build_parser.set_defaults(func=build)
    define_build_cmd_args(build_parser)

    analyze_parser = subparsers.add_parser("analyze", help="Analyze an OCI tarball")
    analyze_parser.set_defaults(func=analyze)
    analyze_parser.add_argument(
        "tarball",
        metavar="FILE",
        help="Path to OCI image in .tar format",
    )
    analyze_parser.add_argument(
        "--expected-image-digest",
        metavar="DIGEST",
        default=None,
        help="The expected digest for the provided image",
    )
    analyze_parser.add_argument(
        "--show-contents",
        default=False,
        action="store_true",
        help="Show full file contents",
    )

    return parser.parse_args()


def main() -> None:
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    args = parse_args()

    if not hasattr(args, "func"):
        args.func = build
    args.func(args)


if __name__ == "__main__":
    sys.exit(main())
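
For orientation, a hypothetical pair of invocations of this script (the date, output path, and digest are placeholders) could look like:

```bash
# Build reproducibly, pinning layer timestamps to a fixed date
./dev_scripts/repro-build.py build --datetime 2025-02-26 -o image.tar .

# Verify that the resulting tarball has a known manifest digest
./dev_scripts/repro-build.py analyze --expected-image-digest <digest> image.tar
```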
115  dev_scripts/reproduce-image.py  (Executable file)

@@ -0,0 +1,115 @@
#!/usr/bin/env python3

import argparse
import hashlib
import logging
import pathlib
import platform
import stat
import subprocess
import sys
import urllib.request

logger = logging.getLogger(__name__)

if platform.system() in ["Darwin", "Windows"]:
    CONTAINER_RUNTIME = "docker"
elif platform.system() == "Linux":
    CONTAINER_RUNTIME = "podman"


def run(*args):
    """Simple function that runs a command and checks the result."""
    logger.debug(f"Running command: {' '.join(args)}")
    return subprocess.run(args, check=True)


def build_image(
    platform=None,
    runtime=None,
    cache=True,
    date=None,
):
    """Build the Dangerzone container image with a special tag."""
    platform_args = [] if not platform else ["--platform", platform]
    runtime_args = [] if not runtime else ["--runtime", runtime]
    cache_args = [] if cache else ["--use-cache", "no"]
    date_args = [] if not date else ["--debian-archive-date", date]
    run(
        "python3",
        "./install/common/build-image.py",
        *platform_args,
        *runtime_args,
        *cache_args,
        *date_args,
    )


def parse_args():
    parser = argparse.ArgumentParser(
        prog=sys.argv[0],
        description="Dev script for verifying container image reproducibility",
    )
    parser.add_argument(
        "--platform",
        default=None,
        help="The platform for building the image (default: current platform)",
    )
    parser.add_argument(
        "--runtime",
        choices=["docker", "podman"],
        default=CONTAINER_RUNTIME,
        help=f"The container runtime for building the image (default: {CONTAINER_RUNTIME})",
    )
    parser.add_argument(
        "--no-cache",
        default=False,
        action="store_true",
        help=(
            "Do not use existing cached images for the container build."
            " Build from the start with a new set of cached layers."
        ),
    )
    parser.add_argument(
        "--debian-archive-date",
        default=None,
        help="Use a specific Debian snapshot archive, by its date",
    )
    parser.add_argument(
        "digest",
        help="The digest of the image that you want to reproduce",
    )
    return parser.parse_args()


def main():
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    args = parse_args()

    logger.info("Building container image")
    build_image(
        args.platform,
        args.runtime,
        not args.no_cache,
        args.debian_archive_date,
    )

    logger.info(
        f"Check that the reproduced image has the expected digest: {args.digest}"
    )
    run(
        "./dev_scripts/repro-build.py",
        "analyze",
        "--show-contents",
        "share/container.tar",
        "--expected-image-digest",
        args.digest,
    )


if __name__ == "__main__":
    sys.exit(main())
@@ -11,7 +11,8 @@ log = logging.getLogger(__name__)

DZ_ASSETS = [
    "container.tar.gz",
    "container-{version}-i686.tar",
    "container-{version}-arm64.tar",
    "Dangerzone-{version}.msi",
    "Dangerzone-{version}-arm64.dmg",
    "Dangerzone-{version}-i686.dmg",
@@ -94,11 +95,11 @@ def main():
    parser.add_argument(
        "--version",
        required=True,
        help=f"look for assets with this Dangerzone version",
        help="look for assets with this Dangerzone version",
    )
    parser.add_argument(
        "dir",
        help=f"look for assets in this directory",
        help="look for assets in this directory",
    )
    args = parser.parse_args()
    setup_logging()
@@ -104,6 +104,8 @@ def main():

    if args.token:
        log.debug(f"Reading token from {args.token}")
        # Ensure we are not uploading the token as an asset
        assert args.file != args.token
        with open(args.token) as f:
            token = f.read().strip()
    else:
33  docs/advisories/2024-12-24.md  (Normal file)

@@ -0,0 +1,33 @@
Security Advisory 2024-12-24

In Dangerzone, a security vulnerability was detected in the quarantined
environment where documents are opened. Vulnerabilities like this are expected
and do not compromise the security of Dangerzone. However, in combination with
another, more serious vulnerability (also known as a container escape), a malicious
document may be able to breach the security of Dangerzone. We are not aware of
any container escapes that affect Dangerzone. **To reduce that risk, you are
strongly advised to update Dangerzone to the latest version**.

# Summary

A series of vulnerabilities in gst-plugins-base (CVE-2024-47538, CVE-2024-47607
and CVE-2024-47615) affects the **contained** environment where the document
rendering takes place.

If one attempts to convert a malicious file with embedded Vorbis or Opus media
elements, arbitrary code may run within that environment. Such files look like
regular Office documents, which means that you cannot protect yourself by
avoiding files with a specific extension. Other programs that open Office
documents, such as LibreOffice, are also affected, unless the system has been
upgraded in the meantime.

# How does this impact me?

The expectation is that malicious code will run in a container without Internet
access, meaning that it won't be able to infect the rest of the system.

If you are running Dangerzone via Qubes OS, you are not impacted.

# What do I need to do?

You are **strongly** advised to update your Dangerzone installation to 0.8.1 as
soon as possible.
54  docs/developer/doit.md  (Normal file)

@@ -0,0 +1,54 @@
# Using the Doit Automation Tool

Developers can use the [Doit](https://pydoit.org/) automation tool to create
release artifacts. The purpose of the tool is to automate the manual release
instructions in the `RELEASE.md` file. Not everything is automated yet, since
we're still experimenting with this tool. You can find our task definitions in
this repo's `dodo.py` file.

## Why Doit?

We picked Doit out of the various tools out there for the following reasons:

* **Pythonic:** The configuration file and tasks can be written in Python. Where
  applicable, it's easy to issue shell commands as well.
* **File targets:** Doit borrows the file target concept from Makefiles. Tasks
  can have file dependencies, and targets they build. This makes it easy to
  define a dependency graph for tasks.
* **Hash-based caching:** Unlike Makefiles, Doit does not look at the
  modification timestamp of source/target files to figure out if it needs to
  run them. Instead, it hashes those files, and will run a task only if the
  hash of a file dependency has changed (see the example below).
* **Parallelization:** Tasks can be run in parallel with the `-n` argument,
  which is similar to `make`'s `-j` argument.

## How to Doit?

First, enter your Poetry shell. Then, make sure that your environment is clean,
and you have ample disk space. You can run:

```bash
doit clean --dry-run  # if you want to see what would happen
doit clean            # you'll be asked to confirm that you want to clean everything
```

Finally, you can build all the release artifacts with `doit`, or a specific task
with:

```
doit <task>
```

## Tips and tricks

* You can run `doit list --all -s` to see the full list of tasks, their
  dependencies, and whether they are up to date (U) or will run (R). Note that
  certain small tasks are always configured to run.
* You can run `doit info <task>` to see which dependencies are missing.
* You can pass the following environment variables to the script, in order to
  affect some global parameters (see the example after this list):
  - `CONTAINER_RUNTIME`: The container runtime to use. Either `podman` (default)
    or `docker`.
  - `RELEASE_DIR`: Where to store the release artifacts. Default path is
    `~/release-assets/<version>`
  - `APPLE_ID`: The Apple ID to use when signing/notarizing the macOS DMG.
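
For instance, a run that overrides the container runtime and the output directory might look like this (the version in the path is illustrative):

```bash
CONTAINER_RUNTIME=docker RELEASE_DIR=~/release-assets/0.9.0 doit -n 2 debian_deb
```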

@@ -1,5 +1,11 @@
# gVisor integration

> [!NOTE]
> **Update on 2025-01-13:** There is no longer a copied container image under
> `/home/dangerzone/dangerzone-image/rootfs`. We now reuse the same container
> image both for the inner and outer container. See
> [#1048](https://github.com/freedomofpress/dangerzone/issues/1048).

Dangerzone has relied on the container runtime available in each supported
operating system (Docker Desktop on Windows / macOS, Podman on Linux) to isolate
the host from the sanitization process. The problem with this type of isolation
@@ -59,9 +65,9 @@ Spawning the container now becomes a multi-stage process:
The `Container` isolation provider spawns the container as before, with the
following changes:

* It adds two Linux capabilities to the **outer** container that didn't exist
  before: `SETFCAP` and `SYS_CHROOT`. Those capabilities are necessary to run
  `runsc` rootless, and are not inherited by the **inner** container.
* It adds the `SYS_CHROOT` Linux capability, which was previously dropped, to
  the **outer** container. This capability is necessary to run `runsc`
  rootless, and is not inherited by the **inner** container.
* It removes the `--userns keep-id` argument, which mapped the user outside the
  container to the same UID (normally `1000`) within the container. This was
  originally required when we were mounting host directories within the
@@ -70,8 +76,13 @@ following changes:
  - In distributions that offer Podman version 4 or greater, we use the
    `--userns nomap` flag. This flag greatly minimizes the attack surface,
    since the host user is not mapped within the container at all.
* In distributions that offer Podman 3.x, we add a seccomp filter that adds the
  `ptrace` syscall, which is required for running gVisor.
* We use our custom seccomp policy across container engines, since some do not
  allow the `ptrace` syscall (see
  [#846](https://github.com/freedomofpress/dangerzone/issues/846)).
* It labels the **outer** container with the `container_engine_t` SELinux label.
  This label is reserved for running a container engine within a container, and
  is necessary in environments where SELinux is enabled in enforcing mode (see
  [#880](https://github.com/freedomofpress/dangerzone/issues/880)).
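
Putting the above together, the outer-container invocation on a system with Podman 4+ roughly resembles the following sketch (the image name and seccomp policy path are illustrative, and the real argument list in the code differs):

```bash
podman run \
    --userns nomap \
    --cap-drop all \
    --cap-add SYS_CHROOT \
    --security-opt seccomp=<custom policy>.json \
    --security-opt label=type:container_engine_t \
    dangerzone.rocks/dangerzone
```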

Then, the following happens when Podman/Docker spawns the container:

67  docs/developer/reproducibility.md  (Normal file)

@@ -0,0 +1,67 @@
# Reproducible builds

We want to improve the transparency and auditability of our build artifacts, and
a way to achieve this is via reproducible builds. For a broader understanding of
what reproducible builds entail, check out https://reproducible-builds.org/.

Our build artifacts consist of:
* Container images (`amd64` and `arm64` architectures)
* macOS installers (for Intel and Apple Silicon CPUs)
* Windows installer
* Fedora packages (for regular Fedora distros and Qubes)
* Debian packages (for Debian and Ubuntu)

As of writing this, only the following artifacts are reproducible:
* Container images (see [#1047](https://github.com/freedomofpress/dangerzone/issues/1047))

In the following sections, we'll mention some specifics about enforcing
reproducibility for each artifact type.

## Container image

### Updating the image

The fact that our image is reproducible also means that it's frozen in time.
This means that rebuilding the image without updating our Dockerfile will
**not** receive security updates.

Here are the necessary variables that make up our image in the `Dockerfile.env`
file (a sketch follows the list):
* `DEBIAN_IMAGE_DIGEST`: The index digest for the Debian container image
* `DEBIAN_ARCHIVE_DATE`: The Debian snapshot repo that we want to use
* `GVISOR_ARCHIVE_DATE`: The gVisor APT repo that we want to use
* `H2ORESTART_CHECKSUM`: The SHA-256 checksum of the H2ORestart plugin
* `H2ORESTART_VERSION`: The version of the H2ORestart plugin
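
A sketch of what such a file looks like, with placeholder values:

```
DEBIAN_IMAGE_DIGEST=sha256:<index digest>
DEBIAN_ARCHIVE_DATE=<YYYYMMDD>
GVISOR_ARCHIVE_DATE=<YYYYMMDD>
H2ORESTART_CHECKSUM=<sha256 checksum>
H2ORESTART_VERSION=<version>
```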

If you update these values in `Dockerfile.env`, you must also create a new
Dockerfile with:

```
make Dockerfile
```

Updating `Dockerfile` without bumping `Dockerfile.in` is detected and should
trigger a CI error.

### Reproducing the image

For a simple way to reproduce a Dangerzone container image, you can check out the
commit this image was built from (you can find it from the image tag, in its
`g<commit>` portion), retrieve the date it was built (also included in the image
tag), and run the following command in any environment:

```
./dev_scripts/reproduce-image.py \
    --debian-archive-date <date> \
    <digest>
```

where:
* `<date>` should be given in YYYYMMDD format, e.g., 20250226
* `<digest>` is the SHA-256 hash of the image for the **current platform**, with
  or without the `sha256:` prefix.

This command will build a container image from the current Git commit and the
provided date for the Debian archives. Then, it will compare the digest of the
manifest against the provided one. This is a simple way to ensure that the
created image is bit-for-bit reproducible.
53  docs/podman-desktop.md  (Normal file)

@@ -0,0 +1,53 @@
# Podman Desktop support

Starting with Dangerzone 0.9.0, it is possible to use Podman Desktop on
Windows and macOS. Support for this container runtime is currently
experimental. If you try it out and encounter issues, please reach out to us,
we'll be glad to help.

With [Podman Desktop](https://podman-desktop.io/) installed on your machine,
here are the required steps to change the Dangerzone container runtime.

You will be required to open a terminal and follow these steps:

## On macOS

You will need to configure podman to access the shared Dangerzone resources:

```bash
podman machine stop
podman machine rm
cat > ~/.config/containers/containers.conf <<EOF
[machine]
volumes = ["/Users:/Users", "/private:/private", "/var/folders:/var/folders", "/Applications/Dangerzone.app:/Applications/Dangerzone.app"]
EOF
podman machine init
podman machine set --rootful=false
podman machine start
```

Then, set the container runtime to podman using this command:

```bash
/Applications/Dangerzone.app/Contents/MacOS/dangerzone-cli --set-container-runtime podman
```

In order to get back to the default behaviour (Docker Desktop on macOS), pass
the `default` value instead:

```bash
/Applications/Dangerzone.app/Contents/MacOS/dangerzone-cli --set-container-runtime default
```

## On Windows

To set the container runtime to podman, use this command:

```bash
'C:\Program Files\Dangerzone\dangerzone-cli.exe' --set-container-runtime podman
```

To revert back to the default behavior, pass the `default` value:

```bash
'C:\Program Files\Dangerzone\dangerzone-cli.exe' --set-container-runtime default
```
379  dodo.py  (Normal file)

@@ -0,0 +1,379 @@
import json
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
from doit.action import CmdAction
|
||||
|
||||
ARCH = "arm64" if platform.machine() == "arm64" else "i686"
|
||||
VERSION = open("share/version.txt").read().strip()
|
||||
FEDORA_VERSIONS = ["40", "41", "42"]
|
||||
|
||||
### Global parameters
|
||||
|
||||
CONTAINER_RUNTIME = os.environ.get("CONTAINER_RUNTIME", "podman")
|
||||
DEFAULT_RELEASE_DIR = Path.home() / "release-assets" / VERSION
|
||||
RELEASE_DIR = Path(os.environ.get("RELEASE_DIR", DEFAULT_RELEASE_DIR))
|
||||
APPLE_ID = os.environ.get("APPLE_ID", None)
|
||||
|
||||
### Task Parameters
|
||||
|
||||
PARAM_APPLE_ID = {
|
||||
"name": "apple_id",
|
||||
"long": "apple-id",
|
||||
"default": APPLE_ID,
|
||||
"help": "The Apple developer ID that will be used to sign the .dmg",
|
||||
}
|
||||
|
||||
### File dependencies
|
||||
#
|
||||
# Define all the file dependencies for our tasks in a single place, since some file
|
||||
# dependencies are shared between tasks.
|
||||
|
||||
|
||||
def list_files(path, recursive=False):
|
||||
"""List files in a directory, and optionally traverse into subdirectories."""
|
||||
glob_fn = Path(path).rglob if recursive else Path(path).glob
|
||||
return [f for f in glob_fn("*") if f.is_file() and not f.suffix == ".pyc"]
|
||||
|
||||
|
||||
def list_language_data():
|
||||
"""List the expected language data that Dangerzone downloads and stores locally."""
|
||||
tessdata_dir = Path("share") / "tessdata"
|
||||
langs = json.loads(open(tessdata_dir.parent / "ocr-languages.json").read()).values()
|
||||
targets = [tessdata_dir / f"{lang}.traineddata" for lang in langs]
|
||||
return targets
|
||||
|
||||
|
||||
TESSDATA_DEPS = ["install/common/download-tessdata.py", "share/ocr-languages.json"]
|
||||
TESSDATA_TARGETS = list_language_data()
|
||||
|
||||
IMAGE_DEPS = [
|
||||
"Dockerfile",
|
||||
*list_files("dangerzone/conversion"),
|
||||
*list_files("dangerzone/container_helpers"),
|
||||
"install/common/build-image.py",
|
||||
]
|
||||
IMAGE_TARGETS = ["share/container.tar", "share/image-id.txt"]
|
||||
|
||||
SOURCE_DEPS = [
|
||||
*list_files("assets"),
|
||||
*list_files("share"),
|
||||
*list_files("dangerzone", recursive=True),
|
||||
]
|
||||
|
||||
PYTHON_DEPS = ["poetry.lock", "pyproject.toml"]
|
||||
|
||||
DMG_DEPS = [
|
||||
*list_files("install/macos"),
|
||||
*TESSDATA_TARGETS,
|
||||
*IMAGE_TARGETS,
|
||||
*PYTHON_DEPS,
|
||||
*SOURCE_DEPS,
|
||||
]
|
||||
|
||||
LINUX_DEPS = [
|
||||
*list_files("install/linux"),
|
||||
*IMAGE_TARGETS,
|
||||
*PYTHON_DEPS,
|
||||
*SOURCE_DEPS,
|
||||
]
|
||||
|
||||
DEB_DEPS = [*LINUX_DEPS, *list_files("debian")]
|
||||
RPM_DEPS = [*LINUX_DEPS, *list_files("qubes")]
|
||||
|
||||
|
||||
def copy_dir(src, dst):
|
||||
"""Copy a directory to a destination dir, and overwrite it if it exists."""
|
||||
shutil.rmtree(dst, ignore_errors=True)
|
||||
shutil.copytree(src, dst)
|
||||
|
||||
|
||||
def create_release_dir():
|
||||
RELEASE_DIR.mkdir(parents=True, exist_ok=True)
|
||||
(RELEASE_DIR / "tmp").mkdir(exist_ok=True)
|
||||
|
||||
|
||||
def build_linux_pkg(distro, version, cwd, qubes=False):
|
||||
"""Generic command for building a .deb/.rpm in a Dangerzone dev environment."""
|
||||
pkg = "rpm" if distro == "fedora" else "deb"
|
||||
cmd = [
|
||||
"python3",
|
||||
"./dev_scripts/env.py",
|
||||
"--distro",
|
||||
distro,
|
||||
"--version",
|
||||
version,
|
||||
"run",
|
||||
"--no-gui",
|
||||
"--dev",
|
||||
f"./dangerzone/install/linux/build-{pkg}.py",
|
||||
]
|
||||
if qubes:
|
||||
cmd += ["--qubes"]
|
||||
return CmdAction(" ".join(cmd), cwd=cwd)
|
||||
|
||||
|
||||
def build_deb(cwd):
|
||||
"""Build a .deb package on Debian Bookworm."""
|
||||
return build_linux_pkg(distro="debian", version="bookworm", cwd=cwd)
|
||||
|
||||
|
||||
def build_rpm(version, cwd, qubes=False):
|
||||
"""Build an .rpm package on the requested Fedora distro."""
|
||||
return build_linux_pkg(distro="fedora", version=version, cwd=cwd, qubes=qubes)
|
||||
|
||||
|
||||
### Tasks


def task_clean_container_runtime():
    """Clean the storage space of the container runtime."""
    return {
        "actions": None,
        "clean": [
            [CONTAINER_RUNTIME, "system", "prune", "-a", "-f"],
        ],
    }


def task_check_container_runtime():
    """Test that the container runtime is ready."""
    return {
        "actions": [
            ["which", CONTAINER_RUNTIME],
            [CONTAINER_RUNTIME, "ps"],
        ],
    }


def task_macos_check_cert():
    """Test that the Apple developer certificate can be used."""
    return {
        "actions": [
            "xcrun notarytool history --apple-id %(apple_id)s --keychain-profile dz-notarytool-release-key"
        ],
        "params": [PARAM_APPLE_ID],
    }


def task_macos_check_system():
    """Run macOS specific system checks, as well as the generic ones."""
    return {
        "actions": None,
        "task_dep": ["check_container_runtime", "macos_check_cert"],
    }


def task_init_release_dir():
    """Create a directory for release artifacts."""
    return {
        "actions": [create_release_dir],
        "clean": [f"rm -rf {RELEASE_DIR}"],
    }


def task_download_tessdata():
    """Download the Tesseract data using ./install/common/download-tessdata.py"""
    return {
        "actions": ["python install/common/download-tessdata.py"],
        "file_dep": TESSDATA_DEPS,
        "targets": TESSDATA_TARGETS,
        "clean": True,
    }


def task_build_image():
    """Build the container image using ./install/common/build-image.py"""
    img_src = "share/container.tar"
    img_dst = RELEASE_DIR / f"container-{VERSION}-{ARCH}.tar"  # FIXME: Add arch
    img_id_src = "share/image-id.txt"
    img_id_dst = RELEASE_DIR / "image-id.txt"  # FIXME: Add arch

    return {
        "actions": [
            f"python install/common/build-image.py --runtime={CONTAINER_RUNTIME}",
            ["cp", img_src, img_dst],
            ["cp", img_id_src, img_id_dst],
        ],
        "file_dep": IMAGE_DEPS,
        "targets": [img_src, img_dst, img_id_src, img_id_dst],
        "task_dep": ["init_release_dir", "check_container_runtime"],
        "clean": True,
    }


def task_poetry_install():
    """Setup the Poetry environment"""
    return {"actions": ["poetry sync"], "clean": ["poetry env remove --all"]}


def task_macos_build_dmg():
    """Build the macOS .dmg file for Dangerzone."""
    dz_dir = RELEASE_DIR / "tmp" / "macos"
    dmg_src = dz_dir / "dist" / "Dangerzone.dmg"
    dmg_dst = RELEASE_DIR / f"Dangerzone-{VERSION}-{ARCH}.dmg"  # FIXME: Add -arch

    return {
        "actions": [
            (copy_dir, [".", dz_dir]),
            f"cd {dz_dir} && poetry run install/macos/build-app.py --with-codesign",
            (
                "xcrun notarytool submit --wait --apple-id %(apple_id)s"
                f" --keychain-profile dz-notarytool-release-key {dmg_src}"
            ),
            f"xcrun stapler staple {dmg_src}",
            ["cp", dmg_src, dmg_dst],
            ["rm", "-rf", dz_dir],
        ],
        "params": [PARAM_APPLE_ID],
        "file_dep": DMG_DEPS,
        "task_dep": [
            "macos_check_system",
            "init_release_dir",
            "poetry_install",
            "download_tessdata",
        ],
        "targets": [dmg_src, dmg_dst],
        "clean": True,
    }


def task_debian_env():
    """Build a Debian Bookworm dev environment."""
    return {
        "actions": [
            [
                "python3",
                "./dev_scripts/env.py",
                "--distro",
                "debian",
                "--version",
                "bookworm",
                "build-dev",
            ]
        ],
        "task_dep": ["check_container_runtime"],
    }


def task_debian_deb():
    """Build a Debian package for Debian Bookworm."""
    dz_dir = RELEASE_DIR / "tmp" / "debian"
    deb_name = f"dangerzone_{VERSION}-1_amd64.deb"
    deb_src = dz_dir / "deb_dist" / deb_name
    deb_dst = RELEASE_DIR / deb_name

    return {
        "actions": [
            (copy_dir, [".", dz_dir]),
            build_deb(cwd=dz_dir),
            ["cp", deb_src, deb_dst],
            ["rm", "-rf", dz_dir],
        ],
        "file_dep": DEB_DEPS,
        "task_dep": ["init_release_dir", "debian_env"],
        "targets": [deb_dst],
        "clean": True,
    }


def task_fedora_env():
    """Build Fedora dev environments."""
    for version in FEDORA_VERSIONS:
        yield {
            "name": version,
            "doc": f"Build Fedora {version} dev environments",
            "actions": [
                [
                    "python3",
                    "./dev_scripts/env.py",
                    "--distro",
                    "fedora",
                    "--version",
                    version,
                    "build-dev",
                ],
            ],
            "task_dep": ["check_container_runtime"],
        }


def task_fedora_rpm():
    """Build Fedora packages for every supported version."""
    for version in FEDORA_VERSIONS:
        for qubes in (True, False):
            qubes_ident = "-qubes" if qubes else ""
            qubes_desc = " for Qubes" if qubes else ""
            dz_dir = RELEASE_DIR / "tmp" / f"f{version}{qubes_ident}"
            rpm_names = [
                f"dangerzone{qubes_ident}-{VERSION}-1.fc{version}.x86_64.rpm",
                f"dangerzone{qubes_ident}-{VERSION}-1.fc{version}.src.rpm",
            ]
            rpm_src = [dz_dir / "dist" / rpm_name for rpm_name in rpm_names]
            rpm_dst = [RELEASE_DIR / rpm_name for rpm_name in rpm_names]

            yield {
                "name": version + qubes_ident,
                "doc": f"Build a Fedora {version} package{qubes_desc}",
                "actions": [
                    (copy_dir, [".", dz_dir]),
                    build_rpm(version, cwd=dz_dir, qubes=qubes),
                    ["cp", *rpm_src, RELEASE_DIR],
                    ["rm", "-rf", dz_dir],
                ],
                "file_dep": RPM_DEPS,
                "task_dep": ["init_release_dir", f"fedora_env:{version}"],
                "targets": rpm_dst,
                "clean": True,
            }


def task_git_archive():
    """Build a Git archive of the repo."""
    target = f"{RELEASE_DIR}/dangerzone-{VERSION}.tar.gz"
    return {
        "actions": [
            f"git archive --format=tar.gz -o {target} --prefix=dangerzone/ v{VERSION}"
        ],
        "targets": [target],
        "task_dep": ["init_release_dir"],
    }


#######################################################################################
#
# END OF TASKS
#
# The following task should be the LAST one in the dodo file, so that it runs first
# when running `doit clean`.


def clean_prompt():
    ans = input(
        f"""
You have not specified a target to clean.
This means that doit will clean the following targets:

* ALL the containers, images, and build cache in {CONTAINER_RUNTIME.capitalize()}
* ALL the built targets and directories

For a full list of the targets that doit will clean, run: doit clean --dry-run

Are you sure you want to clean everything (y/N): \
"""
    )
    if ans.lower() in ["yes", "y"]:
        return
    else:
        print("Exiting...")
        exit(1)


def task_clean_prompt():
    """Make sure that the user really wants to run the clean tasks."""
    return {
        "actions": None,
        "clean": [clean_prompt],
    }
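Assuming doit is installed in the Poetry environment, the tasks above can be driven with invocations along these lines (the parameter value is illustrative):

    doit list                  # enumerate the tasks defined above
    doit macos_build_dmg --apple-id user@example.com
    doit clean --dry-run       # preview what the clean actions would remove
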
@@ -1,20 +1,62 @@
 import argparse
-import gzip
-import os
 import platform
+import secrets
 import subprocess
 import sys
+from pathlib import Path

-BUILD_CONTEXT = "dangerzone/"
-TAG = "dangerzone.rocks/dangerzone:latest"
-REQUIREMENTS_TXT = "container-pip-requirements.txt"
+BUILD_CONTEXT = "dangerzone"
+IMAGE_NAME = "dangerzone.rocks/dangerzone"
 if platform.system() in ["Darwin", "Windows"]:
     CONTAINER_RUNTIME = "docker"
 elif platform.system() == "Linux":
     CONTAINER_RUNTIME = "podman"


+def str2bool(v):
+    if isinstance(v, bool):
+        return v
+    if v.lower() in ("yes", "true", "t", "y", "1"):
+        return True
+    elif v.lower() in ("no", "false", "f", "n", "0"):
+        return False
+    else:
+        raise argparse.ArgumentTypeError("Boolean value expected.")
+
+
+def determine_git_tag():
+    # Designate a unique tag for this image, depending on the Git commit it was created
+    # from:
+    # 1. If created from a Git tag (e.g., 0.8.0), the image tag will be `0.8.0`.
+    # 2. If created from a commit, it will be something like `0.8.0-31-g6bdaa7a`.
+    # 3. If the contents of the Git repo are dirty, we will append a unique identifier
+    #    for this run, something like `0.8.0-31-g6bdaa7a-fdcb` or `0.8.0-fdcb`.
+    dirty_ident = secrets.token_hex(2)
+    return (
+        subprocess.check_output(
+            [
+                "git",
+                "describe",
+                "--long",
+                "--first-parent",
+                f"--dirty=-{dirty_ident}",
+            ],
+        )
+        .decode()
+        .strip()[1:]  # remove the "v" prefix of the tag.
+    )
+
+
+def determine_debian_archive_date():
+    """Get the date of the Debian archive from Dockerfile.env."""
+    for env in Path("Dockerfile.env").read_text().split("\n"):
+        if env.startswith("DEBIAN_ARCHIVE_DATE"):
+            return env.split("=")[1]
+    raise Exception(
+        "Could not find 'DEBIAN_ARCHIVE_DATE' build argument in Dockerfile.env"
+    )


 def main():
     parser = argparse.ArgumentParser()
     parser.add_argument(
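`determine_debian_archive_date()` expects a `KEY=value` line in `Dockerfile.env`, and `determine_git_tag()` contributes the `git describe` part of the default image tag. A sketch with hypothetical values:

    # Dockerfile.env (hypothetical):
    #   DEBIAN_ARCHIVE_DATE=20250120
    #
    # determine_git_tag() -> "0.8.0-31-g6bdaa7a"  (the "v" prefix is stripped)
    # default image tag   -> "20250120-0.8.0-31-g6bdaa7a"
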
@@ -24,108 +66,85 @@ def main():
         help=f"The container runtime for building the image (default: {CONTAINER_RUNTIME})",
     )
     parser.add_argument(
-        "--no-save",
-        action="store_true",
-        help="Do not save the container image as a tarball in share/container.tar.gz",
+        "--platform",
+        default=None,
+        help=f"The platform for building the image (default: current platform)",
     )
     parser.add_argument(
-        "--compress-level",
-        type=int,
-        choices=range(0, 10),
-        default=9,
-        help="The Gzip compression level, from 0 (lowest) to 9 (highest, default)",
+        "--output",
+        "-o",
+        default=str(Path("share") / "container.tar"),
+        help="Path to store the container image",
+    )
+    parser.add_argument(
+        "--use-cache",
+        type=str2bool,
+        nargs="?",
+        default=True,
+        const=True,
+        help="Use the builder's cache to speed up the builds",
+    )
+    parser.add_argument(
+        "--tag",
+        default=None,
+        help="Provide a custom tag for the image (for development only)",
+    )
+    parser.add_argument(
+        "--debian-archive-date",
+        "-d",
+        default=determine_debian_archive_date(),
+        help="Use a specific Debian snapshot archive, by its date (default %(default)s)",
+    )
+    parser.add_argument(
+        "--dry",
+        default=False,
+        action="store_true",
+        help="Do not run any commands, just print what would happen",
     )
     args = parser.parse_args()

-    print("Exporting container pip dependencies")
-    with ContainerPipDependencies():
-        print("Pulling base image")
-        subprocess.run(
-            [
-                args.runtime,
-                "pull",
-                "alpine:latest",
-            ],
-            check=True,
-        )
+    tag = args.tag or f"{args.debian_archive_date}-{determine_git_tag()}"
+    image_name_tagged = f"{IMAGE_NAME}:{tag}"

-        print("Building container image")
-        subprocess.run(
-            [
-                args.runtime,
-                "build",
-                BUILD_CONTEXT,
-                "--build-arg",
-                f"REQUIREMENTS_TXT={REQUIREMENTS_TXT}",
-                "-f",
-                "Dockerfile",
-                "--tag",
-                TAG,
-            ],
-            check=True,
-        )
+    print(f"Will tag the container image as '{image_name_tagged}'")
+    image_id_path = Path("share") / "image-id.txt"
+    if not args.dry:
+        with open(image_id_path, "w") as f:
+            f.write(tag)

-        if not args.no_save:
-            print("Saving container image")
-            cmd = subprocess.Popen(
-                [
-                    CONTAINER_RUNTIME,
-                    "save",
-                    TAG,
-                ],
-                stdout=subprocess.PIPE,
-            )
-
-            print("Compressing container image")
-            chunk_size = 4 << 20
-            with gzip.open(
-                "share/container.tar.gz",
-                "wb",
-                compresslevel=args.compress_level,
-            ) as gzip_f:
-                while True:
-                    chunk = cmd.stdout.read(chunk_size)
-                    if len(chunk) > 0:
-                        gzip_f.write(chunk)
-                    else:
-                        break
-                cmd.wait(5)
-
-        print("Looking up the image id")
-        image_id = subprocess.check_output(
-            [
-                args.runtime,
-                "image",
-                "list",
-                "--format",
-                "{{.ID}}",
-                TAG,
-            ],
-            text=True,
-        )
-        with open("share/image-id.txt", "w") as f:
-            f.write(image_id)
+    # Build the container image, and tag it with the calculated tag
+    print("Building container image")
+    cache_args = [] if args.use_cache else ["--no-cache"]
+    platform_args = [] if not args.platform else ["--platform", args.platform]
+    rootless_args = [] if args.runtime == "docker" else ["--rootless"]
+    rootless_args = []
+    dry_args = [] if not args.dry else ["--dry"]
+
+    subprocess.run(
+        [
+            sys.executable,
+            str(Path("dev_scripts") / "repro-build.py"),
+            "build",
+            "--runtime",
+            args.runtime,
+            "--build-arg",
+            f"DEBIAN_ARCHIVE_DATE={args.debian_archive_date}",
+            "--datetime",
+            args.debian_archive_date,
+            *dry_args,
+            *cache_args,
+            *platform_args,
+            *rootless_args,
+            "--tag",
+            image_name_tagged,
+            "--output",
+            args.output,
+            "-f",
+            "Dockerfile",
+            BUILD_CONTEXT,
+        ],
+        check=True,
+    )


-class ContainerPipDependencies:
-    """Generates PIP dependencies within container"""
-
-    def __enter__(self):
-        try:
-            container_requirements_txt = subprocess.check_output(
-                ["poetry", "export", "--only", "container"], universal_newlines=True
-            )
-        except subprocess.CalledProcessError as e:
-            print("FAILURE", e.returncode, e.output)
-        print(f"REQUIREMENTS: {container_requirements_txt}")
-        # XXX Export container dependencies and exclude pymupdfb since it is not needed in container
-        req_txt_pymupdfb_stripped = container_requirements_txt.split("pymupdfb")[0]
-        with open(Path(BUILD_CONTEXT) / REQUIREMENTS_TXT, "w") as f:
-            f.write(req_txt_pymupdfb_stripped)
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        print("Leaving the context...")
-        os.remove(Path(BUILD_CONTEXT) / REQUIREMENTS_TXT)
-
-
 if __name__ == "__main__":
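Flattened out, the `repro-build.py` invocation assembled above looks roughly like this (runtime, date, and tag values are illustrative):

    python3 dev_scripts/repro-build.py build --runtime podman \
        --build-arg DEBIAN_ARCHIVE_DATE=20250120 --datetime 20250120 \
        --tag dangerzone.rocks/dangerzone:20250120-0.9.0 \
        --output share/container.tar -f Dockerfile dangerzone
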
install/common/download-tessdata.py — 94 lines (new file)
@@ -0,0 +1,94 @@
import hashlib
import io
import json
import logging
import pathlib
import subprocess
import sys
import tarfile
import urllib.request

logger = logging.getLogger(__name__)

TESSDATA_RELEASES_URL = (
    "https://api.github.com/repos/tesseract-ocr/tessdata_fast/releases/latest"
)
TESSDATA_ARCHIVE_URL = "https://github.com/tesseract-ocr/tessdata_fast/archive/{tessdata_version}/tessdata_fast-{tessdata_version}.tar.gz"
TESSDATA_CHECKSUM = "d0e3bb6f3b4e75748680524a1d116f2bfb145618f8ceed55b279d15098a530f9"


def git_root():
    """Get the root directory of the Git repo."""
    # FIXME: Use a Git Python binding for this.
    # FIXME: Make this work if called outside the repo.
    cmd = ["git", "rev-parse", "--show-toplevel"]
    path = (
        subprocess.run(cmd, check=True, stdout=subprocess.PIPE)
        .stdout.decode()
        .strip("\n")
    )
    return pathlib.Path(path)


def main():
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    share_dir = git_root() / "share"
    tessdata_dir = share_dir / "tessdata"

    # Get the list of OCR languages that Dangerzone supports.
    with open(share_dir / "ocr-languages.json") as f:
        langs_short = sorted(json.loads(f.read()).values())

    # Check if these languages have already been downloaded.
    if tessdata_dir.exists():
        expected_files = {f"{lang}.traineddata" for lang in langs_short}
        files = {f.name for f in tessdata_dir.iterdir()}
        if files == expected_files:
            logger.info("Skipping tessdata download, language data already exists")
            return
        elif not files:
            logger.info("Tesseract dir is empty, proceeding to download language data")
        else:
            logger.info(f"Found {tessdata_dir} but contents do not match")
            return 1

    # Get the latest release of the Tesseract data.
    logger.info("Getting latest tessdata release")
    with urllib.request.urlopen(TESSDATA_RELEASES_URL) as f:
        resp = f.read()
        releases = json.loads(resp)
        tag = releases["tag_name"]

    # Download the latest release of the Tesseract data.
    logger.info(f"Downloading tessdata release {tag}")
    archive_url = TESSDATA_ARCHIVE_URL.format(tessdata_version=tag)
    with urllib.request.urlopen(archive_url) as f:
        archive = f.read()
        digest = hashlib.sha256(archive).hexdigest()
        if digest != TESSDATA_CHECKSUM:
            raise RuntimeError(f"Checksum mismatch {digest} != {TESSDATA_CHECKSUM}")

    # Extract the language models from the tessdata archive.
    logger.info(f"Extracting tessdata archive into {tessdata_dir}")
    with tarfile.open(fileobj=io.BytesIO(archive)) as t:
        for lang in langs_short:
            member = f"tessdata_fast-{tag}/{lang}.traineddata"
            logger.info(f"Extracting {member}")
            # NOTE: We want `filter="data"` because it ignores ownership info, as
            # recorded in the tarfile. This filter will become the default in Python
            # 3.14. See:
            #
            # https://docs.python.org/3/library/tarfile.html#tarfile-extraction-filter
            t.extract(member=member, path=share_dir, filter="data")

    tessdata_dl_dir = share_dir / f"tessdata_fast-{tag}"
    tessdata_dl_dir.rename(tessdata_dir)


if __name__ == "__main__":
    sys.exit(main())
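When the pinned tessdata release is bumped, `TESSDATA_CHECKSUM` has to be recomputed to match the new archive. A hypothetical helper, reusing the constants above (the version string is illustrative):

    import hashlib
    import urllib.request

    url = TESSDATA_ARCHIVE_URL.format(tessdata_version="4.1.0")
    print(hashlib.sha256(urllib.request.urlopen(url).read()).hexdigest())
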
@@ -2,19 +2,17 @@
 # -*- coding: utf-8 -*-

 import argparse
-import inspect
 import os
 import shutil
 import subprocess
 import sys
 from pathlib import Path

-root = os.path.dirname(
-    os.path.dirname(
-        os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
-    )
-)
+# .absolute() is needed for python<=3.8, for which
+# __file__ may return a relative path.
+root = Path(__file__).parent.parent.parent.absolute()

-with open(os.path.join(root, "share", "version.txt")) as f:
+with open(root / "share" / "version.txt") as f:
     version = f.read().strip()
@@ -39,8 +37,8 @@ def main():
     )
     args = parser.parse_args()

-    dist_path = os.path.join(root, "dist")
-    deb_dist_path = os.path.join(root, "deb_dist")
+    dist_path = root / "dist"
+    deb_dist_path = root / "deb_dist"

     print("* Deleting old dist and deb_dist")
     if os.path.exists(dist_path):
@@ -49,31 +47,27 @@ def main():
         shutil.rmtree(deb_dist_path)

     print("* Building DEB package")
-    # NOTE: This command first builds the Debian source package, and then creates the
-    # final DEB package. We could simply call `bdist_deb`, which performs `sdist_dsc`
-    # implicitly, but we wouldn't be able to pass the Debian version argument. Because
-    # we do this in a single invocation though, there's no performance cost.
     if args.distro is None:
         deb_ver_args = ()
         deb_ver = "1"
     else:
         deb_ver_args = ("--debian-version", args.distro)
         deb_ver = args.distro

     run(
         [
             "python3",
             "setup.py",
             "--command-packages=stdeb.command",
             "sdist_dsc",
             *deb_ver_args,
-            "bdist_deb",
+            "dpkg-buildpackage",
         ]
     )

-    os.makedirs(deb_dist_path, exist_ok=True)
-    print("")
-    print("* To install run:")
-    print(f"sudo dpkg -i deb_dist/dangerzone_{version}-{deb_ver}_all.deb")
+    # dpkg-buildpackage produces a .deb file in the parent folder
+    # that needs to be copied to the `deb_dist` folder manually
+    src = root.parent / f"dangerzone_{version}_amd64.deb"
+    destination = root / "deb_dist" / f"dangerzone_{version}-{deb_ver}_amd64.deb"
+    shutil.move(src, destination)
+    print(f"sudo dpkg -i {destination}")


 if __name__ == "__main__":
@@ -1,11 +1,9 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 import argparse
-import inspect
 import os
 import shutil
 import subprocess
 import tempfile
 from pathlib import Path

 root = Path(__file__).parent.parent.parent
@@ -66,11 +64,16 @@ def build(build_dir, qubes=False):
     os.symlink(dist_path, srpm_dir)

     print("* Creating a Python sdist")
-    container_tar_gz = root / "share" / "container.tar.gz"
-    container_tar_gz_bak = root / "container.tar.gz.bak"
-    stash_container = qubes and container_tar_gz.exists()
-    if stash_container:
-        container_tar_gz.rename(container_tar_gz_bak)
+    tessdata = root / "share" / "tessdata"
+    tessdata_bak = root / "tessdata.bak"
+    container_tar = root / "share" / "container.tar"
+    container_tar_bak = root / "container.tar.bak"
+
+    if tessdata.exists():
+        tessdata.rename(tessdata_bak)
+    stash_container = qubes and container_tar.exists()
+    if stash_container and container_tar.exists():
+        container_tar.rename(container_tar_bak)
     try:
         subprocess.run(["poetry", "build", "-f", "sdist"], cwd=root, check=True)
         # Copy and unlink the Dangerzone sdist, instead of just renaming it. If the
@@ -79,8 +82,10 @@ def build(build_dir, qubes=False):
         shutil.copy2(sdist_path, build_dir / "SOURCES" / sdist_name)
         sdist_path.unlink()
     finally:
-        if stash_container:
-            container_tar_gz_bak.rename(container_tar_gz)
+        if tessdata_bak.exists():
+            tessdata_bak.rename(tessdata)
+        if stash_container and container_tar_bak.exists():
+            container_tar_bak.rename(container_tar)

     print("* Building RPM package")
     cmd = [
@@ -98,7 +103,7 @@ def build(build_dir, qubes=False):
     if qubes:
         cmd += [
             "--define",
-            f"_qubes 1",
+            "_qubes 1",
         ]
     subprocess.run(cmd, check=True)
@@ -18,7 +18,7 @@
 #
 # * Qubes packages include some extra files under /etc/qubes-rpc, whereas
 #   regular RPM packages include the container image under
-#   /usr/share/container.tar.gz
+#   /usr/share/container.tar
 # * Qubes packages have some extra dependencies.
 # 3. It is best to consume this SPEC file using the `install/linux/build-rpm.py`
 #    script, which handles the necessary scaffolding for building the package.
@@ -32,7 +32,7 @@ Name:           dangerzone-qubes
 Name:           dangerzone
 %endif

-Version:        0.7.1
+Version:        0.9.0
 Release:        1%{?dist}
 Summary:        Take potentially dangerous PDFs, office documents, or images and convert them to safe PDFs
@@ -72,13 +72,12 @@ BuildRequires:  python3-devel
 %if 0%{?_qubes}
 # Qubes-only requirements (server-side)
 Requires:       python3-magic
-Requires:       python3-PyMuPDF
 Requires:       libreoffice
 # Qubes-only requirements (client-side)
 Requires:       GraphicsMagick
 Requires:       ghostscript
 Requires:       poppler-utils
 Requires:       tesseract
+%else
+# Container-only requirements
+Requires:       podman
 %endif

 # Explicitly require every tesseract model:
 # See: https://github.com/freedomofpress/dangerzone/issues/431
 Requires:       tesseract-langpack-afr
@@ -204,10 +203,6 @@ Requires:       tesseract-langpack-uzb_cyrl
 Requires:       tesseract-langpack-vie
 Requires:       tesseract-langpack-yid
 Requires:       tesseract-langpack-yor
-%else
-# Container-only requirements
-Requires:       podman
-%endif

 %description
 Dangerzone is an open source desktop application that takes potentially
install/linux/debian-vendor-pymupdf.py — 59 lines (new executable file)
@@ -0,0 +1,59 @@
#!/usr/bin/env python3

import argparse
import logging
import os
import subprocess
import sys
from pathlib import Path

logger = logging.getLogger(__name__)

DZ_VENDOR_DIR = Path("./dangerzone/vendor")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dest",
        default=DZ_VENDOR_DIR,
        help="The destination directory for the vendored packages (default: ./dangerzone/vendor)",
    )
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    logger.info("Getting PyMuPDF deps as requirements.txt")
    cmd = ["poetry", "export", "--only", "debian"]
    container_requirements_txt = subprocess.check_output(cmd)

    logger.info(f"Vendoring PyMuPDF under '{args.dest}'")
    # We prefer to call the CLI version of `pip`, instead of importing it directly, as
    # instructed here:
    # https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program
    cmd = [
        sys.executable,
        "-m",
        "pip",
        "install",
        "--no-cache-dir",
        "--no-compile",
        "--target",
        args.dest,
        "--requirement",
        "/proc/self/fd/0",  # XXX: pip does not read requirements.txt from stdin
    ]
    subprocess.run(cmd, check=True, input=container_requirements_txt)

    if not os.listdir(args.dest):
        logger.error(f"Failed to vendor PyMuPDF under '{args.dest}'")
        return 1

    logger.info(f"Successfully vendored PyMuPDF under '{args.dest}'")


if __name__ == "__main__":
    sys.exit(main())
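The `/proc/self/fd/0` trick above is equivalent to piping the Poetry export straight into pip by hand, roughly (illustrative):

    poetry export --only debian | python3 -m pip install --no-cache-dir \
        --no-compile --target dangerzone/vendor --requirement /proc/self/fd/0
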
@@ -1,40 +0,0 @@ (file deleted)
#!/bin/bash

# Development script for installing Podman on Ubuntu Focal. Mainly to be used as
# part of our CI pipelines, where we may install Podman on environments that
# don't have sudo.

set -e

if [[ "$EUID" -ne 0 ]]; then
    SUDO=sudo
else
    SUDO=
fi

provide() {
    $SUDO apt-get update
    $SUDO apt-get install curl wget gnupg2 -y
    source /etc/os-release
    $SUDO sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' \
        > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
    wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_${VERSION_ID}/Release.key -O- \
        | $SUDO apt-key add -
    $SUDO apt-get update -qq -y
}

install() {
    $SUDO apt-get -qq --yes install podman
    podman --version
}

if [[ "$1" == "--repo-only" ]]; then
    provide
elif [[ "$1" == "" ]]; then
    provide
    install
else
    echo "Unexpected argument: $1"
    echo "Usage: $0 [--repo-only]"
    exit 1
fi
@@ -10,11 +10,5 @@
     <true/>
     <key>com.apple.security.network.client</key>
     <true/>
-    <key>com.apple.security.network.server</key>
-    <true/>
-    <key>com.apple.security.hypervisor</key>
-    <true/>
-    <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
-    <true/>
 </dict>
 </plist>
@@ -2,26 +2,38 @@ REM delete old dist and build files
 rmdir /s /q dist
 rmdir /s /q build

-REM build the exe
+REM build the gui and cli exe
 python .\setup-windows.py build

 REM code sign dangerzone.exe
-signtool.exe sign /v /d "Dangerzone" /a /n "Freedom of the Press Foundation" /fd sha1 /t http://time.certum.pl/ build\exe.win-amd64-3.12\dangerzone.exe
-signtool.exe sign /v /d "Dangerzone" /a /n "Freedom of the Press Foundation" /fd sha1 /t http://time.certum.pl/ build\exe.win-amd64-3.12\dangerzone-cli.exe
+signtool.exe sign /v /d "Dangerzone" /a /n "Freedom of the Press Foundation" /fd sha256 /t http://time.certum.pl/ build\exe.win-amd64-3.12\dangerzone.exe

-REM build the wix file
-python install\windows\build-wxs.py > build\Dangerzone.wxs
+REM verify the signature of dangerzone.exe
+signtool.exe verify /pa build\exe.win-amd64-3.12\dangerzone.exe
+
+REM code sign dangerzone-cli.exe
+signtool.exe sign /v /d "Dangerzone" /a /n "Freedom of the Press Foundation" /fd sha256 /t http://time.certum.pl/ build\exe.win-amd64-3.12\dangerzone-cli.exe
+
+REM verify the signature of dangerzone-cli.exe
+signtool.exe verify /pa build\exe.win-amd64-3.12\dangerzone-cli.exe
+
+REM build the wxs file
+python install\windows\build-wxs.py

 REM build the msi package
 cd build
-candle.exe Dangerzone.wxs
-light.exe -ext WixUIExtension Dangerzone.wixobj
+wix build -arch x64 -ext WixToolset.UI.wixext .\Dangerzone.wxs -out Dangerzone.msi

-REM code sign dangerzone.msi
-insignia.exe -im Dangerzone.msi
-signtool.exe sign /v /d "Dangerzone" /a /n "Freedom of the Press Foundation" /fd sha1 /t http://time.certum.pl/ Dangerzone.msi
+REM validate Dangerzone.msi
+wix msi validate Dangerzone.msi

-REM moving Dangerzone.msi to dist
+REM code sign Dangerzone.msi
+signtool.exe sign /v /d "Dangerzone" /a /n "Freedom of the Press Foundation" /fd sha256 /t http://time.certum.pl/ Dangerzone.msi
+
+REM verify the signature of Dangerzone.msi
+signtool.exe verify /pa Dangerzone.msi
+
+REM move Dangerzone.msi to dist
 cd ..
 mkdir dist
 move build\Dangerzone.msi dist
@@ -4,114 +4,75 @@ import uuid
 import xml.etree.ElementTree as ET


-def build_data(dirname, dir_prefix, id_, name):
+def build_data(base_path, path_prefix, dir_id, dir_name):
     data = {
-        "id": id_,
-        "name": name,
+        "directory_name": dir_name,
+        "directory_id": dir_id,
         "files": [],
         "dirs": [],
     }

-    for basename in os.listdir(dirname):
-        filename = os.path.join(dirname, basename)
-        if os.path.isfile(filename):
-            data["files"].append(os.path.join(dir_prefix, basename))
-        elif os.path.isdir(filename):
-            if id_ == "INSTALLDIR":
-                id_prefix = "Folder"
+    if dir_id == "INSTALLFOLDER":
+        data["component_id"] = "ApplicationFiles"
+    else:
+        data["component_id"] = "Component" + dir_id
+    data["component_guid"] = str(uuid.uuid4()).upper()
+
+    for entry in os.listdir(base_path):
+        entry_path = os.path.join(base_path, entry)
+        if os.path.isfile(entry_path):
+            data["files"].append(os.path.join(path_prefix, entry))
+        elif os.path.isdir(entry_path):
+            if dir_id == "INSTALLFOLDER":
+                next_dir_prefix = "Folder"
             else:
-                id_prefix = id_
+                next_dir_prefix = dir_id

-            # Skip lib/PySide6/examples folder due to illegal file names
-            if "\\build\\exe.win-amd64-3.12\\lib\\PySide6\\examples" in dirname:
+            # Skip lib/PySide6/examples folder due to illegal file names
+            if "\\build\\exe.win-amd64-3.12\\lib\\PySide6\\examples" in base_path:
                 continue

-            # Skip lib/PySide6/qml/QtQuick folder due to illegal file names
+            # Skip lib/PySide6/qml/QtQuick folder due to illegal file names
             # XXX Since we're not using Qml it should be no problem
-            if "\\build\\exe.win-amd64-3.12\\lib\\PySide6\\qml\\QtQuick" in dirname:
+            if "\\build\\exe.win-amd64-3.12\\lib\\PySide6\\qml\\QtQuick" in base_path:
                 continue

-            id_value = f"{id_prefix}{basename.capitalize().replace('-', '_')}"
-            data["dirs"].append(
-                build_data(
-                    os.path.join(dirname, basename),
-                    os.path.join(dir_prefix, basename),
-                    id_value,
-                    basename,
-                )
-            )
+            next_dir_id = next_dir_prefix + entry.capitalize().replace("-", "_")
+            subdata = build_data(
+                os.path.join(base_path, entry),
+                os.path.join(path_prefix, entry),
+                next_dir_id,
+                entry,
+            )

-    if len(data["files"]) > 0:
-        if id_ == "INSTALLDIR":
-            data["component_id"] = "ApplicationFiles"
-        else:
-            data["component_id"] = "FolderComponent" + id_[len("Folder") :]
-        data["component_guid"] = str(uuid.uuid4())
+            # Add the subdirectory only if it contains files or subdirectories
+            if subdata["files"] or subdata["dirs"]:
+                data["dirs"].append(subdata)

     return data


-def build_dir_xml(root, data):
+def build_directory_xml(root, data):
     attrs = {}
-    if "id" in data:
-        attrs["Id"] = data["id"]
-    if "name" in data:
-        attrs["Name"] = data["name"]
-    el = ET.SubElement(root, "Directory", attrs)
+    attrs["Id"] = data["directory_id"]
+    attrs["Name"] = data["directory_name"]
+    directory_el = ET.SubElement(root, "Directory", attrs)
     for subdata in data["dirs"]:
-        build_dir_xml(el, subdata)
-
-    # If this is the ProgramMenuFolder, add the menu component
-    if "id" in data and data["id"] == "ProgramMenuFolder":
-        component_el = ET.SubElement(
-            el,
-            "Component",
-            Id="ApplicationShortcuts",
-            Guid="539e7de8-a124-4c09-aa55-0dd516aad7bc",
-        )
-        ET.SubElement(
-            component_el,
-            "Shortcut",
-            Id="ApplicationShortcut1",
-            Name="Dangerzone",
-            Description="Dangerzone",
-            Target="[INSTALLDIR]dangerzone.exe",
-            WorkingDirectory="INSTALLDIR",
-        )
-        ET.SubElement(
-            component_el,
-            "RegistryValue",
-            Root="HKCU",
-            Key="Software\Freedom of the Press Foundation\Dangerzone",
-            Name="installed",
-            Type="integer",
-            Value="1",
-            KeyPath="yes",
-        )
+        build_directory_xml(directory_el, subdata)


 def build_components_xml(root, data):
-    component_ids = []
-    if "component_id" in data:
-        component_ids.append(data["component_id"])
-
+    component_el = ET.SubElement(
+        root,
+        "Component",
+        Id=data["component_id"],
+        Guid=data["component_guid"],
+        Directory=data["directory_id"],
+    )
+    for filename in data["files"]:
+        ET.SubElement(component_el, "File", Source=filename)
     for subdata in data["dirs"]:
-        if "component_guid" in subdata:
-            dir_ref_el = ET.SubElement(root, "DirectoryRef", Id=subdata["id"])
-            component_el = ET.SubElement(
-                dir_ref_el,
-                "Component",
-                Id=subdata["component_id"],
-                Guid=subdata["component_guid"],
-            )
-            for filename in subdata["files"]:
-                file_el = ET.SubElement(
-                    component_el, "File", Source=filename, Id="file_" + uuid.uuid4().hex
-                )
-
-        component_ids += build_components_xml(root, subdata)
-
-    return component_ids
+        build_components_xml(root, subdata)


 def main():
@@ -125,114 +86,196 @@ def main():
     # -rc markers.
     version = f.read().strip().split("-")[0]

-    dist_dir = os.path.join(
+    build_dir = os.path.join(
         os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
         "build",
-        "exe.win-amd64-3.12",
     )

+    cx_freeze_dir = "exe.win-amd64-3.12"
+
+    dist_dir = os.path.join(build_dir, cx_freeze_dir)
+
     if not os.path.exists(dist_dir):
         print("You must build the dangerzone binary before running this")
         return

-    data = {
-        "id": "TARGETDIR",
-        "name": "SourceDir",
-        "dirs": [
-            {
-                "id": "ProgramFilesFolder",
-                "dirs": [],
-            },
-            {
-                "id": "ProgramMenuFolder",
-                "dirs": [],
-            },
-        ],
-    }
-
-    data["dirs"][0]["dirs"].append(
-        build_data(
-            dist_dir,
-            "exe.win-amd64-3.12",
-            "INSTALLDIR",
-            "Dangerzone",
-        )
-    )
+    # Prepare data for WiX file harvesting from the output of cx_Freeze
+    data = build_data(
+        dist_dir,
+        cx_freeze_dir,
+        "INSTALLFOLDER",
+        "Dangerzone",
+    )

-    root_el = ET.Element("Wix", xmlns="http://schemas.microsoft.com/wix/2006/wi")
-    product_el = ET.SubElement(
-        root_el,
-        "Product",
-        Name="Dangerzone",
-        Manufacturer="Freedom of the Press Foundation",
-        Id="*",
-        UpgradeCode="$(var.ProductUpgradeCode)",
-        Language="1033",
-        Codepage="1252",
-        Version="$(var.ProductVersion)",
-    )
-    ET.SubElement(
-        product_el,
-        "Package",
-        Id="*",
-        Keywords="Installer",
-        Description="Dangerzone $(var.ProductVersion) Installer",
-        Manufacturer="Freedom of the Press Foundation",
-        InstallerVersion="100",
-        Languages="1033",
-        Compressed="yes",
-        SummaryCodepage="1252",
-    )
-    ET.SubElement(product_el, "Media", Id="1", Cabinet="product.cab", EmbedCab="yes")
-    ET.SubElement(
-        product_el, "Icon", Id="ProductIcon", SourceFile="..\\share\\dangerzone.ico"
-    )
-    ET.SubElement(product_el, "Property", Id="ARPPRODUCTICON", Value="ProductIcon")
+    # Add the Wix root element
+    wix_el = ET.Element(
+        "Wix",
+        {
+            "xmlns": "http://wixtoolset.org/schemas/v4/wxs",
+            "xmlns:ui": "http://wixtoolset.org/schemas/v4/wxs/ui",
+        },
+    )
+
+    # Add the Package element
+    package_el = ET.SubElement(
+        wix_el,
+        "Package",
+        Name="Dangerzone",
+        Manufacturer="Freedom of the Press Foundation",
+        UpgradeCode="12B9695C-965B-4BE0-BC33-21274E809576",
+        Language="1033",
+        Compressed="yes",
+        Codepage="1252",
+        Version=version,
+    )
+    ET.SubElement(
+        package_el,
+        "SummaryInformation",
+        Keywords="Installer",
+        Description="Dangerzone " + version + " Installer",
+        Codepage="1252",
+    )
+    ET.SubElement(package_el, "MediaTemplate", EmbedCab="yes")
+    ET.SubElement(
+        package_el, "Icon", Id="ProductIcon", SourceFile="..\\share\\dangerzone.ico"
+    )
+    ET.SubElement(package_el, "Property", Id="ARPPRODUCTICON", Value="ProductIcon")
     ET.SubElement(
-        product_el,
+        package_el,
         "Property",
         Id="ARPHELPLINK",
         Value="https://dangerzone.rocks",
     )
     ET.SubElement(
-        product_el,
+        package_el,
         "Property",
         Id="ARPURLINFOABOUT",
         Value="https://freedom.press",
     )
-    ET.SubElement(product_el, "UIRef", Id="WixUI_Minimal")
-    ET.SubElement(product_el, "UIRef", Id="WixUI_ErrorProgressText")
+    ET.SubElement(
+        package_el, "ui:WixUI", Id="WixUI_InstallDir", InstallDirectory="INSTALLFOLDER"
+    )
+    ET.SubElement(package_el, "UIRef", Id="WixUI_ErrorProgressText")
     ET.SubElement(
-        product_el,
+        package_el,
         "WixVariable",
         Id="WixUILicenseRtf",
         Value="..\\install\\windows\\license.rtf",
     )
     ET.SubElement(
-        product_el,
+        package_el,
         "WixVariable",
         Id="WixUIDialogBmp",
         Value="..\\install\\windows\\dialog.bmp",
     )
     ET.SubElement(
-        product_el,
+        package_el,
         "MajorUpgrade",
         AllowSameVersionUpgrades="yes",
         DowngradeErrorMessage="A newer version of [ProductName] is already installed. If you are sure you want to downgrade, remove the existing installation via Programs and Features.",
     )

-    build_dir_xml(product_el, data)
-    component_ids = build_components_xml(product_el, data)
+    # Workaround for an issue after upgrading from WiX Toolset v3 to v5 where the previous
+    # version of Dangerzone is not uninstalled during the upgrade by checking if the older installation
+    # exists in "C:\Program Files (x86)\Dangerzone".
+    #
+    # Also handle a special case for Dangerzone 0.8.0 which allows choosing the install location
+    # during install by checking if the registry key for it exists.
+    #
+    # Note that this seems to allow installing Dangerzone 0.8.0 after installing Dangerzone from this branch.
+    # In this case the installer errors until Dangerzone 0.8.0 is uninstalled again.
+    #
+    # TODO: Revert this once we are reasonably certain there aren't too many affected Dangerzone installations.
+    find_old_el = ET.SubElement(package_el, "Property", Id="OLDDANGERZONEFOUND")
+    directory_search_el = ET.SubElement(
+        find_old_el,
+        "DirectorySearch",
+        Id="dangerzone_install_folder",
+        Path="C:\\Program Files (x86)\\Dangerzone",
+    )
+    ET.SubElement(directory_search_el, "FileSearch", Name="dangerzone.exe")
+    registry_search_el = ET.SubElement(package_el, "Property", Id="DANGERZONE08FOUND")
+    ET.SubElement(
+        registry_search_el,
+        "RegistrySearch",
+        Root="HKLM",
+        Key="SOFTWARE\\WOW6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\{03C2D2B2-9955-4AED-831F-DA4E67FC0FDB}",
+        Name="DisplayName",
+        Type="raw",
+    )
+    ET.SubElement(
+        registry_search_el,
+        "RegistrySearch",
+        Root="HKLM",
+        Key="SOFTWARE\\WOW6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\{8AAC0808-3556-4164-9D15-6EC1FB673AB2}",
+        Name="DisplayName",
+        Type="raw",
+    )
+    ET.SubElement(
+        package_el,
+        "Launch",
+        Condition="NOT OLDDANGERZONEFOUND AND NOT DANGERZONE08FOUND",
+        Message='A previous version of [ProductName] is already installed. Please uninstall it from "Apps & Features" before proceeding with the installation.',
+    )

-    feature_el = ET.SubElement(product_el, "Feature", Id="DefaultFeature", Level="1")
-    for component_id in component_ids:
-        ET.SubElement(feature_el, "ComponentRef", Id=component_id)
+    # Add the ProgramMenuFolder StandardDirectory
+    programmenufolder_el = ET.SubElement(
+        package_el,
+        "StandardDirectory",
+        Id="ProgramMenuFolder",
+    )
+    # Add a shortcut for Dangerzone in the Start menu
+    shortcut_el = ET.SubElement(
+        programmenufolder_el,
+        "Component",
+        Id="ApplicationShortcuts",
+        Guid="539E7DE8-A124-4C09-AA55-0DD516AAD7BC",
+    )
+    ET.SubElement(
+        shortcut_el,
+        "Shortcut",
+        Id="DangerzoneStartMenuShortcut",
+        Name="Dangerzone",
+        Description="Dangerzone",
+        Target="[INSTALLFOLDER]dangerzone.exe",
+        WorkingDirectory="INSTALLFOLDER",
+    )
+    ET.SubElement(
+        shortcut_el,
+        "RegistryValue",
+        Root="HKCU",
+        Key="Software\\Freedom of the Press Foundation\\Dangerzone",
+        Name="installed",
+        Type="integer",
+        Value="1",
+        KeyPath="yes",
+    )
+
+    # Add the ProgramFilesFolder StandardDirectory
+    programfilesfolder_el = ET.SubElement(
+        package_el,
+        "StandardDirectory",
+        Id="ProgramFiles64Folder",
+    )
+
+    # Create the directory structure for the installed product
+    build_directory_xml(programfilesfolder_el, data)
+
+    # Create a component group for application components
+    applicationcomponents_el = ET.SubElement(
+        package_el, "ComponentGroup", Id="ApplicationComponents"
+    )
+    # Populate the application components group with components for the installed package
+    build_components_xml(applicationcomponents_el, data)
+
+    # Add the Feature element
+    feature_el = ET.SubElement(package_el, "Feature", Id="DefaultFeature", Level="1")
+    ET.SubElement(feature_el, "ComponentGroupRef", Id="ApplicationComponents")
+    ET.SubElement(feature_el, "ComponentRef", Id="ApplicationShortcuts")

-    print('<?xml version="1.0" encoding="windows-1252"?>')
-    print(f'<?define ProductVersion = "{version}"?>')
-    print('<?define ProductUpgradeCode = "12b9695c-965b-4be0-bc33-21274e809576"?>')
-    ET.indent(root_el)
-    print(ET.tostring(root_el).decode())
+    ET.indent(wix_el, space="    ")
+
+    with open(os.path.join(build_dir, "Dangerzone.wxs"), "w") as wxs_file:
+        wxs_file.write(ET.tostring(wix_el).decode())


 if __name__ == "__main__":
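For orientation, `build_data()` harvests the cx_Freeze output into a nested dict that the XML builders then walk. A sketch of its shape for a hypothetical tree (field values are illustrative):

    # build_data(dist_dir, cx_freeze_dir, "INSTALLFOLDER", "Dangerzone") returns:
    # {
    #     "directory_id": "INSTALLFOLDER",
    #     "directory_name": "Dangerzone",
    #     "component_id": "ApplicationFiles",
    #     "component_guid": "<uppercased uuid4>",
    #     "files": ["exe.win-amd64-3.12\\dangerzone.exe", ...],
    #     "dirs": [{"directory_id": "FolderShare", "directory_name": "share", ...}],
    # }
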
poetry.lock — 1535 lines changed (generated; diff suppressed because it is too large)
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "dangerzone"
-version = "0.7.1"
+version = "0.9.0"
 description = "Take potentially dangerous PDFs, office documents, or images and convert them to safe PDFs"
 authors = ["Freedom of the Press Foundation <info@freedom.press>", "Micah Lee <micah.lee@theintercept.com>"]
 license = "AGPL-3.0"
@@ -13,10 +13,11 @@ include = [
 ]

 [tool.poetry.dependencies]
-python = ">=3.9,<3.13"
+python = ">=3.9,<3.14"
 click = "*"
-appdirs = "*"
+platformdirs = "*"
 PySide6 = "^6.7.1"
 PyMuPDF = "^1.23.3" # The version in Fedora 39
 colorama = "*"
 pyxdg = {version = "*", platform = "linux"}
 requests = "*"
@@ -30,17 +31,21 @@ dangerzone-cli = 'dangerzone:main'
 # Dependencies required for packaging the code on various platforms.
 [tool.poetry.group.package.dependencies]
 setuptools = "*"
-cx_freeze = {version = "^7.1.1", platform = "win32"}
+cx_freeze = {version = "^7.2.5", platform = "win32"}
 pywin32 = {version = "*", platform = "win32"}
 pyinstaller = {version = "*", platform = "darwin"}
+doit = "^0.36.0"
+jinja2-cli = "^0.8.2"

 # Dependencies required for linting the code.
 [tool.poetry.group.lint.dependencies]
-black = "*"
-isort = "*"
 click = "*" # Install click so mypy is able to reason about it.
 mypy = "*"
+ruff = "*"
 types-colorama = "*"
-types-PySide2 = "*"
 types-Markdown = "*"
 types-pygments = "*"
 types-requests = "*"

 # Dependencies required for testing the code.
@@ -50,24 +55,24 @@ pytest-mock = "^3.10.0"
 pytest-qt = "^4.2.0"
 pytest-cov = "^5.0.0"
 strip-ansi = "*"
 pytest-subprocess = "^1.5.2"
 pytest-rerunfailures = "^14.0"
+numpy = "2.0" # bump when we remove python 3.9 support

-[tool.poetry.group.qubes.dependencies]
-pymupdf = "^1.23.6"
+[tool.poetry.group.debian.dependencies]
+pymupdf = "^1.24.11"

-[tool.poetry.group.container.dependencies]
-# Starting with 1.24.6, pymupdf started shipping wheels, but only for x86_64.
-# Currently, pymupdfb is removed from the generated container's requirements.txt
-# because we are building the binary manually.
-#
-# We plan on using the wheels when they are provided for all the supported
-# platforms. Until then, we pin pymupdf.
-pymupdf = "1.24.5"
+[tool.poetry.group.dev.dependencies]
+httpx = "^0.27.2"

-[tool.isort]
-profile = "black"
-skip_gitignore = true
-# This is necessary due to https://github.com/PyCQA/isort/issues/1835
-follow_links = false
+[tool.doit]
+verbosity = 3
+
+[tool.ruff.lint]
+select = [
+    # isort
+    "I",
+]

 [build-system]
 requires = ["poetry-core>=1.2.0"]
@@ -4,7 +4,6 @@ from cx_Freeze import Executable, setup
 with open("share/version.txt") as f:
     version = f.read().strip()

-packages = ["dangerzone", "dangerzone.gui"]

 setup(
     name="dangerzone",
@@ -12,10 +11,9 @@ setup(
     # On Windows description will show as the app's name in the "Open With" menu. See:
     # https://github.com/freedomofpress/dangerzone/issues/283#issuecomment-1365148805
     description="Dangerzone",
-    packages=packages,
     options={
         "build_exe": {
-            "packages": packages,
+            "packages": ["dangerzone", "dangerzone.gui", "pymupdf._wxcolors"],
             "excludes": ["test", "tkinter"],
             "include_files": [("share", "share"), ("LICENSE", "LICENSE")],
             "include_msvcr": True,
@@ -48,3 +48,27 @@ QLabel.version {
     font-size: 20px;
     padding-bottom: 5px; /* align with 'dangerzone' font */
 }
+
+QTextEdit[style="traceback"] {
+    font-family: Consolas, Monospace;
+    font-size: 12px;
+    background-color: #ffffff;
+    color: #000000;
+    padding: 10px;
+}
+
+QLabel[style="warning"] {
+    background-color: #FFF3CD;
+    color: #856404;
+    border: 1px solid #FFEEBA;
+    border-radius: 4px;
+    padding: 10px;
+    margin: 10px;
+}
+
+MainWindow[OSColorMode="dark"] QLabel[style="warning"] {
+    background-color: #332D00;
+    color: #FFD970;
+    border-color: #665A00;
+}
@@ -1 +1 @@
-0.7.1
+0.9.0
stdeb.cfg — 11 lines (file deleted)
@@ -1,11 +0,0 @@
[DEFAULT]
Package3: dangerzone
Depends3: podman, python3, python3-pyside2.qtcore, python3-pyside2.qtgui, python3-pyside2.qtwidgets, python3-pyside2.qtsvg, python3-appdirs, python3-click, python3-xdg, python3-colorama, python3-requests, python3-markdown, python3-packaging
Build-Depends: dh-python, python3, python3-setuptools, python3-stdeb
Suite: bionic
X-Python3-Version: >= 3.8
# Do not trigger testing when we build the package. Assume that the user
# has tested the package already. For more info, see:
# https://github.com/freedomofpress/dangerzone/issues/292#issuecomment-1349967888
Setup-Env-Vars: DEB_BUILD_OPTIONS=nocheck
Maintainer: Freedom of the Press Foundation <info@freedom.press>
@@ -122,7 +122,7 @@ test_docs_compressed_dir = Path(__file__).parent.joinpath(SAMPLE_COMPRESSED_DIRE

 test_docs = [
     p
-    for p in test_docs_dir.rglob("*")
+    for p in test_docs_dir.glob("*")
     if p.is_file()
     and not (p.name.endswith(SAFE_EXTENSION) or p.name.startswith("sample_bad"))
 ]

@@ -160,3 +160,31 @@ def for_each_external_doc(glob_pattern: str = "*") -> Callable:

 class TestBase:
     sample_doc = str(test_docs_dir.joinpath(BASIC_SAMPLE_PDF))
+
+
+def pytest_configure(config: pytest.Config) -> None:
+    config.addinivalue_line(
+        "markers",
+        "reference_generator: Used to mark the test cases that regenerate reference documents",
+    )
+
+
+def pytest_addoption(parser: pytest.Parser) -> None:
+    parser.addoption(
+        "--generate-reference-pdfs",
+        action="store_true",
+        default=False,
+        help="Regenerate reference PDFs",
+    )
+
+
+def pytest_collection_modifyitems(
+    config: pytest.Config, items: List[pytest.Item]
+) -> None:
+    if not config.getoption("--generate-reference-pdfs"):
+        skip_generator = pytest.mark.skip(
+            reason="Only run when --generate-reference-pdfs is provided"
+        )
+        for item in items:
+            if "reference_generator" in item.keywords:
+                item.add_marker(skip_generator)
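A hypothetical test that opts in to the marker wired up above; it is skipped unless pytest runs with `--generate-reference-pdfs`:

    @pytest.mark.reference_generator
    def test_regenerate_reference_pdfs() -> None:
        ...  # rebuild the reference documents here
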
@@ -21,34 +21,25 @@ def get_qt_app() -> Application:

 def generate_isolated_updater(
     tmp_path: Path,
-    monkeypatch: MonkeyPatch,
-    app_mocker: Optional[MockerFixture] = None,
+    mocker: MockerFixture,
+    mock_app: bool = False,
 ) -> UpdaterThread:
     """Generate an Updater class with its own settings."""
-    if app_mocker:
-        app = app_mocker.MagicMock()
-    else:
-        app = get_qt_app()
+    app = mocker.MagicMock() if mock_app else get_qt_app()

     dummy = Dummy()
-    # XXX: We can monkey-patch global state without wrapping it in a context manager, or
-    # worrying that it will leak between tests, for two reasons:
-    #
-    # 1. Parallel tests in PyTest take place in different processes.
-    # 2. The monkeypatch fixture tears down the monkey-patch after each test ends.
-    monkeypatch.setattr(util, "get_config_dir", lambda: tmp_path)
+    mocker.patch("dangerzone.settings.get_config_dir", return_value=tmp_path)

     dangerzone = DangerzoneGui(app, isolation_provider=dummy)
     updater = UpdaterThread(dangerzone)
     return updater


 @pytest.fixture
-def updater(
-    tmp_path: Path, monkeypatch: MonkeyPatch, mocker: MockerFixture
-) -> UpdaterThread:
-    return generate_isolated_updater(tmp_path, monkeypatch, mocker)
+def updater(tmp_path: Path, mocker: MockerFixture) -> UpdaterThread:
+    return generate_isolated_updater(tmp_path, mocker, mock_app=True)


 @pytest.fixture
-def qt_updater(tmp_path: Path, monkeypatch: MonkeyPatch) -> UpdaterThread:
-    return generate_isolated_updater(tmp_path, monkeypatch)
+def qt_updater(tmp_path: Path, mocker: MockerFixture) -> UpdaterThread:
+    return generate_isolated_updater(tmp_path, mocker, mock_app=False)
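A sketch of how a test would consume these fixtures (the test body is hypothetical):

    def test_updater_is_isolated(updater: UpdaterThread) -> None:
        # Settings were redirected to tmp_path by generate_isolated_updater,
        # so the test never touches the real user config.
        assert updater is not None
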
@@ -7,7 +7,7 @@ import pytest
 from dangerzone.gui.logic import DangerzoneGui

 if platform.system() == "Linux":
-    from xdg.DesktopEntry import DesktopEntry
+    from xdg.DesktopEntry import DesktopEntry, ParsingError


 @pytest.mark.skipif(platform.system() != "Linux", reason="Linux-only test")
@@ -33,17 +33,19 @@ def test_order_mime_handers() -> None:
         "LibreOffice",
     ]

-    with mock.patch(
-        "subprocess.check_output", return_value=b"libreoffice-draw.desktop"
-    ) as mock_default_mime_hander, mock.patch(
-        "os.listdir",
-        side_effect=[
-            ["org.gnome.Evince.desktop"],
-            ["org.pwmt.zathura-pdf-mupdf.desktop"],
-            ["libreoffice-draw.desktop"],
-        ],
-    ) as mock_list, mock.patch(
-        "dangerzone.gui.logic.DesktopEntry", return_value=mock_desktop
+    with (
+        mock.patch(
+            "subprocess.check_output", return_value=b"libreoffice-draw.desktop"
+        ) as mock_default_mime_hander,
+        mock.patch(
+            "os.listdir",
+            side_effect=[
+                ["org.gnome.Evince.desktop"],
+                ["org.pwmt.zathura-pdf-mupdf.desktop"],
+                ["libreoffice-draw.desktop"],
+            ],
+        ) as mock_list,
+        mock.patch("dangerzone.gui.logic.DesktopEntry", return_value=mock_desktop),
     ):
         dz = DangerzoneGui(mock_app, dummy)
@@ -77,18 +79,20 @@ def test_mime_handers_succeeds_no_default_found() -> None:
         "LibreOffice",
     ]

-    with mock.patch(
-        "subprocess.check_output",
-        side_effect=subprocess.CalledProcessError(1, "Oh no, xdg-mime error!)"),
-    ) as mock_default_mime_hander, mock.patch(
-        "os.listdir",
-        side_effect=[
-            ["org.gnome.Evince.desktop"],
-            ["org.pwmt.zathura-pdf-mupdf.desktop"],
-            ["libreoffice-draw.desktop"],
-        ],
-    ) as mock_list, mock.patch(
-        "dangerzone.gui.logic.DesktopEntry", return_value=mock_desktop
+    with (
+        mock.patch(
+            "subprocess.check_output",
+            side_effect=subprocess.CalledProcessError(1, "Oh no, xdg-mime error!)"),
+        ) as mock_default_mime_hander,
+        mock.patch(
+            "os.listdir",
+            side_effect=[
+                ["org.gnome.Evince.desktop"],
+                ["org.pwmt.zathura-pdf-mupdf.desktop"],
+                ["libreoffice-draw.desktop"],
+            ],
+        ) as mock_list,
+        mock.patch("dangerzone.gui.logic.DesktopEntry", return_value=mock_desktop),
     ):
         dz = DangerzoneGui(mock_app, dummy)
@@ -98,3 +102,28 @@ def test_mime_handers_succeeds_no_default_found() -> None:
     mock_list.assert_called()
     assert len(dz.pdf_viewers) == 3
     assert dz.pdf_viewers.popitem(last=False)[0] == "Evince"
+
+
+@pytest.mark.skipif(platform.system() != "Linux", reason="Linux-only test")
+def test_malformed_desktop_entry_is_catched() -> None:
+    """
+    Given a failure to read a desktop entry,
+    ensure that the exception is not thrown to the end-user.
+    """
+    mock_app = mock.MagicMock()
+    dummy = mock.MagicMock()
+
+    with (
+        mock.patch("dangerzone.gui.logic.DesktopEntry") as mock_desktop,
+        mock.patch(
+            "os.listdir",
+            side_effect=[
+                ["malformed.desktop", "another.desktop"],
+                [],
+                [],
+            ],
+        ),
+    ):
+        mock_desktop.side_effect = ParsingError("Oh noes!", "malformed.desktop")
+        DangerzoneGui(mock_app, dummy)
+        mock_desktop.assert_called()
@@ -1,5 +1,6 @@
 import os
+import pathlib
 import platform
 import shutil
 import time
 from typing import List
@@ -8,17 +9,24 @@ from pytest import MonkeyPatch, fixture
 from pytest_mock import MockerFixture
 from pytestqt.qtbot import QtBot

+from dangerzone import errors
 from dangerzone.document import Document
 from dangerzone.gui import MainWindow
 from dangerzone.gui import main_window as main_window_module
 from dangerzone.gui import updater as updater_module
 from dangerzone.gui.logic import DangerzoneGui
-from dangerzone.gui.main_window import (  # import Pyside related objects from here to avoid duplicating import logic.
+
+# import Pyside related objects from here to avoid duplicating import logic.
+from dangerzone.gui.main_window import (
     ContentWidget,
+    InstallContainerThread,
     QtCore,
     QtGui,
     WaitingWidgetContainer,
 )
 from dangerzone.gui.updater import UpdateReport, UpdaterThread
+from dangerzone.isolation_provider.container import Container
 from dangerzone.isolation_provider.dummy import Dummy

 from .test_updater import assert_report_equal, default_updater_settings
@@ -492,3 +500,144 @@ def test_drop_1_invalid_2_valid_documents(
    content_widget.doc_selection_wrapper.dropEvent(
        drag_1_invalid_and_2_valid_files_event
    )


def test_not_available_container_tech_exception(
    qtbot: QtBot, mocker: MockerFixture
) -> None:
    # Setup
    mock_app = mocker.MagicMock()
    dummy = Dummy()
    fn = mocker.patch.object(dummy, "is_available")
    fn.side_effect = errors.NotAvailableContainerTechException(
        "podman", "podman image ls logs"
    )

    dz = DangerzoneGui(mock_app, dummy)
    widget = WaitingWidgetContainer(dz)
    qtbot.addWidget(widget)

    # Assert that the error is displayed in the GUI
    if platform.system() in ["Darwin", "Windows"]:
        assert "Dangerzone requires Docker Desktop" in widget.label.text()
    else:
        assert "Podman is installed but cannot run properly" in widget.label.text()

    assert "podman image ls logs" in widget.traceback.toPlainText()


def test_no_container_tech_exception(qtbot: QtBot, mocker: MockerFixture) -> None:
    # Setup
    mock_app = mocker.MagicMock()
    dummy = mocker.MagicMock()

    # Raise
    dummy.is_available.side_effect = errors.NoContainerTechException("podman")

    dz = DangerzoneGui(mock_app, dummy)
    widget = WaitingWidgetContainer(dz)
    qtbot.addWidget(widget)

    # Assert that the error is displayed in the GUI
    if platform.system() in ["Darwin", "Windows"]:
        assert "Dangerzone requires Docker Desktop" in widget.label.text()
    else:
        assert "Dangerzone requires Podman" in widget.label.text()


def test_installation_failure_exception(qtbot: QtBot, mocker: MockerFixture) -> None:
    """Ensures that if an exception is raised during image installation,
    it is shown in the GUI.
    """
    # Setup install to raise an exception
    mock_app = mocker.MagicMock()
    dummy = mocker.MagicMock(spec=Container)
    dummy.install.side_effect = RuntimeError("Error during install")

    dz = DangerzoneGui(mock_app, dummy)

    # Mock the InstallContainerThread to call the original run method instead of
    # starting a new thread
    mocker.patch.object(InstallContainerThread, "start", InstallContainerThread.run)
    widget = WaitingWidgetContainer(dz)
    qtbot.addWidget(widget)

    assert dummy.install.call_count == 1

    assert "Error during install" in widget.traceback.toPlainText()
    assert "RuntimeError" in widget.traceback.toPlainText()


def test_installation_failure_return_false(qtbot: QtBot, mocker: MockerFixture) -> None:
    """Ensures that if the installation returns False, the error is shown in the GUI."""
    # Setup install to return False
    mock_app = mocker.MagicMock()
    dummy = mocker.MagicMock(spec=Container)
    dummy.install.return_value = False

    dz = DangerzoneGui(mock_app, dummy)

    # Mock the InstallContainerThread to call the original run method instead of
    # starting a new thread
    mocker.patch.object(InstallContainerThread, "start", InstallContainerThread.run)
    widget = WaitingWidgetContainer(dz)
    qtbot.addWidget(widget)

    assert dummy.install.call_count == 1

    assert "the following error occured" in widget.label.text()
    assert "The image cannot be found" in widget.traceback.toPlainText()


def test_up_to_date_docker_desktop_does_nothing(
    qtbot: QtBot, mocker: MockerFixture
) -> None:
    # Setup install to return False
    mock_app = mocker.MagicMock()
    dummy = mocker.MagicMock(spec=Container)
    dummy.check_docker_desktop_version.return_value = (True, "1.0.0")
    dz = DangerzoneGui(mock_app, dummy)

    window = MainWindow(dz)
    qtbot.addWidget(window)

    menu_actions = window.hamburger_button.menu().actions()
    assert "Docker Desktop should be upgraded" not in [
        a.toolTip() for a in menu_actions
    ]


def test_outdated_docker_desktop_displays_warning(
    qtbot: QtBot, mocker: MockerFixture
) -> None:
    # Setup install to return False
    mock_app = mocker.MagicMock()
    dummy = mocker.MagicMock(spec=Container)
    dummy.check_docker_desktop_version.return_value = (False, "1.0.0")

    dz = DangerzoneGui(mock_app, dummy)

    load_svg_spy = mocker.spy(main_window_module, "load_svg_image")

    window = MainWindow(dz)
    qtbot.addWidget(window)

    menu_actions = window.hamburger_button.menu().actions()
    assert menu_actions[0].toolTip() == "Docker Desktop should be upgraded"

    # Check that the hamburger icon has changed with the expected SVG image.
    assert load_svg_spy.call_count == 4
    assert (
        load_svg_spy.call_args_list[2].args[0] == "hamburger_menu_update_dot_error.svg"
    )

    alert_spy = mocker.spy(window.alert, "launch")

    # Clicking the menu item should open a warning message
    def _check_alert_displayed() -> None:
        alert_spy.assert_any_call()
        if window.alert:
            window.alert.close()

    QtCore.QTimer.singleShot(0, _check_alert_displayed)
    menu_actions[0].trigger()

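The two installation-failure tests above patch `InstallContainerThread.start` with the unbound `InstallContainerThread.run`, so the thread body executes synchronously inside the test. A minimal sketch of that trick, assuming a plain `threading.Thread` subclass instead of a Qt thread (names here are illustrative):

import threading

from pytest_mock import MockerFixture


class Worker(threading.Thread):
    """Stand-in for a worker thread; run() normally executes off-thread."""

    def run(self) -> None:
        self.done = True


def test_start_runs_inline(mocker: MockerFixture) -> None:
    # Swap start() for the unbound run(): worker.start() now executes run()
    # in the test's own thread, so no waiting or synchronization is needed.
    mocker.patch.object(Worker, "start", Worker.run)
    worker = Worker()
    worker.start()
    assert worker.done
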
@@ -48,9 +48,7 @@ def test_default_updater_settings(updater: UpdaterThread) -> None:
    )


def test_pre_0_4_2_settings(
    tmp_path: Path, monkeypatch: MonkeyPatch, mocker: MockerFixture
) -> None:
def test_pre_0_4_2_settings(tmp_path: Path, mocker: MockerFixture) -> None:
    """Check settings of installations prior to 0.4.2.

    Check that installations that have been upgraded from a version < 0.4.2 to >= 0.4.2

@@ -58,7 +56,7 @@ def test_pre_0_4_2_settings(
    in their settings.json file.
    """
    save_settings(tmp_path, default_settings_0_4_1())
    updater = generate_isolated_updater(tmp_path, monkeypatch, mocker)
    updater = generate_isolated_updater(tmp_path, mocker, mock_app=True)
    assert (
        updater.dangerzone.settings.get_updater_settings() == default_updater_settings()
    )

@@ -83,12 +81,10 @@ def test_post_0_4_2_settings(
    # version is 0.4.3.
    expected_settings = default_updater_settings()
    expected_settings["updater_latest_version"] = "0.4.3"
    monkeypatch.setattr(
        settings, "get_version", lambda: expected_settings["updater_latest_version"]
    )
    monkeypatch.setattr(settings, "get_version", lambda: "0.4.3")

    # Ensure that the Settings class will correct the latest version field to 0.4.3.
    updater = generate_isolated_updater(tmp_path, monkeypatch, mocker)
    updater = generate_isolated_updater(tmp_path, mocker, mock_app=True)
    assert updater.dangerzone.settings.get_updater_settings() == expected_settings

    # Simulate an updater check that found a newer Dangerzone version (e.g., 0.4.4).

@@ -118,9 +114,7 @@ def test_linux_no_check(updater: UpdaterThread, monkeypatch: MonkeyPatch) -> Non
    assert updater.dangerzone.settings.get_updater_settings() == expected_settings


def test_user_prompts(
    updater: UpdaterThread, monkeypatch: MonkeyPatch, mocker: MockerFixture
) -> None:
def test_user_prompts(updater: UpdaterThread, mocker: MockerFixture) -> None:
    """Test prompting users to ask them if they want to enable update checks."""
    # First run
    #

@@ -370,8 +364,6 @@ def test_update_errors(
def test_update_check_prompt(
    qtbot: QtBot,
    qt_updater: UpdaterThread,
    monkeypatch: MonkeyPatch,
    mocker: MockerFixture,
) -> None:
    """Test that the prompt to enable update checks works properly."""
    # Force Dangerzone to check immediately for updates

@@ -7,7 +7,6 @@ from pytest_mock import MockerFixture
from dangerzone.conversion import errors
from dangerzone.document import Document
from dangerzone.isolation_provider import base
from dangerzone.isolation_provider.qubes import running_on_qubes

TIMEOUT_STARTUP = 60  # Timeout in seconds until the conversion sandbox starts.


@@ -29,7 +28,7 @@ class IsolationProviderTest:

        p = provider.start_doc_to_pixels_proc(doc)
        with pytest.raises(errors.ConverterProcException):
            provider.doc_to_pixels(doc, tmpdir, p)
            provider.convert_with_proc(doc, None, p)
        assert provider.get_proc_exception(p) == errors.MaxPagesException

    def test_max_pages_client_enforcement(

@@ -46,7 +45,7 @@ class IsolationProviderTest:
        doc = Document(sample_doc)
        p = provider.start_doc_to_pixels_proc(doc)
        with pytest.raises(errors.MaxPagesException):
            provider.doc_to_pixels(doc, tmpdir, p)
            provider.convert_with_proc(doc, None, p)

    def test_max_dimensions(
        self,

@@ -60,12 +59,12 @@ class IsolationProviderTest:
        doc = Document(sample_bad_width)
        p = provider.start_doc_to_pixels_proc(doc)
        with pytest.raises(errors.MaxPageWidthException):
            provider.doc_to_pixels(doc, tmpdir, p)
            provider.convert_with_proc(doc, None, p)

        doc = Document(sample_bad_height)
        p = provider.start_doc_to_pixels_proc(doc)
        with pytest.raises(errors.MaxPageHeightException):
            provider.doc_to_pixels(doc, tmpdir, p)
            provider.convert_with_proc(doc, None, p)


class IsolationProviderTermination:

@@ -83,18 +82,18 @@ class IsolationProviderTermination:

    def test_completed(
        self,
        provider_wait: base.IsolationProvider,
        provider: base.IsolationProvider,
        mocker: MockerFixture,
    ) -> None:
        # Check that we don't need to terminate any process, if the conversion completes
        # successfully.
        doc = Document()
        provider_wait.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider_wait, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider_wait, "terminate_doc_to_pixels_proc")
        provider.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider, "terminate_doc_to_pixels_proc")
        popen_kill_spy = mocker.spy(subprocess.Popen, "kill")

        with provider_wait.doc_to_pixels_proc(doc) as proc:
        with provider.doc_to_pixels_proc(doc) as proc:
            assert proc.stdin
            proc.stdin.close()
            proc.wait(TIMEOUT_STARTUP)

@@ -106,18 +105,18 @@ class IsolationProviderTermination:

    def test_linger_terminate(
        self,
        provider_wait: base.IsolationProvider,
        provider: base.IsolationProvider,
        mocker: MockerFixture,
    ) -> None:
        # Check that successful conversions that linger for a little while are
        # terminated gracefully.
        doc = Document()
        provider_wait.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider_wait, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider_wait, "terminate_doc_to_pixels_proc")
        provider.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider, "terminate_doc_to_pixels_proc")
        popen_kill_spy = mocker.spy(subprocess.Popen, "kill")

        with provider_wait.doc_to_pixels_proc(doc) as proc:
        with provider.doc_to_pixels_proc(doc) as proc:
            # We purposefully do nothing here, so that the process remains running.
            pass

@@ -128,80 +127,80 @@ class IsolationProviderTermination:

    def test_linger_kill(
        self,
        provider_wait: base.IsolationProvider,
        provider: base.IsolationProvider,
        mocker: MockerFixture,
    ) -> None:
        # Check that successful conversions that cannot be terminated gracefully, are
        # killed forcefully.
        doc = Document()
        get_proc_exception_spy = mocker.spy(provider_wait, "get_proc_exception")
        get_proc_exception_spy = mocker.spy(provider, "get_proc_exception")
        # We mock the terminate_doc_to_pixels_proc() method, so that the process must be
        # killed.
        terminate_proc_mock = mocker.patch.object(
            provider_wait, "terminate_doc_to_pixels_proc", return_value=None
            provider, "terminate_doc_to_pixels_proc", return_value=None
        )
        popen_kill_spy = mocker.spy(subprocess.Popen, "kill")
        kill_pg_spy = mocker.spy(base, "kill_process_group")

        with provider_wait.doc_to_pixels_proc(doc, timeout_grace=0) as proc:
        with provider.doc_to_pixels_proc(doc, timeout_grace=0) as proc:
            pass

        get_proc_exception_spy.assert_not_called()
        terminate_proc_mock.assert_called()
        popen_kill_spy.assert_called()
        kill_pg_spy.assert_called()
        assert proc.poll() is not None

    def test_linger_unkillable(
        self,
        provider_wait: base.IsolationProvider,
        provider: base.IsolationProvider,
        mocker: MockerFixture,
    ) -> None:
        # Check that if a conversion process cannot be killed, at least it will not
        # block the operation.
        doc = Document()
        get_proc_exception_spy = mocker.spy(provider_wait, "get_proc_exception")
        get_proc_exception_spy = mocker.spy(provider, "get_proc_exception")
        # We mock both the terminate_doc_to_pixels_proc() method, and our kill
        # invocation, so that the process will seem as unkillable.
        terminate_proc_orig = provider_wait.terminate_doc_to_pixels_proc
        terminate_proc_orig = provider.terminate_doc_to_pixels_proc
        terminate_proc_mock = mocker.patch.object(
            provider_wait, "terminate_doc_to_pixels_proc", return_value=None
            provider, "terminate_doc_to_pixels_proc", return_value=None
        )
        popen_kill_mock = mocker.patch.object(
            subprocess.Popen, "kill", return_value=None
        kill_pg_orig = base.kill_process_group
        kill_pg_mock = mocker.patch(
            "dangerzone.isolation_provider.base.kill_process_group", return_value=None
        )

        with provider_wait.doc_to_pixels_proc(
            doc, timeout_grace=0, timeout_force=0
        ) as proc:
        with provider.doc_to_pixels_proc(doc, timeout_grace=0, timeout_force=0) as proc:
            pass

        get_proc_exception_spy.assert_not_called()
        terminate_proc_mock.assert_called()
        popen_kill_mock.assert_called()
        kill_pg_mock.assert_called()
        assert proc.poll() is None

        # Reset the function to the original state.
        provider_wait.terminate_doc_to_pixels_proc = terminate_proc_orig  # type: ignore [method-assign]
        provider.terminate_doc_to_pixels_proc = terminate_proc_orig  # type: ignore [method-assign]
        base.kill_process_group = kill_pg_orig

        # Really kill the spawned process, so that it doesn't linger after the tests
        # complete.
        provider_wait.ensure_stop_doc_to_pixels_proc(doc, proc)
        provider.ensure_stop_doc_to_pixels_proc(doc, proc)
        assert proc.poll() is not None

    def test_failed(
        self,
        provider_wait: base.IsolationProvider,
        provider: base.IsolationProvider,
        mocker: MockerFixture,
    ) -> None:
        # Check that we don't need to terminate any process, if the conversion fails.
        # However, we should be able to get the return code.
        doc = Document()
        provider_wait.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider_wait, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider_wait, "terminate_doc_to_pixels_proc")
        provider.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider, "terminate_doc_to_pixels_proc")
        popen_kill_spy = mocker.spy(subprocess.Popen, "kill")

        with pytest.raises(errors.DocFormatUnsupported):
            with provider_wait.doc_to_pixels_proc(doc, timeout_exception=0) as proc:
            with provider.doc_to_pixels_proc(doc, timeout_exception=0) as proc:
                assert proc.stdin
                # Sending an invalid file to the conversion process should report it as
                # an unsupported format.

@@ -220,19 +219,19 @@ class IsolationProviderTermination:

    def test_failed_linger(
        self,
        provider_wait: base.IsolationProvider,
        provider: base.IsolationProvider,
        mocker: MockerFixture,
    ) -> None:
        # Check that if the failed process has not exited, the error code that will be
        # returned is UnexpectedExceptionError.
        doc = Document()
        provider_wait.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider_wait, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider_wait, "terminate_doc_to_pixels_proc")
        provider.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider, "terminate_doc_to_pixels_proc")
        popen_kill_spy = mocker.spy(subprocess.Popen, "kill")

        with pytest.raises(errors.UnexpectedConversionError):
            with provider_wait.doc_to_pixels_proc(doc, timeout_exception=0) as proc:
            with provider.doc_to_pixels_proc(doc, timeout_exception=0) as proc:
                raise errors.ConverterProcException

        get_proc_exception_spy.assert_called()

@@ -1,14 +1,15 @@
import os
import subprocess
import time
import platform

import pytest
from pytest_mock import MockerFixture
from pytest_subprocess import FakeProcess

from dangerzone.document import Document
from dangerzone.isolation_provider import base
from dangerzone import errors
from dangerzone.container_utils import Runtime
from dangerzone.isolation_provider.container import Container
from dangerzone.isolation_provider.qubes import is_qubes_native_conversion
from dangerzone.util import get_resource_path

from .base import IsolationProviderTermination, IsolationProviderTest

@@ -24,80 +25,196 @@ def provider() -> Container:
    return Container()


class ContainerWait(Container):
    """Container isolation provider that blocks until the container has started."""

    def exec_container(self, *args, **kwargs):  # type: ignore [no-untyped-def]
        # Check every 100ms if a container with the expected name has shown up.
        # Else, closing the file descriptors may not work.
        name = kwargs["name"]
        runtime = self.get_runtime()
        p = super().exec_container(*args, **kwargs)
        for i in range(50):
            containers = subprocess.run(
                [runtime, "ps"], capture_output=True
            ).stdout.decode()
            if name in containers:
                return p
            time.sleep(0.1)

        raise RuntimeError(f"Container {name} did not start within 5 seconds")


@pytest.fixture
def provider_wait() -> ContainerWait:
    return ContainerWait()
def runtime_path() -> str:
    return str(Runtime().path)


class TestContainer(IsolationProviderTest):
    pass
    def test_is_available_raises(
        self, provider: Container, fp: FakeProcess, runtime_path: str
    ) -> None:
        """
        NotAvailableContainerTechException should be raised when
        the "podman image ls" command fails.
        """
        fp.register_subprocess(
            [runtime_path, "image", "ls"],
            returncode=-1,
            stderr="podman image ls logs",
        )
        with pytest.raises(errors.NotAvailableContainerTechException):
            provider.is_available()

    def test_is_available_works(
        self, provider: Container, fp: FakeProcess, runtime_path: str
    ) -> None:
        """
        No exception should be raised when "podman image ls" returns successfully.
        """
        fp.register_subprocess(
            [runtime_path, "image", "ls"],
        )
        provider.is_available()

    def test_install_raise_if_image_cant_be_installed(
        self, provider: Container, fp: FakeProcess, runtime_path: str
    ) -> None:
        """When an image installation fails, an exception should be raised"""

        fp.register_subprocess(
            [runtime_path, "image", "ls"],
        )

        # First check should return nothing.
        fp.register_subprocess(
            [
                runtime_path,
                "image",
                "list",
                "--format",
                "{{ .Tag }}",
                "dangerzone.rocks/dangerzone",
            ],
            occurrences=2,
        )

        fp.register_subprocess(
            [
                runtime_path,
                "load",
                "-i",
                get_resource_path("container.tar").absolute(),
            ],
            returncode=-1,
        )

        with pytest.raises(errors.ImageInstallationException):
            provider.install()

    def test_install_raises_if_still_not_installed(
        self, provider: Container, fp: FakeProcess, runtime_path: str
    ) -> None:
        """When the image is still not present after loading it, an exception should be raised"""
        fp.register_subprocess(
            [runtime_path, "version", "-f", "{{.Client.Version}}"],
            stdout="4.0.0",
        )

        fp.register_subprocess(
            [runtime_path, "image", "ls"],
        )

        # First check should return nothing.
        fp.register_subprocess(
            [
                runtime_path,
                "image",
                "list",
                "--format",
                "{{ .Tag }}",
                "dangerzone.rocks/dangerzone",
            ],
            occurrences=2,
        )

        fp.register_subprocess(
            [
                runtime_path,
                "load",
                "-i",
                get_resource_path("container.tar").absolute(),
            ],
        )
        with pytest.raises(errors.ImageNotPresentException):
            provider.install()

    @pytest.mark.skipif(
        platform.system() not in ("Windows", "Darwin"),
        reason="macOS and Windows specific",
    )
    def test_old_docker_desktop_version_is_detected(
        self, mocker: MockerFixture, provider: Container, fp: FakeProcess
    ) -> None:
        fp.register_subprocess(
            [
                "docker",
                "version",
                "--format",
                "{{.Server.Platform.Name}}",
            ],
            stdout="Docker Desktop 1.0.0 (173100)",
        )

        mocker.patch(
            "dangerzone.isolation_provider.container.MINIMUM_DOCKER_DESKTOP",
            {"Darwin": "1.0.1", "Windows": "1.0.1"},
        )
        assert (False, "1.0.0") == provider.check_docker_desktop_version()

    @pytest.mark.skipif(
        platform.system() not in ("Windows", "Darwin"),
        reason="macOS and Windows specific",
    )
    def test_up_to_date_docker_desktop_version_is_detected(
        self, mocker: MockerFixture, provider: Container, fp: FakeProcess
    ) -> None:
        fp.register_subprocess(
            [
                "docker",
                "version",
                "--format",
                "{{.Server.Platform.Name}}",
            ],
            stdout="Docker Desktop 1.0.1 (173100)",
        )

        # Require version 1.0.1
        mocker.patch(
            "dangerzone.isolation_provider.container.MINIMUM_DOCKER_DESKTOP",
            {"Darwin": "1.0.1", "Windows": "1.0.1"},
        )
        assert (True, "1.0.1") == provider.check_docker_desktop_version()

        fp.register_subprocess(
            [
                "docker",
                "version",
                "--format",
                "{{.Server.Platform.Name}}",
            ],
            stdout="Docker Desktop 2.0.0 (173100)",
        )
        assert (True, "2.0.0") == provider.check_docker_desktop_version()

    @pytest.mark.skipif(
        platform.system() not in ("Windows", "Darwin"),
        reason="macOS and Windows specific",
    )
    def test_docker_desktop_version_failure_returns_true(
        self, mocker: MockerFixture, provider: Container, fp: FakeProcess
    ) -> None:
        fp.register_subprocess(
            [
                "docker",
                "version",
                "--format",
                "{{.Server.Platform.Name}}",
            ],
            stderr="Oopsie",
            returncode=1,
        )
        assert provider.check_docker_desktop_version() == (True, "")

    @pytest.mark.skipif(
        platform.system() != "Linux",
        reason="Linux specific",
    )
    def test_linux_skips_desktop_version_check_returns_true(
        self, provider: Container
    ) -> None:
        assert (True, "") == provider.check_docker_desktop_version()

class TestContainerTermination(IsolationProviderTermination):

    def test_linger_runtime_kill(
        self,
        provider_wait: base.IsolationProvider,
        mocker: MockerFixture,
    ) -> None:
        # Check that conversions that remain stuck on `docker|podman kill` are
        # terminated forcefully.
        doc = Document()
        provider_wait.progress_callback = mocker.MagicMock()
        get_proc_exception_spy = mocker.spy(provider_wait, "get_proc_exception")
        terminate_proc_spy = mocker.spy(provider_wait, "terminate_doc_to_pixels_proc")
        popen_kill_spy = mocker.spy(subprocess.Popen, "kill")

        # Switch the subprocess.run() function with a patched function that
        # intercepts the `kill` command and switches it with `wait` instead. This way,
        # we emulate a `docker|podman kill` command that hangs.
        orig_subprocess_run = subprocess.run

        def patched_subprocess_run(*args, **kwargs):  # type: ignore [no-untyped-def]
            assert len(args) == 1
            cmd = args[0]
            if cmd[1] == "kill":
                # Switch the `kill` command with `wait`, thereby triggering a timeout.
                cmd[1] = "wait"

                # Make sure that a timeout has been specified, and make it 0, so that
                # the test ends as quickly as possible.
                assert "timeout" in kwargs
                kwargs["timeout"] = 0

                # Make sure that the modified command times out.
                with pytest.raises(subprocess.TimeoutExpired):
                    orig_subprocess_run(cmd, **kwargs)
            else:
                return orig_subprocess_run(*args, **kwargs)

        mocker.patch("subprocess.run", patched_subprocess_run)

        with provider_wait.doc_to_pixels_proc(doc, timeout_grace=0) as proc:
            # We purposefully do nothing here, so that the process remains running.
            pass

        get_proc_exception_spy.assert_not_called()
        terminate_proc_spy.assert_called()
        popen_kill_spy.assert_called()
        assert proc.poll() is not None
    pass

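The new container tests above drive `Container.is_available()` and `Container.install()` through pytest-subprocess's `fp` fixture, which fakes the Podman/Docker CLI so no real runtime is needed. A minimal self-contained sketch of the same mechanism, with an illustrative `podman` invocation:

import subprocess


def test_podman_failure_is_faked(fp) -> None:  # `fp` comes from pytest-subprocess
    # Register the exact argv we expect; the real binary is never executed.
    fp.register_subprocess(
        ["podman", "image", "ls"],
        returncode=1,
        stderr="cannot connect to Podman",
    )
    result = subprocess.run(["podman", "image", "ls"], capture_output=True)
    assert result.returncode == 1
    assert b"cannot connect to Podman" in result.stderr
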
@@ -1,11 +1,9 @@
import os
import subprocess

import pytest
from pytest_mock import MockerFixture

from dangerzone.conversion import errors
from dangerzone.document import Document
from dangerzone.isolation_provider.base import IsolationProvider
from dangerzone.isolation_provider.dummy import Dummy

@@ -16,37 +14,20 @@ if not os.environ.get("DUMMY_CONVERSION", False):
    pytest.skip("Dummy conversion is not enabled", allow_module_level=True)


class DummyWait(Dummy):
    """Dummy isolation provider that spawns a blocking process."""

    def start_doc_to_pixels_proc(self, document: Document) -> subprocess.Popen:
        return subprocess.Popen(
            ["python3"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

    def terminate_doc_to_pixels_proc(
        self, document: Document, p: subprocess.Popen
    ) -> None:
        p.terminate()


@pytest.fixture
def provider_wait() -> DummyWait:
    return DummyWait()
def provider() -> Dummy:
    return Dummy()


class TestDummyTermination(IsolationProviderTermination):
    def test_failed(
        self,
        provider_wait: IsolationProvider,
        provider: IsolationProvider,
        mocker: MockerFixture,
    ) -> None:
        mocker.patch.object(
            provider_wait,
            provider,
            "get_proc_exception",
            return_value=errors.DocFormatUnsupported(),
        )
        super().test_failed(provider_wait, mocker)
        super().test_failed(provider, mocker)

@@ -20,11 +20,6 @@ elif os.environ.get("DUMMY_CONVERSION", False):
    pytest.skip("Dummy conversion is enabled", allow_module_level=True)


@pytest.fixture
def provider() -> Qubes:
    return Qubes()


class QubesWait(Qubes):
    """Qubes isolation provider that blocks until the disposable qube has started."""


@@ -53,7 +48,7 @@ class QubesWait(Qubes):


@pytest.fixture
def provider_wait() -> QubesWait:
def provider() -> QubesWait:
    return QubesWait()


@@ -79,7 +74,7 @@ class TestQubes(IsolationProviderTest):
        )
        with pytest.raises(errors.ConverterProcException):
            doc = Document(sample_doc)
            provider.doc_to_pixels(doc, tmpdir, proc)
            provider.convert_with_proc(doc, None, proc)
        assert provider.get_proc_exception(proc) == errors.QubesQrexecFailed

@@ -7,13 +7,16 @@ import platform
import shutil
import sys
import tempfile
import time
import traceback
from pathlib import Path
from typing import Optional, Sequence
from unittest import mock

import fitz
import numpy as np
import pytest
from click.testing import CliRunner, Result
from pytest_mock import MockerFixture
from strip_ansi import strip_ansi

from dangerzone.cli import cli_main, display_banner

@@ -133,29 +136,19 @@ class TestCli:
        if os.environ.get("DUMMY_CONVERSION", False):
            args = ("--unsafe-dummy-conversion", *args)

        with tempfile.TemporaryDirectory() as t:
            tmp_dir = Path(t)
            # TODO: Replace this with `contextlib.chdir()` [1], which was added in
            # Python 3.11.
            #
            # [1]: https://docs.python.org/3/library/contextlib.html#contextlib.chdir
            try:
                if tmp_path is not None:
                    cwd = os.getcwd()
                    os.chdir(tmp_path)
        # TODO: Replace this with `contextlib.chdir()` [1], which was added in
        # Python 3.11.
        #
        # [1]: https://docs.python.org/3/library/contextlib.html#contextlib.chdir
        try:
            if tmp_path is not None:
                cwd = os.getcwd()
                os.chdir(tmp_path)

                with mock.patch(
                    "dangerzone.isolation_provider.container.get_tmp_dir",
                    return_value=t,
                ):
                    result = CliRunner().invoke(cli_main, args)
            finally:
                if tmp_path is not None:
                    os.chdir(cwd)

            if tmp_dir.exists():
                stale_files = list(tmp_dir.iterdir())
                assert not stale_files
            result = CliRunner().invoke(cli_main, args)
        finally:
            if tmp_path is not None:
                os.chdir(cwd)

        # XXX Print stdout so that junitXML exports with output capturing
        # actually include the stdout + stderr (they are combined into stdout)

@@ -200,11 +193,68 @@ class TestCliConversion(TestCliBasic):
        result = self.run_cli([sample_pdf, "--ocr-lang", "piglatin"])
        result.assert_failure()

    @pytest.mark.reference_generator
    @for_each_doc
    def test_formats(self, doc: Path) -> None:
        result = self.run_cli(str(doc))
    def test_regenerate_reference(self, doc: Path) -> None:
        reference = (doc.parent / "reference" / doc.stem).with_suffix(".pdf")

        result = self.run_cli([str(doc), "--output-filename", str(reference)])
        result.assert_success()

    @for_each_doc
    def test_formats(self, doc: Path, tmp_path_factory: pytest.TempPathFactory) -> None:
        reference = (doc.parent / "reference" / doc.stem).with_suffix(".pdf")
        destination = tmp_path_factory.mktemp(doc.stem).with_suffix(".pdf")

        result = self.run_cli([str(doc), "--output-filename", str(destination)])
        result.assert_success()

        # Do not check against reference versions when using a dummy isolation provider
        if os.environ.get("DUMMY_CONVERSION", False):
            return

        converted = fitz.open(destination)
        ref = fitz.open(reference)
        errors = []
        if len(converted) != len(ref):
            errors.append("different number of pages")

        diffs = doc.parent / "diffs"
        diffs.mkdir(parents=True, exist_ok=True)
        for page, ref_page in zip(converted, ref):
            curr_pixmap = page.get_pixmap(dpi=150)
            ref_pixmap = ref_page.get_pixmap(dpi=150)
            if curr_pixmap.tobytes() != ref_pixmap.tobytes():
                errors.append(f"page {page.number} differs")

                t0 = time.perf_counter()

                arr_ref = np.frombuffer(ref_pixmap.samples, dtype=np.uint8).reshape(
                    ref_pixmap.height, ref_pixmap.width, ref_pixmap.n
                )
                arr_curr = np.frombuffer(curr_pixmap.samples, dtype=np.uint8).reshape(
                    curr_pixmap.height, curr_pixmap.width, curr_pixmap.n
                )

                # Find differences (any channel differs)
                diff = (arr_ref != arr_curr).any(axis=2)

                # Get coordinates of differences
                diff_coords = np.where(diff)
                # Mark differences in red
                for y, x in zip(diff_coords[0], diff_coords[1]):
                    # Note: PyMuPDF's set_pixel takes (x, y) not (y, x)
                    ref_pixmap.set_pixel(int(x), int(y), (255, 0, 0))  # Red

                t1 = time.perf_counter()
                print(f"diff took {t1 - t0} seconds")
                ref_pixmap.save(diffs / f"{destination.stem}_{page.number}.jpeg")

        if len(errors) > 0:
            raise AssertionError(
                f"The resulting document differs from the reference. See {str(diffs)} for a visual diff."
            )

    def test_output_filename(self, sample_pdf: str) -> None:
        temp_dir = tempfile.mkdtemp(prefix="dangerzone-")
        output_filename = str(Path(temp_dir) / "safe.pdf")

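The pixel comparison in `test_formats` boils down to a NumPy mask over the raw samples: a pixel counts as different if any channel differs. The core of that trick on toy arrays (not real PyMuPDF pixmaps):

import numpy as np

# Two 2x2 RGB "images"; exactly one pixel differs.
ref = np.zeros((2, 2, 3), dtype=np.uint8)
cur = ref.copy()
cur[0, 1] = (255, 255, 255)

# True wherever any of the three channels differs.
mask = (ref != cur).any(axis=2)
ys, xs = np.where(mask)  # row/column coordinates of the differing pixels
assert (ys.tolist(), xs.tolist()) == ([0], [1])
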
@@ -219,6 +269,23 @@ class TestCliConversion(TestCliBasic):
        result = self.run_cli([sample_pdf, "--output-filename", output_filename])
        result.assert_success()

    ### Test method for swallowed exception
    def test_output_filename_pokemon_handler(
        self,
        sample_pdf: str,
        mocker: MockerFixture,
    ) -> None:
        """Ensure that we catch top-level errors."""
        mock_conv = mocker.patch(
            "dangerzone.isolation_provider.base.IsolationProvider.convert"
        )
        mock_conv.side_effect = Exception("It happens")
        result = self.run_cli([sample_pdf])
        # FIXME: The following does not work, because the log is somehow not captured by
        # Click's CliRunner.
        # result.assert_failure(message="It happens")
        result.assert_failure()

    def test_output_filename_new_dir(self, sample_pdf: str) -> None:
        output_filename = str(Path("fake-directory") / "my-output.pdf")
        result = self.run_cli([sample_pdf, "--output-filename", output_filename])

@@ -328,6 +395,7 @@ class TestCliConversion(TestCliBasic):

class TestExtraFormats(TestCli):
    @for_each_external_doc("*hwp*")
    @pytest.mark.flaky(reruns=2)
    def test_hancom_office(self, doc: str) -> None:
        if is_qubes_native_conversion():
            pytest.skip("HWP / HWPX formats are not supported on this platform")