diff --git a/.fix-pylint-output-for-gitlab-ci.py b/.fix-pylint-output-for-gitlab-ci.py new file mode 100755 index 0000000..5de8f06 --- /dev/null +++ b/.fix-pylint-output-for-gitlab-ci.py @@ -0,0 +1,10 @@ +import re +import sys + +def fix_output(matchobj): + return f'{matchobj.group(1)}{float(matchobj.group(2)) * 10}/{int(matchobj.group(3)) * 10}' + +pattern = re.compile(r'(Your code has been rated at )([0-9.]+)/(10)') +for line in sys.stdin: + line.rstrip() + print(re.sub(pattern, fix_output, line), end='') diff --git a/.gitignore b/.gitignore index e695a0f..43f1f96 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ __pycache__ venv .env public +*.swp diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0ba1f5a..6cda68b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,25 +1,52 @@ image: python:3.11 stages: + - install - test - deploy -before_script: - - make install +default: + cache: &global_cache + key: "$CI_COMMIT_SHA" + paths: + - venv/ + +.pull-cache: &pull_cache + cache: + <<: *global_cache + policy: pull + +install: + stage: install + script: + - make venv + cache: + <<: *global_cache + policy: push pytest: + <<: *pull_cache stage: test script: - make tests djlint: + <<: *pull_cache stage: test script: - make djlint -pages: - stage: deploy +pylint: + <<: *pull_cache + stage: test + script: + - make pylint | python3 .fix-pylint-output-for-gitlab-ci.py + allow_failure: true + coverage: '/Your code has been rated at ([0-9.]+)\/100/' +pages: + <<: *pull_cache + stage: deploy script: - pwd - ls diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..041c69d --- /dev/null +++ b/.pylintrc @@ -0,0 +1,645 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. 
Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. 
+ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.11 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. 
Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. 
+const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + p, + db, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=yes + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. 
+name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +#typealias-rgx= + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. 
+max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. 
+allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. 
For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + fixme, + too-few-public-methods, + unused-argument, + import-outside-toplevel, + no-self-argument, + singleton-comparison, + missing-module-docstring, + missing-class-docstring, + missing-function-docstring + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. 
+never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. 
+spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. 
+ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. 
+init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/Makefile b/Makefile index 47c5700..ae7b27d 100644 --- a/Makefile +++ b/Makefile @@ -5,18 +5,21 @@ ORANGE=\033[0;33m BLUE=\033[0;34m NC=\033[0m # No Color +.PHONY: tests djlint pylint + venv: ## Create the venv python3 -m venv venv -install: venv ## Install the project locally venv/bin/pip install -e ".[dev,docs]" docs: cog ## Build the docs venv/bin/sphinx-build docs public cog: ## Run cog, to integrate the CLI options to the docs. venv/bin/cog -r docs/*.md -tests: install ## Run the tests +tests: venv ## Run the tests venv/bin/pytest -djlint: install +djlint: venv venv/bin/djlint --ignore=H030,H031 --lint argos/server/templates/*html +pylint: venv + venv/bin/pylint argos help: @python3 -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) diff --git a/argos/agent.py b/argos/agent.py index e1dc0d4..fe3d222 100644 --- a/argos/agent.py +++ b/argos/agent.py @@ -1,10 +1,15 @@ +"""Argos agent + +Fetchs the tasks from the server, execute it and send the result to the server +""" import asyncio +import json import logging import socket from typing import List import httpx -from tenacity import retry, wait_random +from tenacity import retry, wait_random # type: ignore from argos.checks import get_registered_check from argos.logging import logger @@ -12,6 +17,7 @@ from argos.schemas import AgentResult, SerializableException, Task def log_failure(retry_state): + """Log failures, with a different log level depending on the number of attempts.""" if retry_state.attempt_number < 1: loglevel = logging.INFO else: @@ -33,21 +39,21 @@ class ArgosAgent: self.max_tasks = max_tasks self.wait_time = wait_time self.auth = auth + headers = { + "Authorization": f"Bearer {self.auth}", + } + self._http_client = httpx.AsyncClient(headers=headers) self.agent_id = socket.gethostname() 
@retry(after=log_failure, wait=wait_random(min=1, max=2)) async def run(self): - logger.info(f"Running agent against {self.server}") - headers = { - "Authorization": f"Bearer {self.auth}", - } - self._http_client = httpx.AsyncClient(headers=headers) + logger.info("Running agent against %s", self.server) async with self._http_client: while "forever": retry_now = await self._get_and_complete_tasks() if not retry_now: - logger.error(f"Waiting {self.wait_time} seconds before next retry") + logger.error("Waiting %i seconds before next retry", self.wait_time) await asyncio.sleep(self.wait_time) async def _complete_task(self, task: dict) -> dict: @@ -59,10 +65,10 @@ class ArgosAgent: status = result.status context = result.context - except Exception as e: + except Exception as err: # pylint: disable=broad-except status = "error" - context = SerializableException.from_exception(e) - msg = f"An exception occured when running {task}. {e.__class__.__name__} : {e}" + context = SerializableException.from_exception(err) + msg = f"An exception occured when running {task}. {err.__class__.__name__} : {err}" logger.error(msg) return AgentResult(task_id=task.id, status=status, context=context) @@ -76,7 +82,7 @@ class ArgosAgent: if response.status_code == httpx.codes.OK: # XXX Maybe we want to group the tests by URL ? 
(to issue one request per URL) data = response.json() - logger.info(f"Received {len(data)} tasks from the server") + logger.info("Received %i tasks from the server", len(data)) tasks = [] for task in data: @@ -86,13 +92,13 @@ class ArgosAgent: results = await asyncio.gather(*tasks) await self._post_results(results) return True - else: - logger.error("Got no tasks from the server.") - return False - else: - logger.error(f"Failed to fetch tasks: {response.read()}") + + logger.error("Got no tasks from the server.") return False + logger.error("Failed to fetch tasks: %s", response.read()) + return False + async def _post_results(self, results: List[AgentResult]): data = [r.model_dump() for r in results] response = await self._http_client.post( @@ -100,7 +106,7 @@ class ArgosAgent: ) if response.status_code == httpx.codes.CREATED: - logger.error(f"Successfully posted results {response.json()}") + logger.error("Successfully posted results %s", json.dumps(response.json())) else: - logger.error(f"Failed to post results: {response.read()}") + logger.error("Failed to post results: %s", response.read()) return response diff --git a/argos/checks/__init__.py b/argos/checks/__init__.py index 8238d21..be78950 100644 --- a/argos/checks/__init__.py +++ b/argos/checks/__init__.py @@ -1,7 +1,11 @@ -from argos.checks.base import ( +from argos.checks.base import ( # NOQA BaseCheck, CheckNotFound, get_registered_check, get_registered_checks, ) -from argos.checks.checks import HTTPBodyContains, HTTPStatus, SSLCertificateExpiration +from argos.checks.checks import ( # NOQA + HTTPBodyContains, + HTTPStatus, + SSLCertificateExpiration, +) diff --git a/argos/checks/base.py b/argos/checks/base.py index 8781294..92f6b72 100644 --- a/argos/checks/base.py +++ b/argos/checks/base.py @@ -1,13 +1,17 @@ +"""Various base classes for checks""" + from dataclasses import dataclass -from typing import Type +from typing import Type, Union import httpx from pydantic import BaseModel -from argos.schemas 
"""Possible severities of the checks’ results"""
"""Get tasks from the provided server and run them. Will wait for new tasks.
Usage: argos agent https://argos.server "auth-token-here" @@ -50,8 +50,8 @@ def agent(server, auth, max_tasks, wait_time, log_level): from argos.logging import logger logger.setLevel(log_level) - agent = ArgosAgent(server, auth, max_tasks, wait_time) - asyncio.run(agent.run()) + agent_ = ArgosAgent(server_url, auth, max_tasks, wait_time) + asyncio.run(agent_.run()) @server.command() diff --git a/argos/logging.py b/argos/logging.py index c441daf..74de43c 100644 --- a/argos/logging.py +++ b/argos/logging.py @@ -12,4 +12,4 @@ def set_log_level(log_level): if not isinstance(level, int): raise ValueError(f"Invalid log level: {log_level}") logger.setLevel(level=level) - logger.info("Log level set to {}".format(log_level)) + logger.info("Log level set to %s", log_level) diff --git a/argos/schemas/__init__.py b/argos/schemas/__init__.py index ae8757c..e682aed 100644 --- a/argos/schemas/__init__.py +++ b/argos/schemas/__init__.py @@ -1,2 +1,2 @@ -from .config import * -from .models import * +from .config import * # NOQA +from .models import * # NOQA diff --git a/argos/schemas/config.py b/argos/schemas/config.py index 486af14..df5bb3a 100644 --- a/argos/schemas/config.py +++ b/argos/schemas/config.py @@ -1,18 +1,21 @@ +"""Pydantic schemas for configuration + +For database models, see argos.server.models. +""" from typing import Dict, List, Literal, Optional, Tuple from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator from pydantic.functional_validators import BeforeValidator from typing_extensions import Annotated +from argos.checks import get_registered_checks from argos.schemas.utils import string_to_duration -# This file contains the pydantic schemas. -# For the database models, check in argos.server.models. 
class Service(BaseModel): + """List of agents’ tokens""" + secrets: List[str] class Alert(BaseModel): + """List of ways to handle alerts, by severity"""
+""" import traceback from datetime import datetime from typing import Literal @@ -8,6 +12,7 @@ from pydantic import BaseModel, ConfigDict class Task(BaseModel): + """A task corresponds to a check to execute""" id: int url: str domain: str @@ -19,27 +24,29 @@ class Task(BaseModel): model_config = ConfigDict(from_attributes=True) def __str__(self): - id = self.id + task_id = self.id url = self.url check = self.check - return f"Task ({id}): {url} - {check}" + return f"Task ({task_id}): {url} - {check}" class SerializableException(BaseModel): + """Task exception""" error_message: str error_type: str error_details: str @staticmethod - def from_exception(e: BaseException): + def from_exception(err: BaseException): return SerializableException( - error_message=str(e), - error_type=str(type(e).__name__), + error_message=str(err), + error_type=str(type(err).__name__), error_details=traceback.format_exc(), ) class AgentResult(BaseModel): + """Task’s result sent by agent""" task_id: int # The on-check status means that the service needs to finish the check # and will then determine the severity. 
diff --git a/argos/schemas/utils.py b/argos/schemas/utils.py index b0bbaa8..517a441 100644 --- a/argos/schemas/utils.py +++ b/argos/schemas/utils.py @@ -1,7 +1,7 @@ -from typing import Literal +from typing import Literal, Union -def string_to_duration(value: str, target: Literal["days", "hours", "minutes"]): +def string_to_duration(value: str, target: Literal["days", "hours", "minutes"]) -> Union[int,float]: """Convert a string to a number of hours, days or minutes""" num = int("".join(filter(str.isdigit, value))) @@ -27,15 +27,14 @@ def string_to_duration(value: str, target: Literal["days", "hours", "minutes"]): elif "mo" in value: num = num * 60 * 24 * 30 # considers 30d in a month elif "y" in value: - num = num * 60 * 24 * 365 - elif "m" in value: - num = num - else: + num = num * 60 * 24 * 365 # considers 365d in a year + elif "m" not in value: raise ValueError("Invalid duration value", value) if target == "hours": - num = num / 60 - elif target == "days": - num = num / 60 / 24 + return num / 60 + if target == "days": + return num / 60 / 24 + # target == "minutes" return num diff --git a/argos/server/alerting.py b/argos/server/alerting.py index d158833..5f5821a 100644 --- a/argos/server/alerting.py +++ b/argos/server/alerting.py @@ -1,6 +1,9 @@ from argos.logging import logger +# XXX Implement mail alerts https://framagit.org/framasoft/framaspace/argos/-/issues/15 +# XXX Implement gotify alerts https://framagit.org/framasoft/framaspace/argos/-/issues/16 def handle_alert(config, result, task, severity): + """Dispatch alert through configured alert channels""" msg = f"task={task.id}, status={result.status}, {severity=}" - logger.error(f"Alerting stub: {msg}") + logger.error("Alerting stub: %s", msg) diff --git a/argos/server/main.py b/argos/server/main.py index 0375e73..ce3f534 100644 --- a/argos/server/main.py +++ b/argos/server/main.py @@ -12,86 +12,93 @@ from argos.server.settings import get_app_settings, read_yaml_config def get_application() -> FastAPI: + 
"""Spawn Argos FastAPI server""" settings = get_app_settings() - app = FastAPI() + appli = FastAPI() - config = read_config(app, settings) + config = read_config(appli, settings) # Settings is the pydantic settings object # Config is the argos config object (built from yaml) - app.state.config = config - app.state.settings = settings + appli.state.config = config + appli.state.settings = settings - app.add_event_handler( + appli.add_event_handler( "startup", - create_start_app_handler(app), + create_start_app_handler(appli), ) - app.add_event_handler( + appli.add_event_handler( "shutdown", - create_stop_app_handler(app), + create_stop_app_handler(appli), ) - app.include_router(routes.api, prefix="/api") - app.include_router(routes.views) - app.mount("/static", StaticFiles(directory="argos/server/static"), name="static") - return app + appli.include_router(routes.api, prefix="/api") + appli.include_router(routes.views) + appli.mount("/static", StaticFiles(directory="argos/server/static"), name="static") + return appli -def create_start_app_handler(app): +def create_start_app_handler(appli): + """Warmup the server: + setup database connection and update the tasks in it before making it available + """ async def read_config_and_populate_db(): - setup_database(app) + setup_database(appli) - db = await connect_to_db(app) - await queries.update_from_config(db, app.state.config) + db = await connect_to_db(appli) + await queries.update_from_config(db, appli.state.config) return read_config_and_populate_db -async def connect_to_db(app): - app.state.db = app.state.SessionLocal() - return app.state.db +async def connect_to_db(appli): + appli.state.db = appli.state.SessionLocal() + return appli.state.db -def create_stop_app_handler(app): +def create_stop_app_handler(appli): + """Gracefully shutdown the server: + close database connection. 
+ """ async def stop_app(): - app.state.db.close() + appli.state.db.close() return stop_app -def read_config(app, settings): +def read_config(appli, settings): try: config = read_yaml_config(settings.yaml_file) - app.state.config = config + appli.state.config = config return config - except ValidationError as e: + except ValidationError as err: logger.error("Errors where found while reading configuration:") - for error in e.errors(): - logger.error(f"{error['loc']} is {error['type']}") + for error in err.errors(): + logger.error("%s is %s", error['loc'], error['type']) sys.exit(1) -def setup_database(app): - settings = app.state.settings +def setup_database(appli): + settings = appli.state.settings # For sqlite, we need to add connect_args={"check_same_thread": False} - logger.debug(f"Using database URL {settings.database_url}") + logger.debug("Using database URL %s", settings.database_url) if settings.database_url.startswith("sqlite:////tmp"): logger.warning("Using sqlite in /tmp is not recommended for production") - + extra_settings = {} if settings.db_pool_size: extra_settings.setdefault("pool_size", settings.db_pool_size) if settings.db_max_overflow: extra_settings.setdefault("max_overflow", settings.db_max_overflow) - + engine = create_engine( settings.database_url, **extra_settings ) - app.state.SessionLocal = sessionmaker( + appli.state.SessionLocal = sessionmaker( autocommit=False, autoflush=False, bind=engine ) - app.state.engine = engine + appli.state.engine = engine models.Base.metadata.create_all(bind=engine) diff --git a/argos/server/models.py b/argos/server/models.py index a894ab6..53e3b28 100644 --- a/argos/server/models.py +++ b/argos/server/models.py @@ -1,3 +1,4 @@ +"""Database models""" from datetime import datetime, timedelta from typing import List, Literal @@ -20,8 +21,8 @@ class Task(Base): """ There is one task per check. - It contains all information needed to run the jobs on the workers. 
"""There are multiple results per task. + + The results use the information returned by the agents.
+ """ __tablename__ = "results" id: Mapped[int] = mapped_column(primary_key=True) task_id: Mapped[int] = mapped_column(ForeignKey("tasks.id")) diff --git a/argos/server/queries.py b/argos/server/queries.py index aa3b863..420fe3b 100644 --- a/argos/server/queries.py +++ b/argos/server/queries.py @@ -1,3 +1,4 @@ +"""Functions to ease SQL queries management""" from datetime import datetime, timedelta from urllib.parse import urljoin @@ -29,8 +30,8 @@ async def list_tasks(db: Session, agent_id: str, limit: int = 100): return tasks -async def get_task(db: Session, id: int) -> Task: - return db.get(Task, id) +async def get_task(db: Session, task_id: int) -> Task: + return db.get(Task, task_id) async def create_result(db: Session, agent_result: schemas.AgentResult, agent_id: str): @@ -60,6 +61,7 @@ async def count_results(db: Session): async def update_from_config(db: Session, config: schemas.Config): + """Update tasks from config file""" tasks = [] unique_properties = [] for website in config.websites: @@ -84,8 +86,9 @@ async def update_from_config(db: Session, config: schemas.Config): if frequency != existing_task.frequency: existing_task.frequency = frequency - msg = f"Skipping db task creation for {url=}, {check_key=}, {expected=}, {frequency=}." 
- logger.debug(msg) + logger.debug("Skipping db task creation for url=%s, " \ + "check_key=%s, expected=%s, frequency=%s.", + url, check_key, expected, frequency) else: properties = (url, check_key, expected) @@ -98,7 +101,7 @@ async def update_from_config(db: Session, config: schemas.Config): expected=expected, frequency=frequency, ) - logger.debug(f"Adding a new task in the db: {task}") + logger.debug("Adding a new task in the db: %s", task) tasks.append(task) db.add_all(tasks) @@ -106,16 +109,17 @@ async def update_from_config(db: Session, config: schemas.Config): async def get_severity_counts(db: Session) -> dict: + """Get the severities (ok, warning, critical…) and their count""" # Get the last result of each task subquery = ( - db.query(Result.task_id, func.max(Result.id).label("max_result_id")) + db.query(Result.task_id, func.max(Result.id).label("max_result_id")) # pylint: disable-msg=not-callable .group_by(Result.task_id) .subquery() ) # Join this back to get full result rows, and group by status query = ( - db.query(Result.severity, func.count(Result.id).label("count")) + db.query(Result.severity, func.count(Result.id).label("count")) # pylint: disable-msg=not-callable .join(subquery, Result.id == subquery.columns.max_result_id) .group_by(Result.severity) ) @@ -141,7 +145,7 @@ async def remove_old_results(db: Session, max_results: int): .limit(max_results) .subquery() ) - min_id = db.query(func.min(subquery.c.id)).scalar() + min_id = db.query(func.min(subquery.c.id)).scalar() # pylint: disable-msg=not-callable # Delete all the results older than min_id if min_id: @@ -156,6 +160,7 @@ async def remove_old_results(db: Session, max_results: int): async def release_old_locks(db: Session, max_lock_seconds: int): + """Remove outdated locks on tasks""" # Get all the jobs that have been selected_at for more than max_lock_time max_acceptable_time = datetime.now() - timedelta(seconds=max_lock_seconds) subquery = ( diff --git a/argos/server/routes/api.py 
b/argos/server/routes/api.py index 18e7b9b..57001c6 100644 --- a/argos/server/routes/api.py +++ b/argos/server/routes/api.py @@ -1,4 +1,5 @@ -from typing import List +"""Web interface for machines""" +from typing import List, Union from fastapi import APIRouter, Depends, Request from sqlalchemy.orm import Session @@ -17,8 +18,9 @@ async def read_tasks( request: Request, db: Session = Depends(get_db), limit: int = 10, - agent_id: str = None, + agent_id: Union[None,str] = None, ): + """Return a list of tasks to execute""" agent_id = agent_id or request.client.host tasks = await queries.list_tasks(db, agent_id=agent_id, limit=limit) return tasks @@ -30,7 +32,7 @@ async def create_results( results: List[AgentResult], db: Session = Depends(get_db), config: Config = Depends(get_config), - agent_id: str = None, + agent_id: Union[None,str] = None, ): """Get the results from the agents and store them locally. @@ -47,7 +49,7 @@ async def create_results( # XXX Get all the tasks at once, to limit the queries on the db task = await queries.get_task(db, agent_result.task_id) if not task: - logger.error(f"Unable to find task {agent_result.task_id}") + logger.error("Unable to find task %i", agent_result.task_id) else: check = task.get_check() status, severity = await check.finalize(config, result, **result.context) @@ -63,6 +65,7 @@ async def create_results( @route.get("/stats") async def get_stats(db: Session = Depends(get_db)): + """Get tasks statistics""" return { "upcoming_tasks_count": await queries.count_tasks(db, selected=False), "results_count": await queries.count_results(db), diff --git a/argos/server/routes/dependencies.py b/argos/server/routes/dependencies.py index 7c5eb61..e543eea 100644 --- a/argos/server/routes/dependencies.py +++ b/argos/server/routes/dependencies.py @@ -19,6 +19,7 @@ def get_config(request: Request): async def verify_token( request: Request, token: HTTPAuthorizationCredentials = Depends(auth_scheme) ): + """Verify agent token""" if 
token.credentials not in request.app.state.config.service.secrets: raise HTTPException(status_code=401, detail="Unauthorized") return token diff --git a/argos/server/routes/views.py b/argos/server/routes/views.py index 174f0c6..287168d 100644 --- a/argos/server/routes/views.py +++ b/argos/server/routes/views.py @@ -1,3 +1,4 @@ +"""Web interface for humans""" from collections import defaultdict from urllib.parse import urlparse @@ -18,7 +19,7 @@ templates = Jinja2Templates(directory="argos/server/templates") @route.get("/") async def get_severity_counts(request: Request, db: Session = Depends(get_db)): - """Returns the number of results per severity""" + """Shows the number of results per severity""" counts_dict = await queries.get_severity_counts(db) agents = db.query(Result.agent_id).distinct().all() @@ -35,6 +36,7 @@ async def get_severity_counts(request: Request, db: Session = Depends(get_db)): @route.get("/details") async def read_tasks(request: Request, db: Session = Depends(get_db)): + """Show all tasks and their current state""" tasks = db.query(Task).order_by(Task.domain).all() results = ( @@ -79,7 +81,8 @@ async def read_tasks(request: Request, db: Session = Depends(get_db)): async def get_domain_tasks( request: Request, domain: str, db: Session = Depends(get_db) ): - tasks = db.query(Task).filter(Task.domain.contains(domain)).all() + """Show all tasks attached to a domain""" + tasks = db.query(Task).filter(Task.domain.contains(domain)).all() # type: ignore[attr-defined] return templates.TemplateResponse( "domain.html", {"request": request, "domain": domain, "tasks": tasks} ) @@ -87,6 +90,7 @@ async def get_domain_tasks( @route.get("/result/{result_id}") async def get_result(request: Request, result_id: int, db: Session = Depends(get_db)): + """Show the details of a result""" result = db.query(Result).get(result_id) return templates.TemplateResponse( "result.html", {"request": request, "result": result} @@ -100,10 +104,11 @@ async def get_task_results( db: 
Session = Depends(get_db), config: Config = Depends(get_config), ): + """Show history of a task’s results""" results = ( db.query(Result) .filter(Result.task_id == task_id) - .order_by(Result.submitted_at.desc()) + .order_by(Result.submitted_at.desc()) # type: ignore[attr-defined] .all() ) task = db.query(Task).get(task_id) @@ -121,6 +126,7 @@ async def get_task_results( @route.get("/agents") async def get_agents(request: Request, db: Session = Depends(get_db)): + """Show argos agents and the last time the server saw them""" t1 = aliased(Result, name="t1") t2 = aliased(Result, name="t2") diff --git a/argos/server/settings.py b/argos/server/settings.py index 8116bb1..0465f77 100644 --- a/argos/server/settings.py +++ b/argos/server/settings.py @@ -1,7 +1,8 @@ +"""Pydantic schemas for server""" import os from functools import lru_cache from os import environ -from typing import Optional +from typing import Optional,Union import yaml from pydantic_settings import BaseSettings, SettingsConfigDict @@ -20,14 +21,22 @@ class Settings(BaseSettings): class DevSettings(Settings): + """Settings for dev environment. + + Uses config.yaml as config file. + Uses a SQLite database.""" app_env: str = "dev" - database_url: str = "sqlite:////tmp/argos.db" yaml_file: str = "config.yaml" db_pool_size: Optional[int] = None db_max_overflow: Optional[int] = None + database_url: str = "sqlite:////tmp/argos.db" class TestSettings(Settings): + """Settings for test environment. + + Uses tests/config.yaml as config file. 
frequency: "5m" # Run checks every five minutes.