Use Pylint

Luc Didry 2023-12-05 08:07:03 +00:00 committed by Luc Didry
parent ca09506a11
commit e10f4e9c2d
25 changed files with 933 additions and 127 deletions

10 .fix-pylint-output-for-gitlab-ci.py Normal file
View file

@@ -0,0 +1,10 @@
import re
import sys

def fix_output(matchobj):
    return f'{matchobj.group(1)}{float(matchobj.group(2)) * 10}/{int(matchobj.group(3)) * 10}'

pattern = re.compile(r'(Your code has been rated at )([0-9.]+)/(10)')
for line in sys.stdin:
    line.rstrip()
    print(re.sub(pattern, fix_output, line), end='')
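This helper exists because GitLab's coverage regex (used further down in `.gitlab-ci.yml`) only understands values out of 100, while pylint rates code out of 10; the script rescales the score as it streams through the pipe. A minimal sketch of the substitution, run on a made-up pylint summary line:

    import re

    pattern = re.compile(r'(Your code has been rated at )([0-9.]+)/(10)')

    def fix_output(matchobj):
        # 9.50/10 becomes 95.0/100, a percentage GitLab can parse
        return f'{matchobj.group(1)}{float(matchobj.group(2)) * 10}/{int(matchobj.group(3)) * 10}'

    line = 'Your code has been rated at 9.50/10 (previous run: 9.40/10)\n'
    print(re.sub(pattern, fix_output, line), end='')
    # -> Your code has been rated at 95.0/100 (previous run: 9.40/10)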

1
.gitignore vendored
View file

@@ -4,3 +4,4 @@ __pycache__
 venv
 .env
 public
+*.swp

View file

@@ -1,25 +1,52 @@
 image: python:3.11

 stages:
+  - install
   - test
   - deploy

-before_script:
-  - make install
+default:
+  cache: &global_cache
+    key: "$CI_COMMIT_SHA"
+    paths:
+      - venv/
+
+.pull-cache: &pull_cache
+  cache:
+    <<: *global_cache
+    policy: pull
+
+install:
+  stage: install
+  script:
+    - make venv
+  cache:
+    <<: *global_cache
+    policy: push
+
 pytest:
+  <<: *pull_cache
   stage: test
   script:
     - make tests

 djlint:
+  <<: *pull_cache
   stage: test
   script:
     - make djlint

-pages:
-  stage: deploy
+pylint:
+  <<: *pull_cache
+  stage: test
+  script:
+    - make pylint | python3 .fix-pylint-output-for-gitlab-ci.py
+  allow_failure: true
+  coverage: '/Your code has been rated at ([0-9.]+)\/100/'
+
+pages:
+  <<: *pull_cache
+  stage: deploy
   script:
     - pwd
     - ls

645
.pylintrc Normal file
View file

@@ -0,0 +1,645 @@
[MAIN]
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Clear in-memory caches upon conclusion of linting. Useful if running pylint
# in a server-like mode.
clear-cache-post-run=no
# Load and enable all available extensions. Use --list-extensions to see a list
# all available extensions.
#enable-all-extensions=
# In error mode, messages with a category besides ERROR or FATAL are
# suppressed, and no reports are done by default. Error mode is compatible with
# disabling specific errors.
#errors-only=
# Always return a 0 (non-error) status code, even if lint errors are found.
# This is primarily useful in continuous integration scripts.
#exit-zero=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=
# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=
# Specify a score threshold under which the program will exit with error.
fail-under=10
# Interpret the stdin as a python script, whose filename needs to be passed as
# the module_or_package argument.
#from-stdin=
# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS
# Add files or directories matching the regular expressions patterns to the
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\\' represents the directory delimiter on Windows systems,
# it can't be used as an escape character.
ignore-paths=
# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
# Emacs file locks
ignore-patterns=^\.#
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=1
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.11
# Discover python modules and packages in the file system subtree.
recursive=no
# Add paths to the list of the source roots. Supports globbing patterns. The
# source root is an absolute path or a path relative to the current working
# directory used to determine a package namespace for modules located under the
# source root.
source-roots=
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# In verbose mode, extra non-checker-related info will be displayed.
#verbose=
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=
# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE
# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,
j,
k,
ex,
p,
db,
Run,
_
# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=yes
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Regular expression matching correct type alias names. If left empty, type
# alias names will be checked with the set naming style.
#typealias-rgx=
# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=
[CLASSES]
# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp,
asyncSetUp,
__post_init__
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[DESIGN]
# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=
# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[EXCEPTIONS]
# Exceptions that will emit a warning when caught.
overgeneral-exceptions=builtins.BaseException,builtins.Exception
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=100
# Maximum number of lines in a module.
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=
# Allow explicit reexports by alias from a package __init__.
allow-reexport-from-package=no
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=
# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=
# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=
# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Couples of modules and preferred modules, separated by a comma.
preferred-modules=
[LOGGING]
# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
CONTROL_FLOW,
INFERENCE,
INFERENCE_FAILURE,
UNDEFINED
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
suppressed-message,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
use-implicit-booleaness-not-comparison-to-string,
use-implicit-booleaness-not-comparison-to-zero,
fixme,
too-few-public-methods,
unused-argument,
import-outside-toplevel,
no-self-argument,
singleton-comparison,
missing-module-docstring,
missing-class-docstring,
missing-function-docstring
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=
[METHOD_ARGS]
# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
# Regular expression of note tags to take in consideration.
notes-rgx=
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error
[REPORTS]
# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=
# Set the output format. Available formats are: text, parseable, colorized,
# json2 (improved json format), json (old json format) and msvs (visual
# studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[SIMILARITIES]
# Comments are removed from the similarity computation
ignore-comments=yes
# Docstrings are removed from the similarity computation
ignore-docstrings=yes
# Imports are removed from the similarity computation
ignore-imports=yes
# Signatures are removed from the similarity computation
ignore-signatures=yes
# Minimum lines number of a similarity.
min-similarity-lines=4
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. No available dictionaries : You need to install
# both the python package and the system dependency for enchant to work.
spelling-dict=
# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no
[STRING]
# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no
# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
not-async-context-manager,
not-context-manager,
attribute-defined-outside-init
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin
# List of decorators that change the signature of a decorated function.
signature-mutators=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of names allowed to shadow builtins
allowed-redefined-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
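
One detail worth a worked example: the `evaluation` option above is pylint's default scoring formula, and it produces the `rated at x.xx/10` line that the CI job rescales. With made-up counts — say 1 error, 3 warnings and 2 convention messages across 200 analysed statements:

    # Hypothetical message counts, just to evaluate the formula by hand.
    fatal, error, warning, refactor, convention, statement = 0, 1, 3, 0, 2, 200
    score = max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
    print(score)  # 9.5 — which the CI helper would report as 95.0/100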

View file

@@ -5,18 +5,21 @@ ORANGE=\033[0;33m
 BLUE=\033[0;34m
 NC=\033[0m # No Color

+.PHONY: tests djlint pylint
 venv: ## Create the venv
 	python3 -m venv venv
 install: venv ## Install the project locally
 	venv/bin/pip install -e ".[dev,docs]"
 docs: cog ## Build the docs
 	venv/bin/sphinx-build docs public
 cog: ## Run cog, to integrate the CLI options to the docs.
 	venv/bin/cog -r docs/*.md
-tests: install ## Run the tests
+tests: venv ## Run the tests
 	venv/bin/pytest
-djlint: install
+djlint: venv
 	venv/bin/djlint --ignore=H030,H031 --lint argos/server/templates/*html
+pylint: venv
+	venv/bin/pylint argos
 help:
 	@python3 -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)

View file

@@ -1,10 +1,15 @@
+"""Argos agent
+
+Fetchs the tasks from the server, execute it and send the result to the server
+"""
 import asyncio
+import json
 import logging
 import socket
 from typing import List

 import httpx
-from tenacity import retry, wait_random
+from tenacity import retry, wait_random  # type: ignore

 from argos.checks import get_registered_check
 from argos.logging import logger
@@ -12,6 +17,7 @@ from argos.schemas import AgentResult, SerializableException, Task


 def log_failure(retry_state):
+    """Log failures, with a different log level depending on the number of attempts."""
     if retry_state.attempt_number < 1:
         loglevel = logging.INFO
     else:
@@ -33,21 +39,21 @@ class ArgosAgent:
         self.max_tasks = max_tasks
         self.wait_time = wait_time
         self.auth = auth
-        headers = {
-            "Authorization": f"Bearer {self.auth}",
-        }
-        self._http_client = httpx.AsyncClient(headers=headers)
         self.agent_id = socket.gethostname()

     @retry(after=log_failure, wait=wait_random(min=1, max=2))
     async def run(self):
-        logger.info(f"Running agent against {self.server}")
+        logger.info("Running agent against %s", self.server)
+        headers = {
+            "Authorization": f"Bearer {self.auth}",
+        }
+        self._http_client = httpx.AsyncClient(headers=headers)
         async with self._http_client:
             while "forever":
                 retry_now = await self._get_and_complete_tasks()
                 if not retry_now:
-                    logger.error(f"Waiting {self.wait_time} seconds before next retry")
+                    logger.error("Waiting %i seconds before next retry", self.wait_time)
                     await asyncio.sleep(self.wait_time)

     async def _complete_task(self, task: dict) -> dict:
@@ -59,10 +65,10 @@ class ArgosAgent:
             status = result.status
             context = result.context
-        except Exception as e:
+        except Exception as err:  # pylint: disable=broad-except
             status = "error"
-            context = SerializableException.from_exception(e)
-            msg = f"An exception occured when running {task}. {e.__class__.__name__} : {e}"
+            context = SerializableException.from_exception(err)
+            msg = f"An exception occured when running {task}. {err.__class__.__name__} : {err}"
             logger.error(msg)

         return AgentResult(task_id=task.id, status=status, context=context)
@@ -76,7 +82,7 @@ class ArgosAgent:
         if response.status_code == httpx.codes.OK:
             # XXX Maybe we want to group the tests by URL ? (to issue one request per URL)
             data = response.json()
-            logger.info(f"Received {len(data)} tasks from the server")
+            logger.info("Received %i tasks from the server", len(data))

             tasks = []
             for task in data:
@@ -86,13 +92,13 @@ class ArgosAgent:
                 results = await asyncio.gather(*tasks)
                 await self._post_results(results)
                 return True
-            else:
-                logger.error("Got no tasks from the server.")
-                return False
-        else:
-            logger.error(f"Failed to fetch tasks: {response.read()}")
-            return False
+
+            logger.error("Got no tasks from the server.")
+            return False
+
+        logger.error("Failed to fetch tasks: %s", response.read())
+        return False

     async def _post_results(self, results: List[AgentResult]):
         data = [r.model_dump() for r in results]
         response = await self._http_client.post(
@@ -100,7 +106,7 @@ class ArgosAgent:
         )

         if response.status_code == httpx.codes.CREATED:
-            logger.error(f"Successfully posted results {response.json()}")
+            logger.error("Successfully posted results %s", json.dumps(response.json()))
         else:
-            logger.error(f"Failed to post results: {response.read()}")
+            logger.error("Failed to post results: %s", response.read())
         return response
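
Most of the changes in this file replace f-strings inside logging calls with %-style placeholders, which is what pylint's logging-fstring-interpolation (W1203) check asks for: with placeholders, the message is only interpolated if the record is actually emitted. A minimal illustration (the logger name is made up):

    import logging

    logger = logging.getLogger("example")  # hypothetical logger

    count = 1000
    logger.debug(f"Received {count} tasks")   # interpolated even if DEBUG is off
    logger.debug("Received %i tasks", count)  # interpolated only when emitted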

View file

@@ -1,7 +1,11 @@
-from argos.checks.base import (
+from argos.checks.base import (  # NOQA
     BaseCheck,
     CheckNotFound,
     get_registered_check,
     get_registered_checks,
 )
-from argos.checks.checks import HTTPBodyContains, HTTPStatus, SSLCertificateExpiration
+from argos.checks.checks import (  # NOQA
+    HTTPBodyContains,
+    HTTPStatus,
+    SSLCertificateExpiration,
+)

View file

@@ -1,13 +1,17 @@
+"""Various base classes for checks"""
+
 from dataclasses import dataclass
-from typing import Type
+from typing import Type, Union

 import httpx
 from pydantic import BaseModel

-from argos.schemas import Task
+from argos.schemas.models import Task


 class Status:
+    """Possible statuses of the checks"""
+
     ON_CHECK = "on-check"
     SUCCESS = "success"
     FAILURE = "failure"
@@ -15,6 +19,8 @@ class Status:
 class Severity:
+    """Possible statuses of the checks results"""
+
     OK = "ok"
     WARNING = "warning"
     CRITICAL = "critical"
@@ -30,6 +36,7 @@ class Response:
     @classmethod
     def new(cls, status, **kwargs):
+        """Normalize results of checks."""
         if isinstance(status, bool):
             status = Status.SUCCESS if status else Status.FAILURE
@@ -63,9 +70,9 @@ class InvalidResponse(Exception):
 class BaseCheck:
     config: str
-    expected_cls: Type[BaseExpectedValue] = None
+    expected_cls: Union[None, Type[BaseExpectedValue]] = None

-    _registry = []
+    _registry = []  # type: ignore[var-annotated]

     def __init_subclass__(cls, **kwargs):
         super().__init_subclass__(**kwargs)
@@ -73,10 +80,12 @@ class BaseCheck:
     @classmethod
     def get_registered_checks(cls):
+        """Return existing checks"""
         return {c.config: c for c in cls._registry}

     @classmethod
     def get_registered_check(cls, name):
+        """Get a check from its name"""
         check = cls.get_registered_checks().get(name)
         if not check:
             raise CheckNotFound(name)
@@ -88,16 +97,20 @@ class BaseCheck:
     @property
     def expected(self):
-        return self.expected_cls(expected=self.task.expected).get_converted()
+        """Convert the tasks class to simpler class"""
+        if self.expected_cls is not None:
+            return self.expected_cls(expected=self.task.expected).get_converted()
+        return None

     def response(self, **kwargs):
+        """Ensure that the response has a status and return a Response"""
         if "status" not in kwargs:
             raise InvalidResponse(kwargs)

         status = kwargs.pop("status")
         return Response.new(status, **kwargs)

     @classmethod
-    async def finalize(self, config, result, **context):
+    async def finalize(cls, config, result, **context):
         """By default, the finalize considers that :

         - All FAILUREs should be reported as CRITICAL
@@ -109,9 +122,9 @@
         """
         if result.status in (Status.SUCCESS, Status.ERROR):
             return result.status, Severity.OK
-        elif result.status == Status.FAILURE:
+        if result.status == Status.FAILURE:
             return result.status, Severity.CRITICAL
-        elif result.status == Status.ON_CHECK:
+        if result.status == Status.ON_CHECK:
             msg = (
                 "Status is 'on-check', but the Check class "
                 "didn't provide a finalize() method."

View file

@@ -1,3 +1,5 @@
+"""Define the available checks"""
+
 from datetime import datetime

 from argos.checks.base import (
@@ -61,18 +63,19 @@ class SSLCertificateExpiration(BaseCheck):
     async def finalize(cls, config, result, **context):
         if result.status != Status.ON_CHECK:
             return result.status, Severity.WARNING
-        elif "expires_in" in context:
+
+        if "expires_in" in context:
             thresholds = config.ssl.thresholds
             thresholds.sort()
             for days, severity in thresholds:
                 if context["expires_in"] < days:
                     return Status.FAILURE, severity
             return Status.SUCCESS, Severity.OK
-        else:
-            raise ValueError(
-                "The SSLCertificateExpiration check didn't provide an 'expires_in' "
-                "context variable."
-            )
+
+        raise ValueError(
+            "The SSLCertificateExpiration check didn't provide an 'expires_in' "
+            "context variable."
+        )

     @classmethod
     def get_description(cls, config):

View file

@@ -36,7 +36,7 @@ def server():
     default="INFO",
     type=click.Choice(logging.LOG_LEVELS, case_sensitive=False),
 )
-def agent(server, auth, max_tasks, wait_time, log_level):
+def agent(server_url, auth, max_tasks, wait_time, log_level):
     """Get and run tasks to the provided server. Will wait for new tasks.

     Usage: argos agent https://argos.server "auth-token-here"
@@ -50,8 +50,8 @@ def agent(server, auth, max_tasks, wait_time, log_level):
     from argos.logging import logger

     logger.setLevel(log_level)
-    agent = ArgosAgent(server, auth, max_tasks, wait_time)
-    asyncio.run(agent.run())
+    agent_ = ArgosAgent(server_url, auth, max_tasks, wait_time)
+    asyncio.run(agent_.run())


 @server.command()

View file

@@ -12,4 +12,4 @@ def set_log_level(log_level):
     if not isinstance(level, int):
         raise ValueError(f"Invalid log level: {log_level}")
     logger.setLevel(level=level)
-    logger.info("Log level set to {}".format(log_level))
+    logger.info("Log level set to %s", log_level)

View file

@@ -1,2 +1,2 @@
-from .config import *
-from .models import *
+from .config import *  # NOQA
+from .models import *  # NOQA

View file

@@ -1,18 +1,21 @@
+"""Pydantic schemas for configuration
+
+For database models, see argos.server.models.
+"""
 from typing import Dict, List, Literal, Optional, Tuple

 from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
 from pydantic.functional_validators import BeforeValidator
 from typing_extensions import Annotated

+from argos.checks import get_registered_checks
 from argos.schemas.utils import string_to_duration

-# This file contains the pydantic schemas.
-# For the database models, check in argos.server.models.

 Severity = Literal["warning", "error", "critical"]


 def parse_threshold(value):
+    """Parse duration threshold for SSL certificate validity"""
     for duration_str, severity in value.items():
         days = string_to_duration(duration_str, "days")
         # Return here because it's one-item dicts.
@@ -36,17 +39,16 @@ class WebsiteCheck(BaseModel):
     def validate(cls, value):
         if isinstance(value, str):
             return {"expected": value}
-        elif isinstance(value, dict):
+        if isinstance(value, dict):
             return value
-        elif isinstance(value, list):
+        if isinstance(value, list):
             return {"expected": value}
-        else:
-            raise ValueError("Invalid type")
+
+        raise ValueError("Invalid type")


 def parse_checks(value):
-    # To avoid circular imports
-    from argos.checks import get_registered_checks
+    """Check that checks are valid (i.e. registered) checks"""

     available_names = get_registered_checks().keys()
@@ -76,26 +78,36 @@ class Website(BaseModel):
     @field_validator("frequency", mode="before")
     def parse_frequency(cls, value):
+        """Convert the configured frequency to hours"""
         if value:
             return string_to_duration(value, "hours")
+        return None


 class Service(BaseModel):
+    """List of agents token"""
+
     secrets: List[str]


 class Alert(BaseModel):
+    """List of way to handle alerts, by severity"""
+
     error: List[str]
     warning: List[str]
     alert: List[str]


 class General(BaseModel):
+    """Frequency for the checks and alerts"""
+
     frequency: int
     alerts: Alert

     @field_validator("frequency", mode="before")
     def parse_frequency(cls, value):
+        """Convert the configured frequency to minutes"""
         return string_to_duration(value, "minutes")

View file

@@ -1,3 +1,7 @@
+"""Pydantic schemas for data
+
+For database models, see argos.server.models.
+"""
 import traceback
 from datetime import datetime
 from typing import Literal
@@ -8,6 +12,7 @@ from pydantic import BaseModel, ConfigDict


 class Task(BaseModel):
+    """A task corresponds to a check to execute"""
     id: int
     url: str
     domain: str
@@ -19,27 +24,29 @@ class Task(BaseModel):
     model_config = ConfigDict(from_attributes=True)

     def __str__(self):
-        id = self.id
+        task_id = self.id
         url = self.url
         check = self.check
-        return f"Task ({id}): {url} - {check}"
+        return f"Task ({task_id}): {url} - {check}"


 class SerializableException(BaseModel):
+    """Task exception"""
     error_message: str
     error_type: str
     error_details: str

     @staticmethod
-    def from_exception(e: BaseException):
+    def from_exception(err: BaseException):
         return SerializableException(
-            error_message=str(e),
-            error_type=str(type(e).__name__),
+            error_message=str(err),
+            error_type=str(type(err).__name__),
             error_details=traceback.format_exc(),
         )


 class AgentResult(BaseModel):
+    """Tasks result sent by agent"""
     task_id: int
     # The on-check status means that the service needs to finish the check
     # and will then determine the severity.

View file

@@ -1,7 +1,7 @@
-from typing import Literal
+from typing import Literal, Union


-def string_to_duration(value: str, target: Literal["days", "hours", "minutes"]):
+def string_to_duration(value: str, target: Literal["days", "hours", "minutes"]) -> Union[int,float]:
     """Convert a string to a number of hours, days or minutes"""
     num = int("".join(filter(str.isdigit, value)))
@@ -27,15 +27,14 @@ def string_to_duration(value: str, target: Literal["days", "hours", "minutes"]):
     elif "mo" in value:
         num = num * 60 * 24 * 30  # considers 30d in a month
     elif "y" in value:
-        num = num * 60 * 24 * 365
-    elif "m" in value:
-        num = num
-    else:
+        num = num * 60 * 24 * 365  # considers 365d in a year
+    elif "m" not in value:
         raise ValueError("Invalid duration value", value)

     if target == "hours":
-        num = num / 60
-    elif target == "days":
-        num = num / 60 / 24
+        return num / 60
+    if target == "days":
+        return num / 60 / 24

+    # target == "minutes"
     return num
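
The rewritten control flow returns as soon as the target unit is known. Assuming the earlier branches (not shown in this hunk) convert `d` and `h` suffixes to minutes the same way, the function behaves like this:

    string_to_duration("5m", "minutes")  # 5
    string_to_duration("3h", "hours")    # 3.0  (180 minutes / 60)
    string_to_duration("15d", "days")    # 15.0 (21600 minutes / 60 / 24)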

View file

@@ -1,6 +1,9 @@
 from argos.logging import logger

+# XXX Implement mail alerts https://framagit.org/framasoft/framaspace/argos/-/issues/15
+# XXX Implement gotify alerts https://framagit.org/framasoft/framaspace/argos/-/issues/16

 def handle_alert(config, result, task, severity):
+    """Dispatch alert through configured alert channels"""
     msg = f"task={task.id}, status={result.status}, {severity=}"
-    logger.error(f"Alerting stub: {msg}")
+    logger.error("Alerting stub: %s", msg)

View file

@@ -12,68 +12,75 @@ from argos.server.settings import get_app_settings, read_yaml_config


 def get_application() -> FastAPI:
+    """Spawn Argos FastAPI server"""
     settings = get_app_settings()

-    app = FastAPI()
+    appli = FastAPI()

-    config = read_config(app, settings)
+    config = read_config(appli, settings)

     # Settings is the pydantic settings object
     # Config is the argos config object (built from yaml)
-    app.state.config = config
-    app.state.settings = settings
+    appli.state.config = config
+    appli.state.settings = settings

-    app.add_event_handler(
+    appli.add_event_handler(
         "startup",
-        create_start_app_handler(app),
+        create_start_app_handler(appli),
     )
-    app.add_event_handler(
+    appli.add_event_handler(
         "shutdown",
-        create_stop_app_handler(app),
+        create_stop_app_handler(appli),
     )

-    app.include_router(routes.api, prefix="/api")
-    app.include_router(routes.views)
+    appli.include_router(routes.api, prefix="/api")
+    appli.include_router(routes.views)

-    app.mount("/static", StaticFiles(directory="argos/server/static"), name="static")
+    appli.mount("/static", StaticFiles(directory="argos/server/static"), name="static")

-    return app
+    return appli


-def create_start_app_handler(app):
+def create_start_app_handler(appli):
+    """Warmup the server:
+    setup database connection and update the tasks in it before making it available
+    """
     async def read_config_and_populate_db():
-        setup_database(app)
+        setup_database(appli)

-        db = await connect_to_db(app)
-        await queries.update_from_config(db, app.state.config)
+        db = await connect_to_db(appli)
+        await queries.update_from_config(db, appli.state.config)

     return read_config_and_populate_db


-async def connect_to_db(app):
-    app.state.db = app.state.SessionLocal()
-    return app.state.db
+async def connect_to_db(appli):
+    appli.state.db = appli.state.SessionLocal()
+    return appli.state.db


-def create_stop_app_handler(app):
+def create_stop_app_handler(appli):
+    """Gracefully shutdown the server:
+    close database connection.
+    """
     async def stop_app():
-        app.state.db.close()
+        appli.state.db.close()

     return stop_app


-def read_config(app, settings):
+def read_config(appli, settings):
     try:
         config = read_yaml_config(settings.yaml_file)
-        app.state.config = config
+        appli.state.config = config
         return config
-    except ValidationError as e:
+    except ValidationError as err:
         logger.error("Errors where found while reading configuration:")
-        for error in e.errors():
-            logger.error(f"{error['loc']} is {error['type']}")
+        for error in err.errors():
+            logger.error("%s is %s", error['loc'], error['type'])
         sys.exit(1)


-def setup_database(app):
-    settings = app.state.settings
+def setup_database(appli):
+    settings = appli.state.settings
     # For sqlite, we need to add connect_args={"check_same_thread": False}
-    logger.debug(f"Using database URL {settings.database_url}")
+    logger.debug("Using database URL %s", settings.database_url)
     if settings.database_url.startswith("sqlite:////tmp"):
         logger.warning("Using sqlite in /tmp is not recommended for production")
@@ -88,10 +95,10 @@ def setup_database(app):
         settings.database_url,
         **extra_settings
     )
-    app.state.SessionLocal = sessionmaker(
+    appli.state.SessionLocal = sessionmaker(
         autocommit=False, autoflush=False, bind=engine
     )
-    app.state.engine = engine
+    appli.state.engine = engine
     models.Base.metadata.create_all(bind=engine)

View file

@@ -1,3 +1,4 @@
+"""Database models"""
 from datetime import datetime, timedelta
 from typing import List, Literal
@@ -20,8 +21,8 @@ class Task(Base):
     """
     There is one task per check.

-    It contains all information needed to run the jobs on the workers.
-    Workers will return information in the result table.
+    It contains all information needed to run the jobs on the agents.
+    Agents will return information in the result table.
     """

     __tablename__ = "tasks"
@@ -50,6 +51,7 @@
         return get_registered_check(self.check)

     def set_times_and_deselect(self):
+        """Removes the lock on task and set the time for the next run"""
         self.selected_by = None

         now = datetime.now()
@@ -58,24 +60,34 @@
     @property
     def last_result(self):
+        """Get last result of the task"""
         if not self.results:
             return None
         return max(self.results, key=lambda r: r.id)

     @property
     def status(self):
+        """Get status of the task, i.e. the status of its last result"""
         if not self.last_result:
             return None
         return self.last_result.status

     @property
     def severity(self):
+        """Get severity of the task"""
         if not self.last_result:
             return None
         return self.last_result.severity


 class Result(Base):
+    """There is multiple results per tasks.
+
+    The results uses the informations returned by the agents.
+    The status is "Was the agent able to do the check?" while the severity
+    depends on the return value of the check.
+    """
+
     __tablename__ = "results"

     id: Mapped[int] = mapped_column(primary_key=True)
     task_id: Mapped[int] = mapped_column(ForeignKey("tasks.id"))

View file

@@ -1,3 +1,4 @@
+"""Functions to ease SQL queries management"""
 from datetime import datetime, timedelta
 from urllib.parse import urljoin
@@ -29,8 +30,8 @@ async def list_tasks(db: Session, agent_id: str, limit: int = 100):
     return tasks


-async def get_task(db: Session, id: int) -> Task:
-    return db.get(Task, id)
+async def get_task(db: Session, task_id: int) -> Task:
+    return db.get(Task, task_id)


 async def create_result(db: Session, agent_result: schemas.AgentResult, agent_id: str):
@@ -60,6 +61,7 @@ async def count_results(db: Session):
 async def update_from_config(db: Session, config: schemas.Config):
+    """Update tasks from config file"""
     tasks = []
     unique_properties = []
     for website in config.websites:
@@ -84,8 +86,9 @@ async def update_from_config(db: Session, config: schemas.Config):
                 if frequency != existing_task.frequency:
                     existing_task.frequency = frequency

-                msg = f"Skipping db task creation for {url=}, {check_key=}, {expected=}, {frequency=}."
-                logger.debug(msg)
+                logger.debug("Skipping db task creation for url=%s, " \
+                             "check_key=%s, expected=%s, frequency=%s.",
+                             url, check_key, expected, frequency)
             else:
                 properties = (url, check_key, expected)
@@ -98,7 +101,7 @@
                     expected=expected,
                     frequency=frequency,
                 )
-                logger.debug(f"Adding a new task in the db: {task}")
+                logger.debug("Adding a new task in the db: %s", task)
                 tasks.append(task)

     db.add_all(tasks)
@@ -106,16 +109,17 @@
 async def get_severity_counts(db: Session) -> dict:
+    """Get the severities (ok, warning, critical…) and their count"""
     # Get the last result of each task
     subquery = (
-        db.query(Result.task_id, func.max(Result.id).label("max_result_id"))
+        db.query(Result.task_id, func.max(Result.id).label("max_result_id"))  # pylint: disable-msg=not-callable
         .group_by(Result.task_id)
         .subquery()
     )

     # Join this back to get full result rows, and group by status
     query = (
-        db.query(Result.severity, func.count(Result.id).label("count"))
+        db.query(Result.severity, func.count(Result.id).label("count"))  # pylint: disable-msg=not-callable
         .join(subquery, Result.id == subquery.columns.max_result_id)
         .group_by(Result.severity)
     )
@@ -141,7 +145,7 @@ async def remove_old_results(db: Session, max_results: int):
         .limit(max_results)
         .subquery()
     )
-    min_id = db.query(func.min(subquery.c.id)).scalar()
+    min_id = db.query(func.min(subquery.c.id)).scalar()  # pylint: disable-msg=not-callable

     # Delete all the results older than min_id
     if min_id:
@@ -156,6 +160,7 @@
 async def release_old_locks(db: Session, max_lock_seconds: int):
+    """Remove outdated locks on tasks"""
     # Get all the jobs that have been selected_at for more than max_lock_time
     max_acceptable_time = datetime.now() - timedelta(seconds=max_lock_seconds)
     subquery = (
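
The inline `# pylint: disable-msg=not-callable` pragmas above work around a known pylint false positive on SQLAlchemy's `func.count`/`func.max`/`func.min` column constructs, scoping the suppression to a single line. Note that `disable-msg` is the legacy spelling; current pylint prefers `disable`, e.g.:

    from sqlalchemy import func

    count_expr = func.count()  # pylint: disable=not-callable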

View file

@@ -1,4 +1,5 @@
-from typing import List
+"""Web interface for machines"""
+from typing import List, Union

 from fastapi import APIRouter, Depends, Request
 from sqlalchemy.orm import Session
@@ -17,8 +18,9 @@
     request: Request,
     db: Session = Depends(get_db),
     limit: int = 10,
-    agent_id: str = None,
+    agent_id: Union[None,str] = None,
 ):
+    """Return a list of tasks to execute"""
     agent_id = agent_id or request.client.host
     tasks = await queries.list_tasks(db, agent_id=agent_id, limit=limit)
     return tasks
@@ -30,7 +32,7 @@ async def create_results(
     results: List[AgentResult],
     db: Session = Depends(get_db),
     config: Config = Depends(get_config),
-    agent_id: str = None,
+    agent_id: Union[None,str] = None,
 ):
     """Get the results from the agents and store them locally.
@@ -47,7 +49,7 @@
         # XXX Get all the tasks at once, to limit the queries on the db
         task = await queries.get_task(db, agent_result.task_id)
         if not task:
-            logger.error(f"Unable to find task {agent_result.task_id}")
+            logger.error("Unable to find task %i", agent_result.task_id)
         else:
             check = task.get_check()
             status, severity = await check.finalize(config, result, **result.context)
@@ -63,6 +65,7 @@
 @route.get("/stats")
 async def get_stats(db: Session = Depends(get_db)):
+    """Get tasks statistics"""
     return {
         "upcoming_tasks_count": await queries.count_tasks(db, selected=False),
         "results_count": await queries.count_results(db),

View file

@@ -19,6 +19,7 @@ def get_config(request: Request):
 async def verify_token(
     request: Request, token: HTTPAuthorizationCredentials = Depends(auth_scheme)
 ):
+    """Verify agent token"""
     if token.credentials not in request.app.state.config.service.secrets:
         raise HTTPException(status_code=401, detail="Unauthorized")
     return token

View file

@@ -1,3 +1,4 @@
+"""Web interface for humans"""
 from collections import defaultdict
 from urllib.parse import urlparse
@@ -18,7 +19,7 @@ templates = Jinja2Templates(directory="argos/server/templates")
 @route.get("/")
 async def get_severity_counts(request: Request, db: Session = Depends(get_db)):
-    """Returns the number of results per severity"""
+    """Shows the number of results per severity"""
     counts_dict = await queries.get_severity_counts(db)
     agents = db.query(Result.agent_id).distinct().all()
@@ -35,6 +36,7 @@ async def get_severity_counts(request: Request, db: Session = Depends(get_db)):
 @route.get("/details")
 async def read_tasks(request: Request, db: Session = Depends(get_db)):
+    """Show all tasks and their current state"""
     tasks = db.query(Task).order_by(Task.domain).all()

     results = (
@@ -79,7 +81,8 @@ async def read_tasks(request: Request, db: Session = Depends(get_db)):
 async def get_domain_tasks(
     request: Request, domain: str, db: Session = Depends(get_db)
 ):
-    tasks = db.query(Task).filter(Task.domain.contains(domain)).all()
+    """Show all tasks attached to a domain"""
+    tasks = db.query(Task).filter(Task.domain.contains(domain)).all()  # type: ignore[attr-defined]
     return templates.TemplateResponse(
         "domain.html", {"request": request, "domain": domain, "tasks": tasks}
     )
@@ -87,6 +90,7 @@ async def get_domain_tasks(
 @route.get("/result/{result_id}")
 async def get_result(request: Request, result_id: int, db: Session = Depends(get_db)):
+    """Show the details of a result"""
     result = db.query(Result).get(result_id)
     return templates.TemplateResponse(
         "result.html", {"request": request, "result": result}
@@ -100,10 +104,11 @@ async def get_task_results(
     db: Session = Depends(get_db),
     config: Config = Depends(get_config),
 ):
+    """Show history of a tasks results"""
     results = (
         db.query(Result)
         .filter(Result.task_id == task_id)
-        .order_by(Result.submitted_at.desc())
+        .order_by(Result.submitted_at.desc())  # type: ignore[attr-defined]
         .all()
     )
     task = db.query(Task).get(task_id)
@@ -121,6 +126,7 @@ async def get_task_results(
 @route.get("/agents")
 async def get_agents(request: Request, db: Session = Depends(get_db)):
+    """Show argos agents and the last time the server saw them"""
     t1 = aliased(Result, name="t1")
     t2 = aliased(Result, name="t2")

View file

@@ -1,7 +1,8 @@
+"""Pydantic schemas for server"""
 import os
 from functools import lru_cache
 from os import environ
-from typing import Optional
+from typing import Optional,Union

 import yaml
 from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -20,14 +21,22 @@ class Settings(BaseSettings):
 class DevSettings(Settings):
+    """Settings for dev environment.
+
+    Uses config.yaml as config file.
+    Uses a SQLite database."""
     app_env: str = "dev"
-    database_url: str = "sqlite:////tmp/argos.db"
     yaml_file: str = "config.yaml"
     db_pool_size: Optional[int] = None
     db_max_overflow: Optional[int] = None
+    database_url: str = "sqlite:////tmp/argos.db"


 class TestSettings(Settings):
+    """Settings for test environment.
+
+    Uses tests/config.yaml as config file.
+    Uses a SQLite database."""
     app_env: str = "test"
     yaml_file: str = "tests/config.yaml"
     database_url: str = "sqlite:////tmp/test-argos.db"
@@ -36,6 +45,7 @@ class TestSettings(Settings):
 class ProdSettings(Settings):
+    """Settings for prod environment."""
     app_env: str = "prod"
     db_pool_size: Optional[int] = 10
     db_max_overflow: Optional[int] = 20
@@ -49,10 +59,13 @@ environments = {
 @lru_cache()
-def get_app_settings() -> Settings:
+def get_app_settings() -> Union[None,Settings]:
+    """Load settings depending on the environment"""
     app_env = environ.get("ARGOS_APP_ENV", "dev")
     settings = environments.get(app_env)
-    return settings()
+    if settings is not None:
+        return settings()
+    return None


 def read_yaml_config(filename):
@@ -66,5 +79,5 @@ def _load_yaml(filename):
         loader_class=yaml.FullLoader, base_dir=base_dir
     )

-    with open(filename, "r") as stream:
+    with open(filename, "r", encoding='utf-8') as stream:
         return yaml.load(stream, Loader=yaml.FullLoader)

25
config.yaml Normal file
View file

@@ -0,0 +1,25 @@
general:
  frequency: "5m"  # Run checks every minute.
  alerts:
    error:
      - local
    warning:
      - local
    alert:
      - local

service:
  secrets:
    - Shorts-Tribunal-Plentiful-Penknife-Lazily-Move0
    # Secrets can be generated using `openssl rand -base64 32`.
ssl:
  thresholds:
    - "15d": critical
    - "25d": warning

# It's also possible to define the checks in another file
# with the include syntax:
#
# websites: !include websites.yaml
#
websites: !include websites.yaml

View file

@@ -42,7 +42,8 @@ dev = [
     "ipdb>=0.13,<0.14",
     "sphinx-autobuild",
     "ruff==0.1.5,<1",
-    "djlint>=1.34.0"
+    "djlint>=1.34.0",
+    "pylint>=3.0.2",
 ]
 postgres = [
     "psycopg2-binary>=2.9,<3",