main #31

Merged
juan merged 154 commits from main into lab 2024-10-16 15:22:43 +00:00
25 changed files with 1284 additions and 58 deletions
Showing only changes of commit f6bef9c98a - Show all commits

2
.gitignore vendored
View File

@ -4,4 +4,4 @@
.passbolt.yml
inventories/local
venv
context/_build
inventories/local

8
.passbolt.tpl.yml Normal file
View File

@ -0,0 +1,8 @@
PASSBOLT_BASE_URL: https://passbolt.domain.local/
PASSBOLT_PASSPHRASE: "S3cr3tP4$$w0rd"
PASSBOLT_PRIVATE_KEY: |
-----BEGIN PGP PRIVATE KEY BLOCK-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-----END PGP PRIVATE KEY BLOCK-----

View File

@ -4,23 +4,21 @@ Collection of Ansible playbooks used in the Verdnatura server farm.
## Setup Ansible
Install Ansible on Debian.
### Debian
Install the Ansible package.
```
apt install ansible
```
Create Python virtual environment.
### Python
Create a Python virtual environment.
```
python3 -m venv venv
source venv/bin/activate
pip install --upgrade pip ansible==10.1.0 ansible-builder==3.1.0
pip install -r requirements.txt
deactivate
```
Install dependencies.
```
ansible-galaxy collection install -r collections/requirements.yml
```
Before running any Ansible command, activate the Python virtual environment.
@ -28,14 +26,27 @@ Before running any Ansible command, activate the Python virtual environment.
source venv/bin/activate
```
Once you're done, deactivate the virtual environment.
```
deactivate
```
### All platforms
Install dependencies.
```
ansible-galaxy collection install -r collections/requirements.yml
```
## Run playbook
Before merging changes into protected branches, playbooks should be tested
locally to ensure they work properly.
locally to ensure they work properly. The *local* inventory can also be used,
which is not uploaded to the repository.
Run playbook on inventory host.
```
ansible-playbook -i inventories/lab -l <host> [-t tag1,tag2...] playbooks/ping.yml
ansible-playbook -i inventories/local -l <host> [-t tag1,tag2...] playbooks/ping.yml
```
Run playbook on the fly on a host not declared in the inventory.
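For example (a sketch using Ansible's inline-inventory syntax, where the trailing
comma turns the host into an ad-hoc inventory):
```
ansible-playbook -i <host>, [-t tag1,tag2...] playbooks/ping.yml
```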
@ -55,6 +66,13 @@ When running playbooks that use any of the keystores mentioned above, the
*run-playbook.sh* script can be used; it is an overlay over the original
*ansible-playbook* command that injects the necessary parameters.
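For example (a sketch; the wrapper simply forwards its arguments to *ansible-playbook*):
```
./run-playbook.sh -i inventories/lab -l <host> [-t tag1,tag2...] playbooks/ping.yml
```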
### Passbolt
Add the necessary environment variables to the *.passbolt.yml* file; the
template file *.passbolt.tpl.yml* is included as a reference:
* https://galaxy.ansible.com/ui/repo/published/anatomicjc/passbolt/docs/
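As a quick smoke test (a sketch, assuming the PASSBOLT_* variables from
*.passbolt.yml* are exported in the shell environment), the lookup can be
exercised with an ad-hoc command:
```
# prints the password of the 'test' resource fetched through the passbolt lookup
ansible localhost -m debug -a "msg={{ lookup('anatomicjc.passbolt.passbolt', 'test').password }}"
```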
### Ansible vault
To manage Ansible vault place the encryption password into *.vault-pass* file.
@ -64,16 +82,9 @@ Manage the vault.
ansible-vault {view,edit,create} --vault-pass-file .vault-pass .vault.yml
```
> [!CAUTION]
> The files used for the vault must only be used locally and
> under **no** circumstances can they be uploaded to the repository.
### Passbolt
Add the necessary environment variables to the *.passbolt.yml* file:
* https://galaxy.ansible.com/ui/repo/published/anatomicjc/passbolt/docs/
## Build execution environment for AWX
Create an image with *ansible-builder* and upload it to registry.
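A hedged sketch of the build-and-push flow (the registry host below is a
placeholder; the tag matches the build command shown further down):
```
ansible-builder build --tag awx-ee:vn1
# registry.example.com is a placeholder for the actual registry
podman tag awx-ee:vn1 registry.example.com/awx-ee:vn1
podman push registry.example.com/awx-ee:vn1
```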
@ -95,5 +106,5 @@ ansible-builder build --tag awx-ee:vn1
* https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_vars_facts.html
* https://ansible.readthedocs.io/projects/builder/en/latest/
* https://www.ansible.com/blog/introduction-to-ansible-builder/
* https://github.com/ansible/awx-ee/tree/devel
* https://github.com/ansible/awx-ee/
* https://www.passbolt.com/blog/managing-secrets-in-ansible-using-passbolt

18
context/_build/bindep.txt Normal file
View File

@ -0,0 +1,18 @@
git-core [platform:rpm]
python3.11-devel [platform:rpm compile]
libcurl-devel [platform:rpm compile]
krb5-devel [platform:rpm compile]
krb5-workstation [platform:rpm]
subversion [platform:rpm]
subversion [platform:dpkg]
git-lfs [platform:rpm]
sshpass [platform:rpm]
rsync [platform:rpm]
epel-release [platform:rpm]
unzip [platform:rpm]
podman-remote [platform:rpm]
cmake [platform:rpm compile]
gcc [platform:rpm compile]
gcc-c++ [platform:rpm compile]
make [platform:rpm compile]
openssl-devel [platform:rpm compile]

View File

@ -0,0 +1,3 @@
py-passbolt==0.0.18
cryptography==3.3.2
PGPy==0.6.0

View File

@ -0,0 +1,10 @@
collections:
- name: ansible.utils
version: '>=4.1.0'
type: galaxy
- name: ansible.windows
version: '>=2.3.0'
type: galaxy
- name: anatomicjc.passbolt
version: '>=0.0.14'
type: galaxy

169
context/_build/scripts/assemble Executable file
View File

@ -0,0 +1,169 @@
#!/bin/bash
# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make a list of bindep dependencies and a collection of built binary
# wheels for the repo in question as well as its python dependencies.
# Install javascript tools as well to support python that needs javascript
# at build time.
set -ex
RELEASE=$(source /etc/os-release; echo $ID)
# NOTE(pabelanger): Allow users to force either microdnf or dnf as a package
# manager.
PKGMGR="${PKGMGR:-}"
PKGMGR_OPTS="${PKGMGR_OPTS:-}"
PKGMGR_PRESERVE_CACHE="${PKGMGR_PRESERVE_CACHE:-}"
PYCMD="${PYCMD:=/usr/bin/python3}"
PIPCMD="${PIPCMD:=$PYCMD -m pip}"
if [ -z $PKGMGR ]; then
# Expect dnf to be installed, however if we find microdnf default to it.
PKGMGR=/usr/bin/dnf
if [ -f "/usr/bin/microdnf" ]; then
PKGMGR=/usr/bin/microdnf
fi
fi
if [ "$PKGMGR" = "/usr/bin/microdnf" ]
then
if [ -z "${PKGMGR_OPTS}" ]; then
# NOTE(pabelanger): skip install docs and weak dependencies to
# make smaller images. Sadly, setting these in dnf.conf don't
# appear to work.
PKGMGR_OPTS="--nodocs --setopt install_weak_deps=0"
fi
fi
# NOTE(pabelanger): Ensure all the directory we use exists regardless
# of the user first creating them or not.
mkdir -p /output/bindep
mkdir -p /output/wheels
mkdir -p /tmp/src
cd /tmp/src
function install_bindep {
# Protect from the bindep builder image use of the assemble script
# to produce a wheel. Note we append because we want all
# sibling packages in here too
if [ -f bindep.txt ] ; then
bindep -l newline | sort >> /output/bindep/run.txt || true
if [ "$RELEASE" == "centos" ] ; then
bindep -l newline -b epel | sort >> /output/bindep/stage.txt || true
grep -Fxvf /output/bindep/run.txt /output/bindep/stage.txt >> /output/bindep/epel.txt || true
rm -rf /output/bindep/stage.txt
fi
compile_packages=$(bindep -b compile || true)
if [ ! -z "$compile_packages" ] ; then
$PKGMGR install -y $PKGMGR_OPTS ${compile_packages}
fi
fi
}
function install_wheels {
# NOTE(pabelanger): If there are build requirements to install, do so.
# However do not cache them as we do not want them in the final image.
if [ -f /tmp/src/build-requirements.txt ] && [ ! -f /tmp/src/.build-requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --no-cache -r /tmp/src/build-requirements.txt
touch /tmp/src/.build-requirements.txt
fi
# Build a wheel so that we have an install target.
# pip install . in the container context with the mounted
# source dir gets ... exciting, if setup.py exists.
# We run sdist first to trigger code generation steps such
# as are found in zuul, since the sequencing otherwise
# happens in a way that makes wheel content copying unhappy.
# pip wheel isn't used here because it puts all of the output
# in the output dir and not the wheel cache, so it's not
# possible to tell what is the wheel for the project and
# what is the wheel cache.
if [ -f setup.py ] ; then
$PYCMD setup.py sdist bdist_wheel -d /output/wheels
fi
# Install everything so that the wheel cache is populated with
# transitive depends. If a requirements.txt file exists, install
# it directly so that people can use git url syntax to do things
# like pick up patched but unreleased versions of dependencies.
# Only do this for the main package (i.e. only write requirements
# once).
if [ -f /tmp/src/requirements.txt ] && [ ! -f /output/requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /tmp/src/requirements.txt
cp /tmp/src/requirements.txt /output/requirements.txt
fi
# If we didn't build wheels, we can skip trying to install it.
if [ $(ls -1 /output/wheels/*whl 2>/dev/null | wc -l) -gt 0 ]; then
$PIPCMD uninstall -y /output/wheels/*.whl
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels /output/wheels/*whl
fi
}
PACKAGES=$*
PIP_OPTS="${PIP_OPTS-}"
# bindep the main package
install_bindep
# go through ZUUL_SIBLINGS, if any, and build those wheels too
for sibling in ${ZUUL_SIBLINGS:-}; do
pushd .zuul-siblings/${sibling}
install_bindep
popd
done
# Use a clean virtualenv for install steps to prevent things from the
# current environment making us not build a wheel.
# NOTE(pabelanger): We allow users to install distro python packages of
# libraries. This is important for projects that eventually want to produce
# an RPM or offline install.
$PYCMD -m venv /tmp/venv --system-site-packages --without-pip
source /tmp/venv/bin/activate
# If there is an upper-constraints.txt file in the source tree,
# use it in the pip commands.
if [ -f /tmp/src/upper-constraints.txt ] ; then
cp /tmp/src/upper-constraints.txt /output/upper-constraints.txt
CONSTRAINTS="-c /tmp/src/upper-constraints.txt"
fi
# If we got a list of packages, install them, otherwise install the
# main package.
if [[ $PACKAGES ]] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels $PACKAGES
for package in $PACKAGES ; do
echo "$package" >> /output/packages.txt
done
else
install_wheels
fi
# go through ZUUL_SIBLINGS, if any, and build those wheels too
for sibling in ${ZUUL_SIBLINGS:-}; do
pushd .zuul-siblings/${sibling}
install_wheels
popd
done
if [ -z $PKGMGR_PRESERVE_CACHE ]; then
$PKGMGR clean all
rm -rf /var/cache/{dnf,yum}
fi
rm -rf /var/lib/dnf/history.*
rm -rf /var/log/{dnf.*,hawkey.log}
rm -rf /tmp/venv

View File

@ -0,0 +1,110 @@
#!/bin/bash
# Copyright (c) 2023 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
# Script to validate that Ansible and Ansible Runner are installed.
#
# Usage: check_ansible <PYCMD>
#
# Options:
# PYCMD - The path to the python executable to use.
#####################################################################
set -x
PYCMD=$1
if [ -z "$PYCMD" ]
then
echo "Usage: check_ansible <PYCMD>"
exit 1
fi
if [ ! -x "$PYCMD" ]
then
echo "$PYCMD is not an executable"
exit 1
fi
ansible --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible installation
An Ansible installation cannot be found in the final builder image.
Ansible must be installed in the final image. If you are using a
recent enough version of the execution environment file, you may
use the 'dependencies.ansible_core' configuration option to install
Ansible for you, or use 'additional_build_steps' to manually do
this yourself. Alternatively, use a base image with Ansible already
installed.
**********************************************************************
EOF
exit 1
fi
ansible-runner --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible Runner installation
An Ansible Runner installation cannot be found in the final builder
image.
Ansible Runner must be installed in the final image. If you are
using a recent enough version of the execution environment file, you
may use the 'dependencies.ansible_runner' configuration option to
install Ansible Runner for you, or use 'additional_build_steps' to
manually do this yourself. Alternatively, use a base image with
Ansible Runner already installed.
**********************************************************************
EOF
exit 1
fi
$PYCMD -c 'import ansible ; import ansible_runner'
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible or Ansible Runner for selected Python
An Ansible and/or Ansible Runner installation cannot be found in
the final builder image using the following Python interpreter:
$PYCMD
Ansible and Ansible Runner must be installed in the final image and
available to the selected Python interpreter. If you are using a
recent enough version of the execution environment file, you may use
the 'dependencies.ansible_core' configuration option to install
Ansible and the 'dependencies.ansible_runner' configuration option
to install Ansible Runner. You can also use 'additional_build_steps'
to manually do this yourself. Alternatively, use a base image with
Ansible and Ansible Runner already installed.
**********************************************************************
EOF
exit 1
fi
exit 0

View File

@ -0,0 +1,46 @@
#!/bin/bash
# Copyright (c) 2023 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
# Script to validate that Ansible Galaxy is installed on the system.
#####################################################################
set -x
ansible-galaxy --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible installation
The 'ansible-galaxy' command is not found in the base image. This
image is used to create the intermediary image that performs the
Galaxy collection and role installation process.
Ansible must be installed in the base image. If you are using a
recent enough version of the execution environment file, you may
use the 'dependencies.ansible_core' configuration option to install
Ansible for you, or use 'additional_build_steps' to manually do
this yourself. Alternatively, use a base image with Ansible already
installed.
**********************************************************************
EOF
exit 1
fi
exit 0

152
context/_build/scripts/entrypoint Executable file
View File

@ -0,0 +1,152 @@
#!/usr/bin/env bash
# Copyright: (c) 2023, Ansible Project
# Apache License, Version 2.0 (see LICENSE.md or https://www.apache.org/licenses/LICENSE-2.0)
# This entrypoint script papers over a number of problems that manifest under different container runtimes when
# using ephemeral UIDs, then chain-execs to the requested init system and/or command. It is an implementation
# detail for the convenience of Ansible execution environments built by ansible-builder.
#
# If we're running as a legit user that has an entry in /etc/passwd and a valid and writeable homedir, we're all good.
#
# If the current uid is not in /etc/passwd, we'll attempt to add it, but /etc/passwd is often not writable by GID 0.
# `ansible-builder` defaults to making /etc/passwd writable by GID0 by default for maximum compatibility, but this is
# not guaranteed. Some runtimes/wrappers (eg podman, cri-o) already create an /etc/passwd entry on the fly as-needed,
# but they may set the homedir to something inaccessible (eg, `/`, WORKDIR).
#
# There are numerous cases where a missing or incorrect homedir in /etc/passwd are fatal. It breaks
# `async` in ansible-core, things like `echo ~someuid`, and numerous other software packages that assume a valid POSIX
# user configuration.
#
# If the homedir listed in /etc/passwd is not writeable by the current user (supposed to be primary GID0), we'll try
# to make it writeable (except `/`), or select another writeable home directory from `$HOME`, `/runner`, or `/tmp` and
# update $HOME (and /etc/passwd if possible) accordingly for the current process chain.
#
# This script is generally silent by default, but some likely-fatal cases will issue a brief warning to stderr. The
# envvars described below can be set before container init to cause faster failures and/or get tracing output.
# options:
# EP_BASH_DEBUG=1 (enable set -x)
# EP_DEBUG_TRACE=1 (enable debug trace to stderr)
# EP_ON_ERROR=ignore/warn/fail (default ignore)
set -eu
if (( "${EP_BASH_DEBUG:=0}" == 1 )); then
set -x
fi
: "${EP_DEBUG_TRACE:=0}"
: "${EP_ON_ERROR:=warn}"
: "${HOME:=}"
CUR_UID=$(id -u)
CUR_USERNAME=$(id -u -n 2> /dev/null || true) # whoami-free way to get current username, falls back to current uid
DEFAULT_HOME="/runner"
DEFAULT_SHELL="/bin/bash"
if (( "$EP_DEBUG_TRACE" == 1 )); then
function log_debug() { echo "EP_DEBUG: $1" 1>&2; }
else
function log_debug() { :; }
fi
log_debug "entrypoint.sh started"
case "$EP_ON_ERROR" in
"fail")
function maybe_fail() { echo "EP_FAIL: $1" 1>&2; exit 1; }
;;
"warn")
function maybe_fail() { echo "EP_WARN: $1" 1>&2; }
;;
*)
function maybe_fail() { log_debug "EP_FAIL (ignored): $1"; }
;;
esac
function is_dir_writable() {
[ -d "$1" ] && [ -w "$1" ] && [ -x "$1" ]
}
function ensure_current_uid_in_passwd() {
log_debug "is current uid ${CUR_UID} in /etc/passwd?"
if ! getent passwd "${CUR_USERNAME}" &> /dev/null ; then
if [ -w "/etc/passwd" ]; then
log_debug "appending missing uid ${CUR_UID} into /etc/passwd"
# use the default homedir; we may have to rewrite it to another value later if it's inaccessible
echo "${CUR_UID}:x:${CUR_UID}:0:container user ${CUR_UID}:${DEFAULT_HOME}:${DEFAULT_SHELL}" >> /etc/passwd
else
maybe_fail "uid ${CUR_UID} is missing from /etc/passwd, which is not writable; this error is likely fatal"
fi
else
log_debug "current uid is already in /etc/passwd"
fi
}
function ensure_writeable_homedir() {
if (is_dir_writable "${CANDIDATE_HOME}") ; then
log_debug "candidate homedir ${CANDIDATE_HOME} is valid and writeable"
else
if [ "${CANDIDATE_HOME}" == "/" ]; then
log_debug "skipping attempt to fix permissions on / as homedir"
return 1
fi
log_debug "candidate homedir ${CANDIDATE_HOME} is missing or not writeable; attempt to fix"
if ! (mkdir -p "${CANDIDATE_HOME}" >& /dev/null && chmod -R ug+rwx "${CANDIDATE_HOME}" >& /dev/null) ; then
log_debug "candidate homedir ${CANDIDATE_HOME} cannot be made writeable"
return 1
else
log_debug "candidate homedir ${CANDIDATE_HOME} was successfully made writeable"
fi
fi
# this might work; export it even if we end up not being able to update /etc/passwd
# this ensures the envvar matches current reality for this session; future sessions should set automatically if /etc/passwd is accurate
export HOME=${CANDIDATE_HOME}
if [ "${CANDIDATE_HOME}" == "${PASSWD_HOME}" ] ; then
log_debug "candidate homedir ${CANDIDATE_HOME} matches /etc/passwd"
return 0
fi
if ! [ -w /etc/passwd ]; then
log_debug "candidate homedir ${CANDIDATE_HOME} is valid for ${CUR_USERNAME}, but /etc/passwd is not writable to update it"
return 1
fi
log_debug "resetting homedir for user ${CUR_USERNAME} to ${CANDIDATE_HOME} in /etc/passwd"
# sed -i wants to create a tempfile next to the original, which won't work with /etc permissions in many cases,
# so just do it in memory and overwrite the existing file if we succeeded
NEWPW=$(sed -r "s;(^${CUR_USERNAME}:(.*:){4})(.*:);\1${CANDIDATE_HOME}:;g" /etc/passwd)
echo "${NEWPW}" > /etc/passwd
}
ensure_current_uid_in_passwd
log_debug "current value of HOME is ${HOME}"
PASSWD_HOME=$(getent passwd "${CUR_USERNAME}" | cut -d: -f6)
log_debug "user ${CUR_USERNAME} homedir from /etc/passwd is ${PASSWD_HOME}"
CANDIDATE_HOMES=("${PASSWD_HOME}" "${HOME}" "${DEFAULT_HOME}" "/tmp")
# we'll set this in the loop as soon as we find a writeable dir
unset HOME
for CANDIDATE_HOME in "${CANDIDATE_HOMES[@]}"; do
if ensure_writeable_homedir ; then
break
fi
done
if ! [ -v HOME ] ; then
maybe_fail "a valid homedir could not be set for ${CUR_USERNAME}; this is likely fatal"
fi
# chain exec whatever we were asked to run (ideally an init system) to keep any envvar state we've set
log_debug "chain exec-ing requested command $*"
exec "${@}"

View File

@ -0,0 +1,105 @@
#!/bin/bash
# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# NOTE(pabelanger): Allow users to force either microdnf or dnf as a package
# manager.
PKGMGR="${PKGMGR:-}"
PKGMGR_OPTS="${PKGMGR_OPTS:-}"
PKGMGR_PRESERVE_CACHE="${PKGMGR_PRESERVE_CACHE:-}"
PYCMD="${PYCMD:=/usr/bin/python3}"
PIPCMD="${PIPCMD:=$PYCMD -m pip}"
PIP_OPTS="${PIP_OPTS-}"
if [ -z $PKGMGR ]; then
# Expect dnf to be installed, however if we find microdnf default to it.
PKGMGR=/usr/bin/dnf
if [ -f "/usr/bin/microdnf" ]; then
PKGMGR=/usr/bin/microdnf
fi
fi
if [ "$PKGMGR" = "/usr/bin/microdnf" ]
then
if [ -z "${PKGMGR_OPTS}" ]; then
# NOTE(pabelanger): skip install docs and weak dependencies to
# make smaller images. Sadly, setting these in dnf.conf don't
# appear to work.
PKGMGR_OPTS="--nodocs --setopt install_weak_deps=0"
fi
fi
if [ -f /output/bindep/run.txt ] ; then
PACKAGES=$(cat /output/bindep/run.txt)
if [ ! -z "$PACKAGES" ]; then
$PKGMGR install -y $PKGMGR_OPTS $PACKAGES
fi
fi
if [ -f /output/bindep/epel.txt ] ; then
EPEL_PACKAGES=$(cat /output/bindep/epel.txt)
if [ ! -z "$EPEL_PACKAGES" ]; then
$PKGMGR install -y $PKGMGR_OPTS --enablerepo epel $EPEL_PACKAGES
fi
fi
# If there's a constraints file, use it.
if [ -f /output/upper-constraints.txt ] ; then
CONSTRAINTS="-c /output/upper-constraints.txt"
fi
# If a requirements.txt file exists,
# install it directly so that people can use git url syntax
# to do things like pick up patched but unreleased versions
# of dependencies.
if [ -f /output/requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /output/requirements.txt
fi
# Add any requested extras to the list of things to install
EXTRAS=""
for extra in $* ; do
EXTRAS="${EXTRAS} -r /output/$extra/requirements.txt"
done
if [ -f /output/packages.txt ] ; then
# If a package list was passed to assemble, install that in the final
# image.
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /output/packages.txt $EXTRAS
else
# Install the wheels. Uninstall any existing version as siblings maybe
# be built with the same version number as the latest release, but we
# really want the speculatively built wheels installed over any
# automatic dependencies.
# NOTE(pabelanger): It is possible a project may not have a wheel, but does have requirements.txt
if [ $(ls -1 /output/wheels/*whl 2>/dev/null | wc -l) -gt 0 ]; then
$PIPCMD uninstall -y /output/wheels/*.whl
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels /output/wheels/*.whl $EXTRAS
elif [ ! -z "$EXTRAS" ] ; then
$PIPCMD uninstall -y $EXTRAS
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels $EXTRAS
fi
fi
# clean up after ourselves, unless requested to keep the cache
if [[ "$PKGMGR_PRESERVE_CACHE" != always ]]; then
$PKGMGR clean all
rm -rf /var/cache/{dnf,yum}
fi
rm -rf /var/lib/dnf/history.*
rm -rf /var/log/{dnf.*,hawkey.log}

View File

@ -0,0 +1,507 @@
from __future__ import annotations
import argparse
import logging
import os
import re
import sys
import yaml
from packaging.requirements import InvalidRequirement, Requirement
BASE_COLLECTIONS_PATH = '/usr/share/ansible/collections'
# regex for a comment at the start of a line, or embedded with leading space(s)
COMMENT_RE = re.compile(r'(?:^|\s+)#.*$')
EXCLUDE_REQUIREMENTS = frozenset((
# obviously already satisfied or unwanted
'ansible', 'ansible-base', 'python', 'ansible-core',
# general python test requirements
'tox', 'pycodestyle', 'yamllint', 'pylint',
'flake8', 'pytest', 'pytest-xdist', 'coverage', 'mock', 'testinfra',
# test requirements highly specific to Ansible testing
'ansible-lint', 'molecule', 'galaxy-importer', 'voluptuous',
# already present in image for py3 environments
'yaml', 'pyyaml', 'json',
))
logger = logging.getLogger(__name__)
class CollectionDefinition:
"""
This class represents the dependency metadata for a collection
should be replaced by logic to hit the Galaxy API if made available
"""
def __init__(self, collection_path):
self.reference_path = collection_path
# NOTE: Filenames should match constants.DEAFULT_EE_BASENAME and constants.YAML_FILENAME_EXTENSIONS.
meta_file_base = os.path.join(collection_path, 'meta', 'execution-environment')
ee_exists = False
for ext in ('yml', 'yaml'):
meta_file = f"{meta_file_base}.{ext}"
if os.path.exists(meta_file):
with open(meta_file, 'r') as f:
self.raw = yaml.safe_load(f)
ee_exists = True
break
if not ee_exists:
self.raw = {'version': 1, 'dependencies': {}}
# Automatically infer requirements for collection
for entry, filename in [('python', 'requirements.txt'), ('system', 'bindep.txt')]:
candidate_file = os.path.join(collection_path, filename)
if has_content(candidate_file):
self.raw['dependencies'][entry] = filename
def target_dir(self):
namespace, name = self.namespace_name()
return os.path.join(
BASE_COLLECTIONS_PATH, 'ansible_collections',
namespace, name
)
def namespace_name(self):
"Returns 2-tuple of namespace and name"
path_parts = [p for p in self.reference_path.split(os.path.sep) if p]
return tuple(path_parts[-2:])
def get_dependency(self, entry):
"""A collection is only allowed to reference a file by a relative path
which is relative to the collection root
"""
req_file = self.raw.get('dependencies', {}).get(entry)
if req_file is None:
return None
if os.path.isabs(req_file):
raise RuntimeError(
'Collections must specify relative paths for requirements files. '
f'The file {req_file} specified by {self.reference_path} violates this.'
)
return req_file
def line_is_empty(line):
return bool((not line.strip()) or line.startswith('#'))
def read_req_file(path):
"""Provide some minimal error and display handling for file reading"""
if not os.path.exists(path):
print(f'Expected requirements file not present at: {os.path.abspath(path)}')
with open(path, 'r') as f:
return f.read()
def pip_file_data(path):
pip_content = read_req_file(path)
pip_lines = []
for line in pip_content.split('\n'):
if line_is_empty(line):
continue
if line.startswith('-r') or line.startswith('--requirement'):
_, new_filename = line.split(None, 1)
new_path = os.path.join(os.path.dirname(path or '.'), new_filename)
pip_lines.extend(pip_file_data(new_path))
else:
pip_lines.append(line)
return pip_lines
def bindep_file_data(path):
sys_content = read_req_file(path)
sys_lines = []
for line in sys_content.split('\n'):
if line_is_empty(line):
continue
sys_lines.append(line)
return sys_lines
def process_collection(path):
"""Return a tuple of (python_dependencies, system_dependencies) for the
collection install path given.
Both items returned are a list of dependencies.
:param str path: root directory of collection (this would contain galaxy.yml file)
"""
col_def = CollectionDefinition(path)
py_file = col_def.get_dependency('python')
pip_lines = []
if py_file:
pip_lines = pip_file_data(os.path.join(path, py_file))
sys_file = col_def.get_dependency('system')
bindep_lines = []
if sys_file:
bindep_lines = bindep_file_data(os.path.join(path, sys_file))
return (pip_lines, bindep_lines)
def process(data_dir=BASE_COLLECTIONS_PATH,
user_pip=None,
user_bindep=None,
exclude_pip=None,
exclude_bindep=None,
exclude_collections=None):
"""
Build a dictionary of Python and system requirements from any collections
installed in data_dir, and any user specified requirements.
Excluded requirements, if any, will be inserted into the return dict.
Example return dict:
{
'python': {
'collection.a': ['abc', 'def'],
'collection.b': ['ghi'],
'user': ['jkl'],
'exclude: ['abc'],
},
'system': {
'collection.a': ['ZYX'],
'user': ['WVU'],
'exclude': ['ZYX'],
},
'excluded_collections': [
'a.b',
]
}
"""
paths = []
path_root = os.path.join(data_dir, 'ansible_collections')
# build a list of all the valid collection paths
if os.path.exists(path_root):
for namespace in sorted(os.listdir(path_root)):
if not os.path.isdir(os.path.join(path_root, namespace)):
continue
for name in sorted(os.listdir(os.path.join(path_root, namespace))):
collection_dir = os.path.join(path_root, namespace, name)
if not os.path.isdir(collection_dir):
continue
files_list = os.listdir(collection_dir)
if 'galaxy.yml' in files_list or 'MANIFEST.json' in files_list:
paths.append(collection_dir)
# populate the requirements content
py_req = {}
sys_req = {}
for path in paths:
col_pip_lines, col_sys_lines = process_collection(path)
col_def = CollectionDefinition(path)
namespace, name = col_def.namespace_name()
key = f'{namespace}.{name}'
if col_pip_lines:
py_req[key] = col_pip_lines
if col_sys_lines:
sys_req[key] = col_sys_lines
# add on entries from user files, if they are given
if user_pip:
col_pip_lines = pip_file_data(user_pip)
if col_pip_lines:
py_req['user'] = col_pip_lines
if exclude_pip:
col_pip_exclude_lines = pip_file_data(exclude_pip)
if col_pip_exclude_lines:
py_req['exclude'] = col_pip_exclude_lines
if user_bindep:
col_sys_lines = bindep_file_data(user_bindep)
if col_sys_lines:
sys_req['user'] = col_sys_lines
if exclude_bindep:
col_sys_exclude_lines = bindep_file_data(exclude_bindep)
if col_sys_exclude_lines:
sys_req['exclude'] = col_sys_exclude_lines
retval = {
'python': py_req,
'system': sys_req,
}
if exclude_collections:
# This file should just be a newline separated list of collection names,
# so reusing bindep_file_data() to read it should work fine.
excluded_collection_list = bindep_file_data(exclude_collections)
if excluded_collection_list:
retval['excluded_collections'] = excluded_collection_list
return retval
def has_content(candidate_file):
"""Beyond checking that the candidate exists, this also assures
that the file has something other than whitespace,
which can cause errors when given to pip.
"""
if not os.path.exists(candidate_file):
return False
with open(candidate_file, 'r') as f:
content = f.read()
return bool(content.strip().strip('\n'))
def strip_comments(reqs: dict[str, list]) -> dict[str, list]:
"""
Filter any comments out of the Python collection requirements input.
:param dict reqs: A dict of Python requirements, keyed by collection name.
:return: Same as the input parameter, except with no comment lines.
"""
result: dict[str, list] = {}
for collection, lines in reqs.items():
for line in lines:
# strip comments
if (base_line := COMMENT_RE.sub('', line.strip())):
result.setdefault(collection, []).append(base_line)
return result
def should_be_excluded(value: str, exclusion_list: list[str]) -> bool:
"""
Test if `value` matches against any value in `exclusion_list`.
The exclusion_list values are either strings to be compared in a case-insensitive
manner against value, OR, they are regular expressions to be tested against the
value. A regular expression will contain '~' as the first character.
:return: True if the value should be excluded, False otherwise.
"""
for exclude_value in exclusion_list:
if exclude_value[0] == "~":
pattern = exclude_value[1:]
if re.fullmatch(pattern.lower(), value.lower()):
return True
elif exclude_value.lower() == value.lower():
return True
return False
def filter_requirements(reqs: dict[str, list],
exclude: list[str] | None = None,
exclude_collections: list[str] | None = None,
is_python: bool = True) -> list[str]:
"""
Given a dictionary of Python requirement lines keyed off collections,
return a list of cleaned up (no source comments) requirements
annotated with comments indicating the sources based off the collection keys.
Currently, non-pep508 compliant Python entries are passed through. We also no
longer attempt to normalize names (replace '_' with '-', etc), other than
lowercasing it for exclusion matching, since we no longer are attempting
to combine similar entries.
:param dict reqs: A dict of either Python or system requirements, keyed by collection name.
:param list exclude: A list of requirements to be excluded from the output.
:param list exclude_collections: A list of collection names from which to exclude all requirements.
:param bool is_python: This should be set to True for Python requirements, as each
will be tested for PEP508 compliance. This should be set to False for system requirements.
:return: A list of filtered and annotated requirements.
"""
exclusions: list[str] = []
collection_ignore_list: list[str] = []
if exclude:
exclusions = exclude.copy()
if exclude_collections:
collection_ignore_list = exclude_collections.copy()
annotated_lines: list[str] = []
uncommented_reqs = strip_comments(reqs)
for collection, lines in uncommented_reqs.items():
# Bypass this collection if we've been told to ignore all requirements from it.
if should_be_excluded(collection, collection_ignore_list):
logger.debug("# Excluding all requirements from collection '%s'", collection)
continue
for line in lines:
# Determine the simple name based on type of requirement
if is_python:
try:
parsed_req = Requirement(line)
name = parsed_req.name
except InvalidRequirement:
logger.warning(
"Passing through non-PEP508 compliant line '%s' from collection '%s'",
line, collection
)
annotated_lines.append(line) # We intentionally won't annotate these lines (multi-line?)
continue
else:
# bindep system requirements have the package name as the first "word" on the line
name = line.split(maxsplit=1)[0]
if collection.lower() not in {'user', 'exclude'}:
lower_name = name.lower()
if lower_name in EXCLUDE_REQUIREMENTS:
logger.debug("# Excluding requirement '%s' from '%s'", name, collection)
continue
if should_be_excluded(lower_name, exclusions):
logger.debug("# Explicitly excluding requirement '%s' from '%s'", name, collection)
continue
annotated_lines.append(f'{line} # from collection {collection}')
return annotated_lines
def parse_args(args=None):
parser = argparse.ArgumentParser(
prog='introspect',
description=(
'ansible-builder introspection; injected and used during execution environment build'
)
)
subparsers = parser.add_subparsers(
help='The command to invoke.',
dest='action',
required=True,
)
create_introspect_parser(subparsers)
return parser.parse_args(args)
def run_introspect(args, log):
data = process(args.folder,
user_pip=args.user_pip,
user_bindep=args.user_bindep,
exclude_pip=args.exclude_pip,
exclude_bindep=args.exclude_bindep,
exclude_collections=args.exclude_collections)
log.info('# Dependency data for %s', args.folder)
excluded_collections = data.pop('excluded_collections', None)
data['python'] = filter_requirements(
data['python'],
exclude=data['python'].pop('exclude', []),
exclude_collections=excluded_collections,
)
data['system'] = filter_requirements(
data['system'],
exclude=data['system'].pop('exclude', []),
exclude_collections=excluded_collections,
is_python=False
)
print('---')
print(yaml.dump(data, default_flow_style=False))
if args.write_pip and data.get('python'):
write_file(args.write_pip, data.get('python') + [''])
if args.write_bindep and data.get('system'):
write_file(args.write_bindep, data.get('system') + [''])
sys.exit(0)
def create_introspect_parser(parser):
introspect_parser = parser.add_parser(
'introspect',
help='Introspects collections in folder.',
description=(
'Loops over collections in folder and returns data about dependencies. '
'This is used internally and exposed here for verification. '
'This is targeted toward collection authors and maintainers.'
)
)
introspect_parser.add_argument('--sanitize', action='store_true',
help=argparse.SUPPRESS)
introspect_parser.add_argument(
'folder', default=BASE_COLLECTIONS_PATH, nargs='?',
help=(
'Ansible collections path(s) to introspect. '
'This should have a folder named ansible_collections inside of it.'
)
)
introspect_parser.add_argument(
'--user-pip', dest='user_pip',
help='An additional file to combine with collection pip requirements.'
)
introspect_parser.add_argument(
'--user-bindep', dest='user_bindep',
help='An additional file to combine with collection bindep requirements.'
)
introspect_parser.add_argument(
'--exclude-bindep-reqs', dest='exclude_bindep',
help='An additional file to exclude specific bindep requirements from collections.'
)
introspect_parser.add_argument(
'--exclude-pip-reqs', dest='exclude_pip',
help='An additional file to exclude specific pip requirements from collections.'
)
introspect_parser.add_argument(
'--exclude-collection-reqs', dest='exclude_collections',
help='An additional file to exclude all requirements from the listed collections.'
)
introspect_parser.add_argument(
'--write-pip', dest='write_pip',
help='Write the combined pip requirements file to this location.'
)
introspect_parser.add_argument(
'--write-bindep', dest='write_bindep',
help='Write the combined bindep requirements file to this location.'
)
return introspect_parser
def write_file(filename: str, lines: list) -> bool:
parent_dir = os.path.dirname(filename)
if parent_dir and not os.path.exists(parent_dir):
logger.warning('Creating parent directory for %s', filename)
os.makedirs(parent_dir)
new_text = '\n'.join(lines)
if os.path.exists(filename):
with open(filename, 'r') as f:
if f.read() == new_text:
logger.debug("File %s is already up-to-date.", filename)
return False
logger.warning('File %s had modifications and will be rewritten', filename)
with open(filename, 'w') as f:
f.write(new_text)
return True
def main():
args = parse_args()
if args.action == 'introspect':
run_introspect(args, logger)
logger.error("An error has occurred.")
sys.exit(1)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,56 @@
#!/bin/bash
# Copyright (c) 2024 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
# Script to encapsulate pip installation.
#
# Usage: pip_install <PYCMD>
#
# Options:
# PYCMD - The path to the python executable to use.
#####################################################################
set -x
PYCMD=$1
if [ -z "$PYCMD" ]
then
echo "Usage: pip_install <PYCMD>"
exit 1
fi
if [ ! -x "$PYCMD" ]
then
echo "$PYCMD is not an executable"
exit 1
fi
# This is going to be our default functionality for now. This will likely
# need to change if we add support for non-RHEL distros.
$PYCMD -m ensurepip --root /
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - pip installation failed for Python $PYCMD
**********************************************************************
EOF
exit 1
fi
exit 0

View File

@ -1,4 +1,5 @@
ansible_host: "{{inventory_hostname_short}}.{{host_domain}}"
hostname_fqdn: "{{inventory_hostname_short}}.{{host_domain}}"
ansible_host: "{{hostname_fqdn}}"
passbolt: 'anatomicjc.passbolt.passbolt'
passbolt_inventory: 'anatomicjc.passbolt.passbolt_inventory'
sysadmin_mail: sysadmin@verdnatura.es
@ -33,3 +34,4 @@ base_packages:
locales_present:
- en_US.UTF-8
- es_ES.UTF-8
passbolt_folder: e0d517be-6783-4b97-9742-acaa9b09742f

View File

@ -26,6 +26,7 @@ kubetest-master[01:03]
kubetest-worker[01:04]
[laboratory]
ansible-test
corelab-proxy1
zammad
matrix

View File

@ -1,6 +1,12 @@
- name: Fetch passbolt password
- name: Fetch or create passbolt password
hosts: all
gather_facts: no
tasks:
- debug:
msg: "Password: {{ lookup(passbolt, 'test').password }}"
msg: "{{ lookup(passbolt, 'test', password=passbolt_password) }}"
vars:
passbolt_password: 'S3cR3tP4$$w0rd'
environment:
PASSBOLT_CREATE_NEW_RESOURCE: true
PASSBOLT_NEW_RESOURCE_PASSWORD_LENGTH: 18
PASSBOLT_NEW_RESOURCE_PASSWORD_SPECIAL_CHARS: false

View File

@ -1,2 +1,3 @@
py-passbolt==0.0.18
cryptography==3.3.2
passlib==1.7.4

View File

@ -8,7 +8,7 @@ idle_timelimit 60
base {{ ldap_base }}
binddn cn=nss,ou=admins,{{ ldap_base }}
bindpw {{ lookup(passbolt, 'nslcd').password }}
bindpw {{ lookup(passbolt, 'nslcd', folder_parent_id=passbolt_folder).password }}
pagesize 500
filter group (&(objectClass=posixGroup)(cn={{ sysadmin_group }}))

View File

@ -5,5 +5,5 @@
- name: Populating hosts file with hostname
lineinfile:
path: /etc/hosts
regexp: '^127.0.1.1'
line: '127.0.1.1 {{ ansible_host }} {{ inventory_hostname_short }}'
regexp: '^127\.0\.1\.1'
line: '127.0.1.1 {{ hostname_fqdn }} {{ inventory_hostname_short }}'

View File

@ -1,7 +1,19 @@
- name: Generate a random root password
set_fact:
root_password: "{{ lookup('password', '/dev/null length=18 chars=ascii_letters,digits') }}"
- name: Save the root password to a file
- name: Save root password into Passbolt
set_fact:
msg: >
{{
lookup(passbolt, inventory_hostname_short,
username='root',
password=root_password,
uri='ssh://'+hostname_fqdn
)
}}
environment:
PASSBOLT_CREATE_NEW_RESOURCE: true
- name: Save the root password to file
copy:
content: "{{ root_password }}\n"
dest: /root/root_password.txt

View File

@ -2,3 +2,7 @@
service:
name: nagios-nrpe-server
state: restarted
- name: restart-sysctl
service:
name: systemd-sysctl
state: restarted

View File

@ -1,31 +1,4 @@
- name: Set NRPE PVE configuration
copy:
src: nrpe.cfg
dest: /etc/nagios/nrpe.d/95-pve.cfg
owner: root
group: root
mode: u=rw,g=r,o=r
notify: restart-nrpe
- name: Copy PVE NRPE plugins
copy:
src: nrpe/
dest: /etc/nagios/plugins/
owner: root
group: root
mode: u=rwx,g=rx,o=rx
notify: restart-nrpe
- name: Add nagios to sudoers
copy:
src: sudoers
dest: /etc/sudoers.d/nagios
mode: u=rw,g=r,o=
owner: root
group: root
notify: restart-nrpe
- name: Configure memory regions
copy:
src: vhost.conf
dest: /etc/modprobe.d/
mode: u=rw,g=r,o=r
owner: root
group: root
- import_tasks: nrpe.yml
tags: nrpe
- import_tasks: vhost.yml
tags: vhost

24
roles/pve/tasks/nrpe.yml Normal file
View File

@ -0,0 +1,24 @@
- name: Set NRPE PVE configuration
copy:
src: nrpe.cfg
dest: /etc/nagios/nrpe.d/95-pve.cfg
owner: root
group: root
mode: u=rw,g=r,o=r
notify: restart-nrpe
- name: Copy PVE NRPE plugins
copy:
src: nrpe/
dest: /etc/nagios/plugins/
owner: root
group: root
mode: u=rwx,g=rx,o=rx
notify: restart-nrpe
- name: Add nagios to sudoers
copy:
src: sudoers
dest: /etc/sudoers.d/nagios
mode: u=rw,g=r,o=
owner: root
group: root
notify: restart-nrpe

View File

@ -0,0 +1,8 @@
- name: Configure memory regions
copy:
src: vhost.conf
dest: /etc/modprobe.d/
mode: u=rw,g=r,o=r
owner: root
group: root
notify: restart-sysctl

View File

@ -9,5 +9,5 @@ if [ -f .vaultpass ]; then
EXTRA_ARGS+=("--vault-password-file" ".vaultpass")
fi
export PYTHONPATH=./venv/lib/python3.12/site-packages/
#export PYTHONPATH=./venv/lib/python3.12/site-packages/
ansible-playbook ${EXTRA_ARGS[@]} $@