add influxdb2 #27

Closed
davidl wants to merge 0 commits from davidl-add-influxdb2 into main
157 changed files with 679 additions and 11648 deletions

13
.gitignore vendored

@@ -1,12 +1,3 @@
.vscode/
venv
.vault-pass
.vault.yml
.passbolt.yml
inventories/local
inventories/local.yml
inventories/local.yaml
inventories/host_vars/*/local.yml
inventories/host_vars/*/local.yaml
inventories/group_vars/*/local.yml
inventories/group_vars/*/local.yaml
.vaultpass
.pyenv


@@ -1,8 +0,0 @@
PASSBOLT_BASE_URL: https://passbolt.domain.local/
PASSBOLT_PASSPHRASE: "S3cr3tP4$$w0rd"
PASSBOLT_PRIVATE_KEY: |
-----BEGIN PGP PRIVATE KEY BLOCK-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-----END PGP PRIVATE KEY BLOCK-----

17
LICENSE

@@ -1,17 +0,0 @@
Copyright (C) 2024 - Verdnatura Levante S.L.
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
On Debian systems, the complete text of the GNU General Public
License can be found in "/usr/share/common-licenses/GPL-3".

107
README.md

@@ -2,110 +2,53 @@
Collection of Ansible playbooks used in the Verdnatura server farm.
## Setup Ansible
## Install Ansible
### Debian
Install Ansible package.
Install Ansible on Debian.
```
apt install ansible
```
### Python
Create a Python virtual environment.
```
python3 -m venv venv
source venv/bin/activate
pip install --upgrade pip ansible==10.1.0 ansible-builder==3.1.0
```
Before running any Python-dependent command, activate the virtual environment.
```
source venv/bin/activate
```
Once you are done, deactivate the virtual environment.
```
deactivate
```
### All platforms
Install dependencies.
```
pip install -r requirements.txt
ansible-galaxy collection install -r collections/requirements.yml
```
Create Python virtual environment.
```
python3 -m venv .pyenv
source .pyenv/bin/activate
pip install -r requirements.txt
```
## Run playbook
It is advisable to use a different repository to store inventories.
Before merging changes into protected branches, playbooks should be tested
locally to ensure they work properly.
Run playbook on inventory host.
Launch playbook on the fly on a host not declared in the inventory.
```
ansible-playbook -i inventories/local -l <host> [-t tag1,tag2...] playbooks/ping.yml
```
Run playbook on the fly on a host not declared in the inventory.
```
ansible-playbook -i <ip_or_hostname>, playbooks/ping.yml
ansible-playbook -i <ip_or_hostname>, [-t tag1,tag2] playbooks/test.yml
```
*Note the comma at the end of the hostname or IP.*
List available tags for playbook.
## Manage vault
To manage the Ansible vault, place the password into the *.vaultpass* file.
View or edit the vault file.
```
ansible-playbook playbooks/<playbook_name>.yml --list-tags
ansible-vault {view,edit} --vault-pass-file .vaultpass vault.yml
```
## Playbook testing
Before merging changes into protected branches, playbooks should be tested
locally to ensure they work properly. Take a look at *.gitignore* to know
the *inventories* file patterns that are excluded from the remote.
* https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html#organizing-host-and-group-variables
## Manage secrets
Secrets can be managed using Ansible vault or an external keystore; Passbolt
is used in this case. It is recommended to use an external keystore to avoid
publicly exposing the secrets, even if they are encrypted.
When running playbooks that use any of the keystores mentioned above, the
*run-playbook.sh* script can be used; it is an overlay over the original
*ansible-playbook* command that injects the necessary parameters.
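A minimal sketch of what such an overlay could look like, assuming the keys in
*.passbolt.yml* are meant to be exported as environment variables (this is an
illustration, not the actual *run-playbook.sh*):
```
#!/bin/bash
# Hypothetical overlay sketch (assumed behavior, not the real script):
# export the Passbolt settings from .passbolt.yml as environment variables,
# add the vault password file if present, then delegate all arguments to
# ansible-playbook.
set -e

if [ -f .passbolt.yml ]; then
    # Export every top-level key (PASSBOLT_BASE_URL, ...) into the
    # environment; PyYAML is available wherever Ansible is installed.
    eval "$(python3 - <<'EOF'
import shlex, yaml
for key, value in yaml.safe_load(open('.passbolt.yml')).items():
    print(f'export {key}={shlex.quote(str(value))}')
EOF
)"
fi

ARGS=()
if [ -f .vault-pass ]; then
    ARGS+=(--vault-pass-file .vault-pass)
fi

exec ansible-playbook "${ARGS[@]}" "$@"
```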
### Passbolt
Add the necessary environment variables to the *.passbolt.yml* file; the
template file *.passbolt.tpl.yml* is included as a reference:
* https://galaxy.ansible.com/ui/repo/published/anatomicjc/passbolt/docs/
### Ansible vault
To manage the Ansible vault, place the encryption password into the *.vault-pass* file.
Manage the vault.
```
ansible-vault {view,edit,create} --vault-pass-file .vault-pass .vault.yml
```
> The files used for the vault must only be used locally and
> under **no** circumstances can they be uploaded to the repository.
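For individual values, `ansible-vault encrypt_string` produces an inline
encrypted blob that is safe to paste into any vars file; the variable name
below is only an example:
```
ansible-vault encrypt_string --vault-pass-file .vault-pass 'S3cr3t' --name 'db_password'
```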
## Build execution environment for AWX
Create an image with *ansible-builder* and upload it to registry.
```
ansible-builder build --tag awx-ee:vn1
```
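The upload step is not covered by the command above; with Podman it would look
something like the following, where the registry host is a placeholder:
```
podman tag awx-ee:vn1 registry.example.com/awx-ee:vn1
podman push registry.example.com/awx-ee:vn1
```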
When running playbooks that use the vault, the *vault-playbook.sh* script can
be used; it is an overlay over the original *ansible-playbook* command.
## Common playbooks
* **debug.yml**: Debugging tasks: facts, vars, ping...
* **facts.yml**: Collect and display facts from a host
* **ping.yml**: Check that a host is alive and reachable
* **awx.yml**: Create and configure AWX user
* **debian.yml**: Setup base Debian server
@@ -114,7 +57,5 @@ ansible-builder build --tag awx-ee:vn1
* https://docs.ansible.com/ansible/latest/reference_appendices/config.html
* https://docs.ansible.com/ansible/latest/collections/ansible/builtin/gather_facts_module.html
* https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_vars_facts.html
* https://ansible.readthedocs.io/projects/builder/en/latest/
* https://www.ansible.com/blog/introduction-to-ansible-builder/
* https://github.com/ansible/awx-ee/
* https://www.passbolt.com/blog/managing-secrets-in-ansible-using-passbolt
* https://galaxy.ansible.com/ui/repo/published/anatomicjc/passbolt/


@@ -2,10 +2,9 @@
remote_user = root
host_key_checking = False
roles_path = ./roles
inventory = ./inventories/local
inventory = ./inventories/servers
gathering = smart
interpreter_python = auto_silent
deprecation_warnings = False
[privilege_escalation]
become = True


@@ -1,19 +1,16 @@
collections:
- name: ansible.utils
version: '>=4.1.0'
- name: community.general
version: '>=9.0.0'
type: galaxy
- name: ansible.posix
version: '>=1.5.4'
type: galaxy
- name: ansible.utils
version: '>=4.1.0'
type: galaxy
- name: ansible.windows
version: '>=2.3.0'
type: galaxy
- name: anatomicjc.passbolt
version: '>=0.0.14'
type: galaxy
- name: community.crypto
version: '>=2.14.0'
type: galaxy
- name: community.general
version: '>=9.5.0'
type: galaxy


@@ -1,96 +0,0 @@
ARG EE_BASE_IMAGE="quay.io/centos/centos:stream9"
ARG PYCMD="/usr/bin/python3.12"
ARG PYPKG="python3.12"
ARG PKGMGR_PRESERVE_CACHE=""
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS=""
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS=""
ARG ANSIBLE_INSTALL_REFS="ansible-core>=2.17.0 ansible-runner==2.4.0"
ARG PKGMGR="/usr/bin/dnf"
# Base build stage
FROM $EE_BASE_IMAGE as base
USER root
ENV PIP_BREAK_SYSTEM_PACKAGES=1
ARG EE_BASE_IMAGE
ARG PYCMD
ARG PYPKG
ARG PKGMGR_PRESERVE_CACHE
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS
ARG ANSIBLE_INSTALL_REFS
ARG PKGMGR
COPY _build/scripts/ /output/scripts/
COPY _build/scripts/entrypoint /opt/builder/bin/entrypoint
RUN $PKGMGR install $PYPKG -y ; if [ -z $PKGMGR_PRESERVE_CACHE ]; then $PKGMGR clean all; fi
RUN /output/scripts/pip_install $PYCMD
RUN $PYCMD -m pip install --no-cache-dir $ANSIBLE_INSTALL_REFS
# Galaxy build stage
FROM base as galaxy
ARG EE_BASE_IMAGE
ARG PYCMD
ARG PYPKG
ARG PKGMGR_PRESERVE_CACHE
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS
ARG ANSIBLE_INSTALL_REFS
ARG PKGMGR
RUN /output/scripts/check_galaxy
COPY _build /build
WORKDIR /build
RUN mkdir -p /usr/share/ansible
RUN ansible-galaxy role install $ANSIBLE_GALAXY_CLI_ROLE_OPTS -r requirements.yml --roles-path "/usr/share/ansible/roles"
RUN ANSIBLE_GALAXY_DISABLE_GPG_VERIFY=1 ansible-galaxy collection install $ANSIBLE_GALAXY_CLI_COLLECTION_OPTS -r requirements.yml --collections-path "/usr/share/ansible/collections"
# Builder build stage
FROM base as builder
ENV PIP_BREAK_SYSTEM_PACKAGES=1
WORKDIR /build
ARG EE_BASE_IMAGE
ARG PYCMD
ARG PYPKG
ARG PKGMGR_PRESERVE_CACHE
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS
ARG ANSIBLE_INSTALL_REFS
ARG PKGMGR
RUN $PYCMD -m pip install --no-cache-dir bindep pyyaml packaging
COPY --from=galaxy /usr/share/ansible /usr/share/ansible
COPY _build/requirements.txt requirements.txt
COPY _build/bindep.txt bindep.txt
RUN $PYCMD /output/scripts/introspect.py introspect --user-pip=requirements.txt --user-bindep=bindep.txt --write-bindep=/tmp/src/bindep.txt --write-pip=/tmp/src/requirements.txt
RUN /output/scripts/assemble
# Final build stage
FROM base as final
ENV PIP_BREAK_SYSTEM_PACKAGES=1
ARG EE_BASE_IMAGE
ARG PYCMD
ARG PYPKG
ARG PKGMGR_PRESERVE_CACHE
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS
ARG ANSIBLE_INSTALL_REFS
ARG PKGMGR
RUN /output/scripts/check_ansible $PYCMD
COPY --from=galaxy /usr/share/ansible /usr/share/ansible
COPY --from=builder /output/ /output/
RUN /output/scripts/install-from-bindep && rm -rf /output/wheels
RUN chmod ug+rw /etc/passwd
RUN mkdir -p /runner && chgrp 0 /runner && chmod -R ug+rwx /runner
WORKDIR /runner
RUN $PYCMD -m pip install --no-cache-dir 'dumb-init==1.2.5'
RUN rm -rf /output
LABEL ansible-execution-environment=true
USER 1000
ENTRYPOINT ["/opt/builder/bin/entrypoint", "dumb-init"]
CMD ["bash"]


@@ -1,18 +0,0 @@
git-core [platform:rpm]
python3.11-devel [platform:rpm compile]
libcurl-devel [platform:rpm compile]
krb5-devel [platform:rpm compile]
krb5-workstation [platform:rpm]
subversion [platform:rpm]
subversion [platform:dpkg]
git-lfs [platform:rpm]
sshpass [platform:rpm]
rsync [platform:rpm]
epel-release [platform:rpm]
unzip [platform:rpm]
podman-remote [platform:rpm]
cmake [platform:rpm compile]
gcc [platform:rpm compile]
gcc-c++ [platform:rpm compile]
make [platform:rpm compile]
openssl-devel [platform:rpm compile]


@@ -1,3 +0,0 @@
py-passbolt==0.0.18
cryptography==3.3.2
passlib==1.7.4


@@ -1,16 +0,0 @@
collections:
- name: ansible.utils
version: '>=4.1.0'
type: galaxy
- name: ansible.windows
version: '>=2.3.0'
type: galaxy
- name: anatomicjc.passbolt
version: '>=0.0.14'
type: galaxy
- name: community.crypto
version: '>=2.14.0'
type: galaxy
- name: community.general
version: '>=9.5.0'
type: galaxy


@@ -1,169 +0,0 @@
#!/bin/bash
# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make a list of bindep dependencies and a collection of built binary
# wheels for the repo in question as well as its python dependencies.
# Install javascript tools as well to support python that needs javascript
# at build time.
set -ex
RELEASE=$(source /etc/os-release; echo $ID)
# NOTE(pabelanger): Allow users to force either microdnf or dnf as a package
# manager.
PKGMGR="${PKGMGR:-}"
PKGMGR_OPTS="${PKGMGR_OPTS:-}"
PKGMGR_PRESERVE_CACHE="${PKGMGR_PRESERVE_CACHE:-}"
PYCMD="${PYCMD:=/usr/bin/python3}"
PIPCMD="${PIPCMD:=$PYCMD -m pip}"
if [ -z $PKGMGR ]; then
# Expect dnf to be installed, however if we find microdnf default to it.
PKGMGR=/usr/bin/dnf
if [ -f "/usr/bin/microdnf" ]; then
PKGMGR=/usr/bin/microdnf
fi
fi
if [ "$PKGMGR" = "/usr/bin/microdnf" ]
then
if [ -z "${PKGMGR_OPTS}" ]; then
# NOTE(pabelanger): skip install docs and weak dependencies to
# make smaller images. Sadly, setting these in dnf.conf doesn't
# appear to work.
PKGMGR_OPTS="--nodocs --setopt install_weak_deps=0"
fi
fi
# NOTE(pabelanger): Ensure all the directories we use exist regardless
# of the user first creating them or not.
mkdir -p /output/bindep
mkdir -p /output/wheels
mkdir -p /tmp/src
cd /tmp/src
function install_bindep {
# Protect from the bindep builder image use of the assemble script
# to produce a wheel. Note we append because we want all
# sibling packages in here too
if [ -f bindep.txt ] ; then
bindep -l newline | sort >> /output/bindep/run.txt || true
if [ "$RELEASE" == "centos" ] ; then
bindep -l newline -b epel | sort >> /output/bindep/stage.txt || true
grep -Fxvf /output/bindep/run.txt /output/bindep/stage.txt >> /output/bindep/epel.txt || true
rm -rf /output/bindep/stage.txt
fi
compile_packages=$(bindep -b compile || true)
if [ ! -z "$compile_packages" ] ; then
$PKGMGR install -y $PKGMGR_OPTS ${compile_packages}
fi
fi
}
function install_wheels {
# NOTE(pabelanger): If there are build requirements to install, do so.
# However do not cache them as we do not want them in the final image.
if [ -f /tmp/src/build-requirements.txt ] && [ ! -f /tmp/src/.build-requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --no-cache -r /tmp/src/build-requirements.txt
touch /tmp/src/.build-requirements.txt
fi
# Build a wheel so that we have an install target.
# pip install . in the container context with the mounted
# source dir gets ... exciting, if setup.py exists.
# We run sdist first to trigger code generation steps such
# as are found in zuul, since the sequencing otherwise
# happens in a way that makes wheel content copying unhappy.
# pip wheel isn't used here because it puts all of the output
# in the output dir and not the wheel cache, so it's not
# possible to tell what is the wheel for the project and
# what is the wheel cache.
if [ -f setup.py ] ; then
$PYCMD setup.py sdist bdist_wheel -d /output/wheels
fi
# Install everything so that the wheel cache is populated with
# transitive depends. If a requirements.txt file exists, install
# it directly so that people can use git url syntax to do things
# like pick up patched but unreleased versions of dependencies.
# Only do this for the main package (i.e. only write requirements
# once).
if [ -f /tmp/src/requirements.txt ] && [ ! -f /output/requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /tmp/src/requirements.txt
cp /tmp/src/requirements.txt /output/requirements.txt
fi
# If we didn't build wheels, we can skip trying to install it.
if [ $(ls -1 /output/wheels/*whl 2>/dev/null | wc -l) -gt 0 ]; then
$PIPCMD uninstall -y /output/wheels/*.whl
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels /output/wheels/*whl
fi
}
PACKAGES=$*
PIP_OPTS="${PIP_OPTS-}"
# bindep the main package
install_bindep
# go through ZUUL_SIBLINGS, if any, and build those wheels too
for sibling in ${ZUUL_SIBLINGS:-}; do
pushd .zuul-siblings/${sibling}
install_bindep
popd
done
# Use a clean virtualenv for install steps to prevent things from the
# current environment making us not build a wheel.
# NOTE(pabelanger): We allow users to install distro python packages of
# libraries. This is important for projects that eventually want to produce
# an RPM or offline install.
$PYCMD -m venv /tmp/venv --system-site-packages --without-pip
source /tmp/venv/bin/activate
# If there is an upper-constraints.txt file in the source tree,
# use it in the pip commands.
if [ -f /tmp/src/upper-constraints.txt ] ; then
cp /tmp/src/upper-constraints.txt /output/upper-constraints.txt
CONSTRAINTS="-c /tmp/src/upper-constraints.txt"
fi
# If we got a list of packages, install them, otherwise install the
# main package.
if [[ $PACKAGES ]] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels $PACKAGES
for package in $PACKAGES ; do
echo "$package" >> /output/packages.txt
done
else
install_wheels
fi
# go through ZUUL_SIBLINGS, if any, and build those wheels too
for sibling in ${ZUUL_SIBLINGS:-}; do
pushd .zuul-siblings/${sibling}
install_wheels
popd
done
if [ -z $PKGMGR_PRESERVE_CACHE ]; then
$PKGMGR clean all
rm -rf /var/cache/{dnf,yum}
fi
rm -rf /var/lib/dnf/history.*
rm -rf /var/log/{dnf.*,hawkey.log}
rm -rf /tmp/venv


@@ -1,110 +0,0 @@
#!/bin/bash
# Copyright (c) 2023 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
# Script to validate that Ansible and Ansible Runner are installed.
#
# Usage: check_ansible <PYCMD>
#
# Options:
# PYCMD - The path to the python executable to use.
#####################################################################
set -x
PYCMD=$1
if [ -z "$PYCMD" ]
then
echo "Usage: check_ansible <PYCMD>"
exit 1
fi
if [ ! -x "$PYCMD" ]
then
echo "$PYCMD is not an executable"
exit 1
fi
ansible --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible installation
An Ansible installation cannot be found in the final builder image.
Ansible must be installed in the final image. If you are using a
recent enough version of the execution environment file, you may
use the 'dependencies.ansible_core' configuration option to install
Ansible for you, or use 'additional_build_steps' to manually do
this yourself. Alternatively, use a base image with Ansible already
installed.
**********************************************************************
EOF
exit 1
fi
ansible-runner --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible Runner installation
An Ansible Runner installation cannot be found in the final builder
image.
Ansible Runner must be installed in the final image. If you are
using a recent enough version of the execution environment file, you
may use the 'dependencies.ansible_runner' configuration option to
install Ansible Runner for you, or use 'additional_build_steps' to
manually do this yourself. Alternatively, use a base image with
Ansible Runner already installed.
**********************************************************************
EOF
exit 1
fi
$PYCMD -c 'import ansible ; import ansible_runner'
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible or Ansible Runner for selected Python
An Ansible and/or Ansible Runner installation cannot be found in
the final builder image using the following Python interpreter:
$PYCMD
Ansible and Ansible Runner must be installed in the final image and
available to the selected Python interpreter. If you are using a
recent enough version of the execution environment file, you may use
the 'dependencies.ansible_core' configuration option to install
Ansible and the 'dependencies.ansible_runner' configuration option
to install Ansible Runner. You can also use 'additional_build_steps'
to manually do this yourself. Alternatively, use a base image with
Ansible and Ansible Runner already installed.
**********************************************************************
EOF
exit 1
fi
exit 0


@@ -1,46 +0,0 @@
#!/bin/bash
# Copyright (c) 2023 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
# Script to validate that Ansible Galaxy is installed on the system.
#####################################################################
set -x
ansible-galaxy --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible installation
The 'ansible-galaxy' command is not found in the base image. This
image is used to create the intermediary image that performs the
Galaxy collection and role installation process.
Ansible must be installed in the base image. If you are using a
recent enough version of the execution environment file, you may
use the 'dependencies.ansible_core' configuration option to install
Ansible for you, or use 'additional_build_steps' to manually do
this yourself. Alternatively, use a base image with Ansible already
installed.
**********************************************************************
EOF
exit 1
fi
exit 0


@@ -1,152 +0,0 @@
#!/usr/bin/env bash
# Copyright: (c) 2023, Ansible Project
# Apache License, Version 2.0 (see LICENSE.md or https://www.apache.org/licenses/LICENSE-2.0)
# This entrypoint script papers over a number of problems that manifest under different container runtimes when
# using ephemeral UIDs, then chain-execs to the requested init system and/or command. It is an implementation
# detail for the convenience of Ansible execution environments built by ansible-builder.
#
# If we're running as a legit user that has an entry in /etc/passwd and a valid and writeable homedir, we're all good.
#
# If the current uid is not in /etc/passwd, we'll attempt to add it, but /etc/passwd is often not writable by GID 0.
# `ansible-builder` defaults to making /etc/passwd writable by GID0 by default for maximum compatibility, but this is
# not guaranteed. Some runtimes/wrappers (eg podman, cri-o) already create an /etc/passwd entry on the fly as-needed,
# but they may set the homedir to something inaccessible (eg, `/`, WORKDIR).
#
# There are numerous cases where a missing or incorrect homedir in /etc/passwd is fatal. It breaks
# `async` in ansible-core, things like `echo ~someuid`, and numerous other software packages that assume a valid POSIX
# user configuration.
#
# If the homedir listed in /etc/passwd is not writeable by the current user (supposed to be primary GID0), we'll try
# to make it writeable (except `/`), or select another writeable home directory from `$HOME`, `/runner`, or `/tmp` and
# update $HOME (and /etc/passwd if possible) accordingly for the current process chain.
#
# This script is generally silent by default, but some likely-fatal cases will issue a brief warning to stderr. The
# envvars described below can be set before container init to cause faster failures and/or get tracing output.
# options:
# EP_BASH_DEBUG=1 (enable set -x)
# EP_DEBUG_TRACE=1 (enable debug trace to stderr)
# EP_ON_ERROR=ignore/warn/fail (default ignore)
set -eu
if (( "${EP_BASH_DEBUG:=0}" == 1 )); then
set -x
fi
: "${EP_DEBUG_TRACE:=0}"
: "${EP_ON_ERROR:=warn}"
: "${HOME:=}"
CUR_UID=$(id -u)
CUR_USERNAME=$(id -u -n 2> /dev/null || true) # whoami-free way to get current username, falls back to current uid
DEFAULT_HOME="/runner"
DEFAULT_SHELL="/bin/bash"
if (( "$EP_DEBUG_TRACE" == 1 )); then
function log_debug() { echo "EP_DEBUG: $1" 1>&2; }
else
function log_debug() { :; }
fi
log_debug "entrypoint.sh started"
case "$EP_ON_ERROR" in
"fail")
function maybe_fail() { echo "EP_FAIL: $1" 1>&2; exit 1; }
;;
"warn")
function maybe_fail() { echo "EP_WARN: $1" 1>&2; }
;;
*)
function maybe_fail() { log_debug "EP_FAIL (ignored): $1"; }
;;
esac
function is_dir_writable() {
[ -d "$1" ] && [ -w "$1" ] && [ -x "$1" ]
}
function ensure_current_uid_in_passwd() {
log_debug "is current uid ${CUR_UID} in /etc/passwd?"
if ! getent passwd "${CUR_USERNAME}" &> /dev/null ; then
if [ -w "/etc/passwd" ]; then
log_debug "appending missing uid ${CUR_UID} into /etc/passwd"
# use the default homedir; we may have to rewrite it to another value later if it's inaccessible
echo "${CUR_UID}:x:${CUR_UID}:0:container user ${CUR_UID}:${DEFAULT_HOME}:${DEFAULT_SHELL}" >> /etc/passwd
else
maybe_fail "uid ${CUR_UID} is missing from /etc/passwd, which is not writable; this error is likely fatal"
fi
else
log_debug "current uid is already in /etc/passwd"
fi
}
function ensure_writeable_homedir() {
if (is_dir_writable "${CANDIDATE_HOME}") ; then
log_debug "candidate homedir ${CANDIDATE_HOME} is valid and writeable"
else
if [ "${CANDIDATE_HOME}" == "/" ]; then
log_debug "skipping attempt to fix permissions on / as homedir"
return 1
fi
log_debug "candidate homedir ${CANDIDATE_HOME} is missing or not writeable; attempt to fix"
if ! (mkdir -p "${CANDIDATE_HOME}" >& /dev/null && chmod -R ug+rwx "${CANDIDATE_HOME}" >& /dev/null) ; then
log_debug "candidate homedir ${CANDIDATE_HOME} cannot be made writeable"
return 1
else
log_debug "candidate homedir ${CANDIDATE_HOME} was successfully made writeable"
fi
fi
# this might work; export it even if we end up not being able to update /etc/passwd
# this ensures the envvar matches current reality for this session; future sessions should set automatically if /etc/passwd is accurate
export HOME=${CANDIDATE_HOME}
if [ "${CANDIDATE_HOME}" == "${PASSWD_HOME}" ] ; then
log_debug "candidate homedir ${CANDIDATE_HOME} matches /etc/passwd"
return 0
fi
if ! [ -w /etc/passwd ]; then
log_debug "candidate homedir ${CANDIDATE_HOME} is valid for ${CUR_USERNAME}, but /etc/passwd is not writable to update it"
return 1
fi
log_debug "resetting homedir for user ${CUR_USERNAME} to ${CANDIDATE_HOME} in /etc/passwd"
# sed -i wants to create a tempfile next to the original, which won't work with /etc permissions in many cases,
# so just do it in memory and overwrite the existing file if we succeeded
NEWPW=$(sed -r "s;(^${CUR_USERNAME}:(.*:){4})(.*:);\1${CANDIDATE_HOME}:;g" /etc/passwd)
echo "${NEWPW}" > /etc/passwd
}
ensure_current_uid_in_passwd
log_debug "current value of HOME is ${HOME}"
PASSWD_HOME=$(getent passwd "${CUR_USERNAME}" | cut -d: -f6)
log_debug "user ${CUR_USERNAME} homedir from /etc/passwd is ${PASSWD_HOME}"
CANDIDATE_HOMES=("${PASSWD_HOME}" "${HOME}" "${DEFAULT_HOME}" "/tmp")
# we'll set this in the loop as soon as we find a writeable dir
unset HOME
for CANDIDATE_HOME in "${CANDIDATE_HOMES[@]}"; do
if ensure_writeable_homedir ; then
break
fi
done
if ! [ -v HOME ] ; then
maybe_fail "a valid homedir could not be set for ${CUR_USERNAME}; this is likely fatal"
fi
# chain exec whatever we were asked to run (ideally an init system) to keep any envvar state we've set
log_debug "chain exec-ing requested command $*"
exec "${@}"


@@ -1,105 +0,0 @@
#!/bin/bash
# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# NOTE(pabelanger): Allow users to force either microdnf or dnf as a package
# manager.
PKGMGR="${PKGMGR:-}"
PKGMGR_OPTS="${PKGMGR_OPTS:-}"
PKGMGR_PRESERVE_CACHE="${PKGMGR_PRESERVE_CACHE:-}"
PYCMD="${PYCMD:=/usr/bin/python3}"
PIPCMD="${PIPCMD:=$PYCMD -m pip}"
PIP_OPTS="${PIP_OPTS-}"
if [ -z $PKGMGR ]; then
# Expect dnf to be installed, however if we find microdnf default to it.
PKGMGR=/usr/bin/dnf
if [ -f "/usr/bin/microdnf" ]; then
PKGMGR=/usr/bin/microdnf
fi
fi
if [ "$PKGMGR" = "/usr/bin/microdnf" ]
then
if [ -z "${PKGMGR_OPTS}" ]; then
# NOTE(pabelanger): skip install docs and weak dependencies to
# make smaller images. Sadly, setting these in dnf.conf doesn't
# appear to work.
PKGMGR_OPTS="--nodocs --setopt install_weak_deps=0"
fi
fi
if [ -f /output/bindep/run.txt ] ; then
PACKAGES=$(cat /output/bindep/run.txt)
if [ ! -z "$PACKAGES" ]; then
$PKGMGR install -y $PKGMGR_OPTS $PACKAGES
fi
fi
if [ -f /output/bindep/epel.txt ] ; then
EPEL_PACKAGES=$(cat /output/bindep/epel.txt)
if [ ! -z "$EPEL_PACKAGES" ]; then
$PKGMGR install -y $PKGMGR_OPTS --enablerepo epel $EPEL_PACKAGES
fi
fi
# If there's a constraints file, use it.
if [ -f /output/upper-constraints.txt ] ; then
CONSTRAINTS="-c /output/upper-constraints.txt"
fi
# If a requirements.txt file exists,
# install it directly so that people can use git url syntax
# to do things like pick up patched but unreleased versions
# of dependencies.
if [ -f /output/requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /output/requirements.txt
fi
# Add any requested extras to the list of things to install
EXTRAS=""
for extra in $* ; do
EXTRAS="${EXTRAS} -r /output/$extra/requirements.txt"
done
if [ -f /output/packages.txt ] ; then
# If a package list was passed to assemble, install that in the final
# image.
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /output/packages.txt $EXTRAS
else
# Install the wheels. Uninstall any existing version as siblings may
# be built with the same version number as the latest release, but we
# really want the speculatively built wheels installed over any
# automatic dependencies.
# NOTE(pabelanger): It is possible a project may not have a wheel, but does have requirements.txt
if [ $(ls -1 /output/wheels/*whl 2>/dev/null | wc -l) -gt 0 ]; then
$PIPCMD uninstall -y /output/wheels/*.whl
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels /output/wheels/*.whl $EXTRAS
elif [ ! -z "$EXTRAS" ] ; then
$PIPCMD uninstall -y $EXTRAS
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels $EXTRAS
fi
fi
# clean up after ourselves, unless requested to keep the cache
if [[ "$PKGMGR_PRESERVE_CACHE" != always ]]; then
$PKGMGR clean all
rm -rf /var/cache/{dnf,yum}
fi
rm -rf /var/lib/dnf/history.*
rm -rf /var/log/{dnf.*,hawkey.log}


@@ -1,507 +0,0 @@
from __future__ import annotations
import argparse
import logging
import os
import re
import sys
import yaml
from packaging.requirements import InvalidRequirement, Requirement
BASE_COLLECTIONS_PATH = '/usr/share/ansible/collections'
# regex for a comment at the start of a line, or embedded with leading space(s)
COMMENT_RE = re.compile(r'(?:^|\s+)#.*$')
EXCLUDE_REQUIREMENTS = frozenset((
# obviously already satisfied or unwanted
'ansible', 'ansible-base', 'python', 'ansible-core',
# general python test requirements
'tox', 'pycodestyle', 'yamllint', 'pylint',
'flake8', 'pytest', 'pytest-xdist', 'coverage', 'mock', 'testinfra',
# test requirements highly specific to Ansible testing
'ansible-lint', 'molecule', 'galaxy-importer', 'voluptuous',
# already present in image for py3 environments
'yaml', 'pyyaml', 'json',
))
logger = logging.getLogger(__name__)
class CollectionDefinition:
"""
This class represents the dependency metadata for a collection
should be replaced by logic to hit the Galaxy API if made available
"""
def __init__(self, collection_path):
self.reference_path = collection_path
# NOTE: Filenames should match constants.DEFAULT_EE_BASENAME and constants.YAML_FILENAME_EXTENSIONS.
meta_file_base = os.path.join(collection_path, 'meta', 'execution-environment')
ee_exists = False
for ext in ('yml', 'yaml'):
meta_file = f"{meta_file_base}.{ext}"
if os.path.exists(meta_file):
with open(meta_file, 'r') as f:
self.raw = yaml.safe_load(f)
ee_exists = True
break
if not ee_exists:
self.raw = {'version': 1, 'dependencies': {}}
# Automatically infer requirements for collection
for entry, filename in [('python', 'requirements.txt'), ('system', 'bindep.txt')]:
candidate_file = os.path.join(collection_path, filename)
if has_content(candidate_file):
self.raw['dependencies'][entry] = filename
def target_dir(self):
namespace, name = self.namespace_name()
return os.path.join(
BASE_COLLECTIONS_PATH, 'ansible_collections',
namespace, name
)
def namespace_name(self):
"Returns 2-tuple of namespace and name"
path_parts = [p for p in self.reference_path.split(os.path.sep) if p]
return tuple(path_parts[-2:])
def get_dependency(self, entry):
"""A collection is only allowed to reference a file by a relative path
which is relative to the collection root
"""
req_file = self.raw.get('dependencies', {}).get(entry)
if req_file is None:
return None
if os.path.isabs(req_file):
raise RuntimeError(
'Collections must specify relative paths for requirements files. '
f'The file {req_file} specified by {self.reference_path} violates this.'
)
return req_file
def line_is_empty(line):
return bool((not line.strip()) or line.startswith('#'))
def read_req_file(path):
"""Provide some minimal error and display handling for file reading"""
if not os.path.exists(path):
print(f'Expected requirements file not present at: {os.path.abspath(path)}')
with open(path, 'r') as f:
return f.read()
def pip_file_data(path):
pip_content = read_req_file(path)
pip_lines = []
for line in pip_content.split('\n'):
if line_is_empty(line):
continue
if line.startswith('-r') or line.startswith('--requirement'):
_, new_filename = line.split(None, 1)
new_path = os.path.join(os.path.dirname(path or '.'), new_filename)
pip_lines.extend(pip_file_data(new_path))
else:
pip_lines.append(line)
return pip_lines
def bindep_file_data(path):
sys_content = read_req_file(path)
sys_lines = []
for line in sys_content.split('\n'):
if line_is_empty(line):
continue
sys_lines.append(line)
return sys_lines
def process_collection(path):
"""Return a tuple of (python_dependencies, system_dependencies) for the
collection install path given.
Both items returned are a list of dependencies.
:param str path: root directory of collection (this would contain galaxy.yml file)
"""
col_def = CollectionDefinition(path)
py_file = col_def.get_dependency('python')
pip_lines = []
if py_file:
pip_lines = pip_file_data(os.path.join(path, py_file))
sys_file = col_def.get_dependency('system')
bindep_lines = []
if sys_file:
bindep_lines = bindep_file_data(os.path.join(path, sys_file))
return (pip_lines, bindep_lines)
def process(data_dir=BASE_COLLECTIONS_PATH,
user_pip=None,
user_bindep=None,
exclude_pip=None,
exclude_bindep=None,
exclude_collections=None):
"""
Build a dictionary of Python and system requirements from any collections
installed in data_dir, and any user specified requirements.
Excluded requirements, if any, will be inserted into the return dict.
Example return dict:
{
'python': {
'collection.a': ['abc', 'def'],
'collection.b': ['ghi'],
'user': ['jkl'],
'exclude': ['abc'],
},
'system': {
'collection.a': ['ZYX'],
'user': ['WVU'],
'exclude': ['ZYX'],
},
'excluded_collections': [
'a.b',
]
}
"""
paths = []
path_root = os.path.join(data_dir, 'ansible_collections')
# build a list of all the valid collection paths
if os.path.exists(path_root):
for namespace in sorted(os.listdir(path_root)):
if not os.path.isdir(os.path.join(path_root, namespace)):
continue
for name in sorted(os.listdir(os.path.join(path_root, namespace))):
collection_dir = os.path.join(path_root, namespace, name)
if not os.path.isdir(collection_dir):
continue
files_list = os.listdir(collection_dir)
if 'galaxy.yml' in files_list or 'MANIFEST.json' in files_list:
paths.append(collection_dir)
# populate the requirements content
py_req = {}
sys_req = {}
for path in paths:
col_pip_lines, col_sys_lines = process_collection(path)
col_def = CollectionDefinition(path)
namespace, name = col_def.namespace_name()
key = f'{namespace}.{name}'
if col_pip_lines:
py_req[key] = col_pip_lines
if col_sys_lines:
sys_req[key] = col_sys_lines
# add on entries from user files, if they are given
if user_pip:
col_pip_lines = pip_file_data(user_pip)
if col_pip_lines:
py_req['user'] = col_pip_lines
if exclude_pip:
col_pip_exclude_lines = pip_file_data(exclude_pip)
if col_pip_exclude_lines:
py_req['exclude'] = col_pip_exclude_lines
if user_bindep:
col_sys_lines = bindep_file_data(user_bindep)
if col_sys_lines:
sys_req['user'] = col_sys_lines
if exclude_bindep:
col_sys_exclude_lines = bindep_file_data(exclude_bindep)
if col_sys_exclude_lines:
sys_req['exclude'] = col_sys_exclude_lines
retval = {
'python': py_req,
'system': sys_req,
}
if exclude_collections:
# This file should just be a newline separated list of collection names,
# so reusing bindep_file_data() to read it should work fine.
excluded_collection_list = bindep_file_data(exclude_collections)
if excluded_collection_list:
retval['excluded_collections'] = excluded_collection_list
return retval
def has_content(candidate_file):
"""Beyond checking that the candidate exists, this also assures
that the file has something other than whitespace,
which can cause errors when given to pip.
"""
if not os.path.exists(candidate_file):
return False
with open(candidate_file, 'r') as f:
content = f.read()
return bool(content.strip().strip('\n'))
def strip_comments(reqs: dict[str, list]) -> dict[str, list]:
"""
Filter any comments out of the Python collection requirements input.
:param dict reqs: A dict of Python requirements, keyed by collection name.
:return: Same as the input parameter, except with no comment lines.
"""
result: dict[str, list] = {}
for collection, lines in reqs.items():
for line in lines:
# strip comments
if (base_line := COMMENT_RE.sub('', line.strip())):
result.setdefault(collection, []).append(base_line)
return result
def should_be_excluded(value: str, exclusion_list: list[str]) -> bool:
"""
Test if `value` matches against any value in `exclusion_list`.
The exclusion_list values are either strings to be compared in a case-insensitive
manner against value, OR, they are regular expressions to be tested against the
value. A regular expression will contain '~' as the first character.
:return: True if the value should be excluded, False otherwise.
"""
for exclude_value in exclusion_list:
if exclude_value[0] == "~":
pattern = exclude_value[1:]
if re.fullmatch(pattern.lower(), value.lower()):
return True
elif exclude_value.lower() == value.lower():
return True
return False
def filter_requirements(reqs: dict[str, list],
exclude: list[str] | None = None,
exclude_collections: list[str] | None = None,
is_python: bool = True) -> list[str]:
"""
Given a dictionary of Python requirement lines keyed off collections,
return a list of cleaned up (no source comments) requirements
annotated with comments indicating the sources based off the collection keys.
Currently, non-pep508 compliant Python entries are passed through. We also no
longer attempt to normalize names (replace '_' with '-', etc), other than
lowercasing it for exclusion matching, since we no longer are attempting
to combine similar entries.
:param dict reqs: A dict of either Python or system requirements, keyed by collection name.
:param list exclude: A list of requirements to be excluded from the output.
:param list exclude_collections: A list of collection names from which to exclude all requirements.
:param bool is_python: This should be set to True for Python requirements, as each
will be tested for PEP508 compliance. This should be set to False for system requirements.
:return: A list of filtered and annotated requirements.
"""
exclusions: list[str] = []
collection_ignore_list: list[str] = []
if exclude:
exclusions = exclude.copy()
if exclude_collections:
collection_ignore_list = exclude_collections.copy()
annotated_lines: list[str] = []
uncommented_reqs = strip_comments(reqs)
for collection, lines in uncommented_reqs.items():
# Bypass this collection if we've been told to ignore all requirements from it.
if should_be_excluded(collection, collection_ignore_list):
logger.debug("# Excluding all requirements from collection '%s'", collection)
continue
for line in lines:
# Determine the simple name based on type of requirement
if is_python:
try:
parsed_req = Requirement(line)
name = parsed_req.name
except InvalidRequirement:
logger.warning(
"Passing through non-PEP508 compliant line '%s' from collection '%s'",
line, collection
)
annotated_lines.append(line) # We intentionally won't annotate these lines (multi-line?)
continue
else:
# bindep system requirements have the package name as the first "word" on the line
name = line.split(maxsplit=1)[0]
if collection.lower() not in {'user', 'exclude'}:
lower_name = name.lower()
if lower_name in EXCLUDE_REQUIREMENTS:
logger.debug("# Excluding requirement '%s' from '%s'", name, collection)
continue
if should_be_excluded(lower_name, exclusions):
logger.debug("# Explicitly excluding requirement '%s' from '%s'", name, collection)
continue
annotated_lines.append(f'{line} # from collection {collection}')
return annotated_lines
def parse_args(args=None):
parser = argparse.ArgumentParser(
prog='introspect',
description=(
'ansible-builder introspection; injected and used during execution environment build'
)
)
subparsers = parser.add_subparsers(
help='The command to invoke.',
dest='action',
required=True,
)
create_introspect_parser(subparsers)
return parser.parse_args(args)
def run_introspect(args, log):
data = process(args.folder,
user_pip=args.user_pip,
user_bindep=args.user_bindep,
exclude_pip=args.exclude_pip,
exclude_bindep=args.exclude_bindep,
exclude_collections=args.exclude_collections)
log.info('# Dependency data for %s', args.folder)
excluded_collections = data.pop('excluded_collections', None)
data['python'] = filter_requirements(
data['python'],
exclude=data['python'].pop('exclude', []),
exclude_collections=excluded_collections,
)
data['system'] = filter_requirements(
data['system'],
exclude=data['system'].pop('exclude', []),
exclude_collections=excluded_collections,
is_python=False
)
print('---')
print(yaml.dump(data, default_flow_style=False))
if args.write_pip and data.get('python'):
write_file(args.write_pip, data.get('python') + [''])
if args.write_bindep and data.get('system'):
write_file(args.write_bindep, data.get('system') + [''])
sys.exit(0)
def create_introspect_parser(parser):
introspect_parser = parser.add_parser(
'introspect',
help='Introspects collections in folder.',
description=(
'Loops over collections in folder and returns data about dependencies. '
'This is used internally and exposed here for verification. '
'This is targeted toward collection authors and maintainers.'
)
)
introspect_parser.add_argument('--sanitize', action='store_true',
help=argparse.SUPPRESS)
introspect_parser.add_argument(
'folder', default=BASE_COLLECTIONS_PATH, nargs='?',
help=(
'Ansible collections path(s) to introspect. '
'This should have a folder named ansible_collections inside of it.'
)
)
introspect_parser.add_argument(
'--user-pip', dest='user_pip',
help='An additional file to combine with collection pip requirements.'
)
introspect_parser.add_argument(
'--user-bindep', dest='user_bindep',
help='An additional file to combine with collection bindep requirements.'
)
introspect_parser.add_argument(
'--exclude-bindep-reqs', dest='exclude_bindep',
help='An additional file to exclude specific bindep requirements from collections.'
)
introspect_parser.add_argument(
'--exclude-pip-reqs', dest='exclude_pip',
help='An additional file to exclude specific pip requirements from collections.'
)
introspect_parser.add_argument(
'--exclude-collection-reqs', dest='exclude_collections',
help='An additional file to exclude all requirements from the listed collections.'
)
introspect_parser.add_argument(
'--write-pip', dest='write_pip',
help='Write the combined pip requirements file to this location.'
)
introspect_parser.add_argument(
'--write-bindep', dest='write_bindep',
help='Write the combined bindep requirements file to this location.'
)
return introspect_parser
def write_file(filename: str, lines: list) -> bool:
parent_dir = os.path.dirname(filename)
if parent_dir and not os.path.exists(parent_dir):
logger.warning('Creating parent directory for %s', filename)
os.makedirs(parent_dir)
new_text = '\n'.join(lines)
if os.path.exists(filename):
with open(filename, 'r') as f:
if f.read() == new_text:
logger.debug("File %s is already up-to-date.", filename)
return False
logger.warning('File %s had modifications and will be rewritten', filename)
with open(filename, 'w') as f:
f.write(new_text)
return True
def main():
args = parse_args()
if args.action == 'introspect':
run_introspect(args, logger)
logger.error("An error has occurred.")
sys.exit(1)
if __name__ == '__main__':
main()


@@ -1,56 +0,0 @@
#!/bin/bash
# Copyright (c) 2024 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
# Script to encapsulate pip installation.
#
# Usage: pip_install <PYCMD>
#
# Options:
# PYCMD - The path to the python executable to use.
#####################################################################
set -x
PYCMD=$1
if [ -z "$PYCMD" ]
then
echo "Usage: pip_install <PYCMD>"
exit 1
fi
if [ ! -x "$PYCMD" ]
then
echo "$PYCMD is not an executable"
exit 1
fi
# This is going to be our default functionality for now. This will likely
# need to change if we add support for non-RHEL distros.
$PYCMD -m ensurepip --root /
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - pip installation failed for Python $PYCMD
**********************************************************************
EOF
exit 1
fi
exit 0


@@ -1,33 +0,0 @@
version: 3
images:
base_image:
name: quay.io/centos/centos:stream9
dependencies:
python: requirements.txt
galaxy: collections/requirements.yml
python_interpreter:
package_system: python3.12
python_path: /usr/bin/python3.12
ansible_core:
package_pip: ansible-core>=2.17.0
ansible_runner:
package_pip: ansible-runner==2.4.0
system: |
git-core [platform:rpm]
python3.11-devel [platform:rpm compile]
libcurl-devel [platform:rpm compile]
krb5-devel [platform:rpm compile]
krb5-workstation [platform:rpm]
subversion [platform:rpm]
subversion [platform:dpkg]
git-lfs [platform:rpm]
sshpass [platform:rpm]
rsync [platform:rpm]
epel-release [platform:rpm]
unzip [platform:rpm]
podman-remote [platform:rpm]
cmake [platform:rpm compile]
gcc [platform:rpm compile]
gcc-c++ [platform:rpm compile]
make [platform:rpm compile]
openssl-devel [platform:rpm compile]

31
inventories/core Normal file

@@ -0,0 +1,31 @@
[all:vars]
host_domain=core.dc.verdnatura.es
[backup:vars]
host_domain=backup.dc.verdnatura.es
[ceph]
ceph[1:3]
[ceph_gw]
ceph-gw[1:2]
[pve]
pve[01:05]
[infra:children]
ceph
ceph_gw
pve
[core]
core-agent
core-proxy
[backup]
bacula-dir
bacula-db
bacularis
backup-nas
tftp
kube-backup


@@ -1,23 +1,22 @@
hostname_fqdn: "{{inventory_hostname_short}}.{{host_domain}}"
ansible_host: "{{hostname_fqdn}}"
passbolt: 'anatomicjc.passbolt.passbolt'
passbolt_inventory: 'anatomicjc.passbolt.passbolt_inventory'
sysadmin_mail: sysadmin@domain.local
ansible_host: "{{inventory_hostname_short}}.{{host_domain}}"
sysadmin_mail: sysadmin@verdnatura.es
sysadmin_group: sysadmin
smtp_server: smtp.domain.local
homes_server: homes.domain.local
nagios_server: nagios.domain.local
time_server: time1.domain.local time2.domain.local
main_dns_server: ns1.domain.local
ldap_uri: ldap://ldap.domain.local
ldap_base: dc=domain,dc=local
smtp_server: smtp.verdnatura.es
homes_server: homes.servers.dc.verdnatura.es
nagios_server: nagios.verdnatura.es
time_server: time1.verdnatura.es time2.verdnatura.es
main_dns_server: ns1.verdnatura.es
ldap_uri: ldap://ldap.verdnatura.es
ldap_base: dc=verdnatura,dc=es
dc_net: "10.0.0.0/16"
resolv:
domain: verdnatura.es
search: verdnatura.es
resolvers:
- '8.8.8.8'
- '8.8.4.4'
awx_email: awx@domain.local
- '10.0.0.4'
- '10.0.0.5'
awx_email: awx@verdnatura.es
awx_pub_key: >
ssh-ed25519
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
awx@domain.local
passbolt_folder: 00000000-0000-0000-0000-000000000000
AAAAC3NzaC1lZDI1NTE5AAAAIKzAwWm+IsqZCgMzjdZ7Do3xWtVtoUCpWJpH7KSi2a/H
awx@verdnatura.es

38
inventories/lab Normal file

@@ -0,0 +1,38 @@
[all:vars]
host_domain=lab.verdnatura.es
[cephlab]
cephlab[01:03]
[pvelab]
pvelab[01:03]
[infra:children]
cephlab
pvelab
[cephtest]
cephtest[01:03]
[kubepre]
kubepre-helm
kubepre-proxy1
kubepre-master[1:3]
kubepre-worker[1:4]
[kubetest]
kubetest-helm
kubetest-master[01:03]
kubetest-worker[01:04]
[laboratory]
corelab-proxy1
zammad
matrix
influxdb2
[guest:children]
cephtest
kubepre
kubetest
laboratory

81
inventories/servers Normal file

@@ -0,0 +1,81 @@
[all:vars]
host_domain=servers.dc.verdnatura.es
[kube_master]
kube-master[1:5]
[kube_worker]
kube-worker[1:5]
[kube_proxy]
kube-proxy[1:2]
[kube_helper]
kube-helm
[kubernetes:children]
kube_master
kube_worker
kube_proxy
kube_helper
[ad]
dc[1:2]
server
[db]
db-proxy[1:2]
db[1:2]
[ldap]
ldap-proxy[1:2]
ldap[1:3]
[mail]
dovecot
mailgw[1:2]
postfix
spamd
spamd-db
[monitoring]
cacti
logger
nagios
nagiosql-db
librenms
[network]
dhcp[1:2]
ns[1:2]
unifi
vpn
time[1:2]
[princ]
pbx
homes
doku
iventoy
[rds]
ts-proxy[1:2]
profiles
[test]
test-db1
test-db-proxy[1:2]
monthly-db
dev-db
[guest:children]
ad
db
kubernetes
ldap
mail
monitoring
network
princ
rds
test


@@ -1,20 +0,0 @@
[all:vars]
host_domain=domain.local
[pve:vars]
host_domain=core.domain.local
[ceph]
ceph[1:3]
[pve]
pve[1:5]
[infra:children]
ceph
pve
[servers]
server1 ansible_host=10.0.0.1
server2 ansible_host=10.0.0.2
server3 ansible_host=10.0.0.3


@@ -1,8 +1,5 @@
- name: Configure Ceph
hosts: all
tasks:
- import_role:
name: debian
- import_role:
name: ceph
tags: service


@@ -2,30 +2,22 @@
hosts: all
gather_facts: no
tasks:
- name: Delete old awx-user and its configuration
block:
- name: Delete awx-user
tags: awx
user:
name: awx-user
state: absent
remove: yes
- name: Delete awx-user sudoers file
tags: awx
- name: Delete awx-user sudoers file
file:
path: /etc/sudoers.d/awx-user
state: absent
tags: awx
- name: Delete old MOTD configuration
tags: motd
file:
path: /etc/profile.d/mymotd.sh
state: absent
- name: Delete old profile configuration
tags: profile
block:
tags: motd
- name: Delete old Ansible bashrc configuration
blockinfile:
path: /root/.bashrc
@@ -33,6 +25,7 @@
marker_end: 'END ANSIBLE MANAGED BLOCK'
marker: "# {mark}"
state: absent
tags: bashrc
- name: Delete old custom bashrc configuration
replace:
path: /root/.bashrc
@@ -41,20 +34,3 @@
vars:
start_delimiter: '### 4Loo'
end_delimiter: 'esac'
- name: Delete old vn-host package
tags: vn-host
block:
- name: Get vn-host package version
shell: "dpkg-query -W -f='${Version}' vn-host 2>/dev/null || echo '0'"
register: vn_host_version
changed_when: false
- name: Display vn-host version
debug:
msg: "Version: {{ vn_host_version.stdout }}"
- name: Uninstall vn-host if old version
apt:
name: vn-host
state: absent
when: >
vn_host_version.stdout is version('3.0.0', '<')


@@ -1,8 +0,0 @@
- name: Configure DB
hosts: all
tasks:
- import_role:
name: debian
- import_role:
name: db
tags: service


@@ -1,5 +1,15 @@
- name: Configure base Debian host
hosts: all
vars_files: ../vault.yml
tasks:
- import_role:
name: debian
- name: Configure base system
import_role:
name: debian-base
- name: Configure guest
import_role:
name: debian-guest
when: ansible_virtualization_role == 'guest'
- name: Configure virtual machine
import_role:
name: debian-qemu
when: ansible_virtualization_role == 'guest' and ansible_virtualization_type == 'kvm'


@@ -1,35 +0,0 @@
- name: Gather facts from host and debug
hosts: all
gather_facts: yes
tasks:
- name: Print ansible facts
tags: facts
debug:
var: ansible_facts
- name: Print all variables
tags: vars
debug:
var: vars
- name: Print variable value
tags: [facts, vars, var]
when: var_name is defined
debug:
msg: "{{ var_name }}: {{ lookup('vars', var_name, default='undefined') }}"
- name: Check whether host is alive and reachable
tags: ping
ping:
- name: Fetch or create passbolt password
tags: passbolt
debug:
msg: "{{ lookup(passbolt, 'test', password=passbolt_password) }}"
vars:
passbolt_password: 'S3cR3tP4$$w0rd'
environment:
PASSBOLT_CREATE_NEW_RESOURCE: true
PASSBOLT_NEW_RESOURCE_PASSWORD_LENGTH: 18
PASSBOLT_NEW_RESOURCE_PASSWORD_SPECIAL_CHARS: false

10
playbooks/facts.yml Normal file

@@ -0,0 +1,10 @@
- name: Gather facts from host
hosts: all
gather_facts: yes
tasks:
- name: Print all available facts
debug:
var: ansible_facts
- name: Print variable value
debug:
msg: "Variable: {{ ansible_fqdn }}"


@@ -1,8 +1,5 @@
- name: Configure Kubernetes
hosts: all
tasks:
- import_role:
name: debian
- import_role:
name: kube
tags: service


@@ -1,22 +0,0 @@
- name: Change machine-id in Debian
hosts: all
gather_facts: no
become: yes
tasks:
- name: Remove files with old machine-id
file:
path: "{{ item }}"
state: absent
loop:
- /etc/machine-id
- /var/lib/dbus/machine-id
- name: Ensure a new UUID is generated for /etc/machine-id
command:
cmd: dbus-uuidgen --ensure=/etc/machine-id
- name: Create symbolic link for /var/lib/dbus/machine-id
file:
src: /etc/machine-id
dest: /var/lib/dbus/machine-id
state: link

10
playbooks/passbolt.yml Normal file

@@ -0,0 +1,10 @@
- name: Fetch passbolt password
hosts: all
gather_facts: no
tasks:
- name: Print password
debug:
msg: "Variable: {{ lookup(passbolt, 'test') }}"
vars:
passbolt: 'anatomicjc.passbolt.passbolt'
passbolt_inventory: 'anatomicjc.passbolt.passbolt_inventory'

6
playbooks/ping.yml Normal file

@@ -0,0 +1,6 @@
- name: Check whether host is alive and reachable
hosts: all
gather_facts: no
become: no
tasks:
- ping:


@@ -1,8 +1,5 @@
- name: Configure PVE
hosts: all
tasks:
- import_role:
name: debian
- import_role:
name: pve
tags: service


@@ -1,7 +0,0 @@
- name: Configure Directory, Time, and Database Services
hosts: all
tasks:
- name: Configure services to install in the server
import_role:
name: services


@@ -1,3 +1,3 @@
py-passbolt==0.0.18
cryptography==3.3.2
passlib==1.7.4
ansible==2.1.0


@@ -1,42 +0,0 @@
mariadb_base_packages:
- mariadb-server
- mariadb-backup
- pmm2-client
- pigz
mariadb_requeriments:
- curl
- apt-transport-https
certificates:
- { content: '{{ ca_company_deprecated }}', dest: '/etc/mysql/ca.pem', mode: 'u=rw,g=r,o=r' }
- { content: '{{ cert_mysql }}', dest: '/etc/mysql/cert.pem', mode: 'u=rw,g=r,o=r' }
- { content: '{{ cert_mysql_key }}', dest: '/etc/mysql/key.pem', mode: 'u=rw,g=,o=' }
required_directories_master:
- { path: /mnt/local-backup, owner: root, group: root, mode: 'u=rwx,g=rx,o=rx' }
- { path: /mnt/mysqlbin, owner: root, group: root, mode: 'u=rwx,g=rx,o=rx' }
- { path: /mnt/mysqlbin/binlog, owner: mysql, group: mysql, mode: 'u=rwx,g=,o=' }
required_directories:
- { path: /mnt/mysqltmp, owner: root, group: root, mode: 'u=rwx,g=rwx,o=rwxt' }
- { path: /root/scripts, owner: root, group: root, mode: 'u=rwx,g=rx,o=rx' }
- { path: /root/mariabackup, owner: root, group: root, mode: 'u=rwx,g=rx,o=rx' }
- { path: /mnt/mysqldata/mysql, owner: mysql, group: mysql, mode: 'u=rwx,g=rx,o=rx' }
- { path: /etc/systemd/system/mariadb.service.d, owner: root, group: root, mode: 'u=rwx,g=rx,o=rx' }
required_mariabackup_files_and_scripts:
- { src: mysql-flush.sh, dest: /etc/qemu/fsfreeze-hook.d/mysql-flush.sh, mode: u=rwx,g=rx,o=rx }
- { src: mariabackup/bacula-before.sh, dest: /root/mariabackup/bacula-before.sh, mode: u=rwx,g=rx,o=rx }
- { src: mariabackup/config.sh, dest: /root/mariabackup/config.sh, mode: u=rwx,g=rx,o=x }
- { src: mariabackup/inc-backup.sh, dest: /root/mariabackup/inc-backup.sh, mode: u=rwx,g=rx,o=rx }
- { src: mariabackup/restore-backup.sh, dest: /root/mariabackup/restore-backup.sh, mode: u=rwx,g=rx,o=rx }
- { src: scripts/check-memory.sh, dest: /root/scripts/check-memory.sh, mode: u=rwx,g=rx,o=rx }
- { src: scripts/export-privs.sh, dest: /root/scripts/export-privs.sh, mode: u=rwx,g=rx,o=rx }
- { src: scripts/mysqltuner.pl, dest: /root/scripts/mysqltuner.pl, mode: u=rwx,g=rx,o=rx }
- { src: scripts/promote-master.sh, dest: /root/scripts/promote-master.sh, mode: u=rwx,g=rx,o=rx }
- { src: scripts/promote-slave.sh, dest: /root/scripts/promote-slave.sh, mode: u=rwx,g=rx,o=rx }
- { src: scripts/README.md, dest: /root/scripts/README.md, mode: u=rw,g=r,o=r }
- { src: scripts/scheduler-log.sh, dest: /root/scripts/scheduler-log.sh, mode: u=rwx,g=rx,o=rx }
downloads:
- url: https://r.mariadb.com/downloads/mariadb_repo_setup
dest: /tmp/mariadb_repo_setup
mode: u=rwx,g=rx,o=rx
- url: https://repo.percona.com/apt/percona-release_latest.generic_all.deb
dest: /tmp/percona-release_latest.generic_all.deb
mode: u=rw,g=r,o=r

View File

@ -1,98 +0,0 @@
[mysqld]
# Docs: https://mariadb.com/kb/en/server-system-variables
lc_messages = es_ES
lc_time_names = es_ES
character-set-server = utf8
collation-server = utf8_unicode_ci
explicit_defaults_for_timestamp = ON
datadir = /mnt/mysqldata/mysql
tmpdir = /mnt/mysqltmp
log_bin_trust_function_creators = 1
sql_mode = NO_ENGINE_SUBSTITUTION
bind-address = 0.0.0.0
max_password_errors = 50
#++++++++++++++++++++++++++++++++++++++++ Threads
thread_stack = 512K
join_buffer_size = 2M
sort_buffer_size = 4M
net_buffer_length = 256K
max_allowed_packet = 16M
read_buffer_size = 1M
read_rnd_buffer_size = 512K
#++++++++++++++++++++++++++++++++++++++++ Performance
thread_cache_size = 450
interactive_timeout = 1800
wait_timeout = 1800
open_files_limit = 20000
low_priority_updates = 1
table_open_cache = 40000
table_definition_cache = 10000
table_open_cache_instances = 1
key_buffer_size = 256K
max_heap_table_size = 128M
tmp_table_size = 128M
concurrent_insert = ALWAYS
group_concat_max_len = 10000
max_connect_errors = 50
#++++++++++++++++++++++++++++++++++++++++ Binary log
max_binlog_size = 1GB
binlog_cache_size = 16M
binlog_stmt_cache_size = 16M
binlog_row_image = noblob
binlog_format = row
#++++++++++++++++++++++++++++++++++++++++ InnoDB
transaction-isolation = READ-COMMITTED
idle_transaction_timeout = 60
innodb_io_capacity = 100
innodb_io_capacity_max = 100
innodb_monitor_enable = all
innodb_read_io_threads = 16
innodb_write_io_threads = 16
innodb_checksum_algorithm = crc32
innodb_adaptive_hash_index = 0
innodb_flush_method = O_DIRECT
innodb_log_buffer_size = 32M
innodb_purge_threads = 4
innodb_buffer_pool_dump_at_shutdown = ON
innodb_buffer_pool_load_at_startup = ON
#++++++++++++++++++++++++++++++++++++++++ Logging
log_error = /var/log/mysql/error.log
log_output = TABLE
general_log = OFF
slow_query_log = ON
long_query_time = 2
min_examined_row_limit = 0
log_slow_admin_statements = ON
log_queries_not_using_indexes = OFF
max_error_count = 100
#++++++++++++++++++++++++++++++++++++++++ SSL
ssl-ca = /etc/mysql/ca.pem
ssl-cert = /etc/mysql/cert.pem
ssl-key = /etc/mysql/key.pem
#++++++++++++++++++++++++++++++++++++++++ Query cache
query_cache_limit = 0
query_cache_type = OFF
query_cache_size = 0
#++++++++++++++++++++++++++++++++++++++++ Performance Schema
performance_schema = ON
performance_schema_digests_size = 20000
performance-schema-consumer-events-statements-history = ON
performance_schema_consumer_events_statements_history_long = ON
userstat = ON

View File

@ -1,14 +0,0 @@
[mysqld]
port = 3307
bind-address = 0.0.0.0
innodb_buffer_pool_size = 18G
event-scheduler = OFF
innodb_log_file_size = 5G
log_warnings = 2
#++++++++++++++++++++++++++++++++++++++++ Binary log
log-bin = bin.log
expire_logs_days = 1
relay_log = mysqld-relay-bin

View File

@ -1,29 +0,0 @@
[mysqld]
port = 3306
innodb_log_file_size = 8G
log_warnings = 1
#++++++++++++++++++++++++++++++++++++++++ Binary log
log-bin = /mnt/mysqlbin/binlog/bin.log
max_connections = 1000
expire_logs_days = 7
innodb_buffer_pool_size = 64G
relay_log = /mnt/mysqlbin/binlog/relay.log
binlog-ignore-db = tmp
binlog-ignore-db = PERCONA_SCHEMA
#++++++++++++++++++++++++++++++++++++++++ Replication
event-scheduler = ON
slave_exec_mode = STRICT
replicate-ignore-db = tmp
replicate-ignore-table = util.eventLog
replicate-ignore-table = cache.cache_calc
replicate-ignore-table = cache.available
replicate-ignore-table = cache.availableNoRaids
replicate-ignore-table = cache.cache_valid
replicate-ignore-table = cache.stock
replicate-ignore-table = cache.visible

View File

@ -1,23 +0,0 @@
UPDATE vn2008.tblContadores
SET dbproduccion = FALSE;
DELETE FROM util.binlogQueue;
GRANT
SELECT,
INSERT,
UPDATE,
DELETE,
CREATE,
DROP,
INDEX,
ALTER,
CREATE TEMPORARY TABLES,
CREATE VIEW,
EVENT,
TRIGGER,
SHOW VIEW,
CREATE ROUTINE,
ALTER ROUTINE,
EXECUTE
ON *.* TO developerBoss;

View File

@ -1,6 +0,0 @@
UPDATE vn2008.tblContadores
SET dbproduccion = FALSE;
DELETE FROM util.binlogQueue;
UPDATE `account`.`user` SET `active` = TRUE WHERE `name` = 'mindshore';

View File

@ -1,51 +0,0 @@
#!/bin/bash
set -e
myDir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$myDir/config.sh"
. "$myDir/apply.config.sh"
todayDir=$(date +%Y-%m-%d)
pattern="$baculaDir/mnt/local-backup/${todayDir}_??-??_full.gz"
files=($pattern)
backupFile="${files[0]}"
"$myDir/restore-backup.sh" "$backupFile"
rm -r "$baculaDir"
if [[ "${#dbClusterSiblings[@]}" -gt "0" ]]; then
for node in "${dbClusterSiblings[@]}"; do
ssh root@$node service mysql stop
ssh root@$node "if pgrep mariadbd; then pkill -9 mariadbd; fi"
done
galera_new_cluster
else
service mariadb start
fi
echo "Applying custom script."
mysql -e "UPDATE util.config SET environment = '$dbEnvironment', lastDump = NOW()"
mysql < "$myDir/apply.sql"
echo "Upgrading tables."
mysql_upgrade
echo "Applying repository changes."
curl --silent --request POST --location --user "$jenkinsAuth" "$jenkinsUrl/build?delay=0sec"
echo "Waiting for Jenkins job to end."
jobResult=null
while [ "$jobResult" = "null" ]; do
sleep 10
jobResult=$(curl --silent --location --user "$jenkinsAuth" "$jenkinsUrl/lastBuild/api/json" | jq --raw-output ".result")
done
echo "Job result: $jobResult"
echo "Promoting to master."
"/root/scripts/promote-master.sh"
for node in "${dbClusterSiblings[@]}"; do
ssh root@$node service mysql start
done

View File

@ -1,31 +0,0 @@
#!/bin/bash
# https://mariadb.com/kb/en/mariabackup/
set -e -o pipefail
myDir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$myDir/config.sh"
todayDir="$(date +%Y-%m-%d)"
backupName="${todayDir}_$(date +"%H-%M")_full"
backupFile="$backupDir/$backupName.gz"
if [ -d "$backupDir" ]; then
rm -rf "$backupDir/"*
fi
ulimit -n 8192
if ! mariabackup \
	--defaults-extra-file="$myDir/my.cnf" \
	--backup \
	--extra-lsndir="$backupDir/$backupName" \
	--history="$todayDir" \
	--stream=xbstream \
	--parallel=4 \
	2>> "$logFile" \
	| pigz -p 12 \
	> "$backupFile"
then
	echo "An error occurred during backup, please take a look at the log file: $logFile"
	exit 1
fi

View File

@ -1,20 +0,0 @@
#!/bin/bash
# Destination file for backup logs
logFile=/var/log/vn-mariabackup.log
# Temporary local directory to save backups
backupDir=/mnt/local-backup
# Directory for backup history
historyDir=/mnt/backup4mariadb
# Number of days for backup rotation
cleanDays=90
# Directory for temporary restore data
restoreDir=/mnt/mysqldata/mysql-restore
# Directory of MySQL data
dataDir=/mnt/mysqldata/mysql

View File

@ -1,38 +0,0 @@
#!/bin/bash
# https://mariadb.com/kb/en/incremental-backup-and-restore-with-mariabackup/
set -e -o pipefail
myDir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$myDir/config.sh"
todayDir="$(date +%Y-%m-%d)"
todayPath="$historyDir/$todayDir"
pattern="$todayPath/${todayDir}_??-??_full.xb.gz.enc"
files=($pattern)
backupFile="${files[0]}"
backupBase=$(basename -- "$backupFile")
backupName="${backupBase%%.*}"
incrementalName="${todayDir}_$(date +"%H-%M")_incremental"
incrementalFile="$backupDir/${incrementalName}.xb.gz.enc"
ulimit -n 24098
if ! mariabackup \
	--defaults-extra-file="$myDir/my.cnf" \
	--backup \
	--incremental-basedir="$backupDir/$backupName" \
	--extra-lsndir="$backupDir/$incrementalName" \
	--incremental-history-name="$todayDir" \
	2>> "$logFile" \
	| gzip \
	| openssl enc -aes-256-cbc -pbkdf2 -kfile "$myDir/xbcrypt.key" \
	> "$incrementalFile"
then
	echo "An error occurred during backup, please take a look at the log file: $logFile"
	exit 1
fi
cp "$incrementalFile" "$todayPath"
cp -r "$backupDir/$incrementalName" "$todayPath"

View File

@ -1,59 +0,0 @@
#!/bin/bash
# https://mariadb.com/kb/en/using-encryption-and-compression-tools-with-mariabackup/
set -e
myDir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$myDir/config.sh"
backupFile=$1
formatted_date() {
date '+%Y-%m-%d %H:%M:%S'
}
if [ -z "$backupFile" ]; then
echo "Backup file not defined."
exit 1
fi
if [ ! -f "$backupFile" ]; then
echo "Backup file does not exist: $backupFile"
exit 2
fi
echo "Restoring MySQL data from backup."
rm -rf "$restoreDir"
mkdir -p "$restoreDir"
echo "$(formatted_date)"
echo "Decompresing backup."
pigz --decompress --processes 4 --stdout "$backupFile" \
| mbstream --extract --parallel=4 --directory="$restoreDir"
echo "Preparing backup."
mariabackup \
--defaults-extra-file="$myDir/my.cnf" \
--prepare \
--target-dir="$restoreDir"
echo "$(formatted_date)"
echo "Stopping service."
service mariadb stop
if pgrep mariadbd; then pkill -9 mariadbd; fi
echo "Restoring data."
rm -rf "$dataDir"
mariabackup \
--defaults-extra-file="$myDir/my.cnf" \
--move-back \
--target-dir="$restoreDir" \
2>> "$logFile"
chown -R mysql:mysql "$dataDir"
chmod 755 "$dataDir"
rm "$dataDir/mysql/slow_log."*
rm "$dataDir/mysql/general_log."*
echo "Removing restore data."
rm -r "$restoreDir"

View File

@ -1,3 +0,0 @@
[Service]
LimitNOFILE=600000
LimitMEMLOCK=2M

View File

@ -1,57 +0,0 @@
#!/bin/sh
# https://github.com/qemu/qemu/blob/master/scripts/qemu-guest-agent/fsfreeze-hook.d/mysql-flush.sh.sample
# Flush MySQL tables to the disk before the filesystem is frozen.
# At the same time, this keeps a read lock in order to avoid write accesses
# from the other clients until the filesystem is thawed.
MYSQL="/usr/bin/mysql"
MYSQL_OPTS="-uroot" #"-prootpassword"
FIFO=/var/run/mysql-flush.fifo
# Check mysql is installed and the server running
[ -x "$MYSQL" ] && "$MYSQL" $MYSQL_OPTS < /dev/null || exit 0
flush_and_wait() {
printf "FLUSH TABLES WITH READ LOCK \\G\n"
trap 'printf "$(date): $0 is killed\n">&2' HUP INT QUIT ALRM TERM
read < $FIFO
printf "UNLOCK TABLES \\G\n"
rm -f $FIFO
}
case "$1" in
freeze)
mkfifo $FIFO || exit 1
flush_and_wait | "$MYSQL" $MYSQL_OPTS &
# wait until every block is flushed
while [ "$(echo 'SHOW STATUS LIKE "Key_blocks_not_flushed"' |\
"$MYSQL" $MYSQL_OPTS | tail -1 | cut -f 2)" -gt 0 ]; do
sleep 1
done
# for InnoDB, wait until every log is flushed
INNODB_STATUS=$(mktemp /tmp/mysql-flush.XXXXXX)
[ $? -ne 0 ] && exit 2
trap "rm -f $INNODB_STATUS; exit 1" HUP INT QUIT ALRM TERM
while :; do
printf "SHOW ENGINE INNODB STATUS \\G" |\
"$MYSQL" $MYSQL_OPTS > $INNODB_STATUS
LOG_CURRENT=$(grep 'Log sequence number' $INNODB_STATUS |\
tr -s ' ' | cut -d' ' -f4)
LOG_FLUSHED=$(grep 'Log flushed up to' $INNODB_STATUS |\
tr -s ' ' | cut -d' ' -f5)
[ "$LOG_CURRENT" = "$LOG_FLUSHED" ] && break
sleep 1
done
rm -f $INNODB_STATUS
;;
thaw)
[ ! -p $FIFO ] && exit 1
echo > $FIFO
;;
*)
exit 1
;;
esac

View File

@ -1,4 +0,0 @@
command[check_disk_mysqldata]=/usr/lib/nagios/plugins/check_disk -w 10% -c 5% -p /mnt/mysqldata
command[check_disk_mysqlbin]=/usr/lib/nagios/plugins/check_disk -w 10% -c 5% -p /mnt/mysqlbin
command[check_disk_backup]=/usr/lib/nagios/plugins/check_disk -w 10% -c 5% -p /mnt/local-backup
command[check_mysql_scheduler]=/etc/nagios/plugins/check_mysql_scheduler

View File

@ -1 +0,0 @@
*/30 * * * * root /root/scripts/scheduler-log.sh

View File

@ -1,19 +0,0 @@
# Scripts to maintain MariaDB
## scheduler-log.sh
The following table should be created in the MySQL/MariaDB database.
```
CREATE TABLE `eventLog` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`date` datetime NOT NULL,
`event` varchar(512) NOT NULL,
`error` varchar(1024) NOT NULL,
PRIMARY KEY (`id`),
KEY `date` (`date`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_unicode_ci COMMENT='Event scheduler error log'
```
Then adjust the *$logSchema* and *$logTable* variables to match the schema and table you created.
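Once the script is running, recent scheduler errors can be inspected with a query such as (assuming the default util.eventLog location):
```
mysql -e "SELECT date, event, error FROM util.eventLog ORDER BY date DESC LIMIT 10"
```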

View File

@ -1,9 +0,0 @@
#!/bin/bash
minFree=1
memFree=$(free --gibi | awk '$1 == "Mem:" { print $7 }')
if [ "$memFree" -le "$minFree" ]; then
echo "Free memory is ${memFree}Gi, restarting mariadb service to prevent OOM killer..."
systemctl restart mariadb
fi

View File

@ -1,25 +0,0 @@
#!/bin/bash
OUTFILE=privs.sql
SCHEMA=mysql
TABLES=(
global_priv
db
tables_priv
columns_priv
procs_priv
proxies_priv
roles_mapping
)
echo "USE \`$SCHEMA\`;" > "$OUTFILE"
for TABLE in "${TABLES[@]}"
do
echo "TRUNCATE TABLE \`$SCHEMA\`.\`$TABLE\`;" >> "$OUTFILE"
done
echo "" >> "$OUTFILE"
mysqldump --no-create-info --skip-triggers "$SCHEMA" "${TABLES[@]}" >> "$OUTFILE"
echo "FLUSH PRIVILEGES;" >> "$OUTFILE"

File diff suppressed because it is too large

View File

@ -1,15 +0,0 @@
#!/bin/bash
mysql -e "SET GLOBAL event_scheduler = OFF"
echo "SELECT db, name FROM mysql.event WHERE status = 'SLAVESIDE_DISABLED'" | mysql --raw --silent | \
awk '{
gsub("`", "``", $1);
gsub("`", "``", $2);
print "`"$1"`.`"$2"`";
}' | \
while read event; do
mysql -e "ALTER EVENT $event ENABLE"
done
mysql -e "SET GLOBAL event_scheduler = ON"

View File

@ -1,16 +0,0 @@
#!/bin/bash
mysql -e "SET GLOBAL event_scheduler = OFF"
echo "SELECT db, name FROM mysql.event WHERE status = 'ENABLED'" | mysql --raw --silent | \
awk '{
gsub("`", "``", $1);
gsub("`", "``", $2);
print "`"$1"`.`"$2"`";
}' | \
while read event; do
mysql -e "ALTER EVENT $event DISABLE ON SLAVE"
done
mysql -e "SET GLOBAL event_scheduler = ON"

View File

@ -1,73 +0,0 @@
#!/bin/bash
set -e
logFile="/var/log/mysql/error.log"
dateFile="/tmp/mysql_scheduler_log-lastdate"
logSchema="util"
logTable="eventLog"
pattern='^\d{4}-\d{2}-\d{2}\s+\d{1,2}:\d{2}:\d{2}\s+\d+\s+\[ERROR\] Event Scheduler:'
purgeDays=30
quote() {
	# Escape backslashes first, then double single quotes for SQL
	local str=${1//\\/\\\\}
	str=${str//\'/\'\'}
	echo "'$str'"
}
mysql -e "SELECT TRUE" > /dev/null 2>&1
if [ "$?" -ne "0" ]; then
exit
fi
tableExists=$(mysql -Ns -e "SHOW TABLES FROM $logSchema LIKE '$logTable'")
if [ -z "$tableExists" ]; then
mysql <<-EOF
CREATE SCHEMA IF NOT EXISTS $logSchema;
CREATE TABLE $logSchema.$logTable (
id int(11) NOT NULL AUTO_INCREMENT,
date datetime NOT NULL,
event varchar(512) NOT NULL,
error varchar(1024) NOT NULL,
PRIMARY KEY (id),
KEY date (date)
) ENGINE=InnoDB COMMENT='Event scheduler error log';
EOF
fi
if [ -f "$dateFile" ]; then
read -r fromDate < "$dateFile"
else
fromDate=$(date -d "-$purgeDays days" +%s)
fi
toDate=$(date +%s)
grep -P "$pattern" "$logFile" | awk -v fromDate="$fromDate" -v toDate="$toDate" '{
split($1, date, "-");
split($2, time, ":");
timestamp = mktime(date[1]" "date[2]" "date[3]" "time[1]" "time[2]" "time[3])
if (timestamp >= fromDate && timestamp < toDate) {
printf $1" "$2" "$7;
for (i=8; i<=NF; i++) printf FS $i ;
print "";
}
}' | \
while read line; do
date="$(echo "$line" | cut -d' ' -f1,2)"
event="$(echo "$line" | cut -d' ' -f3)"
error="$(echo "$line" | cut -d' ' -f4-)"
mysql <<-EOF
INSERT INTO $logSchema.$logTable SET
date = $(quote "$date"),
event = $(quote "$event"),
error = $(quote "$error")
EOF
done
echo "$toDate" > "$dateFile"
mysql <<-EOF
DELETE FROM $logSchema.$logTable
WHERE date < TIMESTAMPADD(DAY, -$purgeDays, NOW())
EOF

View File

@ -1,11 +0,0 @@
- name: reload-systemd
command:
cmd: systemctl daemon-reload
- name: restart-mariadb
systemd:
name: mariadb
state: restarted
- name: restart-nrpe
service:
name: nagios-nrpe-server
state: restarted

View File

@ -1,5 +0,0 @@
- import_tasks: mariadb.yml
- when: db.branch == 'master'
import_tasks: production.yml
- when: db.branch in ['dev', 'test']
import_tasks: test.yml

View File

@ -1,155 +0,0 @@
- name: Install requirements for the MariaDB repository setup script
  apt:
    name: "{{ mariadb_requirements }}"
state: present
install_recommends: no
- name: Download required setup files
get_url:
url: "{{ item.url }}"
dest: "{{ item.dest }}"
mode: "{{ item.mode }}"
loop: "{{ downloads }}"
- name: Run MariaDB repository setup script
command:
cmd: "/bin/bash /tmp/mariadb_repo_setup --mariadb-server-version=10.11.10"
creates: "/etc/apt/sources.list.d/mariadb.list"
- name: Install Percona repository package
apt:
deb: "/tmp/percona-release_latest.generic_all.deb"
state: present
install_recommends: no
- name: Update apt cache
apt:
update_cache: yes
- name: Install MariaDB packages
apt:
name: "{{ mariadb_base_packages }}"
state: present
install_recommends: no
- name: Add tmpfs in /etc/fstab
blockinfile:
path: /etc/fstab
marker: "# {mark} ANSIBLE-MANAGED TMPFS ENTRY"
block: |
tmpfs /mnt/mysqltmp tmpfs rw,size={{ mysqltmpsize }} 0 0
register: fstab
- name: Configure MariaDB memory check CRON
template:
src: check-memory.cron
dest: /etc/cron.d/vn-check-memory
owner: root
group: root
mode: u=rw,g=r,o=r
- name: Configure MariaDB scheduler log CRON
copy:
src: scheduler-log.cron
dest: /etc/cron.d/vn-scheduler-log
owner: root
group: root
mode: u=rw,g=r,o=r
- name: Insert MySQL certificates
no_log: true
copy:
content: "{{ item.content }}"
dest: "{{ item.dest }}"
owner: mysql
group: mysql
mode: "{{ item.mode }}"
loop: "{{ certificates }}"
notify: restart-mariadb
- name: Ensure required directories exist
file:
path: "{{ item.path }}"
state: directory
owner: "{{ item.owner }}"
group: "{{ item.group }}"
mode: "{{ item.mode }}"
loop: "{{ required_directories }}"
- name: Copy required MariaBackup files and scripts
copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
mode: "{{ item.mode }}"
loop: "{{ required_mariabackup_files_and_scripts }}"
- name: Set MariaDB common configuration
copy:
src: conf/z90-vn.cnf
dest: /etc/mysql/mariadb.conf.d/
owner: root
group: root
mode: u=rw,g=r,o=r
notify: restart-mariadb
- name: Set MariaDB local configuration template
template:
src: conf/z99-local.cnf
dest: /etc/mysql/mariadb.conf.d/
owner: root
group: root
mode: u=rw,g=r,o=r
- name: Set MariaBackup connection configuration
template:
src: mariabackup/my.cnf
dest: /root/mariabackup/
owner: root
group: root
mode: u=rw,g=,o=
- name: Override MariaDB systemd service configuration
copy:
src: mariadb_override.conf
dest: /etc/systemd/system/mariadb.service.d/override.conf
owner: root
group: root
mode: u=rw,g=r,o=r
notify: reload-systemd
- name: Set MariaDB NRPE configuration
copy:
src: nrpe/95-mariadb.cfg
dest: /etc/nagios/nrpe.d/
owner: root
group: root
mode: u=rw,g=r,o=r
notify: restart-nrpe
- name: Check if /var/lib/mysql/ exists
stat:
path: /var/lib/mysql/
register: mysql_dir
- when: mysql_dir.stat.exists
block:
- name: Sync MySQL data directory
synchronize:
src: /var/lib/mysql/
dest: /mnt/mysqldata/mysql/
archive: true
compress: true
recursive: true
delegate_to: "{{ inventory_hostname }}"
- name: Remove old MySQL data after sync
file:
path: /var/lib/mysql/
state: absent
- name: Mount all filesystems from /etc/fstab
command: mount -a
when: fstab.changed

View File

@ -1,26 +0,0 @@
- name: Ensure required production directories exist
file:
path: "{{ item.path }}"
state: directory
owner: "{{ item.owner }}"
group: "{{ item.group }}"
mode: "{{ item.mode }}"
loop: "{{ required_directories_master }}"
- name: Set production MariaDB custom configuration
copy:
src: conf/z95-production.cnf
dest: /etc/mysql/mariadb.conf.d/
owner: root
group: root
mode: u=rw,g=r,o=r
notify: restart-mariadb
- name: Reminder to check production mount points
debug:
msg: |
Remember to check the following mount points:
- /var/lib/mysql
- /mnt/mysqlbin
- /mnt/local-backup
Make sure they are correctly configured and accessible.
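A quick manual verification of those mount points (a sketch, using the util-linux mountpoint tool):
```
for m in /var/lib/mysql /mnt/mysqlbin /mnt/local-backup; do
	mountpoint "$m" || echo "NOT MOUNTED: $m"
done
```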

View File

@ -1,39 +0,0 @@
- name: Set test MariaBackup files
copy:
src: mariabackup/bacula-after.sh
dest: /root/mariabackup/
owner: root
group: root
mode: u=rwx,g=rx,o=rx
- name: Set test MariaBackup apply configuration
template:
src: mariabackup/apply.config.sh
dest: /root/mariabackup/
owner: root
group: root
mode: u=rw,g=,o=
- name: Set test MariaBackup apply SQL script
copy:
src: mariabackup/apply.{{db.branch}}.sql
dest: /root/mariabackup/apply.sql
owner: root
group: root
mode: u=rw,g=,o=
- name: Set test MariaDB custom configuration
copy:
src: conf/z92-test.cnf
dest: /etc/mysql/mariadb.conf.d/
owner: root
group: root
mode: u=rw,g=r,o=r
notify: restart-mariadb
- name: Reminder to check test environment mount points
debug:
msg: |
Remember to check the following mount points:
- /mnt/mysqltmp
Make sure they are correctly configured and accessible.

View File

@ -1,3 +0,0 @@
MAILTO="{{ sysadmin_mail }}"
*/15 * * * * root /root/scripts/check-memory.sh

View File

@ -1,7 +0,0 @@
[mysqld]
server-id = {{ serverid }}
#bind-address = 127.0.0.1
#event-scheduler = OFF
#skip-log-bin
#skip-slave-start

View File

@ -1,19 +0,0 @@
#!/bin/bash
# Bacula directory for restore
baculaDir=/mnt/mysqldata/bacula-restore
# Database branch name
dbBranch={{ db.branch }}
# Database environment
dbEnvironment={{ db.environment }}
# MariaDB cluster sibling node hostnames
dbClusterSiblings=()
# Jenkins authentication string
jenkinsAuth=jenkins:{{ lookup(passbolt, 'jenkinsAuth', folder_parent_id=passbolt_folder).password }}
# Jenkins job URL
jenkinsUrl=https://jenkins.verdnatura.es/job/Scheduler/job/db-apply-changes-{{ db.branch }}

View File

@ -1,7 +0,0 @@
[mariabackup]
host = localhost
user = mariabackup
password = {{ lookup(passbolt, 'mariabackup', folder_parent_id=passbolt_folder).password }}
use-memory = 1G
parallel = 4
stream = xbstream

View File

@ -0,0 +1,7 @@
default_user: user
root_password: Pa$$w0rd
fail2ban:
email: "{{ sysadmin_mail }}"
bantime: 600
maxretry: 4
ignore: "127.0.0.0/8 {{ dc_net }}"

View File

@ -9,39 +9,26 @@ BLINK="\033[5m"
# Environment
FQDN=$(hostname --fqdn)
PRO="\033[1;5;31m"
LAB="\033[0;35m"
VN="\033[0;32m"
UNKNOWN="\033[0;33m"
if [ -f "/etc/vn/env" ]; then
read -r VN_ENV < /etc/vn/env
case "$VN_ENV" in
lab)
ENV_COLOR="\033[0;32m"
ENV_TEXT="Laboratory"
FQDN=$(hostname --fqdn)
case "$FQDN" in
*.dc.verdnatura.es)
ENVIRONMENT="${PRO}Production${RESET}"
;;
pre)
ENV_COLOR="\033[0;35m"
ENV_TEXT="Pre-production"
*.lab.verdnatura.es)
ENVIRONMENT="${LAB}Laboratory${RESET}"
;;
test)
ENV_COLOR="\033[0;33m"
ENV_TEXT="Testing"
;;
pro)
ENV_COLOR="\033[1;5;31m"
ENV_TEXT="Production"
*.verdnatura.es)
ENVIRONMENT="${VN}Verdnatura${RESET}"
;;
*)
ENV_COLOR="\033[0;36m"
ENV_TEXT="$VN_ENV"
ENVIRONMENT="${UNKNOWN}Unknown${RESET}"
;;
esac
fi
if [ -z "$ENV_TEXT" ]; then
ENV_COLOR="\033[0;37m"
ENV_TEXT="Undefined"
fi
ENV_TEXT="${ENV_COLOR}${ENV_TEXT}${RESET}"
esac
# Last login
@ -70,7 +57,7 @@ if [ $SHOW_UPGRADEABLE -eq 1 ] ; then
UPGRADEABLE="$(apt list --upgradable 2>/dev/null | tail -n +2 | wc -l)"
if [ "$UPGRADEABLE" -gt 0 ]; then
UPGRADEABLE_ALERT="($UPGRADEABLE upgradeable)"
UPGRADEABLE_ALERT="${BLINK}($UPGRADEABLE upgradeable)${RESET}"
fi
fi
@ -110,6 +97,6 @@ echo -e "${LABEL}Packages :${RESET} $PACKAGES $UPGRADEABLE_ALERT"
echo -e "${LABEL}IP :${RESET}"
echo -e "$NET_IPS"
echo -e "${LABEL}Last Login :${RESET} $LAST_LOGIN"
echo -e "${LABEL}Environment :${RESET} $ENV_TEXT"
echo -e "${LABEL}Environment :${RESET} $ENVIRONMENT"
echo -e "${LABEL}Connected users :${RESET}"
echo -e "$CONNECTED_USERS"

View File

@ -0,0 +1,47 @@
#!/bin/bash
# Prompt
FQDN=$(hostname --fqdn)
if [[ $FQDN == *.verdnatura.es ]]; then
SHORT_HOST=${FQDN%.verdnatura.es}
case "$SHORT_HOST" in
*.dc)
ENVIRONMENT="\[\033[01;31m\]PRO\[\033[00m\]"
;;
*.lab)
ENVIRONMENT="\[\033[01;35m\]LAB\[\033[00m\]"
;;
*)
ENVIRONMENT="\[\033[01;32m\]VN\[\033[00m\]"
;;
esac
PS1="\u@$SHORT_HOST[$ENVIRONMENT]:\w"
if [ "$(id -u)" -eq 0 ]; then
PS1="$PS1# "
else
PS1="$PS1\$ "
fi
fi
# History
HISTSIZE=10000
HISTFILESIZE=50000
HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S "
# Security
TMOUT=3600
# Aliases
#export LS_OPTIONS='--color=auto'
#eval "$(dircolors)"
#alias ls='ls $LS_OPTIONS'
#alias ll='ls $LS_OPTIONS -l'
#alias la='ls $LS_OPTIONS -la'

View File

@ -0,0 +1,8 @@
#!/bin/bash
echo 'tzdata tzdata/Areas select Europe' | debconf-set-selections
echo 'tzdata tzdata/Zones/Europe select Madrid' | debconf-set-selections
echo 'tzdata tzdata/Zones/Etc select UTC' | debconf-set-selections
rm -f /etc/timezone /etc/localtime
dpkg-reconfigure -f noninteractive tzdata
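To confirm the change took effect (sketch):
```
timedatectl | grep 'Time zone'   # expect Europe/Madrid
```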

View File

@ -0,0 +1,21 @@
- name: restart-timesyncd
service:
name: systemd-timesyncd
state: restarted
- name: restart-exim
service:
name: exim4
state: restarted
- name: restart-ssh
service:
name: ssh
state: restarted
- name: restart-fail2ban
service:
name: fail2ban
state: restarted
- name: restart-nrpe
service:
name: nagios-nrpe-server
state: restarted

View File

@ -0,0 +1,20 @@
- name: Install Bacula FD packages
apt:
name: bacula-fd
state: present
- name: Load Bacula default passwords
slurp:
src: /etc/bacula/common_default_passwords
register: bacula_passwords
- name: Configure Bacula FD
template:
src: bacula-fd.conf
dest: /etc/bacula/bacula-fd.conf
owner: root
group: bacula
mode: '0640'
backup: true
- name: Restart Bacula FD service
service:
name: bacula-fd
state: restarted

View File

@ -0,0 +1,15 @@
- name: Install fail2ban packages
  apt:
    name:
      - fail2ban
      - rsyslog
    state: present
- name: Configure fail2ban service
template:
src: jail.local
dest: /etc/fail2ban/jail.local
owner: root
group: root
mode: '0644'
notify: restart-fail2ban

View File

@ -0,0 +1,10 @@
- name: Install base packages
  apt:
    name:
      - htop
      - psmisc
      - bash-completion
      - screen
      - aptitude
    state: present

View File

@ -0,0 +1,15 @@
- name: Enable locale languages
lineinfile:
dest: /etc/locale.gen
regexp: "{{item.regexp}}"
line: "{{item.line}}"
state: present
with_items:
- regexp: "^# es_ES.UTF-8 UTF-8"
line: "es_ES.UTF-8 UTF-8"
- regexp: "^# en_US.UTF-8 UTF-8"
line: "en_US.UTF-8 UTF-8"
- name: Generate locale
command: locale-gen
- name: Update locale
command: update-locale LANG=en_US.UTF-8

View File

@ -0,0 +1,16 @@
- import_tasks: install.yml
tags: install
- import_tasks: locale.yml
tags: locale
- import_tasks: tzdata.yml
tags: tzdata
- import_tasks: relayhost.yml
tags: relayhost
- import_tasks: motd.yml
tags: motd
- import_tasks: profile.yml
tags: profile
- import_tasks: vim.yml
tags: vim
- import_tasks: nrpe.yml
tags: nrpe

View File

@ -2,6 +2,6 @@
copy:
src: motd
dest: /etc/update-motd.d/90-vn
mode: u=rwx,g=rx,o=rx
mode: '755'
owner: root
group: root

View File

@ -1,8 +1,10 @@
- name: Install NRPE packages
apt:
name: "{{ nagios_packages }}"
name: "{{ item }}"
state: present
install_recommends: no
loop:
- nagios-nrpe-server
- nagios-plugins-contrib
- name: Set NRPE generic configuration
template:
src: nrpe.cfg

View File

@ -2,6 +2,6 @@
copy:
src: profile.sh
dest: /etc/profile.d/vn.sh
mode: u=rw,g=r,o=r
mode: '644'
owner: root
group: root

View File

@ -0,0 +1,48 @@
- name: Install exim packages
apt:
name: exim4
state: present
- name: Prepare exim configuration
lineinfile:
dest: /etc/exim4/update-exim4.conf.conf
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
state: present
mode: 0644
with_items:
- regexp: '^dc_eximconfig_configtype'
line: "dc_eximconfig_configtype='satellite'"
- regexp: '^dc_other_hostnames'
line: "dc_other_hostnames='{{ ansible_fqdn }}'"
- regexp: '^dc_local_interfaces'
line: "dc_local_interfaces='127.0.0.1'"
- regexp: '^dc_readhost'
line: "dc_readhost='{{ ansible_fqdn }}'"
- regexp: '^dc_relay_domains'
line: "dc_relay_domains=''"
- regexp: '^dc_minimaldns'
line: "dc_minimaldns='false'"
- regexp: '^dc_relay_nets'
line: "dc_relay_nets=''"
- regexp: '^dc_smarthost'
line: "dc_smarthost='{{ smtp_server }}'"
- regexp: '^CFILEMODE'
line: "CFILEMODE='644'"
- regexp: '^dc_use_split_config'
line: "dc_use_split_config='false'"
- regexp: '^dc_hide_mailname'
line: "dc_hide_mailname='true'"
- regexp: '^dc_mailname_in_oh'
line: "dc_mailname_in_oh='true'"
- regexp: '^dc_localdelivery'
line: "dc_localdelivery='mail_spool'"
notify: restart-exim
register: exim_config
- name: Update exim configuration
command: update-exim4.conf
when: exim_config.changed
- name: Send mail to verify relayhost configuration works
shell: >
echo "If you see this message, relayhost on {{ ansible_fqdn }} has been configured correctly." \
| mailx -s "Relayhost test for {{ ansible_fqdn }}" "{{ sysadmin_mail }}"
when: exim_config.changed

View File

@ -0,0 +1,9 @@
- name: Delete default user
user:
name: "{{ default_user }}"
state: absent
remove: yes
- name: Change root password
user:
name: root
password: "{{ root_password | password_hash('sha512') }}"

View File

@ -0,0 +1,21 @@
- name: Configure NTP server in /etc/systemd/timesyncd.conf
  lineinfile:
    path: /etc/systemd/timesyncd.conf
    regexp: '^#NTP'
    line: "NTP={{ time_server }}"
    owner: root
    group: root
    mode: '0644'
  notify: restart-timesyncd
- name: Configure fallback NTP server in /etc/systemd/timesyncd.conf
  lineinfile:
    path: /etc/systemd/timesyncd.conf
    regexp: '^#?FallbackNTP='
    line: "FallbackNTP=ntp.roa.es"
    owner: root
    group: root
    mode: '0644'
  notify: restart-timesyncd
- name: Service should start on boot
service:
name: systemd-timesyncd
enabled: yes

View File

@ -0,0 +1,2 @@
- name: Configure the time zone
script: set-timezone.sh

View File

@ -6,6 +6,6 @@
copy:
src: vimrc.local
dest: /etc/vim/
mode: u=rw,g=r,o=r
mode: '644'
owner: root
group: root

View File

@ -0,0 +1,12 @@
- name: Download vn-host Debian package
get_url:
url: "{{ vn_host.url }}/{{ vn_host.package }}"
dest: "/tmp/{{ vn_host.package }}"
mode: '0644'
- name: Install package
apt:
deb: "/tmp/{{ vn_host.package }}"
- name: Delete package
file:
path: "/tmp/{{ vn_host.package }}"
state: absent

View File

@ -1,10 +1,10 @@
Director {
Name = bacula-dir
Password = "{{ bacula_passwords.fdpasswd }}"
Password = "{{ FDPASSWD }}"
}
Director {
Name = bacula-mon
Password = "{{ bacula_passwords.fdmpasswd }}"
Password = "{{ FDMPASSWD }}"
Monitor = yes
}
FileDaemon {

View File

@ -14,9 +14,7 @@ action = %(action_)s
#+++++++++++++++ Jails
[sshd]
ignoreip = 127.0.0.1/8
enabled = true
port = 0:65535
filter = sshd
logpath = {{ fail2ban.logpath }}
action = %(action_mwl)s
logpath = %(sshd_log)s

View File

@ -1,5 +1,4 @@
allowed_hosts={{ nagios_server }}
server_address={{ ansible_default_ipv4.address }}
command[check_disk_root]=/usr/lib/nagios/plugins/check_disk -w 10% -c 5% -p /
command[check_disk_var]=/usr/lib/nagios/plugins/check_disk -w 10% -c 5% -p /var

View File

@ -0,0 +1,3 @@
vn_host:
url: http://apt.verdnatura.es/pool/main/v/vn-host
package: vn-host_2.0.2_all.deb

View File

@ -0,0 +1,6 @@
- name: restart-nslcd
service:
name: nslcd
state: restarted
- name: pam-update-ldap
shell: pam-auth-update --enable ldap

View File

@ -11,7 +11,7 @@
mode: '0640'
notify:
- restart-nslcd
register: nslcd
- pam-update-ldap
- name: Configure nsswitch to use NSLCD
lineinfile:
dest: /etc/nsswitch.conf

View File

@ -0,0 +1,4 @@
- import_tasks: auth.yml
tags: auth
- import_tasks: sudoers.yml
tags: sudoers

View File

@ -8,7 +8,7 @@ idle_timelimit 60
base {{ ldap_base }}
binddn cn=nss,ou=admins,{{ ldap_base }}
bindpw {{ lookup(passbolt, 'nslcd', folder_parent_id=passbolt_folder).password }}
bindpw {{ nslcd_password }}
pagesize 500
filter group (&(objectClass=posixGroup)(cn={{ sysadmin_group }}))

View File

@ -0,0 +1 @@
homes_path: /mnt/homes

Some files were not shown because too many files have changed in this diff