Merge pull request 'Release 10.1.0' (#113) from unstable into stable
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/tag Build is passing Details

Reviewed-on: #113
This commit is contained in:
Jérémy Lecour 2020-08-21 14:51:29 +02:00
commit 8460938f35
97 changed files with 2211 additions and 528 deletions

View File

@ -20,6 +20,63 @@ The **patch** part changes incrementally at each release.
### Security
## [10.1.0] 2020-08-21
### Added
* certbot: detect HAProxy cert directory
* filebeat: allow using a template
* generate-ldif: add NVMe disk support
* haproxy: add deny_ips file to reject connections
* haproxy: add some comments to default config
* haproxy: enable stats frontend with access lists
* haproxy: preconfigure SSL with defaults
* lxc-php: Don't disable putenv() by default in PHP settings
* lxc-php: Install php-sqlite by default
* metricbeat: allow using a template
* mysql: activate binary logs by specifying log_bin path
* mysql: option to define as read only
* mysql: specify a custom server_id
* nagios-nrpe/evolinux-base: brand new check for hardware raid on HP servers gen 10
* nginx: make default vhost configurable
* packweb-apache: Install zip & unzip by default
* php: Don't disable putenv() by default in PHP settings
* php: Install php-sqlite by default
### Changed
* certbot: fix haproxy hook (ssl cert directory detection)
* certbot: install certbot dependencies non-interactively for jessie
* elasticsearch: configure cluster with seed hosts and initial masters
* elasticsearch: set tmpdir before datadir
* evoacme: read values from environment before defaults file
* evoacme: update for new certbot role
* evoacme: upstream release 20.08
* haproxy: adapt backports installed package list to distribution
* haproxy: chroot and socket path are configurable
* haproxy: deport SSL tuning to Mozilla SSL generator
* haproxy: rotate logs with date extension and immediate compression
* haproxy: split stats variables
* lxc-php: Do --no-install-recommends for ssmtp/opensmtpd
* mongodb: install custom munin plugins
* nginx: read server-status values before changing the config
* packweb-apache: Don't turn on mod-evasive emails by default
* redis: create sudoers file if missing
* redis: new syntax for match filter
* redis: raise an error if port 6379 is used in "instance" mode
### Fixed
* certbot: restore compatibility with old Nginx
* evobackup-client: fixed the ssh connection test
* generate-ldif: better detection of computerOS field
* generate-ldif: skip some odd ethernet devices
* lxc-php: Install opensmtpd as intended
* mongodb: fix logrotate pattern on Debian buster
* nagios-nrpe: check_amavis: updated regex
* squid: better regex to match sa-update domains
* varnish: fix start command when multiple addresses are present
## [10.0.0] - 2020-05-13
### Added
@ -45,8 +102,6 @@ The **patch** part changes incrementally at each release.
* minifirewall: add a variable to force the check scripts update
* mongodb: compatibility with Debian 10
* mysql-oracle: backport tasks from mysql role
* mysql: activate binary logs by specifying log_bin path
* mysql: specify a custom server_id
* networkd-to-ifconfig: add variables for configuration by variables
* packweb-apache: Deploy opcache.php to give some insights on PHP's opcache status
* php: variable to install the mysqlnd module instead of the default mysql module

View File

@ -16,7 +16,7 @@ found_renewed_lineage() {
test -f "${RENEWED_LINEAGE}/fullchain.pem" && test -f "${RENEWED_LINEAGE}/privkey.pem"
}
config_check() {
${haproxy_bin} -c -f /etc/haproxy/haproxy.cfg > /dev/null 2>&1
${haproxy_bin} -c -f "${haproxy_config_file}" > /dev/null 2>&1
}
concat_files() {
# shellcheck disable=SC2174
@ -34,6 +34,22 @@ cert_and_key_mismatch() {
test "${haproxy_cert_md5}" != "${haproxy_key_md5}"
}
detect_haproxy_cert_dir() {
# get last field of line which defines the crt directory
config_cert_dir=$(grep -r -o -E -h '^\s*bind .* crt /etc/\S+' "${haproxy_config_file}" | head -1 | awk '{ print $(NF)}')
if [ -n "${config_cert_dir}" ]; then
debug "Cert directory is configured with ${config_cert_dir}"
echo "${config_cert_dir}"
elif [ -d "/etc/haproxy/ssl" ]; then
debug "No configured cert directory found, but /etc/haproxy/ssl exists"
echo "/etc/haproxy/ssl"
elif [ -d "/etc/ssl/haproxy" ]; then
debug "No configured cert directory found, but /etc/ssl/haproxy exists"
echo "/etc/ssl/haproxy"
else
error "Cert directory not found."
fi
}
main() {
if [ -z "${RENEWED_LINEAGE}" ]; then
error "This script must be called only by certbot!"
@ -70,6 +86,7 @@ readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
readonly haproxy_bin=$(command -v haproxy)
readonly haproxy_cert_dir="/etc/ssl/haproxy"
readonly haproxy_config_file="/etc/haproxy/haproxy.cfg"
readonly haproxy_cert_dir=$(detect_haproxy_cert_dir)
main

View File

@ -20,4 +20,4 @@
daemon_reload: yes
- name: install certbot-auto
command: /usr/local/bin/certbot --install-only
command: /usr/local/bin/certbot --noninteractive --install-only

View File

@ -1,5 +1,9 @@
location ~ /.well-known/acme-challenge {
{% if ansible_distribution == "Debian" and ansible_distribution_major_version is version('8', '<=') %}
alias {{ certbot_work_dir }}/.well-known/acme-challenge;
{% else %}
alias {{ certbot_work_dir }}/;
{% endif %}
try_files $uri =404;
allow all;
}

View File

@ -5,9 +5,12 @@ elasticsearch_cluster_name: Null
elasticsearch_cluster_members: Null
elasticsearch_minimum_master_nodes: Null
elasticsearch_node_name: "${HOSTNAME}"
elasticsearch_network_host: "[_local_]"
elasticsearch_network_host:
- "_local_"
elasticsearch_network_publish_host: Null
elasticsearch_http_publish_host: Null
elasticsearch_discovery_seed_hosts: Null
elasticsearch_cluster_initial_master_nodes: Null
elasticsearch_custom_datadir: Null
elasticsearch_custom_tmpdir: Null
elasticsearch_default_tmpdir: /var/lib/elasticsearch/tmp

View File

@ -14,6 +14,7 @@ galaxy_info:
versions:
- jessie
- stretch
- buster
galaxy_tags: []
# List tags for your role here, one per line. A tag is

View File

@ -22,7 +22,7 @@
- name: Configure network host
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "network.host: {{ elasticsearch_network_host }}"
line: "network.host: {{ elasticsearch_network_host }}"
regexp: "^network.host:"
insertafter: "^# *network.host:"
when: elasticsearch_network_host|default("", True)
@ -32,7 +32,7 @@
- name: Configure network publish_host
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "network.publish_host: {{ elasticsearch_network_publish_host }}"
line: "network.publish_host: {{ elasticsearch_network_publish_host }}"
regexp: "^network.publish_host:"
insertafter: "^network.host:"
when: elasticsearch_network_publish_host|default("", True)
@ -42,13 +42,31 @@
- name: Configure http publish_host
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "http.publish_host: {{ elasticsearch_http_publish_host }}"
line: "http.publish_host: {{ elasticsearch_http_publish_host }}"
regexp: "^http.publish_host:"
insertafter: "^http.port:"
when: elasticsearch_http_publish_host|default("", True)
tags:
- config
- name: Configure discovery seed hosts
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "discovery.seed_hosts: {{ elasticsearch_discovery_seed_hosts | to_yaml }}"
regexp: "^discovery.seed_hosts:"
when: elasticsearch_discovery_seed_hosts
tags:
- config
- name: Configure initial master nodes
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "cluster.initial_master_nodes: {{ elasticsearch_cluster_initial_master_nodes | to_yaml }}"
regexp: "^cluster.initial_master_nodes:"
when: elasticsearch_cluster_initial_master_nodes
tags:
- config
- name: Configure RESTART_ON_UPGRADE
lineinfile:
dest: /etc/default/elasticsearch
@ -93,5 +111,3 @@
when: elasticsearch_minimum_master_nodes|default("", True)
tags:
- config

View File

@ -6,10 +6,10 @@
- include: bootstrap_checks.yml
- include: datadir.yml
- include: tmpdir.yml
- include: datadir.yml
- include: logs.yml
- include: additional_scripts.yml

View File

@ -5,7 +5,7 @@ evoacme_dhparam_size: 2048
evoacme_acme_dir: /var/lib/letsencrypt
evoacme_csr_dir: /etc/ssl/requests
evoacme_crt_dir: /etc/letsencrypt
evoacme_hooks_dir: "{{ evoacme_crt_dir }}/hooks"
evoacme_hooks_dir: "{{ evoacme_crt_dir }}/renewal-hooks/deploy"
evoacme_log_dir: /var/log/evoacme
evoacme_ssl_minday: 30
evoacme_ssl_ct: 'FR'

View File

@ -118,21 +118,21 @@ main() {
[ "$1" = "-V" ] || [ "$1" = "--version" ] && show_version && exit 0
mkdir -p "${ACME_DIR}"
chown acme: "${ACME_DIR}"
chown root: "${ACME_DIR}"
[ -w "${ACME_DIR}" ] || error "Directory ${ACME_DIR} is not writable"
[ -d "${CSR_DIR}" ] || error "Directory ${CSR_DIR} is not found"
mkdir -p "${CRT_DIR}"
chown acme: "${CRT_DIR}"
chown root: "${CRT_DIR}"
[ -w "${CRT_DIR}" ] || error "Directory ${CRT_DIR} is not writable"
mkdir -p "${LOG_DIR}"
chown acme: "${LOG_DIR}"
chown root: "${LOG_DIR}"
[ -w "${LOG_DIR}" ] || error "Directory ${LOG_DIR} is not writable"
mkdir -p "${HOOKS_DIR}"
chown acme: "${HOOKS_DIR}"
chown root: "${HOOKS_DIR}"
[ -d "${HOOKS_DIR}" ] || error "Directory ${HOOKS_DIR} is not found"
readonly VHOST=$(basename "$1" .conf)
@ -195,7 +195,7 @@ main() {
[ -d "${NEW_DIR}" ] && error "${NEW_DIR} directory already exists, remove it manually."
mkdir -p "${NEW_DIR}"
chown -R acme: "${CRT_DIR}"
chown -R root: "${CRT_DIR}"
chmod -R 0700 "${CRT_DIR}"
chmod -R g+rX "${CRT_DIR}"
debug "New cert will be created in ${NEW_DIR}"
@ -218,15 +218,14 @@ main() {
CERTBOT_REGISTRATION="${CERTBOT_REGISTRATION} --register-unsafely-without-email"
fi
# Permissions checks for acme user
sudo -u acme test -r "${CSR_FILE}" || error "File ${CSR_FILE} is not readable by user 'acme'"
sudo -u acme test -w "${NEW_DIR}" || error "Directory ${NEW_DIR} is not writable by user 'acme'"
# Permissions checks
test -r "${CSR_FILE}" || error "File ${CSR_FILE} is not readable"
test -w "${NEW_DIR}" || error "Directory ${NEW_DIR} is not writable"
# create a certificate with certbot
# we disable the set -e during the certbot call
set +e
sudo -u acme \
"${CERTBOT_BIN}" \
"${CERTBOT_BIN}" \
certonly \
${CERTBOT_MODE} \
${CERTBOT_REGISTRATION} \
@ -286,7 +285,7 @@ main() {
export EVOACME_FULLCHAIN="${LIVE_FULLCHAIN}"
# search for files in hooks directory
for hook in $(find ${HOOKS_DIR} -type f); do
for hook in $(find ${HOOKS_DIR} -type f -executable | sort); do
# keep only executables files, not containing a "."
if [ -x "${hook}" ] && (basename "${hook}" | grep -vqF "."); then
debug "Executing ${hook}"
@ -304,7 +303,7 @@ readonly QUIET=${QUIET:-"0"}
readonly TEST=${TEST:-"0"}
readonly DRY_RUN=${DRY_RUN:-"0"}
readonly VERSION="19.11"
readonly VERSION="20.08"
# Read configuration file, if it exists
[ -r /etc/default/evoacme ] && . /etc/default/evoacme
@ -315,7 +314,7 @@ readonly ACME_DIR=${ACME_DIR:-"/var/lib/letsencrypt"}
readonly CSR_DIR=${CSR_DIR:-"/etc/ssl/requests"}
readonly CRT_DIR=${CRT_DIR:-"/etc/letsencrypt"}
readonly LOG_DIR=${LOG_DIR:-"/var/log/evoacme"}
readonly HOOKS_DIR=${HOOKS_DIR:-"${CRT_DIR}/hooks"}
readonly HOOKS_DIR=${HOOKS_DIR:-"${CRT_DIR}/renewal-hooks/deploy"}
readonly SSL_MINDAY=${SSL_MINDAY:-"30"}
readonly SSL_EMAIL=${SSL_EMAIL:-""}

View File

@ -1,18 +0,0 @@
#!/bin/sh
# Deploy hook: commit Let's Encrypt certificate changes to the /etc git repository.
git_bin=$(command -v git)
letsencrypt_dir=/etc/letsencrypt
# Operate on the system-wide /etc repository (etckeeper-style layout).
export GIT_DIR="/etc/.git"
export GIT_WORK_TREE="/etc"
# Only act when git is available and /etc is actually a git work tree.
if test -x "${git_bin}" && test -d "${GIT_DIR}" && test -d "${GIT_WORK_TREE}"; then
# Count pending changes restricted to the letsencrypt directory.
changed_lines=$(${git_bin} status --porcelain -- ${letsencrypt_dir} | wc -l | tr -d ' ')
if [ "${changed_lines}" != "0" ]; then
${git_bin} add --all ${letsencrypt_dir}
# NOTE(review): RENEWED_DOMAINS is presumably exported by certbot when it
# invokes renewal hooks — confirm against the caller.
message="[letsencrypt] certificates renewal (${RENEWED_DOMAINS})"
${git_bin} commit --message "${message}" --quiet
else
echo "Weird, nothing has changed but the hook has been executed for '${RENEWED_DOMAINS}'"
fi
fi

View File

@ -1,30 +0,0 @@
#!/bin/sh
# Deploy hook: reload Apache after certificate renewal, only if its config validates.
readonly PROGNAME=$(basename "$0")
# shellcheck disable=SC2124,SC2034
readonly ARGS=$@
# VERBOSE=1 enables debug output; QUIET=1 suppresses it.
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
# Print a message on stderr and abort with status 1.
error() {
>&2 echo "${PROGNAME}: $1"
exit 1
}
# Print a message on stderr when VERBOSE=1 and QUIET is not 1.
debug() {
if [ "${VERBOSE}" = "1" ] && [ "${QUIET}" != "1" ]; then
>&2 echo "${PROGNAME}: $1"
fi
}
# Reload only when Apache is running and its configuration passes "apache2ctl -t".
if [ -n "$(pidof apache2)" ]; then
# shellcheck disable=SC2091
if $($(command -v apache2ctl) -t 2> /dev/null); then
debug "Apache detected... reloading"
service apache2 reload
else
error " Apache config is broken, you must fix it !"
fi
else
debug "Apache is not running. Skip."
fi

View File

@ -1,35 +0,0 @@
#!/bin/sh
# Deploy hook: reload Dovecot after certificate renewal, only if its config is
# valid AND it actually serves a Let's Encrypt certificate.
readonly PROGNAME=$(basename "$0")
# shellcheck disable=SC2124,SC2034
readonly ARGS=$@
# VERBOSE=1 enables debug output; QUIET=1 suppresses it.
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
# Print a message on stderr and abort with status 1.
error() {
>&2 echo "${PROGNAME}: $1"
exit 1
}
# Print a message on stderr when VERBOSE=1 and QUIET is not 1.
debug() {
if [ "${VERBOSE}" = "1" ] && [ "${QUIET}" != "1" ]; then
>&2 echo "${PROGNAME}: $1"
fi
}
if [ -n "$(pidof dovecot)" ]; then
# doveconf exits non-zero on a broken configuration.
# shellcheck disable=SC2091
if $($(command -v doveconf) > /dev/null); then
# Reload only if ssl_cert points at a letsencrypt path.
# shellcheck disable=SC2091
if $($(command -v doveconf)|grep -E "^ssl_cert[^_]"|grep -q "letsencrypt"); then
debug "Dovecot detected... reloading"
service dovecot reload
else
debug "Dovecot doesn't use Let's Encrypt certificate. Skip."
fi
else
error "Dovecot config is broken, you must fix it !"
fi
else
debug "Dovecot is not running. Skip."
fi

View File

@ -1,30 +0,0 @@
#!/bin/sh
# Deploy hook: reload Nginx after certificate renewal, only if its config validates.
readonly PROGNAME=$(basename "$0")
# shellcheck disable=SC2124,SC2034
readonly ARGS=$@
# VERBOSE=1 enables debug output; QUIET=1 suppresses it.
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
# Print a message on stderr and abort with status 1.
error() {
>&2 echo "${PROGNAME}: $1"
exit 1
}
# Print a message on stderr when VERBOSE=1 and QUIET is not 1.
debug() {
if [ "${VERBOSE}" = "1" ] && [ "${QUIET}" != "1" ]; then
>&2 echo "${PROGNAME}: $1"
fi
}
# Reload only when Nginx is running and its configuration passes "nginx -t".
if [ -n "$(pidof nginx)" ]; then
# shellcheck disable=SC2091
if $($(command -v nginx) -t 2> /dev/null); then
debug "Nginx detected... reloading"
service nginx reload
else
error "Nginx config is broken, you must fix it !"
fi
else
debug "Nginx is not running. Skip."
fi

View File

@ -1,35 +0,0 @@
#!/bin/sh
# Deploy hook: reload Postfix after certificate renewal, only if its config is
# valid AND it actually serves a Let's Encrypt certificate.
readonly PROGNAME=$(basename "$0")
# shellcheck disable=SC2124,SC2034
readonly ARGS=$@
# VERBOSE=1 enables debug output; QUIET=1 suppresses it.
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
# Print a message on stderr and abort with status 1.
error() {
>&2 echo "${PROGNAME}: $1"
exit 1
}
# Print a message on stderr when VERBOSE=1 and QUIET is not 1.
debug() {
if [ "${VERBOSE}" = "1" ] && [ "${QUIET}" != "1" ]; then
>&2 echo "${PROGNAME}: $1"
fi
}
# "master" is the Postfix supervisor process.
if [ -n "$(pidof master)" ]; then
# postconf exits non-zero on a broken configuration.
# shellcheck disable=SC2091
if $($(command -v postconf) > /dev/null); then
# Reload only if smtpd_tls_cert_file points at a letsencrypt path.
# shellcheck disable=SC2091
if $($(command -v postconf)|grep -E "^smtpd_tls_cert_file"|grep -q "letsencrypt"); then
debug "Postfix detected... reloading"
service postfix reload
else
debug "Postfix doesn't use Let's Encrypt certificate. Skip."
fi
else
error "Postfix config is broken, you must fix it !"
fi
else
debug "Postfix is not running. Skip."
fi

View File

@ -204,8 +204,8 @@ main() {
[ "$1" = "-V" ] || [ "$1" = "--version" ] && show_version && exit 0
if [ -t 0 ]; then
# We have STDIN, so we should have 2 arguments
[ "$#" -eq 2 ] || error "invalid argument(s)"
# We have STDIN, so we should have at least 2 arguments
[ "$#" -ge 2 ] || error "invalid argument(s)"
# read VHOST from first argument
VHOST="$1"
@ -265,7 +265,7 @@ readonly ARGS=$@
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
readonly VERSION="19.11"
readonly VERSION="20.08"
# Read configuration file, if it exists
[ -r /etc/default/evoacme ] && . /etc/default/evoacme

View File

@ -170,7 +170,7 @@ readonly ARGS=$@
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
readonly VERSION="19.11"
readonly VERSION="20.08"
readonly SRV_IP=${SRV_IP:-""}

View File

@ -1,61 +0,0 @@
---
- name: Create acme group
group:
name: acme
state: present
- name: Create acme user
user:
name: acme
group: acme
state: present
createhome: no
home: "{{ evoacme_acme_dir }}"
shell: /bin/false
system: yes
- name: Fix crt dir's right
file:
path: "{{ evoacme_crt_dir }}"
mode: "0755"
owner: acme
group: acme
state: directory
- name: "Fix hooks directory permissions"
file:
path: "{{ evoacme_hooks_dir }}"
mode: "0700"
owner: acme
group: acme
state: directory
- name: Fix log dir's right
file:
path: "{{ evoacme_log_dir }}"
mode: "0755"
owner: acme
group: acme
state: directory
- name: Fix challenge dir's right
file:
path: "{{ evoacme_acme_dir }}"
mode: "0755"
owner: acme
group: acme
state: directory
- name: Is /etc/aliases present?
stat:
path: /etc/aliases
register: etc_aliases
- name: Set acme aliases
lineinfile:
state: present
dest: /etc/aliases
line: 'acme: root'
regexp: 'acme:'
when: etc_aliases.stat.exists
notify: "newaliases"

View File

@ -1,25 +0,0 @@
- name: Create conf dirs
file:
path: "/etc/apache2/{{ item }}"
state: directory
with_items:
- 'conf-available'
- 'conf-enabled'
- name: Copy acme challenge conf
template:
src: templates/apache.conf.j2
dest: /etc/apache2/conf-available/letsencrypt.conf
owner: root
group: root
mode: "0644"
notify: reload apache2
- name: Enable acme challenge conf
file:
src: /etc/apache2/conf-available/letsencrypt.conf
dest: /etc/apache2/conf-enabled/letsencrypt.conf
state: link
owner: root
group: root
notify: reload apache2

View File

@ -1,45 +1,20 @@
---
- name: Use backports for jessie
block:
- name: install jessie-backports
include_role:
name: evolix/apt
tasks_from: backports.yml
- name: Add exceptions for certbot dependencies
copy:
src: backports-certbot
dest: /etc/apt/preferences.d/z-backports-certbot
notify: apt update
- meta: flush_handlers
when: ansible_distribution_release == "jessie"
- name: Install certbot with apt
apt:
name: certbot
state: latest
- include_role:
name: evolix/certbot
- include_role:
name: evolix/remount-usr
- name: Remove certbot symlink for apt install
file:
path: /usr/local/bin/certbot
state: absent
- name: Disable /etc/cron.d/certbot
command: mv /etc/cron.d/certbot /etc/cron.d/certbot.disabled
command: mv -f /etc/cron.d/certbot /etc/cron.d/certbot.disabled
args:
removes: /etc/cron.d/certbot
creates: /etc/cron.d/certbot.disabled
- name: Disable /etc/cron.daily/certbot
command: mv /etc/cron.daily/certbot /etc/cron.daily/certbot.disabled
command: mv -f /etc/cron.daily/certbot /etc/cron.daily/certbot.disabled
args:
removes: /etc/cron.daily/certbot
creates: /etc/cron.daily/certbot.disabled
- name: Install evoacme custom cron
copy:

View File

@ -1,5 +1,10 @@
---
- name: "Create {{ hook_name }} hook directory"
file:
dest: "{{ evoacme_hooks_dir }}"
state: directory
- name: "Search for {{ hook_name }} hook"
command: "find {{ evoacme_hooks_dir }} -type f \\( -name '{{ hook_name }}' -o -name '{{ hook_name }}.*' \\)"
check_mode: no

View File

@ -7,36 +7,14 @@
- include: certbot.yml
- include: acme.yml
- include: permissions.yml
- include: evoacme_hook.yml
vars:
hook_name: "{{ item }}"
with_items:
- reload_apache
- reload_nginx
- reload_dovecot
- reload_postfix
- commit
# Enable this task if you want to deploy hooks
# - include: evoacme_hook.yml
# vars:
# hook_name: "{{ item }}"
# loop: []
- include: conf.yml
- include: scripts.yml
- name: Determine Apache presence
stat:
path: /etc/apache2/apache2.conf
check_mode: no
register: sta
- name: Determine Nginx presence
stat:
path: /etc/nginx/nginx.conf
check_mode: no
register: stn
- include: apache.yml
when: sta.stat.isreg is defined and sta.stat.isreg
- include: nginx.yml
when: stn.stat.isreg is defined and stn.stat.isreg

View File

@ -1,35 +0,0 @@
---
- name: move acme challenge conf if missplaced
command: mv /etc/nginx/letsencrypt.conf /etc/nginx/snippets/letsencrypt.conf
args:
removes: /etc/nginx/letsencrypt.conf
creates: /etc/nginx/snippets/letsencrypt.conf
- name: Copy acme challenge conf
template:
src: templates/nginx.conf.j2
dest: /etc/nginx/snippets/letsencrypt.conf
owner: root
group: root
mode: "0644"
- name: look for old path
command: grep -r /etc/nginx/letsencrypt.conf /etc/nginx
changed_when: False
failed_when: False
check_mode: no
register: grep_letsencrypt_old_path
- name: Keep a symlink for vhosts with old path
file:
src: /etc/nginx/snippets/letsencrypt.conf
dest: /etc/nginx/letsencrypt.conf
state: link
when: grep_letsencrypt_old_path.rc == 0
- name: Remove symlink if no vhost with old path
file:
dest: /etc/nginx/letsencrypt.conf
state: absent
when: grep_letsencrypt_old_path.rc == 1

View File

@ -0,0 +1,33 @@
---
- name: Fix crt directory permissions
file:
path: "{{ evoacme_crt_dir }}"
mode: "0755"
owner: root
group: root
state: directory
- name: "Fix hooks directory permissions"
file:
path: "{{ evoacme_hooks_dir }}"
mode: "0700"
owner: root
group: root
state: directory
- name: Fix log directory permissions
file:
path: "{{ evoacme_log_dir }}"
mode: "0755"
owner: root
group: root
state: directory
- name: Fix challenge directory permissions
file:
path: "{{ evoacme_acme_dir }}"
mode: "0755"
owner: root
group: root
state: directory

View File

@ -1,8 +1,9 @@
### File generated by Ansible ###
SSL_KEY_DIR={{ evoacme_ssl_key_dir }}
ACME_DIR={{ evoacme_acme_dir }}
CSR_DIR={{ evoacme_csr_dir }}
CRT_DIR={{ evoacme_crt_dir }}
LOG_DIR={{ evoacme_log_dir }}
SSL_MINDAY={{ evoacme_ssl_minday }}
SSL_KEY_DIR=${SSL_KEY_DIR:-{{ evoacme_ssl_key_dir }}}
ACME_DIR=${ACME_DIR:-{{ evoacme_acme_dir }}}
CSR_DIR=${CSR_DIR:-{{ evoacme_csr_dir }}}
CRT_DIR=${CRT_DIR:-{{ evoacme_crt_dir }}}
HOOKS_DIR=${HOOKS_DIR:-"{{ evoacme_hooks_dir }}"}
LOG_DIR=${LOG_DIR:-{{ evoacme_log_dir }}}
SSL_MINDAY=${SSL_MINDAY:-{{ evoacme_ssl_minday }}}

View File

@ -1,5 +1,5 @@
#!/bin/sh
# Careful, the zzz_evobackup template was last updated on 2020/04/15
# Careful, the zzz_evobackup template was last updated on 2020/06/08
#
# Script Evobackup client
# See https://gitea.evolix.org/evolix/evobackup
@ -76,7 +76,7 @@ test_server() {
port=$(echo "${item}" | cut -d':' -f2)
# Test if the server is accepting connections
ssh -q -o "ConnectTimeout ${SSH_CONNECT_TIMEOUT}" -i /root/.ssh/evobackup_id "${host}" -p "${port}" -t "exit"
ssh -q -o "ConnectTimeout ${SSH_CONNECT_TIMEOUT}" -i {{ evobackup_client__root_key_path }} "${host}" -p "${port}" -t "exit"
# shellcheck disable=SC2181
if [ $? = 0 ]; then
# SSH connection is OK

View File

@ -214,3 +214,6 @@ evolinux_listupgrade_include: True
# Generate ldif
evolinux_generateldif_include: True
# Cron check_hpraid
evolinux_cron_checkhpraid_frequency: daily

View File

@ -0,0 +1,91 @@
#!/usr/bin/env bash
set -euo pipefail
# This script is meant to be executed as a cron by executing Nagios
# NRPE plugin check_hpraid and notify by mail any errors
TMPDIR=/tmp
md5sum=$(command -v md5sum)
awk=$(command -v awk)
check_hpraid="/usr/local/lib/nagios/plugins/check_hpraid -v -p"
# Fresh temp file for this run's plugin output; "last" keeps the previous state.
check_hpraid_output=$(mktemp -p $TMPDIR check_hpraid_XXX)
check_hpraid_last="$TMPDIR/check_hpraid_last"
# set to false to use cron output (MAILTO)
# otherwise send output with mail command
use_mail=true
body=$(mktemp --tmpdir=/tmp check_hpraid_XXX)
# Mail recipient and host identity are read from the evomaintenance config.
clientmail=$(grep EVOMAINTMAIL /etc/evomaintenance.cf | cut -d'=' -f2)
hostname=$(grep HOSTNAME /etc/evomaintenance.cf | cut -d'=' -f2)
# Strip the ".evolix.net" suffix if present.
hostname=${hostname%%.evolix.net}
# If hostname is composed with -, remove the first part.
if [[ $hostname =~ "-" ]]; then
hostname=$(echo "$hostname" | cut -d'-' -f2-)
fi
# Always remove temp files, including on errors (set -e / ERR).
trap trapFunc EXIT ERR
# Verify required binaries are present.
testDeps() {
# NOTE(review): "exit 1" runs inside a subshell, so the script only stops
# because the non-zero status propagates through `set -e` — confirm intended.
test -x "$md5sum" || (echo "md5sum binary not found"; exit 1)
test -x "$awk" || (echo "awk binary not found"; exit 1)
}
main() {
# Run the NRPE plugin; a non-zero exit means a RAID error was detected.
if ! $check_hpraid > "$check_hpraid_output"; then
error=true
else
error=false
fi
# If check_hpraid returned error, display output, save status and
# exit
if $error; then
cp "$check_hpraid_output" "$check_hpraid_last"
if $use_mail; then
mail -s "RAID error on $hostname" "$clientmail" \
< "$check_hpraid_output"
else
cat "$check_hpraid_output"
fi
exit 1
fi
# First run: seed the "last" state so the next comparison has a baseline.
if [ ! -f $check_hpraid_last ]; then
cp "$check_hpraid_output" $check_hpraid_last
fi
# If output and last check is different, display differences and
# exit
md5_now=$(md5sum "$check_hpraid_output" | awk '{print $1}')
md5_last=$(md5sum $check_hpraid_last | awk '{print $1}')
if [[ "$md5_now" != "$md5_last" ]]; then
cat << EOT > "$body"
Different RAID state detected.
Was:
$(sed 's/^/> /g' "$check_hpraid_last")
###########################
Is now:
$(sed 's/^/> /g' "$check_hpraid_output")
EOT
if $use_mail; then
mail -s "RAID status is different on $hostname" \
"$clientmail" < "$body"
else
cat "$body"
fi
# Persist the new state so the change is reported only once.
cp "$check_hpraid_output" "$check_hpraid_last"
exit 1
fi
}
# Remove this run's temp files.
trapFunc() {
rm "$check_hpraid_output" "$body"
}
testDeps
main

View File

@ -25,15 +25,17 @@
when: broadcom_netextreme_search.rc == 0
## RAID
# Dell and others: MegaRAID SAS
# HP gen <10: Hewlett-Packard Company Smart Array
# HP gen >=10: Adaptec Smart Storage PQI
- name: Detect if RAID is installed
shell: lspci | grep "RAID bus controller" | grep -v Intel
shell: lspci -q | grep -e "RAID bus controller" -e "Serial Attached SCSI controller"
check_mode: no
register: raidmodel
changed_when: "'FAILED' in raidmodel.stdout"
failed_when: "'FAILED' in raidmodel.stdout"
- name: HP Smart Array package is present
- name: HPE Smart Storage Administrator (ssacli) is present
block:
- name: Add HPE GPG key
apt_key:
@ -44,28 +46,45 @@
apt_repository:
repo: 'deb https://downloads.linux.hpe.com/SDR/repo/mcp {{ ansible_distribution_release }}/current non-free'
state: present
- name: Install packages for HP hardware
- name: Install HPE Smart Storage Administrator (ssacli)
apt:
name:
- cciss-vol-status
- ssacli
name: ssacli
when:
- "'Hewlett-Packard Company Smart Array' in raidmodel.stdout"
- "'Adaptec Smart Storage PQI' in raidmodel.stdout"
# NOTE: check_hpraid cron use check_hpraid from nagios-nrpe role
# So, if nagios-nrpe role is not installed it will not work
- name: Install and configure check_hpraid cron (HP gen >=10)
block:
- name: check_hpraid cron is present (HP gen >=10)
copy:
src: check_hpraid.cron.sh
dest: /etc/cron.{{ evolinux_cron_checkhpraid_frequency | mandatory }}/check_hpraid
mode: "0755"
when: "'Adaptec Smart Storage PQI' in raidmodel.stdout"
- name: Install and configure cciss-vol-status (HP gen <10)
block:
- name: Install cciss-vol-status (HP gen <10)
apt:
name: cciss-vol-status
state: present
- name: cciss-vol-statusd init script is present
- name: cciss-vol-statusd init script is present (HP gen <10)
template:
src: hardware/cciss-vol-statusd.j2
dest: /etc/init.d/cciss-vol-statusd
mode: "0755"
- name: Configure cciss-vol-statusd
- name: Configure cciss-vol-statusd (HP gen <10)
lineinfile:
dest: /etc/default/cciss-vol-statusd
line: 'MAILTO="{{ raid_alert_email or general_alert_email | mandatory }}"'
regexp: 'MAILTO='
create: yes
- name: Enable HP hardware in systemd
- name: Enable cciss-vol-status in systemd (HP gen <10)
service:
name: cciss-vol-statusd
enabled: true

View File

@ -1,4 +1,8 @@
- debug:
msg: "Online DNS servers fails sometimes! Please change them in /etc/resolv.conf."
- name: custom NTP server for Online servers
set_fact:
nagios_nrpe_default_ntp_server: "ntp.online.net"
# - meta: flush_handlers

View File

@ -4,3 +4,14 @@ elastic_stack_version: "6.x"
filebeat_logstash_plugin: False
filebeat_processors_cloud_metadata: False
filebeat_elasticsearch_hosts:
- "localhost:9200"
filebeat_elasticsearch_protocol: "http"
filebeat_elasticsearch_auth_api_key: ""
filebeat_elasticsearch_auth_username: ""
filebeat_elasticsearch_auth_password: ""
filebeat_use_config_template: False
filebeat_update_config: True
filebeat_force_config: True

View File

@ -66,18 +66,79 @@
- logstash_plugin.stat.exists
- not logstash_plugin_installed | success
- name: cloud_metadata processor is disabled
replace:
dest: /etc/filebeat/filebeat.yml
regexp: '^(\s+)(- add_cloud_metadata:)'
replace: '\1# \2'
notify: restart filebeat
when: not filebeat_processors_cloud_metadata
# When we don't use a config template (default)
- block:
- name: cloud_metadata processor is disabled
replace:
dest: /etc/filebeat/filebeat.yml
regexp: '^(\s+)(- add_cloud_metadata:)'
replace: '\1# \2'
notify: restart filebeat
when: not filebeat_processors_cloud_metadata
- name: cloud_metadata processor is disabled
- name: cloud_metadata processor is disabled
lineinfile:
dest: /etc/filebeat/filebeat.yml
line: " - add_cloud_metadata: ~"
insert_after: '^processors:'
notify: restart filebeat
when: filebeat_processors_cloud_metadata
- name: Filebeat knows where to find Elasticsearch
lineinfile:
dest: /etc/filebeat/filebeat.yml
regexp: '^ hosts: .*'
line: " hosts: [\"{{ filebeat_elasticsearch_hosts | join('\", \"') }}\"]"
insertafter: "output.elasticsearch:"
notify: restart filebeat
when:
- filebeat_elasticsearch_hosts
- name: Filebeat protocol for Elasticsearch
lineinfile:
dest: /etc/filebeat/filebeat.yml
regexp: '^ #?protocol: .*'
line: " protocol: \"{{ filebeat_elasticsearch_protocol }}\""
insertafter: "output.elasticsearch:"
notify: restart filebeat
when: filebeat_elasticsearch_protocol == "http" or filebeat_elasticsearch_protocol == "https"
- name: Filebeat auth/username for Elasticsearch are configured
lineinfile:
dest: /etc/filebeat/filebeat.yml
regexp: '{{ item.regexp }}'
line: '{{ item.line }}'
insertafter: "output.elasticsearch:"
with_items:
- { regexp: '^ #?username: .*', line: ' username: "{{ filebeat_elasticsearch_auth_username }}"' }
- { regexp: '^ #?password: .*', line: ' password: "{{ filebeat_elasticsearch_auth_password }}"' }
notify: restart filebeat
when:
- filebeat_elasticsearch_auth_username
- filebeat_elasticsearch_auth_password
when: not filebeat_use_config_template
- name: Filebeat api_key for Elasticsearch are configured
lineinfile:
dest: /etc/filebeat/filebeat.yml
line: " - add_cloud_metadata: ~"
insert_after: '^processors:'
regexp: '^ #?api_key: .*'
line: ' api_key: "{{ filebeat_elasticsearch_auth_api_key }}"'
insertafter: "output.elasticsearch:"
notify: restart filebeat
when: filebeat_processors_cloud_metadata
when: filebeat_elasticsearch_auth_api_key
# When we use a config template
- block:
- name: Configuration is up-to-date
template:
src: "{{ item }}"
dest: /etc/filebeat/filebeat.yml
force: "{{ filebeat_force_config }}"
with_first_found:
- "templates/filebeat/filebeat.{{ inventory_hostname }}.yml.j2"
- "templates/filebeat/filebeat.{{ host_group }}.yml.j2"
- "templates/filebeat/filebeat.default.yml.j2"
- "filebeat.default.yml.j2"
notify: restart filebeat
when: filebeat_update_config
when: filebeat_use_config_template

View File

@ -0,0 +1,247 @@
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
# ============================== Filebeat inputs ===============================
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
- type: log
# Change to true to enable this input configuration.
enabled: false
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /var/log/*.log
#- c:\programdata\elasticsearch\logs\*
# Exclude lines. A list of regular expressions to match. It drops the lines that are
# matching any regular expression from the list.
#exclude_lines: ['^DBG']
# Include lines. A list of regular expressions to match. It exports the lines that are
# matching any regular expression from the list.
#include_lines: ['^ERR', '^WARN']
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
# are matching any regular expression from the list. By default, no files are dropped.
#exclude_files: ['.gz$']
# Optional additional fields. These fields can be freely picked
# to add additional information to the crawled log files for filtering
#fields:
# level: debug
# review: 1
### Multiline options
# Multiline can be used for log messages spanning multiple lines. This is common
# for Java Stack Traces or C-Line Continuation
# The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
#multiline.pattern: ^\[
# Defines if the pattern set under pattern should be negated or not. Default is false.
#multiline.negate: false
# Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
# that was (not) matched before or after, or as long as a pattern is not matched based on negate.
# Note: After is the equivalent to previous and before is the equivalent to next in Logstash
#multiline.match: after
# ============================== Filebeat modules ==============================
filebeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
# ======================= Elasticsearch template setting =======================
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
# env: staging
# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# =============================== Elastic Cloud ================================
# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["{{ filebeat_elasticsearch_hosts | join('", "') }}"]
# Protocol - either `http` (default) or `https`.
protocol: "{{ filebeat_elasticsearch_protocol | default('http') }}"
# Authentication credentials - either API key or username/password.
{% if filebeat_elasticsearch_auth_api_key %}
api_key: "{{ filebeat_elasticsearch_auth_api_key }}"
{% endif %}
{% if filebeat_elasticsearch_auth_username %}
username: "{{ filebeat_elasticsearch_auth_username }}"
{% endif %}
{% if filebeat_elasticsearch_auth_password %}
password: "{{ filebeat_elasticsearch_auth_password }}"
{% endif %}
# ------------------------------ Logstash Output -------------------------------
#output.logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# ================================= Processors =================================
processors:
- add_host_metadata: ~
{% if filebeat_processors_cloud_metadata %}
- add_cloud_metadata: ~
{% endif %}
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# ============================== Instrumentation ===============================
# Instrumentation support for the filebeat.
#instrumentation:
# Set to true to enable instrumentation of filebeat.
#enabled: false
# Environment in which filebeat is running on (eg: staging, production, etc.)
#environment: ""
# APM Server hosts to report instrumentation results to.
#hosts:
# - http://localhost:8200
# API Key for the APM Server(s).
# If api_key is set then secret_token will be ignored.
#api_key:
# Secret token for the APM Server(s).
#secret_token:
# ================================= Migration ==================================
# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true

View File

@ -25,7 +25,8 @@ EvoComputerName=$(hostname -s)
dnsPTRrecord=$(hostname -f)
HardwareMark=$(dmidecode -s system-manufacturer | grep -v '^#')
computerIP=$(hostname -i | cut -d' ' -f1)
computerOS=$(lsb_release -s -d | sed 's#\..##')
# The sed part does not work for squeeze and previous
computerOS=$(lsb_release -s -d | sed -E 's#\.[0-9]{1,}##')
computerKernel=$(uname -r)
HardwareSerial=$(dmidecode -s system-serial-number | grep -v '^#')
@ -71,7 +72,7 @@ if (test -b /dev/vda); then
sdaModel="Virtual VirtIO Disk"
elif [ -d /proc/vz ] && [ ! -d /proc/bc ]; then
sdaModel="OpenVZ SIMFS disk"
else
elif (lsblk -d -r -n -o TYPE,SIZE,PATH | grep -q sda); then
hdparm -I /dev/sda 2>&1 | grep -q bad
if [ $? -eq 0 ]; then
if (test -n "${raidModel}"); then
@ -82,6 +83,9 @@ else
else
sdaModel=$(hdparm -I /dev/sda | grep Model | tr -s '\t' ' ' | cut -d' ' -f4-)
fi
# hdparm does not support NVME, use smartctl
elif (lsblk -d -r -n -o TYPE,SIZE,PATH | grep -q nvme); then
sdaModel="SSD NVMe: $(smartctl -a /dev/nvme0n1 | grep "Model Number" | tr -s ' ' | cut -d' ' -f3-)"
fi
ldif_file="/root/${EvoComputerName}.$(date +"%Y%m%d%H%M%S").ldif"
@ -273,7 +277,10 @@ for net in $(ls /sys/class/net); do
echo $path | grep -q virtual
if [ $? -ne 0 ]; then
hw=$(cat ${path}/address)
# In some cases some devices do not have a vendor or device file; skip them
test -f ${path}/device/vendor || continue
vendor_id=$(cat ${path}/device/vendor)
test -f ${path}/device/device || continue
dev_id=$(cat ${path}/device/device)
[ "${dev_id}" = "0x0001" ] && dev_id="0x1000"
dev=$(lspci -d "${vendor_id}:${dev_id}" -vm)

View File

@ -1,6 +1,30 @@
---
# backward compatibility with a previously used variable
haproxy_stats_ssl: True
haproxy_stats_host: "*"
haproxy_stats_port: "8080"
haproxy_stats_path: "/"
haproxy_stats_bind_directive: "{{ haproxy_stats_host }}:{{ haproxy_stats_port }} {% if haproxy_stats_ssl %}ssl crt {{ haproxy_ssl_dir }}{% endif %}"
haproxy_stats_internal_url: "{% if haproxy_stats_ssl %}https:{% else %}http:{% endif %}//{% if haproxy_stats_host == '*' or haproxy_stats_host == '0.0.0.0' %}127.0.0.1{% else %}{{ haproxy_stats_host }}{% endif %}:{{ haproxy_stats_port }}{{ haproxy_stats_path }}"
haproxy_stats_external_url: "{% if haproxy_stats_ssl %}https:{% else %}http:{% endif %}//{{ ansible_fqdn }}:{{ haproxy_stats_port }}{{ haproxy_stats_path }}"
haproxy_backports: "{{ haproxy_jessie_backports | default(false, true) }}"
haproxy_stats_url: "http://127.0.0.1:8080/"
haproxy_update_config: True
haproxy_force_config: True
haproxy_socket: /run/haproxy/admin.sock
haproxy_chroot: /var/lib/haproxy
haproxy_stats_access_ips: []
haproxy_stats_admin_ips: []
haproxy_maintenance_ips: []
haproxy_deny_ips: []
haproxy_ssl_dir: "/etc/haproxy/ssl/"
haproxy_stats_enable: False
haproxy_stats_bind: "*:8080 ssl crt /etc/haproxy/ssl/"
haproxy_backports_packages_stretch: haproxy libssl1.0.0
haproxy_backports_packages_buster: haproxy

View File

@ -4,8 +4,74 @@
name: ssl-cert
state: present
tags:
- haproxy
- packages
- haproxy
- packages
- name: HAProxy SSL directory is present
file:
path: /etc/haproxy/ssl
owner: root
group: root
mode: "0700"
state: directory
tags:
- haproxy
- config
- name: Self-signed certificate is present in HAProxy ssl directory
shell: "cat /etc/ssl/certs/ssl-cert-snakeoil.pem /etc/ssl/private/ssl-cert-snakeoil.key > /etc/haproxy/ssl/ssl-cert-snakeoil.pem"
args:
creates: /etc/haproxy/ssl/ssl-cert-snakeoil.pem
notify: reload haproxy
tags:
- haproxy
- config
- name: HAProxy stats_access_ips are present
blockinfile:
dest: /etc/haproxy/stats_access_ips
create: yes
block: |
{% for ip in haproxy_stats_access_ips | default([]) %}
{{ ip }}
{% endfor %}
notify: reload haproxy
tags:
- haproxy
- config
- name: HAProxy stats_admin_ips are present
blockinfile:
dest: /etc/haproxy/stats_admin_ips
create: yes
block: |
{% for ip in haproxy_stats_admin_ips | default([]) %}
{{ ip }}
{% endfor %}
notify: reload haproxy
tags:
- haproxy
- config
- name: HAProxy maintenance_ips are present
blockinfile:
dest: /etc/haproxy/maintenance_ips
create: yes
block: |
{% for ip in haproxy_maintenance_ips | default([]) %}
{{ ip }}
{% endfor %}
notify: reload haproxy
- name: HAProxy deny_ips are present
blockinfile:
dest: /etc/haproxy/deny_ips
create: yes
block: |
{% for ip in haproxy_deny_ips | default([]) %}
{{ ip }}
{% endfor %}
notify: reload haproxy
- include: packages_backports.yml
when: haproxy_backports
@ -15,8 +81,8 @@
name: haproxy
state: present
tags:
- haproxy
- packages
- haproxy
- packages
- name: Copy HAProxy configuration
template:
@ -25,14 +91,34 @@
force: "{{ haproxy_force_config }}"
validate: "haproxy -c -f %s"
with_first_found:
- "templates/haproxy/haproxy.{{ inventory_hostname }}.cfg.j2"
- "templates/haproxy/haproxy.{{ host_group }}.cfg.j2"
- "templates/haproxy/haproxy.default.cfg.j2"
- "haproxy.default.cfg.j2"
- "templates/haproxy/haproxy.{{ inventory_hostname }}.cfg.j2"
- "templates/haproxy/haproxy.{{ host_group }}.cfg.j2"
- "templates/haproxy/haproxy.default.cfg.j2"
- "haproxy.default.cfg.j2"
notify: reload haproxy
when: "{{ haproxy_update_config }}"
when: haproxy_update_config
tags:
- haproxy
- config
- haproxy
- config
- name: Rotate logs with dateext
lineinfile:
dest: /etc/logrotate.d/haproxy
line: ' dateext'
regexp: '^\s*#*\s*(no)?dateext'
insertbefore: '}'
tags:
- haproxy
- config
- name: Rotate logs with nodelaycompress
lineinfile:
dest: /etc/logrotate.d/haproxy
line: ' nodelaycompress'
regexp: '^\s*#*\s*(no)?delaycompress'
insertbefore: '}'
tags:
- haproxy
- config
- include: munin.yml

View File

@ -4,8 +4,16 @@
name: evolix/apt
tasks_from: backports.yml
tags:
- haproxy
- packages
- haproxy
- packages
- set_fact:
haproxy_backports_packages: "{{ haproxy_backports_packages_stretch }}"
when: ansible_distribution_release == 'stretch'
- set_fact:
haproxy_backports_packages: "{{ haproxy_backports_packages_buster }}"
when: ansible_distribution_release == 'buster'
- name: Prefer HAProxy package from backports
template:
@ -15,13 +23,13 @@
mode: "0640"
register: haproxy_apt_preferences
tags:
- haproxy
- packages
- haproxy
- packages
- name: update apt
apt:
update_cache: yes
when: haproxy_apt_preferences is changed
tags:
- haproxy
- packages
- haproxy
- packages

View File

@ -3,8 +3,8 @@
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
chroot {{ haproxy_chroot }}
stats socket {{ haproxy_socket }} mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
@ -14,20 +14,18 @@ global
ca-base /etc/ssl/certs
crt-base /etc/ssl/private
# Default ciphers to use on SSL-enabled listening sockets.
# For more information, see ciphers(1SSL). This list is from:
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS
ssl-default-bind-options no-sslv3
# Go to https://ssl-config.mozilla.org/ and build your SSL configuration
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
timeout connect 5000
timeout client 50000
timeout server 50000
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
@ -35,3 +33,46 @@ defaults
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
{% if haproxy_stats_enable %}
listen stats
mode http
bind {{ haproxy_stats_bind_directive }}
stats enable
stats refresh 10s
stats uri {{ haproxy_stats_path }}
stats show-legends
stats show-node
stats admin if { src -f /etc/haproxy/stats_admin_ips }
http-request deny if !{ src -f /etc/haproxy/stats_access_ips }
http-request set-log-level silent
{% endif %}
# frontend http-https
# bind 0.0.0.0:80
# bind 0.0.0.0:443 ssl crt {{ haproxy_ssl_dir }}
#
# capture request header Host len 32
#
# option forwardfor
#
# acl self hdr(host) -i {{ ansible_fqdn }}
#
# # Detect Let's Encrypt challenge requests
# acl letsencrypt path_dir -i /.well-known/acme-challenge
#
# # Reject the request at the TCP level if source is in the denylist
# tcp-request connection reject if { src -f /etc/haproxy/deny_ips }
#
# http-request set-header X-Forwarded-Proto https if { ssl_fc }
# http-request set-header X-Forwarded-Port 443 if { ssl_fc }
#
# use_backend local if letsencrypt || self
#
# backend local
# mode http
# option forwardfor
#
# server localhost 127.0.0.1:81 send-proxy-v2

View File

@ -1,3 +1,3 @@
Package: haproxy libssl1.0.0
Package: {{ haproxy_backports_packages }}
Pin: release a={{ ansible_distribution_release }}-backports
Pin-Priority: 999

View File

@ -1,2 +1,2 @@
[haproxy_*]
env.url {{ haproxy_stats_url }};csv;norefresh
env.url {{ haproxy_stats_internal_url }};csv;norefresh

View File

@ -5,7 +5,7 @@ php_conf_display_errors: "Off"
php_conf_log_errors: "On"
php_conf_html_errors: "Off"
php_conf_allow_url_fopen: "Off"
php_conf_disable_functions: "exec,shell-exec,system,passthru,putenv,popen"
php_conf_disable_functions: "exec,shell-exec,system,passthru,popen"
lxc_php_version: Null

View File

@ -3,7 +3,7 @@
- name: "{{ lxc_php_version }} - Install opensmtpd"
lxc_container:
name: "{{ lxc_php_version }}"
container_command: "DEBIAN_FRONTEND=noninteractive apt install -y ssmtp"
container_command: "DEBIAN_FRONTEND=noninteractive apt install --no-install-recommends -y opensmtpd"
- name: "{{ lxc_php_version }} - Configure opensmtpd (in the container)"
template:

View File

@ -3,7 +3,7 @@
- name: "{{ lxc_php_version }} - Install ssmtp"
lxc_container:
name: "{{ lxc_php_version }}"
container_command: "DEBIAN_FRONTEND=noninteractive apt install -y ssmtp"
container_command: "DEBIAN_FRONTEND=noninteractive apt install --no-install-recommends -y ssmtp "
- name: "{{ lxc_php_version }} - Configure ssmtp"
template:

View File

@ -3,7 +3,7 @@
- name: "{{ lxc_php_version }} - Install PHP packages"
lxc_container:
name: "{{ lxc_php_version }}"
container_command: "DEBIAN_FRONTEND=noninteractive apt install -y php5-fpm php5-cli php5-gd php5-imap php5-ldap php5-mcrypt php5-mysql php5-pgsql php-gettext php5-intl php5-curl php5-ssh2 libphp-phpmailer ssmtp"
container_command: "DEBIAN_FRONTEND=noninteractive apt install -y php5-fpm php5-cli php5-gd php5-imap php5-ldap php5-mcrypt php5-mysql php5-pgsql php5-sqlite php-gettext php5-intl php5-curl php5-ssh2 libphp-phpmailer"
- name: "{{ lxc_php_version }} - Copy evolinux PHP configuration"
template:

View File

@ -3,7 +3,7 @@
- name: "{{ lxc_php_version }} - Install PHP packages"
lxc_container:
name: "{{ lxc_php_version }}"
container_command: "DEBIAN_FRONTEND=noninteractive apt install -y php-fpm php-cli php-gd php-intl php-imap php-ldap php-mcrypt php-mysql php-pgsql php-gettext php-curl php-ssh2 php-zip php-mbstring composer libphp-phpmailer"
container_command: "DEBIAN_FRONTEND=noninteractive apt install -y php-fpm php-cli php-gd php-intl php-imap php-ldap php-mcrypt php-mysql php-pgsql php-sqlite3 php-gettext php-curl php-ssh2 php-zip php-mbstring composer libphp-phpmailer"
- name: "{{ lxc_php_version }} - Copy evolinux PHP configuration"
template:

View File

@ -3,7 +3,7 @@
- name: "{{ lxc_php_version }} - Install PHP packages"
lxc_container:
name: "{{ lxc_php_version }}"
container_command: "DEBIAN_FRONTEND=noninteractive apt install -y php-fpm php-cli php-gd php-intl php-imap php-ldap php-mysql php-pgsql php-gettext php-curl php-ssh2 php-zip php-mbstring php-zip composer libphp-phpmailer"
container_command: "DEBIAN_FRONTEND=noninteractive apt install -y php-fpm php-cli php-gd php-intl php-imap php-ldap php-mysql php-pgsql php-sqlite3 php-gettext php-curl php-ssh2 php-zip php-mbstring php-zip composer libphp-phpmailer"
- name: "{{ lxc_php_version }} - Copy evolinux PHP configuration"
template:

View File

@ -1,4 +1,4 @@
# filebeat
# metricbeat
Install Metricbeat.

View File

@ -1,10 +1,25 @@
---
elastic_stack_version: "6.x"
metricbeat_elasticsearch_protocol: ""
metricbeat_elasticsearch_hosts:
- "localhost:9200"
metricbeat_elasticsearch_protocol: ""
metricbeat_elasticsearch_auth_api_key: ""
metricbeat_elasticsearch_auth_username: ""
metricbeat_elasticsearch_auth_password: ""
metricbeat_processors_cloud_metadata: False
metricbeat_use_config_template: False
metricbeat_update_config: True
metricbeat_force_config: True
# Example :
# metricbeat_tags:
# - "service-X"
# - "web-tier"
metricbeat_tags: Null
# Example :
# metricbeat_fields:
# - "env: staging"
metricbeat_fields: Null

View File

@ -40,51 +40,79 @@
name: metricbeat
enabled: yes
- name: Metricbeat knows where to find Elasticsearch
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ hosts: .*'
line: " hosts: [\"{{ metricbeat_elasticsearch_hosts | join('\", \"') }}\"]"
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when:
- metricbeat_elasticsearch_hosts
# When we don't use a config template (default)
- block:
- name: Metricbeat knows where to find Elasticsearch
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ hosts: .*'
line: " hosts: [\"{{ metricbeat_elasticsearch_hosts | join('\", \"') }}\"]"
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when:
- metricbeat_elasticsearch_hosts
- name: Metricbeat protocol for Elasticsearch
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ #?protocol: .*'
line: " protocol: \"{{ metricbeat_elasticsearch_protocol }}\""
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when: metricbeat_elasticsearch_protocol == "http" or metricbeat_elasticsearch_protocol == "https"
- name: Metricbeat protocol for Elasticsearch
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ #?protocol: .*'
line: " protocol: \"{{ metricbeat_elasticsearch_protocol }}\""
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when: metricbeat_elasticsearch_protocol == "http" or metricbeat_elasticsearch_protocol == "https"
- name: Metricbeat auth/username for Elasticsearch are configured
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '{{ item.regexp }}'
line: '{{ item.line }}'
insertafter: "output.elasticsearch:"
with_items:
- { regexp: '^ #?username: .*', line: ' username: "{{ metricbeat_elasticsearch_auth_username }}"' }
- { regexp: '^ #?password: .*', line: ' password: "{{ metricbeat_elasticsearch_auth_password }}"' }
notify: restart metricbeat
when:
- metricbeat_elasticsearch_auth_username != ""
- metricbeat_elasticsearch_auth_password != ""
- name: Metricbeat auth/username for Elasticsearch are configured
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '{{ item.regexp }}'
line: '{{ item.line }}'
insertafter: "output.elasticsearch:"
with_items:
- { regexp: '^ #?username: .*', line: ' username: "{{ metricbeat_elasticsearch_auth_username }}"' }
- { regexp: '^ #?password: .*', line: ' password: "{{ metricbeat_elasticsearch_auth_password }}"' }
notify: restart metricbeat
when:
- metricbeat_elasticsearch_auth_username
- metricbeat_elasticsearch_auth_password
- name: disable cloud_metadata
replace:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^(\s+)(- add_cloud_metadata:)'
replace: '\1# \2'
notify: restart metricbeat
when: not metricbeat_processors_cloud_metadata
- name: Metricbeat api_key for Elasticsearch are configured
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ #?api_key: .*'
line: ' api_key: "{{ metricbeat_elasticsearch_auth_api_key }}"'
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when: metricbeat_elasticsearch_auth_api_key
- name: cloud_metadata processor is disabled
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
line: " - add_cloud_metadata: ~"
insert_after: '^processors:'
notify: restart metricbeat
when: metricbeat_processors_cloud_metadata
- name: disable cloud_metadata
replace:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^(\s+)(- add_cloud_metadata:)'
replace: '\1# \2'
notify: restart metricbeat
when: not metricbeat_processors_cloud_metadata
- name: cloud_metadata processor is disabled
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
line: " - add_cloud_metadata: ~"
insert_after: '^processors:'
notify: restart metricbeat
when: metricbeat_processors_cloud_metadata
when: not metricbeat_use_config_template
# When we use a config template
- block:
- name: Configuration is up-to-date
template:
src: "{{ item }}"
dest: /etc/metricbeat/metricbeat.yml
force: "{{ metricbeat_force_config }}"
with_first_found:
- "templates/metricbeat/metricbeat.{{ inventory_hostname }}.yml.j2"
- "templates/metricbeat/metricbeat.{{ host_group }}.yml.j2"
- "templates/metricbeat/metricbeat.default.yml.j2"
- "metricbeat.default.yml.j2"
notify: restart metricbeat
when: metricbeat_update_config
when: metricbeat_use_config_template

View File

@ -0,0 +1,180 @@
###################### Metricbeat Configuration Example #######################
# This file is an example configuration file highlighting only the most common
# options. The metricbeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/metricbeat/index.html
# =========================== Modules configuration ============================
metricbeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
# ======================= Elasticsearch template setting =======================
setup.template.settings:
index.number_of_shards: 1
index.codec: best_compression
#_source.enabled: false
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
{% if metricbeat_tags %}
tags: ["{{ metricbeat_tags | join('", "') }}"]
{% endif %}
# Optional fields that you can specify to add additional information to the
# output.
{% if metricbeat_fields %}
fields:
{% for field in metricbeat_fields %}
{{ field }}
{% endfor %}
{% endif %}
# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# =============================== Elastic Cloud ================================
# These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["{{ metricbeat_elasticsearch_hosts | join('", "') }}"]
# Protocol - either `http` (default) or `https`.
protocol: "{{ metricbeat_elasticsearch_protocol | default('http') }}"
# Authentication credentials - either API key or username/password.
{% if metricbeat_elasticsearch_auth_api_key %}
api_key: "{{ metricbeat_elasticsearch_auth_api_key }}"
{% endif %}
{% if metricbeat_elasticsearch_auth_username %}
username: "{{ metricbeat_elasticsearch_auth_username }}"
{% endif %}
{% if metricbeat_elasticsearch_auth_password %}
password: "{{ metricbeat_elasticsearch_auth_password }}"
{% endif %}
# ------------------------------ Logstash Output -------------------------------
#output.logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# ================================= Processors =================================
# Configure processors to enhance or manipulate events generated by the beat.
processors:
- add_host_metadata: ~
{% if metricbeat_processors_cloud_metadata %}
- add_cloud_metadata: ~
{% endif %}
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
# ============================= X-Pack Monitoring ==============================
# Metricbeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Metricbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# ================================= Migration ==================================
# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true

63
mongodb/files/munin/mongo_btree Executable file
View File

@ -0,0 +1,63 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
import urllib2
import sys
import os
import pymongo
def getClient():
    # Connect with the URI from MONGO_DB_URI when the variable is set,
    # otherwise fall back to pymongo's defaults (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    return pymongo.MongoClient()
def getServerStatus():
    # Run the "serverStatus" admin command; workingSet=True asks the server
    # to include working-set estimates in the reply.
    client = getClient()
    return client.admin.command('serverStatus', workingSet=True)
def get():
    # Extract the btree/index counters from the full server status.
    # NOTE(review): "indexCounters" is not present in serverStatus on newer
    # MongoDB releases — confirm the server version this plugin targets.
    status = getServerStatus()
    return status["indexCounters"]
def doData():
    """Print one "<counter>.value <int>" line per btree counter (munin fetch)."""
    # .items() instead of the Python-2-only .iteritems(), and print() instead
    # of the print statement, so the plugin runs under both Python 2 and 3.
    for k, v in get().items():
        print(str(k) + ".value " + str(int(v)))
def doConfig():
    """Print the munin graph configuration for the btree counters."""
    # print() calls (instead of Python-2-only print statements) keep the
    # plugin working under both Python 2 and 3; output is byte-identical.
    print("graph_title MongoDB btree stats")
    print("graph_args --base 1000 -l 0")
    print("graph_vlabel mb ${graph_period}")
    print("graph_category MongoDB")
    for k in get():
        print(k + ".label " + k)
        print(k + ".min 0")
        print(k + ".type COUNTER")
        print(k + ".max 500000")
        print(k + ".draw LINE1")
if __name__ == "__main__":
    # Munin invokes the plugin with "config" for graph metadata and with no
    # argument for the current values.
    # Connection settings come solely from MONGO_DB_URI (see getClient); the
    # former HOST/PORT/USER/PASSWORD environment reads were dead code and
    # have been removed.
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

View File

@ -0,0 +1,106 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
import urllib2
import sys
import os
import pymongo
def getClient():
    # Connect with the URI from MONGO_DB_URI when the variable is set,
    # otherwise fall back to pymongo's defaults (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    return pymongo.MongoClient()
def getServerStatus():
    # Run the "serverStatus" admin command; workingSet=True asks the server
    # to include working-set estimates in the reply.
    client = getClient()
    return client.admin.command('serverStatus', workingSet=True)
import re
FIELD_ESCAPE = re.compile("[^A-Za-z_]")
def escape_field(name):
return FIELD_ESCAPE.sub("_", name)
def need_multigraph():
if 'MUNIN_CAP_MULTIGRAPH' not in os.environ:
sys.stderr.write('MUNIN_CAP_MULTIGRAPH not found in environment\n')
sys.exit(1)
def collections(include_stats=False):
c = getClient()
for db in c.database_names():
for collection in c[db].collection_names():
name = db + "." + collection
if include_stats:
yield name, c[db].command("collstats", collection)
else:
yield name
def doData():
need_multigraph()
data = list(collections(True))
print "multigraph collection_count"
for name, stats in data:
print(escape_field(name) + ".value " + str(stats["count"]))
print "multigraph collection_size"
for name, stats in data:
print(escape_field(name) + ".value " + str(stats["size"]))
def doConfig():
need_multigraph()
names = list(collections())
print "multigraph collection_count"
print "graph_title MongoDB collection document count"
print "graph_args --base 1000 -l 0"
print "graph_vlabel collection document count"
print "graph_category MongoDB"
print "graph_total total"
for name in names:
field_name = escape_field(name)
print field_name + ".label " + name
print field_name + ".min 0"
print field_name + ".type GAUGE"
print field_name + ".draw LINE1"
print "multigraph collection_size"
print "graph_title MongoDB collection size"
print "graph_args --base 1024 -l 0"
print "graph_vlabel collection size"
print "graph_category MongoDB"
print "graph_total total"
for name in names:
field_name = escape_field(name)
print field_name + ".label " + name
print field_name + ".min 0"
print field_name + ".type GAUGE"
print field_name + ".draw LINE1"
if __name__ == "__main__":
from os import environ
if 'HOST' in environ:
host = environ['HOST']
if 'PORT' in environ:
port = environ['PORT']
if 'USER' in environ:
user = environ['USER']
if 'PASSWORD' in environ:
password = environ['PASSWORD']
if len(sys.argv) > 1 and sys.argv[1] == "config":
doConfig()
else:
doData()

57
mongodb/files/munin/mongo_conn Executable file
View File

@ -0,0 +1,57 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: graphs the current number of MongoDB client connections
# (serverStatus connections.current). Python 2 script.
import urllib2  # NOTE(review): unused in this plugin; kept because the file is generated
import sys
import os
import pymongo

def getClient():
    # Connect with MONGO_DB_URI from the environment when set, otherwise
    # use pymongo's defaults (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # Run the serverStatus admin command against the connected server.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

# Single Munin field name for this graph.
name = "connections"

def doData():
    # Current (open) connection count.
    print name + ".value " + str( getServerStatus()["connections"]["current"] )

def doConfig():
    # Munin graph definition: one gauge-style series.
    print "graph_title MongoDB current connections"
    print "graph_args --base 1000 -l 0"
    print "graph_vlabel connections"
    print "graph_category MongoDB"
    print name + ".label " + name

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards; the
    # connection is controlled solely by MONGO_DB_URI (see getClient).
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

72
mongodb/files/munin/mongo_docs Executable file
View File

@ -0,0 +1,72 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: graphs per-collection document counts for all databases
# except admin/local. Python 2 script (print statements, iteritems()).
import urllib2  # NOTE(review): unused in this plugin; kept because the file is generated
import sys
import os
import pymongo

def getClient():
    # Connect with MONGO_DB_URI from the environment when set, otherwise
    # use pymongo's defaults (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # NOTE(review): defined for symmetry with the sibling plugins but not
    # used by this one.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

def getDatabasesStats():
    # Build {db: {collection: document_count}} for every user database,
    # skipping admin, local and system sub-collections (names containing '.').
    host = "127.0.0.1"  # NOTE(review): host/port are never used; connection comes from getClient()
    port = 27017
    c = getClient()
    dbs = {}
    for k in c.database_names():
        if k != "admin" and k != "local" and k != "":
            db = c[k]
            dbs[k] = {}
            for coll in db.collection_names():
                if '.' not in coll:
                    dbs[k][coll] = db[coll].count()
    return dbs

def doData():
    # Field name is db+collection concatenated with no separator.
    # NOTE(review): this can collide across databases (e.g. "ab"+"c" vs "a"+"bc").
    ss = getDatabasesStats()
    for k,v in ss.iteritems():
        for a,b in v.iteritems():
            print(str(k)+str(a) + ".value " + str(b))

def doConfig():
    # Munin graph definition: one LINE1 series per collection.
    print "graph_title MongoDB documents count"
    print "graph_args --base 1000 -l 0 --vertical-label Docs"
    print "graph_category MongoDB"
    ss = getDatabasesStats()
    for k,v in ss.iteritems():
        for a,b in v.iteritems():
            print str(k)+str(a) + ".label " + str(k) + " " + str(a)
            print str(k)+str(a) + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards; the
    # connection is controlled solely by MONGO_DB_URI (see getClient).
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

56
mongodb/files/munin/mongo_lock Executable file
View File

@ -0,0 +1,56 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: graphs the MongoDB global write lock percentage
# (globalLock.lockTime / globalLock.totalTime). Python 2 script.
import urllib2  # NOTE(review): unused in this plugin; kept because the file is generated
import sys
import os
import pymongo

def getClient():
    # Connect with MONGO_DB_URI from the environment when set, otherwise
    # use pymongo's defaults (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # Run the serverStatus admin command against the connected server.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

# Single Munin field name for this graph.
name = "locked"

def doData():
    # NOTE(review): under Python 2, lockTime/totalTime is integer division
    # when both values are ints, so this most likely always prints 0.
    # A float() cast would be needed for a real percentage — confirm and fix
    # in the generator, not here (generated file).
    # Also calls getServerStatus() twice (two round-trips to the server).
    print name + ".value " + str( 100 * (getServerStatus()["globalLock"]["lockTime"]/getServerStatus()["globalLock"]["totalTime"]) )

def doConfig():
    # Munin graph definition: single percentage series.
    print "graph_title MongoDB global write lock percentage"
    print "graph_args --base 1000 -l 0 "
    print "graph_vlabel percentage"
    print "graph_category MongoDB"
    print name + ".label " + name

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards; the
    # connection is controlled solely by MONGO_DB_URI (see getClient).
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

62
mongodb/files/munin/mongo_mem Executable file
View File

@ -0,0 +1,62 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: graphs MongoDB memory usage (resident/virtual/mapped
# from the serverStatus "mem" section). Python 2 script.
import urllib2  # NOTE(review): unused in this plugin; kept because the file is generated
import sys
import os
import pymongo

def getClient():
    # Connect with MONGO_DB_URI from the environment when set, otherwise
    # use pymongo's defaults (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # Run the serverStatus admin command against the connected server.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

def ok(s):
    # Keep only the three interesting fields of the "mem" section.
    return s == "resident" or s == "virtual" or s == "mapped"

def doData():
    # Scale values to bytes for the graph.
    # NOTE(review): assumes the "mem" section reports megabytes — confirm
    # against the mongod version in use.
    for k,v in getServerStatus()["mem"].iteritems():
        if ok(k):
            print( str(k) + ".value " + str(v * 1024 * 1024) )

def doConfig():
    # Munin graph definition: one LINE1 series per selected field.
    print "graph_title MongoDB memory usage"
    print "graph_args --base 1024 -l 0 --vertical-label Bytes"
    print "graph_category MongoDB"
    for k in getServerStatus()["mem"]:
        if ok( k ):
            print k + ".label " + k
            print k + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards; the
    # connection is controlled solely by MONGO_DB_URI (see getClient).
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

58
mongodb/files/munin/mongo_ops Executable file
View File

@ -0,0 +1,58 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: graphs MongoDB operation counters (insert/query/update/...)
# from the serverStatus "opcounters" section. Python 2 script.
import urllib2  # NOTE(review): unused in this plugin; kept because the file is generated
import sys
import os
import pymongo

def getClient():
    # Connect with MONGO_DB_URI from the environment when set, otherwise
    # use pymongo's defaults (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # Run the serverStatus admin command against the connected server.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

def doData():
    # One "<op>.value <n>" line per opcounter; Munin derives the rate
    # because the series are declared as COUNTER in doConfig().
    ss = getServerStatus()
    for k,v in ss["opcounters"].iteritems():
        print( str(k) + ".value " + str(v) )

def doConfig():
    # Munin graph definition: one COUNTER series per operation type.
    print "graph_title MongoDB ops"
    print "graph_args --base 1000 -l 0"
    print "graph_vlabel ops / ${graph_period}"
    print "graph_category MongoDB"
    print "graph_total total"
    for k in getServerStatus()["opcounters"]:
        print k + ".label " + k
        print k + ".min 0"
        print k + ".type COUNTER"
        print k + ".max 500000"
        print k + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards; the
    # connection is controlled solely by MONGO_DB_URI (see getClient).
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

View File

@ -0,0 +1,57 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: graphs MongoDB page faults (serverStatus
# extra_info.page_faults). Python 2 script.
import urllib2  # NOTE(review): unused in this plugin; kept because the file is generated
import sys
import os
import pymongo

def getServerStatus():
    # Unlike the sibling plugins this one builds the client inline:
    # MONGO_DB_URI from the environment when set, pymongo defaults otherwise.
    if 'MONGO_DB_URI' in os.environ:
        c = pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        c = pymongo.MongoClient()
    return c.admin.command('serverStatus', workingSet=True)

# Single Munin field name for this graph.
name = "page_faults"

def get():
    # Cumulative page-fault counter; Munin derives the rate (COUNTER type).
    return getServerStatus()["extra_info"][name]

def doData():
    print(name + ".value " + str(get()))

def doConfig():
    # Munin graph definition: a single COUNTER series.
    print "graph_title MongoDB page faults"
    print "graph_args --base 1000 -l 0"
    print "graph_vlabel faults / ${graph_period}"
    print "graph_category MongoDB"
    print "graph_total total"
    print name + ".label " + name
    print name + ".min 0"
    print name + ".type COUNTER"
    print name + ".max 10000"
    print name + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards; the
    # connection is controlled solely by MONGO_DB_URI (see getServerStatus).
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

View File

@ -9,3 +9,8 @@
service:
name: mongodb
state: restarted
- name: restart munin-node
systemd:
name: munin-node
state: restarted

View File

@ -44,3 +44,35 @@
dest: /etc/logrotate.d/mongodb
force: yes
backup: no
- name: Munin plugins are present
copy:
src: "munin/{{ item }}"
dest: '/usr/local/share/munin/plugins/{{ item }}'
force: yes
with_items:
- mongo_btree
- mongo_collections
- mongo_conn
- mongo_docs
- mongo_lock
- mongo_mem
- mongo_ops
- mongo_page_faults
notify: restart munin-node
- name: Enable core Munin plugins
file:
src: '/usr/local/share/munin/plugins/{{ item }}'
dest: /etc/munin/plugins/{{ item }}
state: link
with_items:
- mongo_btree
- mongo_collections
- mongo_conn
- mongo_docs
- mongo_lock
- mongo_mem
- mongo_ops
- mongo_page_faults
notify: restart munin-node

View File

@ -1,6 +1,6 @@
# {{ ansible_managed }}
/var/log/mongodb/mongodb.log {
/var/log/mongodb/mongod.log {
daily
missingok
rotate 365

View File

@ -15,11 +15,13 @@ Tasks are extracted in several files, included in `tasks/main.yml` :
* `munin.yml` : Munin plugins ;
* `log2mail.yml` : log2mail patterns ;
* `utils.yml` : useful tools.
* `replication.yml`: install and configure prerequisites for mysql replication, do not forget to set `mysql_bind_address`, `mysql_server_id` and `mysql_log_bin`
## Available variables
* `mysql_variant` : install Oracle's MySQL or MariaDB (default: `oracle`) [Debian 8 only];
* `mysql_replace_root_with_mysqladmin`: switch from `root` to `mysqladmin` user or not ;
* `mysql_replication`: setup all prerequisites for replication.
* `mysql_thread_cache_size`: number of threads for the cache ;
* `mysql_innodb_buffer_pool_size`: amount of RAM dedicated to InnoDB ;
* `mysql_bind_address` : (default: `Null`, default evolinux config is then used) ;
@ -30,8 +32,7 @@ Tasks are extracted in several files, included in `tasks/main.yml` :
* `mysql_max_heap_table_size`: (default: `Null`, default evolinux config is then used) ;
* `mysql_query_cache_limit`: (default: `Null`, default evolinux config is then used) ;
* `mysql_query_cache_size`: (default: `Null`, default evolinux config is then used) ;
* `mysql_log_bin`: (default: `Null`, activates binlogs if used) ;
* `mysql_server_id`: (default: `Null`, MySQL version default is then used) ;
* `mysql_server_id`: (default: `Null`, only used with `mysql_replication`, default mysql server id will be used otherwise) ;
* `mysql_custom_datadir`: custom datadir.
* `mysql_custom_tmpdir`: custom tmpdir.
* `general_alert_email`: email address to send various alert messages (default: `root@localhost`).
@ -41,5 +42,9 @@ Tasks are extracted in several files, included in `tasks/main.yml` :
* `mysql_force_new_nrpe_password` : change the password for NRPE even if it exists already (default: `False`).
* `mysql_install_libclient`: install mysql client libraries (default: `False`).
* `mysql_restart_if_needed` : should the restart handler be executed (default: `True`)
* `mysql_log_bin`: (default: `Null`, activates binlogs if used with `mysql_replication`) ;
* `mysql_repl_password`: Password hash for replication user, only creates a user if set.
## Notes
Changing the _datadir_ location can be done multiple times, as long as it is not restored to the default initial location, (because a symlink is created and can't be switched back, yet).
NB : changing the _datadir_ location can be done multiple times, as long as it is not restored to the default initial location, (because a symlink is created and can't be switched back, yet).
When using replication, note that the connections from the client server on the haproxy 8306 and mysql 3306 ports need to be open and the sql servers need to communicate on port 3306.

View File

@ -21,7 +21,6 @@ mysql_innodb_buffer_pool_size: '{{ (ansible_memtotal_mb * 0.3) | int }}M'
# If these variables are changed to non-Null values,
# they will be added in the zzz-evolinux-custom.cnf file.
# Otherwise, the value from the z-evolinux-defaults.cnf file will prevail.
mysql_bind_address: Null
mysql_max_connections: Null
mysql_max_connect_errors: Null
mysql_table_cache: Null
@ -29,8 +28,10 @@ mysql_tmp_table_size: Null
mysql_max_heap_table_size: Null
mysql_query_cache_limit: Null
mysql_query_cache_size: Null
mysql_log_bin: Null
mysql_server_id: Null
mysql_max_allowed_packet: Null
mysql_force_custom_config: 'no'
mysql_innodb_log_file_size: Null
mysql_lower_case_table_names: Null
mysql_cron_optimize: True
mysql_cron_optimize_frequency: weekly
@ -44,3 +45,13 @@ mysql_evolinux_defaults_file: z-evolinux-defaults.cnf
mysql_evolinux_custom_file: zzz-evolinux-custom.cnf
mysql_restart_if_needed: True
# replication variables:
mysql_replication: false
mysql_log_bin: null
mysql_binlog_format: mixed
mysql_server_id: null
mysql_bind_address: null
mysql_repl_password: ''
mysql_read_only: 0

View File

@ -0,0 +1,13 @@
# Ansible managed
# xinetd service for the HAProxy MySQL health check: every TCP connection
# on port 8306 runs mysqlchk.sh and streams its HTTP-formatted answer back.
service mysqlchk
{
        # Stream TCP service on an unprivileged, non-/etc/services port.
        socket_type     = stream
        port            = 8306
        protocol        = tcp
        wait            = no
        type            = UNLISTED
        # Runs as root so the script can read /etc/mysql/debian.cnf.
        user            = root
        server          = /usr/share/scripts/mysqlchk.sh
        log_on_failure  += USERID
        disable         = no
}

View File

@ -0,0 +1,54 @@
#!/bin/sh
# Ansible managed
#
# http://sysbible.org/x/2008/12/04/having-haproxy-check-mysql-status-through-a-xinetd-script/
#
# This script checks if a mysql server is healthy running on localhost. It will
# return:
#
# "HTTP/1.x 200 OK\r" (if mysql is running smoothly)
#
# - OR -
#
# "HTTP/1.x 503 Service Unavailable\r" (else)
#
# The purpose of this script is to make haproxy capable of monitoring mysql properly
#
# Author: Unai Rodriguez
#
# It is recommended that a low-privileged-mysql user is created to be used by
# this script. Something like this:
#
# mysql> GRANT SELECT on mysql.* TO 'mysqlchkusr'@'localhost' \
#     -> IDENTIFIED BY '257retfg2uysg218' WITH GRANT OPTION;
# mysql> flush privileges;

# NOTE(review): fixed, predictable paths in /tmp are exposed to symlink
# attacks on multi-user hosts; consider mktemp(1).
TMP_FILE="/tmp/mysqlchk.out"
ERR_FILE="/tmp/mysqlchk.err"

#
# We perform a simple query that should return a few results :-p
#
/usr/bin/mysql --defaults-file=/etc/mysql/debian.cnf -e "show databases;" > "$TMP_FILE" 2> "$ERR_FILE"

#
# Check the output. If it is not empty then everything is fine and we return
# something. Else, we just do not return anything.
#
if [ "$(/bin/cat "$TMP_FILE")" != "" ]; then
    # mysql is fine, return http 200
    /bin/echo -e "HTTP/1.1 200 OK\r\n"
    /bin/echo -e "Content-Type: text/plain\r\n"
    /bin/echo -e "\r\n"
    /bin/echo -e "MySQL is running.\r\n"
    /bin/echo -e "\r\n"
else
    # mysql is down, return http 503
    /bin/echo -e "HTTP/1.1 503 Service Unavailable\r\n"
    /bin/echo -e "Content-Type: text/plain\r\n"
    /bin/echo -e "\r\n"
    /bin/echo -e "MySQL is *down*.\r\n"
    /bin/echo -e "\r\n"
fi

View File

@ -4,11 +4,6 @@
name: munin-node
state: restarted
- name: restart nagios-nrpe-server
service:
name: nagios-nrpe-server
state: restarted
- name: restart mysql
service:
name: mysql
@ -23,3 +18,8 @@
systemd:
name: mysql
daemon_reload: yes
# 'restart' is not a valid value for the service module's `state`
# (started/stopped/restarted/reloaded); it must be 'restarted' or this
# handler fails at runtime.
- name: 'restart xinetd'
  service:
    name: 'xinetd'
    state: 'restarted'

View File

@ -21,6 +21,6 @@
owner: root
group: root
mode: "0644"
force: no
force: "{{ mysql_force_custom_config }}"
tags:
- mysql

View File

@ -21,7 +21,7 @@
owner: root
group: root
mode: "0644"
force: no
force: "{{ mysql_force_custom_config }}"
tags:
- mysql

View File

@ -22,6 +22,9 @@
- include: config_jessie.yml
when: ansible_distribution_release == "jessie"
- include: replication.yml
when: mysql_replication
- include: datadir.yml
- include: logdir.yml

View File

@ -0,0 +1,41 @@
---
# MySQL replication prerequisites: binlog/server_id configuration, the
# 'repl' user, and an xinetd health-check endpoint (port 8306) for HAProxy.

- name: 'Copy MySQL configuration for replication'
  template:
    src: 'replication.cnf.j2'
    dest: "{{ mysql_config_directory }}/zzzz-replication.cnf"
    mode: "0644"
  notify: 'restart mysql'

- name: 'Create repl user'
  mysql_user:
    name: 'repl'
    host: '%'
    # The provided value is a password hash, not a clear-text password.
    encrypted: true
    password: "{{ mysql_repl_password }}"
    priv: '*.*:REPLICATION SLAVE,REPLICATION CLIENT'
    # Never overwrite an existing password on later runs.
    update_password: 'on_create'
    state: 'present'
  register: create_repl_user
  # Only create the user when the operator supplied a hash.
  when: mysql_repl_password | length > 0

- name: 'Install xinetd'
  apt:
    name: 'xinetd'

- name: 'Add xinetd configuration for MySQL HAProxy check'
  copy:
    src: 'xinetd/mysqlchk'
    dest: '/etc/xinetd.d/'
    mode: '0644'
  notify: 'restart xinetd'

# /!\ Warning, this is a temporary hack
# (presumably remounts /usr read-write so the script below can land in
# /usr/share/scripts — see the remount-usr role)
- include_role:
    name: remount-usr

- name: 'Copy mysqlchk script'
  copy:
    src: 'xinetd/mysqlchk.sh'
    dest: '/usr/share/scripts/'
    mode: '0755'

View File

@ -29,9 +29,13 @@ query_cache_limit = {{ mysql_query_cache_limit }}
{% if mysql_query_cache_limit %}
query_cache_size = {{ mysql_query_cache_size }}
{% endif %}
{% if mysql_log_bin %}
log_bin = {{ mysql_log_bin }}
{% if mysql_max_allowed_packet %}
max_allowed_packet = {{ mysql_max_allowed_packet }}
{% endif %}
{% if mysql_server_id %}
server_id = {{ mysql_server_id }}
{% if mysql_lower_case_table_names %}
lower_case_table_names = {{ mysql_lower_case_table_names }}
{% endif %}
{% if mysql_innodb_log_file_size %}
innodb_log_file_size = {{ mysql_innodb_log_file_size }}
{% endif %}
read_only = {{ mysql_read_only }}

View File

@ -0,0 +1,8 @@
# {{ansible_managed}}
[mysqld]
{# log_bin is only emitted when a path is configured; binlogs stay off otherwise. #}
{% if mysql_log_bin %}
log_bin = {{ mysql_log_bin }}
{% endif %}
{# NOTE(review): server_id and binlog_format are rendered unconditionally;
   with the role default mysql_server_id: null this renders "server_id = None".
   Confirm a value is always set when mysql_replication is enabled. #}
server_id = {{ mysql_server_id }}
binlog_format = {{ mysql_binlog_format }}

View File

@ -4,9 +4,12 @@ nagios_nrpe_additional_allowed_hosts: []
nagios_nrpe_allowed_hosts: "{{ nagios_nrpe_default_allowed_hosts | union(nagios_nrpe_additional_allowed_hosts) | unique }}"
nagios_nrpe_pgsql_passwd: PGSQL_PASSWD
nagios_nrpe_amavis_from: "foobar@{{ ansible_domain }}"
nagios_nrpe_default_ntp_server: "pool.ntp.org"
nagios_nrpe_ntp_server: Null
nagios_nrpe_force_update_allowed_hosts: False
nagios_nrpe_check_proxy_host: "www.example.com"
nagios_plugins_directory: "/usr/local/lib/nagios/plugins"

View File

@ -65,7 +65,7 @@ $smtp->close();
print "$result\n";
if ($result =~/2.7.0 Ok, discarded, id=[^,]+ - INFECTED: Eicar-Test-Signature/) {
if ($result =~/2.7.0 Ok, discarded, id=\S+ - INFECTED: Eicar-Signature/) {
print "OK - All fine\n";
exit 0;
} else {

View File

@ -0,0 +1,289 @@
#!/usr/bin/env bash
# shellcheck disable=SC2028
set -euo pipefail
# This check_hpraid is a fork from check_cciss v0.15 written by Simone Rosa.
# Fork written by Evolix and for Evolix usage (Debian only).
# Usage of old tools and drivers were removed to use only the smartpqi or hpsa drivers and the ssacli tool from HP.
# Tools not used on Debian were also removed.
# Linting tool shellcheck was used to use a better bash coding style.
# Upstream at:
# https://gitea.evolix.org/evolix/ansible-roles/src/branch/stable/nagios-nrpe/files/plugins
# Source of the fork:
# https://exchange.nagios.org/directory/Plugins/Hardware/Storage-Systems/RAID-Controllers/check_cciss--2D-HP-and-Compaq-Smart-Array-Hardware-status/details
#
# Licence: GPLv2
# Description:
#
# This plugin checks hardware status for Smart Array Controllers,
# using HPE Smart Storage Administrator. It should support Debian 9 and over.
# (Array, controller, cache, battery, etc...)
#
# Known working RAID controllers:
#
# - Adaptec Smart Storage PQI 12G SAS/PCIe 3 (rev 01)
# | Smart Array P408i-a SR Gen10
# | Smart Array P408i-p SR Gen10
# | Smart Array E208i-a SR Gen10
#
#
# NOTE:
#
# You need to install the proprietary tool HPE Smart Storage Administrator (ssacli) from:
# https://downloads.linux.hpe.com/SDR/repo/mcp
# Also NRPE need to launch ssacli as root.
#
# Please add this line to /etc/sudoers :
# --------------------------------------------------
# nagios ALL=NOPASSWD: /usr/sbin/ssacli
#
# Examples:
#
# ./check_hpraid
# ----------------
# RAID OK
#
# ./check_hpraid -v
# -------------------
# RAID OK: Smart Array 6i in Slot 0 array A logicaldrive 1 (67.8 GB, RAID 1+0, OK)
# [Controller Status: OK Cache Status: OK Battery Status: OK]
#
# RAID CRITICAL - HP Smart Array Failed: Smart Array 6i in Slot 0 (Embedded) \
# array A logicaldrive 1 (33.9 GB, RAID 1, Interim Recovery Mode) \
# physicaldrive 1:0 (port 1:id 0 , Parallel SCSI, --- GB, Failed)
#
# RAID WARNING - HP Smart Array Rebuilding: Smart Array 6i in Slot 0 (Embedded) \
# array A logicaldrive 1 (33.9 GB, RAID 1, Recovering, 26% complete) \
# physicaldrive 1:0 (port 1:id 0 , Parallel SCSI, 36.4 GB, Rebuilding)
#
# ./check_hpraid -v -p
# --------------------
# RAID OK: Smart Array 6i in Slot 0 (Embedded) array A logicaldrive 1 (33.9 GB, RAID 1, OK)
# physicaldrive 2:0 (port 2:id 0 , Parallel SCSI, 36.4 GB, OK)
# physicaldrive 2:1 (port 2:id 1 , Parallel SCSI, 36.4 GB, OK)
# physicaldrive 1:5 (port 1:id 5 , Parallel SCSI, 72.8 GB, OK, spare)
# [Controller Status: OK Cache Status: OK Battery/Capacitor Status: OK]
#
# RAID CRITICAL - HP Smart Array Failed: Smart Array 6i in Slot 0 (Embedded) \
# array A logicaldrive 1 (33.9 GB, RAID 1, Interim Recovery Mode) \
# physicaldrive 1:0 (port 1:id 0 , Parallel SCSI, --- GB, Failed) \
# physicaldrive 1:1 (port 1:id 1 , Parallel SCSI, 36.4 GB, OK)
#
# RAID WARNING - HP Smart Array Rebuilding: Smart Array 6i in Slot 0 (Embedded) \
# array A logicaldrive 1 (33.9 GB, RAID 1, Recovering, 26% complete) \
# physicaldrive 1:0 (port 1:id 0 , Parallel SCSI, 36.4 GB, Rebuilding) \
# physicaldrive 1:1 (port 1:id 1 , Parallel SCSI, 36.4 GB, OK)
#
# ./check_hpraid -v -b
# ----------------
#
# RAID OK: Smart Array 6i in Slot 0 (Embedded) array A logicaldrive 1 (33.9 GB, RAID 1, OK) [Controller Status: OK]
#
# [instead of]
# RAID CRITICAL - HP Smart Array Failed: Smart Array 6i in Slot 0 (Embedded) \
# Controller Status: OK Cache Status: Temporarily Disabled \
# Battery/Capacitor Status: Failed (Replace Batteries/Capacitors)
# Plugin identity and defaults (flags are flipped by getopts below).
PROGNAME=$(basename "$0")
NAGIOS_PLUGINS="/usr/lib/nagios/plugins"
REVISION="0.16-evolix"
DEBUG="0"
VERBOSE="0"
# Path to the HPE Smart Storage Administrator CLI.
# NOTE(review): with `set -e` above, a missing ssacli makes this assignment
# fail and aborts the script before the friendlier error message further down.
ssacli=$(command -v ssacli)
PHYSICAL_DRIVE=0

# Standard Nagios helpers: print_revision, support, STATE_* exit codes.
# shellcheck source=/dev/null
. ${NAGIOS_PLUGINS}/utils.sh
# Print the one-screen usage summary (identical text, emitted via a
# single here-document instead of a chain of echo calls).
print_usage() {
    cat <<EOF

Usage: $PROGNAME [-v] [-p] [-e <number>] [-E <name>] [-b] [-s] [-d]
Usage: $PROGNAME [-h]
Usage: $PROGNAME [-V]

 -v = show status and informations about RAID
 -p = show detail for physical drives
 -e <number> = exclude slot number
 -b = exclude battery/capacitor/cache status check
 -d = use for debug (command line mode)
 -h = help information
 -V = version information

 =============
EOF
}
# Full help output: version banner (print_revision from utils.sh), the
# usage summary, a short description, then the standard Nagios support
# blurb. Always exits 0.
print_help() {
    print_revision "$PROGNAME" "$REVISION"
    echo ""
    print_usage
    echo ""
    echo "This plugin checks hardware status for Smart Array Controllers,"
    echo "using HPE Smart Storage Administrator."
    echo ""
    support
    exit 0
}
# Parse command-line options. -N and -c are accepted but ignored (kept
# for compatibility with generic Nagios invocations).
# NOTE(review): -s is declared in the optstring but has no case branch, so
# it falls through to *) and exits after printing usage — confirm intent.
while getopts "N:cvpbsde:Vh" options
do
    case $options in
        N) ;;
        c) ;;
        v) VERBOSE=1;;
        p) PHYSICAL_DRIVE=1;;
        d) DEBUG=1;;
        e) EXCLUDE_SLOT=1
           excludeslot="$OPTARG";;
        b) EXCLUDE_BATTERY=1;;
        V) print_revision "$PROGNAME" "$REVISION"
           exit 0;;
        h) print_help
           exit 0;;
        \?) print_usage
            exit 0;;
        *) print_usage
           exit 0;;
    esac
done

# Check if smartpqi or hpsa driver is loaded
# https://manpages.debian.org/buster/manpages/smartpqi.4.en.html
if [ -d /sys/bus/pci/drivers/smartpqi ] || [ -d /sys/bus/pci/drivers/hpsa ]; then
    driverPresent='YES.'
else
    driverPresent='No!'
fi
if [ "$DEBUG" = "1" ]; then
    echo "### Check if \"HP Smart Array\" driver is present >>>\n${driverPresent}\n"
fi
# Without one of the two drivers there is no controller to query.
if [[ "$driverPresent" == "No!" ]]; then
    echo "RAID UNKNOWN - HP Smart Array not found"
    exit "$STATE_UNKNOWN"
fi
# Check if "HP Array Utility CLI" is present.
# Bug fix: the original nested `if [ -x "$ssacli" ]` inside
# `if [ ! -x "$ssacli" ]` could never be true, so the debug
# "ssacli is present" branch was dead code. Equivalent live logic:
# fail when ssacli is missing, otherwise report presence in debug mode.
if [ "$DEBUG" = "1" ]; then
    echo "### Check if \"ssacli\" is present >>>\n"
fi
if [ ! -x "$ssacli" ]; then
    echo "ERROR: ssacli tools should be installed and with right sudoers/permissions (see the notes above)"
    exit "$STATE_UNKNOWN"
elif [ "$DEBUG" = "1" ]; then
    echo "### \"ssacli\" is present >>>\n"
fi
# Check if "HP Controller" work correctly: query the global status of all
# controllers once; the output is also the source of the slot list below.
# NOTE(review): with `set -e` at the top, a non-zero ssacli exit aborts the
# script at the assignment, so the explicit ${status} check may be
# unreachable — confirm.
check=$(sudo -u root "$ssacli" controller all show status 2>&1)
status=$?
if [ "$DEBUG" = "1" ]; then
    echo "### Check if \"HP Controller\" work correctly >>>\n""${check}""\n"
fi
if test ${status} -ne 0; then
    echo "RAID UNKNOWN - $ssacli did not execute properly : ""${check}"
    exit "$STATE_UNKNOWN"
fi

# Get "Slot" & exclude slot needed.
# NOTE(review): "Slot \w" matches a single word character, so slot numbers
# >= 10 would be truncated to their first digit — confirm against hardware.
EXCLUDE_SLOT=${EXCLUDE_SLOT:-0}
if [ "$EXCLUDE_SLOT" = "1" ]; then
    slots=$(grep -E -o "Slot \w" <<< "$check" | awk '{print $NF}' | grep -v "$excludeslot")
else
    slots=$(grep -E -o "Slot \w" <<< "$check" | awk '{print $NF}')
fi
if [ "$DEBUG" = "1" ]; then
    echo "### Get \"Slot\" & exclude slot not needed >>>\n""${slots}""\n"
fi
# Collect logical- and physical-drive status for every slot into $check2.
for slot in $slots; do
    # Get "logicaldrive" for slot.
    # set +e: ssacli exits non-zero on slots without logical drives and we
    # want to handle that ourselves instead of dying under `set -e`.
    set +e
    check2b=$(sudo -u root "$ssacli" controller slot="$slot" logicaldrive all show 2>&1)
    status=$?
    if test ${status} -ne 0; then
        # Skip empty slots.
        # NOTE(review): `break` stops scanning ALL remaining slots, not just
        # this one; `continue` was probably intended — confirm.
        if grep -q "The specified device does not have any logical drives." <<< "$check2b"; then
            break
        fi
        echo "RAID UNKNOWN - $ssacli did not execute properly : ""${check2b}"
        exit "$STATE_UNKNOWN"
    fi
    set -e
    # Accumulate across slots ($check2 may be unset on the first pass).
    check2=${check2:-}
    check2="$check2$check2b"
    if [ "$DEBUG" = "1" ]; then
        echo "### Get \"logicaldrive\" for slot >>>\n""${check2b}""\n"
    fi
    # Get "physicaldrive" for slot: all drives in verbose/debug mode,
    # otherwise only the ones reporting Failure/Failed/Rebuilding
    # (`|| true` keeps an empty grep from killing the script under set -e).
    if [ "$PHYSICAL_DRIVE" = "1" ] || [ "$DEBUG" = "1" ]; then
        check2b=$(sudo -u root "$ssacli" controller slot="$slot" physicaldrive all show | sed -e 's/\?/\-/g' 2>&1 | grep "physicaldrive")
    else
        check2b=$(sudo -u root "$ssacli" controller slot="$slot" physicaldrive all show | sed -e 's/\?/\-/g' 2>&1 | grep "physicaldrive" | (grep "\(Failure\|Failed\|Rebuilding\)" || true))
    fi
    # NOTE(review): $? here is the status of the final grep in the pipeline,
    # not of ssacli itself — confirm that is the intended check.
    status=$?
    if [ "$PHYSICAL_DRIVE" = "1" ] || [ "$DEBUG" = "1" ]; then
        if test ${status} -ne 0; then
            echo "RAID UNKNOWN - $ssacli did not execute properly : ""${check2b}"
            exit "$STATE_UNKNOWN"
        fi
    fi
    printf -v check2 "%s\n%s" "$check2" "$check2b"
    if [ "$DEBUG" = "1" ]; then
        echo "### Get \"physicaldrive\" for slot >>>\n""${check2b}""\n"
    fi
done
# Check STATUS: classify the collected controller output ($check) and
# drive output ($check2) into Nagios CRITICAL / WARNING / OK.
if [ "$DEBUG" = "1" ]; then
    echo "### Check STATUS >>>"
fi
# Omit battery/capacitor/cache status check if requested (-b): drop the
# matching lines so the patterns below cannot fire on them.
EXCLUDE_BATTERY=${EXCLUDE_BATTERY:-0}
if [ "$EXCLUDE_BATTERY" = "1" ]; then
    check=$(grep -v 'Battery/Capacitor Status: Failed (Replace Batteries/Capacitors)' <<< "$check")
    check=$(grep -v 'Cache Status: Temporarily Disabled' <<< "$check")
fi
# Guard against unset variables (set -u) when no slot produced output.
check=${check:-}
check2=${check2:-}
check3=${check3:-}
# Order matters: controller failures first, then drive failures, then
# the transient (rebuild/recover) and advisory conditions.
if grep -qiE Failed <<< "$check"; then
    echo "RAID CRITICAL - HP Smart Array Failed: ${check}"
    exit "$STATE_CRITICAL"
elif grep -qiE Disabled <<< "$check"; then
    echo "RAID CRITICAL - HP Smart Array Problem: ${check}"
    exit "$STATE_CRITICAL"
elif grep -qiE Failed <<< "$check2"; then
    echo "RAID CRITICAL - HP Smart Array Failed: ${check2}"
    exit "$STATE_CRITICAL"
elif grep -qiE Failure <<< "$check2"; then
    echo "RAID WARNING - Component Failure: ${check2}"
    exit "$STATE_WARNING"
elif grep -qiE Rebuild <<< "$check2"; then
    echo "RAID WARNING - HP Smart Array Rebuilding: ${check2}"
    exit "$STATE_WARNING"
elif grep -qiE Recover <<< "$check2"; then
    echo "RAID WARNING - HP Smart Array Recovering: ${check2}"
    exit "$STATE_WARNING"
elif grep -qiE "Cache Status: Temporarily Disabled" <<< "$check"; then
    echo "RAID WARNING - HP Smart Array Cache Disabled: ${check}"
    exit "$STATE_WARNING"
elif grep -qiE FIRMWARE <<< "$check"; then
    echo "RAID WARNING - ${check}"
    exit "$STATE_WARNING"
else
    # Nothing alarming: verbose mode echoes the detail, plain mode a one-liner.
    if [ "$DEBUG" = "1" ] || [ "$VERBOSE" = "1" ]; then
        check3=$(grep -E Status <<< "$check")
        printf "RAID OK: %s\n%s\n" "$check2" "$check3"
    else
        echo "RAID OK"
    fi
    exit "$STATE_OK"
fi
# Defensive fallthrough; normally unreachable because every branch exits.
exit "$STATE_UNKNOWN"

View File

@ -17,7 +17,7 @@ command[check_users]=/usr/lib/nagios/plugins/check_users -w 5 -c 10
# Generic services checks
command[check_smtp]=/usr/lib/nagios/plugins/check_smtp -H localhost
command[check_dns]=/usr/lib/nagios/plugins/check_dns -H evolix.net
command[check_ntp]=/usr/lib/nagios/plugins/check_ntp -H ntp2.evolix.net
command[check_ntp]=/usr/lib/nagios/plugins/check_ntp -H {{ nagios_nrpe_ntp_server or nagios_nrpe_default_ntp_server | mandatory }}
command[check_ssh]=/usr/lib/nagios/plugins/check_ssh localhost
command[check_mailq]=/usr/lib/nagios/plugins/check_mailq -M postfix -w 10 -c 20
@ -69,6 +69,7 @@ command[check_varnish]={{ nagios_plugins_directory }}/check_varnish_health -i 12
command[check_haproxy]=sudo {{ nagios_plugins_directory }}/check_haproxy_stats -s /run/haproxy/admin.sock -w 80 -c 90 --ignore-maint --ignore-nolb
command[check_minifirewall]=sudo {{ nagios_plugins_directory }}/check_minifirewall
command[check_redis_instances]={{ nagios_plugins_directory }}/check_redis_instances
command[check_hpraid]={{ nagios_plugins_directory }}/check_hpraid
# Check HTTP "many". Use this to check many websites (http, https, ports, sockets and SSL certificates).
# Beware! All checks must not take more than 10s!

View File

@ -22,3 +22,10 @@ nginx_evolinux_default_enabled: True
nginx_serverstatus_suffix: ""
nginx_serverstatus_suffix_file: "/etc/evolinux/nginx_serverstatus_suffix"
nginx_force_default_template: False
nginx_default_template_regular: "evolinux-default.conf.j2"
nginx_default_template_minimal: "evolinux-default.minimal.conf.j2"
nginx_service_state: started
nginx_service_enabled: True

View File

@ -12,9 +12,10 @@
- name: Copy default vhost
template:
src: evolinux-default.minimal.conf.j2
src: "{{ nginx_default_template_minimal }}"
dest: /etc/nginx/sites-available/evolinux-default.minimal.conf
mode: 0644
force: "{{ nginx_force_default_template | default(False) }}"
notify: reload nginx
tags:
- nginx

View File

@ -2,6 +2,10 @@
- include: packages.yml
- include: server_status_read.yml
tags:
- nginx
# TODO: find a way to override the main configuration
# without touching the main file
@ -89,10 +93,10 @@
- name: nginx vhost is installed
template:
src: evolinux-default.conf.j2
src: "{{ nginx_default_template_regular }}"
dest: /etc/nginx/sites-available/evolinux-default.conf
mode: "0640"
force: no
force: "{{ nginx_force_default_template | default(False) }}"
notify: reload nginx
tags:
- nginx
@ -108,7 +112,7 @@
tags:
- nginx
- include: server_status.yml
- include: server_status_write.yml
tags:
- nginx

View File

@ -7,7 +7,15 @@
apt:
name: "{{ nginx_package_name }}"
state: present
notify: restart nginx
tags:
- nginx
- packages
- name: Ensure nginx service is running as configured.
service:
name: nginx
state: "{{ nginx_service_state }}"
enabled: "{{ nginx_service_enabled }}"
tags:
- nginx
- packages

View File

@ -34,22 +34,3 @@
- debug:
var: nginx_serverstatus_suffix
verbosity: 1
- name: replace server-status suffix in default site index
replace:
dest: /var/www/index.html
regexp: '__SERVERSTATUS_SUFFIX__'
replace: "{{ nginx_serverstatus_suffix }}"
- name: add server-status suffix in default site index if missing
replace:
dest: /var/www/index.html
regexp: '"/nginx_status-?"'
replace: '"/nginx_status-{{ nginx_serverstatus_suffix }}"'
- name: add server-status suffix in default VHost
replace:
dest: /etc/nginx/sites-available/evolinux-default.conf
regexp: 'location /nginx_status-? {'
replace: 'location /nginx_status-{{ nginx_serverstatus_suffix }} {'
notify: reload nginx

View File

@ -0,0 +1,20 @@
---
# server_status_write.yml — writes the randomized server-status URL suffix
# into the default site index page and the default Nginx vhost.
# Assumes `nginx_serverstatus_suffix` is already defined by the caller
# (presumably set by server_status_read.yml, which main.yml includes
# earlier — TODO confirm).

# Substitute the literal placeholder left in the shipped index page.
- name: replace server-status suffix in default site index
  replace:
    dest: /var/www/index.html
    regexp: '__SERVERSTATUS_SUFFIX__'
    replace: "{{ nginx_serverstatus_suffix }}"

# Fallback when the placeholder is absent: rewrite a bare
# "/nginx_status" (with or without trailing dash) link so the suffix
# is appended. NOTE(review): not idempotent across suffix changes —
# an already-suffixed link no longer matches this regexp.
- name: add server-status suffix in default site index if missing
  replace:
    dest: /var/www/index.html
    regexp: '"/nginx_status-?"'
    replace: '"/nginx_status-{{ nginx_serverstatus_suffix }}"'

# Same rewrite for the `location` block of the default vhost; only a
# config change here requires nginx to pick it up, hence the notify.
- name: add server-status suffix in default VHost
  replace:
    dest: /etc/nginx/sites-available/evolinux-default.conf
    regexp: 'location /nginx_status-? {'
    replace: 'location /nginx_status-{{ nginx_serverstatus_suffix }} {'
  notify: reload nginx

View File

@ -59,6 +59,8 @@
- name: Set folder permissions to 0750
file:
path: "/etc/opendkim/"
owner: opendkim
group: opendkim
mode: "0750"
force: yes
tags:

View File

@ -5,6 +5,13 @@
when:
- ansible_distribution != "Debian" or ansible_distribution_major_version is version('8', '<')
- name: Additional packages are installed
apt:
name:
- zip
- unzip
state: present
- name: install info.php
copy:
src: info.php

View File

@ -5,5 +5,5 @@ DOSSiteCount 30
DOSPageInterval 3
DOSSiteInterval 1
DOSBlockingPeriod 60
DOSEmailNotify {{ general_alert_email }}
#DOSEmailNotify {{ general_alert_email }}
</IfModule>

View File

@ -23,7 +23,7 @@
dest: "{{ php_apache_defaults_ini_file }}"
section: PHP
option: disable_functions
value: "exec,shell-exec,system,passthru,putenv,popen"
value: "exec,shell-exec,system,passthru,popen"
mode: "0644"
- name: Custom php.ini

View File

@ -24,7 +24,7 @@
dest: "{{ php_fpm_defaults_ini_file }}"
section: PHP
option: disable_functions
value: "exec,shell-exec,system,passthru,putenv,popen"
value: "exec,shell-exec,system,passthru,popen"
notify: "restart {{ php_fpm_service_name }}"
- name: Custom php.ini for FPM

View File

@ -27,6 +27,7 @@
- php-mysql
# php-mcrypt is no longer packaged for PHP 7.2
- php-pgsql
- php-sqlite3
- php-gettext
- php-curl
- php-ssh2
@ -76,7 +77,7 @@
- include: config_fpm.yml
when: php_fpm_enable
- name: Enforce permissions on PHP fpm directory
file:
dest: /etc/php/7.3/fpm
@ -85,7 +86,7 @@
- include: config_apache.yml
when: php_apache_enable
- name: Enforce permissions on PHP apache2 directory
file:
dest: /etc/php/7.3/apache2

View File

@ -26,6 +26,7 @@
- php5-mcrypt
- "{{ php_modules_mysqlnd | bool | ternary('php5-mysqlnd','php5-mysql') }}"
- php5-pgsql
- php5-sqlite
- php-gettext
- php5-intl
- php5-curl

View File

@ -26,6 +26,7 @@
- php-ldap
- "{{ php_modules_mysqlnd | bool | ternary('php-mysqlnd','php-mysql') }}"
- php-pgsql
- php-sqlite3
# php-mcrypt is no longer packaged for PHP 7.2
- php-gettext
- php-curl
@ -77,7 +78,7 @@
- include: config_fpm.yml
when: php_fpm_enable
- name: Enforce permissions on PHP fpm directory
file:
dest: /etc/php/7.0/fpm
@ -86,7 +87,7 @@
- include: config_apache.yml
when: php_apache_enable
- name: Enforce permissions on PHP apache2 directory
file:
dest: /etc/php/7.0/apache2

View File

@ -1,5 +1,11 @@
---
- name: Verify Redis port
assert:
that:
- redis_port != 6379
msg: "If you want to use port 6379, use the default instance, not a named instance."
- name: "Instance '{{ redis_instance_name }}' group is present"
group:
name: "redis-{{ redis_instance_name }}"

View File

@ -50,6 +50,7 @@
line: 'nagios ALL = NOPASSWD: {{ redis_check_redis_path }}'
insertafter: '^nagios'
validate: "visudo -cf %s"
create: yes
when: redis_instance_name is defined
tags:
- redis

View File

@ -22,7 +22,7 @@ UMask=007
PrivateTmp=yes
LimitNOFILE=65535
PrivateDevices=yes
ProtectHome={{ redis_data_dir_prefix | match('/home') | ternary('no', 'yes') }}
ProtectHome={{ redis_data_dir_prefix is match('/home') | ternary('no', 'yes') }}
ReadOnlyDirectories=/
ReadWriteDirectories=-{{ redis_data_dir_prefix }}-%i
ReadWriteDirectories=-{{ redis_log_dir_prefix }}-%i

View File

@ -7,7 +7,7 @@
^hwraid\.le-vert\.net$
^.*\.clamav\.net$
^spamassassin\.apache\.org$
^.*\.sa-update.*$
^.*sa-update.*$
^pear\.php\.net$
^repo\.mysql\.com$
^deb\.nodesource\.com$

View File

@ -7,7 +7,7 @@ http://www.kernel.org/.*
http://hwraid.le-vert.net/.*
http://.*.clamav.net/.*
http://spamassassin.apache.org/.*
http://.*.sa-update.*
http://.*sa-update.*
http://pear.php.net/.*
http://repo.mysql.com/.*

View File

@ -1,7 +1,7 @@
---
varnish_addresses:
- 0.0.0.0:80
- 0.0.0.0:80
varnish_management_address: localhost:6082

View File

@ -2,6 +2,6 @@
[Service]
ExecStart=
ExecStart=/usr/sbin/varnishd -F -a {{ varnish_addresses | join(',') }} -T {{ varnish_management_address }} -f {{ varnish_config_file }} -S {{ varnish_secret_file }} -s {{ varnish_storage }} -p thread_pools={{ varnish_thread_pools }} -p thread_pool_add_delay={{ varnish_thread_pool_add_delay }} -p thread_pool_min={{ varnish_thread_pool_min }} -p thread_pool_max={{ varnish_thread_pool_max }}
ExecStart=/usr/sbin/varnishd -F {{ varnish_addresses | map('regex_replace', '^(.*)$', '-a \\1') | list | join(' ') }} -T {{ varnish_management_address }} -f {{ varnish_config_file }} -S {{ varnish_secret_file }} -s {{ varnish_storage }} -p thread_pools={{ varnish_thread_pools }} -p thread_pool_add_delay={{ varnish_thread_pool_add_delay }} -p thread_pool_min={{ varnish_thread_pool_min }} -p thread_pool_max={{ varnish_thread_pool_max }}
ExecReload=
ExecReload=/etc/varnish/reload-vcl.sh