Merge branch 'unstable' into packweb-multi-php2

This commit is contained in:
Mathieu Trossevin 2020-10-19 14:13:06 +02:00
commit 672cb8a4ef
Signed by: mtrossevin
GPG key ID: D1DBB7EA828374E9
76 changed files with 2294 additions and 468 deletions

View file

@ -12,49 +12,95 @@ The **patch** part changes incrementally at each release.
### Added
* certbot: detect HAProxy cert directory
* haproxy: add deny_ips file to reject connections
* haproxy: add some comments to default config
* haproxy: enable stats frontend with access lists
* haproxy: preconfigure SSL with defaults
* lxc-php: Install php-sqlite by default
* lxc-php: Don't disable putenv() by default in PHP settings
* mysql: activate binary logs by specifying log_bin path
* mysql: specify a custom server_id
* mysql: option to define as read only
* nginx: make default vhost configurable
* packweb-apache: Install zip & unzip by default
* php: Install php-sqlite by default
* php: Don't disable putenv() by default in PHP settings
* nextcloud: New role to setup a nextcloud instance
### Changed
* lxc-php: Do --no-install-recommends for ssmtp/opensmtpd
* packweb-apache: Don't turn on mod-evasive emails by default
* haproxy: deport SSL tuning to Mozilla SSL generator
* haproxy: chroot and socket path are configurable
* haproxy: adapt backports installed package list to distribution
* haproxy: split stats variables
* nginx: read server-status values before changing the config
* redis: create sudoers file if missing
* redis: new syntax for match filter
* redis: raise an error if port 6379 is used in "instance" mode
* evoacme: upstream release 20.06.1
* evoacme: read values from environment before defaults file
* certbot: install certbot dependencies non-interactively for jessie
### Fixed
* certbot: restore compatibility with old Nginx
* lxc-php: Install opensmtpd as intended
* mongodb: fix logrotate pattern on Debian buster
* evobackup-client: fixed the ssh connection test
* varnish: fix start command when multiple addresses are present
### Removed
### Security
## [10.2.0] 2020-09-17
### Added
* evoacme: remount /usr if necessary
* evolinux-base: swappiness is customizable
* evolinux-base: install wget
* tomcat: root directory owner/group are configurable
### Changed
* Change default public SSH/SFTP port from 2222 to 22222
### Fixed
* certbot: an empty change shouldn't raise an exception
* certbot: fix "no-self-upgrade" option
### Removed
* evoacme: remove Debian 9 support
## [10.1.0] 2020-08-21
### Added
* certbot: detect HAProxy cert directory
* filebeat: allow using a template
* generate-ldif: add NVMe disk support
* haproxy: add deny_ips file to reject connections
* haproxy: add some comments to default config
* haproxy: enable stats frontend with access lists
* haproxy: preconfigure SSL with defaults
* lxc-php: Don't disable putenv() by default in PHP settings
* lxc-php: Install php-sqlite by default
* metricbeat: allow using a template
* mysql: activate binary logs by specifying log_bin path
* mysql: option to define as read only
* mysql: specify a custom server_id
* nagios-nrpe/evolinux-base: brand new check for hardware raid on HP servers gen 10
* nginx: make default vhost configurable
* packweb-apache: Install zip & unzip by default
* php: Don't disable putenv() by default in PHP settings
* php: Install php-sqlite by default
### Changed
* certbot: fix haproxy hook (ssl cert directory detection)
* certbot: install certbot dependencies non-interactively for jessie
* elasticsearch: configure cluster with seed hosts and initial masters
* elasticsearch: set tmpdir before datadir
* evoacme: read values from environment before defaults file
* evoacme: update for new certbot role
* evoacme: upstream release 20.08
* haproxy: adapt backports installed package list to distribution
* haproxy: chroot and socket path are configurable
* haproxy: deport SSL tuning to Mozilla SSL generator
* haproxy: rotate logs with date extension and immediate compression
* haproxy: split stats variables
* lxc-php: Do --no-install-recommends for ssmtp/opensmtpd
* mongodb: install custom munin plugins
* nginx: read server-status values before changing the config
* packweb-apache: Don't turn on mod-evasive emails by default
* redis: create sudoers file if missing
* redis: new syntax for match filter
* redis: raise an error if port 6379 is used in "instance" mode
### Fixed
* certbot: restore compatibility with old Nginx
* evobackup-client: fixed the ssh connection test
* generate-ldif: better detection of computerOS field
* generate-ldif: skip some odd ethernet devices
* lxc-php: Install opensmtpd as intended
* mongodb: fix logrotate pattern on Debian buster
* nagios-nrpe: check_amavis: updated regex
* squid: better regex to match sa-update domains
* varnish: fix start command when multiple addresses are present
## [10.0.0] - 2020-05-13
### Added

View file

@ -122,6 +122,10 @@ ec2_evolinux_security_group:
from_port: 2222
to_port: 2222
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22222
to_port: 22222
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 2223
to_port: 2223

View file

@ -8,4 +8,4 @@
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
0 */12 * * * root test -x /usr/local/bin/certbot && perl -e 'sleep int(rand(3600))' && /usr/local/bin/certbot --no-self-update -q renew
0 */12 * * * root test -x /usr/local/bin/certbot && perl -e 'sleep int(rand(3600))' && /usr/local/bin/certbot --no-self-upgrade -q renew

View file

@ -36,7 +36,7 @@ cert_and_key_mismatch() {
}
detect_haproxy_cert_dir() {
# get last field or line which defines the crt directory
config_cert_dir=$(grep -r -o -E -h '^\s*bind .* crt /etc/.+\b' "${haproxy_config_file}" | head -1 | awk '{ print $(NF)}')
config_cert_dir=$(grep -r -o -E -h '^\s*bind .* crt /etc/\S+' "${haproxy_config_file}" | head -1 | awk '{ print $(NF)}')
if [ -n "${config_cert_dir}" ]; then
debug "Cert directory is configured with ${config_cert_dir}"
echo "${config_cert_dir}"

View file

@ -22,7 +22,7 @@ main() {
message="[letsencrypt] certificates renewal (${RENEWED_DOMAINS})"
${git_bin} commit --message "${message}" --quiet
else
error "Weird, nothing has changed but the hook has been executed for '${RENEWED_DOMAINS}'"
debug "Weird, nothing has changed but the hook has been executed for '${RENEWED_DOMAINS}'"
fi
fi
}

View file

@ -5,9 +5,12 @@ elasticsearch_cluster_name: Null
elasticsearch_cluster_members: Null
elasticsearch_minimum_master_nodes: Null
elasticsearch_node_name: "${HOSTNAME}"
elasticsearch_network_host: "[_local_]"
elasticsearch_network_host:
- "_local_"
elasticsearch_network_publish_host: Null
elasticsearch_http_publish_host: Null
elasticsearch_discovery_seed_hosts: Null
elasticsearch_cluster_initial_master_nodes: Null
elasticsearch_custom_datadir: Null
elasticsearch_custom_tmpdir: Null
elasticsearch_default_tmpdir: /var/lib/elasticsearch/tmp

View file

@ -14,6 +14,7 @@ galaxy_info:
versions:
- jessie
- stretch
- buster
galaxy_tags: []
# List tags for your role here, one per line. A tag is

View file

@ -22,7 +22,7 @@
- name: Configure network host
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "network.host: {{ elasticsearch_network_host }}"
line: "network.host: {{ elasticsearch_network_host }}"
regexp: "^network.host:"
insertafter: "^# *network.host:"
when: elasticsearch_network_host|default("", True)
@ -32,7 +32,7 @@
- name: Configure network publish_host
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "network.publish_host: {{ elasticsearch_network_publish_host }}"
line: "network.publish_host: {{ elasticsearch_network_publish_host }}"
regexp: "^network.publish_host:"
insertafter: "^network.host:"
when: elasticsearch_network_publish_host|default("", True)
@ -42,13 +42,31 @@
- name: Configure http publish_host
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "http.publish_host: {{ elasticsearch_http_publish_host }}"
line: "http.publish_host: {{ elasticsearch_http_publish_host }}"
regexp: "^http.publish_host:"
insertafter: "^http.port:"
when: elasticsearch_http_publish_host|default("", True)
tags:
- config
- name: Configure discovery seed hosts
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "discovery.seed_hosts: {{ elasticsearch_discovery_seed_hosts | to_yaml }}"
regexp: "^discovery.seed_hosts:"
when: elasticsearch_discovery_seed_hosts
tags:
- config
- name: Configure initial master nodes
lineinfile:
dest: /etc/elasticsearch/elasticsearch.yml
line: "cluster.initial_master_nodes: {{ elasticsearch_cluster_initial_master_nodes | to_yaml }}"
regexp: "^cluster.initial_master_nodes:"
when: elasticsearch_cluster_initial_master_nodes
tags:
- config
- name: Configure RESTART_ON_UPGRADE
lineinfile:
dest: /etc/default/elasticsearch
@ -93,5 +111,3 @@
when: elasticsearch_minimum_master_nodes|default("", True)
tags:
- config

View file

@ -6,10 +6,10 @@
- include: bootstrap_checks.yml
- include: datadir.yml
- include: tmpdir.yml
- include: datadir.yml
- include: logs.yml
- include: additional_scripts.yml

View file

@ -5,7 +5,7 @@ evoacme_dhparam_size: 2048
evoacme_acme_dir: /var/lib/letsencrypt
evoacme_csr_dir: /etc/ssl/requests
evoacme_crt_dir: /etc/letsencrypt
evoacme_hooks_dir: "{{ evoacme_crt_dir }}/hooks"
evoacme_hooks_dir: "{{ evoacme_crt_dir }}/renewal-hooks/deploy"
evoacme_log_dir: /var/log/evoacme
evoacme_ssl_minday: 30
evoacme_ssl_ct: 'FR'

View file

@ -285,7 +285,7 @@ main() {
export EVOACME_FULLCHAIN="${LIVE_FULLCHAIN}"
# search for files in hooks directory
for hook in $(find ${HOOKS_DIR} -type f); do
for hook in $(find ${HOOKS_DIR} -type f -executable | sort); do
# keep only executable files, not containing a "."
if [ -x "${hook}" ] && (basename "${hook}" | grep -vqF "."); then
debug "Executing ${hook}"
@ -303,7 +303,7 @@ readonly QUIET=${QUIET:-"0"}
readonly TEST=${TEST:-"0"}
readonly DRY_RUN=${DRY_RUN:-"0"}
readonly VERSION="20.06.1"
readonly VERSION="20.08"
# Read configuration file, if it exists
[ -r /etc/default/evoacme ] && . /etc/default/evoacme
@ -314,7 +314,7 @@ readonly ACME_DIR=${ACME_DIR:-"/var/lib/letsencrypt"}
readonly CSR_DIR=${CSR_DIR:-"/etc/ssl/requests"}
readonly CRT_DIR=${CRT_DIR:-"/etc/letsencrypt"}
readonly LOG_DIR=${LOG_DIR:-"/var/log/evoacme"}
readonly HOOKS_DIR=${HOOKS_DIR:-"${CRT_DIR}/hooks"}
readonly HOOKS_DIR=${HOOKS_DIR:-"${CRT_DIR}/renewal-hooks/deploy"}
readonly SSL_MINDAY=${SSL_MINDAY:-"30"}
readonly SSL_EMAIL=${SSL_EMAIL:-""}

View file

@ -1,18 +0,0 @@
#!/bin/sh

# Renewal hook: commit any change under /etc/letsencrypt to the git
# repository that tracks /etc (etckeeper-style, GIT_DIR=/etc/.git).
# NOTE(review): RENEWED_DOMAINS is presumably exported by the hook
# runner (certbot/evoacme) — confirm against the caller.

git_bin=$(command -v git)
letsencrypt_dir=/etc/letsencrypt

# Point git at the repository that versions /etc
export GIT_DIR="/etc/.git"
export GIT_WORK_TREE="/etc"

# Do nothing unless git is available and /etc is actually under version control
if test -x "${git_bin}" && test -d "${GIT_DIR}" && test -d "${GIT_WORK_TREE}"; then
# Count modified/untracked entries under /etc/letsencrypt only
changed_lines=$(${git_bin} status --porcelain -- ${letsencrypt_dir} | wc -l | tr -d ' ')
if [ "${changed_lines}" != "0" ]; then
${git_bin} add --all ${letsencrypt_dir}
message="[letsencrypt] certificates renewal (${RENEWED_DOMAINS})"
${git_bin} commit --message "${message}" --quiet
else
# Hook fired but nothing changed under /etc/letsencrypt
echo "Weird, nothing has changed but the hook has been executed for '${RENEWED_DOMAINS}'"
fi
fi

View file

@ -1,30 +0,0 @@
#!/bin/sh

# Renewal hook: reload Apache after a certificate renewal, but only if
# Apache is running and its configuration passes the syntax check.

readonly PROGNAME=$(basename "$0")
# shellcheck disable=SC2124,SC2034
readonly ARGS=$@
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}

# Print a message to stderr and abort the script
error() {
>&2 echo "${PROGNAME}: $1"
exit 1
}

# Print a message to stderr, only when VERBOSE=1 and QUIET is not 1
debug() {
if [ "${VERBOSE}" = "1" ] && [ "${QUIET}" != "1" ]; then
>&2 echo "${PROGNAME}: $1"
fi
}

if [ -n "$(pidof apache2)" ]; then
# "if $(cmd)" runs cmd in a substitution; with no stdout the exit
# status of the substitution is used, i.e. the result of
# "apache2ctl -t" (hence the SC2091 suppression).
# shellcheck disable=SC2091
if $($(command -v apache2ctl) -t 2> /dev/null); then
debug "Apache detected... reloading"
service apache2 reload
else
error " Apache config is broken, you must fix it !"
fi
else
debug "Apache is not running. Skip."
fi

View file

@ -1,35 +0,0 @@
#!/bin/sh

# Renewal hook: reload Dovecot after a certificate renewal, but only if
# Dovecot is running, its configuration is valid, and its ssl_cert
# setting points to a "letsencrypt" path.

readonly PROGNAME=$(basename "$0")
# shellcheck disable=SC2124,SC2034
readonly ARGS=$@
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}

# Print a message to stderr and abort the script
error() {
>&2 echo "${PROGNAME}: $1"
exit 1
}

# Print a message to stderr, only when VERBOSE=1 and QUIET is not 1
debug() {
if [ "${VERBOSE}" = "1" ] && [ "${QUIET}" != "1" ]; then
>&2 echo "${PROGNAME}: $1"
fi
}

if [ -n "$(pidof dovecot)" ]; then
# "if $(cmd)" uses the substitution's exit status when stdout is
# empty/discarded (hence the SC2091 suppressions).
# shellcheck disable=SC2091
if $($(command -v doveconf) > /dev/null); then
# Only reload when ssl_cert (not ssl_cert_key) mentions letsencrypt
# shellcheck disable=SC2091
if $($(command -v doveconf)|grep -E "^ssl_cert[^_]"|grep -q "letsencrypt"); then
debug "Dovecot detected... reloading"
service dovecot reload
else
debug "Dovecot doesn't use Let's Encrypt certificate. Skip."
fi
else
error "Dovecot config is broken, you must fix it !"
fi
else
debug "Dovecot is not running. Skip."
fi

View file

@ -1,30 +0,0 @@
#!/bin/sh

# Renewal hook: reload Nginx after a certificate renewal, but only if
# Nginx is running and its configuration passes "nginx -t".

readonly PROGNAME=$(basename "$0")
# shellcheck disable=SC2124,SC2034
readonly ARGS=$@
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}

# Print a message to stderr and abort the script
error() {
>&2 echo "${PROGNAME}: $1"
exit 1
}

# Print a message to stderr, only when VERBOSE=1 and QUIET is not 1
debug() {
if [ "${VERBOSE}" = "1" ] && [ "${QUIET}" != "1" ]; then
>&2 echo "${PROGNAME}: $1"
fi
}

if [ -n "$(pidof nginx)" ]; then
# "if $(cmd)" uses the substitution's exit status when stdout is
# empty, i.e. the result of "nginx -t" (hence the SC2091 suppression).
# shellcheck disable=SC2091
if $($(command -v nginx) -t 2> /dev/null); then
debug "Nginx detected... reloading"
service nginx reload
else
error "Nginx config is broken, you must fix it !"
fi
else
debug "Nginx is not running. Skip."
fi

View file

@ -1,35 +0,0 @@
#!/bin/sh

# Renewal hook: reload Postfix after a certificate renewal, but only if
# Postfix is running (pidof master), its configuration is readable by
# postconf, and smtpd_tls_cert_file points to a "letsencrypt" path.

readonly PROGNAME=$(basename "$0")
# shellcheck disable=SC2124,SC2034
readonly ARGS=$@
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}

# Print a message to stderr and abort the script
error() {
>&2 echo "${PROGNAME}: $1"
exit 1
}

# Print a message to stderr, only when VERBOSE=1 and QUIET is not 1
debug() {
if [ "${VERBOSE}" = "1" ] && [ "${QUIET}" != "1" ]; then
>&2 echo "${PROGNAME}: $1"
fi
}

if [ -n "$(pidof master)" ]; then
# "if $(cmd)" uses the substitution's exit status when stdout is
# empty/discarded (hence the SC2091 suppressions).
# shellcheck disable=SC2091
if $($(command -v postconf) > /dev/null); then
# shellcheck disable=SC2091
if $($(command -v postconf)|grep -E "^smtpd_tls_cert_file"|grep -q "letsencrypt"); then
debug "Postfix detected... reloading"
service postfix reload
else
debug "Postfix doesn't use Let's Encrypt certificate. Skip."
fi
else
error "Postfix config is broken, you must fix it !"
fi
else
debug "Postfix is not running. Skip."
fi

View file

@ -265,7 +265,7 @@ readonly ARGS=$@
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
readonly VERSION="20.06.1"
readonly VERSION="20.08"
# Read configuration file, if it exists
[ -r /etc/default/evoacme ] && . /etc/default/evoacme

View file

@ -170,7 +170,7 @@ readonly ARGS=$@
readonly VERBOSE=${VERBOSE:-"0"}
readonly QUIET=${QUIET:-"0"}
readonly VERSION="20.06.1"
readonly VERSION="20.08"
readonly SRV_IP=${SRV_IP:-""}

View file

@ -11,8 +11,8 @@ galaxy_info:
platforms:
- name: Debian
versions:
- jessie
- stretch
- buster
dependencies: []
# List your role dependencies here, one per line.

View file

@ -1,61 +0,0 @@
---
# Create the dedicated "acme" system user and group, give them ownership
# of the evoacme directories (certificates, hooks, logs, ACME challenges)
# and route mail for the "acme" alias to root.

- name: Create acme group
  group:
    name: acme
    state: present

- name: Create acme user
  user:
    name: acme
    group: acme
    state: present
    createhome: no
    home: "{{ evoacme_acme_dir }}"
    shell: /bin/false
    system: yes

- name: Fix crt dir's right
  file:
    path: "{{ evoacme_crt_dir }}"
    mode: "0755"
    owner: acme
    group: acme
    state: directory

- name: "Fix hooks directory permissions"
  file:
    path: "{{ evoacme_hooks_dir }}"
    mode: "0700"
    owner: acme
    group: acme
    state: directory

- name: Fix log dir's right
  file:
    path: "{{ evoacme_log_dir }}"
    mode: "0755"
    owner: acme
    group: acme
    state: directory

- name: Fix challenge dir's right
  file:
    path: "{{ evoacme_acme_dir }}"
    mode: "0755"
    owner: acme
    group: acme
    state: directory

# /etc/aliases may be absent on hosts without a local MTA
- name: Is /etc/aliases present?
  stat:
    path: /etc/aliases
  register: etc_aliases

- name: Set acme aliases
  lineinfile:
    state: present
    dest: /etc/aliases
    line: 'acme: root'
    regexp: 'acme:'
  when: etc_aliases.stat.exists
  notify: "newaliases"

View file

@ -1,25 +0,0 @@
- name: Create conf dirs
file:
path: "/etc/apache2/{{ item }}"
state: directory
with_items:
- 'conf-available'
- 'conf-enabled'
- name: Copy acme challenge conf
template:
src: templates/apache.conf.j2
dest: /etc/apache2/conf-available/letsencrypt.conf
owner: root
group: root
mode: "0644"
notify: reload apache2
- name: Enable acme challenge conf
file:
src: /etc/apache2/conf-available/letsencrypt.conf
dest: /etc/apache2/conf-enabled/letsencrypt.conf
state: link
owner: root
group: root
notify: reload apache2

View file

@ -1,45 +1,20 @@
---
- name: Use backports for jessie
block:
- name: install jessie-backports
include_role:
name: evolix/apt
tasks_from: backports.yml
- name: Add exceptions for certbot dependencies
copy:
src: backports-certbot
dest: /etc/apt/preferences.d/z-backports-certbot
notify: apt update
- meta: flush_handlers
when: ansible_distribution_release == "jessie"
- name: Install certbot with apt
apt:
name: certbot
state: latest
- include_role:
name: evolix/certbot
- include_role:
name: evolix/remount-usr
- name: Remove certbot symlink for apt install
file:
path: /usr/local/bin/certbot
state: absent
- name: Disable /etc/cron.d/certbot
command: mv /etc/cron.d/certbot /etc/cron.d/certbot.disabled
command: mv -f /etc/cron.d/certbot /etc/cron.d/certbot.disabled
args:
removes: /etc/cron.d/certbot
creates: /etc/cron.d/certbot.disabled
- name: Disable /etc/cron.daily/certbot
command: mv /etc/cron.daily/certbot /etc/cron.daily/certbot.disabled
command: mv -f /etc/cron.daily/certbot /etc/cron.daily/certbot.disabled
args:
removes: /etc/cron.daily/certbot
creates: /etc/cron.daily/certbot.disabled
- name: Install evoacme custom cron
copy:

View file

@ -1,5 +1,10 @@
---
- name: "Create {{ hook_name }} hook directory"
file:
dest: "{{ evoacme_hooks_dir }}"
state: directory
- name: "Search for {{ hook_name }} hook"
command: "find {{ evoacme_hooks_dir }} -type f \\( -name '{{ hook_name }}' -o -name '{{ hook_name }}.*' \\)"
check_mode: no

View file

@ -1,42 +1,22 @@
---
- fail:
msg: only compatible with Debian >= 8
when:
- ansible_distribution != "Debian" or ansible_distribution_major_version is version('8', '<')
- name: Verify Debian version
assert:
that:
- ansible_distribution == "Debian"
- ansible_distribution_major_version is version('9', '>=')
msg: only compatible with Debian >= 9
- include: certbot.yml
- include: acme.yml
- include: permissions.yml
- include: evoacme_hook.yml
vars:
hook_name: "{{ item }}"
with_items:
- reload_apache
- reload_nginx
- reload_dovecot
- reload_postfix
- commit
# Enable this task if you want to deploy hooks
# - include: evoacme_hook.yml
# vars:
# hook_name: "{{ item }}"
# loop: []
- include: conf.yml
- include: scripts.yml
- name: Determine Apache presence
stat:
path: /etc/apache2/apache2.conf
check_mode: no
register: sta
- name: Determine Nginx presence
stat:
path: /etc/nginx/nginx.conf
check_mode: no
register: stn
- include: apache.yml
when: sta.stat.isreg is defined and sta.stat.isreg
- include: nginx.yml
when: stn.stat.isreg is defined and stn.stat.isreg

View file

@ -1,35 +0,0 @@
---
# Install the Let's Encrypt ACME challenge snippet for Nginx and keep a
# compatibility symlink for vhosts that still include the old path.

# Typo fixed in task name: "missplaced" -> "misplaced"
- name: move acme challenge conf if misplaced
  command: mv /etc/nginx/letsencrypt.conf /etc/nginx/snippets/letsencrypt.conf
  args:
    removes: /etc/nginx/letsencrypt.conf
    creates: /etc/nginx/snippets/letsencrypt.conf

- name: Copy acme challenge conf
  template:
    src: templates/nginx.conf.j2
    dest: /etc/nginx/snippets/letsencrypt.conf
    owner: root
    group: root
    mode: "0644"

# rc == 0 means at least one vhost still references the old path
- name: look for old path
  command: grep -r /etc/nginx/letsencrypt.conf /etc/nginx
  changed_when: False
  failed_when: False
  check_mode: no
  register: grep_letsencrypt_old_path

- name: Keep a symlink for vhosts with old path
  file:
    src: /etc/nginx/snippets/letsencrypt.conf
    dest: /etc/nginx/letsencrypt.conf
    state: link
  when: grep_letsencrypt_old_path.rc == 0

# rc == 1 means grep found no match: the symlink is no longer needed
- name: Remove symlink if no vhost with old path
  file:
    dest: /etc/nginx/letsencrypt.conf
    state: absent
  when: grep_letsencrypt_old_path.rc == 1

View file

@ -0,0 +1,33 @@
---
# Reset ownership (root:root) and permissions of the certbot/evoacme
# directories: certificates, deploy hooks, logs and ACME challenges.

- name: Fix crt directory permissions
  file:
    path: "{{ evoacme_crt_dir }}"
    mode: "0755"
    owner: root
    group: root
    state: directory

# Hooks may contain credentials/commands: keep them root-only (0700)
- name: "Fix hooks directory permissions"
  file:
    path: "{{ evoacme_hooks_dir }}"
    mode: "0700"
    owner: root
    group: root
    state: directory

- name: Fix log directory permissions
  file:
    path: "{{ evoacme_log_dir }}"
    mode: "0755"
    owner: root
    group: root
    state: directory

- name: Fix challenge directory permissions
  file:
    path: "{{ evoacme_acme_dir }}"
    mode: "0755"
    owner: root
    group: root
    state: directory

View file

@ -1,4 +1,8 @@
---
- include_role:
name: evolix/remount-usr
- name: Create CSR dir
file:
path: "{{ evoacme_csr_dir }}"
@ -36,5 +40,5 @@
path: "/usr/local/bin/{{ item }}"
state: absent
with_items:
- 'make-csr'
- 'evoacme'
- 'make-csr'
- 'evoacme'

View file

@ -4,5 +4,6 @@ SSL_KEY_DIR=${SSL_KEY_DIR:-{{ evoacme_ssl_key_dir }}}
ACME_DIR=${ACME_DIR:-{{ evoacme_acme_dir }}}
CSR_DIR=${CSR_DIR:-{{ evoacme_csr_dir }}}
CRT_DIR=${CRT_DIR:-{{ evoacme_crt_dir }}}
HOOKS_DIR=${HOOKS_DIR:-"{{ evoacme_hooks_dir }}"}
LOG_DIR=${LOG_DIR:-{{ evoacme_log_dir }}}
SSL_MINDAY=${SSL_MINDAY:-{{ evoacme_ssl_minday }}}

View file

@ -50,7 +50,8 @@ evolinux_kernel_include: True
evolinux_kernel_reboot_after_panic: True
evolinux_kernel_disable_tcp_timestamps: True
evolinux_kernel_reduce_swapiness: True
evolinux_kernel_customize_swappiness: True
evolinux_kernel_swappiness: 20
evolinux_kernel_cve20165696: True
# fstab
@ -214,3 +215,6 @@ evolinux_listupgrade_include: True
# Generate ldif
evolinux_generateldif_include: True
# Cron check_hpraid
evolinux_cron_checkhpraid_frequency: daily

View file

@ -0,0 +1,91 @@
#!/usr/bin/env bash
set -euo pipefail

# This script is meant to be executed as a cron by executing Nagios
# NRPE plugin check_hpraid and notify by mail any errors.
#
# It keeps the previous report in $TMPDIR/check_hpraid_last and only
# alerts when the plugin fails or when the RAID state changes between
# two runs.

TMPDIR=/tmp
md5sum=$(command -v md5sum)
awk=$(command -v awk)
check_hpraid="/usr/local/lib/nagios/plugins/check_hpraid -v -p"
check_hpraid_output=$(mktemp -p "$TMPDIR" check_hpraid_XXX)
check_hpraid_last="$TMPDIR/check_hpraid_last"
# set to false to use cron output (MAILTO)
# otherwise send output with mail command
use_mail=true
# FIX: use $TMPDIR consistently instead of hardcoding --tmpdir=/tmp
body=$(mktemp -p "$TMPDIR" check_hpraid_XXX)

clientmail=$(grep EVOMAINTMAIL /etc/evomaintenance.cf | cut -d'=' -f2)
hostname=$(grep HOSTNAME /etc/evomaintenance.cf | cut -d'=' -f2)
hostname=${hostname%%.evolix.net}
# If hostname is composed with -, remove the first part.
if [[ $hostname =~ "-" ]]; then
    hostname=$(echo "$hostname" | cut -d'-' -f2-)
fi

# FIX: trap EXIT only. EXIT already fires on any termination (including
# errors under "set -e"); trapping ERR as well ran the cleanup twice,
# making the second rm fail on already-removed files.
trap trapFunc EXIT

testDeps() {
    # FIX: use a group command "{ ...; exit 1; }" instead of a subshell
    # "( ...; exit 1 )": in a subshell, exit only left the subshell and
    # the script kept going (it previously aborted only thanks to set -e).
    test -x "$md5sum" || { echo "md5sum binary not found"; exit 1; }
    test -x "$awk" || { echo "awk binary not found"; exit 1; }
}

main() {
    # Run the NRPE plugin and remember whether it reported a failure
    if ! $check_hpraid > "$check_hpraid_output"; then
        error=true
    else
        error=false
    fi

    # If check_hpraid returned error, display output, save status and
    # exit
    if $error; then
        cp "$check_hpraid_output" "$check_hpraid_last"
        if $use_mail; then
            mail -s "RAID error on $hostname" "$clientmail" \
                < "$check_hpraid_output"
        else
            cat "$check_hpraid_output"
        fi
        exit 1
    fi

    # First run: seed the reference report
    if [ ! -f "$check_hpraid_last" ]; then
        cp "$check_hpraid_output" "$check_hpraid_last"
    fi

    # If output and last check is different, display differences and
    # exit
    md5_now=$(md5sum "$check_hpraid_output" | awk '{print $1}')
    md5_last=$(md5sum "$check_hpraid_last" | awk '{print $1}')
    if [[ "$md5_now" != "$md5_last" ]]; then
        cat << EOT > "$body"
Different RAID state detected.

Was:
$(sed 's/^/> /g' "$check_hpraid_last")

###########################

Is now:
$(sed 's/^/> /g' "$check_hpraid_output")
EOT
        if $use_mail; then
            mail -s "RAID status is different on $hostname" \
                "$clientmail" < "$body"
        else
            cat "$body"
        fi
        cp "$check_hpraid_output" "$check_hpraid_last"
        exit 1
    fi
}

trapFunc() {
    # -f: tolerate files already being gone when cleanup runs
    rm -f "$check_hpraid_output" "$body"
}

testDeps
main

View file

@ -25,15 +25,17 @@
when: broadcom_netextreme_search.rc == 0
## RAID
# Dell and others: MegaRAID SAS
# HP gen <10: Hewlett-Packard Company Smart Array
# HP gen >=10: Adaptec Smart Storage PQI
- name: Detect if RAID is installed
shell: lspci | grep "RAID bus controller" | grep -v Intel
shell: lspci -q | grep -e "RAID bus controller" -e "Serial Attached SCSI controller"
check_mode: no
register: raidmodel
changed_when: "'FAILED' in raidmodel.stdout"
failed_when: "'FAILED' in raidmodel.stdout"
- name: HP Smart Array package is present
- name: HPE Smart Storage Administrator (ssacli) is present
block:
- name: Add HPE GPG key
apt_key:
@ -44,28 +46,45 @@
apt_repository:
repo: 'deb https://downloads.linux.hpe.com/SDR/repo/mcp {{ ansible_distribution_release }}/current non-free'
state: present
- name: Install packages for HP hardware
- name: Install HPE Smart Storage Administrator (ssacli)
apt:
name:
- cciss-vol-status
- ssacli
name: ssacli
when:
- "'Hewlett-Packard Company Smart Array' in raidmodel.stdout"
- "'Adaptec Smart Storage PQI' in raidmodel.stdout"
# NOTE: check_hpraid cron use check_hpraid from nagios-nrpe role
# So, if nagios-nrpe role is not installed it will not work
- name: Install and configure check_hpraid cron (HP gen >=10)
block:
- name: check_hpraid cron is present (HP gen >=10)
copy:
src: check_hpraid.cron.sh
dest: /etc/cron.{{ evolinux_cron_checkhpraid_frequency | mandatory }}/check_hpraid
mode: "0755"
when: "'Adaptec Smart Storage PQI' in raidmodel.stdout"
- name: Install and configure cciss-vol-status (HP gen <10)
block:
- name: Install cciss-vol-status (HP gen <10)
apt:
name: cciss-vol-status
state: present
- name: cciss-vol-statusd init script is present
- name: cciss-vol-statusd init script is present (HP gen <10)
template:
src: hardware/cciss-vol-statusd.j2
dest: /etc/init.d/cciss-vol-statusd
mode: "0755"
- name: Configure cciss-vol-statusd
- name: Configure cciss-vol-statusd (HP gen <10)
lineinfile:
dest: /etc/default/cciss-vol-statusd
line: 'MAILTO="{{ raid_alert_email or general_alert_email | mandatory }}"'
regexp: 'MAILTO='
create: yes
- name: Enable HP hardware in systemd
- name: Enable cciss-vol-status in systemd (HP gen <10)
service:
name: cciss-vol-statusd
enabled: true

View file

@ -32,14 +32,14 @@
reload: yes
when: evolinux_kernel_disable_tcp_timestamps
- name: Reduce the swapiness
- name: Customize the swappiness
sysctl:
name: vm.swappiness
value: 20
value: "{{ evolinux_kernel_swappiness }}"
sysctl_file: "{{ evolinux_kernel_sysctl_path }}"
state: present
reload: yes
when: evolinux_kernel_reduce_swapiness
when: evolinux_kernel_customize_swappiness
- name: Patch for TCP stack vulnerability CVE-2016-5696
sysctl:

View file

@ -30,6 +30,7 @@
- tcpdump
- mtr-tiny
- curl
- wget
- telnet
- traceroute
- man

View file

@ -28,7 +28,7 @@ action_mwl = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(proto
action = %(action_mwl)s
[sshd]
port = ssh,2222
port = ssh,2222,22222
logpath = %(sshd_log)s
backend = %(sshd_backend)s
maxretry = 10

View file

@ -4,3 +4,20 @@ elastic_stack_version: "6.x"
filebeat_logstash_plugin: False
filebeat_processors_cloud_metadata: False
filebeat_elasticsearch_hosts:
- "localhost:9200"
filebeat_elasticsearch_protocol: "http"
filebeat_elasticsearch_auth_api_key: ""
filebeat_elasticsearch_auth_username: ""
filebeat_elasticsearch_auth_password: ""
filebeat_logstash_hosts: []
filebeat_logstash_protocol: "http"
filebeat_logstash_auth_api_key: ""
filebeat_logstash_auth_username: ""
filebeat_logstash_auth_password: ""
filebeat_use_config_template: False
filebeat_update_config: True
filebeat_force_config: True

View file

@ -66,18 +66,79 @@
- logstash_plugin.stat.exists
- not logstash_plugin_installed | success
- name: cloud_metadata processor is disabled
replace:
dest: /etc/filebeat/filebeat.yml
regexp: '^(\s+)(- add_cloud_metadata:)'
replace: '\1# \2'
notify: restart filebeat
when: not filebeat_processors_cloud_metadata
# When we don't use a config template (default)
- block:
- name: cloud_metadata processor is disabled
replace:
dest: /etc/filebeat/filebeat.yml
regexp: '^(\s+)(- add_cloud_metadata:)'
replace: '\1# \2'
notify: restart filebeat
when: not filebeat_processors_cloud_metadata
- name: cloud_metadata processor is disabled
- name: cloud_metadata processor is disabled
lineinfile:
dest: /etc/filebeat/filebeat.yml
line: " - add_cloud_metadata: ~"
insert_after: '^processors:'
notify: restart filebeat
when: filebeat_processors_cloud_metadata
- name: Filebeat knows where to find Elasticsearch
lineinfile:
dest: /etc/filebeat/filebeat.yml
regexp: '^ hosts: .*'
line: " hosts: [\"{{ filebeat_elasticsearch_hosts | join('\", \"') }}\"]"
insertafter: "output.elasticsearch:"
notify: restart filebeat
when:
- filebeat_elasticsearch_hosts
- name: Filebeat protocol for Elasticsearch
lineinfile:
dest: /etc/filebeat/filebeat.yml
regexp: '^ #?protocol: .*'
line: " protocol: \"{{ filebeat_elasticsearch_protocol }}\""
insertafter: "output.elasticsearch:"
notify: restart filebeat
when: filebeat_elasticsearch_protocol == "http" or filebeat_elasticsearch_protocol == "https"
- name: Filebeat auth/username for Elasticsearch are configured
lineinfile:
dest: /etc/filebeat/filebeat.yml
regexp: '{{ item.regexp }}'
line: '{{ item.line }}'
insertafter: "output.elasticsearch:"
with_items:
- { regexp: '^ #?username: .*', line: ' username: "{{ filebeat_elasticsearch_auth_username }}"' }
- { regexp: '^ #?password: .*', line: ' password: "{{ filebeat_elasticsearch_auth_password }}"' }
notify: restart filebeat
when:
- filebeat_elasticsearch_auth_username
- filebeat_elasticsearch_auth_password
when: not filebeat_use_config_template
- name: Filebeat api_key for Elasticsearch are configured
lineinfile:
dest: /etc/filebeat/filebeat.yml
line: " - add_cloud_metadata: ~"
insert_after: '^processors:'
regexp: '^ #?api_key: .*'
line: ' api_key: "{{ filebeat_elasticsearch_auth_api_key }}"'
insertafter: "output.elasticsearch:"
notify: restart filebeat
when: filebeat_processors_cloud_metadata
when: filebeat_elasticsearch_auth_api_key
# When we use a config template
- block:
- name: Configuration is up-to-date
template:
src: "{{ item }}"
dest: /etc/filebeat/filebeat.yml
force: "{{ filebeat_force_config }}"
with_first_found:
- "templates/filebeat/filebeat.{{ inventory_hostname }}.yml.j2"
- "templates/filebeat/filebeat.{{ host_group }}.yml.j2"
- "templates/filebeat/filebeat.default.yml.j2"
- "filebeat.default.yml.j2"
notify: restart filebeat
when: filebeat_update_config
when: filebeat_use_config_template

View file

@ -0,0 +1,254 @@
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
# ============================== Filebeat inputs ===============================
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
- type: log
# Change to true to enable this input configuration.
enabled: false
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /var/log/*.log
#- c:\programdata\elasticsearch\logs\*
# Exclude lines. A list of regular expressions to match. It drops the lines that are
# matching any regular expression from the list.
#exclude_lines: ['^DBG']
# Include lines. A list of regular expressions to match. It exports the lines that are
# matching any regular expression from the list.
#include_lines: ['^ERR', '^WARN']
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
# are matching any regular expression from the list. By default, no files are dropped.
#exclude_files: ['.gz$']
# Optional additional fields. These fields can be freely picked
# to add additional information to the crawled log files for filtering
#fields:
# level: debug
# review: 1
### Multiline options
# Multiline can be used for log messages spanning multiple lines. This is common
# for Java Stack Traces or C-Line Continuation
# The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
#multiline.pattern: ^\[
# Defines if the pattern set under pattern should be negated or not. Default is false.
#multiline.negate: false
# Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
# that was (not) matched before or after or as long as a pattern is not matched based on negate.
# Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
#multiline.match: after
# ============================== Filebeat modules ==============================
filebeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
# ======================= Elasticsearch template setting =======================
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
# env: staging
# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# =============================== Elastic Cloud ================================
# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
{% if filebeat_elasticsearch_hosts %}
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
hosts: ["{{ filebeat_elasticsearch_hosts | join('", "') }}"]
protocol: "{{ filebeat_elasticsearch_protocol | default('http') }}"
{% if filebeat_elasticsearch_auth_api_key %}
api_key: "{{ filebeat_elasticsearch_auth_api_key }}"
{% endif %}
{% if filebeat_elasticsearch_auth_username %}
username: "{{ filebeat_elasticsearch_auth_username }}"
{% endif %}
{% if filebeat_elasticsearch_auth_password %}
password: "{{ filebeat_elasticsearch_auth_password }}"
{% endif %}
{% endif %}
{% if filebeat_logstash_hosts %}
# ---------------------------- Logstash Output ---------------------------------
# NOTE: unlike output.elasticsearch, output.logstash does not support the
# "protocol", "api_key", "username" or "password" settings (the previous
# version copied them from the Elasticsearch block above); Filebeat refuses
# to start when an output contains unsupported options. The Logstash output
# only takes hosts plus optional ssl.* / loadbalance settings.
output.logstash:
  # The Logstash hosts
  hosts: ["{{ filebeat_logstash_hosts | join('", "') }}"]
{% endif %}
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# ================================= Processors =================================
processors:
- add_host_metadata: ~
{% if filebeat_processors_cloud_metadata %}
- add_cloud_metadata: ~
{% endif %}
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# ============================== Instrumentation ===============================
# Instrumentation support for the filebeat.
#instrumentation:
# Set to true to enable instrumentation of filebeat.
#enabled: false
# Environment in which filebeat is running on (eg: staging, production, etc.)
#environment: ""
# APM Server hosts to report instrumentation results to.
#hosts:
# - http://localhost:8200
# API Key for the APM Server(s).
# If api_key is set then secret_token will be ignored.
#api_key:
# Secret token for the APM Server(s).
#secret_token:
# ================================= Migration ==================================
# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true

View file

@ -25,7 +25,8 @@ EvoComputerName=$(hostname -s)
dnsPTRrecord=$(hostname -f)
HardwareMark=$(dmidecode -s system-manufacturer | grep -v '^#')
computerIP=$(hostname -i | cut -d' ' -f1)
computerOS=$(lsb_release -s -d | sed 's#\..##')
# The sed part does not work for squeeze and older releases
computerOS=$(lsb_release -s -d | sed -E 's#\.[0-9]{1,}##')
computerKernel=$(uname -r)
HardwareSerial=$(dmidecode -s system-serial-number | grep -v '^#')
@ -71,7 +72,7 @@ if (test -b /dev/vda); then
sdaModel="Virtual VirtIO Disk"
elif [ -d /proc/vz ] && [ ! -d /proc/bc ]; then
sdaModel="OpenVZ SIMFS disk"
else
elif (lsblk -d -r -n -o TYPE,SIZE,PATH | grep -q sda); then
hdparm -I /dev/sda 2>&1 | grep -q bad
if [ $? -eq 0 ]; then
if (test -n "${raidModel}"); then
@ -82,6 +83,9 @@ else
else
sdaModel=$(hdparm -I /dev/sda | grep Model | tr -s '\t' ' ' | cut -d' ' -f4-)
fi
# hdparm does not support NVME, use smartctl
elif (lsblk -d -r -n -o TYPE,SIZE,PATH | grep -q nvme); then
sdaModel="SSD NVMe: $(smartctl -a /dev/nvme0n1 | grep "Model Number" | tr -s ' ' | cut -d' ' -f3-)"
fi
ldif_file="/root/${EvoComputerName}.$(date +"%Y%m%d%H%M%S").ldif"
@ -273,7 +277,10 @@ for net in $(ls /sys/class/net); do
echo $path | grep -q virtual
if [ $? -ne 0 ]; then
hw=$(cat ${path}/address)
# In some cases a device does not have a vendor or device ID; skip it
test -f ${path}/device/vendor || continue
vendor_id=$(cat ${path}/device/vendor)
test -f ${path}/device/device || continue
dev_id=$(cat ${path}/device/device)
[ "${dev_id}" = "0x0001" ] && dev_id="0x1000"
dev=$(lspci -d "${vendor_id}:${dev_id}" -vm)

View file

@ -101,4 +101,24 @@
- haproxy
- config
# Force "dateext" in haproxy's logrotate configuration so rotated files are
# suffixed with the date instead of an incrementing number. The regexp also
# matches a commented-out or "nodateext" line, so an existing directive is
# replaced in place rather than duplicated; otherwise the line is inserted
# before the closing brace of the stanza.
- name: Rotate logs with dateext
  lineinfile:
    dest: /etc/logrotate.d/haproxy
    line: ' dateext'
    regexp: '^\s*#*\s*(no)?dateext'
    insertbefore: '}'
  tags:
    - haproxy
    - config

# Force "nodelaycompress" so rotated logs are compressed immediately instead
# of being postponed to the next rotation cycle. Same replace-or-insert
# strategy as the "dateext" task above.
- name: Rotate logs with nodelaycompress
  lineinfile:
    dest: /etc/logrotate.d/haproxy
    line: ' nodelaycompress'
    regexp: '^\s*#*\s*(no)?delaycompress'
    insertbefore: '}'
  tags:
    - haproxy
    - config
- include: munin.yml

View file

@ -1,4 +1,4 @@
# filebeat
# metricbeat
Install Metricbeat.

View file

@ -1,10 +1,25 @@
---
elastic_stack_version: "6.x"
metricbeat_elasticsearch_protocol: ""
metricbeat_elasticsearch_hosts:
- "localhost:9200"
metricbeat_elasticsearch_protocol: ""
metricbeat_elasticsearch_auth_api_key: ""
metricbeat_elasticsearch_auth_username: ""
metricbeat_elasticsearch_auth_password: ""
metricbeat_processors_cloud_metadata: False
metricbeat_use_config_template: False
metricbeat_update_config: True
metricbeat_force_config: True
# Example :
# metricbeat_tags:
# - "service-X"
# - "web-tier"
metricbeat_tags: Null
# Example :
# metricbeat_fields:
# - "env: staging"
metricbeat_fields: Null

View file

@ -40,51 +40,79 @@
name: metricbeat
enabled: yes
- name: Metricbeat knows where to find Elasticsearch
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ hosts: .*'
line: " hosts: [\"{{ metricbeat_elasticsearch_hosts | join('\", \"') }}\"]"
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when:
- metricbeat_elasticsearch_hosts
# When we don't use a config template (default)
- block:
- name: Metricbeat knows where to find Elasticsearch
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ hosts: .*'
line: " hosts: [\"{{ metricbeat_elasticsearch_hosts | join('\", \"') }}\"]"
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when:
- metricbeat_elasticsearch_hosts
- name: Metricbeat protocol for Elasticsearch
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ #?protocol: .*'
line: " protocol: \"{{ metricbeat_elasticsearch_protocol }}\""
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when: metricbeat_elasticsearch_protocol == "http" or metricbeat_elasticsearch_protocol == "https"
- name: Metricbeat protocol for Elasticsearch
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ #?protocol: .*'
line: " protocol: \"{{ metricbeat_elasticsearch_protocol }}\""
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when: metricbeat_elasticsearch_protocol == "http" or metricbeat_elasticsearch_protocol == "https"
- name: Metricbeat auth/username for Elasticsearch are configured
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '{{ item.regexp }}'
line: '{{ item.line }}'
insertafter: "output.elasticsearch:"
with_items:
- { regexp: '^ #?username: .*', line: ' username: "{{ metricbeat_elasticsearch_auth_username }}"' }
- { regexp: '^ #?password: .*', line: ' password: "{{ metricbeat_elasticsearch_auth_password }}"' }
notify: restart metricbeat
when:
- metricbeat_elasticsearch_auth_username != ""
- metricbeat_elasticsearch_auth_password != ""
- name: Metricbeat auth/username for Elasticsearch are configured
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '{{ item.regexp }}'
line: '{{ item.line }}'
insertafter: "output.elasticsearch:"
with_items:
- { regexp: '^ #?username: .*', line: ' username: "{{ metricbeat_elasticsearch_auth_username }}"' }
- { regexp: '^ #?password: .*', line: ' password: "{{ metricbeat_elasticsearch_auth_password }}"' }
notify: restart metricbeat
when:
- metricbeat_elasticsearch_auth_username
- metricbeat_elasticsearch_auth_password
- name: disable cloud_metadata
replace:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^(\s+)(- add_cloud_metadata:)'
replace: '\1# \2'
notify: restart metricbeat
when: not metricbeat_processors_cloud_metadata
- name: Metricbeat api_key for Elasticsearch are configured
lineinfile:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^ #?api_key: .*'
line: ' api_key: "{{ metricbeat_elasticsearch_auth_api_key }}"'
insertafter: "output.elasticsearch:"
notify: restart metricbeat
when: metricbeat_elasticsearch_auth_api_key
# Add the add_cloud_metadata processor to metricbeat.yml when requested.
# Fixes two defects: "insert_after" is not a valid lineinfile parameter
# (the module option is "insertafter", so the task failed with
# "Unsupported parameters"), and the task name said "disabled" although
# the "when" condition enables the processor.
- name: cloud_metadata processor is enabled
  lineinfile:
    dest: /etc/metricbeat/metricbeat.yml
    line: "  - add_cloud_metadata: ~"
    insertafter: '^processors:'
  notify: restart metricbeat
  when: metricbeat_processors_cloud_metadata
- name: disable cloud_metadata
replace:
dest: /etc/metricbeat/metricbeat.yml
regexp: '^(\s+)(- add_cloud_metadata:)'
replace: '\1# \2'
notify: restart metricbeat
when: not metricbeat_processors_cloud_metadata
# Add the add_cloud_metadata processor to metricbeat.yml when requested.
# Fixes two defects: "insert_after" is not a valid lineinfile parameter
# (the module option is "insertafter", so the task failed with
# "Unsupported parameters"), and the task name said "disabled" although
# the "when" condition enables the processor.
- name: cloud_metadata processor is enabled
  lineinfile:
    dest: /etc/metricbeat/metricbeat.yml
    line: "  - add_cloud_metadata: ~"
    insertafter: '^processors:'
  notify: restart metricbeat
  when: metricbeat_processors_cloud_metadata
when: not metricbeat_use_config_template
# When we use a config template
- block:
- name: Configuration is up-to-date
template:
src: "{{ item }}"
dest: /etc/metricbeat/metricbeat.yml
force: "{{ metricbeat_force_config }}"
with_first_found:
- "templates/metricbeat/metricbeat.{{ inventory_hostname }}.yml.j2"
- "templates/metricbeat/metricbeat.{{ host_group }}.yml.j2"
- "templates/metricbeat/metricbeat.default.yml.j2"
- "metricbeat.default.yml.j2"
notify: restart metricbeat
when: metricbeat_update_config
when: metricbeat_use_config_template

View file

@ -0,0 +1,180 @@
###################### Metricbeat Configuration Example #######################
# This file is an example configuration file highlighting only the most common
# options. The metricbeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/metricbeat/index.html
# =========================== Modules configuration ============================
metricbeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
# ======================= Elasticsearch template setting =======================
setup.template.settings:
index.number_of_shards: 1
index.codec: best_compression
#_source.enabled: false
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
{% if metricbeat_tags %}
tags: ["{{ metricbeat_tags | join('", "') }}"]
{% endif %}
# Optional fields that you can specify to add additional information to the
# output.
{% if metricbeat_fields %}
fields:
{% for field in metricbeat_fields %}
{{ field }}
{% endfor %}
{% endif %}
# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# =============================== Elastic Cloud ================================
# These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["{{ metricbeat_elasticsearch_hosts | join('", "') }}"]
# Protocol - either `http` (default) or `https`.
protocol: "{{ metricbeat_elasticsearch_protocol | default('http') }}"
# Authentication credentials - either API key or username/password.
{% if metricbeat_elasticsearch_auth_api_key %}
api_key: "{{ metricbeat_elasticsearch_auth_api_key }}"
{% endif %}
{% if metricbeat_elasticsearch_auth_username %}
username: "{{ metricbeat_elasticsearch_auth_username }}"
{% endif %}
{% if metricbeat_elasticsearch_auth_password %}
password: "{{ metricbeat_elasticsearch_auth_password }}"
{% endif %}
# ------------------------------ Logstash Output -------------------------------
#output.logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# ================================= Processors =================================
# Configure processors to enhance or manipulate events generated by the beat.
processors:
- add_host_metadata: ~
{% if metricbeat_processors_cloud_metadata %}
- add_cloud_metadata: ~
{% endif %}
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
# ============================= X-Pack Monitoring ==============================
# Metricbeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Metricbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# ================================= Migration ==================================
# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true

View file

@ -19,7 +19,7 @@ minifirewall_privilegied_ips: []
minifirewall_protected_ports_tcp: [22]
minifirewall_protected_ports_udp: []
minifirewall_public_ports_tcp: [25, 53, 443, 993, 995, 2222]
minifirewall_public_ports_tcp: [25, 53, 443, 993, 995, 22222]
minifirewall_public_ports_udp: [53]
minifirewall_semipublic_ports_tcp: [20, 21, 22, 80, 110, 143]
minifirewall_semipublic_ports_udp: []

View file

@ -29,7 +29,7 @@ SERVICESTCP1p='22'
SERVICESUDP1p=''
# Public services (IPv4/IPv6)
SERVICESTCP1='25 53 443 993 995 2222'
SERVICESTCP1='25 53 443 993 995 22222'
SERVICESUDP1='53'
# Semi-public services (IPv4)

63
mongodb/files/munin/mongo_btree Executable file
View file

@ -0,0 +1,63 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: graph MongoDB btree index counters from serverStatus.
# NOTE(review): Python 2 code (print statements, iteritems); urllib2 is
# imported but never used.
import urllib2
import sys
import os
import pymongo

def getClient():
    # Connect using MONGO_DB_URI from the environment when provided,
    # otherwise fall back to the pymongo default (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # Run the "serverStatus" admin command and return the full document.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

def get():
    # NOTE(review): "indexCounters" was removed from serverStatus in
    # MongoDB 3.0+, so this raises KeyError on modern servers — confirm
    # the targeted MongoDB version.
    return getServerStatus()["indexCounters"]

def doData():
    # munin "fetch": one "<field>.value <n>" line per counter.
    for k,v in get().iteritems():
        print( str(k) + ".value " + str(int(v)) )

def doConfig():
    # munin "config": describe the graph and one COUNTER field per key.
    print "graph_title MongoDB btree stats"
    print "graph_args --base 1000 -l 0"
    print "graph_vlabel mb ${graph_period}"
    print "graph_category MongoDB"
    for k in get():
        print k + ".label " + k
        print k + ".min 0"
        print k + ".type COUNTER"
        print k + ".max 500000"
        print k + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read from the environment but the
    # resulting variables are never used (the connection is configured
    # via MONGO_DB_URI instead).
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

View file

@ -0,0 +1,106 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin multigraph plugin: per-collection document count and on-disk size
# for every MongoDB database, as two separate graphs.
# NOTE(review): Python 2 code (print statements); urllib2 is imported but
# never used.
import urllib2
import sys
import os
import pymongo

def getClient():
    # MONGO_DB_URI (if set) wins over the pymongo default (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # NOTE(review): defined but never called in this plugin.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

import re
# Munin field names may only contain letters and underscores; everything
# else is replaced by "_".
FIELD_ESCAPE = re.compile("[^A-Za-z_]")
def escape_field(name):
    return FIELD_ESCAPE.sub("_", name)

def need_multigraph():
    # Abort unless munin-node advertises multigraph capability.
    if 'MUNIN_CAP_MULTIGRAPH' not in os.environ:
        sys.stderr.write('MUNIN_CAP_MULTIGRAPH not found in environment\n')
        sys.exit(1)

def collections(include_stats=False):
    # Yield "db.collection" names across all databases; with
    # include_stats=True yield (name, collstats document) tuples instead.
    c = getClient()
    for db in c.database_names():
        for collection in c[db].collection_names():
            name = db + "." + collection
            if include_stats:
                yield name, c[db].command("collstats", collection)
            else:
                yield name

def doData():
    # munin "fetch": emit values for both multigraphs.
    need_multigraph()
    data = list(collections(True))
    print "multigraph collection_count"
    for name, stats in data:
        print(escape_field(name) + ".value " + str(stats["count"]))
    print "multigraph collection_size"
    for name, stats in data:
        print(escape_field(name) + ".value " + str(stats["size"]))

def doConfig():
    # munin "config": declare both graphs and one GAUGE field per collection.
    need_multigraph()
    names = list(collections())
    print "multigraph collection_count"
    print "graph_title MongoDB collection document count"
    print "graph_args --base 1000 -l 0"
    print "graph_vlabel collection document count"
    print "graph_category MongoDB"
    print "graph_total total"
    for name in names:
        field_name = escape_field(name)
        print field_name + ".label " + name
        print field_name + ".min 0"
        print field_name + ".type GAUGE"
        print field_name + ".draw LINE1"
    print "multigraph collection_size"
    print "graph_title MongoDB collection size"
    print "graph_args --base 1024 -l 0"
    print "graph_vlabel collection size"
    print "graph_category MongoDB"
    print "graph_total total"
    for name in names:
        field_name = escape_field(name)
        print field_name + ".label " + name
        print field_name + ".min 0"
        print field_name + ".type GAUGE"
        print field_name + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards.
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

57
mongodb/files/munin/mongo_conn Executable file
View file

@ -0,0 +1,57 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: number of currently open MongoDB client connections
# (serverStatus connections.current).
# NOTE(review): Python 2 code (print statements); urllib2 is imported but
# never used.
import urllib2
import sys
import os
import pymongo

def getClient():
    # MONGO_DB_URI (if set) wins over the pymongo default (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # Run the "serverStatus" admin command and return the full document.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

# Munin field name.
name = "connections"

def doData():
    # munin "fetch": current number of client connections.
    print name + ".value " + str( getServerStatus()["connections"]["current"] )

def doConfig():
    # munin "config" output for a single-field graph.
    print "graph_title MongoDB current connections"
    print "graph_args --base 1000 -l 0"
    print "graph_vlabel connections"
    print "graph_category MongoDB"
    print name + ".label " + name

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards.
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

72
mongodb/files/munin/mongo_docs Executable file
View file

@ -0,0 +1,72 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: per-collection document counts for every database except
# "admin" and "local".
# NOTE(review): Python 2 code (print statements); urllib2 is imported but
# never used.
import urllib2
import sys
import os
import pymongo

def getClient():
    # MONGO_DB_URI (if set) wins over the pymongo default (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # NOTE(review): defined but never called in this plugin.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

def getDatabasesStats():
    # Build {db_name: {collection_name: document_count}} for all user
    # databases. NOTE(review): host/port below are assigned but never used.
    host = "127.0.0.1"
    port = 27017
    c = getClient()
    dbs = {}
    for k in c.database_names():
        if k != "admin" and k != "local" and k != "":
            db = c[k]
            dbs[k] = {}
            for coll in db.collection_names():
                # Skip dotted (namespaced) collections such as "system.indexes".
                if '.' not in coll:
                    dbs[k][coll] = db[coll].count()
    return dbs

def doData():
    # munin "fetch": one "<db><collection>.value <count>" line per collection.
    ss = getDatabasesStats()
    for k,v in ss.iteritems():
        for a,b in v.iteritems():
            print(str(k)+str(a) + ".value " + str(b))

def doConfig():
    # munin "config": declare one field per collection.
    print "graph_title MongoDB documents count"
    print "graph_args --base 1000 -l 0 --vertical-label Docs"
    print "graph_category MongoDB"
    ss = getDatabasesStats()
    for k,v in ss.iteritems():
        for a,b in v.iteritems():
            print str(k)+str(a) + ".label " + str(k) + " " + str(a)
            print str(k)+str(a) + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards.
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

56
mongodb/files/munin/mongo_lock Executable file
View file

@ -0,0 +1,56 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
import urllib2
import sys
import os
import pymongo
def getClient():
if 'MONGO_DB_URI' in os.environ:
return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
else:
return pymongo.MongoClient()
def getServerStatus():
c = getClient()
return c.admin.command('serverStatus', workingSet=True)
name = "locked"
def doData():
print name + ".value " + str( 100 * (getServerStatus()["globalLock"]["lockTime"]/getServerStatus()["globalLock"]["totalTime"]) )
def doConfig():
print "graph_title MongoDB global write lock percentage"
print "graph_args --base 1000 -l 0 "
print "graph_vlabel percentage"
print "graph_category MongoDB"
print name + ".label " + name
if __name__ == "__main__":
from os import environ
if 'HOST' in environ:
host = environ['HOST']
if 'PORT' in environ:
port = environ['PORT']
if 'USER' in environ:
user = environ['USER']
if 'PASSWORD' in environ:
password = environ['PASSWORD']
if len(sys.argv) > 1 and sys.argv[1] == "config":
doConfig()
else:
doData()

62
mongodb/files/munin/mongo_mem Executable file
View file

@ -0,0 +1,62 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: MongoDB memory usage (resident/virtual/mapped) in bytes.
# NOTE(review): Python 2 code (print statements); urllib2 is imported but
# never used.
import urllib2
import sys
import os
import pymongo

def getClient():
    # MONGO_DB_URI (if set) wins over the pymongo default (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # Run the "serverStatus" admin command and return the full document.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

def ok(s):
    # Only graph the resident/virtual/mapped figures from the "mem" section.
    return s == "resident" or s == "virtual" or s == "mapped"

def doData():
    # The "mem" values are megabytes; convert to bytes (* 1024 * 1024)
    # to match the --base 1024 graph below.
    for k,v in getServerStatus()["mem"].iteritems():
        if ok(k):
            print( str(k) + ".value " + str(v * 1024 * 1024) )

def doConfig():
    # munin "config": one field per selected memory metric.
    print "graph_title MongoDB memory usage"
    print "graph_args --base 1024 -l 0 --vertical-label Bytes"
    print "graph_category MongoDB"
    for k in getServerStatus()["mem"]:
        if ok( k ):
            print k + ".label " + k
            print k + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards.
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

58
mongodb/files/munin/mongo_ops Executable file
View file

@ -0,0 +1,58 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: MongoDB operation counters (serverStatus "opcounters").
# NOTE(review): Python 2 code (print statements); urllib2 is imported but
# never used.
import urllib2
import sys
import os
import pymongo

def getClient():
    # MONGO_DB_URI (if set) wins over the pymongo default (localhost:27017).
    if 'MONGO_DB_URI' in os.environ:
        return pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        return pymongo.MongoClient()

def getServerStatus():
    # Run the "serverStatus" admin command and return the full document.
    c = getClient()
    return c.admin.command('serverStatus', workingSet=True)

def doData():
    # munin "fetch": one "<op>.value <n>" line per opcounter.
    ss = getServerStatus()
    for k,v in ss["opcounters"].iteritems():
        print( str(k) + ".value " + str(v) )

def doConfig():
    # munin "config": one COUNTER field per opcounter.
    print "graph_title MongoDB ops"
    print "graph_args --base 1000 -l 0"
    print "graph_vlabel ops / ${graph_period}"
    print "graph_category MongoDB"
    print "graph_total total"
    for k in getServerStatus()["opcounters"]:
        print k + ".label " + k
        print k + ".min 0"
        print k + ".type COUNTER"
        print k + ".max 500000"
        print k + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards.
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

View file

@ -0,0 +1,57 @@
#!/usr/bin/env python
## GENERATED FILE - DO NOT EDIT
# Munin plugin: MongoDB page faults (serverStatus extra_info.page_faults).
# NOTE(review): Python 2 code (print statements); urllib2 is imported but
# never used.
import urllib2
import sys
import os
import pymongo

def getServerStatus():
    # Connect (MONGO_DB_URI wins over the pymongo default, localhost:27017)
    # and run the "serverStatus" admin command.
    if 'MONGO_DB_URI' in os.environ:
        c = pymongo.MongoClient(os.environ['MONGO_DB_URI'])
    else:
        c = pymongo.MongoClient()
    return c.admin.command('serverStatus', workingSet=True)

# Munin field name; also the key looked up in serverStatus "extra_info".
name = "page_faults"

def get():
    return getServerStatus()["extra_info"][name]

def doData():
    # munin "fetch": cumulative page fault count (graphed as a COUNTER).
    print(name + ".value " + str(get()))

def doConfig():
    # munin "config" output for a single COUNTER field.
    print "graph_title MongoDB page faults"
    print "graph_args --base 1000 -l 0"
    print "graph_vlabel faults / ${graph_period}"
    print "graph_category MongoDB"
    print "graph_total total"
    print name + ".label " + name
    print name + ".min 0"
    print name + ".type COUNTER"
    print name + ".max 10000"
    print name + ".draw LINE1"

if __name__ == "__main__":
    # HOST/PORT/USER/PASSWORD are read but never used afterwards.
    from os import environ
    if 'HOST' in environ:
        host = environ['HOST']
    if 'PORT' in environ:
        port = environ['PORT']
    if 'USER' in environ:
        user = environ['USER']
    if 'PASSWORD' in environ:
        password = environ['PASSWORD']
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        doConfig()
    else:
        doData()

View file

@ -9,3 +9,8 @@
service:
name: mongodb
state: restarted
- name: restart munin-node
systemd:
name: munin-node
state: restarted

View file

@ -44,3 +44,35 @@
dest: /etc/logrotate.d/mongodb
force: yes
backup: no
- name: Munin plugins are present
copy:
src: "munin/{{ item }}"
dest: '/usr/local/share/munin/plugins/{{ item }}'
force: yes
with_items:
- mongo_btree
- mongo_collections
- mongo_conn
- mongo_docs
- mongo_lock
- mongo_mem
- mongo_ops
- mongo_page_faults
notify: restart munin-node
- name: Enable core Munin plugins
file:
src: '/usr/local/share/munin/plugins/{{ item }}'
dest: /etc/munin/plugins/{{ item }}
state: link
with_items:
- mongo_btree
- mongo_collections
- mongo_conn
- mongo_docs
- mongo_lock
- mongo_mem
- mongo_ops
- mongo_page_faults
notify: restart munin-node

View file

@ -65,7 +65,7 @@ $smtp->close();
print "$result\n";
if ($result =~/2.7.0 Ok, discarded, id=[^,]+ - INFECTED: Eicar-Test-Signature/) {
if ($result =~/2.7.0 Ok, discarded, id=\S+ - INFECTED: Eicar-Signature/) {
print "OK - All fine\n";
exit 0;
} else {

View file

@ -0,0 +1,289 @@
#!/usr/bin/env bash
# shellcheck disable=SC2028
set -euo pipefail
# This check_hpraid is a fork from check_cciss v0.15 written by Simone Rosa.
# Fork written by Evolix and for Evolix usage (Debian only).
# Usage of old tools and drivers were removed to use only the smartpqi or hpsa drivers and the ssacli tool from HP.
# Tools not used on Debian were also removed.
# Linting tool shellcheck was used to use a better bash coding style.
# Upstream at:
# https://gitea.evolix.org/evolix/ansible-roles/src/branch/stable/nagios-nrpe/files/plugins
# Source of the fork:
# https://exchange.nagios.org/directory/Plugins/Hardware/Storage-Systems/RAID-Controllers/check_cciss--2D-HP-and-Compaq-Smart-Array-Hardware-status/details
#
# Licence: GPLv2
# Description:
#
# This plugin checks hardware status for Smart Array Controllers,
# using HPE Smart Storage Administrator. It should support Debian 9 and over.
# (Array, controller, cache, battery, etc...)
#
# Known working RAID controllers:
#
# - Adaptec Smart Storage PQI 12G SAS/PCIe 3 (rev 01)
# | Smart Array P408i-a SR Gen10
# | Smart Array P408i-p SR Gen10
# | Smart Array E208i-a SR Gen10
#
#
# NOTE:
#
# You need to install the proprietary tool HPE Smart Storage Administrator (ssacli) from:
# https://downloads.linux.hpe.com/SDR/repo/mcp
# Also NRPE need to launch ssacli as root.
#
# Please add this line to /etc/sudoers :
# --------------------------------------------------
# nagios ALL=NOPASSWD: /usr/sbin/ssacli
#
# Examples:
#
# ./check_hpraid
# ----------------
# RAID OK
#
# ./check_hpraid -v
# -------------------
# RAID OK: Smart Array 6i in Slot 0 array A logicaldrive 1 (67.8 GB, RAID 1+0, OK)
# [Controller Status: OK Cache Status: OK Battery Status: OK]
#
# RAID CRITICAL - HP Smart Array Failed: Smart Array 6i in Slot 0 (Embedded) \
# array A logicaldrive 1 (33.9 GB, RAID 1, Interim Recovery Mode) \
# physicaldrive 1:0 (port 1:id 0 , Parallel SCSI, --- GB, Failed)
#
# RAID WARNING - HP Smart Array Rebuilding: Smart Array 6i in Slot 0 (Embedded) \
# array A logicaldrive 1 (33.9 GB, RAID 1, Recovering, 26% complete) \
# physicaldrive 1:0 (port 1:id 0 , Parallel SCSI, 36.4 GB, Rebuilding)
#
# ./check_hpraid -v -p
# --------------------
# RAID OK: Smart Array 6i in Slot 0 (Embedded) array A logicaldrive 1 (33.9 GB, RAID 1, OK)
# physicaldrive 2:0 (port 2:id 0 , Parallel SCSI, 36.4 GB, OK)
# physicaldrive 2:1 (port 2:id 1 , Parallel SCSI, 36.4 GB, OK)
# physicaldrive 1:5 (port 1:id 5 , Parallel SCSI, 72.8 GB, OK, spare)
# [Controller Status: OK Cache Status: OK Battery/Capacitor Status: OK]
#
# RAID CRITICAL - HP Smart Array Failed: Smart Array 6i in Slot 0 (Embedded) \
# array A logicaldrive 1 (33.9 GB, RAID 1, Interim Recovery Mode) \
# physicaldrive 1:0 (port 1:id 0 , Parallel SCSI, --- GB, Failed) \
# physicaldrive 1:1 (port 1:id 1 , Parallel SCSI, 36.4 GB, OK)
#
# RAID WARNING - HP Smart Array Rebuilding: Smart Array 6i in Slot 0 (Embedded) \
# array A logicaldrive 1 (33.9 GB, RAID 1, Recovering, 26% complete) \
# physicaldrive 1:0 (port 1:id 0 , Parallel SCSI, 36.4 GB, Rebuilding) \
# physicaldrive 1:1 (port 1:id 1 , Parallel SCSI, 36.4 GB, OK)
#
# ./check_hpraid -v -b
# ----------------
#
# RAID OK: Smart Array 6i in Slot 0 (Embedded) array A logicaldrive 1 (33.9 GB, RAID 1, OK) [Controller Status: OK]
#
# [instead of]
# RAID CRITICAL - HP Smart Array Failed: Smart Array 6i in Slot 0 (Embedded) \
# Controller Status: OK Cache Status: Temporarily Disabled \
# Battery/Capacitor Status: Failed (Replace Batteries/Capacitors)
PROGNAME=$(basename "$0")
NAGIOS_PLUGINS="/usr/lib/nagios/plugins"
REVISION="0.16-evolix"
DEBUG="0"
VERBOSE="0"
ssacli=$(command -v ssacli)
PHYSICAL_DRIVE=0
# shellcheck source=/dev/null
. ${NAGIOS_PLUGINS}/utils.sh
# Print command-line usage on stdout.
# NOTE(review): "-E <name>" and "-s" are advertised in the synopsis but have
# no handler in the getopts loop below — confirm whether they should be
# implemented or dropped from the usage line.
print_usage() {
    echo ""
    echo "Usage: $PROGNAME [-v] [-p] [-e <number>] [-E <name>] [-b] [-s] [-d]"
    echo "Usage: $PROGNAME [-h]"
    echo "Usage: $PROGNAME [-V]"
    echo ""
    # "informations" typo fixed in the help text below.
    echo " -v = show status and information about RAID"
    echo " -p = show detail for physical drives"
    echo " -e <number> = exclude slot number"
    echo " -b = exclude battery/capacitor/cache status check"
    echo " -d = use for debug (command line mode)"
    echo " -h = help information"
    echo " -V = version information"
    echo ""
    echo " ============="
}
# Print full help: version banner, usage synopsis, description and support
# contact, then exit 0.
# Relies on print_revision() and support() sourced from utils.sh.
print_help() {
    print_revision "$PROGNAME" "$REVISION"
    echo ""
    print_usage
    echo ""
    echo "This plugin checks hardware status for Smart Array Controllers,"
    echo "using HPE Smart Storage Administrator."
    echo ""
    support
    exit 0
}
# Parse command-line options.
# "N" (takes an argument) and "c" are accepted but do nothing — presumably
# kept for compatibility with the original check_cciss interface; confirm.
# NOTE(review): "s" is in the getopts string but has no case handler, so -s
# falls through to *) and just prints usage — confirm intent.
while getopts "N:cvpbsde:Vh" options
do
    case $options in
        N) ;;
        c) ;;
        v) VERBOSE=1;;                 # verbose status output
        p) PHYSICAL_DRIVE=1;;          # include per-physicaldrive detail
        d) DEBUG=1;;                   # debug traces on stdout
        e) EXCLUDE_SLOT=1              # exclude one controller slot number
           excludeslot="$OPTARG";;
        b) EXCLUDE_BATTERY=1;;         # ignore battery/capacitor/cache status
        V) print_revision "$PROGNAME" "$REVISION"
           exit 0;;
        h) print_help
           exit 0;;
        \?) print_usage
           exit 0;;
        *) print_usage
           exit 0;;
    esac
done
# Check if smartpqi or hpsa driver is loaded
# https://manpages.debian.org/buster/manpages/smartpqi.4.en.html
driverPresent='No!'
if [ -d /sys/bus/pci/drivers/smartpqi ] || [ -d /sys/bus/pci/drivers/hpsa ]; then
    driverPresent='YES.'
fi
if [ "$DEBUG" = "1" ]; then
    echo "### Check if \"HP Smart Array\" driver is present >>>\n${driverPresent}\n"
fi
# Without either driver there is no HP Smart Array to check: bail out UNKNOWN.
if [[ "$driverPresent" == "No!" ]]; then
    echo "RAID UNKNOWN - HP Smart Array not found"
    exit "$STATE_UNKNOWN"
fi
# Check if "HP Array Utility CLI" is present
if [ "$DEBUG" = "1" ]; then
    echo "### Check if \"ssacli\" is present >>>\n"
fi
# Bail out if the ssacli binary is missing or not executable.
# (The original nested the "[ -x ]" debug test inside the "[ ! -x ]" branch,
# so the "ssacli is present" debug line was unreachable dead code.)
if [ ! -x "$ssacli" ]; then
    echo "ERROR: ssacli tools should be installed and with right sudoers/permissions (see the notes above)"
    exit "$STATE_UNKNOWN"
elif [ "$DEBUG" = "1" ]; then
    echo "### \"ssacli\" is present >>>\n"
fi
# Check if "HP Controller" work correctly
# Query global controller status once; the output ($check) is reused later
# for slot discovery and the final status evaluation.
# NOTE(review): the script runs under `set -e`, so a non-zero ssacli exit
# status aborts on the assignment itself and the ${status} test below never
# runs — confirm whether that is acceptable.
check=$(sudo -u root "$ssacli" controller all show status 2>&1)
status=$?
if [ "$DEBUG" = "1" ]; then
    echo "### Check if \"HP Controller\" work correctly >>>\n""${check}""\n"
fi
if test ${status} -ne 0; then
    echo "RAID UNKNOWN - $ssacli did not execute properly : ""${check}"
    exit "$STATE_UNKNOWN"
fi
# Get "Slot" & exclude slot needed
EXCLUDE_SLOT=${EXCLUDE_SLOT:-0}
# Extract controller slot identifiers from the status output.
# "Slot \w+" instead of "Slot \w": \w matched a single character only, so a
# slot number >= 10 was truncated (e.g. "Slot 10" -> "1").
if [ "$EXCLUDE_SLOT" = "1" ]; then
    # -F -x: exclude on an exact slot match, not a substring match
    # (plain `grep -v "1"` would also drop slot 10).
    slots=$(grep -E -o "Slot \w+" <<< "$check" | awk '{print $NF}' | grep -Fxv "$excludeslot")
else
    slots=$(grep -E -o "Slot \w+" <<< "$check" | awk '{print $NF}')
fi
if [ "$DEBUG" = "1" ]; then
    echo "### Get \"Slot\" & exclude slot not needed >>>\n""${slots}""\n"
fi
# Collect logical/physical drive status for every discovered slot into $check2.
for slot in $slots; do
    # Get "logicaldrive" for slot
    # errexit is suspended so a failing ssacli can be inspected below.
    set +e
    check2b=$(sudo -u root "$ssacli" controller slot="$slot" logicaldrive all show 2>&1)
    status=$?
    if test ${status} -ne 0; then
        # Skip empty slots (controllers without any logical drive) but keep
        # scanning the remaining ones. (Was `break`, which silently aborted
        # the whole scan at the first empty slot.)
        if grep -q "The specified device does not have any logical drives." <<< "$check2b"; then
            continue
        fi
        echo "RAID UNKNOWN - $ssacli did not execute properly : ""${check2b}"
        exit "$STATE_UNKNOWN"
    fi
    set -e
    check2=${check2:-}
    check2="$check2$check2b"
    if [ "$DEBUG" = "1" ]; then
        echo "### Get \"logicaldrive\" for slot >>>\n""${check2b}""\n"
    fi
    # Get "physicaldrive" for slot
    # In default mode only failing/rebuilding drives are kept (|| true keeps
    # an empty result from tripping errexit).
    # NOTE(review): in the -p/-d branch a drive-less result makes the final
    # grep exit 1 and, under `set -e`, aborts before the ${status} test —
    # confirm.
    if [ "$PHYSICAL_DRIVE" = "1" ] || [ "$DEBUG" = "1" ]; then
        check2b=$(sudo -u root "$ssacli" controller slot="$slot" physicaldrive all show | sed -e 's/\?/\-/g' 2>&1 | grep "physicaldrive")
    else
        check2b=$(sudo -u root "$ssacli" controller slot="$slot" physicaldrive all show | sed -e 's/\?/\-/g' 2>&1 | grep "physicaldrive" | (grep "\(Failure\|Failed\|Rebuilding\)" || true))
    fi
    status=$?
    if [ "$PHYSICAL_DRIVE" = "1" ] || [ "$DEBUG" = "1" ]; then
        if test ${status} -ne 0; then
            echo "RAID UNKNOWN - $ssacli did not execute properly : ""${check2b}"
            exit "$STATE_UNKNOWN"
        fi
    fi
    printf -v check2 "%s\n%s" "$check2" "$check2b"
    if [ "$DEBUG" = "1" ]; then
        echo "### Get \"physicaldrive\" for slot >>>\n""${check2b}""\n"
    fi
done
# Check STATUS
if [ "$DEBUG" = "1" ]; then
    echo "### Check STATUS >>>"
fi
# Omit battery/capacitor/cache status check if requested
EXCLUDE_BATTERY=${EXCLUDE_BATTERY:-0}
if [ "$EXCLUDE_BATTERY" = "1" ]; then
    # Strip the battery/cache lines so they cannot trigger the
    # Failed/Disabled branches below.
    # NOTE(review): under `set -e`, grep -v exits 1 when no line remains,
    # which would abort the script here — confirm real controller output
    # always keeps at least one line.
    check=$(grep -v 'Battery/Capacitor Status: Failed (Replace Batteries/Capacitors)' <<< "$check")
    check=$(grep -v 'Cache Status: Temporarily Disabled' <<< "$check")
fi
# Default the accumulators so `set -u` does not trip on empty results.
check=${check:-}
check2=${check2:-}
check3=${check3:-}
# Evaluate most severe condition first: controller status ($check), then
# logical/physical drive status ($check2).
if grep -qiE Failed <<< "$check"; then
    echo "RAID CRITICAL - HP Smart Array Failed: ${check}"
    exit "$STATE_CRITICAL"
elif grep -qiE Disabled <<< "$check"; then
    echo "RAID CRITICAL - HP Smart Array Problem: ${check}"
    exit "$STATE_CRITICAL"
elif grep -qiE Failed <<< "$check2"; then
    echo "RAID CRITICAL - HP Smart Array Failed: ${check2}"
    exit "$STATE_CRITICAL"
elif grep -qiE Failure <<< "$check2"; then
    echo "RAID WARNING - Component Failure: ${check2}"
    exit "$STATE_WARNING"
elif grep -qiE Rebuild <<< "$check2"; then
    echo "RAID WARNING - HP Smart Array Rebuilding: ${check2}"
    exit "$STATE_WARNING"
elif grep -qiE Recover <<< "$check2"; then
    echo "RAID WARNING - HP Smart Array Recovering: ${check2}"
    exit "$STATE_WARNING"
# NOTE(review): this branch is shadowed — any $check containing
# "Cache Status: Temporarily Disabled" already matched the "Disabled"
# CRITICAL branch above — confirm the intended severity.
elif grep -qiE "Cache Status: Temporarily Disabled" <<< "$check"; then
    echo "RAID WARNING - HP Smart Array Cache Disabled: ${check}"
    exit "$STATE_WARNING"
elif grep -qiE FIRMWARE <<< "$check"; then
    echo "RAID WARNING - ${check}"
    exit "$STATE_WARNING"
else
    # No problem keyword found anywhere: report OK (with detail in
    # verbose/debug mode).
    if [ "$DEBUG" = "1" ] || [ "$VERBOSE" = "1" ]; then
        check3=$(grep -E Status <<< "$check")
        printf "RAID OK: %s\n%s\n" "$check2" "$check3"
    else
        echo "RAID OK"
    fi
    exit "$STATE_OK"
fi
# Defensive fallback; every branch above exits, so this is unreachable.
exit "$STATE_UNKNOWN"

View file

@ -69,6 +69,7 @@ command[check_varnish]={{ nagios_plugins_directory }}/check_varnish_health -i 12
command[check_haproxy]=sudo {{ nagios_plugins_directory }}/check_haproxy_stats -s /run/haproxy/admin.sock -w 80 -c 90 --ignore-maint --ignore-nolb
command[check_minifirewall]=sudo {{ nagios_plugins_directory }}/check_minifirewall
command[check_redis_instances]={{ nagios_plugins_directory }}/check_redis_instances
command[check_hpraid]={{ nagios_plugins_directory }}/check_hpraid
# Check HTTP "many". Use this to check many websites (http, https, ports, sockets and SSL certificates).
# Beware! All checks must not take more than 10s!

View file

@ -9,6 +9,6 @@ proftpd_ftps_port: 990
proftpd_ftps_cert: "/etc/ssl/certs/ssl-cert-snakeoil.pem"
proftpd_ftps_key: "/etc/ssl/private/ssl-cert-snakeoil.key"
proftpd_sftp_enable: False
proftpd_sftp_port: 2222
proftpd_sftp_port: 22222
proftpd_accounts: []
proftpd_accounts_final: []

View file

@ -25,6 +25,7 @@
DefaultRoot ~
PassivePorts 60000 61000
TransferLog /var/log/proftpd/xferlog
<Limit LOGIN>
AllowGroup ftpusers

View file

@ -12,6 +12,7 @@
DefaultRoot ~
SFTPLog /var/log/proftpd/sftp.log
TransferLog /var/log/proftpd/xferlog
SFTPAuthMethods password
SFTPHostKey /etc/ssh/ssh_host_ecdsa_key

View file

@ -7,7 +7,7 @@
^hwraid\.le-vert\.net$
^.*\.clamav\.net$
^spamassassin\.apache\.org$
^.*\.sa-update.*$
^.*sa-update.*$
^pear\.php\.net$
^repo\.mysql\.com$
^deb\.nodesource\.com$

View file

@ -7,7 +7,7 @@ http://www.kernel.org/.*
http://hwraid.le-vert.net/.*
http://.*.clamav.net/.*
http://spamassassin.apache.org/.*
http://.*.sa-update.*
http://.*sa-update.*
http://pear.php.net/.*
http://repo.mysql.com/.*

View file

@ -1,2 +1,5 @@
---
tomcat_instance_root: '/srv/tomcat'
tomcat_root_dir_owner: root
tomcat_root_dir_group: root

View file

@ -33,8 +33,8 @@
file:
path: "{{ tomcat_instance_root }}"
state: directory
owner: 'root'
group: 'root'
owner: "{{ tomcat_root_dir_owner | default('root') }}"
group: "{{ tomcat_root_dir_group | default('root') }}"
mode: "0755"
- name: Copy systemd unit

View file

@ -0,0 +1,19 @@
---
nextcloud_webserver: 'nginx'
nextcloud_version: "19.0.0"
nextcloud_archive_name: "nextcloud-{{ nextcloud_version }}.tar.bz2"
nextcloud_releases_baseurl: "https://download.nextcloud.com/server/releases/"
nextcloud_instance_name: "nextcloud"
nextcloud_user: "{{ nextcloud_instance_name }}"
nextcloud_domains: []
nextcloud_home: "/home/{{ nextcloud_user }}"
nextcloud_webroot: "{{ nextcloud_home }}/nextcloud"
nextcloud_data: "{{ nextcloud_webroot }}/data"
nextcloud_db_user: "{{ nextcloud_user }}"
nextcloud_db_name: "{{ nextcloud_instance_name }}"
nextcloud_admin_login: "admin"
nextcloud_admin_password: ""

View file

@ -0,0 +1,10 @@
---
# Handlers for the nextcloud role (notified by the vhost tasks).
- name: reload php-fpm
  service:
    # NOTE(review): the PHP version is hard-coded; this handler breaks on
    # hosts running a different PHP release — confirm or parameterize.
    name: php7.3-fpm
    state: reloaded

- name: reload nginx
  service:
    name: nginx
    state: reloaded

View file

@ -0,0 +1,4 @@
---
# dependencies:
# - { role: nginx, when: nextcloud_webserver == 'nginx' }
# - { role: php, php_fpm_enable: True }

View file

@ -0,0 +1,37 @@
---
- name: Retrieve Nextcloud archive
get_url:
url: "{{ nextcloud_releases_baseurl }}{{ nextcloud_archive_name }}"
dest: "{{ nextcloud_home }}/{{ nextcloud_archive_name }}"
force: no
tags:
- nextcloud
- name: Retrieve Nextcloud sha256 checksum
get_url:
url: "{{ nextcloud_releases_baseurl }}{{ nextcloud_archive_name }}.sha256"
dest: "{{ nextcloud_home }}/{{ nextcloud_archive_name }}.sha256"
force: no
tags:
- nextcloud
- name: Verify Nextcloud sha256 checksum
command: "sha256sum -c {{ nextcloud_archive_name }}.sha256"
changed_when: "False"
args:
chdir: "{{ nextcloud_home }}"
tags:
- nextcloud
- name: Extract Nextcloud archive
unarchive:
src: "{{ nextcloud_home }}/{{ nextcloud_archive_name }}"
dest: "{{ nextcloud_home }}"
creates: "{{ nextcloud_home }}/nextcloud"
remote_src: True
mode: "0750"
owner: "{{ nextcloud_user }}"
group: "{{ nextcloud_user }}"
tags:
- nextcloud

View file

@ -0,0 +1,81 @@
---
- block:
- name: Generate admin password
command: 'apg -n 1 -m 16 -M lcN'
register: nextcloud_admin_password_apg
check_mode: no
changed_when: False
- debug:
var: nextcloud_admin_password_apg
- set_fact:
nextcloud_admin_password: "{{ nextcloud_admin_password_apg.stdout }}"
tags:
- nextcloud
when: nextcloud_admin_password == ""
- name: Get Nextcloud Status
shell: "php ./occ status --output json | grep -v 'Nextcloud is not installed'"
args:
chdir: "{{ nextcloud_webroot }}"
become_user: "{{ nextcloud_user }}"
register: nc_status
check_mode: no
tags:
- nextcloud
- name: Install Nextcloud
  # Run the occ installer once. The installer writes
  # <webroot>/config/config.php (webroot = {{ nextcloud_home }}/nextcloud),
  # so that file is the idempotency marker. The previous value,
  # "{{ nextcloud_home }}/config/config.php", is a path that never exists,
  # so `creates` never matched and the task re-ran on every play.
  command: "php ./occ maintenance:install --database mysql --database-name {{ nextcloud_db_name | mandatory }} --database-user {{ nextcloud_db_user | mandatory }} --database-pass {{ nextcloud_db_pass | mandatory }} --admin-user {{ nextcloud_admin_login | mandatory }} --admin-pass {{ nextcloud_admin_password | mandatory }} --data-dir {{ nextcloud_data | mandatory }}"
  args:
    chdir: "{{ nextcloud_webroot }}"
    creates: "{{ nextcloud_webroot }}/config/config.php"
  become_user: "{{ nextcloud_user }}"
  when: (nc_status.stdout | from_json).installed == false
  tags:
    - nextcloud
- name: Configure Nextcloud Mysql password
replace:
dest: "{{ nextcloud_home }}/nextcloud/config/config.php"
regexp: "'dbpassword' => '([^']*)',"
replace: "'dbpassword' => '{{ nextcloud_db_pass }}',"
tags:
- nextcloud
- name: Configure Nextcloud cron
cron:
name: 'Nextcloud'
minute: "*/5"
job: "php -f {{ nextcloud_webroot }}/cron.php"
user: "{{ nextcloud_user }}"
tags:
- nextcloud
- name: Erase previously trusted domains config
command: "php ./occ config:system:set trusted_domains"
args:
chdir: "{{ nextcloud_webroot }}"
become_user: "{{ nextcloud_user }}"
tags:
- nextcloud
- name: Configure trusted domains
command: "php ./occ config:system:set trusted_domains {{ item.0 }} --value {{ item.1 }}"
args:
chdir: "{{ nextcloud_webroot }}"
with_indexed_items:
- "{{ nextcloud_domains }}"
become_user: "{{ nextcloud_user }}"
tags:
- nextcloud
#- name: Configure memcache local to APCu
# command: "php ./occ config:system:set memcache.local --value '\\OC\\Memcache\\APCu'"
# args:
# chdir: "{{ nextcloud_webroot }}"
# become_user: "{{ nextcloud_user }}"
# tags:
# - nextcloud

View file

@ -0,0 +1,31 @@
---
- name: Install dependencies
apt:
state: present
name:
- bzip2
- php-gd
- php-json
- php-xml
- php-mbstring
- php-zip
- php-curl
- php-bz2
- php-intl
- php-gmp
- php-apcu
- php-redis
- php-bcmath
- python-mysqldb
tags:
- nextcloud
- include: user.yml
- include: archive.yml
- include: vhost.yml
- include: mysql.yml
- include: config.yml

View file

@ -0,0 +1,62 @@
---
- name: Get actual Mysql password
shell: "grep password {{ nextcloud_home }}/.my.cnf | awk '{ print $3 }'"
register: nextcloud_db_pass_grep
check_mode: no
changed_when: False
failed_when: False
tags:
- nextcloud
- name: Generate Mysql password
command: 'apg -n 1 -m 16 -M lcN'
register: nextcloud_db_pass_apg
check_mode: no
changed_when: False
tags:
- nextcloud
- name: Set Mysql password
set_fact:
nextcloud_db_pass: "{{ nextcloud_db_pass_grep.stdout | default(nextcloud_db_pass_apg.stdout, True) }}"
tags:
- nextcloud
- debug:
var: nextcloud_db_pass
verbosity: 1
- name: Create Mysql database
mysql_db:
name: "{{ nextcloud_db_name }}"
config_file: "/root/.my.cnf"
state: present
tags:
- nextcloud
- name: Create Mysql user
mysql_user:
name: "{{ nextcloud_db_user }}"
password: '{{ nextcloud_db_pass }}'
priv: "{{ nextcloud_db_name }}.*:ALL"
config_file: "/root/.my.cnf"
update_password: always
state: present
tags:
- nextcloud
- name: Store credentials in my.cnf
ini_file:
dest: "{{ nextcloud_home }}/.my.cnf"
owner: "{{ nextcloud_user }}"
group: "{{ nextcloud_user }}"
mode: "0600"
section: client
option: "{{ item.option }}"
value: "{{ item.value }}"
with_items:
- { option: "user", value: "{{ nextcloud_db_user }}" }
- { option: "database", value: "{{ nextcloud_db_name }}" }
- { option: "password", value: "{{ nextcloud_db_pass }}" }
tags:
- nextcloud

View file

@ -0,0 +1,38 @@
---
- name: Create Nextcloud group
group:
name: "{{ nextcloud_instance_name | mandatory }}"
state: present
tags:
- nextcloud
- name: Create Nextcloud user
user:
name: "{{ nextcloud_user | mandatory }}"
group: "{{ nextcloud_user }}"
home: "{{ nextcloud_home | mandatory }}"
shell: '/bin/bash'
createhome: True
state: present
tags:
- nextcloud
- name: Add the user 'www-data' to Nextcloud group
user:
name: www-data
groups: "{{ nextcloud_user | mandatory }}"
append: yes
- name: Create top-level directories
file:
dest: "{{ item }}"
state: directory
mode: "0770"
owner: "{{ nextcloud_user }}"
group: "{{ nextcloud_user }}"
with_items:
- "{{ nextcloud_home }}/log"
- "{{ nextcloud_home }}/tmp"
- "{{ nextcloud_home }}/data"
tags:
- nextcloud

View file

@ -0,0 +1,34 @@
---
- block:
- name: Copy Nginx vhost
template:
src: nginx.conf.j2
dest: "/etc/nginx/sites-available/{{ nextcloud_instance_name }}.conf"
mode: "0640"
notify: reload nginx
tags:
- nextcloud
- name: Enable Nginx vhost
file:
src: "/etc/nginx/sites-available/{{ nextcloud_instance_name }}.conf"
dest: "/etc/nginx/sites-enabled/{{ nextcloud_instance_name }}.conf"
state: link
notify: reload nginx
tags:
- nextcloud
- name: Generate ssl config
shell:
cmd: "/usr/local/sbin/vhost-domains {{ nextcloud_instance_name }} | /usr/local/sbin/make-csr {{ nextcloud_instance_name }}"
creates: "/etc/nginx/ssl/{{ nextcloud_instance_name }}.conf"
- name: Copy PHP-FPM pool
template:
src: php-fpm.conf.j2
dest: "/etc/php/7.3/fpm/pool.d/{{ nextcloud_instance_name }}.conf"
mode: "0640"
notify: reload php-fpm
tags:
- nextcloud
when: nextcloud_webserver == 'nginx'

View file

@ -0,0 +1,134 @@
upstream php-handler-{{ nextcloud_instance_name }} {
server unix:/var/run/php/php-fpm-{{ nextcloud_instance_name }}.sock;
}
server {
listen 80;
listen [::]:80;
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name {{ nextcloud_domains | join(' ') }};
access_log {{ nextcloud_home }}/log/access.log;
error_log {{ nextcloud_home }}/log/error.log;
include /etc/nginx/snippets/letsencrypt.conf;
include /etc/nginx/ssl/{{ nextcloud_instance_name }}.conf;
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
# Remove X-Powered-By, which is an information leak
fastcgi_hide_header X-Powered-By;
root {{ nextcloud_webroot }};
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# The following 2 rules are only needed for the user_webfinger app.
# Uncomment it if you're planning to use this app.
rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json last;
# The following rule is only needed for the Social app.
# Uncomment it if you're planning to use this app.
rewrite ^/.well-known/webfinger /public.php?service=webfinger last;
location = /.well-known/carddav {
return 301 $scheme://$host:$server_port/remote.php/dav;
}
location = /.well-known/caldav {
return 301 $scheme://$host:$server_port/remote.php/dav;
}
# set max upload size
client_max_body_size 512M;
fastcgi_buffers 64 4K;
# Enable gzip but do not remove ETag headers
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
location / {
rewrite ^ /index.php;
}
location ~ ^\/(?:build|tests|config|lib|3rdparty|templates|data)\/ {
deny all;
}
location ~ ^\/(?:\.|autotest|occ|issue|indie|db_|console) {
deny all;
}
location ~ ^\/(?:index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[ms]-provider\/.+|.+\/richdocumentscode\/proxy)\.php(?:$|\/) {
fastcgi_split_path_info ^(.+?\.php)(\/.*|)$;
set $path_info $fastcgi_path_info;
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $path_info;
fastcgi_param HTTPS on;
# Avoid sending the security headers twice
fastcgi_param modHeadersAvailable true;
# Enable pretty urls
fastcgi_param front_controller_active true;
fastcgi_pass php-handler-{{ nextcloud_instance_name }};
fastcgi_intercept_errors on;
fastcgi_request_buffering off;
}
location ~ ^\/(?:updater|oc[ms]-provider)(?:$|\/) {
try_files $uri/ =404;
index index.php;
}
# Adding the cache control header for js, css and map files
# Make sure it is BELOW the PHP block
location ~ \.(?:css|js|woff2?|svg|gif|map)$ {
try_files $uri /index.php$request_uri;
add_header Cache-Control "public, max-age=15778463";
# Add headers to serve security related headers (It is intended to
# have those duplicated to the ones above)
# Before enabling Strict-Transport-Security headers please read into
# this topic first.
#add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload;" always;
#
# WARNING: Only add the preload option once you read about
# the consequences in https://hstspreload.org/. This option
# will add the domain to a hardcoded list that is shipped
# in all major browsers and getting removed from this list
# could take several months.
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
# Optional: Don't log access to assets
access_log off;
}
location ~ \.(?:png|html|ttf|ico|jpg|jpeg|bcmap|mp4|webm)$ {
try_files $uri /index.php$request_uri;
# Optional: Don't log access to other assets
access_log off;
}
}

View file

@ -0,0 +1,17 @@
[{{ nextcloud_instance_name }}]
user = {{ nextcloud_user }}
group = {{ nextcloud_user }}
listen = /run/php/php-fpm-{{ nextcloud_instance_name }}.sock
listen.owner = {{ nextcloud_user }}
listen.group = {{ nextcloud_user }}
pm = ondemand
pm.max_children = 50
pm.process_idle_timeout = 120s
pm.status_path = /fpm_status
env[HOSTNAME] = $HOSTNAME
env[PATH] = /usr/local/bin:/usr/bin:/bin
env[TMP] = {{ nextcloud_home }}/tmp
env[TMPDIR] = {{ nextcloud_home }}/tmp
env[TEMP] = {{ nextcloud_home }}/tmp