Merge branch 'unstable' into stable

This commit is contained in:
Jérémy Lecour 2017-12-08 10:34:07 +01:00 committed by Jérémy Lecour
commit e4daf04110
66 changed files with 818 additions and 318 deletions

View File

@ -1,4 +1,6 @@
---
elastic_stack_version: "5.x"
elasticsearch_cluster_name: Null
elasticsearch_node_name: "${HOSTNAME}"
elasticsearch_network_host: "[_site_, _local_]"

View File

@ -1,9 +1,13 @@
---
- name: Use the correct debian repository
set_fact:
curator_debian_repository: '{% if ansible_distribution_release == "jessie" %}debian{% else %}debian9{% endif %}'
- name: Curator sources list is available
apt_repository:
repo: "deb http://packages.elastic.co/curator/4/debian stable main"
filename: elastic
repo: "deb https://packages.elastic.co/curator/5/{{ curator_debian_repository }} stable main"
filename: curator
update_cache: yes
state: present
tags:

View File

@ -19,7 +19,7 @@
- name: Elastic sources list is available
apt_repository:
repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
repo: "deb https://artifacts.elastic.co/packages/{{ elastic_stack_version | mandatory }}/apt stable main"
filename: elastic
state: present
update_cache: yes

View File

@ -1,51 +1,57 @@
# Evoacme 1.5
# Evoacme 2.0
EvoAcme is an [Ansible](https://www.ansible.com/) role and a [Certbot](https://certbot.eff.org) wrapper to generate [Let's Encrypt](https://letsencrypt.org/) certificates.
It is a project hosted at [Evolix's forge](https://forge.evolix.org/projects/ansible-roles/repository/)
# How to install
Evoacme is open source software licensed under the AGPLv3 License.
1 - Create a playbook with evoacme role
## Install
### 1 - Create a playbook with evoacme role
~~~
---
- hosts: hostname
become: yes
roles:
- role: evoacme
- hosts: hostname
become: yes
roles:
- evoacme
~~~
2 - Install evoacme prerequisite with ansible
### 2 - Install evoacme prerequisite with ansible
~~~
ansible-playbook playbook.yml -Kl hostname
# ansible-playbook playbook.yml -K --limit hostname
~~~
3 - Include letsencrypt.conf in your webserver
### 3 - Include letsencrypt.conf in your webserver
For Apache, you just need to ensure that you don't overwrite "/.well-known/acme-challenge" Alias with a Redirect or Rewrite directive.
For Nginx, you must include letsencrypt.conf in all wanted vhost :
For Nginx, you must include `/etc/nginx/snippets/letsencrypt.conf` in all wanted vhosts :
~~~
include /etc/nginx/letsencrypt.conf;
nginx -t
service nginx reload
server {
[…]
include /etc/nginx/snippets/letsencrypt.conf;
[…]
}
~~~
4 - Create a CSR for a vhost with make-csr
then reload the Nginx configuration :
~~~
# make-csr look for this file :
# /etc/nginx/sites-enabled/vhostname
# /etc/nginx/sites-enabled/vhostname.conf
# /etc/apache2/sites-enabled/vhostname
# /etc/apache2/sites-enabled/vhostname.conf
make-csr vhostname
# nginx -t
# service nginx reload
~~~
5 - Generate the certificate with evoacme
### 4 - Create a CSR for a vhost with make-csr
~~~
# make-csr vhostname domain...
~~~
### 5 - Generate the certificate with evoacme
~~~
# evoacme look for /etc/ssl/requests/vhostname
@ -53,7 +59,7 @@ make-csr vhostname
evoacme vhostname
~~~
6 - Include ssl configuration
### 6 - Include ssl configuration
Once the SSL configuration has been generated, you must include it in your vhost.
@ -68,7 +74,3 @@ For Nginx :
~~~
include /etc/nginx/ssl/vhost.conf;
~~~
# License
Evoacme is open source software licensed under the AGPLv3 License.

View File

@ -6,7 +6,7 @@
name: apt
tasks_from: backports.yml
- name: Add exceptions for certbot dependances
- name: Add exceptions for certbot dependencies
copy:
src: backports-certbot
dest: /etc/apt/preferences.d/z-backports-certbot
@ -28,26 +28,14 @@
path: /usr/local/bin/certbot
state: absent
- name: stat /etc/cron.d/certbot
stat:
path: /etc/cron.d/certbot
register: etc_cron_d_certbot
- name: Disable /etc/cron.d/certbot
command: mv /etc/cron.d/certbot /etc/cron.d/certbot.disabled
args:
removes: /etc/cron.d/certbot
creates: /etc/cron.d/certbot.disabled
- name: Rename certbot dpkg cron to .disabled
- name: Install evoacme custom cron
copy:
remote_src: True
src: /etc/cron.d/certbot
dest: /etc/cron.d/certbot.disabled
when: etc_cron_d_certbot.stat.exists
- name: Remove certbot dpkg cron
file:
path: /etc/cron.d/certbot
state: absent
- name: Install certbot custom cron
copy:
src: certbot.cron
dest: /etc/cron.daily/certbot
src: evoacme.cron
dest: /etc/cron.daily/evoacme
mode: "0755"

View File

@ -1,7 +1,15 @@
---
- name: move acme challenge conf if misplaced
command: mv /etc/nginx/letsencrypt.conf /etc/nginx/snippets/letsencrypt.conf
args:
removes: /etc/nginx/letsencrypt.conf
creates: /etc/nginx/snippets/letsencrypt.conf
- name: Copy acme challenge conf
template:
src: templates/nginx.conf.j2
dest: /etc/nginx/letsencrypt.conf
dest: /etc/nginx/snippets/letsencrypt.conf
owner: root
group: root
mode: "0644"

View File

@ -1,4 +1,9 @@
---
- name: dbus is installed
apt:
name: dbus
state: installed
- name: Set hostname "{{ evolinux_hostname }}"
hostname:
name: "{{ evolinux_hostname }}"

View File

@ -32,6 +32,7 @@
- mtr-tiny
- curl
- telnet
- traceroute
when: evolinux_packages_diagnostic
- name: Install/Update hardware tools
@ -59,12 +60,6 @@
- pinentry-curses
when: evolinux_packages_common
- name: Install/Update serveur-base meta-package
apt:
name: serveur-base
allow_unauthenticated: yes
when: evolinux_packages_serveur_base
- name: Be sure that openntpd package is absent/purged
apt:
name: openntpd
@ -72,6 +67,12 @@
purge: yes
when: evolinux_packages_purge_openntpd
- name: Install/Update serveur-base meta-package
apt:
name: serveur-base
allow_unauthenticated: yes
when: evolinux_packages_serveur_base
- name: Install/Update packages for Stretch and later
apt:
name: "{{ item }}"

View File

@ -1,3 +1,5 @@
---
elastic_stack_version: "5.x"
filebeat_kibana_dashboards: False
filebeat_logstash_plugin: False

View File

@ -19,7 +19,7 @@
- name: Elastic sources list is available
apt_repository:
repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
repo: "deb https://artifacts.elastic.co/packages/{{ elastic_stack_version | mandatory }}/apt stable main"
filename: elastic
state: present
update_cache: yes

View File

@ -507,12 +507,12 @@ fi
if [ -n "${clamav_version}" ]; then
cat <<EOT >> "${ldif_file}"
dn: ServiceName=clamav_db,${computer_dn}
dn: ServiceName=clamd,${computer_dn}
NagiosEnabled: TRUE
objectClass: EvoService
ServiceName: clamav_db
ServiceName: clamd
ServiceType: antivirus
ServiceVersion: ClamAV ${clamav_version}
ServiceVersion: Clamd ${clamav_version}
EOT
fi

View File

@ -1,5 +1,10 @@
---
## TODO: add those URLs or domains to the proxy whitelist
# http://pkg.jenkins-ci.org/.*
# http://mirrors.jenkins.io/.*
# http://jenkins.mirror.isppower.de/.*
- name: Add jenkins GPG key
apt_key:
# url: https://jenkins-ci.org/debian/jenkins-ci.org.key

View File

@ -1,4 +1,5 @@
---
elastic_stack_version: "5.x"
kibana_server_host: "127.0.0.1"
kibana_server_basepath: ""

View File

@ -19,7 +19,7 @@
- name: Elastic sources list is available
apt_repository:
repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
repo: "deb https://artifacts.elastic.co/packages/{{ elastic_stack_version | mandatory }}/apt stable main"
filename: elastic
state: present
update_cache: yes
@ -64,27 +64,27 @@
owner: root
group: root
- name: Get mount options for /usr partition
shell: "mount | grep 'on /usr type'"
args:
warn: no
register: mount
changed_when: False
failed_when: False
when: not ansible_check_mode
- block:
- include_role:
name: remount-usr
- name: Move kibana optimize directory
shell: "mv /usr/share/kibana/{{ item }} /var/lib/kibana/{{ item }} && ln -s /var/lib/kibana/{{ item }} /usr/share/kibana/{{ item }}"
args:
creates: "/var/lib/kibana/{{ item }}"
notify: restart kibana
with_items:
- optimize
- data
# - name: Get mount options for /usr partition
# shell: "mount | grep 'on /usr type'"
# args:
# warn: no
# register: mount
# changed_when: False
# failed_when: False
# when: not ansible_check_mode
#
# - block:
# - include_role:
# name: remount-usr
#
# - name: Move kibana optimize directory
# shell: "mv /usr/share/kibana/{{ item }} /var/lib/kibana/{{ item }} && ln -s /var/lib/kibana/{{ item }} /usr/share/kibana/{{ item }}"
# args:
# creates: "/var/lib/kibana/{{ item }}"
# notify: restart kibana
# with_items:
# - optimize
# - data
- include: proxy_nginx.yml
when: kibana_proxy_nginx

View File

@ -1,4 +1,6 @@
---
elastic_stack_version: "5.x"
logstash_jvm_xms: 256m
logstash_jvm_xmx: 1g
logstash_log_rotate_days: 365

View File

@ -19,7 +19,7 @@
- name: Elastic sources list is available
apt_repository:
repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
repo: "deb https://artifacts.elastic.co/packages/{{ elastic_stack_version | mandatory }}/apt stable main"
filename: elastic
state: present
update_cache: yes
@ -59,6 +59,9 @@
template:
src: "{{ item }}"
dest: /etc/logstash/conf.d/logstash.conf
owner: logstash
group: logstash
mode: "0640"
force: yes
with_first_found:
- "templates/logstash/logstash.{{ inventory_hostname }}.conf.j2"

View File

@ -50,7 +50,7 @@ DNSSERVEURS='0.0.0.0/0'
# HTTP authorizations
# (you can use DNS names but set cron to reload minifirewall regularly)
# (if you have HTTP proxy, set 0.0.0.0/0)
HTTPSITES='security.debian.org pub.evolix.net volatile.debian.org mirror.evolix.org backports.debian.org hwraid.le-vert.net zidane.evolix.net antispam00.evolix.org spamassassin.apache.org sa-update.space-pro.be sa-update.secnap.net www.sa-update.pccc.com sa-update.dnswl.org'
HTTPSITES='security.debian.org pub.evolix.net volatile.debian.org mirror.evolix.org backports.debian.org hwraid.le-vert.net antispam00.evolix.org spamassassin.apache.org sa-update.space-pro.be sa-update.secnap.net www.sa-update.pccc.com sa-update.dnswl.org'
# HTTPS authorizations
HTTPSSITES='0.0.0.0/0'

View File

@ -1,5 +1,12 @@
---
- debug:
var: minifirewall_trusted_ips
verbosity: 1
- debug:
var: minifirewall_privilegied_ips
verbosity: 1
- name: Check if minifirewall is running
shell: /sbin/iptables -L -n | grep -E "^(DROP\s+udp|ACCEPT\s+icmp)\s+--\s+0\.0\.0\.0\/0\s+0\.0\.0\.0\/0\s*$"
changed_when: False

View File

@ -1,6 +1,8 @@
# mongodb-org
# mongodb
Install latest MongoDB from 10Gen repository.
Install MongoDB
We use packages from 10Gen for Jessie and packages from Debian for Stretch.
## Tasks
@ -8,8 +10,6 @@ Everything is in the `tasks/main.yml` file.
## Available variables
* `mongodb_pidfile_path`: PID file path (default: `/var/lib/mongodb/mongod.lock`)
* `mongodb_logfile_path`: log file path (default: `/var/log/mongodb/mongod.log`)
* `mongodb_port`: port to listen to (default: `27017`)
* `mongodb_bind`: IP to bind to (default: `127.0.0.1`)

View File

@ -1,5 +1,4 @@
---
mongodb_pidfile_path: /var/lib/mongodb/mongod.lock
mongodb_logfile_path: /var/log/mongodb/mongod.log
mongodb_port: 27017
mongodb_bind: 127.0.0.1

View File

@ -1,7 +1,11 @@
---
# handlers file for mongodb
- name: restart mongodb
- name: restart mongod
service:
name: mongod
state: restarted
- name: restart mongodb
service:
name: mongodb
state: restarted

View File

@ -1,44 +1,12 @@
---
- fail:
msg: only compatible with Debian 8
when:
- ansible_distribution != "Debian" or ansible_distribution_release != "jessie"
# - fail:
# msg: only compatible with Debian 8
# when:
# - ansible_distribution != "Debian" or ansible_distribution_release != "jessie"
# Attention à bien indiquer le protocole et le port, sinon le firewall ne laisse pas passer
- name: MongoDB public GPG Key
apt_key:
# url: https://www.mongodb.org/static/pgp/server-3.4.asc
data: "{{ lookup('file', 'server-3.4.asc') }}"
- include: main_jessie.yml
when: ansible_distribution_release == "jessie"
- name: enable APT sources list
apt_repository:
repo: deb http://repo.mongodb.org/apt/debian jessie/mongodb-org/3.4 main
state: present
filename: mongodb
update_cache: yes
- name: Install packages
apt:
name: mongodb-org
state: installed
- name: Custom configuration
template:
src: mongod.conf.j2
dest: /etc/mongod.conf
force: yes
backup: no
notify: restart mongodb
- name: Configure logrotate
template:
src: logrotate.j2
dest: /etc/logrotate.d/mongodb
force: yes
backup: no
- name: enable mongod service
service:
name: mongod
enabled: yes
- include: main_stretch.yml
when: ansible_distribution_major_version | version_compare('9', '>=')

View File

@ -0,0 +1,33 @@
---
- name: MongoDB public GPG Key
apt_key:
# url: https://www.mongodb.org/static/pgp/server-3.4.asc
data: "{{ lookup('file', 'server-3.4.asc') }}"
- name: enable APT sources list
apt_repository:
repo: deb http://repo.mongodb.org/apt/debian jessie/mongodb-org/3.4 main
state: present
filename: mongodb
update_cache: yes
- name: Install packages
apt:
name: mongodb-org
state: installed
- name: Custom configuration
template:
src: mongod_jessie.conf.j2
dest: "/etc/mongod.conf"
force: yes
backup: no
notify: restart mongod
- name: Configure logrotate
template:
src: logrotate_jessie.j2
dest: /etc/logrotate.d/mongodb
force: yes
backup: no

View File

@ -0,0 +1,29 @@
---
- name: Install packages
apt:
name: "{{ item }}"
state: installed
with_items:
- mongodb
- mongo-tools
- name: Custom configuration
template:
src: mongodb_stretch.conf.j2
dest: "/etc/mongodb.conf"
force: yes
backup: no
notify: restart mongodb
- name: enable service
service:
name: mongodb
enabled: yes
- name: Configure logrotate
template:
src: logrotate_stretch.j2
dest: /etc/logrotate.d/mongodb
force: yes
backup: no

View File

@ -1,6 +1,6 @@
# {{ ansible_managed }}
{{ mongodb_logfile_path }} {
/var/log/mongodb/mongod.log {
daily
missingok
rotate 365
@ -10,6 +10,6 @@
notifempty
sharedscripts
postrotate
kill -0 $(cat {{ mongodb_pidfile_path }}) && kill -USR1 $(cat {{ mongodb_pidfile_path }})
pidof mongod | xargs kill -USR1
endscript
}

View File

@ -0,0 +1,15 @@
# {{ ansible_managed }}
/var/log/mongodb/mongodb.log {
daily
missingok
rotate 365
dateext
compress
delaycompress
notifempty
sharedscripts
postrotate
pidof mongod | xargs kill -USR1
endscript
}

View File

@ -17,7 +17,7 @@ systemLog:
destination: file
logRotate: reopen
logAppend: true
path: {{ mongodb_logfile_path }}
path: /var/log/mongodb/mongod.log
# network interfaces
net:
@ -25,7 +25,7 @@ net:
bindIp: {{ mongodb_bind }}
processManagement:
pidFilePath: {{ mongodb_pidfile_path }}
pidFilePath: /var/lib/mongodb/mongod.lock
#security:

View File

@ -0,0 +1,39 @@
# mongodb.conf - {{ ansible_managed }}
# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/
# Where and how to store data.
storage:
dbPath: /var/lib/mongodb
journal:
enabled: true
# engine:
# mmapv1:
# wiredTiger:
# where to write logging data.
systemLog:
destination: file
logRotate: reopen
logAppend: true
path: /var/log/mongodb/mongodb.log
# network interfaces
net:
port: {{ mongodb_port }}
bindIp: {{ mongodb_bind }}
#security:
#operationProfiling:
#replication:
#sharding:
## Enterprise-Only Options:
#auditLog:
#snmp:

View File

@ -22,3 +22,6 @@ mysql_cron_mysqltuner: True
mysql_cron_mysqltuner_frequency: monthly
mysql_force_new_nrpe_password: False
mysql_evolinux_defaults_file: z-evolinux-defaults.cnf
mysql_evolinux_custom_file: zzz-evolinux-custom.cnf

View File

@ -1,8 +1,12 @@
---
- set_fact:
mysql_config_directory: /etc/mysql/conf.d
- name: "Copy MySQL defaults config file (jessie)"
copy:
src: evolinux-defaults.cnf
dest: /etc/mysql/conf.d/z-evolinux-defaults.cnf
dest: "{{ mysql_config_directory }}/{{ mysql_evolinux_defaults_file }}"
owner: root
group: root
mode: "0644"
@ -13,7 +17,7 @@
- name: "Copy MySQL custom config file (jessie)"
template:
src: evolinux-custom.cnf.j2
dest: /etc/mysql/conf.d/zzz-evolinux-custom.cnf
dest: "{{ mysql_config_directory }}/{{ mysql_evolinux_custom_file }}"
owner: root
group: root
mode: "0644"

View File

@ -1,8 +1,12 @@
---
- set_fact:
mysql_config_directory: /etc/mysql/mariadb.conf.d
- name: "Copy MySQL defaults config file (Debian 9 or later)"
copy:
src: evolinux-defaults.cnf
dest: /etc/mysql/mariadb.conf.d/z-evolinux-defaults.cnf
dest: "{{ mysql_config_directory }}/{{ mysql_evolinux_defaults_file }}"
owner: root
group: root
mode: "0644"
@ -13,7 +17,7 @@
- name: "Copy MySQL custom config file (Debian 9 or later)"
template:
src: evolinux-custom.cnf.j2
dest: /etc/mysql/mariadb.conf.d/zzz-evolinux-custom.cnf
dest: "{{ mysql_config_directory }}/{{ mysql_evolinux_custom_file }}"
owner: root
group: root
mode: "0644"

View File

@ -39,4 +39,7 @@
state: started
tags:
- mysql
when: mysql_custom_datadir != '' and mysql_custom_datadir != mysql_current_real_datadir_test.stdout and not mysql_custom_datadir_test.stat.exists
when:
- mysql_custom_datadir != ''
- mysql_custom_datadir != mysql_current_real_datadir_test.stdout
- not mysql_custom_datadir_test.stat.exists

View File

@ -22,6 +22,7 @@
- name: Create a password for NRPE
command: "apg -n 1 -m 16 -M lcN"
register: mysql_nrpe_password
check_mode: no
changed_when: False
- name: Create nrpe user

View File

@ -13,7 +13,7 @@
- name: Configure tmpdir
ini_file:
dest: /etc/mysql/conf.d/zzz_evolinux.cnf
dest: "{{ mysql_config_directory }}/{{ mysql_evolinux_custom_file }}"
section: mysqld
option: tmpdir
value: "{{ mysql_custom_tmpdir }}"

View File

@ -22,3 +22,8 @@ file = /var/log/syslog
pattern = "as a STORAGE ENGINE failed"
mailto = {{ log2mail_alert_email or general_alert_email | mandatory }}
template = /etc/log2mail/mail
file = /var/log/syslog
pattern = "The total blob data length"
mailto = {{ log2mail_alert_email or general_alert_email | mandatory }}
template = /etc/log2mail/mail

View File

@ -50,6 +50,7 @@ command[check_ssl]=/usr/lib/nagios/plugins/check_http -f follow -I 127.0.0.1 -S
command[check_elasticsearch]=/usr/lib/nagios/plugins/check_http -I 127.0.0.1 -u /_cat/health?h=st -p 9200 -r 'red' --invert-regex
command[check_memcached]=/usr/lib/nagios/plugins/check_tcp -H 127.0.0.1 -p 11211
command[check_opendkim]=/usr/lib/nagios/plugins/check_tcp -H 127.0.0.1 -p 54321
command[check_bkctld]=/usr/lib/nagios/plugins/check_bkctld
# Local checks (not packaged)
command[check_mem]={{ nagios_plugins_directory }}/check_mem -f -C -w 20 -c 10

View File

@ -1,3 +1,3 @@
Package: nginx nginx-common nginx-doc nginx-extras nginx-extras-dbg nginx-full nginx-full-dbg nginx-light nginx-light-dbg libnginx-mod-* libssl1.0.0
Package: nginx nginx-* libnginx-* libssl*
Pin: release a=jessie-backports
Pin-Priority: 999

View File

@ -1,130 +0,0 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: spawn-fcgi-munin-graph
# Required-Start: $all
# Required-Stop: $all
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Description: starts FastCGI for Munin-Graph
### END INIT INFO
# --------------------------------------------------------------
# Munin-CGI-Graph Spawn-FCGI Startscript by Julien Schmidt
# eMail: munin-trac at julienschmidt.com
# www: http://www.julienschmidt.com
# --------------------------------------------------------------
# Install:
# 1. Copy this file to /etc/init.d
# 2. Edit the variables below
# 3. run "update-rc.d spawn-fcgi-munin-graph defaults"
# --------------------------------------------------------------
# Special thanks for their help to:
# Frantisek Princ
# Jérôme Warnier
# --------------------------------------------------------------
# Last Update: 14. February 2013
#
# Please change the following variables:
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
NAME=spawn-fcgi-munin-graph
PID_FILE=/var/run/munin/$NAME.pid
SOCK_FILE=/var/run/munin/$NAME.sock
SOCK_USER=www-data
FCGI_USER=munin
FCGI_GROUP=munin
FCGI_WORKERS=2
DAEMON=/usr/bin/spawn-fcgi
DAEMON_OPTS="-s $SOCK_FILE -F $FCGI_WORKERS -U $SOCK_USER -u $FCGI_USER -g $FCGI_GROUP -P $PID_FILE -- /usr/lib/munin/cgi/munin-cgi-graph"
# --------------------------------------------------------------
# No edits necessary beyond this line
# --------------------------------------------------------------
if [ ! -x $DAEMON ]; then
echo "File not found or is not executable: $DAEMON!"
exit 0
fi
status() {
if [ ! -r $PID_FILE ]; then
return 1
fi
for FCGI_PID in `cat $PID_FILE`; do
if [ -z "${FCGI_PID}" ]; then
return 1
fi
FCGI_RUNNING=`ps -p ${FCGI_PID} | grep ${FCGI_PID}`
if [ -z "${FCGI_RUNNING}" ]; then
return 1
fi
done;
return 0
}
start() {
if status; then
echo "FCGI is already running!"
exit 1
else
$DAEMON $DAEMON_OPTS
fi
}
stop () {
if ! status; then
echo "No PID-file at $PID_FILE found or PID not valid. Maybe not running"
exit 1
fi
# Kill processes
for PID_RUNNING in `cat $PID_FILE`; do
kill -9 $PID_RUNNING
done
# Remove PID-file
rm -f $PID_FILE
# Remove Sock-File
rm -f $SOCK_FILE
}
case "$1" in
start)
echo "Starting $NAME: "
start
echo "... DONE"
;;
stop)
echo "Stopping $NAME: "
stop
echo "... DONE"
;;
force-reload|restart)
echo "Stopping $NAME: "
stop
echo "Starting $NAME: "
start
echo "... DONE"
;;
status)
if status; then
echo "FCGI is RUNNING"
else
echo "FCGI is NOT RUNNING"
fi
;;
*)
echo "Usage: $0 {start|stop|force-reload|restart|status}"
exit 1
;;
esac
exit 0

View File

@ -0,0 +1,10 @@
[Unit]
Description=Munin zoom for nginx.
After=network.target
[Service]
ExecStart=/usr/bin/spawn-fcgi -s /var/run/munin/spawn-fcgi-munin-graph.sock -U www-data -u munin -g munin /usr/lib/munin/cgi/munin-cgi-graph
Type=forking
[Install]
WantedBy=default.target

View File

@ -12,29 +12,24 @@
state: present
with_items:
- liblwp-useragent-determined-perl
- libcgi-fast-perl
- spawn-fcgi
- name: Adjust rights for munin-cgi
file:
path: '{{ item }}'
owner: munin
group: adm
with_fileglob:
- /var/log/munin/munin-cgi-*
shell: "chown --verbose www-data:munin /var/log/munin/munin-cgi-*"
register: command_result
changed_when: "'changed' in command_result.stdout"
args:
warn: no
- name: Install Init script for Munin-fcgi
copy:
src: init.d/spawn-fcgi-munin-graph
dest: /etc/init.d/
mode: "0755"
register: install_spawn_fcgi_munin_graph
src: systemd/spawn-fcgi-munin-graph.service
dest: /etc/systemd/system/spawn-fcgi-munin-graph.service
- name: Reload systemd
command: systemctl daemon-reload
when: install_spawn_fcgi_munin_graph | changed
- name: Ensure that Munin-fcgi is started/stopped correctly
service:
- name: Enable and start Munin-fcgi
systemd:
name: spawn-fcgi-munin-graph
daemon_reload: yes
enabled: yes
state: started

View File

@ -4,5 +4,6 @@ ntpd_servers:
ntpd_acls:
- '127.0.0.1'
- '::1'
- '-4 ignore'
- '-6 ignore'
- '-4 default ignore'
- '-6 default ignore'

View File

@ -1,4 +1,11 @@
---
- name: Remove openntpd package
apt:
name: openntpd
state: absent
tags:
- ntp
- name: Install ntp package
apt:
name: ntp

View File

@ -48,6 +48,7 @@
- name: Custom php.ini for CLI
copy:
dest: "{{ phpini_cli_custom_file }}"
mode: "0644"
content: |
; Put customized values here.
force: no
@ -62,4 +63,3 @@
with_items:
- { option: "date.timezone", value: "Europe/Paris" }
when: php_symfony_requirements

View File

@ -49,6 +49,7 @@
- name: "Custom php.ini for CLI (Debian 9 or later)"
copy:
dest: "{{ phpini_cli_custom_file }}"
mode: "0644"
content: |
; Put customized values here.
; default_charset = "ISO-8859-1"

View File

@ -103,7 +103,7 @@
- name: enable spam.sh cron
lineinfile:
dest: /etc/cron.d/spam
line: "42 * * * * /usr/share/scripts/spam.sh"
line: "42 * * * * root /usr/share/scripts/spam.sh"
create: yes
state: present
mode: "0640"

View File

@ -6,7 +6,7 @@
changed_when: check_ftp_account.rc != 0
register: check_ftp_account
tags:
- proftpd
- proftpd
- name: Generate FTP password
command: apg -n1
@ -14,14 +14,14 @@
check_mode: no
when: check_ftp_account.rc != 0
tags:
- proftpd
- proftpd
- name: Print generated password
debug:
msg: "{{ ftp_password.stdout }}"
when: check_ftp_account.rc != 0
tags:
- proftpd
- proftpd
- name: Hash generated FTP password
set_fact:
@ -29,7 +29,7 @@
check_mode: no
when: check_ftp_account.rc != 0
tags:
- proftpd
- proftpd
- name: Get current FTP password
shell: grep "^{{ proftpd_name }}:" /etc/proftpd/vpasswd | cut -d':' -f2
@ -38,7 +38,7 @@
when: check_ftp_account.rc == 0
changed_when: false
tags:
- proftpd
- proftpd
- name: Get current FTP password
set_fact:
@ -47,7 +47,7 @@
when: check_ftp_account.rc == 0
changed_when: false
tags:
- proftpd
- proftpd
- name: Create FTP account
lineinfile:
@ -58,7 +58,7 @@
line: "{{ proftpd_name }}:{{ proftpd_password }}:{{ proftpd_uid }}:{{ proftpd_gid }}::{{ proftpd_home }}:/bin/false"
notify: restart proftpd
tags:
- proftpd
- proftpd
- name: Allow FTP account
lineinfile:
@ -68,4 +68,4 @@
insertbefore: "DenyAll"
notify: restart proftpd
tags:
- proftpd
- proftpd

View File

@ -4,8 +4,8 @@
name: proftpd-basic
state: present
tags:
- proftpd
- packages
- proftpd
- packages
- name: ftpusers group exists
group:
@ -13,7 +13,7 @@
state: present
notify: restart proftpd
tags:
- proftpd
- proftpd
- name: local jail is installed
template:
@ -23,7 +23,7 @@
force: no
notify: restart proftpd
tags:
- proftpd
- proftpd
- name: mod_tls_memcache is disabled
replace:
@ -32,7 +32,7 @@
replace: '#LoadModule mod_tls_memcache.c'
notify: restart proftpd
tags:
- proftpd
- proftpd
- name: Put empty vpasswd file if missing
copy:
@ -41,7 +41,7 @@
force: no
notify: restart proftpd
tags:
- proftpd
- proftpd
# Why 440? Because should be edited with ftpasswd.
# So, readonly when opened with vim.
@ -54,4 +54,4 @@
group: root
notify: restart proftpd
tags:
- proftpd
- proftpd

View File

@ -0,0 +1,4 @@
---
rabbitmq_connections_critical: 200
rabbitmq_connections_warning: 150

View File

@ -0,0 +1,226 @@
#!/usr/bin/env python2
from optparse import OptionParser
import shlex
import subprocess
import sys
import requests
import json
if "check_output" not in dir( subprocess ): # duck punch it in!
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
class RabbitCmdWrapper(object):
"""So basically this just runs rabbitmqctl commands and returns parsed output.
Typically this means you need root privs for this to work.
Made this its own class so it could be used in other monitoring tools
if desired."""
@classmethod
def list_connections(cls):
args = shlex.split("sudo rabbitmqctl list_connections")
cmd_result = subprocess.check_output(args).strip()
results = cls._parse_list_results(cmd_result)
return results
@classmethod
def list_queues(cls):
args = shlex.split('sudo rabbitmqctl list_queues')
cmd_result = subprocess.check_output(args).strip()
results = cls._parse_list_results(cmd_result)
return results
@classmethod
def status(cls):
args = shlex.split('sudo rabbitmqctl status')
cmd_result = subprocess.check_output(args).strip()
results = cls._parse_list_results(cmd_result)
return results
@classmethod
def _parse_list_results(cls, result_string):
results = result_string.strip().split('\n')
#remove text fluff
if "Listing connections ..." in results: results.remove("Listing connections ...")
if "Listing queues ..." in results: results.remove("Listing queues ...")
return_data = []
for row in results:
return_data.append(row.split('\t'))
return return_data
def check_connection_count(critical=0, warning=0):
"""Checks to make sure the numbers of connections are within parameters."""
try:
count = len(RabbitCmdWrapper.list_connections())
if count >= critical:
print "CRITICAL - Connection Count %d" % count
sys.exit(2)
elif count >= warning:
print "WARNING - Connection Count %d" % count
sys.exit(1)
else:
print "OK - Connection Count %d" % count
except Exception, err:
print "CRITICAL - %s" % err
def check_queues_count(critical=1000, warning=1000):
"""
A blanket check to make sure all queues are within count parameters.
TODO: Possibly break this out so test can be done on individual queues.
"""
try:
critical_q = []
warning_q = []
results = RabbitCmdWrapper.list_queues()
for queue in results:
if queue.count == 2:
count = int(queue[1])
if count >= critical:
critical_q.append("%s: %s" % (queue[0], count))
elif count >= warning:
warning_q.append("%s: %s" % (queue[0], count))
if critical_q:
print "CRITICAL - %s" % ", ".join(critical_q)
sys.exit(2)
elif warning_q:
print "WARNING - %s" % ", ".join(warning_q)
sys.exit(1)
else:
print "OK - NO QUEUES EXCEED THRESHOLDS"
sys.exit(0)
except Exception, err:
print "CRITICAL - %s" % err
sys.exit(2)
def check_mem_usage(critical=75, warning=50):
"""Check to make sure the RAM usage of rabbitmq process does not exceed 50%% of its max"""
try:
results = RabbitCmdWrapper.status()
for idx,val in enumerate(results):
if "memory," in str(val):
mem_used_raw = str(results[idx + 1])
if "vm_memory_limit" in str(val):
mem_limit_raw = str(val)
memory_used = float(filter(str.isdigit, mem_used_raw))
memory_limit = float(filter(str.isdigit, mem_limit_raw))
percent_usage = int(memory_used/memory_limit * 100)
if percent_usage > critical:
print "CRITICAL - RABBITMQ RAM USAGE at %s%% of max" % percent_usage
sys.exit(2)
elif percent_usage > warning:
print "WARNING - RABBITMQ RAM USAGE at %s%% of max" % percent_usage
sys.exit(1)
else:
print "OK - RABBITMQ RAM USAGE OK at %s%% of max" % percent_usage
sys.exit(0)
except Exception, err:
print "Critical - %s" % err
sys.exit(2)
def check_aliveness(username, password, timeout, cluster):
"""Declares a test queue, then publishes and consumes a message. Intended for use by monitoring tools. If everything is working correctly, will return HTTP status 200 with body"""
try:
r = requests.get("http://%s:15672/api/aliveness-test/%%2F" % cluster, auth=(username, password), timeout=timeout)
except requests.exceptions.RequestException as e: # Throw error if rabbitmq is down
print "Critical - %s" % e
sys.exit(2)
if r.status_code == 200:
print "OK - RABBITMQ Aliveness Test Returns: %s" % r
sys.exit(0)
elif r.status_code != 200:
print "CRITICAL - RabbitMQ Error: %s" % r.content
sys.exit(2)
else:
print "UNKNOWN - RABBITMQ Aliveness Test"
sys.ext(1)
def check_cluster(username, password, timeout, cluster):
"""Checks the health of a cluster, if a node is not running mark as offline """
try:
url = "http://%s:15672/api/nodes" % cluster
r = requests.get(url, auth=(username, password), timeout=timeout)
except requests.exceptions.RequestException as e: # Throw error if no response
print "Critical - %s" % e
sys.exit(2)
text = r.text
nodes = json.loads(text)
running_nodes = []
failed_nodes = []
for node in nodes:
if not node['running']:
failed_nodes.append(node['name'])
if node['running']:
running_nodes.append(node['name'])
if len(failed_nodes) == 1:
print "WARNING: RabbitMQ cluster is degraded: Not running %s" % failed_nodes[0]
sys.exit(1)
elif len(failed_nodes) >= 2:
print "CRITICAL: RabbitMQ cluster is critical: Not running %s" % failed_nodes
sys.exit(2)
else:
print "OK: RabbitMQ cluster members: %s" % (" ".join(running_nodes))
sys.exit(0)
USAGE = """Usage: ./check_rabbitmq -a [action] -C [critical] -W [warning]
Actions:
- connection_count
checks the number of connection in rabbitmq's list_connections
- queues_count
checks the count in each of the queues in rabbitmq's list_queues
- mem_usage
checks to ensure mem usage of rabbitmq process does not exceed 50%
- aliveness
Use the /api/aliveness-test API to send/receive a message. (requires -u username -p password args)
- cluster_status
Parse /api/nodes to check the cluster status. (requires -u username -p password"""
if __name__ == "__main__":
parser = OptionParser(USAGE)
parser.add_option("-a", "--action", dest="action",
help="Action to Check")
parser.add_option("-C", "--critical", dest="critical",
type="int", help="Critical Threshold")
parser.add_option("-W", "--warning", dest="warning",
type="int", help="Warning Threshold")
parser.add_option("-u", "--username", dest="username", default="guest",
type="string", help="RabbitMQ username, Default guest")
parser.add_option("-p", "--password", dest="password", default="guest",
type="string", help="RabbitMQ password, Default guest")
parser.add_option("-t", "--timeout", dest="timeout", default=1,
type="int", help="Request Timeout, defaults to 1 second")
parser.add_option("-c", "--cluster", dest="cluster", default="localhost",
type="string", help="Cluster IP/DNS name, defaults to localhost")
(options, args) = parser.parse_args()
if options.action == "connection_count":
check_connection_count(options.critical, options.warning)
elif options.action == "queues_count":
check_queues_count(options.critical, options.warning)
elif options.action == "mem_usage":
check_mem_usage(options.critical, options.warning)
elif options.action == "aliveness":
check_aliveness(options.username, options.password, options.timeout, options.cluster)
elif options.action == "cluster_status":
check_cluster(options.username, options.password, options.timeout, options.cluster)
else:
print "Invalid action: %s" % options.action
print USAGE

View File

@ -0,0 +1,66 @@
#!/bin/sh
#
# Plugin to monitor the number of connections to RabbitMQ
#
# Usage: Link or copy into /etc/munin/node.d/
#
# Parameters
# env.conn_warn <warning connections>
# env.conn_crit <critical connections>
#
# Magic markers (optional - only used by munin-config and some
# installation scripts):
#
#%# family=auto
#%# capabilities=autoconf

# If run with the "autoconf"-parameter, give our opinion on whether we
# should be run on this system or not. This is optional, and only used by
# munin-config. In the case of this plugin, we should most probably
# always be included.
if [ "$1" = "autoconf" ]; then
    echo yes
    exit 0
fi

# rabbitmqctl needs a writable HOME for its Erlang cookie lookup.
HOME=/tmp/

# If run with the "config"-parameter, give out information on how the
# graphs should look.
if [ "$1" = "config" ]; then
    # Thresholds come from munin-node env vars env.conn_warn / env.conn_crit.
    # (Previously read the unrelated queue_warn/queue_crit variables, so the
    # documented parameters were silently ignored.)
    CONN_WARN=${conn_warn:-500}
    CONN_CRIT=${conn_crit:-1000}

    # The title of the graph
    echo 'graph_title RabbitMQ connections'
    # Arguments to "rrdtool graph". In this case, tell it that the
    # lower limit of the graph is '0', and that 1k=1000 (not 1024)
    echo 'graph_args --base 1000 -l 0'
    # The Y-axis label
    echo 'graph_vlabel connections'
    echo 'graph_category RabbitMQ'
    echo "connections.label Connections"
    echo "connections.warning $CONN_WARN"
    echo "connections.critical $CONN_CRIT"
    echo "connections.info Number of active connections"
    echo 'graph_info Shows the number of connections to RabbitMQ'
    # Last, if run with the "config"-parameter, quit here (don't
    # display any data)
    exit 0
fi

# If not run with any parameters at all (or only unknown ones), do the
# real work - i.e. display the data. Almost always this will be
# "value" subfield for every data field.
echo "connections.value $(HOME=$HOME rabbitmqctl list_connections | grep -v "^Listing" | grep -v "done.$" | wc -l)"

View File

@ -4,3 +4,12 @@
name: rabbitmq-server
state: restarted
# Restart NRPE so that new/changed command definitions are taken into account.
- name: restart nagios-nrpe-server
  service:
    name: nagios-nrpe-server
    state: restarted

# Restart munin-node so that newly enabled plugins are loaded.
- name: restart munin-node
  service:
    name: munin-node
    state: restarted

View File

@ -27,3 +27,25 @@
lineinfile:
dest: /etc/default/rabbitmq-server
line: ulimit -n 2048
# Detect optional monitoring systems and include their configuration
# only when they are actually present on the host.

- name: is NRPE present ?
  stat:
    path: /etc/nagios/nrpe.d/evolix.cfg
  check_mode: no
  register: nrpe_evolix_config
  tags:
    - nrpe

- include: nrpe.yml
  when: nrpe_evolix_config.stat.exists

- name: is Munin present ?
  stat:
    path: /etc/munin
  check_mode: no
  register: etc_munin_directory
  tags:
    # was tagged "nrpe" (copy-paste from the task above); this task is
    # about Munin detection.
    - munin

- include: munin.yml
  when: etc_munin_directory.stat.exists

45
rabbitmq/tasks/munin.yml Normal file
View File

@ -0,0 +1,45 @@
---
# Deploy and enable the rabbitmq_connections Munin plugin.
# The plugin file lives under /usr/local, so /usr may need remounting rw.

- include_role:
    name: remount-usr
  tags:
    - rabbitmq
    - munin

- name: Create local munin directory
  file:
    path: /usr/local/share/munin/
    state: directory
    mode: "0755"
  tags:
    - rabbitmq
    - munin

- name: Create local plugins directory
  file:
    path: /usr/local/share/munin/plugins/
    state: directory
    mode: "0755"
  tags:
    - rabbitmq
    - munin

- name: Copy rabbitmq_connections munin plugin
  copy:
    src: rabbitmq_connections
    dest: /usr/local/share/munin/plugins/rabbitmq_connections
    mode: "0755"
  notify: restart munin-node
  tags:
    - rabbitmq
    - munin

# Symlink into /etc/munin/plugins so munin-node picks the plugin up.
- name: Enable rabbitmq_connections munin plugin
  file:
    src: /usr/local/share/munin/plugins/rabbitmq_connections
    dest: /etc/munin/plugins/rabbitmq_connections
    state: link
  notify: restart munin-node
  tags:
    - rabbitmq
    - munin

34
rabbitmq/tasks/nrpe.yml Normal file
View File

@ -0,0 +1,34 @@
---
# Install the check_rabbitmq Nagios plugin and wire it into NRPE.

- name: check_rabbitmq dependencies
  apt:
    name: python-requests
    # "installed" is a deprecated alias of "present" in the apt module.
    state: present

# The plugin is installed under /usr, which may be mounted read-only.
- include_role:
    name: remount-usr

# https://raw.githubusercontent.com/CaptPhunkosis/check_rabbitmq/master/check_rabbitmq
- name: check_rabbitmq is installed
  copy:
    src: check_rabbitmq
    dest: /usr/local/lib/nagios/plugins/check_rabbitmq
    owner: root
    group: root
    mode: "0755"
    force: yes

- name: check_rabbitmq is available for NRPE
  lineinfile:
    dest: /etc/nagios/nrpe.d/evolix.cfg
    regexp: 'command\[check_rab_connection_count\]'
    line: 'command[check_rab_connection_count]=sudo /usr/local/lib/nagios/plugins/check_rabbitmq -a connection_count -C {{ rabbitmq_connections_critical }} -W {{ rabbitmq_connections_warning }}'
  notify: restart nagios-nrpe-server

# The check runs rabbitmqctl under the hood, hence the sudo rule.
- name: sudo without password for nagios
  lineinfile:
    dest: /etc/sudoers.d/evolinux
    regexp: 'check_rabbitmq'
    line: 'nagios ALL = NOPASSWD: /usr/local/lib/nagios/plugins/check_rabbitmq'
    insertafter: '^nagios'
    validate: "visudo -cf %s"

View File

@ -1,6 +1,6 @@
---
rbenv_version: v1.1.0
rbenv_ruby_version: 2.4.1
rbenv_version: v1.1.1
rbenv_ruby_version: 2.4.2
rbenv_root: "~/.rbenv"
rbenv_repo: "https://github.com/rbenv/rbenv.git"
rbenv_plugins:

13
remount-usr/README.md Normal file
View File

@ -0,0 +1,13 @@
# remount-usr
This is a role that mounts the /usr partition read-write, with a handler to remount it afterwards.
Useful when you use the `ro` option for the /usr partition in your /etc/fstab.
## Usage
Include this role in a task before writing on the /usr partition (e.g. copying a file):
~~~
- include_role:
name: remount-usr
~~~

View File

@ -43,7 +43,7 @@
- name: enable sa-update.sh cron
lineinfile:
dest: /etc/cron.d/sa-update
line: "42 6 5 1,4,7,10 * /usr/share/scripts/sa-update.sh"
line: "42 6 5 1,4,7,10 * root /usr/share/scripts/sa-update.sh"
create: yes
state: present
mode: "0640"

View File

@ -1,7 +1,7 @@
# tomcat
Install a Tomcat depndancies for multiple tomcat instance.
Install Tomcat and its dependencies for multiple instances.
## Available variables
**tomcat_instance_root:** Root dir for Tomcat instance (default: /srv/tomcat)
**tomcat_instance_root**: Root dir for Tomcat instance (default: /srv/tomcat)

View File

@ -0,0 +1,15 @@
# Per-user Tomcat instance unit; %u/%h expand to the instance user and
# its home directory (which holds the CATALINA_BASE tree).
[Unit]
Description=Tomcat %u.
After=network.target

[Service]
WorkingDirectory=%h
Environment="CATALINA_BASE=%h"
# Per-instance environment overrides (JAVA_OPTS, ports, ...) — assumed;
# confirm against the role's conf/env template.
EnvironmentFile=%h/conf/env
UMask=0002
ExecStart=/usr/share/tomcat8/bin/startup.sh
ExecStop=/usr/share/tomcat8/bin/shutdown.sh
# startup.sh returns after launching the JVM, hence Type=forking.
Type=forking

[Install]
WantedBy=default.target

View File

@ -1,3 +1,8 @@
---
# Entry point: common packages, then release-specific setup.
- include: packages.yml

# Debian 8 (jessie) specific setup.
- include: packages_jessie.yml
  when: ansible_distribution_release == "jessie"

# Debian 9 (stretch) and later.
- include: packages_stretch.yml
  when: ansible_distribution_major_version | version_compare('9', '>=')

- include: nagios.yml

View File

@ -1,5 +1,5 @@
---
- name: Install dependancy
- name: Install packages
apt:
name: "{{ item }}"
state: present
@ -18,7 +18,7 @@
- name: Copy systemd unit
copy:
src: 'tomcat.service'
src: 'tomcat_jessie.service'
dest: "/etc/systemd/user/tomcat.service"
mode: "0755"

View File

@ -0,0 +1,29 @@
---
# Tomcat 8 packages and per-instance plumbing for Debian 9 (stretch).

- name: Install packages
  apt:
    name: "{{ item }}"
    state: present
  with_items:
    - 'tomcat8'
    - 'tomcat8-user'
    - 'libpam-systemd'

- name: Create tomcat root dir
  file:
    path: "{{ tomcat_instance_root }}"
    state: directory
    owner: 'root'
    group: 'root'
    mode: "0755"

# Installed under /etc/systemd/user/, so instances are managed per-user
# (systemd --user) rather than system-wide.
- name: Copy systemd unit
  copy:
    src: 'tomcat_stretch.service'
    dest: "/etc/systemd/user/tomcat.service"
    mode: "0755"

# Instances are run individually, so the distribution-wide service is
# stopped and disabled.
- name: Disable default tomcat8 service
  service:
    name: tomcat8
    state: stopped
    enabled: false

View File

@ -5,6 +5,12 @@
state: reloaded
daemon_reload: yes
- name: restart varnish
systemd:
name: varnish
state: restarted
daemon_reload: yes
- name: reload systemd
command: systemctl daemon-reload

View File

@ -14,6 +14,7 @@
- /etc/default/varnish
- /etc/default/varnishncsa
- /etc/default/varnishlog
notify: reload varnish
tags:
- varnish
@ -24,6 +25,7 @@
mode: "0700"
owner: root
group: root
notify: reload varnish
tags:
- varnish
@ -39,7 +41,9 @@
src: varnish.conf.j2
dest: /etc/systemd/system/varnish.service.d/evolinux.conf
force: yes
notify: reload systemd
notify:
- reload systemd
- restart varnish
tags:
- varnish