From f68a79c022784f4d75a60d4d7c5c0e8d82c2dc57 Mon Sep 17 00:00:00 2001
From: Jeremy Lecour
Date: Tue, 18 Aug 2020 14:00:46 +0200
Subject: [PATCH] filebeat: allow using a template

---
 CHANGELOG.md                                |   1 +
 filebeat/defaults/main.yml                  |  11 +
 filebeat/tasks/main.yml                     |  83 ++++++-
 filebeat/templates/filebeat.default.yml.j2 | 247 +++++++++++++++++++++
 4 files changed, 331 insertions(+), 11 deletions(-)
 create mode 100644 filebeat/templates/filebeat.default.yml.j2

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c76d489..874df943 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ The **patch** part changes incrementally at each release.
 ### Added
 
 * certbot: detect HAProxy cert directory
+* filebeat: allow using a template
 * haproxy: add deny_ips file to reject connections
 * haproxy: add some comments to default config
 * haproxy: enable stats frontend with access lists

diff --git a/filebeat/defaults/main.yml b/filebeat/defaults/main.yml
index 008daa27..cd92eb3c 100644
--- a/filebeat/defaults/main.yml
+++ b/filebeat/defaults/main.yml
@@ -4,3 +4,14 @@ elastic_stack_version: "6.x"
 
 filebeat_logstash_plugin: False
 filebeat_processors_cloud_metadata: False
+
+filebeat_elasticsearch_hosts:
+  - "localhost:9200"
+filebeat_elasticsearch_protocol: "http"
+filebeat_elasticsearch_auth_api_key: ""
+filebeat_elasticsearch_auth_username: ""
+filebeat_elasticsearch_auth_password: ""
+
+filebeat_use_config_template: False
+filebeat_update_config: True
+filebeat_force_config: True

diff --git a/filebeat/tasks/main.yml b/filebeat/tasks/main.yml
index 0aca06d6..34feb5c7 100644
--- a/filebeat/tasks/main.yml
+++ b/filebeat/tasks/main.yml
@@ -66,18 +66,79 @@
     - logstash_plugin.stat.exists
     - not logstash_plugin_installed | success
 
-- name: cloud_metadata processor is disabled
-  replace:
-    dest: /etc/filebeat/filebeat.yml
-    regexp: '^(\s+)(- add_cloud_metadata:)'
-    replace: '\1# \2'
-  notify: restart filebeat
-  when: not filebeat_processors_cloud_metadata
+# When we don't use a config template (default)
+- block:
+  - name: cloud_metadata processor is disabled
+    replace:
+      dest: /etc/filebeat/filebeat.yml
+      regexp: '^(\s+)(- add_cloud_metadata:)'
+      replace: '\1# \2'
+    notify: restart filebeat
+    when: not filebeat_processors_cloud_metadata
 
-- name: cloud_metadata processor is disabled
+  - name: cloud_metadata processor is enabled
+    lineinfile:
+      dest: /etc/filebeat/filebeat.yml
+      line: "  - add_cloud_metadata: ~"
+      insertafter: '^processors:'
+    notify: restart filebeat
+    when: filebeat_processors_cloud_metadata
+
+  - name: Filebeat knows where to find Elasticsearch
+    lineinfile:
+      dest: /etc/filebeat/filebeat.yml
+      regexp: '^  hosts: .*'
+      line: "  hosts: [\"{{ filebeat_elasticsearch_hosts | join('\", \"') }}\"]"
+      insertafter: "output.elasticsearch:"
+    notify: restart filebeat
+    when:
+      - filebeat_elasticsearch_hosts
+
+  - name: Filebeat protocol for Elasticsearch
+    lineinfile:
+      dest: /etc/filebeat/filebeat.yml
+      regexp: '^  #?protocol: .*'
+      line: "  protocol: \"{{ filebeat_elasticsearch_protocol }}\""
+      insertafter: "output.elasticsearch:"
+    notify: restart filebeat
+    when: filebeat_elasticsearch_protocol == "http" or filebeat_elasticsearch_protocol == "https"
+
+  - name: Filebeat auth/username for Elasticsearch are configured
+    lineinfile:
+      dest: /etc/filebeat/filebeat.yml
+      regexp: '{{ item.regexp }}'
+      line: '{{ item.line }}'
+      insertafter: "output.elasticsearch:"
+    with_items:
+      - { regexp: '^  #?username: .*', line: '  username: "{{ filebeat_elasticsearch_auth_username }}"' }
+      - { regexp: '^  #?password: .*', line: '  password: "{{ filebeat_elasticsearch_auth_password }}"' }
' password: "{{ filebeat_elasticsearch_auth_password }}"' } + notify: restart filebeat + when: + - filebeat_elasticsearch_auth_username + - filebeat_elasticsearch_auth_password + when: not filebeat_use_config_template + +- name: Filebeat api_key for Elasticsearch are configured lineinfile: dest: /etc/filebeat/filebeat.yml - line: " - add_cloud_metadata: ~" - insert_after: '^processors:' + regexp: '^ #?api_key: .*' + line: ' api_key: "{{ filebeat_elasticsearch_auth_api_key }}"' + insertafter: "output.elasticsearch:" notify: restart filebeat - when: filebeat_processors_cloud_metadata + when: filebeat_elasticsearch_auth_api_key + +# When we use a config template +- block: + - name: Configuration is up-to-date + template: + src: "{{ item }}" + dest: /etc/filebeat/filebeat.yml + force: "{{ filebeat_force_config }}" + with_first_found: + - "templates/filebeat/filebeat.{{ inventory_hostname }}.yml.j2" + - "templates/filebeat/filebeat.{{ host_group }}.yml.j2" + - "templates/filebeat/filebeat.default.yml.j2" + - "filebeat.default.yml.j2" + notify: restart filebeat + when: filebeat_update_config + when: filebeat_use_config_template diff --git a/filebeat/templates/filebeat.default.yml.j2 b/filebeat/templates/filebeat.default.yml.j2 new file mode 100644 index 00000000..65a15fd1 --- /dev/null +++ b/filebeat/templates/filebeat.default.yml.j2 @@ -0,0 +1,247 @@ +###################### Filebeat Configuration Example ######################### + +# This file is an example configuration file highlighting only the most common +# options. The filebeat.reference.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + +# For more available modules and options, please see the filebeat.reference.yml sample +# configuration file. + +# ============================== Filebeat inputs =============================== + +filebeat.inputs: + +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. + +- type: log + + # Change to true to enable this input configuration. + enabled: false + + # Paths that should be crawled and fetched. Glob based paths. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. + #include_lines: ['^ERR', '^WARN'] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #exclude_files: ['.gz$'] + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + ### Multiline options + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java Stack Traces or C-Line Continuation + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under pattern should be negated or not. Default is false. 
+  #multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
+  # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
+  #multiline.match: after
+
+# ============================== Filebeat modules ==============================
+
+filebeat.config.modules:
+  # Glob pattern for configuration loading
+  path: ${path.config}/modules.d/*.yml
+
+  # Set to true to enable config reloading
+  reload.enabled: false
+
+  # Period on which files under path should be checked for changes
+  #reload.period: 10s
+
+# ======================= Elasticsearch template setting =======================
+
+setup.template.settings:
+  index.number_of_shards: 1
+  #index.codec: best_compression
+  #_source.enabled: false
+
+
+# ================================== General ===================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+#  env: staging
+
+# ================================= Dashboards =================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here or by using the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+# =================================== Kibana ===================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+  # Kibana Space ID
+  # ID of the Kibana Space into which the dashboards should be loaded. By default,
+  # the Default Space will be used.
+  #space.id:
+
+# =============================== Elastic Cloud ================================
+
+# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+# ================================== Outputs ===================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+# ---------------------------- Elasticsearch Output ----------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  hosts: ["{{ filebeat_elasticsearch_hosts | join('", "') }}"]
+
+  # Protocol - either `http` (default) or `https`.
+  protocol: "{{ filebeat_elasticsearch_protocol | default('http') }}"
+
+  # Authentication credentials - either API key or username/password.
+{% if filebeat_elasticsearch_auth_api_key %}
+  api_key: "{{ filebeat_elasticsearch_auth_api_key }}"
+{% endif %}
+{% if filebeat_elasticsearch_auth_username %}
+  username: "{{ filebeat_elasticsearch_auth_username }}"
+{% endif %}
+{% if filebeat_elasticsearch_auth_password %}
+  password: "{{ filebeat_elasticsearch_auth_password }}"
+{% endif %}
+
+# ------------------------------ Logstash Output -------------------------------
+#output.logstash:
+  # The Logstash hosts
+  #hosts: ["localhost:5044"]
+
+  # Optional SSL. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+# ================================= Processors =================================
+processors:
+  - add_host_metadata: ~
+{% if filebeat_processors_cloud_metadata %}
+  - add_cloud_metadata: ~
+{% endif %}
+  - add_docker_metadata: ~
+  - add_kubernetes_metadata: ~
+
+# ================================== Logging ===================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+# ============================= X-Pack Monitoring ==============================
+# Filebeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
+# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
+#monitoring.cluster_uuid:
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+#monitoring.elasticsearch:
+
+# ============================== Instrumentation ===============================
+
+# Instrumentation support for the filebeat.
+#instrumentation:
+    # Set to true to enable instrumentation of filebeat.
+    #enabled: false
+
+    # Environment in which filebeat is running on (eg: staging, production, etc.)
+    #environment: ""
+
+    # APM Server hosts to report instrumentation results to.
+    #hosts:
+    #  - http://localhost:8200
+
+    # API Key for the APM Server(s).
+    # If api_key is set then secret_token will be ignored.
+    #api_key:
+
+    # Secret token for the APM Server(s).
+    #secret_token:
+
+
+# ================================= Migration ==================================
+
+# This allows enabling 6.7 migration aliases
+#migration.6_to_7.enabled: true
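
Usage sketch (not part of the patch): one way the new role variables could be combined, for example in group_vars, to switch a set of hosts to the template-based configuration introduced here. The group name, host names and API key value below are hypothetical placeholders; only the variable names come from filebeat/defaults/main.yml in this patch.

# group_vars/elk/filebeat.yml — hypothetical example
filebeat_use_config_template: True    # render /etc/filebeat/filebeat.yml from the Jinja template
filebeat_update_config: True          # re-render the file on later runs
filebeat_force_config: True           # overwrite the file even if it was edited by hand

# values injected into the output.elasticsearch section of the template
filebeat_elasticsearch_hosts:
  - "es1.example.com:9200"            # placeholder hosts
  - "es2.example.com:9200"
filebeat_elasticsearch_protocol: "https"
filebeat_elasticsearch_auth_api_key: "id:api_key_value"   # placeholder credential
filebeat_processors_cloud_metadata: False

Because of the with_first_found lookup in the new task, a host- or group-specific template (templates/filebeat/filebeat.{{ inventory_hostname }}.yml.j2 or templates/filebeat/filebeat.{{ host_group }}.yml.j2) found in the usual template search paths would take precedence over the role's filebeat.default.yml.j2, so the example above can be refined per host or per group without touching the role.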