Add observability-elk

This commit is contained in:
bsquillari 2023-10-31 13:53:04 +00:00
parent fcc2189674
commit 807ae7548e
45 changed files with 1710 additions and 3 deletions

@@ -34,6 +34,13 @@ preparation:
- echo "BROWSER_CLIENT_PROD_IMAGE_NAME=${IMAGE_BASE}/browser-client:prod-${BUILD_ID}" >> context.env - echo "BROWSER_CLIENT_PROD_IMAGE_NAME=${IMAGE_BASE}/browser-client:prod-${BUILD_ID}" >> context.env
- echo "BROWSER_CLIENT_TEST_IMAGE_NAME=${IMAGE_BASE}/browser-client:test-${BUILD_ID}" >> context.env - echo "BROWSER_CLIENT_TEST_IMAGE_NAME=${IMAGE_BASE}/browser-client:test-${BUILD_ID}" >> context.env
- echo "ELK_SETUP_PROD_IMAGE_NAME=${IMAGE_BASE}/elk-setup:prod-${BUILD_ID}" >> context.env
- echo "ELK_PROD_IMAGE_NAME=${IMAGE_BASE}/elasticsearch:prod-${BUILD_ID}" >> context.env
- echo "HEARTBEAT_PROD_IMAGE_NAME=${IMAGE_BASE}/heartbeat:prod-${BUILD_ID}" >> context.env
- echo "CURATOR_PROD_IMAGE_NAME=${IMAGE_BASE}/curator:prod-${BUILD_ID}" >> context.env
- echo "KIBANA_PROD_IMAGE_NAME=${IMAGE_BASE}/kibana:prod-${BUILD_ID}" >> context.env
- echo "LOGSTASH_PROD_IMAGE_NAME=${IMAGE_BASE}/logstash:prod-${BUILD_ID}" >> context.env
- echo "DOCKER_HUB_SCREEN_CLIENT_IMAGE=$DOCKER_HUB_USER/screens-client:${BUILD_ID}" >> context.env - echo "DOCKER_HUB_SCREEN_CLIENT_IMAGE=$DOCKER_HUB_USER/screens-client:${BUILD_ID}" >> context.env
- echo "DOCKER_HUB_BROWSER_CLIENT_IMAGE=$DOCKER_HUB_USER/browser-client:${BUILD_ID}" >> context.env - echo "DOCKER_HUB_BROWSER_CLIENT_IMAGE=$DOCKER_HUB_USER/browser-client:${BUILD_ID}" >> context.env
- echo "DOCKER_HUB_GATEWAY_IMAGE=$DOCKER_HUB_USER/gateway:${BUILD_ID}" >> context.env - echo "DOCKER_HUB_GATEWAY_IMAGE=$DOCKER_HUB_USER/gateway:${BUILD_ID}" >> context.env
@@ -138,6 +145,35 @@ build-gateway:
- job: preparation
artifacts: true
build-and-run-elk:
stage: build
tags:
- dev
script:
- export $(cat context.env | xargs)
- docker build observability/elk/setup -f observability/elk/setup/Dockerfile --build-arg "ELASTIC_VERSION=8.7.1" -t ${ELK_SETUP_PROD_IMAGE_NAME}
- docker build observability/elk/elasticsearch -f observability/elk/elasticsearch/Dockerfile --build-arg "ELASTIC_VERSION=8.7.1" -t ${ELK_PROD_IMAGE_NAME}
- docker build observability/elk/logstash -f observability/elk/logstash/Dockerfile --build-arg "ELASTIC_VERSION=8.7.1" -t ${LOGSTASH_PROD_IMAGE_NAME}
- docker build observability/elk/kibana -f observability/elk/kibana/Dockerfile --build-arg "ELASTIC_VERSION=8.7.1" -t ${KIBANA_PROD_IMAGE_NAME}
- docker build observability/elk/extensions/curator -f observability/elk/extensions/curator/Dockerfile -t ${CURATOR_PROD_IMAGE_NAME}
- docker build observability/elk/extensions/heartbeat -f observability/elk/extensions/heartbeat/Dockerfile --build-arg "ELASTIC_VERSION=8.7.1" -t ${HEARTBEAT_PROD_IMAGE_NAME}
- docker login -u $CI_REGISTRY_USER --password $CI_JOB_TOKEN $CI_REGISTRY
- docker push ${ELK_SETUP_PROD_IMAGE_NAME}
- docker push ${ELK_PROD_IMAGE_NAME}
- docker push ${HEARTBEAT_PROD_IMAGE_NAME}
- docker push ${CURATOR_PROD_IMAGE_NAME}
- docker push ${KIBANA_PROD_IMAGE_NAME}
- docker push ${LOGSTASH_PROD_IMAGE_NAME}
- docker compose -f observability/docker-compose.yml --env-file $ENV_DEV_FILE --profile setup up -d
- docker compose -f observability/docker-compose.yml -f observability/elk/extensions/curator/curator-compose.yml -f observability/elk/extensions/heartbeat/heartbeat-compose.yml --env-file $ENV_DEV_FILE up -d
needs:
- job: preparation
artifacts: true
test-auth-api:
stage: test
tags:
@@ -222,6 +258,7 @@ test-gateway:
needs:
- job: preparation
- job: build-gateway
- job: build-and-run-elk
artifacts: true
test-integration:
@@ -314,11 +351,25 @@ deliver-dockerhub:
- docker tag $BROWSER_CLIENT_PROD_IMAGE_NAME $DOCKER_HUB_BROWSER_CLIENT_IMAGE
- docker tag $SCREEN_CLIENT_PROD_IMAGE_NAME $DOCKER_HUB_SCREEN_CLIENT_IMAGE
- docker tag $ELK_SETUP_PROD_IMAGE_NAME $DOCKER_HUB_ELK_SETUP_IMAGE_NAME
- docker tag $ELK_PROD_IMAGE_NAME $DOCKER_HUB_ELK_IMAGE_NAME
- docker tag $HEARTBEAT_PROD_IMAGE_NAME $DOCKER_HUB_HEARTBEAT_IMAGE_NAME
- docker tag $CURATOR_PROD_IMAGE_NAME $DOCKER_HUB_CURATOR_IMAGE_NAME
- docker tag $KIBANA_PROD_IMAGE_NAME $DOCKER_HUB_KIBANA_IMAGE_NAME
- docker tag $LOGSTASH_PROD_IMAGE_NAME $DOCKER_HUB_LOGSTASH_IMAGE_NAME
- docker push $DOCKER_HUB_FLIGHT_INFO_IMAGE
- docker push $DOCKER_HUB_USER_MANAGER_IMAGE
- docker push $DOCKER_HUB_GATEWAY_IMAGE
- docker push $DOCKER_HUB_BROWSER_CLIENT_IMAGE
- docker push $DOCKER_HUB_SCREEN_CLIENT_IMAGE
- docker push $DOCKER_HUB_ELK_SETUP_IMAGE_NAME
- docker push $DOCKER_HUB_ELK_IMAGE_NAME
- docker push $DOCKER_HUB_HEARTBEAT_IMAGE_NAME
- docker push $DOCKER_HUB_CURATOR_IMAGE_NAME
- docker push $DOCKER_HUB_KIBANA_IMAGE_NAME
- docker push $DOCKER_HUB_LOGSTASH_IMAGE_NAME
needs:
- job: test-screen-integration
- job: test-browser-integration
@@ -337,6 +388,13 @@ deploy-prod:
- docker login -u $DOCKER_HUB_USER --password $DOCKER_HUB_PASS
- docker compose -f observability/docker-compose.yml --env-file $ENV_DEV_FILE stop
- docker compose -f observability/docker-compose.yml -f observability/elk/extensions/curator/curator-compose.yml -f observability/elk/extensions/heartbeat/heartbeat-compose.yml --env-file $ENV_DEV_FILE down
- docker compose -f observability/docker-compose.yml --env-file $ENV_DEV_FILE rm -f
- docker compose -f observability/docker-compose.yml --env-file $ENV_DEV_FILE pull
- docker compose -f observability/docker-compose.yml --env-file $ENV_DEV_FILE --profile setup up -d
- docker compose -f observability/docker-compose.yml -f observability/elk/extensions/curator/curator-compose.yml -f observability/elk/extensions/heartbeat/heartbeat-compose.yml --env-file $ENV_DEV_FILE up -d
- export API_IMAGE=$DOCKER_HUB_FLIGHT_INFO_IMAGE
- docker compose -f flights-domain/docker-compose.yml --env-file $ENV_PROD_FILE stop
- docker compose -f flights-domain/docker-compose.yml --env-file $ENV_PROD_FILE rm -f
@@ -370,4 +428,5 @@ deploy-prod:
needs:
- job: deliver-dockerhub
- job: preparation
- job: build-and-run-elk
artifacts: true

@@ -8,7 +8,8 @@ services:
ports:
- 5000:5002
healthcheck:
# test: ["CMD", "nc", "-vz", "-w1", "localhost", "5002"]
test: ["CMD", "curl", "-f", "fids_logstash:9600/_node/pipelines"]
interval: 2s
timeout: 2s
retries: 5
@@ -21,6 +22,11 @@ services:
- auth
- flights
- gateway
- elk
# logging:
# driver: gelf
# options:
# gelf-address: "udp://fids_logstash:12201"
networks:
auth:
@@ -29,5 +35,8 @@ networks:
flights:
name: flights-domain_flights
external: true
elk:
name: observability_elk
external: true
gateway:
driver: bridge

@@ -3,4 +3,5 @@ fastapi[all]==0.103.2
pyjwt==2.6.0
gunicorn==20.1.0
requests==2.31.0
aiohttp==3.8.6
graypy

gateway/sleep.sh Normal file
@@ -0,0 +1,5 @@
#!/bin/sh
# Block until Logstash's monitoring API responds, i.e. its pipelines are ready.
while ! curl -f 'fids_logstash:9600/_node/pipelines'; do
  sleep 1
done

@@ -1,3 +1,4 @@
API_USERS = "http://fids_usermanager_api:5000/users"
API_FLIGHTS = "http://fids_flights_api:5000/flights"
API_AUTH = "http://fids_usermanager_api:5000/auth"
LOGS_UPD = "udp://fids_logstash:12201"

@@ -3,6 +3,18 @@ from fastapi.middleware.cors import CORSMiddleware
from src.api.routes import auth, flights, health, users
import logging
import graypy
from src.api.config import LOGS_UPD
# Ship this logger's records to Logstash's GELF UDP input (see observability/docker-compose.yml).
my_logger = logging.getLogger('test_logger')
my_logger.setLevel(logging.DEBUG)
handler = graypy.GELFUDPHandler("fids_logstash", 12201)
my_logger.addHandler(handler)
# my_logger.debug('Hello Graylog.')
app = FastAPI(title="Flights Information API")
app.include_router(flights.router, prefix="/flights")
app.include_router(health.router, prefix="/health")

@@ -1,8 +1,17 @@
from fastapi import APIRouter
# from src.api.main import my_logger
import logging
# Reuse the logger configured in src.api.main (the GELF handler is already attached there).
my_logger = logging.getLogger('test_logger')
# my_logger.setLevel(logging.DEBUG)
# handler = graypy.GELFUDPHandler("fids_logstash", 12201)
# my_logger.addHandler(handler)
router = APIRouter()
@router.get("", status_code=200)
async def get_health():
my_logger.debug("{\"health\":\"OK\"}")
return {"status": "OK"} return {"status": "OK"}

@@ -0,0 +1,9 @@
ELASTIC_VERSION=8.7.1
ELASTIC_PASSWORD='papanata'
LOGSTASH_INTERNAL_PASSWORD='papanata'
KIBANA_SYSTEM_PASSWORD='papanata'
METRICBEAT_INTERNAL_PASSWORD=''
FILEBEAT_INTERNAL_PASSWORD=''
HEARTBEAT_INTERNAL_PASSWORD='papanata'
MONITORING_INTERNAL_PASSWORD=''
BEATS_SYSTEM_PASSWORD='papanata'

observability/.gitattributes vendored Normal file
@@ -0,0 +1,2 @@
# Declare files that will always have LF line endings on checkout.
*.sh text eol=lf

@@ -0,0 +1,91 @@
version: '3.9'
services:
setup:
profiles:
- setup
image: ${ELK_SETUP_IMAGE}
init: true
volumes:
- ./elk/setup/entrypoint.sh:/entrypoint.sh:ro,Z
- ./elk/setup/lib.sh:/lib.sh:ro,Z
- ./elk/setup/roles:/roles:ro,Z
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
networks:
- elk
depends_on:
- elasticsearch
elasticsearch:
image: ${ELASTICSEARCH_IMAGE}
volumes:
- ./elk/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
- elasticsearch:/usr/share/elasticsearch/data:Z
ports:
- 9200:9200
- 9300:9300
environment:
node.name: elasticsearch
ES_JAVA_OPTS: -Xms512m -Xmx512m
# Bootstrap password.
# Used to initialize the keystore during the initial startup of
# Elasticsearch. Ignored on subsequent runs.
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
# Use single node discovery in order to disable production mode and avoid bootstrap checks.
# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
discovery.type: single-node
networks:
- elk
restart: unless-stopped
logstash:
container_name: fids_logstash
image: ${LOGSTASH_IMAGE}
volumes:
- ./elk/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
- ./elk/logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
ports:
- 5044:5044
- 50000:50000/tcp
- 50000:50000/udp
- 12201:12201/tcp
- 12201:12201/udp
- 9600:9600
environment:
LS_JAVA_OPTS: -Xms256m -Xmx256m
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
networks:
- elk
depends_on:
- elasticsearch
restart: unless-stopped
kibana:
image: ${KIBANA_IMAGE}
volumes:
- ./elk/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
ports:
- 5601:5601
environment:
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
networks:
- elk
depends_on:
- elasticsearch
restart: unless-stopped
networks:
elk:
driver: bridge
volumes:
elasticsearch:
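For reference, the commit's CI brings this file up in two passes — the one-shot `setup` profile first, then the long-running services with the extensions layered on top. A sketch mirroring the `.gitlab-ci.yml` script above:

```sh
# First pass: run the one-shot setup service (creates/updates Elasticsearch users).
docker compose -f observability/docker-compose.yml --env-file "$ENV_DEV_FILE" --profile setup up -d
# Second pass: bring up the stack plus the Curator and Heartbeat extensions.
docker compose -f observability/docker-compose.yml \
  -f observability/elk/extensions/curator/curator-compose.yml \
  -f observability/elk/extensions/heartbeat/heartbeat-compose.yml \
  --env-file "$ENV_DEV_FILE" up -d
```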

@@ -0,0 +1,7 @@
# Change
Inside Kibana:
Stack Management > Roles > logstash_writer
Add the desired permissions for the indices that will be used; in the example code, the logs follow the `trips_*` index pattern.
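The same change can also be scripted against the Elasticsearch Security API instead of clicking through Kibana. A minimal sketch, assuming the default port and the `elastic` superuser; the exact privileges to grant are illustrative assumptions:

```sh
# Hypothetical example: let the logstash_writer role write to trips_* indices.
curl -XPOST 'http://localhost:9200/_security/role/logstash_writer' \
  -H 'Content-Type: application/json' \
  -u elastic:<your elastic password> \
  -d '{
    "cluster": ["monitor", "manage_index_templates"],
    "indices": [
      {
        "names": ["trips_*"],
        "privileges": ["create_index", "create", "write"]
      }
    ]
  }'
```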

observability/elk/LICENSE Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Anthony Lapenna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

observability/elk/README.md Normal file
@@ -0,0 +1,503 @@
ORIGINAL REPO:
https://github.com/deviantony/docker-elk
# Elastic stack (ELK) on Docker
[![Elastic Stack version](https://img.shields.io/badge/Elastic%20Stack-8.7.1-00bfb3?style=flat&logo=elastic-stack)](https://www.elastic.co/blog/category/releases)
[![Build Status](https://github.com/deviantony/docker-elk/workflows/CI/badge.svg?branch=main)](https://github.com/deviantony/docker-elk/actions?query=workflow%3ACI+branch%3Amain)
[![Join the chat](https://badges.gitter.im/Join%20Chat.svg)](https://app.gitter.im/#/room/#deviantony_docker-elk:gitter.im)
Run the latest version of the [Elastic stack][elk-stack] with Docker and Docker Compose.
It gives you the ability to analyze any data set by using the searching/aggregation capabilities of Elasticsearch and
the visualization power of Kibana.
Based on the [official Docker images][elastic-docker] from Elastic:
* [Elasticsearch](https://github.com/elastic/elasticsearch/tree/main/distribution/docker)
* [Logstash](https://github.com/elastic/logstash/tree/main/docker)
* [Kibana](https://github.com/elastic/kibana/tree/main/src/dev/build/tasks/os_packages/docker_generator)
Other available stack variants:
* [`tls`](https://github.com/deviantony/docker-elk/tree/tls): TLS encryption enabled in Elasticsearch, Kibana (opt in),
and Fleet
* [`searchguard`](https://github.com/deviantony/docker-elk/tree/searchguard): Search Guard support
> **Note**
> [Platinum][subscriptions] features are enabled by default for a [trial][license-mngmt] duration of **30 days**. After
> this evaluation period, you will retain access to all the free features included in the Open Basic license seamlessly,
> without manual intervention required, and without losing any data. Refer to the [How to disable paid
> features](#how-to-disable-paid-features) section to opt out of this behaviour.
---
## tl;dr
```sh
docker-compose up setup
```
```sh
docker-compose up
```
![Animated demo](https://user-images.githubusercontent.com/3299086/155972072-0c89d6db-707a-47a1-818b-5f976565f95a.gif)
---
## Philosophy
We aim to provide the simplest possible entry into the Elastic stack for anybody who feels like experimenting with
this powerful combo of technologies. This project's default configuration is purposely minimal and unopinionated. It
does not rely on any external dependency, and uses as little custom automation as necessary to get things up and
running.
Instead, we believe in good documentation so that you can use this repository as a template, tweak it, and make it _your
own_. [sherifabdlnaby/elastdocker][elastdocker] is one example among others of a project that builds upon this idea.
---
## Contents
- [Elastic stack (ELK) on Docker](#elastic-stack-elk-on-docker)
- [tl;dr](#tldr)
- [Philosophy](#philosophy)
- [Contents](#contents)
- [Requirements](#requirements)
- [Host setup](#host-setup)
- [Docker Desktop](#docker-desktop)
- [Windows](#windows)
- [macOS](#macos)
- [Usage](#usage)
- [Bringing up the stack](#bringing-up-the-stack)
- [Initial setup](#initial-setup)
- [Setting up user authentication](#setting-up-user-authentication)
- [Injecting data](#injecting-data)
- [Cleanup](#cleanup)
- [Version selection](#version-selection)
- [Configuration](#configuration)
- [How to configure Elasticsearch](#how-to-configure-elasticsearch)
- [How to configure Kibana](#how-to-configure-kibana)
- [How to configure Logstash](#how-to-configure-logstash)
- [How to disable paid features](#how-to-disable-paid-features)
- [How to scale out the Elasticsearch cluster](#how-to-scale-out-the-elasticsearch-cluster)
- [How to re-execute the setup](#how-to-re-execute-the-setup)
- [How to reset a password programmatically](#how-to-reset-a-password-programmatically)
- [Extensibility](#extensibility)
- [How to add plugins](#how-to-add-plugins)
- [How to enable the provided extensions](#how-to-enable-the-provided-extensions)
- [JVM tuning](#jvm-tuning)
- [How to specify the amount of memory used by a service](#how-to-specify-the-amount-of-memory-used-by-a-service)
- [How to enable a remote JMX connection to a service](#how-to-enable-a-remote-jmx-connection-to-a-service)
- [Going further](#going-further)
- [Plugins and integrations](#plugins-and-integrations)
## Requirements
### Host setup
* [Docker Engine][docker-install] version **18.06.0** or newer
* [Docker Compose][compose-install] version **1.28.0** or newer (including [Compose V2][compose-v2])
* 1.5 GB of RAM
> **Note**
> Especially on Linux, make sure your user has the [required permissions][linux-postinstall] to interact with the Docker
> daemon.
By default, the stack exposes the following ports:
* 5044: Logstash Beats input
* 50000: Logstash TCP input
* 9600: Logstash monitoring API
* 9200: Elasticsearch HTTP
* 9300: Elasticsearch TCP transport
* 5601: Kibana
> **Warning**
> Elasticsearch's [bootstrap checks][bootstrap-checks] were purposely disabled to facilitate the setup of the Elastic
> stack in development environments. For production setups, we recommend users to set up their host according to the
> instructions from the Elasticsearch documentation: [Important System Configuration][es-sys-config].
### Docker Desktop
#### Windows
If you are using the legacy Hyper-V mode of _Docker Desktop for Windows_, ensure [File Sharing][win-filesharing] is
enabled for the `C:` drive.
#### macOS
The default configuration of _Docker Desktop for Mac_ allows mounting files from `/Users/`, `/Volumes/`, `/private/`,
`/tmp` and `/var/folders` exclusively. Make sure the repository is cloned in one of those locations or follow the
instructions from the [documentation][mac-filesharing] to add more locations.
## Usage
> **Warning**
> You must rebuild the stack images with `docker-compose build` whenever you switch branch or update the
> [version](#version-selection) of an already existing stack.
### Bringing up the stack
Clone this repository onto the Docker host that will run the stack with the command below:
```sh
git clone https://github.com/deviantony/docker-elk.git
```
Then, initialize the Elasticsearch users and groups required by docker-elk by executing the command:
```sh
docker-compose up setup
```
If everything went well and the setup completed without error, start the other stack components:
```sh
docker-compose up
```
> **Note**
> You can also run all services in the background (detached mode) by appending the `-d` flag to the above command.
Give Kibana about a minute to initialize, then access the Kibana web UI by opening <http://localhost:5601> in a web
browser and use the following (default) credentials to log in:
* user: *elastic*
* password: *changeme*
> **Note**
> Upon the initial startup, the `elastic`, `logstash_internal` and `kibana_system` Elasticsearch users are initialized
> with the values of the passwords defined in the [`.env`](.env) file (_"changeme"_ by default). The first one is the
> [built-in superuser][builtin-users], the other two are used by Kibana and Logstash respectively to communicate with
> Elasticsearch. This task is only performed during the _initial_ startup of the stack. To change users' passwords
> _after_ they have been initialized, please refer to the instructions in the next section.
### Initial setup
#### Setting up user authentication
> **Note**
> Refer to [Security settings in Elasticsearch][es-security] to disable authentication.
> **Warning**
> Starting with Elastic v8.0.0, it is no longer possible to run Kibana using the bootstrapped privileged `elastic` user.
The _"changeme"_ password set by default for all aforementioned users is **unsecure**. For increased security, we will
reset the passwords of all aforementioned Elasticsearch users to random secrets.
1. Reset passwords for default users
The commands below reset the passwords of the `elastic`, `logstash_internal` and `kibana_system` users. Take note
of them.
```sh
docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user elastic
```
```sh
docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user logstash_internal
```
```sh
docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user kibana_system
```
If the need for it arises (e.g. if you want to [collect monitoring information][ls-monitoring] through Beats and
other components), feel free to repeat this operation at any time for the rest of the [built-in
users][builtin-users].
1. Replace usernames and passwords in configuration files
Replace the password of the `elastic` user inside the `.env` file with the password generated in the previous step.
Its value isn't used by any core component, but [extensions](#how-to-enable-the-provided-extensions) use it to
connect to Elasticsearch.
> **Note**
> In case you don't plan on using any of the provided [extensions](#how-to-enable-the-provided-extensions), or
> prefer to create your own roles and users to authenticate these services, it is safe to remove the
> `ELASTIC_PASSWORD` entry from the `.env` file altogether after the stack has been initialized.
Replace the password of the `logstash_internal` user inside the `.env` file with the password generated in the
previous step. Its value is referenced inside the Logstash pipeline file (`logstash/pipeline/logstash.conf`).
Replace the password of the `kibana_system` user inside the `.env` file with the password generated in the previous
step. Its value is referenced inside the Kibana configuration file (`kibana/config/kibana.yml`).
See the [Configuration](#configuration) section below for more information about these configuration files.
1. Restart Logstash and Kibana to re-connect to Elasticsearch using the new passwords
```sh
docker-compose up -d logstash kibana
```
> **Note**
> Learn more about the security of the Elastic stack at [Secure the Elastic Stack][sec-cluster].
#### Injecting data
Launch the Kibana web UI by opening <http://localhost:5601> in a web browser, and use the following credentials to log
in:
* user: *elastic*
* password: *\<your generated elastic password>*
Now that the stack is fully configured, you can go ahead and inject some log entries.
The shipped Logstash configuration allows you to send data over the TCP port 50000. For example, you can use one of the
following commands — depending on your installed version of `nc` (Netcat) — to ingest the content of the log file
`/path/to/logfile.log` in Elasticsearch, via Logstash:
```sh
# Execute `nc -h` to determine your `nc` version
cat /path/to/logfile.log | nc -q0 localhost 50000 # BSD
cat /path/to/logfile.log | nc -c localhost 50000 # GNU
cat /path/to/logfile.log | nc --send-only localhost 50000 # nmap
```
You can also load the sample data provided by your Kibana installation.
### Cleanup
Elasticsearch data is persisted inside a volume by default.
In order to entirely shutdown the stack and remove all persisted data, use the following Docker Compose command:
```sh
docker-compose down -v
```
### Version selection
This repository stays aligned with the latest version of the Elastic stack. The `main` branch tracks the current major
version (8.x).
To use a different version of the core Elastic components, simply change the version number inside the [`.env`](.env)
file. If you are upgrading an existing stack, remember to rebuild all container images using the `docker-compose build`
command.
> **Warning**
> Always pay attention to the [official upgrade instructions][upgrade] for each individual component before performing a
> stack upgrade.
Older major versions are also supported on separate branches:
* [`release-7.x`](https://github.com/deviantony/docker-elk/tree/release-7.x): 7.x series
* [`release-6.x`](https://github.com/deviantony/docker-elk/tree/release-6.x): 6.x series (End-of-life)
* [`release-5.x`](https://github.com/deviantony/docker-elk/tree/release-5.x): 5.x series (End-of-life)
## Configuration
> **Note**
> Configuration is not dynamically reloaded; you will need to restart individual components after any configuration
> change.
### How to configure Elasticsearch
The Elasticsearch configuration is stored in [`elasticsearch/config/elasticsearch.yml`][config-es].
You can also specify the options you want to override by setting environment variables inside the Compose file:
```yml
elasticsearch:
environment:
network.host: _non_loopback_
cluster.name: my-cluster
```
Please refer to the following documentation page for more details about how to configure Elasticsearch inside Docker
containers: [Install Elasticsearch with Docker][es-docker].
### How to configure Kibana
The Kibana default configuration is stored in [`kibana/config/kibana.yml`][config-kbn].
You can also specify the options you want to override by setting environment variables inside the Compose file:
```yml
kibana:
environment:
SERVER_NAME: kibana.example.org
```
Please refer to the following documentation page for more details about how to configure Kibana inside Docker
containers: [Install Kibana with Docker][kbn-docker].
### How to configure Logstash
The Logstash configuration is stored in [`logstash/config/logstash.yml`][config-ls].
You can also specify the options you want to override by setting environment variables inside the Compose file:
```yml
logstash:
environment:
LOG_LEVEL: debug
```
Please refer to the following documentation page for more details about how to configure Logstash inside Docker
containers: [Configuring Logstash for Docker][ls-docker].
### How to disable paid features
Switch the value of Elasticsearch's `xpack.license.self_generated.type` setting from `trial` to `basic` (see [License
settings][license-settings]).
You can also cancel an ongoing trial before its expiry date — and thus revert to a basic license — either from the
[License Management][license-mngmt] panel of Kibana, or using Elasticsearch's [Licensing APIs][license-apis].
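The same reversal can also be done from the command line via the Licensing API. A minimal sketch, assuming the default port and the `elastic` superuser:

```sh
# Revert an ongoing trial to the free Basic license.
curl -XPOST 'http://localhost:9200/_license/start_basic?acknowledge=true' \
  -u elastic:<your elastic password>
```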
### How to scale out the Elasticsearch cluster
Follow the instructions from the Wiki: [Scaling out Elasticsearch](https://github.com/deviantony/docker-elk/wiki/Elasticsearch-cluster)
### How to re-execute the setup
To run the setup container again and re-initialize all users for which a password was defined inside the `.env` file,
simply "up" the `setup` Compose service again:
```console
$ docker-compose up setup
⠿ Container docker-elk-elasticsearch-1 Running
⠿ Container docker-elk-setup-1 Created
Attaching to docker-elk-setup-1
...
docker-elk-setup-1 | [+] User 'monitoring_internal'
docker-elk-setup-1 | ⠿ User does not exist, creating
docker-elk-setup-1 | [+] User 'beats_system'
docker-elk-setup-1 | ⠿ User exists, setting password
docker-elk-setup-1 exited with code 0
```
### How to reset a password programmatically
If for any reason you are unable to use Kibana to change the passwords of your users (including [built-in
users][builtin-users]), you can use the Elasticsearch API instead and achieve the same result.
In the example below, we reset the password of the `elastic` user (notice "/user/elastic" in the URL):
```sh
curl -XPOST -D- 'http://localhost:9200/_security/user/elastic/_password' \
-H 'Content-Type: application/json' \
-u elastic:<your current elastic password> \
-d '{"password" : "<your new password>"}'
```
## Extensibility
### How to add plugins
To add plugins to any ELK component you have to:
1. Add a `RUN` statement to the corresponding `Dockerfile` (eg. `RUN logstash-plugin install logstash-filter-json`)
1. Add the associated plugin code configuration to the service configuration (eg. Logstash input/output)
1. Rebuild the images using the `docker-compose build` command
### How to enable the provided extensions
A few extensions are available inside the [`extensions`](extensions) directory. These extensions provide features which
are not part of the standard Elastic stack, but can be used to enrich it with extra integrations.
The documentation for these extensions is provided inside each individual subdirectory, on a per-extension basis. Some
of them require manual changes to the default ELK configuration.
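As an illustration (following the commands shown in the per-extension READMEs), several extensions can be stacked onto the base Compose file in a single command:

```sh
# Sketch: enable the Curator and Heartbeat extensions together.
docker-compose -f docker-compose.yml \
  -f extensions/curator/curator-compose.yml \
  -f extensions/heartbeat/heartbeat-compose.yml \
  up -d
```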
## JVM tuning
### How to specify the amount of memory used by a service
The startup scripts for Elasticsearch and Logstash can append extra JVM options from the value of an environment
variable, allowing the user to adjust the amount of memory that can be used by each component:
| Service | Environment variable |
|---------------|----------------------|
| Elasticsearch | ES_JAVA_OPTS |
| Logstash | LS_JAVA_OPTS |
To accommodate environments where memory is scarce (Docker Desktop for Mac has only 2 GB available by default), the Heap
Size allocation is capped by default in the `docker-compose.yml` file to 512 MB for Elasticsearch and 256 MB for
Logstash. If you want to override the default JVM configuration, edit the matching environment variable(s) in the
`docker-compose.yml` file.
For example, to increase the maximum JVM Heap Size for Logstash:
```yml
logstash:
environment:
LS_JAVA_OPTS: -Xms1g -Xmx1g
```
When these options are not set:
* Elasticsearch starts with a JVM Heap Size that is [determined automatically][es-heap].
* Logstash starts with a fixed JVM Heap Size of 1 GB.
### How to enable a remote JMX connection to a service
As for the Java Heap memory (see above), you can specify JVM options to enable JMX and map the JMX port on the Docker
host.
Update the `{ES,LS}_JAVA_OPTS` environment variable with the following content (the JMX service is mapped to port
18080 here; you can change that). Do not forget to update the `-Djava.rmi.server.hostname` option with the IP address of your
Docker host (replace **DOCKER_HOST_IP**):
```yml
logstash:
environment:
LS_JAVA_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=18080 -Dcom.sun.management.jmxremote.rmi.port=18080 -Djava.rmi.server.hostname=DOCKER_HOST_IP -Dcom.sun.management.jmxremote.local.only=false
```
## Going further
### Plugins and integrations
See the following Wiki pages:
* [External applications](https://github.com/deviantony/docker-elk/wiki/External-applications)
* [Popular integrations](https://github.com/deviantony/docker-elk/wiki/Popular-integrations)
[elk-stack]: https://www.elastic.co/what-is/elk-stack
[elastic-docker]: https://www.docker.elastic.co/
[subscriptions]: https://www.elastic.co/subscriptions
[es-security]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html
[license-settings]: https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html
[license-mngmt]: https://www.elastic.co/guide/en/kibana/current/managing-licenses.html
[license-apis]: https://www.elastic.co/guide/en/elasticsearch/reference/current/licensing-apis.html
[elastdocker]: https://github.com/sherifabdlnaby/elastdocker
[docker-install]: https://docs.docker.com/get-docker/
[compose-install]: https://docs.docker.com/compose/install/
[compose-v2]: https://docs.docker.com/compose/compose-v2/
[linux-postinstall]: https://docs.docker.com/engine/install/linux-postinstall/
[bootstrap-checks]: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
[es-sys-config]: https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config.html
[es-heap]: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#heap-size-settings
[win-filesharing]: https://docs.docker.com/desktop/settings/windows/#file-sharing
[mac-filesharing]: https://docs.docker.com/desktop/settings/mac/#file-sharing
[builtin-users]: https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
[ls-monitoring]: https://www.elastic.co/guide/en/logstash/current/monitoring-with-metricbeat.html
[sec-cluster]: https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-cluster.html
[connect-kibana]: https://www.elastic.co/guide/en/kibana/current/connect-to-elasticsearch.html
[index-pattern]: https://www.elastic.co/guide/en/kibana/current/index-patterns.html
[config-es]: ./elasticsearch/config/elasticsearch.yml
[config-kbn]: ./kibana/config/kibana.yml
[config-ls]: ./logstash/config/logstash.yml
[es-docker]: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html
[kbn-docker]: https://www.elastic.co/guide/en/kibana/current/docker.html
[ls-docker]: https://www.elastic.co/guide/en/logstash/current/docker-config.html
[upgrade]: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html

@@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

@@ -0,0 +1,7 @@
ARG ELASTIC_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}
# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu

@@ -0,0 +1,13 @@
---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/main/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: docker-cluster
network.host: 0.0.0.0
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html
#
xpack.license.self_generated.type: trial
xpack.security.enabled: true
cluster.routing.allocation.disk.threshold_enabled: false

@@ -0,0 +1,78 @@
[2023-10-29T18:17:12.012+00:00][INFO ][node] Kibana process configured with roles: [background_tasks, ui]
[2023-10-29T18:17:35.228+00:00][INFO ][plugins-service] Plugin "cloudChat" is disabled.
[2023-10-29T18:17:35.229+00:00][INFO ][plugins-service] Plugin "cloudExperiments" is disabled.
[2023-10-29T18:17:35.229+00:00][INFO ][plugins-service] Plugin "cloudFullStory" is disabled.
[2023-10-29T18:17:35.230+00:00][INFO ][plugins-service] Plugin "cloudGainsight" is disabled.
[2023-10-29T18:17:35.253+00:00][INFO ][plugins-service] Plugin "profiling" is disabled.
[2023-10-29T18:17:35.474+00:00][INFO ][http.server.Preboot] http server running at http://0.0.0.0:5601
[2023-10-29T18:17:35.551+00:00][INFO ][plugins-system.preboot] Setting up [1] plugins: [interactiveSetup]
[2023-10-29T18:17:35.653+00:00][WARN ][config.deprecation] The default mechanism for Reporting privileges will work differently in future versions, which will affect the behavior of this cluster. Set "xpack.reporting.roles.enabled" to "false" to adopt the future behavior before upgrading.
[2023-10-29T18:17:36.228+00:00][INFO ][plugins-system.standard] Setting up [132] plugins: [translations,monitoringCollection,licensing,globalSearch,globalSearchProviders,features,mapsEms,licenseApiGuard,customBranding,usageCollection,taskManager,cloud,guidedOnboarding,telemetryCollectionManager,telemetryCollectionXpack,kibanaUsageCollection,share,screenshotMode,banners,newsfeed,ftrApis,fieldFormats,expressions,screenshotting,dataViews,charts,esUiShared,customIntegrations,home,searchprofiler,painlessLab,grokdebugger,management,cloudDataMigration,advancedSettings,spaces,security,snapshotRestore,lists,encryptedSavedObjects,telemetry,licenseManagement,files,eventLog,actions,notifications,console,contentManagement,bfetch,data,watcher,fileUpload,ingestPipelines,ecsDataQualityDashboard,alerting,unifiedSearch,unifiedFieldList,savedSearch,savedObjects,graph,savedObjectsTagging,savedObjectsManagement,eventAnnotation,embeddable,reporting,uiActionsEnhanced,presentationUtil,expressionShape,expressionRevealImage,expressionRepeatImage,expressionMetric,expressionImage,controls,dataViewFieldEditor,triggersActionsUi,transform,stackConnectors,stackAlerts,ruleRegistry,visualizations,canvas,visTypeXy,visTypeVislib,visTypeVega,visTypeTimeseries,visTypeTimelion,visTypeTagcloud,visTypeTable,visTypeMetric,visTypeHeatmap,visTypeMarkdown,dashboard,dashboardEnhanced,expressionXY,expressionTagcloud,expressionPartitionVis,visTypePie,expressionMetricVis,expressionLegacyMetricVis,expressionHeatmap,expressionGauge,lens,maps,cases,timelines,sessionView,kubernetesSecurity,threatIntelligence,aiops,discover,observability,fleet,osquery,indexManagement,rollup,remoteClusters,crossClusterReplication,indexLifecycleManagement,cloudSecurityPosture,discoverEnhanced,dataVisualizer,ml,synthetics,securitySolution,infra,upgradeAssistant,monitoring,logstash,enterpriseSearch,apm,visTypeGauge,dataViewManagement]
[2023-10-29T18:17:36.265+00:00][INFO ][custom-branding-service] CustomBrandingService registering plugin: customBranding
[2023-10-29T18:17:36.274+00:00][INFO ][plugins.taskManager] TaskManager is identified by the Kibana UUID: 1ca569ff-ec62-4279-a960-51761699b682
[2023-10-29T18:17:36.538+00:00][WARN ][plugins.security.config] Generating a random key for xpack.security.encryptionKey. To prevent sessions from being invalidated on restart, please set xpack.security.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command.
[2023-10-29T18:17:36.539+00:00][WARN ][plugins.security.config] Session cookies will be transmitted over insecure connections. This is not recommended.
[2023-10-29T18:17:36.616+00:00][WARN ][plugins.security.config] Generating a random key for xpack.security.encryptionKey. To prevent sessions from being invalidated on restart, please set xpack.security.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command.
[2023-10-29T18:17:36.617+00:00][WARN ][plugins.security.config] Session cookies will be transmitted over insecure connections. This is not recommended.
[2023-10-29T18:17:36.637+00:00][WARN ][plugins.encryptedSavedObjects] Saved objects encryption key is not set. This will severely limit Kibana functionality. Please set xpack.encryptedSavedObjects.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command.
[2023-10-29T18:17:36.663+00:00][WARN ][plugins.actions] APIs are disabled because the Encrypted Saved Objects plugin is missing encryption key. Please set xpack.encryptedSavedObjects.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command.
[2023-10-29T18:17:36.676+00:00][INFO ][plugins.notifications] Email Service Error: Email connector not specified.
[2023-10-29T18:17:36.892+00:00][WARN ][plugins.alerting] APIs are disabled because the Encrypted Saved Objects plugin is missing encryption key. Please set xpack.encryptedSavedObjects.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command.
[2023-10-29T18:17:36.991+00:00][WARN ][plugins.reporting.config] Generating a random key for xpack.reporting.encryptionKey. To prevent sessions from being invalidated on restart, please set xpack.reporting.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command.
[2023-10-29T18:17:36.993+00:00][WARN ][plugins.reporting.config] Found 'server.host: "0.0.0.0"' in Kibana configuration. Reporting is not able to use this as the Kibana server hostname. To enable PNG/PDF Reporting to work, 'xpack.reporting.kibanaServer.hostname: localhost' is automatically set in the configuration. You can prevent this message by adding 'xpack.reporting.kibanaServer.hostname: localhost' in kibana.yml.
[2023-10-29T18:17:37.055+00:00][INFO ][plugins.ruleRegistry] Installing common resources shared between all indices
[2023-10-29T18:17:37.676+00:00][INFO ][plugins.cloudSecurityPosture] Registered task successfully [Task: cloud_security_posture-stats_task]
[2023-10-29T18:17:38.770+00:00][INFO ][plugins.screenshotting.config] Chromium sandbox provides an additional layer of protection, and is supported for Linux Ubuntu 20.04 OS. Automatically enabling Chromium sandbox.
[2023-10-29T18:17:38.866+00:00][ERROR][elasticsearch-service] Unable to retrieve version information from Elasticsearch nodes. connect ECONNREFUSED 172.23.0.2:9200
[2023-10-29T18:17:40.829+00:00][INFO ][plugins.screenshotting.chromium] Browser executable: /usr/share/kibana/x-pack/plugins/screenshotting/chromium/headless_shell-linux_x64/headless_shell
[2023-10-29T18:17:56.356+00:00][ERROR][elasticsearch-service] Unable to retrieve version information from Elasticsearch nodes. security_exception
Root causes:
security_exception: unable to authenticate user [kibana_system] for REST request [/_nodes?filter_path=nodes.*.version%2Cnodes.*.http.publish_address%2Cnodes.*.ip]
[2023-10-29T18:17:59.299+00:00][INFO ][savedobjects-service] Waiting until all Elasticsearch nodes are compatible with Kibana before starting saved objects migrations...
[2023-10-29T18:17:59.300+00:00][INFO ][savedobjects-service] Starting saved objects migrations
[2023-10-29T18:17:59.408+00:00][INFO ][savedobjects-service] [.kibana_task_manager] INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT. took: 58ms.
[2023-10-29T18:17:59.420+00:00][INFO ][savedobjects-service] [.kibana] INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT. took: 81ms.
[2023-10-29T18:18:00.162+00:00][INFO ][savedobjects-service] [.kibana] OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_READ. took: 742ms.
[2023-10-29T18:18:00.167+00:00][INFO ][savedobjects-service] [.kibana_task_manager] OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_READ. took: 759ms.
[2023-10-29T18:18:00.271+00:00][INFO ][savedobjects-service] [.kibana_task_manager] OUTDATED_DOCUMENTS_SEARCH_READ -> OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT. took: 104ms.
[2023-10-29T18:18:00.286+00:00][INFO ][savedobjects-service] [.kibana_task_manager] OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT -> CHECK_TARGET_MAPPINGS. took: 15ms.
[2023-10-29T18:18:00.287+00:00][INFO ][savedobjects-service] [.kibana_task_manager] CHECK_TARGET_MAPPINGS -> CHECK_VERSION_INDEX_READY_ACTIONS. took: 1ms.
[2023-10-29T18:18:00.288+00:00][INFO ][savedobjects-service] [.kibana_task_manager] CHECK_VERSION_INDEX_READY_ACTIONS -> DONE. took: 1ms.
[2023-10-29T18:18:00.288+00:00][INFO ][savedobjects-service] [.kibana_task_manager] Migration completed after 938ms
[2023-10-29T18:18:00.297+00:00][INFO ][savedobjects-service] [.kibana] OUTDATED_DOCUMENTS_SEARCH_READ -> OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT. took: 135ms.
[2023-10-29T18:18:00.302+00:00][INFO ][savedobjects-service] [.kibana] OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT -> CHECK_TARGET_MAPPINGS. took: 5ms.
[2023-10-29T18:18:00.303+00:00][INFO ][savedobjects-service] [.kibana] CHECK_TARGET_MAPPINGS -> CHECK_VERSION_INDEX_READY_ACTIONS. took: 1ms.
[2023-10-29T18:18:00.303+00:00][INFO ][savedobjects-service] [.kibana] CHECK_VERSION_INDEX_READY_ACTIONS -> DONE. took: 0ms.
[2023-10-29T18:18:00.303+00:00][INFO ][savedobjects-service] [.kibana] Migration completed after 964ms
[2023-10-29T18:18:00.317+00:00][INFO ][plugins-system.preboot] Stopping all plugins.
[2023-10-29T18:18:00.319+00:00][INFO ][plugins-system.standard] Starting [132] plugins: [translations,monitoringCollection,licensing,globalSearch,globalSearchProviders,features,mapsEms,licenseApiGuard,customBranding,usageCollection,taskManager,cloud,guidedOnboarding,telemetryCollectionManager,telemetryCollectionXpack,kibanaUsageCollection,share,screenshotMode,banners,newsfeed,ftrApis,fieldFormats,expressions,screenshotting,dataViews,charts,esUiShared,customIntegrations,home,searchprofiler,painlessLab,grokdebugger,management,cloudDataMigration,advancedSettings,spaces,security,snapshotRestore,lists,encryptedSavedObjects,telemetry,licenseManagement,files,eventLog,actions,notifications,console,contentManagement,bfetch,data,watcher,fileUpload,ingestPipelines,ecsDataQualityDashboard,alerting,unifiedSearch,unifiedFieldList,savedSearch,savedObjects,graph,savedObjectsTagging,savedObjectsManagement,eventAnnotation,embeddable,reporting,uiActionsEnhanced,presentationUtil,expressionShape,expressionRevealImage,expressionRepeatImage,expressionMetric,expressionImage,controls,dataViewFieldEditor,triggersActionsUi,transform,stackConnectors,stackAlerts,ruleRegistry,visualizations,canvas,visTypeXy,visTypeVislib,visTypeVega,visTypeTimeseries,visTypeTimelion,visTypeTagcloud,visTypeTable,visTypeMetric,visTypeHeatmap,visTypeMarkdown,dashboard,dashboardEnhanced,expressionXY,expressionTagcloud,expressionPartitionVis,visTypePie,expressionMetricVis,expressionLegacyMetricVis,expressionHeatmap,expressionGauge,lens,maps,cases,timelines,sessionView,kubernetesSecurity,threatIntelligence,aiops,discover,observability,fleet,osquery,indexManagement,rollup,remoteClusters,crossClusterReplication,indexLifecycleManagement,cloudSecurityPosture,discoverEnhanced,dataVisualizer,ml,synthetics,securitySolution,infra,upgradeAssistant,monitoring,logstash,enterpriseSearch,apm,visTypeGauge,dataViewManagement]
[2023-10-29T18:18:01.896+00:00][INFO ][plugins.fleet] Task Fleet-Usage-Sender-1.1.0 scheduled with interval 1h
[2023-10-29T18:18:01.945+00:00][INFO ][plugins.monitoring.monitoring] config sourced from: production cluster
[2023-10-29T18:18:02.291+00:00][INFO ][plugins.monitoring.monitoring.kibana-monitoring] Starting monitoring stats collection
[2023-10-29T18:18:02.292+00:00][INFO ][plugins.fleet] Beginning fleet setup
[2023-10-29T18:18:02.302+00:00][INFO ][status] Kibana is now degraded
[2023-10-29T18:18:02.306+00:00][INFO ][plugins.fleet] Task Fleet-Usage-Logger-Task scheduled with interval 15m
[2023-10-29T18:18:02.330+00:00][INFO ][plugins.ruleRegistry] Installed common resources shared between all indices
[2023-10-29T18:18:02.331+00:00][INFO ][plugins.ruleRegistry] Installing resources for index .alerts-observability.uptime.alerts
[2023-10-29T18:18:02.332+00:00][INFO ][plugins.ruleRegistry] Installing resources for index .alerts-security.alerts
[2023-10-29T18:18:02.332+00:00][INFO ][plugins.ruleRegistry] Installing resources for index .preview.alerts-security.alerts
[2023-10-29T18:18:02.333+00:00][INFO ][plugins.ruleRegistry] Installing resources for index .alerts-observability.logs.alerts
[2023-10-29T18:18:02.333+00:00][INFO ][plugins.ruleRegistry] Installing resources for index .alerts-observability.metrics.alerts
[2023-10-29T18:18:02.334+00:00][INFO ][plugins.ruleRegistry] Installing resources for index .alerts-observability.apm.alerts
[2023-10-29T18:18:02.418+00:00][INFO ][plugins.ruleRegistry] Installed resources for index .alerts-security.alerts
[2023-10-29T18:18:02.500+00:00][INFO ][plugins.ruleRegistry] Installed resources for index .preview.alerts-security.alerts
[2023-10-29T18:18:02.697+00:00][INFO ][plugins.ruleRegistry] Installed resources for index .alerts-observability.uptime.alerts
[2023-10-29T18:18:02.707+00:00][INFO ][plugins.ruleRegistry] Installed resources for index .alerts-observability.metrics.alerts
[2023-10-29T18:18:02.708+00:00][INFO ][plugins.ruleRegistry] Installed resources for index .alerts-observability.apm.alerts
[2023-10-29T18:18:02.712+00:00][INFO ][plugins.ruleRegistry] Installed resources for index .alerts-observability.logs.alerts
[2023-10-29T18:18:03.444+00:00][INFO ][plugins.ml] Task ML:saved-objects-sync-task: scheduled with interval 1h
[2023-10-29T18:18:03.526+00:00][INFO ][plugins.fleet] Fleet setup completed
[2023-10-29T18:18:03.544+00:00][INFO ][plugins.securitySolution] Dependent plugin setup complete - Starting ManifestTask
[2023-10-29T18:18:03.689+00:00][INFO ][plugins.synthetics] Installed synthetics index templates
[2023-10-29T18:18:06.081+00:00][INFO ][plugins.fleet] Running Fleet Usage telemetry send task
[2023-10-29T18:18:06.381+00:00][INFO ][plugins.fleet] Fleet Usage: {"agents_enabled":true,"agents":{"total_enrolled":0,"healthy":0,"unhealthy":0,"offline":0,"inactive":0,"unenrolled":0,"total_all_statuses":0,"updating":0},"fleet_server":{"total_enrolled":0,"healthy":0,"unhealthy":0,"offline":0,"updating":0,"total_all_statuses":0,"num_host_urls":1}}
[2023-10-29T18:18:08.650+00:00][INFO ][plugins.ml] Task ML:saved-objects-sync-task: No ML saved objects in need of synchronization
[2023-10-29T18:18:10.475+00:00][INFO ][status] Kibana is now available (was degraded)
[2023-10-29T18:18:28.254+00:00][INFO ][http.server.Kibana] http server running at http://0.0.0.0:5601
[2023-10-29T18:18:45.417+00:00][INFO ][plugins.security.routes] Logging in with provider "basic" (basic)
[2023-10-29T18:22:13.703+00:00][INFO ][plugins.securitySolution.endpoint:metadata-check-transforms-task:0.0.1] no endpoint installation found

@@ -0,0 +1,3 @@
# Extensions
Third-party extensions that enable extra integrations with the Elastic stack.

@@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

@@ -0,0 +1,9 @@
FROM untergeek/curator:8.0.2
USER root
RUN >>/var/spool/cron/crontabs/nobody \
echo '* * * * * /curator/curator /.curator/delete_log_files_curator.yml'
ENTRYPOINT ["crond"]
CMD ["-f", "-d8"]

@@ -0,0 +1,20 @@
# Curator
Elasticsearch Curator helps you curate or manage your indices.
## Usage
If you want to include the Curator extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `curator-compose.yml` file:
```bash
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up
```
This sample setup demonstrates how to run `curator` every minute using `cron`.
All configuration files are available in the `config/` directory.
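To trigger a run immediately instead of waiting for `cron`, one option is to execute the same command the crontab entry uses, inside the running container. A sketch, assuming the `curator` service from the Compose file above is up:

```sh
docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml \
  exec curator /curator/curator /.curator/delete_log_files_curator.yml
```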
## Documentation
[Curator Reference](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html)

@@ -0,0 +1,13 @@
# Curator configuration
# https://www.elastic.co/guide/en/elasticsearch/client/curator/current/configfile.html
elasticsearch:
client:
hosts: [ http://elasticsearch:9200 ]
other_settings:
username: elastic
password: ${ELASTIC_PASSWORD}
logging:
loglevel: INFO
logformat: default

@@ -0,0 +1,21 @@
actions:
1:
action: delete_indices
description: >-
Delete indices. Find which to delete by first limiting the list to
logstash- prefixed indices. Then further filter those to prevent deletion
of anything newer than the number of days specified by unit_count.
Ignore the error if the filter does not result in an actionable list of
indices (ignore_empty_list) and exit cleanly.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: logstash-
- filtertype: age
source: creation_date
direction: older
unit: days
unit_count: 3

@@ -0,0 +1,15 @@
version: '3.7'
services:
curator:
image: ${CURATOR_IMAGE}
init: true
volumes:
- ./elk/extensions/curator/config/curator.yml:/.curator/curator.yml:ro,Z
- ./elk/extensions/curator/config/delete_log_files_curator.yml:/.curator/delete_log_files_curator.yml:ro,Z
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
networks:
- elk
depends_on:
- elasticsearch

@@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

@@ -0,0 +1,3 @@
ARG ELASTIC_VERSION
FROM docker.elastic.co/beats/heartbeat:${ELASTIC_VERSION}

@@ -0,0 +1,41 @@
# Heartbeat
Heartbeat is a lightweight daemon that periodically checks the status of your services and determines whether they are
available.
## Usage
**This extension requires the `heartbeat_internal` and `beats_system` users to be created and initialized with a
password.** In case you haven't done that during the initial startup of the stack, please refer to [How to re-execute
the setup][setup] to run the setup container again and initialize these users.
To include Heartbeat in the stack, run Docker Compose from the root of the repository with an additional command line
argument referencing the `heartbeat-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml up
```
## Configuring Heartbeat
The Heartbeat configuration is stored in [`config/heartbeat.yml`](./config/heartbeat.yml). You can modify this file
with the help of the [Configuration reference][heartbeat-config].
Any change to the Heartbeat configuration requires a restart of the Heartbeat container:
```console
$ docker-compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml restart heartbeat
```
Please refer to the following documentation page for more details about how to configure Heartbeat inside a
Docker container: [Run Heartbeat on Docker][heartbeat-docker].
## See also
[Heartbeat documentation][heartbeat-doc]
[heartbeat-config]: https://www.elastic.co/guide/en/beats/heartbeat/current/heartbeat-reference-yml.html
[heartbeat-docker]: https://www.elastic.co/guide/en/beats/heartbeat/current/running-on-docker.html
[heartbeat-doc]: https://www.elastic.co/guide/en/beats/heartbeat/current/index.html
[setup]: ../../README.md#how-to-re-execute-the-setup

@@ -0,0 +1,49 @@
## Heartbeat configuration
## https://github.com/elastic/beats/blob/main/deploy/docker/heartbeat.docker.yml
#
name: heartbeat
heartbeat.monitors:
- type: http
name: elasticsearch
schedule: '@every 5s'
urls:
- http://elasticsearch:9200
username: heartbeat_internal
password: ${HEARTBEAT_INTERNAL_PASSWORD}
- type: icmp
name: elasticsearch
schedule: '@every 5s'
hosts:
- elasticsearch
# - type: http
# name: dummy
# schedule: '@every 5s'
# check.response.status: [200]
# urls:
# - http://localhost:5000
processors:
- add_cloud_metadata: ~
monitoring:
enabled: true
elasticsearch:
username: beats_system
password: ${BEATS_SYSTEM_PASSWORD}
output.elasticsearch:
hosts: [ http://elasticsearch:9200 ]
username: heartbeat_internal
password: ${HEARTBEAT_INTERNAL_PASSWORD}
## HTTP endpoint for health checking
## https://www.elastic.co/guide/en/beats/heartbeat/current/http-endpoint.html
#
http:
enabled: true
host: 0.0.0.0
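Since this configuration enables libbeat's HTTP monitoring endpoint, a quick liveness probe is possible from inside the container. A sketch, assuming the default endpoint port 5066 and that `curl` is available in the image:

```sh
docker-compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml \
  exec heartbeat curl -s localhost:5066/stats
```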

@@ -0,0 +1,21 @@
version: '3.7'
services:
heartbeat:
image: ${HEARTBEAT_IMAGE}
command:
# Log to stderr.
- -e
# Disable config file permissions checks. Allows mounting
# 'config/heartbeat.yml' even if it's not owned by root.
# see: https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
- --strict.perms=false
volumes:
- ./elk/extensions/heartbeat/config/heartbeat.yml:/usr/share/heartbeat/heartbeat.yml:ro,Z
environment:
HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
networks:
- elk
depends_on:
- elasticsearch

@@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

@@ -0,0 +1,7 @@
ARG ELASTIC_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/kibana/kibana:${ELASTIC_VERSION}
# Add your kibana plugins setup here
# Example: RUN kibana-plugin install <name|url>

@@ -0,0 +1,94 @@
---
## Default Kibana configuration from Kibana base image.
## https://github.com/elastic/kibana/blob/main/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.ts
#
server.name: marte
server.host: 0.0.0.0
elasticsearch.hosts: [ http://elasticsearch:9200 ]
monitoring.ui.container.elasticsearch.enabled: true
monitoring.ui.container.logstash.enabled: true
## X-Pack security credentials
#
elasticsearch.username: kibana_system
elasticsearch.password: ${KIBANA_SYSTEM_PASSWORD}
## Encryption keys (optional but highly recommended)
##
## Generate with either
## $ docker container run --rm docker.elastic.co/kibana/kibana:8.6.2 bin/kibana-encryption-keys generate
## $ openssl rand -hex 32
##
## https://www.elastic.co/guide/en/kibana/current/using-kibana-with-security.html
## https://www.elastic.co/guide/en/kibana/current/kibana-encryption-keys.html
#
#xpack.security.encryptionKey:
#xpack.encryptedSavedObjects.encryptionKey:
#xpack.reporting.encryptionKey:
## Fleet
## https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html
#
xpack.fleet.agents.fleet_server.hosts: [ http://fleet-server:8220 ]
xpack.fleet.outputs:
- id: fleet-default-output
name: default
type: elasticsearch
hosts: [ http://elasticsearch:9200 ]
is_default: true
is_default_monitoring: true
xpack.fleet.packages:
- name: fleet_server
version: latest
- name: system
version: latest
- name: elastic_agent
version: latest
- name: apm
version: latest
xpack.fleet.agentPolicies:
- name: Fleet Server Policy
id: fleet-server-policy
description: Static agent policy for Fleet Server
monitoring_enabled:
- logs
- metrics
package_policies:
- name: fleet_server-1
package:
name: fleet_server
- name: system-1
package:
name: system
- name: elastic_agent-1
package:
name: elastic_agent
- name: Agent Policy APM Server
id: agent-policy-apm-server
description: Static agent policy for the APM Server integration
monitoring_enabled:
- logs
- metrics
package_policies:
- name: system-1
package:
name: system
- name: elastic_agent-1
package:
name: elastic_agent
- name: apm-1
package:
name: apm
# See the APM package manifest for a list of possible inputs.
# https://github.com/elastic/apm-server/blob/v8.5.0/apmpackage/apm/manifest.yml#L41-L168
inputs:
- type: apm
vars:
- name: host
value: 0.0.0.0:8200
- name: url
value: http://apm-server:8200
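Once Kibana is up, its status API gives a quick smoke test; a sketch assuming the default port 5601 is published on the host and security is enabled:
```console
$ curl -s -u "elastic:${ELASTIC_PASSWORD}" http://localhost:5601/api/status
```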

View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -0,0 +1,7 @@
ARG ELASTIC_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/logstash/logstash:${ELASTIC_VERSION}
# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json

View File

@ -0,0 +1,7 @@
---
## Default Logstash configuration from Logstash base image.
## https://github.com/elastic/logstash/blob/main/docker/data/logstash/config/logstash-full.yml
#
http.host: 0.0.0.0
node.name: logstash

View File

@ -0,0 +1,31 @@
input {
gelf {
id => "gelf_docker"
type => docker
port => 12201
}
}
filter {
mutate {
remove_field => [ "host" ]
}
json {
source => "message"
target => "jsoncontent" # parsed JSON is kept under this field, preserving nested structure
}
}
## Add your filters / logstash plugins configuration here
output {
elasticsearch {
index => "logs-%{+YYYY.MM.dd}"
# template => "ecs-logstash"
# template_name => "%{container_name}-index-*"
# template_overwrite => true
hosts => "elasticsearch:9200"
user => "logstash_internal"
password => "${LOGSTASH_INTERNAL_PASSWORD}"
}
}
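The `gelf` input above accepts whatever Docker's GELF logging driver ships to UDP port 12201, and the `json` filter parses JSON-formatted messages into `jsoncontent`. A hedged sketch of pointing a container at it (service name and tag are placeholders; `gelf-address` is resolved by the Docker daemon on the host, not inside the container):
```yaml
services:
  my-service:                # placeholder service
    image: alpine:3.18
    command: echo '{"msg":"hello logstash"}'
    logging:
      driver: gelf
      options:
        gelf-address: "udp://localhost:12201"  # host-side address of the Logstash GELF input
        tag: "my-service"
```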

View File

@ -0,0 +1,9 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store
# Ignore Git files
.gitignore

View File

@ -0,0 +1,6 @@
ARG ELASTIC_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}
ENTRYPOINT ["/entrypoint.sh"]

View File

@ -0,0 +1,119 @@
#!/usr/bin/env bash
set -eu
set -o pipefail
source "${BASH_SOURCE[0]%/*}"/lib.sh
# --------------------------------------------------------
# Users declarations
declare -A users_passwords
users_passwords=(
[logstash_internal]="${LOGSTASH_INTERNAL_PASSWORD:-}"
[kibana_system]="${KIBANA_SYSTEM_PASSWORD:-}"
[metricbeat_internal]="${METRICBEAT_INTERNAL_PASSWORD:-}"
[filebeat_internal]="${FILEBEAT_INTERNAL_PASSWORD:-}"
[heartbeat_internal]="${HEARTBEAT_INTERNAL_PASSWORD:-}"
[monitoring_internal]="${MONITORING_INTERNAL_PASSWORD:-}"
[beats_system]="${BEATS_SYSTEM_PASSWORD:-}"
)
declare -A users_roles
users_roles=(
[logstash_internal]='logstash_writer'
[metricbeat_internal]='metricbeat_writer'
[filebeat_internal]='filebeat_writer'
[heartbeat_internal]='heartbeat_writer'
[monitoring_internal]='remote_monitoring_collector'
)
# --------------------------------------------------------
# Roles declarations
declare -A roles_files
roles_files=(
[logstash_writer]='logstash_writer.json'
[metricbeat_writer]='metricbeat_writer.json'
[filebeat_writer]='filebeat_writer.json'
[heartbeat_writer]='heartbeat_writer.json'
)
# --------------------------------------------------------
log 'Waiting for availability of Elasticsearch. This can take several minutes.'
declare -i exit_code=0
wait_for_elasticsearch || exit_code=$?
if ((exit_code)); then
case $exit_code in
6)
suberr 'Could not resolve host. Is Elasticsearch running?'
;;
7)
suberr 'Failed to connect to host. Is Elasticsearch healthy?'
;;
28)
suberr 'Timeout connecting to host. Is Elasticsearch healthy?'
;;
*)
suberr "Connection to Elasticsearch failed. Exit code: ${exit_code}"
;;
esac
exit $exit_code
fi
sublog 'Elasticsearch is running'
log 'Waiting for initialization of built-in users'
wait_for_builtin_users || exit_code=$?
if ((exit_code)); then
suberr 'Timed out waiting for condition'
exit $exit_code
fi
sublog 'Built-in users were initialized'
for role in "${!roles_files[@]}"; do
log "Role '$role'"
declare body_file
body_file="${BASH_SOURCE[0]%/*}/roles/${roles_files[$role]:-}"
if [[ ! -f "${body_file:-}" ]]; then
sublog "No role body found at '${body_file}', skipping"
continue
fi
sublog 'Creating/updating'
ensure_role "$role" "$(<"${body_file}")"
done
for user in "${!users_passwords[@]}"; do
log "User '$user'"
if [[ -z "${users_passwords[$user]:-}" ]]; then
sublog 'No password defined, skipping'
continue
fi
declare -i user_exists=0
user_exists="$(check_user_exists "$user")"
if ((user_exists)); then
sublog 'User exists, setting password'
set_user_password "$user" "${users_passwords[$user]}"
else
if [[ -z "${users_roles[$user]:-}" ]]; then
suberr ' No role defined, skipping creation'
continue
fi
sublog 'User does not exist, creating'
create_user "$user" "${users_passwords[$user]}" "${users_roles[$user]}"
fi
done
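This user/role bootstrap only resets passwords for existing users and updates roles in place, so it is safe to re-run after changing a password or adding a role file. A sketch of re-executing it via the compose setup profile (the `setup` service name and env-file path are assumptions based on this repository's layout):
```console
$ docker compose -f observability/docker-compose.yml --env-file <env-file> --profile setup up setup
```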

View File

@ -0,0 +1,240 @@
#!/usr/bin/env bash
# Log a message.
function log {
echo "[+] $1"
}
# Log a message at a sub-level.
function sublog {
echo "$1"
}
# Log an error.
function err {
echo "[x] $1" >&2
}
# Log an error at a sub-level.
function suberr {
echo "$1" >&2
}
# Poll the 'elasticsearch' service until it responds with HTTP code 200.
function wait_for_elasticsearch {
local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"
local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' "http://${elasticsearch_host}:9200/" )
if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
fi
local -i result=1
local output
# retry for max 300s (60*5s)
for _ in $(seq 1 60); do
local -i exit_code=0
output="$(curl "${args[@]}")" || exit_code=$?
if ((exit_code)); then
result=$exit_code
fi
if [[ "${output: -3}" -eq 200 ]]; then
result=0
break
fi
sleep 5
done
if ((result)) && [[ "${output: -3}" -ne 000 ]]; then
echo -e "\n${output::-3}"
fi
return $result
}
# Poll the Elasticsearch users API until it returns users.
function wait_for_builtin_users {
local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"
local -a args=( '-s' '-D-' '-m15' "http://${elasticsearch_host}:9200/_security/user?pretty" )
if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
fi
local -i result=1
local line
local -i exit_code
local -i num_users
# retry for max 30s (30*1s)
for _ in $(seq 1 30); do
num_users=0
# read exits with a non-zero code if the last read input doesn't end
# with a newline character. The printf without newline that follows the
# curl command ensures that the final input not only contains curl's
# exit code, but causes read to fail so we can capture the return value.
# Ref. https://unix.stackexchange.com/a/176703/152409
while IFS= read -r line || ! exit_code="$line"; do
if [[ "$line" =~ _reserved.+true ]]; then
(( num_users++ ))
fi
done < <(curl "${args[@]}"; printf '%s' "$?")
if ((exit_code)); then
result=$exit_code
fi
# we expect more than just the 'elastic' user in the result
if (( num_users > 1 )); then
result=0
break
fi
sleep 1
done
return $result
}
# Verify that the given Elasticsearch user exists.
function check_user_exists {
local username=$1
local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"
local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}'
"http://${elasticsearch_host}:9200/_security/user/${username}"
)
if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
fi
local -i result=1
local -i exists=0
local output
output="$(curl "${args[@]}")"
if [[ "${output: -3}" -eq 200 || "${output: -3}" -eq 404 ]]; then
result=0
fi
if [[ "${output: -3}" -eq 200 ]]; then
exists=1
fi
if ((result)); then
echo -e "\n${output::-3}"
else
echo "$exists"
fi
return $result
}
# Set password of a given Elasticsearch user.
function set_user_password {
local username=$1
local password=$2
local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"
local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}'
"http://${elasticsearch_host}:9200/_security/user/${username}/_password"
'-X' 'POST'
'-H' 'Content-Type: application/json'
'-d' "{\"password\" : \"${password}\"}"
)
if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
fi
local -i result=1
local output
output="$(curl "${args[@]}")"
if [[ "${output: -3}" -eq 200 ]]; then
result=0
fi
if ((result)); then
echo -e "\n${output::-3}\n"
fi
return $result
}
# Create the given Elasticsearch user.
function create_user {
local username=$1
local password=$2
local role=$3
local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"
local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}'
"http://${elasticsearch_host}:9200/_security/user/${username}"
'-X' 'POST'
'-H' 'Content-Type: application/json'
'-d' "{\"password\":\"${password}\",\"roles\":[\"${role}\"]}"
)
if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
fi
local -i result=1
local output
output="$(curl "${args[@]}")"
if [[ "${output: -3}" -eq 200 ]]; then
result=0
fi
if ((result)); then
echo -e "\n${output::-3}\n"
fi
return $result
}
# Ensure that the given Elasticsearch role is up-to-date, create it if required.
function ensure_role {
local name=$1
local body=$2
local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"
local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}'
"http://${elasticsearch_host}:9200/_security/role/${name}"
'-X' 'POST'
'-H' 'Content-Type: application/json'
'-d' "$body"
)
if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
fi
local -i result=1
local output
output="$(curl "${args[@]}")"
if [[ "${output: -3}" -eq 200 ]]; then
result=0
fi
if ((result)); then
echo -e "\n${output::-3}\n"
fi
return $result
}
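These helpers all wrap Elasticsearch's security API with the same curl pattern, so a failing step is easy to reproduce by hand. A sketch of the request `ensure_role` sends (assuming Elasticsearch is reachable on localhost and the role file path is relative to the setup directory):
```console
$ curl -s -w '%{http_code}' -u "elastic:${ELASTIC_PASSWORD}" \
    -X POST -H 'Content-Type: application/json' \
    -d @roles/heartbeat_writer.json \
    'http://localhost:9200/_security/role/heartbeat_writer'
```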

View File

@ -0,0 +1,19 @@
{
"cluster": [
"manage_ilm",
"manage_index_templates",
"monitor",
"read_pipeline"
],
"indices": [
{
"names": [
"filebeat-*"
],
"privileges": [
"create_doc",
"manage"
]
}
]
}

View File

@ -0,0 +1,18 @@
{
"cluster": [
"manage_ilm",
"manage_index_templates",
"monitor"
],
"indices": [
{
"names": [
"heartbeat-*"
],
"privileges": [
"create_doc",
"manage"
]
}
]
}

View File

@ -0,0 +1,33 @@
{
"cluster": [
"manage_index_templates",
"monitor",
"manage_ilm"
],
"indices": [
{
"names": [
"logs-generic-default",
"logs-*",
"ecs-logstash-*"
],
"privileges": [
"write",
"create",
"create_index",
"manage",
"manage_ilm"
]
},
{
"names": [
"logstash",
"ecs-logstash"
],
"privileges": [
"write",
"manage"
]
}
]
}

View File

@ -0,0 +1,19 @@
{
"cluster": [
"manage_ilm",
"manage_index_templates",
"monitor"
],
"indices": [
{
"names": [
".monitoring-*-mb",
"metricbeat-*"
],
"privileges": [
"create_doc",
"manage"
]
}
]
}
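Taken together, these role files grant each shipper the minimum cluster privileges needed for template and ILM management plus write access to its own indices; `logstash_writer`'s `logs-*` pattern matches the `logs-%{+YYYY.MM.dd}` index used by the pipeline above. Once setup has run, a role can be verified through the same API; a sketch assuming localhost access:
```console
$ curl -s -u "elastic:${ELASTIC_PASSWORD}" 'http://localhost:9200/_security/role/logstash_writer?pretty'
```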

run.sh
View File

@ -5,6 +5,38 @@ usage() {
exit 0
}
build_elk() {
export ELK=observability/elk
export ELK_VERSION=8.7.1
export ELASTICSEARCH_IMAGE=$USER/elasticsearch:prod
export ELK_SETUP_IMAGE=$USER/elk-setup:prod
export LOGSTASH_IMAGE=$USER/logstash:prod
export KIBANA_IMAGE=$USER/kibana:prod
export CURATOR_IMAGE=$USER/curator:prod
export HEARTBEAT_IMAGE=$USER/heartbeat:prod
docker build $ELK/elasticsearch -f $ELK/elasticsearch/Dockerfile --build-arg "ELASTIC_VERSION=$ELK_VERSION" -t $ELASTICSEARCH_IMAGE
docker build $ELK/setup -f $ELK/setup/Dockerfile --build-arg "ELASTIC_VERSION=$ELK_VERSION" -t $ELK_SETUP_IMAGE
docker build $ELK/logstash -f $ELK/logstash/Dockerfile --build-arg "ELASTIC_VERSION=$ELK_VERSION" -t $LOGSTASH_IMAGE
docker build $ELK/kibana -f $ELK/kibana/Dockerfile --build-arg "ELASTIC_VERSION=$ELK_VERSION" -t $KIBANA_IMAGE
docker build $ELK/extensions/curator -f $ELK/extensions/curator/Dockerfile -t $CURATOR_IMAGE
docker build $ELK/extensions/heartbeat -f $ELK/extensions/heartbeat/Dockerfile --build-arg "ELASTIC_VERSION=$ELK_VERSION" -t $HEARTBEAT_IMAGE
}
compose_elk() {
docker compose -f observability/docker-compose.yml --env-file observability/.env.prod --profile setup up -d
docker compose -f observability/docker-compose.yml -f observability/elk/extensions/curator/curator-compose.yml -f observability/elk/extensions/heartbeat/heartbeat-compose.yml --env-file observability/.env.prod up -d
}
down_elk() {
export ELASTICSEARCH_IMAGE=$USER/elasticsearch:prod
export ELK_SETUP_IMAGE=$USER/elk-setup:prod
export LOGSTASH_IMAGE=$USER/logstash:prod
export KIBANA_IMAGE=$USER/kibana:prod
export CURATOR_IMAGE=$USER/curator:prod
export HEARTBEAT_IMAGE=$USER/heartbeat:prod
docker compose -f observability/docker-compose.yml -f observability/elk/extensions/curator/curator-compose.yml -f observability/elk/extensions/heartbeat/heartbeat-compose.yml --env-file observability/.env.prod down
}
domain=
down=
tests=
@ -40,6 +72,9 @@ if [ -n "$domain" ] && [ -n "$down" ]; then
export CLIENT_IMAGE=$USER/browser-client:prod
docker compose -f browser-domain/docker-compose.yml down
;;
'elk')
down_elk
;;
*) exit 1 ;;
esac
elif [ -n "$domain" ] && [ -z "$down" ]; then
@ -101,9 +136,16 @@ elif [ -n "$domain" ] && [ -z "$down" ]; then
export CLIENT_IMAGE=$USER/browser-client:prod
docker compose -f browser-domain/docker-compose.yml up -d
;;
'elk')
build_elk
compose_elk
;;
*) exit 1 ;;
esac
elif [ -n "$down" ]; then
down_elk
export API_IMAGE=$USER/flights-information:prod
docker compose -f flights-domain/docker-compose.yml --env-file flights-domain/.env.prod down
export API_IMAGE=$USER/user-manager:prod
@ -125,6 +167,10 @@ else
docker build screen-domain -f screen-domain/Dockerfile.prod --build-arg "REACT_APP_ORIGIN=$REACT_APP_ORIGIN" -t $USER/screen-client:prod
docker build browser-domain -f browser-domain/Dockerfile.prod -t $USER/browser-client:prod
# ELK
build_elk
compose_elk
export API_IMAGE=$USER/flights-information:prod
docker compose -f flights-domain/docker-compose.yml --env-file flights-domain/.env.prod down
docker compose -f flights-domain/docker-compose.yml --env-file flights-domain/.env.prod up -d