Commit 77c1df1b authored by chenhuan

Initialize
FROM golang:latest
LABEL maintainer="Razil <chenhuan@liaosearch.com>"
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
FROM debian:buster
LABEL maintainer="Colin Wilson colin@wyveo.com"
# Let the container know that there is no tty
ENV DEBIAN_FRONTEND noninteractive
ENV NGINX_VERSION 1.19.2-1~buster
ENV php_conf /etc/php/7.4/fpm/php.ini
ENV fpm_conf /etc/php/7.4/fpm/pool.d/www.conf
ENV COMPOSER_VERSION 1.10.10
# Install Basic Requirements
RUN buildDeps='curl gcc make autoconf libc-dev zlib1g-dev pkg-config' \
&& set -x \
&& apt-get update \
&& apt-get install --no-install-recommends $buildDeps --no-install-suggests -q -y gnupg2 dirmngr wget apt-transport-https lsb-release ca-certificates \
&& \
NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
found=''; \
for server in \
ha.pool.sks-keyservers.net \
hkp://keyserver.ubuntu.com:80 \
hkp://p80.pool.sks-keyservers.net:80 \
pgp.mit.edu \
; do \
echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
apt-key adv --batch --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
done; \
test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
echo "deb http://nginx.org/packages/mainline/debian/ buster nginx" >> /etc/apt/sources.list \
&& wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg \
&& echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list \
&& apt-get update \
&& apt-get install --no-install-recommends --no-install-suggests -q -y \
apt-utils \
nano \
zip \
unzip \
python-pip \
python-setuptools \
git \
libmemcached-dev \
libmemcached11 \
libmagickwand-dev \
nginx=${NGINX_VERSION} \
php7.4-fpm \
php7.4-cli \
php7.4-bcmath \
php7.4-dev \
php7.4-common \
php7.4-json \
php7.4-opcache \
php7.4-readline \
php7.4-mbstring \
php7.4-curl \
php7.4-gd \
php7.4-imagick \
php7.4-mysql \
php7.4-zip \
php7.4-pgsql \
php7.4-intl \
php7.4-xml \
php-pear \
&& pecl -d php_suffix=7.4 install -o -f redis memcached \
&& mkdir -p /run/php \
&& pip install wheel \
&& pip install supervisor supervisor-stdout \
&& echo "#!/bin/sh\nexit 0" > /usr/sbin/policy-rc.d \
&& rm -rf /etc/nginx/conf.d/default.conf \
&& sed -i -e "s/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/g" ${php_conf} \
&& sed -i -e "s/memory_limit\s*=\s*.*/memory_limit = 256M/g" ${php_conf} \
&& sed -i -e "s/upload_max_filesize\s*=\s*2M/upload_max_filesize = 100M/g" ${php_conf} \
&& sed -i -e "s/post_max_size\s*=\s*8M/post_max_size = 100M/g" ${php_conf} \
&& sed -i -e "s/variables_order = \"GPCS\"/variables_order = \"EGPCS\"/g" ${php_conf} \
&& sed -i -e "s/;daemonize\s*=\s*yes/daemonize = no/g" /etc/php/7.4/fpm/php-fpm.conf \
&& sed -i -e "s/;catch_workers_output\s*=\s*yes/catch_workers_output = yes/g" ${fpm_conf} \
&& sed -i -e "s/pm.max_children = 5/pm.max_children = 4/g" ${fpm_conf} \
&& sed -i -e "s/pm.start_servers = 2/pm.start_servers = 3/g" ${fpm_conf} \
&& sed -i -e "s/pm.min_spare_servers = 1/pm.min_spare_servers = 2/g" ${fpm_conf} \
&& sed -i -e "s/pm.max_spare_servers = 3/pm.max_spare_servers = 4/g" ${fpm_conf} \
&& sed -i -e "s/pm.max_requests = 500/pm.max_requests = 200/g" ${fpm_conf} \
&& sed -i -e "s/www-data/nginx/g" ${fpm_conf} \
&& sed -i -e "s/^;clear_env = no$/clear_env = no/" ${fpm_conf} \
&& echo "extension=redis.so" > /etc/php/7.4/mods-available/redis.ini \
&& echo "extension=memcached.so" > /etc/php/7.4/mods-available/memcached.ini \
&& ln -sf /etc/php/7.4/mods-available/redis.ini /etc/php/7.4/fpm/conf.d/20-redis.ini \
&& ln -sf /etc/php/7.4/mods-available/redis.ini /etc/php/7.4/cli/conf.d/20-redis.ini \
&& ln -sf /etc/php/7.4/mods-available/memcached.ini /etc/php/7.4/fpm/conf.d/20-memcached.ini \
&& ln -sf /etc/php/7.4/mods-available/memcached.ini /etc/php/7.4/cli/conf.d/20-memcached.ini \
# Install Composer
&& curl -o /tmp/composer-setup.php https://getcomposer.org/installer \
&& curl -o /tmp/composer-setup.sig https://composer.github.io/installer.sig \
&& php -r "if (hash('SHA384', file_get_contents('/tmp/composer-setup.php')) !== trim(file_get_contents('/tmp/composer-setup.sig'))) { unlink('/tmp/composer-setup.php'); echo 'Invalid installer' . PHP_EOL; exit(1); }" \
&& php /tmp/composer-setup.php --no-ansi --install-dir=/usr/local/bin --filename=composer --version=${COMPOSER_VERSION} \
&& rm -rf /tmp/composer-setup.php /tmp/composer-setup.sig \
# Clean up
&& rm -rf /tmp/pear \
&& apt-get purge -y --auto-remove $buildDeps \
&& apt-get clean \
&& apt-get autoremove \
&& rm -rf /var/lib/apt/lists/*
# Supervisor config
ADD ./supervisord.conf /etc/supervisord.conf
# Override nginx's default config
ADD ./default.conf /etc/nginx/conf.d/default.conf
# Override default nginx welcome page
COPY html /usr/share/nginx/html
# ADD-CRONTABS
COPY ./crontabs/default /var/spool/cron/crontabs/
RUN cat /var/spool/cron/crontabs/default >> /var/spool/cron/crontabs/root
RUN mkdir -p /var/log/cron \
&& touch /var/log/cron/cron.log
VOLUME /var/log/cron
# Add Scripts
ADD ./start.sh /start.sh
EXPOSE 80
CMD ["/start.sh"]
Copyright (c) 2020 wyveo.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
[![Docker Hub; wyveo/nginx-php-fpm](https://img.shields.io/badge/docker%20hub-wyveo%2Fnginx--php--fpm-blue.svg?&logo=docker&style=for-the-badge)](https://hub.docker.com/r/wyveo/nginx-php-fpm/) [![](https://badges.weareopensource.me/docker/pulls/wyveo/nginx-php-fpm?style=for-the-badge)](https://hub.docker.com/r/wyveo/nginx-php-fpm/) [![](https://badges.weareopensource.me/docker/image-size/wyveo/nginx-php-fpm/php74?style=for-the-badge)](https://microbadger.com/images/wyveo/nginx-php-fpm) [![](https://img.shields.io/microbadger/layers/wyveo/nginx-php-fpm/latest.svg?&style=for-the-badge)](https://microbadger.com/images/wyveo/nginx-php-fpm) [![nginx 1.19.2](https://img.shields.io/badge/nginx-1.19.2-brightgreen.svg?&logo=nginx&logoColor=white&style=for-the-badge)](https://nginx.org/en/CHANGES) [![php 7.4.9](https://img.shields.io/badge/php--fpm-7.4.9-blue.svg?&logo=php&logoColor=white&style=for-the-badge)](https://secure.php.net/releases/7_4_9.php) [![License MIT](https://img.shields.io/badge/license-MIT-blue.svg?&style=for-the-badge)](https://github.com/wyveo/nginx-php-fpm/blob/master/LICENSE)
## Introduction
This is a Dockerfile for building a Debian-based container image running nginx, php-fpm 7.4.x / 7.3.x / 7.2.x / 7.1.x / 7.0.x, and Composer.
### Versioning
| Docker Tag | GitHub Release | Nginx Version | PHP Version | Debian Version |
|-----|-------|-----|--------|--------|
| latest | master Branch |1.19.2 | 7.4.9 | buster |
| php74 | php74 Branch |1.19.2 | 7.4.9 | buster |
| php73 | php73 Branch |1.19.2 | 7.3.21 | buster |
| php72 | php72 Branch |1.19.2 | 7.2.33 | buster |
| php71 | php71 Branch |1.19.2 | 7.1.33 | buster |
| php70 | php70 Branch |1.19.2 | 7.0.33 | buster |
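Any of the tags above can be pulled directly from Docker Hub, for example:
```
$ docker pull wyveo/nginx-php-fpm:php73
```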
## Building from source
To build from source, clone the git repo and run docker build:
```
$ git clone https://github.com/wyveo/nginx-php-fpm.git
$ cd nginx-php-fpm
```
followed by
```
$ docker build -t nginx-php-fpm:latest . # PHP 7.4.x
```
or
```
$ docker build -t nginx-php-fpm:php74 . # PHP 7.4.x
```
## Pulling from Docker Hub
```
$ docker pull wyveo/nginx-php-fpm:latest
```
## Running
To run the container:
```
$ sudo docker run -d wyveo/nginx-php-fpm:latest
```
Default web root:
```
/usr/share/nginx/html
```
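To serve your own application, you can publish a port and mount your code over the web root (the host path below is illustrative). Note that the bundled nginx config serves from the `public/` subdirectory of the web root:
```
$ sudo docker run -d -p 8080:80 -v ~/my-app:/usr/share/nginx/html wyveo/nginx-php-fpm:latest
```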
# do daily/weekly/monthly maintenance
# min hour day month weekday command
# * * * * * /usr/bin/php /usr/share/nginx/html/artisan schedule:run >> /dev/null 2>&1
server {
    listen 80; ## listen for ipv4; this line is default and implied
    listen [::]:80 default_server ipv6only=on; ## listen for ipv6

    root /usr/share/nginx/html/public;
    index index.php index.html index.htm;

    # Make site accessible from http://localhost/
    server_name _;

    # Disable sendfile as per https://docs.vagrantup.com/v2/synced-folders/virtualbox.html
    sendfile off;

    # Security - Hide nginx version number in error pages and Server header
    server_tokens off;

    # Add stdout logging
    error_log /dev/stdout info;
    access_log /dev/stdout;

    # Reduce the data that needs to be sent over the network
    gzip on;
    gzip_min_length 10240;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types text/plain text/css text/xml application/json text/javascript application/x-javascript application/xml;
    gzip_disable "MSIE [1-6]\.";

    location / {
        # First attempt to serve request as file, then
        # as directory, then fall back to index.php
        try_files $uri $uri/ /index.php?$query_string;
    }

    # Redirect server error pages to the static page /50x.html
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }

    # Pass the PHP scripts to the FastCGI server listening on a unix socket
    location ~ \.php$ {
        try_files $uri $uri/ /index.php?$query_string;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_pass unix:/run/php/php7.4-fpm.sock;
        fastcgi_index index.php;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_param PATH_INFO $fastcgi_path_info;
    }

    # Cache static assets
    location ~* \.(jpg|jpeg|gif|png|css|js|ico|xml)$ {
        expires 5d;
    }

    # Deny access to . files, for security
    location ~ /\. {
        log_not_found off;
        deny all;
    }
}
/*! normalize.css v3.0.2 | MIT License | git.io/normalize */img,legend{border:0}legend,td,th{padding:0}html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,optgroup,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}pre,textarea{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}table{border-collapse:collapse;border-spacing:0}
.column,.columns,.container,.u-full-width{width:100%;box-sizing:border-box}h1,h2,h3{letter-spacing:-.1rem}body,h6{line-height:1.6}.container{position:relative;max-width:960px;margin:0 auto;padding:0 20px}ol,p,ul{margin-top:0}.column,.columns{float:left}@media (min-width:400px){.container{width:85%;padding:0}}html{font-size:62.5%}body{font-size:1.5em;font-weight:400;font-family:Montserrat,HelveticaNeue,"Helvetica Neue",Helvetica,Arial,sans-serif;color:#222}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:2rem;font-weight:300}h1{font-size:4rem;line-height:1.2}h2{font-size:3.6rem;line-height:1.25}h3{font-size:3rem;line-height:1.3}h4{font-size:2.4rem;line-height:1.35;letter-spacing:-.08rem}h5{font-size:1.8rem;line-height:1.5;letter-spacing:-.05rem}h6{font-size:1.5rem;letter-spacing:0}@media (min-width:550px){.container{width:80%}.column,.columns{margin-left:4%}.column:first-child,.columns:first-child{margin-left:0}.one.column,.one.columns{width:4.66666666667%}.two.columns{width:13.3333333333%}.three.columns{width:22%}.four.columns{width:30.6666666667%}.five.columns{width:39.3333333333%}.six.columns{width:48%}.seven.columns{width:56.6666666667%}.eight.columns{width:65.3333333333%}.nine.columns{width:74%}.ten.columns{width:82.6666666667%}.eleven.columns{width:91.3333333333%}.twelve.columns{width:100%;margin-left:0}.one-third.column{width:30.6666666667%}.two-thirds.column{width:65.3333333333%}.one-half.column{width:48%}.offset-by-one.column,.offset-by-one.columns{margin-left:8.66666666667%}.offset-by-two.column,.offset-by-two.columns{margin-left:17.3333333333%}.offset-by-three.column,.offset-by-three.columns{margin-left:26%}.offset-by-four.column,.offset-by-four.columns{margin-left:34.6666666667%}.offset-by-five.column,.offset-by-five.columns{margin-left:43.3333333333%}.offset-by-six.column,.offset-by-six.columns{margin-left:52%}.offset-by-seven.column,.offset-by-seven.columns{margin-left:60.6666666667%}.offset-by-eight.column,.offset-by-eight.columns{margin-left:69.3333333333%}.offset-by-nine.column,.offset-by-nine.columns{margin-left:78%}.offset-by-ten.column,.offset-by-ten.columns{margin-left:86.6666666667%}.offset-by-eleven.column,.offset-by-eleven.columns{margin-left:95.3333333333%}.offset-by-one-third.column,.offset-by-one-third.columns{margin-left:34.6666666667%}.offset-by-two-thirds.column,.offset-by-two-thirds.columns{margin-left:69.3333333333%}.offset-by-one-half.column,.offset-by-one-half.columns{margin-left:52%}h1{font-size:5rem}h2{font-size:4.2rem}h3{font-size:3.6rem}h4{font-size:3rem}h5{font-size:2.4rem}h6{font-size:1.5rem}}a{color:#1EAEDB}a:hover{color:#0FA0CE}.button,button,input[type=submit],input[type=reset],input[type=button]{display:inline-block;height:38px;padding:0 30px;color:#555;text-align:center;font-size:11px;font-weight:600;line-height:38px;letter-spacing:.1rem;text-transform:uppercase;text-decoration:none;white-space:nowrap;background-color:transparent;border-radius:4px;border:1px solid 
#bbb;cursor:pointer;box-sizing:border-box}.button:focus,.button:hover,button:focus,button:hover,input[type=submit]:focus,input[type=submit]:hover,input[type=reset]:focus,input[type=reset]:hover,input[type=button]:focus,input[type=button]:hover{color:#333;border-color:#888;outline:0}.button.button-primary,button.button-primary,input[type=submit].button-primary,input[type=reset].button-primary,input[type=button].button-primary{color:#FFF;background-color:#33C3F0;border-color:#33C3F0}.button.button-primary:focus,.button.button-primary:hover,button.button-primary:focus,button.button-primary:hover,input[type=submit].button-primary:focus,input[type=submit].button-primary:hover,input[type=reset].button-primary:focus,input[type=reset].button-primary:hover,input[type=button].button-primary:focus,input[type=button].button-primary:hover{color:#FFF;background-color:#1EAEDB;border-color:#1EAEDB}input[type=tel],input[type=url],input[type=password],input[type=email],input[type=number],input[type=search],input[type=text],select,textarea{height:38px;padding:6px 10px;background-color:#fff;border:1px solid #D1D1D1;border-radius:4px;box-shadow:none;box-sizing:border-box}input[type=tel],input[type=url],input[type=password],input[type=email],input[type=number],input[type=search],input[type=text],textarea{-webkit-appearance:none;-moz-appearance:none;appearance:none}textarea{min-height:65px;padding-top:6px;padding-bottom:6px}input[type=tel]:focus,input[type=url]:focus,input[type=password]:focus,input[type=email]:focus,input[type=number]:focus,input[type=search]:focus,input[type=text]:focus,select:focus,textarea:focus{border:1px solid #33C3F0;outline:0}label,legend{display:block;margin-bottom:.5rem;font-weight:600}fieldset{padding:0;border-width:0}input[type=checkbox],input[type=radio]{display:inline}label>.label-body{display:inline-block;margin-left:.5rem;font-weight:400}ul{list-style:circle inside}ol{list-style:decimal inside}ol,ul{padding-left:0}ol ol,ol ul,ul ol,ul ul{margin:1.5rem 0 1.5rem 3rem;font-size:90%}.button,button,li{margin-bottom:1rem}code{padding:.2rem .5rem;margin:0 .2rem;font-size:90%;white-space:nowrap;background:#F1F1F1;border:1px solid #E1E1E1;border-radius:4px}pre>code{display:block;padding:1rem 1.5rem;white-space:pre}td,th{padding:12px 15px;text-align:left;border-bottom:1px solid #E1E1E1}td:first-child,th:first-child{padding-left:0}td:last-child,th:last-child{padding-right:0}fieldset,input,select,textarea{margin-bottom:1.5rem}blockquote,dl,figure,form,ol,p,pre,table,ul{margin-bottom:2.5rem}.u-max-full-width{max-width:100%;box-sizing:border-box}.u-pull-right{float:right}.u-pull-left{float:left}hr{margin-top:3rem;margin-bottom:3.5rem;border-width:0;border-top:1px solid #E1E1E1}.container:after,.row:after,.u-cf{content:"";display:table;clear:both}
<!DOCTYPE html>
<html lang="en">
<head>
<!-- Basic Page Needs
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<meta charset="utf-8">
<title>Docker | wyveo/nginx-php-fpm</title>
<meta name="description" content="">
<meta name="author" content="">
<!-- Mobile Specific Metas
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- FONT
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<link href="https://fonts.googleapis.com/css?family=Montserrat" rel="stylesheet">
<!-- CSS
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<link rel="stylesheet" href="css/normalize.min.css">
<link rel="stylesheet" href="css/skeleton.min.css">
<!-- Favicon
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<link rel="icon" type="image/png" href="images/favicon.png">
</head>
<body>
<!-- Primary Page Layout
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<div class="container">
<div class="row">
<div class="one-half column" style="margin-top: 25%">
<img height="45px" src="data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIGRhdGEtbmFtZT0iTGF5ZXIgMSIgdmlld0JveD0iMCAwIDYyOS4yMiA2OTkuNSI+PHBhdGggZmlsbD0iI2ZmZiIgZD0iTTM0OS4yNSAxOS43NWMtMTkuMDUtMTEtNTAuMjMtMTEtNjkuMjggMGwtMjMzLjgzIDEzNWMtMTkgMTEtMzQuNjQgMzgtMzQuNjQgNjB2MjcwYzAgMjIgMTUuNTkgNDkgMzQuNjQgNjBsMjMzLjg2IDEzNWMxOSAxMSA1MC4yMyAxMSA2OS4yOCAwbDIzMy44My0xMzVjMTktMTEgMzQuNjQtMzggMzQuNjQtNjB2LTI3MGMwLTIyLTE1LjU5LTQ5LTM0LjY0LTYweiIvPjxwYXRoIGZpbGw9Im5vbmUiIHN0cm9rZT0iIzFmMWYxZiIgc3Ryb2tlLW1pdGVybGltaXQ9IjEwIiBzdHJva2Utd2lkdGg9IjIzIiBkPSJNMzQ5LjI1IDE5Ljc1Yy0xOS4wNS0xMS01MC4yMy0xMS02OS4yOCAwbC0yMzMuODMgMTM1Yy0xOSAxMS0zNC42NCAzOC0zNC42NCA2MHYyNzBjMCAyMiAxNS41OSA0OSAzNC42NCA2MGwyMzMuODYgMTM1YzE5IDExIDUwLjIzIDExIDY5LjI4IDBsMjMzLjgzLTEzNWMxOS0xMSAzNC42NC0zOCAzNC42NC02MHYtMjcwYzAtMjItMTUuNTktNDktMzQuNjQtNjB6Ii8+PHBhdGggZD0iTTE1Ni40MSAyNzIuNzJjLTIuOTQtMTYuMzItNy41MS0zNCAxNi0zNGgyMi41M2MyMi44NCAwIDI1Ljc4IDE3LjYzIDI2Ljc2IDI1Ljc5bDI0LjE1IDE0NC42aDJjMy45Mi0zNS4yNiAxOS4yNi0xMzMuNSAyMS4yMi0xNDQgMi4yOC0xNS42NyAzLjkyLTI2LjQ0IDI0LjgxLTI2LjQ0SDMzNWMyMC41NyAwIDIzLjUgMTggMjQuNDggMjYuNDQgMy4yNyAyNS40NiAxOCAxMjAuMTIgMjIuNTIgMTQ0LjI3aDJjMy41OS0zNC42IDE4LjI4LTEzNC40OCAxOS45MS0xNDQuNiAyLjYxLTE2IDMuOTItMjYuMTEgMjQuODEtMjYuMTFoMzJjMTYuMzIgMCAxNiAxNy42MyAxMy4wNSAzMi04LjE2IDM5LjE3LTMxLjY2IDEzOC43Mi0zNC45MiAxNTIuNDMtMy4yNiAxNi01Ljg4IDM3LjU0LTMzLjYyIDM3LjU0aC00MC41OGMtMTQuMzYgMC0yNi40My01Ljg4LTI4LjA2LTI0LjE2LTItMTctMTcuMzEtMTE1Ljg3LTIxLjg3LTE0MWgtMmMtNC4yNSAzNS45LTE3Ljk1IDEyMC4xMS0yMC44OSAxNDEuNjYtMi4yOSAxOC4yNy0xMy4wNiAyMy41LTI4LjA3IDIzLjVoLTQyLjA3Yy0yNC40OCAwLTI4LjcyLTE5LjU5LTMyLjMxLTMzLTMuOTItMTYuODUtMjguMzgtMTMxLjA5LTMyLjk3LTE1NC45MnoiLz48L3N2Zz4=">
<h4>Congratulations!</h4>
<p>You have successfully deployed a <strong>docker</strong> container running our <strong>NGINX</strong> with <strong>PHP-FPM 7.x</strong> image</p>
<p><strong>NGINX: </strong>v<?php echo $_ENV['NGINX_VERSION'] ?><br><strong>PHP-FPM: </strong>v<?php echo phpversion(); ?><br><strong>HOSTNAME: </strong><?php echo gethostname(); ?><br><strong>WEB ROOT: </strong><?php echo $_ENV['DOCUMENT_ROOT'] ?></p>
<em>Thank you for using <a style="text-decoration:none" href="https://wyveo.com" target="_blank">wyveo.com</a></em>
</div>
</div>
</div>
<!-- End Document
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
</body>
</html>
#!/bin/bash
# Match nginx worker_processes to the number of CPUs
procs=$(grep -c ^processor /proc/cpuinfo)
sed -i -e "s/worker_processes 1/worker_processes $procs/" /etc/nginx/nginx.conf
# Always chown webroot for better mounting
chown -Rf nginx:nginx /usr/share/nginx/html
# Start the cron daemon in the background; supervisord must hold the foreground,
# otherwise the command after it would never run and the container would exit with it
/usr/sbin/crond -f -L /var/log/cron/cron.log &
# Start supervisord and services
exec /usr/local/bin/supervisord -n -c /etc/supervisord.conf
[unix_http_server]
file=/tmp/supervisor.sock ; (the path to the socket file)
username=nobody
password=nobody
[supervisord]
logfile=/tmp/supervisord.log ; (main log file;default $CWD/supervisord.log)
logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
logfile_backups=10 ; (num of main logfile rotation backups;default 10)
loglevel=info ; (log level;default info; others: debug,warn,trace)
pidfile=/tmp/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
nodaemon=false ; (start in foreground if true;default false)
minfds=1024 ; (min. avail startup file descriptors;default 1024)
minprocs=200 ; (min. avail process descriptors;default 200)
user=root ; (default is current user, required if root)
; the below section must remain in the config file for RPC
; (supervisorctl/web interface) to work, additional interfaces may be
; added by defining them in separate rpcinterface: sections
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///tmp/supervisor.sock ; use a unix:// URL for a unix socket
[program:php-fpm7]
command=/usr/sbin/php-fpm7.4 --nodaemonize --fpm-config=/etc/php/7.4/fpm/pool.d/www.conf
autostart=true
autorestart=true
priority=5
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
[program:nginx]
command=/usr/sbin/nginx -g "daemon off;"
autostart=true
autorestart=true
priority=10
stdout_events_enabled=true
stderr_events_enabled=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
[eventlistener:stdout]
command = supervisor_stdout
buffer_size = 100
events = PROCESS_LOG
result_handler = supervisor_stdout:event_handler
# Ignore OS artifacts
**/.DS_Store
# Declare files that will always have LF line endings on checkout.
*.sh text eol=lf
<!--
Because we focus purely on integrating the Elastic stack using Docker and not on the individual stack components themselves, we kindly ask our users to submit questions about Elastic products in the Elastic Discussion Forums @ https://discuss.elastic.co/.
General questions regarding this project can be asked in the docker-elk Gitter chat room @ https://gitter.im/deviantony/docker-elk.
-->
### Problem description
<!-- Be as descriptive as possible regarding the encountered issue versus the expected outcome. -->
### Extra information
<!-- Please include the following information in your issue report. -->
#### Stack configuration
<!-- Detail all performed configuration changes, including to Dockerfiles. -->
#### Docker setup
```
<!-- Replace this comment with the full output of the `docker version` command. -->
```
```
<!-- Replace this comment with the full output of the `docker-compose version` command. -->
```
#### Docker logs
```
<!-- Replace this comment with the full output of the `docker-compose logs` command. -->
```
name: CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:

  test-compose:
    name: 'Test suite: Compose'
    # List of supported runners:
    # https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-runners-and-hardware-resources
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      #####################################################
      #                                                   #
      # Install all dependencies required by test suites. #
      #                                                   #
      #####################################################

      - name: Prepare environment
        run: |
          # Install Linux packages
          #
          # List of packages pre-installed in the runner:
          # https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-software
          sudo apt install -y expect

          # Pre-build container images
          docker-compose build

      ########################################################
      #                                                      #
      # Ensure §"Initial setup" of the README remains valid. #
      #                                                      #
      ########################################################

      - name: Set password of every built-in user to 'testpasswd'
        run: |
          # Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files
          sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml
          sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf
          sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml
          sed -i -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' -e 's/\(secret_management.encryption_keys:\)/\1 [test-encrypt]/g' extensions/enterprise-search/config/enterprise-search.yml
          sed -i 's/\(password:\) changeme/\1 testpasswd/g' extensions/apm-server/config/apm-server.yml

          # Run Elasticsearch and wait for its availability
          docker-compose up -d elasticsearch
          source .github/workflows/scripts/lib/testing.sh
          poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" -u 'elastic:changeme'

          # Set passwords
          .github/workflows/scripts/elasticsearch-setup-passwords.exp

      ##########################################################
      #                                                        #
      # Test core components: Elasticsearch, Logstash, Kibana. #
      #                                                        #
      ##########################################################

      - name: Run the stack
        run: docker-compose up -d

      - name: Execute core test suite
        run: .github/workflows/scripts/run-tests-core.sh

      - name: 'debug: Display state and logs (core)'
        # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idif
        # https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions#job-status-check-functions
        if: always()
        run: |
          docker-compose ps
          docker-compose logs elasticsearch
          docker-compose logs logstash
          docker-compose logs kibana
          # next steps don't need Logstash
          docker-compose stop logstash

      ##############################
      #                            #
      # Test supported extensions. #
      #                            #
      ##############################

      #
      # Enterprise Search
      #

      - name: Execute Enterprise Search test suite
        run: |
          # Set mandatory Elasticsearch settings
          sed -i '$ a xpack.security.authc.api_key.enabled: true' elasticsearch/config/elasticsearch.yml

          # Restart Elasticsearch for changes to take effect
          docker-compose restart elasticsearch

          # Run Enterprise Search and execute tests
          docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up -d enterprise-search
          .github/workflows/scripts/run-tests-enterprise-search.sh

          # Revert changes to Elasticsearch configuration
          sed -i '/xpack.security.authc.api_key.enabled: true/d' elasticsearch/config/elasticsearch.yml
          docker-compose restart elasticsearch

      - name: 'debug: Display state and logs (Enterprise Search)'
        if: always()
        run: |
          docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml ps
          docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml logs enterprise-search
          # next steps don't need Enterprise Search
          docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml stop enterprise-search

      #
      # APM Server
      #

      - name: Execute APM Server test suite
        run: |
          docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up -d apm-server
          .github/workflows/scripts/run-tests-apm-server.sh

      - name: 'debug: Display state and logs (APM Server)'
        if: always()
        run: |
          docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml ps
          docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml logs apm-server
          # next steps don't need APM Server
          docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml stop apm-server

      ##############
      #            #
      # Tear down. #
      #            #
      ##############

      - name: Terminate all components
        if: always()
        run: >-
          docker-compose
          -f docker-compose.yml
          -f extensions/enterprise-search/enterprise-search-compose.yml
          -f extensions/apm-server/apm-server-compose.yml
          down -v

  test-swarm:
    name: 'Test suite: Swarm'
    runs-on: ubuntu-latest

    env:
      MODE: swarm

    steps:
      - uses: actions/checkout@v2

      #####################################################
      #                                                   #
      # Install all dependencies required by test suites. #
      #                                                   #
      #####################################################

      - name: Prepare environment
        run: |
          # Install Linux packages
          sudo apt install -y expect

          # Enable Swarm mode
          docker swarm init

      ########################################################
      #                                                      #
      # Ensure §"Initial setup" of the README remains valid. #
      #                                                      #
      ########################################################

      - name: Set password of every built-in user to 'testpasswd'
        run: |
          # Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files
          sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml
          sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf
          sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml

          # Run Elasticsearch and wait for its availability
          docker stack deploy -c ./docker-stack.yml elk
          docker service scale elk_logstash=0 elk_kibana=0
          source .github/workflows/scripts/lib/testing.sh
          poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" -u 'elastic:changeme'

          # Set passwords
          .github/workflows/scripts/elasticsearch-setup-passwords.exp swarm

      ##########################################################
      #                                                        #
      # Test core components: Elasticsearch, Logstash, Kibana. #
      #                                                        #
      ##########################################################

      - name: Run the stack
        run: docker service scale elk_logstash=1 elk_kibana=1

      - name: Execute core test suite
        run: .github/workflows/scripts/run-tests-core.sh swarm

      - name: 'debug: Display state and logs (core)'
        if: always()
        run: |
          docker stack services elk
          docker service logs elk_elasticsearch
          docker service logs elk_kibana
          docker service logs elk_logstash

      ##############
      #            #
      # Tear down. #
      #            #
      ##############

      - name: Terminate all components
        if: always()
        run: docker stack rm elk
name: Documentation

on:
  schedule:
    - cron: '0 0 * * 0' # At 00:00 every Sunday
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:

  markdown-check:
    name: Check Markdown
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Check links
        uses: gaurav-nelson/github-action-markdown-link-check@v1
        with:
          config-file: .github/workflows/mlc_config.json

      - name: Lint
        uses: avto-dev/markdown-lint@v1
        with:
          args: '**/*.md'
          config: .github/workflows/lint/markdown.yaml
default: false # includes/excludes all rules by default
# Heading levels should only increment by one level at a time <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md001>
MD001: true
# Heading style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md003>
MD003: true
# Unordered list style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md004>
MD004: true
# Inconsistent indentation for list items at the same level <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md005>
MD005: true
# Consider starting bulleted lists at the beginning of the line <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md006>
MD006: true
# Unordered list indentation <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md007>
MD007: true
# Trailing spaces <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md009>
MD009: true
# Hard tabs <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md010>
MD010: true
# Reversed link syntax <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md011>
MD011: true
# Multiple consecutive blank lines <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md012>
MD012: true
# Line length <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md013>
MD013:
  line_length: 120
  code_blocks: false
# Dollar signs used before commands without showing output <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md014>
MD014: false
# No space after hash on atx style heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md018>
MD018: true
# Multiple spaces after hash on atx style heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md019>
MD019: true
# No space inside hashes on closed atx style heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md020>
MD020: true
# Multiple spaces inside hashes on closed atx style heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md021>
MD021: true
# Headings should be surrounded by blank lines <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md022>
MD022: true
# Headings must start at the beginning of the line <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md023>
MD023: true
# Multiple headings with the same content <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md024>
MD024:
  allow_different_nesting: true
# Multiple top level headings in the same document <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md025>
MD025: true
# Trailing punctuation in heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md026>
MD026: true
# Multiple spaces after blockquote symbol <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md027>
MD027: true
# Blank line inside blockquote <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md028>
MD028: false
# Ordered list item prefix <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md029>
MD029:
  style: 'one'
# Spaces after list markers <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md030>
MD030: true
# Fenced code blocks should be surrounded by blank lines <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md031>
MD031: true
# Lists should be surrounded by blank lines <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md032>
MD032: true
# Inline HTML <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md033>
MD033: true
# Bare URL used <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md034>
MD034: true
# Horizontal rule style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md035>
MD035:
  style: '---'
# Emphasis used instead of a heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md036>
MD036: true
# Spaces inside emphasis markers <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md037>
MD037: true
# Spaces inside code span elements <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md038>
MD038: true
# Spaces inside link text <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md039>
MD039: true
# Fenced code blocks should have a language specified <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md040>
MD040: true
# First line in file should be a top level heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md041>
MD041: true
# No empty links <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md042>
MD042: true
# Required heading structure <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md043>
MD043: false
# Proper names should have the correct capitalization <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md044>
MD044:
  names:
    - docker-elk
    - Elasticsearch
    - Logstash
    - Kibana
    - Docker
    - Compose
    - macOS
  code_blocks: false
# Images should have alternate text (alt text) <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md045>
MD045: true
# Code block style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md046>
MD046:
  style: fenced
# Files should end with a single newline character <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md047>
MD047: true
# Code fence style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md048>
MD048:
  style: 'backtick'
# Custom rules:
CHANGELOG-RULE-001: true
CHANGELOG-RULE-002: true
CHANGELOG-RULE-003: true
CHANGELOG-RULE-004: true
{
  "ignorePatterns": [
    { "pattern": "^http:\/\/localhost:" }
  ]
}
#!/usr/bin/expect -f
# List of expected users with dummy password
set user "(elastic|apm_system|kibana_system|logstash_system|beats_system|remote_monitoring_user)"
set password "testpasswd"
# Find elasticsearch container id
set MODE [lindex $argv 0]
if { [string match "swarm" $MODE] } {
set cid [exec docker ps -q -f label=com.docker.swarm.service.name=elk_elasticsearch]
} else {
set cid [exec docker ps -q -f label=com.docker.compose.service=elasticsearch]
}
set cmd "docker exec -it $cid bin/elasticsearch-setup-passwords interactive -s -b -u http://localhost:9200"
spawn {*}$cmd
expect {
-re "(E|Ree)nter password for \\\[$user\\\]: " {
send "$password\r"
exp_continue
}
eof
}
lassign [wait] pid spawnid os_error_flag value
exit $value
#!/usr/bin/env bash

# Log a message.
function log {
    echo -e "\n[+] $1\n"
}

# Log an error.
function err {
    echo -e "\n[x] $1\n"
}

# Return the ID of the container running the given service.
function container_id {
    local svc=$1

    local label
    if [[ "${MODE:-}" == "swarm" ]]; then
        label="com.docker.swarm.service.name=elk_${svc}"
    else
        label="com.docker.compose.service=${svc}"
    fi

    local cid

    # retry for max 60s (30*2s)
    for _ in $(seq 1 30); do
        cid="$(docker container ls -aq -f label="$label")"
        if [ -n "$cid" ]; then
            break
        fi

        echo -n '.' >&2
        sleep 2
    done
    echo -e '\n' >&2

    if [ -z "${cid:-}" ]; then
        err "Timed out waiting for creation of container with label ${label}"
        return 1
    fi

    echo "$cid"
}

# Return the IP address at which a service can be reached.
# In Compose mode, returns the container's IP.
# In Swarm mode, returns the IP of the node to ensure traffic enters the routing mesh (ingress).
function service_ip {
    local svc=$1

    local ip

    if [[ "${MODE:-}" == "swarm" ]]; then
        #ingress_net="$(docker network inspect ingress --format '{{ .Id }}')"
        #ip="$(docker service inspect elk_"$svc" --format "{{ range .Endpoint.VirtualIPs }}{{ if eq .NetworkID \"${ingress_net}\" }}{{ .Addr }}{{ end }}{{ end }}" | cut -d/ -f1)"
        node="$(docker node ls --format '{{ .ID }}')"
        ip="$(docker node inspect "$node" --format '{{ .Status.Addr }}')"
        if [ -z "${ip:-}" ]; then
            err "Node ${node} has no IP address"
            return 1
        fi

        echo "$ip"
        return
    fi

    local cid
    cid="$(container_id "$svc")"

    ip="$(docker container inspect "$cid" --format '{{ (index .NetworkSettings.Networks "docker-elk_elk").IPAddress }}')"
    if [ -z "${ip:-}" ]; then
        err "Container ${cid} has no IP address"
        return 1
    fi

    echo "$ip"
}

# Poll the given service at the given URL until it responds with HTTP code 200.
function poll_ready {
    local cid=$1
    local url=$2

    local -a args=( '-s' '-D-' '-m3' '-w' '%{http_code}' "$url" )
    if [ "$#" -ge 3 ]; then
        args+=( "${@:3}" )
    fi

    echo "curl arguments: ${args[*]}"

    local -i result=1
    local output

    # retry for max 180s (36*5s)
    for _ in $(seq 1 36); do
        if [[ $(docker container inspect "$cid" --format '{{ .State.Status}}') == 'exited' ]]; then
            err "Container exited ($(docker container inspect "$cid" --format '{{ .Name }}'))"
            return 1
        fi

        output="$(curl "${args[@]}" || true)"
        if [ "${output: -3}" -eq 200 ]; then
            result=0
            break
        fi

        echo -n 'x' >&2
        sleep 5
    done
    echo -e '\n' >&2

    echo -e "\n${output::-3}"

    return $result
}
#!/usr/bin/env bash
set -eu
set -o pipefail
source "$(dirname "${BASH_SOURCE[0]}")/lib/testing.sh"
cid="$(container_id apm-server)"
ip="$(service_ip apm-server)"
log 'Waiting for readiness of APM Server'
poll_ready "$cid" "http://${ip}:8200/"
#!/usr/bin/env bash
set -eu
set -o pipefail
source "$(dirname "${BASH_SOURCE[0]}")/lib/testing.sh"
cid_es="$(container_id elasticsearch)"
cid_ls="$(container_id logstash)"
cid_kb="$(container_id kibana)"
ip_es="$(service_ip elasticsearch)"
ip_ls="$(service_ip logstash)"
ip_kb="$(service_ip kibana)"
log 'Waiting for readiness of Elasticsearch'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
log 'Waiting for readiness of Logstash'
poll_ready "$cid_ls" "http://${ip_ls}:9600/_node/pipelines/main?pretty"
log 'Waiting for readiness of Kibana'
poll_ready "$cid_kb" "http://${ip_kb}:5601/api/status" -u 'kibana_system:testpasswd'
log 'Creating Logstash index pattern in Kibana'
source .env
curl -X POST -D- "http://${ip_kb}:5601/api/saved_objects/index-pattern" \
-s -w '\n' \
-H 'Content-Type: application/json' \
-H "kbn-version: ${ELK_VERSION}" \
-u elastic:testpasswd \
-d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}'
log 'Searching index pattern via Kibana API'
response="$(curl "http://${ip_kb}:5601/api/saved_objects/_find?type=index-pattern" -s -u elastic:testpasswd)"
echo "$response"
count="$(jq -rn --argjson data "${response}" '$data.total')"
if [[ $count -ne 1 ]]; then
echo "Expected 1 index pattern, got ${count}"
exit 1
fi
log 'Sending message to Logstash TCP input'
echo 'dockerelk' | nc -q0 "$ip_ls" 5000
sleep 1
curl -X POST "http://${ip_es}:9200/_refresh" -u elastic:testpasswd \
-s -w '\n'
log 'Searching message in Elasticsearch'
response="$(curl "http://${ip_es}:9200/_count?q=message:dockerelk&pretty" -s -u elastic:testpasswd)"
echo "$response"
count="$(jq -rn --argjson data "${response}" '$data.count')"
if [[ $count -ne 1 ]]; then
echo "Expected 1 document, got ${count}"
exit 1
fi
#!/usr/bin/env bash
set -eu
set -o pipefail
source "$(dirname "${BASH_SOURCE[0]}")/lib/testing.sh"
cid_es="$(container_id elasticsearch)"
cid_en="$(container_id enterprise-search)"
ip_es="$(service_ip elasticsearch)"
ip_en="$(service_ip enterprise-search)"
log 'Waiting for readiness of Elasticsearch'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
log 'Waiting for readiness of Enterprise Search'
poll_ready "$cid_en" "http://${ip_en}:3002/api/ent/v1/internal/health" -u 'elastic:testpasswd'
log 'Retrieving private key from Elasticsearch'
response="$(curl "http://${ip_es}:9200/.ent-search-actastic-app_search_api_tokens_v2/_search?q=name:private-key" -s -u elastic:testpasswd)"
hits="$(jq -rn --argjson data "${response}" '$data.hits.hits')"
echo "$hits"
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"
if [[ $count -ne 1 ]]; then
echo "Private key not found. Expected 1 result, got ${count}"
exit 1
fi
key="$(jq -rn --argjson data "${hits}" '$data[0]._source.authentication_token')"
log 'Creating App Search engine'
response="$(curl "http://${ip_en}:3002/api/as/v1/engines" -s -d '{"name": "dockerelk"}' -H "Authorization: Bearer ${key}")"
echo "$response"
name="$(jq -rn --argjson data "${response}" '$data.name')"
if [[ $name != 'dockerelk' ]]; then
echo 'Failed to create engine'
exit 1
fi
The MIT License (MIT)
Copyright (c) 2015 Anthony Lapenna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
version: '3.2'

services:
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./elasticsearch/config/elasticsearch.yml
        target: /usr/share/elasticsearch/config/elasticsearch.yml
        read_only: true
      - type: volume
        source: elasticsearch
        target: /usr/share/elasticsearch/data
    ports:
      - "9209:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: hudongtang2020
      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
      discovery.type: single-node
    networks:
      - elk

  logstash:
    build:
      context: logstash/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./logstash/config/logstash.yml
        target: /usr/share/logstash/config/logstash.yml
        read_only: true
      - type: bind
        source: ./logstash/pipeline
        target: /usr/share/logstash/pipeline
        read_only: true
    ports:
      - "5044:5044"
      - "5000:5000/tcp"
      - "5000:5000/udp"
      - "9600:9600"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    build:
      context: kibana/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./kibana/config/kibana.yml
        target: /usr/share/kibana/config/kibana.yml
        read_only: true
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge

volumes:
  elasticsearch:
version: '3.3'

services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.9.2
    ports:
      - "9200:9200"
      - "9300:9300"
    configs:
      - source: elastic_config
        target: /usr/share/elasticsearch/config/elasticsearch.yml
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: changeme
      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
      discovery.type: single-node
      # Force publishing on the 'elk' overlay.
      network.publish_host: _eth0_
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

  logstash:
    image: docker.elastic.co/logstash/logstash:7.9.2
    ports:
      - "5044:5044"
      - "5000:5000"
      - "9600:9600"
    configs:
      - source: logstash_config
        target: /usr/share/logstash/config/logstash.yml
      - source: logstash_pipeline
        target: /usr/share/logstash/pipeline/logstash.conf
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

  kibana:
    image: docker.elastic.co/kibana/kibana:7.9.2
    ports:
      - "5601:5601"
    configs:
      - source: kibana_config
        target: /usr/share/kibana/config/kibana.yml
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

configs:
  elastic_config:
    file: ./elasticsearch/config/elasticsearch.yml
  logstash_config:
    file: ./logstash/config/logstash.yml
  logstash_pipeline:
    file: ./logstash/pipeline/logstash.conf
  kibana_config:
    file: ./kibana/config/kibana.yml

networks:
  elk:
    driver: overlay
ARG ELK_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu
---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
xpack.license.self_generated.type: trial
xpack.security.enabled: false
xpack.monitoring.collection.enabled: true
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
# Extensions
Third-party extensions that enable extra integrations with the Elastic stack.
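Each extension ships a Compose file that is layered on top of the main `docker-compose.yml`; the general invocation pattern, shown here with the APM Server extension, is:
```console
$ docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up
```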
ARG ELK_VERSION
FROM docker.elastic.co/apm/apm-server:${ELK_VERSION}
# APM Server extension
The APM Server receives data from APM agents and transforms them into Elasticsearch documents that can be visualised in
Kibana.
## Usage
To include APM Server in the stack, run Docker Compose from the root of the repository with an additional command line
argument referencing the `apm-server-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up
```
Meanwhile, you can navigate to the **APM** application in Kibana and follow the setup instructions to get started.
## Connecting an agent to APM Server
The most basic configuration to send traces to APM Server is to specify the `SERVICE_NAME` and `SERVER_URL`. Here is an
example Python Flask configuration:
```python
import elasticapm
from elasticapm.contrib.flask import ElasticAPM
from flask import Flask
app = Flask(__name__)
app.config['ELASTIC_APM'] = {
# Set required service name. Allowed characters:
# a-z, A-Z, 0-9, -, _, and space
'SERVICE_NAME': 'PYTHON_FLASK_TEST_APP',
# Set custom APM Server URL (default: http://localhost:8200)
'SERVER_URL': 'http://apm-server:8200',
'DEBUG': True,
}
```
Configuration settings for each supported language are available in the APM documentation: [APM Agents][apm-agents].
## Checking connectivity and importing default APM dashboards
1. On the Kibana home page, click `Add APM` under the _Observability_ panel.
1. Click `Check APM Server status` to confirm the server is up and running.
1. Click `Check agent status` to verify your agent has registered properly.
1. Click `Load Kibana objects` to create an index pattern for APM.
1. Click `Launch APM` to be taken to the APM dashboard.
## See also
[Running APM Server on Docker][apm-docker]
[apm-agents]: https://www.elastic.co/guide/en/apm/get-started/current/components.html#_apm_agents
[apm-docker]: https://www.elastic.co/guide/en/apm/server/current/running-on-docker.html
version: '3.2'

services:
  apm-server:
    build:
      context: extensions/apm-server/
      args:
        ELK_VERSION: $ELK_VERSION
    command:
      # Disable strict permission checking on 'apm-server.yml' configuration file
      # https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
      - --strict.perms=false
    volumes:
      - type: bind
        source: ./extensions/apm-server/config/apm-server.yml
        target: /usr/share/apm-server/apm-server.yml
        read_only: true
    ports:
      - '8200:8200'
    networks:
      - elk
    depends_on:
      - elasticsearch
apm-server:
  host: 0.0.0.0:8200

output:
  elasticsearch:
    hosts: ['http://elasticsearch:9200']
    username: elastic
    password: changeme
FROM bitnami/elasticsearch-curator:5.8.1
USER root
RUN install_packages cron && \
echo \
'* * * * *' \
root \
LC_ALL=C.UTF-8 LANG=C.UTF-8 \
/opt/bitnami/python/bin/curator \
--config=/usr/share/curator/config/curator.yml \
/usr/share/curator/config/delete_log_files_curator.yml \
'>/proc/1/fd/1' '2>/proc/1/fd/2' \
>>/etc/crontab
ENTRYPOINT ["cron"]
CMD ["-f", "-L8"]
# Curator
Elasticsearch Curator helps you curate or manage your indices.
## Usage
If you want to include the Curator extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `curator-compose.yml` file:
```bash
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up
```
This sample setup demonstrates how to run `curator` every minute using `cron`.
All configuration files are available in the `config/` directory.
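To trigger a run manually instead of waiting for `cron`, you can invoke `curator` inside the running container; a sketch, assuming the service name and paths from this extension's Compose file and Dockerfile:
```console
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml exec curator \
    /opt/bitnami/python/bin/curator --config=/usr/share/curator/config/curator.yml /usr/share/curator/config/delete_log_files_curator.yml
```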
## Documentation
[Curator Reference](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html)
# Curator configuration
# https://www.elastic.co/guide/en/elasticsearch/client/curator/current/configfile.html

client:
  hosts:
    - elasticsearch
  port: 9200
  http_auth: elastic:changeme

logging:
  loglevel: INFO
  logformat: default
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices. Find which to delete by first limiting the list to
      logstash- prefixed indices. Then further filter those to prevent deletion
      of anything less than the number of days specified by unit_count.
      Ignore the error if the filter does not result in an actionable list of
      indices (ignore_empty_list) and exit cleanly.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
      - filtertype: pattern
        kind: prefix
        value: logstash-
      - filtertype: age
        source: creation_date
        direction: older
        unit: days
        unit_count: 2
version: '3.2'

services:
  curator:
    build:
      context: extensions/curator/
    init: true
    volumes:
      - type: bind
        source: ./extensions/curator/config/curator.yml
        target: /usr/share/curator/config/curator.yml
        read_only: true
      - type: bind
        source: ./extensions/curator/config/delete_log_files_curator.yml
        target: /usr/share/curator/config/delete_log_files_curator.yml
        read_only: true
    networks:
      - elk
    depends_on:
      - elasticsearch
ARG ELK_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/enterprise-search/enterprise-search:${ELK_VERSION}
# Enterprise Search extension
Elastic Enterprise Search is a suite of products for search applications backed by the Elastic Stack.
## Requirements
* 2 GB of free RAM, on top of the resources required by the other stack components and extensions.
Enterprise Search exposes the TCP port `3002` for its Web UI and API.
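Once the server is running, reachability can be checked against the internal health endpoint used by this repository's test suite (credentials assume the stack's default `elastic` password):
```console
$ curl -u elastic:changeme http://localhost:3002/api/ent/v1/internal/health
```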
## Usage
### Generate an encryption key
Enterprise Search requires one or more [encryption keys][enterprisesearch-encryption] to be configured before the
initial startup. Failing to do so prevents the server from starting.
Encryption keys can contain any series of characters. Elastic recommends using 256-bit keys for optimal security.
Those encryption keys must be added manually to the [`config/enterprise-search.yml`][config-enterprisesearch] file. By
default, the list of encryption keys is empty and must be populated using one of the following formats:
```yaml
secret_management.encryption_keys:
  - my_first_encryption_key
  - my_second_encryption_key
  - ...
```
```yaml
secret_management.encryption_keys: [my_first_encryption_key, my_second_encryption_key, ...]
```
> :information_source: To generate a strong encryption key, for example using the AES-256 cipher, you can use the
> OpenSSL utility or any other online/offline tool of your choice:
>
> ```console
> $ openssl enc -aes-256 -P
>
> enter aes-256-cbc encryption password: <a strong password>
> Verifying - enter aes-256-cbc encryption password: <repeat your strong password>
> ...
>
> key=<generated AES key>
> ```
### Enable Elasticsearch's API key service
Enterprise Search requires Elasticsearch's built-in [API key service][es-security] to be enabled in order to start.
Unless Elasticsearch is configured to enable TLS on the HTTP interface (disabled by default), this service is disabled
by default.
To enable it, modify the Elasticsearch configuration file in [`elasticsearch/config/elasticsearch.yml`][config-es] and
add the following setting:
```yaml
xpack.security.authc.api_key.enabled: true
```
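After editing the file, restart Elasticsearch for the setting to take effect, as the CI workflow in this repository does:
```console
$ docker-compose restart elasticsearch
```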
### Start the server
To include Enterprise Search in the stack, run Docker Compose from the root of the repository with an additional command
line argument referencing the `enterprise-search-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up
```
Allow a few minutes for the stack to start, then open your web browser at the address <http://localhost:3002> to see the
Enterprise Search home page.
Enterprise Search is configured on first boot with the following default credentials:
* user: *enterprise_search*
* password: *changeme*
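You can also confirm readiness from the command line by polling the same health endpoint this repository's test suite
uses (a sketch, assuming the default credentials above):

```console
$ curl -u enterprise_search:changeme -D- 'http://localhost:3002/api/ent/v1/internal/health'
```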
## Security
The Enterprise Search password is defined inside the Compose file via the `ENT_SEARCH_DEFAULT_PASSWORD` environment
variable. We highly recommend replacing the default password with a more secure one.
To do so, change the value of the `ENT_SEARCH_DEFAULT_PASSWORD` environment variable inside the Compose file **before
the first boot**:
```yaml
enterprise-search:
environment:
ENT_SEARCH_DEFAULT_PASSWORD: {{some strong password}}
```
> :warning: The default Enterprise Search password can only be set during the initial boot. Once the password is
> persisted in Elasticsearch, it can only be changed via the Elasticsearch API.
For more information, please refer to [User Management and Security][enterprisesearch-security].
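As a sketch of what such a change could look like once the password has been persisted (assumes X-Pack security is
enabled and the `elastic` superuser credentials; the user name matches the default above):

```console
$ curl -u elastic:changeme -X POST 'http://localhost:9200/_security/user/enterprise_search/_password' \
    -H 'Content-Type: application/json' \
    -d '{"password": "a-new-strong-password"}'
```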
## Configuring Enterprise Search
The Enterprise Search configuration is stored in [`config/enterprise-search.yml`][config-enterprisesearch]. You can
modify this file using the [Default Enterprise Search configuration][enterprisesearch-config] as a reference.
You can also specify the options you want to override by setting environment variables inside the Compose file:
```yaml
enterprise-search:
environment:
ent_search.auth.source: standard
worker.threads: '6'
```
Any change to the Enterprise Search configuration requires a restart of the Enterprise Search container:
```console
$ docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml restart enterprise-search
```
Please refer to the following documentation page for more details about how to configure Enterprise Search inside a
Docker container: [Running Enterprise Search Using Docker][enterprisesearch-docker].
## See also
[Enterprise Search documentation][enterprisesearch-docs]
[config-enterprisesearch]: ./config/enterprise-search.yml
[enterprisesearch-encryption]: https://www.elastic.co/guide/en/enterprise-search/current/encryption-keys.html
[enterprisesearch-security]: https://www.elastic.co/guide/en/workplace-search/current/workplace-search-security.html
[enterprisesearch-config]: https://www.elastic.co/guide/en/enterprise-search/current/configuration.html
[enterprisesearch-docker]: https://www.elastic.co/guide/en/enterprise-search/current/docker.html
[enterprisesearch-docs]: https://www.elastic.co/guide/en/enterprise-search/current/index.html
[es-security]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#api-key-service-settings
[config-es]: ../../elasticsearch/config/elasticsearch.yml
---
## Enterprise Search core configuration
## https://www.elastic.co/guide/en/enterprise-search/current/configuration.html
#
## --------------------- REQUIRED ---------------------
# Encryption keys to protect application secrets.
secret_management.encryption_keys:
# add encryption keys below
#- add encryption keys here
## ----------------------------------------------------
# IP address Enterprise Search listens on
ent_search.listen_host: 0.0.0.0
# URL at which users reach Enterprise Search
ent_search.external_url: http://localhost:3002
# Elasticsearch URL and credentials
elasticsearch.host: http://elasticsearch:9200
elasticsearch.username: elastic
elasticsearch.password: changeme
# Allow Enterprise Search to modify Elasticsearch settings. Used to enable auto-creation of Elasticsearch indexes.
allow_es_settings_modification: true
version: '3.2'
services:
enterprise-search:
build:
context: extensions/enterprise-search/
args:
ELK_VERSION: $ELK_VERSION
volumes:
- type: bind
source: ./extensions/enterprise-search/config/enterprise-search.yml
target: /usr/share/enterprise-search/config/enterprise-search.yml
read_only: true
environment:
JAVA_OPTS: -Xmx2g -Xms2g
ENT_SEARCH_DEFAULT_PASSWORD: changeme
ports:
- '3002:3002'
networks:
- elk
depends_on:
- elasticsearch
# uses ONBUILD instructions described here:
# https://github.com/gliderlabs/logspout/tree/master/custom
FROM gliderlabs/logspout:master
ENV SYSLOG_FORMAT rfc3164
# Logspout extension
Logspout collects all Docker logs using the Docker logs API, and forwards them to Logstash without any additional
configuration.
## Usage
If you want to include the Logspout extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `logspout-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml up
```
In your Logstash pipeline configuration, enable the `udp` input and set the input codec to `json`:
```logstash
input {
udp {
port => 5000
codec => json
}
}
```
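To check the whole chain end to end, you could start a short-lived container and then search for its output in
Elasticsearch (a rough sketch; the index pattern and credentials below are illustrative and depend on your pipeline's
output configuration):

```console
$ docker run --rm alpine echo 'hello from logspout'
$ curl -u elastic:changeme 'http://localhost:9200/logstash-*/_search?q=logspout&pretty'
```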
## Documentation
<https://github.com/looplab/logspout-logstash>
#!/bin/sh
# unmodified from:
# https://github.com/gliderlabs/logspout/blob/67ee3831cbd0594361bb3381380c65bdbeb3c20f/custom/build.sh
set -e
apk add --update go git mercurial build-base
mkdir -p /go/src/github.com/gliderlabs
cp -r /src /go/src/github.com/gliderlabs/logspout
cd /go/src/github.com/gliderlabs/logspout
export GOPATH=/go
go get
go build -ldflags "-X main.Version=$1" -o /bin/logspout
apk del go git mercurial build-base
rm -rf /go /var/cache/apk/* /root/.glide
# backwards compatibility
ln -fs /tmp/docker.sock /var/run/docker.sock
version: '3.2'
services:
logspout:
build:
context: extensions/logspout
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
ROUTE_URIS: logstash://logstash:5000
LOGSTASH_TAGS: docker-elk
networks:
- elk
depends_on:
- logstash
restart: on-failure
package main
// installs the Logstash adapter for Logspout, and required dependencies
// https://github.com/looplab/logspout-logstash
import (
_ "github.com/looplab/logspout-logstash"
_ "github.com/gliderlabs/logspout/transports/udp"
_ "github.com/gliderlabs/logspout/transports/tcp"
)
ARG ELK_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/kibana/kibana:${ELK_VERSION}
# Add your kibana plugins setup here
# Example: RUN kibana-plugin install <name|url>
---
## Default Kibana configuration from Kibana base image.
## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js
#
server.name: kibana
server.host: 0.0.0.0
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
## X-Pack security credentials
#
elasticsearch.username: elastic
elasticsearch.password: hudongtang2020
ARG ELK_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}
# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json
---
## Default Logstash configuration from Logstash base image.
## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
#
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
## X-Pack security credentials
#
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: hudongtang2020
input {
  beats {
    port => 5000
  }
}
filter {
  # Shift @timestamp by 8 hours (UTC+8) via a temporary 'timestamp' field.
  ruby {
    code => "event.set('timestamp', event.get('@timestamp').time.localtime + 8*60*60)"
  }
  ruby {
    code => "event.set('@timestamp', event.get('timestamp'))"
  }
  # Drop Beats metadata and the temporary field; they are not needed downstream.
  mutate {
    remove_field => ["@version", "log", "input", "fields", "tags", "host", "agent", "ecs", "timestamp"]
  }
}
## Add your filters / logstash plugins configuration here
output {
  if [json][http_activity_id] != "" {
    elasticsearch {
      hosts    => "elasticsearch:9200"
      user     => "elastic"
      password => "hudongtang2020"
      index    => "visit-log-%{+YYYYMM}" # set the index name
    }
  }
}
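# A quick way to check that events are reaching the indices defined above (a sketch; assumes the `9209:9200` port
# mapping and the `elastic` password from this repository's docker-compose.yml):
#
#   curl -u elastic:hudongtang2020 'http://localhost:9209/visit-log-*/_count?pretty'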
# Ignore OS artifacts
**/.DS_Store
ELK_VERSION=7.5.1
# Declare files that will always have LF line endings on checkout.
*.sh text eol=lf
<!--
Because we focus purely on integrating the Elastic stack using Docker and not on the individual stack components themselves, we kindly ask our users to submit questions about Elastic products in the Elastic Discussion Forums @ https://discuss.elastic.co/.
General questions regarding this project can be asked in the docker-elk Gitter chat room @ https://gitter.im/deviantony/docker-elk.
-->
### Problem description
<!-- Be as descriptive as possible regarding the encountered issue versus the expected outcome. -->
### Extra information
<!-- Please include the following information in your issue report. -->
#### Stack configuration
<!-- Detail all performed configuration changes, including to Dockerfiles. -->
#### Docker setup
```
<!-- Replace this comment with the full output of the `docker version` command. -->
```
```
<!-- Replace this comment with the full output of the `docker-compose version` command. -->
```
#### Docker logs
```
<!-- Replace this comment with the full output of the `docker-compose logs` command. -->
```
name: CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
test-compose:
name: 'Test suite: Compose'
# List of supported runners:
# https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-runners-and-hardware-resources
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
#####################################################
# #
# Install all dependencies required by test suites. #
# #
#####################################################
- name: Prepare environment
run: |
# Install Linux packages
#
# List of packages pre-installed in the runner:
# https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-software
sudo apt install -y expect
# Pre-build container images
docker-compose build
########################################################
# #
# Ensure §"Initial setup" of the README remains valid. #
# #
########################################################
- name: Set password of every built-in user to 'testpasswd'
run: |
# Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files
sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml
sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf
sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml
sed -i -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' -e 's/\(secret_management.encryption_keys:\)/\1 [test-encrypt]/g' extensions/enterprise-search/config/enterprise-search.yml
sed -i 's/\(password:\) changeme/\1 testpasswd/g' extensions/apm-server/config/apm-server.yml
# Run Elasticsearch and wait for its availability
docker-compose up -d elasticsearch
source .github/workflows/scripts/lib/testing.sh
poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" -u 'elastic:changeme'
# Set passwords
.github/workflows/scripts/elasticsearch-setup-passwords.exp
##########################################################
# #
# Test core components: Elasticsearch, Logstash, Kibana. #
# #
##########################################################
- name: Run the stack
run: docker-compose up -d
- name: Execute core test suite
run: .github/workflows/scripts/run-tests-core.sh
- name: 'debug: Display state and logs (core)'
# https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idif
# https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions#job-status-check-functions
if: always()
run: |
docker-compose ps
docker-compose logs elasticsearch
docker-compose logs logstash
docker-compose logs kibana
# next steps don't need Logstash
docker-compose stop logstash
##############################
# #
# Test supported extensions. #
# #
##############################
#
# Enterprise Search
#
- name: Execute Enterprise Search test suite
run: |
# Set mandatory Elasticsearch settings
sed -i '$ a xpack.security.authc.api_key.enabled: true' elasticsearch/config/elasticsearch.yml
# Restart Elasticsearch for changes to take effect
docker-compose restart elasticsearch
# Run Enterprise Search and execute tests
docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up -d enterprise-search
.github/workflows/scripts/run-tests-enterprise-search.sh
# Revert changes to Elasticsearch configuration
sed -i '/xpack.security.authc.api_key.enabled: true/d' elasticsearch/config/elasticsearch.yml
docker-compose restart elasticsearch
- name: 'debug: Display state and logs (Enterprise Search)'
if: always()
run: |
docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml ps
docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml logs enterprise-search
# next steps don't need Enterprise Search
docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml stop enterprise-search
#
# APM Server
#
- name: Execute APM Server test suite
run: |
docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up -d apm-server
.github/workflows/scripts/run-tests-apm-server.sh
- name: 'debug: Display state and logs (APM Server)'
if: always()
run: |
docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml ps
docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml logs apm-server
# next steps don't need APM Server
docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml stop apm-server
##############
# #
# Tear down. #
# #
##############
- name: Terminate all components
if: always()
run: >-
docker-compose
-f docker-compose.yml
-f extensions/enterprise-search/enterprise-search-compose.yml
-f extensions/apm-server/apm-server-compose.yml
down -v
test-swarm:
name: 'Test suite: Swarm'
runs-on: ubuntu-latest
env:
MODE: swarm
steps:
- uses: actions/checkout@v2
#####################################################
# #
# Install all dependencies required by test suites. #
# #
#####################################################
- name: Prepare environment
run: |
# Install Linux packages
sudo apt install -y expect
# Enable Swarm mode
docker swarm init
########################################################
# #
# Ensure §"Initial setup" of the README remains valid. #
# #
########################################################
- name: Set password of every built-in user to 'testpasswd'
run: |
# Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files
sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml
sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf
sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml
# Run Elasticsearch and wait for its availability
docker stack deploy -c ./docker-stack.yml elk
docker service scale elk_logstash=0 elk_kibana=0
source .github/workflows/scripts/lib/testing.sh
poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" -u 'elastic:changeme'
# Set passwords
.github/workflows/scripts/elasticsearch-setup-passwords.exp swarm
##########################################################
# #
# Test core components: Elasticsearch, Logstash, Kibana. #
# #
##########################################################
- name: Run the stack
run: docker service scale elk_logstash=1 elk_kibana=1
- name: Execute core test suite
run: .github/workflows/scripts/run-tests-core.sh swarm
- name: 'debug: Display state and logs (core)'
if: always()
run: |
docker stack services elk
docker service logs elk_elasticsearch
docker service logs elk_kibana
docker service logs elk_logstash
##############
# #
# Tear down. #
# #
##############
- name: Terminate all components
if: always()
run: docker stack rm elk
name: Documentation
on:
schedule:
- cron: '0 0 * * 0' # At 00:00 every Sunday
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
markdown-check:
name: Check Markdown
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Check links
uses: gaurav-nelson/github-action-markdown-link-check@v1
with:
config-file: .github/workflows/mlc_config.json
- name: Lint
uses: avto-dev/markdown-lint@v1
with:
args: '**/*.md'
config: .github/workflows/lint/markdown.yaml
default: false # includes/excludes all rules by default
# Heading levels should only increment by one level at a time <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md001>
MD001: true
# Heading style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md003>
MD003: true
# Unordered list style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md004>
MD004: true
# Inconsistent indentation for list items at the same level <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md005>
MD005: true
# Consider starting bulleted lists at the beginning of the line <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md006>
MD006: true
# Unordered list indentation <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md007>
MD007: true
# Trailing spaces <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md009>
MD009: true
# Hard tabs <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md010>
MD010: true
# Reversed link syntax <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md011>
MD011: true
# Multiple consecutive blank lines <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md012>
MD012: true
# Line length <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md013>
MD013:
line_length: 120
code_blocks: false
# Dollar signs used before commands without showing output <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md014>
MD014: false
# No space after hash on atx style heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md018>
MD018: true
# Multiple spaces after hash on atx style heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md019>
MD019: true
# No space inside hashes on closed atx style heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md020>
MD020: true
# Multiple spaces inside hashes on closed atx style heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md021>
MD021: true
# Headings should be surrounded by blank lines <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md022>
MD022: true
# Headings must start at the beginning of the line <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md023>
MD023: true
# Multiple headings with the same content <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md024>
MD024:
allow_different_nesting: true
# Multiple top level headings in the same document <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md025>
MD025: true
# Trailing punctuation in heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md026>
MD026: true
# Multiple spaces after blockquote symbol <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md027>
MD027: true
# Blank line inside blockquote <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md028>
MD028: false
# Ordered list item prefix <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md029>
MD029:
style: 'one'
# Spaces after list markers <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md030>
MD030: true
# Fenced code blocks should be surrounded by blank lines <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md031>
MD031: true
# Lists should be surrounded by blank lines <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md032>
MD032: true
# Inline HTML <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md033>
MD033: true
# Bare URL used <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md034>
MD034: true
# Horizontal rule style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md035>
MD035:
style: '---'
# Emphasis used instead of a heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md036>
MD036: true
# Spaces inside emphasis markers <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md037>
MD037: true
# Spaces inside code span elements <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md038>
MD038: true
# Spaces inside link text <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md039>
MD039: true
# Fenced code blocks should have a language specified <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md040>
MD040: true
# First line in file should be a top level heading <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md041>
MD041: true
# No empty links <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md042>
MD042: true
# Required heading structure <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md043>
MD043: false
# Proper names should have the correct capitalization <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md044>
MD044:
names:
- docker-elk
- Elasticsearch
- Logstash
- Kibana
- Docker
- Compose
- macOS
code_blocks: false
# Images should have alternate text (alt text) <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md045>
MD045: true
# Code block style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md046>
MD046:
style: fenced
# Files should end with a single newline character <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md047>
MD047: true
# Code fence style <https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md048>
MD048:
style: 'backtick'
# Custom rules:
CHANGELOG-RULE-001: true
CHANGELOG-RULE-002: true
CHANGELOG-RULE-003: true
CHANGELOG-RULE-004: true
{
"ignorePatterns": [
{ "pattern": "^http:\/\/localhost:" }
]
}
#!/usr/bin/expect -f
# List of expected users with dummy password
set user "(elastic|apm_system|kibana_system|logstash_system|beats_system|remote_monitoring_user)"
set password "testpasswd"
# Find elasticsearch container id
set MODE [lindex $argv 0]
if { [string match "swarm" $MODE] } {
set cid [exec docker ps -q -f label=com.docker.swarm.service.name=elk_elasticsearch]
} else {
set cid [exec docker ps -q -f label=com.docker.compose.service=elasticsearch]
}
set cmd "docker exec -it $cid bin/elasticsearch-setup-passwords interactive -s -b -u http://localhost:9200"
spawn {*}$cmd
expect {
-re "(E|Ree)nter password for \\\[$user\\\]: " {
send "$password\r"
exp_continue
}
eof
}
lassign [wait] pid spawnid os_error_flag value
exit $value
#!/usr/bin/env bash
# Log a message.
function log {
echo -e "\n[+] $1\n"
}
# Log an error.
function err {
echo -e "\n[x] $1\n"
}
# Return the ID of the container running the given service.
function container_id {
local svc=$1
local label
if [[ "${MODE:-}" == "swarm" ]]; then
label="com.docker.swarm.service.name=elk_${svc}"
else
label="com.docker.compose.service=${svc}"
fi
local cid
# retry for max 60s (30*2s)
for _ in $(seq 1 30); do
cid="$(docker container ls -aq -f label="$label")"
if [ -n "$cid" ]; then
break
fi
echo -n '.' >&2
sleep 2
done
echo -e '\n' >&2
if [ -z "${cid:-}" ]; then
err "Timed out waiting for creation of container with label ${label}"
return 1
fi
echo "$cid"
}
# Return the IP address at which a service can be reached.
# In Compose mode, returns the container's IP.
# In Swarm mode, returns the IP of the node to ensure traffic enters the routing mesh (ingress).
function service_ip {
local svc=$1
local ip
if [[ "${MODE:-}" == "swarm" ]]; then
#ingress_net="$(docker network inspect ingress --format '{{ .Id }}')"
#ip="$(docker service inspect elk_"$svc" --format "{{ range .Endpoint.VirtualIPs }}{{ if eq .NetworkID \"${ingress_net}\" }}{{ .Addr }}{{ end }}{{ end }}" | cut -d/ -f1)"
node="$(docker node ls --format '{{ .ID }}')"
ip="$(docker node inspect "$node" --format '{{ .Status.Addr }}')"
if [ -z "${ip:-}" ]; then
err "Node ${node} has no IP address"
return 1
fi
echo "$ip"
return
fi
local cid
cid="$(container_id "$svc")"
ip="$(docker container inspect "$cid" --format '{{ (index .NetworkSettings.Networks "docker-elk_elk").IPAddress }}')"
if [ -z "${ip:-}" ]; then
err "Container ${cid} has no IP address"
return 1
fi
echo "$ip"
}
# Poll the given service at the given port:/path until it responds with HTTP code 200.
function poll_ready {
local cid=$1
local url=$2
local -a args=( '-s' '-D-' '-m3' '-w' '%{http_code}' "$url" )
if [ "$#" -ge 3 ]; then
args+=( ${@:3} )
fi
echo "curl arguments: ${args[*]}"
local -i result=1
local output
# retry for max 180s (36*5s)
for _ in $(seq 1 36); do
if [[ $(docker container inspect "$cid" --format '{{ .State.Status}}') == 'exited' ]]; then
err "Container exited ($(docker container inspect "$cid" --format '{{ .Name }}'))"
return 1
fi
output="$(curl "${args[@]}" || true)"
if [ "${output: -3}" -eq 200 ]; then
result=0
break
fi
echo -n 'x' >&2
sleep 5
done
echo -e '\n' >&2
echo -e "\n${output::-3}"
return $result
}
#!/usr/bin/env bash
set -eu
set -o pipefail
source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh"
cid="$(container_id apm-server)"
ip="$(service_ip apm-server)"
log 'Waiting for readiness of APM Server'
poll_ready "$cid" "http://${ip}:8200/"
#!/usr/bin/env bash
set -eu
set -o pipefail
source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh"
cid_es="$(container_id elasticsearch)"
cid_ls="$(container_id logstash)"
cid_kb="$(container_id kibana)"
ip_es="$(service_ip elasticsearch)"
ip_ls="$(service_ip logstash)"
ip_kb="$(service_ip kibana)"
log 'Waiting for readiness of Elasticsearch'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
log 'Waiting for readiness of Logstash'
poll_ready "$cid_ls" "http://${ip_ls}:9600/_node/pipelines/main?pretty"
log 'Waiting for readiness of Kibana'
poll_ready "$cid_kb" "http://${ip_kb}:5601/api/status" -u 'kibana_system:testpasswd'
log 'Creating Logstash index pattern in Kibana'
source .env
curl -X POST -D- "http://${ip_kb}:5601/api/saved_objects/index-pattern" \
-s -w '\n' \
-H 'Content-Type: application/json' \
-H "kbn-version: ${ELK_VERSION}" \
-u elastic:testpasswd \
-d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}'
log 'Searching index pattern via Kibana API'
response="$(curl "http://${ip_kb}:5601/api/saved_objects/_find?type=index-pattern" -s -u elastic:testpasswd)"
echo "$response"
count="$(jq -rn --argjson data "${response}" '$data.total')"
if [[ $count -ne 1 ]]; then
echo "Expected 1 index pattern, got ${count}"
exit 1
fi
log 'Sending message to Logstash TCP input'
echo 'dockerelk' | nc -q0 "$ip_ls" 5000
sleep 1
curl -X POST "http://${ip_es}:9200/_refresh" -u elastic:testpasswd \
-s -w '\n'
log 'Searching message in Elasticsearch'
response="$(curl "http://${ip_es}:9200/_count?q=message:dockerelk&pretty" -s -u elastic:testpasswd)"
echo "$response"
count="$(jq -rn --argjson data "${response}" '$data.count')"
if [[ $count -ne 1 ]]; then
echo "Expected 1 document, got ${count}"
exit 1
fi
#!/usr/bin/env bash
set -eu
set -o pipefail
source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh"
cid_es="$(container_id elasticsearch)"
cid_en="$(container_id enterprise-search)"
ip_es="$(service_ip elasticsearch)"
ip_en="$(service_ip enterprise-search)"
log 'Waiting for readiness of Elasticsearch'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
log 'Waiting for readiness of Enterprise Search'
poll_ready "$cid_en" "http://${ip_en}:3002/api/ent/v1/internal/health" -u 'elastic:testpasswd'
log 'Retrieving private key from Elasticsearch'
response="$(curl "http://${ip_es}:9200/.ent-search-actastic-app_search_api_tokens_v2/_search?q=name:private-key" -s -u elastic:testpasswd)"
hits="$(jq -rn --argjson data "${response}" '$data.hits.hits')"
echo "$hits"
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"
if [[ $count -ne 1 ]]; then
echo "Private key not found. Expected 1 result, got ${count}"
exit 1
fi
key="$(jq -rn --argjson data "${hits}" '$data[0]._source.authentication_token')"
log 'Creating App Search engine'
response="$(curl "http://${ip_en}:3002/api/as/v1/engines" -s -d '{"name": "dockerelk"}' -H "Authorization: Bearer ${key}")"
echo "$response"
name="$(jq -rn --argjson data "${response}" '$data.name')"
if [[ $name != 'dockerelk' ]]; then
echo 'Failed to create engine'
exit 1
fi
The MIT License (MIT)
Copyright (c) 2015 Anthony Lapenna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
version: '3.2'
services:
elasticsearch:
build:
context: elasticsearch/
args:
ELK_VERSION: 7.5.1
volumes:
- type: bind
source: ./elasticsearch/config/elasticsearch.yml
target: /usr/share/elasticsearch/config/elasticsearch.yml
read_only: true
- type: volume
source: elasticsearch
target: /usr/share/elasticsearch/data
ports:
- "9209:9200"
- "9300:9300"
environment:
ES_JAVA_OPTS: "-Xmx256m -Xms256m"
ELASTIC_PASSWORD: hudongtang2020
# Use single node discovery in order to disable production mode and avoid bootstrap checks.
# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
discovery.type: single-node
networks:
- elk
logstash:
build:
context: logstash/
args:
ELK_VERSION: 7.5.1
volumes:
- type: bind
source: ./logstash/config/logstash.yml
target: /usr/share/logstash/config/logstash.yml
read_only: true
- type: bind
source: ./logstash/pipeline
target: /usr/share/logstash/pipeline
read_only: true
ports:
- "5044:5044"
- "5000:5000/tcp"
- "5000:5000/udp"
- "9600:9600"
environment:
LS_JAVA_OPTS: "-Xmx256m -Xms256m"
networks:
- elk
depends_on:
- elasticsearch
kibana:
build:
context: kibana/
args:
ELK_VERSION: 7.5.1
volumes:
- type: bind
source: ./kibana/config/kibana.yml
target: /usr/share/kibana/config/kibana.yml
read_only: true
ports:
- "5601:5601"
networks:
- elk
depends_on:
- elasticsearch
networks:
elk:
driver: bridge
volumes:
elasticsearch:
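# A minimal smoke test for this Compose file (a sketch; note the non-standard 9209 host port and the
# ELASTIC_PASSWORD value set above):
#
#   docker-compose up -d
#   curl -u elastic:hudongtang2020 http://localhost:9209/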
version: '3.3'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.9.2
ports:
- "9200:9200"
- "9300:9300"
configs:
- source: elastic_config
target: /usr/share/elasticsearch/config/elasticsearch.yml
environment:
ES_JAVA_OPTS: "-Xmx256m -Xms256m"
ELASTIC_PASSWORD: changeme
# Use single node discovery in order to disable production mode and avoid bootstrap checks.
# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
discovery.type: single-node
# Force publishing on the 'elk' overlay.
network.publish_host: _eth0_
networks:
- elk
deploy:
mode: replicated
replicas: 1
logstash:
image: docker.elastic.co/logstash/logstash:7.9.2
ports:
- "5044:5044"
- "5000:5000"
- "9600:9600"
configs:
- source: logstash_config
target: /usr/share/logstash/config/logstash.yml
- source: logstash_pipeline
target: /usr/share/logstash/pipeline/logstash.conf
environment:
LS_JAVA_OPTS: "-Xmx256m -Xms256m"
networks:
- elk
deploy:
mode: replicated
replicas: 1
kibana:
image: docker.elastic.co/kibana/kibana:7.9.2
ports:
- "5601:5601"
configs:
- source: kibana_config
target: /usr/share/kibana/config/kibana.yml
networks:
- elk
deploy:
mode: replicated
replicas: 1
configs:
elastic_config:
file: ./elasticsearch/config/elasticsearch.yml
logstash_config:
file: ./logstash/config/logstash.yml
logstash_pipeline:
file: ./logstash/pipeline/logstash.conf
kibana_config:
file: ./kibana/config/kibana.yml
networks:
elk:
driver: overlay
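# Deploying this file on a single-node Swarm follows the same pattern exercised by the CI workflow above (a sketch):
#
#   docker swarm init
#   docker stack deploy -c ./docker-stack.yml elk
#   docker stack services elk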
ARG ELK_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu
---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
xpack.license.self_generated.type: trial
xpack.security.enabled: false
xpack.monitoring.collection.enabled: true
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
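# With CORS enabled above, a cross-origin request should return an Access-Control-Allow-Origin header (a sketch;
# security is disabled in this configuration, so no credentials are needed; the host port depends on your Compose
# file, 9209 in this repository's docker-compose.yml):
#
#   curl -D- -H 'Origin: http://example.com' http://localhost:9200/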
# Extensions
Third-party extensions that enable extra integrations with the Elastic stack.
ARG ELK_VERSION
FROM docker.elastic.co/apm/apm-server:${ELK_VERSION}
# APM Server extension
The APM Server receives data from APM agents and transforms them into Elasticsearch documents that can be visualised in
Kibana.
## Usage
To include APM Server in the stack, run Docker Compose from the root of the repository with an additional command line
argument referencing the `apm-server-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up
```
Meanwhile, you can navigate to the **APM** application in Kibana and follow the setup instructions to get started.
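You can also confirm the server is listening the same way the test suite does, by polling its root endpoint (a sketch;
APM Server answers on port `8200` without authentication in this setup):

```console
$ curl -D- 'http://localhost:8200/'
```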
## Connecting an agent to APM Server
The most basic configuration for sending traces to APM Server is to specify `SERVICE_NAME` and `SERVER_URL`. Here is an
example Python Flask configuration:
```python
import elasticapm
from elasticapm.contrib.flask import ElasticAPM
from flask import Flask
app = Flask(__name__)
app.config['ELASTIC_APM'] = {
# Set required service name. Allowed characters:
# a-z, A-Z, 0-9, -, _, and space
'SERVICE_NAME': 'PYTHON_FLASK_TEST_APP',
# Set custom APM Server URL (default: http://localhost:8200)
'SERVER_URL': 'http://apm-server:8200',
'DEBUG': True,
}
```
Configuration settings for each supported language are available in the APM documentation: [APM Agents][apm-agents].
## Checking connectivity and importing default APM dashboards
1. On the Kibana home page, click `Add APM` under the _Observability_ panel.
1. Click `Check APM Server status` to confirm the server is up and running.
1. Click `Check agent status` to verify your agent has registered properly.
1. Click `Load Kibana objects` to create an index pattern for APM.
1. Click `Launch APM` to be taken to the APM dashboard.
## See also
[Running APM Server on Docker][apm-docker]
[apm-agents]: https://www.elastic.co/guide/en/apm/get-started/current/components.html#_apm_agents
[apm-docker]: https://www.elastic.co/guide/en/apm/server/current/running-on-docker.html
version: '3.2'
services:
apm-server:
build:
context: extensions/apm-server/
args:
ELK_VERSION: $ELK_VERSION
command:
# Disable strict permission checking on 'apm-server.yml' configuration file
# https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
- --strict.perms=false
volumes:
- type: bind
source: ./extensions/apm-server/config/apm-server.yml
target: /usr/share/apm-server/apm-server.yml
read_only: true
ports:
- '8200:8200'
networks:
- elk
depends_on:
- elasticsearch
apm-server:
host: 0.0.0.0:8200
output:
elasticsearch:
hosts: ['http://elasticsearch:9200']
username: elastic
password: changeme
FROM bitnami/elasticsearch-curator:5.8.1
USER root
RUN install_packages cron && \
echo \
'* * * * *' \
root \
LC_ALL=C.UTF-8 LANG=C.UTF-8 \
/opt/bitnami/python/bin/curator \
--config=/usr/share/curator/config/curator.yml \
/usr/share/curator/config/delete_log_files_curator.yml \
'>/proc/1/fd/1' '2>/proc/1/fd/2' \
>>/etc/crontab
ENTRYPOINT ["cron"]
CMD ["-f", "-L8"]
# Curator
Elasticsearch Curator helps you curate or manage your indices.
## Usage
If you want to include the Curator extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `curator-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up
```
This sample setup demonstrates how to run `curator` every minute using `cron`.
All configuration files are available in the `config/` directory.
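To watch the scheduled runs, tail the container's logs once the extension is up (the cron entry in the Dockerfile
redirects Curator's output to the container's stdout/stderr):

```console
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml logs -f curator
```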
## Documentation
[Curator Reference](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html)
ARG ELK_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/kibana/kibana:${ELK_VERSION}
# Add your kibana plugins setup here
# Example: RUN kibana-plugin install <name|url>
---
## Default Kibana configuration from Kibana base image.
## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js
#
server.name: kibana
server.host: 0.0.0.0
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
## X-Pack security credentials
#
elasticsearch.username: elastic
elasticsearch.password: changeme
ARG ELK_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}
# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json
---
## Default Logstash configuration from Logstash base image.
## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
#
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
## X-Pack security credentials
#
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: changeme
input {
beats {
port => 5044
}
tcp {
port => 5000
}
}
## Add your filters / logstash plugins configuration here
output {
elasticsearch {
hosts => "elasticsearch:9200"
user => "elastic"
password => "hudongtang2020"
}
}
input {
beats {
port => 5044
}
tcp {
port => 5000
}
}
## Add your filters / logstash plugins configuration here
output {
elasticsearch {
hosts => "elasticsearch:9200"
user => "elastic"
password => "hudongtang2020"
index => "nginx-access-log-%{+YYYY.MM}.log"
}
}