Microservice Log Monitoring with ELK: Building the ELK Stack with docker-compose





Building the ELK log monitoring stack with docker-compose



1. Basic Environment



Requirements

Server: CentOS 7.0

Environment: Docker, docker-compose

Memory: 4 GB



2. Environment Configuration

Elasticsearch requires the vm.max_map_count kernel parameter to be raised; otherwise the container fails its bootstrap check and will not start.

# Apply the setting immediately (not persistent across reboots)
sysctl -w vm.max_map_count=262144
# Persist it by adding the value to /etc/sysctl.conf, then reload
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p
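
You can confirm the active value with:

sysctl vm.max_map_count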



3. Configure and Start with docker-compose.yml

Create the Logstash configuration file logstash-springboot.conf:

input {
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 4560
    codec => json_lines
  }
}
output {
  elasticsearch {
    hosts => "es:9200"            # "es" is the link alias defined in docker-compose.yml
    index => "ms-%{+YYYY.MM.dd}"  # one index per day
  }
}
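
If events later fail to show up in Elasticsearch, a common debugging aid is to add a stdout output alongside the elasticsearch one (a sketch; remove it once the pipeline works):

output {
  elasticsearch {
    hosts => "es:9200"
    index => "ms-%{+YYYY.MM.dd}"
  }
  # Print each event to the container log for debugging
  stdout { codec => rubydebug }
}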

The compose file docker-compose.yml:

version: '3'
services:
  elasticsearch:
    image: elasticsearch:6.4.0
    container_name: elasticsearch
    environment:
      - "cluster.name=elasticsearch" # set the cluster name
      - "discovery.type=single-node" # run as a single node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" # JVM heap size
      - TZ=Asia/Shanghai
    volumes:
      - /data/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins # plugin directory mount
      - /data/elk/elasticsearch/data:/usr/share/elasticsearch/data # data directory mount
    ports:
      - 9200:9200
  kibana:
    image: kibana:6.4.0
    container_name: kibana
    links:
      - elasticsearch:es # lets kibana reach elasticsearch via the hostname "es"
    depends_on:
      - elasticsearch # start kibana after elasticsearch
    environment:
      - "ELASTICSEARCH_URL=http://es:9200" # Elasticsearch address (Kibana 6.x uses elasticsearch.url; elasticsearch.hosts is a 7.x setting)
      - TZ=Asia/Shanghai
    ports:
      - 5601:5601
  logstash:
    image: logstash:6.4.0
    container_name: logstash
    volumes:
      - /data/elk/logstash/logstash-springboot.conf:/usr/share/logstash/pipeline/logstash.conf # mount the logstash pipeline config
    environment:
      - TZ=Asia/Shanghai # time zone
    depends_on:
      - elasticsearch # start logstash after elasticsearch
    links:
      - elasticsearch:es # lets logstash reach elasticsearch via the hostname "es"
    ports:
      - 4560:4560
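
The compose file mounts host paths under /data/elk, so create them before starting, and copy the Logstash config into place (this assumes logstash-springboot.conf is in your current directory):

mkdir -p /data/elk/elasticsearch/plugins /data/elk/elasticsearch/data /data/elk/logstash
cp logstash-springboot.conf /data/elk/logstash/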

Grant permissions on the mounted directories:

chmod 777 /data/elk/elasticsearch/plugins
chmod 777 /data/elk/elasticsearch/data

In the directory containing docker-compose.yml, run the following command to start the services:

docker-compose up -d

Once the command completes, the three containers should be up.
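
You can verify that all three containers are running and that Elasticsearch responds:

docker-compose ps
curl http://127.0.0.1:9200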

Install the json_lines codec plugin in Logstash:

# Enter the logstash container
docker exec -it logstash /bin/bash
# Go to the Logstash bin directory
cd /usr/share/logstash/bin
# Install the plugin
logstash-plugin install logstash-codec-json_lines
# Leave the container
exit
# Restart the logstash container
docker restart logstash
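
To smoke-test the pipeline, push a JSON line at the TCP input and check that a daily index appears (this assumes nc is available on the host):

echo '{"message": "hello elk"}' | nc 127.0.0.1 4560
curl 'http://127.0.0.1:9200/_cat/indices?v'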

Turn off the firewall, or open the relevant ports, so that Kibana is reachable:

systemctl stop firewalld
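
Alternatively, keep firewalld running and open just the ELK ports:

firewall-cmd --permanent --add-port=5601/tcp # Kibana
firewall-cmd --permanent --add-port=9200/tcp # Elasticsearch
firewall-cmd --permanent --add-port=4560/tcp # Logstash TCP input
firewall-cmd --reload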

Kibana address (substitute your server's IP):


http://192.168.2.113:5601
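
You can also check Kibana's status endpoint from the shell (again, replace the IP with yours):

curl http://192.168.2.113:5601/api/status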

The Kibana page should now load in the browser.

ELK is typically used in microservice environments for log collection, online log viewing, and log statistics.



4. Shipping Spring Boot Logs to ELK

Configure logback.xml in the Spring Boot application to send logs to Logstash.

The configuration file is as follows:

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="true">
    <!-- Microservice resource configuration file -->
    <property resource="bootstrap.yml"/>
    <property name="APP_NAME" value="${appId}"/>
    <!-- Log output directory -->
    <property name="LOG_DIR" value="/data/log/${APP_NAME}"/>
    <contextName>default</contextName>
    <jmxConfigurator/>
    <!-- Console appender for standard output -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>[%d{HH:mm:ss}] [%thread] |%level| %marker %logger{50} -> %X{traceId} %msg %n%ex</pattern>
        </encoder>
    </appender>
    <!-- DEBUG rolling file appender -->
    <appender name="DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_DIR}/debug.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_DIR}/history/debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>60</maxHistory>
            <maxFileSize>50MB</maxFileSize>
        </rollingPolicy>
        <encoder charset="UTF-8">
            <Pattern>[%d{HH:mm:ss}] [%thread] |%level| %marker %logger{50} -> %X{traceId} %msg %n%ex</Pattern>
        </encoder>
    </appender>

    <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>1000</queueSize>
        <appender-ref ref="DEBUG"/>
    </appender>

    <appender name="INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_DIR}/info.log</file>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>info</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_DIR}/history/info-%d{yyyy-MM}.%i.log</fileNamePattern>
            <maxHistory>12</maxHistory>
            <maxFileSize>50MB</maxFileSize>
        </rollingPolicy>
        <encoder charset="UTF-8">
            <Pattern>[%d] [%thread] |%level| %marker %logger{10} -> %X{traceId} %msg %n%ex</Pattern>
        </encoder>
    </appender>

    <appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_DIR}/error.log</file>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>error</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_DIR}/history/error-%d{yyyy-MM}.%i.log</fileNamePattern>
            <maxHistory>12</maxHistory>
            <maxFileSize>50MB</maxFileSize>
        </rollingPolicy>
        <encoder charset="UTF-8">
            <Pattern>[%d] [%thread] |%level| %marker %logger{50} -> %X{traceId} %msg %n%ex</Pattern>
        </encoder>
    </appender>

    <appender name="ACCESS" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_DIR}/access.log</file>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>error</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_DIR}/history/access-%d{yyyy-MM}.%i.log</fileNamePattern>
            <maxHistory>12</maxHistory>
            <maxFileSize>50MB</maxFileSize>
        </rollingPolicy>
        <encoder charset="UTF-8">
            <Pattern>[%d] [%thread] |%level| %marker %logger{10} -> %X{traceId} %msg %n%ex</Pattern>
        </encoder>
    </appender>
    <!-- Logstash appender (commented out by default); adjust appname to your environment -->
    <!--<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>192.168.112.224:4560</destination>
        <encoder class="net.logstash.logback.encoder.LogstashEncoder">
        <includeContext>false</includeContext>
        <customFields>{"appname": "${APP_NAME}", "server": "${HOSTNAME}"}</customFields>
        </encoder>
    </appender>-->
    <root level="info">
        <appender-ref ref="STDOUT"/>
        <!-- DEBUG is attached through ASYNC; referencing it again here would duplicate every entry in debug.log -->
        <appender-ref ref="ASYNC"/>
        <appender-ref ref="INFO"/>
        <appender-ref ref="ERROR"/>
        <!--<appender-ref ref="LOGSTASH"/>-->
    </root>

    <logger name="org.springframework" level="info" additivity="false">
        <appender-ref ref="DEBUG"/>
        <appender-ref ref="INFO"/>
        <appender-ref ref="STDOUT"/>
        <!--<appender-ref ref="LOGSTASH"/>-->
    </logger>

    <logger name="com.upanda.coyhzx.dao" level="DEBUG" additivity="true">
        <appender-ref ref="DEBUG"/>
        <appender-ref ref="INFO"/>
        <appender-ref ref="ERROR"/>
        <!--<appender-ref ref="LOGSTASH"/>-->
    </logger>
</configuration>
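
To actually enable the LOGSTASH appender above, uncomment it, point destination at the server running Logstash (port 4560), and add logstash-logback-encoder to the project. A minimal Maven sketch; the version shown is illustrative, pick a release compatible with your logback version:

<!-- logstash-logback-encoder provides LogstashTcpSocketAppender and LogstashEncoder -->
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.2</version>
</dependency>

The patterns above also reference %X{traceId}, which is read from the SLF4J MDC; nothing in this configuration sets it. A minimal sketch of populating it per request (the filter class and UUID scheme are hypothetical, not part of the original setup, and this assumes Servlet 4.0+ where Filter's init/destroy have default implementations):

import org.slf4j.MDC;
import javax.servlet.*;
import java.io.IOException;
import java.util.UUID;

// Hypothetical filter: tags every request with a traceId so %X{traceId} resolves in the log patterns
public class TraceIdFilter implements Filter {
    @Override
    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
            throws IOException, ServletException {
        MDC.put("traceId", UUID.randomUUID().toString());
        try {
            chain.doFilter(req, res);
        } finally {
            // Clear the id so pooled threads do not leak it into later requests
            MDC.remove("traceId");
        }
    }
}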



Copyright notice: This is an original article by qq_39504520, released under the CC 4.0 BY-SA license. Please include a link to the original source and this notice when reposting.