iSt0ne's Notes

Installing Elastic Stack 5.1.1 with Docker on CentOS 7

Server preparation

Prepare three servers:

a. 172.16.37.203  elasticsearch (node.data=false), kibana, grafana
b. 172.16.37.143  elasticsearch (node.data=true), logstash
c. 172.16.37.144  elasticsearch (node.data=true), logstash

[figure: ELK deployment architecture]

Deploying the Docker environment with Puppet

# Install Docker

class { 'docker':
  version                  => '1.12.6-1.el7.centos',
  docker_command           => 'dockerd',
  daemon_subcommand        => '',
  root_dir                 => '/data1/docker',
  insecure_registry        => 'r.example.com',
  storage_driver           => 'devicemapper',
  dm_thinpooldev           => '/dev/mapper/docker-thinpool',
  dm_use_deferred_removal  => 'true',
  dm_use_deferred_deletion => 'true',
}
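
The storage settings above assume a devicemapper thin pool already exists at /dev/mapper/docker-thinpool. A minimal sketch of creating one with LVM (direct-lvm), assuming a spare block device /dev/sdb reserved for Docker storage — adjust to your hardware:

# Assumption: /dev/sdb is an unused block device dedicated to Docker
pvcreate /dev/sdb
vgcreate docker /dev/sdb
# Create data and metadata volumes, then convert them into a thin pool
lvcreate --wipesignatures y -n thinpool docker -l 95%VG
lvcreate --wipesignatures y -n thinpoolmeta docker -l 1%VG
lvconvert -y --zero n -c 512K --thinpool docker/thinpool --poolmetadata docker/thinpoolmeta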

# Install Docker Compose

class {'docker::compose':
  ensure => present,
}
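
After the Puppet run, a quick sanity check (a sketch; exact output varies by environment):

docker --version                          # should report 1.12.6
docker-compose --version
docker info | grep -A1 'Storage Driver'   # should show devicemapper backed by docker-thinpool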

Deploying the Docker applications with Compose

a. Configuration on 172.16.37.203

# Create the configuration directory

mkdir /etc/elastic

# Write the Compose configuration file

vi /etc/elastic/docker-compose.yml
version: '2'
services:
  elasticsearch1:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.1.1
    container_name: elasticsearch1
    environment:
      - node.name=es1-front
      - node.master=false
      - node.data=false
      - node.ingest=false
      - cluster.name=qianyi-es-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms8192m -Xmx8192m"
      - "network.host=_eth0_"
      - "network.publish_host=172.16.37.203"
      - "discovery.zen.ping.unicast.hosts=172.16.37.203,172.16.37.143,172.16.37.144"
      - "discovery.zen.minimum_master_nodes=2"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 10g
    cap_add:
      - IPC_LOCK
    volumes:
      - /data1/esdata:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
      - 9300:9300
    networks:
      - esnet
  kibana:
    image: docker.elastic.co/kibana/kibana:5.1.1
    environment:
      SERVER_NAME: 'log.example.com'
      ELASTICSEARCH_URL: 'http://172.16.37.203:9200'
      ELASTICSEARCH_USERNAME: 'elastic'
      ELASTICSEARCH_PASSWORD: 'pwd'
      XPACK_SECURITY_ENCRYPTIONKEY: 'CZLGEgq1PN3hjQ82A3FC9g+PuKrWq6ttoxKB5OGG8Jl'
      XPACK_SECURITY_SESSIONTIMEOUT: 1800000
    restart: always
    depends_on:
      - elasticsearch1
    ports:
      - 5601:5601
    networks:
      - esnet

networks:
  esnet:
    driver: bridge
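
Before starting the stack, the host needs two well-known prerequisites for the official Elasticsearch 5.x images: the data directory must be writable by uid 1000 (the elasticsearch user inside the container), and vm.max_map_count must be raised to pass the bootstrap checks. A sketch (the same applies on 172.16.37.143 and 172.16.37.144):

# Data directory owned by the in-container elasticsearch user (uid 1000)
mkdir -p /data1/esdata
chown -R 1000:1000 /data1/esdata

# Required by the Elasticsearch 5.x bootstrap checks
sysctl -w vm.max_map_count=262144
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf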

# Start the Docker application

cd /etc/elastic
docker-compose up -d

# Tear down the applications started by docker-compose

docker-compose down

# Start Grafana, used for charting log statistics

docker run -d -p 3000:3000 -v /data1/grafana:/var/lib/grafana -e "GF_SECURITY_ADMIN_PASSWORD=pwd" grafana/grafana
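
To chart the logs, Grafana needs Elasticsearch added as a data source. This can be done in the UI, or, as a sketch, via Grafana's HTTP API; the data source name and index pattern below are assumptions to adapt, and the credentials are the logstash_r read account created in section c:

# Assumption: Grafana admin password is "pwd" as set above; adjust name/database as needed
curl -X POST -u admin:pwd http://172.16.37.203:3000/api/datasources \
  -H 'Content-Type: application/json' -d '
{
  "name": "es-filebeat",
  "type": "elasticsearch",
  "access": "proxy",
  "url": "http://172.16.37.203:9200",
  "database": "filebeat-*",
  "basicAuth": true,
  "basicAuthUser": "logstash_r",
  "basicAuthPassword": "scE_fGl9D_18",
  "jsonData": { "timeField": "@timestamp", "esVersion": 5 }
}'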

b. Configuration on 172.16.37.143 and 172.16.37.144

# Create the configuration directory

mkdir /etc/elastic

# Write the Compose configuration file

Note: make the corresponding changes on 172.16.37.144.

Change elasticsearch2 to elasticsearch3, node.name=es2-data to node.name=es3-data, and network.publish_host=172.16.37.143 to network.publish_host=172.16.37.144 (see the sed sketch below).
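
A minimal sketch of making those changes after copying the file from 172.16.37.143, assuming GNU sed:

sed -i -e 's/elasticsearch2/elasticsearch3/g' \
       -e 's/es2-data/es3-data/g' \
       -e 's/network.publish_host=172.16.37.143/network.publish_host=172.16.37.144/' \
       /etc/elastic/docker-compose.yml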

vi /etc/elastic/docker-compose.yml
version: '2'
services:
  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.1.1
    container_name: elasticsearch2
    environment:
      - node.name=es2-data
      - cluster.name=qianyi-es-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms8192m -Xmx8192m"
      - "network.host=_eth0_"
      - "network.publish_host=172.16.37.143"
      - "discovery.zen.ping.unicast.hosts=172.16.37.203,172.16.37.143,172.16.37.144"
      - "discovery.zen.minimum_master_nodes=2"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 10g
    restart: always
    cap_add:
      - IPC_LOCK
    volumes:
      - /data1/esdata:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
      - 9300:9300
    networks:
      - esnet
  logstash:
    image: docker.elastic.co/logstash/logstash:5.1.1
    ports:
      - 5044:5044
    depends_on:
      - elasticsearch2
    volumes:
      - /etc/elastic/pipeline/:/usr/share/logstash/pipeline/
      - /var/log/logstash/:/var/log/logstash/
    networks:
      - esnet

networks:
  esnet:
    driver: bridge

# Create the Logstash pipeline configuration file

mkdir /etc/elastic/pipeline
vi /etc/elastic/pipeline/logstash.conf
input {
  beats {
    port => 5044
  }
}

filter {
  if [type] == "varnishlog" {
    grok {
      match => { "message" => '%{WORD:poolname} %{HOSTNAME:servername} %{IP:client} %{USER:ident} %{USER:auth} \[%{HTTPDATE:timestamp}\] "(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})" %{NUMBER:status} (?:%{NUMBER:bytes}|-) %{QS:referrer} %{QS:agent} %{WORD:varnish_hit}' }
    }
  }

  if [type] == "apachelog" {
    grok {
      match => { "message" => '%{WORD:poolname} %{HOSTNAME:servername} %{IP:client} %{USER:ident} %{USER:auth} \[%{HTTPDATE:timestamp}\] %{NUMBER:response} "(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|-)" %{NUMBER:status} (?:%{NUMBER:bytes}|-) %{QS:referrer} %{QS:agent} %{QS:cookie}' }
    }
  }

  if [type] == "linux_secure" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:date} %{SYSLOGHOST:host} %{DATA:app}(?:\[%{POSINT:pid}\])?: %{GREEDYDATA:msg}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    grok {
      match => { "msg" => "Failed password for %{USER:user} from %{IP:src_ip} port %{NUMBER:src_port} ssh2" }
      add_field => ["action", "failure"]
      add_field => ["eventtype", "failed_login"]
    }
    grok {
      match => { "msg" => "pam_unix\(sshd:auth\): authentication failure; logname= uid=%{NUMBER:uid} euid=%{NUMBER:euid} tty=%{WORD:tty} ruser= rhost=%{IP:src_ip}  user=%{USER:user}" }
      add_field => ["action", "failure"]
      add_field => ["eventtype", "failed_login"]
    }
    grok {
      match => { "msg" => "PAM 5 more authentication failures; logname= uid=%{NUMBER:uid} euid=%{NUMBER:euid} tty=%{WORD:tty} ruser= rhost=%{IP:src_ip}  user=%{USER:user}" }
      add_field => ["action", "failure"]
      add_field => ["eventtype", "failed_login"]
    }
    grok {
      match => { "msg" => "error: PAM: Authentication failure for %{USER:user} from %{IP:src_ip}" }
      add_field => ["action", "failure"]
      add_field => ["eventtype", "failed_login"]
      add_field => ["app", "google_auth"]
    }
    grok {
      match => { "msg" =>"Postponed keyboard-interactive/pam for %{USER:user} from %{IP:src_ip} port %{NUMBER:src_port} ssh2" }
      add_field => ["action", "failure"]
      add_field => ["eventtype", "failure_login"]
      add_field => ["app", "sshd"]
    }
    grok {
      match => { "msg" => "pam_unix\(%{NOTSPACE:app}:session\): session opened for user %{USER:su_user} by %{USER:user}\(uid=%{NUMBER:uid}\)" }
      add_field => ["action", "success"]
      add_field => ["eventtype", "success_login"]
    }
    grok {
      match => { "msg" =>"Accepted keyboard-interactive/pam for %{USER:user} from %{IP:src_ip} port %{NUMBER:src_port} ssh2" }
      add_field => ["action", "success"]
      add_field => ["eventtype", "success_login"]
      add_field => ["app", "sshd"]
    }
    grok {
      match => { "msg" => "pam_unix\(sshd:session\): session opened for user %{USER:user} by \(uid=%{NUMBER:uid}\)" }
      add_field => ["action", "success"]
      add_field => ["eventtype", "success_login"]
      add_field => ["app", "sshd"]
    }
    grok {
      match => { "msg" => "pam_unix\(%{NOTSPACE:app}:session\): session closed for user %{USER:user}" }
      add_field => ["eventtype", "success_logout"]
    }
    mutate {
      replace => ["date", "%{date} 2017"]
    }
    date {
      match => ["date",
                "MMM  d HH:mm:ss YYYY",
                "MMM dd HH:mm:ss YYYY"]
      target => "@timestamp"
    }
  }

  if [type] == "linux_audit" {
    grok {
      match => { "message" => "type=%{WORD:audit_type} msg=audit\(%{NUMBER:audit_epoch}:%{NUMBER:audit_counter}\): user pid=%{NUMBER:audit_pid} uid=%{NUMBER:audit_uid} auid=%{NUMBER:audit_audid} ses=%{NUMBER:audit_ses} msg=\'%{GREEDYDATA:audit_message}\'" }
    }
    grok {
      match => { "message" => "type=%{WORD:audit_type} msg=audit\(%{NUMBER:audit_epoch}:%{NUMBER:audit_counter}\): pid=%{NUMBER:audit_pid} uid=%{NUMBER:audit_uid} old auid=%{NUMBER:old_auid} new auid=%{NUMBER:new_auid} old ses=%{NUMBER:old_ses} new ses=%{NUMBER:new_ses}" }
    }
    kv {
      source => "audit_message"
    }
    date {
      match => [ "audit_epoch", "UNIX_MS" ]
      target => "@timestamp"
    }
  }

  if [type] == "syslog" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    mutate {
      replace => ["syslog_timestamp", "%{syslog_timestamp} 2017"]
    }
    date {
      match => ["syslog_timestamp",
                "MMM  d HH:mm:ss YYYY",
                "MMM dd HH:mm:ss YYYY"]
      target => "@timestamp"
    }
  }

  if [message] =~ "ping.html" {
    drop {}
  }
}

output {
  if "_grokparsefailure" in [tags] {
    file {
      path => "/var/log/logstash/grokparsefailure-%{[type]}-%{+YYYY.MM.dd}.log"
    }
  }
  if [type] == "syslog" {
    elasticsearch {
      hosts => ["elasticsearch2:9200"]
      user => "logstash_w"
      password => "scE_fGl9D_18"
      sniffing => false
      manage_template => false
      index => "filebeat-syslog-%{+YYYY.MM.dd}"
      document_type => "%{[@metadata][type]}"
    }
  } else if [type] == "linux_secure" {
    elasticsearch {
      hosts => ["elasticsearch2:9200"]
      user => "logstash_w"
      password => "scE_fGl9D_18"
      sniffing => false
      manage_template => false
      index => "filebeat-securelog-%{+YYYY.MM.dd}"
      document_type => "%{[@metadata][type]}"
    }
  } else if [type] == "linux_audit" {
    elasticsearch {
      hosts => ["elasticsearch2:9200"]
      user => "logstash_w"
      password => "scE_fGl9D_18"
      sniffing => false
      manage_template => false
      index => "filebeat-auditlog-%{+YYYY.MM.dd}"
      document_type => "%{[@metadata][type]}"
    }
  } else {
    elasticsearch {
      hosts => ["elasticsearch2:9200"]
      user => "logstash_w"
      password => "scE_fGl9D_18"
      sniffing => false
      manage_template => false
      index => "filebeat-apachelog-%{+YYYY.MM.dd}"
      document_type => "%{[@metadata][type]}"
    }
  }
}
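
Before starting the containers, the pipeline syntax can be checked with Logstash's --config.test_and_exit flag. A sketch using the same image (assuming the image entrypoint passes the command through to logstash):

docker run --rm -v /etc/elastic/pipeline/:/usr/share/logstash/pipeline/ \
  docker.elastic.co/logstash/logstash:5.1.1 \
  logstash --config.test_and_exit -f /usr/share/logstash/pipeline/logstash.conf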

# Start the Docker application

cd /etc/elastic
docker-compose up -d
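
Once all three hosts are up, confirm the nodes have joined the cluster (the elastic account still has the default password changeme at this point; see section c below):

curl -u elastic:changeme 'http://172.16.37.203:9200/_cat/nodes?v'
curl -u elastic:changeme 'http://172.16.37.203:9200/_cluster/health?pretty'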

# Tear down the applications started by docker-compose

docker-compose down

c. Create index writer and reader accounts

Note: the default password for the elastic account is changeme.

# Create the index writer role

curl -X POST -u elastic http://127.0.0.1:9200/_xpack/security/role/logstash_writer -d '
{
  "cluster": ["manage_index_templates", "monitor"],
  "indices": [
    {
      "names": [ "filebeat-*","logstash-*" ],
      "privileges": ["write","delete","create_index"]
    }
  ]
}'

# Create the index writer account

curl -X POST -u elastic http://127.0.0.1:9200/_xpack/security/user/logstash_w -d '
{
  "password" : "scE_fGl9D_18",
  "roles" : [ "logstash_writer"],
  "full_name" : "Internal Logstash User"
}'

# Create the index reader role

curl -X POST -u elastic http://127.0.0.1:9200/_xpack/security/role/logstash_reader -d '
{
  "indices": [
    {
      "names": [ "filebeat-*","logstash-*" ],
      "privileges": ["read","view_index_metadata"]
    }
  ]
}'

# Create the index reader account

curl -X POST -u elastic http://127.0.0.1:9200/_xpack/security/user/logstash_r -d '
{
  "password" : "scE_fGl9D_18",
  "roles" : [ "logstash_reader"],
  "full_name" : "Kibana User"
}'

# Change the elastic account password

curl -XPUT -u elastic http://127.0.0.1:9200/_xpack/security/user/elastic/_password -d '{
  "password" : "pwd"
}'
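
A quick check that the new credentials work (a sketch; _authenticate returns the effective user and roles):

curl -u elastic:pwd 'http://127.0.0.1:9200/_xpack/security/_authenticate?pretty'
curl -u logstash_w:scE_fGl9D_18 'http://127.0.0.1:9200/_cluster/health?pretty'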

d. Import the Filebeat template

curl -XPUT -u elastic 'http://127.0.0.1:9200/_template/filebeat' -d@/etc/elastic/filebeat.template.json

The contents of /etc/elastic/filebeat.template.json are as follows:

{
  "mappings": {
    "_default_": {
      "_all": {
        "norms": false
      },
      "_meta": {
        "version": "5.1.1"
      },
      "dynamic_templates": [
        {
          "bytes_fields" : {
            "match" : "bytes",
            "match_mapping_type" : "string",
            "mapping" : {
              "type" : "integer"
            }
          }
        },
        {
          "response_fields" : {
            "match" : "response",
            "match_mapping_type" : "string",
            "mapping" : {
              "type" : "integer"
            }
          }
        },
        {
          "strings_as_keyword": {
            "mapping": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "match_mapping_type": "string"
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "bytes": {
          "type": "integer"
        },
        "response": {
          "type": "integer"
        },
        "beat": {
          "properties": {
            "hostname": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "name": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "version": {
              "ignore_above": 1024,
              "type": "keyword"
            }
          }
        },
        "input_type": {
          "ignore_above": 1024,
          "type": "keyword"
        },
        "message": {
          "norms": false,
          "type": "text"
        },
        "meta": {
          "properties": {
            "cloud": {
              "properties": {
                "availability_zone": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "instance_id": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "machine_type": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "project_id": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "provider": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "region": {
                  "ignore_above": 1024,
                  "type": "keyword"
                }
              }
            }
          }
        },
        "offset": {
          "type": "long"
        },
        "source": {
          "ignore_above": 1024,
          "type": "keyword"
        },
        "tags": {
          "ignore_above": 1024,
          "type": "keyword"
        },
        "type": {
          "ignore_above": 1024,
          "type": "keyword"
        }
      }
    }
  },
  "order": 0,
  "settings": {
    "index.refresh_interval": "5s"
  },
  "template": "filebeat-*"
}
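
To confirm the template was stored:

curl -u elastic 'http://127.0.0.1:9200/_template/filebeat?pretty'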

Configuring Filebeat to collect logs and write them to Elasticsearch via Logstash

# add elk shipper filebeat
class { 'filebeat':
  package_ensure => '5.1.2-1',
  outputs => {
    'logstash' => {
     'hosts' => [
       '172.16.37.143:5044',
       '172.16.37.144:5044',
     ],
     'loadbalance' => true,
    },
  },
}
filebeat::prospector { 'apachelog':
  paths    => [
    '/data1/logs/all.log',
  ],
  doc_type => 'apachelog',
  fields   => {
    service => 'apache',
    zone    => 'm6',
  },
}
filebeat::prospector { 'varnishlog':
  paths    => [
    '/data1/log/varnish/varnishncsa.log',
  ],
  doc_type => 'varnishlog',
  fields   => {
    service => 'varnish',
    zone    => 'm6',
  },
}
filebeat::prospector { 'linux_secure':
  paths    => [
    '/var/log/secure',
  ],
  doc_type => 'linux_secure',
  fields   => {
    service => 'system',
    zone    => $idc,
  },
}
filebeat::prospector { 'linux_audit':
  paths    => [
    '/var/log/audit/audit.log',
  ],
  doc_type => 'linux_audit',
  fields   => {
    service => 'system',
    zone    => $idc,
  },
}
filebeat::prospector { 'syslog':
  paths    => [
    '/var/log/messages',
  ],
  doc_type => 'syslog',
  fields   => {
    service => 'system',
    zone    => $idc,
  },
}
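
Once Filebeat is running on the clients, new daily indices should start to appear; a quick check against the front node:

curl -u elastic 'http://172.16.37.203:9200/_cat/indices/filebeat-*?v'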

Testing the installation

Open Kibana at http://172.16.37.203:5601/ and log in with the elastic account (if the account you log in with lacks sufficient privileges, some indices may not be visible).

# All three nodes are in a healthy state
[screenshot: cluster monitoring]

# Configure the index pattern
[screenshot: Kibana index pattern configuration]

# Discover
[screenshot: Kibana Discover]

# Dashboard
[screenshot: Kibana dashboard]

# Grafana http://172.16.37.203:3000/
[screenshot: Grafana]

References

# Delete a role

curl -X DELETE -u elastic http://127.0.0.1:9200/_xpack/security/role/logstash_writer

# Delete a user

curl -X DELETE -u elastic http://127.0.0.1:9200/_xpack/security/user/logstash_w

# List indices
curl -XGET -u elastic http://127.0.0.1:9200/_cat/indices

# Official documentation
https://www.elastic.co/guide/index.html

# Kibana “Hello World” Example – Part 3 of the ELK
https://oliverveits.wordpress.com/2016/11/20/kibana-hello-world-example/

# Elasticsearch reference
http://www.cnblogs.com/wxw16/tag/Elasticsearch/