Elasticsearch&Logstash&Kibana日志搜索架构搭建(2)

接着上一篇继续写,上一篇讲完了三个应用是如何进行安装、配置、启动的,但是并没有结合起来!并且kibana启动后的网页初始化设置也没有讲解。不过上次没有讲的内容这次全部补齐!

如果你还没有完全了解ELK当中的任何组件请看上一篇,这一篇就不详细进行展开!首先贴上实验的架构:

其中172.16.1.10这边安装上nginx以及tomcat,并随后安装logstash将日志通过管道交给redis。

172.16.1.20只安装redis存储logstash_agent的数据

172.16.1.30只安装logstash从redis取出数据进行加工后(其实一般来说日志数据大多通过应用程序就能修改格式无需增加logstash的负担)通过output交给elasticsearch

172.16.1.40安装elasticsearch和kibana,其实可以分为两台主机,但是我的机子内存肯定不够也没必要。生产环境中如果elasticsearch消耗资源较大建议分开。

其实172.16.1.10可以做多台主机,但是由于博主的内存问题只设立了一台…

安装与配置elasticsearch+logstash+kibana:

首先配置172.16.1.40的主机,然后172.16.1.30以此类推!

172.16.1.40主机配置:

#首先准备好以下包,jdk可以使用yum安装openjdk!
[root@localhost ~] ls
elasticsearch-5.6.1.rpm jdk-8u111-linux-x64.tar.gz kibana-5.6.1-x86_64.rpm
[root@localhost ~] tar -xf jdk-8u111-linux-x64.tar.gz -C /usr/local/
[root@localhost ~] ln -s /usr/local/jdk1.8.0_111/ /usr/local/jdk
[root@localhost local] vim /etc/profile.d/java.sh
export JAVA_HOME=/usr/local/jdk
export PATH=$JAVA_HOME/bin:$PATH
#随后安装elasticsearch
[root@localhost ~] yum install elasticsearch-5.6.1.rpm -y
[root@localhost ~] cd /etc/elasticsearch/
[root@localhost elasticsearch]# vim elasticsearch.yml
# Use a descriptive name for the node:
#
#node.name: node-1
node.name: test
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /data/elasticsearch/
# Set the bind address to a specific IP (IPv4 or IPv6):
#
network.host: 172.16.1.40
#
# Set a custom port for HTTP:
#
http.port: 9200
[root@localhost elasticsearch] mkdir -p /data/elasticsearch #数据单独存放
[root@localhost elasticsearch] chown elasticsearch:elasticsearch /data/elasticsearch/
[root@localhost elasticsearch] vim /etc/sysconfig/elasticsearch
# Elasticsearch Java path
JAVA_HOME=/usr/local/jdk
# Elasticsearch data directory
DATA_DIR=/data/elasticsearch
[root@localhost elasticsearch] systemctl start elasticsearch
#启动后我的虚拟机差点爆了...即使使用:
-Xms512m #还记得jvm.options配置文件吗?
-Xmx512m
#接下来安装kibana并且配置启动
[root@localhost ~] yum install kibana-5.6.1-x86_64.rpm -y
[root@localhost ~] vim /etc/kibana/kibana.yml
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "172.16.1.40"
# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://172.16.1.40:9200"
[root@localhost ~] systemctl start kibana

172.16.1.30主机配置:

#这里只有Logstash为了方便所以我直接安装openjdk了
[root@localhost ~] yum install java-1.8.0-openjdk java-1.8.0-openjdk-devel java-1.8.0-openjdk-headless
[root@localhost ~] yum install logstash-5.6.1.rpm -y
#随后直接写配置文件
[root@localhost logstash] vim conf.d/logstash_index.conf
input {
 redis { #使用redis插件从redis读取数据
 port => "6379"
 host => ["172.16.1.20"]
 data_type => "list" #队列形式传送进来
 key => "nginxlog" #这边的key表示存储在redis的key!
 type => "nginx"
 }
 redis {
 port => "6379"
 host => ["172.16.1.20"]
 data_type => "list"
 key => "tomcatlog"
 type => "tomcat"
 }
}
output {
if "_grokparsefailure" in [tags] { #这句话可以不用,主要对某些grok之后语句不正确所匹配的。
}else{
 if [type] == "nginx"{
 elasticsearch {
 hosts => ["172.16.1.40:9200"]
 index => "logstash_nginx"
 }
 }
 if [type] == "tomcat"{
 elasticsearch {
 hosts => ["172.16.1.40:9200"]
 index => "logstash_tomcat"
 }
 }
}
} #注意:这里需要这个右花括号来闭合output块,否则logstash启动时会报配置解析错误
#随后先别启动先继续配置redis。

172.16.1.20主机配置:

#为了简单起见我直接使用yum安装redis就不用编译安装了,记得要有epel源
[root@localhost ~] yum install redis -y
[root@localhost ~] vim /etc/redis.conf
# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
# JUST COMMENT THE FOLLOWING LINE.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bind 172.16.1.20 #修改监听地址即可,0.0.0.0也不是不可以
[root@localhost ~] systemctl start redis

172.16.1.10主机配置:

#首先安装tomcat和nginx,先安装Nginx
[root@localhost ~] yum install -y nginx
[root@localhost ~] systemctl start nginx
#然后安装tomcat
[root@localhost ~] tar -xf apache-tomcat-8.5.6.tar.gz -C /usr/local/
[root@localhost ~] tar -xf jdk-8u111-linux-x64.tar.gz -C /usr/local/
[root@localhost ~] cd /usr/local
[root@localhost local] ln -s apache-tomcat-8.5.6/ tomcat
[root@localhost local] ln -s jdk1.8.0_111/ jdk
[root@localhost local] vim /etc/profile.d/tomcat.sh
export CATALINA_BASE=/usr/local/tomcat
export PATH=$CATALINA_BASE/bin:$PATH
[root@localhost local] vim /etc/profile.d/java.sh
export JAVA_HOME=/usr/local/jdk
export PATH=$JAVA_HOME/bin:$PATH
[root@localhost local] source /etc/profile.d/java.sh
[root@localhost local] source /etc/profile.d/tomcat.sh
[root@localhost local] catalina.sh start
Using CATALINA_BASE: /usr/local/tomcat
Using CATALINA_HOME: /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME: /usr/local/jdk
Using CLASSPATH: /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Tomcat started.
#随后安装Logstash并且配置
[root@localhost ~] cd /usr/bin
[root@localhost bin] ln -s /usr/local/jdk/bin/java java #添加软链否则yum安装会失败
[root@localhost ~] yum install -y logstash-5.6.1.rpm
[root@localhost logstash] vim /etc/logstash/logstash.yml
path.config: /etc/logstash/conf.d/* #找到并修改,我有点不太放心默认配置...
[root@localhost etc] vim /etc/logstash/conf.d/logstash_index.conf
input{
 file{
type => "nginx"
path => "/var/log/nginx/access.log"
start_position => "beginning"
}
 file{
type => "tomcat"
path => "/usr/local/tomcat/logs/catalina.out"
start_position => "beginning"
}
}

output{
 if [type] == "nginx"{
 redis{
 host=>["172.16.1.20"] #redis的配置在上面都讲过这里就不重复了。
 port=>"6379"
 data_type=>"list"
 key=>"nginxlog"
 }
 stdout{ #这里的输出至屏幕主要原因是用于排错使用!
 codec=>rubydebug
 }
 }
 if [type] == "tomcat"{
 redis{
 host=>["172.16.1.20"]
 port=>"6379"
 data_type=>"list"
 key=>"tomcatlog"
 }
 stdout{
 codec=>rubydebug
 }
 }
}
#配置好了172.16.1.10的logstash之后启动会遇到一个问题,下面来说先看我的启动方式:
[root@localhost logstash] cd /usr/share/logstash/bin/
[root@localhost bin] nohup ./logstash -f /etc/logstash/conf.d/logstash_index.conf >/dev/null 2>&1 &

对了随后别忘了开启172.16.1.30的logstash呢!这个可以使用systemctl!

[root@localhost logstash] systemctl start logstash
Pushing flush onto pipeline无日志输出排错指南:

下面来说说上面为什么不能使用systemctl启动吧,首先我发现最后kibana没有指定的索引数据,后来我的第一反应将172.16.1.30机器关闭,随后刷一下172.16.1.10的nginx日志去redis中查看数据。当然肯定是没有任何数据的…随后我将172.16.1.10和172.16.1.20的日志级别全部调整为TRACE和DEBUG后更奇怪的事情发生了,logstash:172.16.1.10的日志显示:

[2017-09-25T14:57:10,919][TRACE][logstash.inputs.file ] Registering file input {:path=>["/var/log/nginx/access.log"]}
[2017-09-25T14:57:10,933][TRACE][logstash.inputs.file ] Registering file input {:path=>["/usr/local/tomcat/logs/catalina.out"]}
[2017-09-25T14:57:10,940][INFO ][logstash.pipeline ] Pipeline main started
[2017-09-25T14:57:10,967][DEBUG][logstash.agent ] Starting puma
[2017-09-25T14:57:10,975][DEBUG][logstash.agent ] Trying to start WebServer {:port=>9600}
[2017-09-25T14:57:10,982][DEBUG][logstash.api.service ] [api-service] start
[2017-09-25T14:57:10,991][DEBUG][logstash.inputs.file ] _globbed_files: /var/log/nginx/access.log: glob is: []
[2017-09-25T14:57:10,993][DEBUG][logstash.inputs.file ] _globbed_files: /usr/local/tomcat/logs/catalina.out: glob is: []
[2017-09-25T14:57:11,034][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2017-09-25T14:57:15,972][DEBUG][logstash.pipeline ] Pushing flush onto pipeline
[2017-09-25T14:57:20,979][DEBUG][logstash.pipeline ] Pushing flush onto pipeline
[2017-09-25T14:57:25,041][DEBUG][logstash.inputs.file ] _globbed_files: /usr/local/tomcat/logs/catalina.out: glob is: []
[2017-09-25T14:57:25,042][DEBUG][logstash.inputs.file ] _globbed_files: /var/log/nginx/access.log: glob is: []
[2017-09-25T14:57:25,996][DEBUG][logstash.pipeline ] Pushing flush onto pipeline
[2017-09-25T14:57:30,997][DEBUG][logstash.pipeline ] Pushing flush onto pipeline
[2017-09-25T14:57:35,998][DEBUG][logstash.pipeline ] Pushing flush onto pipeline

看到竟然没有任何数据传送心里就咯噔一下的检查是不是将input插件的file写错了?结果是正确的,随后不死心的看看redis的日志:

13150:M 25 Sep 14:58:43.825 - 0 clients connected (0 slaves), 754864 bytes in use
13150:M 25 Sep 14:58:48.862 - 0 clients connected (0 slaves), 754864 bytes in use
13150:M 25 Sep 14:58:53.896 - 0 clients connected (0 slaves), 754864 bytes in use
13150:M 25 Sep 14:58:58.938 - 0 clients connected (0 slaves), 754864 bytes in use
13150:M 25 Sep 14:59:03.976 - 0 clients connected (0 slaves), 754864 bytes in use

啥都没有,然后我刷一遍nginx让其出现日志,结果还是一样!后来去看了官方文档看了一下是不是redis的插件没有安装?结果在/usr/share/logstash/bin/下面执行的脚本./logstash-plugin list  结果出现了redis的output插件。看来不是插件的问题。经过反复思考决定直接使用logstash的启动脚本直接启动(也就是使用二进制安装的启动办法使用自带脚本启动!)./logstash -f /etc/logstash/conf.d/logstash_index.conf  这边和上面完整的命令不太一样稍后来解读!结果使用脚本启动后过了一会一大串日志(我开了stdout插件用于排错)打在屏幕上:

 "@version" => "1",
 "host" => "localhost.localdomain",
 "path" => "/var/log/nginx/access.log",
 "@timestamp" => 2017-09-25T06:46:56.395Z,
 "message" => "192.168.1.254 - - [25/Sep/2017:14:31:59 +0800] \"GET /poweredby.png HTTP/1.1\" 304 0 \"http://192.168.1.10/\" \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko\" \"-\"",
 "type" => "nginx"
}
{
 "@version" => "1",
 "host" => "localhost.localdomain",
 "path" => "/var/log/nginx/access.log",
 "@timestamp" => 2017-09-25T06:46:56.395Z,
 "message" => "192.168.1.254 - - [25/Sep/2017:14:36:36 +0800] \"GET / HTTP/1.1\" 304 0 \"-\" \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko\" \"-\"",
 "type" => "nginx"
}
{
 "@version" => "1",
 "host" => "localhost.localdomain",
 "path" => "/var/log/nginx/access.log",
 "@timestamp" => 2017-09-25T06:46:56.396Z,
 "message" => "192.168.1.254 - - [25/Sep/2017:14:36:36 +0800] \"GET /nginx-logo.png HTTP/1.1\" 304 0 \"http://192.168.1.10/\" \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko\" \"-\"",
 "type" => "nginx"
}
{
 "@version" => "1",
 "host" => "localhost.localdomain",
 "path" => "/var/log/nginx/access.log",
 "@timestamp" => 2017-09-25T06:46:56.396Z,
 "message" => "192.168.1.254 - - [25/Sep/2017:14:36:36 +0800] \"GET /poweredby.png HTTP/1.1\" 304 0 \"http://192.168.1.10/\" \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko\" \"-\"",
 "type" => "nginx"
}
......

随后启动了172.16.1.30的主机,再去kibana中查看,日志都出现了!

排错的自我分析:

首先解读一下上面的命令./logstash -f /etc/logstash/conf.d/logstash_index.conf 这个是直接启动的命令,会直接在前台运行显示,所以我使用了nohup命令让前台运行的程序放在后台中运行,nohup 的用途就是让提交的命令忽略停止信号将其挂在后台运行,语法:nohup COMMAND &表示。

然后根据现有的线索我将问题大致推测出来可能是这两个问题:

  1. systemctl启动的时候肯定会要加载各个模块进行(包括插件)可能这个rpm包的systemd脚本有错误,或者我在配置文件中没有进行配置(找了半天没找到)所以启动可能没有加载redis插件,因为在日志中我看到日志并没有读取和将日志打出到日志和redis中。
  2. 由于172.16.1.30使用的是openjdk安装的,而172.16.1.10的输入日志的Logstash使用的是oracle jdk并且是二进制包。所以可能和java环境有关系可能logstash使用的并不只是使用java命令启动可能会涉及到其他目录以及位置,但是logstash并没有对JAVA_HOME的配置,只有java这个二进制文件的位置配置!

以上都是我的个人见解并不是正确看法,有时间我还会对上面两个问题进行对比研究!如果有大神知道问题答案请速速给小弟邮件!

好了到这里已经算配置完成了接下来搞kibana

初始化kibana并且添加索引:

启动之后使用浏览器输入172.16.1.40:5601即可访问,初始化让你输入index的索引名称:

这里由172.16.1.30主机的logstash填写的配置中得到logstash_nginx和logstash_tomcat,所以这里索引写成logstash_*即可将两个日志全部导入进来!那么如果要细分某某应用的日志呢?其实也非常简单,可以这样填写:

上图:首先找到Management菜单栏随后找到Index Patterns选项点入:

上图:随后你会看到初始设置的logstash_*的日志索引栏,随后点击create index pattern选项。

上图:首先我添加nginx的日志,点击create创建。

上图:随后再添加tomcat日志。

下面回到控制面板中查看效果吧:

上图:首先点击Discover找到我画圈的位置选择刚刚添加的索引即可!

结语:

到此为止已经将中小结构的ELK日志管理应用搭建完毕。虽然留下了问题,不过以后有时间研究之后我会给出对比,如有大神看到博客有自己的见解欢迎给我邮件!

Comments

Leave a Reply

Your email address will not be published. Name and email are required