kubernetes云原生纪元:k8s 应用日志采集(下)

kubernetes云原生纪元:k8s 应用日志采集(下)

实践方案

image-20200203121912504

基础环境

ElasticSearch +LogPilot +Kibana

先搭建ElasticSearch,然后搭建LogPilot,最后搭建Kibana

搭建ElasticSearch

Ip Cpu Memory
192.168.181.101 4 16G
192.168.181.102 4 16G
192.168.181.103 4 16G

搭建ES集群需要很大的内存空间不然无法运行起来

两个 Service:`elasticsearch-api` 和 `elasticsearch-discovery`,端口分别是 9200、9300,对外提供了两种服务。9200 是跟外部通信使用的 HTTP 端口,9300 是 ES 节点之间通信使用的 TCP 端口

StatefulSet 定义了具体的 ES 服务,replicas: 3 满足 ES 对高可用的要求

elasticsearch.yaml

---
# Service exposing the Elasticsearch HTTP API (9200) to in-cluster
# clients such as Kibana and Log-Pilot.
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-api
  namespace: kube-system
  labels:
    name: elasticsearch
spec:
  selector:
    app: es   # matches the pods created by the elasticsearch StatefulSet below
  ports:
  - name: transport   # NOTE(review): 9200 is the HTTP port; the name "transport" is misleading but harmless
    port: 9200
    protocol: TCP
---
# Service used by ES nodes to discover each other over the
# node-to-node transport protocol (TCP 9300). Referenced by the
# discovery.zen.ping.unicast.hosts setting in the StatefulSet.
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-discovery
  namespace: kube-system
  labels:
    name: elasticsearch
spec:
  selector:
    app: es   # same pod set as elasticsearch-api
  ports:
  - name: transport
    port: 9300
    protocol: TCP
---
# 3-node Elasticsearch 5.5 cluster. Three replicas satisfy the
# minimum_master_nodes=2 quorum requirement for high availability.
apiVersion: apps/v1   # apps/v1beta1 was removed in Kubernetes 1.16; apps/v1 is the stable API
kind: StatefulSet
metadata:
  name: elasticsearch
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 3
  # NOTE(review): no Service named "elasticsearch-service" is defined in this
  # file; per-pod DNS will not resolve. Consider pointing this at a headless
  # Service (e.g. elasticsearch-discovery with clusterIP: None) — confirm.
  serviceName: "elasticsearch-service"
  selector:
    matchLabels:
      app: es
  template:
    metadata:
      labels:
        app: es
    spec:
      tolerations:
      - effect: NoSchedule # allow these pods to be scheduled on master nodes
        key: node-role.kubernetes.io/master
      serviceAccountName: dashboard-admin
      initContainers:
      - name: init-sysctl
        image: busybox:1.27
        command:
        - sysctl
        - -w         # temporarily raise the kernel mmap limit so ES has enough virtual memory areas
        - vm.max_map_count=262144
        securityContext:
          privileged: true   # required to change host sysctls
      containers:
      - name: elasticsearch
        image: registry.cn-hangzhou.aliyuncs.com/imooc/elasticsearch:5.5.1
        ports:
        - containerPort: 9200
          name: http        # REST API
          protocol: TCP
        - containerPort: 9300
          name: transport   # named so the livenessProbe below can reference it
          protocol: TCP
        securityContext:
          capabilities:
            add:
              - IPC_LOCK
              - SYS_RESOURCE
        resources: # keep requests below limits; the JVM heap (-Xms2g) must fit in the request
          limits:
            memory: 4000Mi
          requests:
            cpu: 100m
            memory: 2000Mi
        env: # ES 5.x settings injected via environment variables
          - name: "http.host"
            value: "0.0.0.0"
          - name: "network.host"
            value: "_eth0_"
          - name: "cluster.name"
            value: "docker-cluster"
          - name: "bootstrap.memory_lock"
            value: "false"
          - name: "discovery.zen.ping.unicast.hosts"
            value: "elasticsearch-discovery"
          - name: "discovery.zen.ping.unicast.hosts.resolve_timeout"
            value: "10s"
          - name: "discovery.zen.ping_timeout"
            value: "6s"
          - name: "discovery.zen.minimum_master_nodes"
            value: "2"
          - name: "discovery.zen.fd.ping_interval"
            value: "2s"
          - name: "discovery.zen.no_master_block"
            value: "write"
          - name: "gateway.expected_nodes"
            value: "2"
          - name: "gateway.expected_master_nodes"
            value: "1"
          - name: "transport.tcp.connect_timeout"
            value: "60s"
          - name: "ES_JAVA_OPTS"
            value: "-Xms2g -Xmx2g"
        livenessProbe: # health check: TCP connect to the named transport port (9300)
          tcpSocket:
            port: transport   # BUGFIX: this named-port reference required the containerPort to carry name: transport
          initialDelaySeconds: 20
          periodSeconds: 10
        volumeMounts:
        - name: es-data
          mountPath: /data
      terminationGracePeriodSeconds: 30
      volumes: # data volume; backed by the host disk here, could also be shared storage
      - name: es-data
        hostPath:
          path: /es-data

创建一下

[root@master-001 ~]# kubectl apply -f elasticsearch.yaml
service/elasticsearch-api created
service/elasticsearch-discovery created
statefulset.apps/elasticsearch created

查看一下service

[root@master-001 ~]# kubectl get svc -n kube-system
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
elasticsearch-api         ClusterIP   10.109.77.41     <none>        9200/TCP                 2m12s
elasticsearch-discovery   ClusterIP   10.106.199.250   <none>        9300/TCP                 2m12s

等10分钟查看下statefulset ,三个实例正常

[root@master-001 ~]# kubectl get statefulset -n kube-system
NAME            READY   AGE
elasticsearch   3/3     72s

搭建LogPilot

使用 DaemonSet 做配置,因为它要在每个节点上运行一个实例

DaemonSet 控制器确保所有(或一部分)的节点都运行了一个指定的 Pod 副本

动态修改filebeat实现的日志采集

log-pilot.yaml

---
# Log-Pilot runs on every node (DaemonSet) and drives filebeat to ship
# container logs to Elasticsearch based on per-pod aliyun_logs_* env vars.
apiVersion: apps/v1   # extensions/v1beta1 DaemonSet was removed in Kubernetes 1.16
kind: DaemonSet
metadata:
  name: log-pilot
  namespace: kube-system
  labels:
    k8s-app: log-pilot
    kubernetes.io/cluster-service: "true"
spec:
  selector:            # required field in apps/v1; must match the template labels
    matchLabels:
      k8s-app: log-es
  template:
    metadata:
      labels:
        k8s-app: log-es
        kubernetes.io/cluster-service: "true"
        version: v1.22
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule   # also collect logs on master nodes
      serviceAccountName: dashboard-admin
      containers:
      - name: log-pilot
        image: registry.cn-hangzhou.aliyuncs.com/imooc/log-pilot:0.9-filebeat
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        env:
          - name: "FILEBEAT_OUTPUT"
            value: "elasticsearch"
          - name: "ELASTICSEARCH_HOST"
            value: "elasticsearch-api"
          - name: "ELASTICSEARCH_PORT"
            value: "9200"
          - name: "ELASTICSEARCH_USER"
            value: "elastic"
          - name: "ELASTICSEARCH_PASSWORD"
            value: "changeme"   # NOTE(review): default credentials — change in any real deployment
        volumeMounts:
        - name: sock # access to the docker daemon socket
          mountPath: /var/run/docker.sock
        - name: root
          mountPath: /host
          readOnly: true
        - name: varlib
          mountPath: /var/lib/filebeat
        - name: varlog
          mountPath: /var/log/filebeat
        securityContext:
          capabilities:
            add:
            - SYS_ADMIN
      terminationGracePeriodSeconds: 30
      volumes:
      - name: sock
        hostPath: # docker daemon socket on the host
          path: /var/run/docker.sock
      - name: root
        hostPath:
          path: / # mount the whole host filesystem (read-only) so any container's log path is reachable
      - name: varlib
        hostPath:
          path: /var/lib/filebeat
          type: DirectoryOrCreate
      - name: varlog
        hostPath:
          path: /var/log/filebeat
          type: DirectoryOrCreate


创建一下

[root@master-001 ~]# kubectl apply -f log-pilot.yaml

查看一下

[root@master-001 ~]# kubectl get ds -n kube-system
image-20200203215726786

搭建 kibana

定义了一个service 端口80标签为component: kibana,定义一个ingress来暴露service,定义了 Deployment 来部署kibana。

kibana对外端口是5601

kibana.yaml

---
# Service fronting the Kibana deployment; forwards port 80 to the
# container port named "http" (5601).
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: kube-system
  labels:
    component: kibana
spec:
  selector:
    component: kibana
  ports:
  - name: http
    port: 80
    targetPort: http   # named containerPort in the kibana Deployment (5601)
---
#ingress
# Exposes the kibana Service at http://kibana.mooc.com/ via the cluster's
# ingress controller.
apiVersion: networking.k8s.io/v1   # extensions/v1beta1 Ingress was removed in Kubernetes 1.22
kind: Ingress
metadata:
  name: kibana
  namespace: kube-system
spec:
  rules:
  - host: kibana.mooc.com
    http:
      paths:
      - path: /
        pathType: Prefix   # required in networking.k8s.io/v1
        backend:
          service:
            name: kibana
            port:
              number: 80
---
# Single-replica Kibana 5.5 pointed at the elasticsearch-api Service.
apiVersion: apps/v1   # apps/v1beta1 was removed in Kubernetes 1.16
kind: Deployment
metadata:
  name: kibana
  namespace: kube-system
  labels:
    component: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
     component: kibana
  template:
    metadata:
      labels:
        component: kibana
    spec:
      containers:
      - name: kibana
        image: registry.cn-hangzhou.aliyuncs.com/imooc/kibana:5.5.1
        env:
        - name: CLUSTER_NAME
          value: docker-cluster   # must match cluster.name in the ES StatefulSet
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch-api:9200/
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 5601
          name: http   # referenced by the kibana Service's targetPort

创建一下

[root@master-001 ~]# kubectl apply -f kibana.yaml

查看

[root@master-001 ~]# kubectl get deploy -n kube-system

image-20200203220552564

在我们要访问的机器配置host

192.168.181.103 kibana.mooc.com

访问

配置web服务,让LogPilot采集日志

当前我们的logpilot 发现了容器,并没有日志的配置,全部都跳过了这些容器

image-20200203221059027

我们做一个web服务配置一下日志,让他采集。

其他跟一个普通的web 项目配置一样,主要看这里 env部分

声明出要采集的目录

aliyun_logs_catalina 这个名字开头必须是 aliyun_logs_,catalina 是我们自己起的名字,这个名字如果对接的是 ES,它表示的就是索引,如果对接的是 kafka 就表示 topic,不同的后端有不同的含义。

value: "stdout"容器的标准输出。

aliyun_logs_access 这个名字的 value 对应具体目录 "/usr/local/tomcat/logs/*" 下面所有东西,如果我们只想要 log,可以改成 "/usr/local/tomcat/logs/*.log"

还有 volumeMounts:部分

我们日志要采集的目录是 "/usr/local/tomcat/logs/",我们要把这个目录挂载到宿主机上,名字是 accesslogs

挂载到宿主机什么位置?emptyDir 是 docker 默认生成的位置,不用手动指定

web.yaml

#deploy
# Demo web app whose logs Log-Pilot collects. The aliyun_logs_* env vars
# declare what to ship; the log directory is mounted so Log-Pilot can read
# it from the host.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-demo
spec:
  selector:
    matchLabels:
      app: web-demo
  replicas: 3 # three replicas; the exact count does not matter for this demo
  template:
    metadata:
      labels:
        app: web-demo
    spec:
      containers:
      - name: web-demo
        image: hub.mooc.com/kubernetes/web:v1
        ports:
        - containerPort: 8080
        env: # the key part: declare log sources for Log-Pilot; names must start with aliyun_logs_
        - name: aliyun_logs_catalina
          value: "stdout"                      # ship the container's stdout; "catalina" becomes the ES index name
        - name: aliyun_logs_access
          value: "/usr/local/tomcat/logs/*"    # ship files from this directory; "access" becomes the ES index name
        volumeMounts:
        - mountPath: /usr/local/tomcat/logs
          name: accesslogs
      volumes:
      - name: accesslogs
        emptyDir: {}   # node-local scratch dir; docker picks the host location automatically
---
#service
# ClusterIP Service fronting the web-demo pods (80 -> container 8080).
apiVersion: v1
kind: Service
metadata:
  name: web-demo
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: web-demo
  type: ClusterIP

---
#ingress
# Exposes the web-demo Service at http://web.mooc.com/.
apiVersion: networking.k8s.io/v1   # extensions/v1beta1 Ingress was removed in Kubernetes 1.22
kind: Ingress
metadata:
  name: web-demo
spec:
  rules:
  - host: web.mooc.com
    http:
      paths:
      - path: /
        pathType: Prefix   # required in networking.k8s.io/v1
        backend:
          service:
            name: web-demo
            port:
              number: 80

创建一下web服务

[root@master-001 ~]# kubectl apply -f web.yaml

查看一下

[root@master-001 ~]# kubectl get pods
image-20200203223116128

我们返回logpilot 的容器看一下它的log

发现它发现了一个容器日志 access是日志的目录 /host/var/lib,因为它把宿主机的根目录挂载到容器的/host目录

我们在 web 配置的是 stdout 标准输出,它会在 docker 自己的目录下面生成 json.log,说明它既处理了标准输出,也处理了我们声明的日志目录

image-20200203223325834

我们查看一下logpilot 容器目录

image-20200203223930811

在看下宿主机的,这些log对应就是/usr/local/tomcat/logs/*目录的log

image-20200203224012063

配置一下索引access*

image-20200203224155135

在配置一个catalina*

image-20200203224333758

我们访问一下我们的应用,然后在kibana 查一下日志

image-20200203224559255 image-20200203224731560

当然我们不一定必须要用 ES+Kibana,我们也可以把 Log-Pilot 后端配置成 kafka,kafka 把消息发送到存储的后端比如 logstash,然后把日志文件按照容器或者是 pod 名区分,写到日志的服务器上,就可以非常方便地按文件去查看日志了。

发布了27 篇原创文章 · 获赞 3 · 访问量 775

猜你喜欢

转载自blog.csdn.net/weixin_37546425/article/details/104259079