$ kubectl apply -f ./node_exporter
daemonset.apps/prometheus-node-exporter created
service/prometheus-node-exporter created
$ kubectl get all -n monitoring
NAME                                 READY   STATUS    RESTARTS   AGE
pod/prometheus-node-exporter-d4wg7   1/1     Running   0          4m7s
pod/prometheus-node-exporter-tqczz   1/1     Running   0          4m7s
pod/prometheus-node-exporter-wcrh6   1/1     Running   0          4m7s

NAME                               TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
service/prometheus-node-exporter   ClusterIP   None         <none>        9100/TCP   4m7s

NAME                                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/prometheus-node-exporter   3         3         3       3            3           <none>          4m7s
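The manifests under ./node_exporter are not shown above; a minimal sketch of what the DaemonSet and its headless Service could look like is below (the image tag, labels, and hostNetwork choice are assumptions, not the original files):

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: prometheus-node-exporter
  namespace: monitoring
spec:
  selector:
    matchLabels:
      app: prometheus-node-exporter
  template:
    metadata:
      labels:
        app: prometheus-node-exporter
    spec:
      hostNetwork: true        # expose metrics on each node's own IP
      hostPID: true
      containers:
      - name: node-exporter
        image: prom/node-exporter:v0.18.1   # assumed tag
        ports:
        - containerPort: 9100
          hostPort: 9100
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus-node-exporter
  namespace: monitoring
spec:
  clusterIP: None              # headless, matching the "ClusterIP None" shown above
  ports:
  - port: 9100
  selector:
    app: prometheus-node-exporter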
$ kubectl apply -f ./prometheus
configmap/prometheus-config created
deployment.apps/prometheus-server created
clusterrole.rbac.authorization.k8s.io/prometheus created
serviceaccount/prometheus created
clusterrolebinding.rbac.authorization.k8s.io/prometheus created
service/prometheus created
$ kubectl get all -n monitoring
NAME                                     READY   STATUS    RESTARTS   AGE
pod/prometheus-node-exporter-d4wg7       1/1     Running   0          9m
pod/prometheus-node-exporter-tqczz       1/1     Running   0          9m
pod/prometheus-node-exporter-wcrh6       1/1     Running   0          9m
pod/prometheus-server-5fcbdbcc6f-nt4wj   1/1     Running   0          2m24s

NAME                               TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
service/prometheus                 NodePort    10.107.112.119   <none>        9090:30090/TCP   2m
service/prometheus-node-exporter   ClusterIP   None             <none>        9100/TCP         9m

NAME                                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/prometheus-node-exporter   3         3         3       3            3           <none>          9m

NAME                                DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/prometheus-server   1         1         1            1           2m

NAME                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/prometheus-server-5fcbdbcc6f   1         1         1       2m
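The prometheus-config ConfigMap applied above carries the scrape configuration. A minimal sketch of a job that discovers the node-exporter endpoints through the Kubernetes API (the job name and relabeling are assumptions, not the original file):

apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: monitoring
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
    scrape_configs:
    - job_name: 'node-exporter'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      # keep only the endpoints backing the node-exporter Service
      - source_labels: [__meta_kubernetes_service_name]
        regex: prometheus-node-exporter
        action: keep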
Deploy kube-state-metrics
$ ls kube-state-metrics
kube-state-metrics-deploy.yaml kube-state-metrics-rbac.yaml kube-state-metrics-svc.yaml
$ kubectl apply -f ./kube-state-metrics
deployment.apps/kube-state-metrics created
serviceaccount/kube-state-metrics created
clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
service/kube-state-metrics created
Wait a moment, then check:
$ kubectl get pod -n monitoring
NAME                                  READY   STATUS    RESTARTS   AGE
kube-state-metrics-667fb54645-xj8gr   1/1     Running   0          116s
$ kubectl get svc -n monitoring
NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kube-state-metrics   ClusterIP   10.104.171.60   <none>        8080/TCP   2m50s
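To spot-check that kube-state-metrics is actually serving metrics, the Service can be port-forwarded and queried (a sanity check added here, not one of the original steps):

$ kubectl port-forward -n monitoring svc/kube-state-metrics 8080:8080 &
$ curl -s http://localhost:8080/metrics | head

Deploy k8s-prometheus-adapter
The adapter serves the custom metrics API over TLS, so it needs a serving certificate signed by the cluster CA. The serving.key and serving.csr used below are assumed to have been generated first, along these lines (the CN must match the subject shown in the signing output):

$ (umask 077; openssl genrsa -out serving.key 2048)
$ openssl req -new -key serving.key -out serving.csr -subj "/CN=serving"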
$ openssl x509 -req -in serving.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -out serving.crt -days 3650
Signature ok
subject=/CN=serving
Getting CA Private Key
$ ls
serving.crt serving.csr serving.key
Create the secret:
$ kubectl create secret generic cm-adapter-serving-certs --from-file=serving.crt=./serving.crt --from-file=serving.key=./serving.key -n monitoring
secret/cm-adapter-serving-certs created
$ kubectl get secrets -n monitoring
NAME                       TYPE     DATA   AGE
cm-adapter-serving-certs   Opaque   2      49s
Apply the manifest files:
$ kubectl apply -f ./k8s-prometheus-adapter
clusterrolebinding.rbac.authorization.k8s.io/custom-metrics:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/custom-metrics-auth-reader created
deployment.apps/custom-metrics-apiserver created
clusterrolebinding.rbac.authorization.k8s.io/custom-metrics-resource-reader created
serviceaccount/custom-metrics-apiserver created
service/custom-metrics-apiserver created
apiservice.apiregistration.k8s.io/v1beta1.custom.metrics.k8s.io created
clusterrole.rbac.authorization.k8s.io/custom-metrics-server-resources created
configmap/adapter-config created
clusterrole.rbac.authorization.k8s.io/custom-metrics-resource-reader created
clusterrolebinding.rbac.authorization.k8s.io/hpa-controller-custom-metrics created
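The adapter-config ConfigMap created above defines how Prometheus series are exposed as custom metrics. A minimal sketch of one rule, in the adapter's rule format (the series name http_requests_total is assumed purely for illustration):

rules:
- seriesQuery: 'http_requests_total{namespace!="",pod!=""}'
  resources:
    overrides:
      namespace: {resource: "namespace"}
      pod: {resource: "pod"}
  name:
    matches: "^(.*)_total$"
    as: "${1}_per_second"
  metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>}[2m])) by (<<.GroupBy>>)'

Once the v1beta1.custom.metrics.k8s.io APIService reports Available, the API can be queried directly:

$ kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1

and the hpa-controller-custom-metrics binding lets an HPA consume such a metric (the Deployment name and target value below are assumptions):

apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: myapp
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myapp
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Pods
    pods:
      metricName: http_requests_per_second
      targetAverageValue: 10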
$ cat grafana/grafana.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      task: monitoring
      k8s-app: grafana
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: k8s.gcr.io/heapster-grafana-amd64:v5.0.4
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        #- name: INFLUXDB_HOST
        #  value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
        # The following env variables are required to make Grafana accessible via
        # the kubernetes api-server proxy. On production clusters, we recommend
        # removing these env variables, setup auth for grafana, and expose the grafana
        # service using a LoadBalancer or a public IP.
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          # If you're only using the API Server proxy, set this value instead:
          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: monitoring
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP.
  # type: LoadBalancer
  # You could also use NodePort to expose the service at a randomly-generated port
  # type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
  type: NodePort
Three points worth noting:
The grafana-storage volume should be a persistent volume; for this test it is mounted as an emptyDir (see the PVC sketch below).
The Grafana Service uses NodePort so it can be reached from outside the cluster.
The INFLUXDB_HOST environment variable has been commented out, since no InfluxDB backend is used.
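For production, the emptyDir can be replaced with a PersistentVolumeClaim; a minimal sketch (the size is an assumption, and a default StorageClass is assumed to exist):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-storage
  namespace: monitoring
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 5Gi

and in the Deployment the grafana-storage volume would become:

      - name: grafana-storage
        persistentVolumeClaim:
          claimName: grafana-storage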
Apply and check:
$ kubectl apply -f grafana/grafana.yaml
$ kubectl get pod -n monitoring |grep grafana
NAME                                  READY   STATUS    RESTARTS   AGE
monitoring-grafana-7f99994bc4-mpmhz   1/1     Running   0          3m
$ kubectl get svc -n monitoring |grep grafana
monitoring-grafana NodePort 10.109.154.210 <none> 80:31337/TCP 6d18h
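Grafana is now reachable from outside the cluster at http://<node-ip>:31337 (the NodePort shown above). Inside Grafana, a Prometheus data source can then point at the in-cluster Service, e.g. http://prometheus.monitoring.svc:9090.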