1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
root@ig1-k8smaster-01:~# kubectl -n kube-system describe pod kube-dns-4101612645-2nfs6
Name: kube-dns-4101612645-2nfs6
Namespace: kube-system
Node: ig1-k8s-01/10.3.13.82
Start Time: Tue, 18 Apr 2017 15:32:19 +0000
Labels: k8s-app=kube-dns
pod-template-hash=4101612645
Annotations: kubernetes.io/created-by={"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"ReplicaSet","namespace":"kube-system","name":"kube-dns-4101612645","uid":"423e5fd8-15eb-11e7-aebd-00505694...
scheduler.alpha.kubernetes.io/critical-pod=
scheduler.alpha.kubernetes.io/tolerations=[{"key":"CriticalAddonsOnly", "operator":"Exists"}]
Status: Running
IP: 10.1.80.6
Controllers: ReplicaSet/kube-dns-4101612645
Containers:
kubedns:
Container ID: docker://61793b14cb35a056df7c20ec4e7a613c4668659f115f882a9e4e1645cca9110a
Image: gcr.io/google_containers/kubedns-amd64:1.9
Image ID: docker-pullable://gcr.io/google_containers/kubedns-amd64@sha256:3d3d67f519300af646e00adcf860b2f380d35ed4364e550d74002dadace20ead
Ports: 10053/UDP, 10053/TCP, 10055/TCP
Args:
--domain=cluster.local.
--dns-port=10053
--config-map=kube-dns
--v=0
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: Error
Exit Code: 137
Started: Mon, 01 Jan 0001 00:00:00 +0000
Finished: Wed, 19 Apr 2017 13:51:09 +0000
Ready: False
Restart Count: 329
Limits:
memory: 170Mi
Requests:
cpu: 100m
memory: 70Mi
Liveness: http-get http://:8080/healthz-kubedns delay=60s timeout=5s period=10s #success=1 #failure=5
Readiness: http-get http://:8081/readiness delay=3s timeout=5s period=10s #success=1 #failure=3
Environment:
PROMETHEUS_PORT: 10055
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-hzrrp (ro)
dnsmasq:
Container ID: docker://3f2a8036e1198d5623efc5132be7615ef0c892dc9a57867368cd23a8a9e5665f
Image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
Image ID: docker-pullable://gcr.io/google_containers/kube-dnsmasq-amd64@sha256:a722df15c0cf87779aad8ba2468cf072dd208cb5d7cfcaedd90e66b3da9ea9d2
Ports: 53/UDP, 53/TCP
Args:
--cache-size=1000
--no-resolv
--server=127.0.0.1#10053
--log-facility=-
State: Running
Started: Wed, 19 Apr 2017 13:51:09 +0000
Last State: Terminated
Reason: Error
Exit Code: 137
Started: Mon, 01 Jan 0001 00:00:00 +0000
Finished: Wed, 19 Apr 2017 13:45:59 +0000
Ready: True
Restart Count: 328
Requests:
cpu: 150m
memory: 10Mi
Liveness: http-get http://:8080/healthz-dnsmasq delay=60s timeout=5s period=10s #success=1 #failure=5
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-hzrrp (ro)
dnsmasq-metrics:
Container ID: docker://c0393cefbf84ae35f9cb3486aa31e723dafc2e15024fe57cba94d571cd9bf536
Image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0
Image ID: docker-pullable://gcr.io/google_containers/dnsmasq-metrics-amd64@sha256:4063e37fd9b2fd91b7cc5392ed32b30b9c8162c4c7ad2787624306fc133e80a9
Port: 10054/TCP
Args:
--v=2
--logtostderr
State: Running
Started: Tue, 18 Apr 2017 15:32:28 +0000
Ready: True
Restart Count: 0
Requests:
memory: 10Mi
Liveness: http-get http://:10054/metrics delay=60s timeout=5s period=10s #success=1 #failure=5
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-hzrrp (ro)
healthz:
Container ID: docker://245d30cd71d24a66075e4123d3353135abfe49bc63f6306f68951d930b1ff044
Image: gcr.io/google_containers/exechealthz-amd64:1.2
Image ID: docker-pullable://gcr.io/google_containers/exechealthz-amd64@sha256:503e158c3f65ed7399f54010571c7c977ade7fe59010695f48d9650d83488c0a
Port: 8080/TCP
Args:
--cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
--url=/healthz-dnsmasq
--cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
--url=/healthz-kubedns
--port=8080
--quiet
State: Running
Started: Tue, 18 Apr 2017 15:32:30 +0000
Ready: True
Restart Count: 0
Limits:
memory: 50Mi
Requests:
cpu: 10m
memory: 50Mi
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-hzrrp (ro)
Conditions:
Type Status
Initialized True
Ready False
PodScheduled True
Volumes:
default-token-hzrrp:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-hzrrp
Optional: false
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: node.alpha.kubernetes.io/notReady=:Exists:NoExecute for 300s
node.alpha.kubernetes.io/unreachable=:Exists:NoExecute for 300s
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
22h 14m 249 kubelet, ig1-k8s-01 Warning FailedSync Error syncing pod, skipping: [failed to "StartContainer" for "dnsmasq" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=dnsmasq pod=kube-dns-4101612645-2nfs6_kube-system(35862c93-244c-11e7-b7ef-005056949324)"
, failed to "StartContainer" for "kubedns" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kubedns pod=kube-dns-4101612645-2nfs6_kube-system(35862c93-244c-11e7-b7ef-005056949324)"
]
22h 4m 1874 kubelet, ig1-k8s-01 Warning FailedSync Error syncing pod, skipping: [failed to "StartContainer" for "kubedns" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kubedns pod=kube-dns-4101612645-2nfs6_kube-system(35862c93-244c-11e7-b7ef-005056949324)"
, failed to "StartContainer" for "dnsmasq" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=dnsmasq pod=kube-dns-4101612645-2nfs6_kube-system(35862c93-244c-11e7-b7ef-005056949324)"
]
22h 2m 329 kubelet, ig1-k8s-01 spec.containers{kubedns} Normal Pulled Container image "gcr.io/google_containers/kubedns-amd64:1.9" already present on machine
21h 1m 1909 kubelet, ig1-k8s-01 Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "dnsmasq" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=dnsmasq pod=kube-dns-4101612645-2nfs6_kube-system(35862c93-244c-11e7-b7ef-005056949324)"
22h 1m 649 kubelet, ig1-k8s-01 spec.containers{dnsmasq} Normal Killing (events with common reason combined)
22h 1m 328 kubelet, ig1-k8s-01 spec.containers{dnsmasq} Normal Pulled Container image "gcr.io/google_containers/kube-dnsmasq-amd64:1.4" already present on machine
22h 1m 652 kubelet, ig1-k8s-01 spec.containers{dnsmasq} Normal Created (events with common reason combined)
22h 1m 652 kubelet, ig1-k8s-01 spec.containers{dnsmasq} Normal Started (events with common reason combined)
22h 19s 8094 kubelet, ig1-k8s-01 spec.containers{kubedns} Warning BackOff Back-off restarting failed container
22h 19s 1910 kubelet, ig1-k8s-01 Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "kubedns" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kubedns pod=kube-dns-4101612645-2nfs6_kube-system(35862c93-244c-11e7-b7ef-005056949324)"
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
50m 50m 1 kubelet, ig1-k8s-04 spec.containers{kubernetes-dashboard} Normal Killing Killing container with id docker://29ca8a2ef938a97b219eb3697bc183618241ab634fc0425981798cb1e87ccd10:pod "kubernetes-dashboard-2917854236-mwhdl_kube-system(181dd756-244c-11e7-b7ef-005056949324)" container "kubernetes-dashboard" is unhealthy, it will be killed and re-created.
22h 25s 5528 kubelet, ig1-k8s-04 spec.containers{kubernetes-dashboard} Warning BackOff Back-off restarting failed container
22h 25s 5499 kubelet, ig1-k8s-04 Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "kubernetes-dashboard" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kubernetes-dashboard pod=kubernetes-dashboard-2917854236-mwhdl_kube-system(181dd756-244c-11e7-b7ef-005056949324)"
22h 13s 275 kubelet, ig1-k8s-04 spec.containers{kubernetes-dashboard} Normal Pulled Container image "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0" already present on machine
21h 12s 266 kubelet, ig1-k8s-04 spec.containers{kubernetes-dashboard} Normal Created (events with common reason combined)
21h 12s 266 kubelet, ig1-k8s-04 spec.containers{kubernetes-dashboard} Normal Started (events with common reason combined)
root@ig1-k8smaster-01:~# kubectl -n kube-system logs kubernetes-dashboard-2917854236-mwhdl
Using HTTP port: 9090
Creating API server client for https://10.152.183.1:443
Error while initializing connection to Kubernetes apiserver. This most likely means that the cluster is misconfigured (e.g., it has invalid apiserver certificates or service accounts configuration) or the --apiserver-host param points to a server that does not exist. Reason: Get https://10.152.183.1:443/version: dial tcp 10.152.183.1:443: i/o timeout
Refer to the troubleshooting guide for more information: https://github.com/kubernetes/dashboard/blob/master/docs/user-guide/troubleshooting.md
root@ig1-k8smaster-01:~# kubectl get services --all-namespaces
NAMESPACE NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default default-http-backend 10.152.183.107 <none> 80/TCP 62d
default kubernetes 10.152.183.1 <none> 443/TCP 62d
kube-system heapster 10.152.183.140 <none> 80/TCP 62d
kube-system kube-dns 10.152.183.10 <none> 53/UDP,53/TCP 62d
kube-system kubernetes-dashboard 10.152.183.235 <none> 80/TCP 62d
kube-system monitoring-grafana 10.152.183.207 <none> 80/TCP 62d
kube-system monitoring-influxdb 10.152.183.106 <none> 8083/TCP,8086/TCP 62d
|