root@ig1-k8smaster-01:~/snap/cdk-addons/current/addons# kubectl get pods -o wide --all-namespaces
sh: 0: getcwd() failed: No such file or directory
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE   IP           NODE
default       default-http-backend-35bpm                1/1     Running            1          62d   10.1.80.5    ig1-k8s-01
default       nginx-ingress-controller-07vf5            1/1     Running            1          1d    10.3.13.86   ig1-k8s-05
default       nginx-ingress-controller-3kcwn            1/1     Running            1          1d    10.3.13.85   ig1-k8s-04
default       nginx-ingress-controller-fz2rl            1/1     Running            1          1d    10.3.13.84   ig1-k8s-03
default       nginx-ingress-controller-pkcvq            1/1     Running            1          62d   10.3.13.82   ig1-k8s-01
default       nginx-ingress-controller-wrhpt            1/1     Running            1          1d    10.3.13.83   ig1-k8s-02
kube-system   heapster-v1.3.0-1027286811-pkpcj          1/4     ImagePullBackOff   0          3m    10.1.79.6    ig1-k8s-02
kube-system   kube-dns-806549836-w842j                  1/3     ErrImagePull       0          3m    10.1.79.7    ig1-k8s-02
kube-system   kubernetes-dashboard-2917854236-qmvn3     0/1     CrashLoopBackOff   3          3m    10.1.36.7    ig1-k8s-04
kube-system   monitoring-influxdb-grafana-v4-f7n6q      1/2     ImagePullBackOff   0          3m    10.1.19.8    ig1-k8s-05
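Everything that was already running in the default namespace is healthy; only the four addon pods recreated a few minutes ago are stuck pulling images or crash-looping. (The "sh: 0: getcwd() failed" lines just mean the shell's working directory was deleted out from under it, likely by a snap refresh replacing the current revision; they are unrelated to the pod failures.) Describing one of the failing pods, as done next, surfaces the underlying error; sorting the recent kube-system events is another quick way to see it (a minimal sketch, nothing here is specific to CDK):

kubectl -n kube-system get events --sort-by='{.lastTimestamp}'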
root@ig1-k8smaster-01:~/snap/cdk-addons/current/addons# kubectl -n kube-system describe pod kube-dns-806549836-w842j
sh: 0: getcwd() failed: No such file or directory
Name:           kube-dns-806549836-w842j
Namespace:      kube-system
Node:           ig1-k8s-02/10.3.13.83
Start Time:     Wed, 19 Apr 2017 15:28:00 +0000
Labels:         k8s-app=kube-dns
                pod-template-hash=806549836
Annotations:    kubernetes.io/created-by={"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"ReplicaSet","namespace":"kube-system","name":"kube-dns-806549836","uid":"c5807baa-2514-11e7-b7ef-005056949...
                scheduler.alpha.kubernetes.io/critical-pod=
Status:         Pending
IP:             10.1.79.7
Controllers:    ReplicaSet/kube-dns-806549836
Containers:
  kubedns:
    Container ID:    docker://7db9dc19d7e4e0319037622640bb1b3e9957c0a81d03cb168db3136bb223cd79
    Image:           gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1
    Image ID:        docker-pullable://gcr.io/google_containers/k8s-dns-kube-dns-amd64@sha256:33914315e600dfb756e550828307dfa2b21fb6db24fe3fe495e33d1022f9245d
    Ports:           10053/UDP, 10053/TCP, 10055/TCP
    Args:
      --domain=cluster.local.
      --dns-port=10053
      --config-dir=/kube-dns-config
      --v=2
    State:           Running
      Started:       Wed, 19 Apr 2017 15:30:08 +0000
    Ready:           False
    Restart Count:   0
    Limits:
      memory:  170Mi
    Requests:
      cpu:     100m
      memory:  70Mi
    Liveness:        http-get http://:10054/healthcheck/kubedns delay=60s timeout=5s period=10s #success=1 #failure=5
    Readiness:       http-get http://:8081/readiness delay=3s timeout=5s period=10s #success=1 #failure=3
    Environment:
      PROMETHEUS_PORT:  10055
    Mounts:
      /kube-dns-config from kube-dns-config (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-sp73k (ro)
  dnsmasq:
    Container ID:
    Image:           gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1
    Image ID:
    Ports:           53/UDP, 53/TCP
    Args:
      -v=2
      -logtostderr
      -configDir=/etc/k8s/dns/dnsmasq-nanny
      -restartDnsmasq=true
      --
      -k
      --cache-size=1000
      --log-facility=-
      --server=/cluster.local/127.0.0.1#10053
      --server=/in-addr.arpa/127.0.0.1#10053
      --server=/ip6.arpa/127.0.0.1#10053
    State:           Waiting
      Reason:        ErrImagePull
    Ready:           False
    Restart Count:   0
    Requests:
      cpu:     150m
      memory:  20Mi
    Liveness:        http-get http://:10054/healthcheck/dnsmasq delay=60s timeout=5s period=10s #success=1 #failure=5
    Environment:     <none>
    Mounts:
      /etc/k8s/dns/dnsmasq-nanny from kube-dns-config (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-sp73k (ro)
  sidecar:
    Container ID:    docker://ef8a2d4750097841a667e8aa54eaf20fada967412da9285c126fc24e3199950a
    Image:           gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1
    Image ID:        docker-pullable://gcr.io/google_containers/k8s-dns-sidecar-amd64@sha256:d33a91a5d65c223f410891001cd379ac734d036429e033865d700a4176e944b0
    Port:            10054/TCP
    Args:
      --v=2
      --logtostderr
      --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
      --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
    State:           Running
      Started:       Wed, 19 Apr 2017 15:29:32 +0000
    Ready:           True
    Restart Count:   0
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:        http-get http://:10054/metrics delay=60s timeout=5s period=10s #success=1 #failure=5
    Environment:     <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-sp73k (ro)
Conditions:
  Type           Status
  Initialized    True
  Ready          False
  PodScheduled   True
Volumes:
  kube-dns-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      kube-dns
    Optional:  true
  kube-dns-token-sp73k:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  kube-dns-token-sp73k
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     CriticalAddonsOnly=:Exists
                 node.alpha.kubernetes.io/notReady=:Exists:NoExecute for 300s
                 node.alpha.kubernetes.io/unreachable=:Exists:NoExecute for 300s
Events:
  FirstSeen  LastSeen  Count  From                 SubObjectPath             Type     Reason      Message
  ---------  --------  -----  ----                 -------------             ----     ------      -------
  3m         3m        1      default-scheduler                              Normal   Scheduled   Successfully assigned kube-dns-806549836-w842j to ig1-k8s-02
  2m         2m        1      kubelet, ig1-k8s-02  spec.containers{dnsmasq}  Warning  Failed      Failed to pull image "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1": rpc error: code = 2 desc = Error response from daemon: {"message":"Get https://gcr.io/v2/google_containers/k8s-dns-dnsmasq-nanny-amd64/manifests/1.14.1: dial tcp 64.233.166.82:443: i/o timeout"}
  2m         2m        1      kubelet, ig1-k8s-02  spec.containers{sidecar}  Normal   Pulling     pulling image "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1"
  1m         1m        1      kubelet, ig1-k8s-02  spec.containers{kubedns}  Normal   Pulling     pulling image "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1"
  1m         1m        1      kubelet, ig1-k8s-02  spec.containers{sidecar}  Normal   Pulled      Successfully pulled image "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1"
  1m         1m        1      kubelet, ig1-k8s-02  spec.containers{sidecar}  Normal   Created     Created container with id ef8a2d4750097841a667e8aa54eaf20fada967412da9285c126fc24e3199950a
  1m         1m        1      kubelet, ig1-k8s-02  spec.containers{sidecar}  Normal   Started     Started container with id ef8a2d4750097841a667e8aa54eaf20fada967412da9285c126fc24e3199950a
  1m         1m        1      kubelet, ig1-k8s-02  spec.containers{kubedns}  Normal   Pulled      Successfully pulled image "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1"
  1m         1m        1      kubelet, ig1-k8s-02  spec.containers{kubedns}  Normal   Created     Created container with id 7db9dc19d7e4e0319037622640bb1b3e9957c0a81d03cb168db3136bb223cd79
  1m         1m        1      kubelet, ig1-k8s-02  spec.containers{kubedns}  Normal   Started     Started container with id 7db9dc19d7e4e0319037622640bb1b3e9957c0a81d03cb168db3136bb223cd79
  1m         1m        1      kubelet, ig1-k8s-02                            Warning  FailedSync  Error syncing pod, skipping: failed to "StartContainer" for "dnsmasq" with ErrImagePull: "rpc error: code = 2 desc = Error response from daemon: {\"message\":\"Get https://gcr.io/v2/google_containers/k8s-dns-dnsmasq-nanny-amd64/manifests/1.14.1: dial tcp 64.233.166.82:443: i/o timeout\"}"
  3m         1m        2      kubelet, ig1-k8s-02  spec.containers{dnsmasq}  Normal   Pulling     pulling image "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1"
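The Warning events show the actual failure: the kubelet on ig1-k8s-02 gets an i/o timeout dialing gcr.io (64.233.166.82:443) when pulling the dnsmasq image, while other pulls from the same registry succeed intermittently. A quick way to confirm this is network reachability rather than anything Kubernetes-specific is to repeat the request by hand (a minimal sketch; run it on the worker named in the events):

# On ig1-k8s-02, the worker reporting the failed pulls.
# If gcr.io is unreachable or flaky from this node, these stall and
# time out, matching the kubelet's "dial tcp ... i/o timeout" errors.
curl -v --max-time 15 https://gcr.io/v2/
docker pull gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1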