• Deploys pods on a DaemonSet basis.
  • Characteristic: exactly one pod is deployed per node (pods are spread uniformly across the cluster).
  • A DaemonSet does not support scale-out; the pod count always follows the node count (see the sketch below this list).
  • With 2 nodes, this means exactly 2 pods are deployed.
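A DaemonSet spec has no replicas field, so there is nothing to scale; one quick way to confirm this against the API schema (a sketch, using kubectl explain):

kubectl explain ds.spec | grep -i replicas #@ prints nothing; only selector, template, updateStrategy, etc. exist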

Checking the default DaemonSets

root@ip-172-31-4-27:~/controller# kubectl get po --all-namespaces -o wide | grep ip-172-31-13-180
calico-system     calico-node-nrj94                          1/1     Running   1          2d3h   172.31.13.180    ip-172-31-13-180   <none>           <none>
calico-system     calico-typha-564cccbfc5-r7pww              1/1     Running   0          161m   172.31.13.180    ip-172-31-13-180   <none>           <none>
kube-system       kube-proxy-28mlr                           1/1     Running   1          2d3h   172.31.13.180    ip-172-31-13-180   <none>           <none>


#@ There are two DaemonSets and two nodes, so DESIRED is 2 for each.
root@ip-172-31-4-27:~/controller# kubectl get ds --all-namespaces 
NAMESPACE       NAME          DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
calico-system   calico-node   2         2         2       2            2           kubernetes.io/os=linux   2d3h
kube-system     kube-proxy    2         2         2       2            2           kubernetes.io/os=linux   2d3h
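A DaemonSet's pods can also be listed by label; in a kubeadm cluster the kube-proxy pods carry the k8s-app=kube-proxy label (a sketch; the label value is an assumption about this cluster):

kubectl get po -n kube-system -l k8s-app=kube-proxy -o wide #@ exactly one pod per node should appear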

 

DaemonSet Scenario 1

1. Write the ds-1.yaml manifest

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: ds-1 # will be deployed under the name ds-1
spec:
  selector:
    matchLabels: # selector; must match the pod template labels below
      type: app
  template:
    metadata:
      labels:
        type: app
    spec:
      containers:
      - name: container
        image: nasamjang02/app:v1
        ports:
        - containerPort: 80
          hostPort: 10000
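Because of the hostPort entry, every node that runs a ds-1 pod forwards its own port 10000 to the container's port 80, so the app is reachable through either node's address (a sketch using this lab's node IPs):

curl 172.31.4.27:10000
curl 172.31.13.180:10000 #@ both nodes should answer, since each runs one ds-1 pod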

2. Write the ds-2.yaml manifest

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: ds-2
spec:
  selector:
    matchLabels:
      type: app
  template:
    metadata:
      labels:
        type: app
    spec:
      nodeSelector: # because of this nodeSelector, no ds-2 pod is created until a node carries the env=dev label
        env: dev
      containers:
      - name: container
        image: nasamjang02/app:v1
        ports:
        - containerPort: 80
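Before creating ds-2 it is worth checking which nodes already carry an env label; the -L flag adds the label value as a column (a sketch):

kubectl get nodes -L env #@ the env column stays empty until the labels below are applied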

3. Run and verify

#@ The difference: ds-2 specifies a nodeSelector; ds-1 does not.
kubectl create -f ds-1.yaml
	root@ip-172-31-4-27:~/controller# kubectl create -f ds-1.yaml
	daemonset.apps/ds-1 created
	root@ip-172-31-4-27:~/controller# kubectl get po -o wide
	NAME         READY   STATUS    RESTARTS   AGE   IP               NODE               NOMINATED NODE   READINESS GATES
	ds-1-gzgjt   1/1     Running   0          29s   192.168.82.46    ip-172-31-13-180   <none>           <none>
	ds-1-lxqbb   1/1     Running   0          29s   192.168.51.222   ip-172-31-4-27     <none>           <none>
	#@ It is a DaemonSet on a 2-node cluster, so DESIRED is 2: two pods are created, one on each node.

kubectl create -f ds-2.yaml #@ no ds-2 pod appears yet: no node has the env=dev label

kubectl label nodes ip-172-31-4-27 env=dev
kubectl label nodes ip-172-31-13-180 env=prod

kubectl get po -o wide

	root@ip-172-31-4-27:~/controller# kubectl label nodes ip-172-31-4-27 env=dev
	node/ip-172-31-4-27 labeled
	root@ip-172-31-4-27:~/controller# kubectl label nodes ip-172-31-13-180 env=prod
	node/ip-172-31-13-180 labeled
	root@ip-172-31-4-27:~/controller# kubectl get po -o wide
	NAME         READY   STATUS    RESTARTS   AGE     IP               NODE               NOMINATED NODE   READINESS GATES
	ds-1-gzgjt   1/1     Running   0          3m18s   192.168.82.46    ip-172-31-13-180   <none>           <none>
	ds-1-lxqbb   1/1     Running   0          3m18s   192.168.51.222   ip-172-31-4-27     <none>           <none>
	ds-2-z4c84   1/1     Running   0          32s     192.168.51.223   ip-172-31-4-27     <none>           <none>

kubectl label nodes ip-172-31-13-180 env- #@ the trailing '-' removes the env label
kubectl label nodes ip-172-31-13-180 env=dev
kubectl get po -o wide

	#@ After relabeling the worker to env=dev, a ds-2 pod is scheduled there through the DaemonSet.
	root@ip-172-31-4-27:~/controller# kubectl label nodes ip-172-31-13-180 env-
	node/ip-172-31-13-180 labeled
	root@ip-172-31-4-27:~/controller# kubectl label nodes ip-172-31-13-180 env=dev
	node/ip-172-31-13-180 labeled
	root@ip-172-31-4-27:~/controller# kubectl get po -o wide
	NAME         READY   STATUS    RESTARTS   AGE     IP               NODE               NOMINATED NODE   READINESS GATES
	ds-1-gzgjt   1/1     Running   0          3m53s   192.168.82.46    ip-172-31-13-180   <none>           <none>
	ds-1-lxqbb   1/1     Running   0          3m53s   192.168.51.222   ip-172-31-4-27     <none>           <none>
	ds-2-2pxkf   1/1     Running   0          10s     192.168.82.47    ip-172-31-13-180   <none>           <none>
	ds-2-z4c84   1/1     Running   0          67s     192.168.51.223   ip-172-31-4-27     <none>           <none>
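At this point both nodes carry env=dev, so ds-2's DESIRED count should have risen to match ds-1's; a quick check (a sketch, output omitted):

kubectl get ds ds-1 ds-2 #@ both should now report DESIRED=2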

 

DaemonSet Scenario 2

  • kubectl edit ds ds-1 (rolling update test) #@ DaemonSets support rolling updates as well.
  • --image=nasamjang02/app:v2 #@ change the image to v2 and confirm that the update behaves as configured (a non-interactive alternative is sketched below this list).
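The image change can also be made without an interactive editor; a sketch using kubectl set image (container is the container name from the ds-1 manifest):

kubectl set image ds/ds-1 container=nasamjang02/app:v2 #@ under the default RollingUpdate strategy this replaces pods automatically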

1. Create and verify

root@master1:~# kubectl edit ds ds-1  (change Type=RollingUpdate ---> OnDelete, and the image to nasamjang02/app:v2)
daemonset.apps/ds-1 edited

	spec:
	  containers:
	  - image: nasamjang02/app:v2
	    imagePullPolicy: IfNotPresent
	    name: container
	    ports:

	updateStrategy:
	  rollingUpdate:
	    maxUnavailable: 1
	  type: OnDelete


#@ TODO: the image must also be changed to v2 in the same edit.
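The strategy switch can likewise be done non-interactively; a sketch using kubectl patch:

kubectl patch ds ds-1 -p '{"spec":{"updateStrategy":{"type":"OnDelete"}}}' #@ from now on pods pick up the new image only when deleted by hand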

root@ip-172-31-4-27:~# kubectl get po
NAME         READY   STATUS    RESTARTS   AGE
ds-1-fptcg   1/1     Running   0          63m
ds-1-krlkw   1/1     Running   0          63m
ds-1-r2f8p   1/1     Running   0          38m
ds-2-4wdsd   1/1     Running   0          66m
ds-2-pp52m   1/1     Running   0          3m57s
secondary    1/1     Running   0          4h17m

root@ip-172-31-4-27:~/controller# curl 172.31.4.27:10000 (node_ip:hostport) #@ confirm the response is still v1: with OnDelete, running pods keep the old image
This is app v1 test…


	#@ After deleting a pod, its replacement is created from the v2 image, so curl now prints v2.
	root@ip-172-31-4-27:~/controller# kubectl get po -o wide
	NAME         READY   STATUS    RESTARTS   AGE     IP               NODE               NOMINATED NODE   READINESS GATES
	ds-1-92tf4   1/1     Running   0          6m55s   192.168.82.49    ip-172-31-13-180   <none>           <none>
	ds-1-htf6c   1/1     Running   0          6m55s   192.168.51.226   ip-172-31-4-27     <none>           <none>
	ds-2-kgxjf   1/1     Running   0          6m36s   192.168.51.227   ip-172-31-4-27     <none>           <none>
	ds-2-nthxq   1/1     Running   0          6m36s   192.168.82.50    ip-172-31-13-180   <none>           <none>
	root@ip-172-31-4-27:~/controller# kubectl delete po ds-1-htf6c 
	pod "ds-1-htf6c" deleted
	root@ip-172-31-4-27:~/controller# curl 172.31.4.27:10000
	This is app v2 test…



root@ip-172-31-4-27:~# kubectl delete po ds-1-fptcg #@ confirm the update is not triggered automatically; only the deleted pod is recreated with the new image
pod "ds-1-fptcg" deleted

root@ip-172-31-4-27:~# curl 172.31.13.91:10000 #@ check that this address still answers; a pod that has not been deleted keeps serving v1
This is app v1 test...
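To see which pods were recreated with v2 and which still run v1, each pod's image can be listed with jsonpath (a sketch):

kubectl get po -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}' #@ recreated pods show app:v2, untouched ones app:v1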

Adding an EC2 instance
ami-00099c928597181c5

 

DaemonSet Scenario 3

  • worker2 Join

1. Create and verify

root@ip-172-31-4-27:~# kubeadm token create --print-join-command #@ a new token must be created (the existing one has expired)
kubeadm join 172.31.8.183:6443 --token tp8ek7.57mmzy5w8vv5dzuw     --discovery-token-ca-cert-hash sha256:e95285012a67b480fa97b3f9fa0c7bf5cf467722d476fc486aa0350

root@ip-172-31-4-27:~# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                      EXTRA GROUPS

tp8ek7.57mmzy5w8vv5dzuw   23h         2020-09-18T02:31:32Z   authentication,signing   <none>                                           system:bootstrappers:kubeadm:default-node-token
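If the default 24-hour lifetime is too short, a longer-lived token can be requested up front (a sketch; kubeadm token create accepts a --ttl flag):

kubeadm token create --ttl 48h --print-join-command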

worker2>kubeadm join 172.31.8.183:6443 --token tp8ek7.57mmzy5w8vv5dzuw     --discovery-token-ca-cert-hash sha256:e95285012a67b480fa97b3f9fa0c7bf5cf467722d476fc486aa0350

root@ip-172-31-4-27:~# kubectl get nodes
NAME               STATUS     ROLES    AGE     VERSION
kops-m             NotReady   <none>   22s     v1.19.1
ip-172-31-4-27     Ready      master   3d21h   v1.19.1
ip-172-31-13-180   Ready      <none>   3d16h   v1.19.1
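The new node reports NotReady until the DaemonSet-managed pods (calico-node, kube-proxy) start on it; the transition can be watched live (a sketch):

kubectl get nodes -w #@ -w keeps watching until interrupted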

root@ip-172-31-4-27:~# kubectl get po #@ after the new instance joins, confirm a pod is scheduled on the new worker
NAME         READY   STATUS              RESTARTS   AGE
ds-1-fptcg   1/1     Running             0          24m
ds-1-krlkw   1/1     Running             0          25m
ds-1-r2f8p   0/1     ContainerCreating   0          8s
