
[AEWS] Week 9 - EKS Upgrade

summary-aws 2025. 3. 31. 13:37

Amazon EKS Upgrades: Strategies and Best Practices

1. Verifying the Lab Environment

2. Pre-upgrade Preparation

  - Preparing for Cluster Upgrades
  - Choosing between In-place and Blue-Green upgrade strategies
  - EKS Upgrade Insights

3. Upgrading EKS (1.25 → 1.26)
  - Verify status after the Control Plane upgrade, then upgrade Add-ons
  - [3.1 Data plane - Managed node groups] Upgrading EKS Managed Node groups

  - [3.2 Data plane - Karpenter nodes] Upgrading Karpenter managed nodes

  - [3.3 Data plane - Self-managed nodes] Upgrading EKS Self-managed Nodes

4. EKS 1.26 → 1.27 → 1.28 Upgrade

5. Blue-Green Cluster Upgrades

 

AWS Workshop URL:

https://catalog.us-east-1.prod.workshops.aws/workshops/693bdee4-bc31-41d5-841f-54e3e54f8f4a/en-US

 

 


1. Verifying the Lab Environment

Accessing the Workshop

 

Accessing the IDE page

whoami
ec2-user

pwd
/home/ec2-user/environment

ec2-user:~/environment:$ export
declare -x ASSETS_BUCKET="ws-event-f4fef182-00c-us-west-2/d2117abb-06fa-4e89-8a6b-8e2b5d6fc697/assets/"
declare -x AWS_DEFAULT_REGION="us-west-2"
declare -x AWS_PAGER=""
declare -x AWS_REGION="us-west-2"
declare -x BROWSER="/usr/lib/code-server/lib/vscode/bin/helpers/browser.sh"
declare -x CLUSTER_NAME="eksworkshop-eksctl"
declare -x COLORTERM="truecolor"
declare -x EC2_PRIVATE_IP="192.168.0.20"
declare -x EKS_CLUSTER_NAME="eksworkshop-eksctl"
declare -x GIT_ASKPASS="/usr/lib/code-server/lib/vscode/extensions/git/dist/askpass.sh"
declare -x HISTCONTROL="ignoredups"
declare -x HISTSIZE="1000"
declare -x HOME="/home/ec2-user"
declare -x HOSTNAME="ip-192-168-0-20.us-west-2.compute.internal"
declare -x IDE_DOMAIN="d7eolsohq4yl3.cloudfront.net"
declare -x IDE_PASSWORD="OIqqFfzRIG6Ug6AwgsoA5wO0QgiXoU8z"
declare -x IDE_URL="https://d7eolsohq4yl3.cloudfront.net"
declare -x INSTANCE_IAM_ROLE_ARN="arn:aws:iam::335393502037:role/workshop-stack-IdeIdeRoleD654ADD4-RWOFFYCPb8vk"
declare -x INSTANCE_IAM_ROLE_NAME="workshop-stack-IdeIdeRoleD654ADD4-RWOFFYCPb8vk"
declare -x INVOCATION_ID="15d45d415ade4c3e8523f3aa2bf8d829"
declare -x JOURNAL_STREAM="8:43093"
declare -x LANG="C.UTF-8"
declare -x LESSOPEN="||/usr/bin/lesspipe.sh %s"
declare -x LOGNAME="ec2-user"
...


# Check the S3 bucket
aws s3 ls
2025-03-30 05:10:04 workshop-stack-tfstatebackendbucketf0fc9a9d-oof4tmghmnjw




# Check environment variables (including Terraform) and shortcut aliases
cat ~/.bashrc
...
export EKS_CLUSTER_NAME=eksworkshop-eksctl
export CLUSTER_NAME=eksworkshop-eksctl
export AWS_DEFAULT_REGION=us-west-2
export REGION=us-west-2
export AWS_REGION=us-west-2
export TF_VAR_eks_cluster_id=eksworkshop-eksctl
export TF_VAR_aws_region=us-west-2
export ASSETS_BUCKET=ws-event-069b6df5-757-us-west-2/d2117abb-06fa-4e89-8a6b-8e2b5d6fc697/assets/
export TF_STATE_S3_BUCKET=workshop-stack-tfstatebackendbucketf0fc9a9d-isuyoohioh8p
alias k=kubectl
alias kgn="kubectl get nodes -o wide"
alias kgs="kubectl get svc -o wide"
alias kgd="kubectl get deploy -o wide"
alias kgsa="kubectl get svc -A -o wide"
alias kgda="kubectl get deploy -A -o wide"
alias kgp="kubectl get pods -o wide"
alias kgpa="kubectl get pods -A -o wide"
aws eks update-kubeconfig --name eksworkshop-eksctl
export AWS_PAGER=""

# EKS platform version: eks.44
aws eks describe-cluster --name $EKS_CLUSTER_NAME | jq
    "createdAt": "2025-03-30T05:13:57.577000+00:00",
    "version": "1.25",

 

Checking EKS cluster information

#
eksctl get cluster
NAME                    REGION          EKSCTL CREATED
eksworkshop-eksctl      us-west-2       False

ec2-user:~/environment:$ eksctl get nodegroup --cluster $CLUSTER_NAME
CLUSTER                 NODEGROUP                               STATUS  CREATED                 MIN SIZE        MAX SIZE        DESIRED CAPACITY        INSTANCE TYPE          IMAGE ID ASG NAME                                                                        TYPE
eksworkshop-eksctl      blue-mng-2025033005234453330000002a     ACTIVE  2025-03-30T05:23:46Z    1               2               1                       m5.large,m6a.large,m6i.large    AL2_x86_64      eks-blue-mng-2025033005234453330000002a-cecaf2c0-c09e-547e-f241-781ba0f4ac3b    managed
eksworkshop-eksctl      initial-2025033005234453970000002c      ACTIVE  2025-03-30T05:23:47Z    2               10              2                       m5.large,m6a.large,m6i.large    AL2_x86_64      eks-initial-2025033005234453970000002c-f6caf2c0-c0a1-59fc-6729-fc06bc9a51d9     managed

eksctl get fargateprofile --cluster $CLUSTER_NAME
NAME            SELECTOR_NAMESPACE      SELECTOR_LABELS POD_EXECUTION_ROLE_ARN                                                  SUBNETS       TAGS                                                                                                                                     STATUS
fp-profile      assets                  <none>          arn:aws:iam::271345173787:role/fp-profile-2025032502301523520000001f    subnet-0aeb12f673d69f7c5,subnet-047ab61ad85c50486,subnet-01bbd11a892aec6ee     Blueprint=eksworkshop-eksctl,GithubRepo=github.com/aws-ia/terraform-aws-eks-blueprints,karpenter.sh/discovery=eksworkshop-eksctl       ACTIVE

eksctl get addon --cluster $CLUSTER_NAME
NAME                    VERSION                 STATUS  ISSUES  IAMROLE                                                                                         UPDATE AVAILABLECONFIGURATION VALUES
aws-ebs-csi-driver      v1.41.0-eksbuild.1      ACTIVE  0       arn:aws:iam::335393502037:role/eksworkshop-eksctl-ebs-csi-driver-2025033005230985480000001d
coredns                 v1.8.7-eksbuild.10      ACTIVE  0                                                                                                       v1.9.3-eksbuild.22,v1.9.3-eksbuild.21,v1.9.3-eksbuild.19,v1.9.3-eksbuild.17,v1.9.3-eksbuild.15,v1.9.3-eksbuild.11,v1.9.3-eksbuild.10,v1.9.3-eksbuild.9,v1.9.3-eksbuild.7,v1.9.3-eksbuild.6,v1.9.3-eksbuild.5,v1.9.3-eksbuild.3,v1.9.3-eksbuild.2
kube-proxy              v1.25.16-eksbuild.8     ACTIVE  0
vpc-cni                 v1.19.3-eksbuild.1      ACTIVE  0

#
kubectl get node --label-columns=eks.amazonaws.com/capacityType,node.kubernetes.io/lifecycle,karpenter.sh/capacity-type,eks.amazonaws.com/compute-type

kubectl get node -L eks.amazonaws.com/nodegroup,karpenter.sh/nodepool

kubectl get nodepools
NAME      NODECLASS
default   default

kubectl get nodeclaims -o yaml
kubectl get nodeclaims

kubectl get node --label-columns=node.kubernetes.io/instance-type,kubernetes.io/arch,kubernetes.io/os,topology.kubernetes.io/zone

kubectl cluster-info

kubectl get nodes -owide

 

#
kubectl get crd
NAME                                         CREATED AT
applications.argoproj.io                     2025-03-30T05:27:22Z
applicationsets.argoproj.io                  2025-03-30T05:27:22Z
appprojects.argoproj.io                      2025-03-30T05:27:22Z
cninodes.vpcresources.k8s.aws                2025-03-30T05:18:51Z
ec2nodeclasses.karpenter.k8s.aws             2025-03-30T05:27:48Z
eniconfigs.crd.k8s.amazonaws.com             2025-03-30T05:21:09Z
ingressclassparams.elbv2.k8s.aws             2025-03-30T05:27:48Z
nodeclaims.karpenter.sh                      2025-03-30T05:27:48Z
nodepools.karpenter.sh                       2025-03-30T05:27:48Z
policyendpoints.networking.k8s.aws           2025-03-30T05:18:51Z
securitygrouppolicies.vpcresources.k8s.aws   2025-03-30T05:18:51Z
targetgroupbindings.elbv2.k8s.aws            2025-03-30T05:27:48Z

helm list -A
NAME                            NAMESPACE       REVISION        UPDATED                                 STATUS          CHART                                   APP VERSION
argo-cd                         argocd          1               2025-03-30 05:27:20.014634881 +0000 UTC deployed        argo-cd-5.55.0                          v2.10.0    
aws-efs-csi-driver              kube-system     1               2025-03-30 05:27:49.222504269 +0000 UTC deployed        aws-efs-csi-driver-2.5.6                1.7.6      
aws-load-balancer-controller    kube-system     1               2025-03-30 05:27:50.87934255 +0000 UTC  deployed        aws-load-balancer-controller-1.7.1      v2.7.1     
karpenter                       karpenter       1               2025-03-30 05:27:48.374818264 +0000 UTC deployed        karpenter-0.37.0                        0.37.0     
metrics-server                  kube-system     1               2025-03-30 05:27:20.001466714 +0000 UTC deployed        metrics-server-3.12.0                   0.7.0     

kubectl get applications -n argocd
NAME        SYNC STATUS   HEALTH STATUS
apps        Synced        Healthy
assets      Synced        Healthy
carts       Synced        Healthy
catalog     Synced        Healthy
checkout    Synced        Healthy
karpenter   Synced        Healthy
orders      Synced        Healthy
other       Synced        Healthy
rabbitmq    Synced        Healthy
ui          OutOfSync     Healthy

#
kubectl get pod -A
NAMESPACE     NAME                                                        READY   STATUS    RESTARTS       AGE
argocd        argo-cd-argocd-application-controller-0                     1/1     Running   0             23h
argocd        argo-cd-argocd-applicationset-controller-74d9c9c5c7-zxm2x   1/1     Running   0             23h
argocd        argo-cd-argocd-dex-server-6dbbd57479-jj7kn                  1/1     Running   0             23h
argocd        argo-cd-argocd-notifications-controller-fb4b954d5-xr446     1/1     Running   0             23h
argocd        argo-cd-argocd-redis-76b4c599dc-j7xmr                       1/1     Running   0             23h
argocd        argo-cd-argocd-repo-server-6b777b579d-w7pjp                 1/1     Running   0             23h
argocd        argo-cd-argocd-server-86bdbd7b89-wkkr2                      1/1     Running   0             23h
assets        assets-7ccc84cb4d-dwtnx                                     1/1     Running   0             23h
carts         carts-7ddbc698d8-p2xpl                                      1/1     Running   0             23h
carts         carts-dynamodb-6594f86bb9-2r8p5                             1/1     Running   0             23h
catalog       catalog-857f89d57d-z86t4                                    1/1     Running   3 (23h ago)   23h
catalog       catalog-mysql-0                                             1/1     Running   0             23h
checkout      checkout-558f7777c-b7pjm                                    1/1     Running   0             23h
checkout      checkout-redis-f54bf7cb5-nkhhn                              1/1     Running   0             23h
karpenter     karpenter-67cff98f65-lwssg                                  1/1     Running   1 (23h ago)   23h
karpenter     karpenter-67cff98f65-n4vzc                                  1/1     Running   1 (23h ago)   23h
kube-system   aws-load-balancer-controller-669694d6f9-6rkvn               1/1     Running   0             23h
kube-system   aws-load-balancer-controller-669694d6f9-8rvgr               1/1     Running   0             23h
kube-system   aws-node-6znn9                                              2/2     Running   0             23h
kube-system   aws-node-ck6ch                                              2/2     Running   0             23h
kube-system   aws-node-cl8jm                                              2/2     Running   0             23h
kube-system   aws-node-gk9mt                                              2/2     Running   0             23h
kube-system   aws-node-pqfph                                              2/2     Running   0             23h
kube-system   aws-node-rprqz                                              2/2     Running   0             23h
kube-system   coredns-98f76fbc4-hgmm6                                     1/1     Running   0             23h
kube-system   coredns-98f76fbc4-lrqk2                                     1/1     Running   0             23h
kube-system   ebs-csi-controller-6b575b5f4d-69g7f                         6/6     Running   0             23h
kube-system   ebs-csi-controller-6b575b5f4d-ql2lv                         6/6     Running   0             23h
kube-system   ebs-csi-node-5fgcf                                          3/3     Running   0             23h
kube-system   ebs-csi-node-5xqsz                                          3/3     Running   0             23h
kube-system   ebs-csi-node-kc9vk                                          3/3     Running   0             23h
kube-system   ebs-csi-node-mw7n6                                          3/3     Running   0             23h
kube-system   ebs-csi-node-qg9h4                                          3/3     Running   0             23h
kube-system   ebs-csi-node-rj77x                                          3/3     Running   0             23h
kube-system   efs-csi-controller-5d74ddd947-6nfjv                         3/3     Running   0             23h
kube-system   efs-csi-controller-5d74ddd947-r2kbc                         3/3     Running   0             23h
kube-system   efs-csi-node-4px46                                          3/3     Running   0             23h
kube-system   efs-csi-node-9zrd6                                          3/3     Running   0             23h
kube-system   efs-csi-node-crvq4                                          3/3     Running   0             23h
kube-system   efs-csi-node-fpdq2                                          3/3     Running   0             23h
kube-system   efs-csi-node-t74nj                                          3/3     Running   0             23h
kube-system   efs-csi-node-tr8lt                                          3/3     Running   0             23h
kube-system   kube-proxy-66tbg                                            1/1     Running   0             23h
kube-system   kube-proxy-82t2q                                            1/1     Running   0             23h
kube-system   kube-proxy-8jvnt                                            1/1     Running   0             23h
kube-system   kube-proxy-ffm4t                                            1/1     Running   0             23h
kube-system   kube-proxy-gtl2g                                            1/1     Running   0             23h
kube-system   kube-proxy-qnb26                                            1/1     Running   0             23h
kube-system   metrics-server-785cd745cd-lw7f4                             1/1     Running   0             23h
orders        orders-5b97745747-ckmpb                                     1/1     Running   2 (23h ago)   23h
orders        orders-mysql-b9b997d9d-87wbj                                1/1     Running   0             23h
rabbitmq      rabbitmq-0                                                  1/1     Running   0             23h
ui            ui-5dfb7d65fc-ph495                                         1/1     Running   0             23h

#
kubectl get pdb -A
NAMESPACE     NAME                           MIN AVAILABLE   MAX UNAVAILABLE   ALLOWED DISRUPTIONS   AGE
karpenter     karpenter                      N/A             1                 1                     145m
kube-system   aws-load-balancer-controller   N/A             1                 1                     145m
kube-system   coredns                        N/A             1                 1                     153m
kube-system   ebs-csi-controller             N/A             1                 1                     145m

#
kubectl get svc -n argocd argo-cd-argocd-server
NAME                    TYPE           CLUSTER-IP      EXTERNAL-IP                                                                   PORT(S)                      AGE
argo-cd-argocd-server   LoadBalancer   172.20.10.210   k8s-argocd-argocdar-eb7166e616-ed2069d8c15177c9.elb.us-west-2.amazonaws.com   80:32065/TCP,443:31156/TCP   150m

ec2-user:~/environment:$ kubectl get targetgroupbindings -n argocd
NAME                             SERVICE-NAME            SERVICE-PORT   TARGET-TYPE   AGE
k8s-argocd-argocdar-52112fd4f3   argo-cd-argocd-server   443            ip            23h
k8s-argocd-argocdar-52f5e79a85   argo-cd-argocd-server   80             ip            23h

 

Convenience tooling for the lab: kube-ops-view, krew, eks-node-viewer

# kube-ops-view
helm repo add geek-cookbook https://geek-cookbook.github.io/charts/
helm repo update
helm install kube-ops-view geek-cookbook/kube-ops-view --version 1.2.2 --namespace kube-system

#
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    service.beta.kubernetes.io/aws-load-balancer-type: external
  labels:
    app.kubernetes.io/instance: kube-ops-view
    app.kubernetes.io/name: kube-ops-view
  name: kube-ops-view-nlb
  namespace: kube-system
spec:
  type: LoadBalancer
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app.kubernetes.io/instance: kube-ops-view
    app.kubernetes.io/name: kube-ops-view
EOF


# Get the kube-ops-view URL (scale 1.5 or 1.3)
kubectl get svc -n kube-system kube-ops-view-nlb -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' | awk '{ print "KUBE-OPS-VIEW URL = http://"$1"/#scale=1.5"}'
kubectl get svc -n kube-system kube-ops-view-nlb -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' | awk '{ print "KUBE-OPS-VIEW URL = http://"$1"/#scale=1.3"}'

URL : http://k8s-kubesyst-kubeopsv-b40ca8445f-87386b268d1541c9.elb.us-west-2.amazonaws.com/#scale=1.5



# Install krew
(
  set -x; cd "$(mktemp -d)" &&
  OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
  ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
  KREW="krew-${OS}_${ARCH}" &&
  curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
  tar zxvf "${KREW}.tar.gz" &&
  ./"${KREW}" install krew
)

# PATH
export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"
vi ~/.bashrc
-----------
export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"
-----------

ec2-user:~/environment:$ . ~/.bashrc 
Updated context arn:aws:eks:us-west-2:335393502037:cluster/eksworkshop-eksctl in /home/ec2-user/.kube/config


# Install plugins
kubectl krew install ctx ns df-pv get-all neat stern oomd whoami rbac-tool rolesum
kubectl krew list

PLUGIN     VERSION
ctx        v0.9.5
df-pv      v0.3.0
get-all    v1.3.8
krew       v0.4.5
neat       v2.0.4
ns         v0.9.5
oomd       v0.0.7
rbac-tool  v1.20.0
rolesum    v1.5.5
stern      v1.32.0
whoami     v0.0.46

#
kubectl df-pv
 PV NAME                                   PVC NAME            NAMESPACE  NODE NAME                                  POD NAME                        VOLUME MOUNT NAME  SIZE  USED   AVAILABLE  %USED  IUSED  IFREE   %IUSED 
 pvc-f70e3d82-cee3-47ab-a9a6-bbd8ae4adb97  order-mysql-pvc     orders     ip-10-0-10-123.us-west-2.compute.internal  orders-mysql-b9b997d9d-87wbj    data               3Gi   201Mi  3Gi        5.09   199    261945  0.08   
 pvc-b9c2d769-4f4e-488c-8d9b-c9fb97241f8b  catalog-mysql-pvc   catalog    ip-10-0-8-126.us-west-2.compute.internal   catalog-mysql-0                 data               3Gi   201Mi  3Gi        5.09   201    261943  0.08   
 pvc-3ca41e94-601e-4194-ae26-b52a728c0981  checkout-redis-pvc  checkout   ip-10-0-7-203.us-west-2.compute.internal   checkout-redis-f54bf7cb5-nkhhn  data               3Gi   24Ki   3Gi        0.00   11     262133  0.00   

#
kubectl whoami --all
User:   admin
Groups:
        system:masters
        system:authenticated
ARN:
        arn:aws:sts::335393502037:assumed-role/workshop-stack-IdeIdeRoleD654ADD4-RWOFFYCPb8vk/i-0fb63d774a16f66b5

wget -O eks-node-viewer https://github.com/awslabs/eks-node-viewer/releases/download/v0.7.1/eks-node-viewer_Linux_x86_64
chmod +x eks-node-viewer
sudo mv -v eks-node-viewer /usr/local/bin
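
Once installed, eks-node-viewer is handy for watching node churn during the upgrades below. A usage sketch (the --resources flag is documented in the project's README):

# Watch nodes plus requested vs. allocatable CPU/memory while node groups roll
eks-node-viewer --resources cpu,memory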

 

Reviewing the Terraform deployed for the lab

# Check the files
ec2-user:~/environment:$ ls -lrt terraform/
total 40
-rw-r--r--. 1 ec2-user ec2-user  625 Aug 27  2024 vpc.tf
-rw-r--r--. 1 ec2-user ec2-user  542 Aug 27  2024 versions.tf
-rw-r--r--. 1 ec2-user ec2-user  353 Aug 27  2024 variables.tf
-rw-r--r--. 1 ec2-user ec2-user  282 Aug 27  2024 outputs.tf
-rw-r--r--. 1 ec2-user ec2-user 1291 Aug 27  2024 gitops-setup.tf
-rw-r--r--. 1 ec2-user ec2-user 4512 Feb  5 01:15 base.tf
-rw-r--r--. 1 ec2-user ec2-user 5953 Feb 13 19:55 addons.tf
-rw-r--r--. 1 ec2-user ec2-user   33 Mar 25 02:26 backend_override.tf



# There is no tfstate file in this path.
ec2-user:~/environment:$ terraform output

│ Warning: No outputs found

#
aws s3 ls
2025-03-28 11:56:25 workshop-stack-tfstatebackendbucketf0fc9a9d-2phddn8usnlw

aws s3 ls s3://workshop-stack-tfstatebackendbucketf0fc9a9d-2phddn8usnlw
2025-03-28 12:18:23     940996 terraform.tfstate

# (Optional) Copy terraform.tfstate and open it in the IDE server
aws s3 cp s3://workshop-stack-tfstatebackendbucketf0fc9a9d-2phddn8usnlw/terraform.tfstate .



# Edit backend_override.tf
terraform { 
  backend "s3" {
    bucket = "workshop-stack-tfstatebackendbucketf0fc9a9d-2phddn8usnlw"
    region = "us-west-2"
    key    = "terraform.tfstate"
  }
}

# Verify
terraform state list
terraform output
configure_kubectl = "aws eks --region us-west-2 update-kubeconfig --name eksworkshop-eksctl"

 

Application: UI Service (NLB) setup - ArgoCD

#
cd ~/environment
git clone codecommit::${REGION}://eks-gitops-repo

#
sudo yum install tree -y
tree eks-gitops-repo/ -L 2
eks-gitops-repo/
├── app-of-apps
│   ├── Chart.yaml
│   ├── templates
│   └── values.yaml
└── apps
    ├── assets
    ├── carts
    ├── catalog
    ├── checkout
    ├── karpenter
    ├── kustomization.yaml
    ├── orders
    ├── other
    ├── rabbitmq
    └── ui

# Login to the ArgoCD console using credentials from the following commands:
export ARGOCD_SERVER=$(kubectl get svc argo-cd-argocd-server -n argocd -o json | jq --raw-output '.status.loadBalancer.ingress[0].hostname')
echo "ArgoCD URL: http://${ARGOCD_SERVER}"
ArgoCD URL: http://k8s-argocd-argocdar-623c67e87f-cb657c014d86670e.elb.us-west-2.amazonaws.com

export ARGOCD_USER="admin"
export ARGOCD_PWD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
echo "Username: ${ARGOCD_USER}"
echo "Password: ${ARGOCD_PWD}"
Username: admin
Password: YVFM2RNIiMJEbZCF

 

Verifying with the ArgoCD CLI
#
argocd login ${ARGOCD_SERVER} --username ${ARGOCD_USER} --password ${ARGOCD_PWD} --insecure --skip-test-tls --grpc-web
'admin:login' logged in successfully
Context 'k8s-argocd-argocdar-623c67e87f-cb657c014d86670e.elb.us-west-2.amazonaws.com' updated

#
argocd repo list
TYPE  NAME  REPO                                                                     INSECURE  OCI    LFS    CREDS  STATUS      MESSAGE  PROJECT
git         https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo false     false  false  true   Successful   

#
argocd app list
argocd/apps       https://kubernetes.default.svc default  Synced     Healthy  Auto        <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo app-of-apps     
argocd/assets     https://kubernetes.default.svc default  Synced     Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/assets     main
argocd/carts      https://kubernetes.default.svc default  Synced     Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/carts      main
argocd/catalog    https://kubernetes.default.svc default  Synced     Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/catalog    main
argocd/checkout   https://kubernetes.default.svc default  Synced     Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/checkout   main
argocd/karpenter  https://kubernetes.default.svc default  Synced     Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/karpenter  main
argocd/orders     https://kubernetes.default.svc default  Synced     Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/orders     main
argocd/other      https://kubernetes.default.svc default  Synced     Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/other      main
argocd/rabbitmq   https://kubernetes.default.svc default  Synced     Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/rabbitmq   main
argocd/ui         https://kubernetes.default.svc default  OutOfSync  Healthy  Auto-Prune  <none>      https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo apps/ui         main

argocd app get apps
Name:               argocd/apps
Project:            default
Server:             https://kubernetes.default.svc
Namespace:          
URL:                https://k8s-argocd-argocdar-eb7166e616-ed2069d8c15177c9.elb.us-west-2.amazonaws.com/applications/apps
Source:
- Repo:             https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo
  Target:           
  Path:             app-of-apps
SyncWindow:         Sync Allowed
Sync Policy:        Automated
Sync Status:        Synced to  (acc257a)
Health Status:      Healthy

GROUP        KIND         NAMESPACE  NAME       STATUS  HEALTH  HOOK  MESSAGE
argoproj.io  Application  argocd     karpenter  Synced                application.argoproj.io/karpenter created
argoproj.io  Application  argocd     carts      Synced                application.argoproj.io/carts created
argoproj.io  Application  argocd     assets     Synced                application.argoproj.io/assets created
argoproj.io  Application  argocd     catalog    Synced                application.argoproj.io/catalog created
argoproj.io  Application  argocd     checkout   Synced                application.argoproj.io/checkout created
argoproj.io  Application  argocd     rabbitmq   Synced                application.argoproj.io/rabbitmq created
argoproj.io  Application  argocd     other      Synced                application.argoproj.io/other created
argoproj.io  Application  argocd     ui         Synced                application.argoproj.io/ui created
argoproj.io  Application  argocd     orders     Synced                application.argoproj.io/orders created

argocd app get carts
Name:               argocd/carts
Project:            default
Server:             https://kubernetes.default.svc
Namespace:          
URL:                https://k8s-argocd-argocdar-eb7166e616-ed2069d8c15177c9.elb.us-west-2.amazonaws.com/applications/carts
Source:
- Repo:             https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo
  Target:           main
  Path:             apps/carts
SyncWindow:         Sync Allowed
Sync Policy:        Automated (Prune)
Sync Status:        Synced to main (acc257a)
Health Status:      Healthy

GROUP  KIND            NAMESPACE  NAME            STATUS  HEALTH   HOOK  MESSAGE
       Namespace                  carts           Synced                 namespace/carts created
       ServiceAccount  carts      carts           Synced                 serviceaccount/carts created
       ConfigMap       carts      carts           Synced                 configmap/carts created
       Service         carts      carts-dynamodb  Synced  Healthy        service/carts-dynamodb created
       Service         carts      carts           Synced  Healthy        service/carts created
apps   Deployment      carts      carts           Synced  Healthy        deployment.apps/carts created
apps   Deployment      carts      carts-dynamodb  Synced  Healthy        deployment.apps/carts-dynamodb created

#
argocd app get carts -o yaml
...
spec:
  destination:
    server: https://kubernetes.default.svc
  ignoreDifferences:
  - group: apps
    jsonPointers:
    - /spec/replicas
    - /metadata/annotations/deployment.kubernetes.io/revision
    kind: Deployment
  - group: autoscaling
    jsonPointers:
    - /status
    kind: HorizontalPodAutoscaler
  project: default
  source:
    path: apps/carts
    repoURL: https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo
    targetRevision: main
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
    - RespectIgnoreDifferences=true
...

argocd app get ui -o yaml
...
spec:
  destination:
    server: https://kubernetes.default.svc
  ignoreDifferences:
  - group: apps
    jsonPointers:
    - /spec/replicas
    - /metadata/annotations/deployment.kubernetes.io/revision
    kind: Deployment
  - group: autoscaling
    jsonPointers:
    - /status
    kind: HorizontalPodAutoscaler
  project: default
  source:
    path: apps/ui
    repoURL: https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo
    targetRevision: main
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
    - RespectIgnoreDifferences=true
...

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    service.beta.kubernetes.io/aws-load-balancer-type: external
  labels:
    app.kubernetes.io/instance: ui
    app.kubernetes.io/name: ui
  name: ui-nlb
  namespace: ui
spec:
  type: LoadBalancer
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app.kubernetes.io/instance: ui
    app.kubernetes.io/name: ui
EOF


# Get the UI URL
kubectl get svc -n ui ui-nlb -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' | awk '{ print "UI URL = http://"$1""}'

 


Preparing for Cluster Upgrades

Before starting a cluster upgrade, confirm the following requirements.

Amazon EKS requires up to five available IP addresses in the subnets you specified when you created the cluster.

The cluster's AWS Identity and Access Management (IAM) role and security group must exist in your AWS account.

If secrets encryption is enabled, the cluster IAM role must have permission to use the AWS Key Management Service (AWS KMS) key.
 
Upgrade workflow

Identify major updates to the Amazon EKS and Kubernetes versions (Identify)

Understand the deprecation policy and refactor manifests accordingly (Understand, Refactor)

Update the EKS control plane and data plane using the right upgrade strategy (Update)

Finally, upgrade downstream add-on dependencies
+--------------------------------------+
|        Start Upgrade Process         |
+--------------------------------------+
                    |
                    |
+--------------------------------------+
| Identify Major Updates for Amazon    |
|      EKS and Kubernetes Versions     |
+--------------------------------------+
                    |
                    |
+--------------------------------------+
| Understand Deprecation Policy and    |
| Refactor Manifests Accordingly       |
+--------------------------------------+
                    |
                    |
+--------------------------------------+
| Update EKS Control Plane and Data    |
| Plane Using Right Upgrade Strategy   |
+--------------------------------------+
                    |
                    |
+--------------------------------------+
| Upgrade Downstream Add-on            |
|           Dependencies               |
+--------------------------------------+
                    |
                    |
+--------------------------------------+
|        Upgrade Completed             |
+--------------------------------------+

 

EKS Upgrade Insights

 - Reports API compatibility findings per version. (Insights can take up to a day to refresh after an upgrade.)
 - When jumping more than one minor version, wait for the insights to refresh before proceeding with the next upgrade.
 - aws eks list-insights --filter kubernetesVersions=1.26 --cluster-name $CLUSTER_NAME | jq .
 - aws eks describe-insight --region $AWS_REGION --id ... shows the details of a single insight (see the sketch below).
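
A minimal sketch chaining the two commands, assuming the list-insights response exposes each finding's id at insights[].id:

# Grab the first insight for the target version and inspect it in detail
INSIGHT_ID=$(aws eks list-insights --cluster-name $CLUSTER_NAME \
  --filter kubernetesVersions=1.26 --query 'insights[0].id' --output text)
aws eks describe-insight --cluster-name $CLUSTER_NAME --id $INSIGHT_ID | jq .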

 

Add-on upgrades

 - When the cluster version is upgraded, the add-on versions must be upgraded as well.

Refer to the examples below for the common add-ons and their related upgrade documentation.
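
Terraform drives the add-on bumps later in this post; as an alternative sketch, the same update can be done with the AWS CLI (the version string is one of the candidates listed by eksctl get addon above, not necessarily the one to pick):

# Update a managed add-on directly; OVERWRITE resolves config conflicts toward the add-on defaults
aws eks update-addon --cluster-name $CLUSTER_NAME --addon-name coredns \
  --addon-version v1.9.3-eksbuild.22 --resolve-conflicts OVERWRITE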

 

Prerequisites before a Control Plane upgrade

  • Check the basic requirements before upgrading the EKS control plane.
    AWS requires specific resources in your account to complete the upgrade process. Without them, the cluster cannot be upgraded. A control plane upgrade needs the following resources.
    Check available IP addresses
    • Upgrading an Amazon EKS cluster requires at least five free IP addresses in the subnets specified at cluster creation. To confirm your subnets have enough free IPs, you can run a command like the sketch below.
    New subnet requirements
    • Availability Zones (AZs): new subnets must be in the same set of AZs originally selected during cluster creation.
    • VPC membership: new subnets must belong to the same VPC associated with the cluster.
    Expanding the IP pool (optional)
    • If there still aren't enough IPs after updating the subnets, consider attaching an additional CIDR block to the VPC. This grows the pool of usable IP addresses. You can:
      • Add a private CIDR block: introduce a new private IP range that conforms to RFC 1918.
      • Update subnets for the new CIDR: update the cluster subnets to reflect the newly configured CIDR block within the VPC.
    • Following these guidelines ensures the EKS cluster has the IP resources it needs for a smooth Kubernetes version upgrade.
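
A minimal sketch for the IP check, pulling the subnet IDs off the cluster itself:

# Count available IPs in each of the cluster's subnets
SUBNETS=$(aws eks describe-cluster --name $CLUSTER_NAME \
  --query 'cluster.resourcesVpcConfig.subnetIds' --output text)
aws ec2 describe-subnets --subnet-ids $SUBNETS \
  --query 'Subnets[*].[SubnetId,AvailabilityZone,AvailableIpAddressCount]' --output table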
    Check the EKS IAM Role
    To confirm the IAM role is available and that the account has the correct assume role policy, you can run commands like the sketch below.
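
A sketch (for an EKS cluster role, the trust policy should allow eks.amazonaws.com to assume the role):

# Resolve the cluster role name from its ARN, then inspect its trust policy
CLUSTER_ROLE=$(aws eks describe-cluster --name $CLUSTER_NAME \
  --query 'cluster.roleArn' --output text | awk -F/ '{print $NF}')
aws iam get-role --role-name $CLUSTER_ROLE --query 'Role.AssumeRolePolicyDocument'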
    EKS Security Groups: what you need to know
    • When you create a cluster, Amazon EKS creates a security group named eks-cluster-sg-my-cluster-uniqueID.
    • This security group has the default rules described below.
    Amazon EKS security groups: associations and rules
    Details on how Amazon EKS uses security groups:
    1. Automatic tagging
      : EKS injects specific tags into the security group it creates for the cluster. These tags are required for operation and are re-added if removed.
    2. Resource associations
      : This security group is automatically attached to several resources.
      • Network interfaces (ENIs): provisioning a cluster creates 2-4 ENIs, which are associated with the EKS-created security group.
      • Managed node group ENIs: the ENIs of every managed node group you create are also attached to this security group.
    3. Default security rules
      : Initially the security group allows unrestricted communication.
      • Inbound traffic: all traffic can flow freely between the cluster control plane and the nodes.
      • Outbound traffic: nodes can send traffic to any destination.
    4. Custom security groups (optional)
      : You can optionally specify your own security groups during cluster creation. If you do:
      • ENI association: EKS associates these custom groups with the cluster's ENIs.
      • Node group limitation: however, these custom groups are not applied to the node groups you create; you must manage those associations separately.
    By default, EKS provides a permissive default security group, but it lets you implement stricter security policies with custom groups. You can inspect the cluster security group with the sketch below.
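
A sketch:

# Look up the EKS-managed cluster security group and dump its rules
CLUSTER_SG=$(aws eks describe-cluster --name $CLUSTER_NAME \
  --query 'cluster.resourcesVpcConfig.clusterSecurityGroupId' --output text)
aws ec2 describe-security-groups --group-ids $CLUSTER_SG \
  --query 'SecurityGroups[0].{Inbound:IpPermissions,Outbound:IpPermissionsEgress}'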

    Hardening the EKS cluster
    Want to restrict traffic flow within your EKS cluster? Here is what you should know.
    1. The default rules are open: by default, EKS allows unrestricted communication between cluster components.
    2. Customize outbound traffic: to limit the open ports, remove the default outbound rules and implement these minimum requirements.
      • Node-to-node communication: define rules for every protocol and port the nodes use to talk to each other.
    3. Inbound rule persistence: if you remove the default inbound rules, EKS automatically recreates them during updates.
    Additional considerations
    1. Outbound internet access (optional)
      : If nodes need internet access (for example, for EKS API calls or initial registration), configure outbound rules for the specific ports. Private clusters may not need internet access.
    2. Container image access
      : Nodes must reach a container registry (such as Amazon ECR or Docker Hub) to pull images. Create rules for the appropriate registry endpoints.
    3. Separate rules for IPv4/IPv6
      : If the VPC uses both address families, each needs its own rules.
    Testing is critical: before rolling changes out to a production cluster, test all pods thoroughly to confirm they work correctly under the new security rules.

Choosing between In-place and Blue-Green upgrade strategies

  1. Downtime tolerance: consider the acceptable level of downtime for your applications and services during the upgrade process.
  2. Upgrade complexity: evaluate the complexity of your application architecture, dependencies, and stateful components.
  3. Kubernetes version gap: assess the gap between the current and target Kubernetes versions, and the compatibility of your applications and add-ons.
  4. Resource constraints: consider the infrastructure resources and budget available for maintaining multiple clusters during the upgrade. A canary strategy, similar to blue/green, minimizes this cost by scaling up the new cluster while scaling the old one down as workloads shift over.
  5. Team expertise: evaluate your team's expertise and familiarity with managing multiple clusters and implementing traffic-shifting strategies.

 

Upgrade tasks

 - The cluster is defined in Terraform, so the upgrade proceeds by modifying the Terraform code.

# Snapshot the current image versions to a file
kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort | uniq -c > 1.25.txt
kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort | uniq -c
      6 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.19.3-eksbuild.1
      6 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-network-policy-agent:v1.2.0-eksbuild.1
      8 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/aws-ebs-csi-driver:v1.41.0
      2 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/coredns:v1.8.7-eksbuild.10
      2 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/csi-attacher:v4.8.1-eks-1-32-7
      6 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/csi-node-driver-registrar:v2.13.0-eks-1-32-7
      2 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/csi-provisioner:v5.2.0-eks-1-32-7
      2 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/csi-resizer:v1.13.2-eks-1-32-7
      2 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/csi-snapshotter:v8.2.1-eks-1-32-7
      6 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/kube-proxy:v1.25.16-minimal-eksbuild.8
      8 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/livenessprobe:v2.14.0-eks-1-32-7
      8 amazon/aws-efs-csi-driver:v1.7.6
      1 amazon/dynamodb-local:1.13.1
      1 ghcr.io/dexidp/dex:v2.38.0
      1 hjacobs/kube-ops-view:20.4.0
      1 public.ecr.aws/aws-containers/retail-store-sample-assets:0.4.0
      1 public.ecr.aws/aws-containers/retail-store-sample-cart:0.7.0
      1 public.ecr.aws/aws-containers/retail-store-sample-catalog:0.4.0
      1 public.ecr.aws/aws-containers/retail-store-sample-checkout:0.4.0
      1 public.ecr.aws/aws-containers/retail-store-sample-orders:0.4.0
      1 public.ecr.aws/aws-containers/retail-store-sample-ui:0.4.0
      1 public.ecr.aws/bitnami/rabbitmq:3.11.1-debian-11-r0
      2 public.ecr.aws/docker/library/mysql:8.0
      1 public.ecr.aws/docker/library/redis:6.0-alpine
      1 public.ecr.aws/docker/library/redis:7.0.15-alpine
      2 public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v3.6.3-eks-1-29-2
      8 public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.11.0-eks-1-29-2
      6 public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.9.3-eks-1-29-2
      2 public.ecr.aws/eks/aws-load-balancer-controller:v2.7.1
      2 public.ecr.aws/karpenter/controller:0.37.0@sha256:157f478f5db1fe999f5e2d27badcc742bf51cc470508b3cebe78224d0947674f
      5 quay.io/argoproj/argocd:v2.10.0
      1 registry.k8s.io/metrics-server/metrics-server:v0.7.0


# Keep polling the endpoint from the IDE server or your own PC!
export UI_WEB=$(kubectl get svc -n ui ui-nlb -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'/actuator/health/liveness)
curl -s $UI_WEB ; echo
{"status":"UP"}

# Polling loop 1
UI_WEB=k8s-ui-uinlb-d75345d621-e2b7d1ff5cf09378.elb.us-west-2.amazonaws.com/actuator/health/liveness
while true; do curl -s $UI_WEB ; date; sleep 1; echo; done

# Polling loop 2: requires AWS CLI credentials
aws eks describe-cluster --name $EKS_CLUSTER_NAME | egrep 'version|endpoint"|issuer|platformVersion'
        "version": "1.25",
        "endpoint": "https://A77BDC5EEBAE5EC887F1747B6AE965B3.gr7.us-west-2.eks.amazonaws.com",
                "issuer": "https://oidc.eks.us-west-2.amazonaws.com/id/A77BDC5EEBAE5EC887F1747B6AE965B3"
        "platformVersion": "eks.44",

# Polling loop 2
while true; do curl -s $UI_WEB; date; aws eks describe-cluster --name eksworkshop-eksctl | egrep 'version|endpoint"|issuer|platformVersion'; echo ; sleep 2; echo; done

 

Changing the version to 1.26
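
The screenshots here captured the variables.tf edit. A minimal sketch of the same change, assuming the workshop's variables.tf defines the control-plane version in a cluster_version variable (mng_cluster_version for the data plane is bumped separately later):

cd ~/environment/terraform
# Bump only the control-plane version block; leave mng_cluster_version at 1.25 for now
sed -i '/variable "cluster_version"/,/}/ s/"1.25"/"1.26"/' variables.tf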





terraform plan

terraform plan -no-color > plan-output.txt



Run terraform apply -auto-approve

 

Checking status after the Control Plane upgrade





No connection issues observed (you could also drive traffic with nGrinder or another load-testing tool).


 

[2. Add-ons] Upgrading EKS Addons

CoreDNS (https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html)

kube-proxy (https://docs.aws.amazon.com/eks/latest/userguide/managing-kube-proxy.html)

VPC CNI (https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html)



eksctl get addon --cluster $CLUSTER_NAME

kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort | uniq -c

aws eks describe-addon-versions --addon-name coredns --kubernetes-version 1.26 --output table \
  --query "addons[].addonVersions[:10].{Version:addonVersion,DefaultVersion:compatibilities[0].defaultVersion}"

aws eks describe-addon-versions --addon-name kube-proxy --kubernetes-version 1.26 --output table \
  --query "addons[].addonVersions[:10].{Version:addonVersion,DefaultVersion:compatibilities[0].defaultVersion}"

Change the add-on versions in the Terraform code to the latest values returned above.


terraform plan -no-color | tee addon.txt
  # module.eks_blueprints_addons.aws_eks_addon.this["coredns"] will be updated in-place
  ~ resource "aws_eks_addon" "this" {
      ~ addon_version               = "v1.8.7-eksbuild.10" -> "v1.9.3-eksbuild.22"
        id                          = "eksworkshop-eksctl:coredns"
        tags                        = {
            "Blueprint"  = "eksworkshop-eksctl"
            "GithubRepo" = "github.com/aws-ia/terraform-aws-eks-blueprints"
        }
        # (11 unchanged attributes hidden)

        # (1 unchanged block hidden)
    }

  # module.eks_blueprints_addons.aws_eks_addon.this["kube-proxy"] will be updated in-place
  ~ resource "aws_eks_addon" "this" {
      ~ addon_version               = "v1.25.16-eksbuild.8" -> "v1.26.15-eksbuild.24"
        id                          = "eksworkshop-eksctl:kube-proxy"
        tags                        = {
            "Blueprint"  = "eksworkshop-eksctl"
            "GithubRepo" = "github.com/aws-ia/terraform-aws-eks-blueprints"
        }
        # (11 unchanged attributes hidden)

        # (1 unchanged block hidden)
    }


 

 

[3.1 Data plane - Managed node groups] Upgrading EKS Managed Node groups


Terraform is the method used here to run the actual upgrade!
#
cat base.tf
...
  eks_managed_node_group_defaults = {
    cluster_version = var.mng_cluster_version
  }

  eks_managed_node_groups = { # If no version is specified, the default above is used -> see variables.tf
    initial = {
      instance_types = ["m5.large", "m6a.large", "m6i.large"]
      min_size     = 2
      max_size     = 10
      desired_size = 2
      update_config = {
        max_unavailable_percentage = 35
      }
    }
    
    blue-mng={
      instance_types = ["m5.large", "m6a.large", "m6i.large"]
      cluster_version = "1.25"
      min_size     = 1
      max_size     = 2
      desired_size = 1
      update_config = {
        max_unavailable_percentage = 35
      }
      labels = {
        type = "OrdersMNG"
      }
      subnet_ids = [module.vpc.private_subnets[0]]
      taints = [
        {
          key    = "dedicated"
          value  = "OrdersApp"
          effect = "NO_SCHEDULE"
        }
      ]
    }

Fetch the AMI ID for Kubernetes version 1.25 and set the ami_id variable in variables.tf to the returned value.

aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.25/amazon-linux-2/recommended/image_id \
  --region $AWS_REGION --query "Parameter.Value" --output text
ami-0078a0f78fafda978


terraform plan
terraform apply -auto-approve

A custom AMI can also be used for the node group.



The initial managed node group has no cluster version defined, so it defaults to the cluster version set in eks_managed_node_group_defaults.

Check the AMI available for version 1.26:
#
aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.26/amazon-linux-2/recommended/image_id \
  --region $AWS_REGION --query "Parameter.Value" --output text
ami-086414611b43bb691

Change the code:
 - set the MNG version and the matching supported AMI


terraform apply -auto-approve




Delete the custom node group, then run terraform apply -auto-approve

 

[3.1-2 Data plane - Managed node groups: Blue-Green upgrade] Blue-Green approach for Managed Node group Upgrade
 - Create the green node group, increase the orders replicas to 2, then delete the blue side.



ec2-user:~/environment/terraform:$ kubectl get nodes -l type=OrdersMNG
NAME                                        STATUS   ROLES    AGE   VERSION
ip-10-0-10-123.us-west-2.compute.internal   Ready    <none>   25h   v1.25.16-eks-59bf375

ec2-user:~/environment:$ cat eks-gitops-repo/apps/orders/deployment.yaml
ec2-user:~/environment:$ cat eks-gitops-repo/apps/orders/deployment-mysql.yaml



Add the following to base.tf:
    green-mng={
      instance_types = ["m5.large", "m6a.large", "m6i.large"]
      subnet_ids = [module.vpc.private_subnets[0]]
      min_size     = 1
      max_size     = 2
      desired_size = 1
      update_config = {
        max_unavailable_percentage = 35
      }
      labels = {
        type = "OrdersMNG"
      }
      taints = [
        {
          key    = "dedicated"
          value  = "OrdersApp"
          effect = "NO_SCHEDULE"
        }
      ]
    }


-- Change replicas in the YAML file --
cd ~/environment/eks-gitops-repo/
sed -i 's/replicas: 1/replicas: 2/' apps/orders/deployment.yaml
git add apps/orders/deployment.yaml
git commit -m "Increase orders replicas 2"
git push

argocd app sync orders



Scaling to two replicas fails: the MySQL pod errors out (a volume issue).






Afterwards, scaling back down with kubectl scale deploy -n orders orders --replicas 1 looks necessary.

 

[3.2 Data plane - Karpenter nodes] Upgrading Karpenter managed nodes


#
aws ec2 describe-instances --query "Reservations[*].Instances[*].[Tags[?Key=='Name'].Value | [0], ImageId]" --filters "Name=tag:Blueprint,Values=eksworkshop-eksctl" --output table

----------------------------------------------
|              DescribeInstances             |
+------------------+-------------------------+
|  initial         |  ami-0078a0f78fafda978  |
|  custom          |  ami-0078a0f78fafda978  |
|  green-mng       |  ami-086414611b43bb691  |
|  blue-mng        |  ami-0078a0f78fafda978  |
|  custom          |  ami-086414611b43bb691  |
|  initial         |  ami-086414611b43bb691  |
|  initial         |  ami-086414611b43bb691  |
|  default-selfmng |  ami-0ee947a6f4880da75  |
|  initial         |  ami-0078a0f78fafda978  |
|  default-selfmng |  ami-0ee947a6f4880da75  |
|  initial         |  ami-086414611b43bb691  |
|  custom          |  ami-086414611b43bb691  |
|  custom          |  ami-086414611b43bb691  |
|  default-selfmng |  ami-0078a0f78fafda978  |
|  initial         |  ami-086414611b43bb691  |
+------------------+-------------------------+
# Nodes provisioned via the default node pool are on version v1.25.16-eks-59bf375.
kubectl get nodes -l team=checkout
NAME                                       STATUS   ROLES    AGE   VERSION
ip-10-0-7-203.us-west-2.compute.internal   Ready    <none>   25h   v1.25.16-eks-59bf375

# Check taints applied on the nodes.
kubectl get nodes -l team=checkout -o jsonpath="{range .items[*]}{.metadata.name} {.spec.taints}{\"\n\"}{end}"
kubectl get nodes -l team=checkout -o jsonpath="{range .items[*]}{.metadata.name} {.spec.taints[?(@.effect=='NoSchedule')]}{\"\n\"}{end}"
ip-10-0-39-95.us-west-2.compute.internal {"effect":"NoSchedule","key":"dedicated","value":"CheckoutApp"}

#
kubectl get pods -n checkout -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP            NODE                                       NOMINATED NODE   READINESS GATES
checkout-558f7777c-b7pjm         1/1     Running   0          25h   10.0.9.103    ip-10-0-7-203.us-west-2.compute.internal   <none>           <none>
checkout-redis-f54bf7cb5-nkhhn   1/1     Running   0          25h   10.0.11.203   ip-10-0-7-203.us-west-2.compute.internal   <none>           <none>
# Monitoring
kubectl get nodeclaim
kubectl get nodes -l team=checkout -o jsonpath="{range .items[*]}{.metadata.name} {.spec.taints}{\"\n\"}{end}"
kubectl get pods -n checkout -o wide
while true; do kubectl get nodeclaim; echo ; kubectl get nodes -l team=checkout; echo ; kubectl get nodes -l team=checkout -o jsonpath="{range .items[*]}{.metadata.name} {.spec.taints}{\"\n\"}{end}"; echo ; kubectl get pods -n checkout -o wide; echo ; date; sleep 1; echo; done

 



#
cd ~/environment/eks-gitops-repo
git add apps/checkout/deployment.yaml
git commit -m "scale checkout app"
git push --set-upstream origin main

# You can force the sync using the ArgoCD console or the following command:
argocd app sync checkout

# Scale directly on the live cluster
kubectl scale deploy checkout -n checkout --replicas 15

# Two nodes are currently on 1.25.16
# Karpenter scales based on the aggregate resource requirements of unscheduled pods; we now have two nodes provisioned via Karpenter.
ec2-user:~/environment/eks-gitops-repo:$ kubectl get nodes -l team=checkout
NAME                                       STATUS   ROLES    AGE   VERSION
ip-10-0-39-33.us-west-2.compute.internal   Ready    <none>   68s   v1.25.16-eks-59bf375
ip-10-0-7-203.us-west-2.compute.internal   Ready    <none>   25h   v1.25.16-eks-59bf375

# Let's get the AMI ID of the build for Kubernetes version 1.26.
 aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.26/amazon-linux-2/recommended/image_id \
    --region ${AWS_REGION} --query "Parameter.Value" --output text
ami-086414611b43bb691
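
The manifest edit itself isn't shown here. A sketch of the change, assuming the default EC2NodeClass pins the AMI via amiSelectorTerms (as the green-branch example later in this post does):

cd ~/environment/eks-gitops-repo
# Swap the pinned 1.25 AMI for the 1.26 one in the default EC2NodeClass
sed -i 's/ami-0078a0f78fafda978/ami-086414611b43bb691/' apps/karpenter/default-ec2nc.yaml
grep -n 'ami-' apps/karpenter/default-ec2nc.yaml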




After the change:

#
while true; do kubectl get nodeclaim; echo ; kubectl get nodes -l team=checkout; echo ; kubectl get nodes -l team=checkout -o jsonpath="{range .items[*]}{.metadata.name} {.spec.taints}{\"\n\"}{end}"; echo ; kubectl get pods -n checkout -o wide; echo ; date; sleep 1; echo; done

# Takes about 10 minutes (estimated), including the hands-on steps
cd ~/environment/eks-gitops-repo
git add apps/karpenter/default-ec2nc.yaml apps/karpenter/default-np.yaml
git commit -m "disruption changes"
git push --set-upstream origin main
argocd app sync karpenter

# Once Argo CD syncs the karpenter app, we can see the disruption event in the karpenter controller logs. It will then provision new nodes with Kubernetes version 1.26 and delete the old nodes.
kubectl -n karpenter logs deployment/karpenter -c controller --tail=33 -f
or
kubectl stern -n karpenter deployment/karpenter -c controller

The nodes are replaced with the 1.26 AMI (ami-086414611b43bb691):
ec2-user:~/environment/eks-gitops-repo:$ kubectl get nodes -l team=checkout
NAME                                        STATUS   ROLES    AGE   VERSION
ip-10-0-11-146.us-west-2.compute.internal   Ready    <none>   28s   v1.26.15-eks-59bf375
ip-10-0-46-204.us-west-2.compute.internal   Ready    <none>   91s   v1.26.15-eks-59bf375

 

[3.3 Data plane - Self-managed nodes] Upgrading EKS Self-managed Nodes

# Let's explore the self-managed nodes.
kubectl get nodes --show-labels | grep self-managed
ip-10-0-26-119.us-west-2.compute.internal           Ready    <none>   30h     v1.25.16-eks-59bf375   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=m5.large,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=us-west-2,failure-domain.beta.kubernetes.io/zone=us-west-2b,k8s.io/cloud-provider-aws=a94967527effcefb5f5829f529c0a1b9,kubernetes.io/arch=amd64,kubernetes.io/hostname=ip-10-0-26-119.us-west-2.compute.internal,kubernetes.io/os=linux,node.kubernetes.io/instance-type=m5.large,node.kubernetes.io/lifecycle=self-managed,team=carts,topology.ebs.csi.aws.com/zone=us-west-2b,topology.kubernetes.io/region=us-west-2,topology.kubernetes.io/zone=us-west-2b
ip-10-0-6-184.us-west-2.compute.internal            Ready    <none>   30h     v1.25.16-eks-59bf375   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=m5.large,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=us-west-2,failure-domain.beta.kubernetes.io/zone=us-west-2a,k8s.io/cloud-provider-aws=a94967527effcefb5f5829f529c0a1b9,kubernetes.io/arch=amd64,kubernetes.io/hostname=ip-10-0-6-184.us-west-2.compute.internal,kubernetes.io/os=linux,node.kubernetes.io/instance-type=m5.large,node.kubernetes.io/lifecycle=self-managed,team=carts,topology.ebs.csi.aws.com/zone=us-west-2a,topology.kubernetes.io/region=us-west-2,topology.kubernetes.io/zone=us-west-2a

# Verify if the pods are scheduled on self-managed nodes.
kubectl get pods -n carts -o wide
NAME                              READY   STATUS    RESTARTS   AGE   IP            NODE                                        NOMINATED NODE   READINESS GATES
carts-7ddbc698d8-zts9k            1/1     Running   0          30h   10.0.31.72    ip-10-0-26-119.us-west-2.compute.internal   <none>           <none>
carts-dynamodb-6594f86bb9-8pzk5   1/1     Running   0          30h   10.0.30.122   ip-10-0-26-119.us-west-2.compute.internal   <none>           <none>

# Let's perform an in-place upgrade on the self-managed nodes.
To upgrade self-managed nodes, you must update the AMI in their launch template.
In this workshop the self-managed nodes were created with Terraform,
so we can update the AMI in /home/ec2-user/environment/terraform/base.tf and apply the change.
But first, let's look up the latest AMI for Kubernetes version 1.26.
aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.26/amazon-linux-2/recommended/image_id --region $AWS_REGION --query "Parameter.Value" --output text
ami-086414611b43bb691

Change the AMI:



ec2-user:~/environment/terraform:$ kubectl get nodes -l node.kubernetes.io/lifecycle=self-managed
NAME                                       STATUS   ROLES    AGE   VERSION
ip-10-0-30-13.us-west-2.compute.internal   Ready    <none>   80m   v1.25.16-eks-59bf375
ip-10-0-38-76.us-west-2.compute.internal   Ready    <none>   17s   v1.26.15-eks-59bf375
ip-10-0-6-105.us-west-2.compute.internal   Ready    <none>   85m   v1.25.16-eks-59bf375

The nodes are replaced one at a time.

 


4. EKS 1.26 → 1.27 Upgrade

1. Upgrade the cluster version
 - Change the version variable to 1.27
 - terraform apply -auto-approve

2. Upgrade the add-ons
 - coredns, kube-proxy: set most_recent = true and apply (see the query sketch below)
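
To pin explicit versions instead of most_recent, a query sketch for finding the default add-on build for the target Kubernetes version (the JMESPath filter keeps only entries flagged as the default):

aws eks describe-addon-versions --addon-name coredns --kubernetes-version 1.27 \
  --query "addons[].addonVersions[?compatibilities[0].defaultVersion].addonVersion" --output text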



3. Blue-green node group change


Change the node groups as follows:
    blue-mng={
      instance_types = ["m5.large", "m6a.large", "m6i.large"]
      cluster_version = "1.27"
      min_size     = 1
      max_size     = 2
      desired_size = 1
      update_config = {
        max_unavailable_percentage = 35
      }
      labels = {
        type = "OrdersMNG"
      }
      subnet_ids = [module.vpc.private_subnets[0]]
      taints = [
        {
          key    = "dedicated"
          value  = "OrdersApp"
          effect = "NO_SCHEDULE"
        }
      ]
    }
    green-mng={
      instance_types = ["m5.large", "m6a.large", "m6i.large"]
      subnet_ids = [module.vpc.private_subnets[0]]
      cluster_version = "1.26"
      min_size     = 1
      max_size     = 2
      desired_size = 1
      update_config = {
        max_unavailable_percentage = 35
      }
      labels = {
        type = "OrdersMNG"
      }
      taints = [
        {
          key    = "dedicated"
          value  = "OrdersApp"
          effect = "NO_SCHEDULE"
        }
      ]
    }
  }

kubectl scale deploy -n orders orders --replicas 2


After deleting green-mng from the Terraform code:
kubectl scale deploy -n orders orders --replicas 1




1.27 AMI: ami-0df82bd30ec649a9e

4. Karpenter node change
 - Update the AMI to ami-0df82bd30ec649a9e

cd ~/environment/eks-gitops-repo
git add apps/karpenter/default-ec2nc.yaml apps/karpenter/default-np.yaml
git commit -m "disruption changes"
git push --set-upstream origin main
argocd app sync karpenter


5. Upgrading AWS Fargate Nodes
# Restart the deployment
kubectl rollout restart deployment assets -n assets

# Let's wait for the new pods to become ready.
kubectl wait --for=condition=Ready pods --all -n assets --timeout=180s
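
A quick check that the restart landed the assets pods on new Fargate nodes running the upgraded version (Fargate nodes carry the eks.amazonaws.com/compute-type=fargate label):

kubectl get nodes -l eks.amazonaws.com/compute-type=fargate
kubectl get pods -n assets -o wide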

The pods are replaced via a rolling update.

6. Self-managed nodes


Result:

 

Upgrading 1.27 → 1.28 the same way gives the following result:

aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.28/amazon-linux-2/recommended/image_id \
  --region $AWS_REGION --query "Parameter.Value" --output text

ami-0258326aa9473bd11

 


5. Blue-Green Cluster Upgrades 

#
export EFS_ID=$(aws efs describe-file-systems --query "FileSystems[*].FileSystemId" --output text)
echo $EFS_ID
fs-097ae8acc90a61a11

#
cd eksgreen-terraform
terraform init
terraform plan -var efs_id=$EFS_ID
terraform apply -var efs_id=$EFS_ID -auto-approve



#
aws eks --region ${AWS_REGION} update-kubeconfig --name ${EKS_CLUSTER_NAME} --alias blue && \
  kubectl config use-context blue

aws eks --region ${AWS_REGION} update-kubeconfig --name ${EKS_CLUSTER_NAME}-gr --alias green && \
  kubectl config use-context green

#
cat ~/.kube/config
kubectl ctx
kubectl ctx green

# Verify the EC2 worker nodes are attached to the cluster
kubectl get nodes --context green
NAME                                        STATUS   ROLES    AGE     VERSION
ip-10-0-1-190.us-west-2.compute.internal    Ready    <none>   7m14s   v1.30.9-eks-5d632ec
ip-10-0-3-223.us-west-2.compute.internal    Ready    <none>   6m42s   v1.30.9-eks-5d632ec
ip-10-0-31-125.us-west-2.compute.internal   Ready    <none>   6m41s   v1.30.9-eks-5d632ec
ip-10-0-40-125.us-west-2.compute.internal   Ready    <none>   7m17s   v1.30.9-eks-5d632ec

# Verify the operational Addons on the cluster:
helm list -A --kube-context green
NAME                            NAMESPACE       REVISION        UPDATED                                 STATUS          CHART                                APP VERSION
argo-cd                         argocd          1               2025-03-26 11:58:02.453602353 +0000 UTC deployed        argo-cd-5.55.0                       v2.10.0    
aws-efs-csi-driver              kube-system     1               2025-03-26 11:58:30.395245866 +0000 UTC deployed        aws-efs-csi-driver-2.5.6             1.7.6      
aws-load-balancer-controller    kube-system     1               2025-03-26 11:58:31.887699595 +0000 UTC deployed        aws-load-balancer-controller-1.7.1   v2.7.1     
karpenter                       karpenter       1               2025-03-26 11:58:31.926407743 +0000 UTC deployed        karpenter-0.37.0                     0.37.0     
metrics-server                  kube-system     1               2025-03-26 11:58:02.447165223 +0000 UTC deployed        metrics-server-3.12.0                0.7.0      

Installing kube-ops-view
#
kubectl ctx green

# kube-ops-view
helm repo add geek-cookbook https://geek-cookbook.github.io/charts/
helm repo update
helm install kube-ops-view geek-cookbook/kube-ops-view --version 1.2.2 --namespace kube-system

#
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    service.beta.kubernetes.io/aws-load-balancer-type: external
  labels:
    app.kubernetes.io/instance: kube-ops-view
    app.kubernetes.io/name: kube-ops-view
  name: kube-ops-view-nlb
  namespace: kube-system
spec:
  type: LoadBalancer
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app.kubernetes.io/instance: kube-ops-view
    app.kubernetes.io/name: kube-ops-view
EOF


# Get the kube-ops-view URL (scale 1.5 or 1.3)
kubectl get svc -n kube-system kube-ops-view-nlb -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' | awk '{ print "KUBE-OPS-VIEW URL = http://"$1"/#scale=1.5"}'
kubectl get svc -n kube-system kube-ops-view-nlb -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' | awk '{ print "KUBE-OPS-VIEW URL = http://"$1"/#scale=1.3"}'


 

#
cd ~/environment/eks-gitops-repo
git status
git branch
* main

# Create the new local branch green
git switch -c green
git branch -a
* green
  main
  remotes/origin/HEAD -> origin/main
  remotes/origin/main

export AL2023_130_AMI=$(aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.30/amazon-linux-2023/x86_64/standard/recommended/image_id --region ${AWS_REGION} --query "Parameter.Value" --output text)
echo $AL2023_130_AMI
ami-08eb2eb81143e2902

cat << EOF > ~/environment/eks-gitops-repo/apps/karpenter/default-ec2nc.yaml
apiVersion: karpenter.k8s.aws/v1beta1
kind: EC2NodeClass
metadata:
  name: default
spec:
  amiFamily: AL2023
  amiSelectorTerms:
  - id: "${AL2023_130_AMI}" # Latest EKS 1.30 AMI
  role: karpenter-eksworkshop-eksctl-gr
  securityGroupSelectorTerms:
  - tags:
      karpenter.sh/discovery: eksworkshop-eksctl-gr
  subnetSelectorTerms:
  - tags:
      karpenter.sh/discovery: eksworkshop-eksctl
  tags:
    intent: apps
    managed-by: karpenter
    team: checkout
EOF

kubectl convert -f apps/ui/hpa.yaml --output-version autoscaling/v2 -o yaml > apps/ui/tmp.yaml && mv apps/ui/tmp.yaml apps/ui/hpa.yaml


#
cat app-of-apps/values.yaml 
spec:
  destination:
    # HIGHLIGHT
    server: https://kubernetes.default.svc
  source:
    # HIGHLIGHT
    repoURL: https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo
    # HIGHLIGHT
    targetRevision: main

# HIGHLIGHT
applications:
  - name: assets
  - name: carts
  - name: catalog
  - name: checkout
  - name: orders
  - name: other
  - name: rabbitmq
  - name: ui
  - name: karpenter

#
sed -i 's/targetRevision: main/targetRevision: green/' app-of-apps/values.yaml

# Commit the change to green branch and push it to the CodeCommit repo.
git add .  && git commit -m "1.30 changes"
git push -u origin green


# Login to ArgoCD using credentials from the following commands:
export ARGOCD_SERVER_GR=$(kubectl get svc argo-cd-argocd-server -n argocd -o json --context green | jq --raw-output '.status.loadBalancer.ingress[0].hostname')
echo "ArgoCD URL: http://${ARGOCD_SERVER_GR}"
export ARGOCD_USER_GR="admin"
export ARGOCD_PWD_GR=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" --context green | base64 -d)
echo "Username: ${ARGOCD_USER_GR}"
echo "Password: ${ARGOCD_PWD_GR}"

# Alternatively you can login using ArgoCD CLI:
argocd login --name green ${ARGOCD_SERVER_GR} --username ${ARGOCD_USER_GR} --password ${ARGOCD_PWD_GR} --insecure --skip-test-tls --grpc-web
'admin:login' logged in successfully
Context 'green' updated

ArgoCD info
ArgoCD URL: http://a91993ed366c540d8b1a3b24cc46056e-2086622909.us-west-2.elb.amazonaws.com
Username: admin
Password: o9kEZMlYkTUuwrmF

#
argo_creds=$(aws secretsmanager get-secret-value --secret-id argocd-user-creds --query SecretString --output text)

argocd repo add $(echo $argo_creds | jq -r .url) --username $(echo $argo_creds | jq -r .username) --password $(echo $argo_creds | jq -r .password) --server ${ARGOCD_SERVER_GR}
Repository 'https://git-codecommit.us-west-2.amazonaws.com/v1/repos/eks-gitops-repo' added



 

Verify green

 

Delete blue
 - The blue cluster would be deleted here; in this example, we delete the green cluster we created instead.

terraform destroy -var efs_id=$EFS_ID -auto-approve