#!/usr/bin/env bash
# Runbook: provision an EKS 1.34 cluster with worker nodes in PRIVATE subnets.
#
# - Worker nodes are placed in the private subnets (app_a, app_b).
# - The node IAM role gets the EBS CSI driver policy and SSM managed-instance
#   policy in addition to the standard worker-node policies.
# - AL2 AMIs are deprecated as of EKS 1.33 — AL2023 is used instead.
#
# NOTE: requires a paid AWS account. Free-tier accounts fail (insufficient
# disk/resource quota).

# ---------------------------------------------------------------------------
# Step 1. Working directory
# ---------------------------------------------------------------------------
cd
mkdir 22
cd 22

# ---------------------------------------------------------------------------
# Step 2. main.tf — VPC, subnets, NAT gateways, routing
# ---------------------------------------------------------------------------
cat <<'EOF' > main.tf
terraform {
  required_version = ">= 1.5.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.0"
    }
  }
}

variable "aws_region" {
  type    = string
  default = "ap-northeast-2"
}

# Prefix for every resource name. Change this (e.g. "data-v2") to avoid
# collisions with leftover IAM roles from a previous run (see troubleshooting).
variable "name_prefix" {
  type    = string
  default = "data"
}

variable "vpc_cidr" {
  type    = string
  default = "10.0.16.0/20"
}

provider "aws" {
  region = var.aws_region
}

data "aws_availability_zones" "available" {
  state = "available"
}

locals {
  common_tags = {
    Project = var.name_prefix
    Managed = "terraform"
  }
  # Carve the /20 into: 2x /24 (public), 2x /22 (app), 2x /24 (db).
  subnet_blocks = cidrsubnets(var.vpc_cidr, 4, 4, 2, 2, 4, 4)
}

resource "aws_vpc" "main" {
  cidr_block           = var.vpc_cidr
  enable_dns_support   = true
  enable_dns_hostnames = true
  tags                 = merge(local.common_tags, { Name = "${var.name_prefix}-vpc" })
}

resource "aws_internet_gateway" "igw" {
  vpc_id = aws_vpc.main.id
  tags   = merge(local.common_tags, { Name = "${var.name_prefix}-igw" })
}

# --- Public subnets (one per AZ), tagged for internet-facing ELBs ---
resource "aws_subnet" "public_a" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = local.subnet_blocks[0]
  availability_zone       = data.aws_availability_zones.available.names[0]
  map_public_ip_on_launch = true
  tags = merge(local.common_tags, {
    Name                     = "${var.name_prefix}-pub-a"
    Tier                     = "public"
    "kubernetes.io/role/elb" = "1"
  })
}

resource "aws_subnet" "public_b" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = local.subnet_blocks[1]
  availability_zone       = data.aws_availability_zones.available.names[1]
  map_public_ip_on_launch = true
  tags = merge(local.common_tags, {
    Name                     = "${var.name_prefix}-pub-b"
    Tier                     = "public"
    "kubernetes.io/role/elb" = "1"
  })
}

# --- Private app subnets (worker nodes live here), tagged for internal ELBs ---
resource "aws_subnet" "app_a" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = local.subnet_blocks[2]
  availability_zone = data.aws_availability_zones.available.names[0]
  tags = merge(local.common_tags, {
    Name                              = "${var.name_prefix}-app-a"
    Tier                              = "app"
    "kubernetes.io/role/internal-elb" = "1"
  })
}

resource "aws_subnet" "app_b" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = local.subnet_blocks[3]
  availability_zone = data.aws_availability_zones.available.names[1]
  tags = merge(local.common_tags, {
    Name                              = "${var.name_prefix}-app-b"
    Tier                              = "app"
    "kubernetes.io/role/internal-elb" = "1"
  })
}

# --- DB subnets. No route-table association is created on purpose: they fall
# back to the VPC main route table (local routes only), keeping the DB tier
# isolated from the internet.
resource "aws_subnet" "db_a" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = local.subnet_blocks[4]
  availability_zone = data.aws_availability_zones.available.names[0]
  tags              = merge(local.common_tags, { Name = "${var.name_prefix}-db-a", Tier = "db" })
}

resource "aws_subnet" "db_b" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = local.subnet_blocks[5]
  availability_zone = data.aws_availability_zones.available.names[1]
  tags              = merge(local.common_tags, { Name = "${var.name_prefix}-db-b", Tier = "db" })
}

# --- One NAT gateway per AZ so each app subnet has AZ-local egress ---
resource "aws_eip" "nat_a" {
  domain = "vpc"
  tags   = merge(local.common_tags, { Name = "${var.name_prefix}-nat-a-eip" })
}

resource "aws_nat_gateway" "nat_a" {
  allocation_id = aws_eip.nat_a.id
  subnet_id     = aws_subnet.public_a.id
  tags          = merge(local.common_tags, { Name = "${var.name_prefix}-nat-a" })
  # NAT gateways need the IGW to exist before they can provision.
  depends_on = [aws_internet_gateway.igw]
}

resource "aws_eip" "nat_b" {
  domain = "vpc"
  tags   = merge(local.common_tags, { Name = "${var.name_prefix}-nat-b-eip" })
}

resource "aws_nat_gateway" "nat_b" {
  allocation_id = aws_eip.nat_b.id
  subnet_id     = aws_subnet.public_b.id
  tags          = merge(local.common_tags, { Name = "${var.name_prefix}-nat-b" })
  depends_on    = [aws_internet_gateway.igw]
}

# --- Routing: public -> IGW, each app subnet -> its AZ's NAT gateway ---
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id
  tags   = merge(local.common_tags, { Name = "${var.name_prefix}-rt-public" })
}

resource "aws_route" "public_default" {
  route_table_id         = aws_route_table.public.id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = aws_internet_gateway.igw.id
}

resource "aws_route_table" "app_a" {
  vpc_id = aws_vpc.main.id
  tags   = merge(local.common_tags, { Name = "${var.name_prefix}-rt-app-a" })
}

resource "aws_route" "app_a_default" {
  route_table_id         = aws_route_table.app_a.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.nat_a.id
}

resource "aws_route_table" "app_b" {
  vpc_id = aws_vpc.main.id
  tags   = merge(local.common_tags, { Name = "${var.name_prefix}-rt-app-b" })
}

resource "aws_route" "app_b_default" {
  route_table_id         = aws_route_table.app_b.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.nat_b.id
}

resource "aws_route_table_association" "public_a" {
  subnet_id      = aws_subnet.public_a.id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table_association" "public_b" {
  subnet_id      = aws_subnet.public_b.id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table_association" "app_a" {
  subnet_id      = aws_subnet.app_a.id
  route_table_id = aws_route_table.app_a.id
}

resource "aws_route_table_association" "app_b" {
  subnet_id      = aws_subnet.app_b.id
  route_table_id = aws_route_table.app_b.id
}

output "vpc_id" {
  value = aws_vpc.main.id
}
EOF

# ---------------------------------------------------------------------------
# Step 3. eks.tf — IAM roles, EKS 1.34 cluster, AL2023 managed node group
# ---------------------------------------------------------------------------
cat <<'EOF' > eks.tf
# Cluster control-plane role.
resource "aws_iam_role" "eks_cluster_role" {
  name = "${var.name_prefix}-eks-cluster-role"
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Principal = { Service = "eks.amazonaws.com" }
      Action    = "sts:AssumeRole"
    }]
  })
  tags = local.common_tags
}

resource "aws_iam_role_policy_attachment" "eks_cluster_policy" {
  role       = aws_iam_role.eks_cluster_role.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
}

# Worker-node role, assumed by EC2 instances in the node group.
resource "aws_iam_role" "eks_node_role" {
  name = "${var.name_prefix}-eks-node-role"
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Principal = { Service = "ec2.amazonaws.com" }
      Action    = "sts:AssumeRole"
    }]
  })
  tags = local.common_tags
}

resource "aws_iam_role_policy_attachment" "worker_node_policy" {
  role       = aws_iam_role.eks_node_role.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
}

resource "aws_iam_role_policy_attachment" "cni_policy" {
  role       = aws_iam_role.eks_node_role.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
}

resource "aws_iam_role_policy_attachment" "registry_policy" {
  role       = aws_iam_role.eks_node_role.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
}

# Lets the EBS CSI driver (running on the nodes) manage EBS volumes.
resource "aws_iam_role_policy_attachment" "ebs_csi_policy" {
  role       = aws_iam_role.eks_node_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}

# Lets Session Manager reach the nodes (no SSH / bastion needed).
resource "aws_iam_role_policy_attachment" "ssm_policy" {
  role       = aws_iam_role.eks_node_role.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}

resource "aws_eks_cluster" "eks" {
  name     = "${var.name_prefix}-eks"
  role_arn = aws_iam_role.eks_cluster_role.arn
  version  = "1.34"

  vpc_config {
    subnet_ids = [
      aws_subnet.public_a.id,
      aws_subnet.public_b.id,
      aws_subnet.app_a.id,
      aws_subnet.app_b.id
    ]
  }

  depends_on = [aws_iam_role_policy_attachment.eks_cluster_policy]
  tags       = merge(local.common_tags, { Name = "${var.name_prefix}-eks" })
}

resource "aws_eks_node_group" "node" {
  cluster_name    = aws_eks_cluster.eks.name
  node_group_name = "${var.name_prefix}-node"
  node_role_arn   = aws_iam_role.eks_node_role.arn
  # Private placement: nodes only go into the app subnets.
  subnet_ids = [aws_subnet.app_a.id, aws_subnet.app_b.id]

  scaling_config {
    desired_size = 2
    min_size     = 1
    max_size     = 3
  }

  instance_types = ["t3.small"]
  # AL2 is deprecated from EKS 1.33; AL2023 is the supported node OS.
  ami_type      = "AL2023_x86_64_STANDARD"
  capacity_type = "ON_DEMAND"

  # Include ALL node-role attachments so Terraform cannot detach a policy
  # (EBS CSI / SSM included) while the node group still exists, and so a
  # destroy tears the node group down before the attachments.
  depends_on = [
    aws_iam_role_policy_attachment.worker_node_policy,
    aws_iam_role_policy_attachment.cni_policy,
    aws_iam_role_policy_attachment.registry_policy,
    aws_iam_role_policy_attachment.ebs_csi_policy,
    aws_iam_role_policy_attachment.ssm_policy
  ]

  tags = merge(local.common_tags, { Name = "${var.name_prefix}-node" })
}

output "cluster_endpoint" {
  value = aws_eks_cluster.eks.endpoint
}

output "cluster_certificate_authority_data" {
  value = aws_eks_cluster.eks.certificate_authority[0].data
}
EOF

terraform init
terraform apply -auto-approve

# ---------------------------------------------------------------------------
# Step 4. Wire kubectl to the new cluster
# ---------------------------------------------------------------------------
# Cluster name is "<name_prefix>-eks"; with the default prefix that is "data-eks".
aws eks update-kubeconfig --region ap-northeast-2 --name data-eks
kubectl get nodes
# If you changed name_prefix to "data2", use the matching name instead:
#   aws eks update-kubeconfig --region ap-northeast-2 --name data2-eks

# ---------------------------------------------------------------------------
# Troubleshooting: re-running the stack with the same prefix fails with
# IAM 409 EntityAlreadyExists, because IAM roles are global (not per-VPC):
#
#   Error: creating IAM Role (data-eks-cluster-role): ... StatusCode: 409,
#     EntityAlreadyExists: Role with name data-eks-cluster-role already exists.
#   Error: creating IAM Role (data-eks-node-role): ... StatusCode: 409,
#     EntityAlreadyExists: Role with name data-eks-node-role already exists.
#
# Option 1 — change the prefix in main.tf (where name_prefix is declared)
# so all names differ:
#
#   variable "name_prefix" {
#     type    = string
#     default = "data-v2"   # was "data"
#   }
#
# Option 2 — adopt the existing roles into Terraform state, then apply:
#
#   terraform import aws_iam_role.eks_cluster_role data-eks-cluster-role
#   terraform import aws_iam_role.eks_node_role data-eks-node-role
#   # (policy attachments can also be imported if needed; with the code
#   #  already present you can usually just re-apply)
#   terraform init
#   terraform apply -auto-approve
# ---------------------------------------------------------------------------

# Step 5. Tear everything down when finished.
terraform destroy