# terraform-aws-eks/eks-worker-nodes.tf
#
# EKS Worker Nodes Resources
#  * IAM role allowing Kubernetes actions to access other AWS services
#  * EC2 Security Group to allow networking traffic
#  * Data source to fetch latest EKS worker AMI
#  * AutoScaling Launch Configuration to configure worker instances
#  * AutoScaling Group to launch worker instances
#
# IAM role assumed by the worker node EC2 instances. The trust policy below
# allows only the EC2 service to assume the role; permissions themselves are
# granted via the managed-policy attachments that follow.
resource "aws_iam_role" "node" {
  name = "terraform-eks-${var.cluster_name}-node"

  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}
# AmazonEKSWorkerNodePolicy lets the kubelet on each node describe EC2/EKS
# resources and connect to the cluster control plane.
resource "aws_iam_role_policy_attachment" "node-AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = "${aws_iam_role.node.name}"
}
# AmazonEKS_CNI_Policy lets the VPC CNI plugin (aws-node) manage ENIs and
# secondary IP addresses for pod networking.
resource "aws_iam_role_policy_attachment" "node-AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = "${aws_iam_role.node.name}"
}
# AmazonEC2ContainerRegistryReadOnly lets the nodes pull container images
# from ECR (read-only).
resource "aws_iam_role_policy_attachment" "node-AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = "${aws_iam_role.node.name}"
}
# Instance profile wrapping the node role so it can be attached to EC2
# instances via the launch configuration below.
resource "aws_iam_instance_profile" "node" {
  name = "terraform-eks-${var.cluster_name}"
  role = "${aws_iam_role.node.name}"
}
# Security group shared by all worker nodes. Ingress rules are defined as
# separate aws_security_group_rule resources below; only egress is inline.
resource "aws_security_group" "node" {
  name        = "terraform-eks-${var.cluster_name}-node"
  description = "Security group for all nodes in the cluster"
  vpc_id      = "${aws_vpc.cluster.id}"

  # Allow all outbound traffic from the nodes.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # The "kubernetes.io/cluster/<name>" = "owned" tag is required so EKS and
  # the Kubernetes AWS cloud provider can discover resources belonging to
  # this cluster.
  tags = "${merge(
    local.common_tags,
    map(
      "Name", "terraform-eks-${var.cluster_name}-node",
      "kubernetes.io/cluster/${var.cluster_name}", "owned",
    )
  )}"
}
# Allow unrestricted node-to-node traffic (pod-to-pod across hosts).
# NOTE: with protocol "-1" the port range is ignored by AWS, so the
# from_port/to_port values here are effectively cosmetic.
resource "aws_security_group_rule" "node-ingress-self" {
  description              = "Allow node to communicate with each other"
  from_port                = 0
  protocol                 = "-1"
  security_group_id        = "${aws_security_group.node.id}"
  source_security_group_id = "${aws_security_group.node.id}"
  to_port                  = 65535
  type                     = "ingress"
}
# Allow the control plane to reach kubelets and pods on the nodes over the
# non-privileged TCP port range (1025-65535), per the EKS getting-started
# guide of this era.
resource "aws_security_group_rule" "node-ingress-cluster" {
  description              = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
  from_port                = 1025
  protocol                 = "tcp"
  security_group_id        = "${aws_security_group.node.id}"
  source_security_group_id = "${aws_security_group.cluster.id}"
  to_port                  = 65535
  type                     = "ingress"
}
# Find the latest Amazon-provided EKS worker node machine image.
# NOTE(review): `tags` is a computed (read-only) attribute on the aws_ami
# data source and cannot be set as an argument, so the original
# `tags = "${local.common_tags}"` line was removed — constrain the lookup
# with additional `filter` blocks (e.g. name = "tag:<key>") if tag-based
# selection is actually needed.
data "aws_ami" "eks-worker" {
  filter {
    name   = "name"
    values = ["eks-worker-*"]
  }

  most_recent = true
  owners      = ["602401143452"] # Amazon EKS AMI account
}
# EKS currently documents this required userdata for EKS worker nodes to
# properly configure Kubernetes applications on the EC2 instance.
# We utilize a Terraform local here to simplify Base64 encoding this
# information into the AutoScaling Launch Configuration.
# More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
#
# The script writes the cluster CA certificate to disk, then rewrites the
# placeholder tokens baked into the AMI's kubeconfig and kubelet unit file
# with this cluster's endpoint/name/region before restarting the kubelet.
# NOTE(review): the DNS_CLUSTER_IP branch mirrors the stock EKS bootstrap
# logic of this AMI generation (10.100.0.10 unless the node IP is in
# 10.0.0.0/8, in which case 172.20.0.10) — confirm against the AMI version
# in use.
locals {
  node-userdata = <<USERDATA
#!/bin/bash -xe
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
echo "${aws_eks_cluster.cluster.certificate_authority.0.data}" | base64 -d > $CA_CERTIFICATE_FILE_PATH
INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.cluster.endpoint},g /var/lib/kubelet/kubeconfig
sed -i s,CLUSTER_NAME,${var.cluster_name},g /var/lib/kubelet/kubeconfig
sed -i s,REGION,${data.aws_region.current.name},g /etc/systemd/system/kubelet.service
sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.cluster.endpoint},g /etc/systemd/system/kubelet.service
sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
DNS_CLUSTER_IP=10.100.0.10
if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi
sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl restart kubelet
USERDATA
}
# Launch configuration for the worker nodes: EKS worker AMI, node instance
# profile, node security group, and the bootstrap userdata above.
resource "aws_launch_configuration" "node" {
  associate_public_ip_address = true
  iam_instance_profile        = "${aws_iam_instance_profile.node.name}"
  image_id                    = "${data.aws_ami.eks-worker.id}"
  instance_type               = "m4.large"
  name_prefix                 = "terraform-eks-${var.cluster_name}"
  security_groups             = ["${aws_security_group.node.id}"]
  user_data_base64            = "${base64encode(local.node-userdata)}"

  # Launch configurations are immutable; create the replacement before
  # destroying the old one so the ASG is never left without a valid config.
  lifecycle {
    create_before_destroy = true
  }
}
# Transform local.common_tags (a map) into the structure required by
# aws_autoscaling_group tags (a list of maps with key/value/
# propagate_at_launch), using one null_data_source instance per tag key.
data "null_data_source" "asg_common_tags" {
  count = "${length(keys(local.common_tags))}"

  inputs = {
    key                 = "${element(keys(local.common_tags), count.index)}"
    value               = "${element(values(local.common_tags), count.index)}"
    propagate_at_launch = true
  }
}
# AutoScaling group that launches the worker nodes across the cluster
# subnets. ASG tags use the list-of-maps format (key/value/
# propagate_at_launch) rather than a plain map; the common tags converted
# by null_data_source.asg_common_tags are appended to the two required ones.
# The "kubernetes.io/cluster/<name>" = "owned" tag is mandatory for the
# nodes to join the cluster.
resource "aws_autoscaling_group" "cluster" {
  desired_capacity     = 2
  launch_configuration = "${aws_launch_configuration.node.id}"
  max_size             = 2
  min_size             = 1
  name                 = "terraform-eks-${var.cluster_name}"
  vpc_zone_identifier  = ["${aws_subnet.cluster.*.id}"]

  tags = ["${concat(
    list(
      map("key", "Name", "value", "terraform-eks-${var.cluster_name}", "propagate_at_launch", true),
      map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true)
    ),
    data.null_data_source.asg_common_tags.*.outputs
  )}"]
}