diff --git a/README.md b/README.md
index b828610..22a30ee 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,18 @@ Adapted from https://github.com/terraform-providers/terraform-provider-aws
 
 For details, see https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html
 
+Configures an AWS account with the following resources:
+
+* EKS Kubernetes API server
+* VPC with:
+  * "10.0.0.0/16" CIDR block
+  * 2 subnets, each in a different availability zone with:
+    * "10.0.x.0/24" CIDR block
+  * Internet gateway
+* EC2 autoscaling group for worker nodes, with:
+  * 1-2 m4.large instances
+  * 2 subnets
+
 ## Variables
 
 ### Required
diff --git a/eks-cluster.tf b/eks-cluster.tf
index e338f1d..48bef6f 100644
--- a/eks-cluster.tf
+++ b/eks-cluster.tf
@@ -5,6 +5,7 @@
 # * EKS Cluster
 #
 
+# Create an IAM role for EKS to manage other AWS resources on our behalf
 resource "aws_iam_role" "eksServiceRole" {
   name = "terraform-eks-${var.cluster_name}-eksServiceRole"
 
@@ -24,16 +25,19 @@ resource "aws_iam_role" "eksServiceRole" {
 POLICY
 }
 
+# Attach Amazon's managed AmazonEKSClusterPolicy to our EKS service role
 resource "aws_iam_role_policy_attachment" "AmazonEKSClusterPolicy" {
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
   role       = "${aws_iam_role.eksServiceRole.name}"
 }
 
+# Attach Amazon's managed AmazonEKSServicePolicy to our EKS service role
 resource "aws_iam_role_policy_attachment" "AmazonEKSServicePolicy" {
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
   role       = "${aws_iam_role.eksServiceRole.name}"
 }
 
+# Create a security group for cluster internal communication
 resource "aws_security_group" "cluster" {
   name        = "terraform-eks-${var.cluster_name}-cluster"
   description = "Cluster communication with worker nodes"
@@ -51,6 +55,7 @@ resource "aws_security_group" "cluster" {
   }
 }
 
+# Allow pods to communicate with the cluster API server
 resource "aws_security_group_rule" "cluster-ingress-node-https" {
   description = "Allow pods to communicate with the cluster API Server"
   from_port   = 443
@@ -71,6 +76,7 @@ resource "aws_security_group_rule" "cluster-ingress-node-https" {
 #  type = "ingress"
 #}
 
+# Create an EKS cluster
 resource "aws_eks_cluster" "cluster" {
   name     = "${var.cluster_name}"
   role_arn = "${aws_iam_role.eksServiceRole.arn}"
@@ -80,6 +86,8 @@ resource "aws_eks_cluster" "cluster" {
     subnet_ids = ["${aws_subnet.cluster.*.id}"]
   }
 
+  # Ensure the policies are attached to our EKS service role before creating
+  # the EKS cluster
   depends_on = [
     "aws_iam_role_policy_attachment.AmazonEKSClusterPolicy",
     "aws_iam_role_policy_attachment.AmazonEKSServicePolicy",
diff --git a/eks-worker-nodes.tf b/eks-worker-nodes.tf
index c0f5650..b9c9cc1 100644
--- a/eks-worker-nodes.tf
+++ b/eks-worker-nodes.tf
@@ -7,6 +7,7 @@
 # * AutoScaling Group to launch worker instances
 #
 
+# Create an IAM role for worker nodes
 resource "aws_iam_role" "node" {
   name = "terraform-eks-${var.cluster_name}-node"
 
@@ -26,26 +27,31 @@ resource "aws_iam_role" "node" {
 POLICY
 }
 
+# Attach the AWS managed AmazonEKSWorkerNodePolicy to our worker node role
 resource "aws_iam_role_policy_attachment" "node-AmazonEKSWorkerNodePolicy" {
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
   role       = "${aws_iam_role.node.name}"
 }
 
+# Attach the AWS managed AmazonEKS_CNI_Policy to our worker node role
 resource "aws_iam_role_policy_attachment" "node-AmazonEKS_CNI_Policy" {
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
   role       = "${aws_iam_role.node.name}"
 }
 
+# Attach the AWS managed AmazonEC2ContainerRegistryReadOnly policy to our worker node role
 resource "aws_iam_role_policy_attachment" "node-AmazonEC2ContainerRegistryReadOnly" {
   policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
   role       = "${aws_iam_role.node.name}"
 }
 
+# Create an instance profile with our worker node role attached
 resource "aws_iam_instance_profile" "node" {
   name = "terraform-eks-${var.cluster_name}"
   role = "${aws_iam_role.node.name}"
 }
 
+# Create a security group for worker nodes
 resource "aws_security_group" "node" {
   name        = "terraform-eks-${var.cluster_name}-node"
   description = "Security group for all nodes in the cluster"
@@ -66,6 +72,7 @@ resource "aws_security_group" "node" {
   }"
 }
 
+# Allow worker nodes to communicate with each other
 resource "aws_security_group_rule" "node-ingress-self" {
   description = "Allow node to communicate with each other"
   from_port   = 0
@@ -76,6 +83,7 @@ resource "aws_security_group_rule" "node-ingress-self" {
   type        = "ingress"
 }
 
+# Allow worker Kubelets and pods to receive communication from the cluster control plane
 resource "aws_security_group_rule" "node-ingress-cluster" {
   description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
   from_port   = 1025
@@ -86,6 +94,7 @@ resource "aws_security_group_rule" "node-ingress-cluster" {
   type        = "ingress"
 }
 
+# Find the latest Amazon-provided EKS worker node machine image
 data "aws_ami" "eks-worker" {
   filter {
     name   = "name"
@@ -126,6 +135,7 @@ systemctl restart kubelet
 USERDATA
 }
 
+# Create an EC2 launch configuration for the worker nodes
 resource "aws_launch_configuration" "node" {
   associate_public_ip_address = true
   iam_instance_profile        = "${aws_iam_instance_profile.node.name}"
@@ -140,6 +150,7 @@ resource "aws_launch_configuration" "node" {
   }
 }
 
+# Create an EC2 autoscaling group for the worker nodes
 resource "aws_autoscaling_group" "cluster" {
   desired_capacity     = 2
   launch_configuration = "${aws_launch_configuration.node.id}"
diff --git a/variables.tf b/variables.tf
index 6d613ce..7b93081 100644
--- a/variables.tf
+++ b/variables.tf
@@ -16,5 +16,5 @@ variable "account_id" {
 variable "availability_zones" {
   type        = "list"
   default     = ["us-east-1a", "us-east-1b"]
-  description = "List of 2 availability zones in which to create the cluster"
+  description = "List of 2 availability zones in which to create the worker nodes"
 }