
Commit

Working on EKS
dshafer committed Jul 31, 2018
1 parent 57385d4 commit 8a7a80c
Showing 4 changed files with 32 additions and 1 deletion.
12 changes: 12 additions & 0 deletions README.md
@@ -6,6 +6,18 @@ Adapted from https://github.com/terraform-providers/terraform-provider-aws

For details, see https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html

Configures an AWS account with the following resources:

* EKS Kubernetes API server
* VPC with:
  * "10.0.0.0/16" CIDR block
  * 2 subnets, each in a different availability zone with:
    * "10.0.x.0/24" CIDR block
  * Internet gateway
* EC2 autoscaling group for worker nodes, with:
  * 1-2 m4.large instances
  * 2 subnets
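
A minimal sketch of the network layout described above, assuming resource names consistent with the `aws_subnet.cluster` reference in eks-cluster.tf (the VPC configuration itself is not part of this commit):

# Sketch only: the actual vpc.tf is not shown in this diff
resource "aws_vpc" "cluster" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "cluster" {
  count = 2

  availability_zone = "${element(var.availability_zones, count.index)}"
  cidr_block        = "10.0.${count.index}.0/24"
  vpc_id            = "${aws_vpc.cluster.id}"
}

resource "aws_internet_gateway" "cluster" {
  vpc_id = "${aws_vpc.cluster.id}"
}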

## Variables

### Required
8 changes: 8 additions & 0 deletions eks-cluster.tf
@@ -5,6 +5,7 @@
# * EKS Cluster
#

# Create an IAM role for EKS to manage other AWS resources on our behalf
resource "aws_iam_role" "eksServiceRole" {
name = "terraform-eks-${var.cluster_name}-eksServiceRole"

@@ -24,16 +25,19 @@ resource "aws_iam_role" "eksServiceRole" {
POLICY
}
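
The assume-role policy body for this role falls outside the diff context above. For an EKS service role it is presumably a trust policy that lets the EKS service assume the role, roughly along these lines (a sketch, not the repository's exact JSON):

resource "aws_iam_role" "eksServiceRole" {
  name = "terraform-eks-${var.cluster_name}-eksServiceRole"

  # Assumed trust policy: allow the EKS service to assume this role
  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "eks.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}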

# Attach Amazon's managed AmazonEKSClusterPolicy to our EKS service role
resource "aws_iam_role_policy_attachment" "AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.eksServiceRole.name}"
}

# Attach Amazon's managed AmazonEKSServicePolicy to our EKS service role
resource "aws_iam_role_policy_attachment" "AmazonEKSServicePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = "${aws_iam_role.eksServiceRole.name}"
}

# Create a security group for cluster internal communication
resource "aws_security_group" "cluster" {
name = "terraform-eks-${var.cluster_name}-cluster"
description = "Cluster communication with worker nodes"
@@ -51,6 +55,7 @@ resource "aws_security_group" "cluster" {
}
}

# Allow pods to communicate with the cluster API server
resource "aws_security_group_rule" "cluster-ingress-node-https" {
description = "Allow pods to communicate with the cluster API Server"
from_port = 443
@@ -71,6 +76,7 @@ resource "aws_security_group_rule" "cluster-ingress-node-https" {
# type = "ingress"
#}

# Create an EKS cluster
resource "aws_eks_cluster" "cluster" {
name = "${var.cluster_name}"
role_arn = "${aws_iam_role.eksServiceRole.arn}"
@@ -80,6 +86,8 @@ resource "aws_eks_cluster" "cluster" {
subnet_ids = ["${aws_subnet.cluster.*.id}"]
}

# Ensure the policies are attached to our EKS service role before creating
# the EKS cluster
depends_on = [
"aws_iam_role_policy_attachment.AmazonEKSClusterPolicy",
"aws_iam_role_policy_attachment.AmazonEKSServicePolicy",
11 changes: 11 additions & 0 deletions eks-worker-nodes.tf
@@ -7,6 +7,7 @@
# * AutoScaling Group to launch worker instances
#

# Create an IAM role for worker nodes
resource "aws_iam_role" "node" {
name = "terraform-eks-${var.cluster_name}-node"

@@ -26,26 +27,31 @@ resource "aws_iam_role" "node" {
POLICY
}
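
Likewise, the node role's assume-role policy body is elided here; for EC2 worker nodes the trust principal is presumably the EC2 service rather than EKS (again a sketch, not the repository's exact JSON):

resource "aws_iam_role" "node" {
  name = "terraform-eks-${var.cluster_name}-node"

  # Assumed trust policy: allow EC2 instances (the worker nodes) to assume this role
  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}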

# Attach the AWS managed AmazonEKSWorkerNodePolicy to our worker node role
resource "aws_iam_role_policy_attachment" "node-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.node.name}"
}

# Attach the AWS managed AmazonEKS_CNI_Policy to our worker node role
resource "aws_iam_role_policy_attachment" "node-AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = "${aws_iam_role.node.name}"
}

# Attach the AWS managed AmazonEC2ContainerRegistryReadOnly policy to our worker node role
resource "aws_iam_role_policy_attachment" "node-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.node.name}"
}

# Create an instance profile with our worker node role attached
resource "aws_iam_instance_profile" "node" {
name = "terraform-eks-${var.cluster_name}"
role = "${aws_iam_role.node.name}"
}

# Create a security group for worker nodes
resource "aws_security_group" "node" {
name = "terraform-eks-${var.cluster_name}-node"
description = "Security group for all nodes in the cluster"
@@ -66,6 +72,7 @@ resource "aws_security_group" "node" {
}"
}

# Allow worker nodes to communicate with each other
resource "aws_security_group_rule" "node-ingress-self" {
description = "Allow node to communicate with each other"
from_port = 0
@@ -76,6 +83,7 @@ resource "aws_security_group_rule" "node-ingress-self" {
type = "ingress"
}

# Allow worker Kubelets and pods to receive communication from the cluster control plane
resource "aws_security_group_rule" "node-ingress-cluster" {
description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
from_port = 1025
@@ -86,6 +94,7 @@ resource "aws_security_group_rule" "node-ingress-cluster" {
type = "ingress"
}

# Find the latest Amazon-provided EKS worker node machine image
data "aws_ami" "eks-worker" {
filter {
name = "name"
@@ -126,6 +135,7 @@ systemctl restart kubelet
USERDATA
}

# Create an EC2 launch configuration for the worker nodes
resource "aws_launch_configuration" "node" {
associate_public_ip_address = true
iam_instance_profile = "${aws_iam_instance_profile.node.name}"
@@ -140,6 +150,7 @@ resource "aws_launch_configuration" "node" {
}
}

# Create an EC2 autoscaling group for the worker nodes
resource "aws_autoscaling_group" "cluster" {
desired_capacity = 2
launch_configuration = "${aws_launch_configuration.node.id}"
2 changes: 1 addition & 1 deletion variables.tf
@@ -16,5 +16,5 @@ variable "account_id" {
variable "availability_zones" {
type = "list"
default = ["us-east-1a", "us-east-1b"]
description = "List of 2 availability zones in which to create the cluster"
description = "List of 2 availability zones in which to create the worker nodes"
}
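
The configuration above also references var.cluster_name, which this commit does not touch; a definition along these lines is assumed to exist elsewhere in variables.tf:

# Assumed definition; the actual block is not shown in this commit's diff
variable "cluster_name" {
  type        = "string"
  description = "Name of the EKS cluster"
}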
