---
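# Provision COmanage Registry training infrastructure in AWS: security groups,
# EC2 training nodes, Route 53 records, and an application load balancer that
# routes each training hostname to its own node.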
- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
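    # Build the ingress rule list incrementally: each set_fact below unions one
    # rule into training_node_rules, so the SSH rule can grow with the bastion loop.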
    - name: Build training_node_rules security group rules - bastion hosts - SSH tcp/22
      set_fact:
        training_node_rules: "{{ training_node_rules | default([]) | union([{'proto': 'tcp', 'ports': '22', 'cidr_ip': item, 'rule_desc': 'SSH from bastion'}]) }}"
      loop: "{{ bastion_internal_ip }}"

    - name: Build training_node_rules security group rules - ALB port tcp/80
      set_fact:
        training_node_rules: "{{ training_node_rules | default([]) | union([{'proto': 'tcp', 'ports': '80', 'cidr_ip': '0.0.0.0/0', 'rule_desc': 'web traffic port 80'}]) }}"

    - name: Build training_node_rules security group rules - ALB port tcp/443
      set_fact:
        training_node_rules: "{{ training_node_rules | default([]) | union([{'proto': 'tcp', 'ports': '443', 'cidr_ip': '0.0.0.0/0', 'rule_desc': 'web traffic port 443'}]) }}"
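
    # Create the security group for the training nodes from the accumulated rules.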
    - name: Security group COmanage training node
      ec2_group:
        name: "comanage-training-node"
        tags:
          Name: "comanage-training-node"
        description: "COmanage training node"
        vpc_id: "{{ comanage_training_vpc.vpc.id }}"
        region: "{{ comanage_training_region }}"
        rules: "{{ training_node_rules }}"
      register: training_node_sg
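
    # Loop once per node: each iteration provisions a single instance tagged
    # with its own Name and private/public FQDNs for use in later tasks.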
    - name: Provision COmanage training nodes
      ec2_instance:
        key_name: "{{ training_node_ssh_key_name }}"
        security_group: "{{ training_node_sg.group_id }}"
        instance_type: "{{ training_node_instance_type }}"
        image_id: "{{ training_node_ami_id }}"
        region: "{{ comanage_training_region }}"
        network:
          assign_public_ip: false
        instance_initiated_shutdown_behavior: stop
        detailed_monitoring: false
        # We only provision into one subnet since we do not need high
        # availability for training.
        vpc_subnet_id: "{{ private_subnet_id_by_az | dictsort | first | last }}"
        volumes:
          - device_name: "{{ training_node_device_name }}"
            ebs:
              volume_type: "{{ training_node_volume_type }}"
              volume_size: "{{ training_node_volume_size }}"
              delete_on_termination: true
        tags:
          Name: "comanage-training-node-{{ item }}"
          private_fqdn: "node{{ item }}-private.{{ r53_dns_domain }}"
          public_fqdn: "node{{ item }}.{{ r53_dns_domain }}"
          comanage_training: true
          role: comanage_registry
        exact_count: 1
        wait: true
      register: training_nodes
      loop: "{{ range(1, (training_node_count | int) + 1) | list }}"
    - name: Build Ansible inventory host group of training node hosts
      add_host:
        name: "{{ item.instances[0].private_ip_address }}"
        groups: ssh_training_node_hosts
      loop: "{{ training_nodes.results }}"

    - name: Create A record entries for private interface for training node hosts
      route53:
        state: present
        zone: "{{ r53_hosted_zone }}"
        record: "{{ item.instances[0].tags.private_fqdn }}"
        value: "{{ item.instances[0].private_ip_address }}"
        type: A
        ttl: 30
        overwrite: true
        wait: false
      loop: "{{ training_nodes.results }}"

    - name: Wait for SSH to come up on training node hosts
      delegate_to: "{{ item.instances[0].private_ip_address }}"
      wait_for_connection:
        timeout: 300
      register: training_nodes_ssh_connections
      loop: "{{ training_nodes.results }}"
    - name: Create ALB target group for each training node host
      elb_target_group:
        name: "{{ item.instances[0].tags.Name }}"
        protocol: http
        port: 80
        vpc_id: "{{ comanage_training_vpc.vpc.id }}"
        region: "{{ comanage_training_region }}"
        health_check_path: /
        health_check_interval: 15
        health_check_port: traffic-port
        health_check_protocol: http
        healthy_threshold_count: 3
        successful_response_codes: "200,301,302"
        unhealthy_threshold_count: 5
        targets:
          - Id: "{{ item.instances[0].instance_id }}"
            Port: 80
        tags:
          Name: "{{ item.instances[0].tags.Name }}"
        state: present
        wait: false
      register: training_nodes_target_groups
      loop: "{{ training_nodes.results }}"
    - name: Create ALB target group for IdP node
      elb_target_group:
        name: "{{ idp_node.instances[0].tags.Name }}"
        protocol: http
        port: 8080
        vpc_id: "{{ comanage_training_vpc.vpc.id }}"
        region: "{{ comanage_training_region }}"
        health_check_path: /idp/
        health_check_interval: 15
        health_check_port: traffic-port
        health_check_protocol: http
        healthy_threshold_count: 3
        successful_response_codes: "200"
        unhealthy_threshold_count: 5
        targets:
          - Id: "{{ idp_node.instances[0].instance_id }}"
            Port: 8080
        tags:
          Name: "{{ idp_node.instances[0].tags.Name }}"
        state: present
        wait: false
      register: idp_node_target_group

    - name: Create default target group for ALB
      elb_target_group:
        name: "comanage-default"
        protocol: http
        port: 80
        vpc_id: "{{ comanage_training_vpc.vpc.id }}"
        region: "{{ comanage_training_region }}"
        tags:
          Name: "comanage-default"
        state: present
        wait: false
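
    # Build the listener rules: one host-header match per training node
    # (priorities 1..N from the loop index) plus one for the IdP at priority 100.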
    - name: Construct rules for application load balancer - training nodes
      set_fact:
        alb_rules: "{{ alb_rules | default([]) | union([{'Conditions': [{'Field': 'host-header', 'Values': [item.instances[0].tags.public_fqdn]}], 'Priority': my_idx + 1, 'Actions': [{'TargetGroupName': item.instances[0].tags.Name, 'Type': 'forward'}]}]) }}"
      loop: "{{ training_nodes.results }}"
      loop_control:
        index_var: my_idx

    - name: Construct rules for application load balancer - idp node
      set_fact:
        alb_rules: "{{ alb_rules | default([]) | union([{'Conditions': [{'Field': 'host-header', 'Values': [idp_node.instances[0].tags.public_fqdn]}], 'Priority': '100', 'Actions': [{'TargetGroupName': idp_node.instances[0].tags.Name, 'Type': 'forward'}]}]) }}"

    - name: List application load balancer rules
      debug:
        msg: "{{ alb_rules }}"
    - name: Security group COmanage training ALB
      ec2_group:
        name: "comanage-training-alb"
        tags:
          Name: "comanage-training-alb"
        description: "COmanage training ALB"
        vpc_id: "{{ comanage_training_vpc.vpc.id }}"
        region: "{{ comanage_training_region }}"
        rules:
          - proto: tcp
            ports:
              - 80
              - 443
            cidr_ip: 0.0.0.0/0
      register: alb_sg

    - name: List application load balancer security group
      debug:
        msg: "{{ alb_sg }}"
    - name: Create application load balancer
      elb_application_lb:
        name: comanage-training-alb
        subnets: "{{ public_subnet_ids }}"
        security_groups:
          - "{{ alb_sg.group_name }}"
        scheme: internet-facing
        state: present
        listeners:
          - Protocol: HTTPS
            Port: 443
            DefaultActions:
              - Type: forward
                TargetGroupName: comanage-default
            Certificates:
              - CertificateArn: "{{ aws_cert_manager_cert_arn }}"
            SslPolicy: ELBSecurityPolicy-FS-1-2-2019-08
            Rules: "{{ alb_rules }}"
          - Protocol: HTTP
            Port: 80
            DefaultActions:
              - Type: redirect
                RedirectConfig:
                  Protocol: HTTPS
                  Port: "443"
                  Host: "#{host}"
                  Path: "/#{path}"
                  Query: "#{query}"
                  StatusCode: "HTTP_301"
      register: alb

    - name: List application load balancer details
      debug:
        msg: "{{ alb }}"
    - name: Create CNAME for IdP
      route53:
        state: present
        zone: "{{ r53_hosted_zone }}"
        record: "{{ idp_node.instances[0].tags.public_fqdn }}"
        value: "{{ alb.dns_name }}"
        type: CNAME
        ttl: 30
        overwrite: true
        wait: false

    - name: Create CNAME for training nodes
      route53:
        state: present
        zone: "{{ r53_hosted_zone }}"
        record: "{{ item.instances[0].tags.public_fqdn }}"
        value: "{{ alb.dns_name }}"
        type: CNAME
        ttl: 30
        overwrite: true
        wait: false
      loop: "{{ training_nodes.results }}"

    - name: Refresh inventory to pick up ec2 tags
      meta: refresh_inventory

    - name: Pause two minutes for training nodes to come up
      ansible.builtin.pause:
        minutes: 2
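
# Second play: configure the freshly provisioned nodes. Hosts are selected by
# the EC2 "role" tag via the dynamic inventory refreshed above.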
- hosts: tag_role_comanage_registry
  become: true
  gather_facts: true
  strategy: free
  tasks:
    - import_role:
        name: common
      tags:
        - training_nodes

    - import_role:
        name: swarm
      tags:
        - training_nodes

    - import_role:
        name: training
      tags:
        - training_nodes