From 1646ad3e4ece440a2951be2e727b229881c0fc1e Mon Sep 17 00:00:00 2001 From: Shayna Atkinson Date: Tue, 12 Dec 2023 10:58:27 -0500 Subject: [PATCH] changes for Fall 2023. Upgraded ansible and other components --- README.md | 8 +-- group_vars/all.yml | 25 +++++---- idp_node.yml | 30 +++++----- roles/common/tasks/users.yml | 2 +- roles/idp/files/shibboleth-idp-stack.yml | 2 +- roles/swarm/tasks/main.yml | 11 ++-- roles/training/files/comanage-match-stack.yml | 9 +-- .../files/comanage-registry-stack.yml | 10 ++-- ssh_bastion.yml | 56 +++++++++++++------ training_nodes.yml | 52 ++++++++--------- 10 files changed, 113 insertions(+), 92 deletions(-) diff --git a/README.md b/README.md index 740a4bc..3a28f97 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ cd comanage-registry-training-ansible python3 -m venv . source bin/activate pip install --upgrade pip -pip install ansible==2.10.7 +pip install ansible==5.10.0 pip install boto pip install boto3 ansible-galaxy collection install amazon.aws @@ -94,7 +94,7 @@ Do this each time to run ansible commands or playbooks to set up the environment: ``` -cd comanage-registry-training-deployment +cd comanage-registry-training-ansible source bin/activate export ANSIBLE_CONFIG=`pwd`/ansible.cfg @@ -272,7 +272,7 @@ trainee is expected to create a few secrets. Once successfully deployed, COmanage Registry is available at the URL ``` -https://node11.comanage.incommon.training +https://node1.comanage.incommon.training ``` for node 1, and @@ -306,7 +306,7 @@ used by ansible, it might help to start with a fresh agent when you begin your work for the say: ``` -cd comanage-registry-training-deployment +cd comanage-registry-training-ansible rm ./ssh_mux_* kill $SSH_AGENT_PID unset SSH_AUTH_SOCK diff --git a/group_vars/all.yml b/group_vars/all.yml index 9497375..f423807 100644 --- a/group_vars/all.yml +++ b/group_vars/all.yml @@ -5,11 +5,12 @@ # ansible-vault encrypt_string 'THE_PASSWORD' --name 'comanage_training_password' comanage_training_password: !vault | $ANSIBLE_VAULT;1.1;AES256 - 39336132313462653438613837623964363334316532646639303938353736393365626532363634 - 6664376163323637616463383437356538356438303534340a353630616538643730333330363638 - 30663032323634613537663334613638343031333436333030666131393037316232356539383133 - 3734383064343431340a333431303465346132323332386131613031366537393830633830663137 - 3333 + 37623864383639323431323832323334643964356137333336343434333438396665393563303130 + 3438333666393535633338343261663766373366333431330a656361353362646563383830366564 + 31346161393165363861343735653832383934623962336434656164323239616363393532343366 + 3234366534623766330a313232393062313034646566383230653661653737363161393131623766 + 6365 + # It should not be necessary to change the password salt. 
comanage_training_password_salt: !vault | $ANSIBLE_VAULT;1.1;AES256 @@ -49,7 +50,7 @@ vpc_availability_zone: ssh_bastion_instance_type: t2.nano # Most current Debian AMD x86_64, see https://wiki.debian.org/Cloud/AmazonEC2Image/ -ssh_bastion_ami_id: ami-0c1b4dff690b5d229 +ssh_bastion_ami_id: ami-0aa49e41c7088702f ssh_bastion_user: admin ssh_bastion_device_name: /dev/xvda ssh_bastion_volume_type: gp2 @@ -57,26 +58,26 @@ ssh_bastion_volume_size: 10 idp_node_instance_type: t2.small # Most current Debian AMD x86_64, see https://wiki.debian.org/Cloud/AmazonEC2Image/ -idp_node_ami_id: ami-0c1b4dff690b5d229 +idp_node_ami_id: ami-0aa49e41c7088702f idp_node_user: admin idp_node_device_name: /dev/xvda idp_node_volume_type: gp2 idp_node_volume_size: 20 -training_node_count: 2 +training_node_count: 30 training_node_instance_type: t2.small # Most current Debian AMD x86_64, see https://wiki.debian.org/Cloud/AmazonEC2Image/ -training_node_ami_id: ami-0c1b4dff690b5d229 +training_node_ami_id: ami-0aa49e41c7088702f training_node_user: admin training_node_device_name: /dev/xvda training_node_volume_type: gp2 training_node_volume_size: 20 # Docker version -docker_ce_package_version: "5:23.0.4-1~debian.11~bullseye" -docker_ce_cli_package_version: "5:23.0.4-1~debian.11~bullseye" -containerd_io_package_version: "1.6.20-1" +docker_ce_package_version: "5:24.0.6-1~debian.12~bookworm" +docker_ce_cli_package_version: "5:24.0.6-1~debian.12~bookworm" +containerd_io_package_version: "1.6.24-1" diff --git a/idp_node.yml b/idp_node.yml index ec9b4f3..e89f91d 100644 --- a/idp_node.yml +++ b/idp_node.yml @@ -26,53 +26,53 @@ register: idp_node_sg - name: Provision COmanage IdP node - amazon.aws.ec2: + amazon.aws.ec2_instance: key_name: "{{ training_node_ssh_key_name }}" - group_id: "{{ idp_node_sg.group_id }}" + security_group: "{{ idp_node_sg.group_id }}" instance_type: "{{ idp_node_instance_type }}" - image: "{{ idp_node_ami_id }}" + image_id: "{{ idp_node_ami_id }}" region: "{{ comanage_training_region }}" - assign_public_ip: no + network: + assign_public_ip: false instance_initiated_shutdown_behavior: stop - monitoring: no + detailed_monitoring: false # We only provision into one subnet since we do not need high # availability for training. 
vpc_subnet_id: "{{ private_subnet_id_by_az | dictsort | first | last }}" volumes: - device_name: "{{ idp_node_device_name }}" - volume_type: "{{ idp_node_volume_type }}" - volume_size: "{{ idp_node_volume_size }}" - delete_on_termination: yes - instance_tags: + ebs: + volume_type: "{{ idp_node_volume_type }}" + volume_size: "{{ idp_node_volume_size }}" + delete_on_termination: yes + tags: Name: "comanage-idp-node" private_fqdn: "login-private.{{ r53_dns_domain }}" public_fqdn: "login.{{ r53_dns_domain }}" comanage_training: True role: idp - count_tag: - Name: "comanage-idp-node" exact_count: 1 wait: true register: idp_node - name: Build Ansible inventory host group of IdP node add_host: - name: "{{ idp_node.tagged_instances[0].private_ip }}" + name: "{{ idp_node.instances[0].private_ip_address }}" groups: ssh_idp_node_host - name: Create A record entry for IdP node private interface community.aws.route53: state: present zone: "{{ r53_hosted_zone }}" - record: "{{ idp_node.tagged_instances[0].tags.private_fqdn }}" - value: "{{ idp_node.tagged_instances[0].private_ip }}" + record: "{{ idp_node.instances[0].tags.private_fqdn }}" + value: "{{ idp_node.instances[0].private_ip_address }}" type: A ttl: 30 overwrite: yes wait: no - name: Wait for SSH to come up on IdP node - delegate_to: "{{ idp_node.tagged_instances[0].private_ip }}" + delegate_to: "{{ idp_node.instances[0].private_ip_address }}" wait_for_connection: timeout: 300 register: idp_node_ssh_connection diff --git a/roles/common/tasks/users.yml b/roles/common/tasks/users.yml index 5bce572..4767d0d 100644 --- a/roles/common/tasks/users.yml +++ b/roles/common/tasks/users.yml @@ -85,7 +85,7 @@ comment: COmanage Training User uid: 2000 home: /home/training - password: "{{ comanage_training_password | string | password_hash('sha512', comanage_training_password_salt) }}" + password: "{{ comanage_training_password | string | password_hash('sha512') }}" shell: /bin/bash group: training append: yes diff --git a/roles/idp/files/shibboleth-idp-stack.yml b/roles/idp/files/shibboleth-idp-stack.yml index 86f85cf..6f34498 100644 --- a/roles/idp/files/shibboleth-idp-stack.yml +++ b/roles/idp/files/shibboleth-idp-stack.yml @@ -36,7 +36,7 @@ services: tag: "shibboleth-idp_{{.Name}}" ldap: - image: sphericalcowgroup/comanage-registry-slapd:8 + image: sphericalcowgroup/comanage-registry-slapd:20231011 command: ["slapd", "-d", "256", "-h", "ldapi:/// ldap:///", "-u", "openldap", "-g", "openldap"] volumes: - /srv/docker/var/lib/ldap:/var/lib/ldap diff --git a/roles/swarm/tasks/main.yml b/roles/swarm/tasks/main.yml index c071fb9..47381e1 100644 --- a/roles/swarm/tasks/main.yml +++ b/roles/swarm/tasks/main.yml @@ -22,7 +22,7 @@ - name: Add Docker CE repository apt_repository: - repo: deb [arch=amd64] https://download.docker.com/linux/debian bullseye stable + repo: deb [arch=amd64] https://download.docker.com/linux/debian bookworm stable - name: Install Docker apt: @@ -70,11 +70,10 @@ name: python3-pip update_cache: no - - name: Install Python3 docker module - pip: - executable: /usr/bin/pip3 - name: docker - state: present + - name: Install python3 docker module + apt: + name: python3-docker + update_cache: no - name: Initialize single node swarm community.docker.docker_swarm: diff --git a/roles/training/files/comanage-match-stack.yml b/roles/training/files/comanage-match-stack.yml index aae031a..a20b17f 100644 --- a/roles/training/files/comanage-match-stack.yml +++ b/roles/training/files/comanage-match-stack.yml @@ -21,7 +21,7 @@ services: tag: 
"postgresql-{{.Name}}" match: - image: i2incommon/comanage-match:1.1.0-internet2-tap-1 + image: i2incommon/comanage-match:1.1.1-20230726 sysctls: - net.ipv4.tcp_keepalive_time=60 - net.ipv4.tcp_keepalive_intvl=60 @@ -42,10 +42,11 @@ services: - COMANAGE_MATCH_DATABASE_USER=match_user - COMANAGE_MATCH_DATABASE_USER_PASSWORD_FILE=/run/secrets/comanage_match_database_user_password - COMANAGE_MATCH_EMAIL_TRANSPORT=Smtp - - COMANAGE_MATCH_EMAIL_HOST=tls://smtp.gmail.com - - COMANAGE_MATCH_EMAIL_PORT=465 + - COMANAGE_MATCH_EMAIL_CLASS_NAME=Smtp + - COMANAGE_MATCH_EMAIL_HOST=smtp.gmail.com + - COMANAGE_MATCH_EMAIL_PORT=587 - COMANAGE_MATCH_EMAIL_ACCOUNT=comanagetraining@gmail.com - - COMANAGE_MATCH_EMAIL_ACCOUNT_PASSWORD_FILE=/run/secrets/comanage_registry_email_account_password + - COMANAGE_MATCH_EMAIL_ACCOUNT_PASSWORD=uzpvcdtniaeqdnso - COMANAGE_MATCH_EMAIL_FROM_EMAIL=comanagetraining@gmail.com - COMANAGE_MATCH_EMAIL_FROM_NAME=Match - SHIBBOLETH_SP_ENCRYPT_CERT=/run/secrets/shibboleth_sp_encrypt_cert diff --git a/roles/training/files/comanage-registry-stack.yml b/roles/training/files/comanage-registry-stack.yml index 53c0bc5..c73a62e 100644 --- a/roles/training/files/comanage-registry-stack.yml +++ b/roles/training/files/comanage-registry-stack.yml @@ -2,7 +2,7 @@ version: '3.8' services: database: - image: mariadb:10.4.28 + image: mariadb:10.5 volumes: - /srv/docker/var/lib/mysql:/var/lib/mysql environment: @@ -21,7 +21,7 @@ services: tag: "mariadb-{{.Name}}" campusdatabase: - image: mariadb:10.4.28 + image: mariadb:10.5 volumes: - /srv/docker/var/lib/campussql:/var/lib/mysql environment: @@ -40,7 +40,7 @@ services: tag: "mariadb-{{.Name}}" registry: - image: i2incommon/comanage-registry:4.1.1-20230202 + image: i2incommon/comanage-registry:4.2.1-20230725 volumes: - /srv/docker/srv/comanage-registry/local:/srv/comanage-registry/local - /srv/docker/etc/shibboleth/shibboleth2.xml:/etc/shibboleth/shibboleth2.xml @@ -85,7 +85,7 @@ services: tag: "registry_{{.Name}}" cron: - image: i2incommon/comanage-registry-cron:4.1.1-20230202 + image: i2incommon/comanage-registry-cron:4.2.1-20230725 volumes: - /srv/docker/srv/comanage-registry/local:/srv/comanage-registry/local environment: @@ -99,7 +99,7 @@ services: tag: "cron_{{.Name}}" ldap: - image: sphericalcowgroup/comanage-registry-slapd:8 + image: sphericalcowgroup/comanage-registry-slapd:20231011 command: ["slapd", "-d", "256", "-h", "ldapi:/// ldap:///", "-u", "openldap", "-g", "openldap"] volumes: - /srv/docker/var/lib/ldap:/var/lib/ldap diff --git a/ssh_bastion.yml b/ssh_bastion.yml index 536795e..d95005f 100644 --- a/ssh_bastion.yml +++ b/ssh_bastion.yml @@ -23,46 +23,46 @@ # For each public subnet, build a bastion host - name: Provision SSH bastion hosts - amazon.aws.ec2: + amazon.aws.ec2_instance: key_name: "{{ training_node_ssh_key_name }}" - group_id: "{{ bastion_ssh_security_group.group_id }}" + security_group: "{{ bastion_ssh_security_group.group_id }}" instance_type: "{{ ssh_bastion_instance_type }}" - image: "{{ ssh_bastion_ami_id }}" + image_id: "{{ ssh_bastion_ami_id }}" wait: true region: "{{ comanage_training_region }}" - assign_public_ip: yes + network: + assign_public_ip: true + private_ip: "{{ item.item.value.bastion_ip }}" instance_initiated_shutdown_behavior: stop - monitoring: no + detailed_monitoring: false vpc_subnet_id: "{{ item.subnet.id }}" - private_ip: "{{ item.item.value.bastion_ip }}" volumes: - device_name: "{{ ssh_bastion_device_name }}" - volume_type: "{{ ssh_bastion_volume_type }}" - volume_size: "{{ 
ssh_bastion_volume_size }}" - delete_on_termination: yes - instance_tags: + ebs: + volume_type: "{{ ssh_bastion_volume_type }}" + volume_size: "{{ ssh_bastion_volume_size }}" + delete_on_termination: yes + tags: Name: "comanage_training_{{ item.item.value.bastion_hostname }}" public_fqdn: "{{ item.item.value.bastion_hostname }}.{{ r53_dns_domain }}" private_fqdn: "{{ item.item.value.bastion_hostname }}.{{ r53_dns_domain }}" comanage_training: True role : bastion - count_tag: - Name: "comanage_training_{{ item.item.value.bastion_hostname }}" exact_count: 1 register: bastion loop: "{{ subnet_public.results }}" - name: List EC2 instance ID information debug: - msg: "{{ item.tagged_instances[0].id }}" + msg: "{{ item.instances[0].instance_id }}" loop: "{{ bastion.results }}" - name: Create CNAME entries for bastion hosts community.aws.route53: state: present zone: "{{ r53_hosted_zone }}" - record: "{{ item.tagged_instances[0].tags.public_fqdn }}" - value: "{{ item.tagged_instances[0].public_dns_name }}" + record: "{{ item.instances[0].tags.public_fqdn }}" + value: "{{ item.instances[0].public_dns_name }}" type: CNAME ttl: 30 overwrite: yes @@ -71,7 +71,7 @@ - name: Build Ansible inventory host group of bastions add_host: - name: "{{ item.tagged_instances[0].public_dns_name }}" + name: "{{ item.instances[0].public_dns_name }}" groups: ssh_bastion_hosts loop: "{{ bastion.results }}" @@ -83,11 +83,11 @@ - name: Build bastion_internal_ip from bastion host list set_fact: - bastion_internal_ip: "{{ bastion_internal_ip | default([]) + [item.tagged_instances[0].private_ip + '/32']}}" + bastion_internal_ip: "{{ bastion_internal_ip | default([]) + [item.instances[0].private_ip_address + '/32']}}" loop: "{{ bastion.results }}" - name: Wait for SSH to come up on SSH bastion hosts - delegate_to: "{{ item.tagged_instances[0].public_dns_name }}" + delegate_to: "{{ item.instances[0].public_dns_name }}" wait_for_connection: timeout: 300 register: bastion_ssh_connections @@ -116,6 +116,26 @@ line: "prepend domain-search \"{{ r53_dns_domain }}\";" register: bastion_domain_config + - name: Configure systemd-resolved service + blockinfile: + path: /etc/systemd/resolved.conf + block: | + Domains={{ r53_dns_domain }} + LLMNR=no + backup: yes + + - name: Configure Name Service Switch + lineinfile: + path: /etc/nsswitch.conf + regexp: "^hosts:" + line: "hosts: files resove dns" + + - name: Restart systemd-resolved service + systemd: + state: restarted + name: systemd-resolved + + - name: Reboot bastion host reboot: when: bastion_domain_config.changed diff --git a/training_nodes.yml b/training_nodes.yml index 1331475..f8cee65 100644 --- a/training_nodes.yml +++ b/training_nodes.yml @@ -30,31 +30,31 @@ register: training_node_sg - name: Provision COmanage training nodes - ec2: + ec2_instance: key_name: "{{ training_node_ssh_key_name }}" - group_id: "{{ training_node_sg.group_id }}" + security_group: "{{ training_node_sg.group_id }}" instance_type: "{{ training_node_instance_type }}" - image: "{{ training_node_ami_id }}" + image_id: "{{ training_node_ami_id }}" region: "{{ comanage_training_region }}" - assign_public_ip: no + network: + assign_public_ip: false instance_initiated_shutdown_behavior: stop - monitoring: no + detailed_monitoring: false # We only provision into one subnet since we do not need high # availability for training. 
vpc_subnet_id: "{{ private_subnet_id_by_az | dictsort | first | last }}" volumes: - device_name: "{{ training_node_device_name }}" - volume_type: "{{ training_node_volume_type }}" - volume_size: "{{ training_node_volume_size }}" - delete_on_termination: yes - instance_tags: + ebs: + volume_type: "{{ training_node_volume_type }}" + volume_size: "{{ training_node_volume_size }}" + delete_on_termination: yes + tags: Name: "comanage-training-node-{{ item }}" private_fqdn: "node{{ item }}-private.{{ r53_dns_domain }}" public_fqdn: "node{{ item }}.{{ r53_dns_domain }}" comanage_training: True role: comanage_registry - count_tag: - Name: "comanage-training-node-{{ item }}" exact_count: 1 wait: true register: training_nodes @@ -62,7 +62,7 @@ - name: Build Ansible inventory host group of training node hosts add_host: - name: "{{ item.tagged_instances[0].private_ip }}" + name: "{{ item.instances[0].private_ip_address }}" groups: ssh_training_node_hosts loop: "{{ training_nodes.results }}" @@ -70,8 +70,8 @@ route53: state: present zone: "{{ r53_hosted_zone }}" - record: "{{ item.tagged_instances[0].tags.private_fqdn }}" - value: "{{ item.tagged_instances[0].private_ip }}" + record: "{{ item.instances[0].tags.private_fqdn }}" + value: "{{ item.instances[0].private_ip_address }}" type: A ttl: 30 overwrite: yes @@ -79,7 +79,7 @@ loop: "{{ training_nodes.results }}" - name: Wait for SSH to come up on training node hosts - delegate_to: "{{ item.tagged_instances[0].private_ip }}" + delegate_to: "{{ item.instances[0].private_ip_address }}" wait_for_connection: timeout: 300 register: training_nodes_ssh_connections @@ -87,12 +87,12 @@ - name: Create ALB target group for each training node host elb_target_group: - name: "{{ item.tagged_instances[0].tags.Name }}" + name: "{{ item.instances[0].tags.Name }}" protocol: http port: 80 vpc_id: "{{ comanage_training_vpc.vpc.id }}" region: "{{ comanage_training_region }}" - health_check_path: /registry/ + health_check_path: / health_check_interval: 15 health_check_port: traffic-port health_check_protocol: http @@ -100,10 +100,10 @@ successful_response_codes: "200,301,302" unhealthy_threshold_count: 5 targets: - - Id: "{{ item.tagged_instances[0].id }}" + - Id: "{{ item.instances[0].instance_id }}" Port: 80 tags: - Name: "{{ item.tagged_instances[0].tags.Name }}" + Name: "{{ item.instances[0].tags.Name }}" state: present wait: no register: training_nodes_target_groups @@ -111,7 +111,7 @@ - name: Create ALB target group for IdP node elb_target_group: - name: "{{ idp_node.tagged_instances[0].tags.Name }}" + name: "{{ idp_node.instances[0].tags.Name }}" protocol: http port: 8080 vpc_id: "{{ comanage_training_vpc.vpc.id }}" @@ -124,10 +124,10 @@ successful_response_codes: "200" unhealthy_threshold_count: 5 targets: - - Id: "{{ idp_node.tagged_instances[0].id }}" + - Id: "{{ idp_node.instances[0].instance_id }}" Port: 8080 tags: - Name: "{{ idp_node.tagged_instances[0].tags.Name }}" + Name: "{{ idp_node.instances[0].tags.Name }}" state: present wait: no register: idp_node_target_group @@ -146,14 +146,14 @@ - name: Construct rules for application load balancer - training nodes set_fact: - alb_rules: "{{ alb_rules | default([]) | union( [{ 'Conditions': [{'Field': 'host-header', 'Values': [item.tagged_instances[0].tags.public_fqdn]}], 'Priority': my_idx + 1, 'Actions': [{'TargetGroupName': item.tagged_instances[0].tags.Name, 'Type': 'forward'}] }] ) }}" + alb_rules: "{{ alb_rules | default([]) | union( [{ 'Conditions': [{'Field': 'host-header', 'Values': 
[item.instances[0].tags.public_fqdn]}], 'Priority': my_idx + 1, 'Actions': [{'TargetGroupName': item.instances[0].tags.Name, 'Type': 'forward'}] }] ) }}"
      loop: "{{ training_nodes.results }}"
      loop_control:
        index_var: my_idx

    - name: Construct rules for application load balancer - idp node
      set_fact:
-        alb_rules: "{{ alb_rules | default([]) | union( [{ 'Conditions': [{'Field': 'host-header', 'Values': [idp_node.tagged_instances[0].tags.public_fqdn]}], 'Priority': '100', 'Actions': [{'TargetGroupName': idp_node.tagged_instances[0].tags.Name, 'Type': 'forward'}] }] ) }}"
+        alb_rules: "{{ alb_rules | default([]) | union( [{ 'Conditions': [{'Field': 'host-header', 'Values': [idp_node.instances[0].tags.public_fqdn]}], 'Priority': '100', 'Actions': [{'TargetGroupName': idp_node.instances[0].tags.Name, 'Type': 'forward'}] }] ) }}"

    - name: List application load balancer rules
      debug:
@@ -218,7 +218,7 @@ route53:
        state: present
        zone: "{{ r53_hosted_zone }}"
-        record: "{{ idp_node.tagged_instances[0].tags.public_fqdn }}"
+        record: "{{ idp_node.instances[0].tags.public_fqdn }}"
        value: "{{ alb.dns_name }}"
        type: CNAME
        ttl: 30
        overwrite: yes
@@ -229,7 +229,7 @@ route53:
        state: present
        zone: "{{ r53_hosted_zone }}"
-        record: "{{ item.tagged_instances[0].tags.public_fqdn }}"
+        record: "{{ item.instances[0].tags.public_fqdn }}"
        value: "{{ alb.dns_name }}"
        type: CNAME
        ttl: 30
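
Most of the playbook churn in this patch is mechanical fallout from replacing the legacy `ec2` module with `amazon.aws.ec2_instance`: the registered result lists hosts under `instances` (with `instance_id` and `private_ip_address`) rather than `tagged_instances` (with `id` and `private_ip`). A minimal sketch of the registered shape that the `add_host`, `route53`, and target-group tasks consume after the swap — the instance ID, IP address, and public DNS name below are placeholders, not real output:

```
# Illustrative only: the subset of the amazon.aws.ec2_instance return value
# referenced by the playbooks above. Concrete values are placeholders.
example_training_node_result:
  instances:
    - instance_id: i-0123456789abcdef0          # was tagged_instances[0].id
      private_ip_address: 10.0.1.25             # was tagged_instances[0].private_ip
      public_dns_name: ec2-203-0-113-10.compute-1.amazonaws.com
      tags:
        Name: comanage-training-node-1
        private_fqdn: node1-private.comanage.incommon.training
        public_fqdn: node1.comanage.incommon.training
```

Any task still reading the old `tagged_instances` layout would fail with an undefined-attribute error at templating time, which is why every consumer of these registered results changes in lockstep with the module swap.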