<?xml version="1.0" encoding="UTF-8"?>
<!--
Federation metadata signing process for the UK federation.
The following are among the callable targets for this process:
* "ant process.create-aggregates.test" generates the set of unsigned aggregates, and does nothing else
* "ant process.mergetomaster.deferred" takes the deferred branch of the data repository, and merges it to master, ready to create aggregates.
* "ant process.mergetomaster.immediate" takes the immediate branch of the data repository, and merges it to master, ready to create aggregates.
* "ant process.create-aggregates" generates the set of unsigned aggregates from the other files.
* "ant process.sign-aggregates.sign" signs each aggregate and verifies the signatures
* "ant process.collect-mdq" collects the static cache of MDQ responses
* "ant process.publish" sends all files to the metadata distribution servers.
-->
<project default="echoproperties" xmlns:if="ant:if" xmlns:unless="ant:unless">
<!--
*******************************
*** ***
*** P R O P E R T I E S ***
*** ***
*******************************
-->
<!--
When invoking the targets related to the signing process, a production deployment
MUST define the following properties:
* shared.ws.dir - the full path to the shared workspace that all of this will execute under
* sign.uk.keyPassword - password for the PKCS#11 keystore
The following properties MUST be provided as arguments when invoking the targets that make use of them:
* jenkins.url.to.trigger.signing - full URL to trigger the Jenkins task responsible for signing
* jenkins.url.to.trigger.publication - full URL to trigger the Jenkins task responsible for publication
-->
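<!--
For illustration only, a production invocation might look like the
following (the workspace path, password, and Jenkins URL are
placeholder values, not real deployment settings):

ant -Dshared.ws.dir=/srv/shared-ws \
    -Dsign.uk.keyPassword=CHANGEME \
    -Djenkins.url.to.trigger.signing=https://jenkins.example.org/job/sign/build \
    process.create-aggregates
-->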
<!--
*******************************************
*** ***
*** P R O P E R T Y S O U R C E S ***
*** ***
*******************************************
-->
<!--
os.family
Distinguish between the general kinds of operating system
we could be executing on.
Values: windows, macosx, linux, other.
-->
<condition property="os.family" value="windows">
<os family="windows"/>
</condition>
<condition property="os.family" value="macosx">
<os family="mac"/>
</condition>
<condition property="os.family" value="linux">
<os family="unix" name="Linux"/>
</condition>
<property name="os.family" value="other"/>
<!--
env
Distinguish between different classes of deployment,
e.g., development vs. production.
Values: dev, preprod, prod, prod-old
Defaults to "dev" here, but that can be overridden from the command
line, a Jenkins job, or in build.properties.
-->
<property name="env" value="dev"/>
<!--
External property files.
Pull in properties from external files. Because Ant properties are
immutable, any properties declared in this file should be regarded as
defaults, and will be overridden by any definitions in the following
locations:
* on the command line
* in a Jenkins job definition
* in any of the external files listed below
The first location specifying a particular property sets the final value.
The build.properties file is not source controlled, and should be used
to override properties for a particular deployment, or to include
credentials.
-->
<property file="build.properties"/>
<property file="${os.family}.properties"/>
<property file="${env}.properties"/>
<property file="default.properties"/>
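<!--
As an illustration of the precedence rules above: if build.properties
(hypothetically) contained the line

validUntil.default.days=14

then the declaration of that property later in this file would be
ignored, because the first location to set a property wins.
-->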
<!--
*******************************************************
*** ***
*** S A M L O U T P U T P R O P E R T I E S ***
*** ***
*******************************************************
-->
<!-- Default validUntil duration for all generated metadata, in days. -->
<property name="validUntil.default.days" value="21"/>
<!-- Specific validUntil duration for aggregates, in days. -->
<property name="validUntil.aggregate.days" value="${validUntil.default.days}"/>
<!-- Same value, as an ISO 8601 duration. -->
<property name="validUntil.aggregate.duration" value="P${validUntil.aggregate.days}D"/>
<!-- Specific validUntil duration for per-entity metadata, in days. -->
<property name="validUntil.perEntity.days" value="${validUntil.default.days}"/>
<!-- Same value, as an ISO 8601 duration. -->
<property name="validUntil.perEntity.duration" value="P${validUntil.perEntity.days}D"/>
<!-- Default cacheDuration for all generated metadata, as an ISO 8601 duration. -->
<property name="cacheDuration.default.duration" value="PT6H"/>
<!-- Specific cacheDuration for aggregates. -->
<property name="cacheDuration.aggregate.duration" value="${cacheDuration.default.duration}"/>
<!-- Specific cacheDuration for per-entity metadata. -->
<property name="cacheDuration.perEntity.duration" value="${cacheDuration.default.duration}"/>
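<!--
With the defaults above, the derived values expand as follows:
validUntil.aggregate.duration and validUntil.perEntity.duration both
become "P21D" (21 days), and all three cacheDuration properties
become "PT6H" (6 hours).
-->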
<!--
***************************************************************
*** ***
*** G I T I N T E G R A T I O N P R O P E R T I E S ***
*** ***
***************************************************************
-->
<property name="git.executable" value="git"/>
<!--
This project is integrated with two others using Git.
The following properties describe where the repositories for those projects are located.
-->
<property name="git.repo.user" value="git"/>
<property name="git.repo.host" value="repo.infr.ukfederation.org.uk"/>
<property name="git.repo.group" value="ukf"/>
<!--
Name each of the project repositories.
-->
<property name="git.repo.project.data" value="ukf-data"/>
<property name="git.repo.project.products" value="ukf-products"/>
<property name="git.repo.project.tooling" value="ukf-meta"/>
<property name="git.repo.project.tooling.config" value="ukf-meta-config"/>
<!--
Build access URLs for each repository.
-->
<property name="git.repo.data.url" value="${git.repo.user}@${git.repo.host}:${git.repo.group}/${git.repo.project.data}"/>
<property name="git.repo.products.url" value="${git.repo.user}@${git.repo.host}:${git.repo.group}/${git.repo.project.products}"/>
<property name="git.repo.tooling.url" value="${git.repo.user}@${git.repo.host}:${git.repo.group}/${git.repo.project.tooling}"/>
<property name="git.repo.tooling.config.url" value="${git.repo.user}@${git.repo.host}:${git.repo.group}/${git.repo.project.tooling.config}"/>
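<!--
With the defaults above, git.repo.data.url expands to
git@repo.infr.ukfederation.org.uk:ukf/ukf-data, and the other three
URLs follow the same pattern.
-->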
<!--
*********************************************
*** ***
*** S E R V E R P R O P E R T I E S ***
*** ***
*********************************************
-->
<!--
Metadata Distribution Service server properties.
-->
<property name="md.user" value="mdscp"/>
<property name="md.dist.host-ne-01.name" value="md-ne-01.infr.ukfederation.org.uk"/>
<property name="md.dist.host-ne-02.name" value="md-ne-02.infr.ukfederation.org.uk"/>
<property name="md.dist.host-we-01.name" value="md-we-01.infr.ukfederation.org.uk"/>
<property name="md.dist.host-we-02.name" value="md-we-02.infr.ukfederation.org.uk"/>
<property name="mdq.dist.name" value="mdq.ukfederation.org.uk"/>
<property name="md.dist.path.name" value="/"/>
<!--
Middlebox server properties.
-->
<property name="orchestrator.user" value="jenkins"/>
<property name="orchestrator.hostname" value="orchestrator.infr.ukfederation.org.uk"/>
<property name="orchestrator.path" value="${shared.ws.dir}/build"/>
<property name="orchestrator.url" value="${orchestrator.user}@${orchestrator.hostname}:${orchestrator.path}"/>
<!--
Signing server properties.
-->
<property name="keymaster.user" value="jenkinsslave"/>
<property name="keymaster.hostname" value="keymaster.infr.ukfederation.org.uk"/>
<property name="keymaster.path" value="${shared.ws.dir}/build"/>
<property name="keymaster.url" value="${keymaster.user}@${keymaster.hostname}:${keymaster.path}"/>
<!--
Web server properties.
-->
<property name="www.user" value="wwwscp"/>
<property name="www.hostname" value="www-ne-01.infr.ukfederation.org.uk"/>
<property name="www.path.stats" value="/var/www/www-prod/html/fed"/>
<property name="www.path.members" value="/var/www/externals"/>
<property name="www.url.stats" value="${www.user}@${www.hostname}:${www.path.stats}"/>
<property name="www.url.members" value="${www.user}@${www.hostname}:${www.path.members}"/>
<!--
Repo properties.
-->
<property name="repo.user" value="mdscp"/>
<property name="repo.hostname" value="repo.infr.ukfederation.org.uk"/>
<property name="repo.path" value="/tmp"/>
<property name="repo.url" value="${repo.user}@${repo.hostname}:${repo.path}"/>
<!--
*******************************************************
*** ***
*** F I L E S Y S T E M P R O P E R T I E S ***
*** ***
*******************************************************
-->
<!--
Shared workspace location.
The property shared.ws.dir must be defined in order to locate the shared
workspace used for inputs to and outputs from the tooling. In production,
this location is passed through from the Jenkins job. In the dev environment,
it is assumed (by dev.properties) to be in this repository's parent directory.
-->
<fail unless="shared.ws.dir" message="shared.ws.dir was not defined"/>
<!--
There must be a build directory in the shared workspace.
-->
<fail message="shared workspace ${shared.ws.dir} doesn't contain a build directory">
<condition>
<not>
<available file="${shared.ws.dir}/build" type="dir"/>
</not>
</condition>
</fail>
<!--
External specialised directories.
-->
<property name="aggregates.dir" value="${shared.ws.dir}/${git.repo.project.products}/aggregates"/>
<property name="blocklists.dir" value="${shared.ws.dir}/${git.repo.project.data}/blocklists"/>
<property name="edugain.dir" value="${shared.ws.dir}/${git.repo.project.data}/edugain"/>
<property name="entities.dir" value="${shared.ws.dir}/${git.repo.project.data}/entities"/>
<property name="members.dir" value="${shared.ws.dir}/${git.repo.project.data}/members"/>
<property name="output.dir" value="${shared.ws.dir}/build"/>
<property name="mdq.output.dir" value="${shared.ws.dir}/build/mdq"/>
<property name="temp.dir" value="${shared.ws.dir}/build"/>
<!--
Local specialised directories.
-->
<property name="build.dir" value="${basedir}/build"/>
<property name="mdx.dir" value="${basedir}/mdx"/>
<property name="edugain.dir" value="${mdx.dir}/int_edugain"/>
<property name="rules.dir" value="${mdx.dir}/_rules"/>
<property name="utilities.dir" value="${basedir}/utilities"/>
<property name="githook.dir" value="${utilities.dir}/githooks"/>
<!--
Location of externally supplied tool bundles.
-->
<property name="tools.dir" value="tools"/>
<property name="tools.ant" value="${tools.dir}/ant"/>
<property name="tools.mda" value="${tools.dir}/aggregator-cli-0.9.2"/>
<property name="tools.mdnorm" value="${tools.dir}/mdnorm"/>
<property name="tools.slacktee" value="${tools.dir}/slacktee"/>
<property name="tools.xmlsectool" value="${tools.dir}/xmlsectool-2.0.0"/>
<!--
Location of configuration for externally supplied tool bundles.
-->
<property name="tools.config.dir" value="${shared.ws.dir}/${git.repo.project.tooling.config}/tools"/>
<property name="tools.slacktee.config" value="${tools.config.dir}/slacktee"/>
<!--
Full path to a commonly used temporary file.
-->
<property name="temp.xml" value="${temp.dir}/temp.xml"/>
<!--
Lockfile (the signing process should only be allowed to begin if this is not present)
-->
<property name="lockfile" value="${shared.ws.dir}/lockfile"/>
<!--
There are many separate processing "streams": production, test, export,
fallback, "wayf", "cdsall" and "wugen".
Each stream has its own unsigned file, as follows:
-->
<property name="mdaggr.prod.unsigned" value="ukfederation-metadata-unsigned.xml"/>
<property name="mdaggr.test.unsigned" value="ukfederation-test-unsigned.xml"/>
<property name="mdaggr.export.unsigned" value="ukfederation-export-unsigned.xml"/>
<property name="mdaggr.export.preview.unsigned"
value="ukfederation-export-preview-unsigned.xml"/>
<property name="mdaggr.back.unsigned" value="ukfederation-back-unsigned.xml"/>
<property name="mdaggr.wayf.unsigned" value="ukfederation-wayf-unsigned.xml"/>
<property name="mdaggr.cdsall.unsigned" value="ukfederation-cdsall-unsigned.xml"/>
<property name="mdaggr.wugen.unsigned" value="ukfederation-wugen-unsigned.xml"/>
<!--
Each stream also has its own signed file, as follows:
-->
<property name="mdaggr.prod.signed" value="ukfederation-metadata.xml"/>
<property name="mdaggr.test.signed" value="ukfederation-test.xml"/>
<property name="mdaggr.export.signed" value="ukfederation-export.xml"/>
<property name="mdaggr.export.preview.signed"
value="ukfederation-export-preview.xml"/>
<property name="mdaggr.back.signed" value="ukfederation-back.xml"/>
<property name="mdaggr.wayf.signed" value="ukfederation-wayf.xml"/>
<property name="mdaggr.cdsall.signed" value="ukfederation-cdsall.xml"/>
<property name="mdaggr.wugen.signed" value="ukfederation-wugen.xml"/>
<!--
Other files
-->
<property name="mdq.cache" value="mdqcache.tar.gz"/>
<property name="mdaggr.stats" value="ukfederation-stats.html"/>
<property name="post-receive-githook" value="post-receive"/>
<property name="md.githook.path" value="/var/git/ukf-products/.git/hooks" />
<property name="mdaggr.discofeed.filtered" value="discofeed.json"/>
<property name="mdaggr.discofeed.all" value="discofeed-all.json"/>
<property name="md.discofeed.path" value="/tmp" />
<!--
*************************************************
*** ***
*** M I S C E L L A N E O U S S E T U P ***
*** ***
*************************************************
-->
<!--
Additional ANT task definitions.
-->
<taskdef resource="net/sf/antcontrib/antlib.xml">
<classpath>
<pathelement location="${tools.ant}/lib/ant-contrib-1.0b3.jar"/>
</classpath>
</taskdef>
<!--
Java memory requirements.
This is used as the max heap setting for all Java invocations. This amount will
be more than some invocations require, but there's no harm in having a higher
limit for all of them.
Note that this value is often overridden in production contexts (see, for example,
prod.properties) to give the production environment extra headroom. The value is
kept lower here so that we'll get warnings in development before the production
environment is affected.
The overrides for production environments should be updated in step with the
value here whenever this is necessary.
-->
<property name="java.max.memory" value="1536m"/>
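<!--
As a purely illustrative example, prod.properties might raise this
limit with a line such as:

java.max.memory=3072m
-->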
<!--
*************************************************
*** ***
*** E N T R Y P O I N T T A R G E T S ***
*** ***
*************************************************
-->
<!--
Stage 0 of md process: test creation of aggregates, looking for errors.
Uses deferred branch.
Assumes you've run a git.pull.all first, as this target doesn't do that.
Runs on: aggr
Process:
* Git: Checkout deferred branch
* SAML MD: Run UK Verify flow
-->
<target name="process.create-aggregates.test.deferredbranch" depends="
git.data.deferredbranch.checkout,
samlmd.aggregates.generate.dry-run">
<echo>Test aggregates built successfully from deferred branch.</echo>
</target>
<!--
Stage 0 of md process: test creation of aggregates, looking for errors.
Uses immediate branch.
Assumes you've run a git.pull.all first, as this target doesn't do that.
Runs on: aggr
Process:
* Git: Checkout immediate branch
* SAML MD: Run UK Verify flow
-->
<target name="process.create-aggregates.test.immediatebranch" depends="
git.data.immediatebranch.checkout,
samlmd.aggregates.generate.dry-run">
<echo>Test aggregates built successfully from immediate branch.</echo>
</target>
<!--
Stage 0 of md process: test creation of aggregates, looking for errors.
Uses master branch.
Assumes you've run a git.pull.all first, as this target doesn't do that.
Runs on: aggr
Process:
* Git: Checkout master branch
* SAML MD: Run UK Verify flow
-->
<target name="process.create-aggregates.test.masterbranch" depends="
git.data.masterbranch.checkout,
samlmd.aggregates.generate.dry-run">
<echo>Test aggregates built successfully from master branch.</echo>
</target>
<!--
Stage 1 (normal process) of md process: merge the updates into master
Runs on: aggr
Process:
* Lock: Check for presence of lockfile; if not present, create it
* API: Pause the API
* FS: Make sure output directory is clear
* Git: Make sure all repos up to date
* Git: Merge deferred branch into immediate branch
* Git: Merge immediate branch into master branch
-->
<target name="process.mergetomaster.deferred" depends="
lock.check,
lock.lock,
api.pause,
fs.clear.outputdir,
git.pull.all,
git.data.merge.deferredintoimmediate,
git.data.merge.immediateintomaster">
<echo>Stage 1 (normal) Success: Lockfile created, API paused, deferred branch merged into immediate and then into master.</echo>
</target>
<!--
Stage 1 (emergency change process) of md process: merge the updates into master
Runs on: aggr
Process:
* Lock: Check for presence of lockfile; if not present, create it
* API: Pause the API
* FS: Make sure output directory is clear
* Git: Make sure all repos up to date
* Git: Merge immediate branch into master branch
-->
<target name="process.mergetomaster.immediate" depends="
lock.check,
lock.lock,
api.pause,
fs.clear.outputdir,
git.pull.all,
git.data.merge.immediateintomaster">
<echo>Stage 1 (emergency) Success: Lockfile created, API paused, immediate branch merged into master.</echo>
</target>
<!--
Stage 1 of md process: manually triggered re-aggregation, signing & publication (without first merging any working branch).
Runs on: aggr
Process:
* Lock: Check for presence of lockfile; if not present, create it
* FS: Make sure output directory is clear
* API: Pause the API
* Git: Make sure all repos up to date
-->
<target name="process.manual.retrigger" depends="
lock.check,
lock.lock,
fs.clear.outputdir,
api.pause,
git.pull.all">
<echo>Stage 1 (manual) Success: Lockfile created, API paused.</echo>
</target>
<!--
Stage 2 of md process: generate the unsigned aggregates
Runs on: aggr
Process:
* API: Unpause
* Git: Make sure we're on the master branch
* SAML MD: Generate unsigned aggregates
* Scp: Copy unsigned aggregates to orchestrator
* Jenkins: Trigger job on orchestrator to start signing process
-->
<target name="process.create-aggregates" depends="
api.unpause,
git.data.masterbranch.checkout,
samlmd.aggregates.generate,
fs.scp.unsigned.files.to.orchestrator,
jenkins.triggerjob.signing">
<echo>Stage 2 Success: Unsigned aggregates created, copied to orchestrator. Message sent to start signing.</echo>
</target>
<!--
Stage 3.0 of md process: create the signed aggregates / clear output dir
Runs on: keymaster
Process:
* FS: Clear outputdir
-->
<target name="process.sign-aggregates.clear.keymaster" depends="
fs.clear.outputdir">
<echo>Stage 3.0 Success: Output directory on keymaster cleared.</echo>
</target>
<!--
Stage 3.1 of md process: create the signed aggregates / update and copy to keymaster
Runs on: orchestrator
Process:
* Git: Make sure all repos up to date
* Git: Push tooling repos to keymaster
* SCP: Copy files to keymaster
-->
<target name="process.sign-aggregates.prepare.and.scp" depends="
git.pull.all,
git.orchestrator.push.tooling.to.keymaster,
fs.scp.unsigned.files.to.keymaster">
<echo>Stage 3.1 Success: Aggregates sent to keymaster for processing.</echo>
</target>
<!--
Stage 3.2 of md process: create the signed aggregates / sign
Runs on: keymaster
Process:
* SAML MD: Sign aggregates
* SAML MD: Verify signed aggregates
-->
<target name="process.sign-aggregates.sign" depends="
samlmd.aggregates.sign,
samlmd.aggregates.verify">
<echo>Stage 3.2 Success: Aggregates signed and verified.</echo>
</target>
<!--
Stage 4.1 of md process: Create the cache of MDQ responses, tar the whole lot.
DEPRECATED because mdq.createcache uses both MDA and xmlsectool
Runs on: keymaster
Process:
* MDQ: Create mdq cache.
* Tar: Tar the cache up into a single file
-->
<target name="process.create-mdq-cache.create.and.tar" depends="
mdq.createcache,
fs.tar.mdqcache">
<echo>Stage 4.1 Success: MDQ cache created and tarred into a single file.</echo>
</target>
<!--
Stage 4.1.1 of md process: sign fragment files with MDA
Runs on: keymaster
-->
<target name="process.create-mdq-cache.sign.fragments" depends="samlmd.mdq.sign.fragments">
<echo>Stage 4.1.1 Success: MDQ fragments signed</echo>
</target>
<!--
Stage 4.1.2 of md process: sign MDQ all entities with xmlsectool
Runs on: keymaster
-->
<target name="process.create-mdq-cache.sign.all.entities" depends="samlmd.mdq.sign.all.entities">
<echo>Stage 4.1.2 Success: MDQ all entities file signed</echo>
</target>
<!--
Stage 4.1.3 of md process: create tarfile of files for MDQ
Runs on: keymaster
-->
<target name="process.create-mdq-cache.tarfile" depends="fs.tar.mdqcache">
<echo>Stage 4.1.3 Success: Created tarfile of MDQ fragments and all entities</echo>
</target>
<!--
Stage 4.2 of md process: Copy files from keymaster, push.
Runs on: orchestrator
Process:
* SCP: Copy files from keymaster
* FS: Copy other files from output dir into aggregates.dir so they'll get checked in
* Git: Add newly created files
* Git: Commit
-->
<target name="process.create-mdq-cache.scp.and.push" depends="
fs.scp.signed.files.from.keymaster,
fs.scp.mdqcache.from.keymaster,
fs.cp.other.files.to.aggregates.dir,
git.products.addallnewfiles,
git.products.commit.signed">
<echo>Stage 4.2 Success: Signed aggregates and stats file committed to products repository.</echo>
</target>
<!--
Stage 5 of md process: Create Tag on products repository, push all to repo (incl. SCPing mdq cache)
Runs on: orchestrator
* Git: Create New Tag
* Git: Push to origin
* SCP: Copy mdq cache to repo
* Jenkins: Trigger publish task
-->
<target name="process.bagandtag" depends="
git.products.masterbranch.pushtoorigin,
git.products.createtagandpushtoorigin,
fs.scp.mdqcache.to.repo,
jenkins.triggerjob.publish">
<echo>Stage 5 Success: Master branch pushed to origin, new tag created and pushed, mdq cache sent to repo, message sent to start publication.</echo>
</target>
<!--
Stage 6 of md process: publish
Runs on: aggr
* SCP: Get mdq cache from repo
* Git: Make sure repos are up to date
* Git: Merge master branch into immediate
* Git: Merge immediate branch into deferred
* Git/SCP: Files to backend md servers
* SAML MD: Verify remote MD.
* Azure: Send purge to CDN
* Git: Make sure we're on master branch (to calculate git commits)
* Slack: Send notification to UKf channel
-->
<target name="process.publish" depends="
fs.scp.mdqcache.from.repo,
git.pull.all,
git.data.merge.masterintoimmediate,
git.data.merge.immediateintodeferred,
git.data.allbranches.pushtoorigin,
scp.githook,
publish.mdqcache,
publish.json,
publish.md,
publish.otherfiles,
samlmd.aggregates.verify.remote,
samlmd.mdq.verify.remote,
azure.purgecdn,
lock.unlock,
git.data.masterbranch.checkout,
slack.notify.publication.success">
<echo>Stage 6 Success: Aggregates and MDQ cache pushed and verified.</echo>
</target>
<!--
Wrapper target to perform periodic status check on embedded certificates.
Uses deferred branch.
Assumes you've run a git.pull.all first, as this target doesn't do that.
Runs on: aggr
Process:
* Git: Checkout deferred branch
* check embedded certificates
-->
<target name="process.check.embedded.deferredbranch" depends="
git.data.deferredbranch.checkout,
check.embedded">
<echo>Checked embedded certificates.</echo>
</target>
<!--
***************************************************
*** ***
*** A P I H A N D L I N G T A R G E T S ***
*** ***
***************************************************
-->
<target name="api.pause">
<exec executable="echo" failonerror="true">
<arg value="'API pause not yet implemented. This is not a failure, other than a moral one.'"/>
</exec>
</target>
<target name="api.unpause">
<exec executable="echo" failonerror="true">
<arg value="'API unpause not yet implemented. This is not a failure, other than a moral one.'"/>
</exec>
</target>
<!--
*****************************************
*** ***
*** L O C K I N G T A R G E T S ***
*** ***
*****************************************
-->
<target name="lock.check">
<echo>Checking for presence of lockfile...</echo>
<fail message="-> Lockfile present! Aborting.">
<condition>
<available file="${lockfile}"/>
</condition>
</fail>
<echo>-> No lockfile, continuing...</echo>
</target>
<target name="lock.lock">
<touch file="${lockfile}"/>
</target>
<target name="lock.unlock">
<delete file="${lockfile}"/>
</target>
<!--
*********************************
*** ***
*** G I T T A R G E T S ***
*** ***
*********************************
-->
<!--
Full hard reset of all repositories
-->
<target name="git.hardreset.all">
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="fetch"/>
<arg value="origin"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="reset"/>
<arg value="--hard"/>
<arg value="origin/master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="immediate"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="fetch"/>
<arg value="origin"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="reset"/>
<arg value="--hard"/>
<arg value="origin/immediate"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="fetch"/>
<arg value="origin"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="reset"/>
<arg value="--hard"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="fetch"/>
<arg value="origin"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="reset"/>
<arg value="--hard"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling}" failonerror="true">
<arg value="fetch"/>
<arg value="origin"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling}" failonerror="true">
<arg value="reset"/>
<arg value="--hard"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling.config}" failonerror="true">
<arg value="fetch"/>
<arg value="origin"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling.config}" failonerror="true">
<arg value="reset"/>
<arg value="--hard"/>
</exec>
<echo>All branches on all repositories, reset HARD.</echo>
</target>
<!--
Full clean of all repositories
-->
<target name="git.clean.all">
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="clean"/>
<arg value="-xdf"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="immediate"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="clean"/>
<arg value="-xdf"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="clean"/>
<arg value="-xdf"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="fetch"/>
<arg value="origin"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="clean"/>
<arg value="-xdf"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling}" failonerror="true">
<arg value="fetch"/>
<arg value="origin"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling}" failonerror="true">
<arg value="clean"/>
<arg value="-xdf"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling.config}" failonerror="true">
<arg value="clean"/>
<arg value="-xdf"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling.config}" failonerror="true">
<arg value="clean"/>
<arg value="-xdf"/>
</exec>
<echo>All branches on all repositories, cleaned.</echo>
</target>
<!--
Updates all of the local main repositories (all branches) from the origin.
-->
<target name="git.pull.all">
<echo>Pulling the latest state from all Git repositories (all branches).</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="pull"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="immediate"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="pull"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="pull"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="pull"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling}" failonerror="true">
<arg value="pull"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling.config}" failonerror="true">
<arg value="pull"/>
</exec>
</target>
<!--
Merges deferred branch into immediate branch of data repo.
-->
<target name="git.data.merge.deferredintoimmediate">
<echo>Merging deferred branch into immediate branch of data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="immediate"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="merge"/>
<arg value="deferred"/>
<arg value="--ff-only"/>
<arg value="-m"/>
<arg value="Merge deferred branch into immediate branch"/>
</exec>
</target>
<!--
Merges immediate branch into master branch of data repo
-->
<target name="git.data.merge.immediateintomaster">
<echo>Merging immediate branch into master branch of data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="merge"/>
<arg value="immediate"/>
<arg value="--ff-only"/>
<arg value="-m"/>
<arg value="Merge immediate branch into master branch"/>
</exec>
</target>
<!--
Merges master branch into immediate branch of data repo
-->
<target name="git.data.merge.masterintoimmediate">
<echo>Merging master branch into immediate branch of data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="immediate"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="merge"/>
<arg value="master"/>
<arg value="--ff-only"/>
<arg value="-m"/>
<arg value="Merge master branch into immediate branch"/>
</exec>
</target>
<!--
Merges immediate branch into deferred branch of data repo
-->
<target name="git.data.merge.immediateintodeferred">
<echo>Merging immediate branch into deferred branch of data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="merge"/>
<arg value="immediate"/>
<arg value="-m"/>
<arg value="Merge immedate branch into deferred branch"/>
</exec>
</target>
<!--
Merges master branch into deferred branch of data repo
-->
<target name="git.data.merge.masterintodeferred">
<echo>Merging master branch into deferred branch of data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="merge"/>
<arg value="master"/>
<arg value="--ff-only"/>
<arg value="-m"/>
<arg value="Merge master branch into deferred branch"/>
</exec>
</target>
<!--
Merges deferred branch into master branch of data repo
-->
<target name="git.data.merge.deferredintomaster">
<echo>Merging deferred branch into master branch of data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="merge"/>
<arg value="deferred"/>
<arg value="--ff-only"/>
<arg value="-m"/>
<arg value="Merge deferred branch into master branch"/>
</exec>
</target>
<!--
Checks out master branch of data repository
-->
<target name="git.data.masterbranch.checkout">
<echo>Switching to master branch in data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
</target>
<!--
Checks out deferred branch of data repository
-->
<target name="git.data.deferredbranch.checkout">
<echo>Switching to deferred branch in data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="deferred"/>
</exec>
</target>
<!--
Checks out immediate branch of data repository
-->
<target name="git.data.immediatebranch.checkout">
<echo>Switching to immediate branch in data repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="immediate"/>
</exec>
</target>
<!--
Push all branches of data repo to the origin
-->
<target name="git.data.allbranches.pushtoorigin">
<echo>Pushing all branches of data repository to origin</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="push"/>
<arg value="-u"/>
<arg value="origin"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="push"/>
<arg value="-u"/>
<arg value="origin"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="immediate"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="push"/>
<arg value="-u"/>
<arg value="origin"/>
<arg value="immediate"/>
</exec>
</target>
<!--
Push master branch of products repo to the origin
-->
<target name="git.products.masterbranch.pushtoorigin">
<echo>Pushing master branch of products repository to origin</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="push"/>
<arg value="-u"/>
<arg value="origin"/>
<arg value="master"/>
</exec>
</target>
<!--
Push master branch of data repo to the origin
-->
<target name="git.products.databranch.pushtoorigin">
<echo>Pushing master branch of data repository to origin</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="master"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="push"/>
<arg value="-u"/>
<arg value="origin"/>
<arg value="master"/>
</exec>
</target>
<!--
Push immediate branch of data repo to the origin
-->
<target name="git.data.immediatebranch.pushtoorigin">
<echo>Pushing immediate branch of data repository to origin</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="immediate"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="push"/>
<arg value="-u"/>
<arg value="origin"/>
<arg value="immediate"/>
</exec>
</target>
<!--
Push deferred branch of data repo to the origin
-->
<target name="git.data.deferredbranch.pushtoorigin">
<echo>Pushing deferred branch of data repository to origin</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="checkout"/>
<arg value="deferred"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.data}" failonerror="true">
<arg value="push"/>
<arg value="-u"/>
<arg value="origin"/>
<arg value="deferred"/>
</exec>
</target>
<!--
Add any new files in /aggregates of products repository to working set
-->
<target name="git.products.addallnewfiles">
<echo>Adding all new files in aggregates/ directory of products repository into Git working set.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="add"/>
<arg value="aggregates/"/>
</exec>
</target>
<!--
Commit unsigned files to local products repository
-->
<target name="git.products.commit.unsigned">
<echo>Committing all changes in products repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="commit"/>
<arg value="-m"/>
<arg value="Commit new unsigned files into products repo"/>
</exec>
</target>
<!--
Commit signed files to local products repository
-->
<target name="git.products.commit.signed">
<echo>Committing all changes in products repository.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="commit"/>
<arg value="-m"/>
<arg value="Commit new signed files into products repo"/>
</exec>
</target>
<!--
Creates a new tag on the master branch
-->
<target name="git.products.createtagandpushtoorigin">
<echo>Creating new Tag in master branch of products repository.</echo>
<tstamp>
<format property="DATE_UTC" pattern="yyyy-MM-dd" locale="UTC"/>
</tstamp>
<tstamp>
<format property="TIME_UTC" pattern="HH-mm" locale="UTC"/>
</tstamp>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="tag"/>
<arg value="-a"/>
<arg value="UKf_${DATE_UTC}_${TIME_UTC}"/>
<arg value="-m"/>
<arg value="'UK federation publication - ${DATE_UTC} ${TIME_UTC}'"/>
</exec>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="push"/>
<arg value="origin"/>
<arg value="UKf_${DATE_UTC}_${TIME_UTC}"/>
</exec>
</target>
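<!--
The tstamp patterns above produce tag names of the form
UKf_2023-05-01_14-30 (date and time in UTC); the date and time shown
are illustrative.
-->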
<!--
Push the master branch of the tooling repository to the keymaster
-->
<target name="git.orchestrator.push.tooling.to.keymaster">
<echo>Pushing the latest tooling repository to keymaster.</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.tooling}" failonerror="true">
<arg value="push"/>
<arg value="keymaster"/>
<arg value="master"/>
</exec>
</target>
<!--
*****************************************
*** ***
*** J E N K I N S T A R G E T S ***
*** ***
*****************************************
-->
<!--
Does an HTTP GET on the URL that triggers the Jenkins signing job, but only if one is not currently in progress.
-->
<target name="jenkins.triggerjob.signing">
<echo>Triggering Jenkins signing job</echo>
<get src="${jenkins.url.to.trigger.signing}" dest="${temp.dir}/get.out"/>
</target>
<!--
Does an HTTP GET on the URL that triggers the Jenkins publication job.
-->
<target name="jenkins.triggerjob.publish">
<echo>Triggering Jenkins publication Job.</echo>
<get src="${jenkins.url.to.trigger.publication}" dest="${temp.dir}/get.out"/>
</target>
<!--
*************************************
*** ***
*** S T A T S T A R G E T S ***
*** ***
*************************************
-->
<!--
Sync the logfiles from the backend servers into a central location
-->
<target name="stats.sync">
<echo>Updating local stats cache</echo>
<exec executable="bash" failonerror="true">
<arg value="${utilities.dir}/stats-sync.sh"/>
</exec>
</target>
<!--
Runs the stats-generate script for yesterday
-->
<target name="stats.daily">
<echo>Triggering daily stats job</echo>
<exec executable="bash" failonerror="true" outputproperty="stats.daily.output">
<arg value="${utilities.dir}/stats-generate.sh"/>
<arg value="day"/>
</exec>
<SLACK.send conf="${tools.slacktee.config}/repo-jenkins.conf" colour="good" channel="ukf-stats"
message="${stats.daily.output}"/>
</target>
<!--
Runs the stats-generate script for the previous month
-->
<target name="stats.monthly">
<echo>Triggering monthly stats job</echo>
<exec executable="bash" failonerror="true">
<arg value="${utilities.dir}/stats-generate.sh"/>
<arg value="month"/>
</exec>
</target>
<!--
Runs the stats-generate script for the previous year
-->
<target name="stats.yearly">
<echo>Triggering yearly stats job</echo>
<exec executable="bash" failonerror="true">
<arg value="${utilities.dir}/stats-generate.sh"/>
<arg value="year"/>
</exec>
</target>
<!--
****************************************************
*** ***
*** S A M L M D V E R I F I C A T I O N ***
*** ***
****************************************************
-->
<!--
Verify a metadata file held on the master distribution site, using the ukfederation-2014.pem key
-->
<macrodef name="VFY.remote">
<attribute name="i"/>
<sequential>
<echo>Verifying @{i}...</echo>
<delete file="${temp.xml}" quiet="true" verbose="false"/>
<get src="@{i}" dest="${temp.xml}"/>
<!--
Verify using xmlsectool.
-->
<XMLSECTOOL.VFY.uk i="${temp.xml}"/>
<!--
Delete the temporary file.
-->
<delete file="${temp.xml}" quiet="true" verbose="false"/>
</sequential>
</macrodef>
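<!--
Illustrative usage only (the URL shown is an example, built from the
distribution properties defined earlier):

<VFY.remote i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.prod.signed}"/>
-->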
<!--
Verify a metadata file held on the master distribution site, using the ukfederation-mdq.pem key
-->
<macrodef name="VFY.MDQ.remote">
<attribute name="i"/>
<sequential>
<echo>Verifying @{i}...</echo>
<delete file="${temp.xml}" quiet="true" verbose="false"/>
<get src="@{i}" dest="${temp.xml}"/>
<!--
Verify using xmlsectool.
-->
<XMLSECTOOL.VFY.MDQ.uk i="${temp.xml}"/>
<!--
Delete the temporary file.
-->
<delete file="${temp.xml}" quiet="true" verbose="false"/>
</sequential>
</macrodef>
<!--
Verify a metadata file held on the master distribution site.
Additionally, it'll compare the provided checksum against that of the
downloaded file, to ensure the file has the content you expected.
-->
<macrodef name="VFY.remote.and.checksum">
<attribute name="i"/>
<attribute name="checksum"/>
<sequential>
<echo>Verifying @{i}...</echo>
<delete file="${temp.xml}" quiet="true" verbose="false"/>
<get src="@{i}" dest="${temp.xml}"/>
<!--
Check the checksum matches what was expected.
-->
<local name="checksum.of.downloaded.file"/>
<checksum file="${temp.xml}" property="checksum.of.downloaded.file"/>
<fail message="Checksum of file in repository and on backend server does NOT match.">
<condition>
<not>
<equals arg1="@{checksum}" arg2="${checksum.of.downloaded.file}"/>
</not>
</condition>
</fail>
<echo>Checksum of file matches expected value</echo>
<!--
Verify using xmlsectool.
-->
<XMLSECTOOL.VFY.uk i="${temp.xml}"/>
<!--
Delete the temporary file.
-->
<delete file="${temp.xml}" quiet="true" verbose="false"/>
</sequential>
</macrodef>
<!--
Verify file checksum of a metadata file held on the master distribution site.
-->
<macrodef name="VFY.remote.checksum.only">
<attribute name="i"/>
<attribute name="checksum"/>
<sequential>
<echo>Verifying checksum of @{i}...</echo>
<delete file="${temp.xml}" quiet="true" verbose="false"/>
<get src="@{i}" dest="${temp.xml}"/>
<!--
Check the checksum matches what was expected.
-->
<local name="checksum.of.downloaded.file"/>
<checksum file="${temp.xml}" property="checksum.of.downloaded.file"/>
<fail message="Checksum of file in repository and on backend server does NOT match.">
<condition>
<not>
<equals arg1="@{checksum}" arg2="${checksum.of.downloaded.file}"/>
</not>
</condition>
</fail>
<echo>Checksum of file matches expected value</echo>
<!--
Delete the temporary file.
-->
<delete file="${temp.xml}" quiet="true" verbose="false"/>
</sequential>
</macrodef>
<!--
Verify metadata files held on the master distribution site.
-->
<target name="samlmd.aggregates.verify.remote">
<echo>Computing checksums of each aggregate</echo>
<checksum file="${aggregates.dir}/${mdaggr.prod.signed}"
property="mdaggr.prod.signed.checksum"/>
<checksum file="${aggregates.dir}/${mdaggr.wayf.signed}"
property="mdaggr.wayf.signed.checksum"/>
<checksum file="${aggregates.dir}/${mdaggr.cdsall.signed}"
property="mdaggr.cdsall.signed.checksum"/>
<checksum file="${aggregates.dir}/${mdaggr.test.signed}"
property="mdaggr.test.signed.checksum"/>
<checksum file="${aggregates.dir}/${mdaggr.back.signed}"
property="mdaggr.back.signed.checksum"/>
<checksum file="${aggregates.dir}/${mdaggr.export.signed}"
property="mdaggr.export.signed.checksum"/>
<checksum file="${aggregates.dir}/${mdaggr.export.preview.signed}"
property="mdaggr.export.preview.signed.checksum"/>
<checksum file="${aggregates.dir}/${mdaggr.wugen.signed}"
property="mdaggr.wugen.signed.checksum"/>
<echo>Verifying metadata held at ${md.dist.host-ne-01.name}</echo>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.prod.signed}"
checksum="${mdaggr.prod.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.wayf.signed}"
checksum="${mdaggr.wayf.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.cdsall.signed}"
checksum="${mdaggr.cdsall.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.test.signed}"
checksum="${mdaggr.test.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.back.signed}"
checksum="${mdaggr.back.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.export.signed}"
checksum="${mdaggr.export.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.export.preview.signed}"
checksum="${mdaggr.export.preview.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-01.name}${md.dist.path.name}${mdaggr.wugen.signed}"
checksum="${mdaggr.wugen.signed.checksum}"/>
<echo>Verifying metadata held at ${md.dist.host-ne-02.name}</echo>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-02.name}${md.dist.path.name}${mdaggr.prod.signed}"
checksum="${mdaggr.prod.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-02.name}${md.dist.path.name}${mdaggr.wayf.signed}"
checksum="${mdaggr.wayf.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-02.name}${md.dist.path.name}${mdaggr.cdsall.signed}"
checksum="${mdaggr.cdsall.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-02.name}${md.dist.path.name}${mdaggr.test.signed}"
checksum="${mdaggr.test.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-02.name}${md.dist.path.name}${mdaggr.back.signed}"
checksum="${mdaggr.back.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-02.name}${md.dist.path.name}${mdaggr.export.signed}"
checksum="${mdaggr.export.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-02.name}${md.dist.path.name}${mdaggr.export.preview.signed}"
checksum="${mdaggr.export.preview.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-ne-02.name}${md.dist.path.name}${mdaggr.wugen.signed}"
checksum="${mdaggr.wugen.signed.checksum}"/>
<echo>Verifying metadata held at ${md.dist.host-we-01.name}</echo>
<VFY.remote.checksum.only i="http://${md.dist.host-we-01.name}${md.dist.path.name}${mdaggr.prod.signed}"
checksum="${mdaggr.prod.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-01.name}${md.dist.path.name}${mdaggr.wayf.signed}"
checksum="${mdaggr.wayf.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-01.name}${md.dist.path.name}${mdaggr.cdsall.signed}"
checksum="${mdaggr.cdsall.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-01.name}${md.dist.path.name}${mdaggr.test.signed}"
checksum="${mdaggr.test.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-01.name}${md.dist.path.name}${mdaggr.back.signed}"
checksum="${mdaggr.back.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-01.name}${md.dist.path.name}${mdaggr.export.signed}"
checksum="${mdaggr.export.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-01.name}${md.dist.path.name}${mdaggr.export.preview.signed}"
checksum="${mdaggr.export.preview.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-01.name}${md.dist.path.name}${mdaggr.wugen.signed}"
checksum="${mdaggr.wugen.signed.checksum}"/>
<echo>Verifying metadata held at ${md.dist.host-we-02.name}</echo>
<VFY.remote.checksum.only i="http://${md.dist.host-we-02.name}${md.dist.path.name}${mdaggr.prod.signed}"
checksum="${mdaggr.prod.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-02.name}${md.dist.path.name}${mdaggr.wayf.signed}"
checksum="${mdaggr.wayf.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-02.name}${md.dist.path.name}${mdaggr.cdsall.signed}"
checksum="${mdaggr.cdsall.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-02.name}${md.dist.path.name}${mdaggr.test.signed}"
checksum="${mdaggr.test.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-02.name}${md.dist.path.name}${mdaggr.back.signed}"
checksum="${mdaggr.back.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-02.name}${md.dist.path.name}${mdaggr.export.signed}"
checksum="${mdaggr.export.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-02.name}${md.dist.path.name}${mdaggr.export.preview.signed}"
checksum="${mdaggr.export.preview.signed.checksum}"/>
<VFY.remote.checksum.only i="http://${md.dist.host-we-02.name}${md.dist.path.name}${mdaggr.wugen.signed}"
checksum="${mdaggr.wugen.signed.checksum}"/>
<echo>Verification completed.</echo>
</target>
<!--
Verify a few select mdq files held on the master distribution site.
-->
<target name="samlmd.mdq.verify.remote">
<echo>Verifying MDQ held at ${mdq.dist.name}</echo>
<VFY.MDQ.remote i="http://${mdq.dist.name}/entities"/>
<VFY.MDQ.remote i="http://${mdq.dist.name}/entities/https%3A%2F%2Ftest-idp.ukfederation.org.uk%2Fidp%2Fshibboleth"/>
<VFY.MDQ.remote i="http://${mdq.dist.name}/entities/https%3A%2F%2Ftest.ukfederation.org.uk%2Fentity"/>
</target>
<!--
*************************************************
*** ***
*** M E T A D A T A G E N E R A T I O N ***
*** ***
*************************************************
-->
<!--
uk.collected
This is where the flow.uk.collect target places its output.
-->
<property name="uk.collected" value="${mdx.dir}/uk/collected.xml"/>
<!--
flow.uk.collect
Runs the mda code to collect all entities registered with the
UK federation registrar.
-->
<target name="flow.uk.collect">
<CHANNEL.do channel="uk" verb="collect"/>
</target>
<!--
Runs the MDA on the uk channel's verify verb to verify that the UK-registered
metadata passes all the checks that would be imposed during a signing run.
No output files are produced by this operation.
-->
<target name="samlmd.aggregates.generate.dry-run">
<echo>Performing dry run of generating UKfed MD.</echo>
<CHANNEL.do channel="uk" verb="verify"/>
<echo>All UKfed metadata successfully verified.</echo>
</target>
<!--
Unsigned metadata generation for the UK Federation.
-->
<target name="samlmd.aggregates.generate">
<echo>Generating unsigned UKfed metadata files.</echo>
<!--
Call the generate verb in the uk mdx channel
to generate the following:
production aggregate
WAYF/CDS aggregates
test aggregate
export aggregate
export preview aggregate
fallback aggregate
statistics
-->
<CHANNEL.do channel="uk" verb="generate"/>
<!--
Generate discovery feeds.
-->
<CHANNEL.do channel="uk" verb="discofeeds"/>
<!--
Post-process mda-generated output files.
-->
<MDNORM i="${output.dir}/${mdaggr.prod.unsigned}"/>
<MDNORM i="${output.dir}/${mdaggr.wayf.unsigned}"/>
<MDNORM.noblank i="${output.dir}/${mdaggr.cdsall.unsigned}"/>
<MDNORM.noblank i="${output.dir}/${mdaggr.wugen.unsigned}"/>
<MDNORM i="${output.dir}/${mdaggr.test.unsigned}"/>
<MDNORM i="${output.dir}/${mdaggr.export.unsigned}"/>
<MDNORM i="${output.dir}/${mdaggr.export.preview.unsigned}"/>
<MDNORM i="${output.dir}/${mdaggr.back.unsigned}"/>
<fixcrlf file="${output.dir}/${mdaggr.stats}" eol="lf" encoding="UTF-8"/>
<echo>Generated UK unsigned metadata.</echo>
</target>
<!--
***************************
*** ***
*** M D A T O O L ***
*** ***
***************************
-->
<!--
Property definitions for the mda tool.
Any Ant property with a name of the form "mda.*" is passed through
as a system property to the mda invocation with the "mda." stripped
off. Thus, a property "mda.foo" will be passed through as a system
property called "foo".
The individual properties listed here can be augmented or overridden
by properties defined in the external properties files, e.g., build.properties.
-->
<property name="mda.blocklists.dir" value="${blocklists.dir}"/>
<property name="mda.cacheDuration.aggregate.duration" value="${cacheDuration.aggregate.duration}"/>
<property name="mda.cacheDuration.perEntity.duration" value="${cacheDuration.perEntity.duration}"/>
<property name="mda.edugain.dir" value="${edugain.dir}"/>
<property name="mda.entities.dir" value="${entities.dir}"/>
<property name="mda.mdx.dir" value="${mdx.dir}"/>
<property name="mda.members.dir" value="${members.dir}"/>
<property name="mda.output.dir" value="${output.dir}"/>
<property name="mda.sign.keyAlias" value="${sign.uk.keyAlias}"/>
<property name="mda.sign.pkcs11Config" value="${sign.uk.pkcs11Config}"/>
<property name="mda.mdq.output" value="${mdq.output.dir}"/>
<property name="mda.validUntil.aggregate.days" value="${validUntil.aggregate.days}"/>
<property name="mda.validUntil.aggregate.duration" value="${validUntil.aggregate.duration}"/>
<property name="mda.validUntil.perEntity.duration" value="${validUntil.perEntity.duration}"/>
<!--
Build a property set of all the properties to be passed through, with
the "mda." prefix stripped off.
-->
<propertyset id="mda.properties">
<propertyref prefix="mda."/>
<mapper type="glob" from="mda.*" to="*"/>
</propertyset>
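<!--
For example, mda.output.dir above is passed to the mda invocation as a
system property named simply "output.dir".
-->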
<!--
Macro to run the mda (metadata aggregator CLI) tool.
-->
<macrodef name="MDA">
<!-- Spring pipeline configuration file -->
<attribute name="config"/>
<!-- pipeline bean ID -->
<attribute name="pipeline"/>
<sequential>
<java classname="net.shibboleth.metadata.cli.SimpleCommandLine"
fork="true" failonerror="true" maxmemory="${java.max.memory}">
<sysproperty key="java.library.path" path="${mda.jni.path}"
if:set="mda.jni.path"/>
<classpath>
<!-- Spring "classpath:" imports can be under the MDX directory -->
<pathelement path="${mdx.dir}"/>
<!-- Include all APIs we may want to use. -->
<!-- The Shibboleth MDA. -->
<fileset dir="${tools.mda}/lib">
<include name="*.jar"/>
</fileset>
<!-- UK federation MDA beans, and ukf-members. -->
<fileset dir="${tools.dir}/ukf-mda">
<include name="*.jar"/>
</fileset>
<!-- InCommon MDA beans. -->
<fileset dir="${tools.dir}/inc-mda">
<include name="*.jar"/>
</fileset>
<!-- Support libraries for any of the above. -->
<fileset dir="${tools.dir}/lib">
<include name="*.jar"/>
</fileset>
<!--
Use Xalan as our XML processor.
Note: this is not the old endorsement mechanism,
despite the directory name. Instead, we just load
it via the classpath.
-->
<fileset dir="${tools.dir}/xalan/impl">
<include name="*.jar"/>
</fileset>
<!--
Xalan-dependent UK federation classes.
-->
<fileset dir="${tools.dir}/xalan/lib">
<include name="sdss-xalan-md-*.jar"/>
</fileset>
<!-- Include a per-target directory if set. -->
<fileset dir="${tools.dir}">
<include name="${mda.classpath.extra}/*.jar"
if="mda.classpath.extra"/>
</fileset>
</classpath>
<syspropertyset>
<propertyset refid="mda.properties"/>
</syspropertyset>
<jvmarg value="-enableassertions"/>
<arg value="--logConfig"/>
<arg value="${tools.dir}/mda-logging.xml"/>
<arg value="@{config}"/>
<arg value="@{pipeline}"/>
</java>
</sequential>
</macrodef>
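<!--
Example invocation of the MDA macro (a sketch; this particular
config/pipeline pairing is illustrative, but follows the layout used
by CHANNEL.do below):
<MDA config="${mdx.dir}/uk/verbs.xml" pipeline="collect"/>
This runs the "collect" pipeline bean defined in the given Spring
configuration file, with all mda.* properties passed through as
system properties.
-->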
<!--
*********************************
*** ***
*** M D N O R M T O O L ***
*** ***
*********************************
-->
<!--
MDNORM
Macro to call the mdnorm tool to perform textual normalisation on an
XML file containing a metadata aggregate.
Parameter 'i' is the file to be normalised.
-->
<macrodef name="MDNORM">
<attribute name="i"/><!-- input file -->
<element name="args" optional="yes"/>
<sequential>
<java fork="true" maxmemory="${java.max.memory}" failonerror="true" classname="uk.org.ukfederation.mdnorm.Normalise">
<classpath>
<fileset dir="${tools.mdnorm}/lib">
<include name="*.jar"/>
</fileset>
</classpath>
<args/>
<arg value="@{i}"/>
</java>
</sequential>
</macrodef>
<!--
MDNORM.noblank
Call MDNORM but discard blank lines from the output.
-->
<macrodef name="MDNORM.noblank">
<attribute name="i"/><!-- input file -->
<sequential>
<MDNORM i="@{i}">
<args>
<arg value="--discardBlankLines"/>
</args>
</MDNORM>
</sequential>
</macrodef>
<!--
***********************************
*** ***
*** S L A C K T E E T O O L ***
*** ***
***********************************
-->
<macrodef name="SLACK.send">
<attribute name="conf"/>
<attribute name="colour" default="none"/>
<attribute name="channel"/>
<attribute name="message"/>
<sequential>
<if>
<equals arg1="@{colour}" arg2="none"/>
<then>
<exec executable="bash" failonerror="true">
<arg value="-c"/>
<arg value="echo -e '@{message}' | ${tools.slacktee}/bin/slacktee.sh -p --config @{conf} -c @{channel}"/>
</exec>
</then>
<else>
<exec executable="bash" failonerror="true">
<arg value="-c"/>
<arg value="echo -e '@{message}' | ${tools.slacktee}/bin/slacktee.sh -p --config @{conf} -a @{colour} -c @{channel}"/>
</exec>
</else>
</if>
</sequential>
</macrodef>
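<!--
Example use of SLACK.send (a sketch; the message text is illustrative,
while the conf and channel values are those used by the publication
notification below):
<SLACK.send conf="${tools.slacktee.config}/aggr-ant.conf"
colour="good" channel="ukf-events-publish" message="hello"/>
Omitting the colour attribute (or passing "none") sends the message
without an attachment colour.
-->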
<!--
Send a success notification to the Slack channel.
-->
<target name="slack.notify.publication.success">
<!-- Only send when we're doing prod flows -->
<if>
<equals arg1="${env}" arg2="prod"/>
<then>
<exec executable="bash" failonerror="true" outputproperty="diff.between.publications">
<arg value="${utilities.dir}/diff-between-publications.sh"/>
<arg value="${shared.ws.dir}"/>
<arg value="${git.repo.group}"/>
<arg value="${git.repo.project.data}"/>
<arg value="${git.repo.project.products}"/>
</exec>
<SLACK.send conf="${tools.slacktee.config}/aggr-ant.conf" colour="good" channel="ukf-events-publish"
message="${diff.between.publications}"/>
</then>
</if>
</target>
<!--
*****************************************
*** ***
*** X M L S E C T O O L T O O L ***
*** ***
*****************************************
-->
<macrodef name="XMLSECTOOL">
<attribute name="i"/><!-- input file -->
<element name="args" optional="yes"/>
<sequential>
<java classname="net.shibboleth.tool.xmlsectool.XMLSecTool"
fork="true" failonerror="true" maxmemory="${java.max.memory}">
<classpath>
<fileset dir="${tools.xmlsectool}/lib">
<include name="*.jar"/>
</fileset>
</classpath>
<args/>
<arg value="--validateSchema"/>
<arg value="--schemaDirectory"/>
<arg value="${mdx.dir}/schema"/>
<arg value="--inFile"/>
<arg value="@{i}"/>
</java>
</sequential>
</macrodef>
<macrodef name="XMLSECTOOL.SIGN.uk">
<attribute name="i"/><!-- input file -->
<attribute name="o"/><!-- output file -->
<attribute name="digest"/><!-- digest function to use -->
<sequential>
<!-- delete the temporary file to be sure we don't use old data -->
<delete file="${temp.xml}" quiet="true" verbose="false"/>
<echo>Signing @{i} using digest @{digest}.</echo>
<!-- perform signing operation, writing the result to the output file -->
<XMLSECTOOL i="@{i}">
<args>
<arg value="--sign"/>
<!-- set digest to use -->
<arg value="--digest"/>
<arg value="@{digest}"/>
<!--
If we have a PKCS#11 configuration specified, include it.
-->
<arg if:set="sign.uk.pkcs11Config" value="--pkcs11Config"/>
<arg if:set="sign.uk.pkcs11Config" value="${sign.uk.pkcs11Config}"/>
<!--
If we have a non-default keystore provider specified, include it.
-->
<arg if:set="sign.uk.keystoreProvider" value="--keystoreProvider"/>
<arg if:set="sign.uk.keystoreProvider" value="${sign.uk.keystoreProvider}"/>
<!--
The "key" option can represent either a key file or a key alias.
Different properties are used for the two cases (see XSTJ-67).
-->
<arg if:set="sign.uk.keyFile" value="--key"/>
<arg if:set="sign.uk.keyFile" value="${sign.uk.keyFile}"/>
<arg if:set="sign.uk.keyAlias" value="--key"/>
<arg if:set="sign.uk.keyAlias" value="${sign.uk.keyAlias}"/>
<!--
Include an X.509 certificate if one is specified.
-->
<arg if:set="sign.uk.certificate" value="--certificate"/>
<arg if:set="sign.uk.certificate" value="${sign.uk.certificate}"/>
<arg value="--keyPassword"/>
<arg value="${sign.uk.keyPassword}"/>
<arg value="--outFile"/>
<arg value="@{o}"/>
<arg value="--referenceIdAttributeName"/>
<arg value="ID"/>
<!--
<arg value="- -quiet"/>
-->
</args>
</XMLSECTOOL>
<!-- Force the output file to use Unix line endings -->
<fixcrlf file="@{o}" eol="lf" encoding="UTF-8"/>
</sequential>
</macrodef>
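<!--
Sketch of the two key configurations implied by the macro above
(property names are real, values are placeholders): a file-based
deployment sets sign.uk.keyFile (and usually sign.uk.certificate),
while a keystore or PKCS#11 deployment sets sign.uk.keyAlias together
with sign.uk.pkcs11Config or sign.uk.keystoreProvider as appropriate.
Define exactly one of sign.uk.keyFile and sign.uk.keyAlias; setting
both would pass the "key" option to xmlsectool twice.
-->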
<macrodef name="XMLSECTOOL.VFY.uk">
<attribute name="i"/><!-- input file -->
<sequential>
<XMLSECTOOL i="@{i}">
<args>
<arg value="--verifySignature"/>
<arg value="--certificate"/>
<arg value="${mdx.dir}/uk/ukfederation-2014.pem"/>
<!--
<arg value="- -quiet"/>
-->
</args>
</XMLSECTOOL>
</sequential>
</macrodef>
<macrodef name="XMLSECTOOL.VFY.MDQ.uk">
<attribute name="i"/><!-- input file -->
<sequential>
<XMLSECTOOL i="@{i}">
<args>
<arg value="--verifySignature"/>
<arg value="--certificate"/>
<arg value="${mdx.dir}/uk/ukfederation-mdq.pem"/>
<!--
<arg value="- -quiet"/>
-->
</args>
</XMLSECTOOL>
</sequential>
</macrodef>
<!--
*******************************************
*** ***
*** M E T A D A T A S I G N I N G ***
*** ***
*******************************************
-->
<!--
Acquire the signing keystore password.
Note: this will not result in a prompt if the sign.uk.keyPassword property
is already defined.
-->
<target name="get.sign.uk.keyPassword" unless="sign.uk.keyPassword">
<input addproperty="sign.uk.keyPassword">
Please enter the password for the keystores:
</input>
</target>
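<!--
To avoid the interactive prompt (e.g., when run from Jenkins), define
the property up front, for example:
ant samlmd.aggregates.sign -Dsign.uk.keyPassword=REDACTED
(the value shown is a placeholder, not a real credential).
-->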
<!--
Select tool to sign UK federation metadata with.
-->
<macrodef name="SIGN.uk">
<attribute name="i"/>
<attribute name="o"/>
<attribute name="digest"/><!-- digest function to use -->
<sequential>
<XMLSECTOOL.SIGN.uk i="@{i}" o="@{o}" digest="@{digest}"/>
</sequential>
</macrodef>
<!--
Signs the unsigned aggregates
-->
<target name="samlmd.aggregates.sign" depends="get.sign.uk.keyPassword">
<echo>Signing unsigned aggregates.</echo>
<echo>Signing UKfed prod metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.prod.unsigned}" o="${output.dir}/${mdaggr.prod.signed}" digest="SHA-256"/>
<echo>Signing UKfed WAYF metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.wayf.unsigned}" o="${output.dir}/${mdaggr.wayf.signed}" digest="SHA-256"/>
<echo>Signing UKfed CDS full metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.cdsall.unsigned}" o="${output.dir}/${mdaggr.cdsall.signed}" digest="SHA-256"/>
<echo>Signing UKfed test metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.test.unsigned}" o="${output.dir}/${mdaggr.test.signed}" digest="SHA-256"/>
<echo>Signing UKfed export metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.export.unsigned}" o="${output.dir}/${mdaggr.export.signed}" digest="SHA-256"/>
<echo>Signing UKfed export preview metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.export.preview.unsigned}" o="${output.dir}/${mdaggr.export.preview.signed}" digest="SHA-256"/>
<echo>Signing UKfed fallback metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.back.unsigned}" o="${output.dir}/${mdaggr.back.signed}" digest="SHA-256"/>
<echo>Signing UKfed Wugen metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.wugen.unsigned}" o="${output.dir}/${mdaggr.wugen.signed}" digest="SHA-256"/>
</target>
<!--
Tests signing the unsigned main aggregate
-->
<target name="samlmd.aggregates.sign.test" depends="get.sign.uk.keyPassword">
<echo>Testing signing the unsigned main aggregate.</echo>
<echo>Test signing UKfed prod metadata.</echo>
<SIGN.uk i="${output.dir}/${mdaggr.prod.unsigned}" o="${output.dir}/${mdaggr.prod.signed}" digest="SHA-256"/>
</target>
<!--
Verify the signed aggregates.
-->
<target name="samlmd.aggregates.verify">
<echo>Verifying signed UK metadata.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.prod.signed}"/>
<echo>Verifying signed UK WAYF metadata.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.wayf.signed}"/>
<echo>Verifying signed UK CDS full metadata.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.cdsall.signed}"/>
<echo>Verifying signed UK test metadata.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.test.signed}"/>
<echo>Verifying signed UK export metadata.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.export.signed}"/>
<echo>Verifying signed UK export preview metadata.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.export.preview.signed}"/>
<echo>Verifying signed UK fallback metadata.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.back.signed}"/>
<echo>Verifying signed UK Wugen metadata.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.wugen.signed}"/>
<echo>Verification completed.</echo>
</target>
<!--
Tests verifying the signed main aggregate.
-->
<target name="samlmd.aggregates.verify.test">
<echo>Testing verifying the signed main aggregate.</echo>
<XMLSECTOOL.VFY.uk i="${output.dir}/${mdaggr.prod.signed}"/>
<echo>Test verification completed.</echo>
</target>
<!--
Sign MDQ components: EntityDescriptor files and EntitiesDescriptor
-->
<target name="samlmd.mdq.sign" depends="
samlmd.mdq.sign.fragments,
samlmd.mdq.sign.all.entities">
<echo>MDQ fragments and all-entities file signed.</echo>
</target>
<!--
Break the production aggregate into per-entity metadata, sign the
individual documents and write them as individual files into a
destination directory.
-->
<target name="samlmd.mdq.sign.fragments" depends="get.sign.uk.keyPassword">
<property name="mda.mdq.input" value="${output.dir}/${mdaggr.prod.unsigned}"/>
<property name="mda.sign.keyPassword" value="${sign.uk.keyPassword}"/>
<echo>Generating per-entity metadata in ${mda.mdq.output}</echo>
<echo> from production aggregate in ${mda.mdq.input}</echo>
<delete dir="${mdq.output.dir}" quiet="true"/>
<mkdir dir="${mdq.output.dir}"/>
<CHANNEL.do channel="uk" verb="mdq-multisign"/>
<echo>Generation complete.</echo>
</target>
<!--
Sign the unsigned production aggregate and write the signed result out as all.xml for MDQ.
-->
<target name="samlmd.mdq.sign.all.entities" depends="get.sign.uk.keyPassword">
<echo>Generating all.xml in ${mda.mdq.output}</echo>
<SIGN.uk i="${output.dir}/${mdaggr.prod.unsigned}" o="${mdq.output.dir}/all.xml" digest="SHA-256"/>
<echo>Generation complete.</echo>
</target>
<!--
Simple local test of per-entity metadata generation, based on a single
entity rather than the whole production aggregate, because dev environment
PKCS#11 tokens are not fast enough to sign everything.
-->
<target name="samlmd.mdq.sign.test" depends="get.sign.uk.keyPassword">
<property name="mda.mdq.input" value="${entities.dir}/uk000006.xml"/>
<property name="mda.sign.keyPassword" value="${sign.uk.keyPassword}"/>
<echo>Generating per-entity metadata in ${mda.mdq.output}</echo>
<echo> from test metadata in ${mda.mdq.input}</echo>
<delete dir="${mdq.output.dir}" quiet="true"/>
<mkdir dir="${mdq.output.dir}"/>
<CHANNEL.do channel="uk" verb="mdq-multisign"/>
<echo>Generation complete.</echo>
</target>
<!--
*******************************************************
*** ***
*** F I L E S Y S T E M O P S T A R G E T S ***
*** ***
*******************************************************
-->
<target name="fs.clear.outputdir">
<echo>Clearing output directory.</echo>
<delete includeemptydirs="true">
<fileset dir="${output.dir}" includes="**/*"/>
</delete>
</target>
<target name="fs.tar.mdqcache">
<echo>Creating tar of MDQ cache.</echo>
<tar compression="gzip" longfile="posix" destfile="${output.dir}/${mdq.cache}" basedir="${mdq.output.dir}"/>
</target>
<target name="fs.cp.other.files.to.aggregates.dir">
<echo>Copying other files that should be checked into git into orchestrator's aggregates dir.</echo>
<copy failonerror="true" todir="${aggregates.dir}">
<fileset dir="${output.dir}">
<include name="${mdaggr.stats}"/>
</fileset>
</copy>
</target>
<target name="fs.scp.unsigned.files.to.orchestrator">
<echo>SCPing unsigned files and stats file from output dir to orchestrator's build dir.</echo>
<scp failonerror="true" remoteTodir="${orchestrator.url}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdaggr.prod.unsigned}"/>
<include name="${mdaggr.wayf.unsigned}"/>
<include name="${mdaggr.cdsall.unsigned}"/>
<include name="${mdaggr.test.unsigned}"/>
<include name="${mdaggr.back.unsigned}"/>
<include name="${mdaggr.export.unsigned}"/>
<include name="${mdaggr.export.preview.unsigned}"/>
<include name="${mdaggr.wugen.unsigned}"/>
<include name="${mdaggr.stats}"/>
</fileset>
</scp>
</target>
<target name="fs.scp.unsigned.files.to.keymaster">
<echo>SCPing unsigned aggregates from orchestrator's output dir to keymaster's build dir.</echo>
<scp failonerror="true" remoteTodir="${keymaster.url}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdaggr.prod.unsigned}"/>
<include name="${mdaggr.wayf.unsigned}"/>
<include name="${mdaggr.cdsall.unsigned}"/>
<include name="${mdaggr.test.unsigned}"/>
<include name="${mdaggr.back.unsigned}"/>
<include name="${mdaggr.export.unsigned}"/>
<include name="${mdaggr.export.preview.unsigned}"/>
<include name="${mdaggr.wugen.unsigned}"/>
</fileset>
</scp>
</target>
<target name="fs.scp.signed.files.from.keymaster">
<echo>SCPing signed aggregates from keymaster's output dir into orchestrator's aggregates dir.</echo>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdaggr.prod.signed}" todir="${aggregates.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdaggr.wayf.signed}" todir="${aggregates.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdaggr.cdsall.signed}" todir="${aggregates.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdaggr.test.signed}" todir="${aggregates.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdaggr.back.signed}" todir="${aggregates.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdaggr.export.signed}" todir="${aggregates.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdaggr.export.preview.signed}" todir="${aggregates.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdaggr.wugen.signed}" todir="${aggregates.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
</target>
<target name="fs.scp.mdqcache.from.keymaster">
<echo>SCPing mdq cache from keymaster's output dir to a directory on orchestrator.</echo>
<scp failonerror="true" remoteFile="${keymaster.url}/${mdq.cache}" todir="${output.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
</target>
<target name="fs.scp.mdqcache.to.repo">
<echo>SCPing mdq cache from orchestrator's build dir to a temp directory on repo.</echo>
<scp failonerror="true" remoteTodir="${repo.url}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdq.cache}"/>
</fileset>
</scp>
</target>
<target name="fs.scp.mdqcache.from.repo">
<echo>SCPing mdq cache from temp directory on repo to output.dir on aggr.</echo>
<scp failonerror="true" remoteFile="${repo.url}/${mdq.cache}" todir="${output.dir}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"/>
</target>
<!--
***************************************************
*** ***
*** M D Q H A N D L I N G T A R G E T S ***
*** ***
***************************************************
-->
<target name="mdq.createcache" depends="samlmd.mdq.sign">
<echo>MDQ cache created.</echo>
</target>
<!--
**************************************
*** ***
*** A Z U R E T A R G E T S ***
*** ***
**************************************
-->
<target name="azure.purgecdn">
<echo>Sending Purge command to Azure CDN.</echo>
<echo>-> Not yet implemented. This is not a failure, other than a moral one.</echo>
</target>
<!--
***********************************************
*** ***
*** P U B L I S H I N G T A R G E T S ***
*** ***
***********************************************
-->
<target name="scp.githook">
<!--
Metadata servers have a post-receive githook to fix up outputs of metadata aggregation.
Not needed for CDI because the githook is already included in the container.
-->
<echo>Pushing post-receive githook to MD dist.</echo>
<echo>-> MD-NE-01</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-ne-01.name}:${md.githook.path}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${githook.dir}">
<include name="${post-receive-githook}"/>
</fileset>
</scp>
<sshexec
username="${md.user}" host="${md.dist.host-ne-01.name}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"
command="/usr/bin/chmod a+x ${md.githook.path}/${post-receive-githook}"
failonerror="true"
/>
<echo>-> MD-NE-02</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-ne-02.name}:${md.githook.path}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${githook.dir}">
<include name="${post-receive-githook}"/>
</fileset>
</scp>
<sshexec
username="${md.user}" host="${md.dist.host-ne-02.name}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"
command="/usr/bin/chmod a+x ${md.githook.path}/${post-receive-githook}"
failonerror="true"
/>
<echo>-> MD-WE-01</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-we-01.name}:${md.githook.path}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${githook.dir}">
<include name="${post-receive-githook}"/>
</fileset>
</scp>
<sshexec
username="${md.user}" host="${md.dist.host-we-01.name}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"
command="/usr/bin/chmod a+x ${md.githook.path}/${post-receive-githook}"
failonerror="true"
/>
<echo>-> MD-WE-02</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-we-02.name}:${md.githook.path}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${githook.dir}">
<include name="${post-receive-githook}"/>
</fileset>
</scp>
<sshexec
username="${md.user}" host="${md.dist.host-we-02.name}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts"
command="/usr/bin/chmod a+x ${md.githook.path}/${post-receive-githook}"
failonerror="true"
/>
</target>
<target name="publish.md">
<!--
Push metadata files for the UK Federation to the MD dist servers
-->
<echo>Pushing UK Federation metadata files to MD dist.</echo>
<echo>-> MD-NE-01</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="push"/>
<arg value="md-ne-01"/>
<arg value="master"/>
</exec>
<echo>-> MD-NE-02</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="push"/>
<arg value="md-ne-02"/>
<arg value="master"/>
</exec>
<echo>-> MD-WE-01</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="push"/>
<arg value="md-we-01"/>
<arg value="master"/>
</exec>
<echo>-> MD-WE-02</echo>
<exec executable="${git.executable}" dir="${shared.ws.dir}/${git.repo.project.products}" failonerror="true">
<arg value="push"/>
<arg value="md-we-02"/>
<arg value="master"/>
</exec>
</target>
<target name="publish.mdqcache">
<!--
Push mdq cache tar to the MD dist servers
-->
<echo>Pushing UK Federation mdq cache to MD dist.</echo>
<echo>-> MD-NE-01</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-ne-01.name}:/tmp" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdq.cache}"/>
</fileset>
</scp>
<echo>-> MD-NE-02</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-ne-02.name}:/tmp" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdq.cache}"/>
</fileset>
</scp>
<echo>-> MD-WE-01</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-we-01.name}:/tmp" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdq.cache}"/>
</fileset>
</scp>
<echo>-> MD-WE-02</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-we-02.name}:/tmp" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdq.cache}"/>
</fileset>
</scp>
</target>
<target name="publish.json">
<!--
Push the two JSON discovery feed files to the MD dist servers
-->
<echo>Pushing UK Federation JSON files to MD dist.</echo>
<echo>-> MD-NE-01</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-ne-01.name}:${md.discofeed.path}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdaggr.discofeed.filtered}"/>
<include name="${mdaggr.discofeed.all}"/>
</fileset>
</scp>
<echo>-> MD-NE-02</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-ne-02.name}:${md.discofeed.path}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdaggr.discofeed.filtered}"/>
<include name="${mdaggr.discofeed.all}"/>
</fileset>
</scp>
<echo>-> MD-WE-01</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-we-01.name}:${md.discofeed.path}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdaggr.discofeed.filtered}"/>
<include name="${mdaggr.discofeed.all}"/>
</fileset>
</scp>
<echo>-> MD-WE-02</echo>
<scp failonerror="true" remoteTodir="${md.user}@${md.dist.host-we-02.name}:${md.discofeed.path}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${output.dir}">
<include name="${mdaggr.discofeed.filtered}"/>
<include name="${mdaggr.discofeed.all}"/>
</fileset>
</scp>
</target>
<target name="publish.otherfiles">
<!--
Push other files for the UK Federation to the web server - but only when in prod env!
-->
<if>
<equals arg1="${env}" arg2="prod"/>
<then>
<echo>Pushing UK Federation other files to web site.</echo>
<echo>-> Web1</echo>
<scp failonerror="true" remoteTodir="${www.url.stats}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${aggregates.dir}">
<include name="${mdaggr.stats}"/>
</fileset>
</scp>
<scp failonerror="true" remoteTodir="${www.url.members}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${members.dir}">
<include name="members.xml"/>
</fileset>
</scp>
</then>
</if>
</target>
<target name="publish.generated.html">
<!--
Push HTML files generated outwith the production pipeline to the web server - but only when in prod env!
-->
<if>
<equals arg1="${env}" arg2="prod"/>
<then>
<echo>Pushing HTML files to web site.</echo>
<echo>-> Web1</echo>
<scp failonerror="true" remoteTodir="${www.url.members}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${build.dir}">
<include name="orgnamescope.html"/>
<include name="uai.html"/>
<include name="dml.html"/>
</fileset>
</scp>
<scp failonerror="true" remoteTodir="${www.url.stats}" keyfile="~/.ssh/id_rsa" knownhosts="~/.ssh/known_hosts">
<fileset dir="${build.dir}">
<include name="randssps.html"/>
</fileset>
</scp>
</then>
</if>
</target>
<!--
*****************************************
*** ***
*** M E T A D A T A I M P O R T ***
*** ***
*****************************************
-->
<target name="import.metadata">
<echo>Importing metadata from ${entities.dir}/import.xml</echo>
<delete file="${entities.dir}/imported.xml" quiet="true" verbose="false"/>
<CHANNEL.do channel="uk" verb="import.metadata"/>
<echo>Imported metadata to ${entities.dir}/imported.xml</echo>
<fixcrlf file="${entities.dir}/imported.xml" encoding="UTF-8"/>
</target>
<!--
*********************************************
*** ***
*** M E T A D A T A E X C H A N G E ***
*** ***
*********************************************
-->
<!--
Aggregator-based work is divided into channels, each of
which lives in a directory under /mdx/. Each channel
can have a number of verbs which can be executed.
Conventions for this system are documented in
/mdx/conventions.md.
-->
<!--
CHANNEL.do
Run a particular flow in the named channel.
-->
<macrodef name="CHANNEL.do">
<attribute name="channel"/><!-- channel name -->
<attribute name="verb"/><!-- verb to perform -->
<sequential>
<echo>Running @{channel} @{verb} flow.</echo>
<if>
<available file="${mdx.dir}/@{channel}/@{verb}.xml"/>
<then>
<MDA config="${mdx.dir}/@{channel}/@{verb}.xml"
pipeline="@{verb}"/>
</then>
<else>
<MDA config="${mdx.dir}/@{channel}/verbs.xml"
pipeline="@{verb}"/>
</else>
</if>
<echo>Completed @{channel} @{verb} flow.</echo>
</sequential>
</macrodef>
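<!--
Resolution example (illustrative): for channel "uk" and verb "collect",
CHANNEL.do first looks for ${mdx.dir}/uk/collect.xml and, if that file
exists, runs the "collect" pipeline bean it defines; otherwise it falls
back to ${mdx.dir}/uk/verbs.xml and runs the "collect" bean there.
-->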
<!--
flow
Prompt for a channel name and a verb, and run that verb in that channel.
-->
<target name="flow">
<input addproperty="channel">
Please select the channel to use (e.g., us_incommon):
</input>
<input addproperty="verb">
Please select the verb to execute (e.g., import):
</input>
<CHANNEL.do channel="${channel}" verb="${verb}"/>
</target>
<!--
CHANNEL.import
Run the import flow from the named channel.
-->
<macrodef name="CHANNEL.import">
<attribute name="channel"/><!-- channel name -->
<sequential>
<CHANNEL.do channel="@{channel}" verb="import"/>
</sequential>
</macrodef>
<!--
*******************************************************************
*** ***
*** M D X : N O N - P R O D U C T I O N C H A N N E L S ***
*** ***
*******************************************************************
-->
<target name="flow.import">
<input addproperty="channel">
Please select the channel to use (e.g., us_incommon):
</input>
<CHANNEL.import channel="${channel}"/>
</target>
<!--
flow.verify.cobweb
Verify the COBWEB metadata. Callable from Jenkins.
-->
<target name="flow.verify.cobweb">
<CHANNEL.do verb="verifyProduction" channel="int_cobweb"/>
</target>
<!--
flow.verifyEdugain.input
Verify the eduGAIN entities from a particular channel.
The intention is that this be called within Jenkins
with the channel name passed as a property, e.g.:
ant flow.verifyEdugain.input -Dchannel=se_swamid
-->
<target name="flow.verifyEdugain.input">
<CHANNEL.do verb="verifyEdugain" channel="${channel}"/>
</target>
<!--
flow.verifyEdugain.output
Verify the eduGAIN production aggregate.
The intention is that these targets be called from Jenkins.
-->
<target name="flow.verifyEdugain.output">
<CHANNEL.do verb="verify" channel="int_edugain"/>
</target>
<target name="flow.verifyEdugain.output.new">
<CHANNEL.do verb="verify.new" channel="int_edugain"/>
</target>
<target name="flow.verifyEdugain.output.all">
<CHANNEL.do verb="verify.all" channel="int_edugain"/>
</target>
<target name="flow.verifyEdugain.output.recovered">
<CHANNEL.do verb="verify.recovered" channel="int_edugain"/>
</target>
<!--
*******************************
*** ***
*** M I S C E L L A N Y ***
*** ***
*******************************
-->
<!--
Statistics generation
Note that statistics are generated from the full registered fragment
data, so that the statistics process has access to information that will not
be included in published metadata.
This target does stand-alone statistics generation; in normal use, the
statistics are generated as a side-effect of the generate target.
-->
<target name="stats">
<CHANNEL.do channel="uk" verb="statistics"/>
<fixcrlf file="${output.dir}/${mdaggr.stats}" eol="lf" encoding="UTF-8"/>
</target>
<!--
This variant generates a much simpler file, intended for use when building the
monthly chart pack.
-->
<target name="stats.charting">
<CHANNEL.do channel="uk" verb="statistics.charting"/>
<fixcrlf file="${output.dir}/${mdaggr.stats}" eol="lf" encoding="UTF-8"/>
</target>
<!--
Check mailing list against current metadata
-->
<target name="check.mailing.list" depends="flow.uk.collect">
<echo>Checking mailing list entries.</echo>
<exec executable="perl" dir="${build.dir}">
<arg value="${utilities.dir}/addresses.pl"/>
</exec>
</target>
<!--
Extract TLS locations from the UK federation metadata.
-->
<target name="extract.locs" depends="flow.uk.collect">
<echo>Extracting TLS locations</echo>
<exec executable="perl" dir="${build.dir}"
output="${build.dir}/locations.txt">
<arg value="${build.dir}/extract_locs.pl"/>
</exec>
</target>
<target name="extract.locs.noports" depends="flow.uk.collect">
<echo>Extracting TLS locations</echo>
<exec executable="perl" dir="${build.dir}"
output="${build.dir}/locations_noports.txt">
<arg value="${build.dir}/extract_locs_noports.pl"/>
</exec>
</target>
<!--
Utility to fold overlong embedded certificates.
-->
<target name="fold.embedded.certs">
<echo>Folding embedded certificates</echo>
<for param="file">
<path>
<fileset dir="${entities.dir}" includes="uk*.xml"/>
</path>
<sequential>
<exec executable="perl" dir="${entities.dir}">
<arg value="-i"/>
<arg value="${build.dir}/fold_cert.pl"/>
<arg value="@{file}"/>
</exec>
</sequential>
</for>
</target>
<!--
Utility to remove the old Eduserv gateway certificate.
-->
<!--
<target name="remove.old.eduserv.cert">
<echo>Removing old Eduserv gateway certificate</echo>
<for param="file">
<path>
<fileset dir="${entities.dir}" includes="uk*.xml"/>
</path>
<sequential>
<exec executable="perl" dir="${entities.dir}">
<arg value="-i"/>
<arg value="${build.dir}/remove_old_eduserv_cert.pl"/>
<arg value="@{file}"/>
</exec>
</sequential>
</for>
</target>
-->
<!--
Utility to add the second Eduserv gateway certificate.
-->
<!--
<target name="add.second.eduserv.cert">
<echo>Adding second Eduserv gateway certificate</echo>
<for param="file">
<path>
<fileset dir="${entities.dir}" includes="uk*.xml"/>
</path>
<sequential>
<exec executable="perl" dir="${entities.dir}">
<arg value="-i"/>
<arg value="${build.dir}/add_second_eduserv_cert.pl"/>
<arg value="@{file}"/>
</exec>
</sequential>
</for>
</target>
-->
<!--
Utility to apply a one-off change to every fragment file.
The perl script is applied in "-i" mode to perform in-place
editing; this only works well on Unix-like systems.
Comment this out when not in use to avoid accidents.
-->
<!--
<target name="fix.fragments">
<for param="file">
<path>
<fileset dir="${entities.dir}" includes="uk*.xml"/>
</path>
<sequential>
<echo>processing @{file}</echo>
<exec executable="perl" dir="${entities.dir}">
<arg value="-i"/>
<arg value="${build.dir}/fix_fragment.pl"/>
<arg value="@{file}"/>
</exec>
</sequential>
</for>
</target>
-->
<!--
Extract embedded certificates
-->
<target name="extract.embedded" depends="flow.uk.collect">
<echo>Extracting embedded certificates</echo>
<exec executable="xsltproc" failonerror="true">
<arg value="--output"/>
<arg value="${temp.dir}/embedded.pem"/>
<arg value="${build.dir}/extract_embedded.xsl"/>
<arg value="${uk.collected}"/>
</exec>
</target>
<!--
Check embedded certificates.
-->
<target name="check.embedded" depends="extract.embedded">
<echo>Checking embedded certificates</echo>
<exec executable="perl" dir="${utilities.dir}"
input="${temp.dir}/embedded.pem">
<arg value="${utilities.dir}/check_embedded.pl"/>
<arg value="${entities.dir}/expiry_whitelist.txt"/>
</exec>
<delete file="${temp.dir}/embedded.pem" quiet="true" verbose="false"/>
</target>
<!--
Check embedded certificates in our production aggregate.
You can ignore almost all of the output from this, other than
the summary information at the end and in particular the
number of distinct RSA moduli.
-->
<target name="check.embedded.all">
<echo>Extracting embedded certificates</echo>
<exec executable="xsltproc" failonerror="true">
<arg value="--output"/>
<arg value="${temp.dir}/embedded.pem"/>
<arg value="${build.dir}/extract_embedded.xsl"/>
<arg value="${aggregates.dir}/${mdaggr.prod.signed}"/>
</exec>
<echo>Checking embedded certificates</echo>
<echo>Note: ignore expiry on eduGAIN entities</echo>
<exec executable="perl" dir="${utilities.dir}"
input="${temp.dir}/embedded.pem">
<arg value="${utilities.dir}/check_embedded.pl"/>
<arg value="${entities.dir}/expiry_whitelist.txt"/>
</exec>
<delete file="${temp.dir}/embedded.pem" quiet="true" verbose="false"/>
</target>
<!--
Check for IdPs using the single-port configuration.
-->
<target name="check.ports">
<echo>Checking vhost use</echo>
<CHANNEL.do verb="checkPorts" channel="uk"/>
<echo>Checked.</echo>
</target>
<!--
check.uk.future
Run a set of possible future rulesets against the existing collection
of UK-federation registered metadata.
-->
<target name="check.uk.future">
<echo>Checking against future rulesets.</echo>
<CHANNEL.do verb="checkFuture" channel="uk"/>
<echo>Check complete.</echo>
</target>
<!--
compare.members
Runs the compare-members.sh script, comparing Salesforce and UKf member data
-->
<target name="compare.members" depends="extract.members.to.csv">
<echo>Running compare.members</echo>
<exec executable="bash" failonerror="true">
<arg value="${utilities.dir}/compare-members.sh"/>
<arg value="${temp.dir}/ukf-members.csv"/>
</exec>
<delete file="${temp.dir}/ukf-members.csv" quiet="true" verbose="false"/>
</target>
<!--
extract.members.to.csv
Extract the ID and Name elements in UKf members to a CSV file for compare-members.sh to use
-->
<target name="extract.members.to.csv">
<echo>Running extract.members.to.csv</echo>
<exec executable="xsltproc" failonerror="true">
<arg value="--output"/>
<arg value="${temp.dir}/ukf-members.csv"/>
<arg value="${utilities.dir}/members-to-csv.xsl"/>
<arg value="../ukf-data/members/members.xml"/>
</exec>
</target>
<!--
generate.html.orgnamescope
Generates an HTML table of IdPs and their scopes
bodge-eacute.pl is an awful bodge to fix encodings
-->
<target name="generate.html.orgnamescope">
<echo>Running generate.html.orgnamescope XSLT</echo>
<exec executable="xsltproc" failonerror="true">
<arg value="--output" />
<arg value="${build.dir}/orgnamescope.html" />
<arg value="${utilities.dir}/orgnamescope.xsl" />
<arg value="${aggregates.dir}/ukfederation-metadata.xml" />
</exec>
<exec executable="perl" failonerror="true">
<arg value="${utilities.dir}/bodge-eacute.pl" />
<arg value="${build.dir}/orgnamescope.html" />
</exec>
</target>
<!--
generate.html.members
Generates an HTML table of member names and comments
bodge-eacute.pl is an awful bodge to fix encodings
-->
<target name="generate.html.members">
<echo>Running generate.html.members XSLT</echo>
<exec executable="xsltproc" failonerror="true">
<arg value="--output" />
<arg value="${build.dir}/dml.html" />
<arg value="${utilities.dir}/memberlist.xsl" />
<arg value="${members.dir}/members.xml" />
</exec>
<exec executable="perl" failonerror="true">
<arg value="${utilities.dir}/bodge-eacute.pl" />
<arg value="${build.dir}/dml.html" />
</exec>
</target>
<!--
generate.html.randssps
Generates an HTML table of SPs which assert the R&S entity category
-->
<target name="generate.html.randssps">
<echo>Running generate.html.randssps XSLT</echo>
<exec executable="xsltproc" failonerror="true">
<arg value="--output" />
<arg value="${build.dir}/randssps.html" />
<arg value="${utilities.dir}/list_rands_sps.xsl" />
<arg value="${aggregates.dir}/ukfederation-metadata.xml" />
</exec>
</target>
<!--
generate.html.uai
Generates an HTML table of IdPs asserting User Accountability
-->
<target name="generate.html.uai">
<echo>Running generate.html.uai XSLT</echo>
<exec executable="xsltproc" failonerror="true">
<arg value="--output" />
<arg value="${build.dir}/uai.html" />
<arg value="${utilities.dir}/ua-idp.xsl" />
<arg value="${aggregates.dir}/ukfederation-metadata.xml" />
</exec>
</target>
<!--
echoproperties
List all the properties ant is using.
-->
<target name="echoproperties">
<echo>All properties:</echo>
<echoproperties/>
<echo>MDA properties:</echo>
<echoproperties>
<propertyset>
<propertyset refid="mda.properties"/>
</propertyset>
</echoproperties>
</target>
<!--
*********************************************
*** ***
*** I N C O M M O N S P E C I F I C ***
*** ***
*********************************************
-->
<!--
Location of most InCommon-specific files.
-->
<property name="mda.incdir" location="mdx/incommon"/>
<!--
Default local file location from which to read the
(not necessarily signed) production aggregate.
This can be overridden in build.properties.
In a development environment, use the inc.fetch.production
target to read a copy of the current real production
aggregate into this location for testing.
-->
<property name="mda.inc.production.xml" value="${mda.incdir}/production.xml"/>
<!--
Default local file location to which to write the export aggregate.
This can be overridden in build.properties.
-->
<property name="mda.inc.exported.xml" value="${mda.incdir}/exported.xml"/>
<!--
Default local file location to which to write the import aggregate.
This can be overridden in build.properties.
-->
<property name="mda.inc.imported.xml" value="${mda.incdir}/imported.xml"/>
<!--
Default local file location to which to write the IdP-only aggregate.
This can be overridden in build.properties.
-->
<property name="mda.inc.imported-idp.xml" value="${mda.incdir}/imported-idp.xml"/>
<!--
Default local file location to which to write the eduGAIN aggregate.
This can be overridden in build.properties.
-->
<property name="mda.inc.edugain.xml" value="${mda.incdir}/edugain.xml"/>
<!--
Default location from which to acquire the InCommon entity blacklist;
by default this is a classpath resource rather than a local file.
-->
<property name="mda.inc.entity.blacklist"
value="classpath:incommon/incommon-blacklist.xml"/>
<!--
inc.fetch.production
This target is for use in a development environment. It reads the
current signed InCommon production aggregate into the local file location
specified by mda.inc.production.xml so that it can be used to test the
inc.generate.export target.
-->
<target name="inc.fetch.production">
<echo>Fetching InCommon production aggregate to ${mda.inc.production.xml}...</echo>
<get src="http://md.incommon.org/InCommon/InCommon-metadata.xml"
dest="${mda.inc.production.xml}"/>
<echo>Fetch complete.</echo>
</target>
<!--
inc.generate.export
Generate the InCommon export aggregate.
-->
<target name="inc.generate.export">
<echo>Generating InCommon export aggregate in ${mda.inc.exported.xml}</echo>
<echo> from production aggregate in ${mda.inc.production.xml}...</echo>
<CHANNEL.do channel="incommon" verb="export"/>
<echo>Generation complete.</echo>
</target>
<!--
inc.edugain.download
Downloads the eduGAIN aggregate and writes it into a file if successful.
-->
<target name="inc.edugain.download">
<echo>Downloading eduGAIN aggregate to ${mda.inc.edugain.xml}...</echo>
<CHANNEL.do channel="incommon" verb="download-edugain"/>
<echo>Download to ${mda.inc.edugain.xml} completed.</echo>
</target>
<!--
inc.generate.import
Generate the InCommon import aggregate.
-->
<target name="inc.generate.import">
<echo>Generating InCommon import aggregate in ${mda.inc.imported.xml}</echo>
<echo> (IdP-only aggregate in ${mda.inc.imported-idp.xml})</echo>
<echo> from production aggregate in ${mda.inc.production.xml}</echo>
<echo> and selected eduGAIN entities from ${mda.inc.edugain.xml}...</echo>
<CHANNEL.do channel="incommon" verb="import"/>
<echo>Generation complete.</echo>
</target>
<!--
inc.generate.import_sign
Generate the InCommon import aggregate signed using AWS CloudHSM
-->
<target name="inc.generate.import_sign">
<property name="mda.sign.keyHandle" value="${sign.keyHandle}"/>
<property name="mda.sign.keyPassword" value="${sign.keyPassword}"/>
<property name="mda.sign.keyUser" value="${sign.keyUser}"/>
<property name="mda.classpath.extra" value="inc-mda-cloudhsm"/>
<property name="mda.jni.path" value="/opt/cloudhsm/lib"/>
<echo>Generating InCommon signed import aggregate in ${mda.inc.imported.xml}</echo>
<echo> (IdP-only aggregate in ${mda.inc.imported-idp.xml})</echo>
<echo> from production aggregate in ${mda.inc.production.xml}</echo>
<echo> and selected eduGAIN entities from ${mda.inc.edugain.xml}</echo>
<echo> signed using AWS CloudHSM</echo>
<CHANNEL.do channel="incommon" verb="import_sign"/>
<echo>Generation complete.</echo>
</target>
<!--
inc.generate.sign
Sign an aggregate using CloudHSM
-->
<target name="inc.generate.sign">
<property name="mda.sign.keyHandle" value="${sign.keyHandle}"/>
<property name="mda.sign.keyPassword" value="${sign.keyPassword}"/>
<property name="mda.sign.keyUser" value="${sign.keyUser}"/>
<property name="mda.classpath.extra" value="inc-mda-cloudhsm"/>
<property name="mda.jni.path" value="/opt/cloudhsm/lib"/>
<echo>Generating signed aggregate in ${mda.inc.imported.xml}</echo>
<echo> from aggregate in ${mda.inc.production.xml}</echo>
<echo> signed using AWS CloudHSM</echo>
<CHANNEL.do channel="incommon" verb="sign"/>
<echo>Generation complete.</echo>
</target>
<!--
inc.mdq.generate.localkey
Generate per-entity metadata for the InCommon federation
using the localkey configuration.
Properties:
Set sign.uk.keyPassword (note the slightly anomalous "uk" in that
identifier) to set the password to access the key resource. If not set,
this will be prompted for.
-->
<target name="inc.mdq.generate.localkey" depends="get.sign.uk.keyPassword">
<property name="mda.mdq.input" value="${mda.inc.imported.xml}"/>
<property name="mda.sign.keyPassword" value="${sign.uk.keyPassword}"/>
<echo>Generating per-entity metadata in ${mda.mdq.output}</echo>
<echo> from unsigned aggregate in ${mda.mdq.input}</echo>
<delete dir="${mdq.output.dir}" quiet="true"/>
<mkdir dir="${mdq.output.dir}"/>
<CHANNEL.do channel="incommon" verb="mdq-multisign-localkey"/>
<echo>Generation complete.</echo>
</target>
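<!--
Example invocation (a sketch; the password value is a placeholder):
ant inc.mdq.generate.localkey -Dsign.uk.keyPassword=REDACTED
-->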
<!--
inc.mdq.generate.all.localkey
Generate all metadata required for the InCommon federation MDQ service
using the local key configuration.
Properties:
Set sign.uk.keyPassword (note the slightly anomalous "uk" in that
identifier) to set the password to access the key resource. If not set,
this will be prompted for.
-->
<target name="inc.mdq.generate.all.localkey" depends="get.sign.uk.keyPassword">
<property name="mda.mdq.input" value="${mda.inc.imported.xml}"/>
<property name="mda.sign.keyPassword" value="${sign.keyPassword}"/>
<echo>Generating MDQ metadata in ${mda.mdq.output}</echo>
<echo> from unsigned aggregate in ${mda.mdq.input}</echo>
<delete dir="${mdq.output.dir}" quiet="true"/>
<mkdir dir="${mdq.output.dir}"/>
<mkdir dir="${mdq.output.dir}/idps"/>
<CHANNEL.do channel="incommon" verb="mdq-all-localkey"/>
<echo>Generation complete.</echo>
</target>
<!--
inc.generate.mdq
Generate per-entity metadata for the InCommon federation.
-->
<target name="inc.generate.mdq" depends="inc.mdq.generate.localkey">
<!-- temporary alias for inc.mdq.generate.localkey -->
</target>
<!--
inc.mdq.generate.cloudhsm
Generate per-entity metadata for the InCommon federation
using AWS CloudHSM.
Properties:
Set sign.keyHandle to indicate the key to be used for signing.
Set sign.keyUser and sign.keyPassword with credentials for the HSM user
accessing the key.
-->
<target name="inc.mdq.generate.cloudhsm">
<property name="mda.mdq.input" value="${mda.inc.imported.xml}"/>
<property name="mda.sign.keyHandle" value="${sign.keyHandle}"/>
<property name="mda.sign.keyUser" value="${sign.keyUser}"/>
<property name="mda.sign.keyPassword" value="${sign.keyPassword}"/>
<property name="mda.classpath.extra" value="inc-mda-cloudhsm"/>
<property name="mda.jni.path" value="/opt/cloudhsm/lib"/>
<echo>Generating per-entity metadata in ${mda.mdq.output}</echo>
<echo> from unsigned aggregate in ${mda.mdq.input}</echo>
<delete dir="${mdq.output.dir}" quiet="true"/>
<mkdir dir="${mdq.output.dir}"/>
<CHANNEL.do channel="incommon" verb="mdq-multisign-cloudhsm"/>
<echo>Generation complete.</echo>
</target>
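<!--
Example invocation (a sketch; all values are placeholders for the
deployment's real HSM key handle and credentials):
ant inc.mdq.generate.cloudhsm -Dsign.keyHandle=12345
-Dsign.keyUser=crypto-user -Dsign.keyPassword=REDACTED
-->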
<!--
inc.mdq.generate.all.cloudhsm
Generate all metadata required for the InCommon federation MDQ service
using the AWS CloudHSM configuration.
Properties:
Set sign.keyUser and sign.keyPassword with credentials for the HSM user
accessing the key.
-->
<target name="inc.mdq.generate.all.cloudhsm">
<property name="mda.classpath.extra" value="inc-mda-cloudhsm"/>
<property name="mda.jni.path" value="/opt/cloudhsm/lib"/>
<property name="mda.mdq.input" value="${mda.inc.imported.xml}"/>
<property name="mda.sign.keyHandle" value="${sign.keyHandle}"/>
<property name="mda.sign.keyPassword" value="${sign.keyPassword}"/>
<property name="mda.sign.keyUser" value="${sign.keyUser}"/>
<echo>Generating MDQ metadata in ${mda.mdq.output}</echo>
<echo> from unsigned aggregate in ${mda.mdq.input}</echo>
<delete dir="${mdq.output.dir}" quiet="true"/>
<mkdir dir="${mdq.output.dir}"/>
<mkdir dir="${mdq.output.dir}/idps"/>
<CHANNEL.do channel="incommon" verb="mdq-all-cloudhsm"/>
<echo>Generation complete.</echo>
</target>
<!--
inc.edugain.report
Report on the eduGAIN entities that were filtered out because of detected errors.
-->
<target name="inc.edugain.report">
<echo>Looking for errors in eduGAIN entities from ${mda.inc.edugain.xml}...</echo>
<CHANNEL.do channel="incommon" verb="report"/>
<echo>Report complete.</echo>
</target>
</project>