Alright, so after some hackery I've managed to get this working.
First, the service itself (in this case a Spring Boot project) gets a cdk directory in its root. This basically just sets up the CI part of the CI/CD pipeline:
const appName: string = this.node.tryGetContext('app-name');
const ecrRepo = new ecr.Repository(this, `${appName}Repository`, {
  repositoryName: appName,
  imageScanOnPush: true,
  removalPolicy: cdk.RemovalPolicy.DESTROY,
});
const bbSource = codebuild.Source.bitBucket({
  // BitBucket account
  owner: 'mycompany',
  // Name of the repository this project belongs to
  repo: 'reponame',
  // Enable webhook
  webhook: true,
  // Configure so webhook only fires when the master branch has an update to any code other than this CDK project (e.g. Spring source only)
  webhookFilters: [codebuild.FilterGroup.inEventOf(codebuild.EventAction.PUSH).andBranchIs('master').andFilePathIsNot('./cdk/*')],
});
const buildSpec = {
  version: '0.2',
  phases: {
    pre_build: {
      // Get the git commit hash that triggered this build
      commands: ['env', 'export TAG=${CODEBUILD_RESOLVED_SOURCE_VERSION}'],
    },
    build: {
      commands: [
        // Build Java project
        './mvnw clean install -DskipTests',
        // Log in to ECR repository that contains the Corretto image
        'aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 489478819445.dkr.ecr.us-west-2.amazonaws.com',
        // Build docker images and tag them with the commit hash as well as 'latest'
        'docker build -t $ECR_REPO_URI:$TAG -t $ECR_REPO_URI:latest .',
        // Log in to our own ECR repository to push
        '$(aws ecr get-login --no-include-email)',
        // Push docker images to ECR repository defined above
        'docker push $ECR_REPO_URI:$TAG',
        'docker push $ECR_REPO_URI:latest',
      ],
    },
    post_build: {
      commands: [
        // Prepare the image definitions artifact file
        'printf \'[{"name":"servicename","imageUri":"%s"}]\' $ECR_REPO_URI:$TAG > imagedefinitions.json',
        'pwd; ls -al; cat imagedefinitions.json',
      ],
    },
  },
  // Define the image definitions artifact - this is required for deployments by other CDK projects
  artifacts: {
    files: ['imagedefinitions.json'],
  },
};
const buildProject = new codebuild.Project(this, `${appName}BuildProject`, {
  projectName: appName,
  source: bbSource,
  environment: {
    buildImage: codebuild.LinuxBuildImage.AMAZON_LINUX_2_3,
    privileged: true,
    environmentVariables: {
      // Required for tagging/pushing image
      ECR_REPO_URI: { value: ecrRepo.repositoryUri },
    },
  },
  buildSpec: codebuild.BuildSpec.fromObject(buildSpec),
});
// Allow the build project's role to push images to the ECR repo (the role property is typed as optional, hence the check)
buildProject.role?.addToPrincipalPolicy(
  new iam.PolicyStatement({
    effect: iam.Effect.ALLOW,
    actions: ['ecr:*'],
    resources: ['*'],
  }),
);
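In case it's useful, here's roughly how that snippet sits inside the cdk directory (just a sketch, assuming CDK v1-style module imports; CiStack and the file path are names I've made up). The app-name value is ordinary CDK context, so you can pass it with cdk deploy -c app-name=servicename or put it under "context" in cdk.json:
// lib/ci-stack.ts (sketch) - the entry point then just does `new CiStack(new cdk.App(), 'CiStack')`
import * as cdk from '@aws-cdk/core';
import * as ecr from '@aws-cdk/aws-ecr';
import * as codebuild from '@aws-cdk/aws-codebuild';
import * as iam from '@aws-cdk/aws-iam';

export class CiStack extends cdk.Stack {
  constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) {
    super(scope, id, props);
    // ...the ECR repo, Bitbucket source, buildspec and CodeBuild project code from above goes here
  }
}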
Once this is set up, the CodeBuild project has to be run manually once so the ECR repo has a valid 'latest' image (otherwise the ECS service won't come up correctly).
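You can kick that first build off from the console, or with the CLI (the project name is whatever you passed as app-name):
aws codebuild start-build --project-name <app-name>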
Now in the separate infrastructure codebase you can create the ECS cluster and service as normal, getting the ECR repository from a lookup:
const repo = ecr.Repository.fromRepositoryName(this, 'SomeRepository', 'reponame'); // this has to match the ECR repositoryName (the app-name value) defined in the CI project earlier
const cluster = new ecs.Cluster(this, `Cluster`, { vpc });
const service = new ecs_patterns.ApplicationLoadBalancedFargateService(this, 'Service', {
  cluster,
  serviceName: 'servicename',
  taskImageOptions: {
    image: ecs.ContainerImage.fromEcrRepository(repo, 'latest'),
    containerName: repo.repositoryName,
    containerPort: 8080,
  },
});
Finally, create a deployment construct which listens for ECR events, manually converts the generated imageDetail.json file into a valid imagedefinitions.json file, and then deploys to the existing service.
const sourceOutput = new cp.Artifact();
const ecrAction = new cpa.EcrSourceAction({
  actionName: 'ECR-action',
  output: sourceOutput,
  repository: repo, // this is the same repo from where the service was originally defined
});
const buildProject = new codebuild.Project(this, 'BuildProject', {
  environment: {
    buildImage: codebuild.LinuxBuildImage.AMAZON_LINUX_2_3,
    privileged: true,
  },
  buildSpec: codebuild.BuildSpec.fromObject({
    version: '0.2',
    phases: {
      build: {
        commands: [
          'cat imageDetail.json | jq "[. | {name: .RepositoryName, imageUri: .ImageURI}]" > imagedefinitions.json',
          'cat imagedefinitions.json',
        ],
      },
    },
    artifacts: {
      files: ['imagedefinitions.json'],
    },
  }),
});
const convertOutput = new cp.Artifact();
const convertAction = new cpa.CodeBuildAction({
  actionName: 'Convert-Action',
  input: sourceOutput,
  outputs: [convertOutput],
  project: buildProject,
});
const deployAction = new cpa.EcsDeployAction({
  actionName: 'Deploy-Action',
  service: service.service,
  input: convertOutput,
});
new cp.Pipeline(this, 'Pipeline', {
  stages: [
    { stageName: 'Source', actions: [ecrAction] },
    { stageName: 'Convert', actions: [convertAction] },
    { stageName: 'Deploy', actions: [deployAction] },
  ],
});
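For reference, the jq step above just reshapes the imageDetail.json that the ECR source action emits into the imagedefinitions.json format the ECS deploy action expects; the name field has to match the container name on the service, which is why containerName was set to repo.repositoryName earlier. Roughly (illustrative values, extra imageDetail.json fields omitted):
// imageDetail.json (input, produced by the ECR source action)
{ "RepositoryName": "reponame", "ImageURI": "123456789012.dkr.ecr.us-west-2.amazonaws.com/reponame:latest" }
// imagedefinitions.json (output, consumed by the ECS deploy action)
[{ "name": "reponame", "imageUri": "123456789012.dkr.ecr.us-west-2.amazonaws.com/reponame:latest" }]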
Obviously this isn't as clean as it could be, and it should get simpler once CloudFormation supports this properly, but it works pretty well.