diff --git a/tests/ci/build_run_benchmarks.sh b/tests/ci/build_run_benchmarks.sh deleted file mode 100755 index c005f605e0..0000000000 --- a/tests/ci/build_run_benchmarks.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bash -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 OR ISC - -set -x - -# set default value of directory name -if [ -z "${PR_FOLDER_NAME}" ]; then export PR_FOLDER_NAME=aws-lc; fi - -# Get AWS_ACCOUNT_ID -if [ -z "${AWS_ACCOUNT_ID}" ]; then AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text); fi - -AWSLC_PR_ROOT=$(pwd) - -cd .. - -# run this from the bm_framework root directory! -AWSLC_PR_ROOT=$(pwd)/"${PR_FOLDER_NAME}" -AWSLC_PROD_ROOT=$(pwd)/aws-lc-prod - -source ${AWSLC_PR_ROOT}/tests/ci/common_posix_setup.sh - -# clone the various repositories we need (we already have aws-lc-pr since we need it to run this script) -git clone https://github.com/aws/aws-lc.git aws-lc-prod - -# build AWSLC pr -mkdir -p "${PR_FOLDER_NAME}"/build -${CMAKE_COMMAND} -B"${PR_FOLDER_NAME}"/build -H"${PR_FOLDER_NAME}" -GNinja -DCMAKE_BUILD_TYPE=Release \ - -DBUILD_TESTING=OFF -ninja -C "${PR_FOLDER_NAME}"/build - -# build FIPS compliant version of AWSLC pr -mkdir -p "${PR_FOLDER_NAME}"/fips_build -${CMAKE_COMMAND} -B"${PR_FOLDER_NAME}"/fips_build -H"${PR_FOLDER_NAME}" -GNinja -DFIPS=1 -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=TRUE -ninja -C "${PR_FOLDER_NAME}"/fips_build - -# build AWSLC prod -mkdir -p aws-lc-prod/build -${CMAKE_COMMAND} -Baws-lc-prod/build -Haws-lc-prod -GNinja -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=OFF -ninja -C aws-lc-prod/build - -#build FIPS compliant version of AWSLC prod -mkdir -p aws-lc-prod/fips_build -${CMAKE_COMMAND} -Baws-lc-prod/fips_build -Haws-lc-prod -GNinja -DFIPS=1 -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=TRUE -ninja -C aws-lc-prod/fips_build - -./"${PR_FOLDER_NAME}"/build/tool/bssl speed -timeout 1 -json > aws-lc-pr_bm.json -./"${PR_FOLDER_NAME}"/fips_build/tool/bssl speed -timeout 1 -json > aws-lc-pr_fips_bm.json - -./aws-lc-prod/build/tool/bssl speed -timeout 1 -json > aws-lc-prod_bm.json -./aws-lc-prod/fips_build/tool/bssl speed -timeout 1 -json > aws-lc-prod_fips_bm.json - - -./"${PR_FOLDER_NAME}"/build/tool/bssl speed -filter trusttoken -timeout 1 -json > aws-lc-pr_tt_bm.json -./"${PR_FOLDER_NAME}"/fips_build/tool/bssl speed -filter trusttoken -timeout 1 -json > aws-lc-pr_tt_fips_bm.json -./aws-lc-prod/build/tool/bssl speed -filter trusttoken -timeout 1 -json > aws-lc-prod_tt_bm.json -./aws-lc-prod/fips_build/tool/bssl speed -filter trusttoken -timeout 1 -json > aws-lc-prod_tt_fips_bm.json - -# convert results from .json to .csv -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/convert_json_to_csv.py aws-lc-pr_bm.json -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/convert_json_to_csv.py aws-lc-pr_fips_bm.json -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/convert_json_to_csv.py aws-lc-prod_bm.json -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/convert_json_to_csv.py aws-lc-prod_fips_bm.json - -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/convert_json_to_csv.py aws-lc-pr_tt_bm.json -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/convert_json_to_csv.py aws-lc-pr_tt_fips_bm.json -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/convert_json_to_csv.py aws-lc-prod_tt_bm.json -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/convert_json_to_csv.py aws-lc-prod_tt_fips_bm.json - -# 
once we have csvs, we want to update the main benchmark results files with the sequential trusttoken results -# files will be updated in place -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/update_results.py aws-lc-pr_bm.csv aws-lc-pr_tt_bm.csv python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/update_results.py aws-lc-pr_fips_bm.csv aws-lc-pr_tt_fips_bm.csv python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/update_results.py aws-lc-prod_bm.csv aws-lc-prod_tt_bm.csv python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/update_results.py aws-lc-prod_fips_bm.csv aws-lc-prod_tt_fips_bm.csv - -# check for regressions! -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/compare_results.py aws-lc-prod_bm.csv aws-lc-pr_bm.csv prod_vs_pr.csv -prod_vs_pr_code="$?" -python3 "${PR_FOLDER_NAME}"/tests/ci/benchmark_framework/compare_results.py aws-lc-prod_fips_bm.csv aws-lc-pr_fips_bm.csv prod_vs_pr_fips.csv -prod_vs_pr_fips_code="$?" - -# upload results to s3 -aws s3 cp aws-lc-pr_bm.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/${CODEBUILD_SOURCE_VERSION}/aws-lc-pr_bm.csv" -aws s3 cp aws-lc-pr_fips_bm.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/${CODEBUILD_SOURCE_VERSION}/aws-lc-pr_fips_bm.csv" -aws s3 cp aws-lc-prod_bm.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-prod-bucket/${CODEBUILD_SOURCE_VERSION}/aws-lc-prod_bm.csv" -aws s3 cp aws-lc-prod_fips_bm.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-prod-bucket/${CODEBUILD_SOURCE_VERSION}/aws-lc-prod_fips_bm.csv" - -# upload results to lastest folders in s3 -aws s3 mv aws-lc-pr_bm.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/latest-${CODEBUILD_WEBHOOK_TRIGGER}/aws-lc-pr_bm.csv" -aws s3 mv aws-lc-pr_fips_bm.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/latest-${CODEBUILD_WEBHOOK_TRIGGER}/aws-lc-pr_fips_bm.csv" -aws s3 mv aws-lc-prod_bm.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-prod-bucket/latest/aws-lc-prod_bm.csv" -aws s3 mv aws-lc-prod_fips_bm.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-prod-bucket/latest/aws-lc-prod_fips_bm.csv" - -# if any of the results gave an exit code of 5, there's a performance regression -# we only want to actually fail the vote if we've detected a regression in the pr version of aws-lc and tip of main of aws-lc (for fips and non-fips) -exit_fail=false -if [ "${prod_vs_pr_code}" != 0 ]; then - aws s3 cp prod_vs_pr.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/${CODEBUILD_SOURCE_VERSION}/prod_vs_pr.csv" - aws s3 mv prod_vs_pr.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/latest-${CODEBUILD_WEBHOOK_TRIGGER}/prod_vs_pr.csv" - exit_fail=true -fi -if [ "${prod_vs_pr_fips_code}" != 0 ]; then - aws s3 cp prod_vs_pr_fips.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/${CODEBUILD_SOURCE_VERSION}/prod_vs_pr_fips.csv" - aws s3 mv prod_vs_pr_fips.csv s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/latest-${CODEBUILD_WEBHOOK_TRIGGER}/prod_vs_pr_fips.csv" - exit_fail=true -fi - -if [ "${exit_fail}" = true ]; then - exit 1 -fi diff --git a/tests/ci/cdk/README.md b/tests/ci/cdk/README.md index c03b920a9d..57d57afbe9 100644 --- a/tests/ci/cdk/README.md +++ b/tests/ci/cdk/README.md @@ -2,7 +2,26 @@ AWS-LC CI uses AWS CDK to define and deploy AWS resources (e.g. AWS CodeBuild, ECR). 
-## CI Setup
+## Table of Contents
+- [CDK Setup](#cdk-setup)
+  - [Before running CDK command](#before-running-cdk-command)
+  - [Minimal permissions](#minimal-permissions)
+  - [Pipeline Commands](#pipeline-commands)
+  - [CI Commands](#ci-commands)
+- [AWS-LC Benchmarking Framework](#aws-lc-benchmarking-framework)
+  - [Framework Setup](#framework-setup)
+  - [How to Use](#how-to-use)
+    - [Start from Pull Request](#start-from-pull-request)
+    - [Start Locally](#start-locally)
+    - [Examine Output](#examine-output)
+- [Files](#files)
+- [Development Reference](#development-reference)
+  - [Useful commands](#useful-commands)
+  - [Useful Docker image build commands](#useful-docker-image-build-commands)
+    - [Linux Docker image build](#linux-docker-image-build)
+    - [Windows Docker image build (DEPRECATED)](#windows-docker-image-build-deprecated)
+
+## CDK Setup

### Before running CDK command:

@@ -17,6 +36,7 @@ AWS-LC CI uses AWS CDK to define and deploy AWS resources (e.g. AWS CodeBuild, E
* step 4: click **Connect using OAuth** and **Connect to GitHub**.
* step 5: follow the OAuth app to grant access.
* Setup Python environment:
+  * From `aws-lc/tests/ci` run:
  ```shell
  python -m pip install -r requirements.txt
@@ -63,43 +83,92 @@ To setup or update the CI in your account you will need the following IAM permis
* secretsmanager:DeleteSecret
* secretsmanager:GetSecretValue

-### Commands
-
-These commands are run from `aws-lc/tests/ci/cdk`. \
-If not done previously, bootstrap cdk before running the commands below:
-```shell
-cdk bootstrap aws://${AWS_ACCOUNT_ID}/us-west-2
-```
-
-You may also need to request an increase to certain account quotas:
-```shell
-open https://${CDK_DEPLOY_REGION}.console.aws.amazon.com/servicequotas/home/services/ec2/quotas
-```
-* **EC2-VPC Elastic IPs** = 20
-
-Note: `GITHUB_REPO_OWNER` specifies the GitHub repo targeted by this CI setup.
-* https://github.com/${GITHUB_REPO_OWNER}/aws-lc.git
-
-To set up AWS-LC CI, run command:
-```
-./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action deploy-ci --aws-account ${AWS_ACCOUNT_ID}
-```
-
-To update AWS-LC CI, run command:
-```
-./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action update-ci --aws-account ${AWS_ACCOUNT_ID}
-```
-
-To create/update Linux Docker images, run command:
-```
-./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action build-linux-img --aws-account ${AWS_ACCOUNT_ID}
-```
-
-To destroy AWS-LC CI resources created above, run command:
-```
-# NOTE: this command will destroy all resources (AWS CodeBuild and ECR).
-./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action destroy-ci --aws-account ${AWS_ACCOUNT_ID}
-```
+### Pipeline Commands
+Use the following commands to deploy the CI pipeline. Once the pipeline is deployed, any changes to the CI or Docker images are picked up and deployed automatically.
+
+1. Ensure you are in `aws-lc/tests/ci/cdk`.
+2. Export the relevant environment variables:
+   - `PIPELINE_ACCOUNT_ID` (required): the AWS account to host your pipeline
+   - `DEPLOY_ACCOUNT_ID` (optional): the AWS account to deploy Docker images and CodeBuild CI tests to; used for dev pipelines only (can be the same as `PIPELINE_ACCOUNT_ID`)
+   - `GITHUB_REPO_OWNER` (optional): GitHub repo targeted by the pipeline (e.g., your personal GitHub account)
+   - `GITHUB_SOURCE_VERSION` (optional): Git branch holding the latest pipeline code (default: main)
+
+3. [SKIP IF NO CROSS-ACCOUNT DEPLOYMENT] Give the pipeline account administrator access to the deployment account's CloudFormation. Repeat this step for each deployment environment; you only need to run it once, the first time the pipeline deploys to a new account.
+   ```shell
+   cdk bootstrap aws://${DEPLOY_ACCOUNT_ID}/us-west-2 --trust ${PIPELINE_ACCOUNT_ID} --trust-for-lookup ${PIPELINE_ACCOUNT_ID} --cloudformation-execution-policies arn:aws:iam::aws:policy/AdministratorAccess
+   ```
+4. If not done previously, bootstrap cdk for the pipeline account before running the next commands.
+   ```shell
+   cdk bootstrap aws://${PIPELINE_ACCOUNT_ID}/us-west-2
+   ```
+5. (Optional) You may also need to request an increase to certain account quotas:
+   ```shell
+   open https://${DEPLOY_REGION}.console.aws.amazon.com/servicequotas/home/services/ec2/quotas
+   ```
+   Set EC2-VPC Elastic IPs = 20 (the default is only 5).
+
+
+6. Choose 1 of the following options to deploy the pipeline:
+   - To deploy a dev pipeline to the same account as your CI:
+     ```shell
+     ./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --github-source-version ${GITHUB_SOURCE_VERSION} --deploy-account ${PIPELINE_ACCOUNT_ID} --action deploy-dev-pipeline
+     ```
+   - To deploy a dev pipeline hosted in a separate account from your CI:
+     ```shell
+     ./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --github-source-version ${GITHUB_SOURCE_VERSION} --pipeline-account ${PIPELINE_ACCOUNT_ID} --deploy-account ${DEPLOY_ACCOUNT_ID} --action deploy-dev-pipeline
      ```
+   - To deploy the production pipeline using default parameters:
+     ```shell
+     ./run-cdk.sh --action deploy-production-pipeline
+     ```
+
+**Note**: If this is your first time deploying the pipeline and it's failing on the Source stage, this is normal and expected, since you haven't given CodePipeline access to your repo.
+To fix this:
+1. Go to your Console > CodePipeline > Settings > Connections. You should see a pending connection named `AwsLcCiPipelineGitHubConnection`. Click on it.
+2. Click on `Update Pending Connection`.
+3. In the pop-up, you will see an `App Installation - optional` section. Click on `Install a new app` (or choose an existing app if you have one). This step is REQUIRED to allow CodePipeline to detect new events from your repo.
+4. Click `Connect`. The connection status should now become `Available`.
+
+### CI Commands
+Use these commands if you wish to deploy individual stacks instead of the entire pipeline.
+
+1. Ensure you are in `aws-lc/tests/ci/cdk`.
+2. Export the relevant environment variables:
+   - `DEPLOY_ACCOUNT_ID` (required): AWS account you wish to deploy the CI stacks to
+   - `GITHUB_REPO_OWNER` (required): the GitHub repo targeted by this CI setup.
+
+3. If not done previously, bootstrap cdk before running the commands below.
+   ```shell
+   cdk bootstrap aws://${DEPLOY_ACCOUNT_ID}/us-west-2
+   ```
+
+4. (Optional) You may also need to request an increase to certain account quotas:
+   ```shell
+   open https://${DEPLOY_REGION}.console.aws.amazon.com/servicequotas/home/services/ec2/quotas
+   ```
+   Set EC2-VPC Elastic IPs = 20 (the default is only 5).
+
+
+5. 
Choose 1 of the following command options:
+   - To set up AWS-LC CI, run command:
+     ```shell
+     ./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action deploy-ci --deploy-account ${DEPLOY_ACCOUNT_ID}
+     ```
+
+   - To update AWS-LC CI, run command:
+     ```shell
+     ./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action update-ci --deploy-account ${DEPLOY_ACCOUNT_ID}
+     ```
+   - To create/update Linux Docker images, run command:
+     ```shell
+     ./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action build-linux-img --deploy-account ${DEPLOY_ACCOUNT_ID}
+     ```
+
+   - To destroy AWS-LC CI resources created above, run command:
+     ```shell
+     ./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action destroy-ci --deploy-account ${DEPLOY_ACCOUNT_ID}
+     ```
+     NOTE: this command will destroy all resources (AWS CodeBuild and ECR).

For help, run command:
```
@@ -155,6 +224,10 @@ Below is CI file structure.
│   ├── __init__.py
│   ├── ecr_stack.py
│   ├── ...
+├── pipeline
+│   ├── __init__.py
+│   ├── pipeline_stack.py
+│   ├── ...
├── cdk.json
├── requirements.txt
├── run-cdk.sh
@@ -167,7 +240,8 @@ Below is CI file structure.
* `README.md` — The introductory README for this project.
* `app.py` — The “main” for this sample application.
* `cdk.json` — A configuration file for CDK that defines what executable CDK should run to generate the CDK construct tree.
-* `cdk` — A CDK module directory
+* `cdk` — A module directory that contains all CI-related stacks and utilities.
+* `pipeline` — A module directory that defines a continuous deployment pipeline for the CI.
* `requirements.txt` — This file is used by pip to install all of the dependencies for your application. In this case, it contains only -e . This tells pip to install the requirements specified in setup.py. It also tells pip to run python setup.py develop to install the code in the cdk module so that it can be edited in place.
* `setup.py` — Defines how this Python package would be constructed and what the dependencies are.

@@ -244,7 +318,7 @@ aws codebuild start-build-batch --project-name aws-lc-docker-image-build-linux
# Go to AWS console, you can check CodeBuild by clicking "Developer Tools > CodeBuild > Build projects".
```

-#### Windows Docker image build
+#### Windows Docker image build (DEPRECATED)

Windows docker image build requires more resources (like EC2 host, S3, SSM and so on) set up because DIND (Docker in Docker) is not supported by Windows. Below are some commands specific to windows docker image build.
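As a quick recap of the CI Commands section above, a minimal first-time deployment session could look like the sketch below. The account ID and repo owner are placeholder values, and it assumes the AWS CLI and CDK are already installed and configured:

```shell
# Placeholder values -- substitute your own deployment account and GitHub fork owner.
export DEPLOY_ACCOUNT_ID=123456789012
export GITHUB_REPO_OWNER=my-github-user

cd aws-lc/tests/ci/cdk

# One-time CDK bootstrap of the deployment account.
cdk bootstrap aws://${DEPLOY_ACCOUNT_ID}/us-west-2

# Deploy the AWS-LC CI stacks.
./run-cdk.sh --github-repo-owner ${GITHUB_REPO_OWNER} --action deploy-ci --deploy-account ${DEPLOY_ACCOUNT_ID}
```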
diff --git a/tests/ci/cdk/app.py b/tests/ci/cdk/app.py index 74f4e0352f..6f198ceb9c 100644 --- a/tests/ci/cdk/app.py +++ b/tests/ci/cdk/app.py @@ -5,56 +5,47 @@ from aws_cdk import Environment, App -# from cdk.bm_framework_stack import BmFrameworkStack -from cdk.aws_lc_analytics_stack import AwsLcGitHubAnalyticsStack -from cdk.aws_lc_android_ci_stack import AwsLcAndroidCIStack -from cdk.aws_lc_github_ci_stack import AwsLcGitHubCIStack -from cdk.aws_lc_github_fuzz_ci_stack import AwsLcGitHubFuzzCIStack -from cdk.aws_lc_ec2_test_framework_ci_stack import AwsLcEC2TestingCIStack from cdk.linux_docker_image_batch_build_stack import LinuxDockerImageBatchBuildStack +from pipeline.ci_util import add_ci_stacks +from pipeline.pipeline_stack import AwsLcCiPipeline from cdk.windows_docker_image_build_stack import WindowsDockerImageBuildStack -from cdk.aws_lc_github_ci_x509_stack import AwsLcGitHubX509CIStack from cdk.ecr_stack import EcrStack -from util.metadata import AWS_ACCOUNT, AWS_REGION, LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO, WINDOWS_X86_ECR_REPO +from util.metadata import ( + LINUX_X86_ECR_REPO, + LINUX_AARCH_ECR_REPO, + WINDOWS_X86_ECR_REPO, + PIPELINE_ACCOUNT, + PIPELINE_REGION, + DEPLOY_ACCOUNT, + DEPLOY_REGION, +) # Initialize app. app = App() -# Initialize env. -env = Environment(account=AWS_ACCOUNT, region=AWS_REGION) - -# Define AWS ECR stacks. -# ECR holds the docker images, which are pre-built to accelerate the code builds/tests of git pull requests. -EcrStack(app, "aws-lc-ecr-linux-x86", LINUX_X86_ECR_REPO, env=env) -EcrStack(app, "aws-lc-ecr-linux-aarch", LINUX_AARCH_ECR_REPO, env=env) -EcrStack(app, "aws-lc-ecr-windows-x86", WINDOWS_X86_ECR_REPO, env=env) - -# Define CodeBuild Batch job for building Docker images. -LinuxDockerImageBatchBuildStack(app, "aws-lc-docker-image-build-linux", env=env) - -# AWS CodeBuild cannot build Windows Docker images because DIND (Docker In Docker) is not supported on Windows. -# Windows Docker images are created by running commands in Windows EC2 instance. -WindowsDockerImageBuildStack(app, "aws-lc-docker-image-build-windows", env=env) - -# Define CodeBuild Batch job for testing code. 
-x86_build_spec_file = "cdk/codebuild/github_ci_linux_x86_omnibus.yaml" -AwsLcGitHubCIStack(app, "aws-lc-ci-linux-x86", x86_build_spec_file, env=env) -arm_build_spec_file = "cdk/codebuild/github_ci_linux_arm_omnibus.yaml" -AwsLcGitHubCIStack(app, "aws-lc-ci-linux-arm", arm_build_spec_file, env=env) -integration_build_spec_file = "cdk/codebuild/github_ci_integration_omnibus.yaml" -AwsLcGitHubCIStack(app, "aws-lc-ci-integration", integration_build_spec_file, env=env) -win_x86_build_spec_file = "cdk/codebuild/github_ci_windows_x86_omnibus.yaml" -AwsLcGitHubCIStack(app, "aws-lc-ci-windows-x86", win_x86_build_spec_file, env=env) -fuzz_build_spec_file = "cdk/codebuild/github_ci_fuzzing_omnibus.yaml" -AwsLcGitHubFuzzCIStack(app, "aws-lc-ci-fuzzing", fuzz_build_spec_file, env=env) -analytics_build_spec_file = "cdk/codebuild/github_ci_analytics_omnibus.yaml" -AwsLcGitHubAnalyticsStack(app, "aws-lc-ci-analytics", analytics_build_spec_file, env=env) -# bm_framework_build_spec_file = "cdk/codebuild/bm_framework_omnibus.yaml" -# BmFrameworkStack(app, "aws-lc-ci-bm-framework", bm_framework_build_spec_file, env=env) -ec2_test_framework_build_spec_file = "cdk/codebuild/ec2_test_framework_omnibus.yaml" -AwsLcEC2TestingCIStack(app, "aws-lc-ci-ec2-test-framework", ec2_test_framework_build_spec_file, env=env) -android_build_spec_file = "cdk/codebuild/github_ci_android_omnibus.yaml" -AwsLcAndroidCIStack(app, "aws-lc-ci-devicefarm-android", android_build_spec_file, env=env) -AwsLcGitHubX509CIStack(app, "aws-lc-ci-x509") +AwsLcCiPipeline( + app, + "AwsLcCiPipeline", + env=Environment(account=PIPELINE_ACCOUNT, region=PIPELINE_REGION), +) + +if DEPLOY_ACCOUNT and DEPLOY_REGION: + # Initialize env. + env = Environment(account=DEPLOY_ACCOUNT, region=DEPLOY_REGION) + + # Define AWS ECR stacks. + # ECR holds the docker images, which are pre-built to accelerate the code builds/tests of git pull requests. + EcrStack(app, "aws-lc-ecr-linux-x86", LINUX_X86_ECR_REPO, env=env) + EcrStack(app, "aws-lc-ecr-linux-aarch", LINUX_AARCH_ECR_REPO, env=env) + EcrStack(app, "aws-lc-ecr-windows-x86", WINDOWS_X86_ECR_REPO, env=env) + + # Define CodeBuild Batch job for building Docker images. + LinuxDockerImageBatchBuildStack(app, "aws-lc-docker-image-build-linux", env=env) + + # AWS CodeBuild cannot build Windows Docker images because DIND (Docker In Docker) is not supported on Windows. + # Windows Docker images are created by running commands in Windows EC2 instance. + WindowsDockerImageBuildStack(app, "aws-lc-docker-image-build-windows", env=env) + + add_ci_stacks(app, env=env) app.synth() diff --git a/tests/ci/cdk/cdk/aws_lc_analytics_stack.py b/tests/ci/cdk/cdk/aws_lc_analytics_stack.py index 216c33e4db..3340197ebb 100644 --- a/tests/ci/cdk/cdk/aws_lc_analytics_stack.py +++ b/tests/ci/cdk/cdk/aws_lc_analytics_stack.py @@ -1,29 +1,48 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 OR ISC +import typing -from aws_cdk import Duration, Stack, aws_codebuild as codebuild, aws_iam as iam, aws_ec2 as ec2, aws_efs as efs +from aws_cdk import ( + Duration, + Stack, + aws_codebuild as codebuild, + aws_iam as iam, + aws_ec2 as ec2, + aws_efs as efs, + Environment, +) from constructs import Construct +from cdk.aws_lc_base_ci_stack import AwsLcBaseCiStack from cdk.components import PruneStaleGitHubBuilds from util.iam_policies import code_build_publish_metrics_in_json -from util.metadata import GITHUB_REPO_OWNER, GITHUB_REPO_NAME +from util.metadata import ( + GITHUB_REPO_OWNER, + GITHUB_REPO_NAME, + PRE_PROD_ACCOUNT, + STAGING_GITHUB_REPO_OWNER, + STAGING_GITHUB_REPO_NAME, +) from util.build_spec_loader import BuildSpecLoader -class AwsLcGitHubAnalyticsStack(Stack): +class AwsLcGitHubAnalyticsStack(AwsLcBaseCiStack): """Define a stack used to batch execute AWS-LC tests in GitHub.""" - def __init__(self, - scope: Construct, - id: str, - spec_file_path: str, - **kwargs) -> None: - super().__init__(scope, id, **kwargs) - - # Define CodeBuild resource. - git_hub_source = codebuild.Source.git_hub( - owner=GITHUB_REPO_OWNER, - repo=GITHUB_REPO_NAME, + def __init__( + self, + scope: Construct, + id: str, + spec_file_path: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs + ) -> None: + super().__init__(scope, id, env=env, timeout=120, **kwargs) + + # Override default CodeBuild resource. + self.git_hub_source = codebuild.Source.git_hub( + owner=self.github_repo_owner, + repo=self.github_repo_name, webhook=True, webhook_filters=[ codebuild.FilterGroup.in_event_of(codebuild.EventAction.PUSH) @@ -31,28 +50,42 @@ def __init__(self, # the branch or create a new FIPS branch it should be updated to '(main)|(fips.*)' .and_branch_is("main") ], - webhook_triggers_batch_build=True) + webhook_triggers_batch_build=True, + ) # Define a IAM role for this stack. - metrics_policy = iam.PolicyDocument.from_json(code_build_publish_metrics_in_json()) + metrics_policy = iam.PolicyDocument.from_json( + code_build_publish_metrics_in_json(env) + ) inline_policies = {"metric_policy": metrics_policy} - role = iam.Role(scope=self, - id="{}-role".format(id), - assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), - inline_policies=inline_policies) + role = iam.Role( + scope=self, + id="{}-role".format(id), + assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), + inline_policies=inline_policies, + ) # Define CodeBuild. 
analytics = codebuild.Project( scope=self, id="AnalyticsCodeBuild", project_name=id, - source=git_hub_source, + source=self.git_hub_source, role=role, timeout=Duration.minutes(120), - environment=codebuild.BuildEnvironment(compute_type=codebuild.ComputeType.LARGE, - privileged=True, - build_image=codebuild.LinuxBuildImage.STANDARD_4_0), - build_spec=BuildSpecLoader.load(spec_file_path)) + environment=codebuild.BuildEnvironment( + compute_type=codebuild.ComputeType.LARGE, + privileged=True, + build_image=codebuild.LinuxBuildImage.STANDARD_4_0, + ), + build_spec=BuildSpecLoader.load(spec_file_path, env), + ) analytics.enable_batch_builds() - PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=analytics, ec2_permissions=False) + PruneStaleGitHubBuilds( + scope=self, + id="PruneStaleGitHubBuilds", + project=analytics, + ec2_permissions=False, + env=env, + ) diff --git a/tests/ci/cdk/cdk/aws_lc_android_ci_stack.py b/tests/ci/cdk/cdk/aws_lc_android_ci_stack.py index 9a3fc00a61..9db467bfbd 100644 --- a/tests/ci/cdk/cdk/aws_lc_android_ci_stack.py +++ b/tests/ci/cdk/cdk/aws_lc_android_ci_stack.py @@ -1,68 +1,88 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 OR ISC +import typing -from aws_cdk import Duration, Stack, aws_codebuild as codebuild, aws_iam as iam +from aws_cdk import ( + Duration, + Stack, + aws_codebuild as codebuild, + aws_iam as iam, + Environment, +) from constructs import Construct +from cdk.aws_lc_base_ci_stack import AwsLcBaseCiStack from cdk.components import PruneStaleGitHubBuilds -from util.iam_policies import code_build_batch_policy_in_json, device_farm_access_policy_in_json -from util.metadata import GITHUB_REPO_OWNER, GITHUB_REPO_NAME, GITHUB_PUSH_CI_BRANCH_TARGETS +from util.iam_policies import ( + code_build_batch_policy_in_json, + device_farm_access_policy_in_json, +) +from util.metadata import ( + GITHUB_REPO_OWNER, + GITHUB_REPO_NAME, + GITHUB_PUSH_CI_BRANCH_TARGETS, + PRE_PROD_ACCOUNT, + STAGING_GITHUB_REPO_OWNER, + STAGING_GITHUB_REPO_NAME, +) from util.build_spec_loader import BuildSpecLoader -class AwsLcAndroidCIStack(Stack): +class AwsLcAndroidCIStack(AwsLcBaseCiStack): """Define a stack used to batch execute AWS-LC tests in GitHub.""" # The Device Farm resource used to in this CI spec, must be manually created. # TODO: Automate Device Farm creation with cdk script. - def __init__(self, - scope: Construct, - id: str, - spec_file_path: str, - **kwargs) -> None: - super().__init__(scope, id, **kwargs) - - # Define CodeBuild resource. - git_hub_source = codebuild.Source.git_hub( - owner=GITHUB_REPO_OWNER, - repo=GITHUB_REPO_NAME, - webhook=True, - webhook_filters=[ - codebuild.FilterGroup.in_event_of( - codebuild.EventAction.PULL_REQUEST_CREATED, - codebuild.EventAction.PULL_REQUEST_UPDATED, - codebuild.EventAction.PULL_REQUEST_REOPENED), - codebuild.FilterGroup.in_event_of(codebuild.EventAction.PUSH).and_branch_is( - GITHUB_PUSH_CI_BRANCH_TARGETS), - ], - webhook_triggers_batch_build=True) + def __init__( + self, + scope: Construct, + id: str, + spec_file_path: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs + ) -> None: + super().__init__(scope, id, env=env, timeout=180, **kwargs) # Define a IAM role for this stack. 
code_build_batch_policy = iam.PolicyDocument.from_json( - code_build_batch_policy_in_json([id]) + code_build_batch_policy_in_json([id], env) ) device_farm_policy = iam.PolicyDocument.from_json( - device_farm_access_policy_in_json() + device_farm_access_policy_in_json(env) + ) + inline_policies = { + "code_build_batch_policy": code_build_batch_policy, + "device_farm_policy": device_farm_policy, + } + role = iam.Role( + scope=self, + id="{}-role".format(id), + assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), + inline_policies=inline_policies, ) - inline_policies = {"code_build_batch_policy": code_build_batch_policy, "device_farm_policy": device_farm_policy} - role = iam.Role(scope=self, - id="{}-role".format(id), - assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), - inline_policies=inline_policies) # Define CodeBuild. project = codebuild.Project( scope=self, id=id, project_name=id, - source=git_hub_source, + source=self.git_hub_source, role=role, - timeout=Duration.minutes(180), - environment=codebuild.BuildEnvironment(compute_type=codebuild.ComputeType.SMALL, - privileged=False, - build_image=codebuild.LinuxBuildImage.STANDARD_4_0), - build_spec=BuildSpecLoader.load(spec_file_path)) + timeout=Duration.minutes(self.timeout), + environment=codebuild.BuildEnvironment( + compute_type=codebuild.ComputeType.SMALL, + privileged=False, + build_image=codebuild.LinuxBuildImage.STANDARD_4_0, + ), + build_spec=BuildSpecLoader.load(spec_file_path, env), + ) project.enable_batch_builds() - PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=project, ec2_permissions=False) + PruneStaleGitHubBuilds( + scope=self, + id="PruneStaleGitHubBuilds", + project=project, + ec2_permissions=False, + env=env, + ) diff --git a/tests/ci/cdk/cdk/aws_lc_base_ci_stack.py b/tests/ci/cdk/cdk/aws_lc_base_ci_stack.py new file mode 100644 index 0000000000..15d9564edd --- /dev/null +++ b/tests/ci/cdk/cdk/aws_lc_base_ci_stack.py @@ -0,0 +1,58 @@ +import typing + +from aws_cdk import aws_codebuild as codebuild, Environment, Stack +from constructs import Construct + +from cdk.components import PruneStaleGitHubBuilds +from util.metadata import ( + STAGING_GITHUB_REPO_OWNER, + STAGING_GITHUB_REPO_NAME, + PRE_PROD_ACCOUNT, + GITHUB_REPO_OWNER, + GITHUB_REPO_NAME, + GITHUB_PUSH_CI_BRANCH_TARGETS, +) + + +class AwsLcBaseCiStack(Stack): + def __init__( + self, + scope: Construct, + id: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + ignore_failure: typing.Optional[bool] = False, + timeout: typing.Optional[int] = 60, + **kwargs + ) -> None: + super().__init__(scope, id, env=env, **kwargs) + self.ignore_failure = ignore_failure + self.timeout = timeout + self.env = env + + self.github_repo_owner = ( + STAGING_GITHUB_REPO_OWNER + if (env.account == PRE_PROD_ACCOUNT) + else GITHUB_REPO_OWNER + ) + self.github_repo_name = ( + STAGING_GITHUB_REPO_NAME + if (env.account == PRE_PROD_ACCOUNT) + else GITHUB_REPO_NAME + ) + + self.git_hub_source = codebuild.Source.git_hub( + owner=self.github_repo_owner, + repo=self.github_repo_name, + webhook=True, + webhook_filters=[ + codebuild.FilterGroup.in_event_of( + codebuild.EventAction.PULL_REQUEST_CREATED, + codebuild.EventAction.PULL_REQUEST_UPDATED, + codebuild.EventAction.PULL_REQUEST_REOPENED, + ), + codebuild.FilterGroup.in_event_of( + codebuild.EventAction.PUSH + ).and_branch_is(GITHUB_PUSH_CI_BRANCH_TARGETS), + ], + webhook_triggers_batch_build=True, + ) diff --git a/tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py 
b/tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py index 0dccf5b02a..e72b76368c 100644 --- a/tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py +++ b/tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py @@ -2,118 +2,187 @@ # SPDX-License-Identifier: Apache-2.0 OR ISC import subprocess +import typing + import boto3 from botocore.exceptions import ClientError -from aws_cdk import CfnTag, Duration, Stack, Tags, aws_ec2 as ec2, aws_codebuild as codebuild, aws_iam as iam, aws_s3 as s3, aws_logs as logs +from aws_cdk import ( + CfnTag, + Duration, + Stack, + Tags, + aws_ec2 as ec2, + aws_codebuild as codebuild, + aws_iam as iam, + aws_s3 as s3, + aws_logs as logs, + Environment, +) from constructs import Construct +from cdk.aws_lc_base_ci_stack import AwsLcBaseCiStack from cdk.components import PruneStaleGitHubBuilds -from util.metadata import AWS_ACCOUNT, AWS_REGION, GITHUB_PUSH_CI_BRANCH_TARGETS, GITHUB_REPO_OWNER, GITHUB_REPO_NAME, LINUX_AARCH_ECR_REPO, \ - LINUX_X86_ECR_REPO -from util.iam_policies import code_build_batch_policy_in_json, ec2_policies_in_json, ssm_policies_in_json, s3_read_write_policy_in_json, ecr_power_user_policy_in_json +from util.metadata import ( + GITHUB_PUSH_CI_BRANCH_TARGETS, + GITHUB_REPO_OWNER, + GITHUB_REPO_NAME, + LINUX_AARCH_ECR_REPO, + LINUX_X86_ECR_REPO, + PRE_PROD_ACCOUNT, + STAGING_GITHUB_REPO_OWNER, + STAGING_GITHUB_REPO_NAME, +) +from util.iam_policies import ( + code_build_batch_policy_in_json, + ec2_policies_in_json, + ssm_policies_in_json, + s3_read_write_policy_in_json, + ecr_power_user_policy_in_json, +) from util.build_spec_loader import BuildSpecLoader # detailed documentation can be found here: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-ec2-readme.html -class AwsLcEC2TestingCIStack(Stack): + +class AwsLcEC2TestingCIStack(AwsLcBaseCiStack): """Define a stack used to create a CodeBuild instance on which to execute the AWS-LC m1 ci ec2 instance""" - def __init__(self, - scope: Construct, - id: str, - spec_file_path: str, - **kwargs) -> None: - super().__init__(scope, id, **kwargs) - - # Define CodeBuild resource. - git_hub_source = codebuild.Source.git_hub( - owner=GITHUB_REPO_OWNER, - repo=GITHUB_REPO_NAME, - webhook=True, - webhook_filters=[ - codebuild.FilterGroup.in_event_of( - codebuild.EventAction.PULL_REQUEST_CREATED, - codebuild.EventAction.PULL_REQUEST_UPDATED, - codebuild.EventAction.PULL_REQUEST_REOPENED), - codebuild.FilterGroup.in_event_of(codebuild.EventAction.PUSH).and_branch_is( - GITHUB_PUSH_CI_BRANCH_TARGETS), - ], - webhook_triggers_batch_build=True) + def __init__( + self, + scope: Construct, + id: str, + spec_file_path: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs + ) -> None: + super().__init__(scope, id, env=env, timeout=120, **kwargs) # S3 bucket for testing internal fixes. 
- s3_read_write_policy = iam.PolicyDocument.from_json(s3_read_write_policy_in_json("aws-lc-codebuild")) - ecr_power_user_policy = iam.PolicyDocument.from_json(ecr_power_user_policy_in_json([LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO])) - ec2_inline_policies = {"s3_read_write_policy": s3_read_write_policy, "ecr_power_user_policy": ecr_power_user_policy} - ec2_role = iam.Role(scope=self, id="{}-ec2-role".format(id), - role_name="{}-ec2-role".format(id), - assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"), - inline_policies=ec2_inline_policies, - managed_policies=[ - iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"), - iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchAgentServerPolicy") - ]) - iam.CfnInstanceProfile(scope=self, id="{}-ec2-profile".format(id), - roles=[ec2_role.role_name], - instance_profile_name="{}-ec2-profile".format(id)) + s3_read_write_policy = iam.PolicyDocument.from_json( + s3_read_write_policy_in_json("aws-lc-codebuild") + ) + ecr_power_user_policy = iam.PolicyDocument.from_json( + ecr_power_user_policy_in_json( + [LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO], env + ) + ) + ec2_inline_policies = { + "s3_read_write_policy": s3_read_write_policy, + "ecr_power_user_policy": ecr_power_user_policy, + } + ec2_role = iam.Role( + scope=self, + id="{}-ec2-role".format(id), + role_name="{}-ec2-role".format(id), + assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"), + inline_policies=ec2_inline_policies, + managed_policies=[ + iam.ManagedPolicy.from_aws_managed_policy_name( + "AmazonSSMManagedInstanceCore" + ), + iam.ManagedPolicy.from_aws_managed_policy_name( + "CloudWatchAgentServerPolicy" + ), + ], + ) + iam.CfnInstanceProfile( + scope=self, + id="{}-ec2-profile".format(id), + roles=[ec2_role.role_name], + instance_profile_name="{}-ec2-profile".format(id), + ) # create vpc for ec2s - vpc = ec2.Vpc(self, id="{}-ec2-vpc".format(id)) - selected_subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS) + vpc = ec2.Vpc( + self, + id="{}-ec2-vpc".format(id), + nat_gateways=1 # minimize the number of idle NAT gateways and thus elastic IPs + ) - # create security group with default rules - security_group = ec2.SecurityGroup(self, id="{}-ec2-sg".format(id), - allow_all_outbound=True, - vpc=vpc, - security_group_name='codebuild_ec2_sg') + selected_subnets = vpc.select_subnets( + subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS + ) + # create security group with default rules + security_group = ec2.SecurityGroup( + self, + id="{}-ec2-sg".format(id), + allow_all_outbound=True, + vpc=vpc, + security_group_name="codebuild_ec2_sg", + ) # Define a IAM role for this stack. 
- code_build_batch_policy = iam.PolicyDocument.from_json(code_build_batch_policy_in_json([id])) - ec2_policy = iam.PolicyDocument.from_json(ec2_policies_in_json(ec2_role.role_name, security_group.security_group_id, selected_subnets.subnets[0].subnet_id, vpc.vpc_id)) - ssm_policy = iam.PolicyDocument.from_json(ssm_policies_in_json()) - codebuild_inline_policies = {"code_build_batch_policy": code_build_batch_policy, - "ec2_policy": ec2_policy, - "ssm_policy": ssm_policy} - codebuild_role = iam.Role(scope=self, - id="{}-codebuild-role".format(id), - assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), - inline_policies=codebuild_inline_policies, - managed_policies=[ - iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchAgentServerPolicy") - ]) + code_build_batch_policy = iam.PolicyDocument.from_json( + code_build_batch_policy_in_json([id], env) + ) + ec2_policy = iam.PolicyDocument.from_json( + ec2_policies_in_json( + ec2_role.role_name, + security_group.security_group_id, + selected_subnets.subnets[0].subnet_id, + vpc.vpc_id, + env, + ) + ) + ssm_policy = iam.PolicyDocument.from_json(ssm_policies_in_json(env)) + codebuild_inline_policies = { + "code_build_batch_policy": code_build_batch_policy, + "ec2_policy": ec2_policy, + "ssm_policy": ssm_policy, + } + codebuild_role = iam.Role( + scope=self, + id="{}-codebuild-role".format(id), + assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), + inline_policies=codebuild_inline_policies, + managed_policies=[ + iam.ManagedPolicy.from_aws_managed_policy_name( + "CloudWatchAgentServerPolicy" + ) + ], + ) # Define CodeBuild. project = codebuild.Project( scope=self, id=id, project_name=id, - source=git_hub_source, + source=self.git_hub_source, role=codebuild_role, - timeout=Duration.minutes(120), - environment=codebuild.BuildEnvironment(compute_type=codebuild.ComputeType.SMALL, - privileged=False, - build_image=codebuild.LinuxBuildImage.STANDARD_4_0), - build_spec=BuildSpecLoader.load(spec_file_path), - environment_variables= { + timeout=Duration.minutes(self.timeout), + environment=codebuild.BuildEnvironment( + compute_type=codebuild.ComputeType.SMALL, + privileged=False, + build_image=codebuild.LinuxBuildImage.STANDARD_4_0, + ), + build_spec=BuildSpecLoader.load(spec_file_path, env), + environment_variables={ "EC2_SECURITY_GROUP_ID": codebuild.BuildEnvironmentVariable( value=security_group.security_group_id ), "EC2_SUBNET_ID": codebuild.BuildEnvironmentVariable( value=selected_subnets.subnets[0].subnet_id ), - "EC2_VPC_ID": codebuild.BuildEnvironmentVariable( - value=vpc.vpc_id - ), - }) + "EC2_VPC_ID": codebuild.BuildEnvironmentVariable(value=vpc.vpc_id), + }, + ) project.enable_batch_builds() - PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=project, ec2_permissions=True) + PruneStaleGitHubBuilds( + scope=self, + id="PruneStaleGitHubBuilds", + project=project, + ec2_permissions=True, + env=env, + ) # Define logs for SSM. log_group_name = "{}-cw-logs".format(id) - log_group = logs.CfnLogGroup(self, log_group_name, + log_group = logs.CfnLogGroup( + self, + log_group_name, log_group_name=log_group_name, retention_in_days=365, ) - diff --git a/tests/ci/cdk/cdk/aws_lc_github_ci_stack.py b/tests/ci/cdk/cdk/aws_lc_github_ci_stack.py index f3a262af3d..474b84a89b 100644 --- a/tests/ci/cdk/cdk/aws_lc_github_ci_stack.py +++ b/tests/ci/cdk/cdk/aws_lc_github_ci_stack.py @@ -1,69 +1,84 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 OR ISC +import typing -from aws_cdk import Duration, Stack, aws_codebuild as codebuild, aws_iam as iam, aws_s3_assets, aws_logs as logs +from aws_cdk import ( + Duration, + Stack, + aws_codebuild as codebuild, + aws_iam as iam, + aws_s3_assets, + aws_logs as logs, + Environment, +) from constructs import Construct +from cdk.aws_lc_base_ci_stack import AwsLcBaseCiStack from cdk.components import PruneStaleGitHubBuilds -from util.iam_policies import code_build_batch_policy_in_json, code_build_publish_metrics_in_json, code_build_cloudwatch_logs_policy_in_json -from util.metadata import CAN_AUTOLOAD, GITHUB_PUSH_CI_BRANCH_TARGETS, GITHUB_REPO_OWNER, GITHUB_REPO_NAME +from util.iam_policies import ( + code_build_batch_policy_in_json, + code_build_publish_metrics_in_json, + code_build_cloudwatch_logs_policy_in_json, +) +from util.metadata import ( + GITHUB_PUSH_CI_BRANCH_TARGETS, + GITHUB_REPO_OWNER, + GITHUB_REPO_NAME, + PRE_PROD_ACCOUNT, + STAGING_GITHUB_REPO_OWNER, + STAGING_GITHUB_REPO_NAME, +) from util.build_spec_loader import BuildSpecLoader -class AwsLcGitHubCIStack(Stack): +class AwsLcGitHubCIStack(AwsLcBaseCiStack): """Define a stack used to batch execute AWS-LC tests in GitHub.""" - def __init__(self, - scope: Construct, - id: str, - spec_file_path: str, - **kwargs) -> None: - super().__init__(scope, id, **kwargs) - - # Define CodeBuild resource. - git_hub_source = codebuild.Source.git_hub( - owner=GITHUB_REPO_OWNER, - repo=GITHUB_REPO_NAME, - webhook=True, - webhook_filters=[ - codebuild.FilterGroup.in_event_of( - codebuild.EventAction.PULL_REQUEST_CREATED, - codebuild.EventAction.PULL_REQUEST_UPDATED, - codebuild.EventAction.PULL_REQUEST_REOPENED), - codebuild.FilterGroup.in_event_of(codebuild.EventAction.PUSH).and_branch_is( - GITHUB_PUSH_CI_BRANCH_TARGETS), - ], - webhook_triggers_batch_build=True) + def __init__( + self, + scope: Construct, + id: str, + spec_file_path: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs + ) -> None: + super().__init__(scope, id, env=env, timeout=180, **kwargs) # Define a IAM role for accessing build resources log_group = logs.LogGroup(self, id="{}-public-logs".format(id)) code_build_cloudwatch_logs_policy = iam.PolicyDocument.from_json( code_build_cloudwatch_logs_policy_in_json([log_group]) ) - resource_access_role = iam.Role(scope=self, - id="{}-resource-role".format(id), - assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), - inline_policies={ - "code_build_cloudwatch_logs_policy": code_build_cloudwatch_logs_policy - }) + resource_access_role = iam.Role( + scope=self, + id="{}-resource-role".format(id), + assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), + inline_policies={ + "code_build_cloudwatch_logs_policy": code_build_cloudwatch_logs_policy, + }, + ) # Define a IAM role for this stack. 
code_build_batch_policy = iam.PolicyDocument.from_json( - code_build_batch_policy_in_json([id]) + code_build_batch_policy_in_json([id], env) + ) + metrics_policy = iam.PolicyDocument.from_json( + code_build_publish_metrics_in_json(env) + ) + + inline_policies = { + "code_build_batch_policy": code_build_batch_policy, + "metrics_policy": metrics_policy, + } + role = iam.Role( + scope=self, + id="{}-role".format(id), + assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), + inline_policies=inline_policies, ) - metrics_policy = iam.PolicyDocument.from_json(code_build_publish_metrics_in_json()) - inline_policies = {"code_build_batch_policy": code_build_batch_policy, - "metrics_policy": metrics_policy, - } - role = iam.Role(scope=self, - id="{}-role".format(id), - assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), - inline_policies=inline_policies) logging_options = codebuild.LoggingOptions( - cloud_watch=codebuild.CloudWatchLoggingOptions( - log_group=log_group - ) + cloud_watch=codebuild.CloudWatchLoggingOptions(log_group=log_group) ) # Define CodeBuild. @@ -71,17 +86,28 @@ def __init__(self, scope=self, id=id, project_name=id, - source=git_hub_source, + source=self.git_hub_source, role=role, - timeout=Duration.minutes(180), + timeout=Duration.minutes(self.timeout), logging=logging_options, - environment=codebuild.BuildEnvironment(compute_type=codebuild.ComputeType.SMALL, - privileged=False, - build_image=codebuild.LinuxBuildImage.STANDARD_4_0), - build_spec=BuildSpecLoader.load(spec_file_path)) + environment=codebuild.BuildEnvironment( + compute_type=codebuild.ComputeType.SMALL, + privileged=False, + build_image=codebuild.LinuxBuildImage.STANDARD_4_0, + ), + build_spec=BuildSpecLoader.load(spec_file_path, env=env), + ) cfn_project = project.node.default_child cfn_project.add_property_override("Visibility", "PUBLIC_READ") - cfn_project.add_property_override("ResourceAccessRole", resource_access_role.role_arn) + cfn_project.add_property_override( + "ResourceAccessRole", resource_access_role.role_arn + ) project.enable_batch_builds() - PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=project, ec2_permissions=False) + PruneStaleGitHubBuilds( + scope=self, + id="PruneStaleGitHubBuilds", + project=project, + ec2_permissions=False, + env=env, + ) diff --git a/tests/ci/cdk/cdk/aws_lc_github_ci_x509_stack.py b/tests/ci/cdk/cdk/aws_lc_github_ci_x509_stack.py index a6e4faf400..11a59d3d6e 100644 --- a/tests/ci/cdk/cdk/aws_lc_github_ci_x509_stack.py +++ b/tests/ci/cdk/cdk/aws_lc_github_ci_x509_stack.py @@ -1,38 +1,35 @@ -from aws_cdk import Duration, Stack, aws_codebuild as codebuild, aws_s3 as s3 +import typing + +from aws_cdk import ( + Duration, + Stack, + aws_codebuild as codebuild, + aws_s3 as s3, + Environment, +) from constructs import Construct + +from cdk.aws_lc_base_ci_stack import AwsLcBaseCiStack from util.build_spec_loader import BuildSpecLoader from util.metadata import ( GITHUB_PUSH_CI_BRANCH_TARGETS, GITHUB_REPO_NAME, GITHUB_REPO_OWNER, + PRE_PROD_ACCOUNT, + STAGING_GITHUB_REPO_OWNER, + STAGING_GITHUB_REPO_NAME, ) -class AwsLcGitHubX509CIStack(Stack): +class AwsLcGitHubX509CIStack(AwsLcBaseCiStack): def __init__( self, scope: Construct, id: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], **kwargs, ) -> None: - super().__init__(scope, id, **kwargs) - - git_hub_source = codebuild.Source.git_hub( - owner=GITHUB_REPO_OWNER, - repo=GITHUB_REPO_NAME, - webhook=True, - webhook_filters=[ - codebuild.FilterGroup.in_event_of( - 
codebuild.EventAction.PULL_REQUEST_CREATED, - codebuild.EventAction.PULL_REQUEST_UPDATED, - codebuild.EventAction.PULL_REQUEST_REOPENED, - ), - codebuild.FilterGroup.in_event_of( - codebuild.EventAction.PUSH - ).and_branch_is(GITHUB_PUSH_CI_BRANCH_TARGETS), - ], - webhook_triggers_batch_build=True, - ) + super().__init__(scope, id, env=env, **kwargs) self.reports_bucket = s3.Bucket( self, @@ -72,13 +69,13 @@ def __init__( noncurrent_version_expiration=Duration.days(1), ) - self.codebuild_project = codebuild.Project( + self.project = codebuild.Project( self, id, project_name=id, - source=git_hub_source, + source=self.git_hub_source, build_spec=BuildSpecLoader.load( - "cdk/codebuild/github_ci_x509_omnibus.yaml" + "cdk/codebuild/github_ci_x509_omnibus.yaml", env ), environment=codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage.STANDARD_6_0, diff --git a/tests/ci/cdk/cdk/aws_lc_github_fuzz_ci_stack.py b/tests/ci/cdk/cdk/aws_lc_github_fuzz_ci_stack.py index 6f6e2d4d06..7587f4404d 100644 --- a/tests/ci/cdk/cdk/aws_lc_github_fuzz_ci_stack.py +++ b/tests/ci/cdk/cdk/aws_lc_github_fuzz_ci_stack.py @@ -1,57 +1,74 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 OR ISC +import typing -from aws_cdk import Duration, Size, Stack, aws_codebuild as codebuild, aws_iam as iam, aws_ec2 as ec2, aws_efs as efs +from aws_cdk import ( + Duration, + Size, + Stack, + aws_codebuild as codebuild, + aws_iam as iam, + aws_ec2 as ec2, + aws_efs as efs, + Environment, +) from constructs import Construct +from cdk.aws_lc_base_ci_stack import AwsLcBaseCiStack from cdk.components import PruneStaleGitHubBuilds -from util.ecr_util import ecr_arn -from util.iam_policies import code_build_batch_policy_in_json, \ - code_build_publish_metrics_in_json -from util.metadata import AWS_ACCOUNT, AWS_REGION, GITHUB_PUSH_CI_BRANCH_TARGETS, GITHUB_REPO_OWNER, GITHUB_REPO_NAME +from util.iam_policies import ( + code_build_batch_policy_in_json, + code_build_publish_metrics_in_json, +) +from util.metadata import ( + GITHUB_PUSH_CI_BRANCH_TARGETS, + GITHUB_REPO_OWNER, + GITHUB_REPO_NAME, + PRE_PROD_ACCOUNT, + STAGING_GITHUB_REPO_OWNER, + STAGING_GITHUB_REPO_NAME, +) from util.build_spec_loader import BuildSpecLoader -class AwsLcGitHubFuzzCIStack(Stack): +class AwsLcGitHubFuzzCIStack(AwsLcBaseCiStack): """Define a stack used to batch execute AWS-LC tests in GitHub.""" - def __init__(self, - scope: Construct, - id: str, - spec_file_path: str, - **kwargs) -> None: - super().__init__(scope, id, **kwargs) - - # Define CodeBuild resource. - git_hub_source = codebuild.Source.git_hub( - owner=GITHUB_REPO_OWNER, - repo=GITHUB_REPO_NAME, - webhook=True, - webhook_filters=[ - codebuild.FilterGroup.in_event_of( - codebuild.EventAction.PULL_REQUEST_CREATED, - codebuild.EventAction.PULL_REQUEST_UPDATED, - codebuild.EventAction.PULL_REQUEST_REOPENED), - codebuild.FilterGroup.in_event_of(codebuild.EventAction.PUSH).and_branch_is( - GITHUB_PUSH_CI_BRANCH_TARGETS), - ], - webhook_triggers_batch_build=True) + def __init__( + self, + scope: Construct, + id: str, + spec_file_path: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs + ) -> None: + super().__init__(scope, id, env=env, timeout=120, **kwargs) # Define a IAM role for this stack. 
code_build_batch_policy = iam.PolicyDocument.from_json( - code_build_batch_policy_in_json([id]) + code_build_batch_policy_in_json([id], env) + ) + fuzz_policy = iam.PolicyDocument.from_json( + code_build_publish_metrics_in_json(env) + ) + inline_policies = { + "code_build_batch_policy": code_build_batch_policy, + "fuzz_policy": fuzz_policy, + } + role = iam.Role( + scope=self, + id="{}-role".format(id), + assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), + inline_policies=inline_policies, ) - fuzz_policy = iam.PolicyDocument.from_json(code_build_publish_metrics_in_json()) - inline_policies = {"code_build_batch_policy": code_build_batch_policy, - "fuzz_policy": fuzz_policy} - role = iam.Role(scope=self, - id="{}-role".format(id), - assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), - inline_policies=inline_policies) # Create the VPC for EFS and CodeBuild - public_subnet = ec2.SubnetConfiguration(name="PublicFuzzingSubnet", subnet_type=ec2.SubnetType.PUBLIC) - private_subnet = ec2.SubnetConfiguration(name="PrivateFuzzingSubnet", subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS) + public_subnet = ec2.SubnetConfiguration( + name="PublicFuzzingSubnet", subnet_type=ec2.SubnetType.PUBLIC + ) + private_subnet = ec2.SubnetConfiguration( + name="PrivateFuzzingSubnet", subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS + ) # Create a VPC with a single public and private subnet in a single AZ. This is to avoid the elastic IP limit # being used up by a bunch of idle NAT gateways @@ -59,21 +76,21 @@ def __init__(self, scope=self, id="{}-FuzzingVPC".format(id), subnet_configuration=[public_subnet, private_subnet], - max_azs=1 + max_azs=1, ) build_security_group = ec2.SecurityGroup( - scope=self, - id="{}-FuzzingSecurityGroup".format(id), - vpc=fuzz_vpc + scope=self, id="{}-FuzzingSecurityGroup".format(id), vpc=fuzz_vpc ) build_security_group.add_ingress_rule( peer=build_security_group, connection=ec2.Port.all_traffic(), - description="Allow all traffic inside security group" + description="Allow all traffic inside security group", ) - efs_subnet_selection = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS) + efs_subnet_selection = ec2.SubnetSelection( + subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS + ) # Create the EFS to store the corpus and logs. EFS allows new filesystems to burst to 100 MB/s for the first 2 # TB of data read/written, after that the rate is limited based on the size of the filesystem. 
As of late @@ -102,15 +119,18 @@ def __init__(self, scope=self, id="FuzzingCodeBuild", project_name=id, - source=git_hub_source, + source=self.git_hub_source, role=role, - timeout=Duration.minutes(120), - environment=codebuild.BuildEnvironment(compute_type=codebuild.ComputeType.LARGE, - privileged=True, - build_image=codebuild.LinuxBuildImage.STANDARD_4_0), - build_spec=BuildSpecLoader.load(spec_file_path), + timeout=Duration.minutes(self.timeout), + environment=codebuild.BuildEnvironment( + compute_type=codebuild.ComputeType.LARGE, + privileged=True, + build_image=codebuild.LinuxBuildImage.STANDARD_4_0, + ), + build_spec=BuildSpecLoader.load(spec_file_path, env), vpc=fuzz_vpc, - security_groups=[build_security_group]) + security_groups=[build_security_group], + ) fuzz_codebuild.enable_batch_builds() # CDK raw overrides: https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_raw @@ -121,11 +141,23 @@ def __init__(self, # # TODO: add this to the CDK project above when it supports EfsFileSystemLocation cfn_codebuild = fuzz_codebuild.node.default_child - cfn_codebuild.add_override("Properties.FileSystemLocations", [{ - "Identifier": "fuzzing_root", - "Location": "%s.efs.%s.amazonaws.com:/" % (fuzz_filesystem.file_system_id, AWS_REGION), - "MountPoint": "/efs_fuzzing_root", - "Type": "EFS" - }]) + cfn_codebuild.add_override( + "Properties.FileSystemLocations", + [ + { + "Identifier": "fuzzing_root", + "Location": "%s.efs.%s.amazonaws.com:/" + % (fuzz_filesystem.file_system_id, env.region), + "MountPoint": "/efs_fuzzing_root", + "Type": "EFS", + } + ], + ) - PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=fuzz_codebuild, ec2_permissions=False) + PruneStaleGitHubBuilds( + scope=self, + id="PruneStaleGitHubBuilds", + project=fuzz_codebuild, + ec2_permissions=False, + env=env, + ) diff --git a/tests/ci/cdk/cdk/bm_framework_stack.py b/tests/ci/cdk/cdk/bm_framework_stack.py deleted file mode 100644 index c9a880ab45..0000000000 --- a/tests/ci/cdk/cdk/bm_framework_stack.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 OR ISC - -import subprocess -import boto3 - -from botocore.exceptions import ClientError -from aws_cdk import Duration, Stack, aws_ec2 as ec2, aws_codebuild as codebuild, aws_iam as iam, aws_logs as logs -from constructs import Construct - -from cdk.components import PruneStaleGitHubBuilds -from util.metadata import AWS_ACCOUNT, AWS_REGION, GITHUB_REPO_OWNER, GITHUB_REPO_NAME -from util.iam_policies import code_build_batch_policy_in_json, ec2_bm_framework_policies_in_json, \ - ssm_bm_framework_policies_in_json, ecr_power_user_policy_in_json -from util.build_spec_loader import BuildSpecLoader - -# detailed documentation can be found here: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-ec2-readme.html - -class BmFrameworkStack(Stack): - """Define a stack used to create a CodeBuild instance on which to execute the AWS-LC benchmarking framework""" - - def __init__(self, - scope: Construct, - id: str, - spec_file_path: str, - **kwargs) -> None: - super().__init__(scope, id, **kwargs) - - # Define some variables that will be commonly used - CLOUDWATCH_LOGS = "{}-{}-cw-logs".format(AWS_ACCOUNT, id) - - # Define CodeBuild resource. 
- git_hub_source = codebuild.Source.git_hub( - owner=GITHUB_REPO_OWNER, - repo=GITHUB_REPO_NAME, - webhook=True, - webhook_filters=[ - codebuild.FilterGroup.in_event_of( - codebuild.EventAction.PULL_REQUEST_CREATED, - codebuild.EventAction.PULL_REQUEST_UPDATED, - codebuild.EventAction.PULL_REQUEST_REOPENED) - ], - webhook_triggers_batch_build=True) - - # Define a IAM role for this stack. - code_build_batch_policy = iam.PolicyDocument.from_json(code_build_batch_policy_in_json([id])) - ec2_bm_framework_policy = iam.PolicyDocument.from_json(ec2_bm_framework_policies_in_json()) - ssm_bm_framework_policy = iam.PolicyDocument.from_json(ssm_bm_framework_policies_in_json()) - codebuild_inline_policies = {"code_build_batch_policy": code_build_batch_policy, - "ec2_bm_framework_policy": ec2_bm_framework_policy, - "ssm_bm_framework_policy": ssm_bm_framework_policy} - codebuild_role = iam.Role(scope=self, - id="{}-codebuild-role".format(id), - assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), - inline_policies=codebuild_inline_policies, - managed_policies=[ - iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchAgentServerPolicy") - ]) - - # Define CodeBuild. - project = codebuild.Project( - scope=self, - id=id, - project_name=id, - source=git_hub_source, - role=codebuild_role, - timeout=Duration.minutes(120), - environment=codebuild.BuildEnvironment(compute_type=codebuild.ComputeType.SMALL, - privileged=False, - build_image=codebuild.LinuxBuildImage.STANDARD_4_0), - build_spec=BuildSpecLoader.load(spec_file_path)) - project.enable_batch_builds() - - PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=project, ec2_permissions=False) - - # use boto3 to determine if a cloudwatch logs group with the name we want exists, and if it doesn't, create it - logs_client = boto3.client('logs', region_name=AWS_REGION) - try: - logs_client.describe_log_groups(logGroupNamePrefix=CLOUDWATCH_LOGS) - except ClientError: - # define CloudWatch Logs groups - logs.LogGroup(self, "{}-cw-logs".format(id), - log_group_name=CLOUDWATCH_LOGS) diff --git a/tests/ci/cdk/cdk/codebuild/bm_framework_omnibus.yaml b/tests/ci/cdk/cdk/codebuild/bm_framework_omnibus.yaml deleted file mode 100644 index ea1aea37c0..0000000000 --- a/tests/ci/cdk/cdk/codebuild/bm_framework_omnibus.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 OR ISC - -version: 0.2 - -# Doc for batch https://docs.aws.amazon.com/codebuild/latest/userguide/batch-build-buildspec.html#build-spec.batch.build-list -batch: - build-list: - - - identifier: ubuntu2004_bm_framework - buildspec: ./tests/ci/codebuild/linux-x86/run_bm_framework.yml - env: - type: LINUX_CONTAINER - privileged-mode: true - compute-type: BUILD_GENERAL1_LARGE - image: 620771051181.dkr.ecr.us-west-2.amazonaws.com/aws-lc-docker-images-linux-x86:ubuntu-20.04_clang-7x-bm-framework_latest diff --git a/tests/ci/cdk/cdk/components.py b/tests/ci/cdk/cdk/components.py index dd4a7bfab5..2788e75f4b 100644 --- a/tests/ci/cdk/cdk/components.py +++ b/tests/ci/cdk/cdk/components.py @@ -1,75 +1,115 @@ import pathlib +import typing -from aws_cdk import aws_codebuild as codebuild, aws_lambda as lambda_, aws_ecr as ecr, aws_secretsmanager as sm, \ - aws_events as events, aws_events_targets as events_targets, aws_iam as iam, Duration +from aws_cdk import ( + aws_codebuild as codebuild, + aws_lambda as lambda_, + aws_ecr_assets as ecr_assets, + aws_secretsmanager as sm, + aws_events as events, + aws_events_targets as events_targets, + aws_iam as iam, + Duration, + Environment, +) from constructs import Construct -from util.metadata import AWS_REGION, AWS_ACCOUNT, GITHUB_REPO_OWNER, GITHUB_TOKEN_SECRET_NAME +from util.metadata import GITHUB_REPO_OWNER, GITHUB_TOKEN_SECRET_NAME class PruneStaleGitHubBuilds(Construct): - def __init__(self, scope: Construct, id: str, *, project: codebuild.IProject, ec2_permissions: bool) -> None: + def __init__( + self, + scope: Construct, + id: str, + *, + project: codebuild.IProject, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + ec2_permissions: bool + ) -> None: super().__init__(scope, id) - github_token_secret = sm.Secret.from_secret_name_v2(scope=self, - id="{}-GitHubToken".format(id), - secret_name=GITHUB_TOKEN_SECRET_NAME) + github_token_secret = sm.Secret.from_secret_name_v2( + scope=self, + id="{}-GitHubToken".format(id), + secret_name=GITHUB_TOKEN_SECRET_NAME, + ) - lambda_function = lambda_.Function(scope=self, - id="LambdaFunction", - code=lambda_.Code.from_asset_image( - directory=str(pathlib.Path().joinpath("..", "lambda")), - target="purge-stale-builds"), - handler=lambda_.Handler.FROM_IMAGE, - runtime=lambda_.Runtime.FROM_IMAGE, - environment={ - "CODEBUILD_PROJECT_NAME": project.project_name, - "GITHUB_REPO_OWNER": GITHUB_REPO_OWNER, - "GITHUB_TOKEN_SECRET_NAME": github_token_secret.secret_name, - "RUST_LOG": "info", - }) + lambda_function = lambda_.Function( + scope=self, + id="LambdaFunction", + code=lambda_.Code.from_asset_image( + directory=str(pathlib.Path().joinpath("..", "lambda")), + target="purge-stale-builds", + platform=ecr_assets.Platform.LINUX_AMD64, + ), + handler=lambda_.Handler.FROM_IMAGE, + runtime=lambda_.Runtime.FROM_IMAGE, + environment={ + "CODEBUILD_PROJECT_NAME": project.project_name, + "GITHUB_REPO_OWNER": GITHUB_REPO_OWNER, + "GITHUB_TOKEN_SECRET_NAME": github_token_secret.secret_name, + "RUST_LOG": "info", + }, + ) github_token_secret.grant_read(lambda_function) lambda_function.add_to_role_policy( - iam.PolicyStatement(effect=iam.Effect.ALLOW, - actions=[ - "codebuild:BatchGetBuildBatches", - "codebuild:ListBuildBatchesForProject", - "codebuild:StopBuildBatch" - ], - resources=[project.project_arn])) + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=[ + "codebuild:BatchGetBuildBatches", + "codebuild:ListBuildBatchesForProject", + "codebuild:StopBuildBatch", + 
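# (These three actions appear to be the minimum the purge Lambda needs to
# enumerate in-flight batch builds and stop the stale ones; note the statement
# is scoped below to this one project's ARN rather than to all projects.)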
], + resources=[project.project_arn], + ) + ) if ec2_permissions: lambda_function.add_to_role_policy( - iam.PolicyStatement(effect=iam.Effect.ALLOW, - actions=[ - "ec2:TerminateInstances", - ], - resources=["arn:aws:ec2:{}:{}:instance/*".format(AWS_REGION, AWS_ACCOUNT)], - conditions={ - "StringEquals": { - "ec2:ResourceTag/ec2-framework-host": "ec2-framework-host" - } - })) + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=[ + "ec2:TerminateInstances", + ], + resources=[ + "arn:aws:ec2:{}:{}:instance/*".format(env.region, env.account) + ], + conditions={ + "StringEquals": { + "ec2:ResourceTag/ec2-framework-host": "ec2-framework-host" + } + }, + ) + ) # ec2:Describe* API actions do not support resource-level permissions. lambda_function.add_to_role_policy( - iam.PolicyStatement(effect=iam.Effect.ALLOW, - actions=[ - "ec2:DescribeInstances", - ], - resources=["*"])) + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=[ + "ec2:DescribeInstances", + ], + resources=["*"], + ) + ) lambda_function.add_to_role_policy( - iam.PolicyStatement(effect=iam.Effect.ALLOW, - actions=[ - "ssm:ListDocuments", - "ssm:DeleteDocument", - ], - resources=["arn:aws:ssm:{}:{}:*".format(AWS_REGION, AWS_ACCOUNT)])) + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=[ + "ssm:ListDocuments", + "ssm:DeleteDocument", + ], + resources=["arn:aws:ssm:{}:{}:*".format(env.region, env.account)], + ) + ) - - events.Rule(scope=self, id="PurgeEventRule", - description="Purge stale GitHub codebuild jobs and ec2 instances (once per minute)", - enabled=True, - schedule=events.Schedule.rate(Duration.minutes(1)), - targets=[events_targets.LambdaFunction(handler=lambda_function)]) + events.Rule( + scope=self, + id="PurgeEventRule", + description="Purge stale GitHub codebuild jobs and ec2 instances (once per minute)", + enabled=True, + schedule=events.Schedule.rate(Duration.minutes(1)), + targets=[events_targets.LambdaFunction(handler=lambda_function)], + ) diff --git a/tests/ci/cdk/cdk/ecr_stack.py b/tests/ci/cdk/cdk/ecr_stack.py index ff8a4b54ed..61cedad148 100644 --- a/tests/ci/cdk/cdk/ecr_stack.py +++ b/tests/ci/cdk/cdk/ecr_stack.py @@ -1,7 +1,7 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 OR ISC -from aws_cdk import Stack, aws_ecr as ecr, aws_iam as iam +from aws_cdk import Stack, Duration, aws_ecr as ecr, aws_iam as iam from constructs import Construct @@ -14,3 +14,19 @@ def __init__(self, scope: Construct, id: str, repo_name: str, **kwargs) -> None: repo = ecr.Repository(scope=self, id=id, repository_name=repo_name) repo.grant_pull_push(iam.ServicePrincipal("codebuild.amazonaws.com")) repo.grant_pull(iam.ArnPrincipal("arn:aws:iam::222961743098:role/scrutini-ecr")) + repo.add_lifecycle_rule( + description="Retain latest images", + tag_pattern_list=["*_latest"], + max_image_age=Duration.days(7300), + ) + + repo.add_lifecycle_rule( + description="Expire images older than 1 month", + max_image_age=Duration.days(30), + ) + + repo.add_lifecycle_rule( + description="Remove untagged images after 1 day", + tag_status=ecr.TagStatus.UNTAGGED, + max_image_age=Duration.days(1), + ) diff --git a/tests/ci/cdk/cdk/linux_docker_image_batch_build_stack.py b/tests/ci/cdk/cdk/linux_docker_image_batch_build_stack.py index c350d0a22e..1b32874e9d 100644 --- a/tests/ci/cdk/cdk/linux_docker_image_batch_build_stack.py +++ b/tests/ci/cdk/cdk/linux_docker_image_batch_build_stack.py @@ -1,53 +1,99 @@ # Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 OR ISC +import typing -from aws_cdk import Duration, Stack, aws_codebuild as codebuild, aws_iam as iam, aws_ec2 as ec2 +from aws_cdk import ( + Duration, + Stack, + aws_codebuild as codebuild, + aws_iam as iam, + aws_ec2 as ec2, + Environment, +) from constructs import Construct -from util.metadata import AWS_ACCOUNT, GITHUB_REPO_OWNER, GITHUB_REPO_NAME, GITHUB_SOURCE_VERSION, LINUX_AARCH_ECR_REPO, \ - LINUX_X86_ECR_REPO -from util.iam_policies import code_build_batch_policy_in_json, ecr_power_user_policy_in_json +from util.metadata import ( + GITHUB_REPO_OWNER, + GITHUB_REPO_NAME, + GITHUB_SOURCE_VERSION, + LINUX_AARCH_ECR_REPO, + LINUX_X86_ECR_REPO, +) +from util.iam_policies import ( + code_build_batch_policy_in_json, + ecr_power_user_policy_in_json, +) from util.yml_loader import YmlLoader class LinuxDockerImageBatchBuildStack(Stack): - """Define a temporary stack used to batch build Linux Docker images. After build, this stack will be destroyed.""" + """Define a temporary stack used to batch build Linux Docker images.""" - def __init__(self, scope: Construct, id: str, **kwargs) -> None: - super().__init__(scope, id, **kwargs) + def __init__( + self, + scope: Construct, + id: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs + ) -> None: + super().__init__(scope, id, env=env, **kwargs) + + github_repo_owner = GITHUB_REPO_OWNER + github_repo_name = GITHUB_REPO_NAME # Define CodeBuild resource. git_hub_source = codebuild.Source.git_hub( - owner=GITHUB_REPO_OWNER, - repo=GITHUB_REPO_NAME, + owner=github_repo_owner, + repo=github_repo_name, webhook=False, branch_or_ref=GITHUB_SOURCE_VERSION, - clone_depth=1) + clone_depth=1, + ) # Define a role. - code_build_batch_policy = iam.PolicyDocument.from_json(code_build_batch_policy_in_json([id])) + code_build_batch_policy = iam.PolicyDocument.from_json( + code_build_batch_policy_in_json([id], env) + ) ecr_repo_names = [LINUX_AARCH_ECR_REPO, LINUX_X86_ECR_REPO] - ecr_power_user_policy = iam.PolicyDocument.from_json(ecr_power_user_policy_in_json(ecr_repo_names)) - inline_policies = {"code_build_batch_policy": code_build_batch_policy, - "ecr_power_user_policy": ecr_power_user_policy} - role = iam.Role(scope=self, - id="{}-role".format(id), - assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), - inline_policies=inline_policies) + ecr_power_user_policy = iam.PolicyDocument.from_json( + ecr_power_user_policy_in_json(ecr_repo_names, env) + ) + inline_policies = { + "code_build_batch_policy": code_build_batch_policy, + "ecr_power_user_policy": ecr_power_user_policy, + } + role = iam.Role( + scope=self, + id="{}-role".format(id), + assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"), + inline_policies=inline_policies, + ) # Create build spec. - build_spec_content = YmlLoader.load("./cdk/codebuild/linux_img_build_omnibus.yaml") + build_spec_content = YmlLoader.load( + "./cdk/codebuild/linux_img_build_omnibus.yaml" + ) # Define environment variables. 
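# These variables are consumed by linux_img_build_omnibus.yaml at build time.
# BuildEnvironmentVariable defaults to the PLAINTEXT type, which is fine here
# since none of these values are secrets.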
environment_variables = { - "AWS_ACCOUNT_ID": codebuild.BuildEnvironmentVariable(value=AWS_ACCOUNT), - "AWS_ECR_REPO_X86": codebuild.BuildEnvironmentVariable(value=LINUX_X86_ECR_REPO), - "AWS_ECR_REPO_AARCH": codebuild.BuildEnvironmentVariable(value=LINUX_AARCH_ECR_REPO), - "GITHUB_REPO_OWNER": codebuild.BuildEnvironmentVariable(value=GITHUB_REPO_OWNER), + "AWS_ACCOUNT_ID": codebuild.BuildEnvironmentVariable(value=env.account), + "AWS_ECR_REPO_X86": codebuild.BuildEnvironmentVariable( + value=LINUX_X86_ECR_REPO + ), + "AWS_ECR_REPO_AARCH": codebuild.BuildEnvironmentVariable( + value=LINUX_AARCH_ECR_REPO + ), + "GITHUB_REPO_OWNER": codebuild.BuildEnvironmentVariable( + value=GITHUB_REPO_OWNER + ), } # Define VPC - vpc = ec2.Vpc(self, id="{}-ec2-vpc".format(id)) + vpc = ec2.Vpc( + self, + id="{}-ec2-vpc".format(id), + nat_gateways=1 # minimize the number of idle NAT gateways and thus elastic IPs + ) # Define CodeBuild project. project = codebuild.Project( @@ -59,9 +105,11 @@ def __init__(self, scope: Construct, id: str, **kwargs) -> None: environment=codebuild.BuildEnvironment( compute_type=codebuild.ComputeType.SMALL, privileged=False, - build_image=codebuild.LinuxBuildImage.STANDARD_4_0), + build_image=codebuild.LinuxBuildImage.STANDARD_4_0, + ), environment_variables=environment_variables, role=role, timeout=Duration.minutes(180), - build_spec=codebuild.BuildSpec.from_object(build_spec_content)) + build_spec=codebuild.BuildSpec.from_object(build_spec_content), + ) project.enable_batch_builds() diff --git a/tests/ci/cdk/cdk/ssm/bm_framework_ssm_document.yaml b/tests/ci/cdk/cdk/ssm/bm_framework_ssm_document.yaml deleted file mode 100644 index 6264088f24..0000000000 --- a/tests/ci/cdk/cdk/ssm/bm_framework_ssm_document.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 OR ISC - ---- -schemaVersion: '2.2' -description: aws-lc:bmFrameworkEc2Benchmark -mainSteps: - - action: aws:runShellScript - name: runShellScript - inputs: - timeoutSeconds: '7200' - runCommand: - - sudo -i - - export DEBIAN_FRONTEND=noninteractive - - export CPU_TYPE=$(dpkg --print-architecture) - # if we want to disable CPU features, pass in the {OPENSSL_ia32cap} value - - if [ {OPENSSL_ia32cap} ]; then export OPENSSL_ia32cap={OPENSSL_ia32cap}; fi - - echo "${OPENSSL_ia32cap}" - # if we have a cpu type of x86, we want linux-x86 - - if [ "${CPU_TYPE}" = amd64 ]; then export CPU_ARCH=linux-x86; export AWS_CLI_PREFIX=x86_; sudo sh -c "echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo"; fi - # if we have a cpu type of arm, we want linux-aarch - - if [ "${CPU_TYPE}" = arm64 ]; then export CPU_ARCH=linux-aarch; export AWS_CLI_PREFIX=aarch; fi - # install aws-cli - - apt-get -y install unzip - - curl "https://awscli.amazonaws.com/awscli-exe-linux-${AWS_CLI_PREFIX}64.zip" -o "awscliv2.zip" - - unzip awscliv2.zip - - ./aws/install - # create bm_framework directory and checkout aws-lc - - mkdir bm_framework - - cd bm_framework - - git clone {GITHUB_REPO} aws-lc-pr - - cd aws-lc-pr - - git checkout {COMMIT_ID} - - cd ../ - # install docker if its not already installed - - chmod +x aws-lc-pr/tests/ci/benchmark_framework/install_docker.sh - - ./aws-lc-pr/tests/ci/benchmark_framework/install_docker.sh - # log into docker and get needed docker image from ecr - - export ECR_REPO="{AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/aws-lc-docker-images-${CPU_ARCH}" - - docker login -u AWS -p $(aws ecr get-login-password) https://"${ECR_REPO}" - - docker pull "${ECR_REPO}:ubuntu-20.04_clang-7x-bm-framework_latest" - - # start the container and run the bm script - - exec_docker="docker run --env PR_FOLDER_NAME=aws-lc-pr --env OPENSSL_ia32cap=${OPENSSL_ia32cap} --env AWS_ACCOUNT_ID={AWS_ACCOUNT_ID} --env PR_NUM={PR_NUM} --env COMMIT_ID={COMMIT_ID} --env CPU_TYPE=${CPU_TYPE} --env NOHW_TYPE={NOHW_TYPE} -v `pwd`:`pwd` -w `pwd` ${ECR_REPO}:ubuntu-20.04_clang-7x-bm-framework_latest" - - chmod +x aws-lc-pr/tests/ci/build_run_benchmarks.sh - - $exec_docker ./aws-lc-pr/tests/ci/build_run_benchmarks.sh \ No newline at end of file diff --git a/tests/ci/cdk/cdk/ssm/windows_docker_build_ssm_document.yaml b/tests/ci/cdk/cdk/ssm/windows_docker_build_ssm_document.yaml index b5288d9370..3d2a80eff8 100644 --- a/tests/ci/cdk/cdk/ssm/windows_docker_build_ssm_document.yaml +++ b/tests/ci/cdk/cdk/ssm/windows_docker_build_ssm_document.yaml @@ -10,6 +10,10 @@ mainSteps: inputs: timeoutSeconds: '7200' runCommand: + - $ErrorActionPreference = 'Stop' + - $TRIGGER_TYPE = '{{ TriggerType }}' + - "Get-ChildItem Env: | Sort-Object Name" + - if (Test-Path "docker-images") { Remove-Item -Recurse -Force "docker-images" } - mkdir docker-images - cd docker-images - Set-ExecutionPolicy Bypass -Scope Process -Force; [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12; $env:chocolateyUseWindowsCompression = 'true'; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1')) | Out-Null @@ -23,3 +27,7 @@ mainSteps: - Invoke-Expression -Command (Get-ECRLoginCommand -Region REGION_PLACEHOLDER).Command - .\build_images.ps1 - .\push_images.ps1 ECR_PLACEHOLDER +parameters: + TriggerType: 
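# Populated by the pipeline scripts, roughly:
#   aws ssm send-command ... --parameters 'TriggerType=["pipeline"]'
# (see start_windows_img_build in pipeline/scripts/util.sh); ad-hoc runs pass "manual".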
+ type: String + description: Specifies how this command was initiated. diff --git a/tests/ci/cdk/cdk/windows_docker_image_build_stack.py b/tests/ci/cdk/cdk/windows_docker_image_build_stack.py index fa1079c7ed..45627f4b51 100644 --- a/tests/ci/cdk/cdk/windows_docker_image_build_stack.py +++ b/tests/ci/cdk/cdk/windows_docker_image_build_stack.py @@ -1,74 +1,86 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 OR ISC +import typing from aws_cdk import ( Stack, Tags, + PhysicalName, + Environment, aws_ec2 as ec2, aws_s3 as s3, aws_iam as iam, aws_ssm as ssm, ) from constructs import Construct + from util.iam_policies import ( ecr_power_user_policy_in_json, s3_read_write_policy_in_json, ) from util.metadata import ( - AWS_ACCOUNT, - AWS_REGION, WINDOWS_X86_ECR_REPO, - S3_BUCKET_NAME, GITHUB_REPO_OWNER, WIN_EC2_TAG_KEY, WIN_EC2_TAG_VALUE, SSM_DOCUMENT_NAME, GITHUB_SOURCE_VERSION, + S3_FOR_WIN_DOCKER_IMG_BUILD, ) from util.yml_loader import YmlLoader class WindowsDockerImageBuildStack(Stack): - """Define a temporary stack used to build Windows Docker images. After build, this stack will be destroyed.""" + """Define a temporary stack used to build Windows Docker images.""" - def __init__(self, scope: Construct, id: str, **kwargs) -> None: - super().__init__(scope, id, **kwargs) + def __init__( + self, + scope: Construct, + id: str, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs, + ) -> None: + super().__init__(scope, id, env=env, **kwargs) # Define SSM command document. + # ecr_uri = ecr_windows_x86.ecr_repo.repository_uri ecr_repo = "{}.dkr.ecr.{}.amazonaws.com/{}".format( - AWS_ACCOUNT, AWS_REGION, WINDOWS_X86_ECR_REPO + env.account, env.region, WINDOWS_X86_ECR_REPO ) + placeholder_map = { "ECR_PLACEHOLDER": ecr_repo, "GITHUB_OWNER_PLACEHOLDER": GITHUB_REPO_OWNER, - "REGION_PLACEHOLDER": AWS_REGION, + "REGION_PLACEHOLDER": env.region, "GITHUB_SOURCE_VERSION_PLACEHOLDER": GITHUB_SOURCE_VERSION, } content = YmlLoader.load( "./cdk/ssm/windows_docker_build_ssm_document.yaml", placeholder_map ) + ssm.CfnDocument( scope=self, id="{}-ssm-document".format(id), name=SSM_DOCUMENT_NAME, content=content, document_type="Command", + update_method="NewVersion", ) # Define a S3 bucket to store windows docker files and build scripts. - s3.Bucket( + bucket = s3.Bucket( scope=self, id="{}-s3".format(id), - bucket_name=S3_BUCKET_NAME, + bucket_name=f"{env.account}-{S3_FOR_WIN_DOCKER_IMG_BUILD}", block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Define a role for EC2. 
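# The instance role below combines two scoped policies: ECR power-user access
# limited to the Windows repo, and S3 read/write limited to the bucket created
# above, which holds the docker files and (per util.sh) the SSM command output.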
ecr_power_user_policy = iam.PolicyDocument.from_json( - ecr_power_user_policy_in_json([WINDOWS_X86_ECR_REPO]) + ecr_power_user_policy_in_json([WINDOWS_X86_ECR_REPO], env) ) s3_read_write_policy = iam.PolicyDocument.from_json( - s3_read_write_policy_in_json(S3_BUCKET_NAME) + s3_read_write_policy_in_json(bucket.bucket_name) ) inline_policies = { "ecr_power_user_policy": ecr_power_user_policy, @@ -90,7 +102,11 @@ def __init__(self, scope: Construct, id: str, **kwargs) -> None: machine_image = ec2.MachineImage.latest_windows( ec2.WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_FULL_BASE ) - vpc = ec2.Vpc(scope=self, id="{}-vpc".format(id)) + vpc = ec2.Vpc( + scope=self, + id="{}-vpc".format(id), + nat_gateways=1 # minimize the number of idle NAT gateways and thus elastic IPs + ) block_device_volume = ec2.BlockDeviceVolume.ebs( volume_size=200, delete_on_termination=True ) @@ -119,6 +135,11 @@ def __init__(self, scope: Construct, id: str, **kwargs) -> None: vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC), machine_image=machine_image, user_data=setup_user_data, + instance_name="{}-instance".format(id), ) Tags.of(instance).add(WIN_EC2_TAG_KEY, WIN_EC2_TAG_VALUE) + + self.output = { + "s3_bucket_name": f"{env.account}-{S3_FOR_WIN_DOCKER_IMG_BUILD}", + } diff --git a/tests/ci/cdk/pipeline/__init__.py b/tests/ci/cdk/pipeline/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/ci/cdk/pipeline/ci_stage.py b/tests/ci/cdk/pipeline/ci_stage.py new file mode 100644 index 0000000000..74d3ed65aa --- /dev/null +++ b/tests/ci/cdk/pipeline/ci_stage.py @@ -0,0 +1,155 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 OR ISC +import builtins +import re +import typing + +from aws_cdk import ( + Stage, + Environment, + Duration, + pipelines, + aws_iam as iam, + aws_codebuild as codebuild, +) +from constructs import Construct + +from cdk.aws_lc_base_ci_stack import AwsLcBaseCiStack +from pipeline.ci_util import add_ci_stacks +from pipeline.codebuild_batch_step import CodeBuildBatchStep +from util.metadata import ( + PRE_PROD_ACCOUNT, + GITHUB_TOKEN_SECRET_NAME, + STAGING_GITHUB_REPO_OWNER, + STAGING_GITHUB_REPO_NAME, +) + + +class CiStage(Stage): + def __init__( + self, + scope: Construct, + id: str, + pipeline_environment: typing.Union[Environment, typing.Dict[str, typing.Any]], + deploy_environment: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs, + ): + super().__init__( + scope, + id, + env=pipeline_environment, + **kwargs, + ) + + # Add CodeBuild Batch job for testing code. 
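# add_ci_stacks() instantiates every CI stack as a child of this Stage, so the
# `stacks` property below can discover them via node.children. Rough usage,
# mirroring pipeline_stack.py:
#   ci_stage = CiStage(self, "Staging-CiTests",
#                      pipeline_environment=pipeline_env,
#                      deploy_environment=deploy_env)
#   ci_stage.add_stage_to_pipeline(pipeline=pipeline,
#                                  input=source.primary_output,
#                                  role=cross_account_role)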
+ add_ci_stacks(self, env=deploy_environment) + + @property + def stacks(self) -> typing.List[AwsLcBaseCiStack]: + return [ + child for child in self.node.children if isinstance(child, AwsLcBaseCiStack) + ] + + def add_stage_to_pipeline( + self, + pipeline: pipelines.CodePipeline, + input: pipelines.FileSet, + role: iam.Role, + max_retry: typing.Optional[int] = 2, + env: typing.Optional[typing.Mapping[str, str]] = None, + ): + stack_names = [stack.stack_name for stack in self.stacks] + + private_repo_sync_step = None + + if self.stacks[0].account == PRE_PROD_ACCOUNT: + private_repo_sync_step = pipelines.CodeBuildStep( + "PrivateRepoSync", + build_environment=codebuild.BuildEnvironment( + environment_variables={ + "GITHUB_PAT": codebuild.BuildEnvironmentVariable( + type=codebuild.BuildEnvironmentVariableType.SECRETS_MANAGER, + value=GITHUB_TOKEN_SECRET_NAME, + ), + } + ), + commands=[ + "env", + 'curl -H "Authorization: token ${GITHUB_PAT}" https://api.github.com/user', + "git clone https://${GITHUB_PAT}@github.com/${STAGING_GITHUB_REPO_OWNER}/${STAGING_GITHUB_REPO_NAME}.git", + "git remote add upstream https://github.com/aws/aws-lc.git", + "git fetch upstream", + "git checkout main", + "git merge --ff-only upstream/main", + "git remote set-url origin https://${GITHUB_PAT}@github.com/${STAGING_GITHUB_REPO_OWNER}/${STAGING_GITHUB_REPO_NAME}.git", + "git push origin main", + ], + env={ + "STAGING_GITHUB_REPO_OWNER": STAGING_GITHUB_REPO_OWNER, + "STAGING_GITHUB_REPO_NAME": STAGING_GITHUB_REPO_NAME, + }, + role=role, + timeout=Duration.minutes(60), + ) + + env = env or {} + + prebuild_check_step = pipelines.CodeBuildStep( + "PrebuildCheck", + input=input, + commands=[ + "cd tests/ci/cdk/pipeline/scripts", + 'trigger_conditions=$(./check_trigger_conditions.sh --build-type ci --stacks "${STACKS}")', + "export NEED_REBUILD=$(echo $trigger_conditions | sed -n 's/.*\(NEED_REBUILD=[0-9]*\).*/\\1/p' | cut -d'=' -f2 )", + ], + env={ + **env, + "STACKS": " ".join(stack_names), + }, + role=role, + timeout=Duration.minutes(60), + ) + + batch_timeout = max([stack.timeout for stack in self.stacks]) * (max_retry + 1) + batch_build_jobs = { + "build-list": [ + { + "identifier": re.sub(r"[^a-zA-Z0-9]", "_", stack.stack_name), + "ignore-failure": stack.ignore_failure, + "env": { + "variables": { + "PROJECT": stack.stack_name, + "TIMEOUT": batch_timeout, + } + }, + } + for stack in self.stacks + ] + } + + ci_run_step = CodeBuildBatchStep( + f"BuildStep", + action_name="StartWait", + input=input, + commands=[ + "cd tests/ci/cdk/pipeline/scripts", + "./build_target.sh --build-type ci --project ${PROJECT} --max-retry ${MAX_RETRY} --timeout ${TIMEOUT}", + ], + role=role, + timeout=batch_timeout, + project_description=f"Pipeline step AwsLcCiPipeline/{self.stage_name}/StartWait", + partial_batch_build_spec=batch_build_jobs, + env={ + **env, + "MAX_RETRY": max_retry, + "NEED_REBUILD": prebuild_check_step.exported_variable("NEED_REBUILD"), + }, + ) + + ci_run_step.add_step_dependency(prebuild_check_step) + + pipeline.add_stage( + self, + pre=[private_repo_sync_step] if private_repo_sync_step else None, + post=[prebuild_check_step, ci_run_step], + ) diff --git a/tests/ci/cdk/pipeline/ci_util.py b/tests/ci/cdk/pipeline/ci_util.py new file mode 100644 index 0000000000..55337efc93 --- /dev/null +++ b/tests/ci/cdk/pipeline/ci_util.py @@ -0,0 +1,99 @@ +import typing + +from aws_cdk import Environment +from constructs import Construct + +from cdk.aws_lc_analytics_stack import AwsLcGitHubAnalyticsStack +from 
cdk.aws_lc_android_ci_stack import AwsLcAndroidCIStack +from cdk.aws_lc_ec2_test_framework_ci_stack import AwsLcEC2TestingCIStack +from cdk.aws_lc_github_ci_stack import AwsLcGitHubCIStack +from cdk.aws_lc_github_fuzz_ci_stack import AwsLcGitHubFuzzCIStack + + +# Define CodeBuild Batch jobs for testing code. +def add_ci_stacks( + scope: Construct, + env: typing.Union[Environment, typing.Dict[str, typing.Any]], +): + # define customized settings to run CodeBuild jobs from CodePipeline + build_options = [] + + x86_build_spec_file = "cdk/codebuild/github_ci_linux_x86_omnibus.yaml" + AwsLcGitHubCIStack( + scope, + "aws-lc-ci-linux-x86", + x86_build_spec_file, + env=env, + ignore_failure=False, + stack_name="aws-lc-ci-linux-x86", + ) + + arm_build_spec_file = "cdk/codebuild/github_ci_linux_arm_omnibus.yaml" + AwsLcGitHubCIStack( + scope, + "aws-lc-ci-linux-arm", + arm_build_spec_file, + env=env, + ignore_failure=False, + stack_name="aws-lc-ci-linux-arm", + ) + + integration_build_spec_file = "cdk/codebuild/github_ci_integration_omnibus.yaml" + AwsLcGitHubCIStack( + scope, + "aws-lc-ci-integration", + integration_build_spec_file, + env=env, + ignore_failure=True, + stack_name="aws-lc-ci-integration", + ) + + fuzz_build_spec_file = "cdk/codebuild/github_ci_fuzzing_omnibus.yaml" + AwsLcGitHubFuzzCIStack( + scope, + "aws-lc-ci-fuzzing", + fuzz_build_spec_file, + env=env, + ignore_failure=False, + stack_name="aws-lc-ci-fuzzing", + ) + + analytics_build_spec_file = "cdk/codebuild/github_ci_analytics_omnibus.yaml" + AwsLcGitHubAnalyticsStack( + scope, + "aws-lc-ci-analytics", + analytics_build_spec_file, + env=env, + ignore_failure=True, + stack_name="aws-lc-ci-analytics", + ) + + ec2_test_framework_build_spec_file = "cdk/codebuild/ec2_test_framework_omnibus.yaml" + AwsLcEC2TestingCIStack( + scope, + "aws-lc-ci-ec2-test-framework", + ec2_test_framework_build_spec_file, + env=env, + ignore_failure=True, + stack_name="aws-lc-ci-ec2-test-framework", + ) + + android_build_spec_file = "cdk/codebuild/github_ci_android_omnibus.yaml" + AwsLcAndroidCIStack( + scope, + "aws-lc-ci-devicefarm-android", + android_build_spec_file, + env=env, + ignore_failure=False, + stack_name="aws-lc-ci-devicefarm-android", + ) + + win_x86_build_spec_file = "cdk/codebuild/github_ci_windows_x86_omnibus.yaml" + AwsLcGitHubCIStack( + scope, + "aws-lc-ci-windows-x86", + win_x86_build_spec_file, + env=env, + ignore_failure=False, + stack_name="aws-lc-ci-windows-x86", + ) diff --git a/tests/ci/cdk/pipeline/codebuild_batch_step.py b/tests/ci/cdk/pipeline/codebuild_batch_step.py new file mode 100644 index 0000000000..b7cd6afaf8 --- /dev/null +++ b/tests/ci/cdk/pipeline/codebuild_batch_step.py @@ -0,0 +1,98 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 OR ISC +import builtins +import re +import typing + +import jsii +from aws_cdk import ( + Duration, + pipelines, + aws_codepipeline_actions as cp_actions, + aws_codebuild as codebuild, + aws_codepipeline as codepipeline, + aws_iam as iam, +) + + +@jsii.implements(pipelines.ICodePipelineActionFactory) +class CodeBuildBatchStep(pipelines.Step): + """ + Create a CodeBuildBatchStep given shell commands and batch build settings. + + :param id: The id of the step. + :param input: The input file set producer. + :param action_name: Name of the action produced by this step. + :param commands: The CodeBuild commands to be run. + :param partial_batch_build_spec: The batch build settings for the project. 
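        (merged under the top-level ``batch`` key of the generated buildspec,
        e.g. a ``build-list`` with one job per stack)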
+ :param role: The role to use for the CodeBuild project. + :param timeout: Timeout of the batch build project, in minutes. + :param env: The environment variables to use for the CodeBuild project. + + :return: A new CodeBuildBatchStep. + """ + + def __init__( + self, + id, + input: pipelines.FileSet, + action_name: str, + commands: typing.List[str], + partial_batch_build_spec: typing.Mapping[builtins.str, typing.Any], + role: iam.Role, + timeout: int = 300, + project_description: str = None, + env: typing.Optional[typing.Mapping[str, str]] = None, + ): + super().__init__(id) + + self._discover_referenced_outputs(env) + + self.input = input + self.action_name = action_name + self.commands = commands + self.partial_batch_build_spec = partial_batch_build_spec + self.role = role + self.timeout = timeout + self.project_description = project_description + self.env = ( + { + key: codebuild.BuildEnvironmentVariable(value=value) + for key, value in env.items() + } + if env + else {} + ) + + def produce_action( + self, + stage: codepipeline.IStage, + options: pipelines.ProduceActionOptions, + ) -> pipelines.CodePipelineActionFactoryResult: + batch_build_project = codebuild.PipelineProject( + options.scope, + self.action_name, + build_spec=codebuild.BuildSpec.from_object( + { + "version": 0.2, + "batch": self.partial_batch_build_spec, + "phases": {"build": {"commands": self.commands}}, + } + ), + role=self.role, + description=self.project_description, + timeout=Duration.minutes(self.timeout), + ) + + batch_build_action = cp_actions.CodeBuildAction( + action_name=self.action_name, + input=options.artifacts.to_code_pipeline(self.input), + run_order=options.run_order, + project=batch_build_project, + execute_batch_build=True, + environment_variables=self.env, + ) + + stage.add_action(batch_build_action) + + return pipelines.CodePipelineActionFactoryResult(run_orders_consumed=1) diff --git a/tests/ci/cdk/pipeline/linux_docker_image_build_stage.py b/tests/ci/cdk/pipeline/linux_docker_image_build_stage.py new file mode 100644 index 0000000000..4ad90a1220 --- /dev/null +++ b/tests/ci/cdk/pipeline/linux_docker_image_build_stage.py @@ -0,0 +1,102 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 OR ISC +import typing + +from aws_cdk import Stage, Environment, Stack, Duration, aws_iam as iam, pipelines +from aws_cdk.pipelines import CodeBuildStep +from constructs import Construct + +from cdk.ecr_stack import EcrStack +from cdk.linux_docker_image_batch_build_stack import LinuxDockerImageBatchBuildStack +from util.metadata import LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO + + +class LinuxDockerImageBuildStage(Stage): + def __init__( + self, + scope: Construct, + id: str, + pipeline_environment: typing.Union[Environment, typing.Dict[str, typing.Any]], + deploy_environment: typing.Union[Environment, typing.Dict[str, typing.Any]], + **kwargs + ): + super().__init__( + scope, + id, + env=pipeline_environment, + **kwargs, + ) + + # Define AWS ECR stacks. + # ECR holds the docker images, which are pre-built to accelerate the code builds/tests of git pull requests. + self.ecr_linux_x86_stack = EcrStack( + self, + "aws-lc-ecr-linux-x86", + LINUX_X86_ECR_REPO, + env=deploy_environment, + stack_name="aws-lc-ecr-linux-x86", + ) + self.ecr_linux_aarch_stack = EcrStack( + self, + "aws-lc-ecr-linux-aarch", + LINUX_AARCH_ECR_REPO, + env=deploy_environment, + stack_name="aws-lc-ecr-linux-aarch", + ) + + # Define CodeBuild Batch job for building Docker images. 
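# The image-build stack depends on both ECR stacks so the repositories exist
# before any docker push. The NEED_REBUILD value exported by the build step
# below feeds PREVIOUS_REBUILDS in the CI stage, so the CI tests re-run
# whenever the Linux images were rebuilt.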
+ self.linux_docker_build_stack = LinuxDockerImageBatchBuildStack( + self, + "aws-lc-docker-image-build-linux", + env=deploy_environment, + stack_name="aws-lc-docker-image-build-linux", + ) + self.linux_docker_build_stack.add_dependency(self.ecr_linux_x86_stack) + self.linux_docker_build_stack.add_dependency(self.ecr_linux_aarch_stack) + + self.ecr_repo_names = [LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO] + self.need_rebuild = None + + @property + def stacks(self) -> typing.List[Stack]: + return [child for child in self.node.children if isinstance(child, Stack)] + + def add_stage_to_wave( + self, + wave: pipelines.Wave, + input: pipelines.FileSet, + role: iam.Role, + max_retry: typing.Optional[int] = 2, + additional_stacks: typing.Optional[typing.List[str]] = None, + env: typing.Optional[typing.Mapping[str, str]] = None, + ): + stacks = self.stacks + (additional_stacks if additional_stacks else []) + stack_names = [stack.stack_name for stack in stacks] + + env = env if env else {} + timeout = (max_retry + 1) * 120 + + docker_build_step = CodeBuildStep( + "StartWait", + input=input, + commands=[ + "cd tests/ci/cdk/pipeline/scripts", + './cleanup_orphaned_images.sh --repos "${ECR_REPOS}"', + 'trigger_conditions=$(./check_trigger_conditions.sh --build-type docker --platform linux --stacks "${STACKS}")', + "export NEED_REBUILD=$(echo $trigger_conditions | sed -n -e 's/.*\(NEED_REBUILD=[0-9]*\).*/\\1/p' | cut -d'=' -f2 )", + "./build_target.sh --build-type docker --platform linux --max-retry ${MAX_RETRY} --timeout ${TIMEOUT}", + ], + env={ + **env, + "STACKS": " ".join(stack_names), + "ECR_REPOS": " ".join(self.ecr_repo_names), + "MAX_RETRY": str(max_retry), + "TIMEOUT": str(timeout), + }, + role=role, + timeout=Duration.minutes(timeout), + ) + + wave.add_stage(self, post=[docker_build_step]) + + self.need_rebuild = docker_build_step.exported_variable("NEED_REBUILD") diff --git a/tests/ci/cdk/pipeline/pipeline_stack.py b/tests/ci/cdk/pipeline/pipeline_stack.py new file mode 100644 index 0000000000..623ab1efd7 --- /dev/null +++ b/tests/ci/cdk/pipeline/pipeline_stack.py @@ -0,0 +1,314 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 OR ISC +import typing +from enum import Enum + +from aws_cdk import Stack, Environment, Duration +from aws_cdk import ( + pipelines, + aws_codestarconnections as codestarconnections, + aws_codepipeline as codepipeline, + aws_iam as iam, + aws_events as events, + aws_events_targets as targets, + aws_codebuild as codebuild, +) +from aws_cdk.pipelines import CodeBuildStep +from constructs import Construct + +from pipeline.ci_stage import CiStage +from pipeline.linux_docker_image_build_stage import LinuxDockerImageBuildStage +from pipeline.setup_stage import SetupStage +from pipeline.windows_docker_image_build_stage import WindowsDockerImageBuildStage +from util.metadata import * + + +class DeployEnvironmentType(Enum): + PRE_PROD = "Staging" + PROD = "Prod" + DEV = "Dev" + + +class AwsLcCiPipeline(Stack): + def __init__( + self, + scope: Construct, + id: str, + **kwargs, + ) -> None: + super().__init__( + scope, + id, + **kwargs, + ) + + gh_connection = codestarconnections.CfnConnection( + self, + "GitHubConnection", + connection_name="AwsLcCiPipelineGitHubConnection", + provider_type="GitHub", + ) + + cross_account_role = iam.Role( + self, + "CrossAccountPipelineRole", + role_name="CrossAccountPipelineRole", + assumed_by=iam.CompositePrincipal( + iam.ServicePrincipal("codebuild.amazonaws.com"), + iam.ServicePrincipal("codepipeline.amazonaws.com"), + ), + ) + + cross_account_role.add_to_policy( + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + resources=["*"], + actions=[ + "codepipeline:GetPipelineExecution", + "secretsmanager:GetSecretValue", + "kms:Decrypt", + ], + ) + ) + + source = pipelines.CodePipelineSource.connection( + f"{GITHUB_REPO_OWNER}/{GITHUB_REPO_NAME}", + GITHUB_SOURCE_VERSION, + connection_arn=gh_connection.attr_connection_arn, + code_build_clone_output=True, + ) + + # Create a base pipeline to upgrade the default pipeline type + base_pipeline = codepipeline.Pipeline( + self, + "AwsLcCiPipeline", + execution_mode=codepipeline.ExecutionMode.QUEUED, + pipeline_type=codepipeline.PipelineType.V2, + pipeline_name="AwsLcCiPipeline", + cross_account_keys=True, + enable_key_rotation=True, + restart_execution_on_update=True, + ) + + # Bucket contains artifacts from old pipeline executions + # These artifacts are kept for 60 days in case we need to do a rollback + base_pipeline.artifact_bucket.add_lifecycle_rule( + enabled=True, + expiration=Duration.days(60), + ) + + cdk_env = { + "GITHUB_REPO_OWNER": GITHUB_REPO_OWNER, + "GITHUB_REPO_NAME": GITHUB_REPO_NAME, + "GITHUB_SOURCE_VERSION": GITHUB_SOURCE_VERSION, + "GITHUB_TOKEN_SECRET_NAME": GITHUB_TOKEN_SECRET_NAME, + "PIPELINE_ACCOUNT": PIPELINE_ACCOUNT, + "PIPELINE_REGION": PIPELINE_REGION, + "WIN_EC2_TAG_KEY": WIN_EC2_TAG_KEY, + "WIN_EC2_TAG_VALUE": WIN_EC2_TAG_VALUE, + "WIN_DOCKER_BUILD_SSM_DOCUMENT": SSM_DOCUMENT_NAME, + "LINUX_AARCH_ECR_REPO": LINUX_AARCH_ECR_REPO, + "LINUX_X86_ECR_REPO": LINUX_X86_ECR_REPO, + "WINDOWS_X86_ECR_REPO": WINDOWS_X86_ECR_REPO, + "IS_DEV": str(IS_DEV), + } + + if DEPLOY_ACCOUNT is not None and DEPLOY_REGION is not None: + cdk_env["DEPLOY_ACCOUNT"] = DEPLOY_ACCOUNT + cdk_env["DEPLOY_REGION"] = DEPLOY_REGION + + pipeline = pipelines.CodePipeline( + self, + "CdkPipeline", + code_pipeline=base_pipeline, + # pipeline_name="AwsLcCiPipeline", + synth=pipelines.ShellStep( + "Synth", + input=source, + commands=[ + 'echo "Environment variables:"', + "env", + "npm install -g aws-cdk", + "cd tests/ci", + "python -m pip install -r requirements.txt", + "cd cdk", + "cdk synth", 
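# `cdk synth` runs from tests/ci/cdk, so the cloud assembly lands in
# tests/ci/cdk/cdk.out and must match primary_output_directory below.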
+ ], + env=cdk_env, + primary_output_directory="tests/ci/cdk/cdk.out", + ), + self_mutation=True, + code_build_defaults=pipelines.CodeBuildOptions( + build_environment=codebuild.BuildEnvironment( + compute_type=codebuild.ComputeType.MEDIUM, + ), + role_policy=[ + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + resources=["*"], + actions=["sts:AssumeRole"], + conditions={ + "StringEquals": { + "iam:ResourceTag/aws-cdk:bootstrap-role": "lookup", + } + }, + ), + ], + ), + ) + + if IS_DEV: + self.deploy_to_environment( + DeployEnvironmentType.DEV, + pipeline=pipeline, + source=source, + cross_account_role=cross_account_role, + ) + else: + self.deploy_to_environment( + DeployEnvironmentType.PRE_PROD, + pipeline=pipeline, + source=source, + cross_account_role=cross_account_role, + ) + + # TODO: add prod env + + pipeline.build_pipeline() + + # Schedule pipeline to run every Tuesday 15:00 UTC or 7:00 PST + events.Rule( + self, + "WeeklyCodePipelineRun", + schedule=events.Schedule.cron( + minute="0", + hour="15", + week_day="TUE", + ), + targets=[targets.CodePipeline(pipeline=base_pipeline)], + ) + + def deploy_to_environment( + self, + deploy_environment_type: DeployEnvironmentType, + pipeline: pipelines.CodePipeline, + source: pipelines.CodePipelineSource, + cross_account_role: iam.Role, + codebuild_environment_variables: typing.Optional[ + typing.Mapping[str, str] + ] = None, + ): + pipeline_environment = Environment( + account=PIPELINE_ACCOUNT, region=PIPELINE_REGION + ) + + if deploy_environment_type == DeployEnvironmentType.PRE_PROD: + deploy_environment = Environment( + account=PRE_PROD_ACCOUNT, region=PRE_PROD_REGION + ) + elif deploy_environment_type == DeployEnvironmentType.DEV: + deploy_environment = Environment( + account=DEPLOY_ACCOUNT, region=DEPLOY_REGION + ) + else: + deploy_environment = Environment(account=PROD_ACCOUNT, region=PROD_REGION) + + codebuild_environment_variables = ( + codebuild_environment_variables if codebuild_environment_variables else {} + ) + + codebuild_environment_variables = { + **codebuild_environment_variables, + "PIPELINE_EXECUTION_ID": "#{codepipeline.PipelineExecutionId}", + "DEPLOY_ACCOUNT": deploy_environment.account, + "DEPLOY_REGION": deploy_environment.region, + } + + cross_account_role.add_to_policy( + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + resources=[ + f"arn:aws:iam::{deploy_environment.account}:role/CrossAccountBuildRole" + ], + actions=["sts:AssumeRole"], + ) + ) + + setup_stage = SetupStage( + self, + f"{deploy_environment_type.value}-Setup", + pipeline_environment=pipeline_environment, + deploy_environment=deploy_environment, + ) + + pipeline.add_stage(setup_stage) + + docker_build_wave = pipeline.add_wave( + f"{deploy_environment_type.value}-DockerImageBuild" + ) + + linux_stage = LinuxDockerImageBuildStage( + self, + f"{deploy_environment_type.value}-LinuxDockerImageBuild", + pipeline_environment=pipeline_environment, + deploy_environment=deploy_environment, + ) + + linux_stage.add_stage_to_wave( + wave=docker_build_wave, + input=source.primary_output, + role=cross_account_role, + additional_stacks=setup_stage.stacks, + max_retry=MAX_TEST_RETRY, + env=codebuild_environment_variables, + ) + + windows_stage = WindowsDockerImageBuildStage( + self, + f"{deploy_environment_type.value}-WindowsDockerImageBuild", + pipeline_environment=pipeline_environment, + deploy_environment=deploy_environment, + ) + + windows_stage.add_stage_to_wave( + wave=docker_build_wave, + input=source.primary_output, + role=cross_account_role, + 
additional_stacks=setup_stage.stacks, + max_retry=MAX_TEST_RETRY, + env=codebuild_environment_variables, + ) + + docker_build_wave.add_post( + CodeBuildStep( + f"{deploy_environment_type.value}-FinalizeImages", + input=source, + commands=[ + "cd tests/ci/cdk/pipeline/scripts", + './finalize_images.sh --repos "${ECR_REPOS}"', + ], + env={ + **codebuild_environment_variables, + "ECR_REPOS": f"{' '.join(linux_stage.ecr_repo_names)} {' '.join(windows_stage.ecr_repo_names)}", + }, + role=cross_account_role, + ) + ) + + ci_stage = CiStage( + self, + f"{deploy_environment_type.value}-CiTests", + pipeline_environment=pipeline_environment, + deploy_environment=deploy_environment, + ) + + ci_stage.add_stage_to_pipeline( + pipeline=pipeline, + input=source.primary_output, + role=cross_account_role, + max_retry=MAX_TEST_RETRY, + env={ + **codebuild_environment_variables, + "PREVIOUS_REBUILDS": f"{linux_stage.need_rebuild} {linux_stage.need_rebuild}", + }, + ) diff --git a/tests/ci/cdk/pipeline/scripts/build_target.sh b/tests/ci/cdk/pipeline/scripts/build_target.sh new file mode 100755 index 0000000000..83ba25fb0b --- /dev/null +++ b/tests/ci/cdk/pipeline/scripts/build_target.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 OR ISC + +set -exuo pipefail + +source util.sh + +echo \"Environment variables:\" +env + +if [[ -z "${NEED_REBUILD:+x}" || ${NEED_REBUILD} -eq 0 ]]; then + echo "No rebuild needed" + exit 0 +fi + +export COMMIT_HASH=${COMMIT_HASH:-$CODEBUILD_RESOLVED_SOURCE_VERSION} +export CROSS_ACCOUNT_BUILD_ROLE_ARN="arn:aws:iam::${DEPLOY_ACCOUNT}:role/CrossAccountBuildRole" +export CROSS_ACCOUNT_BUILD_SESSION="pipeline-${COMMIT_HASH}" + +function build_codebuild_ci_project() { + local attempt=0 + local project=${1} + + if [[ -z ${project} ]]; then + echo "No project name provided." + exit 1 + fi + + if [[ ${DEPLOY_ACCOUNT} == '351119683581' ]]; then + source_version="main" + else + source_version=${COMMIT_HASH} + fi + + while [[ ${attempt} -le ${MAX_RETRY} ]]; do + if [[ ${attempt} -eq 0 ]]; then + echo "Starting CI tests in ${project}" + start_codebuild_project "${project}" "${source_version}" + else + echo "Retrying ${attempt}/${MAX_RETRY}..." + retry_batch_build + fi + + echo "Waiting for docker images creation. Building the docker images need to take 1 hour." + # TODO(CryptoAlg-624): These image build may fail due to the Docker Hub pull limits made on 2020-11-01. + if codebuild_build_status_check "${TIMEOUT}"; then + echo "All tests completed successfully" + exit 0 + fi + + attempt=$((attempt + 1)) + done + + echo "CI tests failed." + exit 1 +} + +function build_linux_docker_images() { + local attempt=0 + + while [[ ${attempt} -le ${MAX_RETRY} ]]; do + if [[ ${attempt} -eq 0 ]]; then + echo "Activating AWS CodeBuild to build Linux aarch & x86 docker images." + start_codebuild_project aws-lc-docker-image-build-linux "${COMMIT_HASH}" + else + echo "Retrying ${attempt}/${MAX_RETRY}..." + retry_batch_build + fi + + echo "Waiting for docker images creation. Building the docker images need to take 1 hour." + # TODO(CryptoAlg-624): These image build may fail due to the Docker Hub pull limits made on 2020-11-01. 
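# The loop above starts a fresh batch build on the first attempt; retries call
# `aws codebuild retry-build-batch --retry-type RETRY_FAILED_BUILDS` (util.sh),
# which re-runs only the jobs that failed.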
+ if codebuild_build_status_check "${TIMEOUT}"; then + echo "Successfully built Linux docker images" + exit 0 + fi + + attempt=$((attempt + 1)) + done + + echo "Failed to build Linux docker images" + exit 1 +} + +function build_win_docker_images() { + local attempt=0 + + while [[ ${attempt} -le ${MAX_RETRY} ]]; do + echo "Executing AWS SSM commands to build Windows docker images." + if ! start_windows_img_build; then + echo "Failed to start build" + continue + fi + + echo "Waiting for docker images creation. Building the docker images need to take 1 hour." + # TODO(CryptoAlg-624): These image build may fail due to the Docker Hub pull limits made on 2020-11-01. + if ! win_docker_img_build_status_check "${TIMEOUT}"; then + echo "Build failed" + attempt=$((attempt + 1)) + echo "Retrying ${attempt}/${MAX_RETRY}..." + continue + fi + + echo "Successfully built Windows docker images" + exit 0 + done + + echo "Failed to build Windows docker images" + exit 1 +} + +while [[ $# -gt 0 ]]; do + case ${1} in + --build-type) + BUILD_TYPE="${2}" + shift + ;; + --platform) + PLATFORM="${2}" + shift + ;; + --project) + PROJECT="${2}" + shift + ;; + --max-retry) + MAX_RETRY="${2}" + shift + ;; + --timeout) + TIMEOUT="${2}" + shift + ;; + *) + echo "${1} is not supported." + exit 1 + ;; + esac + shift +done + +MAX_RETRY=${MAX_RETRY:-0} +TIMEOUT=${TIMEOUT:-180} # 3 hours F + +if [[ -z ${BUILD_TYPE} ]]; then + echo "No build type provided." + exit 1 +fi + +assume_role + +if [[ -z "${BUILD_TYPE:+x}" ]]; then + echo "No build type provided." + exit 1 +fi + +if [[ ${BUILD_TYPE} == "docker" ]]; then + if [[ -z "${PLATFORM:+x}" ]]; then + echo "When building Docker images, a platform must be specified." + exit 1 + fi + + if [[ ${PLATFORM} == "linux" ]]; then + build_linux_docker_images + elif [[ ${PLATFORM} == "windows" ]]; then + build_win_docker_images + fi + exit 0 +fi + +if [[ ${BUILD_TYPE} == "ci" ]]; then + if [[ -z "${PROJECT:+x}" ]]; then + echo "When building CI tests, a project name must be specified." + exit 1 + fi + + build_codebuild_ci_project "${PROJECT}" +fi diff --git a/tests/ci/cdk/pipeline/scripts/check_trigger_conditions.sh b/tests/ci/cdk/pipeline/scripts/check_trigger_conditions.sh new file mode 100755 index 0000000000..4fb9114bb6 --- /dev/null +++ b/tests/ci/cdk/pipeline/scripts/check_trigger_conditions.sh @@ -0,0 +1,124 @@ +#!/usr/bin/env bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 OR ISC + +set -exuo pipefail + +source util.sh + +NEED_REBUILD=${NEED_REBUILD:-0} +COMMIT_HASH=${COMMIT_HASH:-$CODEBUILD_RESOLVED_SOURCE_VERSION} + +LINUX_DOCKER_PATH="tests/ci/docker_images/(dependencies|linux)" +WINDOWS_DOCKER_PATH="tests/ci/docker_images/windows" +PIPELINE_PATH="tests/ci/cdk/pipeline" + +export CROSS_ACCOUNT_BUILD_ROLE_ARN="arn:aws:iam::${DEPLOY_ACCOUNT}:role/CrossAccountBuildRole" +export CROSS_ACCOUNT_BUILD_SESSION="pipeline-${COMMIT_HASH}" + +function check_pipeline_trigger_type() { + trigger_type=$(aws codepipeline get-pipeline-execution \ + --pipeline-name AwsLcCiPipeline \ + --pipeline-execution-id ${PIPELINE_EXECUTION_ID} \ + --query 'pipelineExecution.trigger.triggerType' \ + --output text) + + # unblock execution for self-mutation, weekly cron job, and manual start/forced deploy + if [[ "$trigger_type" == "StartPipelineExecution" || "$trigger_type" == "CloudWatchEvent" ]]; then + NEED_REBUILD=$((NEED_REBUILD + 1)) + fi +} + +function get_commit_changed_files() { + local file_paths + if [[ ${PLATFORM} == "linux" ]]; then + file_paths=("${LINUX_DOCKER_PATH}" "${PIPELINE_PATH}") + elif [[ ${PLATFORM} == "windows" ]]; then + file_paths=("${WINDOWS_DOCKER_PATH}" "${PIPELINE_PATH}") + fi + + changed_files=$(git diff-tree --no-commit-id --name-only -r "${COMMIT_HASH}") + + for file_path in "${file_paths[@]}"; do + if (echo "$changed_files" | grep -E "^${file_path}"); then + NEED_REBUILD=$((NEED_REBUILD + 1)) + break + fi + done +} + +function get_cfn_changeset() { + for stack in ${STACKS}; do + change_set_arn=$(aws cloudformation describe-stacks \ + --stack-name "${stack}" \ + --query "Stacks[0].ChangeSetId" \ + --output text) + changes_count=$(aws cloudformation describe-change-set \ + --change-set-name "${change_set_arn}" \ + --stack-name "${stack}" \ + --query "Changes" | jq 'length') + if [ "$changes_count" -gt 0 ]; then + NEED_REBUILD=$((NEED_REBUILD + 1)) + break + fi + done +} + +echo \"Environment variables:\" +env + +while [[ $# -gt 0 ]]; do + case ${1} in + --stacks) + STACKS="${2}" + shift + ;; + --build-type) + BUILD_TYPE="${2}" + shift + ;; + --platform) + PLATFORM="${2}" + shift + ;; + *) + echo "${1} is not supported." + exit 1 + ;; + esac + shift +done + +if [[ -z "${BUILD_TYPE:+x}" ]]; then + echo "No build type provided." + exit 1 +fi + +if [[ -z "${STACKS:+x}" ]]; then + echo "No stacks provided." + exit 1 +fi + +if [[ -n "${PREVIOUS_REBUILDS:-}" ]]; then + for previous_rebuild in ${PREVIOUS_REBUILDS}; do + NEED_REBUILD=$((NEED_REBUILD + previous_rebuild)) + done +fi + +if [[ ${BUILD_TYPE} == "docker" ]]; then + if [[ -z "${PLATFORM:+x}" ]]; then + echo "A platform must be specified" + exit 1 + fi + + check_pipeline_trigger_type + + assume_role + get_commit_changed_files + get_cfn_changeset +elif [[ ${BUILD_TYPE} == "ci" ]]; then + assume_role + get_cfn_changeset +fi + +echo "NEED_REBUILD=$NEED_REBUILD" diff --git a/tests/ci/cdk/pipeline/scripts/cleanup_orphaned_images.sh b/tests/ci/cdk/pipeline/scripts/cleanup_orphaned_images.sh new file mode 100755 index 0000000000..db1528a61e --- /dev/null +++ b/tests/ci/cdk/pipeline/scripts/cleanup_orphaned_images.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 OR ISC + +set -exuo pipefail + +source util.sh + +export CROSS_ACCOUNT_BUILD_ROLE_ARN="arn:aws:iam::${DEPLOY_ACCOUNT}:role/CrossAccountBuildRole" +export CROSS_ACCOUNT_BUILD_SESSION="pipeline-${CODEBUILD_RESOLVED_SOURCE_VERSION}" + +function remove_pending_images() { + local repo=${1} + + # List all images in the repository and filter the ones with any tag ending with '_pending' + image_details=$(aws ecr describe-images --repository-name "$repo" --query 'imageDetails[?length(imageTags) > `0` && imageTags[?ends_with(@, `_pending`)]].{ImageDigest:imageDigest,Tags:imageTags}' --output json) + + if [ -z "$image_details" ]; then + echo "No images found with tags ending in '_pending'." + exit 0 + fi + + # Loop through and delete each image by its digest + for image in $(echo "${image_details}" | jq -c '.[]'); do + image_digest=$(echo "$image" | jq -r '.ImageDigest') + tags=$(echo "$image" | jq -r '.Tags[]') + + for tag in $tags; do + if [[ "$tag" == *"_pending" ]]; then + new_tag="${tag%_pending}_latest" # Replace '_pending' with '_latest' + + if echo "${tags}" | grep -q "${new_tag}"; then + echo "Image with digest $image_digest is tagged as latest. Will only be removing pending tag..." + # Delete the pending tag + aws ecr batch-delete-image --repository-name "$repo" --image-ids imageTag="$tag" + else + echo "Deleting image with digest: $image_digest..." + + # Delete the image by its digest + aws ecr batch-delete-image --repository-name "$repo" --image-ids imageDigest="$image_digest" + fi + + if [ $? -eq 0 ]; then + echo "Image $image_digest with _pending tag removed successfully." + else + echo "Failed to cleanup image $image_digest." + fi + break + fi + done + done + + echo "Cleanup complete." +} + +while [[ $# -gt 0 ]]; do + case ${1} in + --repos) + REPOS="${2}" + shift + ;; + *) + echo "${1} is not supported." + exit 1 + ;; + esac + shift +done + +if [[ -z "${REPOS:+x}" ]]; then + echo "No build type provided." + exit 1 +fi + +assume_role + +for repo in ${REPOS}; do + remove_pending_images "${repo}" & +done + +wait diff --git a/tests/ci/cdk/pipeline/scripts/finalize_images.sh b/tests/ci/cdk/pipeline/scripts/finalize_images.sh new file mode 100755 index 0000000000..cca1466d97 --- /dev/null +++ b/tests/ci/cdk/pipeline/scripts/finalize_images.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 OR ISC + +set -exuo pipefail + +source util.sh + +export CROSS_ACCOUNT_BUILD_ROLE_ARN="arn:aws:iam::${DEPLOY_ACCOUNT}:role/CrossAccountBuildRole" +export CROSS_ACCOUNT_BUILD_SESSION="pipeline-${CODEBUILD_RESOLVED_SOURCE_VERSION}" + +function promote_pending_tags_to_latest() { + local repo=${1} + + # Get the list of images with tags ending in "_pending" + echo "Fetching images from repository '$repo'..." + + # List all images in the repository and filter the ones with any tag ending with '_pending' + image_details=$(aws ecr describe-images --repository-name "$repo" --query 'imageDetails[?length(imageTags) > `0` && imageTags[?ends_with(@, `_pending`)]].{ImageDigest:imageDigest,Tags:imageTags}' --output json) + + if [ -z "$image_details" ]; then + echo "No images found with tags ending in '_pending'." 
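# No pending tags is the common case when this execution did not rebuild any
# images, so exiting 0 keeps the post-build step green.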
+ exit 0 + fi + + # Loop through each image and update the tags + for image in $(echo "${image_details}" | jq -c '.[]'); do + image_digest=$(echo "$image" | jq -r '.ImageDigest') + tags=$(echo "$image" | jq -r '.Tags[]') + + # Check if any tag ends with '_pending' + for tag in $tags; do + if [[ "$tag" == *"_pending" ]]; then + new_tag="${tag%_pending}_latest" # Replace '_pending' with '_latest' + + if echo "${tags}" | grep -q "${new_tag}"; then + echo "Image with digest $image_digest already has tag '$new_tag' - skipping tag update" + # Delete the pending tag + aws ecr batch-delete-image --repository-name "$repo" --image-ids imageTag="$tag" + break + else + echo "Updating tag '$tag' to '$new_tag' for image with digest: $image_digest" + + # Get the image manifest using the image digest + image_manifest=$(aws ecr batch-get-image --repository-name "$repo" --image-ids imageDigest="$image_digest" --query 'images[0].imageManifest' --output text) + + # Push the new tag using batch-put-image + aws ecr put-image --repository-name "$repo" --image-manifest "$image_manifest" --image-tag "$new_tag" + aws ecr batch-delete-image --repository-name "$repo" --image-ids imageTag="$tag" + fi + + if [ $? -eq 0 ]; then + echo "Successfully updated tag '$tag' to '$new_tag'." + else + echo "Failed to update tag '$tag' to '$new_tag'." + fi + break + fi + done + done + + echo "Tag update complete." +} + +while [[ $# -gt 0 ]]; do + case ${1} in + --repos) + REPOS="${2}" + shift + ;; + *) + echo "${1} is not supported." + exit 1 + ;; + esac + shift +done + +if [[ -z "${REPOS:+x}" ]]; then + echo "No build type provided." + exit 1 +fi + +assume_role + +for repo in ${REPOS}; do + promote_pending_tags_to_latest "${repo}" & +done + +wait diff --git a/tests/ci/cdk/pipeline/scripts/util.sh b/tests/ci/cdk/pipeline/scripts/util.sh new file mode 100755 index 0000000000..be96fb0e5c --- /dev/null +++ b/tests/ci/cdk/pipeline/scripts/util.sh @@ -0,0 +1,157 @@ +#!/usr/bin/env bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 OR ISC + +set -ex + +if [[ -z "${PIPELINE_EXECUTION_ID:+x}" ]]; then + TRIGGER_TYPE="manual" +else + TRIGGER_TYPE="pipeline" +fi + +function assume_role() { + set +x + if [[ -z ${CROSS_ACCOUNT_BUILD_ROLE_ARN} ]]; then + echo "No role arn provided" + return 1 + fi + + local session_name=${CROSS_ACCOUNT_BUILD_SESSION:-"build-session"} + CREDENTIALS=$(aws sts assume-role --role-arn "${CROSS_ACCOUNT_BUILD_ROLE_ARN}" --role-session-name "${session_name}") + export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r .Credentials.AccessKeyId) + export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r .Credentials.SecretAccessKey) + export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r .Credentials.SessionToken) + set -x +} + +function refresh_session() { + unset AWS_ACCESS_KEY_ID + unset AWS_SECRET_ACCESS_KEY + unset AWS_SESSION_TOKEN + + if [[ -z "${PIPELINE_EXECUTION_ID:+x}" ]]; then + echo "Security token expired. Please monitor build progress on the console" + exit 1 + fi + + assume_role +} + +function start_codebuild_project() { + local project=${1} + local commit_hash=${2:-main} + + if [[ -z ${project} ]]; then + echo "No project name provided." 
+ exit 1 + fi + + # https://awscli.amazonaws.com/v2/documentation/api/latest/reference/codebuild/start-build-batch.html + build_id=$(aws codebuild start-build-batch --project-name ${project} \ + --source-version ${commit_hash} \ + --environment-variables-override "name=TRIGGER_TYPE,value=${TRIGGER_TYPE},type=PLAINTEXT" \ + --query "buildBatch.id" \ + --output text) + export BUILD_BATCH_ID=${build_id} +} + +function retry_batch_build() { + aws codebuild retry-build-batch --id "${BUILD_BATCH_ID}" \ + --retry-type RETRY_FAILED_BUILDS +} + +function codebuild_build_status_check() { + # Every 5 min, this function checks if the linux docker image batch code build finished successfully. + # Normally, docker img build can take up to 1 hour. By default, we wait up to 30 * 5 min. + local timeout=${1:-180} + local status_check_max=$((timeout / 5)) + for i in $(seq 1 ${status_check_max}); do + # https://docs.aws.amazon.com/cli/latest/reference/codebuild/batch-get-build-batches.html + build_batch_status=$(aws codebuild batch-get-build-batches --ids "${BUILD_BATCH_ID}" \ + --query "buildBatches[0].buildBatchStatus" \ + --output text 2>&1) + if [[ ${build_batch_status} == "SUCCEEDED" ]]; then + echo "Build ${BUILD_BATCH_ID} finished successfully." + return 0 + elif [[ ${build_batch_status} == "FAILED" ]]; then + echo "Build ${BUILD_BATCH_ID} failed." + return 1 + elif [[ ${build_batch_status} == "IN_PROGRESS" ]]; then + echo "${i}: Wait 5 min for build job finish." + sleep 300 + elif echo "${build_batch_status}" | grep -q "ExpiredTokenException"; then + refresh_session + else + echo "Build ${BUILD_BATCH_ID} returns: ${build_batch_status}. Exiting..." + return 1 + fi + done + echo "Build ${BUILD_BATCH_ID} takes more time than expected." + return 1 +} + +function start_windows_img_build() { + # EC2 takes several minutes to be ready for running command. + echo "Wait 3 min for EC2 ready for SSM command execution." + sleep 180 + + # Run commands on windows EC2 instance to build windows docker images. + for i in {1..60}; do + instance_id=$(aws ec2 describe-instances \ + --filters "Name=tag:${WIN_EC2_TAG_KEY},Values=${WIN_EC2_TAG_VALUE}" | jq -r '.Reservations[0].Instances[0].InstanceId') + if [[ "${instance_id}" == "null" ]]; then + sleep 60 + continue + fi + instance_ping_status=$(aws ssm describe-instance-information \ + --filters "Key=InstanceIds,Values=${instance_id}" | jq -r '.InstanceInformationList[0].PingStatus') + if [[ "${instance_ping_status}" == "Online" ]]; then + # https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ssm/send-command.html + command_id=$(aws ssm send-command \ + --instance-ids "${instance_id}" \ + --document-name "${WIN_DOCKER_BUILD_SSM_DOCUMENT}" \ + --output-s3-bucket-name "${S3_FOR_WIN_DOCKER_IMG_BUILD}" \ + --output-s3-key-prefix 'runcommand' \ + --parameters "TriggerType=[\"${TRIGGER_TYPE}\"]" | + jq -r '.Command.CommandId') + # Export for checking command run status. + export WINDOWS_DOCKER_IMG_BUILD_COMMAND_ID="${command_id}" + echo "Windows ec2 is executing SSM command." + return 0 + else + echo "${i}: Current instance ping status: ${instance_ping_status}. Wait 1 minute to retry SSM command execution." + sleep 60 + fi + done + echo "After 60 minutes, Windows ec2 is still not ready for SSM commands execution. Exit." + return 1 +} + +function win_docker_img_build_status_check() { + # Every 5 min, this function checks if the windows docker image build is finished successfully. + # Normally, docker img build can take up to 1 hour. 
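# With the default timeout of 150 minutes this polls the SSM command status
# up to 30 times at 5-minute intervals.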
+  local timeout=${1:-150}
+  local status_check_max=$((timeout / 5))
+  for i in $(seq 1 ${status_check_max}); do
+    # https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ssm/list-commands.html
+    # Capture stderr and tolerate a CLI failure ('|| true') so an ExpiredTokenException
+    # can be detected below instead of being swallowed by the jq pipeline or
+    # aborting the script under 'set -e'.
+    command_output=$(aws ssm list-commands --command-id "${WINDOWS_DOCKER_IMG_BUILD_COMMAND_ID}" 2>&1 || true)
+    command_run_status=$(echo "${command_output}" | jq -r '.Commands[0].Status' 2>/dev/null || true)
+    if [[ ${command_run_status} == "Success" ]]; then
+      echo "SSM command ${WINDOWS_DOCKER_IMG_BUILD_COMMAND_ID} finished successfully."
+      return 0
+    elif [[ ${command_run_status} == "Failed" ]]; then
+      echo "SSM command ${WINDOWS_DOCKER_IMG_BUILD_COMMAND_ID} failed."
+      return 1
+    elif [[ ${command_run_status} == "InProgress" ]]; then
+      echo "${i}: Waiting 5 min for the build job to finish."
+      sleep 300
+    elif echo "${command_output}" | grep -q "ExpiredTokenException"; then
+      refresh_session
+    else
+      echo "SSM command ${WINDOWS_DOCKER_IMG_BUILD_COMMAND_ID} returned: ${command_run_status}. Exiting..."
+      return 1
+    fi
+  done
+  echo "SSM command ${WINDOWS_DOCKER_IMG_BUILD_COMMAND_ID} took longer than expected."
+  return 1
+}
diff --git a/tests/ci/cdk/pipeline/setup_stage.py b/tests/ci/cdk/pipeline/setup_stage.py
new file mode 100644
index 0000000000..b7fb613b88
--- /dev/null
+++ b/tests/ci/cdk/pipeline/setup_stage.py
@@ -0,0 +1,94 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0 OR ISC
+import typing
+
+from aws_cdk import (
+    Stage,
+    Environment,
+    Stack,
+    aws_iam as iam,
+)
+from constructs import Construct
+
+
+class SetupStage(Stage):
+    """Define a stage that deploys the IAM role allowing cross-account deployment."""
+
+    def __init__(
+        self,
+        scope: Construct,
+        id: str,
+        pipeline_environment: typing.Union[Environment, typing.Dict[str, typing.Any]],
+        deploy_environment: typing.Union[Environment, typing.Dict[str, typing.Any]],
+        **kwargs,
+    ):
+        super().__init__(
+            scope,
+            id,
+            env=pipeline_environment,
+            **kwargs,
+        )
+
+        self.setup_stack = SetupStack(
+            self,
+            "aws-lc-ci-pipeline-setup",
+            pipeline_environment=pipeline_environment,
+            deploy_environment=deploy_environment,
+            stack_name="aws-lc-ci-pipeline-setup",
+            **kwargs,
+        )
+
+    @property
+    def stacks(self) -> typing.List[Stack]:
+        return [child for child in self.node.children if isinstance(child, Stack)]
+
+
+class SetupStack(Stack):
+    """Define a stack containing the IAM role that allows cross-account deployment."""
+
+    def __init__(
+        self,
+        scope: Construct,
+        id: str,
+        pipeline_environment: typing.Union[Environment, typing.Dict[str, typing.Any]],
+        deploy_environment: typing.Union[Environment, typing.Dict[str, typing.Any]],
+        **kwargs,
+    ) -> None:
+        super().__init__(scope, id, env=deploy_environment, **kwargs)
+
+        cross_account_role = iam.Role(
+            self,
+            "CrossAccountBuildRole",
+            role_name="CrossAccountBuildRole",
+            assumed_by=iam.ArnPrincipal(
+                f"arn:aws:iam::{pipeline_environment.account}:role/CrossAccountPipelineRole"
+            ),
+        )
+
+        # Grant access to the AWS-LC CodeBuild projects
+        cross_account_role.add_to_policy(
+            iam.PolicyStatement(
+                effect=iam.Effect.ALLOW,
+                actions=["codebuild:*"],
+                resources=[
+                    f"arn:aws:codebuild:{deploy_environment.region}:{deploy_environment.account}:project/aws-lc-*"
+                ],
+            )
+        )
+
+        cross_account_role.add_to_policy(
+            iam.PolicyStatement(
+                effect=iam.Effect.ALLOW,
+                actions=[
+                    "cloudformation:DescribeChangeSet",
+                    "cloudformation:DescribeStacks",
+                    "ec2:DescribeInstances",
+                    "ssm:DescribeInstanceInformation",
+                    "ssm:SendCommand",
+                    "ssm:ListCommands",
+                    "ecr:DescribeImages",
+                    "ecr:BatchGetImage",
+                    "ecr:PutImage",
+                    "ecr:BatchDeleteImage",
+                ],
+                resources=["*"],
+            )
+        )
diff --git a/tests/ci/cdk/pipeline/windows_docker_image_build_stage.py b/tests/ci/cdk/pipeline/windows_docker_image_build_stage.py
new file mode 100644
index 0000000000..a4bcdde0c8
--- /dev/null
+++ b/tests/ci/cdk/pipeline/windows_docker_image_build_stage.py
@@ -0,0 +1,102 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0 OR ISC
+import typing
+
+from aws_cdk import Stage, Environment, Stack, Duration, aws_iam as iam, pipelines
+from aws_cdk.pipelines import CodeBuildStep
+from constructs import Construct
+
+from cdk.ecr_stack import EcrStack
+from cdk.windows_docker_image_build_stack import WindowsDockerImageBuildStack
+from util.metadata import (
+    WINDOWS_X86_ECR_REPO,
+    WIN_EC2_TAG_KEY,
+    WIN_EC2_TAG_VALUE,
+    SSM_DOCUMENT_NAME,
+)
+
+
+class WindowsDockerImageBuildStage(Stage):
+    def __init__(
+        self,
+        scope: Construct,
+        id: str,
+        pipeline_environment: typing.Union[Environment, typing.Dict[str, typing.Any]],
+        deploy_environment: typing.Union[Environment, typing.Dict[str, typing.Any]],
+        **kwargs
+    ):
+        super().__init__(
+            scope,
+            id,
+            env=pipeline_environment,
+            **kwargs,
+        )
+
+        self.ecr_windows_x86 = EcrStack(
+            self,
+            "aws-lc-ecr-windows-x86",
+            WINDOWS_X86_ECR_REPO,
+            env=deploy_environment,
+            stack_name="aws-lc-ecr-windows-x86",
+        )
+
+        self.windows_docker_build_stack = WindowsDockerImageBuildStack(
+            self,
+            "aws-lc-docker-image-build-windows",
+            env=deploy_environment,
+            stack_name="aws-lc-docker-image-build-windows",
+        )
+        self.windows_docker_build_stack.add_dependency(self.ecr_windows_x86)
+
+        self.ecr_repo_names = [WINDOWS_X86_ECR_REPO]
+        self.s3_bucket_name = self.windows_docker_build_stack.output["s3_bucket_name"]
+
+        self.need_rebuild = None
+
+    @property
+    def stacks(self) -> typing.List[Stack]:
+        return [child for child in self.node.children if isinstance(child, Stack)]
+
+    def add_stage_to_wave(
+        self,
+        wave: pipelines.Wave,
+        input: pipelines.FileSet,
+        role: iam.Role,
+        max_retry: typing.Optional[int] = 2,
+        additional_stacks: typing.Optional[typing.List[str]] = None,
+        env: typing.Optional[typing.Mapping[str, str]] = None,
+    ):
+        # additional_stacks holds extra stack *names* (strings), so merge it with
+        # the names of this stage's own stacks rather than with the Stack objects.
+        stack_names = [stack.stack_name for stack in self.stacks] + (
+            additional_stacks if additional_stacks else []
+        )
+
+        env = env if env else {}
+        timeout = (max_retry + 1) * 120
+
+        docker_build_step = CodeBuildStep(
+            "StartWait",
+            input=input,
+            commands=[
+                "cd tests/ci/cdk/pipeline/scripts",
+                './cleanup_orphaned_images.sh --repos "${ECR_REPOS}"',
+                'trigger_conditions=$(./check_trigger_conditions.sh --build-type docker --platform windows --stacks "${STACKS}")',
+                "export NEED_REBUILD=$(echo $trigger_conditions | sed -n -e 's/.*\(NEED_REBUILD=[0-9]*\).*/\\1/p' | cut -d'=' -f2 )",
+                "./build_target.sh --build-type docker --platform windows --max-retry ${MAX_RETRY} --timeout ${TIMEOUT}",
+            ],
+            env={
+                **env,
+                "STACKS": " ".join(stack_names),
+                "ECR_REPOS": " ".join(self.ecr_repo_names),
+                "MAX_RETRY": str(max_retry),
+                "TIMEOUT": str(timeout),
+                "WIN_EC2_TAG_KEY": WIN_EC2_TAG_KEY,
+                "WIN_EC2_TAG_VALUE": WIN_EC2_TAG_VALUE,
+                "WIN_DOCKER_BUILD_SSM_DOCUMENT": SSM_DOCUMENT_NAME,
+                "S3_FOR_WIN_DOCKER_IMG_BUILD": self.s3_bucket_name,
+            },
+            role=role,
+            timeout=Duration.minutes(timeout),
+        )
+
+        wave.add_stage(self, post=[docker_build_step])
+
+        self.need_rebuild = docker_build_step.exported_variable("NEED_REBUILD")
diff --git a/tests/ci/cdk/run-cdk.sh b/tests/ci/cdk/run-cdk.sh
index 983c7beef4..9794ad4179 100755
--- a/tests/ci/cdk/run-cdk.sh
+++ b/tests/ci/cdk/run-cdk.sh
@@ -4,6 +4,8 @@
 set -exuo pipefail
 
+source pipeline/scripts/util.sh
+
 # -e: Exit on any failure
 # -x: Print the command before running
 # -u: Any variable that is not set will cause an error if used
@@ -14,11 +16,7 @@ function delete_s3_buckets() {
   aws s3api list-buckets --query "Buckets[].Name" | jq '.[]' | while read -r i; do
     bucket_name=$(echo "${i}" | tr -d '"')
     # Delete the bucket if its name uses S3_FOR_WIN_DOCKER_IMG_BUILD.
-    if [[ "${bucket_name}" == *"${AWS_LC_S3_BUCKET_PREFIX}"* ]]; then
-      aws s3 rm "s3://${bucket_name}" --recursive
-      aws s3api delete-bucket --bucket "${bucket_name}"
-    # Delete bm-framework buckets if we're not on the team account
-    elif [[ "${CDK_DEPLOY_ACCOUNT}" != "620771051181" ]] && [[ "${bucket_name}" == *"${aws-lc-ci-bm-framework}"* ]]; then
+    if [[ "${bucket_name}" == *"${S3_FOR_WIN_DOCKER_IMG_BUILD}"* ]]; then
       aws s3 rm "s3://${bucket_name}" --recursive
       aws s3api delete-bucket --bucket "${bucket_name}"
     fi
@@ -39,7 +37,7 @@ function delete_container_repositories() {
 }
 
 function destroy_ci() {
-  if [[ "${CDK_DEPLOY_ACCOUNT}" == "620771051181" ]]; then
+  if [[ "${DEPLOY_ACCOUNT}" == "620771051181" || "${DEPLOY_ACCOUNT}" == "351119683581" ]]; then
     echo "destroy_ci should not be executed on team accounts."
     exit 1
   fi
@@ -62,8 +60,6 @@ function destroy_docker_img_build_stack() {
 }
 
 function create_linux_docker_img_build_stack() {
-  # Clean up build stacks if exists.
-  destroy_docker_img_build_stack
   # Deploy aws-lc ci stacks.
   # When deploying repeatedly, the error 'EIP failed Reason: Maximum number of addresses has been reached' can happen.
   #
@@ -74,8 +70,6 @@ function create_linux_docker_img_build_stack() {
 }
 
 function create_win_docker_img_build_stack() {
-  # Clean up build stacks if exists.
-  destroy_docker_img_build_stack
   # Deploy aws-lc ci stacks.
   # When deploying repeatedly, the error 'EIP failed Reason: Maximum number of addresses has been reached' can happen.
   #
@@ -97,8 +91,8 @@ function run_linux_img_build() {
 
 function run_windows_img_build() {
   # EC2 takes several minutes to be ready for running commands.
-  echo "Wait 3 min for EC2 ready for SSM command execution."
-  sleep 180
+#  echo "Wait 3 min for EC2 ready for SSM command execution."
+#  sleep 180
 
   # Run commands on windows EC2 instance to build windows docker images.
   for i in {1..60}; do
@@ -116,7 +110,9 @@ function run_windows_img_build() {
       --instance-ids "${instance_id}" \
       --document-name "${WIN_DOCKER_BUILD_SSM_DOCUMENT}" \
       --output-s3-bucket-name "${S3_FOR_WIN_DOCKER_IMG_BUILD}" \
-      --output-s3-key-prefix 'runcommand' | jq -r '.Command.CommandId')
+      --output-s3-key-prefix 'runcommand' \
+      --parameters "TriggerType=[\"manual\"]" | \
+      jq -r '.Command.CommandId')
       # Export for checking command run status.
       export WINDOWS_DOCKER_IMG_BUILD_COMMAND_ID="${command_id}"
       echo "Windows ec2 is executing SSM command."
@@ -177,9 +173,6 @@ function win_docker_img_build_status_check() {
 }
 
 function build_linux_docker_images() {
-  # Always destroy docker build stacks (which include EC2 instance) on EXIT.
-  trap destroy_docker_img_build_stack EXIT
-
   # Create/update aws-ecr repo.
   cdk deploy 'aws-lc-ecr-linux-*' --require-approval never
 
@@ -195,15 +188,17 @@ function build_linux_docker_images() {
 }
 
 function build_win_docker_images() {
-  # Always destroy docker build stacks (which include EC2 instance) on EXIT.
-  trap destroy_docker_img_build_stack EXIT
-
   # Create/update aws-ecr repo.
 cdk deploy 'aws-lc-ecr-windows-*' --require-approval never
 
   # Create aws windows build stack
   create_win_docker_img_build_stack
 
+  S3_FOR_WIN_DOCKER_IMG_BUILD=$(aws cloudformation describe-stack-resources \
+    --stack-name aws-lc-docker-image-build-windows \
+    --query "StackResources[?ResourceType=='AWS::S3::Bucket'].PhysicalResourceId" \
+    --output text)
+
   echo "Executing AWS SSM commands to build Windows docker images."
   run_windows_img_build
 
@@ -220,6 +215,33 @@ function setup_ci() {
   create_android_resources
 }
 
+function deploy_production_pipeline() {
+  cdk deploy AwsLcCiPipeline --require-approval never
+}
+
+function deploy_dev_pipeline() {
+  if [[ -z "${DEPLOY_ACCOUNT:+x}" ]]; then
+    echo "The pipeline needs a deployment account to know where to deploy the CI to."
+    exit 1
+  fi
+
+  if [[ ${DEPLOY_ACCOUNT} == '620771051181' ]]; then
+    echo "Dev pipeline cannot deploy to the production account."
+    exit 1
+  fi
+
+  if [[ -z "${PIPELINE_ACCOUNT:+x}" ]]; then
+    export PIPELINE_ACCOUNT=${DEPLOY_ACCOUNT}
+  fi
+
+  if [[ ${PIPELINE_ACCOUNT} == '774305600158' ]]; then
+    echo "Cannot deploy: the production pipeline is hosted under the same name in this pipeline account."
+    exit 1
+  fi
+
+  cdk deploy AwsLcCiPipeline --require-approval never
+}
+
 function create_android_resources() {
   # Use aws cli to create Device Farm project and get project arn to create device pools.
   # TODO: Move resource creation to aws cdk when cdk has support for device farm resource constructs.
@@ -264,8 +286,8 @@ For aws-lc continuous integration setup, this script uses aws cli to build some
 Options:
     --help Displays this help
-    --aws-account AWS account for CDK deploy/destroy. Default to '620771051181'.
-    --aws-region AWS region for AWS resources creation. Default to 'us-west-2'.
+    --deploy-account AWS account for CDK deploy/destroy. Default to '620771051181'.
+    --deploy-region AWS region for AWS resources creation. Default to 'us-west-2'.
    --github-repo-owner GitHub repository owner. Default to 'aws'.
    --github-source-version GitHub source version. Default to 'main'.
    --action Required. The value can be
@@ -280,38 +302,41 @@ Options:
      'diff': compares the specified stack with the deployed stack.
      'synth': synthesizes and prints the CloudFormation template for the stacks.
      'bootstrap': Bootstraps the CDK stack. This is needed before deployment or updating the CI.
+     'invoke': invoke a custom command. Provide the custom command through '--command'.
+    --command Custom command to invoke. Required for '--action invoke'.
 EOF
 }
 
 function export_global_variables() {
   # If these variables are not set or empty, defaults are exported.
-  if [[ -z "${CDK_DEPLOY_ACCOUNT+x}" || -z "${CDK_DEPLOY_ACCOUNT}" ]]; then
-    export CDK_DEPLOY_ACCOUNT='620771051181'
+  if [[ -z "${DEPLOY_ACCOUNT:+x}" ]]; then
+    export DEPLOY_ACCOUNT='620771051181'
   fi
-  if [[ -z "${CDK_DEPLOY_REGION+x}" || -z "${CDK_DEPLOY_REGION}" ]]; then
-    export CDK_DEPLOY_REGION='us-west-2'
-    export AWS_DEFAULT_REGION="${CDK_DEPLOY_REGION}"
+  if [[ -z "${DEPLOY_REGION:+x}" ]]; then
+    export DEPLOY_REGION='us-west-2'
+    export AWS_DEFAULT_REGION="${DEPLOY_REGION}"
   fi
-  if [[ -z "${GITHUB_REPO_OWNER+x}" || -z "${GITHUB_REPO_OWNER}" ]]; then
+  if [[ -z "${GITHUB_REPO_OWNER:+x}" ]]; then
    export GITHUB_REPO_OWNER='aws'
   fi
-  if [[ -z "${GITHUB_SOURCE_VERSION+x}" || -z "${GITHUB_SOURCE_VERSION}" ]]; then
+  if [[ -z "${GITHUB_SOURCE_VERSION:+x}" ]]; then
    export GITHUB_SOURCE_VERSION='main'
   fi
 
   # Other variables for managing resources.
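+  # The resource names below are intentionally static (the former DATE_NOW suffix
+  # is gone) so that the pipeline scripts can reference them deterministically.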
-  DATE_NOW="$(date +%Y-%m-%d-%H-%M)"
-  export GITHUB_REPO='aws-lc'
+#  DATE_NOW="$(date +%Y-%m-%d-%H-%M)"
+  export GITHUB_REPO_NAME='aws-lc'
   export ECR_LINUX_AARCH_REPO_NAME='aws-lc-docker-images-linux-aarch'
   export ECR_LINUX_X86_REPO_NAME='aws-lc-docker-images-linux-x86'
   export ECR_WINDOWS_X86_REPO_NAME='aws-lc-docker-images-windows-x86'
   export AWS_LC_S3_BUCKET_PREFIX='aws-lc-windows-docker-image-build-s3'
-  export S3_FOR_WIN_DOCKER_IMG_BUILD="${AWS_LC_S3_BUCKET_PREFIX}-${DATE_NOW}"
   export WIN_EC2_TAG_KEY='aws-lc'
-  export WIN_EC2_TAG_VALUE="aws-lc-windows-docker-image-build-${DATE_NOW}"
-  export WIN_DOCKER_BUILD_SSM_DOCUMENT="windows-ssm-document-${DATE_NOW}"
+  export WIN_EC2_TAG_VALUE='aws-lc-windows-docker-image-build'
+  export WIN_DOCKER_BUILD_SSM_DOCUMENT='AWSLC-BuildWindowsDockerImages'
+  export S3_FOR_WIN_DOCKER_IMG_BUILD='aws-lc-windows-docker-image-build-s3'
+  export MAX_TEST_RETRY=2
   export IMG_BUILD_STATUS='unknown'
 
-  # 620771051181 is AWS-LC team AWS account.
-  if [[ "${CDK_DEPLOY_ACCOUNT}" != "620771051181" ]] && [[ "${GITHUB_REPO_OWNER}" == 'aws' ]]; then
+  # 620771051181 and 351119683581 are AWS-LC team AWS accounts.
+  if [[ "${DEPLOY_ACCOUNT}" != "620771051181" && "${DEPLOY_ACCOUNT}" != '351119683581' ]] && [[ "${GITHUB_REPO_OWNER}" == 'aws' ]]; then
     echo "Only team accounts are allowed to create CI stacks on the aws repo."
     exit 1
   fi
@@ -325,13 +350,21 @@ function main() {
     script_helper
     exit 0
     ;;
-  --aws-account)
-    export CDK_DEPLOY_ACCOUNT="${2}"
+  --deploy-account)
+    export DEPLOY_ACCOUNT="${2}"
+    shift
+    ;;
+  --deploy-region)
+    export DEPLOY_REGION="${2}"
+    export AWS_DEFAULT_REGION="${DEPLOY_REGION}"
+    shift
+    ;;
+  --pipeline-account)
+    export PIPELINE_ACCOUNT="${2}"
     shift
     ;;
-  --aws-region)
-    export CDK_DEPLOY_REGION="${2}"
-    export AWS_DEFAULT_REGION="${CDK_DEPLOY_REGION}"
+  --pipeline-region)
+    export PIPELINE_REGION="${2}"
     shift
     ;;
   --github-repo-owner)
@@ -346,6 +379,10 @@
     export ACTION="${2}"
     shift
     ;;
+  --command)
+    COMMAND="${2}"
+    shift
+    ;;
   *)
     echo "${1} is not supported."
     exit 1
@@ -356,7 +393,7 @@
   done
 
   # Make sure action is set.
-  if [[ -z "${ACTION+x}" || -z "${ACTION}" ]]; then
-    echo "${ACTION} is required input."
+  if [[ -z "${ACTION:+x}" ]]; then
+    echo "--action is required input."
     exit 1
   fi
@@ -366,6 +403,14 @@
   # Execute the action.
   case ${ACTION} in
+  deploy-production-pipeline)
+    export IS_DEV="False"
+    deploy_production_pipeline
+    ;;
+  deploy-dev-pipeline)
+    export IS_DEV="True"
+    deploy_dev_pipeline
+    ;;
   deploy-ci)
     setup_ci
     ;;
@@ -388,7 +433,7 @@
     build_win_docker_images
     ;;
   synth)
-    cdk synth 'aws-lc-ci-*'
+    cdk synth '*'
     ;;
   diff)
     cdk diff aws-lc-ci-*
@@ -396,6 +441,13 @@
   bootstrap)
     cdk bootstrap
     ;;
+  invoke)
+    if [[ -z "${COMMAND:+x}" ]]; then
+      echo "--action invoke requires a command."
+      exit 1
+    fi
+    ${COMMAND:?}
+    ;;
   *)
     echo "--action is required. Use '--help' to see allowed actions."
     exit 1
diff --git a/tests/ci/cdk/util/build_spec_loader.py b/tests/ci/cdk/util/build_spec_loader.py
index 7ce522e4d9..bd65e1e4ac 100644
--- a/tests/ci/cdk/util/build_spec_loader.py
+++ b/tests/ci/cdk/util/build_spec_loader.py
@@ -3,8 +3,8 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 OR ISC -from aws_cdk import aws_codebuild as codebuild, aws_s3_assets -from util.metadata import CAN_AUTOLOAD, TEAM_ACCOUNT, AWS_ACCOUNT, DEFAULT_REGION, AWS_REGION +from aws_cdk import aws_codebuild as codebuild +from util.metadata import PROD_ACCOUNT, PROD_REGION import tempfile @@ -12,25 +12,30 @@ class BuildSpecLoader(object): """Responsible for loading the BuildSpec yml file as python object.""" @staticmethod - def load(file_path): + def load(file_path, env): """ Used to load yml file and replace some placeholders if needed. :param file_path: path to the yml file. :return: python object. """ + # Indicate if the BuildSpec files can be automatically loaded without manual deployment. + can_autoload = (env.account == PROD_ACCOUNT) and (env.region == PROD_REGION) + # If the deployment uses team account, the change of batch BuildSpec file is loaded automatically without deployment. # else, the change will require manual deployment via CDK command. - if CAN_AUTOLOAD: - return codebuild.BuildSpec.from_source_filename("tests/ci/cdk/{}".format(file_path)) + if can_autoload: + return codebuild.BuildSpec.from_source_filename( + "tests/ci/cdk/{}".format(file_path) + ) # TODO(CryptoAlg-1276): remove below when the batch BuildSpec supports the env variable of account and region. placeholder_map = { - TEAM_ACCOUNT: AWS_ACCOUNT, - DEFAULT_REGION: AWS_REGION, + PROD_ACCOUNT: env.account, + PROD_REGION: env.region, } with open(file_path) as original_file: file_text = original_file.read() for key in placeholder_map.keys(): file_text = file_text.replace(key, placeholder_map[key]) - with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp_file: + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: temp_file.write(file_text) return codebuild.BuildSpec.from_asset(temp_file.name) diff --git a/tests/ci/cdk/util/devicefarm_util.py b/tests/ci/cdk/util/devicefarm_util.py index 372ecedf12..595e5e9059 100644 --- a/tests/ci/cdk/util/devicefarm_util.py +++ b/tests/ci/cdk/util/devicefarm_util.py @@ -11,4 +11,4 @@ DEVICEFARM_PROJECT = EnvUtil.get("DEVICEFARM_PROJECT", None) DEVICEFARM_DEVICE_POOL = EnvUtil.get("DEVICEFARM_DEVICE_POOL", None) ANDROID_TEST_NAME = EnvUtil.get("ANDROID_TEST_NAME", "AWS-LC Android Test") -AWS_REGION = EnvUtil.get("AWS_REGION", None) \ No newline at end of file +AWS_REGION = EnvUtil.get("AWS_REGION", None) diff --git a/tests/ci/cdk/util/ecr_util.py b/tests/ci/cdk/util/ecr_util.py index 2dfd3d521d..88361a1ce3 100644 --- a/tests/ci/cdk/util/ecr_util.py +++ b/tests/ci/cdk/util/ecr_util.py @@ -1,5 +1,4 @@ -from util.metadata import AWS_ACCOUNT, AWS_REGION - - -def ecr_arn(ecr_repo_name): - return "{}.dkr.ecr.{}.amazonaws.com/{}".format(AWS_ACCOUNT, AWS_REGION, ecr_repo_name) +def ecr_arn(ecr_repo_name, env): + return "{}.dkr.ecr.{}.amazonaws.com/{}".format( + env.account, env.region, ecr_repo_name + ) diff --git a/tests/ci/cdk/util/env_util.py b/tests/ci/cdk/util/env_util.py index 700b0569d4..e2a7c832c9 100644 --- a/tests/ci/cdk/util/env_util.py +++ b/tests/ci/cdk/util/env_util.py @@ -11,11 +11,15 @@ class EnvUtil(object): """An util helps get environment variable.""" @staticmethod - def get(key, defalut_value: typing.Optional[str] = None): + def get(key, default_value: typing.Optional[str] = None): val = os.environ.get(key) if val is None: - val = defalut_value + val = default_value if val is None: raise ValueError("{} env variable is not set.".format(key)) else: return val + + @staticmethod + def get_optional(key): + return 
os.environ.get(key) diff --git a/tests/ci/cdk/util/iam_policies.py b/tests/ci/cdk/util/iam_policies.py index 2558fff73a..d52150bb25 100644 --- a/tests/ci/cdk/util/iam_policies.py +++ b/tests/ci/cdk/util/iam_policies.py @@ -3,9 +3,10 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 OR ISC -from util.metadata import AWS_REGION, AWS_ACCOUNT -def ec2_policies_in_json(ec2_role_name, ec2_security_group_id, ec2_subnet_id, ec2_vpc_id): +def ec2_policies_in_json( + ec2_role_name, ec2_security_group_id, ec2_subnet_id, ec2_vpc_id, env +): """ Define an IAM policy that gives permissions for starting, stopping, and getting details of EC2 instances and their Vpcs :return: an IAM policy statement in json. @@ -23,20 +24,29 @@ def ec2_policies_in_json(ec2_role_name, ec2_security_group_id, ec2_subnet_id, ec "ec2:DescribeInstances", ], "Resource": [ - "arn:aws:iam::{}:role/{}".format(AWS_ACCOUNT, ec2_role_name), - "arn:aws:ec2:{}:{}:instance/*".format(AWS_REGION, AWS_ACCOUNT), - "arn:aws:ec2:{}::image/*".format(AWS_REGION), - "arn:aws:ec2:{}:{}:network-interface/*".format(AWS_REGION, AWS_ACCOUNT), - "arn:aws:ec2:{}:{}:volume/*".format(AWS_REGION, AWS_ACCOUNT), - "arn:aws:ec2:{}:{}:security-group/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_security_group_id), - "arn:aws:ec2:{}:{}:subnet/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_subnet_id), - "arn:aws:ec2:{}:{}:vpc/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_vpc_id), - ] - }] + "arn:aws:iam::{}:role/{}".format(env.account, ec2_role_name), + "arn:aws:ec2:{}:{}:instance/*".format(env.region, env.account), + "arn:aws:ec2:{}::image/*".format(env.region), + "arn:aws:ec2:{}:{}:network-interface/*".format( + env.region, env.account + ), + "arn:aws:ec2:{}:{}:volume/*".format(env.region, env.account), + "arn:aws:ec2:{}:{}:security-group/{}".format( + env.region, env.account, ec2_security_group_id + ), + "arn:aws:ec2:{}:{}:subnet/{}".format( + env.region, env.account, ec2_subnet_id + ), + "arn:aws:ec2:{}:{}:vpc/{}".format( + env.region, env.account, ec2_vpc_id + ), + ], + } + ], } -def ssm_policies_in_json(): +def ssm_policies_in_json(env): """ Define an IAM policy that gives permissions to creating documents and running commands. :return: an IAM policy statement in json. @@ -51,17 +61,21 @@ def ssm_policies_in_json(): "ssm:CreateDocument", "ssm:DeleteDocument", "ssm:ListCommands", - "ssm:DescribeInstanceInformation" + "ssm:DescribeInstanceInformation", ], "Resource": [ - "arn:aws:ec2:{}:{}:instance/*".format(AWS_REGION, AWS_ACCOUNT), # Needed for ssm:SendCommand - "arn:aws:ssm:{}:{}:*".format(AWS_REGION, AWS_ACCOUNT), - "arn:aws:ssm:{}:{}:document/*".format(AWS_REGION, AWS_ACCOUNT), - ] - }] + "arn:aws:ec2:{}:{}:instance/*".format( + env.region, env.account + ), # Needed for ssm:SendCommand + "arn:aws:ssm:{}:{}:*".format(env.region, env.account), + "arn:aws:ssm:{}:{}:document/*".format(env.region, env.account), + ], + } + ], } -def code_build_batch_policy_in_json(project_ids): + +def code_build_batch_policy_in_json(project_ids, env): """ Define an IAM policy statement for CodeBuild batch operation. :param project_ids: a list of CodeBuild project id. 
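+    :param env: CDK environment whose account and region are substituted into the project ARNs.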
@@ -69,7 +83,11 @@ def code_build_batch_policy_in_json(project_ids): """ resources = [] for project_id in project_ids: - resources.append("arn:aws:codebuild:{}:{}:project/{}*".format(AWS_REGION, AWS_ACCOUNT, project_id)) + resources.append( + "arn:aws:codebuild:{}:{}:project/{}*".format( + env.region, env.account, project_id + ) + ) return { "Version": "2012-10-17", "Statement": [ @@ -78,13 +96,14 @@ def code_build_batch_policy_in_json(project_ids): "Action": [ "codebuild:StartBuild", "codebuild:StopBuild", - "codebuild:RetryBuild" + "codebuild:RetryBuild", ], - "Resource": resources + "Resource": resources, } - ] + ], } + def code_build_cloudwatch_logs_policy_in_json(log_groups): """ Define an IAM policy statement for CloudWatch logs associated with CodeBuild projects. @@ -97,17 +116,12 @@ def code_build_cloudwatch_logs_policy_in_json(log_groups): return { "Version": "2012-10-17", "Statement": [ - { - "Effect": "Allow", - "Action": [ - "logs:GetLogEvents" - ], - "Resource": resources - } - ] + {"Effect": "Allow", "Action": ["logs:GetLogEvents"], "Resource": resources} + ], } -def code_build_publish_metrics_in_json(): + +def code_build_publish_metrics_in_json(env): """ Define an IAM policy that only grants access to publish CloudWatch metrics to the current region in the same namespace used in the calls to PutMetricData in tests/ci/common_fuzz.sh. @@ -121,21 +135,15 @@ def code_build_publish_metrics_in_json(): "Resource": "*", "Condition": { "StringEquals": { - "aws:RequestedRegion": [ - AWS_REGION - ], - "cloudwatch:namespace": [ - "AWS-LC-Fuzz", - "AWS-LC" - ] + "aws:RequestedRegion": [env.region], + "cloudwatch:namespace": ["AWS-LC-Fuzz", "AWS-LC"], } - } + }, } - ] + ], } - def s3_read_write_policy_in_json(s3_bucket_name): """ Define an IAM policy statement for reading and writing to S3 bucket. @@ -146,46 +154,39 @@ def s3_read_write_policy_in_json(s3_bucket_name): "Statement": [ { "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject" - ], - "Resource": [ - "arn:aws:s3:::{}/*".format(s3_bucket_name) - ] + "Action": ["s3:PutObject", "s3:GetObject"], + "Resource": ["arn:aws:s3:::{}/*".format(s3_bucket_name)], } - ] + ], } -def ecr_repo_arn(repo_name): +def ecr_repo_arn(repo_name, env): """ Create a ECR repository arn. See https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonelasticcontainerregistry.html :param repo_name: repository name. :return: arn:aws:ecr:${Region}:${Account}:repository/${RepositoryName} """ - ecr_arn_prefix = "arn:aws:ecr:{}:{}:repository".format(AWS_REGION, AWS_ACCOUNT) + ecr_arn_prefix = "arn:aws:ecr:{}:{}:repository".format(env.region, env.account) return "{}/{}".format(ecr_arn_prefix, repo_name) -def ecr_power_user_policy_in_json(ecr_repo_names): +def ecr_power_user_policy_in_json(ecr_repo_names, env): """ Define an AWS-LC specific IAM policy statement for AWS ECR power user used to create new docker images. :return: an IAM policy statement in json. 
""" ecr_arns = [] for ecr_repo_name in ecr_repo_names: - ecr_arns.append(ecr_repo_arn(ecr_repo_name)) + ecr_arns.append(ecr_repo_arn(ecr_repo_name, env)) return { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", - "Action": [ - "ecr:GetAuthorizationToken" - ], - "Resource": "*" + "Action": ["ecr:GetAuthorizationToken"], + "Resource": "*", }, { "Effect": "Allow", @@ -204,20 +205,21 @@ def ecr_power_user_policy_in_json(ecr_repo_names): "ecr:InitiateLayerUpload", "ecr:UploadLayerPart", "ecr:CompleteLayerUpload", - "ecr:PutImage" + "ecr:PutImage", ], - "Resource": ecr_arns - } - ] + "Resource": ecr_arns, + }, + ], } -def device_farm_access_policy_in_json(): + +def device_farm_access_policy_in_json(env): """ Define an IAM policy statement for Device Farm operations. :return: an IAM policy statement in json. """ resources = [] - resources.append("arn:aws:devicefarm:{}:{}:*:*".format(AWS_REGION, AWS_ACCOUNT)) + resources.append("arn:aws:devicefarm:{}:{}:*:*".format(env.region, env.account)) return { "Version": "2012-10-17", "Statement": [ @@ -235,7 +237,7 @@ def device_farm_access_policy_in_json(): "devicefarm:ListSuites", "devicefarm:ListTests", ], - "Resource": resources + "Resource": resources, } - ] + ], } diff --git a/tests/ci/cdk/util/metadata.py b/tests/ci/cdk/util/metadata.py index 7c45210078..3b8e2a606a 100644 --- a/tests/ci/cdk/util/metadata.py +++ b/tests/ci/cdk/util/metadata.py @@ -6,28 +6,52 @@ from util.env_util import EnvUtil # Used when AWS CDK defines AWS resources. -TEAM_ACCOUNT = "620771051181" -DEFAULT_REGION = "us-west-2" -AWS_ACCOUNT = EnvUtil.get("CDK_DEPLOY_ACCOUNT", TEAM_ACCOUNT) -AWS_REGION = EnvUtil.get("CDK_DEPLOY_REGION", DEFAULT_REGION) -# Indicate if the BuildSpec files can be automatically loaded without manualy deployment. -CAN_AUTOLOAD = (AWS_ACCOUNT == TEAM_ACCOUNT) and (AWS_REGION == DEFAULT_REGION) +PROD_ACCOUNT = "620771051181" +PROD_REGION = "us-west-2" +PRE_PROD_ACCOUNT = "351119683581" +PRE_PROD_REGION = "us-west-2" +PIPELINE_ACCOUNT = EnvUtil.get("PIPELINE_ACCOUNT", "774305600158") +PIPELINE_REGION = EnvUtil.get("PIPELINE_REGION", "us-west-2") + +DEPLOY_ACCOUNT = EnvUtil.get_optional("DEPLOY_ACCOUNT") +DEPLOY_REGION = EnvUtil.get_optional("DEPLOY_REGION") + +STAGING_GITHUB_REPO_OWNER = "aws" +STAGING_GITHUB_REPO_NAME = "private-aws-lc-staging" + +IS_DEV = EnvUtil.get("IS_DEV", "False") == "True" # TODO: change default value to true +MAX_TEST_RETRY = int(EnvUtil.get("MAX_TEST_RETRY", "2")) # Used when AWS CDK defines ECR repos. -LINUX_AARCH_ECR_REPO = EnvUtil.get("ECR_LINUX_AARCH_REPO_NAME", "aws-lc-docker-images-linux-aarch") -LINUX_X86_ECR_REPO = EnvUtil.get("ECR_LINUX_X86_REPO_NAME", "aws-lc-docker-images-linux-x86") -WINDOWS_X86_ECR_REPO = EnvUtil.get("ECR_WINDOWS_X86_REPO_NAME", "aws-lc-docker-images-windows-x86") +LINUX_AARCH_ECR_REPO = EnvUtil.get( + "ECR_LINUX_AARCH_REPO_NAME", "aws-lc-docker-images-linux-aarch" +) +LINUX_X86_ECR_REPO = EnvUtil.get( + "ECR_LINUX_X86_REPO_NAME", "aws-lc-docker-images-linux-x86" +) +WINDOWS_X86_ECR_REPO = EnvUtil.get( + "ECR_WINDOWS_X86_REPO_NAME", "aws-lc-docker-images-windows-x86" +) # Used when AWS CodeBuild needs to create web_hooks. 
GITHUB_REPO_OWNER = EnvUtil.get("GITHUB_REPO_OWNER", "aws") GITHUB_REPO_NAME = EnvUtil.get("GITHUB_REPO_NAME", "aws-lc") GITHUB_SOURCE_VERSION = EnvUtil.get("GITHUB_SOURCE_VERSION", "main") -GITHUB_TOKEN_SECRET_NAME = EnvUtil.get("GITHUB_TOKEN_SECRET_NAME", "aws-lc/ci/github/token") +GITHUB_TOKEN_SECRET_NAME = EnvUtil.get( + "GITHUB_TOKEN_SECRET_NAME", "aws-lc/ci/github/token" +) # Used when AWS CDK defines resources for Windows docker image build. -S3_BUCKET_NAME = EnvUtil.get("S3_FOR_WIN_DOCKER_IMG_BUILD", "aws-lc-windows-docker-image-build") WIN_EC2_TAG_KEY = EnvUtil.get("WIN_EC2_TAG_KEY", "aws-lc") -WIN_EC2_TAG_VALUE = EnvUtil.get("WIN_EC2_TAG_VALUE", "aws-lc-windows-docker-image-build") -SSM_DOCUMENT_NAME = EnvUtil.get("WIN_DOCKER_BUILD_SSM_DOCUMENT", "windows-ssm-document") +WIN_EC2_TAG_VALUE = EnvUtil.get( + "WIN_EC2_TAG_VALUE", "aws-lc-windows-docker-image-build" +) +SSM_DOCUMENT_NAME = EnvUtil.get( + "WIN_DOCKER_BUILD_SSM_DOCUMENT", "AWSLC-BuildWindowsDockerImages" +) + +S3_FOR_WIN_DOCKER_IMG_BUILD = EnvUtil.get( + "S3_FOR_WIN_DOCKER_IMG_BUILD", "aws-lc-windows-docker-image-build-s3" +) GITHUB_PUSH_CI_BRANCH_TARGETS = r"(main|fips-\d{4}-\d{2}-\d{2}.*)" diff --git a/tests/ci/cdk/util/yml_loader.py b/tests/ci/cdk/util/yml_loader.py index bef17e3e38..bdefa11766 100644 --- a/tests/ci/cdk/util/yml_loader.py +++ b/tests/ci/cdk/util/yml_loader.py @@ -12,13 +12,16 @@ class YmlLoader(object): """Responsible for loading yml file as python object.""" @staticmethod - def load(file_path, placeholder_map: typing.Optional[typing.Mapping[str, str]] = {}): + def load( + file_path, placeholder_map: typing.Optional[typing.Mapping[str, str]] = None + ): """ Used to load yml file and replace some placeholders if needed. :param file_path: path to the yml file. :param placeholder_map: a mapping from placeholder to corresponding value. :return: python object. """ + placeholder_map = placeholder_map or {} with open(file_path) as file: file_text = file.read() for key in placeholder_map.keys(): diff --git a/tests/ci/codebuild/linux-x86/run_bm_framework.yml b/tests/ci/codebuild/linux-x86/run_bm_framework.yml deleted file mode 100644 index 04e14054bb..0000000000 --- a/tests/ci/codebuild/linux-x86/run_bm_framework.yml +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 OR ISC
-
-version: 0.2
-
-env:
-  variables:
-    GOPROXY: https://proxy.golang.org,direct
-
-phases:
-  build:
-    commands:
-      - ./tests/ci/build_run_benchmarks.sh
diff --git a/tests/ci/docker_images/linux-aarch/common.sh b/tests/ci/docker_images/linux-aarch/common.sh
index d1390f9a65..b3d123c22a 100755
--- a/tests/ci/docker_images/linux-aarch/common.sh
+++ b/tests/ci/docker_images/linux-aarch/common.sh
@@ -4,6 +4,12 @@
 set -ex
 
+if [[ -n "${TRIGGER_TYPE:+x}" && "${TRIGGER_TYPE}" == "pipeline" ]]; then
+  TAG_SUFFIX="pending"
+else
+  TAG_SUFFIX="latest"
+fi
+
 function validate_input() {
   key="${1}"
   value="${2}"
@@ -20,10 +26,10 @@ function tag_and_push_img() {
   target="${2}"
   validate_input 'target' "${target}"
   img_push_date=$(date +%Y-%m-%d)
-  docker_img_with_latest="${target}_latest"
+  docker_img_with_tag="${target}_${TAG_SUFFIX}"
   docker_img_with_date="${target}_${img_push_date}"
-  docker tag "${source}" "${docker_img_with_latest}"
+  docker tag "${source}" "${docker_img_with_tag}"
   docker tag "${source}" "${docker_img_with_date}"
-  docker push "${docker_img_with_latest}"
+  docker push "${docker_img_with_tag}"
   docker push "${docker_img_with_date}"
 }
diff --git a/tests/ci/docker_images/linux-x86/common.sh b/tests/ci/docker_images/linux-x86/common.sh
index d1390f9a65..b3d123c22a 100755
--- a/tests/ci/docker_images/linux-x86/common.sh
+++ b/tests/ci/docker_images/linux-x86/common.sh
@@ -4,6 +4,12 @@
 set -ex
 
+if [[ -n "${TRIGGER_TYPE:+x}" && "${TRIGGER_TYPE}" == "pipeline" ]]; then
+  TAG_SUFFIX="pending"
+else
+  TAG_SUFFIX="latest"
+fi
+
 function validate_input() {
   key="${1}"
   value="${2}"
@@ -20,10 +26,10 @@ function tag_and_push_img() {
   target="${2}"
   validate_input 'target' "${target}"
   img_push_date=$(date +%Y-%m-%d)
-  docker_img_with_latest="${target}_latest"
+  docker_img_with_tag="${target}_${TAG_SUFFIX}"
   docker_img_with_date="${target}_${img_push_date}"
-  docker tag "${source}" "${docker_img_with_latest}"
+  docker tag "${source}" "${docker_img_with_tag}"
   docker tag "${source}" "${docker_img_with_date}"
-  docker push "${docker_img_with_latest}"
+  docker push "${docker_img_with_tag}"
   docker push "${docker_img_with_date}"
 }
diff --git a/tests/ci/docker_images/windows/push_images.ps1 b/tests/ci/docker_images/windows/push_images.ps1
index 4fde9b1f83..09481efaef 100644
--- a/tests/ci/docker_images/windows/push_images.ps1
+++ b/tests/ci/docker_images/windows/push_images.ps1
@@ -2,6 +2,11 @@
 # SPDX-License-Identifier: Apache-2.0 OR ISC
 
 $ECS_REPO=$args[0]
+# TRIGGER_TYPE is assumed to arrive as an environment variable (mirroring the
+# Linux common.sh scripts), so it is read via the $env: scope here.
+$TAG_SUFFIX = if (-not [string]::IsNullOrEmpty($env:TRIGGER_TYPE) -and $env:TRIGGER_TYPE -eq "pipeline") {
+    "pending"
+} else {
+    "latest"
+}
 
 if ($args[0] -eq $null) {
     # This is a ECS repository in our CI account
@@ -10,12 +15,12 @@
 
 Write-Host "$ECS_REPO"
 
-docker tag vs2015 ${ECS_REPO}:vs2015_latest
+docker tag vs2015 ${ECS_REPO}:vs2015_${TAG_SUFFIX}
 docker tag vs2015 ${ECS_REPO}:vs2015-$(Get-Date -UFormat %Y-%m-%d-%H)
-docker push ${ECS_REPO}:vs2015_latest
+docker push ${ECS_REPO}:vs2015_${TAG_SUFFIX}
 docker push ${ECS_REPO}:vs2015-$(Get-Date -UFormat %Y-%m-%d-%H)
 
-docker tag vs2017 ${ECS_REPO}:vs2017_latest
+docker tag vs2017 ${ECS_REPO}:vs2017_${TAG_SUFFIX}
 docker tag vs2017 ${ECS_REPO}:vs2017-$(Get-Date -UFormat %Y-%m-%d-%H)
-docker push ${ECS_REPO}:vs2017_latest
+docker push ${ECS_REPO}:vs2017_${TAG_SUFFIX}
 docker push ${ECS_REPO}:vs2017-$(Get-Date -UFormat %Y-%m-%d-%H)
diff --git a/tests/ci/run_bm_framework.sh b/tests/ci/run_bm_framework.sh
deleted file mode 100755
index a2c56cbdeb..0000000000
--- a/tests/ci/run_bm_framework.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/env bash -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 OR ISC - -set -exo pipefail - -# Please run from project root folder! -# You'll want to set the codebuild env variables set if running locally -source tests/ci/common_ssm_setup.sh - -# cleanup code -cleanup() { - set +e - # kill ec2 instances after we're done w/ them - for id in ${instance_ids};do - aws ec2 terminate-instances --instance-ids "${id}" - done - - # delete the various documents that we created - for name in ${ssm_document_names};do - aws ssm delete-document --name "${name}" - done -} - -# we wanna run the cleanup code on exit -trap cleanup EXIT - -# print some information for reference -echo GitHub PR Number: "${CODEBUILD_WEBHOOK_TRIGGER}" -echo GitHub Commit Version: "${CODEBUILD_SOURCE_VERSION}" -AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) -echo AWS Account ID: "${AWS_ACCOUNT_ID}" -echo GitHub Repo Link: "${CODEBUILD_SOURCE_REPO_URL}" - -# get information for ec2 instances -vpc_id="$(aws ec2 describe-vpcs --filter Name=tag:Name,Values=aws-lc-ci-bm-framework/aws-lc-ci-bm-framework-ec2-vpc --query Vpcs[*].VpcId --output text)" -sg_id="$(aws ec2 describe-security-groups --filter Name=vpc-id,Values="${vpc_id}" --filter Name=group-name,Values=bm_framework_ec2_sg --query SecurityGroups[*].GroupId --output text)" -subnet_id="$(aws ec2 describe-subnets --filter Name=vpc-id,Values="${vpc_id}" --filter Name=state,Values=available --filter Name=tag:Name,Values=aws-lc-ci-bm-framework/aws-lc-ci-bm-framework-ec2-vpc/PrivateSubnet1 --query Subnets[*].SubnetId --output text)" - -#$1 is nohw type, $2 is OPENSSL_ia32cap value -generate_ssm_document_file() { - # use sed to replace placeholder values inside preexisting document - sed -e "s,{AWS_ACCOUNT_ID},${AWS_ACCOUNT_ID},g" \ - -e "s,{PR_NUM},${CODEBUILD_WEBHOOK_TRIGGER},g" \ - -e "s,{COMMIT_ID},${CODEBUILD_SOURCE_VERSION},g" \ - -e "s,{GITHUB_REPO},${CODEBUILD_SOURCE_REPO_URL},g" \ - -e "s,{OPENSSL_ia32cap},$2,g" \ - -e "s,{NOHW_TYPE},$1,g" \ - tests/ci/cdk/cdk/ssm/bm_framework_ssm_document.yaml \ - >tests/ci/cdk/cdk/ssm/bm_framework_"$1"_ssm_document.yaml -} - -# create the ssm documents that will be used for the various ssm commands -generate_ssm_document_file "" "" -generate_ssm_document_file "nosha" "~0x100000000" -generate_ssm_document_file "noavx" "~0x1000000000000000:0xC0010020" - -#$1 for ami, $2 for instance-type, echos the instance id so we can capture the output -create_ec2_instances() { - local instance_id - instance_id="$(aws ec2 run-instances --image-id "$1" --count 1 \ - --instance-type "$2" --security-group-ids "${sg_id}" --subnet-id "${subnet_id}" \ - --block-device-mappings 'DeviceName="/dev/sda1",Ebs={DeleteOnTermination=True,VolumeSize=200}' \ - --tag-specifications 'ResourceType="instance",Tags=[{Key="aws-lc",Value="aws-lc-ci-bm-framework-ec2-x86-instance"}]' \ - --iam-instance-profile Name=aws-lc-ci-bm-framework-ec2-profile \ - --placement 'AvailabilityZone=us-west-2a' \ - --query Instances[*].InstanceId --output text)" - echo "${instance_id}" -} - -# create ec2 instances for x86 and arm -x86_id=$(create_ec2_instances "ami-01773ce53581acf22" "c5.metal") -arm_id=$(create_ec2_instances "ami-018e246d8c0f39ae5" "c6g.metal") -x86_nosha_id=$(create_ec2_instances "ami-01773ce53581acf22" "m5.metal") -x86_noavx_id=$(create_ec2_instances "ami-01773ce53581acf22" "c5.metal") -instance_ids="${x86_id} 
${arm_id} ${x86_nosha_id} ${x86_noavx_id}" - -# if any of the ids are blank, ec2 creation failed -if [[ -z "${x86_id}" ]] || [[ -z "${arm_id}" ]] || [[ -z "${x86_nosha_id}" ]] || [[ -z "${x86_noavx_id}" ]]; then - exit 1 -fi - -# Give a few minutes for the ec2 instances to be ready -sleep 60 - -for i in {1..30}; do - ready=true - for id in ${instance_ids}; do - status=$(aws ssm describe-instance-information --filter Key="InstanceIds",Values="${id}" \ - --query InstanceInformationList[*].PingStatus --output text) - if [ "${status}" != Online ]; then - ready=false - fi - done - if [ "${ready}" = true ]; then - break - fi - echo "Wait for instances to be able to run the SSM commands" - - # if we've hit the 30 minute mark and still aren't ready, then something has gone wrong - if [ "${i}" = 30 ]; then exit 1; fi - sleep 60 -done - -# Create, and run ssm command for arm & x86 -ssm_doc_name=$(create_ssm_document "bm_framework_") -nosha_ssm_doc_name=$(create_ssm_document "bm_framework_nosha") -noavx_ssm_doc_name=$(create_ssm_document "bm_framework_noavx") -ssm_document_names="${ssm_doc_name} ${nosha_ssm_doc_name} ${noavx_ssm_doc_name}" - -# delete contents of 'latest' folders before uploading anything new to them -aws s3 rm s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-pr-bucket/latest-${CODEBUILD_WEBHOOK_TRIGGER}" --recursive -aws s3 rm s3://"${AWS_ACCOUNT_ID}-aws-lc-ci-bm-framework-prod-bucket/latest" --recursive - -cloudwatch_group_name="aws-lc-ci-bm-framework-cw-logs" -x86_ssm_command_id=$(run_ssm_command "${ssm_doc_name}" "${x86_id}" "${cloudwatch_group_name}") -arm_ssm_command_id=$(run_ssm_command "${ssm_doc_name}" "${arm_id}" "${cloudwatch_group_name}") -nosha_ssm_command_id=$(run_ssm_command "${nosha_ssm_doc_name}" "${x86_nosha_id}" "${cloudwatch_group_name}") -noavx_ssm_command_id=$(run_ssm_command "${noavx_ssm_doc_name}" "${x86_noavx_id}" "${cloudwatch_group_name}") -ssm_command_ids="${x86_ssm_command_id} ${arm_ssm_command_id} ${nosha_ssm_command_id} ${noavx_ssm_command_id}" - -# Give some time for the commands to run -for i in {1..30}; do - echo "${i}: Continue to wait 3 min for SSM commands to finish." - sleep 180 - done=true - success=true - # for each command, check its status - for id in ${ssm_command_ids}; do - ssm_command_status="$(aws ssm list-commands --command-id "${id}" --query Commands[*].Status --output text)" - ssm_target_count="$(aws ssm list-commands --command-id "${id}" --query Commands[*].TargetCount --output text)" - ssm_completed_count="$(aws ssm list-commands --command-id "${id}" --query Commands[*].CompletedCount --output text)" - if [[ ${ssm_command_status} == 'Success' && ${ssm_completed_count} == "${ssm_target_count}" ]]; then - echo "SSM command ${id} finished successfully." - elif [[ ${ssm_command_status} == 'Failed' && ${ssm_completed_count} == "${ssm_target_count}" ]]; then - echo "SSM command ${id} failed." - success=false - else - done=false - fi - done - - # if after the loop finish and done is still true, then we're done - if [ "${done}" = true ]; then - echo "All SSM commands have finished." - - # if success is still true here, then none of the commands failed - if [ "${success}" != true ]; then - echo "An SSM command failed!" - exit 1 - fi - break - fi -done