7 changes: 4 additions & 3 deletions .github/actions/setup-opentofu/action.yaml
@@ -32,12 +32,13 @@ runs:
run: |
variables=(
"apply_database_updates_immediately" "consumer_container_count"
"consumer_cpu" "consumer_memory" "database_instance_count"
"consumer_container_max" "consumer_cpu" "consumer_memory"
"consumer_message_threshold" "database_instance_count"
"database_skip_final_snapshot" "deletion_protection"
"deployment_environments" "environment" "export_expiration"
"image_tags_mutable" "key_recovery_period" "log_level" "program"
"project" "redoer_container_count" "redoer_cpu" "redoer_memory"
"region" "repository"
"project" "queue_empty_threshold" "redoer_container_count"
"redoer_cpu" "redoer_memory" "region" "repository"
)
for var in ${variables[@]}; do
name="TF_VAR_$(echo $var | tr '[:lower:]' '[:upper:]')"
6 changes: 6 additions & 0 deletions .github/workflows/deploy.yaml
@@ -42,8 +42,10 @@ jobs:
AWS_ROLE_ARN: ${{ secrets.AWS_ROLE_ARN }}
TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY: ${{ secrets.TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY }}
TF_VAR_CONSUMER_CONTAINER_COUNT: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_COUNT }}
TF_VAR_CONSUMER_CONTAINER_MAX: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_MAX }}
TF_VAR_CONSUMER_CPU: ${{ secrets.TF_VAR_CONSUMER_CPU }}
TF_VAR_CONSUMER_MEMORY: ${{ secrets.TF_VAR_CONSUMER_MEMORY }}
TF_VAR_CONSUMER_MESSAGE_THRESHOLD: ${{ secrets.TF_VAR_CONSUMER_MESSAGE_THRESHOLD }}
TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT: ${{ secrets.TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT }}
TF_VAR_DELETION_PROTECTION: ${{ secrets.TF_VAR_DELETION_PROTECTION }}
TF_VAR_DEPLOYMENT_ENVIRONMENTS: ${{ secrets.TF_VAR_DEPLOYMENT_ENVIRONMENTS }}
@@ -54,6 +56,7 @@ jobs:
TF_VAR_LOG_LEVEL: ${{ secrets.TF_VAR_LOG_LEVEL }}
TF_VAR_PROGRAM: ${{ secrets.TF_VAR_PROGRAM }}
TF_VAR_PROJECT: ${{ secrets.TF_VAR_PROJECT }}
TF_VAR_QUEUE_EMPTY_THRESHOLD: ${{ secrets.TF_VAR_QUEUE_EMPTY_THRESHOLD }}
TF_VAR_REDOER_CONTAINER_COUNT: ${{ secrets.TF_VAR_REDOER_CONTAINER_COUNT }}
TF_VAR_REDOER_CPU: ${{ secrets.TF_VAR_REDOER_CPU }}
TF_VAR_REDOER_MEMORY: ${{ secrets.TF_VAR_REDOER_MEMORY }}
@@ -95,8 +98,10 @@ jobs:
env:
TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY: ${{ secrets.TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY }}
TF_VAR_CONSUMER_CONTAINER_COUNT: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_COUNT }}
TF_VAR_CONSUMER_CONTAINER_MAX: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_MAX }}
TF_VAR_CONSUMER_CPU: ${{ secrets.TF_VAR_CONSUMER_CPU }}
TF_VAR_CONSUMER_MEMORY: ${{ secrets.TF_VAR_CONSUMER_MEMORY }}
TF_VAR_CONSUMER_MESSAGE_THRESHOLD: ${{ secrets.TF_VAR_CONSUMER_MESSAGE_THRESHOLD }}
TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT: ${{ secrets.TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT }}
TF_VAR_DATABASE_INSTANCE_COUNT: ${{ secrets.TF_VAR_DATABASE_INSTANCE_COUNT }}
TF_VAR_DELETION_PROTECTION: ${{ secrets.TF_VAR_DELETION_PROTECTION }}
@@ -108,6 +113,7 @@ jobs:
TF_VAR_LOG_LEVEL: ${{ secrets.TF_VAR_LOG_LEVEL }}
TF_VAR_PROJECT: ${{ secrets.TF_VAR_PROJECT }}
TF_VAR_PROGRAM: ${{ secrets.TF_VAR_PROGRAM }}
TF_VAR_QUEUE_EMPTY_THRESHOLD: ${{ secrets.TF_VAR_QUEUE_EMPTY_THRESHOLD }}
TF_VAR_REDOER_CONTAINER_COUNT: ${{ secrets.TF_VAR_REDOER_CONTAINER_COUNT }}
TF_VAR_REDOER_CPU: ${{ secrets.TF_VAR_REDOER_CPU }}
TF_VAR_REDOER_MEMORY: ${{ secrets.TF_VAR_REDOER_MEMORY }}
3 changes: 3 additions & 0 deletions .github/workflows/export.yaml
@@ -38,8 +38,10 @@ jobs:
env:
TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY: ${{ secrets.TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY }}
TF_VAR_CONSUMER_CONTAINER_COUNT: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_COUNT }}
TF_VAR_CONSUMER_CONTAINER_MAX: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_MAX }}
TF_VAR_CONSUMER_CPU: ${{ secrets.TF_VAR_CONSUMER_CPU }}
TF_VAR_CONSUMER_MEMORY: ${{ secrets.TF_VAR_CONSUMER_MEMORY }}
TF_VAR_CONSUMER_MESSAGE_THRESHOLD: ${{ secrets.TF_VAR_CONSUMER_MESSAGE_THRESHOLD }}
TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT: ${{ secrets.TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT }}
TF_VAR_DATABASE_INSTANCE_COUNT: ${{ secrets.TF_VAR_DATABASE_INSTANCE_COUNT }}
TF_VAR_DELETION_PROTECTION: ${{ secrets.TF_VAR_DELETION_PROTECTION }}
@@ -51,6 +53,7 @@ jobs:
TF_VAR_LOG_LEVEL: ${{ secrets.TF_VAR_LOG_LEVEL }}
TF_VAR_PROJECT: ${{ secrets.TF_VAR_PROJECT }}
TF_VAR_PROGRAM: ${{ secrets.TF_VAR_PROGRAM }}
TF_VAR_QUEUE_EMPTY_THRESHOLD: ${{ secrets.TF_VAR_QUEUE_EMPTY_THRESHOLD }}
TF_VAR_REPO_OIDC_ARN: ${{ secrets.TF_VAR_REPO_OIDC_ARN }}
TF_VAR_REPOSITORY: ${{ secrets.TF_VAR_REPOSITORY }}
TF_VAR_VPC_CIDR: ${{ secrets.TF_VAR_VPC_CIDR }}
3 changes: 3 additions & 0 deletions .github/workflows/launch-tools.yaml
@@ -46,8 +46,10 @@ jobs:
env:
TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY: ${{ secrets.TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY }}
TF_VAR_CONSUMER_CONTAINER_COUNT: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_COUNT }}
TF_VAR_CONSUMER_CONTAINER_MAX: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_MAX }}
TF_VAR_CONSUMER_CPU: ${{ secrets.TF_VAR_CONSUMER_CPU }}
TF_VAR_CONSUMER_MEMORY: ${{ secrets.TF_VAR_CONSUMER_MEMORY }}
TF_VAR_CONSUMER_MESSAGE_THRESHOLD: ${{ secrets.TF_VAR_CONSUMER_MESSAGE_THRESHOLD }}
TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT: ${{ secrets.TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT }}
TF_VAR_DATABASE_INSTANCE_COUNT: ${{ secrets.TF_VAR_DATABASE_INSTANCE_COUNT }}
TF_VAR_DELETION_PROTECTION: ${{ secrets.TF_VAR_DELETION_PROTECTION }}
@@ -59,6 +61,7 @@ jobs:
TF_VAR_LOG_LEVEL: ${{ secrets.TF_VAR_LOG_LEVEL }}
TF_VAR_PROJECT: ${{ secrets.TF_VAR_PROJECT }}
TF_VAR_PROGRAM: ${{ secrets.TF_VAR_PROGRAM }}
TF_VAR_QUEUE_EMPTY_THRESHOLD: ${{ secrets.TF_VAR_QUEUE_EMPTY_THRESHOLD }}
TF_VAR_REPO_OIDC_ARN: ${{ secrets.TF_VAR_REPO_OIDC_ARN }}
TF_VAR_REPOSITORY: ${{ secrets.TF_VAR_REPOSITORY }}
TF_VAR_VPC_CIDR: ${{ secrets.TF_VAR_VPC_CIDR }}
9 changes: 9 additions & 0 deletions .github/workflows/plan.yaml
@@ -30,10 +30,14 @@ on:
required: false
TF_VAR_CONSUMER_CONTAINER_COUNT:
required: false
TF_VAR_CONSUMER_CONTAINER_MAX:
required: false
TF_VAR_CONSUMER_CPU:
required: false
TF_VAR_CONSUMER_MEMORY:
required: false
TF_VAR_CONSUMER_MESSAGE_THRESHOLD:
required: false
TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT:
required: false
TF_VAR_DELETION_PROTECTION:
@@ -54,6 +58,8 @@ on:
required: false
TF_VAR_PROJECT:
required: false
TF_VAR_QUEUE_EMPTY_THRESHOLD:
required: false
TF_VAR_REDOER_CONTAINER_COUNT:
required: false
TF_VAR_REDOER_CPU:
@@ -113,8 +119,10 @@ jobs:
env:
TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY: ${{ secrets.TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY }}
TF_VAR_CONSUMER_CONTAINER_COUNT: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_COUNT }}
TF_VAR_CONSUMER_CONTAINER_MAX: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_MAX }}
TF_VAR_CONSUMER_CPU: ${{ secrets.TF_VAR_CONSUMER_CPU }}
TF_VAR_CONSUMER_MEMORY: ${{ secrets.TF_VAR_CONSUMER_MEMORY }}
TF_VAR_CONSUMER_MESSAGE_THRESHOLD: ${{ secrets.TF_VAR_CONSUMER_MESSAGE_THRESHOLD }}
TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT: ${{ secrets.TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT }}
TF_VAR_DATABASE_INSTANCE_COUNT: ${{ secrets.TF_VAR_DATABASE_INSTANCE_COUNT }}
TF_VAR_DELETION_PROTECTION: ${{ secrets.TF_VAR_DELETION_PROTECTION }}
@@ -126,6 +134,7 @@ jobs:
TF_VAR_LOG_LEVEL: ${{ secrets.TF_VAR_LOG_LEVEL }}
TF_VAR_PROJECT: ${{ secrets.TF_VAR_PROJECT }}
TF_VAR_PROGRAM: ${{ secrets.TF_VAR_PROGRAM }}
TF_VAR_QUEUE_EMPTY_THRESHOLD: ${{ secrets.TF_VAR_QUEUE_EMPTY_THRESHOLD }}
TF_VAR_REDOER_CONTAINER_COUNT: ${{ secrets.TF_VAR_REDOER_CONTAINER_COUNT }}
TF_VAR_REDOER_CPU: ${{ secrets.TF_VAR_REDOER_CPU }}
TF_VAR_REDOER_MEMORY: ${{ secrets.TF_VAR_REDOER_MEMORY }}
3 changes: 3 additions & 0 deletions .github/workflows/pull-request.yaml
@@ -49,15 +49,18 @@ jobs:
TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY: ${{ secrets.TF_VAR_APPLY_DATABASE_UPDATES_IMMEDIATELY }}
TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT: ${{ secrets.TF_VAR_DATABASE_SKIP_FINAL_SNAPSHOT }}
TF_VAR_CONSUMER_CONTAINER_COUNT: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_COUNT }}
TF_VAR_CONSUMER_CONTAINER_MAX: ${{ secrets.TF_VAR_CONSUMER_CONTAINER_MAX }}
TF_VAR_CONSUMER_CPU: ${{ secrets.TF_VAR_CONSUMER_CPU }}
TF_VAR_CONSUMER_MEMORY: ${{ secrets.TF_VAR_CONSUMER_MEMORY }}
TF_VAR_CONSUMER_MESSAGE_THRESHOLD: ${{ secrets.TF_VAR_CONSUMER_MESSAGE_THRESHOLD }}
TF_VAR_DELETION_PROTECTION: ${{ secrets.TF_VAR_DELETION_PROTECTION }}
TF_VAR_DEPLOYMENT_ENVIRONMENTS: ${{ secrets.TF_VAR_DEPLOYMENT_ENVIRONMENTS }}
TF_VAR_EXPORT_EXPIRATION: ${{ secrets.TF_VAR_EXPORT_EXPIRATION }}
TF_VAR_IMAGE_TAGS_MUTABLE: ${{ secrets.TF_VAR_IMAGE_TAGS_MUTABLE }}
TF_VAR_KEY_RECOVERY_PERIOD: ${{ secrets.TF_VAR_KEY_RECOVERY_PERIOD }}
TF_VAR_LOG_LEVEL: ${{ secrets.TF_VAR_LOG_LEVEL }}
TF_VAR_PROGRAM: ${{ secrets.TF_VAR_PROGRAM }}
TF_VAR_QUEUE_EMPTY_THRESHOLD: ${{ secrets.TF_VAR_QUEUE_EMPTY_THRESHOLD }}
TF_VAR_REDOER_CONTAINER_COUNT: ${{ secrets.TF_VAR_REDOER_CONTAINER_COUNT }}
TF_VAR_REDOER_CPU: ${{ secrets.TF_VAR_REDOER_CPU }}
TF_VAR_REDOER_MEMORY: ${{ secrets.TF_VAR_REDOER_MEMORY }}
7 changes: 7 additions & 0 deletions README.md
@@ -119,6 +119,13 @@ sz_command -C add_record \
PEOPLE 1 '{"NAME_FULL":"Robert Smith", "DATE_OF_BIRTH":"7/4/1976", "PHONE_NUMBER":"555-555-2088"}'
```

You can also run commands using the tools container without entering an
interactive shell. For example, to purge the repository, you could run:

```bash
docker compose run tools sz_command -C "purge_repository --FORCEPURGE"
```

#### Loading sample data

From inside the tools container:
35 changes: 19 additions & 16 deletions tofu/config/service/main.tf
@@ -18,15 +18,16 @@ module "inputs" {
module "system" {
source = "../../modules/system"

environment = var.environment
project = var.project
export_expiration = var.export_expiration
key_recovery_period = var.key_recovery_period
logging_bucket = module.inputs.values["logging/bucket"]
logging_key_arn = module.inputs.values["logging/key"]
log_level = var.log_level
tags = merge({ awsApplication : module.inputs.values["application/tag"] }, var.tags)
vpc_id = module.inputs.values["vpc/id"]
environment = var.environment
project = var.project
export_expiration = var.export_expiration
key_recovery_period = var.key_recovery_period
logging_bucket = module.inputs.values["logging/bucket"]
logging_key_arn = module.inputs.values["logging/key"]
log_level = var.log_level
tags = merge({ awsApplication : module.inputs.values["application/tag"] }, var.tags)
vpc_id = module.inputs.values["vpc/id"]
queue_empty_threshold = var.queue_empty_threshold

database_subnets = split(",", module.inputs.values["vpc/private_subnets"])
apply_database_updates_immediately = var.apply_database_updates_immediately
@@ -36,11 +37,13 @@ module "system" {
image_tag = local.image_tag
image_tags_mutable = var.image_tags_mutable

container_subnets = split(",", module.inputs.values["vpc/private_subnets"])
consumer_container_count = var.consumer_container_count
consumer_cpu = var.consumer_cpu
consumer_memory = var.consumer_memory
redoer_container_count = var.redoer_container_count
redoer_cpu = var.redoer_cpu
redoer_memory = var.redoer_memory
container_subnets = split(",", module.inputs.values["vpc/private_subnets"])
consumer_container_count = var.consumer_container_count
consumer_container_max = var.consumer_container_max
consumer_cpu = var.consumer_cpu
consumer_memory = var.consumer_memory
consumer_message_threshold = var.consumer_message_threshold
redoer_container_count = var.redoer_container_count
redoer_cpu = var.redoer_cpu
redoer_memory = var.redoer_memory
}
18 changes: 18 additions & 0 deletions tofu/config/service/variables.tf
@@ -10,6 +10,12 @@ variable "consumer_container_count" {
default = 1
}

variable "consumer_container_max" {
type = number
description = "Maximum number of consumer containers to run."
default = 10
}

variable "consumer_cpu" {
type = number
description = "Number of virtual CPUs to allocate to each consumer container."
@@ -22,6 +28,12 @@ variable "consumer_memory" {
default = 4096
}

variable "consumer_message_threshold" {
type = number
description = "Number of messages in the SQS queue that will trigger scaling up the number of consumer containers."
default = 250000
}

variable "database_instance_count" {
type = number
description = "Number of instances in the database cluster."
@@ -103,6 +115,12 @@ variable "project" {
default = "sqs-senzing"
}

variable "queue_empty_threshold" {
type = number
description = "Number of minutes that the SQS queue must have zero messages before we consider it empty."
default = 15
}

variable "redoer_container_count" {
type = number
description = "Desired number of redoer containers to run."
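
Taken together, the new variables give the consumer service a scaling envelope. A minimal sketch of how they might be overridden in a tfvars file — only the variable names and defaults come from this PR, the values below are purely illustrative:

```hcl
# Hypothetical environment.tfvars overrides; names match the variables added in
# this PR, values are examples rather than recommendations.
consumer_container_count   = 2       # baseline/desired consumer containers
consumer_container_max     = 10      # ceiling for the autoscaling target
consumer_message_threshold = 250000  # SQS queue depth that triggers scaling up consumers
queue_empty_threshold      = 15      # minutes at zero messages before the queue counts as empty
```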
3 changes: 3 additions & 0 deletions tofu/modules/persistent_service/locals.tf
@@ -0,0 +1,3 @@
locals {
prefix = join("-", compact([var.project, var.environment, var.service]))
}
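
For reference, a sketch of what the `prefix` local resolves to, using the `project` default from this PR and hypothetical `environment` and `service` values:

```hcl
# project = "sqs-senzing" (default), environment = "dev", service = "consumer" (last two hypothetical)
# prefix  = join("-", compact(["sqs-senzing", "dev", "consumer"])) = "sqs-senzing-dev-consumer"
# compact() drops empty strings, so an unset service would yield "sqs-senzing-dev".
```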
22 changes: 17 additions & 5 deletions tofu/modules/persistent_service/main.tf
@@ -29,11 +29,13 @@ module "service" {
source = "HENNGE/ecs/aws//modules/core/service"
version = "5.3.0"

cluster = var.cluster_arn
name = join("-", compact([var.project, var.environment, var.service]))
create_task_definition = false
task_definition_arn = module.task.task_definition_arn
desired_count = var.desired_containers
cluster = var.cluster_name
name = local.prefix
create_task_definition = false
task_definition_arn = module.task.task_definition_arn

# Ignore changes to the desired count to prevent conflicts with auto-scaling.
ignore_desired_count_changes = true

launch_type = "FARGATE"
task_requires_compatibilities = ["FARGATE"]
@@ -48,3 +50,13 @@

tags = var.tags
}

module "scaling_target" {
source = "HENNGE/ecs/aws//modules/core/ecs-autoscaling-target"
version = "5.3.0"

ecs_cluster_name = var.cluster_name
ecs_service_name = module.service.name
min_capacity = var.desired_containers
max_capacity = var.max_containers
}
10 changes: 10 additions & 0 deletions tofu/modules/persistent_service/outputs.tf
@@ -13,6 +13,16 @@ output "service_name" {
value = module.service.name
}

output "scale_down_policy_arn" {
description = "ARN of the scale-down auto-scaling policy."
value = var.scale_down_policy.enabled ? aws_appautoscaling_policy.down["this"].arn : null
}

output "scale_up_policy_arn" {
description = "ARN of the scale-up auto-scaling policy."
value = var.scale_up_policy.enabled ? aws_appautoscaling_policy.up["this"].arn : null
}

output "task_definition_arn" {
description = "ARN of the ECS task definition."
value = module.task.task_definition_arn
62 changes: 62 additions & 0 deletions tofu/modules/persistent_service/scaling.tf
@@ -0,0 +1,62 @@
resource "aws_appautoscaling_policy" "up" {
for_each = var.scale_up_policy.enabled ? toset(["this"]) : toset([])

name = "${local.prefix}-up"
policy_type = "StepScaling"
resource_id = module.scaling_target.resource_id
scalable_dimension = module.scaling_target.scalable_dimension
service_namespace = module.scaling_target.service_namespace

step_scaling_policy_configuration {
adjustment_type = "ExactCapacity"
cooldown = 60

dynamic "step_adjustment" {
for_each = range(1, var.max_containers + 1)

content {
scaling_adjustment = step_adjustment.value

# If we're scaling from 0 to 1, we want the lower bound to be set to our
# starting value, minus 1 to make it inclusive. Otherwise, we want to
# calculate the lower bound based on the step size and current step
# value, and add 1 to make sure we're into the next step.
metric_interval_lower_bound = (step_adjustment.value == 1
? var.scale_up_policy.start - 1
: (step_adjustment.value - 1) * var.scale_up_policy.step + 1
)

# If we're at the max containers, we don't want to set an upper bound
# since we can't scale any higher. Otherwise, we calculate the upper
# bound based on the step size and current step value, and add 1 because
# the upper bound is exclusive.
metric_interval_upper_bound = (step_adjustment.value == var.max_containers
? null
: step_adjustment.value * var.scale_up_policy.step + 1
)
}
}
}
}

resource "aws_appautoscaling_policy" "down" {
for_each = var.scale_down_policy.enabled ? toset(["this"]) : toset([])

name = "${local.prefix}-down"
policy_type = "StepScaling"
resource_id = module.scaling_target.resource_id
scalable_dimension = module.scaling_target.scalable_dimension
service_namespace = module.scaling_target.service_namespace

step_scaling_policy_configuration {
# We've already waited for the queue to be empty for a threshold of time, so
# we don't need a big delay here.
adjustment_type = "ExactCapacity"
cooldown = 60

step_adjustment {
metric_interval_upper_bound = 0
scaling_adjustment = 0
}
}
}
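
To make the step math above concrete, here is a sketch of the intervals the dynamic block generates, assuming `scale_up_policy.start` and `scale_up_policy.step` are both wired to the `consumer_message_threshold` default of 250000 and `max_containers` is 10 — that wiring happens outside this diff, so treat the numbers as illustrative:

```hcl
# Hypothetical inputs: start = 250000, step = 250000, max_containers = 10.
# step_adjustment.value = 1  -> scaling_adjustment = 1,  lower = 250000 - 1     = 249999,  upper = 1 * 250000 + 1 = 250001
# step_adjustment.value = 2  -> scaling_adjustment = 2,  lower = 1 * 250000 + 1 = 250001,  upper = 2 * 250000 + 1 = 500001
# step_adjustment.value = 3  -> scaling_adjustment = 3,  lower = 2 * 250000 + 1 = 500001,  upper = 3 * 250000 + 1 = 750001
# ...
# step_adjustment.value = 10 -> scaling_adjustment = 10, lower = 9 * 250000 + 1 = 2250001, upper = null (no cap at max capacity)
#
# Each interval hands ExactCapacity the step value, so capacity is pinned to whichever
# band the alarm metric falls into.
```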