Two ALB Target Groups, One ECS, with Pulumi & Python

Introduction
When designing a scalable and flexible architecture on AWS, it's common to use an Application Load Balancer (ALB) to distribute traffic across multiple targets, such as EC2 instances and ECS tasks. However, a challenge arises when you must support both EC2 instances and ECS Fargate tasks with the same ALB, especially when the target types differ. This article explains why and how multiple target groups for an ALB can be used to support both EC2 instances and ECS Fargate tasks.
Problem Statement
In a recent work project, I encountered an issue where I needed to attach both EC2 instances and ECS Fargate tasks to an ALB. The ALB target group for ECS Fargate tasks requires target_type="ip", while the target group for EC2 instances requires target_type="instance". Attempting to use a single target group for both resulted in compatibility issues: when I registered an EC2 instance ID into a target group created with target_type="ip" in Pulumi, AWS misinterpreted the instance ID as an IPv6 address, and the creation attempt failed.
Solution: Multiple Target Groups
To resolve this issue, we decided to create two separate target groups:
- Target Group for EC2 Instances: uses target_type="instance".
- Target Group for ECS Fargate Tasks: uses target_type="ip".
This approach allows us to leverage the strengths of both EC2 instances and ECS Fargate tasks while ensuring compatibility with the ALB.
Implementation
Here’s a step-by-step guide on implementing this solution using Pulumi in Python.
Step 1: Define the ALB with Two Target Groups
First, we must create an ALB with two target groups: one for EC2 instances and one for ECS Fargate tasks.
import pulumi
import pulumi_aws as aws
from pulumi import ResourceOptions, Output
from typing import Mapping, List
class ALBArgs:
    """Bundles every input required to construct an `ALB` component resource.

    This is a plain value holder: each constructor parameter is stored
    one-to-one as an instance attribute and later read by `JCProjectALB`.
    """

    def __init__(
        self,
        base_tags: Mapping[str, str],
        internal: bool,
        subnets: List[str],
        security_groups: List[str],
        enable_deletion_protection: bool,
        vpc_id: str,
        webserver_id: str,
        alb_listener: str,
    ):
        # Store each argument verbatim, in declaration order.
        self.base_tags = base_tags
        self.internal = internal
        self.subnets = subnets
        self.security_groups = security_groups
        self.enable_deletion_protection = enable_deletion_protection
        self.vpc_id = vpc_id
        self.webserver_id = webserver_id
        self.alb_listener = alb_listener
class JCProjectALB(pulumi.ComponentResource):
    """Application Load Balancer component with two target groups.

    EC2 instances must register with ``target_type="instance"`` while ECS
    Fargate tasks (awsvpc networking) must register with ``target_type="ip"``,
    so one target group cannot serve both. This component creates:

      * the ALB itself,
      * an ``instance`` target group with ``args.webserver_id`` attached,
      * an ``ip`` target group for the ECS service to register tasks into,
      * an HTTP listener whose default action forwards to the ``ip`` group.

    Callers read ``alb_target_group_ip``, ``alb_target_group_instance`` and
    ``listener`` from the instance (see main.py).
    """

    def __init__(self, resource_name: str, args: ALBArgs, opts: ResourceOptions = None):
        super().__init__("cority:aws:alb", resource_name, {}, opts)
        # Copy the arguments onto the instance for use below and by callers.
        self.alb_name = resource_name
        self.internal = args.internal
        self.subnets = args.subnets
        self.security_groups = args.security_groups
        self.enable_deletion_protection = args.enable_deletion_protection
        self.vpc_id = args.vpc_id
        self.webserver_id = args.webserver_id
        self.alb_listener = args.alb_listener
        self.base_tags = args.base_tags

        self.alb = aws.lb.LoadBalancer(
            self.alb_name,
            internal=self.internal,
            load_balancer_type="application",
            security_groups=self.security_groups,
            subnets=self.subnets,
            enable_deletion_protection=self.enable_deletion_protection,
            tags=self.base_tags,
            opts=ResourceOptions(parent=self),
        )

        # Target group for EC2 instances (registered by instance ID).
        # Name is truncated — presumably to stay under AWS's 32-char
        # target-group name limit once Pulumi appends its suffix; confirm.
        self.alb_target_group_instance = aws.lb.TargetGroup(
            f"{self.alb_name[:20]}-tg-instance",
            port=80,
            protocol="HTTP",
            target_type="instance",
            vpc_id=self.vpc_id,
            opts=ResourceOptions(parent=self.alb),
        )

        # Target group for Fargate tasks (registered by task ENI IP address).
        self.alb_target_group_ip = aws.lb.TargetGroup(
            f"{self.alb_name[:20]}-tg-ip",
            port=80,
            protocol="HTTP",
            target_type="ip",
            vpc_id=self.vpc_id,
            opts=ResourceOptions(parent=self.alb),
        )

        # Attach the EC2 web server to the "instance" target group. The ECS
        # service registers its own tasks into the "ip" group, so no
        # attachment is created for that one here.
        # NOTE(review): the [:10] truncation differs from the [:20] used
        # above; left as-is so existing deployed resources keep their names.
        self.alb_target_group_attachment = aws.lb.TargetGroupAttachment(
            f"{self.alb_name[:10]}-tg-attachment",
            target_id=self.webserver_id,
            target_group_arn=self.alb_target_group_instance.arn,
            port=80,
            opts=ResourceOptions(parent=self.alb_target_group_instance),
        )

        # HTTP listener; protocol is now explicit instead of relying on the
        # provider defaulting ALB listeners to HTTP. The default action
        # forwards to the Fargate ("ip") target group.
        self.listener = aws.lb.Listener(
            f"{self.alb_name}-listener",
            load_balancer_arn=self.alb.arn,
            port=80,
            protocol="HTTP",
            default_actions=[
                aws.lb.ListenerDefaultActionArgs(
                    type="forward",
                    target_group_arn=self.alb_target_group_ip.arn,
                )
            ],
            opts=ResourceOptions(parent=self.alb),
        )

        # NOTE(review): export keys are fixed strings, so instantiating this
        # component twice in one stack would collide — confirm single use.
        pulumi.export("alb_name", self.alb_name)
        pulumi.export("listener_arn", self.listener.arn)
        pulumi.export("targetgroup_instance_arn", self.alb_target_group_instance.arn)
        pulumi.export("targetgroup_ip_arn", self.alb_target_group_ip.arn)
        self.register_outputs(
            {
                "alb_name": self.alb_name,
                "listener_arn": self.listener.arn,
                "targetgroup_instance_arn": self.alb_target_group_instance.arn,
                "targetgroup_ip_arn": self.alb_target_group_ip.arn,
            }
        )
Step 2: Define the ECS Service to Use the IP Target Group
Next, we must configure the ECS service to use the target group with target_type="ip".
import pulumi
import pulumi_aws as aws
from pulumi import ResourceOptions, Output
from typing import Mapping, List
import applicationloadbalancer
import compute
class ECSArgs:
    """Bundles every input required to construct an `ECS` component resource.

    A plain value object: each constructor parameter is stored one-to-one as
    an instance attribute and later read back by `JCProjectECS`.
    """

    def __init__(
        self,
        base_tags: Mapping[str, str],
        internal: bool,
        subnets: List[str],
        security_groups: List[str],
        enable_deletion_protection: bool,
        vpc_id: str,
        container_name: str,
        alb_listener: aws.lb.Listener,
        alb_target_group_arn: str,
    ):
        # Store each argument verbatim, in declaration order.
        self.base_tags = base_tags
        self.internal = internal
        self.subnets = subnets
        self.security_groups = security_groups
        self.enable_deletion_protection = enable_deletion_protection
        self.vpc_id = vpc_id
        self.container_name = container_name
        self.alb_listener = alb_listener
        self.alb_target_group_arn = alb_target_group_arn
class JCProjectECS(pulumi.ComponentResource):
    """ECS Fargate component wired into an existing ALB's "ip" target group.

    Creates a cluster, a task-execution IAM role, a single-container
    (nginx on port 80) Fargate task definition, and a service that
    registers its task IPs in ``args.alb_target_group_arn``.
    """

    def __init__(self, resource_name: str, args: ECSArgs, opts: ResourceOptions = None):
        super().__init__("cority:aws:ecs", resource_name, {}, opts)
        cluster_name = resource_name
        # Copy the arguments onto the instance for use below and by callers.
        self.internal = args.internal
        self.subnets = args.subnets
        self.security_groups = args.security_groups
        self.enable_deletion_protection = args.enable_deletion_protection
        self.vpc_id = args.vpc_id
        self.base_tags = args.base_tags
        self.alb_target_group_arn = args.alb_target_group_arn
        self.alb_listener = args.alb_listener
        self.container_name = args.container_name

        # ECS cluster, parented to the component so the Pulumi resource tree
        # matches JCProjectALB's convention (these were previously unparented).
        self.cluster = aws.ecs.Cluster(
            cluster_name,
            opts=ResourceOptions(parent=self),
        )

        # IAM role that the ECS agent assumes to pull images and ship logs.
        task_execution_role = aws.iam.Role(
            "taskExecutionRole",
            assume_role_policy="""{
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Principal": {
                            "Service": "ecs-tasks.amazonaws.com"
                        },
                        "Action": "sts:AssumeRole"
                    }
                ]
            }""",
            opts=ResourceOptions(parent=self),
        )

        # AWS-managed policy granting the standard task-execution permissions.
        task_execution_role_policy = aws.iam.RolePolicyAttachment(
            "taskExecutionRolePolicy",
            role=task_execution_role.name,
            policy_arn="arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
            opts=ResourceOptions(parent=task_execution_role),
        )

        # Fargate task definition: one nginx container listening on port 80.
        # container_definitions is a static JSON string, so it is passed
        # directly — the former Output.all().apply(...) wrapper added nothing.
        self.task_definition = aws.ecs.TaskDefinition(
            "task_definition",
            family="my-fargate-task",
            cpu="256",
            memory="512",
            network_mode="awsvpc",  # required for Fargate; each task gets its own ENI/IP
            requires_compatibilities=["FARGATE"],
            execution_role_arn=task_execution_role.arn,
            container_definitions="""[
                {
                    "name": "investorportalcont",
                    "image": "nginx",
                    "cpu": 256,
                    "memory": 512,
                    "essential": true,
                    "portMappings": [
                        {
                            "containerPort": 80,
                            "hostPort": 80,
                            "protocol": "tcp"
                        }
                    ]
                }
            ]""",
            opts=ResourceOptions(parent=self),
        )

        # Service that registers task IPs in the ALB's "ip" target group.
        # depends_on the listener so the target group is attached to the ALB
        # before the service tries to register targets into it.
        # NOTE(review): assign_public_ip is not set; tasks in public subnets
        # without a NAT cannot pull the nginx image — confirm the networking.
        self.service = aws.ecs.Service(
            "ecs_service",
            cluster=self.cluster.arn,
            task_definition=self.task_definition.arn,
            desired_count=1,
            launch_type="FARGATE",
            network_configuration=aws.ecs.ServiceNetworkConfigurationArgs(
                subnets=args.subnets,
                security_groups=args.security_groups,
            ),
            load_balancers=[
                aws.ecs.ServiceLoadBalancerArgs(
                    target_group_arn=args.alb_target_group_arn,
                    container_name=args.container_name,
                    container_port=80,
                ),
            ],
            opts=ResourceOptions(parent=self, depends_on=[args.alb_listener]),
        )

        pulumi.export("cluster_name", self.cluster.name)
        pulumi.export("service_name", self.service.name)
        self.register_outputs(
            {
                "cluster_name": self.cluster.name,
                "service_name": self.service.name,
            }
        )
Step 3: Putting everything together main.py
We must now code main.py to pass both ALB target groups through: one for the ECS service and one for the EC2 instance. The EC2 instance is attached inside the ALB component itself, via the `TargetGroupAttachment` that points at `alb_target_group_instance`, while the ECS service receives the Fargate target group through `alb_target_group_arn=build_alb.alb_target_group_ip.arn`.
import pulumi
import pulumi_aws as aws
import os
import sys

#### Module path setup: the reusable modules live in ../pkg. Resolve the
#### path against this file's location (not the current working directory)
#### so the imports work no matter where `pulumi up` is launched from, then
#### add it to sys.path so the modules can be imported successfully.
module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "pkg"))
if module_path not in sys.path:
    sys.path.append(module_path)

# Smoke-test the import path and surface the outcome as stack outputs, so a
# broken module path is visible in `pulumi up` instead of failing opaquely.
try:
    import compute  ## Representative module used to verify the path works.

    pulumi.export("module_import_status", "success")
except ImportError as e:
    pulumi.export("module_import_status", "failed")
    pulumi.export("import_error_message", str(e))
#### END OF import checking

# Custom infrastructure modules (all resolved from ../pkg).
import compute
import applicationloadbalancer
import networking
import ecs
# VPC to deploy into (replace with your actual VPC ID).
vpc_id = "vpc-0f0587443694534b4"

# Networking: look up the existing VPC's subnets and security groups.
cnetworking = networking.ExistingVPC(vpc_id)
vpcsubnet_id = cnetworking.subnet_id
vpcsecgroup_id = cnetworking.security_group_id

# Plain list of security-group ID strings, shared by every component below.
security_group_strings = list(vpcsecgroup_id)

# Compute: a single EC2 web server placed in the first subnet.
ccompute = compute.CreateEC2Instances(
    "my-ec2-instance",
    compute.EC2Args(
        base_tags={"Project": "Investor Portal"},
        internal=False,
        ami_instance_size="t2.micro",
        subnet_id=vpcsubnet_id[0],
        security_groups=security_group_strings,
    ),
)

# ALB with two target groups: "instance" (EC2) and "ip" (Fargate). The EC2
# instance is attached to the "instance" group inside the component.
alb_args = applicationloadbalancer.ALBArgs(
    subnets=vpcsubnet_id,
    security_groups=security_group_strings,  # same list as the other components
    base_tags={"Project": "Investor Portal"},
    internal=False,
    enable_deletion_protection=False,
    vpc_id=vpc_id,
    webserver_id=ccompute.server_instance.id,
    alb_listener="corityalblistener",
)
build_alb = applicationloadbalancer.JCProjectALB(
    "investorpalb",
    alb_args,
    opts=pulumi.ResourceOptions(depends_on=[ccompute]),
)

# NOTE(review): this exports the whole component object, which serializes to
# an opaque value; exporting e.g. the ALB DNS name would be more useful.
pulumi.export("alb_status", build_alb)

# ECS service registered in the ALB's "ip" target group; it depends on the
# listener so target registration cannot race listener creation.
ecs_args = ecs.ECSArgs(
    base_tags={"Project": "Investor Portal"},
    internal=False,
    subnets=vpcsubnet_id,
    security_groups=security_group_strings,
    enable_deletion_protection=False,
    vpc_id=vpc_id,
    container_name="investorportalcont",
    alb_target_group_arn=build_alb.alb_target_group_ip.arn,
    alb_listener=build_alb.listener,
)
ecs_cluster = ecs.JCProjectECS(
    "my-ecs", ecs_args, opts=pulumi.ResourceOptions(depends_on=[build_alb.listener])
)

# Stack outputs.
pulumi.export("ecs_cluster_name", ecs_cluster.cluster.name)
pulumi.export("ecs_service_name", ecs_cluster.service.name)
Conclusion
When working with cloud infrastructure and Pulumi, you sometimes have to think outside the box as different errors come flying at you. In this instance, I hope I have shown you how to get around the simple issue of putting two completely different resources onto the same ALB.
Happy coding,
Cloud Dude



