1. Prepare and test the Docker image locally before deploying to AWS#
- Create a Dockerfile and a src directory containing an index.html file. The index.html can be just one line: “Helloworld!”
FROM nginx:latest
# Copy your source files to /etc/nginx/html
COPY ./src /etc/nginx/html
# Start Nginx
CMD ["nginx", "-g", "daemon off;"]
- Build the image
docker build -t nginx:v1 .
- Run nginx in Docker locally
docker run -d -p 80:80 nginx:v1
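- Confirm the page is served before moving on (this assumes nothing else is listening on port 80 locally):
curl http://localhost/
# should print the contents of src/index.html, e.g. Helloworld!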
2. Pick an IAM user (“suzurigo”) to use for this work and set the right permissions for this user. Here are the permissions needed.#
- AmazonEC2ContainerRegistryFullAccess
- Additional permissions needed later (see the inline-policy example after this list)
"ecs:CreateCluster",
"ecs:UpdateCluster",
"ecs:DeleteCluster",
"ecs:DescribeClusters",
"ecs:CreateService",
"ecs:UpdateService",
"ecs:DeleteService",
"ecs:DescribeServices",
"ecs:TagResource",
"ecs:RegisterTaskDefinition",
"ecs:DescribeTaskDefinition",
"ecs:DeregisterTaskDefinition",
"ecs:ListTasks",
"ecs:DescribeTasks",
"route53:ListHostedZones",
"route53:GetHostedZone",
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:ListTagsForResource",
"route53:GetChange",
3. Register the Docker image in AWS Elastic Container Registry (ECR)#
- Create a repository. This time, use the name “nginx”.
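- The repository can also be created from the CLI, using the same region and profile as the rest of this walkthrough:
aws ecr create-repository --repository-name nginx --region ap-northeast-1 --profile suzurigo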
4. Load the Docker image into AWS ECR#
- Log in to ECR, then tag and push the image
aws ecr get-login-password --region ap-northeast-1 --profile suzurigo | docker login --username AWS --password-stdin <your aws-account-id>.dkr.ecr.ap-northeast-1.amazonaws.com
docker tag nginx:v1 <your aws-account-id>.dkr.ecr.ap-northeast-1.amazonaws.com/nginx:v1
docker push <your aws-account-id>.dkr.ecr.ap-northeast-1.amazonaws.com/nginx:v1
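- To confirm the push, list the images in the repository:
aws ecr describe-images --repository-name nginx --region ap-northeast-1 --profile suzurigo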
5. Create VPC#
- Create a VPC named “cat-vpc” with CIDR “192.168.10.0/24”
- Create 2 subnets for the load balancer: “cat-subnet-1”:“192.168.10.0/26” and “cat-subnet-2”:“192.168.10.64/26”
- Define an internet gateway attached to the VPC
- Define a security group attached to the VPC
- Define a route table with a default route to the internet gateway and associate it with both subnets
- Terraform for the VPC (the commands to apply it follow the variable definitions)
provider "aws" {
region = <Your region>
}
resource "aws_vpc" "vpc_l" {
cidr_block = "192.168.10.0/24"
instance_tenancy = "default"
tags = {
Name = var.vpc_name
}
}
resource "aws_subnet" "subnet_l_1" {
vpc_id = aws_vpc.vpc_l.id
cidr_block = "192.168.10.0/26"
availability_zone = <pick an availability zone>
tags = {
Name = var.subnet_name_1
}
}
resource "aws_subnet" "subnet_l_2" {
vpc_id = aws_vpc.vpc_l.id
cidr_block = "192.168.10.64/26"
availability_zone = <pick an availability zone>
tags = {
Name = var.subnet_name_2
}
}
resource "aws_internet_gateway" "igw_l" {
vpc_id = aws_vpc.vpc_l.id
tags = {
Name = var.internet_gateway_name
}
}
resource "aws_route_table" "rt_l" {
vpc_id = aws_vpc.vpc_l.id
route {
cidr_block = "192.168.10.0/24"
gateway_id = "local"
}
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw_l.id
}
tags = {
Name = var.route_table_name
}
}
resource "aws_route_table_association" "rta_l_1" {
subnet_id = aws_subnet.subnet_l_1.id
route_table_id = aws_route_table.rt_l.id
}
resource "aws_route_table_association" "rta_l_2" {
subnet_id = aws_subnet.subnet_l_2.id
route_table_id = aws_route_table.rt_l.id
}
resource "aws_security_group" "sg_l" {
description = "Allow traffic on port range 0-8888"
vpc_id = aws_vpc.vpc_l.id
ingress {
from_port = 0
to_port = 8888
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = var.security_group_name
}
}
# variables
variable "vpc_name" {
description = "Name of the VPC"
type = string
default = "cat-vpc"
}
variable "subnet_name_1" {
description = "Name of the first Subnet"
type = string
default = "cat-subnet-1"
}
variable "subnet_name_2" {
description = "Name of the second Subnet"
type = string
default = "cat-subnet-2"
}
variable "internet_gateway_name" {
description = "Name of the Internet Gateway"
type = string
default = "cat-igw"
}
variable "route_table_name" {
description = "Name of the Route Table"
type = string
default = "cat-rt"
}
variable "security_group_name" {
description = "Name of the Security Group"
type = string
default = "cat-sg"
}
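- Assuming the VPC Terraform above is saved in its own directory (separate from the ALB/ECS configuration of steps 6 and 8), apply it with the usual workflow:
terraform init
terraform plan
terraform apply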
6. Set up the Application Load Balancer#
- Load balancer
- Load balancer target group
- Load balancer listener
- Create an A record in Route 53 for the DNS name used for external access
- The Terraform below belongs to the same configuration as step 8 and relies on the locals, data sources, and variables defined there
resource "aws_lb" "main" {
name = "${var.app_name}-alb"
internal = false
load_balancer_type = "application"
security_groups = [data.aws_security_group.main.id]
subnets = [data.aws_subnet.selected_1.id, data.aws_subnet.selected_2.id]
enable_deletion_protection = false
idle_timeout = 1800
tags = {
Name = "${var.app_name}-alb"
}
}
resource "aws_lb_target_group" "main" {
name = "${var.app_name}-tg"
port = local.lb_target_port
protocol = "HTTP"
vpc_id = data.aws_vpc.selected.id
target_type = "ip"
health_check {
interval = 30
path = "/"
timeout = 5
healthy_threshold = 2
unhealthy_threshold = 2
}
tags = {
Name = "${var.app_name}-tg"
}
}
resource "aws_lb_listener" "http" {
load_balancer_arn = aws_lb.main.arn
port = local.lb_listener_port
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.main.arn
}
}
resource "aws_route53_record" "main" {
zone_id = data.aws_route53_zone.main.zone_id
name = "${var.app_name}.${var.domain_name}"
type = "A"
alias {
name = aws_lb.main.dns_name
zone_id = aws_lb.main.zone_id
evaluate_target_health = true
}
}
7. Create an S3 bucket to store the static content for nginx to serve#
- for example: my-s3-bucket
- enable static website hosting and note the website endpoint of this bucket: http://my-s3-bucket.s3-website-ap-northeast-1.amazonaws.com
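- One way to create the bucket, enable website hosting, and upload the content from the CLI (the website endpoint additionally needs public read access via a bucket policy, which is not shown here):
aws s3 mb s3://my-s3-bucket --region ap-northeast-1 --profile suzurigo
aws s3 website s3://my-s3-bucket --index-document index.html --profile suzurigo
aws s3 sync ./src s3://my-s3-bucket --profile suzurigo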
8. Create the ECS cluster/service/task#
provider "aws" {
region = <your region>
}
locals {
ecs_cluster_name = "${var.app_name}-cluster"
ecs_service_name = "${var.app_name}-service"
ecs_task_name = "${var.app_name}-task"
ecs_container = "${var.app_name}-con"
container_image = "your docker image"
cpu_architecture = "ARM64"
ecs_container_port = 80
ecs_container_host_port = 80
lb_listener_port = 80
lb_target_port = 80
}
data "aws_vpc" "selected" {
filter {
name = "tag:Name"
values = [var.vpc_name]
}
}
data "aws_subnet" "selected_1" {
filter {
name = "tag:Name"
values = [var.subnet_name_1]
}
filter {
name = "vpc-id"
values = [data.aws_vpc.selected.id]
}
}
data "aws_subnet" "selected_2" {
filter {
name = "tag:Name"
values = [var.subnet_name_2]
}
filter {
name = "vpc-id"
values = [data.aws_vpc.selected.id]
}
}
data "aws_route53_zone" "main" {
name = var.domain_name
}
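# Assumed missing pieces, not shown in the original snippets: the ALB and ECS resources
# reference var.app_name, var.domain_name, and a security group, so a minimal sketch is
# included here. The security group created in step 5 is looked up by its Name tag;
# vpc_name, subnet_name_1/2, and security_group_name must also be defined in this
# configuration (for example by reusing the variables file from step 5).
variable "app_name" {
description = "Application name used as a prefix for resource names"
type = string
default = "cat" # illustrative default
}
variable "domain_name" {
description = "Route 53 hosted zone name used for the A record"
type = string
}
data "aws_security_group" "main" {
filter {
name = "tag:Name"
values = [var.security_group_name]
}
vpc_id = data.aws_vpc.selected.id
}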
resource "aws_ecs_cluster" "cluster" {
name = local.ecs_cluster_name
}
resource "aws_ecs_service" "service" {
name = local.ecs_service_name
cluster = aws_ecs_cluster.cluster.id
task_definition = aws_ecs_task_definition.task.arn
deployment_controller {
type = "ECS"
}
desired_count = 1
launch_type = "FARGATE"
load_balancer {
target_group_arn = aws_lb_target_group.main.arn
container_name = local.ecs_container
container_port = local.ecs_container_port
}
lifecycle {
ignore_changes = [desired_count, load_balancer]
}
network_configuration {
subnets = [data.aws_subnet.selected_1.id, data.aws_subnet.selected_2.id]
security_groups = [data.aws_security_group.main.id]
assign_public_ip = true
}
deployment_minimum_healthy_percent = 100
deployment_maximum_percent = 200
deployment_circuit_breaker {
enable = true
rollback = true
}
tags = {
Name = local.ecs_service_name
}
enable_ecs_managed_tags = true
depends_on = [aws_lb_listener.http]
}
resource "aws_ecs_task_definition" "task" {
family = local.ecs_task_name
execution_role_arn = "arn:aws:iam::<your aws-account-id>:role/ecsTaskExecutionRole" # assumes the standard ecsTaskExecutionRole already exists in your account
network_mode = "awsvpc"
cpu = "512"
memory = "1024"
requires_compatibilities = ["FARGATE"]
runtime_platform {
cpu_architecture = local.cpu_architecture
operating_system_family = "LINUX"
}
container_definitions = jsonencode([
{
name = local.ecs_container
image = local.container_image
cpu = 0
memory = 1024
essential = true
portMappings = [
{
name = "${local.ecs_container}-80-tcp"
containerPort = local.ecs_container_port
hostPort = local.ecs_container_host_port
protocol = "tcp"
appProtocol = "http"
}
]
environment = []
environmentFiles = []
mountPoints = []
volumesFrom = []
ulimits = []
systemControls = []
}
])
tags = {
Name = local.ecs_task_name
}
}
output "end_point" {
description = "End Point to access"
value = "http://${var.app_name}.${var.domain_name}"
}
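- The ALB Terraform from step 6 and the ECS Terraform above belong to one configuration; assuming they are saved together in their own directory, apply them and read the endpoint from the output:
terraform init
terraform apply
terraform output end_point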
9. Now the nginx server is live. You can access it at the endpoint shown in the Terraform output.#
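- To verify, check the service deployment and fetch the page (replace the placeholders with your app_name and domain):
aws ecs describe-services --cluster <app_name>-cluster --services <app_name>-service --region ap-northeast-1 --profile suzurigo --query "services[0].deployments"
curl http://<app_name>.<your domain>/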
10. To have nginx act as a reverse proxy for the S3 bucket, customize the nginx configuration and rebuild the Docker image.#
- Customize the nginx.conf (shown after the Dockerfile below) so that it proxies requests to the S3 website endpoint
- A new Dockerfile that copies the customized nginx.conf into the image
FROM nginx:latest
COPY ./nginx.conf /etc/nginx/nginx.conf
CMD ["nginx", "-g", "daemon off;"]
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
server {
listen 80;
server_name <contents server name>;
location / {
proxy_pass <your website behind nginx, e.g. the S3 website endpoint from step 7>;
proxy_set_header Host <contents server name>;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
}
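- One way to roll out the new image, following the names used in the earlier steps: rebuild, push under a new tag, point container_image at the new URI, and apply the Terraform again.
docker build -t nginx:v2 .
docker tag nginx:v2 <your aws-account-id>.dkr.ecr.ap-northeast-1.amazonaws.com/nginx:v2
docker push <your aws-account-id>.dkr.ecr.ap-northeast-1.amazonaws.com/nginx:v2
# update container_image in the ECS locals to the new :v2 image URI, then:
terraform apply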
11. With step 10 in place, you can manage the web content in S3 without changing anything in the container running nginx.#
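- For example, updating the page is just an upload to the bucket; nginx serves the new content on the next request because it proxies S3 directly:
aws s3 cp ./src/index.html s3://my-s3-bucket/index.html --profile suzurigo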
12. Take a break and enjoy the results!#