Deploying Full-Stack Applications to AWS and GCP: A Complete DevOps Guide
· 9 min read
Cloud deployment is essential for modern applications. Whether you choose AWS or Google Cloud Platform, understanding deployment strategies, infrastructure as code, and CI/CD pipelines is crucial for successful production deployments.
AWS Deployment Architecture
Setting Up Your AWS Infrastructure
Let's start with a typical full-stack application infrastructure on AWS:
# infrastructure/cloudformation/main.yaml
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Full-stack application infrastructure'

Parameters:
  Environment:
    Type: String
    Default: 'dev'
    AllowedValues: ['dev', 'staging', 'prod']
  ApplicationName:
    Type: String
    Default: 'my-app'

Resources:
  # VPC Configuration
  VPC:
    Type: AWS::EC2::VPC
    Properties:
      CidrBlock: 10.0.0.0/16
      EnableDnsHostnames: true
      EnableDnsSupport: true
      Tags:
        - Key: Name
          Value: !Sub '${ApplicationName}-${Environment}-vpc'

  # Public Subnets (one per AZ for the internet-facing ALB)
  PublicSubnet1:
    Type: AWS::EC2::Subnet
    Properties:
      VpcId: !Ref VPC
      CidrBlock: 10.0.1.0/24
      AvailabilityZone: !Select [0, !GetAZs '']
      MapPublicIpOnLaunch: true
  PublicSubnet2:
    Type: AWS::EC2::Subnet
    Properties:
      VpcId: !Ref VPC
      CidrBlock: 10.0.2.0/24
      AvailabilityZone: !Select [1, !GetAZs '']
      MapPublicIpOnLaunch: true

  # Private Subnets (RDS lives here, no public IPs)
  PrivateSubnet1:
    Type: AWS::EC2::Subnet
    Properties:
      VpcId: !Ref VPC
      CidrBlock: 10.0.3.0/24
      AvailabilityZone: !Select [0, !GetAZs '']
  PrivateSubnet2:
    Type: AWS::EC2::Subnet
    Properties:
      VpcId: !Ref VPC
      CidrBlock: 10.0.4.0/24
      AvailabilityZone: !Select [1, !GetAZs '']

  # Internet Gateway
  InternetGateway:
    Type: AWS::EC2::InternetGateway
  InternetGatewayAttachment:
    Type: AWS::EC2::VPCGatewayAttachment
    Properties:
      InternetGatewayId: !Ref InternetGateway
      VpcId: !Ref VPC

  # Application Load Balancer
  ApplicationLoadBalancer:
    Type: AWS::ElasticLoadBalancingV2::LoadBalancer
    Properties:
      Name: !Sub '${ApplicationName}-${Environment}-alb'
      Scheme: internet-facing
      Type: application
      Subnets:
        - !Ref PublicSubnet1
        - !Ref PublicSubnet2
      SecurityGroups:
        - !Ref ALBSecurityGroup

  # ECS Cluster
  ECSCluster:
    Type: AWS::ECS::Cluster
    Properties:
      ClusterName: !Sub '${ApplicationName}-${Environment}'
      CapacityProviders:
        - FARGATE
        - FARGATE_SPOT

  # Secrets Manager secret backing the database master password.
  # This resource was missing in the original template even though
  # DatabaseInstance resolved '${DatabaseSecret}' — the stack could not deploy.
  DatabaseSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Sub '${ApplicationName}-${Environment}-db-credentials'
      GenerateSecretString:
        SecretStringTemplate: '{"username": "dbadmin"}'
        GenerateStringKey: password
        PasswordLength: 16
        ExcludeCharacters: '"@/\'

  # RDS Database
  DatabaseSubnetGroup:
    Type: AWS::RDS::DBSubnetGroup
    Properties:
      DBSubnetGroupDescription: Subnet group for RDS database
      SubnetIds:
        - !Ref PrivateSubnet1
        - !Ref PrivateSubnet2
  DatabaseInstance:
    Type: AWS::RDS::DBInstance
    Properties:
      DBInstanceIdentifier: !Sub '${ApplicationName}-${Environment}-db'
      DBInstanceClass: db.t3.micro
      Engine: postgres
      # NOTE(review): 13.7 has reached end of standard support on RDS —
      # confirm an available minor/major version before deploying.
      EngineVersion: '13.7'
      AllocatedStorage: 20
      DBName: !Sub '${ApplicationName}db'
      MasterUsername: dbadmin
      # Dynamic reference pulls the password from the secret above at deploy time.
      MasterUserPassword: !Sub '{{resolve:secretsmanager:${DatabaseSecret}:SecretString:password}}'
      VPCSecurityGroups:
        - !Ref DatabaseSecurityGroup
      DBSubnetGroupName: !Ref DatabaseSubnetGroup
      BackupRetentionPeriod: 7
      # Multi-AZ only in prod to keep non-prod cost down.
      MultiAZ: !If [IsProd, true, false]

  # Security Groups
  ALBSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Security group for Application Load Balancer
      VpcId: !Ref VPC
      SecurityGroupIngress:
        - IpProtocol: tcp
          FromPort: 80
          ToPort: 80
          CidrIp: 0.0.0.0/0
        - IpProtocol: tcp
          FromPort: 443
          ToPort: 443
          CidrIp: 0.0.0.0/0
  ECSSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Security group for ECS tasks
      VpcId: !Ref VPC
      SecurityGroupIngress:
        # App port reachable only from the ALB, not from the internet.
        - IpProtocol: tcp
          FromPort: 3000
          ToPort: 3000
          SourceSecurityGroupId: !Ref ALBSecurityGroup
  DatabaseSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Security group for RDS database
      VpcId: !Ref VPC
      SecurityGroupIngress:
        # Postgres reachable only from the ECS tasks.
        - IpProtocol: tcp
          FromPort: 5432
          ToPort: 5432
          SourceSecurityGroupId: !Ref ECSSecurityGroup

Conditions:
  IsProd: !Equals [!Ref Environment, 'prod']

Outputs:
  VPCId:
    Description: VPC ID
    Value: !Ref VPC
    Export:
      Name: !Sub '${ApplicationName}-${Environment}-VPC'
  ALBArn:
    Description: Application Load Balancer ARN
    Value: !Ref ApplicationLoadBalancer
    Export:
      Name: !Sub '${ApplicationName}-${Environment}-ALB'
ECS Task Definition and Service
# infrastructure/ecs/task-definition.yaml
# NOTE(review): the !Sub / !Ref intrinsics below only resolve when this
# document is embedded in a CloudFormation template (e.g. as the Properties of
# an AWS::ECS::TaskDefinition resource). If this file is registered directly
# with `aws ecs register-task-definition`, replace them with literal
# account/region values — confirm the intended usage.
family: my-app-api
networkMode: awsvpc
requiresCompatibilities:
  - FARGATE
cpu: '256'
memory: '512'
executionRoleArn: !Sub 'arn:aws:iam::${AWS::AccountId}:role/ecsTaskExecutionRole'
taskRoleArn: !Sub 'arn:aws:iam::${AWS::AccountId}:role/ecsTaskRole'
containerDefinitions:
  - name: api
    image: !Sub '${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/my-app-api:latest'
    portMappings:
      - containerPort: 3000
        protocol: tcp
    environment:
      - name: NODE_ENV
        value: production
      - name: PORT
        value: '3000'
    # Sensitive values are injected from Secrets Manager at task start,
    # never baked into the image or the task definition.
    secrets:
      - name: DATABASE_URL
        valueFrom: !Sub 'arn:aws:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:my-app-db-credentials'
      - name: JWT_SECRET
        valueFrom: !Sub 'arn:aws:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:my-app-jwt-secret'
    logConfiguration:
      logDriver: awslogs
      options:
        awslogs-group: !Sub '/ecs/my-app-${Environment}'
        awslogs-region: !Ref 'AWS::Region'
        awslogs-stream-prefix: ecs
    healthCheck:
      command:
        - CMD-SHELL
        - curl -f http://localhost:3000/health || exit 1
      interval: 30
      timeout: 5
      retries: 3
      # Grace period before the first failed health check counts.
      startPeriod: 60
Terraform Alternative
# infrastructure/terraform/main.tf
terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    # Declared explicitly: random_password below pulls in this provider,
    # which the original configuration never declared.
    random = {
      source  = "hashicorp/random"
      version = "~> 3.0"
    }
  }

  backend "s3" {
    bucket = "my-app-terraform-state"
    key    = "infrastructure/terraform.tfstate"
    region = "us-west-2"
  }
}

provider "aws" {
  region = var.aws_region
}

# Variables
variable "environment" {
  description = "Environment name"
  type        = string
  default     = "dev"
}

variable "application_name" {
  description = "Application name"
  type        = string
  default     = "my-app"
}

variable "aws_region" {
  description = "AWS region"
  type        = string
  default     = "us-west-2"
}

# Data sources
data "aws_availability_zones" "available" {
  state = "available"
}

# VPC Module
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name = "${var.application_name}-${var.environment}"
  cidr = "10.0.0.0/16"

  azs             = slice(data.aws_availability_zones.available.names, 0, 2)
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24"]
  public_subnets  = ["10.0.101.0/24", "10.0.102.0/24"]

  # Dedicated database subnets so module.vpc.database_subnet_group (used by
  # module "db" below) actually exists; without these it is an empty string.
  database_subnets             = ["10.0.201.0/24", "10.0.202.0/24"]
  create_database_subnet_group = true

  enable_nat_gateway = true
  enable_vpn_gateway = false

  tags = {
    Environment = var.environment
    Terraform   = "true"
  }
}

# ECS Cluster
resource "aws_ecs_cluster" "main" {
  name = "${var.application_name}-${var.environment}"

  setting {
    name  = "containerInsights"
    value = "enabled"
  }

  tags = {
    Environment = var.environment
  }
}

# `capacity_providers` and `default_capacity_provider_strategy` were removed
# from aws_ecs_cluster in AWS provider v4+ (this file pins ~> 5.0); the
# dedicated association resource below is the supported replacement.
resource "aws_ecs_cluster_capacity_providers" "main" {
  cluster_name = aws_ecs_cluster.main.name

  capacity_providers = ["FARGATE", "FARGATE_SPOT"]

  default_capacity_provider_strategy {
    capacity_provider = "FARGATE"
    weight            = 1
  }
}

# Application Load Balancer
resource "aws_lb" "main" {
  name               = "${var.application_name}-${var.environment}-alb"
  internal           = false
  load_balancer_type = "application"
  security_groups    = [aws_security_group.alb.id]
  subnets            = module.vpc.public_subnets

  enable_deletion_protection = false

  tags = {
    Environment = var.environment
  }
}

# Security Groups
resource "aws_security_group" "alb" {
  name_prefix = "${var.application_name}-${var.environment}-alb"
  vpc_id      = module.vpc.vpc_id

  ingress {
    protocol    = "tcp"
    from_port   = 80
    to_port     = 80
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    protocol    = "tcp"
    from_port   = 443
    to_port     = 443
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Environment = var.environment
  }
}

# Security group for the RDS instance — referenced by module "db" below but
# never defined in the original configuration.
resource "aws_security_group" "rds" {
  name_prefix = "${var.application_name}-${var.environment}-rds"
  vpc_id      = module.vpc.vpc_id

  ingress {
    protocol  = "tcp"
    from_port = 5432
    to_port   = 5432
    # TODO(review): tighten to the ECS task security group once one is
    # defined; VPC-wide access is a stopgap.
    cidr_blocks = [module.vpc.vpc_cidr_block]
  }

  tags = {
    Environment = var.environment
  }
}

# RDS Database
module "db" {
  source = "terraform-aws-modules/rds/aws"

  identifier = "${var.application_name}-${var.environment}-db"

  engine            = "postgres"
  engine_version    = "13.7"
  instance_class    = "db.t3.micro"
  allocated_storage = 20

  db_name  = "${var.application_name}db"
  username = "dbadmin"
  password = random_password.db_password.result

  vpc_security_group_ids = [aws_security_group.rds.id]
  db_subnet_group_name   = module.vpc.database_subnet_group

  backup_retention_period = 7
  backup_window           = "03:00-04:00"
  maintenance_window      = "Mon:04:00-Mon:05:00"

  # Acceptable for dev/demo; enable a final snapshot for production data.
  skip_final_snapshot = true

  tags = {
    Environment = var.environment
  }
}

# Random password for database
resource "random_password" "db_password" {
  length  = 16
  special = true
}
Google Cloud Platform Deployment
GCP Infrastructure with Cloud Run
# infrastructure/gcp/cloudbuild.yaml
steps:
  # Build the container image
  - name: 'gcr.io/cloud-builders/docker'
    args:
      - 'build'
      - '-t'
      - 'gcr.io/$PROJECT_ID/my-app-api:$COMMIT_SHA'
      - '.'
    dir: 'backend'

  # Push the container image to Container Registry
  - name: 'gcr.io/cloud-builders/docker'
    args:
      - 'push'
      - 'gcr.io/$PROJECT_ID/my-app-api:$COMMIT_SHA'

  # Deploy to Cloud Run
  - name: 'gcr.io/google.com/cloudsdktool/cloud-sdk'
    entrypoint: 'gcloud'
    args:
      - 'run'
      - 'deploy'
      - 'my-app-api'
      - '--image=gcr.io/$PROJECT_ID/my-app-api:$COMMIT_SHA'
      - '--region=us-central1'
      - '--platform=managed'
      - '--allow-unauthenticated'
      - '--set-env-vars=NODE_ENV=production'
      - '--set-secrets=DATABASE_URL=DATABASE_URL:latest'
      - '--memory=512Mi'
      - '--cpu=1'
      - '--max-instances=10'
      - '--min-instances=1'

  # Deploy frontend to Firebase Hosting
  - name: 'node:16'
    entrypoint: 'npm'
    args: ['ci']
    dir: 'frontend'
  - name: 'node:16'
    entrypoint: 'npm'
    args: ['run', 'build']
    dir: 'frontend'
    env:
      # TODO(review): hard-coded Cloud Run URL placeholder — replace with the
      # real service URL or a substitution variable before use.
      - 'REACT_APP_API_URL=https://my-app-api-xxxxx-uc.a.run.app'
  - name: 'gcr.io/$PROJECT_ID/firebase'
    args:
      - 'deploy'
      - '--only=hosting'
    dir: 'frontend'

# Listing the image here makes Cloud Build record it as a build artifact
# (digest shown in build results / provenance).
images:
  - 'gcr.io/$PROJECT_ID/my-app-api:$COMMIT_SHA'

timeout: 1200s
options:
  logging: CLOUD_LOGGING_ONLY
Terraform for GCP
# infrastructure/gcp-terraform/main.tf
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "~> 4.0"
    }
  }
}

provider "google" {
  project = var.project_id
  region  = var.region
}

variable "project_id" {
  description = "GCP Project ID"
  type        = string
}

variable "region" {
  description = "GCP region"
  type        = string
  default     = "us-central1"
}

variable "environment" {
  description = "Environment name"
  type        = string
  default     = "dev"
}

# Enable required APIs
resource "google_project_service" "services" {
  for_each = toset([
    "run.googleapis.com",
    "cloudbuild.googleapis.com",
    "secretmanager.googleapis.com",
    "sql-component.googleapis.com",
    "sqladmin.googleapis.com",
    "cloudresourcemanager.googleapis.com",
    # Required by google_vpc_access_connector below; was missing from the
    # original list, so the connector creation would fail on a fresh project.
    "vpcaccess.googleapis.com",
    # Required by google_service_networking_connection below.
    "servicenetworking.googleapis.com",
  ])

  project = var.project_id
  service = each.value

  disable_dependent_services = true
}

# Cloud SQL Database
resource "google_sql_database_instance" "main" {
  name             = "my-app-${var.environment}-db"
  database_version = "POSTGRES_13"
  region           = var.region

  settings {
    tier = "db-f1-micro"

    ip_configuration {
      # Private IP only — reachable solely through the VPC peering below.
      ipv4_enabled    = false
      private_network = google_compute_network.vpc.self_link
    }

    backup_configuration {
      enabled                        = true
      start_time                     = "03:00"
      point_in_time_recovery_enabled = true
    }

    database_flags {
      name  = "log_checkpoints"
      value = "on"
    }
  }

  deletion_protection = false

  depends_on = [
    google_service_networking_connection.private_vpc_connection
  ]
}

# VPC Network
resource "google_compute_network" "vpc" {
  name                    = "my-app-${var.environment}-vpc"
  auto_create_subnetworks = false
}

# Private services access (peering range for Cloud SQL's private IP)
resource "google_compute_global_address" "private_ip_address" {
  name          = "private-ip-address"
  purpose       = "VPC_PEERING"
  address_type  = "INTERNAL"
  prefix_length = 16
  network       = google_compute_network.vpc.id
}

resource "google_service_networking_connection" "private_vpc_connection" {
  network                 = google_compute_network.vpc.id
  service                 = "servicenetworking.googleapis.com"
  reserved_peering_ranges = [google_compute_global_address.private_ip_address.name]
}

# Cloud Run Service
resource "google_cloud_run_service" "api" {
  name     = "my-app-api"
  location = var.region

  template {
    spec {
      containers {
        image = "gcr.io/${var.project_id}/my-app-api:latest"

        env {
          name  = "NODE_ENV"
          value = "production"
        }

        env {
          name = "DATABASE_URL"
          value_from {
            secret_key_ref {
              name = google_secret_manager_secret.database_url.secret_id
              key  = "latest"
            }
          }
        }

        resources {
          limits = {
            cpu    = "1000m"
            memory = "512Mi"
          }
        }

        ports {
          container_port = 3000
        }
      }

      container_concurrency = 80
      timeout_seconds       = 300
    }

    metadata {
      annotations = {
        "autoscaling.knative.dev/maxScale"        = "10"
        "autoscaling.knative.dev/minScale"        = "1"
        "run.googleapis.com/vpc-access-connector" = google_vpc_access_connector.connector.name
      }
    }
  }

  traffic {
    percent         = 100
    latest_revision = true
  }

  depends_on = [google_project_service.services]
}

# VPC Access Connector for Cloud Run (egress into the private VPC)
resource "google_vpc_access_connector" "connector" {
  name          = "my-app-connector"
  region        = var.region
  ip_cidr_range = "10.8.0.0/28"
  network       = google_compute_network.vpc.name

  depends_on = [google_project_service.services]
}

# Secret Manager
resource "google_secret_manager_secret" "database_url" {
  secret_id = "DATABASE_URL"

  replication {
    # NOTE(review): `automatic` is the google provider ~> 4.0 syntax; it was
    # renamed to an `auto {}` block in provider 5.x — update if upgrading.
    automatic = true
  }

  depends_on = [google_project_service.services]
}

# Cloud Build Trigger
resource "google_cloudbuild_trigger" "main" {
  name = "my-app-${var.environment}-trigger"

  github {
    owner = "your-github-username"
    name  = "my-app"
    push {
      # prod deploys from main; every other environment from develop.
      branch = var.environment == "prod" ? "main" : "develop"
    }
  }

  filename = "infrastructure/gcp/cloudbuild.yaml"

  substitutions = {
    _ENVIRONMENT = var.environment
  }
}
CI/CD Pipelines
GitHub Actions for AWS
# .github/workflows/deploy-aws.yml
name: Deploy to AWS

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]

env:
  AWS_REGION: us-west-2
  ECR_REPOSITORY: my-app-api

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '18'
          cache: 'npm'
      - name: Install dependencies
        run: npm ci
      - name: Run tests
        run: npm test
      - name: Run linting
        run: npm run lint
      - name: Type checking
        run: npm run type-check

  build-and-deploy:
    needs: test
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop'
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1
      - name: Build, tag, and push image to Amazon ECR
        id: build-image
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          IMAGE_TAG: ${{ github.sha }}
        run: |
          # Also tag and push :latest — the ECS task definition pins :latest,
          # so without this the force-new-deployment below would simply
          # redeploy the previous image.
          docker build \
            -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG \
            -t $ECR_REGISTRY/$ECR_REPOSITORY:latest .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:latest
          echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> $GITHUB_OUTPUT
      - name: Deploy to ECS
        env:
          IMAGE_URI: ${{ steps.build-image.outputs.image }}
        run: |
          # Update ECS service with new image (tasks re-pull :latest)
          aws ecs update-service \
            --cluster my-app-${{ github.ref == 'refs/heads/main' && 'prod' || 'dev' }} \
            --service my-app-api \
            --force-new-deployment
      - name: Deploy frontend to S3
        run: |
          cd frontend
          npm ci
          npm run build
          aws s3 sync build/ s3://my-app-frontend-${{ github.ref == 'refs/heads/main' && 'prod' || 'dev' }} --delete
          aws cloudfront create-invalidation --distribution-id ${{ secrets.CLOUDFRONT_DISTRIBUTION_ID }} --paths "/*"
GitLab CI for Multi-Cloud
# .gitlab-ci.yml
stages:
  - test
  - build
  - deploy

variables:
  DOCKER_DRIVER: overlay2
  DOCKER_TLS_CERTDIR: "/certs"

before_script:
  - echo "Starting CI/CD pipeline..."

# Unit tests, lint, coverage, and type checks on every pipeline.
test:
  stage: test
  image: node:18-alpine
  cache:
    paths:
      - node_modules/
  script:
    - npm ci
    - npm run lint
    - npm run test:coverage
    - npm run type-check
  coverage: '/Statements\s*:\s*([^%]+)/'
  artifacts:
    reports:
      coverage_report:
        coverage_format: cobertura
        path: coverage/cobertura-coverage.xml

# Build and push the API image, tagged with the commit SHA.
build:
  stage: build
  image: docker:latest
  services:
    - docker:dind
  only:
    - main
    - develop
  script:
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
    - docker build -t $CI_REGISTRY_IMAGE/api:$CI_COMMIT_SHA .
    - docker push $CI_REGISTRY_IMAGE/api:$CI_COMMIT_SHA

# Roll the new image onto ECS by registering a fresh task-definition revision.
deploy_aws:
  stage: deploy
  image: amazon/aws-cli:latest
  only:
    - main
    - develop
  before_script:
    - yum install -y jq
    - aws configure set region $AWS_DEFAULT_REGION
  script:
    - |
      ENVIRONMENT=$([ "$CI_COMMIT_REF_NAME" = "main" ] && echo "prod" || echo "dev")
      # Update ECS task definition
      TASK_DEFINITION=$(aws ecs describe-task-definition --task-definition my-app-api-$ENVIRONMENT --query taskDefinition)
      NEW_TASK_DEFINITION=$(echo $TASK_DEFINITION | jq --arg IMAGE "$CI_REGISTRY_IMAGE/api:$CI_COMMIT_SHA" '.containerDefinitions[0].image = $IMAGE | del(.taskDefinitionArn) | del(.revision) | del(.status) | del(.requiresAttributes) | del(.placementConstraints) | del(.compatibilities) | del(.registeredAt) | del(.registeredBy)')
      # Register new task definition
      aws ecs register-task-definition --cli-input-json "$NEW_TASK_DEFINITION"
      # Update service
      aws ecs update-service --cluster my-app-$ENVIRONMENT --service my-app-api --task-definition my-app-api-$ENVIRONMENT

# Deploy the same image to Cloud Run on GCP.
deploy_gcp:
  stage: deploy
  image: google/cloud-sdk:alpine
  only:
    - main
    - develop
  script:
    - echo $GCP_SERVICE_KEY | base64 -d > gcp-key.json
    - gcloud auth activate-service-account --key-file gcp-key.json
    - gcloud config set project $GCP_PROJECT_ID
    - |
      ENVIRONMENT=$([ "$CI_COMMIT_REF_NAME" = "main" ] && echo "prod" || echo "dev")
      # Deploy to Cloud Run
      gcloud run deploy my-app-api-$ENVIRONMENT \
        --image $CI_REGISTRY_IMAGE/api:$CI_COMMIT_SHA \
        --platform managed \
        --region us-central1 \
        --allow-unauthenticated \
        --memory 512Mi \
        --cpu 1
Monitoring and Observability
AWS CloudWatch Setup
// monitoring/aws-cloudwatch.ts
import AWS from 'aws-sdk';

const cloudwatch = new AWS.CloudWatch({ region: process.env.AWS_REGION });

/**
 * Thin wrapper around CloudWatch for publishing custom metrics and alarms
 * under the 'MyApp/Performance' namespace. All API errors are caught and
 * logged so callers can fire-and-forget.
 */
export class CloudWatchLogger {
  // NOTE(review): stored but never read by any method in this file —
  // presumably intended for CloudWatch Logs integration; confirm or remove.
  private logGroupName: string;

  constructor(logGroupName: string) {
    this.logGroupName = logGroupName;
  }

  /**
   * Publish a single metric data point.
   * @param metricName CloudWatch metric name.
   * @param value      Data point value.
   * @param unit       CloudWatch unit (default 'Count').
   */
  async putMetric(metricName: string, value: number, unit: string = 'Count') {
    const params = {
      Namespace: 'MyApp/Performance',
      MetricData: [
        {
          MetricName: metricName,
          Value: value,
          Unit: unit,
          Timestamp: new Date()
        }
      ]
    };
    try {
      await cloudwatch.putMetricData(params).promise();
    } catch (error) {
      // Swallow deliberately: metrics must never take down the request path.
      console.error('Error putting metric to CloudWatch:', error);
    }
  }

  /**
   * Create (or replace) a greater-than-threshold alarm on a custom metric.
   * @param alarmName  Alarm name (unique per account/region).
   * @param metricName Metric to watch in 'MyApp/Performance'.
   * @param threshold  Average value above which the alarm fires.
   * @param unit       Unit of the watched metric; MUST match the unit used
   *                   when publishing. Previously hardcoded to 'Count', which
   *                   silently broke alarms on metrics published in other
   *                   units (e.g. ResponseTime in 'Milliseconds').
   */
  async createAlarm(
    alarmName: string,
    metricName: string,
    threshold: number,
    unit: string = 'Count'
  ) {
    const params = {
      AlarmName: alarmName,
      ComparisonOperator: 'GreaterThanThreshold',
      EvaluationPeriods: 2,
      MetricName: metricName,
      Namespace: 'MyApp/Performance',
      Period: 300,
      Statistic: 'Average',
      Threshold: threshold,
      ActionsEnabled: true,
      AlarmActions: [process.env.SNS_TOPIC_ARN!],
      AlarmDescription: `Alarm for ${metricName}`,
      Unit: unit
    };
    try {
      await cloudwatch.putMetricAlarm(params).promise();
      console.log(`Alarm ${alarmName} created successfully`);
    } catch (error) {
      console.error('Error creating alarm:', error);
    }
  }
}
// Usage in Express middleware
// Single shared logger: the original allocated a new CloudWatchLogger on
// every request, which was pure per-request overhead (the logger holds no
// request state).
const requestMetricsLogger = new CloudWatchLogger('/aws/ecs/my-app');

/**
 * Express middleware that records per-request response time, request count,
 * and (for 4xx/5xx responses) an error-rate data point. Metric publishing is
 * fire-and-forget; putMetric catches its own errors, so these floating
 * promises cannot surface as unhandled rejections.
 */
export function metricsMiddleware(req: any, res: any, next: any) {
  const start = Date.now();
  res.on('finish', () => {
    const duration = Date.now() - start;
    requestMetricsLogger.putMetric('ResponseTime', duration, 'Milliseconds');
    requestMetricsLogger.putMetric('RequestCount', 1);
    if (res.statusCode >= 400) {
      requestMetricsLogger.putMetric('ErrorRate', 1);
    }
  });
  next();
}
Google Cloud Monitoring
// monitoring/gcp-monitoring.ts
// Fixed imports: the package exports the client classes directly; the
// original `{ Monitoring }` import / `Monitoring.MetricServiceClient()` call
// does not match the library's export shape. Alert policies are managed by
// AlertPolicyServiceClient, not the metric client.
import { MetricServiceClient, AlertPolicyServiceClient } from '@google-cloud/monitoring';

const metricClient = new MetricServiceClient();
const alertClient = new AlertPolicyServiceClient();

/**
 * Helpers for publishing custom metrics and creating alert policies in
 * Google Cloud Monitoring. API errors are caught and logged.
 */
export class GCPMonitoring {
  private projectId: string;

  constructor(projectId: string) {
    this.projectId = projectId;
  }

  /**
   * Write one data point to `custom.googleapis.com/<metricType>`, attributed
   * to a gce_instance resource.
   * NOTE(review): instance_id/zone are hardcoded placeholders — confirm the
   * intended monitored-resource labels before production use.
   */
  async createCustomMetric(metricType: string, value: number) {
    const projectPath = metricClient.projectPath(this.projectId);
    const timeSeriesData = {
      metric: {
        type: `custom.googleapis.com/${metricType}`,
      },
      resource: {
        type: 'gce_instance',
        labels: {
          instance_id: 'my-instance',
          zone: 'us-central1-a',
        },
      },
      points: [
        {
          interval: {
            endTime: {
              // Protobuf Timestamp.seconds must be an integer; the original
              // passed a fractional Date.now() / 1000.
              seconds: Math.floor(Date.now() / 1000),
            },
          },
          value: {
            doubleValue: value,
          },
        },
      ],
    };
    const request = {
      name: projectPath,
      timeSeries: [timeSeriesData],
    };
    try {
      await metricClient.createTimeSeries(request);
      console.log('Custom metric created successfully');
    } catch (error) {
      console.error('Error creating custom metric:', error);
    }
  }

  /**
   * Create a greater-than-threshold alert policy on a custom metric.
   * @param displayName Human-readable policy name.
   * @param metricType  Suffix of the custom metric type to watch.
   * @param threshold   Value above which the condition fires (held 300s).
   */
  async createAlertPolicy(displayName: string, metricType: string, threshold: number) {
    const projectPath = alertClient.projectPath(this.projectId);
    const alertPolicy = {
      displayName,
      conditions: [
        {
          displayName: `${metricType} condition`,
          conditionThreshold: {
            filter: `resource.type="gce_instance" AND metric.type="custom.googleapis.com/${metricType}"`,
            comparison: 'COMPARISON_GREATER_THAN',
            thresholdValue: threshold,
            duration: {
              seconds: 300,
            },
          },
        },
      ],
      notificationChannels: [process.env.GCP_NOTIFICATION_CHANNEL!],
      enabled: true,
    };
    try {
      const [policy] = await alertClient.createAlertPolicy({
        name: projectPath,
        alertPolicy,
      });
      console.log('Alert policy created:', policy.name);
    } catch (error) {
      console.error('Error creating alert policy:', error);
    }
  }
}
Security Best Practices
Infrastructure Security
# security/aws-security-config.yaml
SecurityGroups:
  DatabaseSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Database security group
      VpcId: !Ref VPC
      SecurityGroupIngress:
        # Postgres reachable only from the application tier.
        - IpProtocol: tcp
          FromPort: 5432
          ToPort: 5432
          SourceSecurityGroupId: !Ref ApplicationSecurityGroup
      # No outbound rules: the database never initiates connections.
      SecurityGroupEgress: []
  ApplicationSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Application security group
      VpcId: !Ref VPC
      SecurityGroupIngress:
        # App port reachable only from the load balancer.
        - IpProtocol: tcp
          FromPort: 3000
          ToPort: 3000
          SourceSecurityGroupId: !Ref LoadBalancerSecurityGroup
      SecurityGroupEgress:
        # Outbound HTTPS for external APIs / package pulls.
        - IpProtocol: tcp
          FromPort: 443
          ToPort: 443
          CidrIp: 0.0.0.0/0
        # Outbound Postgres, restricted to the database security group.
        - IpProtocol: tcp
          FromPort: 5432
          ToPort: 5432
          DestinationSecurityGroupId: !Ref DatabaseSecurityGroup

# WAF Configuration
WebACL:
  Type: AWS::WAFv2::WebACL
  Properties:
    Name: !Sub '${ApplicationName}-${Environment}-waf'
    Scope: REGIONAL
    DefaultAction:
      Allow: {}
    Rules:
      # Per-IP rate limiting.
      - Name: RateLimitRule
        Priority: 1
        Statement:
          RateBasedStatement:
            Limit: 2000
            AggregateKeyType: IP
        Action:
          Block: {}
        VisibilityConfig:
          SampledRequestsEnabled: true
          CloudWatchMetricsEnabled: true
          MetricName: RateLimitRule
      # AWS-managed SQL-injection rule set.
      - Name: SQLInjectionRule
        Priority: 2
        Statement:
          ManagedRuleGroupStatement:
            VendorName: AWS
            Name: AWSManagedRulesSQLiRuleSet
        OverrideAction:
          None: {}
        VisibilityConfig:
          SampledRequestsEnabled: true
          CloudWatchMetricsEnabled: true
          MetricName: SQLInjectionRule
Conclusion
Successful cloud deployment requires:
- Infrastructure as Code: Use CloudFormation, Terraform, or Pulumi for reproducible infrastructure
- Container Orchestration: Leverage ECS, Cloud Run, or Kubernetes for scalable deployments
- CI/CD Pipelines: Automate testing, building, and deployment processes
- Security: Implement proper network security, IAM policies, and secrets management
- Monitoring: Set up comprehensive logging, metrics, and alerting
- Cost Optimization: Use appropriate instance sizes, auto-scaling, and reserved instances
These patterns and practices will help you deploy robust, scalable applications to any cloud platform.
Ready for more? Next, I'll explore Kubernetes deployment strategies and service mesh patterns!
