Terraform State
Remote Backends
# S3 backend — state stored encrypted in S3, writes serialized by a DynamoDB lock
terraform {
  backend "s3" {
    bucket         = "my-terraform-state"
    key            = "production/vpc/terraform.tfstate"
    region         = "us-east-1"
    encrypt        = true                      # server-side encryption of the state object
    kms_key_id     = "alias/terraform-state"   # customer-managed KMS key instead of the S3 default
    dynamodb_table = "terraform-lock"          # NOTE: deprecated in TF >= 1.10; prefer use_lockfile = true (S3-native locking)

    # Cross-account access: the top-level role_arn argument is deprecated
    # since Terraform 1.6 — use the assume_role block instead.
    assume_role {
      role_arn = "arn:aws:iam::123456789012:role/TerraformBackend"
    }
  }
}
# GCS backend — state objects written under <bucket>/<prefix>/
terraform {
  backend "gcs" {
    bucket = "my-terraform-state"
    prefix = "terraform/state"
  }
}
# Azure backend — state blob stored in a storage-account container
terraform {
  backend "azurerm" {
    resource_group_name  = "tfstate-rg"
    storage_account_name = "tfstateacct"
    container_name       = "tfstate"
    key                  = "production.tfstate"
  }
}
# HCP Terraform (Terraform Cloud) — the service hosts state, locking, and runs
terraform {
  cloud {
    organization = "my-org"

    workspaces {
      name = "production"
    }
  }
}
State Commands
# List every resource address tracked in the current state
terraform state list
# Show all recorded attributes of one resource
terraform state show aws_instance.web
# Move a resource: rename in place, or relocate it into a module
terraform state mv aws_instance.web aws_instance.app
terraform state mv aws_instance.web module.compute.aws_instance.web
# Remove a resource from state only — the real infrastructure is left untouched
terraform state rm aws_instance.legacy
# Download the current (remote) state to a local file
terraform state pull > state.json
# Overwrite the remote state with a local file — DANGEROUS: back up the remote state first
terraform state push state.json
# Force-release a stuck lock — only when you are certain no run is in progress
terraform force-unlock LOCK_ID
Importing Existing Resources
# CLI import — binds an already-existing resource to an address in state
# (Terraform >= 1.5: prefer the declarative import block shown below)
terraform import aws_instance.web i-1234567890abcdef0
terraform import aws_s3_bucket.data my-existing-bucket
terraform import aws_iam_role.deploy arn:aws:iam::123456789012:role/DeployRole
# Import block (Terraform >= 1.5, preferred) — declarative, shows up in plan,
# and is reviewable in version control
import {
  to = aws_instance.web
  id = "i-1234567890abcdef0"
}
# Have plan write HCL configuration for resources targeted by import blocks
terraform plan -generate-config-out=generated.tf
# Bulk import — for_each inside an import block requires Terraform >= 1.7
import {
  for_each = {
    web = "i-1111111111111111"
    api = "i-2222222222222222"
    db  = "i-3333333333333333"
  }
  to = aws_instance.servers[each.key]
  id = each.value
}
Workspaces
# Create workspaces, list them, and switch the active one
terraform workspace new staging
terraform workspace new production
terraform workspace list
terraform workspace select production
# Interpolate the active workspace name into resource arguments
resource "aws_s3_bucket" "app" {
  bucket = "my-app-${terraform.workspace}"
}
# Per-workspace settings, keyed by the active workspace name
locals {
  config = {
    dev        = { instance_type = "t3.micro", count = 1 }
    staging    = { instance_type = "t3.small", count = 2 }
    production = { instance_type = "t3.medium", count = 5 }
  }

  env_config = local.config[terraform.workspace]
}
# Note: For complex multi-env setups, prefer separate state files
# (separate S3 keys) over workspaces
Sensitive Values
# sensitive = true redacts the variable's value in plan/apply output
variable "db_password" {
  type      = string
  sensitive = true
}
# Outputs derived from sensitive values must themselves be marked sensitive
output "connection_string" {
  value     = "postgresql://admin:${var.db_password}@${aws_db_instance.main.endpoint}/app"
  sensitive = true
}
# sensitive() marks a value so Terraform redacts it everywhere downstream
locals {
  db_url = sensitive("postgresql://admin:${var.db_password}@db.example.com/app")
}
# nonsensitive() strips the sensitive mark — use carefully: the value is exposed
output "db_host" {
  value = nonsensitive(aws_db_instance.main.address)
}
# Note: sensitive values are STILL stored in PLAINTEXT in the state file —
# use an encrypted remote backend and tightly restrict who can read state
Terraform Lifecycle & Refresh
# Sync state with real infrastructure (detect drift)
terraform refresh # deprecated; use plan -refresh-only instead
terraform plan -refresh-only   # preview drift without proposing changes
terraform apply -refresh-only  # accept detected drift into state
# Limit plan/apply to specific resources (escape hatch — avoid in routine use)
terraform plan -target=module.vpc
terraform apply -target=aws_instance.web
# Force a resource to be destroyed and recreated on the next apply
terraform apply -replace=aws_instance.web
# Skip the interactive approval prompt (for CI/CD pipelines)
terraform apply -auto-approve
# Destroy only the targeted resource
terraform destroy -target=aws_instance.old