tf console # access interactive command-line console to experiment with expressions
Terraform Workspace
use different state files for each workspace
tf workspace show # show current workspace
tf workspace list # list all available workspaces
tf workspace new dev # create and switch to new workspace named dev
tf workspace new prod # create a new workspace named prod
tf workspace select dev # switch to the dev workspace
tf validate # validate the configuration for syntax validity
Terraform Format
tf fmt # format the configuration files to standardize style
Terraform Upgrade
tf 0.13upgrade . # upgrade configuration to major release v0.13
Terraform Init
tf init # initialize terraform config files
tf init -upgrade # upgrade to the latest provider
Environment Variables
setx TF_VAR_instancetype m5.large # set a variable via environment variable (Windows)
export TF_VAR_instancetype="t2.nano" # set a variable via environment variable (Linux)
Terraform Get
tf get # install and update modules
Terraform Plan
tf plan # pre-deployment plan
tf plan -out=newplan # save the plan to a file
tf plan -var="instancetype=t2.small" # explicitly define variable value
tf plan -var-file="custom.tfvars" # use custom tfvars file name
tf plan -target aws_security_group.sg_allow_ssh # detect changes in a specific resource
tf plan -refresh=false # skip the refresh of all resources in the configuration
tf plan -destroy # plan a destroy without committing
Terraform Apply
tf apply # prompt before creating resources described in tf
tf apply --auto-approve # create resources without prompt
tf apply "newplan" # apply plan from plan file
Terraform Destroy
tf destroy # destroy all
tf destroy -target aws_instance.my_instance # only destroy specific resource
Terraform Taint
tf taint aws_instance.myec2 # mark ec2 instance to destroy and recreate on next apply
Terraform State
tf show -json | jq # output the state in json
tf state show aws_eip.myeip # inspect the state of an elastic ip resource
tf state list # list the resources in the state file
tf refresh # update the state to match .tf configs
tf state mv aws_instance.my_webapp aws_instance.my_ec2 # move ec2 instance within state without destroying and recreating
tf state pull # manually download and output the remote state
tf state push # manually upload a local state file to remote loc
tf state rm aws_instance.my_ec2 # removes ec2 instance from state (tf no longer aware)
Terraform Graph
tf graph # visual dependency graph
Terraform Import
tf import azurerm_storage_account.storage_account # import the azure storage account specified in tf
tf import aws_instance.myec2 i-041886ebb7e97bd20 # import ec2 instance with instance ID
tf import module.vm.azurerm_linux_virtual_machine vm1 # import azure vm named "vm1" into module "vm"
Terraform Output
tf output # output the values within the output configuration
tf output iam_arn # output the value iam_arn specified within the output configuration
HEREDOC
# input stream literal and redirect to command
# insert script from source for azure vm extension
extension {
  name                 = "${var.server_name}-extension"
  publisher            = "Microsoft.Compute"
  type                 = "CustomScriptExtension"
  type_handler_version = "1.10"

  # heredoc: everything between <<SETTINGS and the closing SETTINGS
  # marker is passed through verbatim as the settings JSON
  settings = <<SETTINGS
    {
      "fileUris" : ["https://raw.githubusercontent.com/eltimmo/learning/master/azureInstallWebServer.ps1"],
      "commandToExecute" : "start powershell -ExecutionPolicy Unrestricted -File azureInstallWebServer.ps1"
    }
SETTINGS
}
Count and Count Index
/* if the environment variable is equal to "production" then create 0 (none), else create 1 */
count = var.environment == "production" ? 0 : 1

/* if the environment variable is equal to "prod" then set allocation method to "static", else set to "Dynamic" */
allocation_method = var.environment == "prod" ? "static" : "Dynamic"
# main.tf
# create 5 users, appending the count index (0-4) to each name
resource "aws_iam_user" "iam_user" {
  name  = "ec2user.${count.index}"
  count = 5
  path  = "/system/"
}
# main.tf
# iterate through the index of variable names
variable "ec2_names" {
  type    = list
  default = ["dev-ec2user", "stage-ec2user", "prod-ec2user"]
}

# one user per list element; count.index selects the name
resource "aws_iam_user" "iam_user" {
  name  = var.ec2_names[count.index]
  count = 3
  path  = "/system/"
}
# main.tf
# create a dev ec2 instance or prod ec2 instance based on var.dev_env
variable "dev_env" {}

# dev instance: created only when dev_env is true
resource "aws_instance" "instance_one" {
  ami           = "ami082c5a44755e0e6f"
  instance_type = "t2.micro"
  count         = var.dev_env == true ? 1 : 0
}

# prod instance: created only when dev_env is false
# NOTE: renamed from "instance_one" — two resources of the same type
# cannot share one address in a single configuration
resource "aws_instance" "instance_two" {
  ami           = "ami082c5a44755e0e6f"
  instance_type = "t2.large"
  count         = var.dev_env == false ? 1 : 0
}
Multiple Resources with Multiple Regions/Accounts
# main.tf
# use alias for multiple provider blocks (multi-region deployment)
provider "aws" {
  region  = "us-east-1"
  version = ">=2.8"
}

provider "aws" {
  alias  = "mumbai"
  region = "ap-south-1"
}

# uses the default provider (us-east-1)
resource "aws_eip" "myeip" {
  vpc = "true"
}

# uses the aliased provider (ap-south-1)
resource "aws_eip" "myeip01" {
  vpc      = "true"
  provider = aws.mumbai
}
# main.tf
# use different creds from ~/.aws/credentials via named profiles
provider "aws" {
  region  = "us-east-1"
  version = ">=2.8"
}

provider "aws" {
  alias   = "tfadmin2cred"
  region  = "ap-south-1"
  profile = "tfadmin2"
}

# uses the default provider/credentials
resource "aws_eip" "myeip" {
  vpc = "true"
}

# uses the aliased provider with the tfadmin2 profile
resource "aws_eip" "myeip01" {
  vpc      = "true"
  provider = aws.tfadmin2cred
}
Logging
export TF_LOG=TRACE # enable trace logging
export TF_LOG_PATH="terraform.txt" # set log path to output to a file
Terraform Block
# main.tf
# must use terraform v0.11 or earlier
# must use aws provider in the v2.x range
terraform {
  required_version = "< 0.11"
  required_providers {
    aws = "~> 2.0"
  }
}

# NOTE(review): hardcoded credentials shown for illustration only —
# prefer environment variables or a shared credentials file
provider "aws" {
  region     = "ap-southeast-1"
  access_key = "YOUR-KEY"
  secret_key = "YOUR-KEY"
}

resource "aws_instance" "myec2" {
  ami           = "ami-0b1e534a4ff9019e0"
  instance_type = "t2.micro"
}
Comments
/*
resource "digitalocean_droplet" "my_droplet" {
  image  = "ubuntu-18-04-x64"
  name   = "web-1"
  region = "nyc1"
  size   = "s-1vcpu-1gb"
}
*/
Data Types
# variables.tf
# one example of each variable data type — a variable block may declare
# only ONE type, so the alternatives are shown commented out
variable "data-type-example" {
  type = string
  # type = list   # e.g. ["us-east-1a", "us-west-2b"]
  # type = map    # e.g. {name = "Chad", age = 34}
  # type = number
}
# main.tf
variable "region" {
  default = "us-east-1"
}

# map of region -> ami ID
variable "ami" {
  type = map
  default = {
    "us-east-1"  = "ami-0323c3dd2da7fb37d"
    "us-west-2"  = "ami-0d6621c01e8c2de2c"
    "ap-south-1" = "ami-0470e33cd681b2476"
  }
}

variable "tags" {
  type    = list
  default = ["firstec2", "secondec2"]
}
# use the file function to use the public key from id_rsa.pub in the module path
resource "aws_key_pair" "loginkey" {
  key_name   = "login-key"
  public_key = file("${path.module}/id_rsa.pub")
}
# use the lookup function to insert the ami ID for the current region
# use the element function to iterate through tags for each instance count
resource "aws_instance" "app-dev" {
  ami           = lookup(var.ami, var.region)
  instance_type = "t2.micro"
  key_name      = aws_key_pair.loginkey.key_name
  count         = 2
  tags = {
    Name = element(var.tags, count.index)
  }
}
Data Sources
# main.tf
# retrieve the correct ami for the ap-southeast-1 region
provider "aws" {
  region = "ap-southeast-1"
}

# most recent amazon-owned AMI whose name matches amzn2-ami-hvm*
data "aws_ami" "my_ami" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["amzn2-ami-hvm*"]
  }
}

resource "aws_instance" "instance-1" {
  ami           = data.aws_ami.my_ami.id
  instance_type = "t2.micro"
}
Dynamic Block
# main.tf
# dynamically create multiple security group rules for each port
# the iterator reassigns the element name
variable "sg_ports" {
  type        = list(number)
  description = "list of ingress ports"
  default     = [8200, 8201, 8300, 9200, 9500]
}

resource "aws_security_group" "dynamicsg" {
  name        = "dynamic-sg"
  description = "Ingress for Vault"

  # iterator renames the loop variable from "ingress" to "port"
  dynamic "ingress" {
    for_each = var.sg_ports
    iterator = port
    content {
      from_port   = port.value
      to_port     = port.value
      protocol    = "tcp"
      cidr_blocks = ["0.0.0.0/0"]
    }
  }

  # without an iterator, elements are referenced by the block label ("egress")
  dynamic "egress" {
    for_each = var.sg_ports
    content {
      from_port   = egress.value
      to_port     = egress.value
      protocol    = "tcp"
      cidr_blocks = ["0.0.0.0/0"]
    }
  }
}
Splat Expression
# main.tf
# output the ARN for all 3 users
resource "aws_iam_user" "ec2_user" {
name = "iamuser.${count.index}"
count = 3
path = "/system/"
}
# [*] is the splat operator: it collects the arn attribute from every
# instance created by count into a single list
output "arns" {
value = aws_iam_user.ec2_user[*].arn
}
Provisioner
# main.tf
# create ec2 instance and use remote-exec to ssh into the instance and install nginx
resource "aws_instance" "myec2" {
  ami           = "ami-082b5a644766e0e6f"
  instance_type = "t2.micro"
  key_name      = "kplabs-terraform"

  provisioner "remote-exec" {
    inline = [
      "sudo amazon-linux-extras install -y nginx1.12",
      "sudo systemctl start nginx"
    ]
    # connection tells the provisioner how to reach the new instance
    connection {
      type        = "ssh"
      user        = "ec2-user"
      private_key = file("./kplabs-terraform.pem")
      host        = self.public_ip
    }
  }
}
# main.tf
# create ec2 instance and use local-exec to copy the private ip to a file
# local-exec runs on the machine running terraform, not on the instance
resource "aws_instance" "myec2" {
  ami           = "ami-082b5a644766e0e6f"
  instance_type = "t2.micro"

  provisioner "local-exec" {
    command = "echo ${aws_instance.myec2.private_ip} >> private_ips.txt"
  }
}
# main.tf
# create ec2 instance and use remote-exec to install nano via ssh
# uninstall nano when tf destroy is run
# if nano install fails, it marks resource as tainted
resource "aws_instance" "myec2" {
  ami                    = "ami-0b1e534a4ff9019e0"
  instance_type          = "t2.micro"
  key_name               = "ec2-key"
  vpc_security_group_ids = [aws_security_group.allow_ssh.id]

  # create-time provisioner (default): runs after the resource is created
  provisioner "remote-exec" {
    inline = [
      "sudo yum -y install nano"
    ]
  }

  # destroy-time provisioner: runs before the resource is destroyed
  provisioner "remote-exec" {
    when = destroy
    inline = [
      "sudo yum -y remove nano"
    ]
  }

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = file("./ec2-key.pem")
    host        = self.public_ip
  }
}
# main.tf
# create ec2 instance and use remote-exec to install nano via ssh
# uninstall nano when tf destroy is run
# if nano install fails, it will NOT mark the instance as tainted
resource "aws_instance" "myec2" {
  ami                    = "ami-0b1e534a4ff9019e0"
  instance_type          = "t2.micro"
  key_name               = "ec2-key"
  vpc_security_group_ids = [aws_security_group.allow_ssh.id]

  # on_failure = continue: provisioner errors do not taint the resource
  provisioner "remote-exec" {
    on_failure = continue
    inline = [
      "sudo yum -y install nano"
    ]
  }

  # destroy-time provisioner: runs before the resource is destroyed
  provisioner "remote-exec" {
    when = destroy
    inline = [
      "sudo yum -y remove nano"
    ]
  }

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = file("./ec2-key.pem")
    host        = self.public_ip
  }
}
# main.tf
# module source is a generic git repo (git:: prefix forces the git fetcher)
module "my_module" {
  source = "git::https://github.com/chadmcrowell/tmp-repo.git"
}
# main.tf
# module source is a github repo (github.com addresses are auto-detected)
module "my_module" {
  source = "github.com/chadmcrowell/tmp-repo"
}
# main.tf
# module source is a specific branch of a generic git repo
# ?ref=develop selects the branch (also accepts tags and commit SHAs)
module "my_module" {
  source = "git::https://github.com/chadmcrowell/tmp-repo.git?ref=develop"
}
# backend.tf# store the terraform.tfstate file in an s3 bucketterraform {
backend"s3" {
bucket="my-remote-state-s3-bucket"key="terraform.tfstate"region="us-east-1"access_key="MY_ACCESS_KEY"secret_key="MY_SECRET_KEY"
}
}
# backend.tf
# store the terraform.tfstate file in an s3 bucket with STATE LOCK enabled
# dynamodb_table provides the lock so concurrent applies cannot corrupt state
# NOTE(review): avoid hardcoding credentials in backend config
terraform {
  backend "s3" {
    bucket         = "my-remote-state-s3-bucket"
    key            = "terraform.tfstate"
    region         = "us-east-1"
    access_key     = "MY_ACCESS_KEY"
    secret_key     = "MY_SECRET_KEY"
    dynamodb_table = "s3-state-lock"
  }
}