INDEX
########################################################### 2024-01-08 11:15 ###########################################################
That Devops Guy... Getting started with EKS and Terraform
https://www.youtube.com/watch?v=Qy2A_yJH5-o

Scripts have to be maintained - Terraform creates modular templates of infra

# Need to authenticate with amazon cli
docker run -it --rm -v ${PWD}:/work -w /work --entrypoint /bin/sh amazon/aws-cli:2.0.43
yum install -y jq gzip vim tar git unzip wget # Misc useful tools in container
# In amazon dashboard, "My security credentials" - make new key
aws configure # Asks for credentials, region and default output (json)

# Download terraform inside container (get url from website)
curl -L -o /tmp/terraform.zip URL; unzip /tmp/terraform.zip
chmod +x terraform && mv terraform /usr/local/bin # Move to binaries folder

Terraform lets you define infrastructure in a file - use variables to define important info

# variables.tf
variable "region" {
  default     = "ap-southeast-2"
  description = "AWS region"
}

variable "cluster_name" {
  default = "getting-started-eks"
}

variable "map_accounts" {
  description = "Additional AWS account numbers to add to the aws-auth configmap"
  type        = list(string)
  default = [
    "77777777777777",
    "88888888888888",
  ]
}

variable "map_roles" {
  description = "Additional IAM roles to add to the aws-auth configmap"
  type = list(object({
    rolearn  = string
    username = string
    groups   = list(string)
  }))
  default = [
    {
      rolearn  = "arn:aws:iam::66666666666666:role/role1"
      username = "role1"
      groups   = ["system:masters"]
    },
  ]
}

variable "map_users" {
  description = "Additional IAM users to add to the aws-auth configmap"
  type = list(object({
    userarn  = string
    username = string
    groups   = list(string)
  }))
  default = [
    {
      userarn  = "arn:aws:iam::66666666666666:user/user1"
      username = "user1"
      groups   = ["system:masters"]
    },
    {
      userarn  = "arn:aws:iam::66666666666666:user/user2"
      username = "user2"
      groups   = ["system:masters"]
    },
  ]
}

Make a "main.tf" file for all the config - can also have multiple files joined together

# main.tf
terraform {
  required_version = ">= 0.12.0"
}

provider "aws" {
  version = ">= 2.28.1"
  region  = var.region
}

## Define the data sources for different purposes
data "aws_eks_cluster" "cluster" {
  name = module.eks.cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
  name = module.eks.cluster_id
}

data "aws_availability_zones" "available" {} # Needed by the VPC module's "azs" below

## Define the security rules for a node group
resource "aws_security_group" "worker_group_mgmt_one" { # A specific worker group
  name_prefix = "worker_group_mgmt_one"
  vpc_id      = module.vpc.vpc_id # Tied to a VPC

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"
    cidr_blocks = [
      "10.0.0.0/8",
    ] # Subnet blocks allowed to connect
  }
}

resource "aws_security_group" "all_worker_mgmt" { # All worker groups
  name_prefix = "all_worker_mgmt"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"
    cidr_blocks = [
      "10.0.0.0/8",
      "172.16.0.0/12",
      "192.168.0.0/16",
    ]
  }
}

## Create a VPC (virtual private cloud) using a module on github
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "2.6.0"

  name            = "test-vpc"
  cidr            = "10.0.0.0/16" # Address range of network
  azs             = data.aws_availability_zones.available.names
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] # Define subnets
  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                    = "1"
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"           = "1"
  }
}

## Now define the EKS cluster in AWS
module "eks" {
  source          = "terraform-aws-modules/eks/aws"
  cluster_name    = var.cluster_name
  cluster_version = "1.17"
  subnets         = module.vpc.private_subnets # Build nodes in private network

  cluster_create_timeout          = "1h" # How long you will wait for cluster to be made
  cluster_endpoint_private_access = true # Allow private endpoints to connect to k8s

  vpc_id = module.vpc.vpc_id # Declare which VPC to join

  worker_groups = [
    {
      name                          = "worker-group-1"
      instance_type                 = "t2.small"
      additional_userdata           = "echo foo bar"
      asg_desired_capacity          = 1
      additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
    },
  ]

  # Apply "all worker" rules to worker groups also
  worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]

  # Create a config map
  map_roles    = var.map_roles
  map_users    = var.map_users
  map_accounts = var.map_accounts
}

Now terraform can also define outputs - when you deploy you can pipe to something else
(see the local_file sketch at the end of this section)

# outputs.tf
output "cluster_endpoint" {
  description = "Endpoint for EKS control plane."
  value       = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
  description = "Security group ids attached to the cluster control plane"
  value       = module.eks.cluster_security_group_id
}

output "kubectl_config" { # Output the kube config of clusters being made
  description = "kubectl config as generated by the module."
  value       = module.eks.kubeconfig
}

output "config_map_aws_auth" {
  description = "A kubernetes configuration to authenticate to this EKS cluster."
  value       = module.eks.config_map_aws_auth
}

output "region" {
  description = "AWS region"
  value       = var.region # As we set this in the variables file
}

Look in terraform documentation "kubernetes provider" - deploy to kubernetes

# (extending) main.tf
provider "kubernetes" { # Allows you to authenticate with kubernetes cluster
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
  token                  = data.aws_eks_cluster_auth.cluster.token
  load_config_file       = false
  version                = "~> 1.11"
}

## Deployment yaml file in terraform format
resource "kubernetes_deployment" "example" {
  metadata {
    name = "terraform-example"
    labels = {
      test = "MyExampleApp"
    }
  }

  spec {
    replicas = 2

    selector {
      match_labels = {
        test = "MyExampleApp"
      }
    }

    template {
      metadata {
        labels = {
          test = "MyExampleApp"
        }
      }

      spec { # Pod spec sits inside the template block
        container {
          image = "nginx:1.7.8"
          name  = "example"

          resources {
            limits {
              cpu    = "0.5"
              memory = "512Mi"
            }
            requests {
              cpu    = "250m"
              memory = "50Mi"
            }
          }
        }
      }
    }
  }
}

## To expose this you make a service
resource "kubernetes_service" "example" {
  metadata {
    name = "terraform-example"
  }
  spec {
    selector = {
      test = "MyExampleApp"
    }
    port {
      port        = 80
      target_port = 80
    }
    type = "LoadBalancer"
  }
}

Now run the actual terraform script to build this

# Go to where main.tf is
terraform init  # Pulls all modules and plugins
terraform plan  # Checks states of everything and compares to scripts
terraform apply # Applies, with approval

Now check with kubectl to see info about clusters

# Merge AWS EKS config with local
aws eks update-kubeconfig --name getting-started-eks --region ap-southeast-2
kubectl get nodes
kubectl get pods    # see nginx pods running
kubectl get service # list services and get public external IP

terraform destroy # Delete everything (VPC, cluster etc.)
Cloud Champ... Terraform Full Course
https://www.youtube.com/playlist?list=PLOa-edppsqFm10V0vh-szO7YffAKcW5K-

Terraform is the main infra as code tool - provisions AWS, windows, VMs etc.
Lets you automatically make storage, networks, clouds, apps etc.
Terraform is essentially used everywhere as it has multi-cloud support
Many companies are moving from on-prem to cloud using Terraform
Hashicorp configuration language (HCL) is how you write Terraform code
Syntax for datatypes is similar to python

# Maps are key:value pairs
variable "example_map" {
  type = map
  default = {
    key1 = "value1"
    key2 = "value2"
    key3 = "value3"
  }
}

# Conditionals are just ternary operators
resource "aws_instance" "server" {
  instance_type = var.environment == "dev" ? "t2.micro" : "t2.small"
}

# Can use functions inside ${}
locals {
  name    = "John"
  fruits  = ["apple", "banana", "mango"]
  message = "Hello ${upper(local.name)}! Fruits: ${join(",", local.fruits)}"
}

# Resource dependency
resource "aws_security_group" "mysg" {
}
resource "aws_instance" "name" {
  vpc_security_group_ids = [aws_security_group.mysg.id] # references above group
}

As an example, try to make a file on the local machine

# challenge.tf
resource "null_resource" "file" {
  provisioner "local-exec" {
    command = "echo 'Message: Hello world' > challenge.txt"
  }
}

terraform init; terraform plan; terraform apply
cat challenge.txt # File will exist as expected

Use specific providers based on what service you are provisioning - example EC2
Log in to AWS and get access keys

# main.tf
provider "aws" {
  region = "us-east-1"
}

terraform init # Creates main files based on aws provider

To make an EC2 instance go to AWS, make EC2 and copy "AMI ID" from "quick start" page

# (extended) main.tf
resource "aws_instance" "web" { # Creates EC2 instance of ID "web"
  ami           = "ami-..."  # Put AMI ID here (different by OS and user)
  instance_type = "t3.micro" # Size of machine
  tags = {
    Name = "helloworld" # This is the actual server name
  }
}

terraform plan # Shows what terraform is going to do
terraform apply --auto-approve

Can add more files to define separate providers - look on documentation

# providers.tf
provider "aws" {
  region = "us-east-1"
}
provider "aws" {
  region = "us-west-2"
  alias  = "west" # need an alias to have 2 of the same provider
}
provider "github" { # Get access token from github website (personal access)
  token = "..."
}

terraform init

# (extended) main.tf (remove the "provider" part as this will conflict)
resource "github_repository" "example" {
  name        = "my_github_repo"
  description = "This repo is created using Terraform"
  visibility  = "public"
}

terraform plan; terraform apply # Creates a github repo in your profile

Variables in Terraform are input/output/local

# Locals are just scoped to the module/config
locals {
  ami  = "ami-..."
  type = "t2.micro"
}
resource "aws_instance" "myvm" {
  ami           = local.ami
  instance_type = local.type
}

# Input variables are just a defined type and data
variable "environment" { # environment:string = "development"
  type    = string
  default = "development"
  # description, validation (rules) and sensitive (boolean for hidden)
  # If default is not set, it will query for it in `terraform plan`
}
resource "aws_s3_bucket" "example" {
  bucket = "my-bucket-${var.environment}" # bucket = "my-bucket-development"
}

Can also define variables from CLI, or external to terraform

# prod.tfvars
environment = "production"

terraform plan -var="environment=production" # Do same with "apply" to run
terraform plan -var-file="prod.tfvars"       # Get variable info from file

Output variables are variables exported from Terraform to cli

# (extending) main.tf
output "IPAddress" {
  value = aws_instance.web.public_ip
}

terraform plan   # Shows the output to be generated
terraform output # Prints all outputs

You use outputs to pass variables between modules (see the module sketch at the end of this section)

How can you make a basic website with terraform on AWS?
Can copy a lot of code straight from provider documentation

# provider.tf (run `terraform init` to apply this)
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "5.10.0"
    }
  }
}
provider "aws" {
  region = "us-east-1"
}

# variables.tf
variable "bucketname" {
  default = "myterraformproject....."
}

# main.tf
resource "aws_s3_bucket" "mybucket" {
  bucket = var.bucketname
}

resource "aws_s3_bucket_ownership_controls" "example" {
  bucket = aws_s3_bucket.mybucket.id
  rule {
    object_ownership = "BucketOwnerPreferred"
  }
}

resource "aws_s3_bucket_public_access_block" "example" {
  bucket = aws_s3_bucket.mybucket.id

  block_public_acls       = false
  block_public_policy     = false
  ignore_public_acls      = false
  restrict_public_buckets = false
}

resource "aws_s3_bucket_acl" "example" {
  depends_on = [
    aws_s3_bucket_ownership_controls.example,
    aws_s3_bucket_public_access_block.example,
  ]

  bucket = aws_s3_bucket.mybucket.id
  acl    = "public-read"
}

resource "aws_s3_object" "index" {
  bucket       = aws_s3_bucket.mybucket.id
  key          = "index.html"
  source       = "index.html" # A file local to the code
  acl          = "public-read"
  content_type = "text/html"
}

resource "aws_s3_object" "error" {
  bucket       = aws_s3_bucket.mybucket.id
  key          = "error.html"
  source       = "error.html" # A file local to the code
  acl          = "public-read"
  content_type = "text/html"
}

resource "aws_s3_bucket_website_configuration" "website" {
  bucket = aws_s3_bucket.mybucket.id

  index_document {
    suffix = "index.html"
  }
  error_document {
    key = "error.html"
  }

  depends_on = [aws_s3_bucket_acl.example]
}

# outputs.tf
output "websiteendpoint" {
  value = aws_s3_bucket.mybucket.website_endpoint
}

terraform apply # Should build website and print URL
########################################################### 2024-01-08 11:15 2024-01-16 19:30 ###########################################################
HashiCorp Terraform Associate Certificate Course (003)
https://www.youtube.com/watch?v=SPcwo0Gq9T8

Declarative = explicit scripting (e.g. terraform, cloudformation)
Imperative = implicit programming with SDKs (e.g. AWS CDK) - lots of parts are assumed
Infra. lifecycle = plan, design, build, test, deliver, maintain, retire
Day 0 = plan & design. Day 1 = develop & iterate. Day 2 = go live
In IaC changes are idempotent (stateless), consistent and repeatable
Provision (prepare server) -> Deploy (deliver app) -> Orchestrate (manage servers/services)
Detect config drift with AWS config, Azure policies -> Terraform refresh and plan commands
Mutable infra (develop deploy config) - Immutable infra (develop config deploy)
Have to consider failures in infrastructure - how can you ensure consistency and guarantees?
Hashicorp: Boundary (secure access), consul (service mesh), nomad (orchestration), packer (build VMs)
Terraform (IaC), vagrant (dev environments), vault (secrets), waypoint (workflows)
Terraform Cloud is a web portal software as service - remote backend for terraform
Lifecycle: code, init, plan, validate, apply, destroy
Terraform core = remote procedure calls to talk to plugins (interfaces for services like aws)
Change automation: automatically make change requests (Execution plans) for "changesets"
Lets you know what Terraform will change, and in what order
Execution plans are manual reviews of what will be changed

terraform graph | dot -Tsvg > graph.svg # Create svg graph of dependency graph

Local-exec lets you execute commands after a resource is provisioned - e.g. output data
resource "null_resource" "example2" {
  provisioner "local-exec" {
    command     = "Get-Date > completed.txt" # Set the command to run
    interpreter = ["PowerShell", "-Command"] # This is the entrypoint for a command
  }
}
resource "aws_instance" "web" {
  provisioner "local-exec" {
    command = "echo $KEY $SECRET >> credentials.yml"
    environment = { # Set environment variables
      KEY    = ...
      SECRET = ...
    }
  }
}

Remote-exec lets you run commands on a target resource, similarly to local-exec
resource "aws_instance" "web" {
  provisioner "remote-exec" {
    inline = [ # List of command strings to run
      "puppet apply",
      "consul join ${aws_instance.web.private_ip}",
    ]
  }
}
resource "aws_instance" "web" {
  provisioner "remote-exec" {
    scripts = [ # Execute files in order (can use "script" for just 1)
      "./setup-users.sh",
      "/home/andrew/Desktop/bootstrap",
    ]
  }
}

File provisioners let you copy files to a resource. Connection blocks tell how to connect
provisioner "file" {
  source      = "conf/myapp.conf" # Alt use "content = " to directly paste text
  destination = "/etc/myapp.conf"

  connection {
    type     = "ssh"
    user     = "root"
    password = var.root_password
    host     = var.host
  }
}

Null resources are placeholders for resources without a provider - e.g. trigger others
But requires a provider called "null" - alt use "terraform_data" (triggers->triggers_replace)
resource "aws_instance" "cluster" {
  count = 3
  ...
}
resource "null_resource" "cluster" {
  triggers = {
    cluster_instance_ids = join(",", aws_instance.cluster.*.id)
  }

  connection {
    host = element(aws_instance.cluster.*.public_ip, 0)
  }

  provisioner "remote-exec" {
    inline = [
      "bootstrap-cluster.sh ${join(" ", aws_instance.cluster.*.private_ip)}",
    ]
  }
}

Providers are API links to other services like AWS or stripe - browse on terraform registry
Modules are config files to do common config on a provider for you
Terraform cloud is a private registry

terraform providers # List all providers and modules

provider "aws" {
  alias  = "west" # Set alternative provider (when you have, say, more than 1 AWS region)
  region = "us-west-2"
}
resource "aws_instance" "foo" {
  provider = aws.west # Select specifically this alt provider
}

# You work with modules in a domain specific language (DSL) - to quickly do tasks
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name = "my-vpc"
  cidr = "10.0.0.0/16"

  azs             = ["eu-west-1a", "eu-west-1b", "eu-west-1c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]

  enable_nat_gateway = true
  enable_vpn_gateway = true

  tags = {
    Terraform   = "true"
    Environment = "dev"
  }
}

Terraform files (.tf) are written in HCL (hashicorp config language)
BLOCK_TYPE "BLOCK_LABEL" "BLOCK_LABEL" {
  IDENTIFIER = EXPRESSION
}
resource "aws_vpc" "main" {
  cidr_block = var.base_cidr_block
}

# Alternatively can write in json (.tf.json)
{
  "resource": {
    "aws_instance": {
      "example": {
        "instance_type": "t2.micro",
        "ami": "ami-abc123"
      }
    }
  }
}

Use a terraform block to configure terraform itself
terraform {
  # required_version = expected version of terraform
  required_providers { # what providers to use
    aws = {
      version = ">= 2.7.0"
      source  = "hashicorp/aws"
    }
  }
}

Input variables (parameters) are ways of getting data IN to terraform
Priority (lowest to highest): ENV (TF_VAR_...) < terraform.tfvars < terraform.tfvars.json < *.auto.tfvars < -var / -var-file
variable "availability_zone_names" {
  type    = list(string)
  default = ["us-west-1a"]
}

# terraform.tfvars or FILENAME.auto.tfvars (both loaded automatically)
image_id = "ami-abc123"
availability_zone_names = [
  "us-east-1a",
  "us-east-1c",
]

# Can define environment variables (default loads TF_VAR_...)
export TF_VAR_image_id=ami-abc123
... -var-file FILE.tfvars
... -var KEY="VALUE" # Can load variables directly on startup

Output values are variables exported to state file once terraform is run
output "db_password" {
  value       = aws_db_instance.db.password
  description = "The password for logging in to the database"
  sensitive   = true # Does not show in terminal output
}

terraform output              # Show output of state file
terraform output -json lb_url # Get a specific value, and export as json

Locals (local values) are like constants you can reference in a module
locals { # Can have multiple "locals" blocks
  service_name = "forum"
  owner        = "Community team"
}
resource "aws_instance" "example" {
  service_name = local.service_name # Reference a locals variable
}

Data sources allow you to use information defined outside of terraform
Reference any type of data as "BUILTIN.RESOURCE.NAME" (var.NAME, local.NAME etc.)
data "aws_ami" "web" { # This pulls external data
  filter {
    name   = "state"
    values = ["available"]
  }
  filter {
    name   = "tag:Component"
    values = ["web"]
  }
  most_recent = true
}
resource "aws_instance" "web" {
  ami           = data.aws_ami.web.id # Use "data.RESOURCE.NAME.VAR" to reference it
  instance_type = "t1.micro"
}

Can change the behaviour of resources (e.g. count, depends_on, for_each)
resource ... {
  count = 4 # Number of instances you want (can be a calculated expression)
  tags = { Name = "Server ${count.index}" } # Can reference this count value per instance

  depends_on = [aws_iam_role_policy.example] # Requires a specific resource to run

  # Iterate over keypairs and assign an instance for each (use for_each OR count, not both)
  for_each = {
    a_group = "eastus"
    b_group = "westus2"
  }
  name     = each.key
  location = each.value
}

When you change resources they are either created, destroyed, updated, recreated
Lifecycle blocks let you decide what happens in each of these cases
resource ... {
  lifecycle {
    create_before_destroy = true # When replacing, recreate first
    prevent_destroy       = true
    ignore_changes        = [tags] # Don't update listed attributes (or "all")
  }
}

Terraform also has expressions that you can use
terraform console # Can enter terraform console from CLI to run commands directly

# Strings can be interpolated like a combination of bash and hugo
"Hello, ${var.name}! %{ if var.state != "" }Goodbye%{ else }Hope you are well%{ endif }!"
# Directive blocks like "for" and "if" leave whitespace - remove by using ~} at end

var.a != "" ? var.a : "default-a" # Can do ternary operators
[for i, x in var.list : upper(x)] # Can loop through lists/maps - return: []=tuple {}=object

# These expressions do the same thing (splat)
[for o in var.list : o.interfaces[0].name]
var.list[*].interfaces[0].name

# Dynamic blocks are repeatable nested blocks
dynamic "ingress" {
  for_each = local.LIST # Point to some list of maps
  content {
    description = ingress.value.description # Extract value from list items
  }
}

Terraform state is a condition of a resource - e.g. "we expect a VM to exist"
Stored in "terraform.tfstate" (json) - automatically created after applying
terraform state list # List resources - also "show" to detail a resource
terraform state pull # Pull remote state and output to stdout - "push" to update remote state
terraform state rm   # Remove an instance
terraform state mv   # Rename or move resources - can move modules into modules
terraform state mv packet_device.worker module.worker.packet_device.worker # Move into module
# All state modifiers make a backup (terraform.tfstate.backup)

terraform init          # Run when changing dependencies - Sets up project + locks dependency versions
terraform init -upgrade # Upgrade all plugins - "-get-plugins=false" to skip plugin install
terraform get           # Download and update modules in root module - use init instead
terraform fmt           # Reformat config files
terraform validate      # Check syntax and args of config files - runs by default on apply/plan
terraform plan -out=FILE # Builds execution plan (dry run of apply) - can output to a file
terraform apply FILE     # Can apply a plan file (does not ask confirmation)

Need to manage resource drift, when resources are not the same as expected state
Can "-replace" (delete old, remake), "import" or "-refresh" (update our state files)
terraform apply -replace="aws_instance.example[0]" # Set resource to be replaced
# Reference a resource address string e.g. "aws_instance.web[3]"
resource "aws_instance" "example" { ... } # Placeholder for resource
terraform import aws_instance.example ID # Import an existing resource by ID (save config)
terraform apply -refresh-only # Assume remote is true and local state is wrong - update

Logs are enabled with TF_LOG (TRACE,DEBUG->ERROR). Also set TF_LOG_CORE and TF_LOG_PROVIDER
If Terraform ever crashes it saves a crash.log with debug logs

When downloading modules, do 'source = (HOSTNAME)/NAMESPACE/NAME/PROVIDER'
Can publish and share modules on Terraform Registry - stored on github then published
Repos must be "terraform-PROVIDER-NAME" - connect to Terraform Registry and publish
Verified modules are approved by HashiCorp
Module without a README is considered "internal use only"
Root module: / -> README.md main.tf variables.tf outputs.tf
Nested modules: modules/nestedA -> README.md main.tf variables.tf outputs.tf
examples/exampleA -> main.tf
Write plan apply - Either write in git or in cloud -> pull request to plan -> then apply

Terraform backends are servers which store state (enhanced = can run terraform operations)
Usually backends like S3 use another service (like dynamoDB) to lock states (see the locking sketch at the end of this section)
backend "s3" {
  bucket = "terraform-state000"
  key    = "statefile" # Store statefile remotely (but keeps backup locally)
  region = "us-east-1"
}

# A local backend is just a locked stored state (anything but Terraform cloud)
terraform {} # defaults to local
terraform {
  backend "local" {
    path = "DIR/terraform.tfstate" # Set local state file explicitly
  }
}

# Can reference other state files to read outputted values
data "terraform_remote_state" "networking" {
  backend = "local"
  config = {
    path = "${path.module}/networking/terraform.tfstate"
  }
}

# Remote backends = Terraform Cloud/Enterprise (cloud runs the terraform commands)
# Create a terraform workspace
terraform {
  backend "remote" { # Alt use "cloud {}"
    hostname     = "app.terraform.io"
    organization = "company"
    workspaces {
      prefix = "my-app-" # Prompts between workspaces (prod/dev) - or use name=...-prod
    }
  }
}

terraform init -backend-config=backend.hcl # Set a file defining the backend
# backend.hcl
workspaces { name = "workspace" }
hostname     = "app.terraform.io"
organization = "company"

Ensure encryption is enabled in S3 to protect state file secrets
Use a .terraformignore file to list files not to upload to Terraform Cloud (if used)
(As a note, this video is full of filler and not very good)

Sentinel is a tool to enforce policies on terraform
# [POLICY FILE] (checks for rule violations and restricts failures to 0)
import "tfplan-functions" as plan

allowed_zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
allEC2Instances = plan.find_resources("aws_instance")
violatingEC2Instances = plan.filter_attribute_not_in_list(allEC2Instances,
  "availability_zone", allowed_zones, true)
main = rule { length(violatingEC2Instances["messages"]) is 0 }
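A minimal sketch of S3 state locking with DynamoDB (bucket/table names are made up; "dynamodb_table" is the s3 backend's standard locking argument, and the table needs a "LockID" string hash key):

terraform {
  backend "s3" {
    bucket         = "terraform-state000"
    key            = "statefile"
    region         = "us-east-1"
    encrypt        = true              # Encrypt state at rest in S3
    dynamodb_table = "terraform-locks" # DynamoDB table used to lock concurrent runs
  }
}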
########################################################### 2024-01-17 15:50 ###########################################################
Travis Media... Why you NEED to learn Terraform
https://www.youtube.com/watch?v=nvNqfgojocs

It is a requirement for every devops job - used to spin up resources with scripts
Terraform is agent-less (just uses provider APIs to interact) and stateful (with a state file)
Version controlled, declarative, avoid click-ops, saves money + allows easy disaster recovery

# First, install terraform - install aws cli and get security credentials from web
aws configure # Set up AWS locally - makes ".aws/credentials" file

# main.tf
provider "aws" {
  profile = "default" # Use the "default" creds in AWS credentials file
  region  = "us-east-1"
}

resource "aws_instance" "app_server" { # Named "app_server"
  ami           = "ami-..." # This represents the OS type
  instance_type = "t2.micro"
  tags = {
    Name = "MyTerraformInstance"
  }
}

terraform init # Downloads provider and sets up locking
# Look at terraform AWS docs - EC2 instance is "aws_instance"
terraform apply # Shows changelog and then builds instance on approval
# Created a state file - single source of truth for your infrastructure
# Change tag in .tf file and run "apply" - updates resource in place
terraform destroy # Delete all resources

But this doesn't scale because if you have many VPCs and servers this is all hard coded

# variables.tf (name is arbitrary as terraform reads all tf files)
variable "instance_name" { # Variable for the name of new instances
  description = "Value of the name tag for EC2 instance"
  type        = string
  default     = "MyNewInstance"
}
variable "ec2_instance_type" { # Variable representing new instance scale (default t2.micro)
  description = "AWS EC2 instance type"
  type        = string
  default     = "t2.micro"
}

# (part of) main.tf
instance_type = var.ec2_instance_type
tags = {
  Name = var.instance_name
}

terraform apply -var "instance_name=MyNewNameEC2" # Can manually set new name for EC2 instance
# Now you have a generator for EC2 instances but this doesn't scale as have to manually run

Scale by making a file of variable contents - put all names/scales in 1 place

# terraform.tfvars
ec2_instance_type = "t2.micro"
instance_name     = "MyInstanceNameFromFile"

Can define "outputs" - e.g. get public IPs into automation
output "instance_id" {
  description = "ID of the EC2 instance"
  value       = aws_instance.app_server.id # So get the ID for the AWS instance called "app_server"
}
output "instance_public_ip" {
  description = "Public IP address of the EC2 instance"
  value       = aws_instance.app_server.public_ip # Equally get the public ip
}

terraform apply  # Now outputs these values to console on running
terraform output # Reads state to get these values without applying

In a more realistic example, define a few VPCs, gateways and servers - then some variables too
Defines resources for VPCs, subnets, gateway, route table, subnet, security group, EC2 (see the sketch below)
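A minimal sketch of that shape (all names, CIDRs and the AMI are placeholders of mine, not taken from the video):

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "public" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.1.0/24"
  map_public_ip_on_launch = true # Give instances in this subnet public IPs
}

resource "aws_internet_gateway" "gw" {
  vpc_id = aws_vpc.main.id
}

resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id
  route {
    cidr_block = "0.0.0.0/0" # Send all outbound traffic via the gateway
    gateway_id = aws_internet_gateway.gw.id
  }
}

resource "aws_route_table_association" "public" {
  subnet_id      = aws_subnet.public.id
  route_table_id = aws_route_table.public.id
}

resource "aws_security_group" "web" {
  vpc_id = aws_vpc.main.id
  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1" # All protocols
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_instance" "web" {
  ami                    = "ami-..." # Placeholder AMI ID
  instance_type          = var.ec2_instance_type
  subnet_id              = aws_subnet.public.id
  vpc_security_group_ids = [aws_security_group.web.id]
  tags = { Name = var.instance_name }
}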
Devops Directive... Complete Terraform Course
https://www.youtube.com/watch?v=7xngnjfIlK4

Basic architecture: route53 (DNS) -> ELB (load balance) -> EC2 (compute) + S3 (data) + RDS (db) (see the sketch at the end of this section)
Can use Terraform with Ansible to set up server states
Can use with templating (packer) for building VMs
Can use with Kubernetes to build clusters
Terraform core takes config/state -> service providers -> AWS/Cloudflare/provider etc.
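A rough HCL sketch of the DNS -> load balancer front of that architecture (names, domain and the two variables are placeholders of mine; the course presumably builds it out differently):

resource "aws_lb" "app" {
  name               = "app-lb"
  load_balancer_type = "application"
  subnets            = var.public_subnet_ids # Assumed variable: list of public subnet IDs
}

resource "aws_route53_record" "app" {
  zone_id = var.zone_id # Assumed variable: hosted zone ID
  name    = "app.example.com"
  type    = "CNAME"
  ttl     = 300
  records = [aws_lb.app.dns_name] # DNS -> load balancer -> EC2 targets behind it
}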