Terraform Deep Dive

//TODO This is the follow-up to <<Terraform Quick Start>>; add a link to the GitHub repo.

Working with Existing Resources

# Configure an AWS profile with proper credentials
# for Terraform to use
aws configure --profile deep-dive
# Linux or macOS
export AWS_PROFILE=deep-dive

# After the Terraform files are in place,
# download modules and provider plugins
terraform init
terraform validate
# in a collaborative environment it is better to save a plan file
terraform plan -out m3.tfplan
terraform apply "m3.tfplan"
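
The plan/apply workflow above assumes a provider configuration is already present in the working directory. As a minimal sketch, the block below pins the AWS provider and points it at the same deep-dive profile; the version constraint and region are assumptions, so adjust them to your setup (exporting AWS_PROFILE as above makes the profile argument optional).

# Sketch only: provider version and region are assumptions
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }
}

provider "aws" {
  profile = "deep-dive"  # matches the profile configured above
  region  = "us-east-1"  # assumption; use your own region
}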

You can find useful modules out of the box in the public Terraform Registry. For example, the GCP Kubernetes Engine (GKE) module:

module "gke" {
# specify source
source = "terraform-google-modules/kubernetes-engine/google"
project_id = "<PROJECT ID>"
name = "gke-test-1"
region = "us-central1"
zones = ["us-central1-a", "us-central1-b", "us-central1-f"]
network = "vpc-01"
subnetwork = "us-central1-01"
ip_range_pods = "us-central1-01-gke-01-pods"
ip_range_services = "us-central1-01-gke-01-services"
http_load_balancing = false
horizontal_pod_autoscaling = true
network_policy = true

node_pools = [
{
name = "default-node-pool"
machine_type = "e2-medium"
node_locations = "us-central1-b,us-central1-c"
min_count = 1
max_count = 100
local_ssd_count = 0
disk_size_gb = 100
disk_type = "pd-standard"
image_type = "COS"
auto_repair = true
auto_upgrade = true
service_account = "project-service-account@<PROJECT ID>.iam.gserviceaccount.com"
preemptible = false
initial_node_count = 80
},
]

node_pools_oauth_scopes = {
all = []

default-node-pool = [
"https://www.googleapis.com/auth/cloud-platform",
]
}

node_pools_labels = {
all = {}

default-node-pool = {
default-node-pool = true
}
}

node_pools_metadata = {
all = {}

default-node-pool = {
node-pool-metadata-custom-value = "my-node-pool"
}
}

node_pools_taints = {
all = []

default-node-pool = [
{
key = "default-node-pool"
value = true
effect = "PREFER_NO_SCHEDULE"
},
]
}

node_pools_tags = {
all = []

default-node-pool = [
"default-node-pool",
]
}
}
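
Once the module is applied, its outputs can be surfaced at the root level or fed into other resources. The sketch below assumes the endpoint and ca_certificate outputs documented for this module; verify the exact output names against the module version you pin.

# Sketch: expose two of the module's (assumed) outputs at the root level
output "gke_endpoint" {
  value     = module.gke.endpoint
  sensitive = true
}

output "gke_ca_certificate" {
  value     = module.gke.ca_certificate
  sensitive = true
}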

Now suppose someone on the team has provisioned resources on AWS without using Terraform, and we want to bring them under Terraform's control:

# update the Terraform variables to cover the newly added resources
# (a hypothetical sketch of this edit follows this code block)
edit terraform.tfvars

# resource addresses come from the configuration,
# resource IDs from the provider
# (use the values output by the junior_admin.sh script)
# xxx: the resource ID, which can also be found in the AWS console
terraform import --var-file="terraform.tfvars" "module.vpc.aws_route_table.private[2]" "xxx"
terraform import --var-file="terraform.tfvars" "module.vpc.aws_route_table_association.private[2]" "xxx"
terraform import --var-file="terraform.tfvars" "module.vpc.aws_subnet.private[2]" "xxx"
terraform import --var-file="terraform.tfvars" "module.vpc.aws_route_table_association.public[2]" "xxx"
terraform import --var-file="terraform.tfvars" "module.vpc.aws_subnet.public[2]" "xxx"

# the imports add the existing resources to the state;
# plan will show pending changes if the configuration and the
# imported resources are not yet consistent
terraform plan -out m3.tfplan
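
As a rough sketch of what the terraform.tfvars edit might look like, assume the VPC module is driven by lists of subnet CIDRs; the variable names and CIDR values below are hypothetical and must be matched to the actual module inputs in your configuration.

# terraform.tfvars (hypothetical variable names and values)
# add the third private/public subnet that was created outside Terraform
private_subnets = ["10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24"]
public_subnets  = ["10.0.128.0/24", "10.0.129.0/24", "10.0.130.0/24"]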

Managing State
