1
1
locals {
  # Carve one /14 per deployment out of the RFC 1918 172.16.0.0/12 block.
  cidr_networks = cidrsubnets(
    "172.16.0.0/12",
    2,
    2,
    2,
  )

  # Deployment name -> /14 network.
  networks = {
    alpha   = local.cidr_networks[0]
    bravo   = local.cidr_networks[1]
    charlie = local.cidr_networks[2]
  }

  # Carve ten /18s out of this deployment's /14, consumed positionally below.
  cidr_subnetworks = cidrsubnets(
    local.networks[var.name],
    4, # PSA
    4, # primary subnetwork
    4, # primary k8s pod network
    4, # primary k8s services network
    4, # europe subnetwork
    4, # europe k8s pod network
    4, # europe k8s services network
    4, # asia subnetwork
    4, # asia k8s pod network
    4, # asia k8s services network
  )

  # The first /18 is reserved for Private Service Access; split its CIDR into
  # the address/prefix pair the google_compute_global_address resource needs.
  psa_range_address       = split("/", local.cidr_subnetworks[0])[0]
  psa_range_prefix_length = tonumber(split("/", local.cidr_subnetworks[0])[1])

  # Region alias -> primary /18 for that region's subnetwork.
  subnetworks = {
    primary = local.cidr_subnetworks[1]
    europe  = local.cidr_subnetworks[4]
    asia    = local.cidr_subnetworks[7]
  }

  # Region alias -> GKE secondary ranges (pods + services).
  cluster_ranges = {
    primary = {
      pods     = local.cidr_subnetworks[2]
      services = local.cidr_subnetworks[3]
    }
    europe = {
      pods     = local.cidr_subnetworks[5]
      services = local.cidr_subnetworks[6]
    }
    asia = {
      pods     = local.cidr_subnetworks[8]
      services = local.cidr_subnetworks[9]
    }
  }

  # Names of the secondary ranges attached to every subnetwork.
  secondary_ip_range_k8s_pods     = "k8s-pods"
  secondary_ip_range_k8s_services = "k8s-services"
}
57
+
58
# Dedicated VPC for this scaletest deployment; subnetworks are managed
# explicitly below rather than auto-created.
resource "google_compute_network" "network" {
  project                 = var.project_id
  name                    = "${var.name}-scaletest"
  description             = "scaletest network for ${var.name}"
  auto_create_subnetworks = false
}
65
+
66
# One subnetwork per region alias (primary/europe/asia), each with a unique
# /18 and the two GKE secondary ranges.
resource "google_compute_subnetwork" "subnetwork" {
  for_each = local.subnetworks

  name = "${var.name}-${each.key}"
  # Region comes from the matching deployment entry.
  region                   = local.deployments[each.key].region
  network                  = google_compute_network.network.id
  project                  = var.project_id
  ip_cidr_range            = each.value
  private_ip_google_access = true

  # Secondary range consumed by GKE for pod IPs.
  secondary_ip_range {
    range_name    = local.secondary_ip_range_k8s_pods
    ip_cidr_range = local.cluster_ranges[each.key].pods
  }

  # Secondary range consumed by GKE for service IPs.
  secondary_ip_range {
    range_name    = local.secondary_ip_range_k8s_services
    ip_cidr_range = local.cluster_ranges[each.key].services
  }
}
6
87
88
+ # Create a public IP for each region
7
89
resource "google_compute_address" "coder" {
8
90
for_each = local. deployments
9
91
project = var. project_id
@@ -13,17 +95,60 @@ resource "google_compute_address" "coder" {
13
95
network_tier = " PREMIUM"
14
96
}
15
97
16
# Reserve an internal range for Google-managed services (PSA), used for Cloud
# SQL.
resource "google_compute_global_address" "psa_peering" {
  project = var.project_id
  # NOTE(review): the Terraform address was renamed from sql_peering to
  # psa_peering, but the GCP resource name keeps the "-sql-peering" suffix —
  # presumably to avoid recreating the reserved range. Confirm before renaming.
  name          = "${var.name}-sql-peering"
  purpose       = "VPC_PEERING"
  address_type  = "INTERNAL"
  address       = local.psa_range_address
  prefix_length = local.psa_range_prefix_length
  network       = google_compute_network.network.self_link
}
24
109
25
110
# Peer the VPC with Google's service-networking producer network, handing it
# the PSA range reserved above so managed services (Cloud SQL) get private IPs.
resource "google_service_networking_connection" "private_vpc_connection" {
  network                 = google_compute_network.network.id
  service                 = "servicenetworking.googleapis.com"
  reserved_peering_ranges = [google_compute_global_address.psa_peering.name]
}
115
+
116
# Look up the existing observability VPC so this deployment can be peered with
# it and reach the Prometheus instance.
data "google_compute_network" "observability" {
  project = var.project_id
  name    = var.observability_cluster_vpc
}
122
+
123
# Forward half of the VPC peering: scaletest -> observability. GCP peering is
# only active once both directions exist (see the reverse resource below).
resource "google_compute_network_peering" "scaletest_to_observability" {
  name                 = "peer-${google_compute_network.network.name}-to-${data.google_compute_network.observability.name}"
  network              = google_compute_network.network.self_link
  peer_network         = data.google_compute_network.observability.self_link
  import_custom_routes = true
  export_custom_routes = true
}
130
+
131
# Reverse half of the VPC peering: observability -> scaletest. Mirrors the
# forward peering so the connection becomes ACTIVE.
resource "google_compute_network_peering" "observability_to_scaletest" {
  name                 = "peer-${data.google_compute_network.observability.name}-to-${google_compute_network.network.name}"
  network              = data.google_compute_network.observability.self_link
  peer_network         = google_compute_network.network.self_link
  import_custom_routes = true
  export_custom_routes = true
}
138
+
139
# Allow traffic from the scaletest network into the observability network so we
# can connect to Prometheus.
resource "google_compute_firewall" "observability_allow_from_scaletest" {
  project   = var.project_id
  name      = "allow-from-scaletest-${var.name}"
  network   = data.google_compute_network.observability.self_link
  direction = "INGRESS"
  # Only this deployment's /14 may connect.
  source_ranges = [local.networks[var.name]]

  # ICMP for connectivity debugging (ping).
  allow {
    protocol = "icmp"
  }

  # All TCP ports from the scaletest range.
  allow {
    protocol = "tcp"
    ports    = ["0-65535"]
  }
}
0 commit comments