From 59e276c1bd215485c6da9fe3bd28923035d26725 Mon Sep 17 00:00:00 2001 From: Ethan Dickson Date: Thu, 21 Aug 2025 11:55:16 +0000 Subject: [PATCH] refactor: use vpc for scaletest networking infrastructure Co-authored-by: Dean Sheather --- scaletest/terraform/action/gcp_clusters.tf | 8 +- scaletest/terraform/action/gcp_db.tf | 2 +- scaletest/terraform/action/gcp_vpc.tf | 141 +++++++++++++++++++-- scaletest/terraform/action/vars.tf | 5 + 4 files changed, 143 insertions(+), 13 deletions(-) diff --git a/scaletest/terraform/action/gcp_clusters.tf b/scaletest/terraform/action/gcp_clusters.tf index 5987d07db03ad..0a3acfd06ccae 100644 --- a/scaletest/terraform/action/gcp_clusters.tf +++ b/scaletest/terraform/action/gcp_clusters.tf @@ -78,12 +78,13 @@ resource "google_container_cluster" "cluster" { name = "${var.name}-${each.key}" location = each.value.zone project = var.project_id - network = local.vpc_name - subnetwork = local.subnet_name + network = google_compute_network.network.name + subnetwork = google_compute_subnetwork.subnetwork[each.key].name networking_mode = "VPC_NATIVE" default_max_pods_per_node = 256 ip_allocation_policy { # Required with networking_mode=VPC_NATIVE - + cluster_secondary_range_name = local.secondary_ip_range_k8s_pods + services_secondary_range_name = local.secondary_ip_range_k8s_services } release_channel { # Setting release channel as STABLE can cause unexpected cluster upgrades. 
@@ -108,7 +109,6 @@ resource "google_container_cluster" "cluster" { workload_pool = "${data.google_project.project.project_id}.svc.id.goog" } - lifecycle { ignore_changes = [ maintenance_policy, diff --git a/scaletest/terraform/action/gcp_db.tf b/scaletest/terraform/action/gcp_db.tf index 9eb17464e1ce9..e7e64005f4b8f 100644 --- a/scaletest/terraform/action/gcp_db.tf +++ b/scaletest/terraform/action/gcp_db.tf @@ -23,7 +23,7 @@ resource "google_sql_database_instance" "db" { ip_configuration { ipv4_enabled = false - private_network = local.vpc_id + private_network = google_compute_network.network.id } insights_config { diff --git a/scaletest/terraform/action/gcp_vpc.tf b/scaletest/terraform/action/gcp_vpc.tf index 10624edaddf91..4bca3b3f510ba 100644 --- a/scaletest/terraform/action/gcp_vpc.tf +++ b/scaletest/terraform/action/gcp_vpc.tf @@ -1,9 +1,91 @@ locals { - vpc_name = "scaletest" - vpc_id = "projects/${var.project_id}/global/networks/${local.vpc_name}" - subnet_name = "scaletest" + # Generate a /14 for each deployment. + cidr_networks = cidrsubnets( + "172.16.0.0/12", + 2, + 2, + 2, + ) + + networks = { + alpha = local.cidr_networks[0] + bravo = local.cidr_networks[1] + charlie = local.cidr_networks[2] + } + + # Generate a bunch of /18s within the subnet we're using from the above map. 
+ cidr_subnetworks = cidrsubnets( + local.networks[var.name], + 4, # PSA + 4, # primary subnetwork + 4, # primary k8s pod network + 4, # primary k8s services network + 4, # europe subnetwork + 4, # europe k8s pod network + 4, # europe k8s services network + 4, # asia subnetwork + 4, # asia k8s pod network + 4, # asia k8s services network + ) + + psa_range_address = split("/", local.cidr_subnetworks[0])[0] + psa_range_prefix_length = tonumber(split("/", local.cidr_subnetworks[0])[1]) + + subnetworks = { + primary = local.cidr_subnetworks[1] + europe = local.cidr_subnetworks[4] + asia = local.cidr_subnetworks[7] + } + cluster_ranges = { + primary = { + pods = local.cidr_subnetworks[2] + services = local.cidr_subnetworks[3] + } + europe = { + pods = local.cidr_subnetworks[5] + services = local.cidr_subnetworks[6] + } + asia = { + pods = local.cidr_subnetworks[8] + services = local.cidr_subnetworks[9] + } + } + + secondary_ip_range_k8s_pods = "k8s-pods" + secondary_ip_range_k8s_services = "k8s-services" +} + +# Create a VPC for the deployment +resource "google_compute_network" "network" { + project = var.project_id + name = "${var.name}-scaletest" + description = "scaletest network for ${var.name}" + auto_create_subnetworks = false +} + +# Create a subnetwork with a unique range for each region +resource "google_compute_subnetwork" "subnetwork" { + for_each = local.subnetworks + name = "${var.name}-${each.key}" + # Use the deployment region + region = local.deployments[each.key].region + network = google_compute_network.network.id + project = var.project_id + ip_cidr_range = each.value + private_ip_google_access = true + + secondary_ip_range { + range_name = local.secondary_ip_range_k8s_pods + ip_cidr_range = local.cluster_ranges[each.key].pods + } + + secondary_ip_range { + range_name = local.secondary_ip_range_k8s_services + ip_cidr_range = local.cluster_ranges[each.key].services + } } +# Create a public IP for each region resource "google_compute_address" "coder" { 
for_each = local.deployments project = var.project_id @@ -13,17 +95,60 @@ resource "google_compute_address" "coder" { network_tier = "PREMIUM" } -resource "google_compute_global_address" "sql_peering" { +# Reserve an internal range for Google-managed services (PSA), used for Cloud +# SQL +resource "google_compute_global_address" "psa_peering" { project = var.project_id name = "${var.name}-sql-peering" purpose = "VPC_PEERING" address_type = "INTERNAL" - prefix_length = 16 - network = local.vpc_name + address = local.psa_range_address + prefix_length = local.psa_range_prefix_length + network = google_compute_network.network.self_link } resource "google_service_networking_connection" "private_vpc_connection" { - network = local.vpc_id + network = google_compute_network.network.id service = "servicenetworking.googleapis.com" - reserved_peering_ranges = [google_compute_global_address.sql_peering.name] + reserved_peering_ranges = [google_compute_global_address.psa_peering.name] +} + +# Join the new network to the observability network so we can talk to the +# Prometheus instance +data "google_compute_network" "observability" { + project = var.project_id + name = var.observability_cluster_vpc +} + +resource "google_compute_network_peering" "scaletest_to_observability" { + name = "peer-${google_compute_network.network.name}-to-${data.google_compute_network.observability.name}" + network = google_compute_network.network.self_link + peer_network = data.google_compute_network.observability.self_link + import_custom_routes = true + export_custom_routes = true +} + +resource "google_compute_network_peering" "observability_to_scaletest" { + name = "peer-${data.google_compute_network.observability.name}-to-${google_compute_network.network.name}" + network = data.google_compute_network.observability.self_link + peer_network = google_compute_network.network.self_link + import_custom_routes = true + export_custom_routes = true +} + +# Allow traffic from the scaletest network into 
the observability network so we +# can connect to Prometheus +resource "google_compute_firewall" "observability_allow_from_scaletest" { + project = var.project_id + name = "allow-from-scaletest-${var.name}" + network = data.google_compute_network.observability.self_link + direction = "INGRESS" + source_ranges = [local.networks[var.name]] + allow { + protocol = "icmp" + } + allow { + protocol = "tcp" + ports = ["0-65535"] + } } diff --git a/scaletest/terraform/action/vars.tf b/scaletest/terraform/action/vars.tf index fe625ed5665ba..0df162f92527b 100644 --- a/scaletest/terraform/action/vars.tf +++ b/scaletest/terraform/action/vars.tf @@ -96,6 +96,11 @@ variable "observability_cluster_location" { default = "us-east1-b" } +variable "observability_cluster_vpc" { + description = "Name of the observability cluster VPC network to peer with." + default = "default" +} + variable "cloudflare_api_token_secret" { description = "Name of the Google Secret Manager secret containing the Cloudflare API token." default = "cloudflare-api-token-dns"