
refactor(scaletest): use vpc for networking infrastructure #19464


Merged · 1 commit · Aug 25, 2025
8 changes: 4 additions & 4 deletions scaletest/terraform/action/gcp_clusters.tf
@@ -78,12 +78,13 @@ resource "google_container_cluster" "cluster" {
  name     = "${var.name}-${each.key}"
  location = each.value.zone
  project  = var.project_id
-  network    = local.vpc_name
-  subnetwork = local.subnet_name
+  network    = google_compute_network.network.name
+  subnetwork = google_compute_subnetwork.subnetwork[each.key].name
  networking_mode           = "VPC_NATIVE"
  default_max_pods_per_node = 256
  ip_allocation_policy { # Required with networking_mode=VPC_NATIVE
-
+    cluster_secondary_range_name  = local.secondary_ip_range_k8s_pods
+    services_secondary_range_name = local.secondary_ip_range_k8s_services
  }
  release_channel {
    # Setting release channel as STABLE can cause unexpected cluster upgrades.
@@ -108,7 +109,6 @@ resource "google_container_cluster" "cluster" {
  workload_pool = "${data.google_project.project.project_id}.svc.id.goog"
}

-
lifecycle {
  ignore_changes = [
    maintenance_policy,
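Note: the two secondary range names must match the range_name entries on the cluster's subnetwork, which gcp_vpc.tf below defines. A rough sanity check in terraform console, assuming var.name = "alpha" and the primary cluster (addresses hand-derived, illustrative only):

  > google_compute_subnetwork.subnetwork["primary"].secondary_ip_range
  [
    { range_name = "k8s-pods", ip_cidr_range = "172.16.128.0/18" },
    { range_name = "k8s-services", ip_cidr_range = "172.16.192.0/18" },
  ]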
2 changes: 1 addition & 1 deletion scaletest/terraform/action/gcp_db.tf
@@ -23,7 +23,7 @@ resource "google_sql_database_instance" "db" {

  ip_configuration {
    ipv4_enabled    = false
-    private_network = local.vpc_id
+    private_network = google_compute_network.network.id
  }

  insights_config {
141 changes: 133 additions & 8 deletions scaletest/terraform/action/gcp_vpc.tf
@@ -1,9 +1,91 @@
locals {
-  vpc_name    = "scaletest"
-  vpc_id      = "projects/${var.project_id}/global/networks/${local.vpc_name}"
-  subnet_name = "scaletest"
+  # Generate a /14 for each deployment.
+  cidr_networks = cidrsubnets(
+    "172.16.0.0/12",
+    2,
+    2,
+    2,
+  )
+
+  networks = {
+    alpha   = local.cidr_networks[0]
+    bravo   = local.cidr_networks[1]
+    charlie = local.cidr_networks[2]
+  }
+
+  # Generate a set of /18s within the deployment's network from the above map.
+  cidr_subnetworks = cidrsubnets(
+    local.networks[var.name],
+    4, # PSA
+    4, # primary subnetwork
+    4, # primary k8s pod network
+    4, # primary k8s services network
+    4, # europe subnetwork
+    4, # europe k8s pod network
+    4, # europe k8s services network
+    4, # asia subnetwork
+    4, # asia k8s pod network
+    4, # asia k8s services network
+  )
+
+  psa_range_address       = split("/", local.cidr_subnetworks[0])[0]
+  psa_range_prefix_length = tonumber(split("/", local.cidr_subnetworks[0])[1])
+
+  subnetworks = {
+    primary = local.cidr_subnetworks[1]
+    europe  = local.cidr_subnetworks[4]
+    asia    = local.cidr_subnetworks[7]
+  }
+  cluster_ranges = {
+    primary = {
+      pods     = local.cidr_subnetworks[2]
+      services = local.cidr_subnetworks[3]
+    }
+    europe = {
+      pods     = local.cidr_subnetworks[5]
+      services = local.cidr_subnetworks[6]
+    }
+    asia = {
+      pods     = local.cidr_subnetworks[8]
+      services = local.cidr_subnetworks[9]
+    }
+  }
+
+  secondary_ip_range_k8s_pods     = "k8s-pods"
+  secondary_ip_range_k8s_services = "k8s-services"
}
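To make the address plan concrete: a rough sketch of what these locals evaluate to in terraform console, assuming var.name = "alpha" (values computed by hand from the cidrsubnets calls above, so treat them as illustrative):

  > local.cidr_networks
  ["172.16.0.0/14", "172.20.0.0/14", "172.24.0.0/14"]
  > local.cidr_subnetworks   # ten /18s carved sequentially from 172.16.0.0/14
  ["172.16.0.0/18", "172.16.64.0/18", "172.16.128.0/18", "172.16.192.0/18",
   "172.17.0.0/18", "172.17.64.0/18", "172.17.128.0/18", "172.17.192.0/18",
   "172.18.0.0/18", "172.18.64.0/18"]

Each /14 holds sixteen /18s, so the ten allocations above leave headroom for future regions.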

+# Create a VPC for the deployment
+resource "google_compute_network" "network" {
+  project                 = var.project_id
+  name                    = "${var.name}-scaletest"
+  description             = "scaletest network for ${var.name}"
+  auto_create_subnetworks = false
+}
+
+# Create a subnetwork with a unique range for each region
+resource "google_compute_subnetwork" "subnetwork" {
+  for_each = local.subnetworks
+  name     = "${var.name}-${each.key}"
+  # Use the deployment region
+  region                   = local.deployments[each.key].region
+  network                  = google_compute_network.network.id
+  project                  = var.project_id
+  ip_cidr_range            = each.value
+  private_ip_google_access = true
+
+  secondary_ip_range {
+    range_name    = local.secondary_ip_range_k8s_pods
+    ip_cidr_range = local.cluster_ranges[each.key].pods
+  }
+
+  secondary_ip_range {
+    range_name    = local.secondary_ip_range_k8s_services
+    ip_cidr_range = local.cluster_ranges[each.key].services
+  }
+}

# Create a public IP for each region
resource "google_compute_address" "coder" {
  for_each = local.deployments
  project  = var.project_id
@@ -13,17 +95,60 @@ resource "google_compute_address" "coder" {
  network_tier = "PREMIUM"
}

-resource "google_compute_global_address" "sql_peering" {
+# Reserve an internal range for Google-managed services (PSA), used for Cloud
+# SQL
+resource "google_compute_global_address" "psa_peering" {
  project       = var.project_id
  name          = "${var.name}-sql-peering"
  purpose       = "VPC_PEERING"
  address_type  = "INTERNAL"
-  prefix_length = 16
-  network       = local.vpc_name
+  address       = local.psa_range_address
+  prefix_length = local.psa_range_prefix_length
+  network       = google_compute_network.network.self_link
}

resource "google_service_networking_connection" "private_vpc_connection" {
-  network                 = local.vpc_id
+  network                 = google_compute_network.network.id
  service                 = "servicenetworking.googleapis.com"
-  reserved_peering_ranges = [google_compute_global_address.sql_peering.name]
+  reserved_peering_ranges = [google_compute_global_address.psa_peering.name]
}
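For reference, the PSA reservation is the first /18 of the deployment's /14, and Cloud SQL draws its private IP from it via the service networking connection above. A rough terraform console sketch, again assuming var.name = "alpha":

  > local.cidr_subnetworks[0]
  "172.16.0.0/18"
  > local.psa_range_address
  "172.16.0.0"
  > local.psa_range_prefix_length
  18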

+# Join the new network to the observability network so we can talk to the
+# Prometheus instance
+data "google_compute_network" "observability" {
+  project = var.project_id
+  name    = var.observability_cluster_vpc
+}
+
+resource "google_compute_network_peering" "scaletest_to_observability" {
+  name                 = "peer-${google_compute_network.network.name}-to-${data.google_compute_network.observability.name}"
+  network              = google_compute_network.network.self_link
+  peer_network         = data.google_compute_network.observability.self_link
+  import_custom_routes = true
+  export_custom_routes = true
+}
+
+resource "google_compute_network_peering" "observability_to_scaletest" {
+  name                 = "peer-${data.google_compute_network.observability.name}-to-${google_compute_network.network.name}"
+  network              = data.google_compute_network.observability.self_link
+  peer_network         = google_compute_network.network.self_link
+  import_custom_routes = true
+  export_custom_routes = true
+}
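A VPC peering only transitions to ACTIVE once both directions exist, which is why the pair is declared explicitly. A rough post-apply check in terraform console (state is a computed attribute of google_compute_network_peering):

  > google_compute_network_peering.scaletest_to_observability.state
  "ACTIVE"
  > google_compute_network_peering.observability_to_scaletest.state
  "ACTIVE"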

+# Allow traffic from the scaletest network into the observability network so we
+# can connect to Prometheus
+resource "google_compute_firewall" "observability_allow_from_scaletest" {
+  project       = var.project_id
+  name          = "allow-from-scaletest-${var.name}"
+  network       = data.google_compute_network.observability.self_link
+  direction     = "INGRESS"
+  source_ranges = [local.networks[var.name]]
+  allow {
+    protocol = "icmp"
+  }
+  allow {
+    protocol = "tcp"
+    ports    = ["0-65535"]
+  }
+}
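Because source_ranges is the deployment's entire /14, node, pod, and service IPs are all covered by this single rule; e.g., assuming var.name = "alpha":

  > local.networks[var.name]
  "172.16.0.0/14"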
5 changes: 5 additions & 0 deletions scaletest/terraform/action/vars.tf
@@ -96,6 +96,11 @@ variable "observability_cluster_location" {
  default = "us-east1-b"
}

+variable "observability_cluster_vpc" {
+  description = "Name of the observability cluster VPC network to peer with."
+  default     = "default"
+}
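A minimal usage sketch (the VPC name "observability" is hypothetical; the default of "default" peers with the project's default network):

  terraform apply -var name=alpha -var observability_cluster_vpc=observability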

variable "cloudflare_api_token_secret" {
description = "Name of the Google Secret Manager secret containing the Cloudflare API token."
default = "cloudflare-api-token-dns"
Expand Down