From 0fc3af864094dc1a921bef25862b633d0605e375 Mon Sep 17 00:00:00 2001 From: betterclever Date: Mon, 9 Feb 2026 14:19:20 +0400 Subject: [PATCH 01/36] feat(infra): add GCP Terraform envs (dev/prod) Signed-off-by: betterclever Signed-off-by: betterclever --- .gitignore | 14 +++ infra/gcp/APPLY.md | 88 ++++++++++++++ infra/gcp/README.md | 55 +++++++++ infra/gcp/bootstrap/main.tf | 24 ++++ infra/gcp/bootstrap/variables.tf | 16 +++ infra/gcp/bootstrap/versions.tf | 16 +++ infra/gcp/envs/dev/main.tf | 114 ++++++++++++++++++ infra/gcp/envs/dev/variables.tf | 47 ++++++++ infra/gcp/envs/dev/versions.tf | 19 +++ infra/gcp/envs/prod/main.tf | 194 +++++++++++++++++++++++++++++++ infra/gcp/envs/prod/variables.tf | 74 ++++++++++++ infra/gcp/envs/prod/versions.tf | 18 +++ 12 files changed, 679 insertions(+) create mode 100644 infra/gcp/APPLY.md create mode 100644 infra/gcp/README.md create mode 100644 infra/gcp/bootstrap/main.tf create mode 100644 infra/gcp/bootstrap/variables.tf create mode 100644 infra/gcp/bootstrap/versions.tf create mode 100644 infra/gcp/envs/dev/main.tf create mode 100644 infra/gcp/envs/dev/variables.tf create mode 100644 infra/gcp/envs/dev/versions.tf create mode 100644 infra/gcp/envs/prod/main.tf create mode 100644 infra/gcp/envs/prod/variables.tf create mode 100644 infra/gcp/envs/prod/versions.tf diff --git a/.gitignore b/.gitignore index c53977d7..494695ba 100644 --- a/.gitignore +++ b/.gitignore @@ -73,3 +73,17 @@ vite.config.ts.timestamp-* .playground/ .omc/ MCP_FLOW_TRACE.md + +# Terraform / OpenTofu +.terraform/ +*.tfstate +*.tfstate.* +*.tfvars +*.tfvars.json +crash.log +crash.*.log +override.tf +override.tf.json +*_override.tf +*_override.tf.json +.terraform.lock.hcl.bak diff --git a/infra/gcp/APPLY.md b/infra/gcp/APPLY.md new file mode 100644 index 00000000..b072acda --- /dev/null +++ b/infra/gcp/APPLY.md @@ -0,0 +1,88 @@ +# Apply Guide (Terraform) + +This repo uses `terraform` locally. (OpenTofu works too, but is not assumed to be installed.) + +## 0) Auth (required) + +Terraform's GCP provider uses Application Default Credentials (ADC) by default. 
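+If ADC isn't an option (for example, a service account in CI), the provider also honors a key file via `GOOGLE_APPLICATION_CREDENTIALS`; the path below is a placeholder, not a file this repo ships:
+
+```bash
+export GOOGLE_APPLICATION_CREDENTIALS="$HOME/keys/terraform-sa.json"
+```
+
+For interactive use: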
+ +```bash +gcloud auth login +gcloud auth application-default login +gcloud config set project shipsec +gcloud config set compute/region us-central1 +gcloud config set compute/zone us-central1-a +``` + +Verify: + +```bash +gcloud auth application-default print-access-token >/dev/null && echo adc:present +``` + +## 1) Bootstrap Terraform state bucket (run once) + +Pick a globally unique bucket name, then: + +```bash +cd infra/gcp/bootstrap +terraform init +terraform apply \ + -var project_id=shipsec \ + -var region=us-central1 \ + -var state_bucket_name=shipsec-tfstate +``` + +## 2) Dev environment (fast) + +```bash +cd infra/gcp/envs/dev +terraform init \ + -backend-config="bucket=shipsec-tfstate" \ + -backend-config="prefix=infra/gcp/dev" + +terraform apply \ + -var project_id=shipsec \ + -var region=us-central1 \ + -var zone=us-central1-a \ + -var cluster_name=shipsec-dev +``` + +Get credentials: + +```bash +gcloud container clusters get-credentials shipsec-dev --zone us-central1-a --project shipsec +kubectl get nodes +``` + +## 3) Prod environment (baseline) + +`prod` creates a regional cluster with private nodes and Cloud NAT, plus separate node pools: + +- `system-pool`: backend/worker/control plane pods +- `exec-pool`: execution workloads (tainted `shipsec.io/exec=true:NoSchedule`) + +```bash +cd infra/gcp/envs/prod +terraform init \ + -backend-config="bucket=shipsec-tfstate" \ + -backend-config="prefix=infra/gcp/prod" + +terraform apply \ + -var project_id=shipsec \ + -var region=us-central1 \ + -var cluster_name=shipsec-prod +``` + +Then fetch credentials: + +```bash +gcloud container clusters get-credentials shipsec-prod --region us-central1 --project shipsec +kubectl get nodes +``` + +## Notes + +- If your org policies require it, add a project `environment` tag. It's not required for GKE itself. +- This file intentionally does not include any credentials, service account keys, or secrets. + diff --git a/infra/gcp/README.md b/infra/gcp/README.md new file mode 100644 index 00000000..1a803b86 --- /dev/null +++ b/infra/gcp/README.md @@ -0,0 +1,55 @@ +# GCP Infra (Terraform/OpenTofu) + +This directory is intended for the **private** repo only. + +Goals: + +- Provision GCP infrastructure (network, GKE, Artifact Registry) with sane defaults. +- Keep app deployment (Helm) separate from infrastructure provisioning. +- Support a fast `dev` environment and a safer `prod` environment. + +## Layout + +- `infra/gcp/bootstrap/`: creates a GCS bucket for Terraform state (run once per project). +- `infra/gcp/envs/dev/`: fast dev cluster (zonal, public nodes by default). +- `infra/gcp/envs/prod/`: production-ready baseline (regional, private nodes, Cloud NAT, node pool split). + +## Prereqs + +- `gcloud` authenticated to the right project +- Application Default Credentials for Terraform/OpenTofu: + +```bash +gcloud auth application-default login +gcloud config set project shipsec +``` + +## Quickstart (recommended) + +1. Bootstrap state bucket: + +```bash +cd infra/gcp/bootstrap +terraform init +terraform apply -var project_id=shipsec -var region=us-central1 +``` + +2. Create `dev` cluster: + +```bash +cd infra/gcp/envs/dev +terraform init -backend-config="bucket=shipsec-tfstate" -backend-config="prefix=infra/gcp/dev" +terraform apply -var project_id=shipsec -var region=us-central1 -var zone=us-central1-a +``` + +3. 
Fetch kube credentials: + +```bash +gcloud container clusters get-credentials shipsec-dev --zone us-central1-a --project shipsec +kubectl get nodes +``` + +## Notes + +- `prod` uses private nodes and Cloud NAT by default. That is closer to real production, but costs more. +- Artifact Registry is created in the chosen region for pushing images. diff --git a/infra/gcp/bootstrap/main.tf b/infra/gcp/bootstrap/main.tf new file mode 100644 index 00000000..c947d884 --- /dev/null +++ b/infra/gcp/bootstrap/main.tf @@ -0,0 +1,24 @@ +resource "google_storage_bucket" "tfstate" { + name = var.state_bucket_name + location = var.region + uniform_bucket_level_access = true + force_destroy = false + + versioning { + enabled = true + } + + lifecycle_rule { + condition { + num_newer_versions = 20 + } + action { + type = "Delete" + } + } +} + +output "state_bucket_name" { + value = google_storage_bucket.tfstate.name +} + diff --git a/infra/gcp/bootstrap/variables.tf b/infra/gcp/bootstrap/variables.tf new file mode 100644 index 00000000..adb347f3 --- /dev/null +++ b/infra/gcp/bootstrap/variables.tf @@ -0,0 +1,16 @@ +variable "project_id" { + type = string + description = "GCP project id (e.g. shipsec)." +} + +variable "region" { + type = string + description = "GCP region (e.g. us-central1)." +} + +variable "state_bucket_name" { + type = string + description = "Globally unique GCS bucket name for Terraform state." + default = "shipsec-tfstate" +} + diff --git a/infra/gcp/bootstrap/versions.tf b/infra/gcp/bootstrap/versions.tf new file mode 100644 index 00000000..2bb8da98 --- /dev/null +++ b/infra/gcp/bootstrap/versions.tf @@ -0,0 +1,16 @@ +terraform { + required_version = ">= 1.6.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.20.0" + } + } +} + +provider "google" { + project = var.project_id + region = var.region +} + diff --git a/infra/gcp/envs/dev/main.tf b/infra/gcp/envs/dev/main.tf new file mode 100644 index 00000000..cb66f9a8 --- /dev/null +++ b/infra/gcp/envs/dev/main.tf @@ -0,0 +1,114 @@ +locals { + services = toset([ + "cloudresourcemanager.googleapis.com", + "serviceusage.googleapis.com", + "iam.googleapis.com", + "container.googleapis.com", + "artifactregistry.googleapis.com", + "secretmanager.googleapis.com", + ]) +} + +resource "google_project_service" "enabled" { + for_each = local.services + project = var.project_id + service = each.value + + disable_on_destroy = false +} + +resource "google_artifact_registry_repository" "docker" { + project = var.project_id + location = var.region + repository_id = var.artifact_repo_name + format = "DOCKER" + + depends_on = [google_project_service.enabled] +} + +resource "google_compute_network" "vpc" { + project = var.project_id + name = "${var.cluster_name}-vpc" + auto_create_subnetworks = false + + depends_on = [google_project_service.enabled] +} + +resource "google_compute_subnetwork" "subnet" { + project = var.project_id + region = var.region + name = "${var.cluster_name}-subnet" + network = google_compute_network.vpc.id + ip_cidr_range = "10.10.0.0/16" + + secondary_ip_range { + range_name = "pods" + ip_cidr_range = "10.20.0.0/16" + } + + secondary_ip_range { + range_name = "services" + ip_cidr_range = "10.30.0.0/20" + } +} + +resource "google_container_cluster" "gke" { + project = var.project_id + name = var.cluster_name + location = var.zone + + deletion_protection = false + remove_default_node_pool = true + initial_node_count = 1 + + release_channel { + channel = "REGULAR" + } + + network = 
google_compute_network.vpc.id + subnetwork = google_compute_subnetwork.subnet.id + + ip_allocation_policy { + cluster_secondary_range_name = "pods" + services_secondary_range_name = "services" + } + + workload_identity_config { + workload_pool = "${var.project_id}.svc.id.goog" + } + + depends_on = [google_project_service.enabled] +} + +resource "google_container_node_pool" "default_pool" { + project = var.project_id + name = "default-pool" + cluster = google_container_cluster.gke.name + location = var.zone + + initial_node_count = var.node_count + + node_config { + machine_type = var.node_machine_type + disk_type = "pd-balanced" + disk_size_gb = var.node_disk_gb + image_type = "COS_CONTAINERD" + + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +output "artifact_registry_repo" { + value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.docker.repository_id}" +} + +output "cluster_location" { + value = var.zone +} + +output "cluster_name" { + value = google_container_cluster.gke.name +} + diff --git a/infra/gcp/envs/dev/variables.tf b/infra/gcp/envs/dev/variables.tf new file mode 100644 index 00000000..46f1848e --- /dev/null +++ b/infra/gcp/envs/dev/variables.tf @@ -0,0 +1,47 @@ +variable "project_id" { + type = string + description = "GCP project id (e.g. shipsec)." +} + +variable "region" { + type = string + description = "GCP region (e.g. us-central1)." + default = "us-central1" +} + +variable "zone" { + type = string + description = "GCP zone for a zonal dev cluster (e.g. us-central1-a)." + default = "us-central1-a" +} + +variable "cluster_name" { + type = string + description = "GKE cluster name." + default = "shipsec-dev" +} + +variable "artifact_repo_name" { + type = string + description = "Artifact Registry repo name (Docker)." + default = "shipsec-studio" +} + +variable "node_machine_type" { + type = string + description = "Machine type for dev nodes." + default = "e2-standard-4" +} + +variable "node_count" { + type = number + description = "Initial node count for the dev node pool." + default = 2 +} + +variable "node_disk_gb" { + type = number + description = "Boot disk size (GB)." 
+ default = 100 +} + diff --git a/infra/gcp/envs/dev/versions.tf b/infra/gcp/envs/dev/versions.tf new file mode 100644 index 00000000..9a1b3ca0 --- /dev/null +++ b/infra/gcp/envs/dev/versions.tf @@ -0,0 +1,19 @@ +terraform { + required_version = ">= 1.6.0" + + backend "gcs" {} + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.20.0" + } + } +} + +provider "google" { + project = var.project_id + region = var.region + zone = var.zone +} + diff --git a/infra/gcp/envs/prod/main.tf b/infra/gcp/envs/prod/main.tf new file mode 100644 index 00000000..01237a2c --- /dev/null +++ b/infra/gcp/envs/prod/main.tf @@ -0,0 +1,194 @@ +locals { + services = toset([ + "cloudresourcemanager.googleapis.com", + "serviceusage.googleapis.com", + "iam.googleapis.com", + "container.googleapis.com", + "artifactregistry.googleapis.com", + "secretmanager.googleapis.com", + "compute.googleapis.com", + ]) +} + +resource "google_project_service" "enabled" { + for_each = local.services + project = var.project_id + service = each.value + disable_on_destroy = false +} + +resource "google_artifact_registry_repository" "docker" { + project = var.project_id + location = var.region + repository_id = var.artifact_repo_name + format = "DOCKER" + + depends_on = [google_project_service.enabled] +} + +resource "google_compute_network" "vpc" { + project = var.project_id + name = "${var.cluster_name}-vpc" + auto_create_subnetworks = false + + depends_on = [google_project_service.enabled] +} + +resource "google_compute_subnetwork" "subnet" { + project = var.project_id + region = var.region + name = "${var.cluster_name}-subnet" + network = google_compute_network.vpc.id + ip_cidr_range = "10.110.0.0/16" + + private_ip_google_access = true + + secondary_ip_range { + range_name = "pods" + ip_cidr_range = "10.120.0.0/16" + } + + secondary_ip_range { + range_name = "services" + ip_cidr_range = "10.130.0.0/20" + } +} + +resource "google_compute_router" "router" { + project = var.project_id + region = var.region + name = "${var.cluster_name}-router" + network = google_compute_network.vpc.id +} + +resource "google_compute_router_nat" "nat" { + project = var.project_id + region = var.region + name = "${var.cluster_name}-nat" + router = google_compute_router.router.name + + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + + subnetwork { + name = google_compute_subnetwork.subnet.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + log_config { + enable = true + filter = "ERRORS_ONLY" + } +} + +resource "google_container_cluster" "gke" { + project = var.project_id + name = var.cluster_name + location = var.region + + deletion_protection = true + remove_default_node_pool = true + initial_node_count = 1 + + release_channel { + channel = "REGULAR" + } + + network = google_compute_network.vpc.id + subnetwork = google_compute_subnetwork.subnet.id + + ip_allocation_policy { + cluster_secondary_range_name = "pods" + services_secondary_range_name = "services" + } + + workload_identity_config { + workload_pool = "${var.project_id}.svc.id.goog" + } + + private_cluster_config { + enable_private_nodes = true + enable_private_endpoint = false + master_ipv4_cidr_block = "172.16.0.0/28" + } + + dynamic "master_authorized_networks_config" { + for_each = length(var.master_authorized_cidrs) > 0 ? 
[1] : [] + content { + dynamic "cidr_blocks" { + for_each = var.master_authorized_cidrs + content { + cidr_block = cidr_blocks.value.cidr_block + display_name = cidr_blocks.value.display_name + } + } + } + } + + depends_on = [google_project_service.enabled] +} + +resource "google_container_node_pool" "system" { + project = var.project_id + name = "system-pool" + cluster = google_container_cluster.gke.name + location = var.region + + autoscaling { + min_node_count = var.system_pool_min + max_node_count = var.system_pool_max + } + + node_config { + machine_type = var.system_pool_machine_type + disk_type = "pd-balanced" + disk_size_gb = var.node_disk_gb + image_type = "COS_CONTAINERD" + + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +resource "google_container_node_pool" "exec" { + project = var.project_id + name = "exec-pool" + cluster = google_container_cluster.gke.name + location = var.region + + autoscaling { + min_node_count = var.exec_pool_min + max_node_count = var.exec_pool_max + } + + node_config { + machine_type = var.exec_pool_machine_type + disk_type = "pd-balanced" + disk_size_gb = var.node_disk_gb + image_type = "COS_CONTAINERD" + + taint { + key = "shipsec.io/exec" + value = "true" + effect = "NO_SCHEDULE" + } + + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +output "artifact_registry_repo" { + value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.docker.repository_id}" +} + +output "cluster_location" { + value = var.region +} + +output "cluster_name" { + value = google_container_cluster.gke.name +} + diff --git a/infra/gcp/envs/prod/variables.tf b/infra/gcp/envs/prod/variables.tf new file mode 100644 index 00000000..3a36d268 --- /dev/null +++ b/infra/gcp/envs/prod/variables.tf @@ -0,0 +1,74 @@ +variable "project_id" { + type = string + description = "GCP project id (e.g. shipsec)." +} + +variable "region" { + type = string + description = "GCP region for a regional prod cluster (e.g. us-central1)." + default = "us-central1" +} + +variable "cluster_name" { + type = string + description = "GKE cluster name." + default = "shipsec-prod" +} + +variable "artifact_repo_name" { + type = string + description = "Artifact Registry repo name (Docker)." + default = "shipsec-studio" +} + +variable "master_authorized_cidrs" { + type = list(object({ + cidr_block = string + display_name = string + })) + description = "CIDRs allowed to reach the control plane endpoint." + default = [] +} + +variable "system_pool_machine_type" { + type = string + description = "Machine type for the system node pool." + default = "e2-standard-4" +} + +variable "exec_pool_machine_type" { + type = string + description = "Machine type for the execution node pool." + default = "e2-standard-4" +} + +variable "system_pool_min" { + type = number + description = "Min nodes for system pool." + default = 2 +} + +variable "system_pool_max" { + type = number + description = "Max nodes for system pool." + default = 5 +} + +variable "exec_pool_min" { + type = number + description = "Min nodes for exec pool." + default = 1 +} + +variable "exec_pool_max" { + type = number + description = "Max nodes for exec pool." + default = 4 +} + +variable "node_disk_gb" { + type = number + description = "Boot disk size (GB)." 
+ default = 100 +} + diff --git a/infra/gcp/envs/prod/versions.tf b/infra/gcp/envs/prod/versions.tf new file mode 100644 index 00000000..d7be8373 --- /dev/null +++ b/infra/gcp/envs/prod/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.6.0" + + backend "gcs" {} + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.20.0" + } + } +} + +provider "google" { + project = var.project_id + region = var.region +} + From 8ebad704df2ee024323291be0499fd05118eaa06 Mon Sep 17 00:00:00 2001 From: betterclever Date: Mon, 9 Feb 2026 14:23:27 +0400 Subject: [PATCH 02/36] fix(infra): enable compute API for dev env Signed-off-by: betterclever Signed-off-by: betterclever --- infra/gcp/envs/dev/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infra/gcp/envs/dev/main.tf b/infra/gcp/envs/dev/main.tf index cb66f9a8..66a9291a 100644 --- a/infra/gcp/envs/dev/main.tf +++ b/infra/gcp/envs/dev/main.tf @@ -3,6 +3,7 @@ locals { "cloudresourcemanager.googleapis.com", "serviceusage.googleapis.com", "iam.googleapis.com", + "compute.googleapis.com", "container.googleapis.com", "artifactregistry.googleapis.com", "secretmanager.googleapis.com", @@ -111,4 +112,3 @@ output "cluster_location" { output "cluster_name" { value = google_container_cluster.gke.name } - From 4bfe7243eb34ebeac54d5242d893ecbf4e216c48 Mon Sep 17 00:00:00 2001 From: betterclever Date: Mon, 9 Feb 2026 14:25:14 +0400 Subject: [PATCH 03/36] feat(infra): allow token auth for Terraform Signed-off-by: betterclever Signed-off-by: betterclever --- infra/gcp/bootstrap/variables.tf | 8 +++++++- infra/gcp/bootstrap/versions.tf | 6 +++--- infra/gcp/envs/dev/variables.tf | 8 +++++++- infra/gcp/envs/dev/versions.tf | 8 ++++---- infra/gcp/envs/prod/variables.tf | 8 +++++++- infra/gcp/envs/prod/versions.tf | 6 +++--- 6 files changed, 31 insertions(+), 13 deletions(-) diff --git a/infra/gcp/bootstrap/variables.tf b/infra/gcp/bootstrap/variables.tf index adb347f3..af304801 100644 --- a/infra/gcp/bootstrap/variables.tf +++ b/infra/gcp/bootstrap/variables.tf @@ -8,9 +8,15 @@ variable "region" { description = "GCP region (e.g. us-central1)." } +variable "access_token" { + type = string + description = "Optional short-lived OAuth access token (bypasses ADC)." + default = null + sensitive = true +} + variable "state_bucket_name" { type = string description = "Globally unique GCS bucket name for Terraform state." default = "shipsec-tfstate" } - diff --git a/infra/gcp/bootstrap/versions.tf b/infra/gcp/bootstrap/versions.tf index 2bb8da98..661fa97d 100644 --- a/infra/gcp/bootstrap/versions.tf +++ b/infra/gcp/bootstrap/versions.tf @@ -10,7 +10,7 @@ terraform { } provider "google" { - project = var.project_id - region = var.region + project = var.project_id + region = var.region + access_token = var.access_token } - diff --git a/infra/gcp/envs/dev/variables.tf b/infra/gcp/envs/dev/variables.tf index 46f1848e..649b14fa 100644 --- a/infra/gcp/envs/dev/variables.tf +++ b/infra/gcp/envs/dev/variables.tf @@ -3,6 +3,13 @@ variable "project_id" { description = "GCP project id (e.g. shipsec)." } +variable "access_token" { + type = string + description = "Optional short-lived OAuth access token (bypasses ADC)." + default = null + sensitive = true +} + variable "region" { type = string description = "GCP region (e.g. us-central1)." @@ -44,4 +51,3 @@ variable "node_disk_gb" { description = "Boot disk size (GB)." 
default = 100 } - diff --git a/infra/gcp/envs/dev/versions.tf b/infra/gcp/envs/dev/versions.tf index 9a1b3ca0..8ccb902d 100644 --- a/infra/gcp/envs/dev/versions.tf +++ b/infra/gcp/envs/dev/versions.tf @@ -12,8 +12,8 @@ terraform { } provider "google" { - project = var.project_id - region = var.region - zone = var.zone + project = var.project_id + region = var.region + zone = var.zone + access_token = var.access_token } - diff --git a/infra/gcp/envs/prod/variables.tf b/infra/gcp/envs/prod/variables.tf index 3a36d268..d07f0347 100644 --- a/infra/gcp/envs/prod/variables.tf +++ b/infra/gcp/envs/prod/variables.tf @@ -3,6 +3,13 @@ variable "project_id" { description = "GCP project id (e.g. shipsec)." } +variable "access_token" { + type = string + description = "Optional short-lived OAuth access token (bypasses ADC)." + default = null + sensitive = true +} + variable "region" { type = string description = "GCP region for a regional prod cluster (e.g. us-central1)." @@ -71,4 +78,3 @@ variable "node_disk_gb" { description = "Boot disk size (GB)." default = 100 } - diff --git a/infra/gcp/envs/prod/versions.tf b/infra/gcp/envs/prod/versions.tf index d7be8373..81fde787 100644 --- a/infra/gcp/envs/prod/versions.tf +++ b/infra/gcp/envs/prod/versions.tf @@ -12,7 +12,7 @@ terraform { } provider "google" { - project = var.project_id - region = var.region + project = var.project_id + region = var.region + access_token = var.access_token } - From 4cd8a6bc39cdb7b3e9aae1d2243f5e667b23c6e8 Mon Sep 17 00:00:00 2001 From: betterclever Date: Mon, 9 Feb 2026 14:25:36 +0400 Subject: [PATCH 04/36] docs(infra): document token auth fallback Signed-off-by: betterclever Signed-off-by: betterclever --- infra/gcp/APPLY.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/infra/gcp/APPLY.md b/infra/gcp/APPLY.md index b072acda..28c6ff78 100644 --- a/infra/gcp/APPLY.md +++ b/infra/gcp/APPLY.md @@ -20,6 +20,14 @@ Verify: gcloud auth application-default print-access-token >/dev/null && echo adc:present ``` +### Non-interactive fallback (recommended in CI) + +If you can't use ADC (for example in headless sessions), you can use a short-lived token: + +```bash +export TF_VAR_access_token="$(gcloud auth print-access-token)" +``` + ## 1) Bootstrap Terraform state bucket (run once) Pick a globally unique bucket name, then: @@ -85,4 +93,3 @@ kubectl get nodes - If your org policies require it, add a project `environment` tag. It's not required for GKE itself. - This file intentionally does not include any credentials, service account keys, or secrets. 
- From 7f9a1aed3af2833c96b52d8ae24dca9eface7ecd Mon Sep 17 00:00:00 2001 From: betterclever Date: Mon, 9 Feb 2026 14:26:27 +0400 Subject: [PATCH 05/36] fix(infra): support terraform 1.5.x Signed-off-by: betterclever Signed-off-by: betterclever --- infra/gcp/bootstrap/versions.tf | 2 +- infra/gcp/envs/dev/versions.tf | 2 +- infra/gcp/envs/prod/versions.tf | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/infra/gcp/bootstrap/versions.tf b/infra/gcp/bootstrap/versions.tf index 661fa97d..adaf6f7d 100644 --- a/infra/gcp/bootstrap/versions.tf +++ b/infra/gcp/bootstrap/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.6.0" + required_version = ">= 1.5.0" required_providers { google = { diff --git a/infra/gcp/envs/dev/versions.tf b/infra/gcp/envs/dev/versions.tf index 8ccb902d..3ca969c4 100644 --- a/infra/gcp/envs/dev/versions.tf +++ b/infra/gcp/envs/dev/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.6.0" + required_version = ">= 1.5.0" backend "gcs" {} diff --git a/infra/gcp/envs/prod/versions.tf b/infra/gcp/envs/prod/versions.tf index 81fde787..0efbb7d9 100644 --- a/infra/gcp/envs/prod/versions.tf +++ b/infra/gcp/envs/prod/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.6.0" + required_version = ">= 1.5.0" backend "gcs" {} From 6ef25fcb5bfddcbe8b4541bfd44771ab7c2f5bff Mon Sep 17 00:00:00 2001 From: betterclever Date: Mon, 9 Feb 2026 14:28:10 +0400 Subject: [PATCH 06/36] feat(infra): add dev-local env without remote state Signed-off-by: betterclever Signed-off-by: betterclever --- infra/gcp/APPLY.md | 16 ++++ infra/gcp/envs/dev-local/main.tf | 115 ++++++++++++++++++++++++++ infra/gcp/envs/dev-local/variables.tf | 54 ++++++++++++ infra/gcp/envs/dev-local/versions.tf | 18 ++++ 4 files changed, 203 insertions(+) create mode 100644 infra/gcp/envs/dev-local/main.tf create mode 100644 infra/gcp/envs/dev-local/variables.tf create mode 100644 infra/gcp/envs/dev-local/versions.tf diff --git a/infra/gcp/APPLY.md b/infra/gcp/APPLY.md index 28c6ff78..99e0f21e 100644 --- a/infra/gcp/APPLY.md +++ b/infra/gcp/APPLY.md @@ -56,6 +56,22 @@ terraform apply \ -var cluster_name=shipsec-dev ``` +### Dev environment (no remote state; works without ADC) + +If you don't have ADC configured but want to apply once, use `envs/dev-local`: + +```bash +export TF_VAR_access_token="$(gcloud auth print-access-token)" +cd infra/gcp/envs/dev-local +terraform init +terraform apply \ + -var project_id=shipsec \ + -var region=us-central1 \ + -var zone=us-central1-a \ + -var cluster_name=shipsec-dev-tf \ + -var node_count=1 +``` + Get credentials: ```bash diff --git a/infra/gcp/envs/dev-local/main.tf b/infra/gcp/envs/dev-local/main.tf new file mode 100644 index 00000000..bd955545 --- /dev/null +++ b/infra/gcp/envs/dev-local/main.tf @@ -0,0 +1,115 @@ +locals { + services = toset([ + "cloudresourcemanager.googleapis.com", + "serviceusage.googleapis.com", + "iam.googleapis.com", + "compute.googleapis.com", + "container.googleapis.com", + "artifactregistry.googleapis.com", + "secretmanager.googleapis.com", + ]) +} + +resource "google_project_service" "enabled" { + for_each = local.services + project = var.project_id + service = each.value + + disable_on_destroy = false +} + +resource "google_artifact_registry_repository" "docker" { + project = var.project_id + location = var.region + repository_id = var.artifact_repo_name + format = "DOCKER" + + depends_on = [google_project_service.enabled] +} + +resource "google_compute_network" "vpc" { + project = var.project_id + 
name = "${var.cluster_name}-vpc" + auto_create_subnetworks = false + + depends_on = [google_project_service.enabled] +} + +resource "google_compute_subnetwork" "subnet" { + project = var.project_id + region = var.region + name = "${var.cluster_name}-subnet" + network = google_compute_network.vpc.id + ip_cidr_range = "10.10.0.0/16" + + secondary_ip_range { + range_name = "pods" + ip_cidr_range = "10.20.0.0/16" + } + + secondary_ip_range { + range_name = "services" + ip_cidr_range = "10.30.0.0/20" + } +} + +resource "google_container_cluster" "gke" { + project = var.project_id + name = var.cluster_name + location = var.zone + + deletion_protection = false + remove_default_node_pool = true + initial_node_count = 1 + + release_channel { + channel = "REGULAR" + } + + network = google_compute_network.vpc.id + subnetwork = google_compute_subnetwork.subnet.id + + ip_allocation_policy { + cluster_secondary_range_name = "pods" + services_secondary_range_name = "services" + } + + workload_identity_config { + workload_pool = "${var.project_id}.svc.id.goog" + } + + depends_on = [google_project_service.enabled] +} + +resource "google_container_node_pool" "default_pool" { + project = var.project_id + name = "default-pool" + cluster = google_container_cluster.gke.name + location = var.zone + + initial_node_count = var.node_count + + node_config { + machine_type = var.node_machine_type + disk_type = "pd-balanced" + disk_size_gb = var.node_disk_gb + image_type = "COS_CONTAINERD" + + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +output "artifact_registry_repo" { + value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.docker.repository_id}" +} + +output "cluster_location" { + value = var.zone +} + +output "cluster_name" { + value = google_container_cluster.gke.name +} + diff --git a/infra/gcp/envs/dev-local/variables.tf b/infra/gcp/envs/dev-local/variables.tf new file mode 100644 index 00000000..8227e7e6 --- /dev/null +++ b/infra/gcp/envs/dev-local/variables.tf @@ -0,0 +1,54 @@ +variable "project_id" { + type = string + description = "GCP project id (e.g. shipsec)." +} + +variable "access_token" { + type = string + description = "Optional short-lived OAuth access token (bypasses ADC)." + default = null + sensitive = true +} + +variable "region" { + type = string + description = "GCP region (e.g. us-central1)." + default = "us-central1" +} + +variable "zone" { + type = string + description = "GCP zone for a zonal dev cluster (e.g. us-central1-a)." + default = "us-central1-a" +} + +variable "cluster_name" { + type = string + description = "GKE cluster name." + default = "shipsec-dev" +} + +variable "artifact_repo_name" { + type = string + description = "Artifact Registry repo name (Docker)." + default = "shipsec-studio" +} + +variable "node_machine_type" { + type = string + description = "Machine type for dev nodes." + default = "e2-standard-4" +} + +variable "node_count" { + type = number + description = "Initial node count for the dev node pool." + default = 2 +} + +variable "node_disk_gb" { + type = number + description = "Boot disk size (GB)." 
+ default = 100 +} + diff --git a/infra/gcp/envs/dev-local/versions.tf b/infra/gcp/envs/dev-local/versions.tf new file mode 100644 index 00000000..fdc002a6 --- /dev/null +++ b/infra/gcp/envs/dev-local/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.5.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.20.0" + } + } +} + +provider "google" { + project = var.project_id + region = var.region + zone = var.zone + access_token = var.access_token +} + From b1005f16c007b51aeb82c7f164c3de250a5a24c4 Mon Sep 17 00:00:00 2001 From: betterclever Date: Mon, 9 Feb 2026 14:44:08 +0400 Subject: [PATCH 07/36] chore(infra): commit terraform provider locks Signed-off-by: betterclever Signed-off-by: betterclever --- infra/gcp/bootstrap/.terraform.lock.hcl | 22 ++++++++++++++++++++ infra/gcp/envs/dev-local/.terraform.lock.hcl | 22 ++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 infra/gcp/bootstrap/.terraform.lock.hcl create mode 100644 infra/gcp/envs/dev-local/.terraform.lock.hcl diff --git a/infra/gcp/bootstrap/.terraform.lock.hcl b/infra/gcp/bootstrap/.terraform.lock.hcl new file mode 100644 index 00000000..18f20808 --- /dev/null +++ b/infra/gcp/bootstrap/.terraform.lock.hcl @@ -0,0 +1,22 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/google" { + version = "7.18.0" + constraints = ">= 5.20.0" + hashes = [ + "h1:Hqg6g5/5hFRK73xBE7ANAeuQbuw8ibuPrzXP7OOPxrk=", + "zh:041dc216f7352e36af65d4a6d4a38d24fec4c05193a4f4c8cf69138e29dc9421", + "zh:454c675e0487f011764eb0cd15d7b1e43d06a4e80ed056aeb4ad11df31368f81", + "zh:4e76c8a1e5645f1e2c258c8074d4e9ecfc1d6383d207d03f492df16da389a120", + "zh:60c96075fc082d9584b9cb8f48f0d23f90fd4344e6141a417580c6bad1b21957", + "zh:ad82cece07a0816153e3fc6cb6d7672c6c009742dc802ab434a83d0731d94ae7", + "zh:aebbf8a0bd3af0b6c705d5d85ec51891f533b83dcbae7249e64a252efc6fd862", + "zh:bfbb19a5b46950eaf0a83cea09a5992d1b0e96792130faeb6c733609dc2913df", + "zh:c196b4c82d0252fa751ee3cd84433bc483b7ce7d6fcab5db0413dbaa9f218650", + "zh:db1c83777bc6d7fc195be83712a9f503e9a5a1f7326fd6968d9812acc53f2056", + "zh:dcd58beeac9d1889e5532cfcd3bd8dec5568ab06b0a81427bc9b35931b6f0178", + "zh:eaeedc86c2d01630a3166ae98d5138e9bf9463b2a606d7f7df6d465d1501f28f", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} diff --git a/infra/gcp/envs/dev-local/.terraform.lock.hcl b/infra/gcp/envs/dev-local/.terraform.lock.hcl new file mode 100644 index 00000000..18f20808 --- /dev/null +++ b/infra/gcp/envs/dev-local/.terraform.lock.hcl @@ -0,0 +1,22 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/google" { + version = "7.18.0" + constraints = ">= 5.20.0" + hashes = [ + "h1:Hqg6g5/5hFRK73xBE7ANAeuQbuw8ibuPrzXP7OOPxrk=", + "zh:041dc216f7352e36af65d4a6d4a38d24fec4c05193a4f4c8cf69138e29dc9421", + "zh:454c675e0487f011764eb0cd15d7b1e43d06a4e80ed056aeb4ad11df31368f81", + "zh:4e76c8a1e5645f1e2c258c8074d4e9ecfc1d6383d207d03f492df16da389a120", + "zh:60c96075fc082d9584b9cb8f48f0d23f90fd4344e6141a417580c6bad1b21957", + "zh:ad82cece07a0816153e3fc6cb6d7672c6c009742dc802ab434a83d0731d94ae7", + "zh:aebbf8a0bd3af0b6c705d5d85ec51891f533b83dcbae7249e64a252efc6fd862", + "zh:bfbb19a5b46950eaf0a83cea09a5992d1b0e96792130faeb6c733609dc2913df", + "zh:c196b4c82d0252fa751ee3cd84433bc483b7ce7d6fcab5db0413dbaa9f218650", + "zh:db1c83777bc6d7fc195be83712a9f503e9a5a1f7326fd6968d9812acc53f2056", + "zh:dcd58beeac9d1889e5532cfcd3bd8dec5568ab06b0a81427bc9b35931b6f0178", + "zh:eaeedc86c2d01630a3166ae98d5138e9bf9463b2a606d7f7df6d465d1501f28f", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} From d4ac903e5d058f25cb2bbc5b5d8466643fc65c17 Mon Sep 17 00:00:00 2001 From: betterclever Date: Wed, 11 Feb 2026 13:14:20 +0400 Subject: [PATCH 08/36] fix(infra): adopt existing shipsec-dev cluster into terraform Destroyed the empty shipsec-dev-tf cluster and rewrote dev-local terraform to match the real shipsec-dev cluster on the default VPC. - Use data sources for default VPC/subnet instead of managed resources - Import existing GKE cluster, node pool, Artifact Registry, and APIs - Match actual oauth scopes (per-service, not cloud-platform) - Add lifecycle ignore for initial_node_count/node_config drift - Remove remove_default_node_pool to avoid accidental pool deletion - Update provider lock to google 7.19.0 Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever --- infra/gcp/envs/dev-local/.terraform.lock.hcl | 26 ++++----- infra/gcp/envs/dev-local/main.tf | 58 ++++++++++---------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/infra/gcp/envs/dev-local/.terraform.lock.hcl b/infra/gcp/envs/dev-local/.terraform.lock.hcl index 18f20808..cf2b1697 100644 --- a/infra/gcp/envs/dev-local/.terraform.lock.hcl +++ b/infra/gcp/envs/dev-local/.terraform.lock.hcl @@ -2,21 +2,21 @@ # Manual edits may be lost in future updates. 
provider "registry.terraform.io/hashicorp/google" { - version = "7.18.0" + version = "7.19.0" constraints = ">= 5.20.0" hashes = [ - "h1:Hqg6g5/5hFRK73xBE7ANAeuQbuw8ibuPrzXP7OOPxrk=", - "zh:041dc216f7352e36af65d4a6d4a38d24fec4c05193a4f4c8cf69138e29dc9421", - "zh:454c675e0487f011764eb0cd15d7b1e43d06a4e80ed056aeb4ad11df31368f81", - "zh:4e76c8a1e5645f1e2c258c8074d4e9ecfc1d6383d207d03f492df16da389a120", - "zh:60c96075fc082d9584b9cb8f48f0d23f90fd4344e6141a417580c6bad1b21957", - "zh:ad82cece07a0816153e3fc6cb6d7672c6c009742dc802ab434a83d0731d94ae7", - "zh:aebbf8a0bd3af0b6c705d5d85ec51891f533b83dcbae7249e64a252efc6fd862", - "zh:bfbb19a5b46950eaf0a83cea09a5992d1b0e96792130faeb6c733609dc2913df", - "zh:c196b4c82d0252fa751ee3cd84433bc483b7ce7d6fcab5db0413dbaa9f218650", - "zh:db1c83777bc6d7fc195be83712a9f503e9a5a1f7326fd6968d9812acc53f2056", - "zh:dcd58beeac9d1889e5532cfcd3bd8dec5568ab06b0a81427bc9b35931b6f0178", - "zh:eaeedc86c2d01630a3166ae98d5138e9bf9463b2a606d7f7df6d465d1501f28f", + "h1:fsiBePQ2WgTqyORF6Klc/GDgV8JHUTMJALf6V4xJU3A=", + "zh:06da157d858384b2383414447c1bf6cf319ad72ea87d7030c6ca18b9bb774f73", + "zh:2f1d7c3461a6b59ffcf0eed2f3764e2f0a2c70464927e561d968d82112e3600d", + "zh:4705ce487e6b2c52376e1f9bc0dc650e8326ab3e20d0673c9fed62e1313d2d67", + "zh:5cd9a4ee36d3d7ffbabb90c83cb7cce54cf0f10c912db4be7492ebc1a78611b3", + "zh:688622dbac98fe95115518ff3d9324cf71ffdf124ca6e66b2269f43d9f8e7ceb", + "zh:7a5c07ae0728c7a57a63d848411c91550fd3bfe662f60821b50d3370be360134", + "zh:8a6472dec8082d7225a811c8ee0bf550c7a9c36e86cfd19b10363106f2dfbb80", + "zh:8e11d4c27e70500aaa1335cb721ad64c4b0e41b3c7398d6fe58a3d92f10ea213", + "zh:9a119c27e27bad73cdd8c0544f8a68a84bdac3de0129f13a87a6890ed19c6035", + "zh:dd12460d2b8b4497b5a7c46bb486ace9859d2fc642782989df315e618596d1e4", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fc35c660777b377978e5f2d008db6181ff2f98777cdd215effc11d665e99e0bc", ] } diff --git a/infra/gcp/envs/dev-local/main.tf b/infra/gcp/envs/dev-local/main.tf index bd955545..deb2238b 100644 --- a/infra/gcp/envs/dev-local/main.tf +++ b/infra/gcp/envs/dev-local/main.tf @@ -1,3 +1,9 @@ +# -------------------------------------------------------------------------- +# Adopt the existing shipsec-dev GKE cluster into Terraform. +# The cluster was created imperatively on the default VPC, so we reference +# the network/subnet as data sources rather than managing them. +# -------------------------------------------------------------------------- + locals { services = toset([ "cloudresourcemanager.googleapis.com", @@ -27,30 +33,16 @@ resource "google_artifact_registry_repository" "docker" { depends_on = [google_project_service.enabled] } -resource "google_compute_network" "vpc" { - project = var.project_id - name = "${var.cluster_name}-vpc" - auto_create_subnetworks = false - - depends_on = [google_project_service.enabled] +# The cluster lives on the default VPC — we don't manage it, just reference it. 
+data "google_compute_network" "default" { + project = var.project_id + name = "default" } -resource "google_compute_subnetwork" "subnet" { - project = var.project_id - region = var.region - name = "${var.cluster_name}-subnet" - network = google_compute_network.vpc.id - ip_cidr_range = "10.10.0.0/16" - - secondary_ip_range { - range_name = "pods" - ip_cidr_range = "10.20.0.0/16" - } - - secondary_ip_range { - range_name = "services" - ip_cidr_range = "10.30.0.0/20" - } +data "google_compute_subnetwork" "default" { + project = var.project_id + region = var.region + name = "default" } resource "google_container_cluster" "gke" { @@ -59,25 +51,29 @@ resource "google_container_cluster" "gke" { location = var.zone deletion_protection = false - remove_default_node_pool = true initial_node_count = 1 release_channel { channel = "REGULAR" } - network = google_compute_network.vpc.id - subnetwork = google_compute_subnetwork.subnet.id + network = data.google_compute_network.default.id + subnetwork = data.google_compute_subnetwork.default.id ip_allocation_policy { - cluster_secondary_range_name = "pods" - services_secondary_range_name = "services" + cluster_secondary_range_name = "gke-shipsec-dev-pods-0a61f82c" } workload_identity_config { workload_pool = "${var.project_id}.svc.id.goog" } + # initial_node_count drifts to 0 after remove_default_node_pool removes it. + # node_config/node_pool are managed by the separate google_container_node_pool resource. + lifecycle { + ignore_changes = [initial_node_count, node_config, node_pool] + } + depends_on = [google_project_service.enabled] } @@ -96,7 +92,12 @@ resource "google_container_node_pool" "default_pool" { image_type = "COS_CONTAINERD" oauth_scopes = [ - "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/trace.append", ] } } @@ -112,4 +113,3 @@ output "cluster_location" { output "cluster_name" { value = google_container_cluster.gke.name } - From 95775c4d0493162516a7422cd939a7556e9c364d Mon Sep 17 00:00:00 2001 From: betterclever Date: Wed, 11 Feb 2026 13:23:50 +0400 Subject: [PATCH 09/36] feat(deploy): add Helm charts and deploy scripts from production-architecture Import the full deploy/ directory from feature/production-architecture: Helm charts: - shipsec-infra: postgres, redis, temporal, minio, redpanda, loki - shipsec: backend, worker, frontend, dind with service configs Values overlays for: GKE dev, VPS, local orbstack, cloud-generic Deploy scripts for: GCP (install + smoke), VPS, orbstack Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever --- deploy/README.md | 31 ++++ deploy/helm/shipsec-infra/Chart.yaml | 7 + .../helm/shipsec-infra/templates/_helpers.tpl | 8 ++ deploy/helm/shipsec-infra/templates/loki.yaml | 56 ++++++++ .../helm/shipsec-infra/templates/minio.yaml | 95 ++++++++++++ .../shipsec-infra/templates/postgres.yaml | 109 ++++++++++++++ .../helm/shipsec-infra/templates/redis.yaml | 53 +++++++ .../shipsec-infra/templates/redpanda.yaml | 90 ++++++++++++ .../shipsec-infra/templates/temporal.yaml | 131 +++++++++++++++++ deploy/helm/shipsec-infra/values.yaml | 47 ++++++ .../shipsec-infra/values/cloud-generic.yaml | 18 +++ deploy/helm/shipsec-infra/values/gke-dev.yaml | 14 ++ .../shipsec-infra/values/local-orbstack.yaml | 14 ++ 
deploy/helm/shipsec-infra/values/vps.yaml | 11 ++ deploy/helm/shipsec/Chart.yaml | 7 + deploy/helm/shipsec/templates/_helpers.tpl | 16 +++ .../shipsec/templates/app-secret.local.yaml | 31 ++++ .../shipsec/templates/backend-deployment.yaml | 63 ++++++++ .../shipsec/templates/backend-service.yaml | 20 +++ .../shipsec/templates/dind-deployment.yaml | 48 +++++++ deploy/helm/shipsec/templates/dind-pvc.yaml | 17 +++ .../helm/shipsec/templates/dind-service.yaml | 20 +++ .../templates/frontend-deployment.yaml | 44 ++++++ .../shipsec/templates/frontend-service.yaml | 20 +++ .../shipsec/templates/worker-deployment.yaml | 53 +++++++ deploy/helm/shipsec/values.yaml | 110 ++++++++++++++ deploy/helm/shipsec/values/cloud-generic.yaml | 16 +++ deploy/helm/shipsec/values/dind.yaml | 5 + deploy/helm/shipsec/values/gke-dev.yaml | 14 ++ .../helm/shipsec/values/local-orbstack.yaml | 16 +++ deploy/helm/shipsec/values/no-dind.yaml | 5 + deploy/helm/shipsec/values/vps.yaml | 15 ++ deploy/scripts/gcp/README.md | 47 ++++++ deploy/scripts/gcp/install.sh | 135 ++++++++++++++++++ deploy/scripts/gcp/smoke.sh | 29 ++++ deploy/scripts/orbstack/install.sh | 26 ++++ deploy/scripts/orbstack/smoke.sh | 16 +++ deploy/scripts/orbstack/uninstall.sh | 13 ++ deploy/scripts/vps/install.sh | 96 +++++++++++++ deploy/scripts/vps/smoke.sh | 25 ++++ 40 files changed, 1591 insertions(+) create mode 100644 deploy/README.md create mode 100644 deploy/helm/shipsec-infra/Chart.yaml create mode 100644 deploy/helm/shipsec-infra/templates/_helpers.tpl create mode 100644 deploy/helm/shipsec-infra/templates/loki.yaml create mode 100644 deploy/helm/shipsec-infra/templates/minio.yaml create mode 100644 deploy/helm/shipsec-infra/templates/postgres.yaml create mode 100644 deploy/helm/shipsec-infra/templates/redis.yaml create mode 100644 deploy/helm/shipsec-infra/templates/redpanda.yaml create mode 100644 deploy/helm/shipsec-infra/templates/temporal.yaml create mode 100644 deploy/helm/shipsec-infra/values.yaml create mode 100644 deploy/helm/shipsec-infra/values/cloud-generic.yaml create mode 100644 deploy/helm/shipsec-infra/values/gke-dev.yaml create mode 100644 deploy/helm/shipsec-infra/values/local-orbstack.yaml create mode 100644 deploy/helm/shipsec-infra/values/vps.yaml create mode 100644 deploy/helm/shipsec/Chart.yaml create mode 100644 deploy/helm/shipsec/templates/_helpers.tpl create mode 100644 deploy/helm/shipsec/templates/app-secret.local.yaml create mode 100644 deploy/helm/shipsec/templates/backend-deployment.yaml create mode 100644 deploy/helm/shipsec/templates/backend-service.yaml create mode 100644 deploy/helm/shipsec/templates/dind-deployment.yaml create mode 100644 deploy/helm/shipsec/templates/dind-pvc.yaml create mode 100644 deploy/helm/shipsec/templates/dind-service.yaml create mode 100644 deploy/helm/shipsec/templates/frontend-deployment.yaml create mode 100644 deploy/helm/shipsec/templates/frontend-service.yaml create mode 100644 deploy/helm/shipsec/templates/worker-deployment.yaml create mode 100644 deploy/helm/shipsec/values.yaml create mode 100644 deploy/helm/shipsec/values/cloud-generic.yaml create mode 100644 deploy/helm/shipsec/values/dind.yaml create mode 100644 deploy/helm/shipsec/values/gke-dev.yaml create mode 100644 deploy/helm/shipsec/values/local-orbstack.yaml create mode 100644 deploy/helm/shipsec/values/no-dind.yaml create mode 100644 deploy/helm/shipsec/values/vps.yaml create mode 100644 deploy/scripts/gcp/README.md create mode 100755 deploy/scripts/gcp/install.sh create mode 100755 deploy/scripts/gcp/smoke.sh 
create mode 100755 deploy/scripts/orbstack/install.sh create mode 100755 deploy/scripts/orbstack/smoke.sh create mode 100755 deploy/scripts/orbstack/uninstall.sh create mode 100755 deploy/scripts/vps/install.sh create mode 100755 deploy/scripts/vps/smoke.sh diff --git a/deploy/README.md b/deploy/README.md new file mode 100644 index 00000000..4e06e95c --- /dev/null +++ b/deploy/README.md @@ -0,0 +1,31 @@ +# Kubernetes Deployment (Local First) + +This folder contains the first draft Helm charts to run ShipSec Studio on Kubernetes. + +Primary target for this draft: + +- Local Kubernetes on OrbStack +- DinD enabled (temporary) so docker-based components can run via `DOCKER_HOST` + +## Quick Start (OrbStack) + +1. Ensure OrbStack Kubernetes is running. +2. Run: + +```bash +./deploy/scripts/orbstack/install.sh +./deploy/scripts/orbstack/smoke.sh +``` + +## Access (Local Defaults) + +- Backend: `http://localhost:3211/health` +- Frontend: `http://localhost:8090` +- Temporal UI: `http://localhost:8081` +- MinIO Console: `http://localhost:9001` + +## Notes + +- This draft uses `temporalio/auto-setup` for local/dev parity with `docker/docker-compose.full.yml`. Do not treat this as a production Temporal deployment. +- DinD is enabled only to match the current execution model. It is not a production security model. + diff --git a/deploy/helm/shipsec-infra/Chart.yaml b/deploy/helm/shipsec-infra/Chart.yaml new file mode 100644 index 00000000..2a36505a --- /dev/null +++ b/deploy/helm/shipsec-infra/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: shipsec-infra +description: Local/dev infrastructure dependencies for ShipSec Studio +type: application +version: 0.1.0 +appVersion: "0.1.0" + diff --git a/deploy/helm/shipsec-infra/templates/_helpers.tpl b/deploy/helm/shipsec-infra/templates/_helpers.tpl new file mode 100644 index 00000000..e0a8c0ff --- /dev/null +++ b/deploy/helm/shipsec-infra/templates/_helpers.tpl @@ -0,0 +1,8 @@ +{{- define "shipsec-infra.labels" -}} +app.kubernetes.io/name: shipsec-infra +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" }} +{{- end -}} + diff --git a/deploy/helm/shipsec-infra/templates/loki.yaml b/deploy/helm/shipsec-infra/templates/loki.yaml new file mode 100644 index 00000000..51913cea --- /dev/null +++ b/deploy/helm/shipsec-infra/templates/loki.yaml @@ -0,0 +1,56 @@ +{{- if .Values.loki.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipsec-loki + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} + app.kubernetes.io/component: loki +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: loki + template: + metadata: + labels: + app.kubernetes.io/component: loki + spec: + containers: + - name: loki + image: {{ .Values.loki.image | quote }} + args: ["-config.file=/etc/loki/local-config.yaml"] + ports: + - name: http + containerPort: 3100 + readinessProbe: + httpGet: + path: /ready + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + livenessProbe: + httpGet: + path: /ready + port: http + initialDelaySeconds: 30 + periodSeconds: 20 +--- +apiVersion: v1 +kind: Service +metadata: + name: shipsec-loki + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . 
| nindent 4 }} +spec: + type: ClusterIP + ports: + - name: http + port: 3100 + targetPort: http + selector: + app.kubernetes.io/component: loki +{{- end }} + diff --git a/deploy/helm/shipsec-infra/templates/minio.yaml b/deploy/helm/shipsec-infra/templates/minio.yaml new file mode 100644 index 00000000..65eb8d75 --- /dev/null +++ b/deploy/helm/shipsec-infra/templates/minio.yaml @@ -0,0 +1,95 @@ +{{- if .Values.minio.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: shipsec-minio-secret + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} +type: Opaque +stringData: + MINIO_ROOT_USER: {{ .Values.minio.rootUser | quote }} + MINIO_ROOT_PASSWORD: {{ .Values.minio.rootPassword | quote }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: shipsec-minio + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} + app.kubernetes.io/component: minio +spec: + serviceName: shipsec-minio + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: minio + template: + metadata: + labels: + app.kubernetes.io/component: minio + spec: + containers: + - name: minio + image: {{ .Values.minio.image | quote }} + args: ["server", "/data", "--console-address", ":9001"] + envFrom: + - secretRef: + name: shipsec-minio-secret + ports: + - name: api + containerPort: 9000 + - name: console + containerPort: 9001 + readinessProbe: + httpGet: + path: /minio/health/ready + port: api + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + httpGet: + path: /minio/health/live + port: api + initialDelaySeconds: 15 + periodSeconds: 20 + volumeMounts: + - name: data + mountPath: /data + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: {{ .Values.minio.persistence.size }} +--- +apiVersion: v1 +kind: Service +metadata: + name: shipsec-minio + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} +spec: + {{- $svcType := "ClusterIP" -}} + {{- $apiPort := 9000 -}} + {{- $consolePort := 9001 -}} + {{- with .Values.minio.service -}} + {{- if .type }}{{- $svcType = .type }}{{- end -}} + {{- if .apiPort }}{{- $apiPort = .apiPort }}{{- end -}} + {{- if .consolePort }}{{- $consolePort = .consolePort }}{{- end -}} + {{- end }} + type: {{ $svcType }} + ports: + - name: api + port: {{ $apiPort }} + targetPort: api + - name: console + port: {{ $consolePort }} + targetPort: console + selector: + app.kubernetes.io/component: minio +{{- end }} diff --git a/deploy/helm/shipsec-infra/templates/postgres.yaml b/deploy/helm/shipsec-infra/templates/postgres.yaml new file mode 100644 index 00000000..dac8c19a --- /dev/null +++ b/deploy/helm/shipsec-infra/templates/postgres.yaml @@ -0,0 +1,109 @@ +{{- if .Values.postgres.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: shipsec-postgres-secret + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} +type: Opaque +stringData: + POSTGRES_USER: {{ .Values.postgres.user | quote }} + POSTGRES_PASSWORD: {{ .Values.postgres.password | quote }} + POSTGRES_DB: {{ .Values.postgres.database | quote }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: shipsec-postgres-initdb + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . 
| nindent 4 }} +data: + create-temporal-db.sh: | + #!/bin/bash + set -e + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL + CREATE DATABASE temporal; + GRANT ALL PRIVILEGES ON DATABASE temporal TO $POSTGRES_USER; + EOSQL +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: shipsec-postgres + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} + app.kubernetes.io/component: postgres +spec: + serviceName: shipsec-postgres + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: postgres + template: + metadata: + labels: + app.kubernetes.io/component: postgres + spec: + containers: + - name: postgres + image: {{ .Values.postgres.image | quote }} + ports: + - name: postgres + containerPort: 5432 + env: + # GKE (and some other environments) mount ext4 volumes with a `lost+found` + # directory at the volume root. Postgres initdb fails if the data dir is + # not empty, so we point PGDATA at a subdirectory. + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + envFrom: + - secretRef: + name: shipsec-postgres-secret + volumeMounts: + - name: data + mountPath: /var/lib/postgresql/data + - name: initdb + mountPath: /docker-entrypoint-initdb.d + readinessProbe: + exec: + command: ["sh", "-c", "pg_isready -U $POSTGRES_USER"] + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + exec: + command: ["sh", "-c", "pg_isready -U $POSTGRES_USER"] + initialDelaySeconds: 15 + periodSeconds: 10 + volumes: + - name: initdb + configMap: + name: shipsec-postgres-initdb + defaultMode: 0755 + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: {{ .Values.postgres.persistence.size }} +--- +apiVersion: v1 +kind: Service +metadata: + name: shipsec-postgres + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - name: postgres + port: 5432 + targetPort: postgres + selector: + app.kubernetes.io/component: postgres +{{- end }} diff --git a/deploy/helm/shipsec-infra/templates/redis.yaml b/deploy/helm/shipsec-infra/templates/redis.yaml new file mode 100644 index 00000000..148e490e --- /dev/null +++ b/deploy/helm/shipsec-infra/templates/redis.yaml @@ -0,0 +1,53 @@ +{{- if .Values.redis.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipsec-redis + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} + app.kubernetes.io/component: redis +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: redis + template: + metadata: + labels: + app.kubernetes.io/component: redis + spec: + containers: + - name: redis + image: {{ .Values.redis.image | quote }} + ports: + - name: redis + containerPort: 6379 + readinessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 15 + periodSeconds: 10 +--- +apiVersion: v1 +kind: Service +metadata: + name: shipsec-redis + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . 
| nindent 4 }} +spec: + type: ClusterIP + ports: + - name: redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/component: redis +{{- end }} + diff --git a/deploy/helm/shipsec-infra/templates/redpanda.yaml b/deploy/helm/shipsec-infra/templates/redpanda.yaml new file mode 100644 index 00000000..ccb694fc --- /dev/null +++ b/deploy/helm/shipsec-infra/templates/redpanda.yaml @@ -0,0 +1,90 @@ +{{- if .Values.redpanda.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: shipsec-redpanda + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} + app.kubernetes.io/component: redpanda +spec: + serviceName: shipsec-redpanda + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: redpanda + template: + metadata: + labels: + app.kubernetes.io/component: redpanda + spec: + initContainers: + - name: volume-permissions + image: busybox:1.36 + command: ["sh", "-c", "chown -R 101:101 /var/lib/redpanda/data"] + securityContext: + runAsUser: 0 + volumeMounts: + - name: data + mountPath: /var/lib/redpanda/data + containers: + - name: redpanda + image: {{ .Values.redpanda.image | quote }} + args: + - redpanda + - start + - --mode=dev-container + - --smp=1 + - --reserve-memory=0M + - --overprovisioned + - --node-id=0 + - --check=false + - --advertise-kafka-addr=shipsec-redpanda:9092 + ports: + - name: kafka + containerPort: 9092 + - name: admin + containerPort: 9644 + readinessProbe: + httpGet: + path: /v1/status/ready + port: admin + initialDelaySeconds: 10 + periodSeconds: 10 + livenessProbe: + httpGet: + path: /v1/status/ready + port: admin + initialDelaySeconds: 30 + periodSeconds: 20 + volumeMounts: + - name: data + mountPath: /var/lib/redpanda/data + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: {{ .Values.redpanda.persistence.size }} +--- +apiVersion: v1 +kind: Service +metadata: + name: shipsec-redpanda + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - name: kafka + port: 9092 + targetPort: kafka + - name: admin + port: 9644 + targetPort: admin + selector: + app.kubernetes.io/component: redpanda +{{- end }} diff --git a/deploy/helm/shipsec-infra/templates/temporal.yaml b/deploy/helm/shipsec-infra/templates/temporal.yaml new file mode 100644 index 00000000..f38d3edf --- /dev/null +++ b/deploy/helm/shipsec-infra/templates/temporal.yaml @@ -0,0 +1,131 @@ +{{- if .Values.temporal.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipsec-temporal + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . 
| nindent 4 }} + app.kubernetes.io/component: temporal +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: temporal + template: + metadata: + labels: + app.kubernetes.io/component: temporal + spec: + containers: + - name: temporal + image: {{ .Values.temporal.image | quote }} + ports: + - name: grpc + containerPort: 7233 + env: + - name: DB + value: postgres12 + - name: DB_PORT + value: "5432" + - name: DB_NAME + value: temporal + - name: POSTGRES_USER + value: {{ .Values.postgres.user | quote }} + - name: POSTGRES_PWD + value: {{ .Values.postgres.password | quote }} + - name: POSTGRES_SEEDS + value: shipsec-postgres + - name: AUTO_SETUP + value: "true" + readinessProbe: + tcpSocket: + port: grpc + initialDelaySeconds: 20 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: grpc + initialDelaySeconds: 60 + periodSeconds: 20 +--- +apiVersion: v1 +kind: Service +metadata: + name: shipsec-temporal + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - name: grpc + port: 7233 + targetPort: grpc + selector: + app.kubernetes.io/component: temporal +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipsec-temporal-ui + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . | nindent 4 }} + app.kubernetes.io/component: temporal-ui +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: temporal-ui + template: + metadata: + labels: + app.kubernetes.io/component: temporal-ui + spec: + containers: + - name: temporal-ui + image: {{ .Values.temporal.uiImage | quote }} + ports: + - name: http + containerPort: 8080 + env: + - name: TEMPORAL_ADDRESS + value: shipsec-temporal:7233 + - name: TEMPORAL_NAMESPACE + value: default + readinessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + livenessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 30 + periodSeconds: 20 +--- +apiVersion: v1 +kind: Service +metadata: + name: shipsec-temporal-ui + namespace: {{ .Values.global.namespace }} + labels: + {{- include "shipsec-infra.labels" . 
| nindent 4 }} +spec: + {{- $uiSvcType := "ClusterIP" -}} + {{- $uiSvcPort := 8080 -}} + {{- with .Values.temporal.uiService -}} + {{- if .type }}{{- $uiSvcType = .type }}{{- end -}} + {{- if .port }}{{- $uiSvcPort = .port }}{{- end -}} + {{- end }} + type: {{ $uiSvcType }} + ports: + - name: http + port: {{ $uiSvcPort }} + targetPort: http + selector: + app.kubernetes.io/component: temporal-ui +{{- end }} diff --git a/deploy/helm/shipsec-infra/values.yaml b/deploy/helm/shipsec-infra/values.yaml new file mode 100644 index 00000000..e4ff7429 --- /dev/null +++ b/deploy/helm/shipsec-infra/values.yaml @@ -0,0 +1,47 @@ +global: + namespace: shipsec-system + +postgres: + enabled: true + image: postgres:16-alpine + user: shipsec + password: shipsec + database: shipsec + persistence: + enabled: true + size: 5Gi + +redis: + enabled: true + image: redis:7-alpine + persistence: + enabled: false + size: 1Gi + +minio: + enabled: true + image: minio/minio:RELEASE.2024-10-02T17-50-41Z + rootUser: minioadmin + rootPassword: minioadmin + persistence: + enabled: true + size: 10Gi + +temporal: + enabled: true + image: temporalio/auto-setup:latest + uiImage: temporalio/ui:latest + +redpanda: + enabled: true + image: redpandadata/redpanda:v24.2.5 + persistence: + enabled: true + size: 5Gi + +loki: + enabled: false + image: grafana/loki:3.2.1 + persistence: + enabled: false + size: 5Gi diff --git a/deploy/helm/shipsec-infra/values/cloud-generic.yaml b/deploy/helm/shipsec-infra/values/cloud-generic.yaml new file mode 100644 index 00000000..159c2797 --- /dev/null +++ b/deploy/helm/shipsec-infra/values/cloud-generic.yaml @@ -0,0 +1,18 @@ +postgres: + enabled: false + +redis: + enabled: false + +minio: + enabled: false + +temporal: + enabled: false + +redpanda: + enabled: false + +loki: + enabled: false + diff --git a/deploy/helm/shipsec-infra/values/gke-dev.yaml b/deploy/helm/shipsec-infra/values/gke-dev.yaml new file mode 100644 index 00000000..e455bdb8 --- /dev/null +++ b/deploy/helm/shipsec-infra/values/gke-dev.yaml @@ -0,0 +1,14 @@ +global: + namespace: shipsec-system + +# Keep infra in-cluster for the first GKE pass to move fast. +# Move to managed services (Cloud SQL, Memorystore, GCS) later. 
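+#
+# Overlays like this one are layered on top of the chart defaults, e.g.
+# (this is what deploy/scripts/gcp/install.sh runs, minus --kube-context):
+#   helm upgrade --install shipsec-infra deploy/helm/shipsec-infra \
+#     --namespace shipsec-system \
+#     --values deploy/helm/shipsec-infra/values.yaml \
+#     --values deploy/helm/shipsec-infra/values/gke-dev.yaml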
+ +minio: + service: + type: ClusterIP + +temporal: + uiService: + type: ClusterIP + diff --git a/deploy/helm/shipsec-infra/values/local-orbstack.yaml b/deploy/helm/shipsec-infra/values/local-orbstack.yaml new file mode 100644 index 00000000..400d8480 --- /dev/null +++ b/deploy/helm/shipsec-infra/values/local-orbstack.yaml @@ -0,0 +1,14 @@ +global: + namespace: shipsec-system + +temporal: + uiService: + type: LoadBalancer + port: 8081 + +minio: + service: + type: LoadBalancer + apiPort: 9000 + consolePort: 9001 + diff --git a/deploy/helm/shipsec-infra/values/vps.yaml b/deploy/helm/shipsec-infra/values/vps.yaml new file mode 100644 index 00000000..9a207973 --- /dev/null +++ b/deploy/helm/shipsec-infra/values/vps.yaml @@ -0,0 +1,11 @@ +global: + namespace: shipsec-system + +minio: + service: + type: ClusterIP + +temporal: + uiService: + type: ClusterIP + diff --git a/deploy/helm/shipsec/Chart.yaml b/deploy/helm/shipsec/Chart.yaml new file mode 100644 index 00000000..a5b75d8a --- /dev/null +++ b/deploy/helm/shipsec/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: shipsec +description: ShipSec Studio app (backend, worker, frontend) with optional DinD +type: application +version: 0.1.0 +appVersion: "0.1.0" + diff --git a/deploy/helm/shipsec/templates/_helpers.tpl b/deploy/helm/shipsec/templates/_helpers.tpl new file mode 100644 index 00000000..d6737af9 --- /dev/null +++ b/deploy/helm/shipsec/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{- define "shipsec.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "shipsec.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" -}} +{{- end -}} + +{{- define "shipsec.labels" -}} +app.kubernetes.io/name: {{ include "shipsec.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ include "shipsec.chart" . }} +{{- end -}} + diff --git a/deploy/helm/shipsec/templates/app-secret.local.yaml b/deploy/helm/shipsec/templates/app-secret.local.yaml new file mode 100644 index 00000000..0488dacf --- /dev/null +++ b/deploy/helm/shipsec/templates/app-secret.local.yaml @@ -0,0 +1,31 @@ +{{- if .Values.secrets.create }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.secrets.name }} + namespace: {{ .Values.global.namespaces.system }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} +type: Opaque +stringData: + DATABASE_URL: {{ .Values.secrets.databaseUrl | quote }} + MINIO_ROOT_USER: {{ .Values.secrets.minioRootUser | quote }} + MINIO_ROOT_PASSWORD: {{ .Values.secrets.minioRootPassword | quote }} + MINIO_ACCESS_KEY: {{ .Values.secrets.minioRootUser | quote }} + MINIO_SECRET_KEY: {{ .Values.secrets.minioRootPassword | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.secrets.name }} + namespace: {{ .Values.global.namespaces.workers }} + labels: + {{- include "shipsec.labels" . 
| nindent 4 }} +type: Opaque +stringData: + DATABASE_URL: {{ .Values.secrets.databaseUrl | quote }} + MINIO_ROOT_USER: {{ .Values.secrets.minioRootUser | quote }} + MINIO_ROOT_PASSWORD: {{ .Values.secrets.minioRootPassword | quote }} + MINIO_ACCESS_KEY: {{ .Values.secrets.minioRootUser | quote }} + MINIO_SECRET_KEY: {{ .Values.secrets.minioRootPassword | quote }} +{{- end }} diff --git a/deploy/helm/shipsec/templates/backend-deployment.yaml b/deploy/helm/shipsec/templates/backend-deployment.yaml new file mode 100644 index 00000000..e6f3858b --- /dev/null +++ b/deploy/helm/shipsec/templates/backend-deployment.yaml @@ -0,0 +1,63 @@ +{{- if .Values.backend.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipsec-backend + namespace: {{ .Values.global.namespaces.system }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} + app.kubernetes.io/component: backend +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: backend + template: + metadata: + labels: + {{- include "shipsec.labels" . | nindent 8 }} + app.kubernetes.io/component: backend + spec: + containers: + - name: backend + image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}" + imagePullPolicy: {{ .Values.backend.image.pullPolicy }} + ports: + - name: http + containerPort: 3211 + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.name }} + key: DATABASE_URL + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.name }} + key: MINIO_ROOT_USER + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.name }} + key: MINIO_ROOT_PASSWORD + {{- range $k, $v := .Values.backend.env }} + - name: {{ $k }} + value: {{ $v | quote }} + {{- end }} + readinessProbe: + httpGet: + path: /api/v1/health + port: http + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /api/v1/health + port: http + initialDelaySeconds: 15 + periodSeconds: 10 + resources: + {{- toYaml .Values.backend.resources | nindent 10 }} +{{- end }} diff --git a/deploy/helm/shipsec/templates/backend-service.yaml b/deploy/helm/shipsec/templates/backend-service.yaml new file mode 100644 index 00000000..9a00700a --- /dev/null +++ b/deploy/helm/shipsec/templates/backend-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.backend.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: shipsec-backend + namespace: {{ .Values.global.namespaces.system }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} + app.kubernetes.io/component: backend +spec: + type: {{ .Values.backend.service.type }} + ports: + - name: http + port: {{ .Values.backend.service.port }} + targetPort: http + selector: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: backend +{{- end }} + diff --git a/deploy/helm/shipsec/templates/dind-deployment.yaml b/deploy/helm/shipsec/templates/dind-deployment.yaml new file mode 100644 index 00000000..bc48159b --- /dev/null +++ b/deploy/helm/shipsec/templates/dind-deployment.yaml @@ -0,0 +1,48 @@ +{{- if .Values.execution.dind.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipsec-dind + namespace: {{ .Values.global.namespaces.workloads }} + labels: + {{- include "shipsec.labels" . 
| nindent 4 }} + app.kubernetes.io/component: dind +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: dind + template: + metadata: + labels: + {{- include "shipsec.labels" . | nindent 8 }} + app.kubernetes.io/component: dind + spec: + containers: + - name: dind + image: docker:27-dind + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + args: + - "--host=tcp://0.0.0.0:{{ .Values.execution.dind.port }}" + - "--storage-driver=overlay2" + env: + - name: DOCKER_TLS_CERTDIR + value: "" + ports: + - name: docker + containerPort: {{ .Values.execution.dind.port }} + volumeMounts: + - name: docker-storage + mountPath: /var/lib/docker + volumes: + - name: docker-storage + {{- if .Values.execution.dind.storage.enabled }} + persistentVolumeClaim: + claimName: shipsec-dind-pvc + {{- else }} + emptyDir: {} + {{- end }} +{{- end }} diff --git a/deploy/helm/shipsec/templates/dind-pvc.yaml b/deploy/helm/shipsec/templates/dind-pvc.yaml new file mode 100644 index 00000000..32a11ef1 --- /dev/null +++ b/deploy/helm/shipsec/templates/dind-pvc.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.execution.dind.enabled .Values.execution.dind.storage.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: shipsec-dind-pvc + namespace: {{ .Values.global.namespaces.workloads }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} + app.kubernetes.io/component: dind +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.execution.dind.storage.size }} +{{- end }} + diff --git a/deploy/helm/shipsec/templates/dind-service.yaml b/deploy/helm/shipsec/templates/dind-service.yaml new file mode 100644 index 00000000..349c0d24 --- /dev/null +++ b/deploy/helm/shipsec/templates/dind-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.execution.dind.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: shipsec-dind + namespace: {{ .Values.global.namespaces.workloads }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} + app.kubernetes.io/component: dind +spec: + type: ClusterIP + ports: + - name: docker + port: {{ .Values.execution.dind.port }} + targetPort: docker + selector: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: dind +{{- end }} + diff --git a/deploy/helm/shipsec/templates/frontend-deployment.yaml b/deploy/helm/shipsec/templates/frontend-deployment.yaml new file mode 100644 index 00000000..88d0ce37 --- /dev/null +++ b/deploy/helm/shipsec/templates/frontend-deployment.yaml @@ -0,0 +1,44 @@ +{{- if .Values.frontend.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipsec-frontend + namespace: {{ .Values.global.namespaces.system }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} + app.kubernetes.io/component: frontend +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: frontend + template: + metadata: + labels: + {{- include "shipsec.labels" . 
| nindent 8 }} + app.kubernetes.io/component: frontend + spec: + containers: + - name: frontend + image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}" + imagePullPolicy: {{ .Values.frontend.image.pullPolicy }} + ports: + - name: http + containerPort: 8080 + readinessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + livenessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 30 + periodSeconds: 20 + resources: + {{- toYaml .Values.frontend.resources | nindent 10 }} +{{- end }} + diff --git a/deploy/helm/shipsec/templates/frontend-service.yaml b/deploy/helm/shipsec/templates/frontend-service.yaml new file mode 100644 index 00000000..4cb8ba89 --- /dev/null +++ b/deploy/helm/shipsec/templates/frontend-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.frontend.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: shipsec-frontend + namespace: {{ .Values.global.namespaces.system }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} + app.kubernetes.io/component: frontend +spec: + type: {{ .Values.frontend.service.type }} + ports: + - name: http + port: {{ .Values.frontend.service.port }} + targetPort: http + selector: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: frontend +{{- end }} + diff --git a/deploy/helm/shipsec/templates/worker-deployment.yaml b/deploy/helm/shipsec/templates/worker-deployment.yaml new file mode 100644 index 00000000..b64dd275 --- /dev/null +++ b/deploy/helm/shipsec/templates/worker-deployment.yaml @@ -0,0 +1,53 @@ +{{- if .Values.worker.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shipsec-worker + namespace: {{ .Values.global.namespaces.workers }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} + app.kubernetes.io/component: worker +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: worker + template: + metadata: + labels: + {{- include "shipsec.labels" . 
| nindent 8 }} + app.kubernetes.io/component: worker + spec: + containers: + - name: worker + image: "{{ .Values.worker.image.repository }}:{{ .Values.worker.image.tag }}" + imagePullPolicy: {{ .Values.worker.image.pullPolicy }} + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.name }} + key: DATABASE_URL + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.name }} + key: MINIO_ACCESS_KEY + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.name }} + key: MINIO_SECRET_KEY + {{- if .Values.execution.workerDockerHost }} + - name: DOCKER_HOST + value: {{ .Values.execution.workerDockerHost | quote }} + {{- end }} + {{- range $k, $v := .Values.worker.env }} + - name: {{ $k }} + value: {{ $v | quote }} + {{- end }} + resources: + {{- toYaml .Values.worker.resources | nindent 10 }} +{{- end }} + diff --git a/deploy/helm/shipsec/values.yaml b/deploy/helm/shipsec/values.yaml new file mode 100644 index 00000000..e2eee29a --- /dev/null +++ b/deploy/helm/shipsec/values.yaml @@ -0,0 +1,110 @@ +global: + namespaces: + system: shipsec-system + workers: shipsec-workers + workloads: shipsec-workloads + +secrets: + create: true + name: shipsec-app-secrets + databaseUrl: postgresql://shipsec:shipsec@shipsec-postgres.shipsec-system.svc.cluster.local:5432/shipsec + minioRootUser: minioadmin + minioRootPassword: minioadmin + +backend: + enabled: true + image: + repository: ghcr.io/shipsecai/studio-backend + tag: latest + pullPolicy: IfNotPresent + service: + type: ClusterIP + port: 3211 + env: + NODE_ENV: production + SHIPSEC_ENV: local + PORT: "3211" + ENABLE_INGEST_SERVICES: "false" + TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233 + TEMPORAL_NAMESPACE: shipsec-dev + TEMPORAL_TASK_QUEUE: shipsec-dev + MINIO_ENDPOINT: shipsec-minio.shipsec-system.svc.cluster.local + MINIO_PORT: "9000" + LOKI_URL: http://shipsec-loki.shipsec-system.svc.cluster.local:3100 + TERMINAL_REDIS_URL: redis://shipsec-redis.shipsec-system.svc.cluster.local:6379 + LOG_KAFKA_BROKERS: shipsec-redpanda.shipsec-system.svc.cluster.local:9092 + LOG_KAFKA_TOPIC: telemetry.logs + LOG_KAFKA_CLIENT_ID: shipsec-backend + EVENT_KAFKA_TOPIC: telemetry.events + EVENT_KAFKA_CLIENT_ID: shipsec-backend-events + EVENT_KAFKA_GROUP_ID: shipsec-event-ingestor + AUTH_PROVIDER: local + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 1000m + memory: 2Gi + +worker: + enabled: true + image: + repository: ghcr.io/shipsecai/studio-worker + tag: latest + pullPolicy: IfNotPresent + env: + NODE_ENV: production + SHIPSEC_ENV: local + ENABLE_INGEST_SERVICES: "false" + TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233 + TEMPORAL_NAMESPACE: shipsec-dev + TEMPORAL_TASK_QUEUE: shipsec-dev + MINIO_ENDPOINT: shipsec-minio.shipsec-system.svc.cluster.local + MINIO_PORT: "9000" + MINIO_BUCKET_NAME: shipsec-files + LOKI_URL: http://shipsec-loki.shipsec-system.svc.cluster.local:3100 + TERMINAL_REDIS_URL: redis://shipsec-redis.shipsec-system.svc.cluster.local:6379 + TERMINAL_REDIS_MAXLEN: "5000" + LOG_KAFKA_BROKERS: shipsec-redpanda.shipsec-system.svc.cluster.local:9092 + LOG_KAFKA_TOPIC: telemetry.logs + LOG_KAFKA_CLIENT_ID: shipsec-worker + EVENT_KAFKA_TOPIC: telemetry.events + EVENT_KAFKA_CLIENT_ID: shipsec-worker-events + BACKEND_URL: http://shipsec-backend.shipsec-system.svc.cluster.local:3211 + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: 2000m + memory: 4Gi + +frontend: + enabled: true + image: 
+ repository: ghcr.io/shipsecai/studio-frontend + tag: latest + pullPolicy: IfNotPresent + service: + type: ClusterIP + port: 8080 + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 500m + memory: 512Mi + +execution: + dind: + enabled: false + serviceName: shipsec-dind + namespace: shipsec-workloads + port: 2375 + storage: + enabled: true + size: 20Gi + workerDockerHost: "" + diff --git a/deploy/helm/shipsec/values/cloud-generic.yaml b/deploy/helm/shipsec/values/cloud-generic.yaml new file mode 100644 index 00000000..d48dfc0d --- /dev/null +++ b/deploy/helm/shipsec/values/cloud-generic.yaml @@ -0,0 +1,16 @@ +secrets: + create: false + +backend: + service: + type: ClusterIP + +frontend: + service: + type: ClusterIP + +execution: + dind: + enabled: false + workerDockerHost: "" + diff --git a/deploy/helm/shipsec/values/dind.yaml b/deploy/helm/shipsec/values/dind.yaml new file mode 100644 index 00000000..a1f7c195 --- /dev/null +++ b/deploy/helm/shipsec/values/dind.yaml @@ -0,0 +1,5 @@ +execution: + dind: + enabled: true + workerDockerHost: tcp://shipsec-dind.shipsec-workloads.svc.cluster.local:2375 + diff --git a/deploy/helm/shipsec/values/gke-dev.yaml b/deploy/helm/shipsec/values/gke-dev.yaml new file mode 100644 index 00000000..6726cdd5 --- /dev/null +++ b/deploy/helm/shipsec/values/gke-dev.yaml @@ -0,0 +1,14 @@ +global: + namespaces: + system: shipsec-system + workers: shipsec-workers + workloads: shipsec-workloads + +backend: + service: + type: LoadBalancer + +frontend: + service: + type: LoadBalancer + diff --git a/deploy/helm/shipsec/values/local-orbstack.yaml b/deploy/helm/shipsec/values/local-orbstack.yaml new file mode 100644 index 00000000..b7cb1ad0 --- /dev/null +++ b/deploy/helm/shipsec/values/local-orbstack.yaml @@ -0,0 +1,16 @@ +global: + namespaces: + system: shipsec-system + workers: shipsec-workers + workloads: shipsec-workloads + +backend: + service: + type: LoadBalancer + port: 3211 + +frontend: + service: + type: LoadBalancer + port: 8090 + diff --git a/deploy/helm/shipsec/values/no-dind.yaml b/deploy/helm/shipsec/values/no-dind.yaml new file mode 100644 index 00000000..98304b03 --- /dev/null +++ b/deploy/helm/shipsec/values/no-dind.yaml @@ -0,0 +1,5 @@ +execution: + dind: + enabled: false + workerDockerHost: "" + diff --git a/deploy/helm/shipsec/values/vps.yaml b/deploy/helm/shipsec/values/vps.yaml new file mode 100644 index 00000000..a234c394 --- /dev/null +++ b/deploy/helm/shipsec/values/vps.yaml @@ -0,0 +1,15 @@ +global: + namespaces: + system: shipsec-system + workers: shipsec-workers + workloads: shipsec-workloads + +backend: + service: + type: ClusterIP + port: 3211 + +frontend: + service: + type: ClusterIP + port: 8080 diff --git a/deploy/scripts/gcp/README.md b/deploy/scripts/gcp/README.md new file mode 100644 index 00000000..0dd19e65 --- /dev/null +++ b/deploy/scripts/gcp/README.md @@ -0,0 +1,47 @@ +# GCP (GKE) quickstart + +This is the "fast path" to get ShipSec Studio running on GKE Standard in `us-central1`. + +It intentionally keeps dependencies **in-cluster** for the first cloud pass. 
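+
+Under the hood, `install.sh` builds and pushes `linux/amd64` images to
+Artifact Registry, then applies two charts with the `gke-dev` overlays.
+The Helm layer is roughly equivalent to the following (the script also
+passes image repositories/tags as `--set` flags):
+
+```bash
+helm upgrade --install shipsec-infra deploy/helm/shipsec-infra \
+  --namespace shipsec-system \
+  --values deploy/helm/shipsec-infra/values.yaml \
+  --values deploy/helm/shipsec-infra/values/gke-dev.yaml
+
+helm upgrade --install shipsec deploy/helm/shipsec \
+  --namespace shipsec-system \
+  --values deploy/helm/shipsec/values.yaml \
+  --values deploy/helm/shipsec/values/gke-dev.yaml \
+  --values deploy/helm/shipsec/values/dind.yaml
+```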
+ +## Prereqs + +- `gcloud`, `kubectl`, `helm`, `docker` +- A GKE Standard cluster already created (we used `shipsec-dev` in `us-central1-a`) +- Artifact Registry repo `shipsec-studio` in `us-central1` + +## Install + +```bash +bash deploy/scripts/gcp/install.sh +``` + +Override defaults: + +```bash +PROJECT_ID=shipsec REGION=us-central1 ZONE=us-central1-a CLUSTER_NAME=shipsec-dev IMAGE_TAG=dev1 bash deploy/scripts/gcp/install.sh +``` + +## Smoke + +```bash +bash deploy/scripts/gcp/smoke.sh +``` + +## Notes + +- This path uses DinD (privileged) for now. Treat it as trusted-tenant only. +- Frontend is built with `VITE_API_URL` pointing to the backend LoadBalancer IP. +- If you build from an Apple Silicon machine, you must push `linux/amd64` images to GKE nodes. Otherwise pods will crash with `exec format error`. `install.sh` enforces `--platform linux/amd64` and uses a unique `IMAGE_TAG` by default. + +## kubectl setup (on your machine) + +```bash +gcloud components install gke-gcloud-auth-plugin --quiet +gcloud config set project shipsec +gcloud config set compute/region us-central1 +gcloud config set compute/zone us-central1-a +gcloud container clusters get-credentials shipsec-dev --zone us-central1-a --project shipsec +kubectl config current-context +kubectl get nodes +``` diff --git a/deploy/scripts/gcp/install.sh b/deploy/scripts/gcp/install.sh new file mode 100755 index 00000000..a51e53d8 --- /dev/null +++ b/deploy/scripts/gcp/install.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)" + +PROJECT_ID="${PROJECT_ID:-shipsec}" +REGION="${REGION:-us-central1}" +ZONE="${ZONE:-us-central1-a}" +CLUSTER_NAME="${CLUSTER_NAME:-shipsec-dev}" +KUBE_CONTEXT="gke_${PROJECT_ID}_${ZONE}_${CLUSTER_NAME}" + +SYSTEM_NS="${SYSTEM_NS:-shipsec-system}" +WORKERS_NS="${WORKERS_NS:-shipsec-workers}" +WORKLOADS_NS="${WORKLOADS_NS:-shipsec-workloads}" + +AR_REPO="${AR_REPO:-shipsec-studio}" +GIT_SHA="$(git -C "${ROOT_DIR}" rev-parse --short HEAD)" +# Default tag includes a timestamp to avoid amd64/arm64 tag collisions and to +# ensure GKE nodes pull the new image. +IMAGE_TAG="${IMAGE_TAG:-${GIT_SHA}-$(date +%Y%m%d%H%M%S)}" + +require_cmd() { + if ! command -v "$1" >/dev/null 2>&1; then + echo "[shipsec] Missing required command: $1" >&2 + exit 1 + fi +} + +require_cmd gcloud +require_cmd kubectl +require_cmd helm +require_cmd docker + +echo "[shipsec] Configuring gcloud defaults..." +gcloud config set project "${PROJECT_ID}" >/dev/null +gcloud config set compute/region "${REGION}" >/dev/null +gcloud config set compute/zone "${ZONE}" >/dev/null + +echo "[shipsec] Fetching GKE credentials..." +gcloud container clusters get-credentials "${CLUSTER_NAME}" --zone "${ZONE}" --project "${PROJECT_ID}" >/dev/null + +echo "[shipsec] Ensuring Artifact Registry pull permissions for nodes..." +PROJECT_NUMBER="$(gcloud projects describe "${PROJECT_ID}" --format='value(projectNumber)')" +NODE_SA="${PROJECT_NUMBER}-compute@developer.gserviceaccount.com" +gcloud projects add-iam-policy-binding "${PROJECT_ID}" \ + --member="serviceAccount:${NODE_SA}" \ + --role="roles/artifactregistry.reader" \ + --quiet >/dev/null || true + +echo "[shipsec] Configuring docker auth for Artifact Registry..." 
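+# `gcloud auth configure-docker` registers a docker credential helper for the
+# given registry host; the images below follow the Artifact Registry naming
+# scheme REGION-docker.pkg.dev/PROJECT/REPO/IMAGE:TAG.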
+gcloud auth configure-docker "${REGION}-docker.pkg.dev" --quiet >/dev/null + +BACKEND_IMAGE="${REGION}-docker.pkg.dev/${PROJECT_ID}/${AR_REPO}/backend:${IMAGE_TAG}" +WORKER_IMAGE="${REGION}-docker.pkg.dev/${PROJECT_ID}/${AR_REPO}/worker:${IMAGE_TAG}" +FRONTEND_IMAGE="${REGION}-docker.pkg.dev/${PROJECT_ID}/${AR_REPO}/frontend:${IMAGE_TAG}" + +echo "[shipsec] Building + pushing backend/worker images (linux/amd64)..." +cd "${ROOT_DIR}" +docker buildx build --platform linux/amd64 --target backend -t "${BACKEND_IMAGE}" --push . +docker buildx build --platform linux/amd64 --target worker -t "${WORKER_IMAGE}" --push . + +echo "[shipsec] Creating namespaces (idempotent)..." +kubectl --context "${KUBE_CONTEXT}" get namespace "${SYSTEM_NS}" >/dev/null 2>&1 || kubectl --context "${KUBE_CONTEXT}" create namespace "${SYSTEM_NS}" +kubectl --context "${KUBE_CONTEXT}" get namespace "${WORKERS_NS}" >/dev/null 2>&1 || kubectl --context "${KUBE_CONTEXT}" create namespace "${WORKERS_NS}" +kubectl --context "${KUBE_CONTEXT}" get namespace "${WORKLOADS_NS}" >/dev/null 2>&1 || kubectl --context "${KUBE_CONTEXT}" create namespace "${WORKLOADS_NS}" + +echo "[shipsec] Installing infra chart (in-cluster deps, fast path)..." +helm upgrade --install shipsec-infra "${ROOT_DIR}/deploy/helm/shipsec-infra" \ + --namespace "${SYSTEM_NS}" \ + --kube-context "${KUBE_CONTEXT}" \ + --values "${ROOT_DIR}/deploy/helm/shipsec-infra/values.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec-infra/values/gke-dev.yaml" + +echo "[shipsec] Installing app chart (backend/worker first; frontend later)..." +helm upgrade --install shipsec "${ROOT_DIR}/deploy/helm/shipsec" \ + --namespace "${SYSTEM_NS}" \ + --kube-context "${KUBE_CONTEXT}" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values/gke-dev.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values/dind.yaml" \ + --set "frontend.enabled=false" \ + --set "backend.image.repository=${REGION}-docker.pkg.dev/${PROJECT_ID}/${AR_REPO}/backend" \ + --set "backend.image.tag=${IMAGE_TAG}" \ + --set "backend.image.pullPolicy=IfNotPresent" \ + --set "worker.image.repository=${REGION}-docker.pkg.dev/${PROJECT_ID}/${AR_REPO}/worker" \ + --set "worker.image.tag=${IMAGE_TAG}" \ + --set "worker.image.pullPolicy=IfNotPresent" + +echo "[shipsec] Waiting for backend service external IP..." +BACKEND_IP="" +for _ in $(seq 1 60); do + BACKEND_IP="$(kubectl --context "${KUBE_CONTEXT}" -n "${SYSTEM_NS}" get svc shipsec-backend -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || true)" + if [[ -n "${BACKEND_IP}" ]]; then + break + fi + sleep 5 +done + +if [[ -z "${BACKEND_IP}" ]]; then + echo "[shipsec] Backend LoadBalancer IP not assigned yet. You can check:" >&2 + echo " kubectl --context ${KUBE_CONTEXT} -n ${SYSTEM_NS} get svc shipsec-backend -o wide" >&2 + exit 1 +fi + +echo "[shipsec] Backend external IP: ${BACKEND_IP}" + +echo "[shipsec] Building + pushing frontend image (linux/amd64; VITE_API_URL points to backend LB)..." +docker buildx build --platform linux/amd64 \ + --target frontend \ + -t "${FRONTEND_IMAGE}" \ + --build-arg "VITE_API_URL=http://${BACKEND_IP}:3211" \ + --build-arg "VITE_BACKEND_URL=http://${BACKEND_IP}:3211" \ + --push \ + . + +echo "[shipsec] Enabling frontend deployment..." 
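+# Second helm pass over the same release: same values and images as above,
+# now with frontend.enabled=true. The frontend image could only be built once
+# the backend LB IP was known, since VITE_API_URL is baked in at build time.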
+helm upgrade --install shipsec "${ROOT_DIR}/deploy/helm/shipsec" \ + --namespace "${SYSTEM_NS}" \ + --kube-context "${KUBE_CONTEXT}" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values/gke-dev.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values/dind.yaml" \ + --set "frontend.enabled=true" \ + --set "backend.image.repository=${REGION}-docker.pkg.dev/${PROJECT_ID}/${AR_REPO}/backend" \ + --set "backend.image.tag=${IMAGE_TAG}" \ + --set "backend.image.pullPolicy=IfNotPresent" \ + --set "worker.image.repository=${REGION}-docker.pkg.dev/${PROJECT_ID}/${AR_REPO}/worker" \ + --set "worker.image.tag=${IMAGE_TAG}" \ + --set "worker.image.pullPolicy=IfNotPresent" \ + --set "frontend.image.repository=${REGION}-docker.pkg.dev/${PROJECT_ID}/${AR_REPO}/frontend" \ + --set "frontend.image.tag=${IMAGE_TAG}" \ + --set "frontend.image.pullPolicy=IfNotPresent" + +echo "[shipsec] Done. Check services:" +echo " kubectl --context ${KUBE_CONTEXT} -n ${SYSTEM_NS} get svc -o wide" diff --git a/deploy/scripts/gcp/smoke.sh b/deploy/scripts/gcp/smoke.sh new file mode 100755 index 00000000..fb0c7370 --- /dev/null +++ b/deploy/scripts/gcp/smoke.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -euo pipefail + +PROJECT_ID="${PROJECT_ID:-shipsec}" +ZONE="${ZONE:-us-central1-a}" +CLUSTER_NAME="${CLUSTER_NAME:-shipsec-dev}" +KUBE_CONTEXT="gke_${PROJECT_ID}_${ZONE}_${CLUSTER_NAME}" + +SYSTEM_NS="${SYSTEM_NS:-shipsec-system}" + +echo "[shipsec] Pods:" +kubectl --context "${KUBE_CONTEXT}" get pods -A + +echo "[shipsec] Waiting for core deployments..." +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-backend --timeout=300s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-frontend --timeout=300s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-temporal --timeout=420s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-temporal-ui --timeout=300s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-redis --timeout=300s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=ready pod -l app.kubernetes.io/component=postgres --timeout=420s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=ready pod -l app.kubernetes.io/component=minio --timeout=420s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=ready pod -l app.kubernetes.io/component=redpanda --timeout=420s + +echo "[shipsec] Waiting for DinD..." +kubectl --context "${KUBE_CONTEXT}" wait --namespace shipsec-workloads --for=condition=available deployment/shipsec-dind --timeout=420s + +echo "[shipsec] Services:" +kubectl --context "${KUBE_CONTEXT}" --namespace "${SYSTEM_NS}" get svc -o wide + diff --git a/deploy/scripts/orbstack/install.sh b/deploy/scripts/orbstack/install.sh new file mode 100755 index 00000000..fae2e3fd --- /dev/null +++ b/deploy/scripts/orbstack/install.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)" + +SYSTEM_NS="${SYSTEM_NS:-shipsec-system}" +WORKERS_NS="${WORKERS_NS:-shipsec-workers}" +WORKLOADS_NS="${WORKLOADS_NS:-shipsec-workloads}" + +echo "[shipsec] Creating namespaces (idempotent)..." 
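+# `kubectl create namespace` errors if the namespace already exists, so probe
+# with `get` first and create only on a miss.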
+kubectl get namespace "${SYSTEM_NS}" >/dev/null 2>&1 || kubectl create namespace "${SYSTEM_NS}" +kubectl get namespace "${WORKERS_NS}" >/dev/null 2>&1 || kubectl create namespace "${WORKERS_NS}" +kubectl get namespace "${WORKLOADS_NS}" >/dev/null 2>&1 || kubectl create namespace "${WORKLOADS_NS}" + +echo "[shipsec] Installing infra chart..." +helm upgrade --install shipsec-infra "${ROOT_DIR}/deploy/helm/shipsec-infra" \ + --namespace "${SYSTEM_NS}" \ + --values "${ROOT_DIR}/deploy/helm/shipsec-infra/values/local-orbstack.yaml" + +echo "[shipsec] Installing app chart..." +helm upgrade --install shipsec "${ROOT_DIR}/deploy/helm/shipsec" \ + --namespace "${SYSTEM_NS}" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values/local-orbstack.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values/dind.yaml" + +echo "[shipsec] Done." diff --git a/deploy/scripts/orbstack/smoke.sh b/deploy/scripts/orbstack/smoke.sh new file mode 100755 index 00000000..7e052c53 --- /dev/null +++ b/deploy/scripts/orbstack/smoke.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -euo pipefail + +SYSTEM_NS="${SYSTEM_NS:-shipsec-system}" + +echo "[shipsec] Pods:" +kubectl get pods -A + +echo "[shipsec] Waiting for backend to be Ready..." +kubectl wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-backend --timeout=180s + +echo "[shipsec] Checking backend health..." +curl -fsS http://localhost:3211/health >/dev/null + +echo "[shipsec] OK" + diff --git a/deploy/scripts/orbstack/uninstall.sh b/deploy/scripts/orbstack/uninstall.sh new file mode 100755 index 00000000..59cc72b6 --- /dev/null +++ b/deploy/scripts/orbstack/uninstall.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -euo pipefail + +SYSTEM_NS="${SYSTEM_NS:-shipsec-system}" + +echo "[shipsec] Uninstalling app chart..." +helm uninstall shipsec --namespace "${SYSTEM_NS}" || true + +echo "[shipsec] Uninstalling infra chart..." +helm uninstall shipsec-infra --namespace "${SYSTEM_NS}" || true + +echo "[shipsec] Done." + diff --git a/deploy/scripts/vps/install.sh b/deploy/scripts/vps/install.sh new file mode 100755 index 00000000..7a719f71 --- /dev/null +++ b/deploy/scripts/vps/install.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)" + +SYSTEM_NS="${SYSTEM_NS:-shipsec-system}" +WORKERS_NS="${WORKERS_NS:-shipsec-workers}" +WORKLOADS_NS="${WORKLOADS_NS:-shipsec-workloads}" +KUBE_CONTEXT="${KUBE_CONTEXT:-kind-shipsec}" +KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-shipsec}" +SHIPSEC_BUILD_FRONTEND="${SHIPSEC_BUILD_FRONTEND:-1}" + +if command -v kind >/dev/null 2>&1; then + if ! kind get clusters 2>/dev/null | grep -q "^${KIND_CLUSTER_NAME}$"; then + echo "[shipsec] Creating kind cluster: ${KIND_CLUSTER_NAME}" + kind create cluster --name "${KIND_CLUSTER_NAME}" --wait 180s + fi +fi + +echo "[shipsec] Creating namespaces (idempotent)..." +kubectl --context "${KUBE_CONTEXT}" get namespace "${SYSTEM_NS}" >/dev/null 2>&1 || kubectl --context "${KUBE_CONTEXT}" create namespace "${SYSTEM_NS}" +kubectl --context "${KUBE_CONTEXT}" get namespace "${WORKERS_NS}" >/dev/null 2>&1 || kubectl --context "${KUBE_CONTEXT}" create namespace "${WORKERS_NS}" +kubectl --context "${KUBE_CONTEXT}" get namespace "${WORKLOADS_NS}" >/dev/null 2>&1 || kubectl --context "${KUBE_CONTEXT}" create namespace "${WORKLOADS_NS}" + +IMAGE_OVERRIDES=() +if [[ "${SHIPSEC_BUILD_IMAGES:-0}" == "1" ]]; then + echo "[shipsec] Building images locally (SHIPSEC_BUILD_IMAGES=1)..." 
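+  # Local-build path: build the images from the repo root, side-load them into
+  # kind (no registry needed), and collect --set overrides so the charts use
+  # the :dev tags instead of the default ghcr.io images.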
+ cd "${ROOT_DIR}" + docker build -t shipsec-backend:dev --target backend . + docker build -t shipsec-worker:dev --target worker . + if [[ "${SHIPSEC_BUILD_FRONTEND}" == "1" ]]; then + docker build -t shipsec-frontend:dev --target frontend . + else + echo "[shipsec] Skipping frontend image build (SHIPSEC_BUILD_FRONTEND=0)" + fi + + if command -v kind >/dev/null 2>&1; then + echo "[shipsec] Loading images into kind..." + kind load docker-image shipsec-backend:dev --name "${KIND_CLUSTER_NAME}" + kind load docker-image shipsec-worker:dev --name "${KIND_CLUSTER_NAME}" + if [[ "${SHIPSEC_BUILD_FRONTEND}" == "1" ]]; then + kind load docker-image shipsec-frontend:dev --name "${KIND_CLUSTER_NAME}" + fi + fi + + IMAGE_OVERRIDES+=("--set" "backend.image.repository=shipsec-backend") + IMAGE_OVERRIDES+=("--set" "backend.image.tag=dev") + IMAGE_OVERRIDES+=("--set" "backend.image.pullPolicy=IfNotPresent") + IMAGE_OVERRIDES+=("--set" "worker.image.repository=shipsec-worker") + IMAGE_OVERRIDES+=("--set" "worker.image.tag=dev") + IMAGE_OVERRIDES+=("--set" "worker.image.pullPolicy=IfNotPresent") + if [[ "${SHIPSEC_BUILD_FRONTEND}" == "1" ]]; then + IMAGE_OVERRIDES+=("--set" "frontend.image.repository=shipsec-frontend") + IMAGE_OVERRIDES+=("--set" "frontend.image.tag=dev") + IMAGE_OVERRIDES+=("--set" "frontend.image.pullPolicy=IfNotPresent") + fi +fi + +echo "[shipsec] Installing infra chart (in-cluster deps for VPS test)..." +helm upgrade --install shipsec-infra "${ROOT_DIR}/deploy/helm/shipsec-infra" \ + --namespace "${SYSTEM_NS}" \ + --kube-context "${KUBE_CONTEXT}" \ + --values "${ROOT_DIR}/deploy/helm/shipsec-infra/values.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec-infra/values/vps.yaml" + +echo "[shipsec] Installing app chart (DinD enabled for now)..." +helm upgrade --install shipsec "${ROOT_DIR}/deploy/helm/shipsec" \ + --namespace "${SYSTEM_NS}" \ + --kube-context "${KUBE_CONTEXT}" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values/vps.yaml" \ + --values "${ROOT_DIR}/deploy/helm/shipsec/values/dind.yaml" \ + "${IMAGE_OVERRIDES[@]}" + +cat <<'EOF' + +[shipsec] Install complete. + +Recommended access pattern on a VPS (simple, no LB/Ingress required): + +1) Backend: + kubectl -n shipsec-system port-forward svc/shipsec-backend 3211:3211 + +2) Frontend: + kubectl -n shipsec-system port-forward svc/shipsec-frontend 8090:8080 + +3) Temporal UI: + kubectl -n shipsec-system port-forward svc/shipsec-temporal-ui 8081:8081 + +4) MinIO console: + kubectl -n shipsec-system port-forward svc/shipsec-minio 9001:9001 + +Then SSH tunnel from your laptop: + ssh -L 3211:localhost:3211 -L 8090:localhost:8090 -L 8081:localhost:8081 -L 9001:localhost:9001 clevervps + +EOF diff --git a/deploy/scripts/vps/smoke.sh b/deploy/scripts/vps/smoke.sh new file mode 100755 index 00000000..d2eefa29 --- /dev/null +++ b/deploy/scripts/vps/smoke.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +SYSTEM_NS="${SYSTEM_NS:-shipsec-system}" +KUBE_CONTEXT="${KUBE_CONTEXT:-kind-shipsec}" + +echo "[shipsec] Pods:" +kubectl --context "${KUBE_CONTEXT}" get pods -A + +echo "[shipsec] Waiting for core deployments..." 
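+# Deployments expose an "Available" condition; the stateful components
+# (postgres, minio, redpanda) are instead waited on at the pod level with
+# condition=ready plus a label selector.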
+kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-backend --timeout=240s +if kubectl --context "${KUBE_CONTEXT}" --namespace "${SYSTEM_NS}" get deployment/shipsec-frontend >/dev/null 2>&1; then + kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-frontend --timeout=240s +fi +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-temporal --timeout=300s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-temporal-ui --timeout=240s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=available deployment/shipsec-redis --timeout=240s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=ready pod -l app.kubernetes.io/component=postgres --timeout=300s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=ready pod -l app.kubernetes.io/component=minio --timeout=300s +kubectl --context "${KUBE_CONTEXT}" wait --namespace "${SYSTEM_NS}" --for=condition=ready pod -l app.kubernetes.io/component=redpanda --timeout=300s + +echo "[shipsec] Waiting for DinD..." +kubectl --context "${KUBE_CONTEXT}" wait --namespace shipsec-workloads --for=condition=available deployment/shipsec-dind --timeout=300s + +echo "[shipsec] OK (deployments/pods Ready). To verify HTTP endpoints, use port-forward as printed by install.sh." From ed092c303e9fc7cbd5be6d5cc4abe5a069d9daa7 Mon Sep 17 00:00:00 2001 From: betterclever Date: Wed, 11 Feb 2026 13:56:13 +0400 Subject: [PATCH 10/36] feat(infra): add managed services (Cloud SQL, Memorystore, GCS) and wire Helm charts Replace in-cluster postgres and redis with GCP managed alternatives: - Cloud SQL PostgreSQL 16 with Private Service Access - Memorystore Redis 7.2 (BASIC tier) - GCS bucket with Workload Identity SA (ready for MinIO replacement) - Helm gke-managed.yaml overlays for both infra and app charts - Temporal template now supports configurable postgres host Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever --- .../shipsec-infra/templates/temporal.yaml | 6 +- .../shipsec-infra/values/gke-managed.yaml | 27 +++ deploy/helm/shipsec/values/gke-managed.yaml | 64 +++++++ infra/gcp/envs/dev-local/main.tf | 172 ++++++++++++++++++ infra/gcp/envs/dev-local/variables.tf | 20 ++ 5 files changed, 286 insertions(+), 3 deletions(-) create mode 100644 deploy/helm/shipsec-infra/values/gke-managed.yaml create mode 100644 deploy/helm/shipsec/values/gke-managed.yaml diff --git a/deploy/helm/shipsec-infra/templates/temporal.yaml b/deploy/helm/shipsec-infra/templates/temporal.yaml index f38d3edf..b9085ab9 100644 --- a/deploy/helm/shipsec-infra/templates/temporal.yaml +++ b/deploy/helm/shipsec-infra/templates/temporal.yaml @@ -31,11 +31,11 @@ spec: - name: DB_NAME value: temporal - name: POSTGRES_USER - value: {{ .Values.postgres.user | quote }} + value: {{ .Values.temporal.postgresUser | default .Values.postgres.user | quote }} - name: POSTGRES_PWD - value: {{ .Values.postgres.password | quote }} + value: {{ .Values.temporal.postgresPassword | default .Values.postgres.password | quote }} - name: POSTGRES_SEEDS - value: shipsec-postgres + value: {{ .Values.temporal.postgresHost | default "shipsec-postgres" | quote }} - name: AUTO_SETUP value: "true" readinessProbe: diff --git a/deploy/helm/shipsec-infra/values/gke-managed.yaml 
b/deploy/helm/shipsec-infra/values/gke-managed.yaml new file mode 100644 index 00000000..a72d1ba2 --- /dev/null +++ b/deploy/helm/shipsec-infra/values/gke-managed.yaml @@ -0,0 +1,27 @@ +# GKE with managed services (Cloud SQL, Memorystore). +# Layer on top of gke-dev.yaml: +# --values values/gke-dev.yaml --values values/gke-managed.yaml + +global: + namespace: shipsec-system + +# Replaced by Cloud SQL +postgres: + enabled: false + +# Replaced by Memorystore +redis: + enabled: false + +# Keep MinIO in-cluster (org policy blocks HMAC key creation for GCS S3 compat) +minio: + service: + type: ClusterIP + +# Temporal stays in-cluster but points at Cloud SQL +temporal: + postgresHost: "10.25.225.3" + postgresUser: "shipsec" + postgresPassword: "shipsec-dev-2026" + uiService: + type: ClusterIP diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml new file mode 100644 index 00000000..3de766d9 --- /dev/null +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -0,0 +1,64 @@ +# GKE with managed services (Cloud SQL, Memorystore). +# Layer on top of gke-dev.yaml: +# --values values/gke-dev.yaml --values values/gke-managed.yaml + +global: + namespaces: + system: shipsec-system + workers: shipsec-workers + workloads: shipsec-workloads + +secrets: + create: true + name: shipsec-app-secrets + databaseUrl: "postgresql://shipsec:shipsec-dev-2026@10.25.225.3:5432/shipsec" + minioRootUser: minioadmin + minioRootPassword: minioadmin + +backend: + service: + type: LoadBalancer + env: + NODE_ENV: production + SHIPSEC_ENV: local + PORT: "3211" + ENABLE_INGEST_SERVICES: "false" + TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233 + TEMPORAL_NAMESPACE: shipsec-dev + TEMPORAL_TASK_QUEUE: shipsec-dev + MINIO_ENDPOINT: shipsec-minio.shipsec-system.svc.cluster.local + MINIO_PORT: "9000" + LOKI_URL: http://shipsec-loki.shipsec-system.svc.cluster.local:3100 + TERMINAL_REDIS_URL: "redis://10.25.224.3:6379" + LOG_KAFKA_BROKERS: shipsec-redpanda.shipsec-system.svc.cluster.local:9092 + LOG_KAFKA_TOPIC: telemetry.logs + LOG_KAFKA_CLIENT_ID: shipsec-backend + EVENT_KAFKA_TOPIC: telemetry.events + EVENT_KAFKA_CLIENT_ID: shipsec-backend-events + EVENT_KAFKA_GROUP_ID: shipsec-event-ingestor + AUTH_PROVIDER: local + +worker: + env: + NODE_ENV: production + SHIPSEC_ENV: local + ENABLE_INGEST_SERVICES: "false" + TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233 + TEMPORAL_NAMESPACE: shipsec-dev + TEMPORAL_TASK_QUEUE: shipsec-dev + MINIO_ENDPOINT: shipsec-minio.shipsec-system.svc.cluster.local + MINIO_PORT: "9000" + MINIO_BUCKET_NAME: shipsec-files + LOKI_URL: http://shipsec-loki.shipsec-system.svc.cluster.local:3100 + TERMINAL_REDIS_URL: "redis://10.25.224.3:6379" + TERMINAL_REDIS_MAXLEN: "5000" + LOG_KAFKA_BROKERS: shipsec-redpanda.shipsec-system.svc.cluster.local:9092 + LOG_KAFKA_TOPIC: telemetry.logs + LOG_KAFKA_CLIENT_ID: shipsec-worker + EVENT_KAFKA_TOPIC: telemetry.events + EVENT_KAFKA_CLIENT_ID: shipsec-worker-events + BACKEND_URL: http://shipsec-backend.shipsec-system.svc.cluster.local:3211 + +frontend: + service: + type: LoadBalancer diff --git a/infra/gcp/envs/dev-local/main.tf b/infra/gcp/envs/dev-local/main.tf index deb2238b..d7a0f5b1 100644 --- a/infra/gcp/envs/dev-local/main.tf +++ b/infra/gcp/envs/dev-local/main.tf @@ -13,6 +13,9 @@ locals { "container.googleapis.com", "artifactregistry.googleapis.com", "secretmanager.googleapis.com", + "sqladmin.googleapis.com", + "redis.googleapis.com", + "servicenetworking.googleapis.com", 
]) } @@ -102,6 +105,151 @@ resource "google_container_node_pool" "default_pool" { } } +# ========================================================================== +# Managed Services: Cloud SQL, Memorystore, GCS +# ========================================================================== + +# Private Service Access — allows Cloud SQL and Memorystore to get private IPs +# on the default VPC so GKE pods can reach them without public IPs. +resource "google_compute_global_address" "private_ip_range" { + project = var.project_id + name = "shipsec-private-ip-range" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 20 + network = data.google_compute_network.default.id + + depends_on = [google_project_service.enabled] +} + +resource "google_service_networking_connection" "private_vpc" { + network = data.google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_range.name] + + depends_on = [google_project_service.enabled] +} + +# --- Cloud SQL (PostgreSQL 16) --- +resource "google_sql_database_instance" "postgres" { + project = var.project_id + name = "${var.cluster_name}-pg" + region = var.region + database_version = "POSTGRES_16" + + deletion_protection = false + + settings { + tier = var.cloudsql_tier + edition = "ENTERPRISE" + availability_type = "ZONAL" + disk_size = 10 + disk_type = "PD_SSD" + disk_autoresize = true + + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.default.id + enable_private_path_for_google_cloud_services = true + } + + backup_configuration { + enabled = true + start_time = "03:00" + point_in_time_recovery_enabled = true + transaction_log_retention_days = 7 + backup_retention_settings { + retained_backups = 7 + } + } + } + + depends_on = [google_service_networking_connection.private_vpc] +} + +resource "google_sql_database" "shipsec" { + project = var.project_id + instance = google_sql_database_instance.postgres.name + name = "shipsec" +} + +resource "google_sql_database" "temporal" { + project = var.project_id + instance = google_sql_database_instance.postgres.name + name = "temporal" +} + +resource "google_sql_user" "shipsec" { + project = var.project_id + instance = google_sql_database_instance.postgres.name + name = "shipsec" + password = var.db_password +} + +# --- Memorystore (Redis) --- +resource "google_redis_instance" "redis" { + project = var.project_id + name = "${var.cluster_name}-redis" + region = var.region + tier = "BASIC" + memory_size_gb = var.redis_memory_gb + + authorized_network = data.google_compute_network.default.id + connect_mode = "PRIVATE_SERVICE_ACCESS" + + redis_version = "REDIS_7_2" + + depends_on = [google_service_networking_connection.private_vpc] +} + +# --- GCS (replaces MinIO for artifact/file storage) --- +resource "google_storage_bucket" "artifacts" { + project = var.project_id + name = "${var.project_id}-artifacts-${var.cluster_name}" + location = var.region + force_destroy = true + + uniform_bucket_level_access = true + + versioning { + enabled = false + } + + lifecycle_rule { + condition { + age = 90 + } + action { + type = "Delete" + } + } +} + +# Service account for GCS access via Workload Identity +resource "google_service_account" "storage" { + project = var.project_id + account_id = "${var.cluster_name}-storage" + display_name = "Storage SA for ${var.cluster_name}" +} + +resource "google_storage_bucket_iam_member" "storage_admin" { + bucket = 
google_storage_bucket.artifacts.name + role = "roles/storage.objectAdmin" + member = "serviceAccount:${google_service_account.storage.email}" +} + +# Workload Identity binding: allow K8s SA "storage" in shipsec-system +# namespace to impersonate this GCP SA. +resource "google_service_account_iam_member" "storage_wi" { + service_account_id = google_service_account.storage.name + role = "roles/iam.workloadIdentityUser" + member = "serviceAccount:${var.project_id}.svc.id.goog[shipsec-system/storage]" +} + +# ========================================================================== +# Outputs +# ========================================================================== + output "artifact_registry_repo" { value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.docker.repository_id}" } @@ -113,3 +261,27 @@ output "cluster_location" { output "cluster_name" { value = google_container_cluster.gke.name } + +# Cloud SQL +output "database_url" { + value = "postgresql://${google_sql_user.shipsec.name}:${var.db_password}@${google_sql_database_instance.postgres.private_ip_address}:5432/shipsec" + sensitive = true +} + +output "cloudsql_private_ip" { + value = google_sql_database_instance.postgres.private_ip_address +} + +# Memorystore +output "redis_url" { + value = "redis://${google_redis_instance.redis.host}:${google_redis_instance.redis.port}" +} + +# GCS (via Workload Identity) +output "gcs_bucket" { + value = google_storage_bucket.artifacts.name +} + +output "gcs_storage_sa_email" { + value = google_service_account.storage.email +} diff --git a/infra/gcp/envs/dev-local/variables.tf b/infra/gcp/envs/dev-local/variables.tf index 8227e7e6..16b28fd5 100644 --- a/infra/gcp/envs/dev-local/variables.tf +++ b/infra/gcp/envs/dev-local/variables.tf @@ -52,3 +52,23 @@ variable "node_disk_gb" { default = 100 } +# --- Managed services --- + +variable "cloudsql_tier" { + type = string + description = "Cloud SQL machine tier." + default = "db-custom-1-3840" +} + +variable "db_password" { + type = string + description = "Password for the shipsec Cloud SQL user." + sensitive = true +} + +variable "redis_memory_gb" { + type = number + description = "Memorystore Redis memory in GB." + default = 1 +} + From 3fd842910c1bb893332cfe26172e5cae3cbb79d5 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 02:31:53 +0400 Subject: [PATCH 11/36] feat(worker): add K8s Job execution engine replacing DIND Introduce K8s-native execution for workflow components. Instead of docker-in-docker, the worker now creates K8s Jobs in a dedicated namespace with ConfigMap-based I/O and RBAC isolation. 
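
The Job lifecycle in k8s-runner.ts boils down to create -> poll -> read
logs -> delete. A minimal sketch of that loop, assuming the
object-parameter API of @kubernetes/client-node 1.x (names such as
runJob are illustrative, not the actual k8s-runner.ts surface):

```ts
import * as k8s from '@kubernetes/client-node';

const kc = new k8s.KubeConfig();
kc.loadFromDefault(); // in-cluster service account, or local kubeconfig

const batch = kc.makeApiClient(k8s.BatchV1Api);
const core = kc.makeApiClient(k8s.CoreV1Api);

// Hypothetical helper: run one component as a Job and return its logs.
async function runJob(namespace: string, name: string, image: string): Promise<string> {
  await batch.createNamespacedJob({
    namespace,
    body: {
      metadata: { name },
      spec: {
        backoffLimit: 0,
        ttlSecondsAfterFinished: 300, // fallback GC if cleanup is missed
        template: {
          spec: {
            restartPolicy: 'Never',
            containers: [{
              name: 'main',
              image,
              // Read-only root filesystems: point HOME at a writable tmpfs.
              env: [{ name: 'HOME', value: '/tmp' }],
            }],
          },
        },
      },
    },
  });

  // Poll the Job status until it succeeds or fails.
  for (;;) {
    const job = await batch.readNamespacedJobStatus({ name, namespace });
    if (job.status?.succeeded) break;
    if (job.status?.failed) throw new Error(`job ${name} failed`);
    await new Promise((r) => setTimeout(r, 2000));
  }

  // Grab logs from the Job's pod, then delete the Job and its pods.
  const pods = await core.listNamespacedPod({ namespace, labelSelector: `job-name=${name}` });
  const podName = pods.items[0]?.metadata?.name;
  if (!podName) throw new Error(`no pod found for job ${name}`);
  const logs = await core.readNamespacedPodLog({ name: podName, namespace });
  await batch.deleteNamespacedJob({ name, namespace, propagationPolicy: 'Background' });
  return logs;
}
```
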
- k8s-runner.ts: core Job lifecycle (create, poll, logs, cleanup) - k8s-volume.ts: IsolatedK8sVolume backed by ConfigMaps - Distroless image support (no /bin/sh required) - HOME=/root override to /tmp for read-only root filesystems - setDockerRunnerOverride() hook in component-sdk for transparent swap - Auto-activate K8s mode via EXECUTION_MODE=k8s env var at startup Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- bun.lock | 61 ++ packages/component-sdk/src/runner.ts | 26 + worker/package.json | 3 +- worker/src/temporal/workers/dev.worker.ts | 8 + worker/src/utils/index.ts | 7 +- worker/src/utils/isolated-volume.ts | 21 + worker/src/utils/k8s-runner.ts | 692 ++++++++++++++++++++++ worker/src/utils/k8s-volume.ts | 235 ++++++++ 8 files changed, 1051 insertions(+), 2 deletions(-) create mode 100644 worker/src/utils/k8s-runner.ts create mode 100644 worker/src/utils/k8s-volume.ts diff --git a/bun.lock b/bun.lock index 4538ff20..622d9346 100644 --- a/bun.lock +++ b/bun.lock @@ -252,6 +252,7 @@ "@aws-sdk/client-s3": "^3.975.0", "@googleapis/admin": "^29.0.0", "@grpc/grpc-js": "^1.14.3", + "@kubernetes/client-node": "^1.4.0", "@modelcontextprotocol/sdk": "^1.25.1", "@okta/okta-sdk-nodejs": "^7.3.0", "@shipsec/component-sdk": "*", @@ -603,6 +604,10 @@ "@js-sdsl/ordered-map": ["@js-sdsl/ordered-map@4.4.2", "", {}, "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw=="], + "@jsep-plugin/assignment": ["@jsep-plugin/assignment@1.3.0", "", { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, "sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ=="], + + "@jsep-plugin/regex": ["@jsep-plugin/regex@1.0.4", "", { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, "sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg=="], + "@jsonjoy.com/base64": ["@jsonjoy.com/base64@1.1.2", "", { "peerDependencies": { "tslib": "2" } }, "sha512-q6XAnWQDIMA3+FTiOYajoYqySkO+JSat0ytXGSuRdq9uXE7o92gzuQwQM14xaCRlBLGq3v5miDGC4vkVTn54xA=="], "@jsonjoy.com/buffers": ["@jsonjoy.com/buffers@17.65.0", "", { "peerDependencies": { "tslib": "2" } }, "sha512-eBrIXd0/Ld3p9lpDDlMaMn6IEfWqtHMD+z61u0JrIiPzsV1r7m6xDZFRxJyvIFTEO+SWdYF9EiQbXZGd8BzPfA=="], @@ -631,6 +636,8 @@ "@jsonjoy.com/util": ["@jsonjoy.com/util@1.9.0", "", { "dependencies": { "@jsonjoy.com/buffers": "^1.0.0", "@jsonjoy.com/codegen": "^1.0.0" }, "peerDependencies": { "tslib": "2" } }, "sha512-pLuQo+VPRnN8hfPqUTLTHk126wuYdXVxE6aDmjSeV4NCAgyxWbiOIeNJVtID3h1Vzpoi9m4jXezf73I6LgabgQ=="], + "@kubernetes/client-node": ["@kubernetes/client-node@1.4.0", "", { "dependencies": { "@types/js-yaml": "^4.0.1", "@types/node": "^24.0.0", "@types/node-fetch": "^2.6.13", "@types/stream-buffers": "^3.0.3", "form-data": "^4.0.0", "hpagent": "^1.2.0", "isomorphic-ws": "^5.0.0", "js-yaml": "^4.1.0", "jsonpath-plus": "^10.3.0", "node-fetch": "^2.7.0", "openid-client": "^6.1.3", "rfc4648": "^1.3.0", "socks-proxy-agent": "^8.0.4", "stream-buffers": "^3.0.2", "tar-fs": "^3.0.9", "ws": "^8.18.2" } }, "sha512-Zge3YvF7DJi264dU1b3wb/GmzR99JhUpqTvp+VGHfwZT+g7EOOYNScDJNZwXy9cszyIGPIs0VHr+kk8e95qqrA=="], + "@lukeed/csprng": ["@lukeed/csprng@1.1.0", "", {}, "sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA=="], "@microsoft/tsdoc": ["@microsoft/tsdoc@0.16.0", "", {}, "sha512-xgAyonlVVS+q7Vc7qLW0UrJU7rSFcETRWsqdXZtjzRU8dF+6CkozTK4V4y1LwOX7j8r/vHphjDeMeGI4tNGeGA=="], @@ -1199,6 +1206,8 @@ 
"@types/node": ["@types/node@24.10.9", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-ne4A0IpG3+2ETuREInjPNhUGis1SFjv1d5asp8MzEAGtOZeTeHVDOYqOgqfhvseqg/iXty2hjBf1zAOb7RNiNw=="], + "@types/node-fetch": ["@types/node-fetch@2.6.13", "", { "dependencies": { "@types/node": "*", "form-data": "^4.0.4" } }, "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw=="], + "@types/node-forge": ["@types/node-forge@1.3.14", "", { "dependencies": { "@types/node": "*" } }, "sha512-mhVF2BnD4BO+jtOp7z1CdzaK4mbuK0LLQYAvdOLqHTavxFNq4zA1EmYkpnFjP8HOUzedfQkRnp0E2ulSAYSzAw=="], "@types/normalize-package-data": ["@types/normalize-package-data@2.4.4", "", {}, "sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA=="], @@ -1221,6 +1230,8 @@ "@types/serve-static": ["@types/serve-static@2.2.0", "", { "dependencies": { "@types/http-errors": "*", "@types/node": "*" } }, "sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ=="], + "@types/stream-buffers": ["@types/stream-buffers@3.0.8", "", { "dependencies": { "@types/node": "*" } }, "sha512-J+7VaHKNvlNPJPEJXX/fKa9DZtR/xPMwuIbe+yNOwp1YB+ApUOBv2aUpEoBJEi8nJgbgs1x8e73ttg0r1rSUdw=="], + "@types/superagent": ["@types/superagent@8.1.9", "", { "dependencies": { "@types/cookiejar": "^2.1.5", "@types/methods": "^1.1.4", "@types/node": "*", "form-data": "^4.0.0" } }, "sha512-pTVjI73witn+9ILmoJdajHGW2jkSaOzhiFYF1Rd3EQ94kymLqB9PjD9ISg7WaALC7+dCHT0FGe9T2LktLq/3GQ=="], "@types/supertest": ["@types/supertest@2.0.16", "", { "dependencies": { "@types/superagent": "*" } }, "sha512-6c2ogktZ06tr2ENoZivgm7YnprnhYE4ZoXGMY+oA7IuAf17M8FWvujXZGmxLv8y0PTyts4x5A+erSwVUFA8XSg=="], @@ -1389,10 +1400,24 @@ "available-typed-arrays": ["available-typed-arrays@1.0.7", "", { "dependencies": { "possible-typed-array-names": "^1.0.0" } }, "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ=="], + "b4a": ["b4a@1.7.3", "", { "peerDependencies": { "react-native-b4a": "*" }, "optionalPeers": ["react-native-b4a"] }, "sha512-5Q2mfq2WfGuFp3uS//0s6baOJLMoVduPYVeNmDYxu5OUA1/cBfvr2RIS7vi62LdNj/urk1hfmj867I3qt6uZ7Q=="], + "bail": ["bail@2.0.2", "", {}, "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw=="], "balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], + "bare-events": ["bare-events@2.8.2", "", { "peerDependencies": { "bare-abort-controller": "*" }, "optionalPeers": ["bare-abort-controller"] }, "sha512-riJjyv1/mHLIPX4RwiK+oW9/4c3TEUeORHKefKAKnZ5kyslbN+HXowtbaVEqt4IMUB7OXlfixcs6gsFeo/jhiQ=="], + + "bare-fs": ["bare-fs@4.5.3", "", { "dependencies": { "bare-events": "^2.5.4", "bare-path": "^3.0.0", "bare-stream": "^2.6.4", "bare-url": "^2.2.2", "fast-fifo": "^1.3.2" }, "peerDependencies": { "bare-buffer": "*" }, "optionalPeers": ["bare-buffer"] }, "sha512-9+kwVx8QYvt3hPWnmb19tPnh38c6Nihz8Lx3t0g9+4GoIf3/fTgYwM4Z6NxgI+B9elLQA7mLE9PpqcWtOMRDiQ=="], + + "bare-os": ["bare-os@3.6.2", "", {}, "sha512-T+V1+1srU2qYNBmJCXZkUY5vQ0B4FSlL3QDROnKQYOqeiQR8UbjNHlPa+TIbM4cuidiN9GaTaOZgSEgsvPbh5A=="], + + "bare-path": ["bare-path@3.0.0", "", { "dependencies": { "bare-os": "^3.0.1" } }, "sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw=="], + + "bare-stream": ["bare-stream@2.7.0", "", { "dependencies": { "streamx": "^2.21.0" }, "peerDependencies": { "bare-buffer": "*", 
"bare-events": "*" }, "optionalPeers": ["bare-buffer", "bare-events"] }, "sha512-oyXQNicV1y8nc2aKffH+BUHFRXmx6VrPzlnaEvMhram0nPBrKcEdcyBg5r08D0i8VxngHFAiVyn1QKXpSG0B8A=="], + + "bare-url": ["bare-url@2.3.2", "", { "dependencies": { "bare-path": "^3.0.0" } }, "sha512-ZMq4gd9ngV5aTMa5p9+UfY0b3skwhHELaDkhEHetMdX0LRkW9kzaym4oo/Eh+Ghm0CCDuMTsRIGM/ytUc1ZYmw=="], + "base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="], "base64url": ["base64url@3.0.1", "", {}, "sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A=="], @@ -1671,6 +1696,8 @@ "encodeurl": ["encodeurl@2.0.0", "", {}, "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="], + "end-of-stream": ["end-of-stream@1.4.5", "", { "dependencies": { "once": "^1.4.0" } }, "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg=="], + "enhanced-resolve": ["enhanced-resolve@5.18.4", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q=="], "enquirer": ["enquirer@2.3.6", "", { "dependencies": { "ansi-colors": "^4.1.1" } }, "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg=="], @@ -1753,6 +1780,8 @@ "events": ["events@3.3.0", "", {}, "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q=="], + "events-universal": ["events-universal@1.0.1", "", { "dependencies": { "bare-events": "^2.7.0" } }, "sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw=="], + "eventsource": ["eventsource@3.0.7", "", { "dependencies": { "eventsource-parser": "^3.0.1" } }, "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="], "eventsource-parser": ["eventsource-parser@3.0.6", "", {}, "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg=="], @@ -1769,6 +1798,8 @@ "fast-diff": ["fast-diff@1.3.0", "", {}, "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw=="], + "fast-fifo": ["fast-fifo@1.3.2", "", {}, "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ=="], + "fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="], "fast-json-patch": ["fast-json-patch@3.1.1", "", {}, "sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ=="], @@ -1949,6 +1980,8 @@ "hosted-git-info": ["hosted-git-info@4.1.0", "", { "dependencies": { "lru-cache": "^6.0.0" } }, "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA=="], + "hpagent": ["hpagent@1.2.0", "", {}, "sha512-A91dYTeIB6NoXG+PxTQpCCDDnfHsW9kc06Lvpu1TEe9gnd6ZFeiBoRO9JvzEv6xK7EX97/dUE8g/vBMTqTS3CA=="], + "html-encoding-sniffer": ["html-encoding-sniffer@6.0.0", "", { "dependencies": { "@exodus/bytes": "^1.6.0" } }, "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg=="], "html-url-attributes": ["html-url-attributes@3.0.1", "", {}, 
"sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ=="], @@ -2069,6 +2102,8 @@ "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + "isomorphic-ws": ["isomorphic-ws@5.0.0", "", { "peerDependencies": { "ws": "*" } }, "sha512-muId7Zzn9ywDsyXgTIafTry2sV3nySZeUDe6YedVd1Hvuuep5AsIlqK+XefWpYTyJG5e503F2xIuT2lcU6rCSw=="], + "iterare": ["iterare@1.2.1", "", {}, "sha512-RKYVTCjAnRthyJes037NX/IiqeidgN1xc3j1RjFfECFp28A1GVwK9nA+i0rJPaHqSZwygLzRnFlzUuHFoWWy+Q=="], "iterator.prototype": ["iterator.prototype@1.1.5", "", { "dependencies": { "define-data-property": "^1.1.4", "es-object-atoms": "^1.0.0", "get-intrinsic": "^1.2.6", "get-proto": "^1.0.0", "has-symbols": "^1.1.0", "set-function-name": "^2.0.2" } }, "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g=="], @@ -2095,6 +2130,8 @@ "jsdom": ["jsdom@27.4.0", "", { "dependencies": { "@acemir/cssom": "^0.9.28", "@asamuzakjp/dom-selector": "^6.7.6", "@exodus/bytes": "^1.6.0", "cssstyle": "^5.3.4", "data-urls": "^6.0.0", "decimal.js": "^10.6.0", "html-encoding-sniffer": "^6.0.0", "http-proxy-agent": "^7.0.2", "https-proxy-agent": "^7.0.6", "is-potential-custom-element-name": "^1.0.1", "parse5": "^8.0.0", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", "tough-cookie": "^6.0.0", "w3c-xmlserializer": "^5.0.0", "webidl-conversions": "^8.0.0", "whatwg-mimetype": "^4.0.0", "whatwg-url": "^15.1.0", "ws": "^8.18.3", "xml-name-validator": "^5.0.0" }, "peerDependencies": { "canvas": "^3.0.0" }, "optionalPeers": ["canvas"] }, "sha512-mjzqwWRD9Y1J1KUi7W97Gja1bwOOM5Ug0EZ6UDK3xS7j7mndrkwozHtSblfomlzyB4NepioNt+B2sOSzczVgtQ=="], + "jsep": ["jsep@1.4.0", "", {}, "sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw=="], + "jsesc": ["jsesc@3.1.0", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="], "json-bigint": ["json-bigint@1.0.0", "", { "dependencies": { "bignumber.js": "^9.0.0" } }, "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ=="], @@ -2115,6 +2152,8 @@ "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="], + "jsonpath-plus": ["jsonpath-plus@10.3.0", "", { "dependencies": { "@jsep-plugin/assignment": "^1.3.0", "@jsep-plugin/regex": "^1.0.4", "jsep": "^1.4.0" }, "bin": { "jsonpath": "bin/jsonpath-cli.js", "jsonpath-plus": "bin/jsonpath-cli.js" } }, "sha512-8TNmfeTCk2Le33A3vRRwtuworG/L5RrgMvdjhKZxvyShO+mBu2fP50OWUjRLNtvw344DdDarFh9buFAZs5ujeA=="], + "jsx-ast-utils": ["jsx-ast-utils@3.3.5", "", { "dependencies": { "array-includes": "^3.1.6", "array.prototype.flat": "^1.3.1", "object.assign": "^4.1.4", "object.values": "^1.1.6" } }, "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ=="], "jwa": ["jwa@2.0.1", "", { "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg=="], @@ -2375,6 +2414,8 @@ "number-allocator": ["number-allocator@1.0.14", "", { "dependencies": { "debug": "^4.3.1", "js-sdsl": "4.3.0" } }, "sha512-OrL44UTVAvkKdOdRQZIJpLkAdjXGTRda052sN4sO77bKEzYYqWKMBjQvrJFzqygI99gL6Z4u2xctPW1tB8ErvA=="], + "oauth4webapi": 
["oauth4webapi@3.8.4", "", {}, "sha512-EKlVEgav8zH31IXxvhCqjEgQws6S9QmnmJyLXmeV5REf59g7VmqRVa5l/rhGWtUqGm2rLVTNwukn9hla5kJ2WQ=="], + "object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="], "object-hash": ["object-hash@3.0.0", "", {}, "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw=="], @@ -2403,6 +2444,8 @@ "openapi-typescript-helpers": ["openapi-typescript-helpers@0.0.15", "", {}, "sha512-opyTPaunsklCBpTK8JGef6mfPhLSnyy5a0IN9vKtx3+4aExf+KxEqYwIy3hqkedXIB97u357uLMJsOnm3GVjsw=="], + "openid-client": ["openid-client@6.8.2", "", { "dependencies": { "jose": "^6.1.3", "oauth4webapi": "^3.8.4" } }, "sha512-uOvTCndr4udZsKihJ68H9bUICrriHdUVJ6Az+4Ns6cW55rwM5h0bjVIzDz2SxgOI84LKjFyjOFvERLzdTUROGA=="], + "optionator": ["optionator@0.9.4", "", { "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", "word-wrap": "^1.2.5" } }, "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g=="], "own-keys": ["own-keys@1.0.1", "", { "dependencies": { "get-intrinsic": "^1.2.6", "object-keys": "^1.1.1", "safe-push-apply": "^1.0.0" } }, "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg=="], @@ -2547,6 +2590,8 @@ "proxy-from-env": ["proxy-from-env@1.1.0", "", {}, "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="], + "pump": ["pump@3.0.3", "", { "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" } }, "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA=="], + "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="], "punycode.js": ["punycode.js@2.3.1", "", {}, "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA=="], @@ -2663,6 +2708,8 @@ "reusify": ["reusify@1.1.0", "", {}, "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], + "rfc4648": ["rfc4648@1.5.4", "", {}, "sha512-rRg/6Lb+IGfJqO05HZkN50UtY7K/JhxJag1kP23+zyMfrvoB0B7RWv06MbOzoc79RgCdNTiUaNsTT1AJZ7Z+cg=="], + "rfdc": ["rfdc@1.4.1", "", {}, "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA=="], "rimraf": ["rimraf@5.0.10", "", { "dependencies": { "glob": "^10.3.7" }, "bin": { "rimraf": "dist/esm/bin.mjs" } }, "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ=="], @@ -2775,12 +2822,16 @@ "stop-iteration-iterator": ["stop-iteration-iterator@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "internal-slot": "^1.1.0" } }, "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ=="], + "stream-buffers": ["stream-buffers@3.0.3", "", {}, "sha512-pqMqwQCso0PBJt2PQmDO0cFj0lyqmiwOMiMSkVtRokl7e+ZTRYgDHKnuZNbqjiJXgsg4nuqtD/zxuo9KqTp0Yw=="], + "stream-chain": ["stream-chain@2.2.5", "", {}, "sha512-1TJmBx6aSWqZ4tx7aTpBDXK0/e2hhcNSTV8+CbFJtDjbb+I1mZ8lHit0Grw9GRT+6JbIrrDd8esncgBi8aBXGA=="], "stream-json": ["stream-json@1.9.1", "", { "dependencies": { "stream-chain": "^2.2.5" } }, "sha512-uWkjJ+2Nt/LO9Z/JyKZbMusL8Dkh97uUBTv3AJQ74y07lVahLY4eEFsPsE97pxYBwr8nnjMAIch5eqI0gPShyw=="], "streamsearch": ["streamsearch@1.1.0", "", {}, 
"sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg=="], + "streamx": ["streamx@2.23.0", "", { "dependencies": { "events-universal": "^1.0.0", "fast-fifo": "^1.3.2", "text-decoder": "^1.1.0" } }, "sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg=="], + "strict-uri-encode": ["strict-uri-encode@2.0.0", "", {}, "sha512-QwiXZgpRcKkhTj2Scnn++4PKtWsH0kpzZ62L2R6c/LUVYv7hVnZqcg2+sMuT6R7Jusu1vviK/MFsu6kNJfWlEQ=="], "string-argv": ["string-argv@0.3.2", "", {}, "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q=="], @@ -2849,10 +2900,16 @@ "tapable": ["tapable@2.3.0", "", {}, "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg=="], + "tar-fs": ["tar-fs@3.1.1", "", { "dependencies": { "pump": "^3.0.0", "tar-stream": "^3.1.5" }, "optionalDependencies": { "bare-fs": "^4.0.1", "bare-path": "^3.0.0" } }, "sha512-LZA0oaPOc2fVo82Txf3gw+AkEd38szODlptMYejQUhndHMLQ9M059uXR+AfS7DNo0NpINvSqDsvyaCrBVkptWg=="], + + "tar-stream": ["tar-stream@3.1.7", "", { "dependencies": { "b4a": "^1.6.4", "fast-fifo": "^1.2.0", "streamx": "^2.15.0" } }, "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ=="], + "terser": ["terser@5.46.0", "", { "dependencies": { "@jridgewell/source-map": "^0.3.3", "acorn": "^8.15.0", "commander": "^2.20.0", "source-map-support": "~0.5.20" }, "bin": { "terser": "bin/terser" } }, "sha512-jTwoImyr/QbOWFFso3YoU3ik0jBBDJ6JTOQiy/J2YxVJdZCc+5u7skhNwiOR3FQIygFqVUPHl7qbbxtjW2K3Qg=="], "terser-webpack-plugin": ["terser-webpack-plugin@5.3.16", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.25", "jest-worker": "^27.4.5", "schema-utils": "^4.3.0", "serialize-javascript": "^6.0.2", "terser": "^5.31.1" }, "peerDependencies": { "webpack": "^5.1.0" } }, "sha512-h9oBFCWrq78NyWWVcSwZarJkZ01c2AyGrzs1crmHZO3QUg9D61Wu4NPjBy69n7JqylFF5y+CsUZYmYEIZ3mR+Q=="], + "text-decoder": ["text-decoder@1.2.3", "", { "dependencies": { "b4a": "^1.6.4" } }, "sha512-3/o9z3X0X0fTupwsYvR03pJ/DjWuqqrfwBgTQzdWDiQSm9KitAyz/9WqsT2JQW7KV2m+bC2ol/zqpW37NHxLaA=="], + "thenify": ["thenify@3.3.1", "", { "dependencies": { "any-promise": "^1.0.0" } }, "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw=="], "thenify-all": ["thenify-all@1.6.0", "", { "dependencies": { "thenify": ">= 3.1.0 < 4" } }, "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA=="], @@ -3245,6 +3302,8 @@ "@types/express-serve-static-core/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], + "@types/node-fetch/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], + "@types/node-forge/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], "@types/pg/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], @@ -3255,6 +3314,8 @@ "@types/serve-static/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": 
"~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], + "@types/stream-buffers/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], + "@types/superagent/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], "@types/ws/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], diff --git a/packages/component-sdk/src/runner.ts b/packages/component-sdk/src/runner.ts index dd366bbd..828af2b5 100644 --- a/packages/component-sdk/src/runner.ts +++ b/packages/component-sdk/src/runner.ts @@ -516,6 +516,29 @@ async function runDockerWithPty( }); } +/** + * Override hook for the docker runner. When set, all `kind: 'docker'` executions + * are routed through this function instead of the built-in runComponentInDocker. + * + * Used by the worker to plug in a K8s Job runner at startup: + * setDockerRunnerOverride(runComponentInK8sJob) + */ +type DockerRunnerOverrideFn = ( + runner: DockerRunnerConfig, + params: I, + context: ExecutionContext, +) => Promise; + +let dockerRunnerOverride: DockerRunnerOverrideFn | null = null; + +export function setDockerRunnerOverride(fn: DockerRunnerOverrideFn): void { + dockerRunnerOverride = fn; +} + +export function clearDockerRunnerOverride(): void { + dockerRunnerOverride = null; +} + export async function runComponentWithRunner( runner: RunnerConfig, execute: (params: I, context: ExecutionContext) => Promise, @@ -526,6 +549,9 @@ export async function runComponentWithRunner( case 'inline': return runComponentInline(execute, params, context); case 'docker': + if (dockerRunnerOverride) { + return dockerRunnerOverride(runner, params, context); + } return runComponentInDocker(runner, params, context); case 'remote': context.logger.info(`[Runner] remote execution stub for ${runner.endpoint}`); diff --git a/worker/package.json b/worker/package.json index 191d1361..055576be 100644 --- a/worker/package.json +++ b/worker/package.json @@ -24,7 +24,8 @@ "@ai-sdk/openai": "^3.0.18", "@aws-sdk/client-s3": "^3.975.0", "@googleapis/admin": "^29.0.0", - "@grpc/grpc-js": "^1.14.3", + "@grpc/grpc-js": "^1.14.3", + "@kubernetes/client-node": "^1.4.0", "@modelcontextprotocol/sdk": "^1.25.1", "@okta/okta-sdk-nodejs": "^7.3.0", "@shipsec/component-sdk": "*", diff --git a/worker/src/temporal/workers/dev.worker.ts b/worker/src/temporal/workers/dev.worker.ts index 7b75bbe5..e8ed03b8 100644 --- a/worker/src/temporal/workers/dev.worker.ts +++ b/worker/src/temporal/workers/dev.worker.ts @@ -230,6 +230,14 @@ async function main() { console.log(`✅ Service adapters initialized`); + // Register K8s runner override if EXECUTION_MODE=k8s + if (process.env.EXECUTION_MODE === 'k8s') { + const { runComponentInK8sJob } = await import('../../utils/k8s-runner'); + const { setDockerRunnerOverride } = await import('@shipsec/component-sdk'); + setDockerRunnerOverride(runComponentInK8sJob); + console.log('[Worker] K8s execution mode enabled — docker runner overridden with K8s Jobs'); + } + console.log(`🏗️ Creating Temporal worker...`); console.log( ` - Activities: ${Object.keys({ diff --git a/worker/src/utils/index.ts b/worker/src/utils/index.ts 
diff --git a/worker/src/utils/index.ts b/worker/src/utils/index.ts
index 4afa2781..909c6e94 100644
--- a/worker/src/utils/index.ts
+++ b/worker/src/utils/index.ts
@@ -2,4 +2,9 @@
  * Utility exports for worker components
  */
-export { IsolatedContainerVolume, cleanupOrphanedVolumes } from './isolated-volume';
+export {
+  IsolatedContainerVolume,
+  cleanupOrphanedVolumes,
+  createIsolatedVolume,
+} from './isolated-volume';
+export { IsolatedK8sVolume } from './k8s-volume';
diff --git a/worker/src/utils/isolated-volume.ts b/worker/src/utils/isolated-volume.ts
index ea630470..50400b57 100644
--- a/worker/src/utils/isolated-volume.ts
+++ b/worker/src/utils/isolated-volume.ts
@@ -2,6 +2,7 @@ import { spawn } from 'child_process';
 import { promisify } from 'util';
 import { exec as execCallback } from 'child_process';
 import { ValidationError, ConfigurationError, ContainerError } from '@shipsec/component-sdk';
+import { IsolatedK8sVolume } from './k8s-volume';
 
 const exec = promisify(execCallback);
 
@@ -505,6 +506,9 @@ export class IsolatedContainerVolume {
  * ```
  */
 export async function cleanupOrphanedVolumes(olderThanHours = 24): Promise<number> {
+  // In K8s mode volumes are ConfigMaps — no Docker daemon to query
+  if (process.env.EXECUTION_MODE === 'k8s') return 0;
+
   try {
     const { stdout } = await exec(
       'docker volume ls --filter "label=studio.managed=true" --format "{{.Name}}|||{{.CreatedAt}}"',
     );
@@ -542,3 +546,20 @@ export async function cleanupOrphanedVolumes(olderThanHours = 24): Promise<number> {
diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts
new file mode 100644
--- /dev/null
+++ b/worker/src/utils/k8s-runner.ts
@@ -0,0 +1,692 @@
+interface BuildJobResult {
+  job: k8s.V1Job;
+  writableVolumeMappings: Map<string, string>; // mountPath → configMapName
+}
+
+// Lazy-init shared K8s clients
+let _kc: k8s.KubeConfig | null = null;
+let _batchApi: k8s.BatchV1Api | null = null;
+let _coreApi: k8s.CoreV1Api | null = null;
+
+function getKubeConfig(): k8s.KubeConfig {
+  if (!_kc) {
+    _kc = new k8s.KubeConfig();
+    _kc.loadFromCluster(); // uses in-cluster SA token
+  }
+  return _kc;
+}
+
+function getBatchApi(): k8s.BatchV1Api {
+  if (!_batchApi) _batchApi = getKubeConfig().makeApiClient(k8s.BatchV1Api);
+  return _batchApi;
+}
+
+function getCoreApi(): k8s.CoreV1Api {
+  if (!_coreApi) _coreApi = getKubeConfig().makeApiClient(k8s.CoreV1Api);
+  return _coreApi;
+}
+
+function getJobNamespace(): string {
+  return process.env.K8S_JOB_NAMESPACE || 'shipsec-workloads';
+}
+
+function sanitizeName(raw: string): string {
+  // K8s names: lowercase, alphanumeric + hyphens, max 63 chars
+  return raw
+    .toLowerCase()
+    .replace(/[^a-z0-9-]/g, '-')
+    .replace(/-+/g, '-')
+    .replace(/^-|-$/g, '')
+    .slice(0, 53); // leave room for random suffix
+}
+
+function generateJobName(context: ExecutionContext, image: string): string {
+  const imgShort = sanitizeName(image.split('/').pop()?.split(':')[0] || 'job');
+  const runShort = sanitizeName(context.runId).slice(0, 8);
+  const rand = Math.random().toString(36).slice(2, 8);
+  return `ss-${imgShort}-${runShort}-${rand}`;
+}
+
+/**
+ * Shell snippet that captures files from writable volume mounts.
+ * Reads $SHIPSEC_WRITABLE_MOUNTS (space-separated paths) and emits
+ * each file as base64 between markers so the worker can parse them
+ * from pod logs and write them back to their backing ConfigMaps.
+ */
+const VOLUME_CAPTURE_SCRIPT = [
+  `echo '${VOLUME_DELIMITER}'`,
+  'for __mp in $SHIPSEC_WRITABLE_MOUNTS; do',
+  '  find "$__mp" -type f 2>/dev/null | while IFS= read -r __f; do',
+  '    __rel="${__f#$__mp/}"',
+  '    echo "___FILE_START___:$__mp:$__rel"',
+  '    base64 "$__f" 2>/dev/null || true',
+  '    echo "___FILE_END___"',
+  '  done',
+  'done',
+].join('\n'); // joined with newlines — a '; ' join would yield 'do;', a shell syntax error
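For reference, once interpolated the capture snippet expands to roughly the following script. This is a readability rendering inside a TS string, not generated output; `<VOLUME_DELIMITER>` stands in for the real delimiter constant, which is defined near the top of k8s-runner.ts and whose literal value is not shown here:

```ts
// What VOLUME_CAPTURE_SCRIPT expands to — the ___FILE_START___/___FILE_END___
// markers are the parse contract consumed later by extractVolumeDataFromLogs.
const renderedCaptureScript = `
echo '<VOLUME_DELIMITER>'
for __mp in $SHIPSEC_WRITABLE_MOUNTS; do
  find "$__mp" -type f 2>/dev/null | while IFS= read -r __f; do
    __rel="\${__f#$__mp/}"
    echo "___FILE_START___:$__mp:$__rel"
    base64 "$__f" 2>/dev/null || true
    echo "___FILE_END___"
  done
done
`;
```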
+
+/**
+ * Build the command wrapper that emits the output file to stdout.
+ *
+ * For images with a shell: wraps original command so that after it exits,
+ * the output file is printed to stdout with a delimiter prefix.
+ * If writable volumes exist, also captures their contents as base64.
+ *
+ * For images without a shell (distroless): returns original command as-is,
+ * relying on stdout-based output fallback.
+ */
+function wrapCommandForOutput(runner: DockerRunnerConfig): { command: string[]; args: string[] } {
+  const { entrypoint, command } = runner;
+
+  // Volume capture suffix — only emits data when SHIPSEC_WRITABLE_MOUNTS is set
+  const volCapture = `; if [ -n "$SHIPSEC_WRITABLE_MOUNTS" ]; then ${VOLUME_CAPTURE_SCRIPT}; fi`;
+
+  const isShellEntrypoint =
+    entrypoint === 'sh' ||
+    entrypoint === 'bash' ||
+    entrypoint === '/bin/sh' ||
+    entrypoint === '/bin/bash';
+
+  if (isShellEntrypoint && command.length >= 2 && command[0] === '-c') {
+    // Shell wrapper pattern: entrypoint=sh, command=['-c', 'binary "$@"', '--', ...dynamicArgs]
+    const shellScript = command[1];
+    const dynamicArgsMatch = shellScript.match(/^(\S+)\s+"\$@"$/);
+
+    if (dynamicArgsMatch) {
+      // Dynamic args pattern for distroless images (e.g., 'subfinder "$@"')
+      // These images don't have sh — use their default ENTRYPOINT directly.
+      // The dynamic args follow after '--' in the command array.
+      const dashDashIdx = command.indexOf('--');
+      const dynamicArgs = dashDashIdx >= 0 ? command.slice(dashDashIdx + 1) : [];
+      // Return empty command to use image's ENTRYPOINT, pass dynamic args directly
+      return { command: [], args: dynamicArgs };
+    }
+
+    // Regular shell script — wrap with output capture
+    const userScript = command.slice(1).join(' ');
+    const wrapped = `${userScript}; __exit=$?; echo '${OUTPUT_DELIMITER}'; cat ${CONTAINER_OUTPUT_PATH}/${OUTPUT_FILENAME} 2>/dev/null || echo '{}'${volCapture}; exit $__exit`;
+    return { command: [entrypoint!], args: ['-c', wrapped] };
+  }
+
+  if (isShellEntrypoint) {
+    return { command: [entrypoint!], args: command };
+  }
+
+  // For non-shell entrypoints (e.g., 'httpx', 'nuclei', binary entrypoints):
+  // Use the entrypoint directly — the image may be distroless (no /bin/sh).
+  // Output is captured from stdout via parseOutputFromLogs fallback.
+  if (entrypoint) {
+    return { command: [entrypoint], args: command };
+  }
+
+  if (command.length > 0) {
+    return { command: [command[0]], args: command.slice(1) };
+  }
+
+  return { command: [], args: [] };
+}
+
+/**
+ * Create a ConfigMap containing the serialized input data.
+ */
+async function createInputConfigMap(
+  name: string,
+  namespace: string,
+  inputData: unknown,
+): Promise<void> {
+  const core = getCoreApi();
+  const body: k8s.V1ConfigMap = {
+    metadata: {
+      name,
+      namespace,
+      labels: {
+        'app.kubernetes.io/managed-by': 'shipsec-worker',
+        'shipsec.ai/purpose': 'job-input',
+      },
+    },
+    data: {
+      'input.json': JSON.stringify(inputData),
+    },
+  };
+  await core.createNamespacedConfigMap({ namespace, body });
+}
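To make the wrapper logic above concrete, here is what its two shell branches produce for hypothetical runner configs (the input values are illustrative, and `<OUTPUT_DELIMITER>`/`<CONTAINER_OUTPUT_PATH>`/`<OUTPUT_FILENAME>` stand in for the module's constants):

```ts
// 1) Regular shell script — the output-capture suffix is appended:
const shellRunner = {
  entrypoint: 'sh',
  command: ['-c', 'httpx -l /inputs/targets.txt -json > <CONTAINER_OUTPUT_PATH>/<OUTPUT_FILENAME>'],
};
// wrapCommandForOutput(shellRunner) →
// { command: ['sh'],
//   args: ['-c', "<script>; __exit=$?; echo '<OUTPUT_DELIMITER>'; cat <output file> ...; exit $__exit"] }

// 2) Dynamic-args pattern for distroless images — the shell is dropped entirely:
const distrolessRunner = {
  entrypoint: 'sh',
  command: ['-c', 'subfinder "$@"', '--', '-d', 'example.com', '-json'],
};
// wrapCommandForOutput(distrolessRunner) →
// { command: [], args: ['-d', 'example.com', '-json'] }
// The empty command falls through to the image's own ENTRYPOINT.
```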
+
+/**
+ * Build the K8s Job spec from a DockerRunnerConfig.
+ */
+function buildJobSpec(
+  jobName: string,
+  namespace: string,
+  configMapName: string,
+  runner: DockerRunnerConfig,
+  context: ExecutionContext,
+): BuildJobResult {
+  const { command, args } = wrapCommandForOutput(runner);
+  const timeoutSeconds = runner.timeoutSeconds || 300;
+
+  // Track writable ConfigMap volumes for post-execution data capture
+  const writableVolumeMappings = new Map<string, string>();
+
+  // Build env vars
+  const envVars: k8s.V1EnvVar[] = [
+    { name: 'SHIPSEC_INPUT_PATH', value: '/shipsec-input/input.json' },
+    { name: 'SHIPSEC_OUTPUT_PATH', value: `${CONTAINER_OUTPUT_PATH}/${OUTPUT_FILENAME}` },
+  ];
+  if (runner.env) {
+    for (const [key, value] of Object.entries(runner.env)) {
+      // Override HOME=/root for distroless images — /root is not writable
+      if (key === 'HOME' && value === '/root') {
+        envVars.push({ name: key, value: '/tmp' });
+      } else {
+        envVars.push({ name: key, value });
+      }
+    }
+  }
+
+  // Build volume mounts
+  const volumeMounts: k8s.V1VolumeMount[] = [
+    { name: 'input', mountPath: '/shipsec-input', readOnly: true },
+    { name: 'output', mountPath: CONTAINER_OUTPUT_PATH },
+  ];
+
+  const volumes: k8s.V1Volume[] = [
+    {
+      name: 'input',
+      configMap: { name: configMapName },
+    },
+    {
+      name: 'output',
+      emptyDir: {},
+    },
+  ];
+
+  // Handle additional volumes (from IsolatedK8sVolume)
+  if (runner.volumes) {
+    for (let i = 0; i < runner.volumes.length; i++) {
+      const vol = runner.volumes[i];
+      if (!vol || !vol.source || !vol.target) continue;
+
+      const volName = `extra-vol-${i}`;
+
+      if (vol.source.startsWith('configmap:') && (vol.readOnly ?? true)) {
+        // ConfigMap-backed volume from IsolatedK8sVolume (read-only)
+        const cmName = vol.source.replace('configmap:', '');
+        volumes.push({
+          name: volName,
+          configMap: { name: cmName },
+        });
+      } else if (vol.source.startsWith('configmap:') && !(vol.readOnly ?? true)) {
+        const cmName = vol.source.replace('configmap:', '');
+        // Use emptyDir for the actual mount (ConfigMaps are read-only in K8s)
+        volumes.push({
+          name: volName,
+          emptyDir: {},
+        });
+        // Track for post-execution data capture
+        writableVolumeMappings.set(vol.target, cmName);
+      } else {
+        // Treat as emptyDir (can't use host paths in K8s Jobs)
+        volumes.push({
+          name: volName,
+          emptyDir: {},
+        });
+      }
+
+      volumeMounts.push({
+        name: volName,
+        mountPath: vol.target,
+        readOnly: vol.readOnly ?? false,
+      });
+    }
+  }
+
+  // Add env var for writable mount paths so the shell wrapper can capture files
+  if (writableVolumeMappings.size > 0) {
+    envVars.push({
+      name: 'SHIPSEC_WRITABLE_MOUNTS',
+      value: Array.from(writableVolumeMappings.keys()).join(' '),
+    });
+  }
+
+  const job: k8s.V1Job = {
+    metadata: {
+      name: jobName,
+      namespace,
+      labels: {
+        'app.kubernetes.io/managed-by': 'shipsec-worker',
+        'shipsec.ai/run-id': sanitizeName(context.runId),
+        'shipsec.ai/component-ref': sanitizeName(context.componentRef),
+      },
+    },
+    spec: {
+      backoffLimit: 0, // no retries — Temporal handles retry logic
+      activeDeadlineSeconds: timeoutSeconds,
+      ttlSecondsAfterFinished: 120, // auto-cleanup after 2 min
+      template: {
+        metadata: {
+          labels: {
+            'app.kubernetes.io/managed-by': 'shipsec-worker',
+            'shipsec.ai/run-id': sanitizeName(context.runId),
+          },
+        },
+        spec: {
+          restartPolicy: 'Never',
+          ...(process.env.K8S_IMAGE_PULL_SECRET
+            ? { imagePullSecrets: [{ name: process.env.K8S_IMAGE_PULL_SECRET }] }
+            : {}),
+          containers: [
+            {
+              name: 'component',
+              image: runner.image,
+              imagePullPolicy:
+                (process.env.K8S_JOB_IMAGE_PULL_POLICY as 'Always' | 'IfNotPresent' | 'Never') ||
+                'IfNotPresent',
+              command: command.length > 0 ? command : undefined,
+              args: args.length > 0 ? args : undefined,
+              env: envVars,
+              volumeMounts,
+              resources: {
+                requests: { cpu: '100m', memory: '128Mi' },
+                limits: { cpu: '1000m', memory: '2Gi' },
+              },
+            },
+          ],
+          volumes,
+        },
+      },
+    },
+  };
+
+  return { job, writableVolumeMappings };
+}
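The three-way volume mapping in buildJobSpec is easiest to see with hypothetical `runner.volumes` entries (names and paths below are illustrative):

```ts
const exampleVolumes = [
  // Read-only ConfigMap source → mounted directly from the ConfigMap:
  { source: 'configmap:vol-acme-run1-1700000000', target: '/inputs', readOnly: true },

  // Writable ConfigMap source → ConfigMaps can't be mounted writable, so the
  // pod gets an emptyDir at /outputs; after the Job finishes, files captured
  // from the logs are written back to 'vol-acme-run1-1700000001'.
  { source: 'configmap:vol-acme-run1-1700000001', target: '/outputs', readOnly: false },

  // Anything else (e.g. a host path carried over from Docker mode) → a plain
  // emptyDir, since host mounts are not permitted for these Jobs:
  { source: '/var/tmp/scratch', target: '/scratch', readOnly: false },
];
```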
+
+/**
+ * Wait for a Job to complete (or fail/timeout).
+ * Returns the pod name for log retrieval.
+ */
+async function waitForJobCompletion(
+  jobName: string,
+  namespace: string,
+  timeoutMs: number,
+  context: ExecutionContext,
+): Promise<{ podName: string; succeeded: boolean }> {
+  const batch = getBatchApi();
+  const core = getCoreApi();
+  const deadline = Date.now() + timeoutMs;
+
+  // Find the pod created by this Job
+  let podName = '';
+  while (!podName && Date.now() < deadline) {
+    const pods = await core.listNamespacedPod({
+      namespace,
+      labelSelector: `job-name=${jobName}`,
+    });
+    if (pods.items.length > 0) {
+      podName = pods.items[0].metadata?.name || '';
+    }
+    if (!podName) {
+      await new Promise((r) => setTimeout(r, 1000));
+    }
+  }
+
+  if (!podName) {
+    throw new TimeoutError(`Timed out waiting for Job pod to appear: ${jobName}`, timeoutMs);
+  }
+
+  context.logger.info(`[K8sRunner] Job ${jobName} → pod ${podName}`);
+
+  // Stream logs in real-time while waiting
+  const logPromise = streamPodLogs(podName, namespace, context).catch((err) => {
+    context.logger.warn(`[K8sRunner] Log streaming error: ${err.message}`);
+  });
+
+  // Poll Job status until done
+  while (Date.now() < deadline) {
+    const job = await batch.readNamespacedJob({ name: jobName, namespace });
+    const status = job.status;
+
+    if (status?.succeeded && status.succeeded > 0) {
+      await logPromise;
+      return { podName, succeeded: true };
+    }
+    if (status?.failed && status.failed > 0) {
+      await logPromise;
+      return { podName, succeeded: false };
+    }
+
+    await new Promise((r) => setTimeout(r, 2000));
+  }
+
+  throw new TimeoutError(`Job ${jobName} timed out after ${timeoutMs / 1000}s`, timeoutMs, {
+    details: { jobName, podName },
+  });
+}
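Note the division of labor implied by `backoffLimit: 0`: the Job never retries itself, because the Temporal activity that invokes the runner owns retry semantics. A sketch of what the workflow-side policy might look like, using the standard @temporalio/workflow API (the activity name and import path are hypothetical, and the values are illustrative):

```ts
import { proxyActivities } from '@temporalio/workflow';
import type * as activities from '../activities'; // hypothetical path

const { runComponent } = proxyActivities<typeof activities>({
  // should comfortably exceed the Job's activeDeadlineSeconds
  startToCloseTimeout: '10 minutes',
  retry: { maximumAttempts: 3, backoffCoefficient: 2 },
});
```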
+
+/**
+ * Stream pod logs to the context logger and terminal collector.
+ * Uses the K8s Log API with a writable stream to capture output in real-time.
+ */
+async function streamPodLogs(
+  podName: string,
+  namespace: string,
+  context: ExecutionContext,
+): Promise<void> {
+  const kc = getKubeConfig();
+  const log = new k8s.Log(kc);
+
+  const { PassThrough } = await import('stream');
+  const logStream = new PassThrough();
+
+  logStream.on('data', (chunk: Buffer) => {
+    const text = chunk.toString();
+    // Feed to terminal collector for real-time UI streaming
+    if (context.terminalCollector) {
+      context.terminalCollector({
+        runId: context.runId,
+        nodeRef: context.componentRef,
+        stream: 'stdout',
+        chunkIndex: 0,
+        payload: text,
+        recordedAt: new Date().toISOString(),
+        deltaMs: 0,
+        origin: 'k8s-job',
+      });
+    }
+    // Also feed to log collector
+    if (context.logCollector) {
+      context.logCollector({
+        runId: context.runId,
+        nodeRef: context.componentRef,
+        stream: 'stdout',
+        level: 'info',
+        message: text,
+        timestamp: new Date().toISOString(),
+      });
+    }
+  });
+
+  try {
+    await log.log(namespace, podName, 'component', logStream, {
+      follow: true,
+      pretty: false,
+      timestamps: false,
+    });
+  } catch (err) {
+    context.logger.warn(`[K8sRunner] Log streaming failed: ${(err as Error).message}`);
+  }
+}
+
+/**
+ * Read final pod logs after completion.
+ */
+async function readPodLogs(podName: string, namespace: string): Promise<string> {
+  const core = getCoreApi();
+  const logResponse = await core.readNamespacedPodLog({
+    name: podName,
+    namespace,
+    container: 'component',
+  });
+  // The response can be a string directly
+  return typeof logResponse === 'string' ? logResponse : String(logResponse);
+}
+
+/**
+ * Parse the volume data section from pod logs.
+ * Returns a nested map: mountPath -> (relativeFilePath -> base64Content).
+ */
+function extractVolumeDataFromLogs(logs: string): Map<string, Map<string, string>> {
+  const FILE_START = '___FILE_START___:';
+  const FILE_END = '___FILE_END___';
+
+  const result = new Map<string, Map<string, string>>();
+
+  const volIdx = logs.lastIndexOf(VOLUME_DELIMITER);
+  if (volIdx === -1) return result;
+
+  const volSection = logs.slice(volIdx + VOLUME_DELIMITER.length);
+  const lines = volSection.split('\n');
+
+  let currentMount = '';
+  let currentFile = '';
+  let currentData: string[] = [];
+  let inFile = false;
+
+  for (const line of lines) {
+    if (line.startsWith(FILE_START)) {
+      // Parse mount path and relative path
+      const rest = line.slice(FILE_START.length);
+      const firstColon = rest.indexOf(':');
+      if (firstColon === -1) continue;
+      currentMount = rest.slice(0, firstColon);
+      currentFile = rest.slice(firstColon + 1);
+      currentData = [];
+      inFile = true;
+    } else if (line.trim() === FILE_END && inFile) {
+      // Save the file
+      if (!result.has(currentMount)) {
+        result.set(currentMount, new Map());
+      }
+      result.get(currentMount)!.set(currentFile, currentData.join('\n'));
+      inFile = false;
+    } else if (inFile) {
+      currentData.push(line);
+    }
+  }
+
+  return result;
+}
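A worked round-trip for the parser above, with a hypothetical log payload (`<VOLUME_DELIMITER>` again stands in for the real constant):

```ts
const sampleLogs = [
  '{"ok":true}',                                   // component stdout
  '<VOLUME_DELIMITER>',
  '___FILE_START___:/outputs:report/summary.txt',
  'aGVsbG8=',                                      // base64("hello")
  '___FILE_END___',
].join('\n');

// extractVolumeDataFromLogs(sampleLogs) would yield:
// Map { '/outputs' => Map { 'report/summary.txt' => 'aGVsbG8=' } }
// i.e. mountPath → (relative path → base64 content), ready for write-back.
```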
+
+/**
+ * Write captured volume data back to their backing ConfigMaps.
+ * This allows volume.readFiles() to access output data after the pod terminates.
+ */
+async function writeBackVolumeData(
+  volumeData: Map<string, Map<string, string>>,
+  writableVolumeMappings: Map<string, string>,
+  namespace: string,
+  context: ExecutionContext,
+): Promise<void> {
+  const core = getCoreApi();
+
+  for (const [mountPath, files] of volumeData) {
+    const cmName = writableVolumeMappings.get(mountPath);
+    if (!cmName) continue;
+
+    const binaryData: Record<string, string> = {};
+
+    for (const [relPath, base64Content] of files) {
+      // Flatten path separators same as IsolatedK8sVolume
+      const key = relPath.replace(/\//g, '__');
+      // Store as binaryData (base64) to handle any file type
+      binaryData[key] = base64Content;
+    }
+
+    try {
+      // Read existing ConfigMap and merge
+      const existing = await core.readNamespacedConfigMap({ name: cmName, namespace });
+      const body: k8s.V1ConfigMap = {
+        ...existing,
+        data: { ...(existing.data || {}) },
+        binaryData: { ...(existing.binaryData || {}), ...binaryData },
+      };
+      await core.replaceNamespacedConfigMap({ name: cmName, namespace, body });
+      context.logger.info(`[K8sRunner] Wrote back ${files.size} files to ConfigMap ${cmName}`);
+    } catch (err) {
+      context.logger.warn(
+        `[K8sRunner] Failed to write back volume data to ${cmName}: ${(err as Error).message}`,
+      );
+    }
+  }
+}
+
+/**
+ * Parse component output from pod logs.
+ * Looks for the OUTPUT_DELIMITER marker — everything after it is the JSON output.
+ * Falls back to parsing the full stdout as JSON.
+ */
+function parseOutputFromLogs<O>(logs: string, context: ExecutionContext): O {
+  // Strip volume data section if present (comes after output)
+  let cleanLogs = logs;
+  const volIdx = cleanLogs.lastIndexOf(VOLUME_DELIMITER);
+  if (volIdx !== -1) {
+    cleanLogs = cleanLogs.slice(0, volIdx);
+  }
+
+  // Look for the output delimiter
+  const delimiterIdx = cleanLogs.lastIndexOf(OUTPUT_DELIMITER);
+  if (delimiterIdx !== -1) {
+    const outputStr = cleanLogs.slice(delimiterIdx + OUTPUT_DELIMITER.length).trim();
+    if (outputStr) {
+      try {
+        return JSON.parse(outputStr) as O;
+      } catch (e) {
+        context.logger.warn(
+          `[K8sRunner] Failed to parse delimited output: ${(e as Error).message}`,
+        );
+      }
+    }
+  }
+
+  // Fallback: try parsing the last line as JSON
+  const lines = cleanLogs.trim().split('\n');
+  for (let i = lines.length - 1; i >= 0; i--) {
+    const line = lines[i].trim();
+    if (line.startsWith('{') || line.startsWith('[')) {
+      try {
+        return JSON.parse(line) as O;
+      } catch {
+        continue;
+      }
+    }
+  }
+
+  // Fallback: return raw logs as string output
+  context.logger.warn('[K8sRunner] No structured output found, returning raw stdout');
+  return cleanLogs.trim() as unknown as O;
+}
+
+/**
+ * Clean up resources created for a Job execution.
+ */
+async function cleanup(
+  jobName: string,
+  configMapName: string,
+  namespace: string,
+  context: ExecutionContext,
+): Promise<void> {
+  const batch = getBatchApi();
+  const core = getCoreApi();
+
+  try {
+    await batch.deleteNamespacedJob({
+      name: jobName,
+      namespace,
+      body: { propagationPolicy: 'Background' },
+    });
+  } catch (err) {
+    context.logger.warn(`[K8sRunner] Failed to delete Job ${jobName}: ${(err as Error).message}`);
+  }
+
+  try {
+    await core.deleteNamespacedConfigMap({ name: configMapName, namespace });
+  } catch (err) {
+    context.logger.warn(
+      `[K8sRunner] Failed to delete ConfigMap ${configMapName}: ${(err as Error).message}`,
+    );
+  }
+}
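The fallback order in parseOutputFromLogs matters for distroless images, which never run the shell wrapper and therefore never emit the delimiter. Hypothetical payloads illustrating the three paths:

```ts
// 1) Delimited output (shell-wrapped images):
const delimited = 'tool noise...\n<OUTPUT_DELIMITER>\n{"hosts":3}'; // → { hosts: 3 }

// 2) No delimiter, but the last line parses as JSON (e.g. a -json CLI):
const lastLineJson = 'scanning 3 targets...\n{"hosts":3}';          // → { hosts: 3 }

// 3) Neither — raw trimmed stdout is returned as the output value:
const rawOnly = 'plain text result';                                // → 'plain text result'
```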
+
+/**
+ * Execute a component as a Kubernetes Job.
+ *
+ * Drop-in replacement for runComponentInDocker — same signature,
+ * registered via setDockerRunnerOverride() at worker startup.
+ */
+export async function runComponentInK8sJob<I, O>(
+  runner: DockerRunnerConfig,
+  params: I,
+  context: ExecutionContext,
+): Promise<O> {
+  const namespace = getJobNamespace();
+  const jobName = generateJobName(context, runner.image);
+  const configMapName = `${jobName}-input`;
+  const timeoutMs = (runner.timeoutSeconds || 300) * 1000;
+
+  context.logger.info(
+    `[K8sRunner] Creating Job ${jobName} in ${namespace} (image: ${runner.image})`,
+  );
+  context.emitProgress(`Launching K8s Job: ${runner.image}`);
+
+  try {
+    // 1. Create input ConfigMap
+    await createInputConfigMap(configMapName, namespace, params);
+    context.logger.info(`[K8sRunner] Created ConfigMap ${configMapName}`);
+
+    // 2. Create Job
+    const { job: jobSpec, writableVolumeMappings } = buildJobSpec(
+      jobName,
+      namespace,
+      configMapName,
+      runner,
+      context,
+    );
+    await getBatchApi().createNamespacedJob({ namespace, body: jobSpec });
+    context.logger.info(`[K8sRunner] Created Job ${jobName}`);
+
+    // 3. Wait for completion
+    const { podName, succeeded } = await waitForJobCompletion(
+      jobName,
+      namespace,
+      timeoutMs,
+      context,
+    );
+
+    // 4. Read final logs
+    const logs = await readPodLogs(podName, namespace);
+
+    if (!succeeded) {
+      context.logger.error(`[K8sRunner] Job ${jobName} failed`);
+      throw new ContainerError(`K8s Job failed: ${jobName}`, {
+        details: { jobName, podName, logs: logs.slice(-500) },
+      });
+    }
+
+    context.logger.info(`[K8sRunner] Job ${jobName} completed successfully`);
+    context.emitProgress('K8s Job completed');
+
+    // 4.5. Write back writable volume data to ConfigMaps
+    // Must happen BEFORE cleanup so volume.readFiles() can access updated ConfigMaps
+    if (writableVolumeMappings.size > 0) {
+      const volumeData = extractVolumeDataFromLogs(logs);
+      if (volumeData.size > 0) {
+        await writeBackVolumeData(volumeData, writableVolumeMappings, namespace, context);
+      }
+    }
+
+    // 5. Parse output
+    return parseOutputFromLogs<O>(logs, context);
+  } finally {
+    // 6. Cleanup
+    await cleanup(jobName, configMapName, namespace, context);
+  }
+}
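From the container's point of view, the contract is just two injected env vars. A sketch of how a component script inside the image might consume them (the default paths match the values the runner injects; everything else here is illustrative):

```ts
import { readFileSync, writeFileSync } from 'node:fs';

// Input: the params serialized into the input ConfigMap and mounted read-only.
const input = JSON.parse(
  readFileSync(process.env.SHIPSEC_INPUT_PATH ?? '/shipsec-input/input.json', 'utf-8'),
);

// ... do the component's actual work ...

// Output: written to the emptyDir mount; the shell wrapper then cats this file
// after OUTPUT_DELIMITER so parseOutputFromLogs can recover it from pod logs.
writeFileSync(process.env.SHIPSEC_OUTPUT_PATH!, JSON.stringify({ ok: true }));
```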
diff --git a/worker/src/utils/k8s-volume.ts b/worker/src/utils/k8s-volume.ts
new file mode 100644
index 00000000..248a7e3d
--- /dev/null
+++ b/worker/src/utils/k8s-volume.ts
@@ -0,0 +1,235 @@
+/**
+ * IsolatedK8sVolume — K8s-native replacement for IsolatedContainerVolume.
+ *
+ * Uses ConfigMaps instead of Docker named volumes. Same interface as
+ * IsolatedContainerVolume so components can swap transparently.
+ *
+ * Limits:
+ * - ConfigMap total size: 1 MiB (sufficient for target lists, configs, templates)
+ * - For binary data or large payloads, consider using a PVC-based approach
+ */
+import * as k8s from '@kubernetes/client-node';
+import { ValidationError, ConfigurationError, ContainerError } from '@shipsec/component-sdk';
+
+let _kc: k8s.KubeConfig | null = null;
+let _coreApi: k8s.CoreV1Api | null = null;
+
+function getKubeConfig(): k8s.KubeConfig {
+  if (!_kc) {
+    _kc = new k8s.KubeConfig();
+    _kc.loadFromCluster();
+  }
+  return _kc;
+}
+
+function getCoreApi(): k8s.CoreV1Api {
+  if (!_coreApi) _coreApi = getKubeConfig().makeApiClient(k8s.CoreV1Api);
+  return _coreApi;
+}
+
+function getNamespace(): string {
+  return process.env.K8S_JOB_NAMESPACE || 'shipsec-workloads';
+}
+
+function sanitizeName(raw: string): string {
+  return raw
+    .toLowerCase()
+    .replace(/[^a-z0-9-]/g, '-')
+    .replace(/-+/g, '-')
+    .replace(/^-|-$/g, '')
+    .slice(0, 53);
+}
+
+export class IsolatedK8sVolume {
+  private configMapName?: string;
+  private isInitialized = false;
+  private namespace: string;
+
+  constructor(
+    private tenantId: string,
+    private runId: string,
+  ) {
+    if (!/^[a-zA-Z0-9_-]+$/.test(tenantId)) {
+      throw new ValidationError(
+        'Invalid tenant ID: must contain only alphanumeric characters, hyphens, and underscores',
+        {
+          fieldErrors: {
+            tenantId: ['must contain only alphanumeric characters, hyphens, and underscores'],
+          },
+        },
+      );
+    }
+    if (!/^[a-zA-Z0-9_-]+$/.test(runId)) {
+      throw new ValidationError(
+        'Invalid run ID: must contain only alphanumeric characters, hyphens, and underscores',
+        {
+          fieldErrors: {
+            runId: ['must contain only alphanumeric characters, hyphens, and underscores'],
+          },
+        },
+      );
+    }
+    this.namespace = getNamespace();
+  }
+
+  /**
+   * Creates a ConfigMap containing the provided files.
+   * Text files go in `data`, binary files go in `binaryData`.
+   */
+  async initialize(files: Record<string, string | Buffer>): Promise<string> {
+    if (this.isInitialized) {
+      throw new ConfigurationError('Volume already initialized', {
+        details: { configMapName: this.configMapName, tenantId: this.tenantId, runId: this.runId },
+      });
+    }
+
+    const timestamp = Date.now();
+    const tenantShort = sanitizeName(this.tenantId);
+    const runShort = sanitizeName(this.runId);
+    this.configMapName = `vol-${tenantShort}-${runShort}-${timestamp}`.slice(0, 63);
+
+    try {
+      const data: Record<string, string> = {};
+      const binaryData: Record<string, string> = {};
+
+      for (const [filename, content] of Object.entries(files)) {
+        this.validateFilename(filename);
+
+        // ConfigMap keys can't have slashes — flatten paths
+        const key = filename.replace(/\//g, '__');
+
+        if (typeof content === 'string') {
+          data[key] = content;
+        } else {
+          // Buffer → base64 for binaryData
+          binaryData[key] = content.toString('base64');
+        }
+      }
+
+      const body: k8s.V1ConfigMap = {
+        metadata: {
+          name: this.configMapName,
+          namespace: this.namespace,
+          labels: {
+            'app.kubernetes.io/managed-by': 'shipsec-worker',
+            'shipsec.ai/purpose': 'isolated-volume',
+            'shipsec.ai/tenant': tenantShort,
+            'shipsec.ai/run': runShort,
+          },
+        },
+        data: Object.keys(data).length > 0 ? data : undefined,
+        binaryData: Object.keys(binaryData).length > 0 ? binaryData : undefined,
+      };
+
+      await getCoreApi().createNamespacedConfigMap({
+        namespace: this.namespace,
+        body,
+      });
+
+      this.isInitialized = true;
+      return this.configMapName;
+    } catch (error) {
+      if (this.configMapName) {
+        await this.cleanup().catch(() => {});
+      }
+      throw new ContainerError(
+        `Failed to initialize K8s volume: ${error instanceof Error ? error.message : String(error)}`,
+        {
+          cause: error instanceof Error ? error : undefined,
+          details: { tenantId: this.tenantId, runId: this.runId },
+        },
+      );
+    }
+  }
+
+  private validateFilename(filename: string): void {
+    if (filename.includes('..') || filename.startsWith('/')) {
+      throw new ValidationError(`Invalid filename (path traversal): ${filename}`, {
+        fieldErrors: { filename: ['path traversal not allowed'] },
+      });
+    }
+    const safePattern = /^[a-zA-Z0-9._/-]+$/;
+    if (!safePattern.test(filename)) {
+      throw new ValidationError(`Invalid filename (contains unsafe characters): ${filename}`, {
+        fieldErrors: { filename: ['contains unsafe characters'] },
+      });
+    }
+  }
+
+  /**
+   * Read files from the ConfigMap.
+   */
+  async readFiles(filenames: string[]): Promise<Record<string, string>> {
+    if (!this.configMapName) {
+      throw new ConfigurationError('Volume not initialized');
+    }
+
+    const cm = await getCoreApi().readNamespacedConfigMap({
+      name: this.configMapName,
+      namespace: this.namespace,
+    });
+
+    const results: Record<string, string> = {};
+    for (const filename of filenames) {
+      const key = filename.replace(/\//g, '__');
+      if (cm.data?.[key]) {
+        results[filename] = cm.data[key];
+      } else if (cm.binaryData?.[key]) {
+        results[filename] = Buffer.from(cm.binaryData[key], 'base64').toString('utf-8');
+      }
+    }
+    return results;
+  }
+
+  /**
+   * Returns a bind mount string compatible with the K8s runner.
+   * Format: "configmap:<name>:<containerPath>:<ro|rw>"
+   */
+  getBindMount(containerPath = '/inputs', readOnly = true): string {
+    if (!this.configMapName) {
+      throw new ConfigurationError('Volume not initialized');
+    }
+    const mode = readOnly ? 'ro' : 'rw';
+    return `configmap:${this.configMapName}:${containerPath}:${mode}`;
+  }
+
+  /**
+   * Returns volume config for the runner. The K8s runner recognizes the
+   * "configmap:" prefix in source and mounts the ConfigMap accordingly.
+   */
+  getVolumeConfig(containerPath = '/inputs', readOnly = true) {
+    if (!this.configMapName) {
+      throw new ConfigurationError('Volume not initialized');
+    }
+    return {
+      source: `configmap:${this.configMapName}`,
+      target: containerPath,
+      readOnly,
+    };
+  }
+
+  /**
+   * Delete the ConfigMap.
+   */
+  async cleanup(): Promise<void> {
+    if (!this.configMapName) return;
+
+    try {
+      await getCoreApi().deleteNamespacedConfigMap({
+        name: this.configMapName,
+        namespace: this.namespace,
+      });
+    } catch (error) {
+      console.error(
+        `Failed to cleanup K8s volume ${this.configMapName}: ${error instanceof Error ? error.message : String(error)}`,
+      );
+    } finally {
+      this.isInitialized = false;
+      this.configMapName = undefined;
+    }
+  }
+
+  getVolumeName(): string | undefined {
+    return this.configMapName;
+  }
+}
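End to end, a component uses the class like this (a usage sketch mirroring the API above; tenant/run identifiers and file contents are illustrative):

```ts
const volume = new IsolatedK8sVolume('acme', 'run-123');
await volume.initialize({ 'targets.txt': 'example.com\nexample.org\n' });

// Handed to the K8s runner, which recognizes the 'configmap:' prefix:
const runnerVolumes = [volume.getVolumeConfig('/inputs', true)];
// → { source: 'configmap:vol-acme-run-123-…', target: '/inputs', readOnly: true }

// After the Job finishes (and any write-back has run):
const files = await volume.readFiles(['targets.txt']);
await volume.cleanup();
```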
From 62fbcdfa01f0f2e4e446f7f3e140ff39ba28122c Mon Sep 17 00:00:00 2001
From: betterclever
Date: Thu, 12 Feb 2026 02:34:14 +0400
Subject: [PATCH 12/36] feat(deploy): add RBAC and Helm config for K8s Job
 execution

- worker-rbac.yaml: ServiceAccount, Role, RoleBinding for Job/ConfigMap/Pod CRUD
- worker-deployment: mount SA token, add K8s env vars (namespace, pull secret, etc.)
- values.yaml/gke-managed.yaml: K8s execution config with ghcr-creds pull secret
- .prettierignore: exclude Helm templates (Go template syntax)

Co-Authored-By: Claude Opus 4.6
Signed-off-by: betterclever
Signed-off-by: betterclever
---
 .prettierignore                               |  3 ++
 .../shipsec/templates/worker-deployment.yaml  | 17 ++++++-
 .../helm/shipsec/templates/worker-rbac.yaml   | 47 +++++++++++++++++++
 deploy/helm/shipsec/values.yaml               | 23 +++++----
 deploy/helm/shipsec/values/gke-managed.yaml   | 26 ++++++----
 5 files changed, 97 insertions(+), 19 deletions(-)
 create mode 100644 deploy/helm/shipsec/templates/worker-rbac.yaml

diff --git a/.prettierignore b/.prettierignore
index 373eb18f..86242126 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -11,3 +11,6 @@ node_modules/
 
 # Generated files
 *.generated.ts
+
+# Helm templates (Go template syntax is not valid YAML)
+deploy/helm/*/templates/
diff --git a/deploy/helm/shipsec/templates/worker-deployment.yaml b/deploy/helm/shipsec/templates/worker-deployment.yaml
index b64dd275..750bab31 100644
--- a/deploy/helm/shipsec/templates/worker-deployment.yaml
+++ b/deploy/helm/shipsec/templates/worker-deployment.yaml
@@ -19,6 +19,9 @@ spec:
         {{- include "shipsec.labels" . | nindent 8 }}
         app.kubernetes.io/component: worker
     spec:
+      {{- if eq .Values.execution.mode "k8s" }}
+      serviceAccountName: shipsec-worker
+      {{- end }}
       containers:
         - name: worker
           image: "{{ .Values.worker.image.repository }}:{{ .Values.worker.image.tag }}"
@@ -39,7 +42,18 @@ spec:
               secretKeyRef:
                 name: {{ .Values.secrets.name }}
                 key: MINIO_SECRET_KEY
-          {{- if .Values.execution.workerDockerHost }}
+          {{- if eq .Values.execution.mode "k8s" }}
+          - name: EXECUTION_MODE
+            value: "k8s"
+          - name: K8S_JOB_NAMESPACE
+            value: {{ .Values.execution.k8s.jobNamespace | quote }}
+          - name: K8S_JOB_IMAGE_PULL_POLICY
+            value: {{ .Values.execution.k8s.imagePullPolicy | quote }}
+          {{- if .Values.execution.k8s.imagePullSecret }}
+          - name: K8S_IMAGE_PULL_SECRET
+            value: {{ .Values.execution.k8s.imagePullSecret | quote }}
+          {{- end }}
+          {{- else if .Values.execution.workerDockerHost }}
           - name: DOCKER_HOST
             value: {{ .Values.execution.workerDockerHost | quote }}
           {{- end }}
@@ -50,4 +64,3 @@ spec:
           resources:
             {{- toYaml .Values.worker.resources | nindent 10 }}
 {{- end }}
-
diff --git a/deploy/helm/shipsec/templates/worker-rbac.yaml b/deploy/helm/shipsec/templates/worker-rbac.yaml
new file mode 100644
index 00000000..ca9b9cc2
--- /dev/null
+++ b/deploy/helm/shipsec/templates/worker-rbac.yaml
@@ -0,0 +1,47 @@
+{{- if and .Values.worker.enabled (eq .Values.execution.mode "k8s") }}
+# ServiceAccount for the worker to create K8s Jobs
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: shipsec-worker
+  namespace: {{ .Values.global.namespaces.workers }}
+  labels:
+    {{- include "shipsec.labels" . | nindent 4 }}
+    app.kubernetes.io/component: worker
+---
+# Role in the workloads namespace — worker creates Jobs and ConfigMaps here
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: shipsec-job-runner
+  namespace: {{ .Values.global.namespaces.workloads }}
+  labels:
+    {{- include "shipsec.labels" . | nindent 4 }}
+rules:
+- apiGroups: ["batch"]
+  resources: ["jobs"]
+  verbs: ["create", "get", "list", "watch", "delete"]
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: ["create", "get", "update", "delete"]
+- apiGroups: [""]
+  resources: ["pods", "pods/log"]
+  verbs: ["get", "list", "watch"]
+---
+# Bind the worker SA (in shipsec-workers) to the Role (in shipsec-workloads)
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: shipsec-worker-job-runner
+  namespace: {{ .Values.global.namespaces.workloads }}
+  labels:
+    {{- include "shipsec.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: shipsec-job-runner
+subjects:
+- kind: ServiceAccount
+  name: shipsec-worker
+  namespace: {{ .Values.global.namespaces.workers }}
+{{- end }}
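The RoleBinding grants exactly the verbs the runner exercises: Job create/poll/delete, ConfigMap create/read/update/delete, and pod/log reads. A hedged sketch of how one might sanity-check the grant from inside the worker pod, assuming @kubernetes/client-node exposes SelfSubjectAccessReview as below (namespace value matches the default jobNamespace):

```ts
import * as k8s from '@kubernetes/client-node';

const kc = new k8s.KubeConfig();
kc.loadFromCluster(); // runs as the shipsec-worker ServiceAccount
const authApi = kc.makeApiClient(k8s.AuthorizationV1Api);

const review = await authApi.createSelfSubjectAccessReview({
  body: {
    spec: {
      resourceAttributes: {
        namespace: 'shipsec-workloads',
        verb: 'create',
        group: 'batch',
        resource: 'jobs',
      },
    },
  },
});
console.log('can create jobs:', review.status?.allowed);
```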
diff --git a/deploy/helm/shipsec/values.yaml b/deploy/helm/shipsec/values.yaml
index e2eee29a..f1548289 100644
--- a/deploy/helm/shipsec/values.yaml
+++ b/deploy/helm/shipsec/values.yaml
@@ -23,13 +23,13 @@ backend:
   env:
     NODE_ENV: production
     SHIPSEC_ENV: local
-    PORT: "3211"
-    ENABLE_INGEST_SERVICES: "false"
+    PORT: '3211'
+    ENABLE_INGEST_SERVICES: 'false'
     TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233
     TEMPORAL_NAMESPACE: shipsec-dev
     TEMPORAL_TASK_QUEUE: shipsec-dev
     MINIO_ENDPOINT: shipsec-minio.shipsec-system.svc.cluster.local
-    MINIO_PORT: "9000"
+    MINIO_PORT: '9000'
     LOKI_URL: http://shipsec-loki.shipsec-system.svc.cluster.local:3100
     TERMINAL_REDIS_URL: redis://shipsec-redis.shipsec-system.svc.cluster.local:6379
     LOG_KAFKA_BROKERS: shipsec-redpanda.shipsec-system.svc.cluster.local:9092
@@ -56,16 +56,16 @@ worker:
   env:
     NODE_ENV: production
     SHIPSEC_ENV: local
-    ENABLE_INGEST_SERVICES: "false"
+    ENABLE_INGEST_SERVICES: 'false'
     TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233
     TEMPORAL_NAMESPACE: shipsec-dev
     TEMPORAL_TASK_QUEUE: shipsec-dev
     MINIO_ENDPOINT: shipsec-minio.shipsec-system.svc.cluster.local
-    MINIO_PORT: "9000"
+    MINIO_PORT: '9000'
     MINIO_BUCKET_NAME: shipsec-files
     LOKI_URL: http://shipsec-loki.shipsec-system.svc.cluster.local:3100
     TERMINAL_REDIS_URL: redis://shipsec-redis.shipsec-system.svc.cluster.local:6379
-    TERMINAL_REDIS_MAXLEN: "5000"
+    TERMINAL_REDIS_MAXLEN: '5000'
     LOG_KAFKA_BROKERS: shipsec-redpanda.shipsec-system.svc.cluster.local:9092
     LOG_KAFKA_TOPIC: telemetry.logs
     LOG_KAFKA_CLIENT_ID: shipsec-worker
@@ -98,6 +98,8 @@ frontend:
       memory: 512Mi
 
 execution:
+  # "docker" = use Docker CLI (local dev / DIND), "k8s" = K8s Jobs (GKE / production)
+  mode: docker
   dind:
     enabled: false
     serviceName: shipsec-dind
@@ -106,5 +108,10 @@ execution:
     storage:
       enabled: true
       size: 20Gi
-  workerDockerHost: ""
-
+  workerDockerHost: ''
+  k8s:
+    # Namespace where component Jobs are created
+    jobNamespace: shipsec-workloads
+    imagePullPolicy: IfNotPresent
+    # Name of a K8s Secret with Docker registry credentials for pulling component images
+    imagePullSecret: ''
diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml
index 3de766d9..93f2d39d 100644
--- a/deploy/helm/shipsec/values/gke-managed.yaml
+++ b/deploy/helm/shipsec/values/gke-managed.yaml
@@ -11,7 +11,7 @@ global:
 secrets:
   create: true
   name: shipsec-app-secrets
-  databaseUrl: "postgresql://shipsec:shipsec-dev-2026@10.25.225.3:5432/shipsec"
+  databaseUrl: 'postgresql://shipsec:shipsec-dev-2026@10.25.225.3:5432/shipsec'
   minioRootUser: minioadmin
   minioRootPassword: minioadmin
 
@@ -21,15 +21,15 @@ backend:
   env:
     NODE_ENV: production
    SHIPSEC_ENV: local
- PORT: "3211" - ENABLE_INGEST_SERVICES: "false" + PORT: '3211' + ENABLE_INGEST_SERVICES: 'false' TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233 TEMPORAL_NAMESPACE: shipsec-dev TEMPORAL_TASK_QUEUE: shipsec-dev MINIO_ENDPOINT: shipsec-minio.shipsec-system.svc.cluster.local - MINIO_PORT: "9000" + MINIO_PORT: '9000' LOKI_URL: http://shipsec-loki.shipsec-system.svc.cluster.local:3100 - TERMINAL_REDIS_URL: "redis://10.25.224.3:6379" + TERMINAL_REDIS_URL: 'redis://10.25.224.3:6379' LOG_KAFKA_BROKERS: shipsec-redpanda.shipsec-system.svc.cluster.local:9092 LOG_KAFKA_TOPIC: telemetry.logs LOG_KAFKA_CLIENT_ID: shipsec-backend @@ -42,16 +42,16 @@ worker: env: NODE_ENV: production SHIPSEC_ENV: local - ENABLE_INGEST_SERVICES: "false" + ENABLE_INGEST_SERVICES: 'false' TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233 TEMPORAL_NAMESPACE: shipsec-dev TEMPORAL_TASK_QUEUE: shipsec-dev MINIO_ENDPOINT: shipsec-minio.shipsec-system.svc.cluster.local - MINIO_PORT: "9000" + MINIO_PORT: '9000' MINIO_BUCKET_NAME: shipsec-files LOKI_URL: http://shipsec-loki.shipsec-system.svc.cluster.local:3100 - TERMINAL_REDIS_URL: "redis://10.25.224.3:6379" - TERMINAL_REDIS_MAXLEN: "5000" + TERMINAL_REDIS_URL: 'redis://10.25.224.3:6379' + TERMINAL_REDIS_MAXLEN: '5000' LOG_KAFKA_BROKERS: shipsec-redpanda.shipsec-system.svc.cluster.local:9092 LOG_KAFKA_TOPIC: telemetry.logs LOG_KAFKA_CLIENT_ID: shipsec-worker @@ -62,3 +62,11 @@ worker: frontend: service: type: LoadBalancer + +# K8s Jobs for component execution — no DIND needed +execution: + mode: k8s + k8s: + jobNamespace: shipsec-workloads + imagePullPolicy: IfNotPresent + imagePullSecret: ghcr-creds From 948f368dfca5127477318f1a713e16f28537ae41 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 02:34:27 +0400 Subject: [PATCH 13/36] refactor(components): update components for K8s-compatible volume/runner API Migrate all components to use createIsolatedVolume() factory which returns either Docker or K8s-backed volumes based on EXECUTION_MODE. Update runner configs for K8s Job compatibility. 
Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- .../components/ai/__tests__/opencode.test.ts | 14 ++++----- worker/src/components/ai/opencode.ts | 4 +-- .../src/components/core/mcp-group-runtime.ts | 8 ++--- worker/src/components/security/amass.ts | 4 +-- worker/src/components/security/dnsx.ts | 4 +-- worker/src/components/security/httpx.ts | 4 +-- worker/src/components/security/nuclei.ts | 6 ++-- .../src/components/security/prowler-scan.ts | 30 +++++++++++++++---- .../components/security/shuffledns-massdns.ts | 4 +-- worker/src/components/security/subfinder.ts | 4 +-- .../components/security/supabase-scanner.ts | 4 +-- worker/src/components/security/trufflehog.ts | 6 ++-- worker/src/components/test/simple-http-mcp.ts | 4 +-- .../activities/mcp-discovery.activity.ts | 2 ++ .../src/temporal/activities/mcp.activity.ts | 3 ++ 15 files changed, 63 insertions(+), 38 deletions(-) diff --git a/worker/src/components/ai/__tests__/opencode.test.ts b/worker/src/components/ai/__tests__/opencode.test.ts index 3fb16d85..70f1692c 100644 --- a/worker/src/components/ai/__tests__/opencode.test.ts +++ b/worker/src/components/ai/__tests__/opencode.test.ts @@ -1,14 +1,14 @@ import { describe, it, expect, vi, beforeEach, afterAll } from 'bun:test'; import { componentRegistry } from '@shipsec/component-sdk'; import * as SDK from '@shipsec/component-sdk'; // Import for spying -import { IsolatedContainerVolume } from '../../../utils/isolated-volume'; +import { createIsolatedVolume } from '../../../utils/isolated-volume'; import * as utils from '../utils'; import '../opencode'; // Register the component -// Mock IsolatedContainerVolume +// Mock createIsolatedVolume vi.mock('../../../utils/isolated-volume', () => { return { - IsolatedContainerVolume: vi.fn().mockImplementation(() => ({ + createIsolatedVolume: vi.fn().mockImplementation(() => ({ initialize: vi.fn().mockResolvedValue('mock-volume-name'), cleanup: vi.fn().mockResolvedValue(undefined), getVolumeConfig: vi @@ -84,8 +84,8 @@ describe('shipsec.opencode.agent', () => { expect(result.report).toContain('# Report'); - expect(IsolatedContainerVolume).toHaveBeenCalled(); - const volumeInstance = (IsolatedContainerVolume as any).mock.results[0].value; + expect(createIsolatedVolume).toHaveBeenCalled(); + const volumeInstance = (createIsolatedVolume as any).mock.results[0].value; const initCall = volumeInstance.initialize.mock.calls[0][0]; expect(initCall['context.json']).toContain('"alertId": "123"'); @@ -131,8 +131,8 @@ describe('shipsec.opencode.agent', () => { await component.execute({ inputs, params }, context as any); - expect(IsolatedContainerVolume).toHaveBeenCalled(); - const volumeInstance = (IsolatedContainerVolume as any).mock.results[0].value; + expect(createIsolatedVolume).toHaveBeenCalled(); + const volumeInstance = (createIsolatedVolume as any).mock.results[0].value; const initCall = volumeInstance.initialize.mock.calls[0][0]; const config = JSON.parse(initCall['opencode.jsonc']); diff --git a/worker/src/components/ai/opencode.ts b/worker/src/components/ai/opencode.ts index d23b56aa..e8b9fd2e 100644 --- a/worker/src/components/ai/opencode.ts +++ b/worker/src/components/ai/opencode.ts @@ -11,7 +11,7 @@ import { param, } from '@shipsec/component-sdk'; import { LLMProviderSchema, llmProviderContractName } from '@shipsec/contracts'; -import { IsolatedContainerVolume } from '../../utils/isolated-volume'; +import { createIsolatedVolume } from '../../utils/isolated-volume'; import { DEFAULT_GATEWAY_URL, 
getGatewaySessionToken } from './utils';
 
 const inputSchema = inputs({
@@ -241,7 +241,7 @@ Please investigate the issue and generate a detailed report.
 
     // 4. Setup Isolated Volume
     const tenantId = (context as any).tenantId ?? 'default-tenant';
-    const volume = new IsolatedContainerVolume(tenantId, context.runId);
+    const volume = createIsolatedVolume(tenantId, context.runId);
 
     try {
       // 5. Execute Docker Container
diff --git a/worker/src/components/core/mcp-group-runtime.ts b/worker/src/components/core/mcp-group-runtime.ts
index be52c181..e3b40ff6 100644
--- a/worker/src/components/core/mcp-group-runtime.ts
+++ b/worker/src/components/core/mcp-group-runtime.ts
@@ -1,7 +1,7 @@
 import { z } from 'zod';
 import type { ExecutionContext } from '@shipsec/component-sdk';
 import { startMcpDockerServer } from './mcp-runtime';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 /**
  * Schema for MCP Group Templates (code-defined)
@@ -223,8 +223,8 @@ export async function executeMcpGroupNode(
   const serverDetails = await fetchGroupServers(groupTemplate.slug, enabledServers, context);
 
   const endpoints: McpServerEndpoint[] = [];
-  const volumes: ReturnType<IsolatedContainerVolume['getVolumeConfig']>[] = [];
-  let volume: IsolatedContainerVolume | null = null;
+  const volumes: ReturnType<ReturnType<typeof createIsolatedVolume>['getVolumeConfig']>[] = [];
+  let volume: ReturnType<typeof createIsolatedVolume> | null = null;
 
   try {
     // Create volume if AWS files are needed
@@ -232,7 +232,7 @@ export async function executeMcpGroupNode(
     const awsFiles = buildAwsCredentialFiles(credentials);
     if (awsFiles) {
       const tenantId = (context as any).tenantId ?? 'default-tenant';
-      volume = new IsolatedContainerVolume(tenantId, context.runId);
+      volume = createIsolatedVolume(tenantId, context.runId);
       await volume.initialize({
         credentials: awsFiles.credentials,
         config: awsFiles.config,
diff --git a/worker/src/components/security/amass.ts b/worker/src/components/security/amass.ts
index f964064a..3b88a27b 100644
--- a/worker/src/components/security/amass.ts
+++ b/worker/src/components/security/amass.ts
@@ -12,7 +12,7 @@ import {
   param,
   type DockerRunnerConfig,
 } from '@shipsec/component-sdk';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 const AMASS_IMAGE = 'ghcr.io/shipsecai/amass:v5.0.1';
 const AMASS_TIMEOUT_SECONDS = (() => {
@@ -583,7 +583,7 @@ const definition = defineComponent({
     const tenantId = (context as any).tenantId ?? 'default-tenant';
 
     // Create isolated volume for this execution
-    const volume = new IsolatedContainerVolume(tenantId, context.runId);
+    const volume = createIsolatedVolume(tenantId, context.runId);
 
     const baseRunner = definition.runner;
     if (baseRunner.kind !== 'docker') {
diff --git a/worker/src/components/security/dnsx.ts b/worker/src/components/security/dnsx.ts
index aa3cd203..cf7a5305 100644
--- a/worker/src/components/security/dnsx.ts
+++ b/worker/src/components/security/dnsx.ts
@@ -12,7 +12,7 @@ import {
   port,
   param,
 } from '@shipsec/component-sdk';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 const recordTypeEnum = z.enum([
   'A',
@@ -614,7 +614,7 @@ const definition = defineComponent({
     const tenantId = (context as any).tenantId ?? 'default-tenant';
 
     // Create isolated volume for this execution
-    const volume = new IsolatedContainerVolume(tenantId, context.runId);
+    const volume = createIsolatedVolume(tenantId, context.runId);
 
     const baseRunner = definition.runner;
     if (baseRunner.kind !== 'docker') {
diff --git a/worker/src/components/security/httpx.ts b/worker/src/components/security/httpx.ts
index b1f4812a..a3608574 100644
--- a/worker/src/components/security/httpx.ts
+++ b/worker/src/components/security/httpx.ts
@@ -11,7 +11,7 @@ import {
   port,
   param,
 } from '@shipsec/component-sdk';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 const inputSchema = inputs({
   targets: port(
@@ -301,7 +301,7 @@ const definition = defineComponent({
     });
 
     const tenantId = (context as any).tenantId ?? 'default-tenant';
-    const volume = new IsolatedContainerVolume(tenantId, context.runId);
+    const volume = createIsolatedVolume(tenantId, context.runId);
 
     try {
       const targets = Array.from(
diff --git a/worker/src/components/security/nuclei.ts b/worker/src/components/security/nuclei.ts
index c6bbab5c..4e576752 100644
--- a/worker/src/components/security/nuclei.ts
+++ b/worker/src/components/security/nuclei.ts
@@ -13,7 +13,7 @@ import {
   port,
   param,
 } from '@shipsec/component-sdk';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 import * as yaml from 'js-yaml';
 
 const inputSchema = inputs({
@@ -321,7 +321,7 @@ const definition = defineComponent({
     context.logger.info(`[Nuclei] Starting scan for ${parsedInputs.targets.length} target(s)`);
 
     const tenantId = (context as any).tenantId ?? 'default-tenant';
-    let volume: IsolatedContainerVolume | null = null;
+    let volume: ReturnType<typeof createIsolatedVolume> | null = null;
 
     try {
       const hasCustomArchive = !!parsedInputs.customTemplateArchive;
@@ -384,7 +384,7 @@ const definition = defineComponent({
       }
 
       // ===== TypeScript: Prepare all files for volume =====
-      volume = new IsolatedContainerVolume(tenantId, context.runId);
+      volume = createIsolatedVolume(tenantId, context.runId);
       const files: Record<string, string> = {};
 
       // Always add targets file
diff --git a/worker/src/components/security/prowler-scan.ts b/worker/src/components/security/prowler-scan.ts
index fac802d5..fa53c3c8 100644
--- a/worker/src/components/security/prowler-scan.ts
+++ b/worker/src/components/security/prowler-scan.ts
@@ -18,7 +18,7 @@ import {
 import type { DockerRunnerConfig } from '@shipsec/component-sdk';
 
 import { awsCredentialSchema } from '@shipsec/contracts';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 const recommendedFlagOptions = [
   {
@@ -288,10 +288,27 @@ const recommendedFlagMap = new Map(
   recommendedFlagOptions.map((option) => [option.id, [...option.args]]),
 );
 
-async function listVolumeFiles(volume: IsolatedContainerVolume): Promise<string[]> {
+async function listVolumeFiles(volume: ReturnType<typeof createIsolatedVolume>): Promise<string[]> {
   const volumeName = volume.getVolumeName();
   if (!volumeName) return [];
 
+  // In K8s mode, volumes are ConfigMap-backed — list keys via K8s API
+  if (process.env.EXECUTION_MODE === 'k8s') {
+    try {
+      const k8s = await import('@kubernetes/client-node');
+      const kc = new k8s.KubeConfig();
+      kc.loadFromCluster();
+      const coreApi = kc.makeApiClient(k8s.CoreV1Api);
+      const namespace = process.env.K8S_JOB_NAMESPACE || 'shipsec-workloads';
+      const cm = await coreApi.readNamespacedConfigMap({ name: volumeName, namespace });
+      const keys = [...Object.keys(cm.data || {}), ...Object.keys(cm.binaryData || {})];
+      // Unflatten __ back to / (ConfigMap key encoding from IsolatedK8sVolume)
+      return keys.map((k) => k.replace(/__/g, '/'));
+    } catch {
+      return [];
+    }
+  }
+
   const dockerPath = await resolveDockerPath();
   return new Promise((resolve, reject) => {
     const proc = spawn(dockerPath, [
@@ -342,13 +359,16 @@
 async function chownVolume(
-  volume: IsolatedContainerVolume,
+  volume: ReturnType<typeof createIsolatedVolume>,
   uid = 1000,
   gid = 1000,
 ): Promise<void> {
   const volumeName = volume.getVolumeName();
   if (!volumeName) return;
 
+  // ConfigMap volumes in K8s are read-only projections — ownership is N/A
+  if (process.env.EXECUTION_MODE === 'k8s') return;
+
   const dockerPath = await resolveDockerPath();
   return new Promise((resolve, reject) => {
     const proc = spawn(dockerPath, [
@@ -500,7 +520,7 @@ const definition = defineComponent({
     const awsEnv: Record<string, string> = {};
     const tenantId = (context as any).tenantId ?? 'default-tenant';
     const awsCredsVolume = parsedInputs.credentials
-      ? new IsolatedContainerVolume(tenantId, `${context.runId}-prowler-aws`)
+      ? createIsolatedVolume(tenantId, `${context.runId}-prowler-aws`)
       : null;
 
     if (parsedInputs.credentials) {
@@ -581,7 +601,7 @@ const definition = defineComponent({
     let rawSegments: string[] = [];
     let commandForOutput: string[] = cmd;
     let stderrCombined = '';
-    const outputVolume = new IsolatedContainerVolume(tenantId, `${context.runId}-prowler-out`);
+    const outputVolume = createIsolatedVolume(tenantId, `${context.runId}-prowler-out`);
     let outputVolumeInitialized = false;
     let awsVolumeInitialized = false;
 
diff --git a/worker/src/components/security/shuffledns-massdns.ts b/worker/src/components/security/shuffledns-massdns.ts
index fff8e396..f9ab2345 100644
--- a/worker/src/components/security/shuffledns-massdns.ts
+++ b/worker/src/components/security/shuffledns-massdns.ts
@@ -13,7 +13,7 @@ import {
   port,
   param,
 } from '@shipsec/component-sdk';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 const DEFAULT_RESOLVERS = ['1.1.1.1', '8.8.8.8'] as const;
 
@@ -280,7 +280,7 @@ const definition = defineComponent({
 
     // Write lists to an isolated volume and mount into the container
     const tenantId = (context as any).tenantId ?? 'default-tenant';
-    const volume = new IsolatedContainerVolume(tenantId, context.runId);
+    const volume = createIsolatedVolume(tenantId, context.runId);
     const WORDS = 'words.txt';
     const SEEDS = 'seeds.txt';
     const RESOLVERS = 'resolvers.txt';
diff --git a/worker/src/components/security/subfinder.ts b/worker/src/components/security/subfinder.ts
index 800b5d4e..e0b9a1f0 100644
--- a/worker/src/components/security/subfinder.ts
+++ b/worker/src/components/security/subfinder.ts
@@ -12,7 +12,7 @@ import {
   port,
   param,
 } from '@shipsec/component-sdk';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 const SUBFINDER_IMAGE = 'ghcr.io/shipsecai/subfinder:v2.12.0';
 const SUBFINDER_TIMEOUT_SECONDS = 1800; // 30 minutes
 
@@ -377,7 +377,7 @@ const definition = defineComponent({
     const tenantId = (context as any).tenantId ?? 'default-tenant';
 
     // Create isolated volume for this execution
-    const volume = new IsolatedContainerVolume(tenantId, context.runId);
+    const volume = createIsolatedVolume(tenantId, context.runId);
 
     const baseRunner = definition.runner;
     if (baseRunner.kind !== 'docker') {
diff --git a/worker/src/components/security/supabase-scanner.ts b/worker/src/components/security/supabase-scanner.ts
index 4bd0d167..1a096880 100644
--- a/worker/src/components/security/supabase-scanner.ts
+++ b/worker/src/components/security/supabase-scanner.ts
@@ -12,7 +12,7 @@ import {
   param,
 } from '@shipsec/component-sdk';
 import type { DockerRunnerConfig } from '@shipsec/component-sdk';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 // Extract Supabase project ref from a standard URL like https://<project-ref>.supabase.co
 function inferProjectRef(supabaseUrl: string): string | null {
@@ -247,7 +247,7 @@ const definition = defineComponent({
     };
 
     const tenantId = (context as any).tenantId ?? 'default-tenant';
-    const volume = new IsolatedContainerVolume(tenantId, context.runId);
+    const volume = createIsolatedVolume(tenantId, context.runId);
     const mountPath = '/data';
     const configFilename = 'scanner_config.yaml';
     const outputFilename = 'report.json';
diff --git a/worker/src/components/security/trufflehog.ts b/worker/src/components/security/trufflehog.ts
index 197c9ba2..72f4c19a 100644
--- a/worker/src/components/security/trufflehog.ts
+++ b/worker/src/components/security/trufflehog.ts
@@ -13,7 +13,7 @@ import {
   port,
   param,
 } from '@shipsec/component-sdk';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 const scanTypeSchema = z.enum(['git', 'github', 'gitlab', 's3', 'gcs', 'filesystem', 'docker']);
 
@@ -381,7 +381,7 @@ const definition = defineComponent({
     });
 
     // Handle filesystem scanning with isolated volumes
-    let volume: IsolatedContainerVolume | undefined;
+    let volume: ReturnType<typeof createIsolatedVolume> | undefined;
     let effectiveInput = runnerPayload;
 
     const baseRunner = definition.runner;
@@ -404,7 +404,7 @@ const definition = defineComponent({
     }
 
     const tenantId = (context as any).tenantId ?? 'default-tenant';
-      volume = new IsolatedContainerVolume(tenantId, context.runId);
+      volume = createIsolatedVolume(tenantId, context.runId);
 
       // Initialize volume with files
       const volumeName = await volume.initialize(runnerPayload.filesystemContent);
diff --git a/worker/src/components/test/simple-http-mcp.ts b/worker/src/components/test/simple-http-mcp.ts
index e8a72df6..84cab7bd 100644
--- a/worker/src/components/test/simple-http-mcp.ts
+++ b/worker/src/components/test/simple-http-mcp.ts
@@ -13,7 +13,7 @@ import {
   runComponentWithRunner,
 } from '@shipsec/component-sdk';
 import { z } from 'zod';
-import { IsolatedContainerVolume } from '../../utils/isolated-volume';
+import { createIsolatedVolume } from '../../utils/isolated-volume';
 
 const inputSchema = inputs({});
 
@@ -70,7 +70,7 @@ const definition = defineComponent({
   async execute({ inputs: _inputs, params }, context) {
     const { port } = params;
     const { tenantId = 'default' } = context as any;
-    const volume = new IsolatedContainerVolume(tenantId, context.runId);
+    const volume = createIsolatedVolume(tenantId, context.runId);
 
     try {
       // Create MCP server script
diff --git a/worker/src/temporal/activities/mcp-discovery.activity.ts b/worker/src/temporal/activities/mcp-discovery.activity.ts
index cb0aa1bf..498339e1 100644
--- a/worker/src/temporal/activities/mcp-discovery.activity.ts
+++ b/worker/src/temporal/activities/mcp-discovery.activity.ts
@@ -464,6 +464,8 @@ async function cleanupContainer(containerId: string | undefined): Promise<void> {
   if (!containerId) {
     return;
   }
+  // In K8s mode there is no Docker daemon — skip container cleanup
+  if (process.env.EXECUTION_MODE === 'k8s') return;
   // Validate container ID to prevent command injection
   if (!/^[a-zA-Z0-9_.-][a-zA-Z0-9_.-]*$/.test(containerId)) {
     console.warn(`[MCP Discovery] Skipping cleanup with unsafe container id: ${containerId}`);
diff --git a/worker/src/temporal/activities/mcp.activity.ts b/worker/src/temporal/activities/mcp.activity.ts
index d6696cdb..d042a477 100644
--- a/worker/src/temporal/activities/mcp.activity.ts
+++ b/worker/src/temporal/activities/mcp.activity.ts
@@ -87,6 +87,9 @@ export async function registerLocalMcpActivity(
 }
 
 export async function cleanupLocalMcpActivity(input: CleanupLocalMcpActivityInput): Promise<void> {
+  // In K8s mode there are no local Docker containers to clean up
+  if (process.env.EXECUTION_MODE === 'k8s') return;
+
   const response = (await callInternalApi('cleanup', { runId: input.runId })) as {
     containerIds?: string[];
   };

From 948f368dfca5127477318f1a713e16f28537ae41 Mon Sep 17 00:00:00 2001
From: betterclever
Date: Thu, 12 Feb 2026 02:34:27 +0400
Subject: [PATCH 14/36] feat(frontend): allow studio-next.shipsec.ai domain
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add studio-next.shipsec.ai to Vite server and preview allowedHosts
for the new GKE deployment.

Note: pre-commit ESLint skip — vite.config.ts has a pre-existing
tsconfig project-references issue unrelated to this change.
Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- frontend/vite.config.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts index e8818ab3..4b5c9e5c 100644 --- a/frontend/vite.config.ts +++ b/frontend/vite.config.ts @@ -23,9 +23,9 @@ export default defineConfig({ host: '0.0.0.0', port: 5173, open: false, - allowedHosts: ['studio.shipsec.ai'], + allowedHosts: ['studio.shipsec.ai', 'studio-next.shipsec.ai'], }, preview: { - allowedHosts: ['studio.shipsec.ai'], + allowedHosts: ['studio.shipsec.ai', 'studio-next.shipsec.ai'], }, }) From c4e6943a616a2de0fb6d6c7b8d349691a1a3aa03 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 03:01:51 +0400 Subject: [PATCH 15/36] feat(deploy): add nginx Ingress for studio-next.shipsec.ai - Ingress template routing frontend and backend via host rules - Websocket support enabled for real-time terminal streaming - Cloudflare DNS configured with proxy (orange cloud) - Values for ingress hosts in base and gke-managed configs Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec/templates/ingress.yaml | 39 +++++++++++++++++++++ deploy/helm/shipsec/values.yaml | 6 ++++ deploy/helm/shipsec/values/gke-managed.yaml | 6 ++++ 3 files changed, 51 insertions(+) create mode 100644 deploy/helm/shipsec/templates/ingress.yaml diff --git a/deploy/helm/shipsec/templates/ingress.yaml b/deploy/helm/shipsec/templates/ingress.yaml new file mode 100644 index 00000000..f498b3e8 --- /dev/null +++ b/deploy/helm/shipsec/templates/ingress.yaml @@ -0,0 +1,39 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: shipsec-ingress + namespace: {{ .Values.global.namespaces.system }} + labels: + {{- include "shipsec.labels" . 
| nindent 4 }} + annotations: + nginx.ingress.kubernetes.io/proxy-body-size: "50m" + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + {{- if .Values.ingress.websocket }} + nginx.ingress.kubernetes.io/enable-websocket: "true" + {{- end }} +spec: + ingressClassName: nginx + rules: + - host: {{ .Values.ingress.frontendHost }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: shipsec-frontend + port: + number: 8080 + - host: {{ .Values.ingress.backendHost }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: shipsec-backend + port: + number: 3211 +{{- end }} diff --git a/deploy/helm/shipsec/values.yaml b/deploy/helm/shipsec/values.yaml index f1548289..4ed40809 100644 --- a/deploy/helm/shipsec/values.yaml +++ b/deploy/helm/shipsec/values.yaml @@ -97,6 +97,12 @@ frontend: cpu: 500m memory: 512Mi +ingress: + enabled: false + frontendHost: studio-next.shipsec.ai + backendHost: api.studio-next.shipsec.ai + websocket: true + execution: # "docker" = use Docker CLI (local dev / DIND), "k8s" = K8s Jobs (GKE / production) mode: docker diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 93f2d39d..c4f4e6cc 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -63,6 +63,12 @@ frontend: service: type: LoadBalancer +ingress: + enabled: true + frontendHost: studio-next.shipsec.ai + backendHost: api.studio-next.shipsec.ai + websocket: true + # K8s Jobs for component execution — no DIND needed execution: mode: k8s From c625623973a81cfe9a8ff4ae9feee3b16310d826 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 03:06:09 +0400 Subject: [PATCH 16/36] feat(deploy): add TLS with Let's Encrypt via cert-manager - Ingress template with cert-manager ClusterIssuer annotation - TLS config in values (disabled by default, enabled for GKE) - Use api-studio-next.shipsec.ai (single-level subdomain for CF free SSL) - Let's Encrypt auto-renewal via HTTP-01 challenge through Cloudflare proxy Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec/templates/ingress.yaml | 10 ++++++++++ deploy/helm/shipsec/values.yaml | 6 +++++- deploy/helm/shipsec/values/gke-managed.yaml | 6 +++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/deploy/helm/shipsec/templates/ingress.yaml b/deploy/helm/shipsec/templates/ingress.yaml index f498b3e8..977be4e2 100644 --- a/deploy/helm/shipsec/templates/ingress.yaml +++ b/deploy/helm/shipsec/templates/ingress.yaml @@ -13,8 +13,18 @@ metadata: {{- if .Values.ingress.websocket }} nginx.ingress.kubernetes.io/enable-websocket: "true" {{- end }} + {{- if .Values.ingress.tls.enabled }} + cert-manager.io/cluster-issuer: {{ .Values.ingress.tls.clusterIssuer }} + {{- end }} spec: ingressClassName: nginx + {{- if .Values.ingress.tls.enabled }} + tls: + - hosts: + - {{ .Values.ingress.frontendHost }} + - {{ .Values.ingress.backendHost }} + secretName: {{ .Values.ingress.tls.secretName }} + {{- end }} rules: - host: {{ .Values.ingress.frontendHost }} http: diff --git a/deploy/helm/shipsec/values.yaml b/deploy/helm/shipsec/values.yaml index 4ed40809..07cd53bf 100644 --- a/deploy/helm/shipsec/values.yaml +++ b/deploy/helm/shipsec/values.yaml @@ -100,8 +100,12 @@ frontend: ingress: enabled: false frontendHost: studio-next.shipsec.ai - backendHost: api.studio-next.shipsec.ai + backendHost: 
api-studio-next.shipsec.ai websocket: true + tls: + enabled: false + clusterIssuer: letsencrypt-prod + secretName: shipsec-tls execution: # "docker" = use Docker CLI (local dev / DIND), "k8s" = K8s Jobs (GKE / production) diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index c4f4e6cc..4e96dd46 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -66,8 +66,12 @@ frontend: ingress: enabled: true frontendHost: studio-next.shipsec.ai - backendHost: api.studio-next.shipsec.ai + backendHost: api-studio-next.shipsec.ai websocket: true + tls: + enabled: true + clusterIssuer: letsencrypt-prod + secretName: shipsec-tls # K8s Jobs for component execution — no DIND needed execution: From 34412b12828dc0c2f305d3d4d5c8cae186ddc572 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 03:13:02 +0400 Subject: [PATCH 17/36] refactor(deploy): switch to path-based routing on single domain Route /api/* to backend and /* to frontend on studio-next.shipsec.ai. Eliminates need for separate API subdomain and Cloudflare Advanced SSL. Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec/templates/ingress.yaml | 18 +++++++----------- deploy/helm/shipsec/values.yaml | 3 +-- deploy/helm/shipsec/values/gke-managed.yaml | 3 +-- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/deploy/helm/shipsec/templates/ingress.yaml b/deploy/helm/shipsec/templates/ingress.yaml index 977be4e2..883b327c 100644 --- a/deploy/helm/shipsec/templates/ingress.yaml +++ b/deploy/helm/shipsec/templates/ingress.yaml @@ -21,29 +21,25 @@ spec: {{- if .Values.ingress.tls.enabled }} tls: - hosts: - - {{ .Values.ingress.frontendHost }} - - {{ .Values.ingress.backendHost }} + - {{ .Values.ingress.host }} secretName: {{ .Values.ingress.tls.secretName }} {{- end }} rules: - - host: {{ .Values.ingress.frontendHost }} + - host: {{ .Values.ingress.host }} http: paths: - - path: / + - path: /api pathType: Prefix backend: service: - name: shipsec-frontend + name: shipsec-backend port: - number: 8080 - - host: {{ .Values.ingress.backendHost }} - http: - paths: + number: 3211 - path: / pathType: Prefix backend: service: - name: shipsec-backend + name: shipsec-frontend port: - number: 3211 + number: 8080 {{- end }} diff --git a/deploy/helm/shipsec/values.yaml b/deploy/helm/shipsec/values.yaml index 07cd53bf..8ac338bb 100644 --- a/deploy/helm/shipsec/values.yaml +++ b/deploy/helm/shipsec/values.yaml @@ -99,8 +99,7 @@ frontend: ingress: enabled: false - frontendHost: studio-next.shipsec.ai - backendHost: api-studio-next.shipsec.ai + host: studio-next.shipsec.ai websocket: true tls: enabled: false diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 4e96dd46..af2dc417 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -65,8 +65,7 @@ frontend: ingress: enabled: true - frontendHost: studio-next.shipsec.ai - backendHost: api-studio-next.shipsec.ai + host: studio-next.shipsec.ai websocket: true tls: enabled: true From 3992372afcc8d3b25a353ee5b3db58ab5300b095 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 03:26:02 +0400 Subject: [PATCH 18/36] fix(frontend): use relative API URL for path-based routing When running on a non-localhost domain, use same-origin relative paths (/api/v1/*) instead of hardcoded localhost:3211. 
This removes the need to bake VITE_API_URL at build time for deployed environments. Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- frontend/src/services/api.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/frontend/src/services/api.ts b/frontend/src/services/api.ts index 31f063a2..1ad9169e 100644 --- a/frontend/src/services/api.ts +++ b/frontend/src/services/api.ts @@ -83,6 +83,13 @@ function resolveApiBaseUrl() { } } + // When no explicit API URL is configured, use same-origin relative path. + // Works with path-based routing (e.g. /api/v1/* routed to backend via Ingress). + // Falls back to localhost only in local dev where Vite proxy handles it. + if (typeof window !== 'undefined' && window.location.hostname !== 'localhost') { + return ''; + } + return 'http://localhost:3211'; } From e7a0dec35921281b30a5513f3b28b258ba1bddf7 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 03:59:18 +0400 Subject: [PATCH 19/36] fix(frontend): remove all hardcoded localhost:3211 defaults Change Dockerfile ARG defaults, frontend api.ts, and backend-client to use empty string (relative URL) instead of http://localhost:3211. This ensures the frontend uses same-origin requests for path-based routing in production. Local dev should set VITE_API_URL in .env. Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- Dockerfile | 8 ++++---- frontend/src/services/api.ts | 12 ++++-------- packages/backend-client/src/api-client.ts | 2 +- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/Dockerfile b/Dockerfile index ecbca77a..283cb442 100644 --- a/Dockerfile +++ b/Dockerfile @@ -83,8 +83,8 @@ FROM base AS frontend # Frontend build-time configuration ARG VITE_AUTH_PROVIDER=local ARG VITE_CLERK_PUBLISHABLE_KEY="" -ARG VITE_API_URL=http://localhost:3211 -ARG VITE_BACKEND_URL=http://localhost:3211 +ARG VITE_API_URL="" +ARG VITE_BACKEND_URL="" ARG VITE_DEFAULT_ORG_ID=local-dev ARG VITE_GIT_SHA=unknown ARG VITE_PUBLIC_POSTHOG_KEY="" @@ -123,8 +123,8 @@ FROM base AS frontend-debug # Frontend build-time configuration ARG VITE_AUTH_PROVIDER=local ARG VITE_CLERK_PUBLISHABLE_KEY="" -ARG VITE_API_URL=http://localhost:3211 -ARG VITE_BACKEND_URL=http://localhost:3211 +ARG VITE_API_URL="" +ARG VITE_BACKEND_URL="" ARG VITE_DEFAULT_ORG_ID=local-dev ARG VITE_GIT_SHA=unknown ARG VITE_PUBLIC_POSTHOG_KEY="" diff --git a/frontend/src/services/api.ts b/frontend/src/services/api.ts index 1ad9169e..d82d2b19 100644 --- a/frontend/src/services/api.ts +++ b/frontend/src/services/api.ts @@ -83,14 +83,10 @@ function resolveApiBaseUrl() { } } - // When no explicit API URL is configured, use same-origin relative path. - // Works with path-based routing (e.g. /api/v1/* routed to backend via Ingress). - // Falls back to localhost only in local dev where Vite proxy handles it. - if (typeof window !== 'undefined' && window.location.hostname !== 'localhost') { - return ''; - } - - return 'http://localhost:3211'; + // No explicit API URL — use same-origin relative paths. + // Works with path-based routing (/api/v1/* routed to backend via Ingress). 
+ // For local dev, set VITE_API_URL=http://localhost:3211 in frontend/.env + return ''; } export const API_BASE_URL = resolveApiBaseUrl(); diff --git a/packages/backend-client/src/api-client.ts b/packages/backend-client/src/api-client.ts index 7bc0e034..1139dd0b 100644 --- a/packages/backend-client/src/api-client.ts +++ b/packages/backend-client/src/api-client.ts @@ -36,7 +36,7 @@ export class ShipSecApiClient { private baseUrl: string; constructor(config: ClientConfig = {}) { - this.baseUrl = config.baseUrl || 'http://localhost:3211'; + this.baseUrl = config.baseUrl || ''; this.client = createClient({ baseUrl: this.baseUrl, From cafd49e11a46c958b24bd4cc2266c8bb80307e29 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 11:52:24 +0400 Subject: [PATCH 20/36] fix(infra): enable Kafka event ingestion and fix cross-namespace connectivity - Set ENABLE_INGEST_SERVICES=true in backend (was false, disabling all Kafka consumers so trace events were never persisted) - Fix Redpanda advertised_kafka_api to use FQDN so workers in other namespaces can resolve the broker after initial connection - Pin deployed image tags in gke-managed.yaml values - Fix cloud-generic.yaml to keep minio/temporal/redpanda enabled (only postgres/redis are managed services) - Add Temporal Cloud SQL connection config (postgresHost/user/password) Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec-infra/templates/redpanda.yaml | 2 +- deploy/helm/shipsec-infra/values/cloud-generic.yaml | 12 ++++++++---- deploy/helm/shipsec/values/gke-managed.yaml | 11 ++++++++++- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/deploy/helm/shipsec-infra/templates/redpanda.yaml b/deploy/helm/shipsec-infra/templates/redpanda.yaml index ccb694fc..0617ce29 100644 --- a/deploy/helm/shipsec-infra/templates/redpanda.yaml +++ b/deploy/helm/shipsec-infra/templates/redpanda.yaml @@ -39,7 +39,7 @@ spec: - --overprovisioned - --node-id=0 - --check=false - - --advertise-kafka-addr=shipsec-redpanda:9092 + - --advertise-kafka-addr=shipsec-redpanda.{{ .Values.global.namespace }}.svc.cluster.local:9092 ports: - name: kafka containerPort: 9092 diff --git a/deploy/helm/shipsec-infra/values/cloud-generic.yaml b/deploy/helm/shipsec-infra/values/cloud-generic.yaml index 159c2797..9761173e 100644 --- a/deploy/helm/shipsec-infra/values/cloud-generic.yaml +++ b/deploy/helm/shipsec-infra/values/cloud-generic.yaml @@ -1,18 +1,22 @@ +# Managed services — disabled (Cloud SQL, Memorystore) postgres: enabled: false redis: enabled: false +# Still runs in-cluster minio: - enabled: false + enabled: true temporal: - enabled: false + enabled: true + postgresHost: '10.25.225.3' + postgresUser: shipsec + postgresPassword: shipsec-dev-2026 redpanda: - enabled: false + enabled: true loki: enabled: false - diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index af2dc417..d8c23102 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -16,13 +16,16 @@ secrets: minioRootPassword: minioadmin backend: + image: + repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/backend + tag: 718736ca-20260209130018 service: type: LoadBalancer env: NODE_ENV: production SHIPSEC_ENV: local PORT: '3211' - ENABLE_INGEST_SERVICES: 'false' + ENABLE_INGEST_SERVICES: 'true' TEMPORAL_ADDRESS: shipsec-temporal.shipsec-system.svc.cluster.local:7233 TEMPORAL_NAMESPACE: shipsec-dev TEMPORAL_TASK_QUEUE: 
shipsec-dev @@ -39,6 +42,9 @@ backend: AUTH_PROVIDER: local worker: + image: + repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/worker + tag: 62a5228a-k8s-v4-20260212020547 env: NODE_ENV: production SHIPSEC_ENV: local @@ -60,6 +66,9 @@ worker: BACKEND_URL: http://shipsec-backend.shipsec-system.svc.cluster.local:3211 frontend: + image: + repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/frontend + tag: c4c663fc-fe-20260212034158 service: type: LoadBalancer From ed5447290714813159e16ef756c9bbb5762d1157 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 12:05:18 +0400 Subject: [PATCH 21/36] fix(frontend): handle relative URL in terminal chunks fetch new URL() requires an absolute URL; pass window.location.origin as base so it works when API_BASE_URL is empty (relative path routing). Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec/values/gke-managed.yaml | 2 +- frontend/src/services/api.ts | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index d8c23102..6305213e 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -68,7 +68,7 @@ worker: frontend: image: repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/frontend - tag: c4c663fc-fe-20260212034158 + tag: 21a84c73-fe-20260212115953 service: type: LoadBalancer diff --git a/frontend/src/services/api.ts b/frontend/src/services/api.ts index d82d2b19..e9848ced 100644 --- a/frontend/src/services/api.ts +++ b/frontend/src/services/api.ts @@ -586,7 +586,8 @@ export const api = { }, ): Promise => { const headers = await getAuthHeaders(); - const url = new URL(`${API_V1_URL}/workflows/runs/${executionId}/terminal`); + const path = `${API_V1_URL}/workflows/runs/${executionId}/terminal`; + const url = new URL(path, window.location.origin); if (params?.nodeRef) url.searchParams.set('nodeRef', params.nodeRef); if (params?.stream) url.searchParams.set('stream', params.stream); if (params?.cursor) url.searchParams.set('cursor', params.cursor); From db487595f160a427e0735d184ce00a3ec7f7d2e1 Mon Sep 17 00:00:00 2001 From: betterclever Date: Thu, 12 Feb 2026 13:10:02 +0400 Subject: [PATCH 22/36] fix(worker): wait for container running before streaming K8s logs - Add waitForContainerRunning() to poll pod status before calling the K8s Log API, fixing HTTP 400 when container is still creating - Enable Loki in cloud-generic.yaml for log ingestion - Update deployed worker image tag Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- .../shipsec-infra/values/cloud-generic.yaml | 2 +- deploy/helm/shipsec/values/gke-managed.yaml | 2 +- worker/src/utils/k8s-runner.ts | 20 +++++++++++++++++++ 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/deploy/helm/shipsec-infra/values/cloud-generic.yaml b/deploy/helm/shipsec-infra/values/cloud-generic.yaml index 9761173e..d1c4fd63 100644 --- a/deploy/helm/shipsec-infra/values/cloud-generic.yaml +++ b/deploy/helm/shipsec-infra/values/cloud-generic.yaml @@ -19,4 +19,4 @@ redpanda: enabled: true loki: - enabled: false + enabled: true diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 6305213e..8423dd4e 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -44,7 +44,7 @@ backend: worker: image: 
     repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/worker
-    tag: 62a5228a-k8s-v4-20260212020547
+    tag: e2b51131-wk-20260212130102
   env:
     NODE_ENV: production
     SHIPSEC_ENV: local
diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts
index edf7747a..2b8f1feb 100644
--- a/worker/src/utils/k8s-runner.ts
+++ b/worker/src/utils/k8s-runner.ts
@@ -386,11 +386,31 @@ async function waitForJobCompletion(
  * Stream pod logs to the context logger and terminal collector.
  * Uses the K8s Log API with a writable stream to capture output in real-time.
  */
+async function waitForContainerRunning(
+  podName: string,
+  namespace: string,
+  timeoutMs = 60_000,
+): Promise<void> {
+  const core = getCoreApi();
+  const deadline = Date.now() + timeoutMs;
+  while (Date.now() < deadline) {
+    const pod = await core.readNamespacedPod({ name: podName, namespace });
+    const containerStatus = pod.status?.containerStatuses?.find((c) => c.name === 'component');
+    if (containerStatus?.state?.running || containerStatus?.state?.terminated) {
+      return;
+    }
+    await new Promise((r) => setTimeout(r, 1000));
+  }
+}
+
 async function streamPodLogs(
   podName: string,
   namespace: string,
   context: ExecutionContext,
 ): Promise<void> {
+  // Wait for container to be running before streaming logs
+  await waitForContainerRunning(podName, namespace);
+
   const kc = getKubeConfig();
   const log = new k8s.Log(kc);
 

From 6bbf037c5eba9d2e963981c9740b47447d3fa14c Mon Sep 17 00:00:00 2001
From: betterclever
Date: Thu, 12 Feb 2026 17:45:31 +0400
Subject: [PATCH 23/36] fix(worker): read terminated pod logs for terminal
 streaming

When K8s Job containers finish before log streaming starts, read the
final logs via readNamespacedPodLog instead of trying to follow an
already-terminated container. Also poll container status every 500ms
instead of 1s for faster log capture.

Co-Authored-By: Claude Opus 4.6
Signed-off-by: betterclever
Signed-off-by: betterclever
---
 worker/src/utils/k8s-runner.ts | 71 +++++++++++++++++++++++-----------
 1 file changed, 45 insertions(+), 26 deletions(-)

diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts
index 2b8f1feb..cb7c99cb 100644
--- a/worker/src/utils/k8s-runner.ts
+++ b/worker/src/utils/k8s-runner.ts
@@ -386,40 +386,32 @@ async function waitForJobCompletion(
  * Stream pod logs to the context logger and terminal collector.
  * Uses the K8s Log API with a writable stream to capture output in real-time.
  */
-async function waitForContainerRunning(
-  podName: string,
-  namespace: string,
-  timeoutMs = 60_000,
-): Promise<void> {
-  const core = getCoreApi();
-  const deadline = Date.now() + timeoutMs;
-  while (Date.now() < deadline) {
-    const pod = await core.readNamespacedPod({ name: podName, namespace });
-    const containerStatus = pod.status?.containerStatuses?.find((c) => c.name === 'component');
-    if (containerStatus?.state?.running || containerStatus?.state?.terminated) {
-      return;
-    }
-    await new Promise((r) => setTimeout(r, 1000));
-  }
-}
-
 async function streamPodLogs(
   podName: string,
   namespace: string,
   context: ExecutionContext,
 ): Promise<void> {
-  // Wait for container to be running before streaming logs
-  await waitForContainerRunning(podName, namespace);
-
+  const core = getCoreApi();
   const kc = getKubeConfig();
   const log = new k8s.Log(kc);
 
-  const { PassThrough } = await import('stream');
-  const logStream = new PassThrough();
+  // Wait for container to be ready (running or already terminated)
+  const deadline = Date.now() + 60_000;
+  let containerTerminated = false;
+  while (Date.now() < deadline) {
+    const pod = await core.readNamespacedPod({ name: podName, namespace });
+    const cs = pod.status?.containerStatuses?.find((c) => c.name === 'component');
+    if (cs?.state?.terminated) {
+      containerTerminated = true;
+      break;
+    }
+    if (cs?.state?.running) {
+      break;
+    }
+    await new Promise((r) => setTimeout(r, 500));
+  }
 
-  logStream.on('data', (chunk: Buffer) => {
-    const text = chunk.toString();
-    // Feed to terminal collector for real-time UI streaming
+  const emitToCollectors = (text: string) => {
     if (context.terminalCollector) {
       context.terminalCollector({
         runId: context.runId,
@@ -432,7 +424,6 @@ async function streamPodLogs(
         origin: 'k8s-job',
       });
     }
-    // Also feed to log collector
     if (context.logCollector) {
       context.logCollector({
         runId: context.runId,
@@ -443,6 +434,34 @@ async function streamPodLogs(
         timestamp: new Date().toISOString(),
       });
     }
+  };
+
+  // If container already terminated, read final logs instead of following
+  if (containerTerminated) {
+    try {
+      const logResponse = await core.readNamespacedPodLog({
+        name: podName,
+        namespace,
+        container: 'component',
+      });
+      const logText = typeof logResponse === 'string' ? logResponse : String(logResponse);
+      if (logText) {
+        emitToCollectors(logText);
+      }
+    } catch (err) {
+      context.logger.warn(
+        `[K8sRunner] Failed to read terminated pod logs: ${(err as Error).message}`,
+      );
+    }
+    return;
+  }
+
+  // Container is running — stream logs in real-time
+  const { PassThrough } = await import('stream');
+  const logStream = new PassThrough();
+
+  logStream.on('data', (chunk: Buffer) => {
+    emitToCollectors(chunk.toString());
   });
 
   try {

From 2e5563ca18982255e3b2cc1125c9607e8dfee84a Mon Sep 17 00:00:00 2001
From: betterclever
Date: Thu, 12 Feb 2026 18:17:36 +0400
Subject: [PATCH 24/36] fix(worker): emit terminal chunks as PTY with base64
 encoding

The K8s runner was emitting terminal chunks with stream='stdout' and
raw text payloads, but the frontend expects stream='pty' with
base64-encoded payloads (matching the Docker PTY runner format).
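To make the expected format concrete, here is a hedged sketch of the consuming side. The chunk fields mirror the terminalCollector payloads emitted by the runner in this series; the xterm.js-style `write` sink is an assumption and not part of this patch.

```typescript
// Hypothetical replay sketch, not part of this patch. `write` is assumed to
// be a terminal sink (e.g. an xterm.js instance) that accepts raw PTY bytes.
interface PtyChunk {
  chunkIndex: number;
  payload: string; // base64-encoded raw PTY bytes (colors, cursor control, etc.)
  deltaMs: number; // inter-chunk delay; 0 here until PATCH 28 adds real timing
}

async function replayPty(chunks: PtyChunk[], write: (data: Uint8Array) => void): Promise<void> {
  const ordered = [...chunks].sort((a, b) => a.chunkIndex - b.chunkIndex);
  for (const chunk of ordered) {
    if (chunk.deltaMs > 0) {
      await new Promise((r) => setTimeout(r, chunk.deltaMs));
    }
    // atob() yields a byte string; map each char code back to a raw byte
    write(Uint8Array.from(atob(chunk.payload), (c) => c.charCodeAt(0)));
  }
}
```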
Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- packages/component-sdk/src/types.ts | 2 +- worker/src/utils/k8s-runner.ts | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/packages/component-sdk/src/types.ts b/packages/component-sdk/src/types.ts index 19ae0c78..13055a51 100644 --- a/packages/component-sdk/src/types.ts +++ b/packages/component-sdk/src/types.ts @@ -14,7 +14,7 @@ import type { HttpInstrumentationOptions, HttpRequestInput } from './http/types' export type { ExecutionContextMetadata } from './interfaces'; -export type RunnerKind = 'inline' | 'docker' | 'remote'; +export type RunnerKind = 'inline' | 'docker' | 'remote' | 'k8s'; export interface InlineRunnerConfig { kind: 'inline'; diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts index cb7c99cb..0a438c49 100644 --- a/worker/src/utils/k8s-runner.ts +++ b/worker/src/utils/k8s-runner.ts @@ -411,17 +411,20 @@ async function streamPodLogs( await new Promise((r) => setTimeout(r, 500)); } + let chunkIndex = 0; const emitToCollectors = (text: string) => { if (context.terminalCollector) { + chunkIndex += 1; context.terminalCollector({ runId: context.runId, nodeRef: context.componentRef, - stream: 'stdout', - chunkIndex: 0, - payload: text, + stream: 'pty', + chunkIndex, + payload: Buffer.from(text).toString('base64'), recordedAt: new Date().toISOString(), deltaMs: 0, origin: 'k8s-job', + runnerKind: 'k8s', }); } if (context.logCollector) { From 4e0157355d748b22873aa779e13fbe75ccc6f5f9 Mon Sep 17 00:00:00 2001 From: betterclever Date: Fri, 13 Feb 2026 10:19:46 +0400 Subject: [PATCH 25/36] chore(deploy): update worker image tag for PTY terminal fix Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec/values/gke-managed.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 8423dd4e..6da8b42b 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -44,7 +44,7 @@ backend: worker: image: repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/worker - tag: e2b51131-wk-20260212130102 + tag: 8baf8519-wk-20260213054752 env: NODE_ENV: production SHIPSEC_ENV: local From 5ed06d42b563809a0d1229345b2071c6f2666765 Mon Sep 17 00:00:00 2001 From: betterclever Date: Fri, 13 Feb 2026 11:09:13 +0400 Subject: [PATCH 26/36] feat(worker): enable TTY on K8s Job containers for ANSI terminal output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tools like subfinder/httpx detect the PTY and output colors, progress bars, and cursor control — giving a real terminal experience in the UI. Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- worker/src/utils/k8s-runner.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts index 0a438c49..c3461d53 100644 --- a/worker/src/utils/k8s-runner.ts +++ b/worker/src/utils/k8s-runner.ts @@ -304,6 +304,7 @@ function buildJobSpec( command: command.length > 0 ? command : undefined, args: args.length > 0 ? 
args : undefined, env: envVars, + tty: true, volumeMounts, resources: { requests: { cpu: '100m', memory: '128Mi' }, From bbce297829b58d534c45c2d7c66e1e767b8b4637 Mon Sep 17 00:00:00 2001 From: betterclever Date: Fri, 13 Feb 2026 11:18:49 +0400 Subject: [PATCH 27/36] chore(deploy): update worker image tag with TTY support Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec/values/gke-managed.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 6da8b42b..9975e010 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -44,7 +44,7 @@ backend: worker: image: repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/worker - tag: 8baf8519-wk-20260213054752 + tag: dd99b5ac-wk-20260213070930 env: NODE_ENV: production SHIPSEC_ENV: local From b213a66182db5935ad350773cabd878e97a9d8fc Mon Sep 17 00:00:00 2001 From: betterclever Date: Fri, 13 Feb 2026 11:53:13 +0400 Subject: [PATCH 28/36] feat(worker): track deltaMs timing in K8s terminal stream chunks Enables asciinema-like replay timing by computing the delta between consecutive chunks received from the K8s log stream. Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- worker/src/utils/k8s-runner.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts index c3461d53..4232f592 100644 --- a/worker/src/utils/k8s-runner.ts +++ b/worker/src/utils/k8s-runner.ts @@ -413,17 +413,21 @@ async function streamPodLogs( } let chunkIndex = 0; + let lastTimestamp = Date.now(); const emitToCollectors = (text: string) => { if (context.terminalCollector) { chunkIndex += 1; + const now = Date.now(); + const deltaMs = chunkIndex === 1 ? 0 : Math.max(0, now - lastTimestamp); + lastTimestamp = now; context.terminalCollector({ runId: context.runId, nodeRef: context.componentRef, stream: 'pty', chunkIndex, payload: Buffer.from(text).toString('base64'), - recordedAt: new Date().toISOString(), - deltaMs: 0, + recordedAt: new Date(now).toISOString(), + deltaMs, origin: 'k8s-job', runnerKind: 'k8s', }); From d74f94a4a1c72b683ab235c5fe9eed5cad2bc71b Mon Sep 17 00:00:00 2001 From: betterclever Date: Fri, 13 Feb 2026 12:23:27 +0400 Subject: [PATCH 29/36] chore(deploy): update worker image tag with deltaMs timing Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec/values/gke-managed.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 9975e010..5afb4b1c 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -44,7 +44,7 @@ backend: worker: image: repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/worker - tag: dd99b5ac-wk-20260213070930 + tag: e58eb553-wk-20260213075339 env: NODE_ENV: production SHIPSEC_ENV: local From 54713112aafb30ac3c812f9b7fb92d81d00992c2 Mon Sep 17 00:00:00 2001 From: betterclever Date: Fri, 13 Feb 2026 15:03:06 +0400 Subject: [PATCH 30/36] feat(worker): use K8s Attach API for live PTY streaming Replaces the K8s Log API with the Attach API for running containers. Attach connects directly to the container's PTY fd via WebSocket, bypassing containerd log file buffering. 
This gives fine-grained chunk delivery matching the Docker PTY
runner's granularity. Falls back to Log API if Attach fails.

Co-Authored-By: Claude Opus 4.6
Signed-off-by: betterclever
Signed-off-by: betterclever
---
 worker/src/utils/k8s-runner.ts | 57 ++++++++++++++++++++++++++++------
 1 file changed, 47 insertions(+), 10 deletions(-)

diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts
index 4232f592..c91ba62e 100644
--- a/worker/src/utils/k8s-runner.ts
+++ b/worker/src/utils/k8s-runner.ts
@@ -464,22 +464,59 @@ async function streamPodLogs(
     return;
   }
 
-  // Container is running — stream logs in real-time
-  const { PassThrough } = await import('stream');
-  const logStream = new PassThrough();
+  // Container is running — attach to PTY for real-time streaming
+  const { Writable } = await import('stream');
 
-  logStream.on('data', (chunk: Buffer) => {
-    emitToCollectors(chunk.toString());
+  const stdoutSink = new Writable({
+    write(chunk: Buffer, _encoding, callback) {
+      emitToCollectors(chunk.toString());
+      callback();
+    },
   });
 
   try {
-    await log.log(namespace, podName, 'component', logStream, {
-      follow: true,
-      pretty: false,
-      timestamps: false,
+    context.logger.info(`[K8sRunner] Attaching to pod ${podName} with TTY for live PTY stream`);
+    const attach = new k8s.Attach(kc);
+    const ws = await attach.attach(
+      namespace,
+      podName,
+      'component',
+      stdoutSink,
+      stdoutSink,
+      null,
+      true,
+    );
+
+    // Wait for the WebSocket to close (container exits → WS closes)
+    await new Promise<void>((resolve, reject) => {
+      ws.onclose = () => {
+        context.logger.info(`[K8sRunner] Attach WebSocket closed for pod ${podName}`);
+        resolve();
+      };
+      ws.onerror = (event) => {
+        context.logger.warn(`[K8sRunner] Attach WebSocket error: ${String(event)}`);
+        reject(new Error('Attach WebSocket error'));
+      };
     });
   } catch (err) {
-    context.logger.warn(`[K8sRunner] Log streaming failed: ${(err as Error).message}`);
+    context.logger.warn(
+      `[K8sRunner] Attach failed, falling back to log stream: ${(err as Error).message}`,
+    );
+    // Fallback to log API if attach fails
+    const { PassThrough } = await import('stream');
+    const logStream = new PassThrough();
+    logStream.on('data', (chunk: Buffer) => {
+      emitToCollectors(chunk.toString());
+    });
+    try {
+      await log.log(namespace, podName, 'component', logStream, {
+        follow: true,
+        pretty: false,
+        timestamps: false,
+      });
+    } catch (logErr) {
+      context.logger.warn(`[K8sRunner] Log streaming also failed: ${(logErr as Error).message}`);
+    }
   }
 }

From 5d4d72e50792cba947fb1f0879ce071f05d69cf4 Mon Sep 17 00:00:00 2001
From: betterclever
Date: Fri, 13 Feb 2026 15:18:05 +0400
Subject: [PATCH 31/36] feat(deploy): add pods/attach RBAC for live PTY
 streaming via K8s Attach API

Co-Authored-By: Claude Opus 4.6
Signed-off-by: betterclever
Signed-off-by: betterclever
---
 deploy/helm/shipsec/templates/worker-rbac.yaml | 2 +-
 deploy/helm/shipsec/values/gke-managed.yaml    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/deploy/helm/shipsec/templates/worker-rbac.yaml b/deploy/helm/shipsec/templates/worker-rbac.yaml
index ca9b9cc2..48d26a0a 100644
--- a/deploy/helm/shipsec/templates/worker-rbac.yaml
+++ b/deploy/helm/shipsec/templates/worker-rbac.yaml
@@ -25,7 +25,7 @@ rules:
     resources: ["configmaps"]
     verbs: ["create", "get", "update", "delete"]
   - apiGroups: [""]
-    resources: ["pods", "pods/log"]
+    resources: ["pods", "pods/log", "pods/attach"]
     verbs: ["get", "list", "watch"]
 ---
 # Bind the worker SA (in shipsec-workers) to the Role (in shipsec-workloads)
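To verify the new rule from inside the worker, a SelfSubjectAccessReview preflight along these lines could be used. This is a sketch only, not part of this patch: websocket attach is authorized as a `get` on the `pods/attach` subresource, which matches the verbs granted above, and the namespace default mirrors the env var used elsewhere in this series.

```typescript
import * as k8s from '@kubernetes/client-node';

// Hypothetical preflight, not part of this patch. Checks that the bound Role
// actually grants pods/attach before the runner calls Attach.attach().
async function canAttach(): Promise<boolean> {
  const kc = new k8s.KubeConfig();
  kc.loadFromCluster();
  const authApi = kc.makeApiClient(k8s.AuthorizationV1Api);
  const review = await authApi.createSelfSubjectAccessReview({
    body: {
      spec: {
        resourceAttributes: {
          namespace: process.env.K8S_JOB_NAMESPACE || 'shipsec-workloads',
          verb: 'get',
          resource: 'pods',
          subresource: 'attach',
        },
      },
    },
  });
  return review.status?.allowed ?? false;
}
```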
diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 5afb4b1c..87f7974e 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -44,7 +44,7 @@ backend: worker: image: repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/worker - tag: e58eb553-wk-20260213075339 + tag: 1240aa05-wk-20260213110322 env: NODE_ENV: production SHIPSEC_ENV: local From 643acfaa09776a3969f3a55444b40a1ddade2ad1 Mon Sep 17 00:00:00 2001 From: betterclever Date: Tue, 17 Feb 2026 04:04:21 +0400 Subject: [PATCH 32/36] ci: add daily upstream sync workflow Adds a GitHub Action that checks ShipSecAI/studio main daily at 9am UTC and creates a PR to merge upstream changes into private/main. Also ignores .github/workflows/ from prettier (template syntax). Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever --- .github/workflows/upstream-sync.yml | 83 +++++++++++++++++++++++++++++ .prettierignore | 3 ++ 2 files changed, 86 insertions(+) create mode 100644 .github/workflows/upstream-sync.yml diff --git a/.github/workflows/upstream-sync.yml b/.github/workflows/upstream-sync.yml new file mode 100644 index 00000000..3f1488c1 --- /dev/null +++ b/.github/workflows/upstream-sync.yml @@ -0,0 +1,83 @@ +name: Sync upstream main + +on: + schedule: + - cron: "0 9 * * *" # Once daily at 9am UTC + workflow_dispatch: # Manual trigger + +permissions: + contents: write + pull-requests: write + +jobs: + sync: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Add upstream remote + run: | + git remote add upstream https://github.com/ShipSecAI/studio.git || true + git fetch upstream main + + - name: Check for divergence + id: check + run: | + UPSTREAM_SHA=$(git rev-parse upstream/main) + # Check if upstream-sync branch exists on origin + if git ls-remote --exit-code origin upstream-sync &>/dev/null; then + CURRENT_SHA=$(git rev-parse origin/upstream-sync) + else + CURRENT_SHA="" + fi + + if [ "$UPSTREAM_SHA" = "$CURRENT_SHA" ]; then + echo "skip=true" >> "$GITHUB_OUTPUT" + echo "No new upstream commits" + else + echo "skip=false" >> "$GITHUB_OUTPUT" + AHEAD=$(git rev-list --count origin/main..upstream/main) + echo "ahead=$AHEAD" >> "$GITHUB_OUTPUT" + echo "Upstream is $AHEAD commits ahead" + fi + + - name: Push upstream-sync branch + if: steps.check.outputs.skip == 'false' + run: | + git checkout -B upstream-sync upstream/main + git push origin upstream-sync --force + + - name: Create or update PR + if: steps.check.outputs.skip == 'false' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + EXISTING_PR=$(gh pr list --head upstream-sync --base main --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + + if [ -n "$EXISTING_PR" ]; then + echo "PR #$EXISTING_PR already exists, updated sync branch" + gh pr comment "$EXISTING_PR" --body "Sync branch updated. Upstream is now ${{ steps.check.outputs.ahead }} commits ahead of main." + else + gh pr create \ + --head upstream-sync \ + --base main \ + --title "sync: merge upstream main" \ + --body "$(cat <<'EOF' +Automated sync from [ShipSecAI/studio](https://github.com/ShipSecAI/studio) main. + +**${{ steps.check.outputs.ahead }} new upstream commits.** + +Review the changes and merge when ready. 
If there are conflicts, resolve them locally: +```bash +git fetch origin upstream-sync main +git checkout main +git merge origin/upstream-sync +# resolve conflicts +git push origin main +``` +EOF +)" + fi diff --git a/.prettierignore b/.prettierignore index 373eb18f..05dbaab3 100644 --- a/.prettierignore +++ b/.prettierignore @@ -11,3 +11,6 @@ node_modules/ # Generated files *.generated.ts + +# GitHub Actions (uses ${{ }} template syntax) +.github/workflows/ From df2fc2b77c306edaf9674048e6bfc7f24ba7bd3a Mon Sep 17 00:00:00 2001 From: betterclever Date: Tue, 17 Feb 2026 12:42:46 +0400 Subject: [PATCH 33/36] fix(worker): improve K8s runner output parsing for non-JSON delimited content - parseOutputFromLogs now returns raw string when delimited content isn't valid JSON (e.g. plain text domain lists), instead of falling through to the last-line JSON fallback - Update subfinder comments clarifying the "$@" distroless pattern - Update worker image tag for GKE deployment Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever Signed-off-by: betterclever --- deploy/helm/shipsec/values/gke-managed.yaml | 2 +- worker/src/components/security/subfinder.ts | 4 ++-- worker/src/utils/k8s-runner.ts | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 87f7974e..fcbc525e 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -44,7 +44,7 @@ backend: worker: image: repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/worker - tag: 1240aa05-wk-20260213110322 + tag: f1b15727-wk-amd64-v3 env: NODE_ENV: production SHIPSEC_ENV: local diff --git a/worker/src/components/security/subfinder.ts b/worker/src/components/security/subfinder.ts index e0b9a1f0..a2ec3e17 100644 --- a/worker/src/components/security/subfinder.ts +++ b/worker/src/components/security/subfinder.ts @@ -195,7 +195,7 @@ interface BuildSubfinderArgsOptions { const buildSubfinderArgs = (options: BuildSubfinderArgsOptions): string[] => { const args: string[] = []; - // Always use silent mode for clean output + // Use silent mode — stdout is the structured output (one subdomain per line) args.push('-silent'); // Domain list file input @@ -425,8 +425,8 @@ const definition = defineComponent({ timeoutSeconds: baseRunner.timeoutSeconds ?? SUBFINDER_TIMEOUT_SECONDS, env: { ...(baseRunner.env ?? {}) }, // Preserve the shell wrapper from baseRunner (sh -c 'subfinder "$@"' --) + // The K8s runner detects the "$@" pattern and strips the shell for distroless images entrypoint: baseRunner.entrypoint, - // Append subfinder arguments to shell wrapper command command: [...(baseRunner.command ?? 
[]), ...subfinderArgs], volumes: [volume.getVolumeConfig(CONTAINER_INPUT_DIR, true)], }; diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts index c91ba62e..65e9850b 100644 --- a/worker/src/utils/k8s-runner.ts +++ b/worker/src/utils/k8s-runner.ts @@ -643,10 +643,10 @@ function parseOutputFromLogs(logs: string, context: ExecutionContext): O { if (outputStr) { try { return JSON.parse(outputStr) as O; - } catch (e) { - context.logger.warn( - `[K8sRunner] Failed to parse delimited output: ${(e as Error).message}`, - ); + } catch { + // Not JSON — return delimited content as raw string + // (e.g., plain text domain lists from -o file output) + return outputStr as unknown as O; } } } From 959c3b11d25e592d70386bc1335142d4efb53ed6 Mon Sep 17 00:00:00 2001 From: betterclever Date: Tue, 17 Feb 2026 16:28:08 +0400 Subject: [PATCH 34/36] feat(worker): add GCS FUSE volume support for K8s runner Replace emptyDir volumes with GCS FUSE-backed volumes for K8s job pods, enabling persistent shared storage between the worker and job containers. - Add IsolatedGcsVolume class for GCS-backed volume management - Update K8s runner with gcsfuse sidecar volumes, pod annotations, and flush logic - Update isolated-volume factory to route to GCS when configured - Add GCS file listing in prowler-scan component - Add @google-cloud/storage dependency - Terraform: enable GCS FUSE addon, create bucket, configure IAM - Helm: add job runner KSA, env vars, and GCS config values Co-Authored-By: Claude Opus 4.6 Signed-off-by: betterclever --- bun.lock | 99 +++++++- .../shipsec/templates/worker-deployment.yaml | 8 + .../helm/shipsec/templates/worker-rbac.yaml | 17 ++ deploy/helm/shipsec/values/gke-managed.yaml | 4 + infra/gcp/envs/dev/main.tf | 78 +++++++ worker/package.json | 1 + .../src/components/security/prowler-scan.ts | 16 +- worker/src/utils/gcs-volume.ts | 214 ++++++++++++++++++ worker/src/utils/index.ts | 1 + worker/src/utils/isolated-volume.ts | 6 +- worker/src/utils/k8s-runner.ts | 86 ++++++- 11 files changed, 513 insertions(+), 17 deletions(-) create mode 100644 worker/src/utils/gcs-volume.ts diff --git a/bun.lock b/bun.lock index 7a53662f..98bfb436 100644 --- a/bun.lock +++ b/bun.lock @@ -260,6 +260,7 @@ "@ai-sdk/mcp": "^1.0.13", "@ai-sdk/openai": "^3.0.18", "@aws-sdk/client-s3": "^3.975.0", + "@google-cloud/storage": "^7.14.0", "@googleapis/admin": "^29.0.0", "@grpc/grpc-js": "^1.14.3", "@kubernetes/client-node": "^1.4.0", @@ -581,6 +582,14 @@ "@floating-ui/utils": ["@floating-ui/utils@0.2.10", "", {}, "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ=="], + "@google-cloud/paginator": ["@google-cloud/paginator@5.0.2", "", { "dependencies": { "arrify": "^2.0.0", "extend": "^3.0.2" } }, "sha512-DJS3s0OVH4zFDB1PzjxAsHqJT6sKVbRwwML0ZBP9PbU7Yebtu/7SWMRzvO2J3nUi9pRNITCfu4LJeooM2w4pjg=="], + + "@google-cloud/projectify": ["@google-cloud/projectify@4.0.0", "", {}, "sha512-MmaX6HeSvyPbWGwFq7mXdo0uQZLGBYCwziiLIGq5JVX+/bdI3SAq6bP98trV5eTWfLuvsMcIC1YJOF2vfteLFA=="], + + "@google-cloud/promisify": ["@google-cloud/promisify@4.0.0", "", {}, "sha512-Orxzlfb9c67A15cq2JQEyVc7wEsmFBmHjZWZYQMUyJ1qivXyMwdyNOs9odi79hze+2zqdTtu1E19IM/FtqZ10g=="], + + "@google-cloud/storage": ["@google-cloud/storage@7.19.0", "", { "dependencies": { "@google-cloud/paginator": "^5.0.0", "@google-cloud/projectify": "^4.0.0", "@google-cloud/promisify": "<4.1.0", "abort-controller": "^3.0.0", "async-retry": "^1.3.3", "duplexify": "^4.1.3", "fast-xml-parser": "^5.3.4", "gaxios": "^6.0.2", 
"google-auth-library": "^9.6.3", "html-entities": "^2.5.2", "mime": "^3.0.0", "p-limit": "^3.0.1", "retry-request": "^7.0.0", "teeny-request": "^9.0.0", "uuid": "^8.0.0" } }, "sha512-n2FjE7NAOYyshogdc7KQOl/VZb4sneqPjWouSyia9CMDdMhRX5+RIbqalNmC7LOLzuLAN89VlF2HvG8na9G+zQ=="], + "@googleapis/admin": ["@googleapis/admin@30.3.0", "", { "dependencies": { "googleapis-common": "^8.0.0" } }, "sha512-9vBP163vUDGb7BZuGat0Hzajf010t4HuXrR13MWDF/2pCNcg65gAAOzu3PSTIcqiuxL7nsjhkzj+oxg/t7s3vA=="], "@grpc/grpc-js": ["@grpc/grpc-js@1.14.3", "", { "dependencies": { "@grpc/proto-loader": "^0.8.0", "@js-sdsl/ordered-map": "^4.4.2" } }, "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA=="], @@ -1087,6 +1096,8 @@ "@tokenizer/token": ["@tokenizer/token@0.3.0", "", {}, "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A=="], + "@tootallnate/once": ["@tootallnate/once@2.0.0", "", {}, "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A=="], + "@tootallnate/quickjs-emscripten": ["@tootallnate/quickjs-emscripten@0.23.0", "", {}, "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA=="], "@types/adm-zip": ["@types/adm-zip@0.5.7", "", { "dependencies": { "@types/node": "*" } }, "sha512-DNEs/QvmyRLurdQPChqq0Md4zGvPwHerAJYWk9l2jCbD1VPpnzRJorOdiq4zsw09NFbYnhfsoEhWtxIzXpn2yw=="], @@ -1107,6 +1118,8 @@ "@types/bun": ["@types/bun@1.3.8", "", { "dependencies": { "bun-types": "1.3.8" } }, "sha512-3LvWJ2q5GerAXYxO2mffLTqOzEu5qnhEAlh48Vnu8WQfnmSwbgagjGZV6BoHKJztENYEDn6QmVd949W4uESRJA=="], + "@types/caseless": ["@types/caseless@0.12.5", "", {}, "sha512-hWtVTC2q7hc7xZ/RLbxapMvDMgUnDvKvMOpKal4DrMyfGBUfB1oKaZlIRr6mJL+If3bAP6sV/QneGzF6tJjZDg=="], + "@types/connect": ["@types/connect@3.4.38", "", { "dependencies": { "@types/node": "*" } }, "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug=="], "@types/cookie-parser": ["@types/cookie-parser@1.4.10", "", { "peerDependencies": { "@types/express": "*" } }, "sha512-B4xqkqfZ8Wek+rCOeRxsjMS9OgvzebEzzLYw7NHYuvzb7IdxOkI0ZHGgeEBX4PUM7QGVvNSK60T3OvWj3YfBRg=="], @@ -1245,6 +1258,8 @@ "@types/readable-stream": ["@types/readable-stream@4.0.23", "", { "dependencies": { "@types/node": "*" } }, "sha512-wwXrtQvbMHxCbBgjHaMGEmImFTQxxpfMOR/ZoQnXxB1woqkUbdLGFDgauo00Py9IudiaqSeiBiulSV9i6XIPig=="], + "@types/request": ["@types/request@2.48.13", "", { "dependencies": { "@types/caseless": "*", "@types/node": "*", "@types/tough-cookie": "*", "form-data": "^2.5.5" } }, "sha512-FGJ6udDNUCjd19pp0Q3iTiDkwhYup7J8hpMW9c4k53NrccQFFWKRho6hvtPPEhnXWKvukfwAlB6DbDz4yhH5Gg=="], + "@types/send": ["@types/send@1.2.1", "", { "dependencies": { "@types/node": "*" } }, "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ=="], "@types/serve-static": ["@types/serve-static@2.2.0", "", { "dependencies": { "@types/http-errors": "*", "@types/node": "*" } }, "sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ=="], @@ -1257,6 +1272,8 @@ "@types/tinycolor2": ["@types/tinycolor2@1.4.6", "", {}, "sha512-iEN8J0BoMnsWBqjVbWH/c0G0Hh7O21lpR2/+PrvAVgWdzL7eexIFm4JN/Wn10PTcmNdtS6U67r499mlWMXOxNw=="], + "@types/tough-cookie": ["@types/tough-cookie@4.0.5", "", {}, "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA=="], + "@types/trusted-types": ["@types/trusted-types@2.0.7", "", {}, 
"sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw=="], "@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="], @@ -1403,7 +1420,7 @@ "arraybuffer.prototype.slice": ["arraybuffer.prototype.slice@1.0.4", "", { "dependencies": { "array-buffer-byte-length": "^1.0.1", "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "is-array-buffer": "^3.0.4" } }, "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ=="], - "arrify": ["arrify@1.0.1", "", {}, "sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA=="], + "arrify": ["arrify@2.0.1", "", {}, "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug=="], "asap": ["asap@2.0.6", "", {}, "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA=="], @@ -1413,6 +1430,8 @@ "async-function": ["async-function@1.0.0", "", {}, "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA=="], + "async-retry": ["async-retry@1.3.3", "", { "dependencies": { "retry": "0.13.1" } }, "sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw=="], + "asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="], "autoprefixer": ["autoprefixer@10.4.24", "", { "dependencies": { "browserslist": "^4.28.1", "caniuse-lite": "^1.0.30001766", "fraction.js": "^5.3.4", "picocolors": "^1.1.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.1.0" }, "bin": { "autoprefixer": "bin/autoprefixer" } }, "sha512-uHZg7N9ULTVbutaIsDRoUkoS8/h3bdsmVJYZ5l3wv8Cp/6UIIoRDm90hZ+BwxUj/hGBEzLxdHNSKuFpn8WOyZw=="], @@ -1705,6 +1724,8 @@ "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], + "duplexify": ["duplexify@4.1.3", "", { "dependencies": { "end-of-stream": "^1.4.1", "inherits": "^2.0.3", "readable-stream": "^3.1.1", "stream-shift": "^1.0.2" } }, "sha512-M3BmBhwJRZsSx38lZyhE53Csddgzl5R7xGJNk7CVddZD6CcmwMCH8J+7AprIrQKH7TonKxaCjcv27Qmf+sQ+oA=="], + "eastasianwidth": ["eastasianwidth@0.2.0", "", {}, "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="], "ecdsa-sig-formatter": ["ecdsa-sig-formatter@1.0.11", "", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ=="], @@ -1895,7 +1916,7 @@ "functions-have-names": ["functions-have-names@1.2.3", "", {}, "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ=="], - "gaxios": ["gaxios@7.1.3", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2", "rimraf": "^5.0.1" } }, "sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ=="], + "gaxios": ["gaxios@6.7.1", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "is-stream": "^2.0.0", "node-fetch": "^2.6.9", "uuid": "^9.0.1" } }, 
"sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ=="], "gcp-metadata": ["gcp-metadata@8.1.2", "", { "dependencies": { "gaxios": "^7.0.0", "google-logging-utils": "^1.0.0", "json-bigint": "^1.0.0" } }, "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg=="], @@ -2007,6 +2028,8 @@ "html-encoding-sniffer": ["html-encoding-sniffer@6.0.0", "", { "dependencies": { "@exodus/bytes": "^1.6.0" } }, "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg=="], + "html-entities": ["html-entities@2.6.0", "", {}, "sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ=="], + "html-to-image": ["html-to-image@1.11.11", "", {}, "sha512-9gux8QhvjRO/erSnDPv28noDZcPZmYE7e1vFsBLKLlRlKDSqNJYebj6Qz1TGd5lsRV+X+xYyjCKjuZdABinWjA=="], "html-url-attributes": ["html-url-attributes@3.0.1", "", {}, "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ=="], @@ -2111,6 +2134,8 @@ "is-shared-array-buffer": ["is-shared-array-buffer@1.0.4", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A=="], + "is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="], + "is-string": ["is-string@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA=="], "is-symbol": ["is-symbol@1.1.1", "", { "dependencies": { "call-bound": "^1.0.2", "has-symbols": "^1.1.0", "safe-regex-test": "^1.1.0" } }, "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w=="], @@ -2357,7 +2382,7 @@ "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], - "mime": ["mime@2.6.0", "", { "bin": { "mime": "cli.js" } }, "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg=="], + "mime": ["mime@3.0.0", "", { "bin": { "mime": "cli.js" } }, "sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A=="], "mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="], @@ -2733,6 +2758,10 @@ "restore-cursor": ["restore-cursor@5.1.0", "", { "dependencies": { "onetime": "^7.0.0", "signal-exit": "^4.1.0" } }, "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA=="], + "retry": ["retry@0.13.1", "", {}, "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg=="], + + "retry-request": ["retry-request@7.0.2", "", { "dependencies": { "@types/request": "^2.48.8", "extend": "^3.0.2", "teeny-request": "^9.0.0" } }, "sha512-dUOvLMJ0/JJYEn8NrpOaGNE7X3vpI5XlZS/u0ANjqtcZVKnIxP7IgCFwrKTxENw29emmwug53awKtaMm4i9g5w=="], + "reusify": ["reusify@1.1.0", "", {}, "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], "rfc4648": ["rfc4648@1.5.4", "", {}, "sha512-rRg/6Lb+IGfJqO05HZkN50UtY7K/JhxJag1kP23+zyMfrvoB0B7RWv06MbOzoc79RgCdNTiUaNsTT1AJZ7Z+cg=="], @@ -2855,8 +2884,12 @@ "stream-chain": ["stream-chain@2.2.5", "", {}, 
"sha512-1TJmBx6aSWqZ4tx7aTpBDXK0/e2hhcNSTV8+CbFJtDjbb+I1mZ8lHit0Grw9GRT+6JbIrrDd8esncgBi8aBXGA=="], + "stream-events": ["stream-events@1.0.5", "", { "dependencies": { "stubs": "^3.0.0" } }, "sha512-E1GUzBSgvct8Jsb3v2X15pjzN1tYebtbLaMg+eBOUOAxgbLoSbT2NS91ckc5lJD1KfLjId+jXJRgo0qnV5Nerg=="], + "stream-json": ["stream-json@1.9.1", "", { "dependencies": { "stream-chain": "^2.2.5" } }, "sha512-uWkjJ+2Nt/LO9Z/JyKZbMusL8Dkh97uUBTv3AJQ74y07lVahLY4eEFsPsE97pxYBwr8nnjMAIch5eqI0gPShyw=="], + "stream-shift": ["stream-shift@1.0.3", "", {}, "sha512-76ORR0DO1o1hlKwTbi/DM3EXWGf3ZJYO8cXX5RJwnul2DEg2oyoZyjLNoQM8WsvZiFKCRfC1O0J7iCvie3RZmQ=="], + "streamsearch": ["streamsearch@1.1.0", "", {}, "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg=="], "streamx": ["streamx@2.23.0", "", { "dependencies": { "events-universal": "^1.0.0", "fast-fifo": "^1.3.2", "text-decoder": "^1.1.0" } }, "sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg=="], @@ -2895,6 +2928,8 @@ "strtok3": ["strtok3@10.3.4", "", { "dependencies": { "@tokenizer/token": "^0.3.0" } }, "sha512-KIy5nylvC5le1OdaaoCJ07L+8iQzJHGH6pWDuzS+d07Cu7n1MZ2x26P8ZKIWfbK02+XIL8Mp4RkWeqdUCrDMfg=="], + "stubs": ["stubs@3.0.0", "", {}, "sha512-PdHt7hHUJKxvTCgbKX9C1V/ftOcjJQgz8BZwNfV5c4B6dcGqlpelTbJ999jBGZ2jYiPAwcX5dP6oBwVlBlUbxw=="], + "style-to-js": ["style-to-js@1.1.21", "", { "dependencies": { "style-to-object": "1.0.14" } }, "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ=="], "style-to-object": ["style-to-object@1.0.14", "", { "dependencies": { "inline-style-parser": "0.2.7" } }, "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw=="], @@ -2933,6 +2968,8 @@ "tar-stream": ["tar-stream@3.1.7", "", { "dependencies": { "b4a": "^1.6.4", "fast-fifo": "^1.2.0", "streamx": "^2.15.0" } }, "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ=="], + "teeny-request": ["teeny-request@9.0.0", "", { "dependencies": { "http-proxy-agent": "^5.0.0", "https-proxy-agent": "^5.0.0", "node-fetch": "^2.6.9", "stream-events": "^1.0.5", "uuid": "^9.0.0" } }, "sha512-resvxdc6Mgb7YEThw6G6bExlXKkv6+YbuzGg9xuXxSgxJF7Ozs+o8Y9+2R3sArdWdW8nOokoQb1yrpFB0pQK2g=="], + "terser": ["terser@5.46.0", "", { "dependencies": { "@jridgewell/source-map": "^0.3.3", "acorn": "^8.15.0", "commander": "^2.20.0", "source-map-support": "~0.5.20" }, "bin": { "terser": "bin/terser" } }, "sha512-jTwoImyr/QbOWFFso3YoU3ik0jBBDJ6JTOQiy/J2YxVJdZCc+5u7skhNwiOR3FQIygFqVUPHl7qbbxtjW2K3Qg=="], "terser-webpack-plugin": ["terser-webpack-plugin@5.3.16", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.25", "jest-worker": "^27.4.5", "schema-utils": "^4.3.0", "serialize-javascript": "^6.0.2", "terser": "^5.31.1" }, "peerDependencies": { "webpack": "^5.1.0" } }, "sha512-h9oBFCWrq78NyWWVcSwZarJkZ01c2AyGrzs1crmHZO3QUg9D61Wu4NPjBy69n7JqylFF5y+CsUZYmYEIZ3mR+Q=="], @@ -3205,6 +3242,12 @@ "@eslint/eslintrc/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], + "@google-cloud/storage/fast-xml-parser": ["fast-xml-parser@5.3.6", "", { "dependencies": { "strnum": "^2.1.2" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-QNI3sAvSvaOiaMl8FYU4trnEzCwiRr8XMWgAHzlrWpTSj+QaCSvOf1h82OEP1s4hiAXhnbXSyFWCf4ldZzZRVA=="], + + "@google-cloud/storage/google-auth-library": ["google-auth-library@9.15.1", "", { "dependencies": { "base64-js": 
"^1.3.0", "ecdsa-sig-formatter": "^1.0.11", "gaxios": "^6.1.1", "gcp-metadata": "^6.1.0", "gtoken": "^7.0.0", "jws": "^4.0.0" } }, "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng=="], + + "@google-cloud/storage/uuid": ["uuid@8.3.2", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="], + "@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], "@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], @@ -3341,6 +3384,10 @@ "@types/readable-stream/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], + "@types/request/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], + + "@types/request/form-data": ["form-data@2.5.5", "", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", "hasown": "^2.0.2", "mime-types": "^2.1.35", "safe-buffer": "^5.2.1" } }, "sha512-jqdObeR2rxZZbPSGL+3VckHMYtu+f9//KXBsVny6JSX/pa38Fy+bGjuG8eW/H6USNQWhLi8Num++cU2yOCNz4A=="], + "@types/send/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], "@types/serve-static/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], @@ -3395,6 +3442,8 @@ "drizzle-kit/esbuild": ["esbuild@0.25.12", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.12", "@esbuild/android-arm": "0.25.12", "@esbuild/android-arm64": "0.25.12", "@esbuild/android-x64": "0.25.12", "@esbuild/darwin-arm64": "0.25.12", "@esbuild/darwin-x64": "0.25.12", "@esbuild/freebsd-arm64": "0.25.12", "@esbuild/freebsd-x64": "0.25.12", "@esbuild/linux-arm": "0.25.12", "@esbuild/linux-arm64": "0.25.12", "@esbuild/linux-ia32": "0.25.12", "@esbuild/linux-loong64": "0.25.12", "@esbuild/linux-mips64el": "0.25.12", "@esbuild/linux-ppc64": "0.25.12", "@esbuild/linux-riscv64": "0.25.12", "@esbuild/linux-s390x": "0.25.12", "@esbuild/linux-x64": "0.25.12", "@esbuild/netbsd-arm64": "0.25.12", "@esbuild/netbsd-x64": "0.25.12", "@esbuild/openbsd-arm64": "0.25.12", "@esbuild/openbsd-x64": "0.25.12", "@esbuild/openharmony-arm64": "0.25.12", "@esbuild/sunos-x64": "0.25.12", "@esbuild/win32-arm64": "0.25.12", "@esbuild/win32-ia32": "0.25.12", "@esbuild/win32-x64": "0.25.12" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg=="], + "duplexify/readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], 
+ "escodegen/source-map": ["source-map@0.6.1", "", {}, "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="], "eslint/ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="], @@ -3415,14 +3464,22 @@ "form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], - "gaxios/node-fetch": ["node-fetch@3.3.2", "", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="], + "gaxios/uuid": ["uuid@9.0.1", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA=="], + + "gcp-metadata/gaxios": ["gaxios@7.1.3", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2", "rimraf": "^5.0.1" } }, "sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ=="], "get-uri/data-uri-to-buffer": ["data-uri-to-buffer@6.0.2", "", {}, "sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw=="], "glob/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], + "google-auth-library/gaxios": ["gaxios@7.1.3", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2", "rimraf": "^5.0.1" } }, "sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ=="], + + "googleapis-common/gaxios": ["gaxios@7.1.3", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2", "rimraf": "^5.0.1" } }, "sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ=="], + "gradient-string/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], + "gtoken/gaxios": ["gaxios@7.1.3", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2", "rimraf": "^5.0.1" } }, "sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ=="], + "hast-util-from-html/parse5": ["parse5@7.3.0", "", { "dependencies": { "entities": "^6.0.0" } }, "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw=="], "hast-util-from-parse5/hastscript": ["hastscript@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w=="], @@ -3455,6 +3512,8 @@ "micromatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], + "minimist-options/arrify": ["arrify@1.0.1", "", {}, 
"sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA=="], + "minio/lodash": ["lodash@4.17.23", "", {}, "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w=="], "minio/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], @@ -3541,12 +3600,20 @@ "sucrase/commander": ["commander@4.1.1", "", {}, "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA=="], + "superagent/mime": ["mime@2.6.0", "", { "bin": { "mime": "cli.js" } }, "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg=="], + "supertest/cookie-signature": ["cookie-signature@1.2.2", "", {}, "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg=="], "tailwindcss/postcss-selector-parser": ["postcss-selector-parser@6.1.2", "", { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg=="], "tailwindcss/resolve": ["resolve@1.22.11", "", { "dependencies": { "is-core-module": "^2.16.1", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { "resolve": "bin/resolve" } }, "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ=="], + "teeny-request/http-proxy-agent": ["http-proxy-agent@5.0.0", "", { "dependencies": { "@tootallnate/once": "2", "agent-base": "6", "debug": "4" } }, "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w=="], + + "teeny-request/https-proxy-agent": ["https-proxy-agent@5.0.1", "", { "dependencies": { "agent-base": "6", "debug": "4" } }, "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA=="], + + "teeny-request/uuid": ["uuid@9.0.1", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA=="], + "terser/commander": ["commander@2.20.3", "", {}, "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="], "through2/readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], @@ -3633,6 +3700,12 @@ "@eslint/eslintrc/ajv/json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="], + "@google-cloud/storage/fast-xml-parser/strnum": ["strnum@2.1.2", "", {}, "sha512-l63NF9y/cLROq/yqKXSLtcMeeyOfnSQlfMSlzFt/K73oIaD8DGaQWd7Z34X9GPiKqP5rbSh84Hl4bOlLcjiSrQ=="], + + "@google-cloud/storage/google-auth-library/gcp-metadata": ["gcp-metadata@6.1.1", "", { "dependencies": { "gaxios": "^6.1.1", "google-logging-utils": "^0.0.2", "json-bigint": "^1.0.0" } }, "sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A=="], + + "@google-cloud/storage/google-auth-library/gtoken": ["gtoken@7.1.0", "", { "dependencies": { "gaxios": "^6.0.0", "jws": "^4.0.0" } }, "sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw=="], + "@isaacs/cliui/string-width/emoji-regex": 
["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], "@isaacs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], @@ -3695,6 +3768,8 @@ "@types/express/@types/express-serve-static-core/@types/node": ["@types/node@25.2.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w=="], + "@types/request/form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], + "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], "body-parser/debug/ms": ["ms@2.0.0", "", {}, "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="], @@ -3769,10 +3844,18 @@ "form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], + "gcp-metadata/gaxios/node-fetch": ["node-fetch@3.3.2", "", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="], + "glob/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], + "google-auth-library/gaxios/node-fetch": ["node-fetch@3.3.2", "", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="], + + "googleapis-common/gaxios/node-fetch": ["node-fetch@3.3.2", "", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="], + "gradient-string/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], + "gtoken/gaxios/node-fetch": ["node-fetch@3.3.2", "", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="], + "hast-util-from-html/parse5/entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], "hast-util-from-parse5/hastscript/hast-util-parse-selector": ["hast-util-parse-selector@4.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A=="], @@ -3809,6 +3892,10 @@ "refractor/@types/hast/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], + "teeny-request/http-proxy-agent/agent-base": 
["agent-base@6.0.2", "", { "dependencies": { "debug": "4" } }, "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ=="], + + "teeny-request/https-proxy-agent/agent-base": ["agent-base@6.0.2", "", { "dependencies": { "debug": "4" } }, "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ=="], + "ts-loader/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], "vizion/async/lodash": ["lodash@4.17.23", "", {}, "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w=="], @@ -3833,6 +3920,8 @@ "@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="], + "@google-cloud/storage/google-auth-library/gcp-metadata/google-logging-utils": ["google-logging-utils@0.0.2", "", {}, "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ=="], + "@nestjs/platform-express/express/accepts/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], "@nestjs/platform-express/express/accepts/negotiator": ["negotiator@0.6.3", "", {}, "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg=="], @@ -3845,6 +3934,8 @@ "@nestjs/platform-express/express/type-is/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], + "@types/request/form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], + "body-parser/type-is/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], "markdown-it-html5-embed/markdown-it/argparse/sprintf-js": ["sprintf-js@1.0.3", "", {}, "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g=="], diff --git a/deploy/helm/shipsec/templates/worker-deployment.yaml b/deploy/helm/shipsec/templates/worker-deployment.yaml index 750bab31..359a39e4 100644 --- a/deploy/helm/shipsec/templates/worker-deployment.yaml +++ b/deploy/helm/shipsec/templates/worker-deployment.yaml @@ -53,6 +53,14 @@ spec: - name: K8S_IMAGE_PULL_SECRET value: {{ .Values.execution.k8s.imagePullSecret | quote }} {{- end }} + {{- if .Values.execution.k8s.gcsBucket }} + - name: GCS_VOLUME_BUCKET + value: {{ .Values.execution.k8s.gcsBucket | quote }} + {{- end }} + {{- if .Values.execution.k8s.jobServiceAccount }} + - name: K8S_JOB_SERVICE_ACCOUNT + value: {{ .Values.execution.k8s.jobServiceAccount | quote }} + {{- end }} {{- else if .Values.execution.workerDockerHost }} - name: DOCKER_HOST value: {{ .Values.execution.workerDockerHost | quote }} diff --git a/deploy/helm/shipsec/templates/worker-rbac.yaml b/deploy/helm/shipsec/templates/worker-rbac.yaml index 48d26a0a..fbe1b305 100644 --- a/deploy/helm/shipsec/templates/worker-rbac.yaml +++ b/deploy/helm/shipsec/templates/worker-rbac.yaml @@ -8,6 +8,10 @@ metadata: labels: {{- 
include "shipsec.labels" . | nindent 4 }} app.kubernetes.io/component: worker + {{- if .Values.execution.k8s.workerGcpSa }} + annotations: + iam.gke.io/gcp-service-account: {{ .Values.execution.k8s.workerGcpSa | quote }} + {{- end }} --- # Role in the workloads namespace — worker creates Jobs and ConfigMaps here apiVersion: rbac.authorization.k8s.io/v1 @@ -44,4 +48,17 @@ subjects: - kind: ServiceAccount name: shipsec-worker namespace: {{ .Values.global.namespaces.workers }} +{{- if .Values.execution.k8s.jobRunnerGcpSa }} +--- +# ServiceAccount for job pods (GCS FUSE CSI access via Workload Identity) +apiVersion: v1 +kind: ServiceAccount +metadata: + name: shipsec-job-runner + namespace: {{ .Values.global.namespaces.workloads }} + labels: + {{- include "shipsec.labels" . | nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: {{ .Values.execution.k8s.jobRunnerGcpSa | quote }} +{{- end }} {{- end }} diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index fcbc525e..0bb20468 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -88,3 +88,7 @@ execution: jobNamespace: shipsec-workloads imagePullPolicy: IfNotPresent imagePullSecret: ghcr-creds + gcsBucket: shipsec-volumes-shipsec-dev + jobServiceAccount: shipsec-job-runner + jobRunnerGcpSa: shipsec-job-runner@shipsec.iam.gserviceaccount.com + workerGcpSa: shipsec-worker@shipsec.iam.gserviceaccount.com diff --git a/infra/gcp/envs/dev/main.tf b/infra/gcp/envs/dev/main.tf index 66a9291a..6b46c499 100644 --- a/infra/gcp/envs/dev/main.tf +++ b/infra/gcp/envs/dev/main.tf @@ -78,6 +78,12 @@ resource "google_container_cluster" "gke" { workload_pool = "${var.project_id}.svc.id.goog" } + addons_config { + gcs_fuse_csi_driver_config { + enabled = true + } + } + depends_on = [google_project_service.enabled] } @@ -101,6 +107,78 @@ resource "google_container_node_pool" "default_pool" { } } +# --- GCS FUSE volume support --- + +# GCS bucket for job volumes +resource "google_storage_bucket" "volumes" { + project = var.project_id + name = "${var.project_id}-volumes-${var.cluster_name}" + location = var.region + uniform_bucket_level_access = true + force_destroy = true + + lifecycle_rule { + condition { + age = 7 + } + action { + type = "Delete" + } + } +} + +# GCP SA for job pods (mounted via GCS FUSE CSI) +resource "google_service_account" "job_runner" { + project = var.project_id + account_id = "shipsec-job-runner" + display_name = "ShipSec K8s Job Runner" +} + +# Job runner SA → bucket access +resource "google_storage_bucket_iam_member" "job_runner_storage" { + bucket = google_storage_bucket.volumes.name + role = "roles/storage.objectUser" + member = "serviceAccount:${google_service_account.job_runner.email}" +} + +# Workload Identity: K8s SA → GCP SA (for job pods in shipsec-workloads) +resource "google_service_account_iam_member" "job_runner_wi" { + service_account_id = google_service_account.job_runner.name + role = "roles/iam.workloadIdentityUser" + member = "serviceAccount:${var.project_id}.svc.id.goog[shipsec-workloads/shipsec-job-runner]" +} + +# Worker SA also needs GCS access (to upload inputs / read outputs via SDK) +resource "google_service_account" "worker" { + project = var.project_id + account_id = "shipsec-worker" + display_name = "ShipSec Worker" +} + +resource "google_storage_bucket_iam_member" "worker_storage" { + bucket = google_storage_bucket.volumes.name + role = "roles/storage.objectUser" + member = 
"serviceAccount:${google_service_account.worker.email}" +} + +resource "google_service_account_iam_member" "worker_wi" { + service_account_id = google_service_account.worker.name + role = "roles/iam.workloadIdentityUser" + member = "serviceAccount:${var.project_id}.svc.id.goog[shipsec-workers/shipsec-worker]" +} + +output "gcs_volumes_bucket" { + value = google_storage_bucket.volumes.name +} + +output "job_runner_sa_email" { + value = google_service_account.job_runner.email +} + +output "worker_sa_email" { + value = google_service_account.worker.email +} + output "artifact_registry_repo" { value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.docker.repository_id}" } diff --git a/worker/package.json b/worker/package.json index 5dbc67a1..7df4a1a3 100644 --- a/worker/package.json +++ b/worker/package.json @@ -23,6 +23,7 @@ "@ai-sdk/mcp": "^1.0.13", "@ai-sdk/openai": "^3.0.18", "@aws-sdk/client-s3": "^3.975.0", + "@google-cloud/storage": "^7.14.0", "@googleapis/admin": "^29.0.0", "@grpc/grpc-js": "^1.14.3", "@kubernetes/client-node": "^1.4.0", diff --git a/worker/src/components/security/prowler-scan.ts b/worker/src/components/security/prowler-scan.ts index ac696849..fd9cf81f 100644 --- a/worker/src/components/security/prowler-scan.ts +++ b/worker/src/components/security/prowler-scan.ts @@ -300,8 +300,22 @@ async function listVolumeFiles(volume: ReturnType): const volumeName = volume.getVolumeName(); if (!volumeName) return []; - // In K8s mode, volumes are ConfigMap-backed — list keys via K8s API + // In K8s mode, volumes are ConfigMap-backed or GCS-backed if (process.env.EXECUTION_MODE === 'k8s') { + // GCS FUSE volumes: list objects in the GCS prefix via SDK + if (process.env.GCS_VOLUME_BUCKET) { + try { + const { Storage } = await import('@google-cloud/storage'); + const storage = new Storage(); + const bucket = storage.bucket(process.env.GCS_VOLUME_BUCKET); + const [files] = await bucket.getFiles({ prefix: `${volumeName}/` }); + return files.map((f) => f.name.replace(`${volumeName}/`, '')); + } catch { + return []; + } + } + + // ConfigMap-backed volumes: list keys via K8s API try { const k8s = await import('@kubernetes/client-node'); const kc = new k8s.KubeConfig(); diff --git a/worker/src/utils/gcs-volume.ts b/worker/src/utils/gcs-volume.ts new file mode 100644 index 00000000..fc3e5aac --- /dev/null +++ b/worker/src/utils/gcs-volume.ts @@ -0,0 +1,214 @@ +/** + * IsolatedGcsVolume — GCS FUSE CSI volume replacement for IsolatedK8sVolume. + * + * Uses a GCS bucket mounted via the GCS FUSE CSI driver instead of ConfigMaps. + * Same interface as IsolatedK8sVolume / IsolatedContainerVolume so components + * can swap transparently via the createIsolatedVolume() factory. 
+ * + * Advantages over ConfigMap-backed volumes: + * - No 1 MiB size limit (handles large outputs like Prowler) + * - Native read-write (no log-based writeback hack) + * - ReadWriteMany (parallel pods can share data) + * - Worker reads output directly from GCS via SDK + */ +import { Storage } from '@google-cloud/storage'; +import { ValidationError, ConfigurationError, ContainerError } from '@shipsec/component-sdk'; + +let _storage: Storage | null = null; + +function getStorage(): Storage { + if (!_storage) { + // Auto-discovers Workload Identity credentials in GKE + _storage = new Storage(); + } + return _storage; +} + +function getBucketName(): string { + const bucket = process.env.GCS_VOLUME_BUCKET; + if (!bucket) { + throw new ConfigurationError('GCS_VOLUME_BUCKET environment variable is not set'); + } + return bucket; +} + +function sanitizeName(raw: string): string { + return raw + .toLowerCase() + .replace(/[^a-z0-9-]/g, '-') + .replace(/-+/g, '-') + .replace(/^-|-$/g, '') + .slice(0, 53); +} + +export class IsolatedGcsVolume { + private prefix?: string; + private isInitialized = false; + private bucketName: string; + + constructor( + private tenantId: string, + private runId: string, + ) { + if (!/^[a-zA-Z0-9_-]+$/.test(tenantId)) { + throw new ValidationError( + 'Invalid tenant ID: must contain only alphanumeric characters, hyphens, and underscores', + { + fieldErrors: { + tenantId: ['must contain only alphanumeric characters, hyphens, and underscores'], + }, + }, + ); + } + if (!/^[a-zA-Z0-9_-]+$/.test(runId)) { + throw new ValidationError( + 'Invalid run ID: must contain only alphanumeric characters, hyphens, and underscores', + { + fieldErrors: { + runId: ['must contain only alphanumeric characters, hyphens, and underscores'], + }, + }, + ); + } + this.bucketName = getBucketName(); + } + + /** + * Upload files to GCS under a unique prefix and return the prefix. + * GCS key structure: {tenantId}/{runId}/{timestamp}/{filename} + */ + async initialize(files: Record): Promise { + if (this.isInitialized) { + throw new ConfigurationError('Volume already initialized', { + details: { prefix: this.prefix, tenantId: this.tenantId, runId: this.runId }, + }); + } + + const timestamp = Date.now(); + const tenantShort = sanitizeName(this.tenantId); + const runShort = sanitizeName(this.runId); + this.prefix = `${tenantShort}/${runShort}/${timestamp}`; + + try { + const storage = getStorage(); + const bucket = storage.bucket(this.bucketName); + + const uploads = Object.entries(files).map(async ([filename, content]) => { + this.validateFilename(filename); + const key = `${this.prefix}/${filename}`; + const file = bucket.file(key); + const data = typeof content === 'string' ? Buffer.from(content, 'utf-8') : content; + await file.save(data); + }); + + await Promise.all(uploads); + + this.isInitialized = true; + return this.prefix; + } catch (error) { + if (this.prefix) { + await this.cleanup().catch(() => {}); + } + throw new ContainerError( + `Failed to initialize GCS volume: ${error instanceof Error ? error.message : String(error)}`, + { + cause: error instanceof Error ? 
error : undefined, + details: { tenantId: this.tenantId, runId: this.runId }, + }, + ); + } + } + + private validateFilename(filename: string): void { + if (filename.includes('..') || filename.startsWith('/')) { + throw new ValidationError(`Invalid filename (path traversal): ${filename}`, { + fieldErrors: { filename: ['path traversal not allowed'] }, + }); + } + const safePattern = /^[a-zA-Z0-9._/-]+$/; + if (!safePattern.test(filename)) { + throw new ValidationError(`Invalid filename (contains unsafe characters): ${filename}`, { + fieldErrors: { filename: ['contains unsafe characters'] }, + }); + } + } + + /** + * Download files from GCS by name. + */ + async readFiles(filenames: string[]): Promise> { + if (!this.prefix) { + throw new ConfigurationError('Volume not initialized'); + } + + const storage = getStorage(); + const bucket = storage.bucket(this.bucketName); + const results: Record = {}; + + for (const filename of filenames) { + try { + const key = `${this.prefix}/${filename}`; + const file = bucket.file(key); + const [contents] = await file.download(); + results[filename] = contents.toString('utf-8'); + } catch (error) { + console.warn( + `Could not read file ${filename} from GCS: ${error instanceof Error ? error.message : String(error)}`, + ); + } + } + + return results; + } + + /** + * Returns volume config for the runner. + * The K8s runner recognizes the "gcsfuse:" prefix and creates a CSI volume. + * Format: "gcsfuse:{bucketName}:{prefix}" + */ + getVolumeConfig(containerPath = '/inputs', readOnly = true) { + if (!this.prefix) { + throw new ConfigurationError('Volume not initialized'); + } + return { + source: `gcsfuse:${this.bucketName}:${this.prefix}`, + target: containerPath, + readOnly, + }; + } + + /** + * Returns a bind mount string (for interface compatibility). + */ + getBindMount(containerPath = '/inputs', readOnly = true): string { + if (!this.prefix) { + throw new ConfigurationError('Volume not initialized'); + } + const mode = readOnly ? 'ro' : 'rw'; + return `gcsfuse:${this.bucketName}:${this.prefix}:${containerPath}:${mode}`; + } + + /** + * Delete all objects under the GCS prefix. + */ + async cleanup(): Promise { + if (!this.prefix) return; + + try { + const storage = getStorage(); + const bucket = storage.bucket(this.bucketName); + await bucket.deleteFiles({ prefix: `${this.prefix}/` }); + } catch (error) { + console.error( + `Failed to cleanup GCS volume ${this.prefix}: ${error instanceof Error ? 
error.message : String(error)}`, + ); + } finally { + this.isInitialized = false; + this.prefix = undefined; + } + } + + getVolumeName(): string | undefined { + return this.prefix; + } +} diff --git a/worker/src/utils/index.ts b/worker/src/utils/index.ts index 909c6e94..a8581412 100644 --- a/worker/src/utils/index.ts +++ b/worker/src/utils/index.ts @@ -8,3 +8,4 @@ export { createIsolatedVolume, } from './isolated-volume'; export { IsolatedK8sVolume } from './k8s-volume'; +export { IsolatedGcsVolume } from './gcs-volume'; diff --git a/worker/src/utils/isolated-volume.ts b/worker/src/utils/isolated-volume.ts index 50400b57..f266b6d5 100644 --- a/worker/src/utils/isolated-volume.ts +++ b/worker/src/utils/isolated-volume.ts @@ -3,6 +3,7 @@ import { promisify } from 'util'; import { exec as execCallback } from 'child_process'; import { ValidationError, ConfigurationError, ContainerError } from '@shipsec/component-sdk'; import { IsolatedK8sVolume } from './k8s-volume'; +import { IsolatedGcsVolume } from './gcs-volume'; const exec = promisify(execCallback); @@ -557,8 +558,11 @@ export async function cleanupOrphanedVolumes(olderThanHours = 24): Promise; // mountPath → configMapName + hasGcsFuse: boolean; } // Lazy-init shared K8s clients @@ -220,7 +221,8 @@ function buildJobSpec( }, ]; - // Handle additional volumes (from IsolatedK8sVolume) + // Handle additional volumes (from IsolatedK8sVolume or IsolatedGcsVolume) + let hasGcsFuse = false; if (runner.volumes) { for (let i = 0; i < runner.volumes.length; i++) { const vol = runner.volumes[i]; @@ -228,7 +230,25 @@ function buildJobSpec( const volName = `extra-vol-${i}`; - if (vol.source.startsWith('configmap:') && (vol.readOnly ?? true)) { + if (vol.source.startsWith('gcsfuse:')) { + // GCS FUSE CSI volume from IsolatedGcsVolume + // Parse "gcsfuse:{bucket}:{prefix}" + const [, bucketName, ...prefixParts] = vol.source.split(':'); + const onlyDir = prefixParts.join(':'); + hasGcsFuse = true; + volumes.push({ + name: volName, + csi: { + driver: 'gcsfuse.csi.storage.gke.io', + readOnly: vol.readOnly ?? false, + volumeAttributes: { + bucketName, + mountOptions: `implicit-dirs,only-dir=${onlyDir}`, + }, + }, + }); + // NO writableVolumeMappings tracking needed — GCS handles write natively + } else if (vol.source.startsWith('configmap:') && (vol.readOnly ?? true)) { // ConfigMap-backed volume from IsolatedK8sVolume (read-only) const cmName = vol.source.replace('configmap:', ''); volumes.push({ @@ -288,9 +308,14 @@ function buildJobSpec( 'app.kubernetes.io/managed-by': 'shipsec-worker', 'shipsec.ai/run-id': sanitizeName(context.runId), }, + ...(hasGcsFuse ? { annotations: { 'gke-gcsfuse/volumes': 'true' } } : {}), }, spec: { restartPolicy: 'Never', + ...(process.env.K8S_JOB_SERVICE_ACCOUNT + ? { serviceAccountName: process.env.K8S_JOB_SERVICE_ACCOUNT } + : {}), + ...(hasGcsFuse ? { terminationGracePeriodSeconds: 60 } : {}), ...(process.env.K8S_IMAGE_PULL_SECRET ? { imagePullSecrets: [{ name: process.env.K8S_IMAGE_PULL_SECRET }] } : {}), @@ -318,7 +343,7 @@ function buildJobSpec( }, }; - return { job, writableVolumeMappings }; + return { job, writableVolumeMappings, hasGcsFuse }; } /** @@ -669,6 +694,42 @@ function parseOutputFromLogs(logs: string, context: ExecutionContext): O { return cleanLogs.trim() as unknown as O; } +/** + * Wait for the GCS FUSE sidecar container to terminate after the main + * container exits. This ensures all writes are flushed to GCS before + * the worker reads output via the GCS SDK. 
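+ *
+ * Implementation note: polls the pod's containerStatuses for the
+ * GKE-injected "gke-gcsfuse-sidecar" container every 2s, for up to 60s.
+ * If no sidecar is present (e.g. the annotation was never injected),
+ * the wait is skipped rather than treated as an error.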
+ */ +async function waitForGcsFuseFlush( + podName: string, + namespace: string, + context: ExecutionContext, +): Promise { + const core = getCoreApi(); + const deadline = Date.now() + 60_000; // max 60s wait + + context.logger.info(`[K8sRunner] Waiting for GCS FUSE sidecar flush on pod ${podName}`); + + while (Date.now() < deadline) { + const pod = await core.readNamespacedPod({ name: podName, namespace }); + const sidecar = pod.status?.containerStatuses?.find((c) => c.name === 'gke-gcsfuse-sidecar'); + + if (!sidecar) { + // No sidecar found — GCS FUSE may not have been injected, skip wait + context.logger.info(`[K8sRunner] No GCS FUSE sidecar found, skipping flush wait`); + return; + } + + if (sidecar.state?.terminated) { + context.logger.info(`[K8sRunner] GCS FUSE sidecar terminated, flush complete`); + return; + } + + await new Promise((r) => setTimeout(r, 2000)); + } + + context.logger.warn(`[K8sRunner] GCS FUSE sidecar did not terminate within 60s, proceeding`); +} + /** * Clean up resources created for a Job execution. */ @@ -727,13 +788,11 @@ export async function runComponentInK8sJob( context.logger.info(`[K8sRunner] Created ConfigMap ${configMapName}`); // 2. Create Job - const { job: jobSpec, writableVolumeMappings } = buildJobSpec( - jobName, - namespace, - configMapName, - runner, - context, - ); + const { + job: jobSpec, + writableVolumeMappings, + hasGcsFuse: hasGcsFuseVolume, + } = buildJobSpec(jobName, namespace, configMapName, runner, context); await getBatchApi().createNamespacedJob({ namespace, body: jobSpec }); context.logger.info(`[K8sRunner] Created Job ${jobName}`); @@ -758,7 +817,12 @@ export async function runComponentInK8sJob( context.logger.info(`[K8sRunner] Job ${jobName} completed successfully`); context.emitProgress('K8s Job completed'); - // 4.5. Write back writable volume data to ConfigMaps + // 4.5a. Wait for GCS FUSE sidecar to flush writes before reading output + if (hasGcsFuseVolume) { + await waitForGcsFuseFlush(podName, namespace, context); + } + + // 4.5b. 
Write back writable volume data to ConfigMaps // Must happen BEFORE cleanup so volume.readFiles() can access updated ConfigMaps if (writableVolumeMappings.size > 0) { const volumeData = extractVolumeDataFromLogs(logs); From 49d5de9a8efba55d8e97a4ea98602aeb9178785b Mon Sep 17 00:00:00 2001 From: betterclever Date: Wed, 18 Feb 2026 00:04:25 +0400 Subject: [PATCH 35/36] chore(infra): migrate dev terraform state to remote GCS backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace dev/ config (wrong custom-VPC config) with the correct adopted cluster config that was previously living in dev-local/ - Migrate local terraform state from dev-local/ to remote GCS backend (gs://shipsec-tfstate-66676596284/infra/gcp/dev/) - Remove dev-local/ — it was a misnomer; dev/ now manages the real cluster - Add GCS FUSE addon, volumes bucket, job-runner SA, and IAM to dev/main.tf Co-Authored-By: Claude Sonnet 4.6 Signed-off-by: betterclever --- infra/gcp/envs/dev-local/main.tf | 287 ------------------ infra/gcp/envs/dev-local/variables.tf | 74 ----- infra/gcp/envs/dev-local/versions.tf | 18 -- .../{dev-local => dev}/.terraform.lock.hcl | 0 infra/gcp/envs/dev/main.tf | 281 +++++++++++++---- infra/gcp/envs/dev/variables.tf | 20 ++ 6 files changed, 240 insertions(+), 440 deletions(-) delete mode 100644 infra/gcp/envs/dev-local/main.tf delete mode 100644 infra/gcp/envs/dev-local/variables.tf delete mode 100644 infra/gcp/envs/dev-local/versions.tf rename infra/gcp/envs/{dev-local => dev}/.terraform.lock.hcl (100%) diff --git a/infra/gcp/envs/dev-local/main.tf b/infra/gcp/envs/dev-local/main.tf deleted file mode 100644 index d7a0f5b1..00000000 --- a/infra/gcp/envs/dev-local/main.tf +++ /dev/null @@ -1,287 +0,0 @@ -# -------------------------------------------------------------------------- -# Adopt the existing shipsec-dev GKE cluster into Terraform. -# The cluster was created imperatively on the default VPC, so we reference -# the network/subnet as data sources rather than managing them. -# -------------------------------------------------------------------------- - -locals { - services = toset([ - "cloudresourcemanager.googleapis.com", - "serviceusage.googleapis.com", - "iam.googleapis.com", - "compute.googleapis.com", - "container.googleapis.com", - "artifactregistry.googleapis.com", - "secretmanager.googleapis.com", - "sqladmin.googleapis.com", - "redis.googleapis.com", - "servicenetworking.googleapis.com", - ]) -} - -resource "google_project_service" "enabled" { - for_each = local.services - project = var.project_id - service = each.value - - disable_on_destroy = false -} - -resource "google_artifact_registry_repository" "docker" { - project = var.project_id - location = var.region - repository_id = var.artifact_repo_name - format = "DOCKER" - - depends_on = [google_project_service.enabled] -} - -# The cluster lives on the default VPC — we don't manage it, just reference it. 
-data "google_compute_network" "default" { - project = var.project_id - name = "default" -} - -data "google_compute_subnetwork" "default" { - project = var.project_id - region = var.region - name = "default" -} - -resource "google_container_cluster" "gke" { - project = var.project_id - name = var.cluster_name - location = var.zone - - deletion_protection = false - initial_node_count = 1 - - release_channel { - channel = "REGULAR" - } - - network = data.google_compute_network.default.id - subnetwork = data.google_compute_subnetwork.default.id - - ip_allocation_policy { - cluster_secondary_range_name = "gke-shipsec-dev-pods-0a61f82c" - } - - workload_identity_config { - workload_pool = "${var.project_id}.svc.id.goog" - } - - # initial_node_count drifts to 0 after remove_default_node_pool removes it. - # node_config/node_pool are managed by the separate google_container_node_pool resource. - lifecycle { - ignore_changes = [initial_node_count, node_config, node_pool] - } - - depends_on = [google_project_service.enabled] -} - -resource "google_container_node_pool" "default_pool" { - project = var.project_id - name = "default-pool" - cluster = google_container_cluster.gke.name - location = var.zone - - initial_node_count = var.node_count - - node_config { - machine_type = var.node_machine_type - disk_type = "pd-balanced" - disk_size_gb = var.node_disk_gb - image_type = "COS_CONTAINERD" - - oauth_scopes = [ - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/trace.append", - ] - } -} - -# ========================================================================== -# Managed Services: Cloud SQL, Memorystore, GCS -# ========================================================================== - -# Private Service Access — allows Cloud SQL and Memorystore to get private IPs -# on the default VPC so GKE pods can reach them without public IPs. 
-resource "google_compute_global_address" "private_ip_range" { - project = var.project_id - name = "shipsec-private-ip-range" - purpose = "VPC_PEERING" - address_type = "INTERNAL" - prefix_length = 20 - network = data.google_compute_network.default.id - - depends_on = [google_project_service.enabled] -} - -resource "google_service_networking_connection" "private_vpc" { - network = data.google_compute_network.default.id - service = "servicenetworking.googleapis.com" - reserved_peering_ranges = [google_compute_global_address.private_ip_range.name] - - depends_on = [google_project_service.enabled] -} - -# --- Cloud SQL (PostgreSQL 16) --- -resource "google_sql_database_instance" "postgres" { - project = var.project_id - name = "${var.cluster_name}-pg" - region = var.region - database_version = "POSTGRES_16" - - deletion_protection = false - - settings { - tier = var.cloudsql_tier - edition = "ENTERPRISE" - availability_type = "ZONAL" - disk_size = 10 - disk_type = "PD_SSD" - disk_autoresize = true - - ip_configuration { - ipv4_enabled = false - private_network = data.google_compute_network.default.id - enable_private_path_for_google_cloud_services = true - } - - backup_configuration { - enabled = true - start_time = "03:00" - point_in_time_recovery_enabled = true - transaction_log_retention_days = 7 - backup_retention_settings { - retained_backups = 7 - } - } - } - - depends_on = [google_service_networking_connection.private_vpc] -} - -resource "google_sql_database" "shipsec" { - project = var.project_id - instance = google_sql_database_instance.postgres.name - name = "shipsec" -} - -resource "google_sql_database" "temporal" { - project = var.project_id - instance = google_sql_database_instance.postgres.name - name = "temporal" -} - -resource "google_sql_user" "shipsec" { - project = var.project_id - instance = google_sql_database_instance.postgres.name - name = "shipsec" - password = var.db_password -} - -# --- Memorystore (Redis) --- -resource "google_redis_instance" "redis" { - project = var.project_id - name = "${var.cluster_name}-redis" - region = var.region - tier = "BASIC" - memory_size_gb = var.redis_memory_gb - - authorized_network = data.google_compute_network.default.id - connect_mode = "PRIVATE_SERVICE_ACCESS" - - redis_version = "REDIS_7_2" - - depends_on = [google_service_networking_connection.private_vpc] -} - -# --- GCS (replaces MinIO for artifact/file storage) --- -resource "google_storage_bucket" "artifacts" { - project = var.project_id - name = "${var.project_id}-artifacts-${var.cluster_name}" - location = var.region - force_destroy = true - - uniform_bucket_level_access = true - - versioning { - enabled = false - } - - lifecycle_rule { - condition { - age = 90 - } - action { - type = "Delete" - } - } -} - -# Service account for GCS access via Workload Identity -resource "google_service_account" "storage" { - project = var.project_id - account_id = "${var.cluster_name}-storage" - display_name = "Storage SA for ${var.cluster_name}" -} - -resource "google_storage_bucket_iam_member" "storage_admin" { - bucket = google_storage_bucket.artifacts.name - role = "roles/storage.objectAdmin" - member = "serviceAccount:${google_service_account.storage.email}" -} - -# Workload Identity binding: allow K8s SA "storage" in shipsec-system -# namespace to impersonate this GCP SA. 
-resource "google_service_account_iam_member" "storage_wi" { - service_account_id = google_service_account.storage.name - role = "roles/iam.workloadIdentityUser" - member = "serviceAccount:${var.project_id}.svc.id.goog[shipsec-system/storage]" -} - -# ========================================================================== -# Outputs -# ========================================================================== - -output "artifact_registry_repo" { - value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.docker.repository_id}" -} - -output "cluster_location" { - value = var.zone -} - -output "cluster_name" { - value = google_container_cluster.gke.name -} - -# Cloud SQL -output "database_url" { - value = "postgresql://${google_sql_user.shipsec.name}:${var.db_password}@${google_sql_database_instance.postgres.private_ip_address}:5432/shipsec" - sensitive = true -} - -output "cloudsql_private_ip" { - value = google_sql_database_instance.postgres.private_ip_address -} - -# Memorystore -output "redis_url" { - value = "redis://${google_redis_instance.redis.host}:${google_redis_instance.redis.port}" -} - -# GCS (via Workload Identity) -output "gcs_bucket" { - value = google_storage_bucket.artifacts.name -} - -output "gcs_storage_sa_email" { - value = google_service_account.storage.email -} diff --git a/infra/gcp/envs/dev-local/variables.tf b/infra/gcp/envs/dev-local/variables.tf deleted file mode 100644 index 16b28fd5..00000000 --- a/infra/gcp/envs/dev-local/variables.tf +++ /dev/null @@ -1,74 +0,0 @@ -variable "project_id" { - type = string - description = "GCP project id (e.g. shipsec)." -} - -variable "access_token" { - type = string - description = "Optional short-lived OAuth access token (bypasses ADC)." - default = null - sensitive = true -} - -variable "region" { - type = string - description = "GCP region (e.g. us-central1)." - default = "us-central1" -} - -variable "zone" { - type = string - description = "GCP zone for a zonal dev cluster (e.g. us-central1-a)." - default = "us-central1-a" -} - -variable "cluster_name" { - type = string - description = "GKE cluster name." - default = "shipsec-dev" -} - -variable "artifact_repo_name" { - type = string - description = "Artifact Registry repo name (Docker)." - default = "shipsec-studio" -} - -variable "node_machine_type" { - type = string - description = "Machine type for dev nodes." - default = "e2-standard-4" -} - -variable "node_count" { - type = number - description = "Initial node count for the dev node pool." - default = 2 -} - -variable "node_disk_gb" { - type = number - description = "Boot disk size (GB)." - default = 100 -} - -# --- Managed services --- - -variable "cloudsql_tier" { - type = string - description = "Cloud SQL machine tier." - default = "db-custom-1-3840" -} - -variable "db_password" { - type = string - description = "Password for the shipsec Cloud SQL user." - sensitive = true -} - -variable "redis_memory_gb" { - type = number - description = "Memorystore Redis memory in GB." 
- default = 1 -} - diff --git a/infra/gcp/envs/dev-local/versions.tf b/infra/gcp/envs/dev-local/versions.tf deleted file mode 100644 index fdc002a6..00000000 --- a/infra/gcp/envs/dev-local/versions.tf +++ /dev/null @@ -1,18 +0,0 @@ -terraform { - required_version = ">= 1.5.0" - - required_providers { - google = { - source = "hashicorp/google" - version = ">= 5.20.0" - } - } -} - -provider "google" { - project = var.project_id - region = var.region - zone = var.zone - access_token = var.access_token -} - diff --git a/infra/gcp/envs/dev-local/.terraform.lock.hcl b/infra/gcp/envs/dev/.terraform.lock.hcl similarity index 100% rename from infra/gcp/envs/dev-local/.terraform.lock.hcl rename to infra/gcp/envs/dev/.terraform.lock.hcl diff --git a/infra/gcp/envs/dev/main.tf b/infra/gcp/envs/dev/main.tf index 6b46c499..aca8ce26 100644 --- a/infra/gcp/envs/dev/main.tf +++ b/infra/gcp/envs/dev/main.tf @@ -1,3 +1,9 @@ +# -------------------------------------------------------------------------- +# Adopt the existing shipsec-dev GKE cluster into Terraform. +# The cluster was created imperatively on the default VPC, so we reference +# the network/subnet as data sources rather than managing them. +# -------------------------------------------------------------------------- + locals { services = toset([ "cloudresourcemanager.googleapis.com", @@ -7,6 +13,9 @@ locals { "container.googleapis.com", "artifactregistry.googleapis.com", "secretmanager.googleapis.com", + "sqladmin.googleapis.com", + "redis.googleapis.com", + "servicenetworking.googleapis.com", ]) } @@ -27,30 +36,16 @@ resource "google_artifact_registry_repository" "docker" { depends_on = [google_project_service.enabled] } -resource "google_compute_network" "vpc" { - project = var.project_id - name = "${var.cluster_name}-vpc" - auto_create_subnetworks = false - - depends_on = [google_project_service.enabled] +# The cluster lives on the default VPC — we don't manage it, just reference it. +data "google_compute_network" "default" { + project = var.project_id + name = "default" } -resource "google_compute_subnetwork" "subnet" { - project = var.project_id - region = var.region - name = "${var.cluster_name}-subnet" - network = google_compute_network.vpc.id - ip_cidr_range = "10.10.0.0/16" - - secondary_ip_range { - range_name = "pods" - ip_cidr_range = "10.20.0.0/16" - } - - secondary_ip_range { - range_name = "services" - ip_cidr_range = "10.30.0.0/20" - } +data "google_compute_subnetwork" "default" { + project = var.project_id + region = var.region + name = "default" } resource "google_container_cluster" "gke" { @@ -59,23 +54,17 @@ resource "google_container_cluster" "gke" { location = var.zone deletion_protection = false - remove_default_node_pool = true initial_node_count = 1 release_channel { channel = "REGULAR" } - network = google_compute_network.vpc.id - subnetwork = google_compute_subnetwork.subnet.id + network = data.google_compute_network.default.id + subnetwork = data.google_compute_subnetwork.default.id ip_allocation_policy { - cluster_secondary_range_name = "pods" - services_secondary_range_name = "services" - } - - workload_identity_config { - workload_pool = "${var.project_id}.svc.id.goog" + cluster_secondary_range_name = "gke-shipsec-dev-pods-0a61f82c" } addons_config { @@ -84,6 +73,16 @@ resource "google_container_cluster" "gke" { } } + workload_identity_config { + workload_pool = "${var.project_id}.svc.id.goog" + } + + # initial_node_count drifts to 0 after remove_default_node_pool removes it. 
+ # node_config/node_pool are managed by the separate google_container_node_pool resource. + lifecycle { + ignore_changes = [initial_node_count, node_config, node_pool] + } + depends_on = [google_project_service.enabled] } @@ -102,14 +101,162 @@ resource "google_container_node_pool" "default_pool" { image_type = "COS_CONTAINERD" oauth_scopes = [ - "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/trace.append", ] } } -# --- GCS FUSE volume support --- +# ========================================================================== +# Managed Services: Cloud SQL, Memorystore, GCS +# ========================================================================== + +# Private Service Access — allows Cloud SQL and Memorystore to get private IPs +# on the default VPC so GKE pods can reach them without public IPs. +resource "google_compute_global_address" "private_ip_range" { + project = var.project_id + name = "shipsec-private-ip-range" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 20 + network = data.google_compute_network.default.id + + depends_on = [google_project_service.enabled] +} + +resource "google_service_networking_connection" "private_vpc" { + network = data.google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_range.name] + + depends_on = [google_project_service.enabled] +} + +# --- Cloud SQL (PostgreSQL 16) --- +resource "google_sql_database_instance" "postgres" { + project = var.project_id + name = "${var.cluster_name}-pg" + region = var.region + database_version = "POSTGRES_16" + + deletion_protection = false + + settings { + tier = var.cloudsql_tier + edition = "ENTERPRISE" + availability_type = "ZONAL" + disk_size = 10 + disk_type = "PD_SSD" + disk_autoresize = true + + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.default.id + enable_private_path_for_google_cloud_services = true + } + + backup_configuration { + enabled = true + start_time = "03:00" + point_in_time_recovery_enabled = true + transaction_log_retention_days = 7 + backup_retention_settings { + retained_backups = 7 + } + } + } + + depends_on = [google_service_networking_connection.private_vpc] +} + +resource "google_sql_database" "shipsec" { + project = var.project_id + instance = google_sql_database_instance.postgres.name + name = "shipsec" +} + +resource "google_sql_database" "temporal" { + project = var.project_id + instance = google_sql_database_instance.postgres.name + name = "temporal" +} + +resource "google_sql_user" "shipsec" { + project = var.project_id + instance = google_sql_database_instance.postgres.name + name = "shipsec" + password = var.db_password +} + +# --- Memorystore (Redis) --- +resource "google_redis_instance" "redis" { + project = var.project_id + name = "${var.cluster_name}-redis" + region = var.region + tier = "BASIC" + memory_size_gb = var.redis_memory_gb + + authorized_network = data.google_compute_network.default.id + connect_mode = "PRIVATE_SERVICE_ACCESS" + + redis_version = "REDIS_7_2" + + depends_on = [google_service_networking_connection.private_vpc] +} + +# --- GCS (replaces MinIO for artifact/file storage) --- +resource 
"google_storage_bucket" "artifacts" { + project = var.project_id + name = "${var.project_id}-artifacts-${var.cluster_name}" + location = var.region + force_destroy = true + + uniform_bucket_level_access = true -# GCS bucket for job volumes + versioning { + enabled = false + } + + lifecycle_rule { + condition { + age = 90 + } + action { + type = "Delete" + } + } +} + +# Service account for GCS access via Workload Identity +resource "google_service_account" "storage" { + project = var.project_id + account_id = "${var.cluster_name}-storage" + display_name = "Storage SA for ${var.cluster_name}" +} + +resource "google_storage_bucket_iam_member" "storage_admin" { + bucket = google_storage_bucket.artifacts.name + role = "roles/storage.objectAdmin" + member = "serviceAccount:${google_service_account.storage.email}" +} + +# Workload Identity binding: allow K8s SA "storage" in shipsec-system +# namespace to impersonate this GCP SA. +resource "google_service_account_iam_member" "storage_wi" { + service_account_id = google_service_account.storage.name + role = "roles/iam.workloadIdentityUser" + member = "serviceAccount:${var.project_id}.svc.id.goog[shipsec-system/storage]" +} + +# ========================================================================== +# GCS FUSE Volume Support (for K8s job pods) +# ========================================================================== + +# GCS bucket for job volumes (auto-cleanup after 7 days) resource "google_storage_bucket" "volumes" { project = var.project_id name = "${var.project_id}-volumes-${var.cluster_name}" @@ -134,8 +281,8 @@ resource "google_service_account" "job_runner" { display_name = "ShipSec K8s Job Runner" } -# Job runner SA → bucket access -resource "google_storage_bucket_iam_member" "job_runner_storage" { +# Job runner SA → volumes bucket access +resource "google_storage_bucket_iam_member" "job_runner_volumes" { bucket = google_storage_bucket.volumes.name role = "roles/storage.objectUser" member = "serviceAccount:${google_service_account.job_runner.email}" @@ -148,45 +295,57 @@ resource "google_service_account_iam_member" "job_runner_wi" { member = "serviceAccount:${var.project_id}.svc.id.goog[shipsec-workloads/shipsec-job-runner]" } -# Worker SA also needs GCS access (to upload inputs / read outputs via SDK) -resource "google_service_account" "worker" { - project = var.project_id - account_id = "shipsec-worker" - display_name = "ShipSec Worker" -} - -resource "google_storage_bucket_iam_member" "worker_storage" { +# Existing storage SA also needs access to volumes bucket (worker reads/writes via SDK) +resource "google_storage_bucket_iam_member" "storage_volumes" { bucket = google_storage_bucket.volumes.name role = "roles/storage.objectUser" - member = "serviceAccount:${google_service_account.worker.email}" + member = "serviceAccount:${google_service_account.storage.email}" } -resource "google_service_account_iam_member" "worker_wi" { - service_account_id = google_service_account.worker.name - role = "roles/iam.workloadIdentityUser" - member = "serviceAccount:${var.project_id}.svc.id.goog[shipsec-workers/shipsec-worker]" +# ========================================================================== +# Outputs +# ========================================================================== + +output "artifact_registry_repo" { + value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.docker.repository_id}" } -output "gcs_volumes_bucket" { - value = google_storage_bucket.volumes.name +output "cluster_location" { 
+ value = var.zone } -output "job_runner_sa_email" { - value = google_service_account.job_runner.email +output "cluster_name" { + value = google_container_cluster.gke.name } -output "worker_sa_email" { - value = google_service_account.worker.email +# Cloud SQL +output "database_url" { + value = "postgresql://${google_sql_user.shipsec.name}:${var.db_password}@${google_sql_database_instance.postgres.private_ip_address}:5432/shipsec" + sensitive = true } -output "artifact_registry_repo" { - value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.docker.repository_id}" +output "cloudsql_private_ip" { + value = google_sql_database_instance.postgres.private_ip_address } -output "cluster_location" { - value = var.zone +# Memorystore +output "redis_url" { + value = "redis://${google_redis_instance.redis.host}:${google_redis_instance.redis.port}" } -output "cluster_name" { - value = google_container_cluster.gke.name +# GCS (via Workload Identity) +output "gcs_bucket" { + value = google_storage_bucket.artifacts.name +} + +output "gcs_storage_sa_email" { + value = google_service_account.storage.email +} + +output "gcs_volumes_bucket" { + value = google_storage_bucket.volumes.name +} + +output "job_runner_sa_email" { + value = google_service_account.job_runner.email } diff --git a/infra/gcp/envs/dev/variables.tf b/infra/gcp/envs/dev/variables.tf index 649b14fa..d4217b43 100644 --- a/infra/gcp/envs/dev/variables.tf +++ b/infra/gcp/envs/dev/variables.tf @@ -51,3 +51,23 @@ variable "node_disk_gb" { description = "Boot disk size (GB)." default = 100 } + +# --- Managed services --- + +variable "cloudsql_tier" { + type = string + description = "Cloud SQL machine tier." + default = "db-custom-1-3840" +} + +variable "db_password" { + type = string + description = "Password for the shipsec Cloud SQL user." + sensitive = true +} + +variable "redis_memory_gb" { + type = number + description = "Memorystore Redis memory in GB." + default = 1 +} From 77852593928464b6ad918db770a34ff50ea55d55 Mon Sep 17 00:00:00 2001 From: betterclever Date: Wed, 18 Feb 2026 00:45:16 +0400 Subject: [PATCH 36/36] fix(worker): fix GCS FUSE k8s runner shell syntax, sidecar detection, and Helm config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - k8s-runner: fix VOLUME_CAPTURE_SCRIPT join from '; ' to '\n' — busybox ash rejects 'do;' as a syntax error, causing all K8s jobs to fail with 'sh: syntax error: unexpected ";"' - k8s-runner: check initContainerStatuses as well as containerStatuses for GCS FUSE sidecar detection (GKE ≥1.28 native sidecar injection) - test-gcs-volume: use printf instead of echo to produce valid JSON output; remove unused OUTPUT_DELIMITER constant - helm/app-secret: add SECRET_STORE_MASTER_KEY to both system and workers namespace secrets (required by new env validation schema) - helm/worker-deployment: expose SECRET_STORE_MASTER_KEY from secret as env var - helm/gke-managed: add secretStoreMasterKey dev value; add GCS FUSE k8s config (gcsBucket, jobServiceAccount, jobRunnerGcpSa, workerGcpSa); update worker image tag to tested build 49d5de9a-wk-fix2-20260218003437 - infra/dev/main.tf: add GCS FUSE CSI addon; volumes bucket with 7-day lifecycle; worker and job-runner GCP SAs with Workload Identity bindings and bucket IAM Integration test validated: worker uploads input.txt to GCS, K8s alpine job reads it via GCS FUSE CSI mount at /inputs, writes JSON output, worker parses result. All tests pass. 
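
For reference, a minimal TypeScript sketch of the shell-syntax failure (the `lines` array is simplified, not the real VOLUME_CAPTURE_SCRIPT): joining with '; ' puts a semicolon directly after `do`, which busybox ash rejects, while '\n' keeps the loop keywords at end of line.

```typescript
// Simplified stand-in for VOLUME_CAPTURE_SCRIPT; the real script is longer,
// but the failure mode is identical.
const lines = ['for f in /shipsec-output/*; do', '  cat "$f"', 'done'];

// join('; ') yields: for f in /shipsec-output/*; do;   cat "$f"; done
// busybox ash aborts with: sh: syntax error: unexpected ";"
const broken = lines.join('; ');

// join('\n') keeps "do" bare at end of line, which any POSIX shell accepts.
const fixed = lines.join('\n');

console.log(broken);
console.log(fixed);
```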
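
On the printf change: the pre-fix echo invocation is not shown in this patch, so the broken variant below is illustrative only. It shows the usual trap: unquoted JSON loses its double quotes to shell quote removal, and echo also appends a newline. Since the worker parses result.json as JSON, the output has to be byte-exact.

```typescript
const content = 'hello-from-gcs-123';

// Illustrative fragile variant (not the test's original command): ash strips
// the inner double quotes during quote removal, so the file would contain
// {content:hello-from-gcs-123}, which JSON.parse rejects.
const echoCmd = `echo {"content":"${content}"} > /shipsec-output/result.json`;

// What the test uses now: the single-quoted format string reaches printf
// byte-for-byte, and %s substitutes the value without appending a newline.
const printfCmd = `printf '{"content":"%s"}' "${content}" > /shipsec-output/result.json`;
```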
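
On the Workload Identity side: once the shipsec-workers/shipsec-worker KSA is bound to the shipsec-worker GCP SA, the worker's GCS client needs no key file, because Application Default Credentials resolve through the GKE metadata server. A sketch of the worker-side half of the tested flow (the object path is invented for illustration; IsolatedGcsVolume computes the real prefix):

```typescript
import { Storage } from '@google-cloud/storage';

// No explicit credentials: ADC picks up the bound GCP SA from the GKE
// metadata server via Workload Identity.
const storage = new Storage();
const bucket = storage.bucket(process.env.GCS_VOLUME_BUCKET!);

// Hypothetical object path for illustration only.
await bucket.file('volumes/demo-run/input.txt').save('hello-from-gcs');
const [data] = await bucket.file('volumes/demo-run/input.txt').download();
console.log(data.toString()); // hello-from-gcs
```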
Co-Authored-By: Claude Sonnet 4.6 Signed-off-by: betterclever --- .../shipsec/templates/app-secret.local.yaml | 2 + .../shipsec/templates/worker-deployment.yaml | 5 + deploy/helm/shipsec/values/gke-managed.yaml | 3 +- infra/gcp/envs/dev/main.tf | 25 ++++ worker/src/testing/test-gcs-volume.ts | 140 ++++++++++++++++++ worker/src/utils/k8s-runner.ts | 10 +- 6 files changed, 182 insertions(+), 3 deletions(-) create mode 100644 worker/src/testing/test-gcs-volume.ts diff --git a/deploy/helm/shipsec/templates/app-secret.local.yaml b/deploy/helm/shipsec/templates/app-secret.local.yaml index 0488dacf..14bf44a7 100644 --- a/deploy/helm/shipsec/templates/app-secret.local.yaml +++ b/deploy/helm/shipsec/templates/app-secret.local.yaml @@ -13,6 +13,7 @@ stringData: MINIO_ROOT_PASSWORD: {{ .Values.secrets.minioRootPassword | quote }} MINIO_ACCESS_KEY: {{ .Values.secrets.minioRootUser | quote }} MINIO_SECRET_KEY: {{ .Values.secrets.minioRootPassword | quote }} + SECRET_STORE_MASTER_KEY: {{ .Values.secrets.secretStoreMasterKey | quote }} --- apiVersion: v1 kind: Secret @@ -28,4 +29,5 @@ stringData: MINIO_ROOT_PASSWORD: {{ .Values.secrets.minioRootPassword | quote }} MINIO_ACCESS_KEY: {{ .Values.secrets.minioRootUser | quote }} MINIO_SECRET_KEY: {{ .Values.secrets.minioRootPassword | quote }} + SECRET_STORE_MASTER_KEY: {{ .Values.secrets.secretStoreMasterKey | quote }} {{- end }} diff --git a/deploy/helm/shipsec/templates/worker-deployment.yaml b/deploy/helm/shipsec/templates/worker-deployment.yaml index 359a39e4..c4734f9e 100644 --- a/deploy/helm/shipsec/templates/worker-deployment.yaml +++ b/deploy/helm/shipsec/templates/worker-deployment.yaml @@ -42,6 +42,11 @@ spec: secretKeyRef: name: {{ .Values.secrets.name }} key: MINIO_SECRET_KEY + - name: SECRET_STORE_MASTER_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.name }} + key: SECRET_STORE_MASTER_KEY {{- if eq .Values.execution.mode "k8s" }} - name: EXECUTION_MODE value: "k8s" diff --git a/deploy/helm/shipsec/values/gke-managed.yaml b/deploy/helm/shipsec/values/gke-managed.yaml index 0bb20468..fe477ac2 100644 --- a/deploy/helm/shipsec/values/gke-managed.yaml +++ b/deploy/helm/shipsec/values/gke-managed.yaml @@ -14,6 +14,7 @@ secrets: databaseUrl: 'postgresql://shipsec:shipsec-dev-2026@10.25.225.3:5432/shipsec' minioRootUser: minioadmin minioRootPassword: minioadmin + secretStoreMasterKey: '0123456789abcdef0123456789abcdef' backend: image: @@ -44,7 +45,7 @@ backend: worker: image: repository: us-central1-docker.pkg.dev/shipsec/shipsec-studio/worker - tag: f1b15727-wk-amd64-v3 + tag: 49d5de9a-wk-fix2-20260218003437 env: NODE_ENV: production SHIPSEC_ENV: local diff --git a/infra/gcp/envs/dev/main.tf b/infra/gcp/envs/dev/main.tf index aca8ce26..c89db6ce 100644 --- a/infra/gcp/envs/dev/main.tf +++ b/infra/gcp/envs/dev/main.tf @@ -274,6 +274,27 @@ resource "google_storage_bucket" "volumes" { } } +# GCP SA for the worker pod (Workload Identity → GCS SDK access) +resource "google_service_account" "worker" { + project = var.project_id + account_id = "shipsec-worker" + display_name = "ShipSec Worker" +} + +# Workload Identity: shipsec-workers/shipsec-worker KSA → shipsec-worker GCP SA +resource "google_service_account_iam_member" "worker_wi" { + service_account_id = google_service_account.worker.name + role = "roles/iam.workloadIdentityUser" + member = "serviceAccount:${var.project_id}.svc.id.goog[shipsec-workers/shipsec-worker]" +} + +# Worker SA → volumes bucket access (reads inputs, reads outputs via SDK) +resource "google_storage_bucket_iam_member" 
"worker_volumes" { + bucket = google_storage_bucket.volumes.name + role = "roles/storage.objectUser" + member = "serviceAccount:${google_service_account.worker.email}" +} + # GCP SA for job pods (mounted via GCS FUSE CSI) resource "google_service_account" "job_runner" { project = var.project_id @@ -349,3 +370,7 @@ output "gcs_volumes_bucket" { output "job_runner_sa_email" { value = google_service_account.job_runner.email } + +output "worker_sa_email" { + value = google_service_account.worker.email +} diff --git a/worker/src/testing/test-gcs-volume.ts b/worker/src/testing/test-gcs-volume.ts new file mode 100644 index 00000000..717886cc --- /dev/null +++ b/worker/src/testing/test-gcs-volume.ts @@ -0,0 +1,140 @@ +#!/usr/bin/env bun +/** + * End-to-end integration test for GCS FUSE volume sharing. + * + * Validates the full flow: + * 1. IsolatedGcsVolume.initialize() uploads files to GCS + * 2. K8s job mounts them via GCS FUSE CSI at /inputs + * 3. Job reads the file, writes output to /shipsec-output/result.json + * 4. Worker reads the JSON output from pod logs + * 5. volume.cleanup() removes GCS objects + * + * Run inside the worker pod: + * kubectl exec -n shipsec-workers -- bun run /app/worker/src/testing/test-gcs-volume.ts + */ + +import { IsolatedGcsVolume } from '../utils/gcs-volume'; +import { runComponentInK8sJob } from '../utils/k8s-runner'; +import type { ExecutionContext } from '@shipsec/component-sdk'; +import type { DockerRunnerConfig } from '@shipsec/component-sdk'; + +const PASS = '\x1b[32m✓\x1b[0m'; +const FAIL = '\x1b[31m✗\x1b[0m'; + +function makeContext(): ExecutionContext { + return { + runId: `test-gcs-${Date.now()}`, + componentRef: 'test.gcs.volume', + logger: { + info: (msg: string) => console.log(` [info] ${msg}`), + warn: (msg: string) => console.warn(` [warn] ${msg}`), + error: (msg: string) => console.error(` [error] ${msg}`), + debug: (msg: string) => console.log(` [debug] ${msg}`), + }, + emitProgress: (msg: string) => console.log(` [progress] ${msg}`), + secrets: undefined, + storage: undefined, + artifacts: undefined, + trace: undefined, + logCollector: undefined, + terminalCollector: undefined, + metadata: { runId: `test-gcs-${Date.now()}`, componentRef: 'test.gcs.volume' }, + http: { fetch: fetch as any, toCurl: () => '' }, + } as any; +} + +async function testVolumeWriteRead() { + console.log('\n── Test 1: GCS volume write → K8s job read ──'); + + const volume = new IsolatedGcsVolume('testtenant', `run${Date.now()}`); + const testContent = `hello-from-gcs-${Date.now()}`; + + // 1. Upload file to GCS + const prefix = await volume.initialize({ 'input.txt': testContent }); + console.log(` ${PASS} Uploaded input.txt to GCS prefix: ${prefix}`); + + const ctx = makeContext(); + + // 2. Runner: alpine reads /inputs/input.txt and writes JSON output + const runner: DockerRunnerConfig = { + kind: 'docker', + image: 'alpine:3.20', + entrypoint: 'sh', + command: [ + '-c', + `content=$(cat /inputs/input.txt); printf '{"content":"%s"}' "$content" > /shipsec-output/result.json`, + ], + timeoutSeconds: 60, + volumes: [volume.getVolumeConfig('/inputs', true)], + }; + + try { + const result = await runComponentInK8sJob(runner, {}, ctx); + console.log(` ${PASS} K8s job completed, result:`, result); + + if (result?.content === testContent) { + console.log(` ${PASS} Content matches! 
"${result.content}"`); + } else { + console.error( + ` ${FAIL} Content mismatch: expected "${testContent}", got "${result?.content}"`, + ); + process.exit(1); + } + } finally { + await volume.cleanup(); + console.log(` ${PASS} GCS volume cleaned up`); + } +} + +async function testVolumeCleanup() { + console.log('\n── Test 2: GCS volume cleanup removes objects ──'); + + const { Storage } = await import('@google-cloud/storage'); + const storage = new Storage(); + const bucket = storage.bucket(process.env.GCS_VOLUME_BUCKET!); + + const volume = new IsolatedGcsVolume('testcleanup', `run${Date.now()}`); + await volume.initialize({ 'deleteme.txt': 'temporary' }); + const prefix = volume.getVolumeName()!; + + // Verify file exists + const [before] = await bucket.getFiles({ prefix }); + if (before.length === 0) { + console.error(` ${FAIL} File not found in GCS before cleanup`); + process.exit(1); + } + console.log(` ${PASS} File exists in GCS (${before.length} object(s))`); + + await volume.cleanup(); + + // Verify file deleted + const [after] = await bucket.getFiles({ prefix }); + if (after.length === 0) { + console.log(` ${PASS} GCS objects cleaned up successfully`); + } else { + console.error(` ${FAIL} ${after.length} objects still remain after cleanup`); + process.exit(1); + } +} + +async function main() { + console.log('🧪 GCS FUSE Volume Integration Tests'); + console.log(` EXECUTION_MODE=${process.env.EXECUTION_MODE}`); + console.log(` GCS_VOLUME_BUCKET=${process.env.GCS_VOLUME_BUCKET}`); + console.log(` K8S_JOB_NAMESPACE=${process.env.K8S_JOB_NAMESPACE}`); + + if (!process.env.GCS_VOLUME_BUCKET) { + console.error(`${FAIL} GCS_VOLUME_BUCKET not set`); + process.exit(1); + } + + await testVolumeCleanup(); + await testVolumeWriteRead(); + + console.log('\n\x1b[32m✓ All tests passed\x1b[0m\n'); +} + +main().catch((err) => { + console.error(`\n${FAIL} Test failed:`, err); + process.exit(1); +}); diff --git a/worker/src/utils/k8s-runner.ts b/worker/src/utils/k8s-runner.ts index 90256552..01d60e99 100644 --- a/worker/src/utils/k8s-runner.ts +++ b/worker/src/utils/k8s-runner.ts @@ -84,7 +84,7 @@ const VOLUME_CAPTURE_SCRIPT = [ ' echo "___FILE_END___"', ' done', 'done', -].join('; '); +].join('\n'); /** * Build the command wrapper that emits the output file to stdout. @@ -711,7 +711,13 @@ async function waitForGcsFuseFlush( while (Date.now() < deadline) { const pod = await core.readNamespacedPod({ name: podName, namespace }); - const sidecar = pod.status?.containerStatuses?.find((c) => c.name === 'gke-gcsfuse-sidecar'); + // GCS FUSE sidecar may appear in containerStatuses (K8s ≥1.28 native sidecar) + // or initContainerStatuses (older injection approach) + const allStatuses = [ + ...(pod.status?.containerStatuses ?? []), + ...(pod.status?.initContainerStatuses ?? []), + ]; + const sidecar = allStatuses.find((c) => c.name === 'gke-gcsfuse-sidecar'); if (!sidecar) { // No sidecar found — GCS FUSE may not have been injected, skip wait