0

Im trying to set up a Postgresql prometheus exporter running a cloud sql auth proxy sidecar on my GKE cluster, however my auth proxy fails with the following error. So far, I've tried giving the workload identity more access (i was using the cloudsql.client role before) but I doubt that is the case. perhaps my proxy connection command is wrong? I'm very new to GCP. thanks.

Get "https://sqladmin.googleapis.com/sql/v1beta4/projects/PROJECT/instances/REGION~CLOUDSQLINSTANCE?alt=json&prettyPrint=false": metadata: GCE metadata "instance/service-accounts/default/token?scopes=https%!A(MISSING)%!F(MISSING)%!F(MISSING)www.googleapis.com%!F(MISSING)auth%!F(MISSING)sqlservice.admin" not defined

My cluster is deployed using terraform, so my resources are as follows:

resource "kubernetes_deployment" "postgres_exporter" {
  metadata {
    name      = format("%s-%s", var.cluster_name, var.app_name)
    namespace = var.namespace
    labels = {
      app        = var.app_name
      maintainer = "redacted"
      cluster    = var.cluster_name
    }
  }
  spec {
    replicas = var.replicas
    selector {
      match_labels = {
        app     = var.app_name
        cluster = var.cluster_name
      }
    }
    template {
      metadata {
        labels = {
          app     = var.app_name
          cluster = var.cluster_name
        }
      }
      spec {
        service_account_name = KSA_NAME
        container {
          name  = var.postgres-exporter.name 
          image = var.postgres-exporter.image
          command = ["postgres_exporter"]
          port {
            name           = "http"
            container_port = 9187
            protocol       = "TCP"
          }
          resources {
            limits = {
              cpu    = var.postgres-exporter.resources.limits.cpu 
              memory = var.postgres-exporter.resources.limits.memory
            }

            requests = {
              cpu    = var.postgres-exporter.resources.requests.cpu
              memory = var.postgres-exporter.resources.requests.memory

            }
          }
          liveness_probe {
            http_get {
              path   = "/healthz"
              port   = "http"
              scheme = "HTTP"
            }

            initial_delay_seconds = 5
            timeout_seconds       = 5
            period_seconds        = 5
            success_threshold     = 1
            failure_threshold     = 3
          }
          readiness_probe {
            http_get {
              path   = "/healthz"
              port   = "http"
              scheme = "HTTP"
            }
            initial_delay_seconds = 1
            timeout_seconds       = 5
            period_seconds        = 5
            success_threshold     = 1
            failure_threshold     = 3
          }
          lifecycle {
            pre_stop {
              exec {
                command = ["/bin/bash", "-c", "sleep 20"]
              }
            }
          }
          image_pull_policy = "IfNotPresent"
          security_context {
            capabilities {
              drop = ["SETPCAP", "MKNOD", "AUDIT_WRITE", "CHOWN", "NET_RAW", "DAC_OVERRIDE", "FOWNER", "FSETID", "KILL", "SETGID", "SETUID", "NET_BIND_SERVICE", "SYS_CHROOT", "SETFCAP"]
            }
            read_only_root_filesystem = true
          }
        }
        container {
          name = "gce-proxy-pg"
          image = var.gce-proxy.image
          resources {
            limits = {
              cpu    = var.gce-proxy.resources.limits.cpu
              memory = var.gce-proxy.resources.limits.memory
            }
            requests = {
              cpu    = var.gce-proxy.resources.requests.cpu
              memory = var.gce-proxy.resources.requests.memory
            }
          }
          port {
            container_port = 5432
            name = "proxy-pg"
            protocol = "TCP"
          }
          command = [ "/cloud_sql_proxy", "-instances=PROJECT:REGION:INSTANCE=tcp:<PRIVATE_IP>:5432", "-enable-iam-login=true", "-ip_address_types=PRIVATE"]
          security_context {
            capabilities {
              drop = ["SETPCAP", "MKNOD", "AUDIT_WRITE", "CHOWN", "NET_RAW", "DAC_OVERRIDE", "FOWNER", "FSETID", "KILL", "SETGID", "SETUID", "NET_BIND_SERVICE", "SYS_CHROOT", "SETFCAP"]
            }
            read_only_root_filesystem = true
          }
        }
        restart_policy = "Always"
        security_context {
          run_as_user     = 1000
          run_as_non_root = true
        }
        image_pull_secrets {
          name = "redacted"
        }
        topology_spread_constraint {
          max_skew           = 1
          topology_key       = "topology.kubernetes.io/zone"
          when_unsatisfiable = "DoNotSchedule"
          label_selector {
            match_labels = {
              app = var.app_name
            }
          }
        }
        topology_spread_constraint {
          max_skew           = 1
          topology_key       = "kubernetes.io/hostname"
          when_unsatisfiable = "ScheduleAnyway"
          label_selector {
            match_labels = {
              app = var.app_name
            }
          }
        }
      }
    }
    strategy {
      type = "RollingUpdate"
      rolling_update {
        max_surge = "1"
      }
    }
  }
}
resource "kubernetes_service" "postgres_exporter" {
  metadata {
    name      = format("%s-%s", var.cluster_name, var.app_name)
    namespace = var.namespace
    labels = {
      app        = var.app_name
      maintainer = ""
      cluster    = var.cluster_name
    }
  }
  spec {
    port {
      name        = "http"
      protocol    = "TCP"
      port        = 9187
      target_port = "9187"
    }
    port {
      name = "proxy"
      protocol = "TCP"
      port = 5432
      target_port = "5432"
    }

    selector = {
      app     = var.app_name
      cluster = var.cluster_name
    }
    type = "ClusterIP"
  }
}
resource "kubernetes_service_account" "exporter-ksa" {
  metadata {
    name        = "postgres-exporter"
    namespace   = var.namespace
    annotations = {
      "iam.gke.io/gcp-service-account" = "mail@PROJECT.iam.gserviceaccount.com"
    }
  }
}
### Workload Identitiy 
resource "google_service_account" "sql-monitoring" {
  account_id   = "sql-monitoring" 
  display_name = "sql-monitoring"
  description  = ""
}

resource "google_service_account_iam_binding" "sql-monitoring-binding" {
  service_account_id = google_service_account.sql-monitoring.id
  role               = "roles/iam.workloadIdentityUser"
  members = [
    "serviceAccount:${data.google_project.current.project_id}.svc.id.goog[NAMESPACE/KSA-NAME]"
  ]
}

resource "google_project_iam_member" "sql-monitoring-iam-member" {
  for_each = toset([
    "roles/cloudsql.editor"
  ])

  project = data.google_project.current.project_id
  role    = each.key
  member  = "serviceAccount:${google_service_account.sql-monitoring.email}"
Desolar1um
  • 85
  • 5
  • I since figured out that the error means I'm lacking on of the following OAuth scopes. No idea how to go about getting that yet. source: https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/instances/get – Desolar1um Oct 12 '22 at 17:23

1 Answers1

2

I was using the wrong mail here, the mail should be the GSA's mail and I used the name of the kubernetes_service_account.

resource "kubernetes_service_account" "exporter-ksa" {
  metadata {
    name        = "postgres-exporter"
    namespace   = var.namespace
    annotations = {
      "iam.gke.io/gcp-service-account" = "mail@PROJECT.iam.gserviceaccount.com"
    }
  }
}```
Desolar1um
  • 85
  • 5
  • Thanks for adding the answer. Did you update it in your question too? Or is this the wrong config? It is not clear from your answer. – Rualark Jun 19 '23 at 23:51