0

I created a VM and a snapshot schedule with Terraform modules on GCP. The code attaches the additional disks to the snapshot schedule, but not the boot disk. Any idea what needs to be changed in the code below to include the boot disk?

Any help will be appreciated.

locals {
  # Attached-disk definitions keyed by disk name; per-disk options fall back
  # to the module-level defaults when not set on the disk itself.
  attached_disks = {
    for disk in var.attached_disks :
    disk.name => merge(disk, {
      options = disk.options == null ? var.attached_disk_defaults : disk.options
    })
  }
  # One entry per (instance, disk) combination so every instance receives its
  # own copy of each attached disk. Keys look like "<instance>-<disk>".
  attached_disks_pairs = {
    for pair in setproduct(keys(local.names), keys(local.attached_disks)) :
    "${pair[0]}-${pair[1]}" => { disk_name = pair[1], name = pair[0] }
  }
  # (role, instance) pairs for IAM bindings; empty when only building a template.
  iam_roles = var.use_instance_template ? {} : {
    for pair in setproduct(var.iam_roles, keys(local.names)) :
    "${pair.0}/${pair.1}" => { role = pair.0, name = pair.1 }
  }
  # Instance names mapped to their zero-based index: a single unsuffixed name
  # for templates, otherwise "<name>-0001" .. "<name>-NNNN".
  names = (
    var.use_instance_template ? { (var.name) = 0 } : {
      for i in range(0, var.instance_count) : format("${var.name}-%04d", i + 1) => i
    }
  )
  # Email of the module-created service account when it exists, otherwise the
  # externally supplied one.
  service_account_email = (
    var.service_account_create
    ? (
      length(google_service_account.service_account) > 0
      ? google_service_account.service_account[0].email
      : null
    )
    : var.service_account
  )
  # FIX: the original expression was missing the closing "]" on the list and
  # the entire false branch of the inner conditional — a parse error.
  # NOTE(review): the false branch below restores the conventional GCE default
  # scopes for externally supplied accounts — confirm against module intent.
  service_account_scopes = (
    length(var.service_account_scopes) > 0
    ? var.service_account_scopes
    : (
      var.service_account_create
      ? ["https://www.googleapis.com/auth/cloud-platform"]
      : [
        "https://www.googleapis.com/auth/devstorage.read_only",
        "https://www.googleapis.com/auth/logging.write",
        "https://www.googleapis.com/auth/monitoring.write"
      ]
    )
  )
  # Spread instances round-robin over the configured zones (default: "<region>-b").
  zones_list = length(var.zones) == 0 ? ["${var.region}-b"] : var.zones
  zones = {
    for name, i in local.names : name => element(local.zones_list, i)
  }
}

# One persistent disk per (instance, attached-disk) pair, created only when
# managing real instances (not when building an instance template).
resource "google_compute_disk" "disks" {
  for_each = var.use_instance_template ? {} : local.attached_disks_pairs
  project  = var.project_id
  # Place the disk in the same zone as the instance it belongs to.
  zone     = local.zones[each.value.name]
  name     = each.key
  type     = local.attached_disks[each.value.disk_name].options.type
  size     = local.attached_disks[each.value.disk_name].size
  image    = local.attached_disks[each.value.disk_name].image
  labels = merge(var.labels, {
    disk_name = local.attached_disks[each.value.disk_name].name
    disk_type = local.attached_disks[each.value.disk_name].options.type

    # Disk images usually have slashes, which is against label
    # restrictions
    # image     = local.attached_disks[each.value.disk_name].image
  })
  # Optional CMEK/CSEK encryption; the single-element list is just an
  # on/off switch for the dynamic block.
  # FIX: dynamic block labels must be quoted strings — the unquoted form
  # used originally is deprecated and rejected by recent Terraform versions.
  dynamic "disk_encryption_key" {
    for_each = var.encryption != null ? [""] : []

    content {
      raw_key           = var.encryption.disk_encryption_key_raw
      kms_key_self_link = var.encryption.kms_key_self_link
    }
  }
}

locals {
  # Region-scoped name shared by the snapshot policy and its attachments.
  snapshot_policy_name = "${var.region}-${var.project_id}-${var.name}-default"
}

# Attach the snapshot schedule to every additional disk AND to each
# instance's boot disk. The boot disk is created implicitly inside
# google_compute_instance, so it never appears in attached_disks_pairs —
# that is why the original code only covered the additional disks.
resource "google_compute_disk_resource_policy_attachment" "snapshot_attachments" {
  # Add one extra entry per instance, keyed by the bare instance name, which
  # cannot collide with the "<instance>-<disk>" pair keys. disk_name == null
  # marks a boot-disk entry.
  for_each = var.use_instance_template ? {} : merge(
    local.attached_disks_pairs,
    { for name in keys(local.names) : name => { name = name, disk_name = null } }
  )
  project = var.project_id
  zone    = local.zones[each.value.name]
  name    = local.snapshot_policy_name
  # For boot-disk entries, reference the disk through the instance's
  # boot_disk source so Terraform orders this attachment after the instance
  # (and therefore the boot disk) exists.
  disk = (
    each.value.disk_name == null
    ? google_compute_instance.default[each.value.name].boot_disk.0.source
    : google_compute_disk.disks[each.key].name
  )
  depends_on = [google_compute_resource_policy.snapshot_policy]
}

# Daily snapshot schedule shared by all disks of this module's instances.
# Created once (count = 1) rather than per disk; skipped entirely when only
# building an instance template.
resource "google_compute_resource_policy" "snapshot_policy" {
  count = var.use_instance_template ? 0 : 1
  #for_each = var.use_instance_template ? {} : local.attached_disks_pairs
  project  = var.project_id
  region   = var.region
  name     = local.snapshot_policy_name
  snapshot_schedule_policy {
    schedule {
      # One snapshot per day, started at 09:00 UTC.
      daily_schedule {
        days_in_cycle = 1
        start_time     = "09:00"
      }
    }
    retention_policy {
      # Keep snapshots for 15 days; auto-snapshots survive disk deletion.
      max_retention_days    = 15
      on_source_disk_delete = "KEEP_AUTO_SNAPSHOTS"
    }
    snapshot_properties {
      storage_locations = ["us"]
      # guest_flush requires the VSS/guest agent; disabled here.
      guest_flush       = false
    }
  }
}

resource "google_compute_instance" "default" {
  for_each                  = var.use_instance_template ? {} : local.names
  project                   = var.project_id
  zone                      = local.zones[each.key]
  name                      = each.key
  hostname                  = var.hostname
  description               = "Managed by the compute-vm Terraform module."
  tags                      = var.tags
  machine_type              = var.instance_type
  min_cpu_platform          = var.min_cpu_platform
  can_ip_forward            = var.can_ip_forward
  allow_stopping_for_update = var.options.allow_stopping_for_update
  deletion_protection       = var.options.deletion_protection
  enable_display            = var.enable_display
  labels                    = var.labels
  metadata = merge(
    var.metadata, try(element(var.metadata_list, each.value), {})
  )

  lifecycle {
    ignore_changes = [
      metadata
    ]
  }

  dynamic attached_disk {
    for_each = {
      for resource_name, pair in local.attached_disks_pairs :
      resource_name => local.attached_disks[pair.disk_name] if pair.name == each.key
    }
    iterator = config
    content {
      device_name = config.value.name
      mode        = config.value.options.mode
      source      = google_compute_disk.disks[config.key].name
    }
  }

  boot_disk {
    initialize_params {
      type  = var.boot_disk.type
      image = var.boot_disk.image
      size  = var.boot_disk.size
    }
    disk_encryption_key_raw = var.encryption != null ? var.encryption.disk_encryption_key_raw : null
    kms_key_self_link       = var.encryption != null ? var.encryption.kms_key_self_link : null
  }

1 Answers1

0

As stated in the documentation [1], there are some situations where managing the attached disks via the compute instance's configuration isn't preferable or possible, such as attaching a dynamic number of disks using the count variable.


[1] https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_attached_disk

BraveAdmin
  • 105
  • 6