I have built out a Terraform config that deploys a MongoDB atlas cloud cluster, and sets up a VPC peer with my AWS account. The terraform configuration stores the credentials in AWS Secrets Manager. Unfortunately, my Lambda is unable to access the Secrets Manager API endpoint or the MongoDB cluster hosted on Atlas. I have read that I need to set up a NAT gateway on my VPC to access the public internet. I am not a networking pro, and I have tried adding a bunch of different configurations to no avail. Please help:
- Do I need to set up a NAT gateway for my VPC to access Secrets Manager? Or can I just host the Secret in the VPC somehow? What is best practice here?
- Do I need to set up a NAT gateway for my Lambda to access my Atlas hosted MongoDB cluster, even though they are on the same VPC and I have whitelisted the security group my Lambda is in?
- How do I set up a NAT Gateway to allow my Lambda to connect to my Atlas Cluster in Terraform?
Ideally I would like to lockdown the connection to the outside internet as much as possible, but if that's not an option, I am fine with any implementation that just works.
Here is my Terraform config
variable "admin_profile" {
type = string
default = "superadmin"
}
variable "region" {
type = string
default = "us-west-2"
}
provider "aws" {
profile = var.admin_profile
region = "us-west-2"
alias = "admin"
}
// create mongo db organization + cluster on atlas
provider "mongodbatlas" {
public_key = var.atlas_public_key
private_key = var.atlas_private_key
}
//superadmin creds
variable aws_account_id {
type = string
}
variable atlas_private_key {
type = string
}
variable atlas_public_key {
type = string
}
variable atlas_region {
type = string
default = "US_WEST_2"
}
variable atlas_org_id {
type = string
default = "" #EXCLUDE THIS
}
// generated creds for db
variable atlas_db_user {
default = "mongo_user"
}
resource "random_password" "password" {
length = 16
special = false
#override_special = "_%-"
}
locals {
atlas_db_password = random_password.password.result
}
variable atlas_db_vpc_cidr {
default = "192.168.224.0/21"
}
// resources
resource "mongodbatlas_project" "cluster-partner-project" {
name = "live"
org_id = var.atlas_org_id
}
resource "mongodbatlas_cluster" "cluster-partner" {
project_id = mongodbatlas_project.cluster-partner-project.id
name = "cluster-partner"
num_shards = 1
replication_factor = 3
provider_backup_enabled = true
cluster_type = "REPLICASET"
auto_scaling_disk_gb_enabled = true
mongo_db_major_version = "4.2"
//Provider Settings "block"
provider_name = "AWS"
disk_size_gb = 40
provider_disk_iops = 120
provider_volume_type = "STANDARD"
provider_encrypt_ebs_volume = true
provider_instance_size_name = "M10"
provider_region_name = var.atlas_region
}
resource "mongodbatlas_database_user" "cluster-partner-user" {
username = var.atlas_db_user
password = local.atlas_db_password
auth_database_name = "admin"
project_id = mongodbatlas_project.cluster-partner-project.id
roles {
role_name = "readAnyDatabase"
database_name = "admin"
}
roles {
role_name = "readWrite"
database_name = "app_db"
}
}
resource "mongodbatlas_network_container" "cluster-partner-network" {
atlas_cidr_block = var.atlas_db_vpc_cidr
project_id = mongodbatlas_project.cluster-partner-project.id
provider_name = "AWS"
region_name = var.atlas_region
}
resource "mongodbatlas_network_peering" "cluster-partner-network-peering" {
accepter_region_name = var.region
project_id = mongodbatlas_project.cluster-partner-project.id
container_id = mongodbatlas_network_container.cluster-partner-network.container_id
provider_name = "AWS"
route_table_cidr_block = aws_vpc.primary.cidr_block
vpc_id = aws_vpc.primary.id
aws_account_id = var.aws_account_id
}
resource "mongodbatlas_project_ip_whitelist" "default-db-access" {
project_id = mongodbatlas_project.cluster-partner-project.id
aws_security_group = aws_security_group.primary_default.id
comment = "Access for App to MongoDB"
depends_on = [mongodbatlas_network_peering.cluster-partner-network-peering]
}
// create a vpc in AWS
resource "aws_vpc" "primary" {
provider = aws.admin
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
}
// Internet Gateway
resource "aws_internet_gateway" "primary" {
provider = aws.admin
vpc_id = aws_vpc.primary.id
}
// route table
resource "aws_route" "primary-internet_access" {
provider = aws.admin
route_table_id = aws_vpc.primary.main_route_table_id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.primary.id
}
resource "aws_route" "peeraccess" {
provider = aws.admin
route_table_id = aws_vpc.primary.main_route_table_id
destination_cidr_block = var.atlas_db_vpc_cidr
vpc_peering_connection_id = mongodbatlas_network_peering.cluster-partner-network-peering.connection_id
depends_on = [aws_vpc_peering_connection_accepter.peer]
}
//subnets
//public
resource "aws_subnet" "primary-az1" {
provider = aws.admin
tags = {
Name = "public primary subnet"
}
vpc_id = aws_vpc.primary.id
cidr_block = "10.0.1.0/24"
map_public_ip_on_launch = true
availability_zone = "${var.region}a"
}
//private
resource "aws_subnet" "primary-az2" {
provider = aws.admin
tags = {
Name = "private subnet 0"
}
vpc_id = aws_vpc.primary.id
cidr_block = "10.0.2.0/24"
map_public_ip_on_launch = false
availability_zone = "${var.region}b"
}
// security groups for mongo vpc connect
resource "aws_security_group" "primary_default" {
provider = aws.admin
name_prefix = "defaultvpc-"
description = "Default security group for all instances in VPC ${aws_vpc.primary.id}"
vpc_id = aws_vpc.primary.id
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = [
aws_vpc.primary.cidr_block,
var.atlas_db_vpc_cidr
]
# cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
// vpc peering auto accept
resource "aws_vpc_peering_connection_accepter" "peer" {
provider = aws.admin
vpc_peering_connection_id = mongodbatlas_network_peering.cluster-partner-network-peering.connection_id
auto_accept = true
}
// save mongo account details to secret manager
resource "aws_secretsmanager_secret" "partner_iam_mongo_access" {
provider = aws.admin
name = "mongo-access"
}
locals {
mongo_credentials = {
connection_strings = mongodbatlas_cluster.cluster-partner.connection_strings
password = local.atlas_db_password
}
}
resource "aws_secretsmanager_secret_version" "partner_iam_mongo_access" {
provider = aws.admin
secret_id = aws_secretsmanager_secret.partner_iam_mongo_access.id
secret_string = jsonencode(local.mongo_credentials)
}
// create lambdas for each of the key steps in the app
// have to add the vpc
resource "aws_iam_role_policy" "lambda_policy" {
provider = aws.admin
name = "lambda_policy"
role = aws_iam_role.lambda_role.id
policy = file("./lambda-policy.json")
}
data "aws_iam_policy" "aws_lambda_vpc_access_execution_role" {
provider = aws.admin
arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"
}
resource "aws_iam_role" "lambda_role" {
provider = aws.admin
name = "lambda-vpc-role-managed"
assume_role_policy = file("./lambda-assume-policy.json")
}
data "archive_file" "test-connection" {
type = "zip"
source_file = "./test-connection"
output_path = "./test-connection_deploy.zip"
}
resource "aws_lambda_function" "test-connection" {
provider = aws.admin
filename = "./test-connection_deploy.zip"
function_name = "test-connection"
role = aws_iam_role.lambda_role.arn
handler = "test-connection"
runtime = "go1.x"
timeout = 15
source_code_hash = data.archive_file.test-connection.output_base64sha256
vpc_config {
subnet_ids = [aws_subnet.primary-az1.id] // public subnet
security_group_ids = [aws_security_group.primary_default.id]
}
}
Here are my tfvars
admin_profile = "default"
atlas_private_key =
atlas_public_key =
atlas_org_id =
aws_account_id =
Here is my Lambda policy (lambda-policy.json)
{
"Version":"2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Action":[
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:AttachNetworkInterface",
"secretsmanager:DescribeSecret",
"secretsmanager:GetSecretValue",
"secretsmanager:ListSecretVersionIds",
"secretsmanager:ListSecrets"
],
"Resource":"*"
}
]
}
Here is my Lambda policy (lambda-assume-policy.json)
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": ["lambda.amazonaws.com", "ec2.amazonaws.com"]
},
"Effect": "Allow",
"Sid": ""
}
]
}
And here is the (GoLang) code for my Lambda
package main
import (
"context"
"fmt"
"errors"
"time"
"encoding/json"
"github.com/aws/aws-lambda-go/lambda"
"github.com/sparrc/go-ping"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"go.mongodb.org/mongo-driver/mongo"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
type MongoCreds struct {
ConnectionStrings []map[string]interface{} `json:"connection_strings"`
Password string `json:"password"`
}
var MainRegion = "us-west-2"
func HandleRequest(ctx context.Context, updatedValues interface{}) (string, error) {
fmt.Println("we are pinging")
pinger, err := ping.NewPinger("www.google.com")
if err != nil {
panic(err)
}
pinger.Count = 3
pinger.Run() // blocks until finished
stats := pinger.Statistics() // get send/receive/rtt stats
fmt.Println(stats)
fmt.Println("connecting to mongo")
err = ConnectToMongoClient()
if err != nil {
fmt.Println("failure to connect to mongo")
}
return "", err
}
func ConnectToMongoClient() error {
sess := session.Must(session.NewSession(&aws.Config{
Region: aws.String(MainRegion),
}))
svc := secretsmanager.New(sess)
input := &secretsmanager.GetSecretValueInput{
SecretId: aws.String("mongo-access"),
}
fmt.Println("getting credentials")
secret, err := svc.GetSecretValue(input)
if err != nil {
return err
}
var mongo_creds MongoCreds
secretJson := []byte(*secret.SecretString)
err = json.Unmarshal(secretJson, &mongo_creds)
fmt.Println("credentials fetched")
fmt.Println(mongo_creds)
if err != nil {
return err
}
var mongoURI string
for _, connection := range(mongo_creds.ConnectionStrings) {
if val, ok := connection["standard_srv"]; ok {
mongoURI = val.(string)
}
}
if mongoURI == "" {
return errors.New("Unable to parse a connecting string from secret")
}
clientOptions := options.Client().ApplyURI(mongoURI).SetAuth(options.Credential{Username: "mongo_user", Password: mongo_creds.Password})
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
client, err := mongo.Connect(ctx, clientOptions)
fmt.Println("connecting")
if err != nil {
fmt.Println(err.Error())
return err
}
ctx, _ = context.WithTimeout(context.Background(), 5*time.Second)
if err = client.Ping(ctx, readpref.Primary()); err != nil {
return err
}
return err
}
func main() {
lambda.Start(HandleRequest)
}
If anyone can recommend an implementation or adjustment to my VPC configuration or my Lambda code that would allow access to the Secrets Manager and my Mongo Cluster. Ideally, keeping all traffic in the VPC, but if public internet access is necessary, so be it.
Edit The error I am getting is a Timeout. Note, even if I hardcode the credentials (and skip the Secret Manager step), I am still timing out when attempting to connect to the Atlas hosted Mongo instance.