I'm trying to write a Terraform configuration that spins up multiple VPCs in different regions and creates VPC peering connections between them.
This is my module for the VPC:
# Required Variables
variable "region" {}
variable "cluster_name" {}
variable "region_name" {}
variable "nb_nodes" {}
variable "vpc_cidr" {}

# Default Variables
variable "instance_type" {
  default = "t2.nano"
}

variable "public_key_path" {
  default = "id_rsa.pub"
}

variable "private_key_path" {
  default = "id_rsa"
}

variable "ami-username" {
  default = "ubuntu"
}

variable "ami" {
  type = "map"
  default = {
    us-east-1 = "ami-0f9cf087c1f27d9b1"
    us-east-2 = "ami-0653e888ec96eab9b"
  }
}

variable "availability_zone" {
  type = "map"
  default = {
    us-east-1 = "us-east-1a"
    us-east-2 = "us-east-2a"
  }
}

provider "aws" {
  region = "${var.region}"
}

# Network Resources
resource "aws_vpc" "vpc" {
  cidr_block           = "${var.vpc_cidr}"
  enable_dns_hostnames = true

  tags {
    Name = "${var.cluster_name}-${var.region_name}-vpc"
  }
}

resource "aws_subnet" "subnet" {
  vpc_id            = "${aws_vpc.vpc.id}"
  cidr_block        = "${var.vpc_cidr}"
  availability_zone = "${lookup(var.availability_zone, var.region)}"

  tags {
    Name = "${var.cluster_name}-${var.region_name}-subnet"
  }
}

resource "aws_security_group" "sg" {
  name        = "vpc_test"
  description = "Allow all"
  vpc_id      = "${aws_vpc.vpc.id}"

  ingress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags {
    Name = "${var.cluster_name}-${var.region_name}-security-group"
  }
}

resource "aws_internet_gateway" "gw" {
  vpc_id = "${aws_vpc.vpc.id}"

  tags {
    Name = "${var.cluster_name}-${var.region_name}-gateway"
  }
}

resource "aws_route_table" "public-rt" {
  vpc_id = "${aws_vpc.vpc.id}"

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = "${aws_internet_gateway.gw.id}"
  }

  tags {
    Name = "${var.cluster_name}-${var.region_name}-subnet-rt"
  }
}

resource "aws_route_table_association" "public-rt" {
  subnet_id      = "${aws_subnet.subnet.id}"
  route_table_id = "${aws_route_table.public-rt.id}"
}

# Instance Resources
resource "aws_key_pair" "kp" {
  key_name   = "${var.cluster_name}-${var.region_name}-key"
  public_key = "${file("${var.public_key_path}")}"
}

resource "aws_instance" "node" {
  ami                         = "${lookup(var.ami, var.region)}"
  instance_type               = "${var.instance_type}"
  count                       = "${var.nb_nodes}"
  key_name                    = "${aws_key_pair.kp.id}"
  subnet_id                   = "${aws_subnet.subnet.id}"
  vpc_security_group_ids      = ["${aws_security_group.sg.id}"]
  source_dest_check           = false
  associate_public_ip_address = true

  root_block_device {
    volume_size = 20
  }

  tags {
    Name = "${var.cluster_name}-${var.region_name}-${count.index}"
  }
}

output "region" {
  value = "${var.region}"
}

output "vpc_id" {
  value = "${aws_vpc.vpc.id}"
}
And this is the module that creates the peering connections:
# Required Variables
variable "request_vpc_id" {}
variable "accept_vpc_id" {}
variable "request_region" {}
variable "accept_region" {}

data "aws_caller_identity" "current" {}

provider "aws" {
  region = "${var.request_region}"
}

resource "aws_vpc_peering_connection" "con" {
  peer_owner_id = "${data.aws_caller_identity.current.account_id}"
  vpc_id        = "${var.request_vpc_id}"
  peer_vpc_id   = "${var.accept_vpc_id}"
  auto_accept   = true
}
If I do something like the following, which creates two VPCs in the same region, it works fine:
variable "cluster_name"{
default = "aws-multi-region"
}
variable "nodes_per_region" {
default = "1"
}
module "region-1" {
source = "./simple_region/"
region = "us-east-1"
cluster_name = "${var.cluster_name}"
region_name = "east"
vpc_cidr = "10.0.0.0/24"
nb_nodes = "${var.nodes_per_region}"
}
module "region-2" {
source = "./simple_region/"
region = "us-east-1"
cluster_name = "${var.cluster_name}"
region_name = "west"
vpc_cidr = "11.1.1.0/24"
nb_nodes = "${var.nodes_per_region}"
}
module "vpc_peer_1" {
source = "./vpc_peer/"
request_region = "${module.region-1.region}"
request_vpc_id = "${module.region-1.vpc_id}"
accept_region = "${module.region-2.region}"
accept_vpc_id = "${module.region-2.vpc_id}"
}
The problem comes when I try to create the VPCs in different regions, like this:
variable "cluster_name"{
default = "aws-multi-region"
}
variable "nodes_per_region" {
default = "1"
}
module "region-1" {
source = "./simple_region/"
region = "us-east-1"
cluster_name = "${var.cluster_name}"
region_name = "east"
vpc_cidr = "10.0.0.0/24"
nb_nodes = "${var.nodes_per_region}"
}
module "region-2" {
source = "./simple_region/"
region = "us-east-2"
cluster_name = "${var.cluster_name}"
region_name = "west"
vpc_cidr = "11.1.1.0/24"
nb_nodes = "${var.nodes_per_region}"
}
module "vpc_peer_1" {
source = "./vpc_peer/"
request_region = "${module.region-1.region}"
request_vpc_id = "${module.region-1.vpc_id}"
accept_region = "${module.region-2.region}"
accept_vpc_id = "${module.region-2.vpc_id}"
}
Then I get an error:
Error: Error applying plan:
1 error(s) occurred:
* module.vpc_peer_1.aws_vpc_peering_connection.con: 1 error(s) occurred:
* aws_vpc_peering_connection.con: Error waiting for VPC Peering Connection to become available: Error waiting for VPC Peering Connection (pcx-0d423f938490fde63) to become available: Failed due to incorrect VPC-ID, Account ID, or overlapping CIDR range
Terraform does not automatically rollback in the face of errors.
Instead, your Terraform state file has been partially updated with
any resources that successfully completed. Please address the error
above and apply again to incrementally change your infrastructure.
The CIDR blocks (10.0.0.0/24 and 11.1.1.0/24) don't overlap, and the account and VPC IDs come straight from the module outputs. I've also tried manually creating the peering connection in the AWS console and that works fine, so I'm wondering if this is a Terraform bug or if I need to do something with the regions in the vpc_peer module.
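For reference, this is the direction I suspect the vpc_peer module needs to go. It's only an untested sketch, assuming an AWS provider version new enough to support the peer_region argument and the aws_vpc_peering_connection_accepter resource, and assuming auto_accept on the requester only works when both VPCs are in the same region and account:

# Required Variables
variable "request_vpc_id" {}
variable "accept_vpc_id" {}
variable "request_region" {}
variable "accept_region" {}

# Requester-side provider, in the requesting VPC's region
provider "aws" {
  region = "${var.request_region}"
}

# Accepter-side provider needs its own alias in the other region
provider "aws" {
  alias  = "accepter"
  region = "${var.accept_region}"
}

data "aws_caller_identity" "current" {}

resource "aws_vpc_peering_connection" "con" {
  peer_owner_id = "${data.aws_caller_identity.current.account_id}"
  vpc_id        = "${var.request_vpc_id}"
  peer_vpc_id   = "${var.accept_vpc_id}"

  # Cross-region peering: tell AWS which region the peer VPC lives in.
  # auto_accept is dropped here because the acceptance has to happen
  # from the accepter's region, via the resource below.
  peer_region = "${var.accept_region}"
}

resource "aws_vpc_peering_connection_accepter" "peer" {
  provider                  = "aws.accepter"
  vpc_peering_connection_id = "${aws_vpc_peering_connection.con.id}"
  auto_accept               = true
}

Is something like this the right approach, or is the single-resource auto_accept version supposed to work across regions?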