Terraform AWS routing between two or more nodes on a private subnet

I have a Terraform plan (below) that creates a pair of nodes in a private VPC on AWS. Everything seems to apply fine, but I can't ssh or ping between the nodes inside the VPC.

What am I missing from the configuration below that would allow the two nodes on the private network to talk to each other?

provider "aws" { 
    region = "${var.aws_region}" 
    access_key = "${var.aws_access_key}" 
    secret_key = "${var.aws_secret_key}" 
} 


# Create a VPC to launch our instances into 
resource "aws_vpc" "default" { 
    cidr_block = "10.0.0.0/16" 


    tags { 
    Name = "SolrCluster1" 
    } 
} 


# Create an internet gateway to give our subnet access to the outside world 
resource "aws_internet_gateway" "default" { 
    vpc_id = "${aws_vpc.default.id}" 


    tags { 
    Name = "SolrCluster1" 
    } 
} 


# Grant the VPC internet access on its main route table 
resource "aws_route" "internet_access" { 
    route_table_id = "${aws_vpc.default.main_route_table_id}" 
    destination_cidr_block = "0.0.0.0/0" 
    gateway_id = "${aws_internet_gateway.default.id}" 
} 




# Create a subnet to launch our instances into 
resource "aws_subnet" "private" { 
    vpc_id = "${aws_vpc.default.id}" 
    cidr_block = "10.0.1.0/24" 


    # if true, instances launched into this subnet should be assigned a public IP 
    map_public_ip_on_launch = true 


    # availability_zone = 


    tags { 
    Name = "SolrCluster1" 
    } 
} 




# Security Group to Access the instances over SSH, and 8983 
resource "aws_security_group" "main_security_group" { 
    name = "SolrCluster1" 
    description = "Allow access to the servers via port 22" 


    vpc_id = "${aws_vpc.default.id}" 


    // allow traffic from the SG itself for tcp 
    ingress { 
    from_port = 1 
    to_port = 65535 
    protocol = "tcp" 
    self = true 
    } 


    // allow traffic from the SG itself for udp 
    ingress { 
    from_port = 1 
    to_port = 65535 
    protocol = "udp" 
    self = true 
    } 


    // allow SSH traffic from anywhere TODO: Button this up a bit? 
    ingress { 
    from_port = 22 
    to_port = 22 
    protocol = "tcp" 
    cidr_blocks = ["0.0.0.0/0"] 
    } 


    // allow ICMP 
    ingress { 
    from_port = -1 
    to_port = -1 
    protocol = "icmp" 
    cidr_blocks = ["0.0.0.0/0"] 
    } 


} 


resource "aws_instance" "solr" { 
    ami = "ami-408c7f28" 
    instance_type = "t1.micro" 


    # The name of our SSH keypair we created above. 
    # key_name = "${aws_key_pair.auth.id}" 
    key_name = "${var.key_name}" 


    vpc_security_group_ids = ["${aws_security_group.main_security_group.id}"] 


    # Launch the instances into our subnet 
    subnet_id = "${aws_subnet.private.id}" 


    # The connection block tells our provisioner how to communicate with the 
    # resource (instance) 
    connection { 
    # The default username for our AMI 
    user = "ubuntu" 
    # The connection will use the local SSH agent for authentication. 
    private_key = "${file(var.private_key_path)}" 
    } 


    /* provisioner "remote-exec" { */ 
    /* inline = [ */ 
    /*  "sudo apt-get -y update", */ 
    /*  "sudo apt-get -y --force-yes install nginx", */ 
    /*  "sudo service nginx start" */ 
    /* ] */ 
    /* } */ 


    tags { 
    Name = "SolrDev${count.index}" 
    } 


    count = 2 
} 

Answer

It turns out I had left the egress rules out of my security group:

    // Allow outbound TCP traffic to other instances in this security group
    egress {
        from_port = 1
        to_port   = 65535
        protocol  = "tcp"
        self      = true
    }

    // Allow outbound UDP traffic to other instances in this security group
    egress {
        from_port = 1
        to_port   = 65535
        protocol  = "udp"
        self      = true
    }
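
Why this matters: a security group created through the AWS console gets a default rule allowing all outbound traffic, but when Terraform manages an aws_security_group it removes that default, so a group with no egress blocks permits no outbound traffic at all. Security groups are stateful, so SSH from outside the VPC still worked (reply packets are allowed automatically), but neither node could initiate a connection to the other. Note that the TCP/UDP pair above still leaves outbound ICMP blocked, so node-to-node ping needs either an icmp egress rule or the console-style allow-all rule. A minimal sketch of the latter (not part of the original plan; tighten to your needs):

    // Allow all outbound traffic, matching the default egress rule the
    // AWS console would have created ("-1" means every protocol)
    egress {
        from_port   = 0
        to_port     = 0
        protocol    = "-1"
        cidr_blocks = ["0.0.0.0/0"]
    }

With the egress rules added, terraform apply updates the security group in place, and the nodes should be able to reach each other over their private 10.0.1.0/24 addresses.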