This playbook appears to be SSHing into my local machine rather than the remote one; I'm inferring this from the output included at the bottom.
I've adapted the example from here: http://docs.ansible.com/ansible/guide_aws.html#provisioning
The playbook is split into two plays:
1. creation of the EC2 instance, and
2. configuration of the EC2 instance.
Note: To run this you'll need to create a key pair with the same name as the project (you can get more information here: https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#KeyPairs:sort=keyName).
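For reference, one way to create that key pair from the command line (a sketch; it assumes the AWS CLI is configured for us-west-2 and uses the project_name from the playbook below as the key name):

aws ec2 create-key-pair --key-name my-test --region us-west-2 --query 'KeyMaterial' --output text > ~/.ssh/my-test.pem
chmod 600 ~/.ssh/my-test.pem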
The playbook is listed below:
# Create instance
- hosts: 127.0.0.1
  connection: local
  gather_facts: false
  vars:
    project_name: my-test
  tasks:
    - name: Get the current username
      local_action: command whoami
      register: username_on_the_host

    - name: Capture current instances
      ec2_remote_facts:
        region: "us-west-2"
      register: ec2_instances

    - name: Create instance
      ec2:
        region: "us-west-2"
        zone: "us-west-2c"
        keypair: "{{ project_name }}"
        group:
          - "SSH only"
        instance_type: "t2.nano"
        image: "ami-59799439"  # debian:jessie amd64 hvm on us-west-2
        count_tag: "{{ project_name }}-{{ username_on_the_host.stdout }}-test"
        exact_count: 1
        wait: yes
        instance_tags:
          Name: "{{ project_name }}-{{ username_on_the_host.stdout }}-test"
          "{{ project_name }}-{{ username_on_the_host.stdout }}-test": simple_ec2
          Creator: "{{ username_on_the_host.stdout }}"
      register: ec2_info

    - name: Wait for instances to listen on port 22
      wait_for:
        state: started
        host: "{{ item.public_dns_name }}"
        port: 22
      with_items: "{{ ec2_info.instances }}"
      when: ec2_info|changed

    - name: Add new instance to launched group
      add_host:
        hostname: "{{ item.public_dns_name }}"
        groupname: launched
      with_items: "{{ ec2_info.instances }}"
      when: ec2_info|changed

    - name: Get ec2_info information
      debug:
        msg: "{{ ec2_info }}"

# Configure and install all we need
- hosts: launched
  remote_user: admin
  gather_facts: true
  tasks:
    - name: Display all variables/facts known for a host
      debug:
        var: hostvars[inventory_hostname]

    - name: List hosts
      debug: msg="groups={{ groups }}"

    - name: Get current user
      command: whoami

    - name: Prepare system
      become: yes
      become_method: sudo
      apt: "name={{ item }} state=latest"
      with_items:
        - software-properties-common
        - python-software-properties
        - devscripts
        - build-essential
        - libffi-dev
        - libssl-dev
        - vim
The output I have is:
TASK [Get current user] ********************************************************
changed: [ec2-35-167-142-43.us-west-2.compute.amazonaws.com] => {"changed": true, "cmd": ["whoami"], "delta": "0:00:00.006532", "end": "2017-01-09 14:53:55.806000", "rc": 0, "start": "2017-01-09 14:53:55.799468", "stderr": "", "stdout": "brianbruggeman", "stdout_lines": ["brianbruggeman"], "warnings": []}
TASK [Prepare system] **********************************************************
failed: [ec2-35-167-142-43.us-west-2.compute.amazonaws.com] (item=['software-properties-common', 'python-software-properties', 'devscripts', 'build-essential', 'libffi-dev', 'libssl-dev', 'vim']) => {"failed": true, "item": ["software-properties-common", "python-software-properties", "devscripts", "build-essential", "libffi-dev", "libssl-dev", "vim"], "module_stderr": "sudo: a password is required\n", "module_stdout": "", "msg": "MODULE FAILURE"}
This should work. Note that the username is registered before the tags that reference it, the new hosts are added to launched_ec2_hosts by public IP, and the second play points at the SSH private key explicitly:
- name: Create EC2 instances
  hosts: localhost
  connection: local
  gather_facts: False
  vars:
    project_name: device-graph
    ami_id: ami-59799439  # debian jessie 64-bit hvm
    region: us-west-2
    zone: "us-west-2c"
    instance_size: "t2.nano"
  tasks:
    - name: Get the username running the deploy
      # registered here, before provisioning, so the tags below can use it
      shell: whoami
      register: username

    - name: Provision a set of instances
      ec2:
        key_name: my_key
        group: ["SSH only"]
        instance_type: "{{ instance_size }}"
        image: "{{ ami_id }}"
        wait: true
        exact_count: 1
        count_tag:
          Name: "{{ project_name }}-{{ username.stdout }}-test"
          Creator: "{{ username.stdout }}"
          Project: "{{ project_name }}"
        instance_tags:
          Name: "{{ project_name }}-{{ username.stdout }}-test"
          Creator: "{{ username.stdout }}"
          Project: "{{ project_name }}"
      register: ec2

    - name: Add all instance public IPs to host group
      add_host:
        hostname: "{{ item.public_ip }}"
        groups: launched_ec2_hosts
      with_items: "{{ ec2.tagged_instances }}"

- name: Configuration play
  hosts: launched_ec2_hosts
  user: admin
  gather_facts: true
  vars:
    ansible_ssh_private_key_file: "~/.ssh/project-name.pem"
  tasks:
    - name: Get the username running the deploy
      shell: whoami
      register: username
Related
I created a playbook, aws.yml, and want to run it against my local host.
---
- hosts: webserver
- vars:
    region: ap-south-1
    instance_type: t2.micro
    ami: ami-005956c5f0f757d37  # Amazon Linux LTS
    keypair: ansible  # pem file name
- tasks:
    - ec2:
        key_name: "{{ ansible }}"
        group: ansible  # security group name
        instance_type: "{{ t2.micro }}"
        image: "{{ ami-005956c5f0f757d37 }}"
        wait: true
        wait_timeout: 500
        region: "{{ ap-south-1 }}"
        count: 1  # default
        count_tag:
          Name: Prod-Instance
        instance_tags:
          Name: Prod-Instance
        vpc_subnet_id: subnet-00efd068
        assign_public_ip: yes
Content of the hosts file /etc/ansible/hosts:
[web]
localhost
When I try to run aws.yml, it gives the error below:
[root@localhost ~]# ansible-playbook aws.yml

PLAY [web] ********************************************************************************************************************************************************************************************************

ERROR! the field 'hosts' is required but was not set
[root@localhost ~]#
Your playbook's hosts value and your inventory group name do not match.
In aws.yml
- hosts: webserver
And in /etc/ansible/hosts
[web]
You should either change your aws.yml playbook to read
- hosts: web
Or your hosts file /etc/ansible/hosts to read
[webserver]
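For example, the complete corrected inventory would read:

[webserver]
localhost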
Too many dashes: remove the ones in front of tasks and vars. Also, use delegate_to: localhost on the ec2 task, and reference the variables you defined ({{ keypair }}, {{ instance_type }}, {{ ami }}, {{ region }}) instead of their literal values:
---
- hosts: webserver
  vars:
    region: ap-south-1
    instance_type: t2.micro
    ami: ami-005956c5f0f757d37  # Amazon Linux LTS
    keypair: ansible  # pem file name
  tasks:
    - ec2:
        key_name: "{{ keypair }}"
        group: ansible  # security group name
        instance_type: "{{ instance_type }}"
        image: "{{ ami }}"
        wait: true
        wait_timeout: 500
        region: "{{ region }}"
        count: 1  # default
        count_tag:
          Name: Prod-Instance
        instance_tags:
          Name: Prod-Instance
        vpc_subnet_id: subnet-00efd068
        assign_public_ip: yes
      delegate_to: localhost
I'm using the following code
- name: create a instance
  gcp_compute_instance:
    name: test_object
    machine_type: n1-standard-1
    disks:
      - auto_delete: 'false'
        boot: 'true'
        source: "{{ disk }}"
    metadata:
      startup-script-url:
      cost-center:
    labels:
      environment: production
    network_interfaces:
      - network: "{{ network }}"
        access_configs:
          - name: External NAT
            nat_ip: "{{ address }}"
            type: ONE_TO_ONE_NAT
    zone: us-central1-a
    project: test-12y38912634812648
    auth_kind: serviceaccount
    service_account_file: "~/programming/gcloud/test-1283891264812-8h3981f3.json"
    state: present
and I saved the file as create2.yml
Then I run ansible-playbook create2.yml and I get the following error:
ERROR! 'gcp_compute_instance' is not a valid attribute for a Play
The error appears to be in '/Users/xxx/programming/gcloud-test/create2.yml': line 1, column 3, but may
be elsewhere in the file depending on the exact syntax problem.
The offending line appears to be:
- name: create a instance
^ here
I followed the documentation. What am I doing wrong and how do I fix it?
You haven't created a playbook; you've just created a file containing a task, which won't run on its own, as you've discovered. Tasks only run as part of a play. You should start with the playbook documentation:
Playbook Documentation
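In skeletal form (a generic sketch; the host and task are placeholders):

---
- name: my first play        # a play maps hosts to tasks
  hosts: localhost           # which hosts the tasks run on
  connection: local
  gather_facts: no
  tasks:                     # the task list lives inside the play
    - name: my first task
      debug:
        msg: "a task cannot sit at the top level of a playbook file"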
For GCP, here's a working example that creates a network, an external IP, a disk, and a VM.
- name: 'Deploy gcp vm'
  hosts: localhost
  connection: local
  become: false
  gather_facts: no
  vars:
    gcp_project: "671245944514"
    gcp_cred_kind: "serviceaccount"
    gcp_cred_file: "/tmp/test-project.json"
    gcp_region: "us-central1"
    gcp_zone: "us-central1-a"

  # Roles & Tasks
  tasks:
    - name: create a disk
      gcp_compute_disk:
        name: disk-instance
        size_gb: 50
        source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-2004-lts
        zone: "{{ gcp_zone }}"
        project: "{{ gcp_project }}"
        auth_kind: "{{ gcp_cred_kind }}"
        service_account_file: "{{ gcp_cred_file }}"
        state: present
      register: disk

    - name: create a network
      gcp_compute_network:
        name: network-instance
        project: "{{ gcp_project }}"
        auth_kind: "{{ gcp_cred_kind }}"
        service_account_file: "{{ gcp_cred_file }}"
        state: present
      register: network

    - name: create an address
      gcp_compute_address:
        name: address-instance
        region: "{{ gcp_region }}"
        project: "{{ gcp_project }}"
        auth_kind: "{{ gcp_cred_kind }}"
        service_account_file: "{{ gcp_cred_file }}"
        state: present
      register: address

    - name: create an instance
      gcp_compute_instance:
        name: vm-instance
        project: "{{ gcp_project }}"
        zone: "{{ gcp_zone }}"
        machine_type: n1-standard-1
        disks:
          - auto_delete: 'true'
            boot: 'true'
            source: "{{ disk }}"
        labels:
          environment: testing
        network_interfaces:
          - network: "{{ network }}"
            access_configs:
              - name: External NAT
                nat_ip: "{{ address }}"
                type: ONE_TO_ONE_NAT
        auth_kind: serviceaccount
        service_account_file: "{{ gcp_cred_file }}"
        state: present
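Note how each resource is registered (disk, network, address) and the registered result is passed to the next module as a resource reference; that is why the instance's source, network, and nat_ip fields take the whole registered objects. Assuming the file is still called create2.yml, it runs with:

ansible-playbook create2.yml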
The following Ansible playbook runs fine, no errors at all, but the URL just doesn't resolve/load afterwards. If I use the public IP created for the instance, the page loads.
---
- name: Provision an EC2 Instance
  hosts: local
  remote_user: ubuntu
  become: yes
  connection: local
  gather_facts: false
  vars:
    instance_type: t2.micro
    security_group: "Web Subnet Security Group"
    image: ami-0c5199d385b432989
    region: us-east-1
    keypair: demo-key
    count: 1
  vars_files:
    - keys.yml
  tasks:
    - name: Create key pair using our own pubkey
      ec2_key:
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        name: demo-key
        key_material: "{{ lookup('file', '/root/.ssh/id_rsa.pub') }}"
        region: us-east-1
        state: present

    - name: Launch the new EC2 Instance
      ec2:
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        assign_public_ip: yes
        vpc_subnet_id: subnet-0c799bda2a466f8d4
        group: "{{ security_group }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        region: "{{ region }}"
        keypair: "{{ keypair }}"
        count: "{{ count }}"
        state: present
      register: ec2

    - name: Add tag to Instance(s)
      ec2_tag:
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        resource: "{{ item.id }}"
        region: "{{ region }}"
        state: present
        tags:
          Name: demo-webserver
      with_items: "{{ ec2.instances }}"

    - name: Add the newly created EC2 instance(s) to the local host group (located inside the directory)
      lineinfile:
        path: "./hosts"
        line: "{{ item.public_ip }}"
        insertafter: '\[demo-webserver\]'
        state: present
      with_items: "{{ ec2.instances }}"

    - name: Pause for 2 minutes
      pause:
        minutes: 2

    - name: Write the new ec2 instance host key to known hosts
      connection: local
      shell: "ssh-keyscan -H {{ item.public_ip }} >> ~/.ssh/known_hosts"
      with_items: "{{ ec2.instances }}"

    - name: Wait for the instance to come up
      local_action: wait_for
                    host="{{ item.public_ip }}"
                    delay=10
                    connect_timeout=300
                    state=started
                    port=22
      with_items: "{{ ec2.instances }}"

    - name: Install packages
      delegate_to: "{{ item.public_ip }}"
      raw: bash -c "test -e /usr/bin/python || (apt -qqy update && apt install -qqy python-minimal && apt install -qqy apache2 && systemctl start apache2 && systemctl enable apache2)"
      with_items: "{{ ec2.instances }}"

    - name: Register new domain
      route53_zone:
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        zone: ansible-demo-domain.com

    - name: Create new DNS record
      route53:
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        zone: ansible-demo-domain.com
        record: ansible-demo-domain.com
        type: A
        ttl: 300
        value: "{{ item.public_ip }}"
        state: present
        overwrite: yes
        private_zone: no
        wait: yes
      with_items: "{{ ec2.instances }}"

    - name: Create new DNS record
      route53:
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        zone: ansible-demo-domain.com
        record: www.ansible-demo-domain.com
        type: CNAME
        ttl: 300
        value: ansible-demo-domain.com
        state: present
        overwrite: yes
        private_zone: no
        wait: yes
I'd appreciate help pinpointing what I'm missing. I usually wait at least 5 minutes before testing the URL, but it really doesn't resolve/load.
Thank you!
2019-03-01 update: here's how the hosted zone looks after provisioning: [screenshot: hosted zone records and their associated TTLs]
What I want to achieve
I want to create an EC2 instance with LAMP stack installed using one Ansible playbook.
Problem
The instance creation works fine, and I can modify the instance in the EC2 Console, but the problem appears when trying to access the instance, for example to install Apache or create keys.
This is the error:
fatal: [35.154.26.86]: UNREACHABLE! => {
    "changed": false,
    "msg": "[Errno None] Unable to connect to port 22 on or 35.154.26.86",
    "unreachable": true
}
Code
This is my playbook:
---
- name: Power up an ec2 with LAMP stack installed
  hosts: localhost
  become: true
  become_user: root
  gather_facts: False
  vars:
    keypair: myKeyPair
    security_group: launch-wizard-1
    instance_type: t2.micro
    image: ami-47205e28
    region: x-x-x
  tasks:
    - name: Adding Python-pip
      apt: name=python-pip state=latest

    - name: Install Boto Library
      pip: name=boto

    - name: Launch instance (Amazon Linux)
      ec2:
        key_name: "{{ keypair }}"
        group: "{{ security_group }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        region: "{{ region }}"
        aws_access_key: "xxxxxxxxxxxxxxxxxxx"
        aws_secret_key: "Xxxxxxxxxxxxxxxxxxx"
      register: ec2

    - name: Print all ec2 variables
      debug: var=ec2

    - name: Add all instance public IPs to host group
      add_host: hostname={{ item.public_ip }} groups=ec2hosts
      with_items: "{{ ec2.instances }}"

- hosts: ec2hosts
  remote_user: ec2-user
  become: true
  gather_facts: false
  tasks:
    # I need help here, don't know what to do.
    - name: Create an EC2 key
      ec2_key:
        name: "privateKey"
        region: "x-x-x"
      register: ec2_key

    - name: Save private key
      copy: content="{{ ec2_key.private_key }}" dest="./privateKey.pem" mode=0600
      when: ec2_key.changed

    # The rest is installing LAMP
Information:
1. My hosts file is default.
2. I used this command to run the playbook: sudo ansible-playbook lamp.yml -vvv -c paramiko
3. launch-wizard-1 has SSH.
4. myKeyPair is a public key imported from my device to the console (don't know if this is OK).
5. I am a big newbie.
Ansible requires Python installed on the VM to work.
Here is your required code:
- name: upload an ssh keypair to ec2
  hosts: localhost
  connection: local
  gather_facts: False
  vars:
    keypair_name: Key_name
    key_material: "{{ lookup('file', 'keyfile') }}"
    region: "{{ region }}"
  tasks:
    - name: ssh keypair for ec2
      ec2_key:
        aws_access_key: "xxxxxxxxxxxxxxxxxxx"
        aws_secret_key: "Xxxxxxxxxxxxxxxxxxx"
        region: "{{ region }}"
        name: "{{ keypair_name }}"
        key_material: "{{ key_material }}"
        state: present

- name: Power up an ec2 with LAMP stack installed
  hosts: localhost
  become: true
  become_user: root
  gather_facts: False
  vars:
    keypair: myKeyPair
    security_group: launch-wizard-1
    instance_type: t2.micro
    image: ami-47205e28
    region: x-x-x
    my_user_data: |  # install Python: Ansible needs Python pre-installed on the instance to work!
      #!/bin/bash
      sudo apt-get install python -y
  tasks:
    - name: Adding Python-pip
      apt: name=python-pip state=latest

    - name: Install Boto Library
      pip: name=boto

    - name: Launch instance (Amazon Linux)
      ec2:
        key_name: "{{ keypair }}"
        group: "{{ security_group }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        wait_timeout: 300
        user_data: "{{ my_user_data }}"
        region: "{{ region }}"
        aws_access_key: "xxxxxxxxxxxxxxxxxxx"
        aws_secret_key: "Xxxxxxxxxxxxxxxxxxx"
      register: ec2

    - name: Add all instance public IPs to host group
      add_host: hostname={{ item.public_ip }} groups=ec2hosts
      with_items: "{{ ec2.instances }}"
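One caveat: the user_data script runs once at first boot, so the instance may not be reachable the moment the ec2 task returns. A sketch of an extra task (same play, after add_host) that waits for SSH before any follow-up play targets ec2hosts:

    - name: Wait for SSH on the new instances
      wait_for:
        host: "{{ item.public_ip }}"
        port: 22
        delay: 10
        timeout: 300
      with_items: "{{ ec2.instances }}"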
Please give advice: how can I use vars from ec2_remote_facts in roles?
---
- name: Sandbox
  hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Get facts by filter
      ec2_remote_facts:
        region: "{{ aws_default_region }}"
        filters:
          instance-state-name: running
          "tag:Group": "{{ aws_default_instance_tag_group }}"
      register: ec2_remote_facts

    - name: Add running sandbox instances to in-memory inventory host group
      add_host:
        hostname: "{{ item.public_ip_address }}"
        groups: running
      with_items: "{{ ec2_remote_facts.instances }}"

- name: prov
  hosts: running
  gather_facts: true
  user: ec2-user
  become: true
  roles:
    - httpd
In this case I would like to use some of the vars from the running instances in the httpd role.
Thanks in advance!
I see at least 3 ways:
1. Define a variable within the add_host task:
- name: Add running sandbox instances to in-memory inventory host group
  add_host:
    hostname: "{{ item.public_ip_address }}"
    groups: running
    ec2_interfaces: "{{ item.interfaces }}"
  with_items: "{{ ec2_remote_facts.instances }}"
2. Call ec2_facts as a pre-task:
- name: prov
  hosts: running
  gather_facts: true
  user: ec2-user
  become: true
  pre_tasks:
    - ec2_facts:
  roles:
    - httpd
3. Use hostvars:
- name: prov
  hosts: running
  gather_facts: true
  user: ec2-user
  become: true
  roles:
    - role: httpd
      first_instance_interfaces: "{{ hostvars['localhost'].ec2_remote_facts.instances[0].interfaces }}"
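In all three cases the role reads the value like any other host variable. A hypothetical task file inside the httpd role, using the variable names from the examples above:

# roles/httpd/tasks/main.yml (sketch)
- name: Show the interfaces passed in via add_host (option 1)
  debug:
    var: ec2_interfaces

- name: Show a fact gathered by the ec2_facts pre-task (option 2)
  debug:
    var: ansible_ec2_public_ipv4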