How to get the list of Nitro-based EC2 instance types via the CLI? - amazon-web-services

I know this page lists the instance types that are based on the Nitro system, but I would like to get the list dynamically with the CLI (for example, using aws ec2 describe-instances). Is it possible to get the Nitro-based instance types other than by parsing the static page? If so, could you tell me how?

You'd have to write a bit of additional code to get that information. aws ec2 describe-instances will give you the InstanceType property. You should use a programming language to parse the JSON, extract InstanceType, and then call describe-instance-types: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instance-types.html?highlight=nitro
From the JSON you get back, extract hypervisor. That'll give you Nitro if the instance is Nitro.
Here's some Python code that might work. I have not tested it fully, but you can tweak it to get the results you want.
"""List all EC2 instances"""
import boto3
def ec2_connection():
"""Connect to AWS using API"""
region = 'us-east-2'
aws_key = 'xxx'
aws_secret = 'xxx'
session = boto3.Session(
aws_access_key_id = aws_key,
aws_secret_access_key = aws_secret
)
ec2 = session.client('ec2', region_name = region)
return ec2
def get_reservations(ec2):
"""Get a list of instances as a dictionary"""
response = ec2.describe_instances()
return response['Reservations']
def process_instances(reservations, ec2):
"""Print a colorful list of IPs and instances"""
if len(reservations) == 0:
print('No instance found. Quitting')
return
for reservation in reservations:
for instance in reservation['Instances']:
# get friendly name of the server
# only try this for mysql1.local server
friendly_name = get_friendly_name(instance)
if friendly_name.lower() != 'mysql1.local':
continue
# get the hypervisor based on the instance type
instance_type = get_instance_info(instance['InstanceType'], ec2)
# print findings
print(f'{friendly_name} // {instance["InstanceType"]} is {instance_type}')
break
def get_instance_info(instance_type, ec2):
"""Get hypervisor from the instance type"""
response = ec2.describe_instance_types(
InstanceTypes=[instance_type]
)
return response['InstanceTypes'][0]['Hypervisor']
def get_friendly_name(instance):
"""Get friendly name of the instance"""
tags = instance['Tags']
for tag in tags:
if tag['Key'] == 'Name':
return tag['Value']
return 'Unknown'
def run():
"""Main method to call"""
ec2 = ec2_connection()
reservations = get_reservations(ec2)
process_instances(reservations, ec2)
if __name__ == '__main__':
run()
print('Done')

In the above answer, the statement "From the JSON you get back, extract hypervisor. That'll give you Nitro if the instance is Nitro" is no longer accurate.
As per the latest AWS documentation:
hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

Cleaned up, verified working code below:
# Get all instance types that run on Nitro hypervisor
import boto3

def get_nitro_instance_types():
    """Get all instance types that run on Nitro hypervisor"""
    ec2 = boto3.client('ec2', region_name='us-east-1')
    response = ec2.describe_instance_types(
        Filters=[
            {
                'Name': 'hypervisor',
                'Values': ['nitro']
            },
        ],
    )
    instance_types = []
    for instance_type in response['InstanceTypes']:
        instance_types.append(instance_type['InstanceType'])
    return instance_types

get_nitro_instance_types()
Example output as of 12/06/2022 below:
['r5dn.8xlarge', 'x2iedn.xlarge', 'r6id.2xlarge', 'r6gd.medium',
'm5zn.2xlarge', 'r6idn.16xlarge', 'c6a.48xlarge', 'm5a.16xlarge',
'im4gn.2xlarge', 'c6gn.16xlarge', 'c6in.24xlarge', 'r5ad.24xlarge',
'r6i.xlarge', 'c6i.32xlarge', 'x2iedn.2xlarge', 'r6id.xlarge',
'i3en.24xlarge', 'i3en.12xlarge', 'm5d.8xlarge', 'c6i.8xlarge',
'r6g.large', 'm6gd.4xlarge', 'r6a.2xlarge', 'x2iezn.4xlarge',
'c6i.large', 'r6in.24xlarge', 'm6gd.xlarge', 'm5dn.2xlarge',
'd3en.2xlarge', 'c6id.8xlarge', 'm6a.large', 'is4gen.xlarge',
'r6g.8xlarge', 'm6idn.large', 'm6a.2xlarge', 'c6i.4xlarge',
'i4i.16xlarge', 'm5zn.6xlarge', 'm5.8xlarge', 'm6id.xlarge',
'm5n.16xlarge', 'c6g.16xlarge', 'r5n.12xlarge', 't4g.nano',
'm5ad.12xlarge', 'r6in.12xlarge', 'm6idn.12xlarge', 'g5.2xlarge',
'trn1.32xlarge', 'x2gd.8xlarge', 'is4gen.4xlarge', 'r6gd.xlarge',
'r5a.xlarge', 'r5a.2xlarge', 'c5ad.24xlarge', 'r6a.xlarge',
'r6g.medium', 'm6id.12xlarge', 'r6idn.2xlarge', 'c5n.2xlarge',
'g5.4xlarge', 'm5d.xlarge', 'i3en.3xlarge', 'r5.24xlarge',
'r6gd.2xlarge', 'c5d.large', 'm6gd.12xlarge', 'm6id.2xlarge',
'm6i.large', 'z1d.2xlarge', 'm5a.4xlarge', 'm5a.2xlarge',
'c6in.xlarge', 'r6id.16xlarge', 'c7g.8xlarge', 'm5dn.12xlarge',
'm6gd.medium', 'im4gn.8xlarge', 'm5dn.large', 'c5ad.4xlarge',
'r6g.16xlarge', 'c6a.24xlarge', 'c6a.16xlarge']
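Note that describe_instance_types returns results in pages, so a single call may not cover every Nitro instance type in the region. A minimal sketch using the boto3 paginator (assuming the same us-east-1 region as above) could look like this:

# Sketch: collect Nitro instance types across all result pages
import boto3

def get_nitro_instance_types_paginated(region='us-east-1'):
    """Return every instance type whose hypervisor is Nitro, following pagination."""
    ec2 = boto3.client('ec2', region_name=region)
    paginator = ec2.get_paginator('describe_instance_types')
    pages = paginator.paginate(
        Filters=[{'Name': 'hypervisor', 'Values': ['nitro']}]
    )
    instance_types = []
    for page in pages:
        for instance_type in page['InstanceTypes']:
            instance_types.append(instance_type['InstanceType'])
    return instance_types

print(get_nitro_instance_types_paginated())

From the AWS CLI, the equivalent should be aws ec2 describe-instance-types --filters Name=hypervisor,Values=nitro --query "InstanceTypes[].InstanceType", which paginates automatically.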

"""List all EC2 instances"""
import boto3
def ec2_connection():
"""Connect to AWS using API"""
region = 'us-east-2'
aws_key = 'xxx'
aws_secret = 'xxx'
session = boto3.Session(
aws_access_key_id = aws_key,
aws_secret_access_key = aws_secret
)
ec2 = session.client('ec2', region_name = region)
return ec2
def get_reservations(ec2):
"""Get a list of instances as a dictionary"""
response = ec2.describe_instances()
return response['Reservations']
def process_instances(reservations, ec2):
"""Print a colorful list of IPs and instances"""
if len(reservations) == 0:
print('No instance found. Quitting')
return
for reservation in reservations:
for instance in reservation['Instances']:
# get friendly name of the server
# only try this for mysql1.local server
friendly_name = get_friendly_name(instance)
if friendly_name.lower() != 'mysql1.local':
continue
# get the hypervisor based on the instance type
instance_type = get_instance_info(instance['InstanceType'], ec2)
# print findings
print(f'{friendly_name} // {instance["InstanceType"]} is {instance_type}')
break
def get_instance_info(instance_type, ec2):
"""Get hypervisor from the instance type"""
response = ec2.describe_instance_types(
InstanceTypes=[instance_type]
)
return response['InstanceTypes'][0]['Hypervisor']
def get_friendly_name(instance):
"""Get friendly name of the instance"""
tags = instance['Tags']
for tag in tags:
if tag['Key'] == 'Name':
return tag['Value']
return 'Unknown'
def run():
"""Main method to call"""
ec2 = ec2_connection()
reservations = get_reservations(ec2)
process_instances(reservations, ec2)
if name == 'main':
run()
print('Done')

Related

Lambda function for creating an EC2 instance

I ran my code to create an EC2 instance but I keep getting this error.
"errorMessage": "'message'",
"errorType": "KeyError",
The full code
import boto3
import os

AMI = os.environ['AMI']
INSTANCE_TYPE = os.environ['INSTANCE_TYPE']
KEY_NAME = os.environ['KEY_NAME']
SUBNET_ID = os.environ['SUBNET_ID']
REGION = os.environ['AWS_REGION']
ec2 = boto3.client('ec2', region_name=REGION)

def lambda_handler(event, context):
    message = event['message']
    instance = ec2.run_instances(
        ImageId=AMI,
        InstanceType=INSTANCE_TYPE,
        KeyName=KEY_NAME,
        SubnetId=SUBNET_ID,
        MaxCount=1,
        MinCount=1,
        InstanceInitiatedShutdownBehavior='terminate',
        UserData=init_script  # init_script is not defined in the snippet as posted
    )
    instance_id = instance['Instances'][0]['InstanceId']
    print(instance_id)
    return instance_id
What could be triggering this key error?
As an environment variable, am I supposed to use the full key name including its file type, e.g. "key.pem" instead of "key"?
The error indicates that you have an error on this line:
message = event['message']
Most likely the lambda event does not have the 'message' key you are expecting. You should print out the event to CloudWatch Logs and take a look.
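For example, a minimal sketch of a defensive handler (assuming you still want to read message when it is present) might look like:

import json

def lambda_handler(event, context):
    # Dump the incoming event to CloudWatch Logs so you can see its real shape
    print(json.dumps(event))
    # .get() returns a default instead of raising KeyError when the key is missing
    message = event.get('message', '')
    if not message:
        print("No 'message' key in the event")
    return message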
I ran the rest of your code and it created an EC2 instance successfully.

AWS Lambda create EC2 and associate EIP

I am trying to deploy an EC2 instance and associate an EIP to it, but I am getting an error when trying to associate the EIP because the instance is not running. This is my code:
import boto3
from botocore.exceptions import ClientError

AMI = 'ami-0bf84....'
INSTANCE_TYPE = 't2.micro'
KEY_NAME = 'EC2company'
SUBNET_ID = 'subnet-065....'
ec2 = boto3.client('ec2')

def lambda_handler(event, context):
    instance = ec2.run_instances(
        ImageId=AMI,
        InstanceType=INSTANCE_TYPE,
        KeyName=KEY_NAME,
        SubnetId=SUBNET_ID,
        MaxCount=1,
        MinCount=1
    )
    waiter = ec2.get_waiter('instance_running')
    try:
        response = ec2.associate_address(
            AllocationId='eipalloc-0bc.....',
            InstanceId=instance['Instances'][0]['InstanceId'],
        )
        print(response)
    except ClientError as e:
        print(e)
I suppose the issue is related to applying the waiter in the wrong way, but I'm not sure how I should do it.
As per EC2 waiters, you can create a waiter with:
waiter = client.get_waiter('instance_running')
You then activate the waiter with:
waiter.wait(InstanceIds=['i-xxx'])
It polls EC2.Client.describe_instances() every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
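Applied to the code in the question (reusing the module-level ec2 client and constants defined there, and the truncated allocation ID from the question), a sketch of the handler with the waiter actually invoked might look like:

def lambda_handler(event, context):
    instance = ec2.run_instances(
        ImageId=AMI,
        InstanceType=INSTANCE_TYPE,
        KeyName=KEY_NAME,
        SubnetId=SUBNET_ID,
        MaxCount=1,
        MinCount=1
    )
    instance_id = instance['Instances'][0]['InstanceId']
    # Block until the instance reaches the 'running' state before associating the EIP
    waiter = ec2.get_waiter('instance_running')
    waiter.wait(InstanceIds=[instance_id])
    try:
        response = ec2.associate_address(
            AllocationId='eipalloc-0bc.....',  # truncated allocation ID from the question
            InstanceId=instance_id,
        )
        print(response)
    except ClientError as e:
        print(e)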

How to get list of active connections on RDS using boto3

I can see the following information regarding the RDS instance.
I want to know how I can get the value of the current activity using boto3. The current value, as shown in the screenshot below, is 0.
I tried
response = client.describe_db_instances()
but it didn't return the value of active connections.
You can get that data from CloudWatch. RDS sends its state information there and only renders a few metrics in the RDS dashboard.
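For example, a minimal sketch of pulling the DatabaseConnections metric for one instance with get_metric_statistics (the region and instance identifier here are hypothetical):

import datetime
import boto3

cloudwatch = boto3.client('cloudwatch', region_name='us-east-1')  # assumed region
response = cloudwatch.get_metric_statistics(
    Namespace='AWS/RDS',
    MetricName='DatabaseConnections',
    Dimensions=[{'Name': 'DBInstanceIdentifier', 'Value': 'my-db-instance'}],  # hypothetical identifier
    StartTime=datetime.datetime.utcnow() - datetime.timedelta(minutes=10),
    EndTime=datetime.datetime.utcnow(),
    Period=300,
    Statistics=['Average'],
)
for datapoint in response['Datapoints']:
    print(datapoint['Timestamp'], datapoint['Average'])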
@ivan, thanks for the directions.
I created the following Python script to get information about instances with 0 connections and delete them after that. I hope it helps someone.
import datetime
import boto3

class RDSTermination:
    # Standard constructor for RDSTermination class
    def __init__(self, cloudwatch_object, rds_object):
        self.cloudwatch_object = cloudwatch_object
        self.rds_object = rds_object

    # Getters and setters for variables.
    @property
    def cloudwatch_object(self):
        return self._cloudwatch_object

    @cloudwatch_object.setter
    def cloudwatch_object(self, cloudwatch_object):
        self._cloudwatch_object = cloudwatch_object

    @property
    def rds_object(self):
        return self._rds_object

    @rds_object.setter
    def rds_object(self, rds_object):
        self._rds_object = rds_object

    # Fetch connection details for all the RDS instances. Filter the list and return
    # only those instances which have 0 connections at the time of this script run.
    def _get_instance_connection_info(self):
        rds_instances_connection_details = {}
        response = self.cloudwatch_object.get_metric_data(
            MetricDataQueries=[
                {
                    'Id': 'fetching_data_for_something',
                    'Expression': "SEARCH('{AWS/RDS,DBInstanceIdentifier} MetricName=\"DatabaseConnections\"', 'Average', 300)",
                    'ReturnData': True
                },
            ],
            EndTime=datetime.datetime.utcnow(),
            StartTime=datetime.datetime.utcnow() - datetime.timedelta(hours=2),
            ScanBy='TimestampDescending',
            MaxDatapoints=123
        )
        # response is of type dictionary with MetricDataResults as key
        for instance_info in response['MetricDataResults']:
            if len(instance_info['Timestamps']) > 0:
                rds_instances_connection_details[instance_info['Label']] = instance_info['Values'][-1]
        return rds_instances_connection_details

    # Fetches the list of all instances and their status.
    def _fetch_all_rds_instance_state(self):
        all_rds_instance_state = {}
        response = self.rds_object.describe_db_instances()
        instance_details = response['DBInstances']
        for instance in instance_details:
            all_rds_instance_state[instance['DBInstanceIdentifier']] = instance['DBInstanceStatus']
        return all_rds_instance_state

    # We further refine the list and remove instances which are stopped. We will work on
    # instances in the 'available' state only.
    def _get_instance_allowed_for_deletion(self):
        instances = self._get_instance_connection_info()
        all_instance_state = self._fetch_all_rds_instance_state()
        instances_to_delete = []
        try:
            for instance_name in instances.keys():
                if instances[instance_name] == 0.0 and all_instance_state[instance_name] == 'available':
                    instances_to_delete.append(instance_name)
        except BaseException:
            print("Check if instance connection_info is empty")
        return instances_to_delete

    # Function to delete the instances reported in the final list. It deletes instances with 0 connections
    # and status 'available'.
    def terminate_rds_instances(self, dry_run=True):
        if dry_run:
            message = 'DRY-RUN'
        else:
            message = 'DELETE'
        rdsnames = self._get_instance_allowed_for_deletion()
        if len(rdsnames) > 0:
            for rdsname in rdsnames:
                try:
                    response = self.rds_object.describe_db_instances(
                        DBInstanceIdentifier=rdsname
                    )
                    termination_protection = response['DBInstances'][0]['DeletionProtection']
                except BaseException as e:
                    print('[ERROR]: reading details' + str(e))
                    exit(1)
                if termination_protection is True:
                    try:
                        print("Removing delete termination for {}".format(rdsname))
                        if not dry_run:
                            response = self.rds_object.modify_db_instance(
                                DBInstanceIdentifier=rdsname,
                                DeletionProtection=False
                            )
                    except BaseException as e:
                        print(
                            "[ERROR]: Could not modify db termination protection "
                            "due to following error:\n " + str(e))
                        exit(1)
                try:
                    if not dry_run:
                        print("i got executed")
                        response = self.rds_object.delete_db_instance(
                            DBInstanceIdentifier=rdsname,
                            SkipFinalSnapshot=True,
                        )
                    print('[{}]: RDS instance {} deleted'.format(message, rdsname))
                except BaseException:
                    print("[ERROR]: {} rds instance not found".format(rdsname))
        else:
            print("No RDS instance marked for deletion")

if __name__ == "__main__":
    cloud_watch_object = boto3.client('cloudwatch', region_name='us-east-1')
    rds_object = boto3.client('rds', region_name='us-east-1')
    rds_termination_object = RDSTermination(cloud_watch_object, rds_object)
    rds_termination_object.terminate_rds_instances(dry_run=True)

How to get boto3 to display _all_ RDS instances?

I am trying to get all RDS instances with boto3, but it does not return all of them.
When I look at my RDS instances in Oregon (us-west-2), I see the following:
However, if I run the below Python3 script, I only get one result:
$ python3 ./stackoverflow.py
RDS instances in Oregon
------------------------------
aurora-5-7-yasmin.cazdggrmkpt1.us-west-2.rds.amazonaws.com qa test db.t2.small aurora-5-7-yasmin
$
Can you suggest a way to get boto3 to display all RDS instances?
$ cat ./stackoverflow.py
import collections
import boto3
import datetime
import pygsheets

REGIONS = ('us-west-2',)
REGIONS_H = ('Oregon',)
currentDT = str(datetime.datetime.now())

def create_spreadsheet(outh_file, spreadsheet_name="AWS usage"):
    client = pygsheets.authorize(outh_file=outh_file, outh_nonlocal=True)
    client.list_ssheets(parent_id=None)
    spread_sheet = client.create(spreadsheet_name)
    return spread_sheet

def rds_worksheet_creation(spread_sheet):
    for i in range(len(REGIONS)):
        region = REGIONS[i]
        region_h = REGIONS_H[i]
        print()
        print("{} instances in {}".format("RDS", region_h))
        print("------------------------------")
        client = boto3.client('rds', region_name=region)
        db_instances = client.describe_db_instances()
        for i in range(len(db_instances)):
            j = i - 1
            try:
                DBName = db_instances['DBInstances'][j]['DBName']
                MasterUsername = db_instances['DBInstances'][0]['MasterUsername']
                DBInstanceClass = db_instances['DBInstances'][0]['DBInstanceClass']
                DBInstanceIdentifier = db_instances['DBInstances'][0]['DBInstanceIdentifier']
                Endpoint = db_instances['DBInstances'][0]['Endpoint']
                Address = db_instances['DBInstances'][0]['Endpoint']['Address']
                print("{} {} {} {} {}".format(Address, MasterUsername, DBName, DBInstanceClass,
                                              DBInstanceIdentifier))
            except KeyError:
                continue

if __name__ == "__main__":
    spread_sheet = create_spreadsheet(spreadsheet_name="AWS usage", outh_file='../client_secret.json')
    spread_sheet.link(syncToCloud=False)
    rds_worksheet_creation(spread_sheet)
$ cat ../client_secret.json
{"installed":{"client_id":"362799999999-uml0m2XX4v999999mr2s03XX9g8l9odi.apps.googleusercontent.com","project_id":"amiable-shuttle-198516","auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://accounts.google.com/o/oauth2/token","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs","client_secret":"XXXXxQH434Qg-xxxx99_n0vW","redirect_uris":["urn:ietf:wg:oauth:2.0:oob","http://localhost"]}}
$
Edit 1:
Following Michael's comment, I changed the script to the following, but even though one more related line appeared, most of the RDS instances are still not returned:
$ python3 ./stackoverflow.py
RDS instances in Oregon
------------------------------
aurora-5-7-yasmin.cazdggrmkpt1.us-west-2.rds.amazonaws.com qa +++ DBName gave KeyError +++ db.t2.small aurora-5-7-yasmin
aurora-5-7-yasmin.cazdggrmkpt1.us-west-2.rds.amazonaws.com qa test db.t2.small aurora-5-7-yasmin
$
$ cat ./stackoverflow.py
import collections
import boto3
import datetime
import pygsheets

REGIONS = ('us-west-2',)
REGIONS_H = ('Oregon',)
currentDT = str(datetime.datetime.now())

def create_spreadsheet(outh_file, spreadsheet_name="AWS usage"):
    client = pygsheets.authorize(outh_file=outh_file, outh_nonlocal=True)
    client.list_ssheets(parent_id=None)
    spread_sheet = client.create(spreadsheet_name)
    return spread_sheet

def rds_worksheet_creation(spread_sheet):
    for i in range(len(REGIONS)):
        region = REGIONS[i]
        region_h = REGIONS_H[i]
        print()
        print("{} instances in {}".format("RDS", region_h))
        print("------------------------------")
        client = boto3.client('rds', region_name=region)
        db_instances = client.describe_db_instances()
        for i in range(len(db_instances)):
            j = i - 1
            try:
                DBName = db_instances['DBInstances'][j]['DBName']
            except KeyError:
                DBName = "+++ DBName gave KeyError +++"
            MasterUsername = db_instances['DBInstances'][0]['MasterUsername']
            DBInstanceClass = db_instances['DBInstances'][0]['DBInstanceClass']
            DBInstanceIdentifier = db_instances['DBInstances'][0]['DBInstanceIdentifier']
            Endpoint = db_instances['DBInstances'][0]['Endpoint']
            Address = db_instances['DBInstances'][0]['Endpoint']['Address']
            print("{} {} {} {} {}".format(Address, MasterUsername, DBName, DBInstanceClass,
                                          DBInstanceIdentifier))

if __name__ == "__main__":
    spread_sheet = create_spreadsheet(spreadsheet_name="AWS usage", outh_file='../client_secret.json')
    spread_sheet.link(syncToCloud=False)
    rds_worksheet_creation(spread_sheet)
You have an error in your original code, but if you want this code to scale to a large number of instances (it is unlikely you'll need this), you'll want to use something like the following:
import boto3

available_regions = boto3.Session().get_available_regions('rds')
for region in available_regions:
    rds = boto3.client('rds', region_name=region)
    paginator = rds.get_paginator('describe_db_instances').paginate()
    for page in paginator:
        for dbinstance in page['DBInstances']:
            print("{DBInstanceClass}".format(**dbinstance))
You can get rid of the paginator and just use the first loop if you know each region will have fewer than 100s of instances:
for region in available_regions:
    rds = boto3.client('rds', region_name=region)
    for dbinstance in rds.describe_db_instances()['DBInstances']:
        print("{DBInstanceClass}".format(**dbinstance))
Additionally, you can use a simple
dbinstance.get('DBName', 'No Name Set')
instead of catching the KeyError.
Your for loop range gets the value 2, because db_instances is a dict.
Instead of
for i in range(len(db_instances)):
it should be
for i in range(len(db_instances['DBInstances'])):
which yields the list of instances and the correct length to iterate over.
This code will list all RDS instances present in the account.
Try this 100% working code:
#!/usr/bin/env python
import boto3

client = boto3.client('rds')
response = client.describe_db_instances()
for i in response['DBInstances']:
    db_name = i['DBName']
    db_instance_name = i['DBInstanceIdentifier']
    db_type = i['DBInstanceClass']
    db_storage = i['AllocatedStorage']
    db_engine = i['Engine']
    print(db_instance_name, db_type, db_storage, db_engine)
FYI, the more Pythonic way to do loops in this case would be:
for instance in db_instances['DBInstances']:
    MasterUsername = instance['MasterUsername']
    DBInstanceClass = instance['DBInstanceClass']
etc.
This avoids the need for i-type iterators.

How to print instance name based on instance id using boto 3

How can I retrieve an instance name based on InstanceId using boto3?
connection = boto3.resource('ec2')
instances = connection.instances.filter(InstanceIds=[instanceid])
for instance in instances:
    instance_name = ???
You need to get the Name tag:
def get_name(instance):
    for tag in instance.tags:
        if tag['Key'] == 'Name':
            return tag['Value']
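
A sketch of how that could be used with the filter from the question (note that instance.tags can be None when an instance has no tags at all):

import boto3

connection = boto3.resource('ec2')
instances = connection.instances.filter(InstanceIds=[instanceid])  # instanceid as in the question
for instance in instances:
    # Guard against instances that have no tags before looking for the Name tag
    instance_name = get_name(instance) if instance.tags else 'Unknown'
    print(instance.id, instance_name)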