Forgive my ignorance, I am new to AWS.
When I run the following Python code sample, the CreateDBInstance call fails with an invalid security group error. The GroupId in the error "appears" to be correct (the value matches, though it is enclosed in double quotes rather than single quotes), but the GroupName is blank. I have confirmed that the security group ID is correct. What do I need to pass into VpcSecurityGroupIds to avoid the failure? I have tried hardcoding both the Group ID and the Group Name without success.
response = rds_client.create_db_instance(
    DBInstanceIdentifier=rds_identifier,
    DBName=db_name,
    DBInstanceClass='db.t2.micro',
    Engine='mariadb',
    MasterUsername='masteruser',
    MasterUserPassword='mymasterpassw0rd1!',
    VpcSecurityGroupIds=[sg_id_number],
    AllocatedStorage=20,
    Tags=[
        {
            'Key': 'POC-Email',
            'Value': admin_email
        },
        {
            'Key': 'Purpose',
            'Value': 'AWS Developer Study Guide Demo'
        }
    ]
)
You need to pass the security group ID as a string inside the list, for example:
VpcSecurityGroupIds=["sg-123456"]
(In Python, double and single quotes are interchangeable, so the quoting shown in the error message is not the problem; what matters is that the ID is a valid security group in the same VPC as the instance.)
Refer to: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.create_db_instance
You can run the code below on Python 3.7 (your RDS instance will be placed in the default VPC, so you need a security group from the default VPC):
import boto3

def lambda_handler(event, context):
    rds_client = boto3.client('rds')
    response = rds_client.create_db_instance(
        DBInstanceIdentifier="rds-identifier",
        DBName="db_name",
        DBInstanceClass='db.t2.micro',
        Engine='mariadb',
        MasterUsername='masteruser',
        MasterUserPassword='mymasterpassw0rd1!',
        VpcSecurityGroupIds=['sg-***'],  # security group in the default VPC
        AllocatedStorage=20,
        Tags=[
            {
                'Key': 'POC-Email',
                'Value': "admin_email"
            },
            {
                'Key': 'Purpose',
                'Value': 'AWS Developer Study Guide Demo'
            }
        ]
    )
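If you also want to wait until the instance is actually available before using it, boto3 ships a waiter for that. A minimal sketch, reusing the identifier from the example above (note that RDS creation can take several minutes, so this suits a script better than a short-timeout Lambda):

import boto3

# Poll DescribeDBInstances until the new instance reaches 'available'.
# Better run from a script than inside a Lambda with a short timeout.
rds_client = boto3.client('rds')
waiter = rds_client.get_waiter('db_instance_available')
waiter.wait(DBInstanceIdentifier='rds-identifier')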
I want to register/deregister two EC2 instances, i-26377gdhdhj and i-9876277sgshj, in an AWS ALB target group using a Python Lambda function script.
I want to know how to add both instance IDs under Targets simultaneously. Please help.
This is my current script:
import boto3

clients = boto3.client('elbv2')
response_tg = clients.register_targets(
    TargetGroupArn='arn:aws:elasticloadbalancing:us-east-1:123456789123:targetgroup/target-demo/c64e6bfc00b4658f',
    Targets=[
        {
            'Id': 'i-26377gdhdhj',
        },
    ]
)
Since Targets is a list, you can pass them both in:
response_tg = clients.register_targets(
    TargetGroupArn='arn:aws:elasticloadbalancing:us-east-1:123456789123:targetgroup/target-demo/c64e6bfc00b4658f',
    Targets=[
        {
            'Id': 'i-26377gdhdhj',
        },
        {
            'Id': 'i-9876277sgshj',
        }
    ]
)
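The question also mentions deregistering; that call is symmetrical. A sketch reusing the same hypothetical target group ARN and instance IDs:

import boto3

clients = boto3.client('elbv2')
# Deregistering mirrors registering: Targets is a list, so both
# instances can be removed in one call.
response_tg = clients.deregister_targets(
    TargetGroupArn='arn:aws:elasticloadbalancing:us-east-1:123456789123:targetgroup/target-demo/c64e6bfc00b4658f',
    Targets=[
        {'Id': 'i-26377gdhdhj'},
        {'Id': 'i-9876277sgshj'}
    ]
)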
The code below was created to export all the findings from Security Hub to an S3 bucket using a Lambda function. The filters are set to export only CIS AWS Foundations Benchmark findings. There are more than 20 accounts added as members in Security Hub. The issue I'm facing is that even though I'm using the NextToken configuration, the output doesn't contain information about all the accounts; instead it just displays one account's data, seemingly at random.
Can somebody look into the code and let me know what the issue could be, please?
import boto3
import json
from botocore.exceptions import ClientError
import time
import glob

client = boto3.client('securityhub')
s3 = boto3.resource('s3')
storedata = {}
_filter = Filters = {
    'GeneratorId': [
        {
            'Value': 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark',
            'Comparison': 'PREFIX'
        }
    ],
}

def lambda_handler(event, context):
    response = client.get_findings(
        Filters={
            'GeneratorId': [
                {
                    'Value': 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark',
                    'Comparison': 'PREFIX'
                },
            ],
        },
    )
    results = response["Findings"]
    while "NextToken" in response:
        response = client.get_findings(Filters=_filter, NextToken=response["NextToken"])
        results.extend(response["Findings"])
        storedata = json.dumps(response)
        print(storedata)
    save_file = open("/tmp/SecurityHub-Findings.json", "w")
    save_file.write(storedata)
    save_file.close()
    for name in glob.glob("/tmp/*"):
        s3.meta.client.upload_file(name, "xxxxx-security-hubfindings", name)
I'm also now getting a TooManyRequestsException error.
The problem is in this code that paginates the security findings results:
while "NextToken" in response:
response = client.get_findings(Filters=_filter,NextToken=response["NextToken"])
results.extend(response["Findings"])
storedata = json.dumps(response)
print(storedata)
The value of storedata after the while loop has completed is the last page of security findings, rather than the aggregate of the security findings.
However, you're already aggregating the security findings in results, so you can use that:
save_file = open("/tmp/SecurityHub-Findings.json", "w")
save_file.write(json.dumps(results))
save_file.close()
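As for the TooManyRequestsException: that is throttling, so you can let botocore retry with backoff, and use the built-in paginator instead of the manual NextToken loop. A sketch under the same filter and placeholder bucket name as above:

import json
import boto3
from botocore.config import Config

# 'standard' retry mode retries throttling errors such as
# TooManyRequestsException with exponential backoff.
client = boto3.client('securityhub',
                      config=Config(retries={'max_attempts': 10, 'mode': 'standard'}))
s3 = boto3.resource('s3')

_filter = {
    'GeneratorId': [
        {
            'Value': 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark',
            'Comparison': 'PREFIX'
        }
    ]
}

def lambda_handler(event, context):
    results = []
    # The paginator follows NextToken for you, page by page.
    for page in client.get_paginator('get_findings').paginate(Filters=_filter):
        results.extend(page['Findings'])
    with open("/tmp/SecurityHub-Findings.json", "w") as f:
        json.dump(results, f)
    s3.meta.client.upload_file("/tmp/SecurityHub-Findings.json",
                               "xxxxx-security-hubfindings",
                               "SecurityHub-Findings.json")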
I would like to get the usage cost report for each instance in my AWS account for a period of time.
I'm able to get linked_account_id and service in the output, but I need instance_id as well. Please help.
import boto3

cd = boto3.client('ce', 'ap-south-1')

results = []
token = None
while True:
    if token:
        kwargs = {'NextPageToken': token}
    else:
        kwargs = {}
    data = cd.get_cost_and_usage(
        TimePeriod={'Start': '2019-01-01', 'End': '2019-06-30'},
        Granularity='MONTHLY',
        Metrics=['BlendedCost', 'UnblendedCost'],
        GroupBy=[
            {'Type': 'DIMENSION', 'Key': 'LINKED_ACCOUNT'},
            {'Type': 'DIMENSION', 'Key': 'SERVICE'}
        ], **kwargs)
    results += data['ResultsByTime']
    token = data.get('NextPageToken')
    if not token:
        break

print('\t'.join(['Start_date', 'End_date', 'LinkedAccount', 'Service', 'blended_cost', 'unblended_cost', 'Unit', 'Estimated']))
for result_by_time in results:
    for group in result_by_time['Groups']:
        blended_cost = group['Metrics']['BlendedCost']['Amount']
        unblended_cost = group['Metrics']['UnblendedCost']['Amount']
        unit = group['Metrics']['UnblendedCost']['Unit']
        print(result_by_time['TimePeriod']['Start'], '\t',
              result_by_time['TimePeriod']['End'], '\t',
              '\t'.join(group['Keys']), '\t',
              blended_cost, '\t',
              unblended_cost, '\t',
              unit, '\t',
              result_by_time['Estimated'])
As far as I know, Cost Explorer can't break usage down per instance. There is a feature called Cost and Usage Reports which delivers a detailed billing report as dump files; in these files you can see the instance ID.
The report can also be connected to Amazon Athena; once you've done that, you can query the files directly from Athena.
Here is my Presto example:
select
    lineitem_resourceid,
    sum(lineitem_unblendedcost) as unblended_cost,
    sum(lineitem_blendedcost) as blended_cost
from
    <table>
where
    lineitem_productcode = 'AmazonEC2' and
    product_operation like 'RunInstances%'
group by
    lineitem_resourceid
The result is
lineitem_resourceid unblended_cost blended_cost
i-***************** 279.424 279.424
i-***************** 139.948 139.948
i-******** 68.198 68.198
i-***************** 3.848 3.848
i-***************** 0.013 0.013
where the resource ID contains the instance ID. The cost amounts are summed over all usage in the month. For other types of product_operation, the column will contain different resource IDs.
You can add an individual tag to all instances (e.g. Id) and then group by that tag:
GroupBy=[
    {
        'Type': 'TAG',
        'Key': 'Id'
    },
],
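For example, a minimal sketch of the full call (the tag key Id is just the example above; whichever key you use must first be activated as a cost allocation tag in the Billing console before it shows up in Cost Explorer):

import boto3

cd = boto3.client('ce', 'ap-south-1')

# Group monthly cost by the 'Id' tag instead of by service.
data = cd.get_cost_and_usage(
    TimePeriod={'Start': '2019-01-01', 'End': '2019-06-30'},
    Granularity='MONTHLY',
    Metrics=['UnblendedCost'],
    GroupBy=[
        {'Type': 'TAG', 'Key': 'Id'}
    ]
)

for result_by_time in data['ResultsByTime']:
    for group in result_by_time['Groups']:
        # Group keys come back as 'Id$<value>'; an empty value means untagged.
        print(group['Keys'][0], group['Metrics']['UnblendedCost']['Amount'])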
I am creating an S3 bucket and configuring it to serve a static website via boto2.
I want to create a Route 53 ALIAS record set that will connect the S3 bucket name (e.g. something.some.com) with the same subdomain (something.some.com).
I wrote the following piece of code, but it returns an error.
changes = boto.route53.record.ResourceRecordSets(R53conn, HostedZone_id)
change = changes.add_change(
    action="CREATE",
    name="something.some.com",
    type="A",
    alias_hosted_zone_id=HostedZone_id,
    alias_dns_name='s3-website-us-east-1.amazonaws.com',
    alias_evaluate_target_health=False
)
result = changes.commit()
and the error I get is
File "F:\Python27\lib\site-packages\boto\route53\connection.py", line 475, in change_rrsets
body)
boto.route53.exception.DNSServerError: DNSServerError: 400 Bad Request
<?xml version="1.0"?>
<ErrorResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/"><Error><Type>Sender</Type><Code>InvalidChangeBatch</Code><Message>Tried to create an alias that targets s3-website-us-east-1.amazonaws.com., type A in zone Z26JTS7LAE8OIN, but the alias target name does not lie within the target zone</Message></Error><RequestId>74e609ed-be51-11e6-99bd-69e41e07a223</RequestId></ErrorResponse>
which I cannot understand. What am I missing?
I could not make progress with boto2, so I solved it with boto3 (far better). The key point is that the AliasTarget hosted zone ID must be the fixed hosted zone ID of the S3 website endpoint (Z3AQBSTGFYJSTF for us-east-1), not the ID of your own hosted zone; that mismatch is what the "alias target name does not lie within the target zone" error is complaining about.
Here is the code:
Route53_Hosted_Zone_ID = "Z3AQBSTGFYJSTF"  # Notice: fixed ID for S3 website endpoints in us-east-1
Custom_HZ_id = "use_yours"
record_set_obj = clientR53.change_resource_record_sets(
    HostedZoneId=Custom_HZ_id,
    ChangeBatch={
        'Changes': [
            {
                'Action': 'CREATE',
                'ResourceRecordSet': {
                    'Name': client_bucket_name,
                    'Type': 'A',
                    'AliasTarget': {
                        'HostedZoneId': Route53_Hosted_Zone_ID,
                        'DNSName': 's3-website-us-east-1.amazonaws.com',
                        'EvaluateTargetHealth': False
                    },
                }
            },
        ]
    }
)
I hope it will help someone :]
Is there any way to find unused VPCs in an AWS account?
I mean VPCs that don't have any EC2 instances, RDS instances, or other services associated with them.
One way is to search for the VPC ID across running instances, RDS, and the other services to find out whether it is in use. Is there any other way, or an AWS CLI command, to find unused VPCs?
There are many resources that can be included in a VPC, such as:
Amazon EC2 instances
Amazon RDS instances
Amazon Redshift instances
Amazon Elasticache instances
Elastic Load Balancers
Elastic Network Interfaces
and so on!
Rather than trying to iterate through each of these services, you could iterate through the Elastic Network Interfaces (ENIs), since everything connects to a VPC via an ENI.
Here's a command you could run using the AWS Command-Line Interface (CLI) that shows ENIs attached to a given VPC:
aws ec2 describe-network-interfaces --filters 'Name=vpc-id,Values=vpc-abcd1234' --query 'NetworkInterfaces[*].NetworkInterfaceId'
If no ENIs are returned, then you'd probably call it an unused VPC.
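The same idea in boto3, if you'd rather check every VPC in a region in one go (a sketch; loop it over describe_regions for full coverage):

import boto3

ec2 = boto3.client('ec2')  # checks the default region only

for vpc in ec2.describe_vpcs()['Vpcs']:
    # Everything in a VPC attaches through an ENI, so no ENIs
    # strongly suggests the VPC is unused.
    enis = ec2.describe_network_interfaces(
        Filters=[{'Name': 'vpc-id', 'Values': [vpc['VpcId']]}]
    )['NetworkInterfaces']
    if not enis:
        print("Probably unused VPC:", vpc['VpcId'])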
This might sound crazy, but you can simply attempt to delete the VPC. The API refuses to delete any VPC that still has resources in it, so only a genuinely unused VPC will actually be deleted. Try it carefully first, of course, but it's probably the fastest and cleanest check.
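A sketch of that check with boto3; be warned that it really deletes a VPC with no dependencies, and that even subnets count as dependencies for the DeleteVpc API, so run it only against VPCs you are willing to lose:

import boto3
from botocore.exceptions import ClientError

ec2 = boto3.client('ec2')

def try_delete_vpc(vpc_id):
    try:
        ec2.delete_vpc(VpcId=vpc_id)  # WARNING: really deletes an empty VPC
        print(vpc_id, "had no dependencies and has been deleted")
    except ClientError as e:
        # DependencyViolation means something is still attached.
        if e.response['Error']['Code'] == 'DependencyViolation':
            print(vpc_id, "is still in use")
        else:
            raise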
Please use the following script to identify the unused subnets in your AWS accounts across all regions.
USAGE:
Add the account list to the accounts variable, e.g. accounts=["a1","a2","a3"]
It will query and provide the list of subnets in all the regions for the respective accounts
A single CSV file is created at the end of each run for one account
Logic:
Query all the subnets across all the regions for an AWS account
Get the currently available IP count for each subnet (provided by the AWS API)
Get the subnet CIDR, calculate the total IP count, and subtract 5 (2 are used for the network and broadcast addresses, and the other 3 are reserved by AWS by default)
Then subtract: total IPs - available IPs = currently used IPs. If used IPs = 0, the subnet can be cleaned up (a quick worked example follows this list)
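A quick worked example of that arithmetic for a /24, using the same ipaddress module as the script (the values are illustrative):

import ipaddress

# A /24 holds 256 addresses; AWS reserves 5 of them, leaving 251 usable.
total = ipaddress.ip_network('10.0.1.0/24').num_addresses - 5   # 251
available = 251   # as reported by the AWS API for an empty subnet
used = total - available   # 0 -> the subnet is a cleanup candidate
print(total, available, used)

The full script: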
import boto3
import sys
import csv
import ipaddress

def describe_regions(session):
    try:
        aws_regions = []
        ec2_client = session.client('ec2')
        response_regions = ec2_client.describe_regions()['Regions']
        for region in response_regions:
            aws_regions.append(region['RegionName'])
        return aws_regions
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])

def describe_vpc(ec2, aws_region, writer, profile_name):
    try:
        response_vpc = ec2.describe_vpcs()['Vpcs']
        for vpc in response_vpc:
            print('=' * 50)
            count = 0
            filters = [
                {'Name': 'vpc-id',
                 'Values': [vpc['VpcId']]}
            ]
            response_subnets = ec2.describe_subnets(Filters=filters)['Subnets']
            for subnets in response_subnets:
                count += 1
                total_count = ipaddress.ip_network(subnets['CidrBlock']).num_addresses - 5
                Used_IP = total_count - subnets['AvailableIpAddressCount']
                writer.writerow({"Account": profile_name, "VpcId": vpc['VpcId'], "VpcCidr": vpc['CidrBlock'], "Region": aws_region,
                                 "Subnet": subnets['CidrBlock'], "SubnetId": subnets['SubnetId'], "AvailableIPv4": subnets['AvailableIpAddressCount'], "Total_Network_IP": str(total_count),
                                 "AvailabilityZone": subnets['AvailabilityZone'], "Used_IP": str(Used_IP)})
                print({"Account": profile_name, "VpcId": vpc['VpcId'], "VpcCidr": vpc['CidrBlock'], "Region": aws_region,
                       "Subnet": subnets['CidrBlock'], "SubnetId": subnets['SubnetId'], "AvailableIPv4": subnets['AvailableIpAddressCount'], "Total_Network_IP": str(total_count),
                       "AvailabilityZone": subnets['AvailabilityZone'], "Used_IP": str(Used_IP)})
            print('=' * 50)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])

def main():
    try:
        accounts = ["<Account names here as list>"]
        for profile in accounts:
            session = boto3.session.Session(
                profile_name=profile
            )
            file_name = profile
            print("File Name: " + file_name)
            profile_name = profile
            print("Profile_name: " + profile_name)
            with open(file_name + ".csv", "w", newline="") as csvfile:
                fieldnames = [
                    "Account", "VpcId",
                    "VpcCidr", "Region",
                    "Subnet", "SubnetId",
                    "AvailableIPv4", "Total_Network_IP",
                    "AvailabilityZone", "Used_IP"
                ]
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                aws_regions = describe_regions(session)
                for aws_region in aws_regions:
                    ec2 = session.client('ec2', region_name=aws_region)
                    print("Scanning region: {}".format(aws_region))
                    describe_vpc(ec2, aws_region, writer, profile_name)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise

if __name__ == "__main__":
    main()
This AWS Knowledge Center post gives good help, and it contains even better aws-cli commands to use: https://aws.amazon.com/premiumsupport/knowledge-center/troubleshoot-dependency-error-delete-vpc/