DynamoDB using the Serverless Python Template gives KeyError for body - amazon-web-services

The code for the lambda function is the following:
import json
import logging
import os
import time
import uuid
import boto3
dynamodb = boto3.resource('dynamodb')
def create(event, context):
    data = json.loads(event['body'])
    if 'text' not in data:
        logging.error("Validation Failed")
        raise Exception("Couldn't create the todo item.")

    timestamp = str(time.time())
    table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
    item = {
        'id': str(uuid.uuid1()),
        'name': data['text'],
        'description': data['text'],
        'price': data['text'],
        'createdAt': timestamp,
        'updatedAt': timestamp,
    }

    # write the todo to the database
    table.put_item(Item=item)

    # create a response
    response = {
        "statusCode": 200,
        "body": json.dumps(item)
    }

    return response
The test event using AWS Lambda's testing feature is:
{
    "name": "Masks",
    "description": "A box of 50 disposable masks",
    "price": "$10"
}
The log output is:
START RequestId: 5cf1c00a-dba5-4ef6-b5e7-b692d8235ffe Version: $LATEST
[ERROR] KeyError: 'body'
Traceback (most recent call last):
  File "/var/task/todos/create.py", line 12, in create
    data = json.loads(event['body'])
END RequestId: 5cf1c00a-dba5-4ef6-b5e7-b692d8235ffe
Why is "body" giving me a key error? How do I fix this? The template is directly from www.severless.com, and based off of online tutorials, people have used the exact same code, albie with different values, successfully?
I've tried changing variable names and value to no avail.
sls deploy
does successfully create the table, but I am unable to insert any data into it.
Edit 1: For those of you unfamiliar with AWS Lambda's test feature, using Postman to send the same data leads to a 502 Gateway Error.

Assuming that this is the correct event object:
{
    "name": "Masks",
    "description": "A box of 50 disposable masks",
    "price": "$10"
}
your code which matches this event should be:
import json
import logging
import os
import time
import uuid
import boto3
dynamodb = boto3.resource('dynamodb')
def create(event, context):
    timestamp = str(time.time())
    table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
    item = {
        'id': str(uuid.uuid1()),
        'name': event['name'],
        'description': event['description'],
        'price': event['price'],
        'createdAt': timestamp,
        'updatedAt': timestamp,
    }

    # write the todo to the database
    table.put_item(Item=item)

    # create a response
    response = {
        "statusCode": 200,
        "body": json.dumps(item)
    }

    return response
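That works when the event is the raw JSON pasted into the Lambda console test. When the same function is invoked through API Gateway's Lambda proxy integration, the JSON instead arrives as a string under event['body'], which is also a likely cause of the 502 mentioned in the edit above. A minimal, hedged sketch that accepts both shapes (an illustration only, not the Serverless template's code):
import json


def extract_payload(event):
    # Proxy integration: the client's JSON arrives as a string under 'body'.
    # Direct console test: the event itself is already the payload dict.
    if isinstance(event, dict) and event.get('body'):
        return json.loads(event['body'])
    return event
The create handler above could then call extract_payload(event) once at the top and keep the rest of its validation unchanged.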

Related

Using Postman and AWS Lambdas to upload and download to S3

I have 2 Lambda functions, to upload and download a file from an S3 bucket. I'm using Postman and configuring the POST and GET requests to send a filename via JSON (GET JSON payload: {"thefile" : "test_upload.txt"}) and have set a form-data key of "thefile" whose value is the test file selected from my working directory on the computer.
The issue comes when sending the API requests via Postman: it gives me 'Internal Server Error'.
The code for my lambdas is below:
**UPLOAD**
import json
import boto3
import os
def lambda_handler(event, context):
    s3 = boto3.client('s3')
    data = json.loads(event["body"])
    file = data["thefile"]
    print(file)
    try:
        s3.upload_file(file, os.environ['BUCKET_NAME'], file)
        url = s3.generate_presigned_url(
            ClientMethod='get_object',
            Params={
                'Bucket': os.environ['BUCKET_NAME'],
                'Key': file
            },
            ExpiresIn=24 * 3600
        )
        print("Upload Successful", url)
        return {
            "headers": { "Content-Type": "application/json" },
            "statusCode": 200,
            "isBase64Encoded": False,
            "body": str(url)
        }
    except FileNotFoundError:
        print("The file was not found")
        return {
            "headers": { "Content-Type": "application/json" },
            "statusCode": 404,
            "isBase64Encoded": False,
            "body": "File not found"
        }
**DOWNLOAD**
import json
import boto3
import botocore
import base64
from botocore.vendored import requests
def lambda_handler(event, context):
    s3 = boto3.client('s3')
    data = json.loads(event["body"])
    file = data["thefile"]
    try:
        response = s3.get_object(Bucket=BUCKET_NAME, Key=file)
        download_file = response["Body"].read()
        filereturn = base64.b64encode(download_file).decode("utf-8")
        return {
            "headers": { "Content-Type": "application/json" },
            "statusCode": 200,
            "body": json.dumps(filereturn),
            "isBase64Encoded": True,
            "File Downloaded": "File Downloaded successfully"
        }
    except Exception as e:
        return {
            "headers": { "Content-Type": "application/json" },
            "statusCode": 404,
            "body": "Error: File not found!"
        }
Can anyone tell me what I'm doing wrong? The Lambdas have full access to S3 in their policy; I've even switched off authorisation and made the bucket public in case it was a permissions error, but still nothing. It must be something stupid I'm either forgetting or have mis-coded, but I can't for the life of me figure it out!
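One thing worth noting about the upload handler: s3.upload_file() reads a file from the local filesystem of the machine it runs on, and a filename sent from Postman will not exist on the Lambda instance's disk. A hedged sketch of an alternative, assuming the file's raw bytes are sent base64-encoded in the JSON body under a hypothetical "content" field and that BUCKET_NAME is set as an environment variable, is to write the bytes with put_object instead:
import base64
import json
import os

import boto3

s3 = boto3.client('s3')

def lambda_handler(event, context):
    # Hypothetical payload shape: {"thefile": "<key>", "content": "<base64 bytes>"}
    data = json.loads(event["body"])
    key = data["thefile"]
    body_bytes = base64.b64decode(data["content"])  # "content" is an assumed field

    # Write the bytes straight to S3 instead of reading a local file path
    s3.put_object(Bucket=os.environ['BUCKET_NAME'], Key=key, Body=body_bytes)

    return {
        "headers": {"Content-Type": "application/json"},
        "statusCode": 200,
        "body": json.dumps({"uploaded": key})
    }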

Why is django TestCase not creating a test database?

I am writing a Django test inheriting from django.test.TestCase. Everywhere, in the docs, this tutorial, and even in this accepted SO answer, it is stated that when using Django's TestCase, a test DB will be created automatically.
Previously I have worked with DRF APITestCases and all worked well. Here I am using the very standard approach, but the setUpTestData class method is using my production db.
What am I doing wrong, and what has to be done so that a test db is spawned and used for the test?
Please see my code below.
from datetime import datetime

import factory
from django.test import TestCase
from django.db.models import signals

from agregator.models import AgregatorProduct


def sample_product_one():
    sample_product_one = {
        # "id": 1,
        "name": "testProdOne",
        "dph": 21,
        "updated": datetime.now(),
        "active": True,
        "updatedinstore": False,
        "imagechanged": False,
        "isVirtualProduct": False,
    }
    return sample_product_one


class TestCreateProdToCategory(TestCase):
    """
    Test for correct creation of records
    """
    @classmethod
    @factory.django.mute_signals(signals.pre_save, signals.post_save)
    def setUpTestData(cls):
        AgregatorProduct.objects.create(
            **sample_product_one()
        )

    def test_create_prod_to_cat(self):
        product = AgregatorProduct.objects.get(id=1)
        self.assertEqual(product.id, 1)
DB set up:
DATABASES = {
    'agregator': {
        'NAME': 'name',
        'ENGINE': 'sql_server.pyodbc',
        'HOST': 'my_ip',
        'USER': 'my_user',
        'PASSWORD': 'my_pwd',
        'OPTIONS': {
            'driver': 'ODBC Driver 17 for SQL Server',
            'isolation_level': 'READ UNCOMMITTED',
        },
    }
}
The test results in
----------------------------------------------------------------------
Traceback (most recent call last):
  File "C:\xevos\xevosadmin\agregator\tests\test_admin_actions\test_products_to_categories_admin_action.py", line 64, in test_create_prod_to_cat
    product = AgregatorProduct.objects.get(id=1)
  File "C:\xevos\xevosadmin\.venv\lib\site-packages\django\db\models\manager.py", line 82, in manager_method
    return getattr(self.get_queryset(), name)(*args, **kwargs)
  File "C:\xevos\xevosadmin\.venv\lib\site-packages\django\db\models\query.py", line 397, in get
    raise self.model.DoesNotExist(
agregator.models.AgregatorProduct.DoesNotExist: AgregatorProduct matching query does not exist.
----------------------------------------------------------------------
which is a result of the id being auto-incrementing; given there are already products in the production db, it gets an id of e.g. 151545.
(AgregatorProduct matching query does not exist. is a result of the fact that the product which used to have id=1 was deleted from the production db a long time ago.)
So the test writes to the existing database, and the data persists there even after the test is finished.
To create a test database, use the setUp method inside TestCase and run the tests using python manage.py test:
from django.test import TestCase
from myapp.models import Animal

class AnimalTestCase(TestCase):
    def setUp(self):
        Animal.objects.create(name="lion", sound="roar")
        Animal.objects.create(name="cat", sound="meow")

    def test_animals_can_speak(self):
        """Animals that can speak are correctly identified"""
        lion = Animal.objects.get(name="lion")
        cat = Animal.objects.get(name="cat")
        self.assertEqual(lion.speak(), 'The lion says "roar"')
        self.assertEqual(cat.speak(), 'The cat says "meow"')
The test database will be created and deleted automatically after the tests are done.
https://docs.djangoproject.com/en/3.1/topics/testing/overview/#writing-tests
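If the project deliberately uses a database alias other than 'default' (the settings above only show an 'agregator' alias), note that Django's TestCase restricts test queries to the aliases listed in its databases attribute, so a test touching the 'agregator' alias may need to declare it. A minimal, hedged sketch, assuming Django 2.2+ and the model fields shown in the question:
from datetime import datetime

from django.test import TestCase

from agregator.models import AgregatorProduct


class TestCreateProdToCategory(TestCase):
    # Aliases listed here are set up with a test database and may be queried
    # during the run; anything else is refused, so production data stays untouched.
    databases = {'agregator'}

    @classmethod
    def setUpTestData(cls):
        cls.product = AgregatorProduct.objects.create(
            name="testProdOne", dph=21, updated=datetime.now(), active=True,
            updatedinstore=False, imagechanged=False, isVirtualProduct=False,
        )

    def test_create_prod_to_cat(self):
        # Assert on the object created above instead of a hard-coded id=1
        self.assertIsNotNone(self.product.id)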

AWS Lambda: key error when sending a POST message

I have a very simple problem:
my Lambda function works fine as long as I do not write something like a = event["key"], but a = "test" instead:
This is from Cloudwatch:
[ERROR] KeyError: 'key1'
Traceback (most recent call last):
  File "/var/task/lambda_function.py", line 5, in lambda_handler
    a = event["key1"]
This is what I have sent with Postman (I even tried curl) in the body as raw data:
{
    "key1": "value1",
    "key2": "value2",
    "key3": "value3"
}
My Lambda function looks like this:
import json

def lambda_handler(event, context):
    # TODO implement
    a = event["key1"]
    return {
        'statusCode': 200,
        'body': json.dumps(a)
    }
With a REST API, the LAMBDA integration will pass the request as-is, whereas LAMBDA_PROXY will append additional metadata such as query params, API keys, etc., so the input request body is passed as a JSON string under the body attribute. json.loads(event['body']) will give us the actual request body.
More details on changing the integration type are here.
The code below can extract key1 from the input JSON object for LAMBDA_PROXY.
import json

def lambda_handler(event, context):
    print(event)
    a = json.loads(event['body'])['key1']
    return {
        'statusCode': 200,
        'body': json.dumps(a)
    }
The fastest way for me was to use an HTTP API and use form-data with key1=test. Then I printed event["body"] and found out that my body was base64-encoded. I used the following code to make that visible:
import json
import base64

def lambda_handler(event, context):
    # TODO implement
    a = event["body"]
    print(a)
    message_bytes = base64.b64decode(a)
    message = message_bytes.decode('ascii')
    return {
        'statusCode': 200,
        'body': json.dumps(message)
    }
The output was:
"----------------------------852732202793625384403314\r\nContent-Disposition: form-data; name=\"key1\"\r\n\r\ntest\r\n----------------------------852732202793625384403314--\r\n"

How to get a Cognito users list in JSON format

I'm going to back up my Cognito users with Lambda, but I can't get the Cognito users list in JSON format with boto3. I do:
import boto3
import os
import json
from botocore.exceptions import ClientError
COGNITO_POOL_ID = os.getenv('POOL_ID')
S3_BUCKET = os.getenv('BACKUP_BUCKET')
ENV_NAME = os.getenv('ENV_NAME')
filename = ENV_NAME + "-cognito-backup.json"
REGION = os.getenv('REGION')
cognito = boto3.client('cognito-idp', region_name=REGION)
s3 = boto3.resource('s3')
def lambda_handler(event, context):
    try:
        response = (cognito.list_users(UserPoolId=COGNITO_POOL_ID, AttributesToGet=['email_verified', 'email']))['Users']
        data = json.dumps(str(response)).encode('UTF-8')
        s3object = s3.Object(S3_BUCKET, filename)
        s3object.put(Body=(bytes(data)))
    except ClientError as error:
        print(error)
But I get one string, and I'm not sure it is JSON at all:
[{'Username': 'user1', 'Attributes': [{'Name': 'email_verified', 'Value': 'true'}, {'Name': 'email', 'Value': 'user1#xxxx.com'}], 'UserCreateDate': datetime.datetime(2020, 2, 10, 13, 13, 34, 457000, tzinfo=tzlocal()), 'UserLastModifiedDate': datetime.datetime(2020, 2, 10, 13, 13, 34, 457000, tzinfo=tzlocal()), 'Enabled': True, 'UserStatus': 'FORCE_CHANGE_PASSWORD'}]
I need something like this:
[
    {
        "Username": "user1",
        "Attributes": [
            {
                "Name": "email_verified",
                "Value": "true"
            },
            {
                "Name": "email",
                "Value": "user1#xxxx.com"
            }
        ],
        "Enabled": "true",
        "UserStatus": "CONFIRMED"
    }
]
Try this:
import ast
import json
print(ast.literal_eval(json.dumps(response)))
That is for the dict response from the SDK.
Edit: Just realized that since list_users also returns a UserCreateDate object, json.dumps will complain about the transformation due to the datetime value of the UserCreateDate key. If you take that off, this will work without the ast module:
import json
data = {'Username': 'Google_11761250', 'Attributes': [{'Name': 'email', 'Value': 'abc#gmail.com'}],'Enabled': True, 'UserStatus': 'EXTERNAL_PROVIDER'}
print((json.dumps(data)))
> {"Username": "Google_1176125910", "Attributes": [{"Name": "email", "Value": "123#gmail.com"}], "Enabled": true, "UserStatus": "EXTERNAL_PROVIDER"}
You can check the output type by using
type(output)
I guess that it can be a list type, so you can convert it into JSON and pretty-print it by using:
print(json.dumps(output, indent=4))

Why does the scheduler fail the first 3 or 4 times I run it?

I have a Cloud Function running on Google Cloud with an HTTP trigger.
The trigger works fine and so does the function, but I am getting errors when executing it via Cloud Scheduler.
The function connects to Cloud SQL and inserts and retrieves information from the database.
Here is the function:
from os import getenv
import requests
import pymysql
import json
from pymysql.err import OperationalError
# TODO(developer): specify SQL connection details
CONNECTION_NAME = getenv('INSTANCE_CONNECTION_NAME', 'name')
DB_USER = getenv('MYSQL_USER', 'root')
DB_PASSWORD = getenv('MYSQL_PASSWORD', 'password')
DB_NAME = getenv('MYSQL_DATABASE', 'dbName')
mysql_config = {
    'user': DB_USER,
    'password': DB_PASSWORD,
    'db': DB_NAME,
    'charset': 'utf8mb4',
    'cursorclass': pymysql.cursors.DictCursor,
    'autocommit': True
}

# Create SQL connection globally to enable reuse
# PyMySQL does not include support for connection pooling
mysql_conn = None

def __get_cursor():
    """
    Helper function to get a cursor
    PyMySQL does NOT automatically reconnect,
    so we must reconnect explicitly using ping()
    """
    try:
        return mysql_conn.cursor()
    except OperationalError:
        mysql_conn.ping(reconnect=True)
        return mysql_conn.cursor()

def authMonzo(request):
    global mysql_conn
    global endpoint
    endpoint = 'https://****/oauth2/token'

    # Initialize connections lazily, in case SQL access isn't needed for this
    # GCF instance. Doing so minimizes the number of active SQL connections,
    # which helps keep your GCF instances under SQL connection limits.
    if not mysql_conn:
        try:
            mysql_conn = pymysql.connect(**mysql_config)
        except OperationalError:
            # If production settings fail, use local development ones
            mysql_config['unix_socket'] = f'/cloudsql/{CONNECTION_NAME}'
            mysql_conn = pymysql.connect(**mysql_config)

    # Remember to close SQL resources declared while running this function.
    # Keep any declared in global scope (e.g. mysql_conn) for later reuse.
    with __get_cursor() as cursor:
        cursor.execute("SELECT * FROM TOKENS ORDER BY ID DESC LIMIT 1")
        result = cursor.fetchall()
        clientSecret = result[len(result) - 1]['CLIENT_SECRET']
        clientId = result[len(result) - 1]['CLIENT_ID']
        refreshToken = result[len(result) - 1]['REFRESH_TOKEN']

        # Change the last refresh token for a new access token
        tokenRequest = requests.post(endpoint, data={'grant_type': 'refresh_token', 'client_id': clientId, 'client_secret': clientSecret, 'refresh_token': refreshToken})
        tokenJson = json.loads(tokenRequest.content)
        print(tokenJson)

        # Assign the values to variables
        accessToken = tokenJson['access_token']
        refreshToken = tokenJson['refresh_token']
        scope = tokenJson['scope']
        clientId = tokenJson['client_id']
        expiresIn = tokenJson['expires_in']
        userId = tokenJson['user_id']
        tokenType = tokenJson['token_type']

        # Insert the new token on the table tokens
        SQLToken = ("INSERT INTO TOKENS (CLIENT_SECRET, ACCESS_TOKEN, REFRESH_TOKEN, TOKEN_TYPE, USER_ID, CLIENT_ID, EXPIRES_IN, SCOPE) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
        Values = (clientSecret, accessToken, refreshToken, tokenType, userId, clientId, expiresIn, scope)

        # Insert new account information
        cursor.execute(SQLToken, Values)

    return str(accessToken)
After trying more than 3 or 4 times, I get a Success status for the execution.
Here is the JSON extracted from the log:
{
    insertId: "000000-dca60f91-5c0f-439f-b21c-bda99114ddce"
    labels: {
        execution_id: ""
    }
    logName: "projects/quidsave-237317/logs/cloudfunctions.googleapis.com%2Fcloud-functions"
    receiveTimestamp: "2019-05-02T12:15:55.145395087Z"
    resource: {
        labels: {
            function_name: "authMonzo"
            project_id: "quidsave-237317"
            region: "europe-west2"
        }
        type: "cloud_function"
    }
    severity: "ERROR"
    textPayload: "Error: function crashed. Function invocation was interrupted."
    timestamp: "2019-05-02T12:15:48.766445135Z"
}
This is the error:
severity: "ERROR"
textPayload: "Traceback (most recent call last):
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 346, in run_http_function
result = _function_handler.invoke_user_function(flask.request)
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 217, in invoke_user_function
return call_user_function(request_or_event)
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 210, in call_user_function
return self._user_function(request_or_event)
File "/user_code/main.py", line 60, in authMonzo
cursor.execute("SELECT * FROM TOKENS ORDER BY ID DESC LIMIT 1")
File "/env/local/lib/python3.7/site-packages/pymysql/cursors.py", line 170, in execute
result = self._query(query)
File "/env/local/lib/python3.7/site-packages/pymysql/cursors.py", line 328, in _query
conn.query(q)
File "/env/local/lib/python3.7/site-packages/pymysql/connections.py", line 516, in query
self._execute_command(COMMAND.COM_QUERY, sql)
File "/env/local/lib/python3.7/site-packages/pymysql/connections.py", line 750, in _execute_command
raise err.InterfaceError("(0, '')")
pymysql.err.InterfaceError: (0, '')
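The snippet's own comments note that PyMySQL does not reconnect automatically and that ping() must be called explicitly; the InterfaceError above is consistent with a warm instance reusing a stale global connection. A hedged sketch (reusing mysql_conn and mysql_config from the function above; not a verified fix) that pings the connection right before handing out a cursor:
import pymysql
from pymysql.err import OperationalError, InterfaceError

# mysql_conn and mysql_config are assumed to be the module-level globals
# defined in the function above.

def __get_cursor():
    """Return a cursor, reconnecting first if the cached connection is stale."""
    global mysql_conn
    try:
        # ping(reconnect=True) re-opens the socket if the server dropped it
        mysql_conn.ping(reconnect=True)
        return mysql_conn.cursor()
    except (OperationalError, InterfaceError):
        # Fall back to a brand-new connection if ping itself fails
        mysql_conn = pymysql.connect(**mysql_config)
        return mysql_conn.cursor()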