I was following the guide posted here on YouTube https://www.youtube.com/watch?v=jgiZ9QUYqyM and it is definitely what I want. I have posted the code that I have for mine and an image of what everything looks like in my AWS account.
I have a DynamoDB table and linked it to my S3 bucket with a trigger. That trigger is giving me the error message posted below: "Decimal('1') is not JSON serializable". I was only testing it with the hello-world template, though.
This is the code:
import boto3
import json
import os

s3 = boto3.client('s3')
ddb = boto3.resource('dynamodb')
table = ddb.Table('test_table')

def lambda_handler(event, context):
    response = table.scan()
    body = json.dumps(response['Items'])
    response = s3.put_object(Bucket='s3-testing',
                             Key='s3-testing.json',
                             Body=body,
                             ContentType='application/json')
Can someone point me in the right direction? These are the screenshots I have:
https://i.stack.imgur.com/I0jAn.png
https://i.stack.imgur.com/2hMc9.png
This is the execution log:
Response:
{
  "stackTrace": [
    [
      "/var/task/lambda_function.py",
      20,
      "lambda_handler",
      "body = json.dumps(response['Items'])"
    ],
    [
      "/usr/lib64/python2.7/json/__init__.py",
      244,
      "dumps",
      "return _default_encoder.encode(obj)"
    ],
    [
      "/usr/lib64/python2.7/json/encoder.py",
      207,
      "encode",
      "chunks = self.iterencode(o, _one_shot=True)"
    ],
    [
      "/usr/lib64/python2.7/json/encoder.py",
      270,
      "iterencode",
      "return _iterencode(o, 0)"
    ],
    [
      "/usr/lib64/python2.7/json/encoder.py",
      184,
      "default",
      "raise TypeError(repr(o) + \" is not JSON serializable\")"
    ]
  ],
  "errorType": "TypeError",
  "errorMessage": "Decimal('1') is not JSON serializable"
}
Function log:
START RequestId: 31719509-94c7-11e8-a0d4-a9b76b7b212c Version: $LATEST
Decimal('1') is not JSON serializable: TypeError
Traceback (most recent call last):
File "/var/task/lambda_function.py", line 20, in lambda_handler
body = json.dumps(response['Items'])
File "/usr/lib64/python2.7/json/__init__.py", line 244, in dumps
return _default_encoder.encode(obj)
File "/usr/lib64/python2.7/json/encoder.py", line 207, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib64/python2.7/json/encoder.py", line 270, in iterencode
return _iterencode(o, 0)
File "/usr/lib64/python2.7/json/encoder.py", line 184, in default
raise TypeError(repr(o) + " is not JSON serializable")
TypeError: Decimal('1') is not JSON serializable
The Decimal object is not JSON serializable. Consider casting the Decimal into an int or float using a helper function (json.dumps() accepts a default function):
import boto3
import json
import os
from decimal import Decimal

s3 = boto3.client('s3')
ddb = boto3.resource('dynamodb')
table = ddb.Table('test_table')

def lambda_handler(event, context):
    response = table.scan()
    body = json.dumps(response['Items'], default=handle_decimal_type)
    response = s3.put_object(Bucket='s3-testing',
                             Key='s3-testing.json',
                             Body=body,
                             ContentType='application/json')

def handle_decimal_type(obj):
    if isinstance(obj, Decimal):
        if float(obj).is_integer():
            return int(obj)
        else:
            return float(obj)
    raise TypeError
The problem is that the Dynamo Python library is converting numeric values to Decimal objects, but those aren't JSON serializable by default, so json.dumps blows up. You will need to provide json.dumps with a converter for Decimal objects.
See Python JSON serialize a Decimal object
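Alternatively, the same conversion can be packaged as a json.JSONEncoder subclass. A minimal sketch of that approach (the DecimalEncoder name is just illustrative):
import json
from decimal import Decimal

class DecimalEncoder(json.JSONEncoder):
    # Turn DynamoDB's Decimal values into int or float so json.dumps can handle them.
    def default(self, obj):
        if isinstance(obj, Decimal):
            # Whole numbers become int, everything else becomes float.
            return int(obj) if obj % 1 == 0 else float(obj)
        return super(DecimalEncoder, self).default(obj)

# Usage: body = json.dumps(response['Items'], cls=DecimalEncoder)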
Related
I am trying to send an email using Amazon SES, AWS S3, and AWS Lambda together. I have been hitting an error like this for a while now and I am not completely sure what to do here. I have the stack trace from the error below.
Edit: I have a fully verified Amazon SES domain and I am receiving emails to trigger the Lambda function. I am also able to successfully send emails using the built-in testing features, just not using this function.
{
  "errorMessage": "'list' object has no attribute 'encode'",
  "errorType": "AttributeError",
  "stackTrace": [
    " File \"/var/task/lambda_function.py\", line 222, in lambda_handler\n message = create_message(file_dict, header_from, header_to)\n",
    " File \"/var/task/lambda_function.py\", line 175, in create_message\n \"Data\": msg.as_string()\n",
    " File \"/var/lang/lib/python3.7/email/message.py\", line 158, in as_string\n g.flatten(self, unixfrom=unixfrom)\n",
    " File \"/var/lang/lib/python3.7/email/generator.py\", line 116, in flatten\n self._write(msg)\n",
    " File \"/var/lang/lib/python3.7/email/generator.py\", line 195, in _write\n self._write_headers(msg)\n",
    " File \"/var/lang/lib/python3.7/email/generator.py\", line 222, in _write_headers\n self.write(self.policy.fold(h, v))\n",
    " File \"/var/lang/lib/python3.7/email/_policybase.py\", line 326, in fold\n return self._fold(name, value, sanitize=True)\n",
    " File \"/var/lang/lib/python3.7/email/_policybase.py\", line 369, in _fold\n parts.append(h.encode(linesep=self.linesep, maxlinelen=maxlinelen))\n"
  ]
}
Additionally, here is the relevant code. The first part of the code is inside a create_message() method:
import os
import boto3
import email
import re
from botocore.exceptions import ClientError
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication

. . .

    # Create a MIME container.
    msg = MIMEMultipart()
    # Create a MIME text part.
    text_part = MIMEText(body_text, _subtype="html")
    # Attach the text part to the MIME message.
    msg.attach(text_part)

    # Add subject, from and to lines.
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = recipient

    # Create a new MIME object.
    att = MIMEApplication(file_dict["file"], filename)
    att.add_header("Content-Disposition", 'attachment', filename=filename)

    # Attach the file object to the message.
    msg.attach(att)

    message = {
        "Source": sender,
        "Destinations": recipient,
        "Data": msg.as_string()  # The error occurs here
    }
    return message

def send_email(message):
    aws_region = os.environ['Region']

    # Create a new SES client.
    client_ses = boto3.client('ses', aws_region)

    # Send the email.
    try:
        # Provide the contents of the email.
        response = client_ses.send_raw_email(
            Source=message['Source'],
            Destinations=[
                message['Destinations']
            ],
            RawMessage={
                'Data': message['Data']
            }
        )
If you have any insight as far as what should be done, that would be greatly appreciated. I've looked at similar questions but they did not resolve my issue. Thanks for your help!
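For reference, one common cause of this particular AttributeError is assigning a list (for example, a list of recipient addresses) to a header such as msg['To']: email header values must be strings. A minimal sketch of how a list of recipients is usually handled, with purely illustrative addresses:
from email.mime.multipart import MIMEMultipart

recipients = ['first@example.com', 'second@example.com']  # illustrative addresses

msg = MIMEMultipart()
msg['Subject'] = 'Test'
msg['From'] = 'sender@example.com'
msg['To'] = ', '.join(recipients)  # header value must be a single string, not a list

# By contrast, send_raw_email's Destinations parameter does expect a list of addresses:
# client_ses.send_raw_email(Source='sender@example.com', Destinations=recipients,
#                           RawMessage={'Data': msg.as_string()})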
I have created a CloudFormation template for cross-bucket S3 object replication using a Lambda function written in Python. Below is the Lambda code.
import json
import logging
import signal
import boto3
from urllib.request import *

s3 = boto3.resource('s3')
s3client = boto3.client('s3')
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)

def lambda_handler(event, context):
    sourcebucketname = event['ResourceProperties']['SourceBucketName']
    destinationbucketname = event['ResourceProperties']['DestinationBucketName']
    accountid = boto3.client('sts').get_caller_identity()['Account']
    try:
        LOGGER.info('REQUEST RECEIVED:\n %s', event)
        LOGGER.info('REQUEST RECEIVED:\n %s', context)
        if event['RequestType'] == 'Create':
            LOGGER.info('CREATE!')
            response = s3client.list_objects(Bucket=sourcebucketname)
            print(response)
            for record in response['Contents']:
                key = record['Key']
                dest_key = key
                copy_source = {'Bucket': sourcebucketname, 'Key': key}
                destbucket = s3.Bucket(destinationbucketname)
                response = destbucket.copy(copy_source, dest_key, ExtraArgs={'ACL': 'bucket-owner-full-control'})
                print(response)
                print('{} transferred to destination bucket'.format(key))
            send_response(event, context, "SUCCESS",
                          {"Message": "Resource creation successful!"})
        elif event['RequestType'] == 'Update':
            LOGGER.info('UPDATE!')
            send_response(event, context, "SUCCESS",
                          {"Message": "Resource update successful!"})
        elif event['RequestType'] == 'Delete':
            LOGGER.info('DELETE!')
            send_response(event, context, "SUCCESS",
                          {"Message": "Resource deletion successful!"})
        else:
            LOGGER.info('FAILED!')
            send_response(event, context, "FAILED",
                          {"Message": "Unexpected event received from CloudFormation"})
    except:  # pylint: disable=W0702
        LOGGER.info('FAILED!')
        send_response(event, context, "FAILED", {
            "Message": "Exception during processing"})

def send_response(event, context, response_status, response_data):
    '''Send a resource manipulation status response to CloudFormation'''
    response_body = json.dumps({
        "Status": response_status,
        "Reason": "See the details in CloudWatch Log Stream: " + context.log_stream_name,
        "PhysicalResourceId": context.log_stream_name,
        "StackId": event['StackId'],
        "RequestId": event['RequestId'],
        "LogicalResourceId": event['LogicalResourceId'],
        "Data": response_data
    })
    response_bdy = response_body.encode('utf-8')
    LOGGER.info('ResponseURL: %s', event['ResponseURL'])
    LOGGER.info('ResponseBody: %s', response_body)
    opener = build_opener(HTTPHandler)
    request = Request(event['ResponseURL'], data=response_bdy)
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', len(response_body))
    request.get_method = lambda: 'PUT'
    response = opener.open(request)
    LOGGER.info("Status code: %s", response.getcode())
    LOGGER.info("Status message: %s", response.msg)
The S3 objects are getting copied to the destination bucket successfully, but the Lambda function is failing to send event responses back to CloudFormation. Below is the error I'm getting.
[ERROR] TypeError: POST data should be bytes, an iterable of bytes, or a file object. It cannot be of type str.
Traceback (most recent call last):
File "/var/task/index.py", line 47, in lambda_handler
send_response(event, context, "FAILED", {
File "/var/task/index.py", line 69, in send_response
response = opener.open(request)
File "/var/lang/lib/python3.9/urllib/request.py", line 514, in open
req = meth(req)
File "/var/lang/lib/python3.9/urllib/request.py", line 1277, in do_request_
raise TypeError(msg)
The send_response function is failing with the above error; please help me figure out where it's going wrong.
The error message is telling you what's wrong.
[ERROR] TypeError: POST data should be bytes, an iterable of bytes, or a file object. It cannot be of type str.
In your code, response_body is a str. You can convert it to bytes by doing response_body.encode('utf-8').
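For illustration, a minimal sketch of that idea (put_cfn_response is just a hypothetical helper name; the key point is that urllib's Request needs bytes, not str, for its data argument):
import json
from urllib.request import build_opener, HTTPHandler, Request

def put_cfn_response(response_url, payload):
    body = json.dumps(payload)       # str
    encoded = body.encode('utf-8')   # bytes, which is what urllib requires
    request = Request(response_url, data=encoded)
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', str(len(encoded)))
    request.get_method = lambda: 'PUT'  # CloudFormation expects a PUT to the pre-signed URL
    return build_opener(HTTPHandler).open(request)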
I am adding a new user list using the Google AdWords API AdwordsUserListService. I have passed all the required inputs, and the AdWords client instance was created successfully.
But while calling the mutate method, it throws an AttributeError:
"AttributeError: class HttpTransport has no attribute '_HttpTransport__get_request_url'"
Please find the stack trace:
Traceback (most recent call last):
File "C:\Program Files (x86)\JetBrains\PyCharm Community Edition 5\helpers\pydev\pydevd.py", line 2411, in
globals = debugger.run(setup['file'], None, None, is_module)
File "C:\Program Files (x86)\JetBrains\PyCharm Community Edition 5\helpers\pydev\pydevd.py", line 1802, in run
launch(file, globals, locals) # execute the script
File "D:/Python Studies/SVN Code/Work In Progress/Source Code/doubleclick-Dmp-Integration/DmpIntegrationApplication/dmpintegration/dmpex/adwords.py", line 93, in
main(adwords_client)
File "D:/Python Studies/SVN Code/Work In Progress/Source Code/doubleclick-Dmp-Integration/DmpIntegrationApplication/dmpintegration/dmpex/adwords.py", line 33, in main
result = user_list_service.mutate(operations)
File "C:\Python27\lib\site-packages\googleads\common.py", line 720, in MakeSoapRequest
*[_PackForSuds(arg, self.suds_client.factory) for arg in args])
File "C:\Python27\lib\site-packages\suds\client.py", line 542, in call
return client.invoke(args, kwargs)
File "C:\Python27\lib\site-packages\suds\client.py", line 602, in invoke
result = self.send(soapenv)
File "C:\Python27\lib\site-packages\suds\client.py", line 637, in send
reply = transport.send(request)
File "C:\Python27\lib\site-packages\googleads\util.py", line 92, in PatchedHttpTransportSend
url = http_transport._HttpTransport__get_request_url(request)
AttributeError: class HttpTransport has no attribute '_HttpTransport__get_request_url'
Here is my code:
import uuid

from googleads import adwords
from googleads import oauth2

def main(client):
    # Initialize appropriate service.
    user_list_service = client.GetService(
        'AdwordsUserListService', version='v201702')

    # Construct operations and add a user list.
    operations = [
        {
            'operator': 'ADD',
            'operand': {
                'xsi_type': 'BasicUserList',
                'name': 'Mars cruise customers #%s' % uuid.uuid4(),
                'description': 'A list of mars cruise customers in the last year',
                'membershipLifeSpan': '365',
                'conversionTypes': [
                    {
                        'name': ('Mars cruise customers #%s'
                                 % uuid.uuid4())
                    }
                ],
                # Optional field.
                'status': 'OPEN',
            }
        }
    ]
    result = user_list_service.mutate(operations)

if __name__ == '__main__':
    CLIENT_ID = 'xxx'
    CLIENT_SECRET = 'xxx'
    REFRESH_TOKEN = 'xxx'
    DEVELOPER_TOKEN = 'xxx'
    USER_AGENT = 'xxx'
    CLIENT_CUSTOMER_ID = 'xxx'
    oauth2_client = oauth2.GoogleRefreshTokenClient(CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN)
    adwords_client = adwords.AdWordsClient(DEVELOPER_TOKEN, oauth2_client, USER_AGENT, client_customer_id=CLIENT_CUSTOMER_ID)
    main(adwords_client)
I can't use the download reports feature with the Python client. I'm using adwords-15.9.0 with v201306. It always fails with:
$ ./classifications.py
Traceback (most recent call last):
File "./classifications.py", line 48, in <module>
download_report(client, client_id)
File "./classifications.py", line 32, in download_report
file_path = report_downloader.DownloadReportWithAwql(report_query, 'CSV', file_path=path)
File "/Users/mike/.virtualenvs/xxx/lib/python2.7/site-packages/adspygoogle/adwords/ReportDownloader.py", line 127, in DownloadReportWithAwql
fileobj) or file_path
File "/Users/mike/.virtualenvs/xxx/lib/python2.7/site-packages/adspygoogle/adwords/ReportDownloader.py", line 169, in __DownloadAdHocReportWithAwql
return self.__DownloadReport(payload, return_micros, fileobj)
File "/Users/mike/.virtualenvs/xxx/lib/python2.7/site-packages/adspygoogle/adwords/ReportDownloader.py", line 184, in __DownloadReport
headers = self.__GenerateHeaders(return_micros)
File "/Users/mike/.virtualenvs/xxx/lib/python2.7/site-packages/adspygoogle/adwords/ReportDownloader.py", line 282, in __GenerateHeaders
self._headers['oauth2credentials'].apply(headers)
File "/Users/mike/.virtualenvs/xxx/lib/python2.7/site-packages/oauth2client/client.py", line 533, in apply
headers['Authorization'] = 'Bearer ' + self.access_token
TypeError: cannot concatenate 'str' and 'NoneType' objects
Example scripts get_report_fields.py and get_campaign_stats.py work fine but download_criteria_report.py and download_criteria_report_with_awql.py fail with the same error.
Any ideas?
My code:
#!/usr/bin/env python

import csv
import os

import MySQLdb as mdb

from adspygoogle.adwords.AdWordsClient import AdWordsClient

MATCH_TYPES = {
    'b': 'Broad',
    'e': 'Exact',
    'p': 'Phrase',
}

DEVICE_TYPES = {
    'c': 'Desktop',
    'm': 'Mobile',
    't': 'Tablet',
}

REPORT_TYPE = 'CREATIVE_CONVERSION_REPORT'

def download_report(client, client_id):
    # Initialize appropriate service.
    report_downloader = client.GetReportDownloader(version='v201306')

    # Create report query.
    report_query = ('SELECT AdGroupId', 'CampaignId', 'CreativeId FROM CREATIVE_CONVERSION_REPORT DURING LAST_7_DAYS')

    path = '/tmp/report_%d.csv' % client_id
    file_path = report_downloader.DownloadReportWithAwql(report_query, 'CSV', file_path=path)
    print 'Report was downloaded to \'%s\'.' % file_path

if __name__ == '__main__':
    client = AdWordsClient()

    conn = mdb.connect('xxx.us-east-1.rds.amazonaws.com', 'xxx', 'xxx', 'xxx')
    with conn:
        cur = conn.cursor(mdb.cursors.DictCursor)
        cur.execute("SELECT * FROM xxx.adwords_accounts")
        rows = cur.fetchall()
        for row in rows:
            client_id = row['id']
            client.SetClientCustomerId(client_id)
            download_report(client, client_id)
Something's wrong with your authentication as indicated by the OAuth2Credentials object's attribute access_token being None.
If you didn't already, take a look at the use_oauth2.py example to see how authentication via OAuth2 is handled. You will also need to create a Google API Console application to obtain a client ID and secret.
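A rough sketch of the kind of setup that example demonstrates, assuming the oauth2client library (which the traceback shows in use); the header key names below are assumptions, so cross-check them against the example:
import httplib2
from oauth2client.client import OAuth2Credentials
from adspygoogle.adwords.AdWordsClient import AdWordsClient

# Placeholders for values from your Google API Console application.
credentials = OAuth2Credentials(
    None, 'INSERT_CLIENT_ID', 'INSERT_CLIENT_SECRET', 'INSERT_REFRESH_TOKEN',
    None, 'https://accounts.google.com/o/oauth2/token', 'INSERT_USER_AGENT')
credentials.refresh(httplib2.Http())  # populates access_token so it is no longer None

# Header key names here are assumptions -- verify them against use_oauth2.py.
client = AdWordsClient(headers={
    'developerToken': 'INSERT_DEVELOPER_TOKEN',
    'clientCustomerId': 'INSERT_CLIENT_CUSTOMER_ID',
    'userAgent': 'INSERT_USER_AGENT',
    'oauth2credentials': credentials,
})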
It's a known bug. Fixed in v15.9.1
This code to fetch skills from LinkedIn works fine with Python 2.6:
from linkedin import helper
from liclient import LinkedInAPI
import json
AUTH_TOKEN='{"oauth_token_secret": "TOKEN SECRET", "oauth_authorization_expires_in": "0", "oauth_token": "OAUTH TOKEN", "oauth_expires_in": "0"}'
consumer_key = 'CONSUMER KEY'
consumer_secret = 'CONSUMER SECRET'
APIClient = LinkedInAPI(consumer_key, consumer_secret)
request_token = APIClient.get_request_token()
field_selector_string = ['skills']
results = APIClient.get_user_profile(json.loads(AUTH_TOKEN), field_selector_string)
Skills = results[0].skills
print Skills
But when I run the same code with Python 2.7, I get this error:
Traceback (most recent call last):
File "linkedintest.py", line 10, in <module>
results = APIClient.get_user_profile(json.loads(AUTH_TOKEN), field_selector_string)
File "/var/www/shine/liclient/__init__.py", line 82, in get_user_profile
resp, content = client.request(url, 'GET')
File "/var/www/shine/liclient/oauth2/__init__.py", line 603, in request
req.sign_request(self.method, self.consumer, self.token)
File "/var/www/shine/liclient/oauth2/__init__.py", line 357, in sign_request
self['oauth_signature'] = signature_method.sign(self, consumer, token)
File "/var/www/shine/liclient/oauth2/__init__.py", line 683, in sign
hashed = hmac.new(key, raw, sha)
File "/usr/lib/python2.7/hmac.py", line 133, in new
return HMAC(key, msg, digestmod)
File "/usr/lib/python2.7/hmac.py", line 72, in __init__
self.outer.update(key.translate(trans_5C))
TypeError: character mapping must return integer, None or unicode
Try this. In Python 2.7, hmac.new() expects a byte-string (str) key, so the unicode values that json.loads returns need to be converted to plain str first:
from linkedin import helper
from liclient import LinkedInAPI
import json
AUTH_TOKEN='{"oauth_token_secret": "TOKEN SECRET", "oauth_authorization_expires_in": "0", "oauth_token": "OAUTH TOKEN", "oauth_expires_in": "0"}'
# converting unicode dict to str dict
AUTH_TOKEN=json.loads(AUTH_TOKEN)
AUTH_TOKEN_DICT = {}
for auth in AUTH_TOKEN:
    AUTH_TOKEN_DICT[str(auth)] = str(AUTH_TOKEN[auth])
consumer_key = 'CONSUMER KEY'
consumer_secret = 'CONSUMER SECRET'
APIClient = LinkedInAPI(consumer_key, consumer_secret)
request_token = APIClient.get_request_token()
field_selector_string = ['skills']
results = APIClient.get_user_profile(AUTH_TOKEN_DICT, field_selector_string)
Skills = results[0].skills
print Skills