I am looking for some help with an alarm clock that interacts with Google Calendar.
I have a problem with the code now where it is not pulling down the events.
Here is the error I get now:
INFO:main:Polling calendar for events...
INFO:googleapiclient.discovery:URL being requested: GET https://www.googleapis.com/calendar/v3/calendars/primary/events?alt=json&singleEvents=true
INFO:main:Polling calendar for events...
INFO:googleapiclient.discovery:URL being requested: GET https://www.googleapis.com/calendar/v3/calendars/primary/events?alt=json&singleEvents=true
Process finished with exit code -1
# Inspired by 'Raspberry Pi as a Google Calendar Alarm Clock'
# http://www.esologic.com/?p=634
# and this link as well: https://github.com/ehamiter/get-on-the-bus
from datetime import datetime
import logging, os, platform, re, time

from apiclient import discovery
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow

SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Calendar API Python Quickstart'
FREQUENCY_CHECK = 5  # in seconds
MP3_FOLDER = r'E:\Users\Andrew.Price\PycharmProjects\SimpleAlarm\MP3'  # raw string so the backslashes aren't escapes
CALENDAR_ID = 'primary'

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class Alarm():
    system = platform.system().lower()
    flow = flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
    flow.params['access_type'] = 'offline'
    flow.params['approval_prompt'] = 'force'
    storage = Storage('calendar.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid == True:
        credentials = run_flow(flow, storage)
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = discovery.build('calendar', 'v3', http=http)
    #service = build(serviceName='calendar', version='v3', http=http, developerKey=API_KEY)

    def check_credentials(self):
        if self.credentials is None or self.credentials.invalid == True:
            self.credentials = run_flow(self.flow, self.storage)

    def calendar_event_query(self):
        self.check_credentials()
        today = datetime.today()
        events = self.service.events().list(singleEvents=True, calendarId=CALENDAR_ID).execute()
        #events = self.service.events().list(singleEvents=True).execute()
        for i, event in enumerate(events['items']):
            name = event['summary'].lower()
            try:
                start = event['start']['dateTime'][:-9]
            except KeyError:
                start = ''
            description = event.get('description', '')
            repeat = True if description.lower() == 'repeat' else False
            now = today.strftime('%Y-%m-%dT%H:%M')
            if start >= now:
                logger.debug('Event #%s, Name: %s, Start: %s', i, name, start)
            if start == now:
                if name.startswith('say'):
                    name = re.sub(r'[^a-zA-Z0-9\s\']', '', name)
                    command = '{0} "{1}"'.format('say' if self.system == 'darwin' else 'espeak -ven+m2', name[4:])
                    logger.info('Event starting. Announcing \'%s\'...', name[4:])
                else:
                    mp3_files = os.listdir(MP3_FOLDER)
                    mp3_name = name.replace(' ', '_') + '.mp3'
                    mp3_name = mp3_name if mp3_name in mp3_files else 'default.mp3'
                    command = 'mpg123 \'{}/{}\''.format(MP3_FOLDER, mp3_name)
                    logger.info('Event %s starting. Playing mp3 file %s...', name, mp3_name)
                os.system(command)
                if repeat == False:
                    time.sleep(60)

    def poll(self):
        logger.info('Polling calendar for events...')
        self.calendar_event_query()


while True:
    a = Alarm()
    a.poll()
    time.sleep(FREQUENCY_CHECK)
I have changed the code and got it to work using this code from Matt: http://mattdyson.org/projects/alarmpi/#comment-20249
This is the code that I have changed. It is not pretty yet, but it works for now.
from __future__ import print_function
import pytz
import dateutil.parser
import httplib2
from oauth2client import tools
from oauth2client import client
import datetime
import logging
from googleapiclient.discovery import build
from apiclient import discovery
from oauth2client.file import Storage
import Settings
import os

try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

log = logging.getLogger('root')

# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/calendar-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Smart-Alarm'


class AlarmGatherer:
    def __init__(self):
        #home_dir = os.path.expanduser('~')
        #credential_dir = os.path.join(home_dir, 'calendar.dat')
        #if not os.path.exists(credential_dir):
        #    os.makedirs(credential_dir)
        #credential_path = os.path.join(credential_dir, 'client_secret.json')
        SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
        CLIENT_SECRET_FILE = 'client_secret.json'
        APPLICATION_NAME = 'Smart-Alarm'

        self.FLOW = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        self.storage = Storage('calendar.dat')
        self.credentials = self.storage.get()

        if not self.checkCredentials():
            log.error("GCal credentials have expired")
            log.warn("Remove calendar.dat and run 'python AlarmGatherer.py' to fix")
            return

        http = httplib2.Http()
        http = self.credentials.authorize(http)
        self.service = build('calendar', 'v3', http=http)

    def checkCredentials(self):
        return not (self.credentials is None or self.credentials.invalid == True)

    def generateAuth(self):
        self.credentials = tools.run_flow(self.FLOW, self.storage)

    def getNextEvent(self, today=False):
        log.debug("Fetching details of next event")
        if not self.checkCredentials():
            log.error("GCal credentials have expired")
            log.warn("Remove calendar.dat and run 'python AlarmGatherer.py' to fix")
            raise Exception("GCal credentials not authorized")

        #time = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC
        time = datetime.datetime.now()
        if not today:
            # We want to find events tomorrow, rather than another one today
            log.debug("Skipping events from today")
            #time += datetime.timedelta(days=1)  # Move to tomorrow
            time = time.replace(hour=10, minute=0, second=0, microsecond=0)  # Reset to 10am the next day
            # 10am is late enough that a night shift from today won't be caught, but a morning shift
            # from tomorrow will be caught

        result = self.service.events().list(
            calendarId='primary',
            timeMin="%sZ" % (time.isoformat()),
            maxResults=1,
            singleEvents=True,
            orderBy='startTime'
        ).execute()
        events = result.get('items', [])
        return events[0]

    def getNextEventTime(self, includeToday=False):
        log.debug("Fetching next event time (including today=%s)" % (includeToday))
        nextEvent = self.getNextEvent(today=includeToday)
        start = dateutil.parser.parse(nextEvent['start']['dateTime'])
        #start = dateutil.parser.parse(nextEvent['start']['dateTime'], ignoretz=True)
        #start = start.replace(tzinfo=pytz.timezone('Africa/Johannesburg'))
        return start

    def getNextEventLocation(self, includeToday=False):
        log.debug("Fetching next event location (including today=%s)" % (includeToday))
        nextEvent = self.getNextEvent(today=includeToday)
        if (nextEvent['location']):
            return nextEvent['location']
        return None

    def getDefaultAlarmTime(self):
        defaultTime = ('0600')
        #defaultTime = self.settings.getint('default_wake')
        defaultHour = int(defaultTime[:2])
        defaultMin = int(defaultTime[2:])

        alarm = datetime.datetime.now(pytz.timezone('Africa/Johannesburg'))
        alarm += datetime.timedelta(days=1)  # Move to tomorrow
        alarm = alarm.replace(hour=defaultHour, minute=defaultMin, second=0, microsecond=0)
        return alarm


if __name__ == '__main__':
    print("Running credential check")
    a = AlarmGatherer()
    try:
        if not a.checkCredentials():
            raise Exception("Credential check failed")
    except:
        print("Credentials not correct, please generate new code")
        a.generateAuth()
        a = AlarmGatherer()

    print(a.getNextEventTime())
    print(a.getNextEventLocation())
I want to run a scheduled query when an HTTP Cloud Function is triggered.
This is written in Python 3.7.
import time
from google.protobuf.timestamp_pb2 import Timestamp
from google.cloud import bigquery_datatransfer_v1


def runQuery(parent, requested_run_time):
    client = bigquery_datatransfer_v1.DataTransferServiceClient()
    projectid = '917960740952'  # Enter your projectID here
    transferid = '630d5269-0000-2622-98d2-3c286d4314be'
    parent = client.project_transfer_config_path(projectid, transferid)
    start_time = bigquery_datatransfer_v1.types.Timestamp(seconds=int(time.time() + 10))
    response = client.start_manual_transfer_runs(parent, requested_run_time=start_time)
    print(response)
Any help, please?
I changed the parent path to the following and it worked for me:
parent = 'projects/917960740952/locations/europe/transferConfigs/630d5269-0000-2622-98d2-3c286d4314be'
from google.cloud import bigquery_datatransfer_v1
import time
from google.protobuf.timestamp_pb2 import Timestamp


def runQuery(request):
    client = bigquery_datatransfer_v1.DataTransferServiceClient()
    PROJECT_ID = 'YOUR_PROJECT_NAME'
    LOCATION_NAME = 'YOUR_TRANSFER_REGION'
    TRANSFER_CONFIG_ID = 'YOUR_TRANSFER_ID'
    parent = "projects/{0}/locations/{1}/transferConfigs/{2}".format(PROJECT_ID, LOCATION_NAME, TRANSFER_CONFIG_ID)
    start_time = bigquery_datatransfer_v1.types.Timestamp(seconds=int(time.time() + 10))
    response = client.start_manual_transfer_runs(parent, requested_run_time=start_time)
    print(response)
I have a web application which first gets user authentication for an API token, and then I want to run the latter part of the code every hour using the APScheduler module. I don't want to run the whole app from the start, because the first part requires user interaction to authorise the app again, which is unnecessary after the first run since we already have the token, and I obviously can't be there to click the authorise button every hour. Where do I put the sched.start() part of the code? The error I get is RuntimeError: Working outside of request context.
import requests
import json
from flask import Flask, render_template, request, redirect, session, url_for
from flask.json import jsonify
import os
from requests_oauthlib import OAuth2Session
from apscheduler.schedulers.background import BackgroundScheduler
import atexit
from datetime import datetime

app = Flask(__name__)

client_id = "x"
client_secret = "x"
scope = 'read_station'
password = 'x'
#grant_type = 'authorization_code'
grant_type = 'password'
username = 'x'
authurl = 'https://api.netatmo.com/oauth2/authorize?'
token_url = 'https://api.netatmo.com/oauth2/token'
redirect_uri = 'x'
response_type = 'code'
code = None
payload = {'grant_type': grant_type, 'client_id': client_id, 'client_secret': client_secret,
           'username': username, 'password': password, 'scope': scope}
rip = {}
CITIES = {'bolzano': 'lat_ne=46.30&lon_ne=11.23&lat_sw=46.28&lon_sw=11.14',
          'florence': 'lat_ne=43.51&lon_ne=11.21&lat_sw=43.44&lon_sw=11.02',
          'manchester': 'lat_ne=53.35&lon_ne=-2.0011.21&lat_sw=53.21&lon_sw=-2.36',
          }
dicty = {}


def dooby(CITIES, Header):
    for city in CITIES.keys():
        i = requests.get('https://api.netatmo.com/api/getpublicdata?' + CITIES[city] + '&filter=false', headers=Header).json()
        dicty[str(city)] = i
    return dicty


@app.route('/')
def auth():
    redirect_uri = url_for('.redir', _external=True)
    oauth = OAuth2Session(client_id, redirect_uri=redirect_uri,
                          scope=scope)
    authorization_url, state = oauth.authorization_url(authurl)
    session['oauth_state'] = state
    return redirect(authorization_url)


@app.route('/redir', methods=["GET", "POST"])
def redir():
    code = request.args.get('code')
    payload['code'] = code
    rip = requests.post(token_url, data=payload)
    rs = rip.content.decode()
    response = json.loads(rs)
    session['oauth_token'] = response['access_token']
    session['expiry'] = response['expires_in']
    session['refresh_token'] = response['refresh_token']
    return redirect(url_for('.profile'))


@app.route('/profile', methods=["GET", "POST"])
def profile():
    Header = {'Authorization': 'Bearer ' + session['oauth_token']}

    def repeat():
        return dooby(CITIES, Header)

    i = repeat()
    job = json.dumps(i)
    dt = datetime.now().strftime("%Y_%m_%d %H_%M_%S")
    f = open(r'C:\Users\freak\OneDrive\Documents\UHIpaper\{}.json'.format(dt), "w")
    f.write(job)
    f.close()
    sched = BackgroundScheduler(daemon=True)
    sched.add_job(func=profile, trigger='interval', minutes=2)
    sched.start()
    return jsonify(i)


if __name__ == "__main__":
    os.environ['DEBUG'] = "1"
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
    app.secret_key = os.urandom(24)
    app.run(debug=True)
Calling jsonify in your profile() func is causing the out-of-context error, because the scheduler invokes it as a plain function, without the Flask app context.
Refer to this answer on how to add the context, or do not use jsonify in your profile() func and use the standard json lib instead.
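As a minimal sketch of that second option (reusing the names from the question's code; fetch_and_save is a hypothetical job function introduced for the example): move the scheduler to module level, pass the token into the job as an argument so the job never touches session, and serialize with the standard json lib instead of jsonify:

import json
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

sched = BackgroundScheduler(daemon=True)
sched.start()  # started once at import time, not inside a view


def fetch_and_save(header):  # hypothetical job function; runs outside any request
    i = dooby(CITIES, header)  # plain requests calls, no Flask context needed
    dt = datetime.now().strftime("%Y_%m_%d %H_%M_%S")
    with open('{}.json'.format(dt), 'w') as f:
        f.write(json.dumps(i))  # standard json lib instead of jsonify


@app.route('/profile', methods=["GET", "POST"])
def profile():
    # session is only read here, inside the request context
    header = {'Authorization': 'Bearer ' + session['oauth_token']}
    fetch_and_save(header)  # run once immediately
    # pass the header as an argument so the scheduled job never reads session
    sched.add_job(func=fetch_and_save, trigger='interval', hours=1, args=[header])
    return json.dumps(dicty)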
I am trying to use AWS Lambda for mass email sending; the code we use is from the link below:
https://aws.amazon.com/cn/premiumsupport/knowledge-center/mass-email-ses-lambda/
from __future__ import print_function
import StringIO
import csv
import json
import os
import urllib
import zlib
from time import strftime, gmtime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import boto3
import botocore
import concurrent.futures

__author__ = 'Said Ali Samed'
__date__ = '10/04/2016'
__version__ = '1.0'

# Get Lambda environment variables
region = os.environ['us-east-1']
max_threads = os.environ['10']
text_message_file = os.environ['email_body.txt']
html_message_file = os.environ['email_body.html']

# Initialize clients
s3 = boto3.client('s3', region_name=region)
ses = boto3.client('ses', region_name=region)
send_errors = []
mime_message_text = ''
mime_message_html = ''


def current_time():
    return strftime("%Y-%m-%d %H:%M:%S UTC", gmtime())


def mime_email(subject, from_address, to_address, text_message=None, html_message=None):
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = from_address
    msg['To'] = to_address
    if text_message:
        msg.attach(MIMEText(text_message, 'plain'))
    if html_message:
        msg.attach(MIMEText(html_message, 'html'))
    return msg.as_string()


def send_mail(from_address, to_address, message):
    global send_errors
    try:
        response = ses.send_raw_email(
            Source=from_address,
            Destinations=[
                to_address,
            ],
            RawMessage={
                'Data': message
            }
        )
        if not isinstance(response, dict):  # log failed requests only
            send_errors.append('%s, %s, %s' % (current_time(), to_address, response))
    except botocore.exceptions.ClientError as e:
        send_errors.append('%s, %s, %s, %s' %
                           (current_time(),
                            to_address,
                            ', '.join("%s=%r" % (k, v) for (k, v) in e.response['ResponseMetadata'].iteritems()),
                            e.message))


def lambda_handler(event, context):
    global send_errors
    global mime_message_text
    global mime_message_html
    try:
        # Read the uploaded csv file from the bucket into python dictionary list
        bucket = event['Records'][0]['s3']['bucket']['name']
        key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key']).decode('utf8')
        response = s3.get_object(Bucket=bucket, Key=key)
        body = zlib.decompress(response['Body'].read(), 16 + zlib.MAX_WBITS)
        reader = csv.DictReader(StringIO.StringIO(body),
                                fieldnames=['from_address', 'to_address', 'subject', 'message'])
        # Read the message files
        try:
            response = s3.get_object(Bucket=bucket, Key=text_message_file)
            mime_message_text = response['Body'].read()
        except:
            mime_message_text = None
            print('Failed to read text message file. Did you upload %s?' % text_message_file)
        try:
            response = s3.get_object(Bucket=bucket, Key=html_message_file)
            mime_message_html = response['Body'].read()
        except:
            mime_message_html = None
            print('Failed to read html message file. Did you upload %s?' % html_message_file)
        if not mime_message_text and not mime_message_html:
            raise ValueError('Cannot continue without a text or html message file.')
        # Send in parallel using several threads
        e = concurrent.futures.ThreadPoolExecutor(max_workers=max_threads)
        for row in reader:
            from_address = row['from_address'].strip()
            to_address = row['to_address'].strip()
            subject = row['subject'].strip()
            message = mime_email(subject, from_address, to_address, mime_message_text, mime_message_html)
            e.submit(send_mail, from_address, to_address, message)
        e.shutdown()
    except Exception as e:
        print(e.message + ' Aborting...')
        raise e
    print('Send email complete.')
    # Remove the uploaded csv file
    try:
        response = s3.delete_object(Bucket=bucket, Key=key)
        if 'ResponseMetadata' in response.keys() and response['ResponseMetadata']['HTTPStatusCode'] == 204:
            print('Removed s3://%s/%s' % (bucket, key))
    except Exception as e:
        print(e)
    # Upload errors if any to S3
    if len(send_errors) > 0:
        try:
            result_data = '\n'.join(send_errors)
            logfile_key = key.replace('.csv.gz', '') + '_error.log'
            response = s3.put_object(Bucket=bucket, Key=logfile_key, Body=result_data)
            if 'ResponseMetadata' in response.keys() and response['ResponseMetadata']['HTTPStatusCode'] == 200:
                print('Send email errors saved in s3://%s/%s' % (bucket, logfile_key))
        except Exception as e:
            print(e)
            raise e
    # Reset publish error log
    send_errors = []


if __name__ == "__main__":
    json_content = json.loads(open('event.json', 'r').read())
    lambda_handler(json_content, None)
but it has a problem when I choose Python 2.7. The error is:
module initialization error 'us-east-1'
When I choose Python 3.6, the error is:
Unable to import module 'lambda_function': No module named 'StringIO'
Can anyone tell me what the problem is?
In Python 3, the StringIO module is gone. Instead, import the io module and use io.StringIO.
The problem with the 2.7 version is presumably that the following statement is failing:
region = os.environ['us-east-1']
This will result in a KeyError if us-east-1 is not an available environment variable. Use AWS_REGION or AWS_DEFAULT_REGION instead. See the full list of Lambda environment variables.
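A minimal sketch of both fixes, assuming the rest of the handler stays as in the question (the decode step is needed because zlib.decompress returns bytes under Python 3):

import io
import os

# Use a variable the Lambda runtime actually provides, or fall back to a literal
region = os.environ.get('AWS_REGION', 'us-east-1')

# Inside lambda_handler: decode the decompressed bytes, then wrap with io.StringIO
body = zlib.decompress(response['Body'].read(), 16 + zlib.MAX_WBITS).decode('utf-8')
reader = csv.DictReader(io.StringIO(body),
                        fieldnames=['from_address', 'to_address', 'subject', 'message'])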
Please set the environment variables as described in step 4 of the article:
"Configure Lambda environment variables appropriate to your usage scenario. For example, the following variables would be valid for a given use case:
REGION=us-east-1, MAX_THREADS=10, TEXT_MESSAGE_FILE=email_body.txt, HTML_MESSAGE_FILE=email_body.html."
What was done (as per the code provided in the question) is replacing the names of the environment variables with their values, which means that Python is looking for, e.g., a 'us-east-1' environment variable, which isn't there...
This is the original code:
# Get Lambda environment variables
region = os.environ['REGION']
max_threads = os.environ['MAX_THREADS']
text_message_file = os.environ['TEXT_MESSAGE_FILE']
html_message_file = os.environ['HTML_MESSAGE_FILE']
You can also hard-code the values, like below:
# Get Lambda environment variables
region = 'us-east-1'
max_threads = '10'
text_message_file = 'email_body.txt'
html_message_file = 'email_body.html'
but I'd suggest setting the environment variables instead (and using the version of the script provided by the article author). When it comes to setting environment variables in Lambda, see this article :)
I am terribly new to Python and my progress is like a snail's :(
I want to make a Telegram bot that sends a message at a specific date and time. I used the apscheduler and telepot libraries for that, and this is my code:
import telepot
import sys
import time
from time import sleep
from datetime import datetime
from apscheduler.scheduler import Scheduler
import logging

bot = telepot.Bot("***")
logging.basicConfig()
sched = Scheduler()
sched.start()
exec_date = datetime(2017, 9, 12, 1, 51, 0)


def handle(msg):
    content_type, chat_type, chat_id = telepot.glance(msg)
    print(content_type, chat_type, chat_id)
    if content_type == 'text':
        bot.sendMessage(chat_id, msg['text'])


def sendSimpleText():
    # content_type, chat_type, chat_id = telepot.glance(msg)
    # print(content_type, chat_type, chat_id)
    #
    # if content_type == 'text':
    chat_id = telepot.
    bot.sendMessage(chat_id, 'faez')


def main():
    job = sched.add_date_job(sendSimpleText, exec_date)
    while True:
        sleep(1)
        sys.stdout.write('.'); sys.stdout.flush()
    # bot.message_loop(handle)
    # # job = sched.add_date_job(sendSimpleText, '2017-09-11 21:35:00', ['testFuckBot'])
    # while True:
    #     time.sleep(10)


if __name__ == '__main__':
    main()
My question is: what do I pass to sendSimpleText as an argument in add_date_job, in this line?
job = sched.add_date_job(sendSimpleText, exec_date)
I know that msg is the message the user typed, so for add_date_job I have nothing?
You are using an old (2.1.2) version of APScheduler.
The new version has a new syntax, and the add_date_job function is no longer available.
This is a working solution for you:
import telepot
import sys
import time
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from telepot.loop import MessageLoop
import logging

bot = telepot.Bot("***YOUR_BOT_TOKEN***")
logging.basicConfig()
sched = BackgroundScheduler()
exec_date = datetime(2017, 9, 12, 3, 5, 0)


def handle(msg):
    content_type, chat_type, chat_id = telepot.glance(msg)
    print(content_type, chat_type, chat_id)
    if content_type == 'text':
        bot.sendMessage(chat_id, msg['text'])


def sendSimpleText(chat_id):
    bot.sendMessage(chat_id, 'faez')


def main():
    MessageLoop(bot, handle).run_as_thread()
    job = sched.add_job(sendSimpleText, run_date=exec_date, args=['**YOUR_TELEGRAM_ID**'])
    while True:
        time.sleep(1)
        sys.stdout.write('.'); sys.stdout.flush()


if __name__ == '__main__':
    sched.start()
    main()
We are using boto3 for our DynamoDB and we need to do a full scan of our tables; based on other posts, to do that we need to use pagination. However, we are unable to find a working sample of pagination. Here is what we did.
import boto3

client_setting = boto3.client('dynamodb', region_name='ap-southeast-2')
paginator = client_setting.get_paginator('scan')
esk = {}
data = []
unconverted_ga = ourQuery(params1, params2)
for page in unconverted_ga:
    data.append(page)
    esk = page['LastEvaluatedKey']
We don't know exactly how to use the esk as the ExclusiveStartKey of our next query. What should the expected value of the ExclusiveStartKey parameter be? We are still new to DynamoDB and there are many things we need to learn, including this. Thanks!
From the answer by Tay B at https://stackoverflow.com/a/38619425/3176550
import boto3

dynamodb = boto3.resource('dynamodb',
                          aws_session_token=aws_session_token,
                          aws_access_key_id=aws_access_key_id,
                          aws_secret_access_key=aws_secret_access_key,
                          region_name=region
                          )

table = dynamodb.Table('widgetsTableName')

response = table.scan()
data = response['Items']

while 'LastEvaluatedKey' in response:
    response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
    data.extend(response['Items'])  # data is a list, so extend rather than update
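Since the question started from get_paginator('scan'), it is worth noting that the paginator already feeds each page's LastEvaluatedKey back in as the next ExclusiveStartKey for you, so you never have to build esk yourself. A minimal sketch (the table name is a placeholder):

import boto3

client = boto3.client('dynamodb', region_name='ap-southeast-2')
paginator = client.get_paginator('scan')

data = []
# paginate() keeps issuing Scan calls until no LastEvaluatedKey is returned
for page in paginator.paginate(TableName='YourTableName'):
    data.extend(page['Items'])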
After hours of searching, I've finally found a better solution. For those who are new to DynamoDB, we shouldn't miss this: http://docs.aws.amazon.com/amazondynamodb/latest/gettingstartedguide/GettingStarted.Python.04.html
from __future__ import print_function  # Python 2/3 compatibility
import boto3
import json
import decimal
from boto3.dynamodb.conditions import Key, Attr


# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            if o % 1 > 0:
                return float(o)
            else:
                return int(o)
        return super(DecimalEncoder, self).default(o)


dynamodb = boto3.resource('dynamodb', region_name='us-west-2', endpoint_url="http://localhost:8000")

table = dynamodb.Table('Movies')

fe = Key('year').between(1950, 1959)
pe = "#yr, title, info.rating"
# Expression Attribute Names for Projection Expression only.
ean = {"#yr": "year", }
esk = None

response = table.scan(
    FilterExpression=fe,
    ProjectionExpression=pe,
    ExpressionAttributeNames=ean
)

for i in response['Items']:
    print(json.dumps(i, cls=DecimalEncoder))

# As long as LastEvaluatedKey is in the response, there are still items to fetch
while 'LastEvaluatedKey' in response:
    response = table.scan(
        ProjectionExpression=pe,
        FilterExpression=fe,
        ExpressionAttributeNames=ean,
        ExclusiveStartKey=response['LastEvaluatedKey']
    )

    for i in response['Items']:
        print(json.dumps(i, cls=DecimalEncoder))
You can try the following code (note that this snippet targets the older boto dynamodb2 interface rather than boto3):

esk = None
while True:
    scan_generator = YourTableName.scan(max_results=10, exclusive_start_key=esk)
    for item in scan_generator:
        pass  # your code for processing
        # condition to check if entire table is scanned, then break
    else:
        break  # for/else: runs when the batch finishes without a break
    # Load the last keys
    esk = scan_generator.kwargs['exclusive_start_key'].values()
Here is the reference documentation link.
Hope that helps
A bit more verbose, but I like it.
def fetch_from_table(last_key=None):
    if last_key:
        response = table.query(
            IndexName='advertCatalogIdx',
            KeyConditionExpression=Key('sk').eq('CATALOG'),
            Limit=5,
            ExclusiveStartKey=last_key
        )
    else:
        response = table.query(
            IndexName='advertCatalogIdx',
            KeyConditionExpression=Key('sk').eq('CATALOG'),
            Limit=5
        )

    # print(response)
    for item in response['Items']:
        print(item['address'])

    print('***************************')
    return response.get('LastEvaluatedKey')


last_key = fetch_from_table()
while last_key != None:
    print("Running again : ")
    last_key = fetch_from_table(last_key)
import sys
import boto3

client = boto3.client('dynamodb')
marker = None

while True:
    paginator = client.get_paginator('list_tables')
    page_iterator = paginator.paginate(
        PaginationConfig={
            'MaxItems': 1000,
            'PageSize': 100,
            'StartingToken': marker})
    for page in page_iterator:
        tables = page['TableNames']
        for table in tables:
            print(table)
    try:
        marker = page['NextToken']
    except KeyError:
        sys.exit()