Cloud Function Error runQuery() missing 1 required positional argument - google-cloud-platform

I want to run a scheduled query when an HTTP Cloud Function is triggered.
This is written in Python 3.7.
import time
from google.protobuf.timestamp_pb2 import Timestamp
from google.cloud import bigquery_datatransfer_v1
def runQuery(parent, requested_run_time):
    """Trigger a manual run of a BigQuery scheduled query (transfer config).

    NOTE(review): an HTTP Cloud Function entry point receives a single
    `request` argument; this (parent, requested_run_time) signature matches
    a Pub/Sub-style trigger — confirm the deployed trigger type, since the
    reported "missing 1 required positional argument" points at this.
    """
    client = bigquery_datatransfer_v1.DataTransferServiceClient()
    projectid = '917960740952'  # Enter your projectID here
    transferid = '630d5269-0000-2622-98d2-3c286d4314be'  # transfer config ID
    # Build the full resource name explicitly, including the location.
    # project_transfer_config_path() omits the locations/ segment, which
    # fails for regional transfer configs (the asker's own fix confirms
    # the location-qualified form works).
    parent = 'projects/{}/locations/europe/transferConfigs/{}'.format(
        projectid, transferid)
    # Ask the service to start the run ~10 seconds from now.
    start_time = bigquery_datatransfer_v1.types.Timestamp(
        seconds=int(time.time() + 10))
    response = client.start_manual_transfer_runs(
        parent, requested_run_time=start_time)
    print(response)
Any help would be appreciated, please.

I changed the parent path to the following and it worked for me:
parent = 'projects/917960740952/locations/europe/transferConfigs/630d5269-0000-2622-98d2-3c286d4314be'

from google.cloud import bigquery_datatransfer_v1
import time
from google.protobuf.timestamp_pb2 import Timestamp
def runQuery(request):
    """HTTP Cloud Function entry point: start a manual transfer run.

    Fill in the project / region / transfer-config placeholders below.
    """
    client = bigquery_datatransfer_v1.DataTransferServiceClient()
    PROJECT_ID = 'YOUR_PROJECT_NAME'
    LOCATION_NAME = 'YOUR_TRANSFER_REGION'
    TRANSFER_CONFIG_ID = 'YOUR_TRANSFER_ID'
    # Full resource name of the transfer config, including its location.
    parent = "projects/{0}/locations/{1}/transferConfigs/{2}".format(
        PROJECT_ID, LOCATION_NAME, TRANSFER_CONFIG_ID)
    # Request the run to start roughly 10 seconds from now.
    run_at_seconds = int(time.time() + 10)
    start_time = bigquery_datatransfer_v1.types.Timestamp(seconds=run_at_seconds)
    response = client.start_manual_transfer_runs(parent, requested_run_time=start_time)
    print(response)

Related

telebot register_next_step_handler_by_chat_id not works in celery shared task

I have a small Telegram bot Django project and the bot needs to send a message to users that have been inactive for over an hour and then wait for input from the user using tg_nick_or_phone_input_handler and write it to the TelegramBotClientModel instance, sending more messages to chat.
def tg_nick_or_phone_input_handler(message):
    """Store the user's reply as their nick/phone and confirm in chat."""
    chat_id = message.chat.id
    # Persist the free-form contact info on the client record.
    client_record = TelegramBotClientModel.objects.get(chat_id=chat_id)
    client_record.phone_or_nickname = message.text
    client_record.request_sent = True
    client_record.save()
    # Acknowledge with the two confirmation messages, in order.
    for reply_text in ('REQUEST SENT', 'some message'):
        bot.send_message(
            chat_id=chat_id,
            text=reply_text
        )
import django.utils.timezone as tz
from celery import shared_task
from tg_funnel_bot.bot import bot
from .views import tg_nick_or_phone_input_handler
from .models import TelegramBotClientModel, BotMessagesSettingsModel
# Celery task that pings inactive clients and registers the next-step handler.
# The '#shared_task' in the paste was a mangled '@shared_task' decorator;
# without it the function is never registered as a Celery task and beat's
# 'send_message_for_interrupted_dialog_after_one_hour' entry cannot resolve.
@shared_task(name='send_message_for_interrupted_dialog_after_one_hour')
def send_message_for_interrupted_dialog_after_one_hour():
    """Send follow-up messages to clients that went quiet, then wait for
    their next chat message via tg_nick_or_phone_input_handler."""
    bot_messages_settings = BotMessagesSettingsModel.objects.all().first()
    # NOTE(review): the task name says "one hour" but the filter uses
    # 5 minutes — confirm which threshold is intended.
    inactive_for_hour_clients = TelegramBotClientModel.objects.filter(
        updated_at__lte=tz.now() - tz.timedelta(minutes=5),
        request_sent=False,
        message_for_one_hour_inactive_sent=False
    )
    for inactive_client in inactive_for_hour_clients:
        chat_id = inactive_client.chat_id
        bot.send_message(
            chat_id=chat_id,
            text=bot_messages_settings.user_inactive_for_hour_message_text_first_part
        )
        bot.send_message(
            chat_id=chat_id,
            text=bot_messages_settings.user_inactive_for_hour_message_text_second_part,
            reply_markup=bot_messages_settings.get_inactive_message_second_part_markup()
        )
        # NOTE(review): register_next_step_handler_by_chat_id stores the
        # handler in this process's memory; a handler registered inside a
        # Celery worker is invisible to the webhook process unless the bot
        # uses a shared/persistent next-step-handler backend — this is the
        # likely cause of the "handler not registered" symptom.
        bot.register_next_step_handler_by_chat_id(
            chat_id=chat_id,
            callback=tg_nick_or_phone_input_handler
        )
        # Re-fetch before saving to avoid clobbering concurrent changes.
        inactive_client = TelegramBotClientModel.objects.get(pk=inactive_client.pk)
        inactive_client.message_for_one_hour_inactive_sent = True
        inactive_client.save()
    return tz.now()
bot.register_next_step_handler_by_chat_id does not register the handler, and the follow-up input in the bot chat is not processed.
Maybe I cannot use bot instance in another processes, but I can send messages and register_next_step_handler_by_chat_id works if use it in file with webhook update APIView like this:
from telebot import types
from rest_framework.views import APIView
from rest_framework.response import Response
from tg_funnel_bot.bot import bot
from .models import BotMessagesSettingsModel, TelegramBotClientModel
class UpdatesHandlerBotAPIView(APIView):
    """Telegram webhook endpoint: feeds raw updates into the bot."""

    def post(self, request):
        # Decode the raw webhook payload and hand it to telebot.
        raw_body = request.body.decode('UTF-8')
        update = types.Update.de_json(raw_body)
        bot.process_new_updates([update])
        return Response({'code': 200})
def after_loading_questions_data_handler(message):
    """After the questionnaire, send the configured follow-up text and
    wait for the user's contact details in the next message."""
    chat_id = message.chat.id
    settings_row = BotMessagesSettingsModel.objects.all().first()
    bot.send_message(
        chat_id=chat_id,
        text=settings_row.after_data_loading_text
    )
    # The user's next message goes to the nick/phone handler.
    bot.register_next_step_handler_by_chat_id(
        chat_id=chat_id,
        callback=tg_nick_or_phone_input_handler
    )
def tg_nick_or_phone_input_handler(message):
    """Save the user's nick/phone reply and acknowledge in chat."""
    chat_id = message.chat.id
    record = TelegramBotClientModel.objects.get(chat_id=chat_id)
    record.phone_or_nickname = message.text
    record.request_sent = True
    record.save()
    # Two acknowledgement messages, sent in order.
    for ack_text in ('REQUEST SENT', 'some text'):
        bot.send_message(
            chat_id=chat_id,
            text=ack_text
        )
Celery configs
# `crontab` was referenced below without being imported, which raises
# NameError as soon as these settings are loaded.
from celery.schedules import crontab

# Celery broker/result backend (local Redis) and the beat schedule.
CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_BEAT_SCHEDULE = {
    'add-every-10-minutes': {
        'task': 'send_message_for_interrupted_dialog_after_one_hour',
        # NOTE(review): the entry's key says "every 10 minutes" but this
        # crontab fires every minute — confirm the intended cadence.
        'schedule': crontab(minute='*/1'),
    }
}
This is so strange. Give me a hand, please.

Cannot fix start manual transfer runs method for bigquery_datatransfer_v1

I am trying to run a scheduled query only when a table updates in big query. For this I am trying to make this python code to work in cloud functions, but it is giving me error. Would highly appreciate any help.
I am running this python code :
import time
from google.protobuf.timestamp_pb2 import Timestamp
from google.cloud import bigquery_datatransfer_v1
def runQuery(parent, requested_run_time):
    """Manually trigger a BigQuery scheduled query (data transfer run)."""
    client = bigquery_datatransfer_v1.DataTransferServiceClient()
    projectid = '629586xxxx'  # Enter your projectID here
    transferid = '60cc15f8-xxxx-xxxx-8ba2-xxxxx41bc'  # Enter your transferId here
    # project_transfer_config_path() builds the transfer-config resource
    # name (projects/{project}/transferConfigs/{config}) that
    # start_manual_transfer_runs expects as its parent.
    parent = client.project_transfer_config_path(projectid, transferid)
    start_time = Timestamp(seconds=int(time.time() + 10))
    # `requested_run_time` is a keyword argument only in
    # google-cloud-bigquery-datatransfer 1.x; pin that version in
    # requirements.txt — 2.x clients take a single request object, which
    # produces the reported "unexpected keyword argument" error.
    response = client.start_manual_transfer_runs(parent, requested_run_time=start_time)
    print(response)
I get this error
start_manual_transfer_runs() got an unexpected keyword argument 'requested_run_time'
I did this using a Cloud Function and it worked for me.
import time
from google.protobuf.timestamp_pb2 import Timestamp
from google.cloud import bigquery_datatransfer_v1
def runQuery(parent, requested_run_time):
    """Start a manual run of the configured transfer a few seconds out."""
    client = bigquery_datatransfer_v1.DataTransferServiceClient()
    PROJECT_ID = 'graphical-reach-285218'
    TRANSFER_CONFIG_ID = '61adfc39-0000-206b-a7b0-089e08324288'
    # Resource name of the transfer config to run.
    parent = client.project_transfer_config_path(PROJECT_ID, TRANSFER_CONFIG_ID)
    # Timestamp ~10 seconds ahead so the service accepts the request.
    seconds_ahead = int(time.time() + 10)
    start_time = bigquery_datatransfer_v1.types.Timestamp(seconds=seconds_ahead)
    response = client.start_manual_transfer_runs(parent, requested_run_time=start_time)
    print(parent)
    print(response)
Replace with your project id and transfer config id. The above code will go in main.py and in requirements.txt , please keep
google-cloud-bigquery-datatransfer==1

where to put scheduler inside my web app code?

I have a web application which first gets user authentication for an API token, then I want to run the latter part of the code every hour using the APScheduler module. I dont want to run the whole app from the start, because the first part requires user interaction to authorise the app again, which is unnecessary after the first run because we have the token, plus i obviously cant be there to click the authorise button every hour. WHere do i put the sched.start() part of the code? THe error i get is RuntimeError: Working outside of request context.
import requests
import json
from flask import Flask, render_template, request, redirect, session, url_for
from flask.json import jsonify
import os
from requests_oauthlib import OAuth2Session
from apscheduler.schedulers.background import BackgroundScheduler
import atexit
from datetime import datetime
# Flask app plus Netatmo OAuth2 configuration.
app = Flask(__name__)
# NOTE(review): real credentials belong in environment/config, not source;
# 'x' placeholders shown here.
client_id = "x"
client_secret = "x"
scope = 'read_station'
password = 'x'
#grant_type = 'authorization_code'
grant_type = 'password'
username='x'
# Netatmo OAuth2 endpoints.
authurl = 'https://api.netatmo.com/oauth2/authorize?'
token_url = 'https://api.netatmo.com/oauth2/token'
redirect_uri = 'x'
response_type = 'code'
code = None
# Form payload for the token request (password grant).
payload= {'grant_type':grant_type,'client_id':client_id,'client_secret':client_secret,
'username':username,'password':password,'scope':scope}
rip={}
# Lat/lon bounding boxes for the Netatmo public-data query, per city.
CITIES = {'bolzano' : 'lat_ne=46.30&lon_ne=11.23&lat_sw=46.28&lon_sw=11.14',
'florence' : 'lat_ne=43.51&lon_ne=11.21&lat_sw=43.44&lon_sw=11.02',
'manchester' : 'lat_ne=53.35&lon_ne=-2.0011.21&lat_sw=53.21&lon_sw=-2.36',
}
# Accumulates per-city API responses (filled by dooby()).
dicty = {}
def dooby(CITIES, Header):
    """Fetch Netatmo public station data for every configured city.

    Returns a dict mapping city name -> decoded JSON response.
    """
    # Build a fresh dict instead of mutating the module-level `dicty`,
    # so repeated calls (e.g. from the background scheduler) don't share
    # state across invocations.
    results = {}
    for city in CITIES.keys():
        i = requests.get('https://api.netatmo.com/api/getpublicdata?'+CITIES[city]+'&filter=false', headers = Header).json()
        results[str(city)] = i
    return results
# The '#app.route' in the paste was a mangled '@app.route' decorator;
# without it the view is never registered with Flask.
@app.route('/')
def auth():
    """Redirect the user to Netatmo's OAuth2 authorization page."""
    redirect_uri = url_for('.redir', _external = True)
    oauth = OAuth2Session(client_id, redirect_uri = redirect_uri,
                          scope = scope)
    authorization_url, state = oauth.authorization_url(authurl)
    # Keep the CSRF state in the session for the callback to verify.
    session['oauth_state'] = state
    return redirect(authorization_url)
# Restored mangled '@app.route' decorator (pasted as '#app.route').
@app.route('/redir', methods = ["GET","POST"])
def redir():
    """OAuth2 callback: exchange the authorization code for tokens."""
    code = request.args.get('code')
    payload['code'] = code
    rip = requests.post(token_url, data=payload)
    rs = rip.content.decode()
    response = json.loads(rs)
    # Store the tokens in the user session for later API calls.
    session['oauth_token'] = response['access_token']
    session['expiry'] = response['expires_in']
    session['refresh_token'] = response['refresh_token']
    return redirect(url_for('.profile'))
# Restored mangled '@app.route' decorator (pasted as '#app.route').
@app.route('/profile', methods = ["GET","POST"])
def profile():
    """Fetch city data, save it to disk, and schedule periodic repeats.

    The token is captured in fetch_and_save's closure so the scheduled job
    never touches `session` — calling profile() itself from the scheduler
    is what raised "RuntimeError: Working outside of request context"
    (session access and jsonify both need the Flask request/app context).
    """
    Header = {'Authorization':'Bearer '+session['oauth_token']}
    def fetch_and_save():
        # Fetch all cities and persist the snapshot to a timestamped file.
        data = dooby(CITIES, Header)
        dt = datetime.now().strftime("%Y_%m_%d %H_%M_%S")
        # NOTE(review): hard-coded absolute Windows path — make configurable.
        with open(r'C:\Users\freak\OneDrive\Documents\UHIpaper\{}.json'.format(dt), "w") as f:
            f.write(json.dumps(data))
        return data
    i = fetch_and_save()
    # Start the background scheduler only once; the original created and
    # started a new scheduler on every request, piling up duplicate jobs.
    if not getattr(profile, '_sched', None):
        sched = BackgroundScheduler(daemon=True)
        sched.add_job(func=fetch_and_save, trigger='interval', minutes=2)
        sched.start()
        profile._sched = sched
    return jsonify(i)
if __name__ == "__main__":
    # Development entry point: relax OAuth transport checks and run Flask.
    os.environ.update(DEBUG="1", OAUTHLIB_INSECURE_TRANSPORT="1")
    app.secret_key = os.urandom(24)
    app.run(debug=True)
calling jsonify in your profile() func is causing the out of context error because you're calling a Flask function without the Flask app context.
Refer to this answer on how to add context or do not use jsonify in your profile() func but standard json lib instead.

Get information about a file uploaded to S3

I have created a lambda function that sends emails whenever a file is uploaded to an S3 bucket, but now I want to have all the information related to that file — the name, size, date and time of upload — and, if possible, where it comes from.
I have all this information in the AWS console, but I want to have it in the email body.
i am using serverless framework. v 1.22.0
here is my code
import json
import boto3
import botocore
import logging
import sys
import os
import traceback
from botocore.exceptions import ClientError
from pprint import pprint
from time import strftime, gmtime
# Static email settings ('#' stands in for '@' in the redacted addresses).
email_from = '********#*****.com'
email_to = '********#*****.com'
email_subject = 'new event on s3 '
email_body = 'a new file is uploaded'
#setup simple logging for INFO
logger = logging.getLogger()
logger.setLevel(logging.INFO)
from botocore.exceptions import ClientError
def sthree(event, context):
    """Send email whenever a file is uploaded to S3.

    Returns an API-Gateway-style dict with statusCode and a JSON body.
    """
    body = {}
    status_code = 200
    email_body = str(context)
    try:
        # Only SES is used; the unused S3 client was removed.
        ses = boto3.client('ses')
        ses.send_email(Source = email_from,
            Destination = {'ToAddresses': [email_to,],},
            Message = {'Subject': {'Data': email_subject}, 'Body':{'Text' : {'Data': email_body}}}
        )
    except Exception as e:
        print(traceback.format_exc())
        status_code = 500
        # Exception objects are not JSON-serializable; json.dumps(e) would
        # raise TypeError and mask the original error. Store its text.
        body["message"] = str(e)
    response = {
        # Propagate the real status code (was hard-coded to 200, so
        # failures looked like successes to the caller).
        "statusCode": status_code,
        "body": json.dumps(body)
    }
    return response
Here is the event json structure sent by S3 upon object creation:
http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
You can get the file names, sizes and source ip like this:
# Illustrative snippet: extracting file metadata from the S3 event record
# (see the S3 notification-content-structure docs linked above).
for record in event['Records']:
    filename = record['s3']['object']['key'];
    filesize = record['s3']['object']['size'];
    source = record['requestParameters']['sourceIPAddress'];
    eventTime = record['eventTime'];
def lambda_handler(event, context):
    """Email the key/size/time/uploader details of a new S3 object."""
    email_from = 'XXXXXXXXX#XXX.com'
    email_to = 'XXXXXXXXX#XXX.com'
    email_subject = 'new event on s3'
    # Details come from the first (and, for s3:ObjectCreated, only) record.
    email_body = "File Name :" + event[u'Records'][0][u's3'][u'object'][u'key'] + "\n" + "File Size :" + str(event[u'Records'][0][u's3'][u'object'][u'size']) + "\n" + "Upload Time :" + event[u'Records'][0][u'eventTime'] + "\n" + "User Details :" + event[u'Records'][0][u'userIdentity'][u'principalId']
    # Only SES is needed; the unused S3 client was removed.
    ses = boto3.client('ses')
    ses.send_email(Source = email_from,
        Destination = {'ToAddresses': [email_to,],},
        Message = {'Subject': {'Data': email_subject}, 'Body':{'Text' : {'Data': email_body}}}
    )
    print("Function execution Completed !!!")

Alarm clock that interacted with google calendar API

I am looking for some help on an alarm clock that interacted with google calendar.
I have some problem with the code now where is not pulling down the events.
Here is the error I get now:
INFO:main:Polling calendar for events...
INFO:googleapiclient.discovery:URL being requested: GET https://www.googleapis.com/calendar/v3/calendars/primary/events?alt=json&singleEvents=true
INFO:main:Polling calendar for events...
INFO:googleapiclient.discovery:URL being requested: GET https://www.googleapis.com/calendar/v3/calendars/primary/events?alt=json&singleEvents=true
Process finished with exit code -1
# Inspired from 'Raspberry Pi as a Google Calender Alarm Clock'
# http://www.esologic.com/?p=634
#and this link as well https://github.com/ehamiter/get-on-the-bus
from datetime import datetime
import logging, os, platform, re, time
from apiclient import discovery
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow
# Alarm-clock configuration constants.
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Calendar API Python Quickstart'
FREQUENCY_CHECK = 5 # in second
# Raw string: the original non-raw 'E:\Users\...' literal is a SyntaxError
# on Python 3, because \U begins an 8-digit unicode escape.
MP3_FOLDER = r'E:\Users\Andrew.Price\PycharmProjects\SimpleAlarm\MP3'
CALENDAR_ID ='primary'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Alarm():
    """Polls Google Calendar and announces/plays events whose start time
    matches the current minute.

    NOTE(review): all of the OAuth setup below runs once at class-definition
    time; flow/storage/credentials/service are class attributes shared by
    every instance.
    """
    system = platform.system().lower()
    # OAuth2 flow for the Calendar scope; force offline access so a
    # refresh token is issued.
    flow = flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
    flow.params['access_type'] = 'offline'
    flow.params['approval_prompt'] = 'force'
    storage = Storage('calendar.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid == True:
        credentials = run_flow(flow, storage)
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = discovery.build('calendar', 'v3', http=http)
    #service = build(serviceName='calendar', version='v3', http=http, developerKey=API_KEY)
    def check_credentials(self):
        # NOTE(review): the re-authorized credentials land in a local
        # variable and are discarded — presumably self.credentials was
        # intended; confirm before relying on refresh working.
        if self.credentials is None or self.credentials.invalid == True:
            credentials = run_flow(self.flow, self.storage)
    def calendar_event_query(self):
        """Fetch calendar events and act on any starting this minute."""
        self.check_credentials()
        today = datetime.today()
        events = self.service.events().list(singleEvents=True, calendarId=CALENDAR_ID).execute()
        #events = self.service.events().list(singleEvents=True).execute()
        for i, event in enumerate(events['items']):
            name = event['summary'].lower()
            try:
                # Trim to 'YYYY-MM-DDTHH:MM' for comparison with `now`.
                start = event['start']['dateTime'][:-9]
            except KeyError:
                # All-day events carry no 'dateTime'; empty string never matches.
                start = ''
            description = event.get('description', '')
            # An event described exactly 'repeat' re-fires on later polls.
            repeat = True if description.lower() == 'repeat' else False
            now = today.strftime('%Y-%m-%dT%H:%M')
            if start >= now:
                logger.debug('Event #%s, Name: %s, Start: %s', i, name, start)
            if start == now:
                if name.startswith('say'):
                    # Strip punctuation, then speak the text after 'say '.
                    name = re.sub(r'[^a-zA-Z0-9\s\']', '', name)
                    # NOTE(review): `system` is a class attribute; this bare
                    # reference inside a method would raise NameError at
                    # runtime — confirm whether self.system was intended.
                    command = '{0} "{1}"'.format('say' if system == 'darwin' else 'espeak -ven+m2', name[4:])
                    logger.info('Event starting. Announcing \'%s\'...', name[4:])
                else:
                    # Play <event_name>.mp3 if present, else default.mp3.
                    mp3_files = os.listdir(MP3_FOLDER)
                    mp3_name = name.replace(' ', '_') + '.mp3'
                    mp3_name = mp3_name if mp3_name in mp3_files else 'default.mp3'
                    command = 'mpg123 \'{}/{}\''.format(MP3_FOLDER, mp3_name)
                    logger.info('Event %s starting. Playing mp3 file %s...', name, mp3_name)
                os.system(command)
                if repeat == False:
                    # Sleep past this minute so a one-shot alarm fires once.
                    time.sleep(60)
    def poll(self):
        """One polling cycle: log, then query the calendar."""
        logger.info('Polling calendar for events...')
        self.calendar_event_query()
# Main loop: poll the calendar forever, every FREQUENCY_CHECK seconds.
# Constructing Alarm() each iteration is cheap (no __init__); the OAuth
# setup ran once when the class body was executed.
while True:
    a = Alarm()
    a.poll()
    time.sleep(FREQUENCY_CHECK)
I have changed the code and got it to work using this code from Matt http://mattdyson.org/projects/alarmpi/#comment-20249
This is the code that I have changed. It is not pretty yet; it just works for now.
from __future__ import print_function
import pytz
import dateutil.parser
import httplib2
from oauth2client import tools
from oauth2client import client
import datetime
import logging
from googleapiclient.discovery import build
from apiclient import discovery
from oauth2client.file import Storage
import Settings
import os
# oauth2client's run_flow expects argparse-style flags; fall back to None
# when argparse cannot be imported.
try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None
# Named logger; the 'root' project logger must be configured elsewhere
# for these messages to appear.
log = logging.getLogger('root')
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/calendar-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Smart-Alarm'
class AlarmGatherer:
    """Google Calendar helper for the alarm: manages OAuth credentials and
    looks up the next event's time and location."""
    def __init__(self):
        #home_dir = os.path.expanduser('~')
        #credential_dir = os.path.join(home_dir, 'calendar.dat')
        #if not os.path.exists(credential_dir):
        # os.makedirs(credential_dir)
        #credential_path = os.path.join(credential_dir, 'client_secret.json')
        SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
        CLIENT_SECRET_FILE = 'client_secret.json'
        APPLICATION_NAME = 'Smart-Alarm'
        self.FLOW = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        self.storage = Storage('calendar.dat')
        self.credentials = self.storage.get()
        # Bail out early when credentials are missing/expired; self.service
        # is then never set, so API methods will fail until generateAuth().
        if not self.checkCredentials():
            log.error("GCal credentials have expired")
            log.warn("Remove calendar.dat and run 'python AlarmGatherer.py' to fix")
            return
        http = httplib2.Http()
        http = self.credentials.authorize(http)
        self.service = build('calendar', 'v3', http=http)
    def checkCredentials(self):
        # True when stored credentials exist and are still valid.
        return not (self.credentials is None or self.credentials.invalid == True)
    def generateAuth(self):
        # Run the interactive OAuth flow and persist the result to storage.
        self.credentials = tools.run_flow(self.FLOW, self.storage)
    def getNextEvent(self, today=False):
        """Return the next calendar event; with today=False, look from
        10:00 tomorrow-morning-style cutoff onward (see comment below)."""
        log.debug("Fetching details of next event")
        if not self.checkCredentials():
            log.error("GCal credentials have expired")
            log.warn("Remove calendar.dat and run 'python AlarmGatherer.py' to fix")
            raise Exception("GCal credentials not authorized")
        #time = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC
        time = datetime.datetime.now()
        if not today:
            # We want to find events tomorrow, rather than another one today
            log.debug("Skipping events from today")
            #time += datetime.timedelta(days=1) # Move to tomorrow
            time = time.replace(hour=10, minute=0, second=0, microsecond=0) # Reset to 10am the next day
            # 10am is late enough that a night shift from today won't be caught, but a morning shift
            # from tomorrow will be caught
        result = self.service.events().list(
            calendarId='primary',
            timeMin="%sZ" % (time.isoformat()),
            maxResults=1,
            singleEvents=True,
            orderBy='startTime'
        ).execute()
        events = result.get('items', [])
        # NOTE(review): raises IndexError when there are no upcoming
        # events — confirm callers expect that.
        return events[0]
    def getNextEventTime(self, includeToday=False):
        """Return the parsed start datetime of the next event."""
        log.debug("Fetching next event time (including today=%s)" % (includeToday))
        nextEvent = self.getNextEvent(today=includeToday)
        start = dateutil.parser.parse(nextEvent['start']['dateTime'])
        # start = dateutil.parser.parse(nextEvent['start']['dateTime'],ignoretz=True)
        # start = start.replace(tzinfo=pytz.timezone('Africa/Johannesburg'))
        return start
    def getNextEventLocation(self, includeToday=False):
        """Return the next event's location, or None when falsy.

        NOTE(review): nextEvent['location'] raises KeyError when the key is
        absent entirely; presumably nextEvent.get('location') was intended.
        """
        log.debug("Fetching next event location (including today=%s)" % (includeToday))
        nextEvent = self.getNextEvent(today=includeToday)
        if (nextEvent['location']):
            return nextEvent['location']
        return None
    def getDefaultAlarmTime(self):
        """Return tomorrow at the hard-coded default wake time (06:00,
        Africa/Johannesburg)."""
        defaultTime = ('0600')
        #defaultTime = self.settings.getint('default_wake')
        #defaultTime = self.settings.getint('default_wake')
        defaultHour = int(defaultTime[:2])
        defaultMin = int(defaultTime[2:])
        alarm = datetime.datetime.now(pytz.timezone('Africa/Johannesburg'))
        alarm += datetime.timedelta(days=1) # Move to tomorrow
        alarm = alarm.replace(hour=defaultHour, minute=defaultMin, second=0, microsecond=0)
        return alarm
if __name__ == '__main__':
    # Credential self-test: verify stored credentials, re-authorizing
    # interactively when they are missing or expired.
    print("Running credential check")
    a = AlarmGatherer()
    try:
        if not a.checkCredentials():
            raise Exception("Credential check failed")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed during the check.
        print("Credentials not correct, please generate new code")
        a.generateAuth()
        # Rebuild with the fresh credentials.
        a = AlarmGatherer()
    print(a.getNextEventTime())
    print(a.getNextEventLocation())