How to remove file after uploading it to Google Drive - python-2.7

I have written Python code that uploads a file to Google Drive, but after the upload finishes I cannot delete the file from the local drive: I get an "Access Denied" error. If I exit the function first, I can delete the file. So my question is: how can I delete the file from inside the function?
GDriveUpload.py
import os
import httplib2
import ntpath
import oauth2client
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload

# Copy your credentials here
_CLIENT_ID = 'YOUR_CLIENT_ID'
_CLIENT_SECRET = 'YOUR_CLIENT_SECRET'
_REFRESH_TOKEN = 'YOUR_REFRESH_TOKEN'
_PARENT_FOLDER_ID = 'YOUR_PARENT_FOLDER_ID'
_DATA_FILE = 'datafile.dat'

# ====================================================================================
# Upload file to Google Drive
def UploadFile(client_id, client_secret, refresh_token, parent_folder_id, local_file, DeleteOnExit=False):
    cred = oauth2client.client.GoogleCredentials(None, client_id, client_secret, refresh_token, None,
                                                 'https://accounts.google.com/o/oauth2/token', None)
    http = cred.authorize(httplib2.Http())
    drive_service = build('drive', 'v2', http=http)

    media_body = MediaFileUpload(local_file, mimetype='application/octet-stream', chunksize=5242880, resumable=True)
    body = {
        'title': ntpath.basename(local_file),
        'parents': [{'id': parent_folder_id}],
        'mimeType': 'application/octet-stream'
    }

    request = drive_service.files().insert(body=body, media_body=media_body)
    response = None
    while response is None:
        status, response = request.next_chunk()
        if status:
            print "Uploaded %.2f%%" % (status.progress() * 100)

    if DeleteOnExit == True:
        os.remove(local_file)

# ====================================================================================
if __name__ == '__main__':
    UploadFile(_CLIENT_ID, _CLIENT_SECRET, _REFRESH_TOKEN, _PARENT_FOLDER_ID, _DATA_FILE, DeleteOnExit=True)
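A likely cause, especially on Windows, is that MediaFileUpload keeps the local file open for the whole upload, so os.remove() fails while the handle is still held. One workaround (a sketch only; UploadAndDelete and the in-memory buffer are my own additions, assuming the file fits in memory) is to read the file yourself and upload the bytes with MediaIoBaseUpload, so nothing else holds a handle when you delete the file:

import io
import os
import ntpath
from googleapiclient.http import MediaIoBaseUpload

def UploadAndDelete(drive_service, parent_folder_id, local_file):
    # Read the file ourselves so no other object keeps a handle on it.
    with open(local_file, 'rb') as f:
        buf = io.BytesIO(f.read())
    media_body = MediaIoBaseUpload(buf, mimetype='application/octet-stream',
                                   chunksize=5242880, resumable=True)
    body = {
        'title': ntpath.basename(local_file),
        'parents': [{'id': parent_folder_id}],
    }
    request = drive_service.files().insert(body=body, media_body=media_body)
    response = None
    while response is None:
        status, response = request.next_chunk()
    # Our handle was closed by the with-block, so deletion should now succeed.
    os.remove(local_file)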

Related

I'm not able to stream the chunks from in_memory_file from a Google Cloud Storage bucket

This is the Python code. We are streaming a large file from Google Cloud Storage to Cloud Run. The large CSV file is split into chunks, and each chunk is downloaded into an in_memory_file. As soon as we get the first chunk, it should immediately start streaming in the response, with the remaining chunks following, but we are not able to stream those chunks from in_memory_file.
import os
import signal
import sys
import json
import pandas as pd
import structlog
import time
import threading
import io
import csv
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor
from types import FrameType
from io import BytesIO, StringIO
from google.cloud import storage
from google.oauth2 import service_account
from flask import Flask, Response, request

app = Flask(__name__)

# SigTerm Log
def getJSONLogger() -> structlog._config.BoundLoggerLazyProxy:
    structlog.configure(
        processors=[
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper("iso"),
            structlog.processors.JSONRenderer(),
        ],
        wrapper_class=structlog.stdlib.BoundLogger,
    )
    return structlog.get_logger()

logger = getJSONLogger()

# SigTerm Handler
def shutdown_handler(signal: int, frame: FrameType) -> None:
    logger.info("Signal received, safely shutting down.")
    print("Exiting process.", flush=True)
    sys.exit(0)

signal.signal(signal.SIGTERM, shutdown_handler)

# Split file into chunks
def split_byte_size(size: int, uri: str, bucket: str, key: str) -> list:
    byte_list = []
    chunks = 50
    start = 0
    for i in range(size, size * chunks + 1, size):
        stop = i // chunks
        byte_list.append({"uri": uri, "start": start, "end": stop, "bucket": bucket, "key": key})
        start = stop + 1
    return byte_list

# Cloud Storage connection
project = 'XYZ'
service_account_credentials_path = 'key.json'
credentials = service_account.Credentials.from_service_account_file(service_account_credentials_path)
storage_client = storage.Client(project=project, credentials=credentials)

# Download objects as chunks
def downloader(input: dict) -> object:
    bucket_object = storage_client.get_bucket(bucket_or_name=input["bucket"])
    blob = bucket_object.blob(input["key"])
    in_memory_file = io.BytesIO()
    blob.download_to_file(in_memory_file, start=input['start'], end=input['end'])
    # print("Chunk " + str(input['start']) + " to " + str(input['end']) + " completed")
    return in_memory_file

@app.route("/chunk_data")
def chunk_data():
    bucket_name = 'cloudrundemofile'
    source_blob_name = 'demofile.csv'
    bucket_object = storage_client.get_bucket(bucket_name)
    blob = bucket_object.get_blob(source_blob_name)
    split_bytes = split_byte_size(blob.size, project, bucket_name, source_blob_name)
    print(split_bytes)
    # Async thread pool
    with ThreadPoolExecutor(max_workers=5) as ex:
        results = ex.map(downloader, split_bytes)
        resp = Response(results, 206, mimetype='text/csv')
        # resp.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(start, start + length - 1, file_size))
    return resp
    # return "Success"

if __name__ == "__main__":
    signal.signal(signal.SIGINT, shutdown_handler)
    app.run(host="0.0.0.0", port=8080)
else:
    signal.signal(signal.SIGTERM, shutdown_handler)
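A possible reason the chunks never stream (a sketch under the assumption that this is the issue): ex.map() yields BytesIO objects, but a Flask Response can only stream an iterable of bytes or str. Wrapping the downloads in a generator that yields each chunk's raw bytes lets Flask send data as soon as the first chunk is ready; stream_chunks below is a name of my own:

from concurrent.futures import ThreadPoolExecutor
from flask import Response

def stream_chunks(split_bytes):
    # Yield each chunk's raw bytes, in order, as soon as its download finishes.
    with ThreadPoolExecutor(max_workers=5) as ex:
        for in_memory_file in ex.map(downloader, split_bytes):
            yield in_memory_file.getvalue()

# Inside chunk_data(), return the generator so Flask streams instead of buffering:
#     return Response(stream_chunks(split_bytes), mimetype='text/csv')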

Why do I get different errors when using different network connections to PUT a file to an S3 bucket with Signature v4?

Here is my code.
First, the driver script:
#!/usr/bin/env python
import os
import sys
from gen_url import sign
import requests
import uuid

def upload_me(file_path, key=None, secret=None):
    access_key = 'ABC' if key is None else key
    secret_key = 'EDF' if secret is None else secret
    s3_bucket = 'my-work-bucket'
    object_name = '1-2-{uuid}.jpeg'.format(uuid=uuid.uuid4())
    mime_type = 'image/jpeg'
    expires = 24 * 60 * 60  # link expiry in sec
    os.environ['AWS_ACCESS_KEY_ID'] = access_key
    os.environ['AWS_SECRET_ACCESS_KEY'] = secret_key
    region = 'us-west-2'
    url = sign(key, secret, s3_bucket, object_name, mime_type, expires, region)
    with open(file_path, 'r') as f:
        resp = requests.post(url, data=f)
    print resp.content

if __name__ == '__main__':
    argc = len(sys.argv)
    key = secret = None
    if argc == 2 or argc == 4:
        file_path = sys.argv[1]
        if argc == 4:
            key = sys.argv[2]
            secret = sys.argv[3]
    else:
        raise Exception('Expect 1 or 3 arguments')
    upload_me(file_path, key, secret)
The code of the sign function in the gen_url module:
import sys
import boto3
from botocore.client import Config
from datetime import datetime, date, time

def sign(access_key, secret_key, s3_bucket, object_name, mime_type, expires, region):
    s3_client = boto3.client('s3',
                             region_name=region,
                             aws_access_key_id=access_key,
                             aws_secret_access_key=secret_key)
    # Don't include content type
    # 'ContentType': mime_type
    params = {
        'Bucket': s3_bucket,
        'Key': object_name,
    }
    response = s3_client.generate_presigned_url('put_object',
                                                Params=params,
                                                ExpiresIn=expires)
    return response
When I use the internet connection at home, this is the error I get:
requests.exceptions.ConnectionError: ('Connection aborted.', error(32, 'Broken pipe'))
But when I tether with my iPhone, the command gives a different error:
<Error><Code>SignatureDoesNotMatch</Code><Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
Why is the output completely different when the network is different?
It turns out the last three lines of the driver script should be:
    with open(file_path, 'rb') as f:
        resp = requests.put(url, data=f)
    print resp.content
Then there is no issue with either connection.
(The error(32, 'Broken pipe') may just have been a coincidence; there was a connection issue with my ISP.)
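As a side note, if you do want S3 to validate the content type, the ContentType has to be both signed and sent; otherwise S3 replies with SignatureDoesNotMatch. A minimal sketch, with placeholder bucket and key names:

import boto3
import requests

s3_client = boto3.client('s3', region_name='us-west-2')

# If ContentType is part of the signed parameters, the PUT must send the same header.
url = s3_client.generate_presigned_url(
    'put_object',
    Params={'Bucket': 'my-work-bucket', 'Key': 'photo.jpeg', 'ContentType': 'image/jpeg'},
    ExpiresIn=3600,
)

with open('photo.jpeg', 'rb') as f:
    resp = requests.put(url, data=f, headers={'Content-Type': 'image/jpeg'})
print(resp.status_code)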

Lambda-API gateway : "message": "Internal server error"

I am using AWS CodeStar (Lambda + API Gateway) to build my serverless API. My lambda function works well in the Lambda console but strangely throws this error when I run the code on AWS CodeStar:
"message": "Internal server error"
Kindly help me with this issue.
import json
import os
import bz2
import pprint
import hashlib
import sqlite3
import re
from collections import namedtuple
from gzip import GzipFile
from io import BytesIO
from botocore.vendored import requests
import logging

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

def handler(event, context):
    logger.info('## ENVIRONMENT VARIABLES')
    logger.info(os.environ)
    logger.info('## EVENT')
    logger.info(event)
    n = get_package_list()
    n1 = str(n)
    dat = {"total_pack": n1}
    return {'statusCode': 200,
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps(dat)
            }

def get_package_list():
    url = "http://amazonlinux.us-east-2.amazonaws.com/2/core/2.0/x86_64/c60ceaf6dfa3bc10e730c9e803b51543250c8a12bb009af00e527a598394cd5e/repodata/primary.sqlite.gz"
    db_filename = "dbfile"
    resp = requests.get(url, stream=True)
    remote_data = resp.raw.read()
    cached_fh = BytesIO(remote_data)
    compressed_fh = GzipFile(fileobj=cached_fh)
    with open(os.path.join('/tmp', db_filename), "wb") as local_fh:
        local_fh.write(compressed_fh.read())
    package_obj_list = []
    db = sqlite3.connect(os.path.join('/tmp', db_filename))
    c = db.cursor()
    c.execute('SELECT name FROM packages')
    for package in c.fetchall():
        package_obj_list.append(package)
    no_of_packages = len(package_obj_list)
    return no_of_packages
Expected Result: should return an Integer (no_of_packages).
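When the same handler works in the Lambda console but API Gateway returns {"message": "Internal server error"}, common causes are the function timing out (downloading and unpacking primary.sqlite.gz can exceed a short timeout) or the return value not matching the proxy integration format. A hedged sanity check of the response shape, using a stand-in event and context:

# Quick local check of the proxy response shape (the event/context values are stand-ins).
if __name__ == '__main__':
    resp = handler({"httpMethod": "GET", "path": "/"}, None)
    assert set(resp) >= {"statusCode", "body"}   # required keys for proxy integration
    assert isinstance(resp["body"], str)         # body must be a JSON string, not a dict
    print(resp["statusCode"], resp["body"])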

How to download specific Google Drive folder using Python?

I'm trying to download specific folders from Google Drive.
I tried this example, http://www.mwclearning.com/?p=1608, but it downloads all the files from Google Drive.
For example, if I have two folders in Google Drive, say:
Folder A containing files 1, 2
Folder B containing files 3, 4, 5
If I want to download folder A, then only files 1 and 2 should be downloaded.
Any suggestion or help would be very helpful.
Thanks in advance.
Use the Drive credentials.json downloaded from your Drive API console.
from __future__ import print_function
import pickle
import os
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from apiclient.http import MediaFileUpload, MediaIoBaseDownload
import io
from apiclient import errors
from apiclient import http
import logging
from apiclient import discovery

# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive']

# To list folders
def listfolders(service, filid, des):
    results = service.files().list(
        pageSize=1000, q="\'" + filid + "\'" + " in parents",
        fields="nextPageToken, files(id, name, mimeType)").execute()
    # logging.debug(folder)
    folder = results.get('files', [])
    for item in folder:
        if str(item['mimeType']) == str('application/vnd.google-apps.folder'):
            if not os.path.isdir(des + "/" + item['name']):
                os.mkdir(path=des + "/" + item['name'])
            print(item['name'])
            listfolders(service, item['id'], des + "/" + item['name'])  # Loop until the files are found
        else:
            downloadfiles(service, item['id'], item['name'], des)
            print(item['name'])
    return folder

# To download files
def downloadfiles(service, dowid, name, dfilespath):
    request = service.files().get_media(fileId=dowid)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        status, done = downloader.next_chunk()
        print("Download %d%%." % int(status.progress() * 100))
    with io.open(dfilespath + "/" + name, 'wb') as f:
        fh.seek(0)
        f.write(fh.read())

def main():
    """Shows basic usage of the Drive v3 API.
    Prints the names and ids of the first 10 files the user has access to.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)  # credentials.json downloaded from the Drive API
            creds = flow.run_local_server()
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('drive', 'v3', credentials=creds)

    # Call the Drive v3 API
    Folder_id = "'PASTE YOUR SHARED FOLDER ID'"  # Enter the downloadable folder ID from the shared link
    results = service.files().list(
        pageSize=1000, q=Folder_id + " in parents", fields="nextPageToken, files(id, name, mimeType)").execute()
    items = results.get('files', [])
    if not items:
        print('No files found.')
    else:
        print('Files:')
        for item in items:
            if item['mimeType'] == 'application/vnd.google-apps.folder':
                if not os.path.isdir("Folder"):
                    os.mkdir("Folder")
                bfolderpath = os.getcwd() + "/Folder/"
                if not os.path.isdir(bfolderpath + item['name']):
                    os.mkdir(bfolderpath + item['name'])
                folderpath = bfolderpath + item['name']
                listfolders(service, item['id'], folderpath)
            else:
                if not os.path.isdir("Folder"):
                    os.mkdir("Folder")
                bfolderpath = os.getcwd() + "/Folder/"
                if not os.path.isdir(bfolderpath + item['name']):
                    os.mkdir(bfolderpath + item['name'])
                filepath = bfolderpath + item['name']
                downloadfiles(service, item['id'], item['name'], filepath)

if __name__ == '__main__':
    main()
Check the Google Drive API documentation; you can see here the sample code used to perform a file download in Python.
file_id = '0BwwA4oUTeiV1UVNwOHItT0xfa2M'
request = drive_service.files().get_media(fileId=file_id)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
    status, done = downloader.next_chunk()
    print "Download %d%%." % int(status.progress() * 100)
For the folders part, you can check here on how to get it.
For more information, you can check this tutorial and YT video.
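One caveat that applies to the listing code in these answers: files().list() is paged, so pageSize=1000 without following nextPageToken silently truncates large folders. A minimal paging sketch (list_all_children is a name of my own, folder_id a placeholder):

def list_all_children(service, folder_id):
    # Follow nextPageToken so large folders are listed completely.
    items = []
    page_token = None
    while True:
        results = service.files().list(
            q="'{0}' in parents".format(folder_id),
            pageSize=1000,
            fields="nextPageToken, files(id, name, mimeType)",
            pageToken=page_token).execute()
        items.extend(results.get('files', []))
        page_token = results.get('nextPageToken')
        if not page_token:
            return items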
Here's just the code that deals specifically with downloading a folder recursively.
I've tried to keep it to-the-point, omitting code that's described in tutorials already. I expect you to already have the ID of the folder that you want to download.
The elif not itemType.startswith('application/'): branch is there to skip any Drive-format documents. However, the check is overly simplistic, so you might want to improve it or remove it.
from __future__ import print_function
import pickle
import os.path
import io
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request

# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive.readonly']

def main():
    """Based on the quickStart.py example at
    https://developers.google.com/drive/api/v3/quickstart/python
    """
    creds = getCredentials()
    service = build('drive', 'v3', credentials=creds)
    folderId = ""
    destinationFolder = ""
    downloadFolder(service, folderId, destinationFolder)

def downloadFolder(service, fileId, destinationFolder):
    if not os.path.isdir(destinationFolder):
        os.mkdir(path=destinationFolder)
    results = service.files().list(
        pageSize=300,
        q="parents in '{0}'".format(fileId),
        fields="files(id, name, mimeType)"
    ).execute()
    items = results.get('files', [])
    for item in items:
        itemName = item['name']
        itemId = item['id']
        itemType = item['mimeType']
        filePath = destinationFolder + "/" + itemName
        if itemType == 'application/vnd.google-apps.folder':
            print("Stepping into folder: {0}".format(filePath))
            downloadFolder(service, itemId, filePath)  # Recursive call
        elif not itemType.startswith('application/'):
            downloadFile(service, itemId, filePath)
        else:
            print("Unsupported file: {0}".format(itemName))

def downloadFile(service, fileId, filePath):
    # Note: The parent folders in filePath must exist
    print("-> Downloading file with id: {0} name: {1}".format(fileId, filePath))
    request = service.files().get_media(fileId=fileId)
    fh = io.FileIO(filePath, mode='wb')
    try:
        downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
        done = False
        while done is False:
            status, done = downloader.next_chunk(num_retries=2)
            if status:
                print("Download %d%%." % int(status.progress() * 100))
        print("Download Complete!")
    finally:
        fh.close()
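If you want those skipped Drive-format documents as well, files().export_media() can convert them on download instead; a sketch only, and the MIME mapping below is just an example:

# Example: export Google-format documents instead of skipping them.
EXPORT_MIME = {
    'application/vnd.google-apps.document': 'application/pdf',
    'application/vnd.google-apps.spreadsheet':
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
}

def exportFile(service, fileId, srcMimeType, filePath):
    request = service.files().export_media(fileId=fileId,
                                            mimeType=EXPORT_MIME[srcMimeType])
    with io.FileIO(filePath, mode='wb') as fh:
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()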
Please download the 'client_id.json' file as specified in the tutorial link; for downloading it, follow steps 5-7.
In the last line of the code, change "folder_id" to the ID of the folder you want to download from Drive (right-click the folder and enable the share link; the ID is the part of the URL after "id="), and change "savepath" to the path on your system where you want the downloaded folder to be saved.
from __future__ import print_function
from googleapiclient import discovery
from httplib2 import Http
from oauth2client import file, client, tools
import os, io
from apiclient.http import MediaFileUpload, MediaIoBaseDownload

SCOPES = 'https://www.googleapis.com/auth/drive'
store = file.Storage('storage.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_id.json', SCOPES)
    creds = tools.run_flow(flow, store)
DRIVE = discovery.build('drive', 'v3', http=creds.authorize(Http()))

def retaining_folder_structure(query, filepath):
    results = DRIVE.files().list(fields="nextPageToken, files(id, name, kind, mimeType)", q=query).execute()
    items = results.get('files', [])
    for item in items:
        # print(item['name'])
        if item['mimeType'] == 'application/vnd.google-apps.folder':
            fold = item['name']
            path = filepath + '/' + fold
            if os.path.isdir(path):
                retaining_folder_structure("'%s' in parents" % (item['id']), path)
            else:
                os.mkdir(path)
                retaining_folder_structure("'%s' in parents" % (item['id']), path)
        else:
            request = DRIVE.files().get_media(fileId=item['id'])
            fh = io.BytesIO()
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while done is False:
                status, done = downloader.next_chunk()
                print("Download %d%%." % int(status.progress() * 100))
            path = filepath + '/' + item['name']
            # print(path)
            with io.open(path, 'wb') as f:
                fh.seek(0)
                f.write(fh.read())

retaining_folder_structure("'folder_id' in parents", 'savepath')
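For example, if the share link ends in id=1AbCdEfGhIjK (a made-up ID), the final call would look like:

# Hypothetical example: the folder ID is the part of the share link after "id=".
retaining_folder_structure("'1AbCdEfGhIjK' in parents", '/home/me/Downloads/DriveFolder')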

App Engine on development environment not printing log

I am trying to print log output in the local development environment of Google App Engine. It seems to be set up the way it should be, but I am still not able to get the log to print. Can someone lend a helping hand here?
I need this output on the standard console.
import webapp2
from google.appengine.api import urlfetch
from Webx import WebxClass
import json
import logging

class SearchHandler(webapp2.RequestHandler):
    def __init__(self, *args, **kwargs):
        super(SearchHandler, self).__init__(*args, **kwargs)
        self.result = []
        self.searchPortals = [WebxClass()]
        self.asa = []

    def handleCallBack(self, rpc, portalObject):
        try:
            rr = rpc.get_result()
            if rr.status_code == 200:
                if isinstance(portalObject, WebxClass):
                    resultList = portalObject.getResultList(rr.content)
                    self.result.extend(resultList)
        except urlfetch.DownloadError:
            self.result = 'Error while fetching from portal - ' + portalObject.getName()

    def getSearchResult(self):
        rpcs = []
        searchKeyword = self.request.get('searchString')
        logging.error("------------------------------")
        for portal in self.searchPortals:
            rpc = urlfetch.create_rpc(deadline=5)
            rpc.callback = lambda: self.handleCallBack(rpc, portal)
            urlfetch.make_fetch_call(rpc, portal.getSearchURL(searchKeyword))
            rpcs.append(rpc)
        for rpc in rpcs:
            rpc.wait()
        self.response.status_int = 200
        self.response.headers['Content-Type'] = 'application/json'
        self.response.headers.add_header("Access-Control-Allow-Origin", "*")
        self.response.write(json.dumps(self.result))

app = webapp2.WSGIApplication([
    webapp2.Route(r'/search', methods=['GET'], handler='Torrent.SearchHandler:getSearchResult')
], debug=True)

def main():
    logging.getLogger().setLevel(logging.DEBUG)
    logging.debug("------------------------------")
    app.run()

if __name__ == '__main__':
    main()
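One likely explanation, assuming the python27 runtime: dev_appserver imports the app WSGI object directly, so main() is never called and the logging level is never raised. A sketch of the usual workaround is to configure logging at module level and run the dev server with a more verbose console log level:

import logging
import webapp2

# Configure at import time; main() is not executed when the WSGI app object is served.
logging.getLogger().setLevel(logging.DEBUG)

app = webapp2.WSGIApplication([
    webapp2.Route(r'/search', methods=['GET'], handler='Torrent.SearchHandler:getSearchResult')
], debug=True)

# Then start the dev server with debug-level console output, for example:
#   dev_appserver.py --log_level=debug app.yaml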