Batch update contacts - Google People API

Is there a way to batch create/update/delete contacts with the new Google People API? I see people.getBatchGet exists for reads. My app is going to hit rate limits left and right if we migrate from the old GData Contacts API.
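For reads I'm already batching with something like this (a sketch using the googleapis Node client; auth setup is omitted and it runs inside an async function):
const { google } = require('googleapis')
// `auth` is assumed to be an authorized OAuth2 client
const people = google.people({ version: 'v1', auth })
// people.getBatchGet fetches several contacts in a single call
const { data } = await people.people.getBatchGet({
  resourceNames: ['people/c1504716451892127784', 'people/c1504716451892127785'],
  personFields: 'names,emailAddresses'
})
// data.responses contains one entry per requested resource name
Is there anything equivalent for creates, updates and deletes?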

Follow the Google People API documentation to learn how to populate your contact objects; the most important part is how the batch endpoint (https://people.googleapis.com/batch) is called inside a do-while loop:
const { google } = require('googleapis')
// Note: this example assumes the `request-promise` package (or any HTTP client that
// returns the raw multipart response body as a string).
const request = require('request-promise')

// Pulls every JSON object out of a multipart/mixed batch response string,
// ignoring the boundaries and per-part HTTP headers.
function extractJSON (str) {
  const result = []
  let firstOpen = str.indexOf('{')
  let firstClose = 0
  let candidate = ''
  do {
    firstClose = str.lastIndexOf('}')
    if (firstClose <= firstOpen) {
      return result
    }
    do {
      candidate = str.substring(firstOpen, firstClose + 1)
      try {
        result.push(JSON.parse(candidate))
        firstOpen = firstClose
      } catch (e) {
        // Not valid JSON yet: shrink the candidate to the previous closing brace.
        firstClose = str.substr(0, firstClose).lastIndexOf('}')
      }
    } while (firstClose > firstOpen)
    firstOpen = str.indexOf('{', firstOpen + 1)
  } while (firstOpen !== -1)
  return result
}
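// For example, given a (hypothetical) batch response body, extractJSON skips the
// boundaries and per-part HTTP headers and returns only the JSON payloads:
//   extractJSON('--batch_x\r\nHTTP/1.1 200 OK\r\n\r\n{"resourceName":"people/c1"}\r\n--batch_x--')
//   // -> [ { resourceName: 'people/c1' } ]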
async function batchDeleteContacts (resourceIds) {
  /*
    resourceIds = [
      'c1504716451892127784',
      'c1504716451892127785',
      'c1504716451892127786',
      ...
    ]
    Set up your google-api client and extract the OAuth header.
  */
  const authHeader = await google.oAuth2Client.getRequestHeaders()
  let counter = 0
  let confirmed = []
  try {
    do {
      // Work in chunks of 25 sub-requests per batch call.
      const temp = resourceIds.splice(0, 25)
      const multipart = temp.map((resourceId, index) => ({
        'Content-Type': 'application/http',
        'Content-ID': (counter * 25) + index,
        'body': `DELETE /v1/people/${resourceId}:deleteContact HTTP/1.1\n`
      }))
      const responseString = await request.post({
        url: 'https://people.googleapis.com/batch',
        method: 'POST',
        multipart: multipart,
        headers: {
          'Authorization': authHeader.Authorization,
          'content-type': 'multipart/mixed'
        }
      })
      const result = extractJSON(responseString)
      confirmed = confirmed.concat(result)
      counter++
    } while (resourceIds.length > 0)
  } catch (ex) {
    // Handle the exception here
  }
  return confirmed
}
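// Usage sketch (the resource IDs are the placeholder values from the comment above):
//   const deleted = await batchDeleteContacts(['c1504716451892127784', 'c1504716451892127785'])
//   // `deleted` holds whatever JSON bodies the batch endpoint returned for each sub-request.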
async function batchInsertContacts (contacts) {
  /*
    Follow the Google People API documentation to learn how to generate contact objects;
    it's easy and depends on your needs.
    contacts = [{
      resource: {
        clientData,
        names,
        nicknames,
        birthdays,
        urls,
        addresses,
        emailAddresses,
        phoneNumbers,
        biographies,
        organizations
      }
    }]
    Set up your google-api client and extract the OAuth header.
  */
  const authHeader = await google.oAuth2Client.getRequestHeaders()
  let counter = 0
  let confirmed = []
  try {
    do {
      const temp = contacts.splice(0, 25)
      const multipart = temp.map((contact, index) => ({
        'Content-Type': 'application/http',
        'Content-ID': (counter * 25) + index,
        'body': 'POST /v1/people:createContact HTTP/1.1\n'
          + 'Content-Type: application/json\n\n'
          + JSON.stringify(contact.resource)
      }))
      const responseString = await request.post({
        url: 'https://people.googleapis.com/batch',
        method: 'POST',
        multipart: multipart,
        headers: {
          'Authorization': authHeader.Authorization,
          'content-type': 'multipart/mixed'
        }
      })
      const result = extractJSON(responseString)
      confirmed = confirmed.concat(result)
      counter++
    } while (contacts.length > 0)
  } catch (ex) {
    // Handle the exception here
  }
  return confirmed
}
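// Usage sketch (a minimal, hypothetical contact; add whichever person fields you need):
//   const created = await batchInsertContacts([
//     { resource: { names: [{ givenName: 'Jane', familyName: 'Doe' }] } }
//   ])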
async function batchUpdateContacts (contacts) {
  /*
    Follow the Google People API documentation to learn how to generate contact objects;
    it's easy and depends on your needs.
    contacts = [{
      resourceId: 'c1504616451882127785',
      resource: {
        clientData,
        names,
        nicknames,
        birthdays,
        urls,
        addresses,
        emailAddresses,
        phoneNumbers,
        biographies,
        organizations
      }
    }]
    Set up your google-api client and extract the OAuth header.
  */
  const authHeader = await google.oAuth2Client.getRequestHeaders()
  const updatePersonFields = 'names,nicknames,birthdays,urls,addresses,emailAddresses,phoneNumbers,biographies,organizations'
  let counter = 0
  let confirmed = []
  try {
    do {
      const temp = contacts.splice(0, 25)
      const multipart = temp.map((contact, index) => ({
        'Content-Type': 'application/http',
        'Content-ID': (counter * 25) + index,
        'body': `PATCH /v1/people/${contact.resourceId}:updateContact?updatePersonFields=${updatePersonFields} HTTP/1.1\n`
          + 'Content-Type: application/json\n\n'
          + JSON.stringify(contact.resource)
      }))
      const responseString = await request.post({
        url: 'https://people.googleapis.com/batch',
        method: 'POST',
        multipart: multipart,
        headers: {
          'Authorization': authHeader.Authorization,
          'content-type': 'multipart/mixed'
        }
      })
      const result = extractJSON(responseString)
      confirmed = confirmed.concat(result)
      counter++
    } while (contacts.length > 0)
  } catch (ex) {
    // Handle the exception here
  }
  return confirmed
}
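The snippets above assume an authorized OAuth2 client hanging off the google object as google.oAuth2Client; a minimal setup sketch (client ID, secret, redirect URI and refresh token are placeholders) would look like:
const { google } = require('googleapis')
// Hypothetical credentials: use your own client ID/secret and a stored refresh token.
google.oAuth2Client = new google.auth.OAuth2('CLIENT_ID', 'CLIENT_SECRET', 'https://example.com/oauth2callback')
google.oAuth2Client.setCredentials({ refresh_token: 'STORED_REFRESH_TOKEN' })
// getRequestHeaders() refreshes the access token if needed and returns an object
// containing the Authorization header used by the batch calls above.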

Short answer: no.
However, you may be able to prevent rate limiting with the quotaUser query parameter on your requests. From Google's documentation on standard query parameters:
Lets you enforce per-user quotas from a server-side application even in cases when the user's IP address is unknown. This can occur, for example, with applications that run cron jobs on App Engine on a user's behalf. You can choose any arbitrary string that uniquely identifies a user, but it is limited to 40 characters.
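With the googleapis Node client, quotaUser can be passed like any other query parameter; a sketch (the resource name and user ID below are placeholders):
const { google } = require('googleapis')
const people = google.people({ version: 'v1', auth: oauth2Client }) // your authorized client
// Attribute the request to a specific end user so quota is counted per user.
const { data } = await people.people.get({
  resourceName: 'people/c1504716451892127784',
  personFields: 'names',
  quotaUser: 'user-42'
})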
I have been using the People API in a new project. It still seems pretty limited, e.g. contact search isn't available yet. I'd keep the Contacts API (GData) around for features that aren't available in the newer API yet.

Related

Make a Google Cloud signed URL public

I have been trying to search through the documentation and I think I found a solution, but it isn't working and I continue to get an error. What I am doing for a signed URL is:
const [url] = await googleCloud
  .bucket(bucketName)
  .file(filename)
  .makePublic()
  .getSignedUrl(options);
return { url };
What I added, from what I found in the docs, was makePublic(), but this throws an error because it is followed by getSignedUrl(). I don't have any problem if I remove makePublic() and upload the file from the signed URL, but then the file won't be public.
Any advice on how I should proceed?
UPDATE
The error I am seeing when trying to get a signed url with makePublic is [Unhandled promise rejection: Error: googleCloud.bucket(...).file(...).makePublic(...).getSignedUrl is not a function].
Additional information regarding the options being sent with the getSignedUrl function:
const options = {
  version: 'v4',
  action: 'write',
  expires: Date.now() + 15 * 60 * 1000, // 15 minutes
  contentType: 'video/quicktime',
};
UPDATE (How frontend handles signed url)
const submitReview = async () => {
  const url = await getSignedUrl({
    variables: { filename: "google3.mov" }
  }).then(async response => {
    if (response.data && response.data.getSignedUrl) {
      const url = response.data.getSignedUrl.url;
      const pathUrl = url.split('?');
      const videoPath = pathUrl[0];
      const uploadedReponse = await uploadToGoogleCloud(url);
    }
  });
}
const uploadToGoogleCloud = async (url) => {
  const videoFile = await fetch(video.uri);
  const blob = await videoFile.blob();
  const response = await fetch(url, {
    method: 'PUT',
    body: blob
  }).then(res => console.log("thres is ", res)).catch(e => console.log(e));
}
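For reference, in @google-cloud/storage both getSignedUrl() and makePublic() return promises on the File object, so they can't be chained the way shown above. A sketch with the two calls awaited separately (same bucketName, filename and options as above):
const file = googleCloud.bucket(bucketName).file(filename);
// Generate the signed PUT URL first...
const [url] = await file.getSignedUrl(options);
// ...and make the object public in a separate call. Note that makePublic() acts on an
// existing object, so it is typically called after the upload to the signed URL has finished.
await file.makePublic();
return { url };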

How to point AWS-SDK DynamoDB to a serverless DynamoDB local

I am trying to write a script that will loop through an array of items for a DynamoDB table and run a batch write command. My functionality is good, but I am having trouble with DynamoDB. It would be great if I could point my AWS.DynamoDB.DocumentClient() to my locally running DynamoDB. Any tips?
I would also consider a way to just run the commands via the AWS CLI, but I am not sure how to do that. I am running Node.js, so it may be possible?
Here is my code:
var AWS = require('aws-sdk');
AWS.config.update({ region: 'eu-central-1' });

var DynamoDB = new AWS.DynamoDB.DocumentClient()
DynamoDB.endpoint = 'http://localhost:8000';

const allItems = require('./resource.json');
const tableName = 'some-table-name';
console.log({ tableName, allItems });

var batches = [];
var currentBatch = [];
var count = 0;
for (let i = 0; i < allItems.length; i++) {
  //push item to the current batch
  count++;
  currentBatch.push(allItems[i]);
  if (count % 25 === 0) {
    batches.push(currentBatch);
    currentBatch = [];
  }
}
//if there are still items left in the curr batch, add to the collection of batches
if (currentBatch.length > 0 && currentBatch.length !== 25) {
  batches.push(currentBatch);
}

var completedRequests = 0;
var errors = false;

//request handler for DynamoDB
function requestHandler(err, data) {
  console.log('In the request handler...');
  return function (err, data) {
    completedRequests++;
    errors = errors ? true : err;
    //log error
    if (errors) {
      console.error(JSON.stringify(err, null, 2));
      console.error('Request caused a DB error.');
      console.error('ERROR: ' + err);
      console.error(JSON.stringify(err, null, 2));
    } else {
      var res = {
        statusCode: 200,
        headers: {
          'Content-Type': 'application/json',
          'Access-Control-Allow-Methods': 'GET,POST,OPTIONS',
          'Access-Control-Allow-Origin': '*',
          'Access-Control-Allow-Credentials': true,
        },
        body: JSON.stringify(data),
        isBase64Encoded: false,
      };
      console.log(`Success: returned ${data}`);
      return res;
    }
    if (completedRequests == batches.length) {
      return errors;
    }
  };
}

//Make request
var params;
for (let j = 0; j < batches.length; j++) {
  //items go in params.RequestedItems.id array
  //format for the items is {PutRequest : {Item: ITEM_OBJECT}}
  params = '{"RequestItems": {"' + tableName + '": []}}';
  params = JSON.parse(params);
  params.RequestItems[tableName] = batches[j];
  console.log('before db.batchWriteItem: ', params);
  //send to db
  DynamoDB.batchWrite(params, requestHandler(params));
}
I figured it out and will leave this here for anyone that may need it.
var { DynamoDB } = require('aws-sdk');
var db = new DynamoDB.DocumentClient({
  region: 'localhost',
  endpoint: 'http://localhost:8000',
});
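The batching script from the question then works against the local table by using db (the localhost client above) for the writes, e.g.:
// sketch: same params shape as in the question ({ RequestItems: { [tableName]: [...] } })
db.batchWrite(params, requestHandler(params));
The same override also works from the AWS CLI by adding --endpoint-url http://localhost:8000 to commands such as aws dynamodb batch-write-item.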

AWS Lambda@Edge to set cookies on the origin response

My objective is to protect an AWS S3 bucket link, and I'm trying to solve this by using CloudFront as the link via which the S3 objects are accessible. When a user tries to access the CloudFront link, they get a basic auth prompt if there is no cookie in their browser; if there is a cookie, the auth values in that cookie are checked and the user is granted access.
PS: This is not a website; my goal is to protect S3 bucket links.
Here is my attempt using Lambda@Edge: on viewer request, the auth prompt is shown if the user is not logged in, otherwise they are allowed access. It works, but I can't set cookies, because, per the AWS documentation, CloudFront strips them: "CloudFront removes the Cookie header from requests that it forwards to your origin and removes the Set-Cookie header from responses that it returns to your viewers."
Here is my code:
'use strict';

// returns a response error
const responseError = {
  status: '401',
  statusDescription: 'Unauthorized',
  headers: {
    'www-authenticate': [{ key: 'WWW-Authenticate', value: 'Basic' }]
  }
};

exports.handler = (event, context, callback) => {
  // Get request and request headers
  console.log(event.Records[0]);
  const request = event.Records[0].cf.request;
  const response = event.Records[0].cf.response;
  const headers = request.headers;

  // checks to see if headers exists with cookies
  let hasTheHeader = (request, headerKey) => {
    if (request.headers[headerKey]) {
      return true;
    }
    else return false;
  };

  // Add set-cookie header to origin response
  const setCookie = function (response, cookie) {
    const cookieValue = `${cookie}`;
    console.log(`Setting cookie ${cookieValue}`);
    response.headers['set-cookie'] = [{ key: "Set-Cookie", value: cookieValue }];
  }

  // Configure authentication
  const authUser = 'someuser';
  const authPass = 'testpassword';
  let authToken;
  let authString;

  // Construct the Auth string
  const buff = new Buffer(authUser + ':' + authPass).toString('base64');
  authString = 'Basic ' + buff;
  const authCookie = 'testAuthToken';

  //execute this on viewer request that is if request type is viewer request:
  if (event.Records[0].cf.config.eventType == 'viewer-request') {
    //check if cookies exists and assign authToken if it does not
    if (hasTheHeader(request, 'cookie')) {
      for (let i = 0; i < headers.cookie.length; i++) {
        if (headers.cookie[i].value.indexOf(authString) >= 0) {
          authToken = authString;
          console.log(authToken);
          break;
        }
      }
    }
    if (!authToken) {
      if (headers && headers.authorization && headers.authorization[0].value === authString) {
        // Set-Cookie: testAuthToken= new Buffer(authUser + ':' + authPass).toString('base64')
        authToken = authString;
        request.header.cookie = [];
        //put cookie value to custom header - format is important
        request.headers.cookie.push({'key': 'Cookie', 'value': authString});
      }
      else {
        callback(null, responseError);
      }
      // continue forwarding request
      callback(null, request);
    }
    else {
      //strip out "Basic " to extract Basic credential in base 64
      var authInfo = authToken.slice(6);
      var userCredentials = new Buffer(authInfo, 'base64');
      var userLoginNamePass = userCredentials.toString();
      var baseCredentials = userLoginNamePass.split(":");
      var username = baseCredentials[0];
      var userPass = baseCredentials[1];
      if (username != authUser && userPass != authPass) {
        //user auth failed
        callback(null, responseError);
      } else {
        request.header.cookie = [];
        //put cookie value to custom header - format is important
        request.headers.cookie.push({'key': 'Cookie', 'value': authString});
      }
      // continue forwarding request
      callback(null, request);
    }
  }
  else if (event.Records[0].cf.config.eventType == 'origin-response') {
    if (hasTheHeader(request, 'cookie')) {
      for (let i = 0; i < headers.cookie.length; i++) {
        if (headers.cookie[i].value.indexOf(authString) >= 0) {
          setCookie(response, authString);
          break;
        }
      }
    }
    // console.log(res_headers);
    console.log("response: " + JSON.stringify(response));
    callback(null, response);
  }
};
Your suggestions will be most welcome. Thanks in advance.
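As a side note, the new Buffer(...) calls used above are deprecated in current Node.js runtimes; an equivalent sketch of the encode/decode steps with Buffer.from would be:
// Encode the Basic credential...
const authString = 'Basic ' + Buffer.from(authUser + ':' + authPass).toString('base64');
// ...and decode it back when validating the cookie/header value.
const [username, userPass] = Buffer.from(authToken.slice(6), 'base64').toString().split(':');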

Google Cloud functions (with Pubsub) error

We are using GCP with Cloud functions (with Pubsub topic triggers), and though it works fine most of the time, we do see it often throwing the following authentication error:
Error: Request had invalid authentication credentials. Expected OAuth 2 access token, login cookie or other valid authentication credential. See https://developers.google.com/identity/sign-in/web/devconsole-project.
at (/user_code/node_modules/@google-cloud/pubsub/node_modules/grpc/src/node/src/client.js:569)
Has anyone seen this or know what causes it or how to fix?
Cloud function code:
require('dotenv').config()

const datastore = require('@google-cloud/datastore')()
const pubsub = require('@google-cloud/pubsub')()
const promiseRetry = require('promise-retry')
const elasticsearch = require('elasticsearch')
const SupplierSearchManager = require('shared-managers').SupplierSearchManager
const BaseManager = require('shared-managers').BaseManager
const Util = require('shared-managers').Util
const StatsManager = require('shared-managers').StatsManager

var unitTestMode = false
var searchManagerMock, pubsubMock

exports.setUnitTestMode = (searchManagerMockP, pubsubMockP) => {
  unitTestMode = true
  searchManagerMock = searchManagerMockP
  pubsubMock = pubsubMockP
}

exports.updateElastic = event => {
  let dataObject
  try {
    dataObject = JSON.parse(Buffer.from(event.data.data, 'base64').toString())
  } catch (err) {
    console.log(err)
    console.log(event.data.data)
    return Promise.reject(err)
  }
  const baseManager = new BaseManager(datastore, new Util('base-manager'), pubsub)
  const statsManager = new StatsManager(baseManager, new Util('stats-manager'))
  let supplierId = dataObject.supplierId
  let retryCount = dataObject.retryCount
  let refresh = dataObject.refresh === true
  if (!retryCount) retryCount = 0
  return new Promise((resolve, reject) => {
    let elasticClient = new elasticsearch.Client({
      host: process.env.ELASTIC_HOST,
      httpAuth: process.env.ELASTIC_AUTH
    })
    const supplierSearchManager = unitTestMode ? searchManagerMock : new SupplierSearchManager(elasticClient, baseManager, statsManager)
    supplierSearchManager.indexSupplier(process.env.ELASTIC_INDEX, supplierId, refresh).then(resolve, err => {
      console.log(err)
      if (retryCount < 2) {
        const topic = unitTestMode ? pubsubMock : pubsub.topic(process.env.PUBSUB_PREFIX + process.env.PUBSUB_TOPIC_ELASTIC)
        promiseRetry({ retries: 4, maxTimeout: 8000 }, (retry, number) => {
          return topic.publish({ supplierId: supplierId, retryCount: retryCount + 1 }).catch(err => {
            retry(err)
          })
        }).then(resolve, err => {
          console.log(err)
          reject(err)
        })
      } else {
        resolve(true)
      }
    })
  })
}
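Not a confirmed fix, but one variable worth eliminating is the implicit application-default credentials: these client constructors accept explicit credentials, e.g. (a sketch; the project ID and key path are placeholders):
// Hypothetical values: point these at your own project and a bundled service-account key.
const pubsub = require('@google-cloud/pubsub')({
  projectId: 'my-gcp-project',
  keyFilename: './service-account.json'
})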

How to set content-length-range for s3 browser upload via boto

The Issue
I'm trying to upload images directly to S3 from the browser and am getting stuck applying the content-length-range permission via boto's S3Connection.generate_url method.
There's plenty of information about signing POST forms, setting policies in general and even a heroku method for doing a similar submission. What I can't figure out for the life of me is how to add the "content-length-range" to the signed url.
With boto's generate_url method (example below), I can specify policy headers and have got it working for normal uploads. What I can't seem to add is a policy restriction on max file size.
Server Signing Code
## django request handler
from boto.s3.connection import S3Connection
from django.conf import settings
from django.http import HttpResponse
import mimetypes
import json
conn = S3Connection(settings.S3_ACCESS_KEY, settings.S3_SECRET_KEY)
object_name = request.GET['objectName']
content_type = mimetypes.guess_type(object_name)[0]
signed_url = conn.generate_url(
    expires_in = 300,
    method = "PUT",
    bucket = settings.BUCKET_NAME,
    key = object_name,
    headers = {'Content-Type': content_type, 'x-amz-acl': 'public-read'})
return HttpResponse(json.dumps({'signedUrl': signed_url}))
On the client, I'm using ReactS3Uploader, which is based on tadruj's s3upload.js script. It shouldn't be affecting anything, as it seems to just pass along whatever the signed URL covers, but it's copied below for simplicity.
ReactS3Uploader JS Code (simplified)
uploadFile: function() {
  new S3Upload({
    fileElement: this.getDOMNode(),
    signingUrl: '/api/get_signing_url/',
    onProgress: this.props.onProgress,
    onFinishS3Put: this.props.onFinish,
    onError: this.props.onError
  });
},

render: function() {
  return this.transferPropsTo(
    React.DOM.input({type: 'file', onChange: this.uploadFile})
  );
}
S3upload.js
S3Upload.prototype.signingUrl = '/sign-s3';
S3Upload.prototype.fileElement = null;
S3Upload.prototype.onFinishS3Put = function(signResult) {
  return console.log('base.onFinishS3Put()', signResult.publicUrl);
};
S3Upload.prototype.onProgress = function(percent, status) {
  return console.log('base.onProgress()', percent, status);
};
S3Upload.prototype.onError = function(status) {
  return console.log('base.onError()', status);
};

function S3Upload(options) {
  if (options == null) {
    options = {};
  }
  for (option in options) {
    if (options.hasOwnProperty(option)) {
      this[option] = options[option];
    }
  }
  this.handleFileSelect(this.fileElement);
}

S3Upload.prototype.handleFileSelect = function(fileElement) {
  this.onProgress(0, 'Upload started.');
  var files = fileElement.files;
  var result = [];
  for (var i = 0; i < files.length; i++) {
    var f = files[i];
    result.push(this.uploadFile(f));
  }
  return result;
};

S3Upload.prototype.createCORSRequest = function(method, url) {
  var xhr = new XMLHttpRequest();
  if (xhr.withCredentials != null) {
    xhr.open(method, url, true);
  }
  else if (typeof XDomainRequest !== "undefined") {
    xhr = new XDomainRequest();
    xhr.open(method, url);
  }
  else {
    xhr = null;
  }
  return xhr;
};

S3Upload.prototype.executeOnSignedUrl = function(file, callback) {
  var xhr = new XMLHttpRequest();
  xhr.open('GET', this.signingUrl + '&objectName=' + file.name, true);
  xhr.overrideMimeType && xhr.overrideMimeType('text/plain; charset=x-user-defined');
  xhr.onreadystatechange = function() {
    if (xhr.readyState === 4 && xhr.status === 200) {
      var result;
      try {
        result = JSON.parse(xhr.responseText);
      } catch (error) {
        this.onError('Invalid signing server response JSON: ' + xhr.responseText);
        return false;
      }
      return callback(result);
    } else if (xhr.readyState === 4 && xhr.status !== 200) {
      return this.onError('Could not contact request signing server. Status = ' + xhr.status);
    }
  }.bind(this);
  return xhr.send();
};

S3Upload.prototype.uploadToS3 = function(file, signResult) {
  var xhr = this.createCORSRequest('PUT', signResult.signedUrl);
  if (!xhr) {
    this.onError('CORS not supported');
  } else {
    xhr.onload = function() {
      if (xhr.status === 200) {
        this.onProgress(100, 'Upload completed.');
        return this.onFinishS3Put(signResult);
      } else {
        return this.onError('Upload error: ' + xhr.status);
      }
    }.bind(this);
    xhr.onerror = function() {
      return this.onError('XHR error.');
    }.bind(this);
    xhr.upload.onprogress = function(e) {
      var percentLoaded;
      if (e.lengthComputable) {
        percentLoaded = Math.round((e.loaded / e.total) * 100);
        return this.onProgress(percentLoaded, percentLoaded === 100 ? 'Finalizing.' : 'Uploading.');
      }
    }.bind(this);
  }
  xhr.setRequestHeader('Content-Type', file.type);
  xhr.setRequestHeader('x-amz-acl', 'public-read');
  return xhr.send(file);
};

S3Upload.prototype.uploadFile = function(file) {
  return this.executeOnSignedUrl(file, function(signResult) {
    return this.uploadToS3(file, signResult);
  }.bind(this));
};

module.exports = S3Upload;
Any help would be greatly appreciated here as I've been banging my head against the wall for quite a few hours now.
You can't add it to a signed PUT URL. This only works with the signed policy that goes along with a POST because the two mechanisms are very different.
Signing a URL is a lossy (for lack of a better term) process. You generate the string to sign, then sign it. You send the signature with the request, but you discard and do not send the string to sign. S3 then reconstructs what the string to sign should have been, for the request it receives, and generates the signature you should have sent with that request. There's only one correct answer, and S3 doesn't know what string you actually signed. The signature matches, or doesn't, either because you built the string to sign incorrectly, or your credentials don't match, and it doesn't know which of these possibilities is the case. It only knows, based on the request you sent, the string you should have signed and what the signature should have been.
With that in mind, for content-length-range to work with a signed URL, the client would need to actually send such a header with the request... which doesn't make a lot of sense.
Conversely, with POST uploads, there is more information communicated to S3. It's not only going on whether your signature is valid, it also has your policy document... so it's possible to include directives -- policies -- with the request. They are protected from alteration by the signature, but they aren't encrypted or hashed -- the entire policy is readable by S3 (so, by contrast, we'll call this the opposite, "lossless.")
This difference is why you can't do what you are trying to do with PUT while you can with POST.
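For reference, the policy document that a browser POST upload signs can carry the content-length-range condition directly; a minimal sketch (bucket, key prefix, expiration and size limits are placeholders) looks like this:
{
  "expiration": "2025-01-01T00:00:00Z",
  "conditions": [
    {"bucket": "my-upload-bucket"},
    ["starts-with", "$key", "uploads/"],
    {"acl": "public-read"},
    ["content-length-range", 0, 10485760]
  ]
}
The browser sends the base64-encoded policy and its signature along with the file, so S3 can read the policy and reject any upload whose size falls outside the declared byte range.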