My objective is to protect an AWS S3 bucket link. My approach is to make CloudFront the only way the S3 objects are reachable: when a user hits the CloudFront link, they get a Basic Auth challenge if there is no cookie in their browser; if a cookie is present, the auth values in that cookie are checked and the user is granted access.
PS: This is not a website; my goal is simply to protect S3 bucket links.
Here is my attempt using Lambda@Edge. On viewer request, the user gets the auth prompt if they are not logged in; otherwise they are allowed through. It works, but I can't set cookies, because according to the AWS documentation CloudFront strips them: "CloudFront removes the Cookie header from requests that it forwards to your origin and removes the Set-Cookie header from responses that it returns to your viewers."
Here is my code:
'use strict';

// Response returned when authentication fails or is missing
const responseError = {
  status: '401',
  statusDescription: 'Unauthorized',
  headers: {
    'www-authenticate': [{ key: 'WWW-Authenticate', value: 'Basic' }]
  }
};

exports.handler = (event, context, callback) => {
  // Get the request, response, and request headers from the event record
  console.log(event.Records[0]);
  const request = event.Records[0].cf.request;
  const response = event.Records[0].cf.response;
  const headers = request.headers;

  // Checks whether the request carries the given header
  const hasTheHeader = (request, headerKey) => Boolean(request.headers[headerKey]);

  // Adds a Set-Cookie header to the origin response
  const setCookie = function (response, cookie) {
    console.log(`Setting cookie ${cookie}`);
    response.headers['set-cookie'] = [{ key: 'Set-Cookie', value: cookie }];
  };

  // Configure authentication
  const authUser = 'someuser';
  const authPass = 'testpassword';
  let authToken;

  // Construct the Basic auth string
  const buff = Buffer.from(authUser + ':' + authPass).toString('base64');
  const authString = 'Basic ' + buff;

  // Execute this on viewer request, i.e. if the event type is viewer-request
  if (event.Records[0].cf.config.eventType === 'viewer-request') {
    // Check whether a cookie exists and assign authToken from it
    if (hasTheHeader(request, 'cookie')) {
      for (let i = 0; i < headers.cookie.length; i++) {
        if (headers.cookie[i].value.indexOf(authString) >= 0) {
          authToken = authString;
          console.log(authToken);
          break;
        }
      }
    }

    if (!authToken) {
      if (headers && headers.authorization && headers.authorization[0].value === authString) {
        authToken = authString;
        // Put the cookie value into the request headers - the format is important
        request.headers.cookie = [];
        request.headers.cookie.push({ key: 'Cookie', value: authString });
      } else {
        return callback(null, responseError);
      }
      // Continue forwarding the request
      return callback(null, request);
    } else {
      // Strip out "Basic " to extract the base64-encoded credentials
      const authInfo = authToken.slice(6);
      const userCredentials = Buffer.from(authInfo, 'base64').toString();
      const baseCredentials = userCredentials.split(':');
      const username = baseCredentials[0];
      const userPass = baseCredentials[1];
      if (username !== authUser || userPass !== authPass) {
        // User auth failed
        return callback(null, responseError);
      }
      // Put the cookie value into the request headers - the format is important
      request.headers.cookie = [];
      request.headers.cookie.push({ key: 'Cookie', value: authString });
      // Continue forwarding the request
      return callback(null, request);
    }
  } else if (event.Records[0].cf.config.eventType === 'origin-response') {
    if (hasTheHeader(request, 'cookie')) {
      for (let i = 0; i < headers.cookie.length; i++) {
        if (headers.cookie[i].value.indexOf(authString) >= 0) {
          setCookie(response, authString);
          break;
        }
      }
    }
    console.log('response: ' + JSON.stringify(response));
    return callback(null, response);
  }
};
Your suggestions will be most welcome. Thanks in advance.
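One direction worth exploring (a minimal, untested sketch): since the Set-Cookie stripping described above applies to responses coming back from the origin, the cookie could instead be attached to a response generated directly by the viewer-request function - for example a 302 back to the requested URI - assuming CloudFront passes Set-Cookie on edge-generated responses through to the viewer (worth verifying against the current documentation). The cookie name "auth" below is a placeholder.

'use strict';

// Untested sketch: the cookie rides on a response generated at the edge,
// not on a response coming back from the origin.
const token = Buffer.from('someuser:testpassword').toString('base64');
const authString = 'Basic ' + token;

exports.handler = (event, context, callback) => {
  const request = event.Records[0].cf.request;
  const headers = request.headers;

  // Already authenticated: the auth cookie is present, let the request through to S3.
  const cookies = headers.cookie || [];
  if (cookies.some(c => c.value.indexOf('auth=' + token) >= 0)) {
    return callback(null, request);
  }

  // Correct Basic credentials supplied: answer with a redirect that sets the cookie.
  if (headers.authorization && headers.authorization[0].value === authString) {
    return callback(null, {
      status: '302',
      statusDescription: 'Found',
      headers: {
        location: [{ key: 'Location', value: request.uri }],
        'set-cookie': [{ key: 'Set-Cookie', value: 'auth=' + token + '; Path=/; Secure; HttpOnly' }]
      }
    });
  }

  // No cookie and no credentials: challenge the browser.
  return callback(null, {
    status: '401',
    statusDescription: 'Unauthorized',
    headers: {
      'www-authenticate': [{ key: 'WWW-Authenticate', value: 'Basic' }]
    }
  });
};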
Related
I spent a few hours solving redirection from the www to the non-www version of a static site hosted on AWS.
You need to use two Lambda@Edge functions with CloudFront.
First Lambda@Edge function - CloudFront event is Viewer request
exports.handler = (event, context, callback) => {
  const request = event.Records[0].cf.request
  request.headers['x-forwarded-host'] = [
    { key: 'X-Forwarded-Host', value: request.headers.host[0].value }
  ]
  return callback(null, request)
}
Second Lambda@Edge function - CloudFront event is Origin request
exports.handler = function handler(event, context, callback) {
  const { request } = event.Records[0].cf;
  const requestHost = request.headers['x-forwarded-host'][0].value;
  if (requestHost.startsWith("www.")) {
    const domain_without_www = requestHost.replace("www.", "");
    const location = "https://" + domain_without_www + request.uri;
    const responseWithRedirect = {
      status: '301',
      statusDescription: 'Redirecting to apex domain',
      headers: {
        location: [{
          key: 'Location',
          value: location
        }]
      }
    };
    callback(null, responseWithRedirect);
    return;
  }
  // or return request unchanged
  callback(null, request);
}
The last step is to add the "x-forwarded-host" header to the whitelisted headers in your distribution's behavior section.
I found this solution in the AWS forum, but it was hard to track down. I believe it will help you.
I have my website's source code stored in AWS S3 and I'm using AWS CloudFront to deliver my content.
I want to use AWS Lambda@Edge to remove the .html extension from all the links served through CloudFront.
The required output should be www.example.com/foo instead of www.example.com/foo.html, or example.com/foo1 instead of example.com/foo1.html.
Please help me implement this, as I can't find a clear solution to follow. I've referred to point 3 of this article: https://forums.aws.amazon.com/thread.jspa?messageID=796961&tstart=0, but it's not clear what I need to do.
Please find below the Lambda code - how can I modify it?
const config = {
  suffix: '.html',
  appendToDirs: 'index.html',
  removeTrailingSlash: false,
};

const regexSuffixless = /\/[^/.]+$/; // e.g. "/some/page" but not "/", "/some/" or "/some.jpg"
const regexTrailingSlash = /.+\/$/; // e.g. "/some/" or "/some/page/" but not root "/"

exports.handler = function handler(event, context, callback) {
  const { request } = event.Records[0].cf;
  const { uri } = request;
  const { suffix, appendToDirs, removeTrailingSlash } = config;

  // Append ".html" to origin request
  if (suffix && uri.match(regexSuffixless)) {
    request.uri = uri + suffix;
    callback(null, request);
    return;
  }

  // Append "index.html" to origin request
  if (appendToDirs && uri.match(regexTrailingSlash)) {
    request.uri = uri + appendToDirs;
    callback(null, request);
    return;
  }

  // Redirect (301) non-root requests ending in "/" to URI without trailing slash
  if (removeTrailingSlash && uri.match(/.+\/$/)) {
    const response = {
      // body: '',
      // bodyEncoding: 'text',
      headers: {
        'location': [{
          key: 'Location',
          value: uri.slice(0, -1)
        }]
      },
      status: '301',
      statusDescription: 'Moved Permanently'
    };
    callback(null, response);
    return;
  }

  // If nothing matches, return request unchanged
  callback(null, request);
};
Please help me remove the .html extension from my website - what updated code do I need to paste into my AWS Lambda function?
Thanks in advance!
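For reference, a minimal sketch of how the handler above behaves once deployed as an origin-request trigger with the config shown. It assumes the S3 objects keep their .html keys (e.g. foo.html), and the filename in the require is a placeholder; this is only a local illustration, not part of the deployment:

// Hypothetical local check of the rewrite logic in the handler above
const handler = require('./rewriteHtml').handler; // assumed filename

const fakeEvent = (uri) => ({
  Records: [{ cf: { request: { uri, headers: {} } } }]
});

handler(fakeEvent('/foo'), null, (err, result) => {
  console.log(result.uri); // "/foo.html" - served from the foo.html object
});

handler(fakeEvent('/blog/'), null, (err, result) => {
  console.log(result.uri); // "/blog/index.html" - directory URIs get index.html appended
});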
Is there a way to batch CRUD contacts with the new Google People API (I see getBatchGet exists for reads)? My app is going to hit rate limits left and right if we upgrade from the old GData Contacts API.
Follow the Google People API documentation to learn how to populate your objects; the most important part is how the Google batch endpoint is used inside a do-while loop:
const { google } = require('googleapis')
// Assumption: the snippet below awaits request.post, so a promise-based HTTP client is needed
const request = require('request-promise')

// Pulls every complete JSON object out of the multipart batch response string
function extractJSON (str) {
  const result = []
  let firstOpen = 0
  let firstClose = 0
  let candidate = 0
  firstOpen = str.indexOf('{', firstOpen + 1)
  do {
    firstClose = str.lastIndexOf('}')
    if (firstClose <= firstOpen) {
      return []
    }
    do {
      candidate = str.substring(firstOpen, firstClose + 1)
      try {
        result.push(JSON.parse(candidate))
        firstOpen = firstClose
      } catch (e) {
        firstClose = str.substr(0, firstClose).lastIndexOf('}')
      }
    } while (firstClose > firstOpen)
    firstOpen = str.indexOf('{', firstOpen + 1)
  } while (firstOpen !== -1)
  return result
}

async function batchDeleteContacts (resourceIds) {
  /*
  resourceIds = [
    'c1504716451892127784',
    'c1504716451892127785',
    'c1504716451892127786',
    ...
  ]
  Set up your google-api client and extract the OAuth header.
  */
  const authHeader = await google.oAuth2Client.getRequestHeaders()
  let counter = 0
  let confirmed = []
  try {
    do {
      // Process 25 deletes per batch request
      const temp = resourceIds.splice(0, 25)
      const multipart = temp.map((resourceId, index) => ({
        'Content-Type': 'application/http',
        'Content-ID': (counter * 25) + index,
        'body': `DELETE /v1/people/${resourceId}:deleteContact HTTP/1.1\n`
      }))
      const responseString = await request.post({
        url: 'https://people.googleapis.com/batch',
        method: 'POST',
        multipart: multipart,
        headers: {
          'Authorization': authHeader.Authorization,
          'content-type': 'multipart/mixed'
        }
      })
      const result = extractJSON(responseString)
      confirmed = confirmed.concat(result)
      counter++
    } while (resourceIds.length > 0)
  } catch (ex) {
    // Handle the exception here
  }
  return confirmed
}
async function batchInsertContacts (contacts) {
  /*
  Follow the Google People API documentation to learn how to generate contact objects;
  it's easy and depends on your needs.
  contacts = [{
    resource: {
      clientData,
      names,
      nicknames,
      birthdays,
      urls,
      addresses,
      emailAddresses,
      phoneNumbers,
      biographies,
      organizations
    }
  }]
  Set up your google-api client and extract the OAuth header.
  */
  const authHeader = await google.oAuth2Client.getRequestHeaders()
  let counter = 0
  let confirmed = []
  try {
    do {
      // Process 25 inserts per batch request
      const temp = contacts.splice(0, 25)
      const multipart = temp.map((contact, index) => ({
        'Content-Type': 'application/http',
        'Content-ID': (counter * 25) + index,
        'body': 'POST /v1/people:createContact HTTP/1.1\n'
          + 'Content-Type: application/json\n\n'
          + JSON.stringify(contact.resource)
      }))
      const responseString = await request.post({
        url: 'https://people.googleapis.com/batch',
        method: 'POST',
        multipart: multipart,
        headers: {
          'Authorization': authHeader.Authorization,
          'content-type': 'multipart/mixed'
        }
      })
      const result = extractJSON(responseString)
      confirmed = confirmed.concat(result)
      counter++
    } while (contacts.length > 0)
  } catch (ex) {
    // Handle the exception here
  }
  return confirmed
}
async function batchUpdateContacts (contacts) {
  /*
  Follow the Google People API documentation to learn how to generate contact objects;
  it's easy and depends on your needs.
  contacts = [{
    resourceId: 'c1504616451882127785',
    resource: {
      clientData,
      names,
      nicknames,
      birthdays,
      urls,
      addresses,
      emailAddresses,
      phoneNumbers,
      biographies,
      organizations
    }
  }]
  Set up your google-api client and extract the OAuth header.
  */
  const authHeader = await google.oAuth2Client.getRequestHeaders()
  const updatePersonFields = 'names,nicknames,birthdays,urls,addresses,emailAddresses,phoneNumbers,biographies,organizations'
  let counter = 0
  let confirmed = []
  try {
    do {
      // Process 25 updates per batch request
      const temp = contacts.splice(0, 25)
      const multipart = temp.map((contact, index) => ({
        'Content-Type': 'application/http',
        'Content-ID': (counter * 25) + index,
        'body': `PATCH /v1/people/${contact.resourceId}:updateContact?updatePersonFields=${updatePersonFields} HTTP/1.1\n`
          + 'Content-Type: application/json\n\n'
          + JSON.stringify(contact.resource)
      }))
      const responseString = await request.post({
        url: 'https://people.googleapis.com/batch',
        method: 'POST',
        multipart: multipart,
        headers: {
          'Authorization': authHeader.Authorization,
          'content-type': 'multipart/mixed'
        }
      })
      const result = extractJSON(responseString)
      confirmed = confirmed.concat(result)
      counter++
    } while (contacts.length > 0)
  } catch (ex) {
    // Handle the exception here
  }
  return confirmed
}
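A short usage sketch of the three helpers above. The resource IDs and the contact payload are placeholders, matching the shapes described in the comments:

// Hypothetical usage - IDs and contact fields are placeholders
async function main () {
  const deleted = await batchDeleteContacts(['c1504716451892127784', 'c1504716451892127785'])
  console.log('deleted:', deleted.length)

  const inserted = await batchInsertContacts([
    { resource: { names: [{ givenName: 'Ada', familyName: 'Lovelace' }] } }
  ])
  console.log('inserted:', inserted.length)
}

main().catch(console.error)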
Short answer: no.
However, you may be able to prevent rate limiting with the quotaUser query param on your requests.
"Lets you enforce per-user quotas from a server-side application even in cases when the user's IP address is unknown. This can occur, for example, with applications that run cron jobs on App Engine on a user's behalf. You can choose any arbitrary string that uniquely identifies a user, but it is limited to 40 characters."
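For example, with the Node.js client the standard quotaUser parameter can be passed on each call. This is only a sketch: the auth object is a placeholder, and it assumes the client forwards quotaUser as a standard query parameter (it is a documented standard parameter across Google APIs):

const { google } = require('googleapis')

async function listConnections (auth) {
  // auth is a hypothetical, already-configured OAuth2 client
  const people = google.people({ version: 'v1', auth })

  // quotaUser attributes the request's quota usage to an arbitrary
  // per-user key (any unique string up to 40 characters).
  const res = await people.people.connections.list({
    resourceName: 'people/me',
    personFields: 'names,emailAddresses',
    quotaUser: 'app-user-12345'
  })
  return res.data.connections || []
}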
I have been using the People API on a new development. Seems pretty limited, e.g. contact search isn't even available yet. I'd keep the Contacts API/GData around for features that aren't available in the newer API yet.
I have configured an ASOS OpenIdConnect server and an ASP.NET Core MVC app that uses "Microsoft.AspNetCore.Authentication.OpenIdConnect": "1.0.0" and "Microsoft.AspNetCore.Authentication.Cookies": "1.0.0". I have tested the "Authorization Code" workflow and everything works.
The client web app processes the authentication as expected and creates a cookie storing the id_token, access_token, and refresh_token.
How do I force Microsoft.AspNetCore.Authentication.OpenIdConnect to request a new access_token when it expires?
The asp.net core mvc app ignores the expired access_token.
I would like to have openidconnect see the expired access_token then make a call using the refresh token to get a new access_token. It should also update the cookie values. If the refresh token request fails I would expect openidconnect to "sign out" the cookie (remove it or something).
app.UseCookieAuthentication(new CookieAuthenticationOptions
{
    AutomaticAuthenticate = true,
    AutomaticChallenge = true,
    AuthenticationScheme = "Cookies"
});

app.UseOpenIdConnectAuthentication(new OpenIdConnectOptions
{
    ClientId = "myClient",
    ClientSecret = "secret_secret_secret",
    PostLogoutRedirectUri = "http://localhost:27933/",
    RequireHttpsMetadata = false,
    GetClaimsFromUserInfoEndpoint = true,
    SaveTokens = true,
    ResponseType = OpenIdConnectResponseType.Code,
    AuthenticationMethod = OpenIdConnectRedirectBehavior.RedirectGet,
    Authority = "http://localhost:27933",
    MetadataAddress = "http://localhost:27933/connect/config",
    Scope = { "email", "roles", "offline_access" },
});
It seems the OpenIdConnect authentication middleware for ASP.NET Core has no built-in logic to manage the access_token on the server after it has been received.
I found that I can intercept the cookie validation event and check whether the access token has expired. If so, I make a manual HTTP call to the token endpoint with grant_type=refresh_token.
Calling context.ShouldRenew = true; causes the cookie to be updated and sent back to the client in the response.
I have provided the basis of what I have done and will update this answer once all the work has been resolved.
app.UseCookieAuthentication(new CookieAuthenticationOptions
{
    AutomaticAuthenticate = true,
    AutomaticChallenge = true,
    AuthenticationScheme = "Cookies",
    ExpireTimeSpan = new TimeSpan(0, 0, 20),
    SlidingExpiration = false,
    CookieName = "WebAuth",
    Events = new CookieAuthenticationEvents()
    {
        OnValidatePrincipal = context =>
        {
            if (context.Properties.Items.ContainsKey(".Token.expires_at"))
            {
                var expire = DateTime.Parse(context.Properties.Items[".Token.expires_at"]);
                if (expire < DateTime.Now) // TODO: change to check whether it expires in the next 5 minutes.
                {
                    logger.Warn($"Access token has expired, user: {context.HttpContext.User.Identity.Name}");
                    // TODO: send the refresh token to ASOS. Update tokens in context.Properties.Items
                    //context.Properties.Items["Token.access_token"] = newToken;
                    context.ShouldRenew = true;
                }
            }
            return Task.FromResult(0);
        }
    }
});
You must enable the generation of the refresh_token by setting the following in startup.cs:
AuthorizationEndpointPath = "/connect/authorize"; // needed for the refresh token
TokenEndpointPath = "/connect/token"; // standard token endpoint name
In your token provider, before validating the token request at the end of the HandleTokenRequest method, make sure you have set the offline scope:
// Call SetScopes with the list of scopes you want to grant
// (specify offline_access to issue a refresh token).
ticket.SetScopes(
    OpenIdConnectConstants.Scopes.Profile,
    OpenIdConnectConstants.Scopes.OfflineAccess);
If that is set up properly, you should receive a refresh_token back when you log in with a password grant_type.
Then from your client you must issue the following request (I'm using Aurelia):
refreshToken() {
  let baseUrl = yourbaseUrl;
  let data = "client_id=" + this.appState.clientId
    + "&grant_type=refresh_token"
    + "&refresh_token=myRefreshToken";
  return this.http.fetch(baseUrl + 'connect/token', {
    method: 'post',
    body: data,
    headers: {
      'Content-Type': 'application/x-www-form-urlencoded',
      'Accept': 'application/json'
    }
  });
}
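A short follow-up sketch of consuming that call, assuming the endpoint returns the standard access_token / refresh_token / expires_in JSON (the storage key is a placeholder):

// Hypothetical consumer of refreshToken() above
function storeRefreshedTokens(authService) {
  return authService.refreshToken()
    .then(response => response.json())
    .then(tokens => {
      // access_token / refresh_token / expires_in are the standard OAuth2 response fields
      window.localStorage.setItem('authorizationData', JSON.stringify(tokens));
      return tokens;
    });
}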
And that's it. Make sure that your auth provider's HandleTokenRequest method is not trying to manipulate requests of type refresh_token:
public override async Task HandleTokenRequest(HandleTokenRequestContext context)
{
    if (context.Request.IsPasswordGrantType())
    {
        // Password type request processing only
        // code that shall not touch any refresh_token request
    }
    else if (!context.Request.IsRefreshTokenGrantType())
    {
        context.Reject(
            error: OpenIdConnectConstants.Errors.InvalidGrant,
            description: "Invalid grant type.");
        return;
    }
    return;
}
The refresh_token request should simply pass through this method; it is handled by another piece of middleware dedicated to refresh tokens.
If you want more in depth knowledge about what the auth server is doing, you can have a look at the code of the OpenIdConnectServerHandler:
https://github.com/aspnet-contrib/AspNet.Security.OpenIdConnect.Server/blob/master/src/AspNet.Security.OpenIdConnect.Server/OpenIdConnectServerHandler.Exchange.cs
On the client side you must also handle the automatic refresh of the token. Here is an example of an HTTP interceptor for Angular 1.x that handles 401 responses, refreshes the token, then retries the request:
'use strict';

app.factory('authInterceptorService',
  ['$q', '$injector', '$location', 'localStorageService',
  function ($q, $injector, $location, localStorageService) {

    var authInterceptorServiceFactory = {};
    var $http;

    var _request = function (config) {
      config.headers = config.headers || {};
      var authData = localStorageService.get('authorizationData');
      if (authData) {
        config.headers.Authorization = 'Bearer ' + authData.token;
      }
      return config;
    };

    var _responseError = function (rejection) {
      var deferred = $q.defer();
      if (rejection.status === 401) {
        var authService = $injector.get('authService');
        console.log("calling authService.refreshToken()");
        authService.refreshToken().then(function (response) {
          console.log("token refreshed, retrying to connect");
          _retryHttpRequest(rejection.config, deferred);
        }, function () {
          console.log("that didn't work, logging out.");
          authService.logOut();
          $location.path('/login');
          deferred.reject(rejection);
        });
      } else {
        deferred.reject(rejection);
      }
      return deferred.promise;
    };

    var _retryHttpRequest = function (config, deferred) {
      console.log('autorefresh');
      $http = $http || $injector.get('$http');
      $http(config).then(function (response) {
        deferred.resolve(response);
      },
      function (response) {
        deferred.reject(response);
      });
    }

    authInterceptorServiceFactory.request = _request;
    authInterceptorServiceFactory.responseError = _responseError;
    authInterceptorServiceFactory.retryHttpRequest = _retryHttpRequest;

    return authInterceptorServiceFactory;
  }]);
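To take effect, the interceptor still has to be registered with $httpProvider in the module's config block, roughly like this (using the same app module as above):

// Register the interceptor so every $http call goes through it
app.config(['$httpProvider', function ($httpProvider) {
  $httpProvider.interceptors.push('authInterceptorService');
}]);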
And here is an example I just did for Aurelia, this time I wrapped my http client into an http handler that checks if the token is expired or not. If it is expired it will first refresh the token, then perform the request. It uses a promise to keep the interface with the client-side data services consistent. This handler exposes the same interface as the aurelia-fetch client.
import {inject} from 'aurelia-framework';
import {HttpClient} from 'aurelia-fetch-client';
import {AuthService} from './authService';

@inject(HttpClient, AuthService)
export class HttpHandler {

  constructor(httpClient, authService) {
    this.http = httpClient;
    this.authService = authService;
  }

  fetch(url, options) {
    let _this = this;
    if (this.authService.tokenExpired()) {
      console.log("token expired");
      return new Promise(
        function (resolve, reject) {
          console.log("refreshing");
          _this.authService.refreshToken()
            .then(
              function (response) {
                console.log("token refreshed");
                _this.http.fetch(url, options).then(
                  function (success) {
                    console.log("call success", url);
                    resolve(success);
                  },
                  function (error) {
                    console.log("call failed", url);
                    reject(error);
                  });
              }, function (error) {
                console.log("token refresh failed");
                reject(error);
              });
        }
      );
    }
    else {
      // token is not expired, we return the promise from the fetch client
      return this.http.fetch(url, options);
    }
  }
}
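Usage then looks the same as with the plain fetch client; a sketch with a hypothetical data service and endpoint:

// Hypothetical data service that depends on the wrapper instead of HttpClient
import {inject} from 'aurelia-framework';
import {HttpHandler} from './httpHandler';

@inject(HttpHandler)
export class CustomerService {
  constructor(http) {
    this.http = http;
  }

  getCustomers() {
    // same interface as aurelia-fetch-client, but with transparent token refresh
    return this.http.fetch('api/customers').then(response => response.json());
  }
}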
For jQuery you can look at jquery-oauth:
https://github.com/esbenp/jquery-oauth
Hope this helps.
Following on from longday's answer, I have had success using this code to force a client refresh without having to manually query an OpenID endpoint:
OnValidatePrincipal = context =>
{
    if (context.Properties.Items.ContainsKey(".Token.expires_at"))
    {
        var expire = DateTime.Parse(context.Properties.Items[".Token.expires_at"]);
        if (expire < DateTime.Now) // TODO: change to check whether it expires in the next 5 minutes.
        {
            context.ShouldRenew = true;
            context.RejectPrincipal();
        }
    }
    return Task.FromResult(0);
}
The Issue
I'm trying to upload images directly to S3 from the browser and am getting stuck applying the content-length-range permission via boto's S3Connection.generate_url method.
There's plenty of information about signing POST forms, setting policies in general and even a heroku method for doing a similar submission. What I can't figure out for the life of me is how to add the "content-length-range" to the signed url.
With boto's generate_url method (example below), I can specify policy headers and have got it working for normal uploads. What I can't seem to add is a policy restriction on max file size.
Server Signing Code
## django request handler
from boto.s3.connection import S3Connection
from django.conf import settings
from django.http import HttpResponse
import mimetypes
import json

conn = S3Connection(settings.S3_ACCESS_KEY, settings.S3_SECRET_KEY)
object_name = request.GET['objectName']
content_type = mimetypes.guess_type(object_name)[0]
signed_url = conn.generate_url(
    expires_in=300,
    method="PUT",
    bucket=settings.BUCKET_NAME,
    key=object_name,
    headers={'Content-Type': content_type, 'x-amz-acl': 'public-read'})
return HttpResponse(json.dumps({'signedUrl': signed_url}))
On the client I'm using ReactS3Uploader, which is based on tadruj's s3upload.js script. It shouldn't affect anything, since it just passes along whatever the signed URL covers, but it's copied below for completeness.
ReactS3Uploader JS Code (simplified)
uploadFile: function() {
  new S3Upload({
    fileElement: this.getDOMNode(),
    signingUrl: '/api/get_signing_url/',
    onProgress: this.props.onProgress,
    onFinishS3Put: this.props.onFinish,
    onError: this.props.onError
  });
},

render: function() {
  return this.transferPropsTo(
    React.DOM.input({type: 'file', onChange: this.uploadFile})
  );
}
S3upload.js
S3Upload.prototype.signingUrl = '/sign-s3';
S3Upload.prototype.fileElement = null;

S3Upload.prototype.onFinishS3Put = function(signResult) {
  return console.log('base.onFinishS3Put()', signResult.publicUrl);
};

S3Upload.prototype.onProgress = function(percent, status) {
  return console.log('base.onProgress()', percent, status);
};

S3Upload.prototype.onError = function(status) {
  return console.log('base.onError()', status);
};

function S3Upload(options) {
  if (options == null) {
    options = {};
  }
  for (var option in options) {
    if (options.hasOwnProperty(option)) {
      this[option] = options[option];
    }
  }
  this.handleFileSelect(this.fileElement);
}

S3Upload.prototype.handleFileSelect = function(fileElement) {
  this.onProgress(0, 'Upload started.');
  var files = fileElement.files;
  var result = [];
  for (var i = 0; i < files.length; i++) {
    var f = files[i];
    result.push(this.uploadFile(f));
  }
  return result;
};

S3Upload.prototype.createCORSRequest = function(method, url) {
  var xhr = new XMLHttpRequest();
  if (xhr.withCredentials != null) {
    xhr.open(method, url, true);
  }
  else if (typeof XDomainRequest !== "undefined") {
    xhr = new XDomainRequest();
    xhr.open(method, url);
  }
  else {
    xhr = null;
  }
  return xhr;
};

S3Upload.prototype.executeOnSignedUrl = function(file, callback) {
  var xhr = new XMLHttpRequest();
  xhr.open('GET', this.signingUrl + '&objectName=' + file.name, true);
  xhr.overrideMimeType && xhr.overrideMimeType('text/plain; charset=x-user-defined');
  xhr.onreadystatechange = function() {
    if (xhr.readyState === 4 && xhr.status === 200) {
      var result;
      try {
        result = JSON.parse(xhr.responseText);
      } catch (error) {
        this.onError('Invalid signing server response JSON: ' + xhr.responseText);
        return false;
      }
      return callback(result);
    } else if (xhr.readyState === 4 && xhr.status !== 200) {
      return this.onError('Could not contact request signing server. Status = ' + xhr.status);
    }
  }.bind(this);
  return xhr.send();
};

S3Upload.prototype.uploadToS3 = function(file, signResult) {
  var xhr = this.createCORSRequest('PUT', signResult.signedUrl);
  if (!xhr) {
    this.onError('CORS not supported');
  } else {
    xhr.onload = function() {
      if (xhr.status === 200) {
        this.onProgress(100, 'Upload completed.');
        return this.onFinishS3Put(signResult);
      } else {
        return this.onError('Upload error: ' + xhr.status);
      }
    }.bind(this);
    xhr.onerror = function() {
      return this.onError('XHR error.');
    }.bind(this);
    xhr.upload.onprogress = function(e) {
      var percentLoaded;
      if (e.lengthComputable) {
        percentLoaded = Math.round((e.loaded / e.total) * 100);
        return this.onProgress(percentLoaded, percentLoaded === 100 ? 'Finalizing.' : 'Uploading.');
      }
    }.bind(this);
  }
  xhr.setRequestHeader('Content-Type', file.type);
  xhr.setRequestHeader('x-amz-acl', 'public-read');
  return xhr.send(file);
};

S3Upload.prototype.uploadFile = function(file) {
  return this.executeOnSignedUrl(file, function(signResult) {
    return this.uploadToS3(file, signResult);
  }.bind(this));
};

module.exports = S3Upload;
Any help would be greatly appreciated here as I've been banging my head against the wall for quite a few hours now.
You can't add it to a signed PUT URL. This only works with the signed policy that goes along with a POST because the two mechanisms are very different.
Signing a URL is a lossy (for lack of a better term) process. You generate the string to sign, then sign it. You send the signature with the request, but you discard and do not send the string to sign. S3 then reconstructs what the string to sign should have been, for the request it receives, and generates the signature you should have sent with that request. There's only one correct answer, and S3 doesn't know what string you actually signed. The signature matches, or doesn't, either because you built the string to sign incorrectly, or your credentials don't match, and it doesn't know which of these possibilities is the case. It only knows, based on the request you sent, the string you should have signed and what the signature should have been.
With that in mind, for content-length-range to work with a signed URL, the client would need to actually send such a header with the request... which doesn't make a lot of sense.
Conversely, with POST uploads, there is more information communicated to S3. It's not only going on whether your signature is valid, it also has your policy document... so it's possible to include directives -- policies -- with the request. They are protected from alteration by the signature, but they aren't encrypted or hashed -- the entire policy is readable by S3 (so, by contrast, we'll call this the opposite, "lossless.")
This difference is why you can't do what you are trying to do with PUT while you can with POST.
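If the goal is simply to cap upload size from the browser, a presigned POST policy is the usual route. A rough sketch on the signing server using the AWS SDK for JavaScript (v2); the bucket name, region, key, and size limits are placeholders, and this swaps in the Node SDK's createPresignedPost rather than boto, so treat it as illustrative only:

// Hypothetical Node.js signing endpoint using a presigned POST policy
const AWS = require('aws-sdk');
const s3 = new AWS.S3({ region: 'us-east-1' }); // region is a placeholder

function getPresignedPost(objectName, contentType, callback) {
  const params = {
    Bucket: 'my-upload-bucket',            // placeholder bucket
    Expires: 300,                          // policy valid for 5 minutes
    Fields: {
      key: objectName,
      'Content-Type': contentType,
      acl: 'public-read'
    },
    Conditions: [
      ['content-length-range', 0, 10 * 1024 * 1024], // reject uploads over ~10 MB
      { acl: 'public-read' }
    ]
  };
  // Returns { url, fields } - the browser then POSTs a multipart form with
  // these fields plus the file, and S3 enforces the size range itself.
  s3.createPresignedPost(params, callback);
}

The client-side flow is then the multipart POST described in the links above; the key point from this answer is that the size limit lives in the policy document, not in the signature of a PUT URL.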