I cannot get my CloudWatch Synthetics canary to hit an API Gateway endpoint; I keep getting the error below. I can hit the API using Postman without an authentication key, so I'm not sure why I can't do the same from the canary. The API is in the same VPC as the canary, if that helps. Please can anyone help me with this? (The ARNs are changed for privacy, so please ignore that.)
{"message":"User: anonymous is not authorized to perform: execute-api:Invoke on resource: arn:aws:execute-api:us-east-1:********2234:54354534534/test/GET/mezzanine with an explicit deny"}
The code I have for the canary is:
const synthetics = require('Synthetics');
const log = require('SyntheticsLogger');
const syntheticsConfiguration = synthetics.getConfiguration();

const apiCanaryBlueprint = async function () {
    syntheticsConfiguration.setConfig({
        restrictedHeaders: [], // Value of these headers will be redacted from logs and reports
        restrictedUrlParameters: [] // Values of these url parameters will be redacted from logs and reports
    });

    // Handle validation for positive scenario
    const validateSuccessful = async function(res) {
        return new Promise((resolve, reject) => {
            if (res.statusCode < 200 || res.statusCode > 299) {
                throw new Error(res.statusCode + ' ' + res.statusMessage);
            }

            let responseBody = '';
            res.on('data', (d) => {
                responseBody += d;
            });

            res.on('end', () => {
                // Add validation on 'responseBody' here if required.
                resolve();
            });
        });
    };

    // Set request option for Verify https://453453453.execute-api.us-east-1.amazonaws.com
    let requestOptionsStep1 = {
        hostname: '4534535.execute-api.us-east-1.amazonaws.com',
        method: 'GET',
        path: '/test/mezzanine',
        port: '443',
        protocol: 'https:',
        body: "",
        headers: {"health":"true"}
    };
    requestOptionsStep1['headers']['User-Agent'] = [synthetics.getCanaryUserAgentString(), requestOptionsStep1['headers']['User-Agent']].join(' ');

    // Set step config option for Verify https://5345345345435.execute-api.us-east-1.amazonaws.com
    let stepConfig1 = {
        includeRequestHeaders: true,
        includeResponseHeaders: true,
        includeRequestBody: true,
        includeResponseBody: true,
        continueOnHttpStepFailure: true
    };

    await synthetics.executeHttpStep('Verify https://45345334535.execute-api.us-east-1.amazonaws.com', requestOptionsStep1, validateSuccessful, stepConfig1);
};

exports.handler = async () => {
    return await apiCanaryBlueprint();
};
The API has no authorization configured, and the resource policy is:
{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": "", "Action": "execute-api:Invoke", "Resource": "arn:aws:execute-api:us-east-1:34324234234234:343243242///" }, { "Effect": "Deny", "Principal": "", "Action": "execute-api:Invoke", "Resource": "arn:aws:execute-api:us-east-1:3432434234:434324234///", "Condition": { "NotIpAddress": { "aws:SourceIp": [ "23.23.23.23/32", "23.23.23.23/32", "23.23.23.23/32" ] } } } ] }
I have also tried leaving the resource policy blank, and also using the policy below, but I still cannot get this canary to work.
{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": "", "Action": "execute-api:Invoke", "Resource": "arn:aws:execute-api:us-east-1:4534534543:4543534543534///" } ] }
I just want the canary to check the endpoint.
Related
I have a role called awsiotsdk that has full access to IoT. I'm trying to get temporary credentials for Connect, Subscribe, and Receive, but my credentials are undefined. What could be the reason for this?
var AWS = require("aws-sdk");
var sts = new AWS.STS({ apiVersion: "2011-06-15" });

const iotpolicy = `{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "VisualStudioCode",
      "Effect": "Allow",
      "Action": [
        "iot:Connect",
        "iot:Subscribe",
        "iot:Receive"
      ],
      "Resource": "*"
    }
  ]
}`;

const role = {
  RoleArn: "arn:aws:iam::258030452305:role/awsiotsdk",
  Policy: iotpolicy,
  RoleSessionName: "RoleSession1",
};

sts.assumeRole(role, (err, data) => {
  console.log({
    accessKeyId: data.Credentials.accessKeyId,
    secretAcessKey: data.Credentials.secretAccessKey,
    sessionToken: data.Credentials.sessionToken,
  });
});
Output:
{
accessKeyId: undefined,
secretAcessKey: undefined,
sessionToken: undefined
}
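One thing worth checking here: in the AWS SDK for JavaScript v2, the AssumeRole response returns the credential fields with capitalized names under data.Credentials, and err should be checked before reading data at all. A minimal sketch of reading the response with that casing (the role ARN below is a placeholder):

var AWS = require("aws-sdk");
var sts = new AWS.STS({ apiVersion: "2011-06-15" });

// Same request shape as above; the role ARN is a placeholder
var role = {
  RoleArn: "arn:aws:iam::123456789012:role/awsiotsdk",
  RoleSessionName: "RoleSession1",
};

sts.assumeRole(role, (err, data) => {
  if (err) {
    // An AccessDenied or malformed-policy error would otherwise go unnoticed
    console.error(err);
    return;
  }
  // Note the capitalized property names in the STS response
  console.log({
    accessKeyId: data.Credentials.AccessKeyId,
    secretAccessKey: data.Credentials.SecretAccessKey,
    sessionToken: data.Credentials.SessionToken,
  });
});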
I am facing this timeout error in the AWS editor (Node.js environment). I understand the timeout may be set to 3 seconds, but this small piece of code should easily run within 3 seconds. Also, I have used other methods to put data into my DynamoDB table, like batchWrite, and they work fine!
Response:
{
"errorMessage": "2021-02-08T15:58:57.631Z 66890dfe-6f3a-42ae-b689-510bb8027a9d Task timed out after 3.03 seconds"
}
My code:
const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient({
  region: 'ap-south-1'
});

exports.handle = function(event, context, callback) {
  var params = {
    TableName: 'guestbook',
    Item: {
      "date": Date.now(),
      "message": "I love your website"
    }
  };

  docClient.put(params, function(err, data) {
    if (err) {
      callback(err, null);
    } else {
      callback(null, data);
    }
  });
};
I am trying to put data into my DynamoDB table. My role policy is:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": "dynamodb:*",
"Resource": "*"
}
]
}
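For debugging, here is a sketch of the same put with a short HTTP timeout on the client and promise-based error logging, so that a connectivity problem (for example, a Lambda inside a VPC with no route to DynamoDB) surfaces as an error in CloudWatch before the 3-second function timeout. The table name and item match the code above; the timeout values are only illustrative.

const AWS = require('aws-sdk');

// Short connect/socket timeouts so a hung connection fails fast instead of
// silently consuming the whole 3-second Lambda timeout (values are illustrative)
const docClient = new AWS.DynamoDB.DocumentClient({
  region: 'ap-south-1',
  httpOptions: { connectTimeout: 1000, timeout: 2000 },
  maxRetries: 1
});

exports.handle = async () => {
  const params = {
    TableName: 'guestbook',
    Item: {
      "date": Date.now(),
      "message": "I love your website"
    }
  };

  try {
    return await docClient.put(params).promise();
  } catch (err) {
    // A timed-out connection or missing permissions will be logged here
    console.error('put failed:', err);
    throw err;
  }
};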
I want to allow the Lambda service to create a deployment inside my VPC, so I have an array of subnet IDs of type Output<string>[] that I want to put into a role policy as follows:
export const createNetworkInterfacePolicy = new aws.iam.RolePolicy(
"network-interface-policy-2",
{
policy: pulumi.interpolate `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:CreateNetworkInterfacePermission"],
"Resource": [
"arn:aws:ec2:${region}:${callerIdentity.accountId}:network-interface/*"
],
"Condition": {
"StringEquals": {
"ec2:Subnet": ${JSON.stringify(vpc.vpcPrivateSubnetIds.map(item => item.apply(JSON.stringify)))},
"ec2:AuthorizedService": "lambda.amazonaws.com"
}
}
}
]
}`,
role: deploymentRole
}
);
Unfortunately what I end up with is:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:CreateNetworkInterfacePermission"
],
"Resource": [
"arn:aws:ec2:us-east-2:removedAccountId:network-interface/*"
],
"Condition": {
"StringEquals": {
"ec2:Subnet": [
"Calling [toJSON] on an [Output<T>] is not supported.\n\nTo get the value of an Output as a JSON value or JSON string consider either:\n 1: o.apply(v => v.toJSON())\n 2: o.apply(v => JSON.stringify(v))\n\nSee https://pulumi.io/help/outputs for more details.\nThis function may throw in a future version of #pulumi/pulumi.",
"Calling [toJSON] on an [Output<T>] is not supported.\n\nTo get the value of an Output as a JSON value or JSON string consider either:\n 1: o.apply(v => v.toJSON())\n 2: o.apply(v => JSON.stringify(v))\n\nSee https://pulumi.io/help/outputs for more details.\nThis function may throw in a future version of #pulumi/pulumi."
],
"ec2:AuthorizedService": "lambda.amazonaws.com"
}
}
}
]
}
I tried many combinations, but none of them work. How do I generate a JSON array from Output<string>[]?
Sometimes it's easiest to wrap an apply around the entire creation of another resource. In this case appTaskPolicy becomes an OutputInstance<aws.iam.Policy>, which you can then feed into other parts of your program using its own outputs.
You'll need to import * as pulumi from '@pulumi/pulumi'; if you haven't already, for this to work.
const vpc = awsx.Network.getDefault();
const appTaskPolicyName = named('app-task-policy');
const appTaskPolicy = pulumi.all(vpc.publicSubnetIds).apply(([...subnetIds]) => {
return new aws.iam.Policy(appTaskPolicyName, {
policy: {
Version: '2012-10-17',
Statement: [
{
Action: ['sqs:GetQueueUrl', 'sqs:SendMessage'],
Resource: [
'someresourcearn'
],
Effect: 'Allow',
Condition: {
StringEquals: {
'ec2:Subnet': subnetIds,
'ec2:AuthorizedService': 'lambda.amazonaws.com'
}
}
}
]
}
});
});
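If you would rather keep the pulumi.interpolate template from the question, another sketch is to collapse the subnet IDs into a single Output<string> containing a JSON array first, and interpolate that. The vpc, region, and callerIdentity names below are the question's existing variables:

import * as pulumi from "@pulumi/pulumi";

// Output<string>[] -> Output<string[]> -> Output<string> holding a JSON array,
// e.g. ["subnet-aaa","subnet-bbb"]
const subnetIdsJson = pulumi
    .all(vpc.vpcPrivateSubnetIds)
    .apply(ids => JSON.stringify(ids));

const policyJson = pulumi.interpolate`{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["ec2:CreateNetworkInterfacePermission"],
            "Resource": ["arn:aws:ec2:${region}:${callerIdentity.accountId}:network-interface/*"],
            "Condition": {
                "StringEquals": {
                    "ec2:Subnet": ${subnetIdsJson},
                    "ec2:AuthorizedService": "lambda.amazonaws.com"
                }
            }
        }
    ]
}`;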
I want to know how I can set an assume role policy document to something more complex than a service principal.
This is what I have found so far, and maybe this will work:
this.TestRole = new iam.Role(this, "Test", {
assumedBy: new iam.ServicePrincipal("ec2.amazonaws.com"),
roleName: "TestRole"
})
But I want to add something like this:
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sts:AssumeRole"
],
"Principal": {
"AWS": [
"arn:aws:iam::account1:role/Role1",
"arn:aws:iam::account2:role/Role2"
]
}
},
{
"Effect": "Allow",
"Action": [
"sts:AssumeRoleWithSAML"
],
"Principal": {
"Federated": {
some sub and so on
}
},
"Condition": {
"StringEquals": {
"SAML:aud": some saml stuff
}
}
}
]
},
I have no clue how to achieve this... can you help me?
OK, it's possible to do something like this:
this.TestRole = new iam.Role(this, "Test", {
assumedBy: new iam.FederatedPrincipal(new cdk.FnSub("arn:aws:iam::${AWS::AccountId}:saml-provider/SAMLIDP"), {
"StringEquals": {
"SAML:aud": "https://signin.aws.amazon.com/saml"
}
}, "sts:AssumeRoleWithSAML"),
roleName: parent.getApplicationName().charAt(0).toUpperCase() + parent.getApplicationName().slice(1)
})
That was easy :-/ But now I want to add the two roles with the action sts:AssumeRole, and I don't know how to add another principal...
Fortunately, https://github.com/aws/aws-cdk/pull/1377 delivered the fix we need. You can now use aws_iam.CompositePrincipal to add multiple principals, including service principals.
For example, in Python for a Data Pipeline Role:
pipeline_role = aws_iam.Role(
scope=self, id='pipeline-role',
role_name='pipeline',
assumed_by=aws_iam.CompositePrincipal(
aws_iam.ServicePrincipal('datapipeline.amazonaws.com'),
aws_iam.ServicePrincipal('elasticmapreduce.amazonaws.com')
)
)
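For the two cross-account roles in the question, here is a TypeScript sketch of the same idea (the role ARNs are the placeholders from the question, the import is CDK v1 style, and this assumes the code sits inside a Stack or Construct as in the original snippet):

import * as iam from "@aws-cdk/aws-iam";

// Trust policy that lets either of the two roles assume this role
this.TestRole = new iam.Role(this, "Test", {
    roleName: "TestRole",
    assumedBy: new iam.CompositePrincipal(
        new iam.ArnPrincipal("arn:aws:iam::account1:role/Role1"),
        new iam.ArnPrincipal("arn:aws:iam::account2:role/Role2")
    )
});

Depending on your CDK version, mixing principals that need different assume-role actions (sts:AssumeRole and sts:AssumeRoleWithSAML) in one CompositePrincipal may not be supported, which is where the assumeRolePolicy approach in the next answer comes in.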
The documentation for iam.RoleProps#assumedBy mentions that you can access the assume policy using the iam.Role#assumeRolePolicy attribute. You could try something like the following:
this.TestRole = new iam.Role(this, 'Test', {
assumedBy: new iam.FederatedPrincipal(/*...*/)
/* ... */
});
this.TestRole.assumeRolePolicy.addStatement(
new iam.PolicyStatement().allow()
.addAction('sts:AssumeRole')
.addAwsPrincipal('arn:aws:iam::account1:role/Role1')
.addAwsPrincipal('arn:aws:iam::account2:role/Role2')
);
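In more recent CDK versions the fluent PolicyStatement API shown above has been replaced by a props-style constructor; a sketch of the same statement in that style (role and ARNs as in the question):

this.TestRole.assumeRolePolicy?.addStatements(
    new iam.PolicyStatement({
        actions: ["sts:AssumeRole"],
        principals: [
            new iam.ArnPrincipal("arn:aws:iam::account1:role/Role1"),
            new iam.ArnPrincipal("arn:aws:iam::account2:role/Role2")
        ]
    })
);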
I am working on understanding AWS Lambda. This is only slightly modified from the S3 template, to view the content type of an uploaded file.
My "Received event" log and the params log both show up in CloudWatch; however, it's as if s3.getObject() never gets executed, since neither the error log nor the data log ever appears, and all I get is a task timed out after the timeout period.
var aws = require('aws-sdk');
var s3 = new aws.S3({apiVersion: 'latest'});

exports.handler = function(event, context) {
    console.log('Received event:', JSON.stringify(event, null, 2));

    // Get the object from the event and show its content type
    var bucket = event.Records[0].s3.bucket.name;
    var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
    var params = {
        Bucket: bucket,
        Key: key
    };
    console.log(params);

    s3.getObject(params, function(err, data) {
        if (err) console.log(err, err.stack); // an error occurred
        else console.log(data);               // successful response
        /*
        if (err) {
            var message = "Error getting object " + key + " from bucket " + bucket +
                ". Make sure they exist and your bucket is in the same region as this function.";
            console.log(message);
            context.fail(message);
        } else {
            console.log('CONTENT TYPE:', data.ContentType);
            context.succeed(data.ContentType);
        }
        */
    });
};
Here is the current IAM role; do I need to make some changes?
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"ec2:CreateNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:DetachNetworkInterface",
"ec2:DeleteNetworkInterface"],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject"],
"Resource": "arn:aws:s3:::*"
},
{
"Action": "lambda:*",
"Effect": "Allow",
"Resource": "arn:aws:lambda:*"
}]
}
I've run into this issue before. It happens after the validate, build, and sign callbacks execute, but before the send callback occurs. Essentially, one of the sockets goes into zombie mode. I've reported this to AWS, but they have not yet produced a fix. To "fix" it, set the socket timeout when instantiating the S3 client. The socket will time out and the operation will automatically retry.
var s3 = new AWS.S3({httpOptions: { timeout: 2000 }});
By default, the timeout is two minutes, which is why it is problematic if this occurs in a Lambda where the timeout is significantly shorter.
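A slightly fuller sketch, if you also want to bound connection setup and cap retries (the numbers are only examples):

var AWS = require('aws-sdk');

// Bound both connection setup and the socket itself, and cap retries,
// so a zombie socket cannot consume the whole Lambda timeout
var s3 = new AWS.S3({
  httpOptions: { connectTimeout: 1000, timeout: 2000 },
  maxRetries: 2
});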
Before ListBucket you need ListAllMyBuckets. I also advise you to restrict your Lambda's access:
{
"Statement":[
{
"Effect":"Allow",
"Action":[
"s3:ListAllMyBuckets"
],
"Resource":"arn:aws:s3:::*"
},
{
"Effect":"Allow",
"Action":[
"s3:ListBucket",
"s3:GetBucketLocation"
],
"Resource":"arn:aws:s3:::yourBucket"
},
{
"Effect":"Allow",
"Action":[
"s3:GetObject"
],
"Resource":"arn:aws:s3:::yourBucket/*"
}
]
}
You must include an explicit callback at the end of your Lambda, or it will automatically stop running before your S3 callback function ever gets hit.
As an example:
module.exports.getS3Object = async (event, context, callback) => {
const AWS = require('aws-sdk');
const S3 = new AWS.S3();
S3.getObject({ Bucket: "exampleBucket", Key: "exampleKey" })
.promise()
.then(data => console.log("finished"))
.catch(err => console.log("there was an error" + err))
callback(null, {statusCode: "200", body:"yay"}); //MUST INCLUDE
}
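Alternatively, with an async handler you can drop the callback entirely and await the promise, so the handler stays alive until getObject settles. A minimal sketch using the same example bucket and key:

const AWS = require('aws-sdk');
const S3 = new AWS.S3();

module.exports.getS3Object = async (event) => {
  // Awaiting the promise keeps the handler running until S3 responds
  const data = await S3.getObject({ Bucket: "exampleBucket", Key: "exampleKey" }).promise();
  console.log('CONTENT TYPE:', data.ContentType);
  return { statusCode: "200", body: "yay" };
};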