How to use GSI in DynamoDB? - amazon-web-services

I am using the AWS Console and NodeJS.
I have a DynamoDB table of users with a partition key (user_id), a sort key (company_id), and other attributes.
One of the attributes is the user's email, which is unique.
I need to get the user_id by email, but I don't have the user's user_id or company_id.
I think that I should use a Global Secondary Index.
I clicked on the users table, opened the Indexes tab, and created a GSI for this table (name: email, type: GSI, partition key: email (String), attributes: user_id).
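(For reference, the equivalent index could also be created programmatically rather than through the console. The sketch below is only illustrative; the client setup and capacity values are assumptions, not part of my actual setup.)

const AWS = require("aws-sdk");
const dynamodb = new AWS.DynamoDB();

// Adds a GSI named "email" keyed on the email attribute and projects user_id into it.
dynamodb.updateTable({
    TableName: "users",
    AttributeDefinitions: [{ AttributeName: "email", AttributeType: "S" }],
    GlobalSecondaryIndexUpdates: [{
        Create: {
            IndexName: "email",
            KeySchema: [{ AttributeName: "email", KeyType: "HASH" }],
            Projection: { ProjectionType: "INCLUDE", NonKeyAttributes: ["user_id"] },
            // Required for provisioned-mode tables; omit for on-demand (PAY_PER_REQUEST) tables.
            ProvisionedThroughput: { ReadCapacityUnits: 1, WriteCapacityUnits: 1 }
        }
    }]
}, (err, data) => {
    if (err) console.error(err);
    else console.log("GSI creation started:", data.TableDescription.TableStatus);
});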
I am using the Query method of DocumentClient. This is my payload:
payload = {
    "TableName": "users",
    "IndexName": "email",
    "KeyConditionExpression": "#index = :index_value",
    "ExpressionAttributeNames": {
        "#index": "email"
    },
    "ExpressionAttributeValues": {
        ":index_value": {"S": "test@gmail.com"}
    },
    "ProjectionExpression": "user_id",
    "ScanIndexForward": false
};
This is my error from CloudWatch:
"errorMessage": "One or more parameter values were invalid: Condition parameter type does not match schema type"

I have found a solution while I was writing this question.
Since I am using the DocumentClient, my payload should look like this:
payload = {
    "TableName": "users",
    "IndexName": "email",
    "KeyConditionExpression": "#index = :index_value",
    "ExpressionAttributeNames": {
        "#index": "email"
    },
    "ExpressionAttributeValues": {
        ":index_value": "test@gmail.com" // <---------------- plain value, no type wrapper
    },
    "ProjectionExpression": "user_id",
    "ScanIndexForward": false
};
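For completeness, a minimal sketch of running this query with the DocumentClient (the helper name getUserIdByEmail is just for illustration):

const AWS = require("aws-sdk");
const documentClient = new AWS.DynamoDB.DocumentClient();

async function getUserIdByEmail(email) {
    const result = await documentClient.query({
        TableName: "users",
        IndexName: "email",
        KeyConditionExpression: "#index = :index_value",
        ExpressionAttributeNames: { "#index": "email" },
        ExpressionAttributeValues: { ":index_value": email },
        ProjectionExpression: "user_id",
        ScanIndexForward: false
    }).promise();

    // email is unique, so at most one item is expected.
    return result.Items.length > 0 ? result.Items[0].user_id : null;
}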
Hope it helps someone.

Related

How to use direct lambda resolver in appsync?

I have tried to use a direct Lambda resolver with my AppSync schema to get items from a DynamoDB table, but I am getting an error. I can see the results in the CloudWatch log, but I can't get the result on the query page.
One other question: my Post @hasMany Comments and a Comment @belongsTo a Post; how can I get the comments directly with the post using a direct Lambda resolver? I think I have to do a separate query for comments? An AppSync pipeline is good at getting the queries, but it is very, very slow.
My lambda function:
const AWS = require("aws-sdk");
const dynamo = new AWS.DynamoDB.DocumentClient();

exports.handler = async (event) => {
    const response = await dynamo.get({
        TableName: "Post-xxxxxxxxxxxxx",
        Key: {
            id: event.arguments.id
        }
    }).promise();
    console.log(response); // I can see the response here in CloudWatch.
    return JSON.stringify(response);
};
My query:
query MyQuery {
    getPost(id: "xxxxxx-xxxxx-xxxx-xxxxxxx") {
        id
        title
    }
}
Query result:
{
    "data": {
        "getPost": null
    },
    "errors": [
        {
            "path": [
                "getPost",
                "id"
            ],
            "locations": null,
            "message": "Cannot return null for non-nullable type: 'ID' within parent 'Post' (/getPost/id)"
        },
        {
            "path": [
                "getPost",
                "title"
            ],
            "locations": null,
            "message": "Cannot return null for non-nullable type: 'String' within parent 'Post' (/getPost/title)"
        }
    ]
}
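A likely cause, judging from the handler above (not confirmed in the post): the handler returns JSON.stringify(response), i.e. a string, so AppSync cannot map the non-nullable Post fields. Below is a sketch that returns the item itself and, for the @hasMany question, fetches the comments in the same handler. The Comment table name, the byPost index, and the postID attribute are assumptions.

const AWS = require("aws-sdk");
const dynamo = new AWS.DynamoDB.DocumentClient();

exports.handler = async (event) => {
    // Return the Post object itself, not a JSON string, so AppSync can map id and title.
    const post = await dynamo.get({
        TableName: "Post-xxxxxxxxxxxxx",
        Key: { id: event.arguments.id }
    }).promise();

    if (!post.Item) {
        return null;
    }

    // Hypothetical: if each Comment stores a postID that is indexed by a GSI named "byPost",
    // the comments can be fetched here instead of through a separate resolver.
    const comments = await dynamo.query({
        TableName: "Comment-xxxxxxxxxxxxx",
        IndexName: "byPost",
        KeyConditionExpression: "postID = :postID",
        ExpressionAttributeValues: { ":postID": event.arguments.id }
    }).promise();

    // The exact shape of the comments field depends on how it is declared in the schema.
    return { ...post.Item, comments: comments.Items };
};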

Account Locked attribute not getting added in response for scim2 GET Users API in wso2

I am trying to retrieve the list of users that have locked accounts in WSO2 IS version 5.9.
I tried after adding the account lock attribute to the below claim dialects:
http://schemas.xmlsoap.org/ws/2005/05/identity
urn:ietf:params:scim:schemas:core:2.0
urn:ietf:params:scim:schemas:core:2.0:User
urn:ietf:params:scim:schemas:extension:enterprise:2.0:User
I have also followed the below URL to add the custom claim:
https://is.docs.wso2.com/en/5.9.0/develop/extending-scim2-user-schemas/
{ "attributeURI":"urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:accountLock",
"attributeName":"accountLock",
"dataType":"boolean",
"multiValued":"false",
"description":"Account lock",
"required":"false",
"caseExact":"false",
"mutability":"readwrite",
"returned":"default",
"uniqueness":"none",
"subAttributes":"null",
"multiValuedAttributeChildName":"null",
"canonicalValues":[],
"referenceTypes":[]
}
But I am still not able to get the accountLock attribute in the response of the SCIM2 GET Users API.
Response
"totalResults": 10,
"startIndex": 1,
"itemsPerPage": 10,
"schemas": [
"urn:ietf:params:scim:api:messages:2.0:ListResponse"
],
"Resources": [
{
"emails": [
"divya#abc.com"
],
"meta": {
"created": "2020-06-25T07:49:35.465Z",
"lastModified": "2020-06-25T11:20:13.482Z",
"resourceType": "User"
},
"name": {
"givenName": "guest",
"familyName": "guest"
},
"groups": [
{
"display": "Application/sp1"
},
{
"display": "Application/sp2"
},
{
"display": "Application/Read"
}
],
"id": "9ffbed2e-3703-470c-a2c8-e738f4c09709",
"userName": "guest12"
}
]}```
The following reasons may cause the accountLock attribute not to appear in the SCIM2 GET Users response.
You might have missed adding the new attribute ("urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:accountLock") as a sub-attribute of the urn:ietf:params:scim:schemas:extension:enterprise:2.0:User object, i.e. "subAttributes": "accountLock verifyEmail askPassword employeeNumber costCenter organization division department manager" (see point 3 in https://is.docs.wso2.com/en/5.9.0/develop/extending-scim2-user-schemas/#extending-the-scim-20-api):
"attributeURI":"urn:ietf:params:scim:schemas:extension:enterprise:2.0:User",
"attributeName":"urn:ietf:params:scim:schemas:extension:enterprise:2.0:User",
"dataType":"complex",
"multiValued":"false",
"description":"Enterprise User",
"required":"false",
"caseExact":"false",
"mutability":"readWrite",
"returned":"default",
"uniqueness":"none",
"subAttributes":"accoutLock verifyEmail askPassword employeeNumber costCenter organization division department manager",
"canonicalValues":[],
"referenceTypes":["external"]
}```
The mapped attribute of the added custom claim (https://is.docs.wso2.com/en/5.9.0/develop/extending-scim2-user-schemas/#add-the-custom-claim) should be an existing attribute in the LDAP schema if you are using the default LDAP userstore. (If you have made this mistake, you won't be able to add/update the claim value; it gives a "One or more attributes you are trying to add/update are not supported by underlying LDAP for user:" error.)
The SCIM2 GET Users response doesn't contain attributes that don't have a value, so set the claim value to true/false.
Moreover, it is enough to add the new attribute to the urn:ietf:params:scim:schemas:extension:enterprise:2.0:User claim dialect. Follow the steps in https://is.docs.wso2.com/en/5.9.0/develop/extending-scim2-user-schemas/.

AppSync check if DynamoDB record exists

I am trying to write a resolver for AppSync that derives the value for a Boolean field based on the existence of a record in DynamoDB.
I currently have the following request mapping template:
{
    "version": "2017-02-28",
    "operation": "GetItem",
    "key": {
        "field1": $util.dynamodb.toDynamoDBJson($ctx.args.field1),
        "field2": $util.dynamodb.toDynamoDBJson($ctx.args.field2)
    }
}
And the following response mapping template:
#if($util.isNull($ctx.result))
    #set($exists = false)
#else
    #set($exists = true)
#end
$util.toJson({
    "field1": $ctx.args.field1,
    "field2": $ctx.args.field2,
    "exists": $exists
})
This works correctly if the record exists, but if it does not, AppSync simply returns "null" for the entire API call and does not seem to evaluate the response mapping template at all. Is there any way I can instruct it not to do this?
Another option would be to perform a query and look at the length of the response, but I have no idea how to check the length in these templates.
This is expected behavior for the 2017 version of the request mapping template. If you would like $ctx.result to be evaluated, switch to the 2018 version as below:
{
    "version": "2018-05-29",
    "operation": "GetItem",
    "key": {
        "id": $util.dynamodb.toDynamoDBJson($ctx.args.id)
    }
}
Refer to this change log for additional details.

HIVE_INVALID_METADATA in Amazon Athena

How can I work around the following error in Amazon Athena?
HIVE_INVALID_METADATA: com.facebook.presto.hive.DataCatalogException: Error: : expected at the position 8 of 'struct<x-amz-request-id:string,action:string,label:string,category:string,when:string>' but '-' is found. (Service: null; Status Code: 0; Error Code: null; Request ID: null)
When looking at position 8 mentioned in the error, in the database table generated by AWS Glue and connected to Athena, I can see that it has a column named attributes with the corresponding struct data type:
struct <
x-amz-request-id:string,
action:string,
label:string,
category:string,
when:string
>
My guess is that the error occurs because the attributes field is not always populated (c.f. the _session.start event below) and does not always contain all fields (e.g. the DocumentHandling event below does not contain the attributes.x-amz-request-id field). What is the appropriate way to address this problem? Can I make a column optional in Glue? Can (should?) Glue fill the struct with empty strings? Other options?
Background: I have the following backend structure:
Amazon PinPoint Analytics collects metrics from my application.
The PinPoint event stream has been configured to forward the events to an Amazon Kinesis Firehose delivery stream.
Kinesis Firehose writes data to S3
Use AWS Glue to crawl S3
Use Athena to write queries based on the databases and tables generated by AWS Glue
I can see PinPoint events successfully being added to json files in S3, e.g.
First event in a file:
{
    "event_type": "_session.start",
    "event_timestamp": 1524835188519,
    "arrival_timestamp": 1524835192884,
    "event_version": "3.1",
    "application": {
        "app_id": "[an app id]",
        "cognito_identity_pool_id": "[a pool id]",
        "sdk": {
            "name": "Mozilla",
            "version": "5.0"
        }
    },
    "client": {
        "client_id": "[a client id]",
        "cognito_id": "[a cognito id]"
    },
    "device": {
        "locale": {
            "code": "en_GB",
            "country": "GB",
            "language": "en"
        },
        "make": "generic web browser",
        "model": "Unknown",
        "platform": {
            "name": "macos",
            "version": "10.12.6"
        }
    },
    "session": {
        "session_id": "[a session id]",
        "start_timestamp": 1524835188519
    },
    "attributes": {},
    "client_context": {
        "custom": {
            "legacy_identifier": "50ebf77917c74f9590c0c0abbe5522d2"
        }
    },
    "awsAccountId": "672057540201"
}
Second event in the same file:
{
    "event_type": "DocumentHandling",
    "event_timestamp": 1524835194932,
    "arrival_timestamp": 1524835200692,
    "event_version": "3.1",
    "application": {
        "app_id": "[an app id]",
        "cognito_identity_pool_id": "[a pool id]",
        "sdk": {
            "name": "Mozilla",
            "version": "5.0"
        }
    },
    "client": {
        "client_id": "[a client id]",
        "cognito_id": "[a cognito id]"
    },
    "device": {
        "locale": {
            "code": "en_GB",
            "country": "GB",
            "language": "en"
        },
        "make": "generic web browser",
        "model": "Unknown",
        "platform": {
            "name": "macos",
            "version": "10.12.6"
        }
    },
    "session": {},
    "attributes": {
        "action": "Button-click",
        "label": "FavoriteStar",
        "category": "Navigation"
    },
    "metrics": {
        "details": 40.0
    },
    "client_context": {
        "custom": {
            "legacy_identifier": "50ebf77917c74f9590c0c0abbe5522d2"
        }
    },
    "awsAccountId": "[aws account id]"
}
Next, AWS Glue has generated a database and a table. Specifically, I see that there is a column named attributes that has the value of
struct <
x-amz-request-id:string,
action:string,
label:string,
category:string,
when:string
>
However, when I attempt to Preview table from Athena, i.e. execute the query
SELECT * FROM "pinpoint-test"."pinpoint_testfirehose" limit 10;
I get the error message described earlier.
As a side note, I have tried to remove the attributes field (by editing the database table in Glue), but that results in an internal error when executing the SQL query from Athena.
This is a known limitation. Athena table and database names allow only the underscore special character.
Athena table and database names cannot contain special characters, other than underscore (_).
Source: http://docs.aws.amazon.com/athena/latest/ug/known-limitations.html
Use backticks (`) when the table name contains a -.
Example:
SELECT * FROM `pinpoint-test`.`pinpoint_testfirehose` limit 10;
Make sure you select the "default" database in the left pane.
I believe the problem is your struct element name, x-amz-request-id, specifically the "-" in the name.
I'm currently dealing with a similar issue, since the elements in my struct have "::" in the name.
Sample data:
some_key: {
    "system::date": date,
    "system::nps_rating": 0
}
Glue-derived struct schema (it tried to escape them with a backslash):
struct <
    system\:\:date:String
    system\:\:nps_rating:Int
>
But that still gives me an error in Athena.
I don't have a good solution for this other than changing the struct to STRING and trying to process the data that way.

AppSync loading incorrect resolver for field

AppSync appears to be loading the incorrect resolver template for some fields of nested objects. Also, it appears to only happen when the nested object has a field with the same name as a field on the parent object.
I've included an example below because I think that might be the best way to explain the issue. As you can see, the id fields for the nested objects are not being resolved correctly.
Each of the types Task, User, List, and Tag has a resolver for its id field because the data for each has a prefix on the id field. For example, Task.id has a resolver that returns $context.source.task_id and User.id has a resolver that returns $context.source.user_id. The same goes for List and Tag.
What appears to be happening is AppSync is loading the id resolver template for the parent type. You can see that this is the case for task.owner.id, where owner is a User but the id gets resolved as "$context.source.task_id". Same for task.list.id where list is a List. Again we can see this for task.tags[0].owner.id. owner is once again a User except this time the parent is a Tag so task.tags[0].owner.id is resolved as "$context.source.tag_id". These three example show that the problem is not with a particular type since User and List are behaving the same when they are nested in a Task. Also, we can see that the issue is not with Task since User is behaving similarly when nested in a Tag. Lastly, we can see that task.tags[1].owner.id actually behaves correctly. This indicates that the issue only presents itself on first execution.
At this point I strongly suspect this is a bug with AppSync however, I'm not 100% on that. Has anyone else experienced this issue? Am I doing something terribly wrong?
Example
Query
{
    task(id: "task-123") {
        id,
        title,
        owner {
            id,
            username,
        },
        list {
            id,
            name,
        },
        tags {
            id,
            name,
            owner {
                id,
                username,
            },
        },
    },
}
Result
{
    "data": {
        "task": {
            "id": "task-123",
            "title": "First Task",
            "owner": {
                "id": "$context.source.task_id",
                "username": "tom"
            },
            "list": {
                "id": "$context.source.task_id",
                "name": "Inbox"
            },
            "tags": [
                {
                    "id": "tag-123",
                    "name": "one",
                    "owner": {
                        "id": "$context.source.tag_id",
                        "username": "tom"
                    }
                },
                {
                    "id": "tag-234",
                    "name": "two",
                    "owner": {
                        "id": "user-123",
                        "username": "tom"
                    }
                }
            ]
        }
    }
}
Task Schema
type Task {
    id: ID!
    title: String!
    owner: User!
    list: List
    tags: [Tag]
}
User Schema
type User {
    id: ID!
    username: String!
}
List Schema
type List {
    id: ID!
    name: String!
}
Tag Schema
type Tag {
    id: ID!
    name: String!
    owner: User!
}
Task Data
{
    task_id: "task-123",
    title: "First Task",
    owner_id: "user-123",
    list_id: "list-123",
    tags: [
        "tag-123",
        "tag-234"
    ]
}
User Data
{
    user_id: "user-123",
    username: "tom"
}
List Data
{
    list_id: "list-123",
    name: "Inbox"
}
Tag Data
{
    tag_id: "tag-123",
    name: "one",
    owner_id: "user-123"
}
{
    tag_id: "tag-234",
    name: "two",
    owner_id: "user-123"
}
Example id resolver (User)
Request Mapping Template
{
    "version": "2017-02-28",
    "payload": "$context.source.user_id"
}
Response Mapping Template
$util.toJson($context.result)
The id resolvers for the other types are very similar.
It may also be worth noting that I created a different None data source for each type: Task, User, List, and Tag. The id resolver for each type uses its respective None data source.