I am having trouble sending emails with the AWS Go SDK using the SendRawEmail operation. Even though I get no errors and receive a MessageId back from AWS, the email never arrives.
Sending emails using SendEmail works fine and I receive the email.
My code:
session, err := session.NewSession()
if err != nil {
return err
}
svc := ses.New(session, &aws.Config{Region: aws.String("eu-west-1")})
messageContent := `From: "Alice" <xxx#xxx>
To: "Bob" <xxx#xxx>
Return-Path: <xxx#xxx>
Subject: Hello
Content-Language: en-US
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
This is a test email`
base64messageContent := base64.StdEncoding.EncodeToString([]byte(messageContent))
source := aws.String("xxx#xxx")
destinations := []*string{aws.String("xxx#xxx")}
message := ses.RawMessage{Data: []byte(base64messageContent)}
input := ses.SendRawEmailInput{Source: source, Destinations: destinations, RawMessage: &message}
output, err := svc.SendRawEmail(&input)
if err != nil {
return err
}
log.Println("Response from SES", output)
return nil
}
I am using my Gmail as the destination email, if that makes any difference.
The Data field of RawMessage should not be base64 encoded. As the documentation states:
// Data is automatically base64 encoded/decoded by the SDK.
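Based on that, a minimal sketch of the corrected call (keeping the placeholder addresses from the question) would pass the raw message bytes directly and let the SDK do the encoding:
// Sketch only: the raw RFC 5322 message is passed as-is; the SDK
// base64-encodes RawMessage.Data internally. Note the blank line
// separating the headers from the body.
messageContent := `From: "Alice" <xxx#xxx>
To: "Bob" <xxx#xxx>
Subject: Hello
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"

This is a test email`
input := &ses.SendRawEmailInput{
    Source:       aws.String("xxx#xxx"),
    Destinations: []*string{aws.String("xxx#xxx")},
    RawMessage:   &ses.RawMessage{Data: []byte(messageContent)},
}
output, err := svc.SendRawEmail(input)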
I am using the AWS S3 service to upload images. Yesterday I updated the SDK from v1 to v2 and found that the image upload fails with the following error:
operation error S3: PutObject, https response error StatusCode: 403, RequestID: XXXXXXXXXXX, HostID: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX, api error SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. Check your key and signing method.
UPDATED:
I have AWS credentials in the .aws folder in my home directory on Linux, in the following format:
[default]
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
aws_secret_access_key = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXx
Here is the code:
package main
import (
"context"
"fmt"
"io"
"net/http"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
func main() {
fileName := "test123.jpg"
filePath := "/BUCKET_NAME/uploads/aman/2021/6/25/"
res, err := http.Get("https://images.app.goo.gl/mpQ5nXYXjdUMKGgW7")
if err != nil || res.StatusCode != 200 {
// handle errors
}
defer res.Body.Close()
UploadFileInS3Bucket(res.Body, fileName, filePath)
}
func UploadFileInS3Bucket(file io.Reader, fileName, filePath string) {
cfg, err := awsconfig.LoadDefaultConfig(context.TODO(),
awsconfig.WithRegion("REGION"),
)
client := s3.NewFromConfig(cfg)
uploader := manager.NewUploader(client)
uploadResp, err := uploader.Upload(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(filePath),
Key: aws.String(fileName),
Body: file,
ContentType: aws.String("image"),
})
fmt.Println(uploadResp)
fmt.Println(err)
}
I did not change any credentials, buckets, or regions in my code. However, if I run the code with SDK v1, it works fine and the images are uploaded.
What is going wrong with SDK v2?
After spending a couple of days on this, I found that SDK v2 expects the following format for the Bucket and Key fields:
fileName := "uploads/2021/6/25/test123.jpg"
filePath := "BUCKET_NAME"
Basically, the way the path is split across these fields is reversed between SDK v1 and SDK v2: in v2 the Bucket field takes only the bucket name and the Key field takes the full object path. Above is the v2 format; below is the v1 format I had been using:
fileName := "test123.jpg"
filePath := "/BUCKET_NAME/uploads/2021/6/25/"
I have a Go service that generates a presigned URL to upload a file:
sess, err := session.NewSession(&aws.Config{
Region: aws.String(os.Getenv(AwsRegionEnv))},
)
if err != nil {
return nil, err
}
svc := s3.New(sess)
req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
Bucket: aws.String(os.Getenv(BucketNameEnv)),
Key: aws.String(getFileName(file, customer)),
})
minutesTimeout, err := strconv.Atoi(os.Getenv(TimeoutURL))
if err != nil {
return nil, err
}
str, err := req.Presign(time.Duration(minutesTimeout) * time.Minute)
if err != nil {
return nil, err
}
So I can upload a file with this presigned URL using curl:
curl -vT test.pdf '<<URL PRESIGNED>>'
But when I add an ACL, it does not work. The modification is:
req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
Bucket: aws.String(os.Getenv(BucketNameEnv)),
Key: aws.String(getFileName(file, customer)),
ACL: aws.String(s3.ObjectCannedACLAuthenticatedRead),
})
When I try to upload a file using the presigned URL with the ACL, I get this error:
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>SignatureDoesNotMatch</Code>
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
<AWSAccessKeyId>ASIAS...</AWSAccessKeyId>
<StringToSign>AWS4-HMAC-SHA256
20210316T135400Z
20210316/us-east-2/s3/aws4_request
d7ab7d377b719636610b11793e3e68e104a3f41fb9f9f5608138a8c2b19ceaf3</StringToSign>
<SignatureProvided>bd59fbb080..</SignatureProvided>
<StringToSignBytes>41 57 53...</StringToSignBytes>
<CanonicalRequest>PUT
/35527810/sampleVpacheco3.pdf
X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=...%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20210316T135400Z&X-Amz-Expires=300&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEN7%2F%2F...&X-Amz-SignedHeaders=host%3Bx-amz-acl
host:adl-digital-dev-document-manager.s3.us-east-2.amazonaws.com
x-amz-acl:
host;x-amz-acl
UNSIGNED-PAYLOAD</CanonicalRequest>
<CanonicalRequestBytes>50 55 54...</CanonicalRequestBytes>
<RequestId>0V6FWNNGK2QCDA1V</RequestId>
<HostId>rE4rkv...</HostId>
</Error>
Any idea how I can add an ACL and still upload a file successfully?
Full URL is:
https://document-manager.s3.us-east-2.amazonaws.com/35527810/sampleVpacheco5.pdf?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIASN3IRSVR%2F20210316%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20210316T143240Z&X-Amz-Expires=300&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEN7%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLWVhc3QtMSJGMEQCIGW4j1R7H3wIxUAN8FytKbDTRne4pJGJ9I4ofpXeh%2FcaAiBFTdRNVug9WODzLdCoMcFRVzFZ%2FtGeaweeteSSTY6yMSqtAwgXEAIaDDE2NzE4NjEwOTc5NSIMh1JvraENxW8E5aBqKooDLx39b6Lx1%2Fw6AtGMSzlYRILNIXdB2Ouviq0pUlfPVCSFlZnPzo%2F%2B6%2B8ZcIpHM8E%2FDjEn1NF1lvcz9QKsuXJI94XuVCSRGiBBRvpIdm%2Ff001q3C%2FmZW2I1aMsfV518LTtEQigJ%2Fv80TPVSv7ZozoR9Zae4W3C3efjm2sJ%2BkVkI%2FBm7z6Vd97Q%2BbpVztf8Lp4GImDp1G72wtOP7wq9wSDYzFEzUja91r7g97py1Wzin6%2BXUNX68yAH%2BRePqyW6by4Lht8086B7YQcj6h77kxwE89C1NMYhKPiNl1y%2Ff4NukwWxW%2FTefqSW3Qr26eDfTV%2FVyR7%2FeNCf7OOtpkGZEmOnFbd%2FyY6wVOARcTdixQkPKKu2GAkz%2B8xuNY10uTGoh2vul3gUWBZF4Yl13R7kIq%2FPBb1UVl%2BatCwN%2BDBMj22cM4Pn%2BOJPyqxCjcfyIXwRsiYDTmmtiSIWrTvSEQaWf1Dc95lQVToA2ZsAxB8LO88%2FEz0t3FUpPw0ncgbLbHedcRYqvV62RDRQK%2FI9zjCz78KCBjqnAfzDcfP25%2BIr6ia4elbxSDOWIIv%2FjZOLlRDedHdqLKCDjYbgXoWrTQTt%2BZCRlV7UtJxo%2ByVeJvsjmb3BdI4IjI8wd8XjkV5qMejJbFcmFIQV7df0cdGY7U6nOO8gxGK9fj7Fb1Y0DtZaCxaZU8D0d2iTfUn8kl%2FT0GwSPDZqz1I6oJuG58KLR%2BVKRhuZrhTq8%2Fm98cLg7diuwt%2Bt1RwL%2BK9oonqHqXcE&X-Amz-SignedHeaders=host%3Bx-amz-acl&X-Amz-Signature=27d1fae2f60187dce85b175980c4e91334fe2a0f192d220244aa4a27e798ec9f
I tried the following:
adding the header x-amc-acl: -H "x-amc-acl: authenticated-read"
adding the header host: -H "host: 127.0.0.1"
modifying the URL, changing host%3Bx-amz-acl to host;x-amc-acl
Thanks!
It worked for me after adding the ACL property as a query parameter:
req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
q := req.HTTPRequest.URL.Query()
q.Add("x-amz-acl", "public-read")
q.Add("Content-Type", contentType)
req.HTTPRequest.URL.RawQuery = q.Encode()
presigned, err := req.Presign(5 * time.Minute)
if err != nil {
fmt.Printf("Error presigning URL: %v\n", err)
}
fmt.Println(presigned)
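Applied to the upload case in the question, the same approach might look like this (a sketch only, reusing the variables from the original snippet; whether the ACL is honored this way is an assumption based on the answer above):
req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
    Bucket: aws.String(os.Getenv(BucketNameEnv)),
    Key:    aws.String(getFileName(file, customer)),
})
// Put the canned ACL in the query string so it becomes part of the signed
// query parameters instead of a signed header the client has to send.
q := req.HTTPRequest.URL.Query()
q.Add("x-amz-acl", s3.ObjectCannedACLAuthenticatedRead)
req.HTTPRequest.URL.RawQuery = q.Encode()
str, err := req.Presign(time.Duration(minutesTimeout) * time.Minute)
If that works, the same curl -vT test.pdf '<<URL PRESIGNED>>' upload should go through without any extra headers.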
I have a REST API with two endpoints: one handles POST requests to upload files to S3, and the other handles GET requests by downloading the file from the bucket and serving it to the client. Now:
I get an access denied response when I try to download the file from S3 more than 24 hours after uploading it, although I was able to retrieve the file on the same day. Is there something that needs to be configured to prevent this? Does it relate to something like this https://medium.com/driven-by-code/on-the-importance-of-correct-headers-and-metadata-in-s3-origins-for-aws-cloudfront-50da2f9370ae?
When the download from S3 is initiated, I get a timeout error after 30 seconds. The server is configured with a WriteTimeout of 15 minutes. How can I prevent this timeout? Note: this is not a Lambda function or any other type of serverless function, and I am not using CloudFront either (not to my knowledge). Here is a code snippet for the GET request.
buf := aws.NewWriteAtBuffer([]byte{})
s3Svc := s3.New(s)
downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
d.PartSize = 64 * MB
d.Concurrency = 3
})
_, err := downloader.Download(
buf, &s3.GetObjectInput{
Bucket: aws.String(rc.AWS.Bucket),
Key: aws.String(fileName),
})
if err != nil {
return nil, err
}
d := bytes.NewReader(buf.Bytes())
w.Header().Set("Content-Disposition", "attachment; filename="+OriginalFileName)
http.ServeContent(w, r, fileName, time.Now(), d)
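For context, the 15-minute WriteTimeout mentioned above is set on the HTTP server, along these lines (a sketch; the address and handler are placeholders, not the actual configuration):
srv := &http.Server{
    Addr:         ":8080", // placeholder
    Handler:      router,  // placeholder
    WriteTimeout: 15 * time.Minute,
}
log.Fatal(srv.ListenAndServe())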
When trying to publish a message to a topic using the AWS IoT SDK for Go, I get the following error: "x509: certificate signed by unknown authority".
I am on Windows, and all I did was install the different root CAs (literally via double-click) and a device certificate generated by the AWS IoT Console.
I feel like I should somehow specify the path to the certificate, but unlike the Python SDK, the Go SDK does not mention that anywhere. I also added my credentials via the AWS CLI, so that should not be the issue.
package main
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iotdataplane"
)
func main() {
sess, err := session.NewSession(&aws.Config{
Region: aws.String("eu-central-1"), Endpoint: aws.String("xxxxxxxxxx.iot.eu-central-1.amazonaws.com")},
)
if err != nil {
log.Fatal(err)
}
iotDataSvc := iotdataplane.New(sess)
input := &iotdataplane.PublishInput{
Payload: []byte(`{
'state': {
'desired':{
'humidity':10,
'temp':10
}
}
}`),
Topic: aws.String("/update"),
Qos: aws.Int64(0),
}
resp, err := iotDataSvc.Publish(input)
if err != nil {
log.Fatal(err)
}
fmt.Println(resp)
}
Found the mistake: xxxxxxxxxx.iot.eu-central-1.amazonaws.com needs to be xxxxxxxxxx-ats.iot.eu-central-1.amazonaws.com.
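For reference, the only change is the Endpoint value in the session configuration (the account-specific prefix is elided as in the question):
sess, err := session.NewSession(&aws.Config{
    Region: aws.String("eu-central-1"),
    // The ATS data endpoint carries the "-ats" suffix.
    Endpoint: aws.String("xxxxxxxxxx-ats.iot.eu-central-1.amazonaws.com"),
})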
I am trying to achieve the following using my program:
Create a log group in AWS CloudWatch
Create a log stream under the above log group
Put log events into the above log stream
All of this using Go.
package main
import (
"time"
"fmt"
"github.com/jcxplorer/cwlogger"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/aws"
)
func main() {
sess := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
}))
svc := cloudwatchlogs.New(sess)
logGroupName := "my-log-group";
logStreamName := "my-log-stream";
logGroupInput := cloudwatchlogs.CreateLogGroupInput{LogGroupName: &logGroupName}
svc.CreateLogGroup(&logGroupInput);
logStreamInput := cloudwatchlogs.CreateLogStreamInput{LogGroupName: &logGroupName, LogStreamName: &logStreamName}
svc.CreateLogStream(&logStreamInput)
logevents := make([]*cloudwatchlogs.InputLogEvent, 1)
logevents = append(logevents, &cloudwatchlogs.InputLogEvent{
Message: aws.String("Simple log message"),
Timestamp: aws.Int64(111),
})
p := cloudwatchlogs.PutLogEventsInput{LogEvents: logevents, LogGroupName: &logGroupName, LogStreamName: &logStreamName}
resp, err := svc.PutLogEvents(&p)
if err != nil {
panic(err)
}
fmt.Print("Next Token: {}", resp.NextSequenceToken)
}
Now, when I run the above code, it successfully creates the log group and log stream, and I can verify that in AWS CloudWatch. But for some reason PutLogEvents fails with the following error:
panic: SerializationException:
status code: 400, request id: 0685efcc-47e3-11e9-b528-81f33ec2f468
I am not sure what may be wrong here. Any suggestion or direction would be really helpful.
Thanks in advance.
The reason for the SerializationException was:
logevents := make([]*cloudwatchlogs.InputLogEvent, 1)
followed by append, which left a nil first entry in the slice. I replaced it with
logevents := make([]*cloudwatchlogs.InputLogEvent, 0)
and the error was resolved.
Additionally, while debugging why the logs were not getting populated, I found that the timestamp value I used was not valid. According to the AWS documentation, the timestamp for each event cannot be more than 14 days in the past or more than 2 hours in the future.
Here is the link: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
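A minimal sketch of the corrected slice setup together with a valid timestamp (milliseconds since the Unix epoch, per the linked documentation; this requires the time package):
// Start with an empty slice so append does not leave a nil first element.
logevents := make([]*cloudwatchlogs.InputLogEvent, 0)
logevents = append(logevents, &cloudwatchlogs.InputLogEvent{
    Message: aws.String("Simple log message"),
    // Milliseconds since the Unix epoch, within the allowed window.
    Timestamp: aws.Int64(time.Now().UnixNano() / int64(time.Millisecond)),
})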
I hope this will be helpful to someone facing a similar issue in the future.