How do I find if all the S3 objects are restored - amazon-web-services

I recently have restored S3 Deep Archive objects using Python.
There are over a million of them and I need to be certain that every single one was restored.
If possible, I would like to use python to check if all objects have been restored.
Can someone please advise me how to do this?
Thank you

From restore_object() documentation:
To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response.
And under head_object():
If an archive copy is already restored, the header value indicates when Amazon S3 is scheduled to delete the object copy. For example:
x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"

Although not Python, here is the logic to perform this use case using AWS SDK for Java v2.
package com.example.s3;
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
import software.amazon.awssdk.services.s3.model.S3Exception;
/**
 * Prints the restoration status of a single Amazon S3 object by issuing a
 * HEAD request and reading the x-amz-restore header via the response.
 */
public class GetObjectRestoreStatus {

    public static void main(String[] args) {
        final String usage = "\n" +
            "Usage:\n" +
            " <bucketName> <keyName> \n\n" +
            "Where:\n" +
            " bucketName - The Amazon S3 bucket name. \n\n"+
            " keyName - A key name that represents the object. \n\n";

        // Require exactly the two positional arguments described above.
        if (args.length != 2) {
            System.out.println(usage);
            System.exit(1);
        }

        String bucketName = args[0];
        String keyName = args[1];

        // Credentials come from the default profile on this machine.
        ProfileCredentialsProvider credentialsProvider = ProfileCredentialsProvider.create();
        S3Client s3 = S3Client.builder()
                .credentialsProvider(credentialsProvider)
                .region(Region.US_EAST_1)
                .build();

        checkStatus(s3,bucketName,keyName);
        s3.close();
    }

    // snippet-start:[s3.java2.getrestorestatus.main]
    /**
     * Sends a HeadObject request for the given key and prints the value of the
     * restore field (the parsed x-amz-restore header).
     *
     * @param s3         the client used to make the request
     * @param bucketName the bucket containing the object
     * @param keyName    the key of the object whose restore status is wanted
     */
    public static void checkStatus(S3Client s3, String bucketName, String keyName) {
        try {
            HeadObjectResponse headResult = s3.headObject(HeadObjectRequest.builder()
                    .bucket(bucketName)
                    .key(keyName)
                    .build());
            System.out.println("The Amazon S3 object restoration status is "+headResult.restore());
        } catch (S3Exception e) {
            System.err.println(e.awsErrorDetails().errorMessage());
            System.exit(1);
        }
    }
    // snippet-end:[s3.java2.getrestorestatus.main]
}

Related

How to empty Amazon S3 Bucket using AWS Java SDK?

I was unable to find any documentation on how to empty an Amazon S3 bucket programmatically in Java. Any help will be appreciated.
To delete Amazon S3 objects using Java, you can use the AWS SDK for Java V2. To empty a bucket, first get a list of all objects in the Bucket using this code:
/**
 * Prints the key, size (in KB), and owner of each object in the given bucket.
 *
 * NOTE(review): listObjects returns at most 1000 objects per request; for a
 * bucket with more objects, use listObjectsV2 with a paginator so the "empty
 * the bucket" workflow sees every key — confirm against bucket size.
 *
 * @param s3         the S3 client used to make the request
 * @param bucketName the name of the bucket whose objects are listed
 */
public static void listBucketObjects(S3Client s3, String bucketName ) {
    try {
        ListObjectsRequest listObjects = ListObjectsRequest
            .builder()
            .bucket(bucketName)
            .build();

        ListObjectsResponse res = s3.listObjects(listObjects);
        // Enhanced for loop instead of a raw ListIterator: no unchecked cast needed.
        for (S3Object myValue : res.contents()) {
            System.out.print("\n The name of the key is " + myValue.key());
            System.out.print("\n The object is " + calKb(myValue.size()) + " KBs");
            System.out.print("\n The owner is " + myValue.owner());
        }
    } catch (S3Exception e) {
        System.err.println(e.awsErrorDetails().errorMessage());
        System.exit(1);
    }
}
For each object, you can delete it using this code:
/**
 * Deletes a single object from the given bucket via a DeleteObjects batch
 * request (the batch here contains just one ObjectIdentifier).
 *
 * @param s3         the S3 client used to make the request
 * @param bucketName the bucket containing the object
 * @param objectName the key of the object to delete
 */
public static void deleteBucketObjects(S3Client s3, String bucketName, String objectName) {
    // Diamond operator: the type argument is inferred from the declaration.
    ArrayList<ObjectIdentifier> toDelete = new ArrayList<>();
    toDelete.add(ObjectIdentifier.builder().key(objectName).build());

    try {
        DeleteObjectsRequest dor = DeleteObjectsRequest.builder()
            .bucket(bucketName)
            .delete(Delete.builder().objects(toDelete).build())
            .build();
        s3.deleteObjects(dor);
    } catch (S3Exception e) {
        System.err.println(e.awsErrorDetails().errorMessage());
        System.exit(1);
    }
    System.out.println("Done!");
}
You can find these examples and many others in AWS Github here:
Amazon S3 Java code examples

If Amazon S3 buckets are supposed to be "globally unique" across the whole AWS cloud, how do I have a bucket named "todo"?

I created a bucket in Amazon S3 and named it "todo", and the bucket was created successfully with no errors. There is absolutely no way that no one else has used that bucket name, so if buckets are supposed to be globally unique, how did this happen?
I tried creating a bucket with the name todo and got an exception as expected. I suppose you were the 1st to name a bucket with that name.
// Demonstrates the duplicate-bucket-name experiment: the usage check is
// deliberately commented out and the bucket name is hard-coded to "todo"
// to reproduce the asker's scenario.
public static void main(String[] args) throws URISyntaxException {
final String USAGE = "\n" +
"Usage:\n" +
" <bucketName> \n\n" +
"Where:\n" +
" bucketName - the name of the bucket to create. The bucket name must be unique, or an error occurs.\n\n" ;
// Argument validation intentionally disabled for this test run.
// if (args.length != 1) {
// System.out.println(USAGE);
// System.exit(1);
// }
// Hard-coded instead of args[0] so the call attempts the contested name.
String bucketName = "todo"; //args[0];
System.out.format("Creating a bucket named %s\n",
bucketName);
Region region = Region.US_EAST_1;
S3Client s3 = S3Client.builder()
.region(region)
.build();
createBucket (s3, bucketName);
// Release the client's underlying HTTP resources.
s3.close();
}
// snippet-start:[s3.java2.create_bucket_waiters.main]
/**
 * Creates the named bucket and blocks until Amazon S3 reports it exists,
 * then prints the waiter's matched response and a readiness message.
 *
 * @param s3Client   the client used to create the bucket
 * @param bucketName the name of the bucket to create (must be globally unique)
 */
public static void createBucket( S3Client s3Client, String bucketName) {
    try {
        CreateBucketRequest createRequest = CreateBucketRequest.builder()
                .bucket(bucketName)
                .build();
        s3Client.createBucket(createRequest);

        // Wait until the bucket is created and print out the response.
        S3Waiter waiter = s3Client.waiter();
        HeadBucketRequest existsRequest = HeadBucketRequest.builder()
                .bucket(bucketName)
                .build();
        WaiterResponse<HeadBucketResponse> waited = waiter.waitUntilBucketExists(existsRequest);
        waited.matched().response().ifPresent(System.out::println);
        System.out.println(bucketName +" is ready");
    } catch (S3Exception e) {
        System.err.println(e.awsErrorDetails().errorMessage());
        System.exit(1);
    }
}

How to read a file from an Amazon S3 bucket using the AWS SDK for Java V2

I was able to read the file using AmazonS3Client, but now I'm trying to read the file using the package software.amazon.awssdk.services.s3.S3Client. I did not find the option to pass the file name to the GetObjectRequest constructor, unlike the com.amazonaws.services.s3 package. Here is my sample code.
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
import software.amazon.awssdk.services.s3.model.S3Object;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;
// NOTE(review): as posted, this snippet is incomplete Java — it lacks an
// enclosing class and a return type on main. Reproduced verbatim from the
// question; it lists the bucket's objects but never reads their contents.
main(String[] args){
String bucketName = "some-name-s3";
Region region = Region.US_EAST_1;
S3Client s3client = S3Client
.builder()
.region(region)
.build();
ListObjectsV2Request listObjectsV2Request = ListObjectsV2Request.builder().bucket(bucketName).build();
ListObjectsV2Response listObjectsV2 = s3client.listObjectsV2(listObjectsV2Request);
// Only metadata (keys, sizes, owners) — not the object bytes themselves.
List<S3Object> s3Objects = listObjectsV2.contents();
}
Now, how do I read each file and process the stream from s3Objects?
You are using the wrong logic to read an object from an Amazon S3 bucket using the AWS SDK for Java V2. Your code only lists the objects — it never reads their contents. Calling listObjectsV2 gives you metadata about each object; for example, you can invoke the S3Object's key() method to get the key name.
Now to read an object from an Amazon S3 bucket, you need the bucket name and key name and then invoke getObjectAsBytes, as shown in this Java logic which shows how to read a PDF document and write it to a local path:
package com.example.s3;
import software.amazon.awssdk.core.ResponseBytes;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
/**
* To run this AWS code example, ensure that you have setup your development environment, including your AWS credentials.
*
* For information, see this documentation topic:
*
* https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/get-started.html
*/
/**
 * Reads an object from an Amazon S3 bucket as bytes and writes it to a local
 * file (e.g. downloading a PDF document).
 */
public class GetObjectData {

    public static void main(String[] args) {
        final String USAGE = "\n" +
            "Usage:\n" +
            " GetObjectData <bucketName> <keyName> <path>\n\n" +
            "Where:\n" +
            " bucketName - the Amazon S3 bucket name. \n\n"+
            " keyName - the key name. \n\n"+
            " path - the path where the file is written to. \n\n";

        if (args.length != 3) {
            System.out.println(USAGE);
            System.exit(1);
        }

        // BUG FIX: the original validated args and then overwrote them with
        // hard-coded values ("myBucket", "book.pdf", "C:/AWS/book.pdf"),
        // silently ignoring the command-line input. Use the arguments.
        String bucketName = args[0];
        String keyName = args[1];
        String path = args[2];

        Region region = Region.US_EAST_1;
        S3Client s3 = S3Client.builder()
            .region(region)
            .build();

        getObjectBytes(s3,bucketName,keyName, path);
        s3.close();
    }

    /**
     * Downloads the object identified by bucketName/keyName into memory and
     * writes the bytes to the given local path.
     *
     * @param s3         the client used to make the request
     * @param bucketName the bucket containing the object
     * @param keyName    the key of the object to read
     * @param path       the local file path to write the bytes to
     */
    public static void getObjectBytes (S3Client s3, String bucketName, String keyName, String path ) {
        try {
            GetObjectRequest objectRequest = GetObjectRequest
                .builder()
                .key(keyName)
                .bucket(bucketName)
                .build();

            ResponseBytes<GetObjectResponse> objectBytes = s3.getObjectAsBytes(objectRequest);
            byte[] data = objectBytes.asByteArray();

            // Write the data to a local file.
            // try-with-resources closes the stream even if write() throws
            // (the original leaked the stream on an IOException).
            File myFile = new File(path);
            try (OutputStream os = new FileOutputStream(myFile)) {
                os.write(data);
            }
            System.out.println("Successfully obtained bytes from an S3 object");
        } catch (IOException ex) {
            ex.printStackTrace();
        } catch (S3Exception e) {
            System.err.println(e.awsErrorDetails().errorMessage());
            System.exit(1);
        }
    }
}
Find this example and many other Amazon S3 Java V2 code examples in Github here:
https://github.com/awsdocs/aws-doc-sdk-examples/tree/master/javav2/example_code/s3

Amazon S3 files sharing

In my project there is a need for creating share link for external users without aws user from my researching found out a couple ways for doing so
Bucket policy based on tag
Lambda that creates sign url every time some user request the file
The question is what is the best practice for doing so
I need the download to be available until the user sharing the file stops it
Thank guys for any answers
Using the AWS SDK, you can use Amazon S3 Pre-sign functionality. You can perform this task in any of the supported programming languages (Java, JS, Python, etc).
The following code shows how to sign an object via the Amazon S3 Java V2 API.
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.time.Duration;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;
import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.utils.IoUtils;
// snippet-end:[presigned.java2.getobjectpresigned.import]
/**
* To run this AWS code example, ensure that you have setup your development environment, including your AWS credentials.
*
* For information, see this documentation topic:
*
* https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/get-started.html
*/
/**
 * Generates a presigned GET URL for an S3 object, then fetches the object
 * through that URL over plain HTTP to show the link works without credentials.
 */
public class GetObjectPresignedUrl {

    public static void main(String[] args) {
        final String USAGE = "\n" +
            "Usage:\n" +
            " GetObjectPresignedUrl <bucketName> <keyName> \n\n" +
            "Where:\n" +
            " bucketName - the Amazon S3 bucket name. \n\n"+
            " keyName - a key name that represents a text file. \n\n";

        if (args.length != 2) {
            System.out.println(USAGE);
            System.exit(1);
        }

        String bucketName = args[0];
        String keyName = args[1];
        Region region = Region.US_WEST_2;
        S3Presigner presigner = S3Presigner.builder()
            .region(region)
            .build();

        getPresignedUrl(presigner, bucketName, keyName);
        presigner.close();
    }

    // snippet-start:[presigned.java2.getobjectpresigned.main]
    /**
     * Presigns a GET request for the given object, prints the resulting URL,
     * then executes the request and copies the response body to stdout.
     *
     * @param presigner  the presigner used to sign the request
     * @param bucketName the bucket containing the object
     * @param keyName    the key of the object to presign
     */
    public static void getPresignedUrl(S3Presigner presigner, String bucketName, String keyName ) {
        try {
            GetObjectRequest getObjectRequest =
                GetObjectRequest.builder()
                    .bucket(bucketName)
                    .key(keyName)
                    .build();

            // The generated URL is valid for 10 minutes.
            GetObjectPresignRequest getObjectPresignRequest = GetObjectPresignRequest.builder()
                .signatureDuration(Duration.ofMinutes(10))
                .getObjectRequest(getObjectRequest)
                .build();

            // Generate the presigned request
            PresignedGetObjectRequest presignedGetObjectRequest =
                presigner.presignGetObject(getObjectPresignRequest);

            // Log the presigned URL
            System.out.println("Presigned URL: " + presignedGetObjectRequest.url());

            HttpURLConnection connection = (HttpURLConnection) presignedGetObjectRequest.url().openConnection();
            presignedGetObjectRequest.httpRequest().headers().forEach((header, values) -> {
                values.forEach(value -> {
                    connection.addRequestProperty(header, value);
                });
            });

            // Send any request payload that the service needs (not needed when isBrowserExecutable is true)
            if (presignedGetObjectRequest.signedPayload().isPresent()) {
                connection.setDoOutput(true);
                try (InputStream signedPayload = presignedGetObjectRequest.signedPayload().get().asInputStream();
                     OutputStream httpOutputStream = connection.getOutputStream()) {
                    IoUtils.copy(signedPayload, httpOutputStream);
                }
            }

            // Download the result of executing the request
            try (InputStream content = connection.getInputStream()) {
                System.out.println("Service returned response: ");
                IoUtils.copy(content, System.out);
            }
        } catch (S3Exception e) {
            // BUG FIX: the original called e.getStackTrace(), which returns the
            // trace without printing anything — the exception was silently lost.
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
        // snippet-end:[presigned.java2.getobjectpresigned.main]
    }
}

S3 - PutObject results in 403 - forbidden - public access?

I am confused by the S3 bucket privacy settings.
I have created a Bucket that blocks all public access and I have created an IAM user which has the permission AmazonS3FullAccess.
Then I use the Access Key and Access Secret of that IAM user to place objects in the bucket, but this fails as PutObject returns a 403-forbidden error. I don't understand why, because the user has the role to access S3 fully.
My bucket permissions looks like this:
So now I have two questions:
Did I forgot something to get this working?
I know it works when I enable public access - but what does that mean exactly? Can any S3 user access my bucket to download/upload/access the files in that bucket or only the user which is displayed in the ACL - which is currently only the owner of the bucket?
Thanks for clarification!
You did not specify how you are placing an object in the bucket. Let's make an assumption here. Assume you want to use code to perform this task. Now assume you set your IAM creds in a file named credentials in C:\Users\USERNAME.aws\credentials on Windows or ~/.aws/credentials on Linux, macOS, or Unix. Assume you set up your IAM user by following this doc:
https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-started_create-admin-group.html#getting-started_create-admin-group-console
(I suspect you have a wrong setting for IAM - check this doc)
You can upload an object to an Amazon S3 bucket using this code (this is the Java example - but you can find other examples for different programming languages).
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectResponse;
import software.amazon.awssdk.services.s3.model.S3Exception;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
 * Uploads a local file to an Amazon S3 bucket, attaching one piece of
 * user-defined metadata, and prints the resulting ETag.
 */
public class PutObject {

    public static void main(String[] args) {
        final String USAGE = "\n" +
            "Usage:\n" +
            " PutObject <bucketName> <objectKey> <objectPath> \n\n" +
            "Where:\n" +
            " bucketName - the Amazon S3 bucket to upload an object into.\n" +
            " objectKey - the object to upload (for example, book.pdf).\n" +
            " objectPath - the path where the file is located (for example, C:/AWS/book2.pdf). \n\n" ;

        if (args.length != 3) {
            System.out.println(USAGE);
            System.exit(1);
        }

        String bucketName = args[0];
        String objectKey = args[1];
        String objectPath = args[2];

        System.out.println("Putting object " + objectKey +" into bucket "+bucketName);
        System.out.println(" in bucket: " + bucketName);

        Region region = Region.US_EAST_1;
        S3Client s3 = S3Client.builder()
            .region(region)
            .build();

        String result = putS3Object(s3, bucketName, objectKey, objectPath);
        System.out.println("Tag information: "+result);
        s3.close();
    }

    // snippet-start:[s3.java2.s3_object_upload.main]
    /**
     * Uploads the file at objectPath under the given key with a sample
     * metadata entry ("myVal" = "test").
     *
     * @param s3         the client used to make the request
     * @param bucketName the bucket to upload into
     * @param objectKey  the key to store the object under
     * @param objectPath the local path of the file to upload
     * @return the ETag of the stored object, or "" (unreachable — the catch
     *         block exits the JVM on failure)
     */
    public static String putS3Object(S3Client s3,
                                     String bucketName,
                                     String objectKey,
                                     String objectPath) {
        try {
            Map<String, String> metadata = new HashMap<>();
            metadata.put("myVal", "test");

            PutObjectRequest putOb = PutObjectRequest.builder()
                .bucket(bucketName)
                .key(objectKey)
                .metadata(metadata)
                .build();

            PutObjectResponse response = s3.putObject(putOb,
                RequestBody.fromBytes(getObjectFile(objectPath)));
            return response.eTag();
        } catch (S3Exception e) {
            System.err.println(e.getMessage());
            System.exit(1);
        }
        return "";
    }

    /**
     * Reads the whole file into a byte array.
     *
     * @param filePath the path of the file to read
     * @return the file contents (empty array if the file cannot be read)
     */
    private static byte[] getObjectFile(String filePath) {
        File file = new File(filePath);
        byte[] bytesArray = new byte[(int) file.length()];
        // try-with-resources closes the stream (original used a manual finally).
        try (FileInputStream fileInputStream = new FileInputStream(file)) {
            // BUG FIX: a single read(byte[]) call may return fewer bytes than
            // requested; loop until the buffer is full or EOF is reached.
            int offset = 0;
            while (offset < bytesArray.length) {
                int read = fileInputStream.read(bytesArray, offset, bytesArray.length - offset);
                if (read < 0) {
                    break; // file ended early (shrank since length() was sampled)
                }
                offset += read;
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return bytesArray;
    }
    // snippet-end:[s3.java2.s3_object_upload.main]
}