AWS Lambda function and S3 in ASP.NET Core - amazon-web-services

I am new to AWS and ASP.NET Core. Help me find a solution.
I am using ASP.NET Core; how should I use a Lambda function to upload the clipped file to S3?
I have an IFormFile, but I want to use an S3 URL instead of an IFormFile and upload to an S3 bucket. I have used the ffmpeg library to cut the video, and I want to pass an S3 URL to a Lambda function and upload the output clipped video file back into an S3 bucket directory that I created in the S3 console.
using Abp.UI;
using FFmpeg.NET;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Http;
using System;
using System.IO;
using Amazon.S3;
using System.Threading.Tasks;
using Amazon.S3.Transfer;
using Amazon.S3.Model;
using IdentityModel.Client;
using Amazon;
using System.Net.Http;
namespace VideoSlicer.About
{
public class AboutAppService : IAboutAppService
{
//Assigning readonly IHostingEnvironment
private readonly IHostingEnvironment _appEnvironment;
private readonly string bucketName = "videoslicerpuru";
private static readonly RegionEndpoint bucketRegion = RegionEndpoint.USEast1; //use the region your bucket lives in
/*
private const string newFile = "QQ.mp4";
private const string newFile1 = "637156754716419260QQ.mp4";
private const string filePath = @"C:\Users\Purushottam\Downloads\VideoSlicer\5.1.1\aspnet-core\src\VideoSlicer.Web.Host\wwwroot\images\";*/
private static IAmazonS3 client;
//Making constructor and passing in IHostingEnvironment
public AboutAppService(IHostingEnvironment appEnvironment)
{
_appEnvironment = appEnvironment;
}
//Function which uploads the video and slices it according to the start and finish times given by the user
public async Task UploadImage(IFormFile file, string Start, string Finish)
{
HttpClient cli = new HttpClient();
cli.DefaultRequestHeaders.ExpectContinue = false;
try
{
//Checking that a file has actually been selected (must happen before touching file.FileName)
if (file == null || file.Length == 0)
{
throw new UserFriendlyException("Please Select any file.");
}
//Taking out the extension of the file, normalized to lower case
var convertedExtension = Path.GetExtension(file.FileName).ToLowerInvariant();
//Comparing the extension to check whether the uploaded file is a video
if (convertedExtension != ".mp4" && convertedExtension != ".flv" && convertedExtension != ".3gp" && convertedExtension != ".ogg" && convertedExtension != ".avi" && convertedExtension != ".wmv")
{
throw new UserFriendlyException("Please Select Valid Video file to clip!!");
}
//declaring web root path
string path_Root = _appEnvironment.WebRootPath;
//setting path root and saving video to the given path
string pathOfVideo = path_Root + "\\video\\";
string path_to_Video = path_Root + "\\images\\" + file.FileName;
using (var stream = new FileStream(path_to_Video, FileMode.Create))
{
await file.CopyToAsync(stream);
}
client = new AmazonS3Client("AccessKey", "SecretKey", bucketRegion);
TransferUtility utility = new TransferUtility(client);
TransferUtilityUploadRequest request = new TransferUtilityUploadRequest();
string nameOfFile = file.FileName.ToString();
string videoPath = path_to_Video.ToString();
await utility.UploadAsync(videoPath, bucketName, nameOfFile);
Console.WriteLine("Upload 2 completed");
//To cut video, using ffmpeg media file where the video is saved
var input = new MediaFile(path_to_Video);
//The output of the clipped video and saving in a different folder
string nameFile = DateTime.Now.Ticks + file.FileName;
var output = new MediaFile(path_Root + "/video/" + nameFile);
//making the object of Engine using ffmpeg application file
var ffmpeg = new Engine("C:\\ffmpeg\\bin\\ffmpeg.exe");
//making object of ConversionOptions of ffmpeg
var options = new ConversionOptions();
//Converting the start and finish strings to seconds
var startTime = Convert.ToDouble(Start);
var finishTime = Convert.ToDouble(Finish);
//Validating the clip range before running the conversion
if (startTime >= finishTime)
{
throw new UserFriendlyException("Please Enter Valid Second to proceed");
}
//clipping video by given user input
options.CutMedia(TimeSpan.FromSeconds(startTime), TimeSpan.FromSeconds(finishTime - startTime));
await ffmpeg.ConvertAsync(input, output, options);
string clipPath = path_Root + "\\video\\" + nameFile;
//uploading the clipped file into the videoslice/ folder of the same bucket
await utility.UploadAsync(clipPath, bucketName, "videoslice/" + nameFile);
Console.WriteLine("Upload 2 completed");
}
catch (AmazonS3Exception e)
{
string err = e.Message;
throw; //rethrow without resetting the stack trace
}
catch (InternalBufferOverflowException a)
{
string me = a.Message;
throw;
}
catch (HttpRequestException c)
{
string error = c.Message;
throw;
}
catch (DriveNotFoundException e)
{
string mes = e.Message;
throw;
}
catch (NullReferenceException e)
{
throw new UserFriendlyException(500, "Please Select a file to proceed", e.Message);
}
}
}
}

Related

Upload to GCP Storage signed url chunk by chunk for file

I was reading this post -> upload to google cloud storage signed url with javascript
and it reads the entire file into the reader and then seems to send the entire file. Is there a way instead to read a chunk and send a chunk with GCP Storage signed URLs? That way we do not blow memory on a very large file, and we can show a progress bar as we upload.
We are fine with any JavaScript client, as we do not currently use any right now.
thanks,
Dean
Resumable uploads work by sending multiple requests, each of which contains a portion of the object you're uploading.
When working with resumable uploads, you only create and use a signed URL for the POST request that initiates the upload. This initial request returns a session URI that you use in subsequent PUT requests to upload the data. Since the session URI acts as an authentication token, the PUT requests do not use any signed URLs.
Once you've initiated a resumable upload, there are two ways to upload the object's data:
In a single chunk: This approach is usually best, since it requires fewer requests and thus has better performance.
In multiple chunks: Use this approach if you need to reduce the amount of data transferred in any single request, such as when there is a fixed time limit for individual requests, or if you don't know the total size of the upload at the time the upload begins.
You can use the Cloud Storage Node.js library. Do note that when using a signed URL to start a resumable upload session, you will need to specify the x-goog-resumable header with the start value in the request, or else signature validation will fail. Refer to this documentation for additional samples and guides on getting a signed URL to allow limited-time access to a bucket.
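For concreteness, here is a minimal C# sketch of that initiation step. It is hedged: it reuses the Google.Cloud.Storage.V1 UrlSigner shown in the answer below, and the bucket name, object name, and credentials.json path are placeholders.
var urlSigner = UrlSigner.FromServiceAccountPath("credentials.json");
var template = UrlSigner.RequestTemplate
.FromBucket("my-bucket")
.WithObjectName("big-file.dat")
.WithHttpMethod(HttpMethod.Post)
.WithRequestHeaders(new Dictionary<string, IEnumerable<string>>
{
{ "x-goog-resumable", new[] { "start" } } //required, or signature validation fails
});
var signedUrl = urlSigner.Sign(template, UrlSigner.Options.FromDuration(TimeSpan.FromHours(1)));
using (var client = new HttpClient())
{
var initRequest = new HttpRequestMessage(HttpMethod.Post, signedUrl);
initRequest.Headers.Add("x-goog-resumable", "start");
initRequest.Content = new ByteArrayContent(new byte[0]); //zero-length body initiates the session
var initResponse = client.SendAsync(initRequest).Result;
//the Location header carries the session URI for all subsequent PUTs;
//it acts as the auth token, so those PUTs need no further signing
Uri sessionUri = initResponse.Headers.Location;
}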
We are doing chunked uploads with composing - so we chunk the file and create a signed URL for every chunk. These chunks are then composed.
Here is a fully working C# example for chunked upload and download of a test file to a Google Cloud Storage bucket (it took me a long time to put my original solution together because I didn't find much online). To compile, you need to install from NuGet:
https://www.nuget.org/packages/MimeTypes
https://www.nuget.org/packages/Crc32.NET/1.2.0/
You also need to install the Google Cloud storage API
https://www.nuget.org/packages/Google.Cloud.Storage.V1/
Finally, it is assumed that you have a JSON file with credentials downloaded from the Google Cloud console (here it is called credentials.json).
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading; //needed for the Thread class used below
using System.Threading.Tasks;
using Google.Cloud.Storage.V1;
using Google.Apis.Storage.v1.Data;
using System.Net.Http;
using System.Net.Http.Headers;
using System.IO;
using System.Xml;
using System.Web;
using Google.Apis.Auth.OAuth2;
using System.Security.Cryptography;
using Force.Crc32;
namespace GoogleCloudPOC
{
class Program
{
static StorageClient storage;
static UrlSigner urlSigner;
static string bucketName = "ratiodata";
static void Main(string[] args)
{
var credential = GoogleCredential.FromFile("credentials.json");
storage = StorageClient.Create(credential);
urlSigner = UrlSigner.FromServiceAccountPath("credentials.json");
//create a dummy file
var arr = new byte[1000000];
var r = new Random();
for(int i = 0; i < arr.Length; i++)
{
arr[i] = (byte) r.Next(255);
}
//now upload this file in two chunks - we use two threads to illustrate that it is done in parallel
Console.WriteLine("Starting parallel upload ...");
string cloudFileName = "parallel_upload_test.dat";
var threadpool = new Thread[2];
int offset = 0;
int buflength = 100000;
int blockNumber = 0;
var blockList = new SortedDictionary<int, string>();
for(int t = 0; t < threadpool.Length; t++)
{
threadpool[t] = new Thread(delegate ()
{
while (true)
{
int currentOffset = -1;
int currentBlocknumber = -1;
lock (arr)
{
if (offset >= arr.Length) { break; }
currentOffset = offset;
currentBlocknumber = blockNumber;
offset += buflength;
blockNumber++;
}
int len = buflength;
if (currentOffset + len > arr.Length)
{
len = arr.Length - currentOffset;
}
//create signed url
var dict = new Dictionary<string, string>();
//calculate hash
var crcHash = Crc32CAlgorithm.Compute(arr, currentOffset, len);
var b = BitConverter.GetBytes(crcHash);
if (BitConverter.IsLittleEndian)
{
Array.Reverse(b);
}
string blockID = $"__TEMP__/{cloudFileName.Replace('/', '*')}.part_{currentBlocknumber}_{Convert.ToBase64String(b)}";
lock (blockList)
{
blockList.Add(currentBlocknumber, blockID);
}
dict.Add("x-goog-hash", $"crc32c={Convert.ToBase64String(b)}");
//add custom time
var dt = DateTimeOffset.UtcNow.AddHours(-23); //cloud storage will delete the temp files 6 hours after through lifecycle policy (if set to 1 day after custom time)
var CustomTime = String.Format("{0:D4}-{1:D2}-{2:D2}T{3:D2}:{4:D2}:{5:D2}.{6:D2}Z", dt.Year, dt.Month, dt.Day, dt.Hour, dt.Minute, dt.Second, dt.Millisecond / 10);
dict.Add("x-goog-custom-time", CustomTime);
var signedUrl = getSignedUrl(blockID, 1, "upload", dict);
//now perform the actual upload with this URL - this part could run in the browser as well
using (var client = new HttpClient())
{
var content = new ByteArrayContent(arr, currentOffset, len);
content.Headers.ContentType = MediaTypeHeaderValue.Parse("application/octet-stream");
foreach (var kvp in dict)
{
client.DefaultRequestHeaders.Add(kvp.Key, kvp.Value);
}
var response = client.PutAsync(signedUrl, content).Result;
if (!response.IsSuccessStatusCode)
{
throw new Exception("upload failed"); //this should be replaced with some sort of exponential backoff
}
}
}
});
threadpool[t].Start();
}
for (int t = 0; t < threadpool.Length; t++)
{
threadpool[t].Join();
}
//now we compose the chunks into a single file - we can do at most 32 at a time
BlobCombine(blockList.Values.ToArray(), cloudFileName);
Console.WriteLine("... parallel upload finished");
//now use chunked download
Console.WriteLine("Starting parallel download ...");
var downloadedArr = new byte[arr.Length];
threadpool = new Thread[2];
offset = 0;
buflength = 200000;
var downloadUrl = getSignedUrl(cloudFileName, 1, "download"); //single download URL is sufficient
for (int t = 0; t < threadpool.Length; t++)
{
threadpool[t] = new Thread(delegate ()
{
while (true)
{
int currentOffset = -1;
lock (downloadedArr)
{
if (offset >= arr.Length) { break; }
currentOffset = offset;
offset += buflength;
}
int len = buflength;
if (currentOffset + len > downloadedArr.Length)
{
len = downloadedArr.Length - currentOffset;
}
//now perform the actual download with this URL - this part could run in the browser as well
var tags = new Dictionary<string, string>();
tags.Add("Range", $"bytes={currentOffset}-{currentOffset + len - 1}");
using (var client = new HttpClient())
{
var request = new HttpRequestMessage { RequestUri = new Uri(downloadUrl) };
foreach (var kvp in tags)
{
client.DefaultRequestHeaders.Add(kvp.Key, kvp.Value);
}
var response = client.SendAsync(request).Result;
var buffer = new byte[len];
//Stream.Read may return fewer bytes than requested, so loop until the whole range is in
var respStream = response.Content.ReadAsStream();
int read = 0;
while (read < len)
{
int n = respStream.Read(buffer, read, len - read);
if (n == 0) { throw new Exception("unexpected end of download stream"); }
read += n;
}
lock (downloadedArr)
{
Array.Copy(buffer, 0, downloadedArr, currentOffset, len);
}
}
}
});
threadpool[t].Start();
}
for (int t = 0; t < threadpool.Length; t++)
{
threadpool[t].Join();
}
Console.WriteLine("... parallel download finished");
//compare original array and downloaded array
for(int i = 0; i < arr.Length; i++)
{
if (arr[i] != downloadedArr[i])
{
throw new Exception("download is different from original data");
}
}
Console.WriteLine("good job: original and downloaded data are the same!");
}
static string getSignedUrl(string cloudFileName, int hours, string capability, Dictionary<string, string> tags = null)
{
string url = null;
switch (capability)
{
case "download":
url = urlSigner.Sign(bucketName, cloudFileName, TimeSpan.FromHours(hours), HttpMethod.Get);
break;
case "upload":
var requestHeaders = new Dictionary<string, IEnumerable<string>>();
if (tags != null)
{
foreach (var kvp in tags)
{
requestHeaders.Add(kvp.Key, new[] { kvp.Value });
}
}
UrlSigner.Options options = UrlSigner.Options.FromDuration(TimeSpan.FromHours(hours));
UrlSigner.RequestTemplate template = UrlSigner.RequestTemplate
.FromBucket(bucketName)
.WithObjectName(cloudFileName).WithHttpMethod(HttpMethod.Put);
if (requestHeaders.Count > 0)
{
template = template.WithRequestHeaders(requestHeaders);
}
url = urlSigner.Sign(template, options);
break;
case "delete":
url = urlSigner.Sign(bucketName, cloudFileName, TimeSpan.FromHours(hours), HttpMethod.Delete);
break;
}
return url;
}
static bool BlobCombine(string[] inputFiles, string outputFile)
{
var sourceObjects = new List<ComposeRequest.SourceObjectsData>();
foreach (var fn in inputFiles)
{
sourceObjects.Add(new ComposeRequest.SourceObjectsData { Name = fn });
}
while (sourceObjects.Count > 32)
{
var prefix = sourceObjects.First().Name.Split('.').First();
var newSourceObjects = new List<ComposeRequest.SourceObjectsData>();
var currentSplit = new List<ComposeRequest.SourceObjectsData>();
var sb = new StringBuilder();
for (int i = 0; i < sourceObjects.Count; i++)
{
sb.Append(sourceObjects[i].Name.Split('.').Last());
currentSplit.Add(sourceObjects[i]);
if (currentSplit.Count == 32)
{
var targetName = $"{prefix}.{HashStringOne(sb.ToString())}";
if (!condense(currentSplit, targetName, false))
{
return false;
}
newSourceObjects.Add(new ComposeRequest.SourceObjectsData() { Name = targetName });
currentSplit = new List<ComposeRequest.SourceObjectsData>();
sb = new StringBuilder();
}
}
if (currentSplit.Count == 1)
{
newSourceObjects.Add(currentSplit[0]);
}
if (currentSplit.Count > 1)
{
var targetName = $"{prefix}.{HashStringOne(sb.ToString())}";
if (!condense(currentSplit, targetName, false))
{
return false;
}
newSourceObjects.Add(new ComposeRequest.SourceObjectsData() { Name = targetName });
}
sourceObjects = newSourceObjects;
}
return condense(sourceObjects, outputFile, true);
}
static ulong HashStringOne(string s)
{
ulong hash = 0;
for (int i = 0; i < s.Length; i++)
{
hash += (ulong)s[i];
hash += (hash << 10);
hash ^= (hash >> 6);
}
hash += (hash << 3);
hash ^= (hash >> 11);
hash += (hash << 15);
return hash;
}
static bool condense(List<ComposeRequest.SourceObjectsData> input, string targetName, bool lastRound)
{
try
{
storage.Service.Objects.Compose(new ComposeRequest
{
SourceObjects = input
}, bucketName, targetName).Execute();
if (!lastRound)
{
//set custom time
var file = storage.GetObject(bucketName, targetName);
file.CustomTime = DateTime.UtcNow.AddHours(-23);
file = storage.UpdateObject(file);
}
else
{
//try to set mime type based on file extensions
var file = storage.GetObject(bucketName, targetName);
file.ContentType = MimeTypes.GetMimeType(targetName);
file = storage.UpdateObject(file);
}
return true;
}
catch (Exception)
{
return false;
}
}
}
}
The upload is performed in parallel using signed URLs. Even though this is a C# command line program, you could easily put this code into some ASP.NET Core backend. There are a few lines of code where the actual upload/download happens using HttpClient - those could be done in JavaScript in the browser.
The only thing that has to run on the backend is creating the signed URLs - plus the compositing of the chunks (this could probably be done in the browser, but it typically isn't a heavy operation, and Google recommends not doing these operations with signed URLs).
Note that you have to create a different signed URL for each upload chunk - but a single signed URL is sufficient for the download.
Also note that the composition code is a bit involved because you can only combine up to 32 chunks into a new object on Cloud Storage - hence you might need a few rounds of composition (you can compose objects that are already composed).
I am including CRC32C hashes in the upload to make sure it's uploaded correctly. There should be some JavaScript library to perform this in the browser. If you run this in the browser, you need to send the hash to the backend when requesting a signed upload URL, because this parameter is embedded in the PUT header and has to be included in the signature of the signed URL.
The custom time is included and set to -23 hours from the current time so that you can set a lifecycle rule on your bucket which deletes the temporary chunks one day after the custom time (effectively this will be a few hours after the chunk was created). You can also manually delete the chunks, but I would use the custom-time approach anyway to make sure you are not gunking up your bucket with failed uploads.
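For reference, that lifecycle rule is a small JSON document applied with gsutil lifecycle set lifecycle.json gs://your-bucket; a sketch of the relevant rule (delete objects one day after their custom time) looks like this:
{
"rule": [
{
"action": { "type": "Delete" },
"condition": { "daysSinceCustomTime": 1 }
}
]
}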
The above approach is truly parallel upload/download. If you just care about chunking (for a progress bar, say) but don't need parallel threads doing the upload/download, then a resumable upload is possible (you would still use the same download approach as outlined above). Such an upload has to be initiated with a single POST call, and then you can upload the file chunk by chunk (similar to the way the download code works), as the sketch after this paragraph shows.
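A hedged sketch of that chunk-by-chunk phase in the same C# style, where sessionUri is the URI returned by the initiating POST and everything else is illustrative (ContentRangeHeaderValue comes from System.Net.Http.Headers, already imported in the example above):
static void UploadInChunks(Uri sessionUri, byte[] data)
{
const int chunkSize = 256 * 1024; //chunks must be multiples of 256 KiB, except the final one
long total = data.Length;
using (var client = new HttpClient())
{
for (long offset = 0; offset < total; offset += chunkSize)
{
int len = (int)Math.Min(chunkSize, total - offset);
var put = new HttpRequestMessage(HttpMethod.Put, sessionUri);
put.Content = new ByteArrayContent(data, (int)offset, len);
//tell Cloud Storage which byte range this chunk covers and the total object size
put.Content.Headers.ContentRange = new ContentRangeHeaderValue(offset, offset + len - 1, total);
var response = client.SendAsync(put).Result;
//308 "Resume Incomplete" is expected for intermediate chunks; 200/201 finalizes the object
if ((int)response.StatusCode != 308 && !response.IsSuccessStatusCode)
{
throw new Exception("chunk upload failed at offset " + offset);
}
}
}
}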

AWS S3 returns 404 for a file that definitely still exists there

We have some code that downloads a bunch of S3 files to a local directory. The list of files to retrieve comes from a query we run; it only lists files that actually exist in our S3 bucket.
As we loop to retrieve these files, about 10% of them return a 404 error as if the file doesn't exist. I log the name/location of each such file so I can go to S3 and check, and sure enough every single one of them IS on S3, in the location we went looking for it.
Why does S3 throw a 404 when the file exists?
Here is the Groovy code of the script.
class RetrieveS3FilesFromCSVLoader implements Loader {
private static String missingFilesFile = "00-MISSED_FILES.csv"
private static String csvFileName = "/csv/s3file2.csv"
private static String saveFilesToLocation = "/tmp/retrieve/"
public static final char SEPARATOR = ','
@Autowired
DocumentFileService documentFileService
private void readWithCommaSeparatorSQL() {
int counter = 0
String fileName
String fileLocation
File missedFiles = new File(saveFilesToLocation + missingFilesFile)
PrintWriter writer = new PrintWriter(missedFiles)
File fileCSV = new File(getClass().getResource(csvFileName).toURI())
fileCSV.splitEachLine(SEPARATOR as String) { nextLine ->
//if (counter < 15) {
if (nextLine != null && (nextLine[0] != 'FileLocation')) {
counter++
try {
//Remove 0, only if client number start with "0".
fileLocation = nextLine[0].trim()
byte[] fileBytes = documentFileService.getFile(fileLocation)
if (fileBytes != null) {
fileName = fileLocation.substring(fileLocation.indexOf("/") + 1, fileLocation.length())
File file = new File(saveFilesToLocation + fileName)
file.withOutputStream {
it.write fileBytes
}
println "$counter) Wrote file ${fileLocation} to ${saveFilesToLocation + fileLocation}"
} else {
println "$counter) UNABLE TO RETRIEVE FILE ELSE: $fileLocation"
writer.println(fileLocation)
}
} catch (Exception e) {
println "$counter) UNABLE TO RETRIEVE FILE: $fileLocation"
println(e.getMessage())
writer.println(fileLocation)
}
} else {
counter++;
}
//}
}
writer.close()
}
}
Here is the code for getFile(fileLocation) and client creation.
public byte[] getFile(String filename) throws IOException {
AmazonS3Client s3Client = connectToAmazonS3Service();
S3Object object = s3Client.getObject(S3_BUCKET_NAME, filename);
if(object == null) {
return null;
}
byte[] fileAsArray = IOUtils.toByteArray(object.getObjectContent());
object.close();
return fileAsArray;
}
/**
* Connects to Amazon S3
*
* @return instance of AmazonS3Client
*/
private AmazonS3Client connectToAmazonS3Service() {
AWSCredentials credentials;
try {
credentials = new BasicAWSCredentials(S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY);
} catch (Exception e) {
throw new AmazonClientException(
"Cannot load the credentials from the credential profiles file. " +
"Please make sure that your credentials file is at the correct " +
"location (~/.aws/credentials), and is in valid format.",
e);
}
AmazonS3Client s3 = new AmazonS3Client(credentials);
Region usEast1 = Region.getRegion(Regions.US_EAST_1);
s3.setRegion(usEast1);
return s3;
}
The code above works for 90% of the files in the list passed to the script, but we know for a fact that 100% of the files exist in S3, at the location string we are passing.
I am just an idiot. I thought it had the production AWS credentials in the properties file. Instead it was the development credentials. So I had the wrong credentials.
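When this happens, one quick sanity check is to print which identity the SDK actually resolved before trusting the bucket contents. A minimal sketch using the .NET SDK's STS client (the Java SDK used above has an equivalent getCallerIdentity call):
using System;
using Amazon.SecurityToken;
using Amazon.SecurityToken.Model;
class CheckIdentity
{
static void Main()
{
//uses whatever credentials the SDK resolves (properties file, profile, env vars, ...)
var sts = new AmazonSecurityTokenServiceClient();
var identity = sts.GetCallerIdentityAsync(new GetCallerIdentityRequest()).Result;
//if this prints the development account instead of production, the mysterious 404s are explained
Console.WriteLine("Account: " + identity.Account + ", ARN: " + identity.Arn);
}
}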

Authentication Error (Authentication Failed) Using Sabre Soap Message

I am using the SOAP service to authenticate requests with Sabre, as in the document https://developer.sabre.com/docs/read/soap_basics/Authentication.
I generated the proxy classes using the WSDL and put the authentication credentials in the code. This is my code, for test purposes:
using ConsoleApplication1.SessionReference1;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace ConsoleApplication1
{
class Program
{
[STAThread]
static void Main(string[] args)
{
try
{
// Set user information, including security credentials and the IPCC.
string username = "my-username";
string password = "my-password";
string ipcc = "not-understand";
string domain = "EXT";
string temp = Environment.GetEnvironmentVariable("tmp"); // Get temp directory
string PropsFileName = temp + "/session.properties"; // Define dir and file name
DateTime dt = DateTime.UtcNow;
string tstamp = dt.ToString("s") + "Z";
//Create the message header and provide the conversation ID.
MessageHeader msgHeader = new MessageHeader();
msgHeader.ConversationId = "TestSession"; // Set the ConversationId
From from = new From();
PartyId fromPartyId = new PartyId();
PartyId[] fromPartyIdArr = new PartyId[1];
fromPartyId.Value = "WebServiceClient";
fromPartyIdArr[0] = fromPartyId;
from.PartyId = fromPartyIdArr;
msgHeader.From = from;
To to = new To();
PartyId toPartyId = new PartyId();
PartyId[] toPartyIdArr = new PartyId[1];
toPartyId.Value = "WebServiceSupplier";
toPartyIdArr[0] = toPartyId;
to.PartyId = toPartyIdArr;
msgHeader.To = to;
//Add the value for eb:CPAId, which is the IPCC.
//Add the value for the action code of this Web service, SessionCreateRQ.
msgHeader.CPAId = ipcc;
msgHeader.Action = "SessionCreateRQ";
Service service = new Service();
service.Value = "SessionCreate";
msgHeader.Service = service;
MessageData msgData = new MessageData();
msgData.MessageId = "mid:20001209-133003-2333@clientofsabre.com1";
msgData.Timestamp = tstamp;
msgHeader.MessageData = msgData;
Security security = new Security();
SecurityUsernameToken securityUserToken = new SecurityUsernameToken();
securityUserToken.Username = username;
securityUserToken.Password = password;
securityUserToken.Organization = ipcc;
securityUserToken.Domain = domain;
security.UsernameToken = securityUserToken;
SessionCreateRQ req = new SessionCreateRQ();
SessionCreateRQPOS pos = new SessionCreateRQPOS();
SessionCreateRQPOSSource source = new SessionCreateRQPOSSource();
source.PseudoCityCode = ipcc;
pos.Source = source;
req.POS = pos;
SessionCreatePortTypeClient s = new SessionCreatePortTypeClient();
SessionCreateRS resp = s.SessionCreateRQ(ref msgHeader, ref security, req);
//SessionCreateRQService serviceObj = new SessionCreateRQService();
//serviceObj.MessageHeaderValue = msgHeader;
//serviceObj.SecurityValue = security;
//SessionCreateRS rs = new SessionCreateRS();
//SessionCreateRS = serviceObj.SessionCreateRQ(req); // Send the request
if (resp.Errors != null && resp.Errors.Error != null)
{
Console.WriteLine("Error : " + resp.Errors.Error.ErrorInfo.Message);
}
else
{
// msgHeader = serviceObj.MessageHeaderValue;
// security = serviceObj.SecurityValue;
Console.WriteLine("**********************************************");
Console.WriteLine("Response of SessionCreateRQ service");
Console.WriteLine("BinarySecurityToken returned : " + security.BinarySecurityToken);
Console.WriteLine("**********************************************");
string ConvIdLine = "convid=" + msgHeader.ConversationId; // ConversationId to a string
string TokenLine = "securitytoken=" + security.BinarySecurityToken; // BinarySecurityToken to a string
string ipccLine = "ipcc=" + ipcc; // IPCC to a string
// File.Delete(PropsFileName); // Clean up
// TextWriter tw = new StreamWriter(PropsFileName); // Create & open the file
// tw.WriteLine(DateTime.Now); // Write the date for reference
// tw.WriteLine(TokenLine); // Write the BinarySecurityToken
// tw.WriteLine(ConvIdLine); // Write the ConversationId
// tw.WriteLine(ipccLine); // Write the IPCC
// tw.Close();
//Console.Read();
}
}
catch (Exception e)
{
Console.WriteLine("Exception Message : " + e.Message);
Console.WriteLine("Exception Stack Trace : " + e.StackTrace);
Console.Read();
}
}
}
}
Please help me.
I didn't check the sequence of the code in detail, but checking the values you set for organization and domain:
string ipcc = "not-understand";
string domain = "EXT";
Unless you did it intentionally to mask the values, you should have received your IPCC value from Sabre after getting a SOAP web services account.
The value for the SOAP APIs domain is normally "DEFAULT".
Registering on Dev Studio only gives you REST test credentials (not SOAP), so you can use this form to request SOAP test credentials:
https://developer.sabre.com/contact
Finally, I don't quite see the environment/target URL you're using to test the SessionCreate service... the CERT one is: https://sws3-crt.cert.sabre.com/
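If the generated proxy is a WCF client (as SessionCreatePortTypeClient above appears to be), a hedged way to point it at the CERT endpoint in code is:
SessionCreatePortTypeClient s = new SessionCreatePortTypeClient();
//Endpoint.Address is the standard ClientBase property; this assumes the proxy was generated as a WCF client
s.Endpoint.Address = new System.ServiceModel.EndpointAddress("https://sws3-crt.cert.sabre.com/");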

How to set HtmlSaveOptions to save html and resources to stream using Aspose PDF

I am trying to use Aspose.PDF to load PDFs from databases, convert them to HTML, and render them on our web page. I want to know if we can save both the document and the resources to streams, since the current example in the Aspose.PDF documentation saves CSS and images to a local path. I have tried this, but there is an error that Aspose.Pdf.SaveFormat.Html is not supported.
Aspose.Pdf.Document PDFDocument = new Aspose.Pdf.Document(PDFStream);
MemoryStream HTMLStreamFromPDF = new MemoryStream();
PDFDocument.Save(HTMLStreamFromPDF, Aspose.Pdf.SaveFormat.Html);
If it can be done, how should I write the parameters CustomResourceSavingStrategy, CustomCssSavingStrategy, and CustomStrategyOfCssUrlCreation of HtmlSaveOptions? I am sorry that I am not quite familiar with delegates in C#.
Thanks!
Finally found a way to save all files to stream.
MemoryStream HTMLStreamFromPDF = new MemoryStream();
List<MemoryStream> ResourseStreamList = new List<MemoryStream>();
List<string> ResourceNameList = new List<string>();
MemoryStream CSSStream = new MemoryStream();
Aspose.Pdf.HtmlSaveOptions saveOptions = new Aspose.Pdf.HtmlSaveOptions();
CustomResourcesProcessingBind customResourcesProcessingBind = new CustomResourcesProcessingBind((_1) => CustomResourcesProcessing(ResourseStreamList,ResourceNameList, RequestURL, _1));
saveOptions.CustomResourceSavingStrategy = new Aspose.Pdf.HtmlSaveOptions.ResourceSavingStrategy(customResourcesProcessingBind);
CssUrlCreationCustomStrategyBind cssUrlCreationCustomStrategyBind = new CssUrlCreationCustomStrategyBind((_1) => CssUrlCreationCustomStrategy(RequestURL, _1));
saveOptions.CustomStrategyOfCssUrlCreation = new Aspose.Pdf.HtmlSaveOptions.CssUrlMakingStrategy(cssUrlCreationCustomStrategyBind);
CustomCssSavingProcessingBind customCssSavingProcessingBind = new CustomCssSavingProcessingBind((_1) => CustomCssSavingProcessing(CSSStream, _1));
saveOptions.CustomCssSavingStrategy = new Aspose.Pdf.HtmlSaveOptions.CssSavingStrategy(customCssSavingProcessingBind);
saveOptions.HtmlMarkupGenerationMode = Aspose.Pdf.HtmlSaveOptions.HtmlMarkupGenerationModes.WriteOnlyBodyContent;
PDFDocument.Save(HTMLStreamFromPDF, saveOptions);
private delegate string CustomResourcesProcessingBind(Aspose.Pdf.SaveOptions.ResourceSavingInfo resourceSavingInfo);
private static string CustomResourcesProcessing(List<MemoryStream> ResourseStreamList, List<string> ResourceNameList, string RequestURL, Aspose.Pdf.SaveOptions.ResourceSavingInfo resourceSavingInfo)
{
MemoryStream newResource = new MemoryStream();
resourceSavingInfo.ContentStream.CopyTo(newResource);
ResourceNameList.Add(resourceSavingInfo.SupposedFileName);
ResourseStreamList.Add(newResource);
string urlThatWillBeUsedInHtml = RequestURL +"/"+ Path.GetFileName(resourceSavingInfo.SupposedFileName);
return urlThatWillBeUsedInHtml;
}
private delegate string CssUrlCreationCustomStrategyBind(Aspose.Pdf.HtmlSaveOptions.CssUrlRequestInfo requestInfo);
private static string CssUrlCreationCustomStrategy(string RequestURL,Aspose.Pdf.HtmlSaveOptions.CssUrlRequestInfo requestInfo)
{
return RequestURL + "/css_style.css";
}
private delegate void CustomCssSavingProcessingBind(Aspose.Pdf.HtmlSaveOptions.CssSavingInfo resourceInfo);
private static void CustomCssSavingProcessing(MemoryStream CSSStream, Aspose.Pdf.HtmlSaveOptions.CssSavingInfo resourceInfo)
{
resourceInfo.ContentStream.CopyTo(CSSStream);
}
Check the following sample code regarding how to use parameters CustomResourceSavingStrategy, CustomCssSavingStrategy and CustomStrategyOfCssUrlCreation of HtmlSaveOptions when converting from PDF to HTML.
static string _folderForReferencedResources;
static void Main(string[] args)
{
Document pdfDocument = new Document(@"F:\ExternalTestsData\input.pdf");
string outHtmlFile = @"F:\ExternalTestsData\output.html";
_folderForReferencedResources = @"F:\ExternalTestsData\resources\";
//-----------------------------------------------------
// clean results if they are already present
//-----------------------------------------------------
if (Directory.Exists(_folderForReferencedResources))
{
Directory.Delete(_folderForReferencedResources, true);
}
File.Delete(outHtmlFile);
//-----------------------------------------------------
// create HtmlSaveOption with tested feature
//-----------------------------------------------------
HtmlSaveOptions saveOptions = new HtmlSaveOptions();
saveOptions.CustomResourceSavingStrategy = new HtmlSaveOptions.ResourceSavingStrategy(Strategy_11_CUSTOM_SAVE_OF_FONTS_AND_IMAGES);
saveOptions.CustomCssSavingStrategy = new HtmlSaveOptions.CssSavingStrategy(Strategy_11_CSS_WriteCssToPredefinedFolder);
saveOptions.CustomStrategyOfCssUrlCreation = new HtmlSaveOptions.CssUrlMakingStrategy(Strategy_11_CSS_ReturnResultPathInPredefinedTestFolder);
using (Stream outStream = File.OpenWrite(outHtmlFile))
{
pdfDocument.Save(outStream, saveOptions);
}
}
private static void Strategy_11_CSS_WriteCssToPredefinedFolder(HtmlSaveOptions.CssSavingInfo resourceInfo)
{
if (!Directory.Exists(_folderForReferencedResources))
{
Directory.CreateDirectory(_folderForReferencedResources);
}
string path = _folderForReferencedResources + Path.GetFileName(resourceInfo.SupposedURL);
System.IO.BinaryReader reader = new BinaryReader(resourceInfo.ContentStream);
System.IO.File.WriteAllBytes(path, reader.ReadBytes((int)resourceInfo.ContentStream.Length));
}
private static string Strategy_11_CSS_ReturnResultPathInPredefinedTestFolder(HtmlSaveOptions.CssUrlRequestInfo requestInfo)
{
return "file:///" + _folderForReferencedResources.Replace(#"\", "/") + "css_style{0}.css";
}
private static string Strategy_11_CUSTOM_SAVE_OF_FONTS_AND_IMAGES(SaveOptions.ResourceSavingInfo resourceSavingInfo)
{
if (!Directory.Exists(_folderForReferencedResources))
{
Directory.CreateDirectory(_folderForReferencedResources);
}
string path = _folderForReferencedResources + Path.GetFileName(resourceSavingInfo.SupposedFileName);
// the first part of this method handles saving of fonts
System.IO.BinaryReader contentReader = new BinaryReader(resourceSavingInfo.ContentStream);
System.IO.File.WriteAllBytes(path, contentReader.ReadBytes((int)resourceSavingInfo.ContentStream.Length));
string urlThatWillBeUsedInHtml = "file:///" + _folderForReferencedResources.Replace(@"\", "/") + Path.GetFileName(resourceSavingInfo.SupposedFileName);
return urlThatWillBeUsedInHtml;
}
The details are available here.
P.S. I am Social Media Developer at Aspose.

trying to upload video using graph api

I have been trying to implement a video upload Facebook feature for my mobile app for a while now but never really succeeded with REST. I learnt yesterday that the Graph alternative was available.
After getting a few errors, mainly to do with the access key, I have gotten to the point where the output stream successfully writes the movie file, but the input stream just receives an empty JSON array once I have written the 3GP file.
Does anyone have any idea why I would get an empty JSON array and no video published when I run all my code? I get a 200 response code and the server sends me a non-error response.
Any help greatly appreciated.
Here is the class that gets the blank JSON array (the send method). I have appended the token to the URL and put it in the table, to be sure. I am sorry if the code is untidy, but it's just been a day of trial and error.
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Enumeration;
import java.util.Hashtable;
import javax.microedition.io.Connector;
import javax.microedition.io.HttpConnection;
import net.rim.device.api.io.http.HttpProtocolConstants;
public class HttpMultipartRequest2
{
static final String BOUNDARY = "----------V2ymHFg03ehbqgZCaKO6jy";
byte[] postBytes = null;
String url = null;
Hashtable paramsTable;
public HttpMultipartRequest2(String url, Hashtable params,
String fileField, String fileName, String fileType, byte[] fileBytes) throws Exception
{
this.url = url;
String boundary = getBoundaryString();
paramsTable = params;
String boundaryMessage = getBoundaryMessage(boundary, params, fileField, fileName, fileType);
String endBoundary = "\r\n--" + boundary + "--\r\n";
ByteArrayOutputStream bos = new ByteArrayOutputStream();
bos.write(boundaryMessage.getBytes());
bos.write(fileBytes);
bos.write(endBoundary.getBytes());
this.postBytes = bos.toByteArray();
bos.close();
}
String getBoundaryString() {
return BOUNDARY;
}
String getBoundaryMessage(String boundary, Hashtable params, String fileField, String fileName, String fileType)
{
StringBuffer res = new StringBuffer("--").append(boundary).append("\r\n");
Enumeration keys = params.keys();
while(keys.hasMoreElements())
{
String key = (String)keys.nextElement();
String value = (String)params.get(key);
res.append("Content-Disposition: form-data; name=\"").append(key).append("\"\r\n")
.append("\r\n").append(value).append("\r\n")
.append("--").append(boundary).append("\r\n");
}
res.append("Content-Disposition: form-data; name=\"").append(fileField)
.append("\"; filename=\"").append(fileName).append("\"\r\n")
.append("Content-Type: ").append(fileType).append("\r\n\r\n");
Log.info(("res "+res.toString()));
return res.toString();
}
public String send() throws Exception
{
StringBuffer sb = new StringBuffer();
HttpConnection hc = null;
InputStream is = null;
ByteArrayOutputStream bos = new ByteArrayOutputStream();
byte[] res = null;
try
{
Log.info("before hc open"+ url);
hc = (HttpConnection) Connector.open(url);
//HttpConnection defaults to GET, so the request method must be set explicitly for the upload
hc.setRequestMethod(HttpConnection.POST);
hc.setRequestProperty("Content-Type", "multipart/form-data; boundary=" + getBoundaryString());
hc.setRequestProperty("access_token", (String)paramsTable.get("access_token"));
hc.setRequestProperty(HttpProtocolConstants.HEADER_CONTENT_LENGTH, String.valueOf(postBytes.length));
hc.setRequestProperty("x-rim-transcode-content", "none");
//write the whole multipart body before touching the input stream;
//opening the input stream first commits the request before the payload is sent
OutputStream dos = hc.openOutputStream();
dos.write(postBytes);
dos.flush();
dos.close();
is = hc.openInputStream();
int ch;
while ((ch = is.read()) != -1)
{
bos.write(ch);
sb.append((char)ch);
}
res = bos.toByteArray();
Log.info("Response received from Server is : " + sb.toString());
}
catch(Exception e)
{
Log.info("exception, response code " + hc.getResponseCode() + ": " + e);
}
catch(OutOfMemoryError error)
{
Log.info("outofmemory " + error);
System.gc();
}
finally
{
try
{
if(bos != null)
bos.close();
if(is != null)
is.close();
if(hc != null)
hc.close();
}
catch(Exception e2)
{
Log.info("finally exception"+ e2);
}
}
return sb.toString();
}
}
Are you trying to upload to a user's feed or to a page? There is an Open Bug regarding posting to pages.
Also, could you post some code?
Assuming that you've read the documentation:
Facebook Graph API->Video
And that you are using graph-video.facebook.com, not graph.facebook.com.
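For comparison, here is a hedged C# sketch of the same multipart upload against the video endpoint; the me/videos target, token, and file name are placeholders, and the Graph API expects the video bytes in a multipart field named "source":
using System;
using System.IO;
using System.Net.Http;
class VideoUpload
{
static void Main()
{
string token = "ACCESS_TOKEN"; //placeholder
byte[] fileBytes = File.ReadAllBytes("clip.3gp"); //placeholder file
using (var client = new HttpClient())
{
var form = new MultipartFormDataContent();
form.Add(new StringContent(token), "access_token");
form.Add(new ByteArrayContent(fileBytes), "source", "clip.3gp");
//note the graph-video host - video uploads through the plain graph host come back empty
var response = client.PostAsync("https://graph-video.facebook.com/me/videos", form).Result;
Console.WriteLine(response.Content.ReadAsStringAsync().Result);
}
}
}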