How to automatically go through and edit long txt files?

I have an issue: I've got some chat logs that are thousands of lines long, and I'm trying to isolate the messages from one specific user. The log looks like this:
[dd-mm-yy hh:mm pm/am] Username
message
[dd-mm-yy hh:mm pm/am] Username
message
[dd-mm-yy hh:mm pm/am] Username
message
In my file, I want to keep only the messages (not the other information like day, hour, or username) that one specific user has sent, and delete everything else, so I can process the contents of those messages. Is there anything out there that can help me achieve this? As you can see, it's a very tedious process to go through thousands of lines of logs doing this by hand.

I ended up writing a JS script to do what I wanted, since I couldn't find anything anywhere else. Here it is:
const fs = require("fs");
const readline = require("readline");

async function processLineByLine() {
  const fileStream = fs.createReadStream("./input.txt");
  const rl = readline.createInterface({
    input: fileStream,
    crlfDelay: Infinity,
  });
  // Note: we use the crlfDelay option to recognize all instances of CR LF
  // ('\r\n') in input.txt as a single line break.
  let trigger = false;
  for await (const line of rl) {
    // Each line in input.txt will be successively available here as `line`.
    console.log(`Line from file: ${line}`);
    if (line.includes("YOU DID THIS TO MY BOI LIM#7483")) {
      console.log("true");
      trigger = true;
    } else if (trigger) {
      console.log(`Line sent by user: ${line}`);
      fs.appendFile("output.txt", line + " ", (err) => {
        // throws an error, you could also catch it here
        if (err) throw err;
        // success case, the file was saved
        console.log("line saved");
      });
      trigger = false;
    }
  }
}

processLineByLine();
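For reference, a shorter variant of the same idea (a sketch only, assuming the log fits in memory and that each header line ends with the username):

const fs = require("fs");

const lines = fs.readFileSync("./input.txt", "utf8").split(/\r?\n/);
const messages = [];
for (let i = 0; i < lines.length - 1; i++) {
  // A header line looks like: [dd-mm-yy hh:mm pm/am] Username
  if (/^\[[^\]]+\]\s*YOU DID THIS TO MY BOI LIM#7483\s*$/.test(lines[i])) {
    messages.push(lines[i + 1]); // the message is on the line that follows
  }
}
fs.writeFileSync("output.txt", messages.join(" "));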

Related

If statement to allow single and multiple if options

I have an If statement dependent on a choice made in a Google Form. Everything currently works as needed BUT the question allows them to choose ALL OPTIONS THAT APPLY. If more than one option is chosen, the script fails.
How it currently works: a person fills out the form, the script grabs the info and inputs it into a document, gives a specified message based on the choice from the question (cif and message in the script below), then emails the correct recipient (chosen in the form). It works beautifully if cif is submitted with one choice.
What do I add/edit to the script to allow for multiple choices, and therefore multiple messages input into the document, without having to write an If statement for each scenario?
// Global variables
var docTemplate = "1EoBcz0BK4R5hm-q5pR68xnQnR8DlR56XzjxRrgsu4uE"; // *** replace with your template ID ***
var docName = "You got a High 5";

function onFormSubmit(e) { // add an onsubmit trigger
  // Values come from the spreadsheet form
  var observer = e.values[1];
  var teacher = e.values[2];
  var email = e.values[2];
  var period = e.values[4];
  var time = e.values[3];
  var cif = e.values[5];
  var comments = e.values[6];
  var message;
  if (cif == 'READ - Variety of texts') {
    message = 'read quote';
  } else if (cif == 'WRITE - Paper or electronic') {
    message = 'write quote';
  } else if (cif == 'THINK - Actively engaged students') {
    message = 'think quote';
  } else if (cif == 'TALK - Purposeful discussion') {
    message = 'talk quote';
  } else if (cif == 'MOVE - Students moving through class') {
    message = 'move quote';
  } else if (cif == 'CIF not observed') {
    message = 'CIF not observed';
  }
  // Get the document template, copy it as a new temp doc, and save the Doc's id
  var copyId = DriveApp.getFileById(docTemplate)
      .makeCopy(docName + ' for ' + teacher)
      .getId();
  // Open the temporary document
  var copyDoc = DocumentApp.openById(copyId);
  // Get the document's body section
  var copyBody = copyDoc.getActiveSection();
  // Replace placeholder keys
  copyBody.replaceText('keyobserver', observer);
  copyBody.replaceText('keyperiod', period);
  copyBody.replaceText('keytime', time);
  copyBody.replaceText('keycif', cif);
  copyBody.replaceText('keycomments', comments);
  copyBody.replaceText('keymessage', message);
  var todaysDate = Utilities.formatDate(new Date(), "GMT", "MM/dd/yyyy");
  copyBody.replaceText('keyTodaysDate', todaysDate);
  // Save and close the temporary document
  copyDoc.saveAndClose();
  // Convert the temporary document to PDF by using the getAs blob conversion
  var pdf = DriveApp.getFileById(copyId).getAs("application/pdf");
  // Attach the PDF and send the email
  var subject = "High 5 - it matters.";
  var body = "You got a High 5! See attached PDF. " +
      "Please do not reply to this email. You will be asked to supply a response through a link within the attached PDF. " +
      "'Do all the good you can. By all the means you can. In all the ways you can. In all the places you can. At all the times you can. To all the people you can. As long as you ever can. -John Wesley'";
  MailApp.sendEmail(email, subject, body, {htmlBody: body, attachments: pdf});
  // Delete the temp file
  DriveApp.getFileById(copyId).setTrashed(true);
}
Having multiple selected answers will return the values as one comma-separated string.
For example, e.values[5] can contain:
'READ - Variety of texts, WRITE - Paper or electronic, THINK - Actively engaged students, TALK - Purposeful discussion, MOVE - Students moving through class, CIF not observed'
in one single string, and using == will not work since its content differs from the string you are comparing against. This causes all of your if statements to fail because none of them matches the string. This leaves the variable message undefined, which causes an error in the replaceText() function.
To fix the issue, replace this block of code:
var message;
if (cif == 'READ - Variety of texts') {
  message = 'read quote';
} else if (cif == 'WRITE - Paper or electronic') {
  message = 'write quote';
} else if (cif == 'THINK - Actively engaged students') {
  message = 'think quote';
} else if (cif == 'TALK - Purposeful discussion') {
  message = 'talk quote';
} else if (cif == 'MOVE - Students moving through class') {
  message = 'move quote';
} else if (cif == 'CIF not observed') {
  message = 'CIF not observed';
}
with:
var message = [];
if (cif.match('READ - Variety of texts')) {
  message.push('read quote');
}
if (cif.match('WRITE - Paper or electronic')) {
  message.push('write quote');
}
if (cif.match('THINK - Actively engaged students')) {
  message.push('think quote');
}
if (cif.match('TALK - Purposeful discussion')) {
  message.push('talk quote');
}
if (cif.match('MOVE - Students moving through class')) {
  message.push('move quote');
}
if (cif.match('CIF not observed')) {
  message.push('CIF not observed');
}
and in your
copyBody.replaceText('keymessage', message);
just add the join() function to make message a comma-separated string when it contains more than one value:
copyBody.replaceText('keymessage', message.join(','));
Example Usage:
Multiple CIF:
var cif = 'READ - Variety of texts, WRITE - Paper or electronic, THINK - Actively engaged students, TALK - Purposeful discussion, MOVE - Students moving through class, CIF not observed';
Output:
read quote,write quote,think quote,talk quote,move quote,CIF not observed
Single CIF:
var cif = 'WRITE - Paper or electronic';
Output:
write quote
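As a side note, the chain of if blocks could also be collapsed into a lookup object; this is just a sketch of an alternative design, not part of the fix above:

// Map each form choice to its quote once, then build the message array
// by splitting the comma-separated answer string on ', '.
var quotes = {
  'READ - Variety of texts': 'read quote',
  'WRITE - Paper or electronic': 'write quote',
  'THINK - Actively engaged students': 'think quote',
  'TALK - Purposeful discussion': 'talk quote',
  'MOVE - Students moving through class': 'move quote',
  'CIF not observed': 'CIF not observed'
};
var message = cif.split(', ').map(function (choice) {
  return quotes[choice] || choice; // fall back to the raw choice if unmapped
});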
References
Array.join()
String.match()

Tracking failed jobs in SAS

I'm looking to improve the way we track a server that uses SAS to collect information from several databases. The challenge is that it's one of those servers the company used for a while without any governance: folders are everywhere, there's no actual folder structure, and anyone was doing whatever they pleased with it. You know the drill.
The option that I found for now is to put a macro in the failing jobs that tracks the logs and, if it finds an error, sends an email to a distribution list.
But this solution isn't very good software architecture: every time a new job is scheduled in the crontab, I need to slap the macro in there, and if things move to another directory it stops working, and so on; a lot of manual work cascading down.
The solution I'm looking for would be something that can read all logs from a list of directories (this would then be the biggest manual update it needs, and only because I don't have authorization to move the folders), fetch the ones that have errors, and output this to a web page, so we can check just that web page for centralized information (when it's scheduled, when it last ran, etc.).
By any chance does anyone have any suggestions on how to do this? I thought of running a SAS script to just output a txt file that I can fetch somewhere and then reading that with JavaScript on a web page, but I ran into a few difficulties looping through all of the folders, and this scripting language doesn't really feel all that great for this purpose. But I'm out of ideas for now. Recommendations?
If you are using Node server side to deliver web content, you can write a function that scans the folders listed in a control file for log files containing ERROR: messages and reports them.
For example:
log folders.txt (control file)
d:\temp\logs\job-1
d:\temp\logs\tuesday jobs
d:\jobs\wednesday\job set 1
d:\jobs\wednesday\job set 2\prod
node app (test version)
const express = require('express');
const app = express();
const port = 8081;

app.get('/sas/logs/errors', (req, res) => {
  res.setHeader('Content-Type', 'text/html');
  processFolders(res);
  res.end();
});

app.listen(port, () => console.log(`Listening on port ${port}`));

const fs = require('fs');
const readline = require('readline');
const path = require('path');

const listOfFoldersFile = 'log folders.txt';

function processFolders(res) {
  var level1Count = 0;
  var level2Count = 0;
  var level3Count = 0;
  var content;
  try {
    content = fs.readFileSync(listOfFoldersFile, 'UTF-8');
  }
  catch (err) {
    console.log(err.toString());
    res.write('<p>Configuration error in processFolders.</p>');
    return;
  }
  const folders = content.split(/\r?\n/);
  for (let folder of folders) {
    scanForLogs(folder);
  }
  if (level1Count == 0) {
    res.write('<p>No error messages found in SAS logs.</p>');
    return;
  }
  return;

  function scanForLogs(folder) {
    //console.log(`scan folder ${folder}`);
    var logfiles;
    try {
      logfiles = fs.readdirSync(folder).filter(filename => filename.match(/\.log$/));
    }
    catch (err) {
      console.log(err.toString());
      res.write('<p>Problem scanning a log folder.</p>');
      return;
    }
    level2Count = 0;
    for (let logfile of logfiles) {
      parseSASLog(folder, logfile);
    }
  }

  function parseSASLog(folder, filename) {
    //console.log(`parse log ${path.join(folder, filename)}`);
    var content;
    try {
      content = fs.readFileSync(path.join(folder, filename), 'UTF-8');
    }
    catch (err) {
      console.log(err.toString());
      res.write(`<p>Problem reading file ${filename}</p>`);
      return;
    }
    const lines = content.split(/\r?\n/);
    level3Count = 0;
    var linenum = 0;
    for (const line of lines) {
      linenum++;
      if (line.match(/^ERROR:/)) {
        reportErrorMessage(folder, filename, line, linenum);
      }
    }
    if (level3Count) {
      res.write('</pre>');
    }
  }

  function reportErrorMessage(folder, filename, line, linenum) {
    //console.log(`reportErrorMessage`);
    level1Count++;
    level2Count++;
    level3Count++;
    if (level1Count == 1) {
      res.write('<h1>SAS errors</h1>');
    }
    if (level2Count == 1) {
      res.write(`<h2>folder: ${folder}</h2>`);
    }
    if (level3Count == 1) {
      res.write(`<h3>file: ${filename}</h3><pre>`);
    }
    res.write(`${linenum}: ${line}\n`);
  }
}
Errors reported (example): the page renders an 'SAS errors' heading, a subheading per folder and per log file, and one 'linenum: ERROR: ...' line per match.
Other approaches could include:
- asynchronous server-side scans delivering output to the client piecewise through SignalR
- delivering the error report as JSON data which the client side uses to render the report (a minimal sketch follows)
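For the JSON variant in the last bullet, a minimal sketch (the endpoint name and the synchronous scan are assumptions, not part of the code above):

const fs = require('fs');
const path = require('path');

// Collect every ERROR: line as a plain object instead of writing HTML.
function collectErrors(listOfFoldersFile) {
  const errors = [];
  const folders = fs.readFileSync(listOfFoldersFile, 'UTF-8').split(/\r?\n/).filter(Boolean);
  for (const folder of folders) {
    const logfiles = fs.readdirSync(folder).filter(f => f.endsWith('.log'));
    for (const logfile of logfiles) {
      const lines = fs.readFileSync(path.join(folder, logfile), 'UTF-8').split(/\r?\n/);
      lines.forEach((line, index) => {
        if (/^ERROR:/.test(line)) {
          errors.push({ folder, file: logfile, line: index + 1, text: line });
        }
      });
    }
  }
  return errors;
}

// e.g. app.get('/sas/logs/errors.json', (req, res) => res.json(collectErrors('log folders.txt')));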

Calling a Web Service (containing multiple pages) does not load all the pages (without an added sleep delay)

My question is about a strange behaviour I notice both on my iPhone device and the Codename One simulator (NetBeans).
I invoke the code below, which calls a Google web service to provide a list of food places around a GPS coordinate:
The web service that is called is as follows (KEY OBSCURED):
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXXXXXXXXXXXXXXXX
Each result contains the next page token and thus, the second call (for the subsequent page) is as follows:
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXXXXXXXXXXXXXXXX&pagetoken=YYYYYYYYYYYYYYYYYY
public static byte[] getWSResponseData(String urlString, boolean usePost)
{
    ConnectionRequest r = new ConnectionRequest();
    r.setUrl(urlString);
    r.setPost(usePost);
    InfiniteProgress prog = new InfiniteProgress();
    Dialog dlg = prog.showInifiniteBlocking();
    r.setDisposeOnCompletion(dlg);
    NetworkManager.getInstance().addToQueueAndWait(r);
    try
    {
        Thread.sleep(2000);
    }
    catch (InterruptedException ex)
    {
    }
    byte[] responseData = r.getResponseData();
    return responseData;
}

public static void getLocationsList(double lat, double lng)
{
    boolean done = false;
    while (!done)
    {
        byte[] responseData = getWSResponseData(finalURL, false);
        result = Result.fromContent(parser.parseJSON(new InputStreamReader(new ByteArrayInputStream(responseData))));
        String venueNames[] = result.getAsStringArray("/results/name");
        nextToken = result.getAsString("/next_page_token");
        if (nextToken == null || nextToken.equals(""))
            done = true;
        else
            finalURL = completeURL + "&pagetoken=" + nextToken;
    }
    .....
}
This code works fine with the sleep timer, but when I remove the Thread.sleep, only the first page gets called.
Any help would be appreciated.
Using the debugger does not help as this is a timing issue and the issue does not occur when using the debugger.
Also, when I put some print statements into the code:
while (!done)
{
    String nextToken = null;
    System.out.println(finalURL);
    ...
}
System.out.println("Total Number of entries returned: " + itemCount);
I get the following output:
First Run (WITHOUT SLEEP):
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXX
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXX&pagetoken=CqQCF...
Total Number of entries returned: 20
Using the network monitor I see that the response to the second WS call returns:
{
    "html_attributions" : [],
    "results" : [],
    "status" : "INVALID_REQUEST"
}
Which is strange as when I cut and paste the WS URL into my browser, it works fine...
Second Run (WITH SLEEP):
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXX
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXX&pagetoken=CqQCFQEAA...
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXX&pagetoken=CsQDtQEAA...
Total Number of entries returned: 60
Well, it seems to be a Google API issue, as indicated here:
Paging on Google Places API returns status INVALID_REQUEST
I still could not get it to work by changing the WS URL with a random parameter as they suggested, but I will keep trying and will post something here if I get it to work. For now I will just keep a 2 second delay between the calls, which seems to work.
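In case it helps anyone, the delay/retry idea can be sketched in plain Node (not Codename One code; fetch requires Node 18+, and the names here are placeholders). The Places API issues the next_page_token slightly before it becomes valid, so polling until the status clears avoids a fixed sleep:

async function fetchAllPages(baseUrl) {
  const results = [];
  let url = baseUrl;
  while (url) {
    let data = await (await fetch(url)).json();
    // The token is issued a moment before it becomes valid,
    // so retry briefly instead of sleeping a fixed 2 seconds.
    while (data.status === 'INVALID_REQUEST') {
      await new Promise(resolve => setTimeout(resolve, 500));
      data = await (await fetch(url)).json();
    }
    results.push(...data.results);
    url = data.next_page_token ? `${baseUrl}&pagetoken=${data.next_page_token}` : null;
  }
  return results;
}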
Well, I gave up on using the Google WS for this and switched to Yelp, which works very well:
https://api.yelp.com/v3/businesses/search?.....

Test if file exists gets executed even at the time of submission

I implemented a feature that tests whether my file already exists; if it does, an error message and a JavaScript pop-up are displayed. Otherwise it does nothing.
The problem is that the check also runs when I save my content. Since the file has already been uploaded at that point, it still enters the branch and displays the error message, so I can never create my content.
Thank you in advance for your help.
function bportal_tesst_form_alter(&$form, &$form_state, $form_id) {
  if ($form_id == 'document_node_form') {
    $form['field_document_file']['und'][0]['#upload_validators']['custom_document_upload_validation'] = array();
  }
  return $form;
}

function custom_document_upload_validation($file) {
  $filename = $file->filename;
  $errors = array();
  if (file_exists('/var/www/vhosts/BP/docroot/sites/default/files-private/documents/' . $filename)) {
    print_r($filename);
    $errors[] = t("The new file already exists. Please use a different name.");
    drupal_add_js(drupal_get_path('module', 'bportal_tesst') . '/js/pop-in.js');
  }
  return $errors;
}

Some Problems of Indy 10 IdHTTP Implementation

With regard to Indy 10's IdHTTP, many things have been running perfectly, but there are a few things that don't work so well here. That is why, once again, I need your help.
The Download button has been working perfectly. I'm using the following code:
void __fastcall TForm1::DownloadClick(TObject *Sender)
{
    MyFile = SaveDialog->FileName;
    TFileStream* Fist = new TFileStream(MyFile, fmCreate | fmShareDenyNone);
    Download->Enabled = false;
    Urlz = Edit1->Text;
    Url->Caption = Urlz;
    try
    {
        IdHTTP->Get(Edit1->Text, Fist);
        IdHTTP->Connected();
        IdHTTP->Response->ResponseCode = 200;
        IdHTTP->ReadTimeout = 70000;
        IdHTTP->ConnectTimeout = 70000;
        IdHTTP->ReuseSocket;
        Fist->Position = 0;
    }
    __finally
    {
        delete Fist;
        Form1->Updated();
    }
}
However, the "Cancel Resume" button still can't resume interrupted downloads. Meaning, the server always sends back the entire file every time I call Get(), even though I've used the IdHTTP->Request->Ranges property.
I use the following code:
void __fastcall TForm1::CancelResumeClick(TObject *Sender)
{
    MyFile = SaveDialog->FileName;
    TFileStream* TFist = new TFileStream(MyFile, fmCreate | fmShareDenyNone);
    if (IdHTTP->Connected() == true)
    {
        IdHTTP->Disconnect();
        CancelResume->Caption = "RESUME";
        IdHTTP->Response->AcceptRanges = "Bytes";
    }
    else
    {
        try {
            CancelResume->Caption = "CANCEL";
            // IdHTTP->Request->Ranges == "0-100";
            // IdHTTP->Request->Range = Format("bytes=%d-", ARRAYOFCONST((TFist->Position)));
            IdHTTP->Request->Ranges->Add()->StartPos = TFist->Position;
            IdHTTP->Get(Edit1->Text, TFist);
            IdHTTP->Request->Referer = Edit1->Text;
            IdHTTP->ConnectTimeout = 70000;
            IdHTTP->ReadTimeout = 70000;
        }
        __finally {
            delete TFist;
        }
    }
}
Meanwhile, by using the FormatBytes function, found here, I have been able to show only the size of the downloaded file. I am still unable to determine the download or transfer speed.
I'm using the following code:
void __fastcall TForm1::IdHTTPWork(TObject *ASender, TWorkMode AWorkMode, __int64 AWorkCount)
{
    __int64 Romeo = 0;
    Romeo = IdHTTP->Response->ContentStream->Position;
    // Romeo = AWorkCount;
    Download->Caption = FormatBytes(Romeo) + " (" + IntToStr(Romeo) + " Bytes)";
    ForSpeed->Caption = FormatBytes(Romeo);
    ProgressBar->Position = AWorkCount;
    ProgressBar->Update();
    Form1->Updated();
}
Please advise and give an example. Any help would sure be appreciated!
In your DownloadClick() method:
Calling Connected() is useless, since you don't do anything with the result. Nor is there any guarantee that the connection will remain connected, as the server could send a Connection: close response header. I don't see anything in your code that is asking for HTTP keep-alives. Let TIdHTTP manage the connection for you.
You are forcing the Response->ResponseCode to 200. Don't do that. Respect the response code that the server actually sent. The fact that no exception was raised means the response was successful whether it is 200 or 206.
You are reading the ReuseSocket property value and ignoring it.
There is no need to reset the Fist->Position property to 0 before closing the file.
Now, with that said, your CancelResumeClick() method has many issues.
You are using the fmCreate flag when opening the file. If the file already exists, you will overwrite it from scratch, thus TFist->Position will ALWAYS be 0. Use fmOpenReadWrite instead so an existing file will open as-is. And then you have to seek to the end of the file to provide the correct Position to the Ranges header.
You are relying on the socket's Connected() state to make decisions. DO NOT do that. The connection may be gone after the previous response, or may have timed out and been closed before the new request is made. The file can still be resumed either way. HTTP is stateless. It does not matter if the socket remains open between requests, or is closed in between. Every request is self-contained. Use information provided in the previous response to govern the next request. Not the socket state.
You are modifying the value of the Response->AcceptRanges property, instead of using the value provided by the previous response. The server tells you if the file supports resuming, so you have to remember that value, or query it again, before attempting to resume the download.
When you actually call Get(), the server may or may not respect the requested Range, depending on whether the requested file supports byte ranges or not. If the server responds with a response code of 206, the requested range is accepted, and the server sends ONLY the requested bytes, so you need to APPEND them to your existing file. However, if the server responds with a response code of 200, the server is sending the entire file from scratch, so you need to REPLACE your existing file with the new bytes. You are not taking that into account.
In your IdHTTPWork() method, in order to calculate the download/transfer speed, you have to keep track of how many bytes are actually transferred between each firing of the event. When the event is fired, save the current AWorkCount and tick count, and then the next time the event is fired, compare the new AWorkCount and the current ticks to know how much time has elapsed and how many bytes were transferred. From those values, you can calculate the speed, and even the estimated time remaining.
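A minimal sketch of that bookkeeping, in JavaScript purely to illustrate the pattern (onWork and workCount are placeholders standing in for the OnWork event and its AWorkCount argument; it is the deltas that matter):

// Remember the byte count and timestamp from the previous event,
// then derive bytes-per-second from the deltas on each new event.
let prevCount = 0;
let prevTicks = Date.now();

function onWork(workCount) {
  const now = Date.now();
  const deltaBytes = workCount - prevCount; // bytes moved since the last event
  const deltaMs = now - prevTicks;          // time elapsed since the last event
  if (deltaMs > 0) {
    const bytesPerSecond = (deltaBytes * 1000) / deltaMs;
    console.log(`speed: ${(bytesPerSecond / 1024).toFixed(1)} KiB/s`);
  }
  prevCount = workCount;
  prevTicks = now;
}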
As for your progress bar, you can't use AWorkCount alone to calculate a new position. That only works if you set the progress bar's Max to AWorkCountMax in the OnWorkBegin event, and that value is not always known before a download begins. You need to take into account the size of the file being downloaded, whether it is being downloaded fresh or being resumed, how many bytes are being requested during a resume, etc. So there is a lot more work involved in displaying a progress bar for an HTTP download.
Now, to answer your two questions:
How to retrieve and save the download file to a disk by using its original name?
It is provided by the server in the filename parameter of the Content-Disposition header, and/or in the name parameter of the Content-Type header. If neither value is provided by the server, you can use the filename that is in the URL you are requesting. TIdHTTP has a URL property that provides the parsed version of the last requested URL.
However, since you are creating the file locally before sending your download request, you will have to create a local file using a temp filename, and then rename the local file after the download is complete. Otherwise, use TIdHTTP.Head() to determine the real filename (you can also use it to determine if resuming is supported) before creating the local file with that filename, then use TIdHTTP.Get() to download to that local file. Otherwise, download the file to memory using TMemoryStream instead of TFileStream, and then save with the desired filename when complete.
When I click http://get.videolan.org/vlc/2.2.1/win32/vlc-2.2.1-win32.exe, the server processes the request via its actual URL, http://mirror.vodien.com/videolan/vlc/2.2.1/win32/vlc-2.2.1-win32.exe. The problem is that IdHTTP will not automatically follow it.
That is because VideoLan is not using an HTTP redirect to send clients to the real URL (TIdHTTP supports HTTP redirects). VideoLan is using an HTML redirect instead (TIdHTTP does not support HTML redirects). When a web browser downloads the first URL, a 5-second countdown timer is displayed before the real download begins. As such, you will have to manually detect that the server is sending you an HTML page instead of the real file (look at the TIdHTTP.Response.ContentType property for that), parse the HTML to determine the real URL, and then download it. This also means that you cannot download the first URL directly into your target local file, otherwise you will corrupt it, especially during a resume. You have to cache the server's response first, either to a temp file or to memory, so you can analyze it before deciding how to act on it. It also means you have to remember the real URL for resuming; you cannot resume the download using the original countdown URL.
Try something more like the following instead. It does not account for everything mentioned above (particularly speed/progress tracking, HTML redirects, etc.), but it should get you a little closer:
void __fastcall TForm1::DownloadClick(TObject *Sender)
{
    Urlz = Edit1->Text;
    Url->Caption = Urlz;
    IdHTTP->Head(Urlz);
    String FileName = IdHTTP->Response->RawHeaders->Params["Content-Disposition"]["filename"];
    if (FileName.IsEmpty())
    {
        FileName = IdHTTP->Response->RawHeaders->Params["Content-Type"]["name"];
        if (FileName.IsEmpty())
            FileName = IdHTTP->URL->Document;
    }
    SaveDialog->FileName = FileName;
    if (!SaveDialog->Execute()) return;
    MyFile = SaveDialog->FileName;
    TFileStream* Fist = new TFileStream(MyFile, fmCreate | fmShareDenyWrite);
    try
    {
        try
        {
            Download->Enabled = false;
            Resume->Enabled = false;
            IdHTTP->Request->Clear();
            //...
            IdHTTP->ReadTimeout = 70000;
            IdHTTP->ConnectTimeout = 70000;
            IdHTTP->Get(Urlz, Fist);
        }
        __finally
        {
            delete Fist;
            Download->Enabled = true;
            Updated();
        }
    }
    catch (const EIdHTTPProtocolException &)
    {
        DeleteFile(MyFile);
        throw;
    }
}
void __fastcall TForm1::ResumeClick(TObject *Sender)
{
    TFileStream* Fist = new TFileStream(MyFile, fmOpenReadWrite | fmShareDenyWrite);
    try
    {
        Download->Enabled = false;
        Resume->Enabled = false;
        IdHTTP->Request->Clear();
        //...
        Fist->Seek(0, soEnd);
        IdHTTP->Request->Ranges->Add()->StartPos = Fist->Position;
        IdHTTP->Request->Referer = Edit1->Text;
        IdHTTP->ConnectTimeout = 70000;
        IdHTTP->ReadTimeout = 70000;
        IdHTTP->Get(Urlz, Fist);
    }
    __finally
    {
        delete Fist;
        Download->Enabled = true;
        Updated();
    }
}
void __fastcall TForm1::IdHTTPHeadersAvailable(TObject *Sender, TIdHeaderList *AHeaders, bool &VContinue)
{
    Resume->Enabled = ( ((IdHTTP->Response->ResponseCode == 200) || (IdHTTP->Response->ResponseCode == 206)) &&
                        TextIsSame(AHeaders->Values["Accept-Ranges"], "bytes") );
    if ((IdHTTP->Response->ContentStream) && (IdHTTP->Request->Ranges->Count > 0) && (IdHTTP->Response->ResponseCode == 200))
        IdHTTP->Response->ContentStream->Size = 0;
}
@Romeo:
Also, you can try the following function to determine the real download filename.
I translated this to C++ based on RRUZ's function. So far so good; I'm using it in my simple IdHTTP download program, too.
But this translation could, of course, still use improvement from Remy Lebeau, RRUZ, or any other master here.
String __fastcall GetRemoteFileName(const String URI)
{
    String result;
    try
    {
        TIdHTTP* HTTP = new TIdHTTP(NULL);
        try
        {
            HTTP->Head(URI);
            result = HTTP->Response->RawHeaders->Params["Content-Disposition"]["filename"];
            if (result.IsEmpty())
            {
                result = HTTP->Response->RawHeaders->Params["Content-Type"]["name"];
                if (result.IsEmpty())
                    result = HTTP->URL->Document;
            }
        }
        __finally
        {
            delete HTTP;
        }
    }
    catch (const Exception &ex)
    {
        ShowMessage(const_cast<Exception&>(ex).ToString());
    }
    return result;
}