I'm using Google Cloud Functions and bucket triggers to:
Push a video to a Cloud Storage bucket
Extract the audio from the video file with ffmpeg and store it locally
Save the audio to another bucket
Extract the text from the audio
Save it to another bucket
Translate the text file automatically
Everything works fine as long as the file is under 5 MB, but beyond that the file is never uploaded to the other bucket. I have also tried uploading it to the same bucket the video came from, with no luck.
I have checked the google-cloud/storage Node.js code: the client automatically switches to a streaming upload above 5 MB, but that upload never finishes.
Has anybody run into this issue before?
My code does the following:
// Imports the Google Cloud client library
const storage = require('@google-cloud/storage')();
const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
const ffmpeg = require('fluent-ffmpeg');
const path = require('path');
const fs = require('fs');
const rimraf = require('rimraf'); // rimraf directly
// Binaries for ffmpeg
ffmpeg.setFfmpegPath(ffmpegPath);
/**
 * Triggered by a change on a Cloud Storage bucket.
 *
 * @param {!Object} event The Cloud Functions event.
 * @param {!Function} callback The callback function.
 */
exports.processFile = function(event, callback) {
console.log(JSON.stringify(event));
console.log('Processing file: ' + event.data.name);
const bucket = storage.bucket(event.data.bucket);
const bucketAudio = storage.bucket('distant-bucket');
const videoFile = bucket.file(event.data.name);
const destination = '/tmp/' + path.basename( event.data.name );
return new Promise((resolve, reject) => { return videoFile.download({
destination: destination
}).then((error) => {
if (error.length > 0) {
reject(error);
} else {
resolve(destination);
}
})
})
.then((fileinfo) => {
const filename = destination.replace( '.mp4', '.flac' );
return new Promise((resolve, reject) => {
ffmpeg(fileinfo)
.videoBitrate(19200)
.inputOptions('-vn')
.format('flac')
.audioChannels(1)
.audioBitrate(44100)
.output(filename)
.on('end', function() {
console.log('extracted audio : '+event.data.name+" to "+filename);
resolve(filename);
})
.on('error', function(err, stdout, stderr) {
reject(err);
})
.run()
});
})
.then((flacOutput) => {
console.log('save file '+flacOutput);
const stats = fs.statSync(flacOutput)
console.log(stats.size / 1000000.0);
// TODO: upload FLAC file to Cloud Storage
return bucket
.upload( flacOutput ).catch((err) => {
console.error('Failed to upload flac.', err);
return Promise.reject(err);
});
}).then(() => {
console.log(`Audio uploaded.`);
// Delete the temporary file.
return new Promise((resolve, reject) => {
fs.unlink(destination);
fs.unlink(destination.replace( '.mp4', '.flac' ), (err) => {
if (err) {
reject(err);
} else {
resolve();
}
});
});
});
};
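One thing that may be worth trying (a hedged sketch, not a confirmed fix): the Node.js storage client defaults to a resumable, streamed upload for larger files, which matches the streaming behaviour described above, and a common workaround is to force a simple non-resumable upload so everything stays in a single request:

// Hedged sketch: force a simple (non-resumable) upload for the FLAC file.
// The client switches to a resumable upload by default once the file grows past ~5 MB.
return bucket.upload(flacOutput, { resumable: false })
  .catch((err) => {
    console.error('Failed to upload flac.', err);
    return Promise.reject(err);
  });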
Related
I upload files to Google Cloud Storage using "@ffmpeg-installer/ffmpeg" and @google-cloud/storage in my Node.js app.
Step 1. File uploading to the filesystem happens in child processes, one process for each resolution type (six in total).
Step 2. Encryption (converting to a stream).
Step 3. Upload to Google Cloud Storage.
I use the "Upload a directory to a bucket" sample to send the video from the client to the Google Cloud Storage bucket.
This approach only works for small videos.
For example, when I upload a video with a duration of one hour, it is split into chunks and I end up with more than three thousand files in total. The problem occurs when there are more than about 1,500 files:
in effect I am uploading a folder with a large number of files, but not all of them end up in Cloud Storage.
Maybe someone has had a similar problem and can help fix it.
const uploadFolder = async (bucketName, directoryPath, socketInstance) => {
try {
let dirCtr = 1;
let itemCtr = 0;
const fileList = [];
const onComplete = async () => {
const folderName = nanoid(46);
await Promise.all(
fileList.map(filePath => {
const fileName = path.relative(directoryPath, filePath);
const destination = `${ folderName }/${ fileName }`;
return storage
.bucket(bucketName)
.upload(filePath, { destination })
.then(
uploadResp => ({ fileName: destination, status: uploadResp[0] }),
err => ({ fileName: destination, response: err })
);
})
);
if (socketInstance) socketInstance.emit('uploadProgress', {
message: `Added files to Google bucket`,
last: false,
part: false
});
return folderName;
};
const getFiles = async directory => {
const items = await fs.readdir(directory);
dirCtr--;
itemCtr += items.length;
for(const item of items) {
const fullPath = path.join(directory, item);
const stat = await fs.stat(fullPath);
itemCtr--;
if (stat.isFile()) {
fileList.push(fullPath);
} else if (stat.isDirectory()) {
dirCtr++;
await getFiles(fullPath);
}
}
}
await getFiles(directoryPath);
return onComplete();
} catch (e) {
log.error(e.message);
throw new Error('Can\'t store folder.');
}
};
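One possible mitigation, sketched under the assumption that the failures come from starting thousands of uploads at once with a single Promise.all: upload the files in bounded batches so only a limited number of requests are in flight at any time. The chunkSize value below is illustrative, not taken from the original code, and the helper assumes the same storage, path, fileList and folderName as above.

// Hedged sketch: upload the collected files in fixed-size batches.
const uploadInBatches = async (bucketName, directoryPath, fileList, folderName, chunkSize = 100) => {
  const results = [];
  for (let i = 0; i < fileList.length; i += chunkSize) {
    const batch = fileList.slice(i, i + chunkSize);
    // Wait for the current batch to finish before starting the next one.
    const batchResults = await Promise.all(
      batch.map(filePath => {
        const fileName = path.relative(directoryPath, filePath);
        const destination = `${folderName}/${fileName}`;
        return storage
          .bucket(bucketName)
          .upload(filePath, { destination })
          .then(
            uploadResp => ({ fileName: destination, status: uploadResp[0] }),
            err => ({ fileName: destination, response: err })
          );
      })
    );
    results.push(...batchResults);
  }
  return results;
};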
I am trying to adapt a Qwiklabs tutorial to use pdfCrowd, rather than LibreOffice.
The service works by downloading a file from one GCP storage bucket for 'uploads', processing it, then uploading it to another bucket for 'processed' files, then deleting the original from the 'uploads' bucket.
This is the handler that downloads the file, sends it off for conversion, uploads the result, then deletes the original. This is the code from the Qwiklabs tutorial, and it works great.
app.post('/', async (req, res) => {
try {
const file = decodeBase64Json(req.body.message.data);
await downloadFile(file.bucket, file.name);
const pdfFileName = await convertFile(file.name);
await uploadFile(process.env.PDF_BUCKET, pdfFileName);
await deleteFile(file.bucket, file.name);
}
catch (ex) {
console.log(`Error: ${ex}`);
}
res.set('Content-Type', 'text/plain');
res.send('\n\nOK\n\n');
})
The original convertFile function is:
async function convertFile(fileName) {
const cmd = 'libreoffice --headless --convert-to pdf --outdir /tmp ' +
`"/tmp/${fileName}"`;
console.log(cmd);
const { stdout, stderr } = await exec(cmd);
if (stderr) {
throw stderr;
}
console.log(stdout);
pdfFileName = fileName.replace(/\.\w+$/, '.pdf');
return pdfFileName;
}
The problem comes when I change my convertFile function: LibreOffice accepts file.name, but pdfCrowd wants the full file path.
So I changed the function to use pdfCrowd:
async function convertFile(fileName) {
// create the API client instance
const _newPdfPath = `/tmp/${fileName.replace(/\.\w+$/, '.pdf')}`
const client = new pdfcrowd.HtmlToPdfClient("demo", "ce544b6ea52a5621fb9d55f8b542d14d");
// run the conversion and write the result to a file
client.convertFileToFile(`/tmp/${fileName}`, _newPdfPath, function (err, fileName) {
if (err)
return console.error("Pdfcrowd Error: " + err);
console.log("Success: the file was created " + fileName);
});
pdfFileName = fileName.replace(/\.\w+$/, '.pdf');
return pdfFileName;
}
Now the PDF conversion reports success, but only after a notification saying there is no such file or directory at the 'out' file path I passed to convertFileToFile. The file specified by _newPdfPath doesn't exist at that point.
Error: Error: ENOENT: no such file or directory, stat '/tmp/mynew.pdf'
Success: the file was created /tmp/hello (31).pdf
The pdfCrowd call should be creating a file in the /tmp directory, but is the async flow actually waiting for the file to be created there?
My complete code is:
const {promisify} = require('util');
const {Storage} = require('@google-cloud/storage');
const exec = promisify(require('child_process').exec);
const storage = new Storage();
const express = require('express');
const bodyParser = require('body-parser');
const app = express();
const pdfcrowd = require("pdfcrowd");
app.use(bodyParser.json());
const port = process.env.PORT || 8080;
app.listen(port, () => {
console.log('Listening on port', port);
});
app.post('/', async (req, res) => {
try {
const file = decodeBase64Json(req.body.message.data);
// console.log("FILE=========", file, req.body.message.data)
await downloadFile(file.bucket, file.name);
const pdfFileName = await convertFile(file.name);
await uploadFile(process.env.PDF_BUCKET, pdfFileName);
await deleteFile(file.bucket, file.name);
}
catch (ex) {
console.log(`Error: ${ex}`);
}
res.set('Content-Type', 'text/plain');
res.send('\n\nOK\n\n');
})
function decodeBase64Json(data) {
return JSON.parse(Buffer.from(data, 'base64').toString());
}
async function downloadFile(bucketName, fileName) {
const options = {destination: `/tmp/${fileName}`};
await storage.bucket(bucketName).file(fileName).download(options);
}
async function convertFile(fileName) {
// create the API client instance
const _newPdfPath = `/tmp/${fileName.replace(/\.\w+$/, '.pdf')}`
const client = new pdfcrowd.HtmlToPdfClient("demo", "ce544b6ea52a5621fb9d55f8b542d14d");
// run the conversion and write the result to a file
client.convertFileToFile(`/tmp/${fileName}`, _newPdfPath, function (err, fileName) {
if (err)
return console.error("Pdfcrowd Error: " + err);
console.log("Success: the file was created " + fileName);
});
pdfFileName = fileName.replace(/\.\w+$/, '.pdf');
return pdfFileName;
}
async function deleteFile(bucketName, fileName) {
await storage.bucket(bucketName).file(fileName).delete();
}
async function uploadFile(bucketName, fileName) {
await storage.bucket(bucketName).upload(`/tmp/${fileName}`);
}
The problem is that your convertFile function finishes before the convertFileToFile callback is invoked.
I'd pass success and error callbacks into convertFile, e.g.:
app.post('/', async (req, res) => {
try {
const file = decodeBase64Json(req.body.message.data);
// console.log("FILE=========", file, req.body.message.data)
await downloadFile(file.bucket, file.name);
let on_pdf_done = async function(pdfFileName) {
await uploadFile(process.env.PDF_BUCKET, pdfFileName);
await deleteFile(file.bucket, file.name);
res.set('Content-Type', 'text/plain');
res.send('\n\nOK\n\n');
};
let on_pdf_fail = function() {
res.set('Content-Type', 'text/plain');
res.send('\n\nERROR\n\n');
};
convertFile(file.name, on_pdf_done, on_pdf_fail);
}
catch (ex) {
console.log(`Error: ${ex}`);
}
})
function convertFile(fileName, success_callback, fail_callback) {
// create the API client instance
const _newPdfPath = `/tmp/${fileName.replace(/\.\w+$/, '.pdf')}`
const client = new pdfcrowd.HtmlToPdfClient("demo", "ce544b6ea52a5621fb9d55f8b542d14d");
// run the conversion and write the result to a file
client.convertFileToFile(`/tmp/${fileName}`, _newPdfPath, function (err, fileName) {
if (err)
return fail_callback();
success_callback(fileName.replace(/\.\w+$/, '.pdf'));
});
}
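An alternative (an untested sketch along the same lines) is to wrap the Pdfcrowd callback in a Promise, so the original async/await flow in the route handler keeps working unchanged:

// Hedged sketch: promisify the Pdfcrowd callback so convertFile can be awaited.
function convertFile(fileName) {
  const newPdfPath = `/tmp/${fileName.replace(/\.\w+$/, '.pdf')}`;
  const client = new pdfcrowd.HtmlToPdfClient("demo", "ce544b6ea52a5621fb9d55f8b542d14d");
  return new Promise((resolve, reject) => {
    // The Promise only settles once Pdfcrowd has written the output file.
    client.convertFileToFile(`/tmp/${fileName}`, newPdfPath, (err, outputPath) => {
      if (err) return reject(err);
      console.log("Success: the file was created " + outputPath);
      // Resolve with the bare file name, which uploadFile prefixes with /tmp/.
      resolve(fileName.replace(/\.\w+$/, '.pdf'));
    });
  });
}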
I have a video stored in an S3 bucket with an authenticated-read ACL.
I need to read it and make a trailer with ffmpeg (Node.js).
Here's the code I use to generate the trailer:
exports.generatePreview = (req, res) => {
const getParams = {
Bucket: S3_CREDENTIALS.bucketName,
Key: req.params.key
}
s3.getSignedUrl('getObject', getParams, (err, signedRequest) => {
console.log(signedRequest, err, 'getSignedUrl')
ffmpeg(new URL(signedRequest))
.size('640x?')
.aspect('4:3')
.seekInput('3:00')
.duration('0:30')
.then(function (video) {
s3.putObject({ Bucket: S3_CREDENTIALS.bucketName, key: 'preview_' + req.body.key, Body: video }, function (err, data) {
console.log(err, data)
})
});
});
}
Unfortunately, the constructor does not seem to accept a remote URL as input. If I execute the equivalent ffmpeg command line with the same signed URL (i.e. ffmpeg -i "https://[bucketname].s3.eu-west-1.amazonaws.com/[key.mp4]?[signedParams]" -vn -acodec pcm_s16le -ar 44100 -ac 2 video.wav),
the error I get for the signedRequest URL is 'The input file does not exist'.
It seems https is not supported by fs.readFileSync either; fs.readFileSync(signedurl) gives the same result, and switching to http makes no difference.
How can I overcome this issue?
If you're using node-ffmpeg this isn't possible because the library only accepts a string pointing to a local path, but fluent-ffmpeg does support readstreams so give that a try.
For example (untested, just spitballing):
const ffmpeg = require('fluent-ffmpeg');
const stream = require('stream');
exports.generatePreview = (req, res) => {
  let params = {Bucket: S3_CREDENTIALS.bucketName, Key: req.params.key};
  // Retrieve object stream
  let readStream = s3.getObject(params).createReadStream();
  // Set up the ffmpeg process
  let ffmpegProcess = new ffmpeg(readStream)
    // Add your args here
    // Note: writing mp4 to a pipe may need fragmented output,
    // e.g. .outputOptions('-movflags', 'frag_keyframe+empty_moov')
    .toFormat('mp4');
  // Create a pass-through stream and pipe the converted output into it
  let pt = new stream.PassThrough();
  ffmpegProcess.on('error', (err, stdout, stderr) => {
    // Handle errors here
  }).on('end', () => {
    // Processing is complete
  }).pipe(pt);
  // Reuse the same params object and set the Body to the stream
  params.Key = 'preview_' + req.body.key;
  params.Body = pt;
  // Upload and wait for the result
  s3.upload(params, (err, data) => {
    if (err)
      return console.error(err);
    console.log("done");
  });
};
This will have high memory requirements so if this is a Lambda function you might play around with retrieving only the first X bytes of the file and converting only that.
I'm trying to use imagemagick in my Google Cloud function. The function is triggered by uploading a file to a Google Cloud Storage bucket. I have grander plans, but trying to get there one step at a time. Starting with identify.
// imagemagick_setup
const gm = require('gm').subClass({imageMagick: true});
const path = require('path');
const {Storage} = require('@google-cloud/storage');
const storage = new Storage();
exports.processListingImage = (event, context) => {
const object = event.data || event; // Node 6: event.data === Node 8+: event
const filename = object.name;
console.log("Filename: ", filename);
const fullFileObject = storage.bucket(object.bucket).file(object.name);
console.log("Calling resize function");
let resizePromise = resizeImage( fullFileObject );
<more stuff>
};
function resizeImage( file, sizes ) {
const tempLocalPath = `/tmp/${path.parse(file.name).base}`;
return file
.download({destination: tempLocalPath})
.catch(err => {
console.error('Failed to download file.', err);
return Promise.reject(err);
})
.then( () => {
// file now downloaded, get it's metadata
return new Promise((resolve, reject) => {
gm( tempLocalPath )
.identify( (err, result) => {
if (err)
{
console.log("Error reading metadata: ", err);
}
else
{
console.log("Well, there seems to be metadata: ", result);
}
});
});
});
} // end resizeImage()
The local file path is: "/tmp/andy-test.raw". But when the identify function runs, I get an error:
identify-im6.q16: unable to open image `/tmp/magick-12MgKrSna0qp9U.ppm': No such file or directory # error/blob.c/OpenBlob/2701.
Why is identify looking for a different file than the one I (believe I) told it to look for? Eventually I am going to resize the image and write it back out to Cloud Storage, but I wanted to get identify running first.
Mark had the right answer - if I upload a jpg file, it works. Onto the next challenge.
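A side note on the resizeImage function above: the inner Promise never resolves or rejects, so a caller can never actually await the metadata. A minimal sketch of that block that settles the Promise from the identify callback:

// Hedged sketch: settle the Promise from gm's identify callback
// so the caller of resizeImage can await the metadata.
return new Promise((resolve, reject) => {
  gm(tempLocalPath).identify((err, result) => {
    if (err) {
      console.log("Error reading metadata: ", err);
      return reject(err);
    }
    console.log("Well, there seems to be metadata: ", result);
    resolve(result);
  });
});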
My function is triggered by a Cloud Storage event and loads files into a BigQuery table. My issue is that we received some .zip files with the same name, and the function attempts to load these files as well, which causes problems with the table. I need the code to process only .csv files. Below is the code I have so far:
exports.ToBigQuery = (event, callback) => {
const file = event.data;
const context = event.context;
const BigQuery = require('@google-cloud/bigquery');
const Storage = require('@google-cloud/storage');
const projectId = "gas-ddr";
const datasetId = "gas_ddr";
const bucketName = file.bucket;
const filename = file.name;
const dashOffset = filename.indexOf('-');
const tableId = filename.substring(0, dashOffset);
console.log(`Load ${filename} into ${tableId}.`);
// Instantiates clients
const bigquery = new BigQuery({
projectId: projectId,
});
const storage = Storage({
projectId: projectId,
});
const metadata = {
allowJaggedRows: true,
skipLeadingRows: 1
};
let job;
// Loads data from a Google Cloud Storage file into the table
bigquery
.dataset(datasetId)
.table(tableId)
.load(storage.bucket(bucketName).file(filename),metadata)
.then(results => {
job = results[0];
console.log(`Job ${job.id} started.`);
// Wait for the job to finish
return job;
})
.then(metadata => {
// Check the job's status for errors
const errors = metadata.status.errors;
if (errors && errors.length > 0) {
throw errors;
}
})
.then(() => {
console.log(`Job ${job.id} completed.`);
})
.catch(err => {
console.error('ERROR:', err);
});
callback();
};
This is really just a JavaScript question: you can extract the extension from the filename and process files accordingly:
function getExtension(filename) {
var parts = filename.split('.');
return parts[parts.length - 1];
}
if (getExtension(filename) == "csv") {
// Loads data from a Google Cloud Storage file into the table
bigquery
.dataset(datasetId)
...
}
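A minor variation (not from the original answer): Node's built-in path module can do the same check without a helper function:

const path = require('path');

// Equivalent extension check using the standard library.
if (path.extname(filename).toLowerCase() === '.csv') {
  // Loads data from a Google Cloud Storage file into the table
  // ... same BigQuery load job as above ...
}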