I'm currently in the process of writing a steganography application with Qt. I am trying to hide my message bits in the least significant bit of the blue colour of the pixel.
From debugging I can tell that this section is working as it should. However, after hiding my bits in the image I save it and then reopen it. This is where the problem develops.
When I read in the (reopened) image the scanLines that I read in are not the same as the ones I wrote previously, and I can't figure out why. Maybe it's just me being stupid, or maybe I'm missing something. Any help would be much appreciated.
The code I have so far is as follows:
void MainWindow::Encrypt(QImage image, QString message) {
    if(image.isNull()) {
        qDebug() << "PROBLEM";
    }
    image = image.convertToFormat(QImage::Format_ARGB32);
    QVector<bool> bvec;
    QByteArray bytes = message.toAscii();
    char mask;
    QRgb tempPix;
    // Unpack each message byte into bits, least significant bit first.
    for(int i = 0; i < bytes.size(); i++) {
        for(int j = 0; j < 8; j++) {
            mask = (0x01 << j);
            bvec.push_back((bytes[i] & mask) == mask);
        }
    }
    // One bit is stored per scan line (first pixel only), so capacity is the image height.
    if(image.height() < bvec.size()) {
        qDebug() << "Not enough space in image";
    }
    for(int j = 0; j < bvec.size(); j++) {
        QRgb *pixel = (QRgb *)image.scanLine(j);
        tempPix = *pixel;
        int blue = qBlue(tempPix);
        blue &= 0xFE;                  // clear the LSB
        blue |= bvec[j] ? 0x01 : 0x00; // set it to the message bit
        *pixel = qRgba(qRed(tempPix), qGreen(tempPix), blue, qAlpha(tempPix));
    }
    if(!image.save(filename)) {
        emit addToStatusLog("Did not save. Error");
    }
}
void MainWindow::Decrypt(QImage image) {
    char temp = 0x00;
    qint8 mask = 0x01;
    QVector<bool> bvec;
    QRgb *pixel;
    int blue;
    image = image.convertToFormat(QImage::Format_ARGB32);
    // Read one bit back from the first pixel of every scan line.
    for(int i = 0; i < image.height(); i++) {
        pixel = (QRgb *)image.scanLine(i);
        blue = qBlue(*pixel);
        bvec.push_back((blue & mask) == mask);
    }
    // Reassemble the bits into bytes, LSB first, printing each completed character.
    for(int j = 0; j < bvec.size(); j++) {
        if(j % 8 == 0 && j != 0) {
            qDebug() << temp;
            temp = 0x00;
        }
        temp |= bvec[j] ? (0x01 << (j % 8)) : 0x00;
    }
    qDebug() << temp;
}
Thanks
Make sure you're not saving using a lossy format, such as JPEG.
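For example, passing an explicit format to QImage::save guarantees a lossless encoding even if the chosen filename ends in .jpg. A minimal sketch, reusing the filename member from the question:

// Force PNG so the least significant bits survive the save/load round trip.
// A JPEG save would recompress the pixels and destroy the hidden bits.
if(!image.save(filename, "PNG")) {
    emit addToStatusLog("Did not save. Error");
}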
I am new to Kinect development, and I am implementing a depth threshold for distances greater than 400mm:
for (UINT y = 0; y < pImg->rows; ++y)
{
    // Get a row pointer for the depth Mat
    const USHORT* pDepthRow = depth->ptr<USHORT>(y);
    for (UINT x = 0; x < pImg->cols; ++x)
    {
        USHORT raw_depth = pDepthRow[x];
        SHORT realDepth = NuiDepthPixelToDepth(raw_depth);
        // If the depth value is valid, convert and copy it
        if (raw_depth != 65535)
        {
            if (realDepth > 400) // greater than 400mm
            {
                pImg->at<Vec4b>(y,x)[0] = 255;
                pImg->at<Vec4b>(y,x)[1] = 255;
                pImg->at<Vec4b>(y,x)[2] = 255;
                pImg->at<Vec4b>(y,x)[3] = 255;
            }
            else
            {
                pImg->at<Vec4b>(y,x)[0] = 0;
                pImg->at<Vec4b>(y,x)[1] = 0;
                pImg->at<Vec4b>(y,x)[2] = 0;
                pImg->at<Vec4b>(y,x)[3] = 0;
            }
        }
    }
}
It seems to get the correct result, but it reduces the frame rate massively.
I want to get rid of the loop by using cv::inRange, but that function only supports 8UC1 input, while the raw depth is 16U.
So what else can I use to segment the depth according to the real distance?
Try to improve performance by storing a reference to the pixel.
Change this:
if (realDepth > 400) // greater than 400mm
{
    pImg->at<Vec4b>(y,x)[0] = 255;
    pImg->at<Vec4b>(y,x)[1] = 255;
    pImg->at<Vec4b>(y,x)[2] = 255;
    pImg->at<Vec4b>(y,x)[3] = 255;
}
else
{
    pImg->at<Vec4b>(y,x)[0] = 0;
    pImg->at<Vec4b>(y,x)[1] = 0;
    pImg->at<Vec4b>(y,x)[2] = 0;
    pImg->at<Vec4b>(y,x)[3] = 0;
}
To this:
(I don't know what T is, because I don't know what pImg is.
T should be equal to the return type of the at method; I assume it is Vec4b.)
T& pixel = pImg->at<Vec4b>(y, x); // probably Vec4b& pixel = ...
if (realDepth > 400) // greater than 400mm
{
    pixel[0] = 255;
    pixel[1] = 255;
    pixel[2] = 255;
    pixel[3] = 255;
}
else
{
    pixel[0] = 0;
    pixel[1] = 0;
    pixel[2] = 0;
    pixel[3] = 0;
}
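On the cv::inRange point from the question: cv::compare accepts 16-bit input and produces an 8-bit mask, so the per-pixel loop can be avoided entirely. A sketch under some assumptions (the raw depth sits in a CV_16UC1 Mat named depthMat, the low 3 bits hold the player index as NuiDepthPixelToDepth expects, and invalid-pixel handling is ignored for brevity):

// Shift out the 3-bit player index to get millimetres, mirroring
// NuiDepthPixelToDepth for the whole frame at once.
// (The division rounds rather than truncates; fine for a 400mm threshold.)
cv::Mat realDepth = depthMat / 8;   // element-wise divide, stays CV_16UC1

// 8-bit mask: 255 where depth > 400mm, 0 elsewhere.
cv::Mat mask;
cv::compare(realDepth, 400, mask, cv::CMP_GT);

// Replicate the mask into the four output channels (B, G, R, A).
cv::Mat channels[4] = { mask, mask, mask, mask };
cv::merge(channels, 4, *pImg);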
Sorry for posting it here, but the OpenCV site is down or something and I cannot access the topic there. I am reading an RGB image into a Mat and then trying to convert it into a BYTE* using the following function, but it is not giving me the exact image, just a distorted version of part of it. Can anyone please point out where I am going wrong? Thank you.
BYTE *change_to_BYTE(Mat matrix_value)
{
    BYTE* v_char_new = new BYTE[matrix_value.rows * matrix_value.cols * 3]();
    vector<byte> v_char;
    for(int i = 0; i < matrix_value.rows; i++)
    {
        for(int j = 0; j < matrix_value.cols; j++)
        {
            v_char_new[((i*matrix_value.cols+j) * 3)+0] = (*(uchar*)((matrix_value.data + i*matrix_value.step + j + 0)));
            v_char_new[((i*matrix_value.cols+j) * 3)+1] = (*(uchar*)((matrix_value.data + i*matrix_value.step + j + 1)));
            v_char_new[((i*matrix_value.cols+j) * 3)+2] = (*(uchar*)((matrix_value.data + i*matrix_value.step + j + 2)));
        }
    }
    return v_char_new;
}
After checking it out in different ways, this is the one that worked for me. (The original indexing never multiplied the column index j by the number of channels; at<Vec3b> takes care of that.)
BYTE *Ctest_face_projectDlg::change_to_BYTE(Mat matrix_value)
{
    BYTE* v_char_new = new BYTE[matrix_value.rows * matrix_value.cols * 3]();
    for(int i = 0; i < matrix_value.rows; i++)
    {
        for(int j = 0; j < matrix_value.cols; j++)
        {
            Vec3b bgrPixel = matrix_value.at<Vec3b>(i, j);
            v_char_new[((i*matrix_value.cols+j)*3)+0] = bgrPixel.val[0];
            v_char_new[((i*matrix_value.cols+j)*3)+1] = bgrPixel.val[1];
            v_char_new[((i*matrix_value.cols+j)*3)+2] = bgrPixel.val[2];
        }
    }
    return v_char_new; // this return was missing in the original post
}
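As an aside (not part of the original answer): when the Mat is continuous, the per-pixel loop can be replaced by block copies. A sketch, assuming an 8-bit, 3-channel input and the Win32 BYTE typedef used in the question:

#include <cstring>                  // memcpy
#include <opencv2/core/core.hpp>
using namespace cv;

BYTE *change_to_BYTE_fast(const Mat& m)
{
    // Assumes CV_8UC3 data; the caller owns (and must delete[]) the buffer.
    BYTE* out = new BYTE[m.rows * m.cols * 3];
    if(m.isContinuous())
        memcpy(out, m.data, m.rows * m.cols * 3);          // one copy for the whole image
    else
        for(int i = 0; i < m.rows; i++)                    // copy row by row, skipping padding
            memcpy(out + i * m.cols * 3, m.ptr<uchar>(i), m.cols * 3);
    return out;
}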
I found this code and I wonder why it doesn't work.
// f is an output stream
void writeTGA(ostream& f, bool rle) const {
    f << ubyte(0);  // ID length
    f << ubyte(0);  // Color map type
    if(rle)
        f << ubyte(10); // Image type code (RLE true-color)
    else
        f << ubyte(2);  // Image type code (uncompressed true-color)
    f << ubyte(0);  // Color map specification (5 bytes, unused)
    f << ubyte(0);
    f << ubyte(0);
    f << ubyte(0);
    f << ubyte(0);
    f << ubyte(0);  // X origin of image (2 bytes)
    f << ubyte(0);
    f << ubyte(0);  // Y origin of image (2 bytes)
    f << ubyte(0);
    f << ubyte(width % 256);
    f << ubyte(width >> 8);
    f << ubyte(height % 256);
    f << ubyte(height >> 8);
    f << ubyte(24); // Pixel size in bits
    f << ubyte(32); // Image descriptor byte
    if(rle)
    {
        cout << "rleHeadHead" << endl;
        ubyte rleHead = 0;
        ubyte diff[128];
        int i = 0;
        while(i < width*height)
        {
            rleHead = 1;
            /* RLE */
            if(i+1 < width*height)
                while(pixels[i] == pixels[i+1]) {
                    if(i+1 >= width*height || rleHead >= 128)
                        break;
                    rleHead++;
                    i++;
                }
            if(rleHead > 1)
            {
                f << (rleHead + 127);
                f << pixels[i].b;
                f << pixels[i].g;
                f << pixels[i].r;
            }
            rleHead = 0;
            /* RAW */
            if(i+1 < width*height)
            {
                while(pixels[i+rleHead] != pixels[i+rleHead+1])
                {
                    if((i+rleHead+1) >= width*height || rleHead >= 128)
                        break;
                    rleHead++;
                }
            } else
                rleHead++;
            if(rleHead > 0)
            {
                f << (rleHead - 1);
                for(int j = 0; j < rleHead; j++)
                {
                    diff[j] = pixels[i+j].b;
                    diff[j] = pixels[i+j].g;
                    diff[j] = pixels[i+j].r;
                }
                f.write((const char*) diff, rle*3);
                i += rleHead;
            }
        }
    }
    else
    {
        for(int i = 0; i < width*height; i++)
        {
            f << pixels[i].b;
            f << pixels[i].g;
            f << pixels[i].r;
        }
    }
}
I tried to implement it and the output doesn't look right.
Otherwise, does anyone know of a library, or just a simple file, where I can find this algorithm?
Thank you in advance
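Two things stand out in the RLE branch of the posted code: streaming an int expression such as f << (rleHead + 127) writes the number as formatted text rather than a single raw byte, and the RAW packet overwrites diff[j] three times with the same index and then writes rle*3 bytes (rle is the bool, so at most 3) instead of rleHead*3. For reference, a minimal sketch of the uncompressed path that emits the same 18-byte header with raw byte output; the pixel layout (top-to-bottom B,G,R, matching descriptor byte 0x20) is an assumption:

#include <ostream>
#include <cstdint>

// Minimal uncompressed 24-bit TGA writer -- a sketch, not a fixed RLE version.
void writeTGA24(std::ostream& f, const uint8_t* bgr, int width, int height)
{
    const uint8_t header[18] = {
        0,                  // ID length
        0,                  // color map type
        2,                  // image type: uncompressed true-color
        0, 0, 0, 0, 0,      // color map specification (unused)
        0, 0,               // X origin
        0, 0,               // Y origin
        uint8_t(width & 0xFF),  uint8_t(width >> 8),
        uint8_t(height & 0xFF), uint8_t(height >> 8),
        24,                 // bits per pixel
        0x20                // descriptor: top-left origin
    };
    // write() emits raw bytes; operator<< on an int would format it as text.
    f.write(reinterpret_cast<const char*>(header), sizeof header);
    f.write(reinterpret_cast<const char*>(bgr), std::streamsize(width) * height * 3);
}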
I am acquiring images using a digital camera. At first, I was using a mono camera, but recently I upgraded to a color camera. With the mono camera I was having some palette issues until I found this bit of code to alter the palette to a grayscale palette:
for(int i = 0; i < 256; i++)
{
    pbmi->bmiColors[i].rgbRed = BYTE(i);
    pbmi->bmiColors[i].rgbGreen = BYTE(i);
    pbmi->bmiColors[i].rgbBlue = BYTE(i);
    pbmi->bmiColors[i].rgbReserved = BYTE(0);
}
where pbmi is a BITMAPINFO*.
This worked just fine for the mono camera. But now with the color camera I obviously don't want to make the images grayscale. However, if I remove that chunk of code I get the same palette issues I was getting before with the mono camera. So it seems I need to do something similar to what I did before and create a palette, only this time a color palette.
For reference, here is the rest of the pbmi's settings:
//// INFO ////
BITMAPINFO* pbmi = (BITMAPINFO*)alloca( sizeof(BITMAPINFOHEADER) +
sizeof(RGBQUAD)*256);
pbmi->bmiHeader.biSize = sizeof (pbmi->bmiHeader);
pbmi->bmiHeader.biWidth = 2752;
pbmi->bmiHeader.biHeight = -2200;
pbmi->bmiHeader.biPlanes = 1;
pbmi->bmiHeader.biBitCount = 8;
pbmi->bmiHeader.biCompression = BI_RGB;
pbmi->bmiHeader.biSizeImage = 0;
pbmi->bmiHeader.biXPelsPerMeter = 14173;
pbmi->bmiHeader.biYPelsPerMeter = 14173;
pbmi->bmiHeader.biClrUsed = 0;
pbmi->bmiHeader.biClrImportant = 0;
So far, I have tried the following:
for(int i = 0, a = 0; i < 64; i++)
{
    pbmi->bmiColors[i].rgbRed = BYTE(a);
    pbmi->bmiColors[i+64].rgbGreen = BYTE(a);
    pbmi->bmiColors[i+64+64].rgbBlue = BYTE(a);
    pbmi->bmiColors[i+64+64+64].rgbReserved = BYTE(0);
    a += 4;
}
// This created a palette consisting only of cyan, yellow, and magenta colors.
// Didn't work.
for(int i = 0, r = 0, g = 0, b = 0; b <= 255; i++)
{
    if(r >= 256)
    {
        r = 0;
        g++;
    }
    if(g >= 256)
    {
        g = 0;
        b++;
    }
    pbmi->bmiColors[i].rgbRed = BYTE(r);
    pbmi->bmiColors[i].rgbGreen = BYTE(g);
    pbmi->bmiColors[i].rgbBlue = BYTE(b);
    pbmi->bmiColors[i].rgbReserved = BYTE(0);
    r++;
}
// Here I was trying to basically count up hexadecimally from 000000 to FFFFFF.
// Caused an access violation error.
I've also tried each of those after changing pbmi->bmiHeader.biBitCount to 16, 24, and 32, none of which worked.
So my question is: How do I create a color palette based on the BITMAPINFO settings I have provided?
If you are trying to create a simple RGB palette, you just need to vary the R, G, and B values from 0 to 255:
const int MaxIndex = 255;
int i;
for(int r = 0; r <= MaxIndex; r++)
    for(int g = 0; g <= MaxIndex; g++)
        for(int b = 0; b <= MaxIndex; b++)
        {
            i = (r * 256 + g) * 256 + b; // stride must be 256, not 255
            pbmi->bmiColors[i].rgbRed = BYTE(r);
            pbmi->bmiColors[i].rgbGreen = BYTE(g);
            pbmi->bmiColors[i].rgbBlue = BYTE(b);
            pbmi->bmiColors[i].rgbReserved = BYTE(0);
        }
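Note, though, that with biBitCount = 8 the palette holds only 256 entries, so a full RGB cube cannot fit and the loop above would write far past the end of bmiColors. A common compromise (an assumption here, not something from the original answer) is a 3-3-2 palette, where each 8-bit pixel value is treated as RRRGGGBB:

// 256-entry 3-3-2 palette: 3 bits of red, 3 of green, 2 of blue per index.
for(int i = 0; i < 256; i++)
{
    pbmi->bmiColors[i].rgbRed      = BYTE(((i >> 5) & 0x07) * 255 / 7);
    pbmi->bmiColors[i].rgbGreen    = BYTE(((i >> 2) & 0x07) * 255 / 7);
    pbmi->bmiColors[i].rgbBlue     = BYTE((i & 0x03) * 255 / 3);
    pbmi->bmiColors[i].rgbReserved = BYTE(0);
}

This only helps if the camera's 8-bit pixel values are actually encoded that way; for a true color camera the usual route is to set biBitCount to 24 or 32 and skip the palette entirely, since DIB palettes only apply at 8 bits per pixel or below.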
I'm trying to get the number of differences between two pictures.
When I compare two images in grayscale, pixDiff != 0, but when it comes to RGB, pixDiff is always 0.
I used OpenCV's compare and also a custom loop.
Mat frame, oldFrame;
cap >> oldFrame;
if(analyseMod == MONOCHROME)
    cvtColor(oldFrame, oldFrame, CV_BGR2GRAY);
nbChannels = oldFrame.channels();
while(1)
{
    pixDiff = 0;
    cap >> frame;
    // Test diff
    Mat diff;
    compare(oldFrame, frame, diff, CMP_NE);
    imshow("video 0", diff);
    imshow("video 1", frame);
    if(analyseMod == MONOCHROME)
    {
        cvtColor(frame, frame, CV_BGR2GRAY);
        for(int i = 0; i < frame.rows; i++)
            for(int j = 0; j < frame.cols; j++)
                if(frame.at<uchar>(i,j) < oldFrame.at<uchar>(i,j) - similarPixelTolerance ||
                   frame.at<uchar>(i,j) > oldFrame.at<uchar>(i,j) + similarPixelTolerance)
                    pixDiff++;
    }
    else if(analyseMod == RGB)
    {
        uint8_t *f = (uint8_t *)frame.data;
        uint8_t *o = (uint8_t *)oldFrame.data;
        for(int i = 0; i < frame.rows; i++)
        {
            for(int j = 0; j < frame.cols; j++)
            {
                if(f[nbChannels*i*frame.cols + j + RED] < o[nbChannels*i*oldFrame.cols + j + RED])
                    pixDiff++;
            }
        }
    }
    frame.copyTo(oldFrame);
    cout << pixDiff;
    if(waitKey(30) >= 0) break;
}
Thanks for any help.
I still don't get why you are not using your tolerance (similarPixelTolerance) in the RGB case, but here is the solution for both cases, if you want to consider the color channels separately. Set CN to 1 for the monochrome case and to 3 for the RGB case.
const int CN = 3; // 3 for RGB, 1 for monochrome
uint8_t *f = frame.ptr<uint8_t>();
uint8_t *o = oldFrame.ptr<uint8_t>();
for(int i = 0; i < frame.rows; ++i)
{
    for(int j = 0; j < frame.cols; ++j)
    {
        for(int c = 0; c < CN; ++c)
        {
            if (abs(*f - *o) > similarPixelTolerance) ++pixDiff;
            ++f, ++o;
        }
    }
}
It is far more efficient to access pixels this way than to call at for each pixel. The only possible problem is if your images have row padding, but by default OpenCV uses continuous allocation.
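A loop-free alternative (an addition, not part of the answer above) gets the same per-channel count with a few OpenCV calls. A sketch, assuming both frames are continuous 8-bit Mats of the same size:

// Per-channel absolute difference |frame - oldFrame|.
Mat diff;
absdiff(frame, oldFrame, diff);
// View the channels as extra columns so the comparison is single-channel.
Mat flat = diff.reshape(1);
// 8-bit mask: 255 where the difference exceeds the tolerance.
Mat mask = flat > similarPixelTolerance;
pixDiff = countNonZero(mask);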