I am creating a menu for a game in C++, and I have a problem when I read a text file (.cfg) and try to show it on the screen (with SDL, not the console).
The problem is that in SDL I only get the last line. Why is that?
Here is my code:
fstream characters;
string line;
characters.open("characters.cfg", ios::in);
while (getline(characters, line))
cout << line << endl; // prints every line to the console
Then I create a string to render with SDL_ttf:
void renderUI() {
SDL_Surface* textSurface;
SDL_Texture* textTexture;
SDL_Rect textRect;
SDL_Color white = { 255,255,255 };
textRect.w = 250;
textRect.h = 20;
textRect.x = 150;
textRect.y = 200;
string names = line;
textSurface = TTF_RenderText_Blended(gameFont, names.c_str(), white);
if (textSurface == NULL)
{
cout << "TTF_RenderText_Blended() Failed: " << TTF_GetError() << endl;
TTF_Quit();
SDL_Quit();
exit(1);
}
textTexture = SDL_CreateTextureFromSurface(renderer, textSurface);
SDL_RenderCopy(renderer, textTexture, NULL, &textRect);
}
A screenshot (not included here) shows that I get the full text in the console, but not in SDL.
I have the solution. Here is the code:
fstream characters;
string line;
string text;
characters.open("characters.cfg", ios::in);
while (getline(characters, line))
{
text = text + line + "\n";
}
characters.close();
And here's the SDL_ttf code with the fix for the line-break problem in SDL:
SDL_Surface* textSurface;
SDL_Texture* textTexture;
SDL_Rect textRect;
SDL_Color white = { 255,255,255 };
textRect.w = 250;
textRect.h = 400;
textRect.x = 150;
textRect.y = 185;
textSurface = TTF_RenderText_Blended_Wrapped(gameFont, text.c_str(), white, 500);
// If TTF_RenderText_Blended_Wrapped() fails
if (textSurface == NULL)
{
cout << "TTF_RenderText_Blended_Wrapped() Failed: " << TTF_GetError() << endl;
TTF_Quit();
SDL_Quit();
exit(1);
}
textTexture = SDL_CreateTextureFromSurface(renderer, textSurface);
SDL_RenderCopy(renderer, textTexture, NULL, &textRect);
To solve the line-break problem with SDL_ttf, I used TTF_RenderText_Blended_Wrapped, which supports line breaks (the last argument is the wrap width in pixels).
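As a side note, TTF_RenderText_Blended_Wrapped allocates a new surface (and SDL_CreateTextureFromSurface a new texture) on every call, so if this runs every frame, the surface and texture should be freed again. A minimal sketch, assuming the same gameFont, renderer, and text as above (the helper name renderWrappedText is just for illustration):
// Sketch: render the wrapped text once and release the per-call resources.
void renderWrappedText(TTF_Font* gameFont, SDL_Renderer* renderer, const string& text)
{
    SDL_Color white = { 255, 255, 255 };
    SDL_Rect textRect = { 150, 185, 250, 400 }; // x, y, w, h as in the code above
    SDL_Surface* textSurface = TTF_RenderText_Blended_Wrapped(gameFont, text.c_str(), white, 500);
    if (textSurface == NULL)
    {
        cout << "TTF_RenderText_Blended_Wrapped() Failed: " << TTF_GetError() << endl;
        return;
    }
    SDL_Texture* textTexture = SDL_CreateTextureFromSurface(renderer, textSurface);
    SDL_FreeSurface(textSurface); // the surface is no longer needed once the texture exists
    if (textTexture == NULL)
    {
        return;
    }
    SDL_RenderCopy(renderer, textTexture, NULL, &textRect);
    SDL_DestroyTexture(textTexture); // avoid leaking a texture every frame
}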
I'm using Visual Studio 2019 and C++ and trying to read (show) a WebP image, but nothing happens.
I can open GIF, JPG, and PNG files, but not WebP.
In Visual Studio 2019 I added the SDL, SDL_image, and WebP includes, libs, and DLLs.
Console message: -- Failed to decode WEBP --
#include <iostream>
#include <SDL.h>
#include <SDL_image.h>
const int WIDTH = 640, HEIGHT = 360;
int main(int argc, char* argv[])
{
SDL_Surface* imageSurface = NULL;
SDL_Surface* windowSurface = NULL;
SDL_Init(SDL_INIT_EVERYTHING);
SDL_Window* window = SDL_CreateWindow("Hello SDL World", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, WIDTH, HEIGHT, SDL_WINDOW_ALLOW_HIGHDPI);
windowSurface = SDL_GetWindowSurface(window);
// Check that the window was successfully created
if (NULL == window)
{
// In the case that the window could not be made...
std::cout << "Could not create window: " << SDL_GetError() << std::endl;
return 1;
}
if (!(IMG_Init(IMG_INIT_WEBP) & IMG_INIT_WEBP))
{
std::cout << "Could not initialize WebP support: " << IMG_GetError() << std::endl;
return 1;
}
SDL_Event windowEvent;
imageSurface = IMG_Load("logo.webp");
if (NULL == imageSurface)
{
std::cout << "SDL could not load image! SDL Error: " << SDL_GetError() << std::endl;
}
while (true)
{
if (SDL_PollEvent(&windowEvent))
{
if (SDL_QUIT == windowEvent.type)
{
break;
}
}
SDL_BlitSurface(imageSurface, NULL, windowSurface, NULL);
SDL_UpdateWindowSurface(window);
}
SDL_FreeSurface(imageSurface);
imageSurface = NULL;
// the window surface is owned by the window and is released by SDL_DestroyWindow,
// so it should not be passed to SDL_FreeSurface
windowSurface = NULL;
SDL_DestroyWindow(window);
SDL_Quit();
return EXIT_SUCCESS;
}
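One hedged diagnostic that may help narrow this down: print the SDL_image version compiled against versus the version of the DLL actually loaded at runtime, since a stale SDL2_image.dll or libwebp DLL on the PATH, or an older SDL_image build that cannot decode a particular WebP variant (animated WebP support only arrived in later releases, as far as I know), could plausibly cause a single format to fail. IMG_Linked_Version() is a standard SDL_image call:
// Diagnostic sketch (assumes SDL_image 2.x): compare the version compiled against
// with the version of the SDL2_image DLL actually loaded at runtime.
SDL_version compiled;
SDL_IMAGE_VERSION(&compiled);
const SDL_version* linked = IMG_Linked_Version();
std::cout << "SDL_image compiled: " << (int)compiled.major << "." << (int)compiled.minor << "." << (int)compiled.patch
          << "  linked: " << (int)linked->major << "." << (int)linked->minor << "." << (int)linked->patch << std::endl;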
I very recently got into C++, and am basically learning through watching videos and reverse-engineering other people's code. Needless to say, I'm not very good at this stuff yet.
Anyway, I'm working on a program that detects if a specific color (RGB) is at a specific coordinate in another window, and if it is, the code executes a click.
This is what I put together:
#include <windows.h>
#include <cstdlib>
#include <iostream>
#include <cstring>
using namespace std;
void BigClick()
{
INPUT Input = { 0 };
Input.type = INPUT_MOUSE;
Input.mi.dwFlags = MOUSEEVENTF_LEFTDOWN; //push
::SendInput(1, &Input, sizeof(INPUT));
::ZeroMemory(&Input, sizeof(INPUT)); //or NULL?
Input.type = INPUT_MOUSE;
Input.mi.dwFlags = MOUSEEVENTF_LEFTUP; //release
::SendInput(1, &Input, sizeof(INPUT));
}
int main()
{
COLORREF colortosearch = RGB(0, 0, 255); // color to search
HDC hdcScreen = GetDC(NULL);
int x = 954;
int y = 540;
while (true)
{
if (::GetPixel(hdcScreen, x, y) == colortosearch)
{
// start code to click
cout << "one click completed";
BigClick();
}
}
::ReleaseDC(NULL, hdcScreen);
return 0;
}
The code compiles but it does not click even when the entire screen is blue or RGB(0,0,255). I know BigClick() clicks, since I tested it by itself to make sure. What am I missing here? I'm thinking I'm not giving GetPixel the coordinates to check in the right way, but since I'm so new it could be anything as far as I know.
I changed the fixed coordinates to follow the cursor position and your program seems to work fine. Clicks are also executed!
int main()
{
COLORREF colortosearch = RGB(0, 0, 255); // color to search
HDC hdcScreen = GetDC(NULL);
while (true)
{
POINT cursor;
GetCursorPos(&cursor);
COLORREF color = ::GetPixel(hdcScreen, cursor.x, cursor.y);
if (color == colortosearch) {
// start code to click
cout << "one click completed";
BigClick();
}
else {
int red = GetRValue(color);
int green = GetGValue(color);
int blue = GetBValue(color);
cout << "x: " << cursor.x << ", y:" << cursor.y << " --> ";
cout << "(" << red << ", " << green << ", " << blue << ")\r\n";
}
}
::ReleaseDC(NULL, hdcScreen);
return 0;
}
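As a small design note on the answer above: the loop polls GetPixel as fast as it can, which keeps one CPU core at 100% and can fire SendInput repeatedly for as long as the pixel stays blue. A sketch of a throttled variant (the 50 ms interval is an arbitrary choice, and CLR_INVALID is the value GetPixel returns when it cannot read the pixel):
// Sketch: throttled polling loop with a basic failure check.
// Assumes hdcScreen, colortosearch, and BigClick() from the code above.
while (true)
{
    POINT cursor;
    GetCursorPos(&cursor);
    COLORREF color = ::GetPixel(hdcScreen, cursor.x, cursor.y);
    if (color == CLR_INVALID)
    {
        cout << "GetPixel failed at (" << cursor.x << ", " << cursor.y << ")" << endl;
    }
    else if (color == colortosearch)
    {
        cout << "one click completed" << endl;
        BigClick();
    }
    Sleep(50); // avoid spinning at 100% CPU and limit the click rate
}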
I'm trying to integrate an IDS uEye camera with OpenCV, and it kind of works for now.
The problem I'm facing is that when I use the IDS SDK to view the camera image, I get the full image, but using OpenCV's VideoCapture I only get the top-left quarter of the image.
Think of a rectangle split into quarters: the full image should be the entire rectangle, but what I get from VideoCapture is the top-left quarter only (I originally illustrated this with an image).
I've already tried to adjust the image width and height via cap.set, and since the VideoCapture line comes after the uEye camera's parameters are set, I'm fairly certain it's not a camera settings issue but rather something to do with VideoCapture itself.
char strCamFileName[256];
char* pcImageMemory;
int memId;
int nRet = 0;
SENSORINFO sInfo;
IplImage* img;
HIDS hCam = 0; // index 0 means taking first camera available
RECT rc;
MSG msg;
Mat frame(MaxImageSizeY, MaxImageSizeX, CV_8UC1);
nRet = is_InitCamera(&hCam, hWndDisplay);
if (nRet != IS_SUCCESS)
{
cout << endl << "Error Connecting to Camera" << endl;
cout << "Closing program..." << endl;
return 0;
}
else
{
cout << endl << "Camera initialisation was successful!" << endl << endl;
}
// you can query information about the sensor type of the camera
nRet = is_GetSensorInfo(hCam, &sInfo);
if (nRet == IS_SUCCESS)
{
cout << "Cameramodel: \t\t" << sInfo.strSensorName << endl;
cout << "Maximum image width: \t" << sInfo.nMaxWidth << endl;
cout << "Maximum image height: \t" << sInfo.nMaxHeight << endl << endl << endl;
}
MaxImageSizeX = sInfo.nMaxWidth;
MaxImageSizeY = sInfo.nMaxHeight;
DisplayWidth = MaxImageSizeX;
DisplayHeight = MaxImageSizeY;
int nColorMode = IS_COLORMODE_CBYCRY;
int nBitsPerPixel = 32;
// Get number of available formats and size of list
UINT count;
UINT bytesNeeded = sizeof(IMAGE_FORMAT_LIST);
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_GET_NUM_ENTRIES, &count, sizeof(count));
bytesNeeded += (count - 1) * sizeof(IMAGE_FORMAT_INFO);
void* ptr = malloc(bytesNeeded);
// Create and fill list
IMAGE_FORMAT_LIST* pformatList = (IMAGE_FORMAT_LIST*)ptr;
pformatList->nSizeOfListEntry = sizeof(IMAGE_FORMAT_INFO);
pformatList->nNumListElements = count;
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_GET_LIST, pformatList, bytesNeeded);
// Prepare for creating image buffers
char* pMem = NULL;
int memID = 0;
// Set each format and then capture an image
IMAGE_FORMAT_INFO formatInfo;
// Allocate image mem for current format, set format
nRet = is_AllocImageMem(hCam, MaxImageSizeX, MaxImageSizeY, nBitsPerPixel, &pMem, &memID);
nRet = is_SetImageMem(hCam, pMem, memID);
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_SET_FORMAT, &formatInfo.nFormatID, sizeof(formatInfo.nFormatID));
// Sets the color mode to be used when image data are saved or displayed by the graphics card
is_SetColorMode(hCam, nColorMode);
// allocates an image memory for an image, activates it and sets the way in which the images will be displayed on the screen
int nMemoryId;
is_AllocImageMem(hCam, MaxImageSizeX, MaxImageSizeY, nBitsPerPixel, &pcImageMemory, &nMemoryId);
is_SetImageMem(hCam, pcImageMemory, nMemoryId);
is_SetDisplayMode(hCam, IS_SET_DM_DIB);
is_HotPixel(hCam, IS_HOTPIXEL_DISABLE_CORRECTION, NULL, NULL);
IS_RECT AAOI; // IS_RECT type variable for Auto AOI parameters
AAOI.s32X = MaxImageSizeX / 3 | IS_AOI_IMAGE_POS_ABSOLUTE;
AAOI.s32Width = MaxImageSizeX / 3;
AAOI.s32Y = MaxImageSizeY / 3 | IS_AOI_IMAGE_POS_ABSOLUTE;
AAOI.s32Height = MaxImageSizeY / 3;
double enable = 1;
double disable = 0;
is_SetAutoParameter(hCam, IS_SET_AUTO_SPEED, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_FRAMERATE, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_GAIN, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &disable, 0);
is_AOI(hCam, IS_AOI_AUTO_BRIGHTNESS_SET_AOI, &AAOI, sizeof(AAOI));
is_AOI(hCam, IS_AOI_AUTO_WHITEBALANCE_SET_AOI, &AAOI, sizeof(AAOI));
VideoCapture cap; //--- INITIALIZE VIDEOCAPTURE
int deviceID = 0; // 0 = open default camera
int apiID = cv::CAP_ANY; // 0 = autodetect default API
if (cap.open(deviceID, apiID))
{
cout << "cap opened" << endl;
}
else
{
cout << "cap not opened" << endl;
}
cout << "Press 1 to capture image" << endl
<< "Press 2 to use (last) captured image" << endl;
cap.read(frame);
From what I know, VideoCapture should be able to obtain the entire image from the camera, right?
I'm honestly just really confused about why VideoCapture cuts off 3/4 of the image, and I would appreciate any help.
Alright, I found out the problem...
Again, I left out too much code in the original post (because there's a lot of irrelevant code related to USB stuff), so I'll include the most important part I left out here:
double enable = 1;
double disable = 0;
is_SetAutoParameter(hCam, IS_SET_AUTO_SPEED, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_FRAMERATE, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_GAIN, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &disable, 0);
is_AOI(hCam, IS_AOI_AUTO_BRIGHTNESS_SET_AOI, &AAOI, sizeof(AAOI));
is_AOI(hCam, IS_AOI_AUTO_WHITEBALANCE_SET_AOI, &AAOI, sizeof(AAOI));
//// Acquires a single image from the camera
//is_FreezeVideo(hCam, IS_WAIT);
//// Output an image from an image memory in the specified window
//int nRenderMode = IS_RENDER_FIT_TO_WINDOW;
//is_RenderBitmap(hCam, nMemoryId, hWndDisplay, nRenderMode);
is_ExitCamera(hCam); // exit camera so that OpenCV can access as camera parameters have been set
CalibSet CS; // declaring variable 'CS' under the class 'CalibSet'
Mat livemap1, livemap2;
FileStorage tfs(inputCalibFile, FileStorage::READ); // Read the settings
if (!tfs.isOpened())
{
cout << "Could not open the calibration file: \"" << inputCalibFile << "\"" << endl;
return -1;
}
tfs["camera_matrix"] >> CS.CamMat;
tfs["distortion_coefficients"] >> CS.DistCoeff;
tfs["image_width"] >> CS.image.width;
tfs["image_height"] >> CS.image.height;
tfs.release(); // close Settings file
So basically, the CalibSet class holds the values from the .xml file that stores the results of the undistortion calibration.
More about that here: Camera calibration data retrieval.
But the issue that prevented cap.set from working was likely these last few lines:
tfs["image_width"] >> CS.image.width; and tfs["image_height"] >> CS.image.height;, which take the values stored under "image_width" and "image_height" and put them in the corresponding members of the CalibSet class.
And guess what... the width and height in the .xml file were 640x480...
I changed that portion of the .xml to the expected 1280x1024, the live feed from the camera was fixed, and I finally got the full image instead of the quarter I was getting before.
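For reference, a minimal sketch of one way to keep the capture size tied to the values read from the calibration file (this is an interpretation of the fix described above, not the exact code; it assumes CS.image.width and CS.image.height now hold 1280 and 1024):
// Sketch: open the camera and request the full sensor resolution from the calibration file.
VideoCapture cap;
if (!cap.open(0, cv::CAP_ANY))
{
    cout << "cap not opened" << endl;
    return -1;
}
cap.set(cv::CAP_PROP_FRAME_WIDTH, CS.image.width);   // 1280
cap.set(cv::CAP_PROP_FRAME_HEIGHT, CS.image.height); // 1024

Mat frame;
if (cap.read(frame))
{
    cout << "captured " << frame.cols << "x" << frame.rows << endl;
}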
I'm trying to read and render multiple files in a directory (when combined they form an object) using VTK, but so far I'm getting the following error:
ERROR: In D:\VTK\VTK-src\IO\XML\vtkXMLReader.cxx, line 283
vtkXMLPolyDataReader (00D1B560): Error opening file D:\3d models\Dist\.
ERROR: In D:\VTK\VTK-src\Common\ExecutionModel\vtkExecutive.cxx, line 782
vtkCompositeDataPipeline (00CC2078): Algorithm
vtkXMLPolyDataReader(00D1B560) returned failure for request: vtkInformation
(00D20688)
Debug: Off
Modified Time: 8721
Reference Count: 1
Registered Events: (none)
Request: REQUEST_DATA
FORWARD_DIRECTION: 0
ALGORITHM_AFTER_FORWARD: 1
FROM_OUTPUT_PORT: 0
So far what I've tried is reading only one file instead of multiple files, but I still get the error I mentioned above.
Here's the code I'm working on:
int main(int argc, char *argv[])
{
std::string directoryName = "D:\\3d models\\Dist\\" ;
vtkSmartPointer<vtkDirectory> directory = vtkSmartPointer<vtkDirectory>::New();
int opened = directory->Open(directoryName.c_str());
if(!opened)
{
std::cout << "No es posible abrir este directorio!" << std::endl;
return EXIT_FAILURE;
}
int numberOfFiles = directory->GetNumberOfFiles();
std::cout << "NUmero de archivos: " << numberOfFiles << std::endl;
for (int i = 0; i < numberOfFiles; i++)
{
std::string fileString = directoryName;
////fileString += "/";
fileString += directory->GetFile(i);
std::string ext = vtksys::SystemTools::GetFilenameLastExtension(fileString);
std::cout << fileString.c_str() << " extension: " << ext << std::endl;
std::string name = vtksys::SystemTools::GetFilenameWithoutLastExtension(fileString);
std::cout << "nombre: " << name << std::endl;
const char*cstr = fileString.c_str();
std::cout << cstr << endl;
vtkSmartPointer<vtkXMLPolyDataReader> reader =
vtkSmartPointer<vtkXMLPolyDataReader>::New();
reader->SetFileName(cstr);
reader->Update();
reader->GetOutput();
vtkSmartPointer<vtkTransform> transform =
vtkSmartPointer<vtkTransform>::New();
transform->Scale(.005, .005, .005);
vtkSmartPointer<vtkTransformFilter> transformFilter =
vtkSmartPointer<vtkTransformFilter>::New();
transformFilter->SetInputConnection(reader->GetOutputPort());
transformFilter->SetTransform(transform);
// Visualizar
vtkSmartPointer<vtkPolyDataMapper> mapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(transformFilter->GetOutputPort());
vtkSmartPointer<vtkActor> actor =
vtkSmartPointer<vtkActor>::New();
actor->SetPosition(1.1, .5, .1);
actor->SetMapper(mapper);
vtkSmartPointer<vtkOpenVRRenderer> renderer =
vtkSmartPointer<vtkOpenVRRenderer>::New();
vtkSmartPointer<vtkOpenVRRenderWindow> renderWindow =
vtkSmartPointer<vtkOpenVRRenderWindow>::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkOpenVRRenderWindowInteractor> renderWindowInteractor =
vtkSmartPointer<vtkOpenVRRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
vtkNew<vtkOpenVRCamera> cam;
renderer->SetActiveCamera(cam);
renderer->AddActor(actor);
renderer->SetBackground(.2, .3, .4);
renderWindow->Render();
renderWindowInteractor->Start();
}
return EXIT_SUCCESS;
}
What am I missing? I've tried with different types of files, but I still haven't been able to read and render anything using this method.
It looks like the first entry when you list the files in a folder is "." (which is normal), so the path to the file you try to open is "D:\3d models\Dist\." and it is not a valid file for vtkXMLPolyDataReader.
You should only try to open vtk files written with vtkXMLPolyDataWriter.
For example, check that the extension is ".vtp" (or whatever extension you used to save the files containing your vtkPolyData models).
Check the extension in the first part of your loop:
for (int i = 0; i < numberOfFiles; i++)
{
std::string fileString = directoryName;
////fileString += "/";
fileString += directory->GetFile(i);
std::string ext = vtksys::SystemTools::GetFilenameLastExtension(fileString);
std::cout << fileString.c_str() << " extension: " << ext << std::endl;
// add this line to skip "." and ".." and any file whose extension is not .vtp
if (ext.find(".vtp") == std::string::npos) continue;
(...)
I also suspect you are trying to use vtkXMLPolyDataReader while you should use vtkPolyDataReader (this really depends on what writer was used to produce the files). vtkXMLPolyDataReader is the standard reader for .vtp files.
Lastly, you are creating a renderer, a render window, and a camera inside the loop, which means one window per object. That's uncommon; is that what you want?
Long story short: you need at least one renderer and one window to display one or many models. Each model is represented by its own actor, and the window is where the renderer draws the (one or many) actors. A renderer is a rendering pass: you can of course have one rendering pass per actor, but unless you are sure you need that, you don't.
Here's your code, modified as I would do at your place:
int main(int argc, char *argv[])
{
std::string directoryName = "D:\\3d models\\Dist\\" ;
vtkSmartPointer<vtkOpenVRRenderer> renderer =
vtkSmartPointer<vtkOpenVRRenderer>::New();
vtkSmartPointer<vtkOpenVRRenderWindow> renderWindow =
vtkSmartPointer<vtkOpenVRRenderWindow>::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkOpenVRRenderWindowInteractor> renderWindowInteractor =
vtkSmartPointer<vtkOpenVRRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
vtkNew<vtkOpenVRCamera> cam;
renderer->SetActiveCamera(cam);
renderer->SetBackground(.2, .3, .4);
vtkSmartPointer<vtkDirectory> directory = vtkSmartPointer<vtkDirectory>::New();
int opened = directory->Open(directoryName.c_str());
if(!opened)
{
std::cout << "No es posible abrir este directorio!" << std::endl;
return EXIT_FAILURE;
}
int numberOfFiles = directory->GetNumberOfFiles();
std::cout << "NUmero de archivos: " << numberOfFiles << std::endl;
for (int i = 0; i < numberOfFiles; i++)
{
std::string fileString = directoryName;
////fileString += "/";
fileString += directory->GetFile(i);
std::string ext = vtksys::SystemTools::GetFilenameLastExtension(fileString);
std::cout << fileString.c_str() << " extension: " << ext << std::endl;
if (ext.find(".vtp") == std::string::npos) continue;
std::string name = vtksys::SystemTools::GetFilenameWithoutLastExtension(fileString);
std::cout << "nombre: " << name << std::endl;
const char*cstr = fileString.c_str();
std::cout << cstr << endl;
vtkSmartPointer<vtkXMLPolyDataReader> reader =
vtkSmartPointer<vtkXMLPolyDataReader>::New();
reader->SetFileName(cstr);
reader->Update();
reader->GetOutput();
vtkSmartPointer<vtkTransform> transform =
vtkSmartPointer<vtkTransform>::New();
transform->Scale(.005, .005, .005);
vtkSmartPointer<vtkTransformFilter> transformFilter =
vtkSmartPointer<vtkTransformFilter>::New();
transformFilter->SetInputConnection(reader->GetOutputPort());
transformFilter->SetTransform(transform);
// Visualizar
vtkSmartPointer<vtkPolyDataMapper> mapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(transformFilter->GetOutputPort());
vtkSmartPointer<vtkActor> actor =
vtkSmartPointer<vtkActor>::New();
actor->SetPosition(1.1, .5, .1);
actor->SetMapper(mapper);
renderer->AddActor(actor);
}
renderWindow->Render();
renderWindowInteractor->Start();
return EXIT_SUCCESS;
}
I am using C++ and OpenCV 2.3.1 for background subtraction. I have tried many times to change the parameters of MOG2 in order to disable the shadow detection feature, and I have also tried what other people suggest on the internet; however, shadow detection is still enabled.
Could you please tell me how to disable it?
See the sample code below (the generated mask image is not reproduced here).
//opencv
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
// Global variables
Mat frame; //current frame
Mat fgMaskMOG2; //fg mask generated by the MOG2 method
Ptr<BackgroundSubtractor> pMOG2; //MOG Background subtractor
int keyboard; //input from keyboard
//new variables
int history = 1250;
float varThreshold = 16;
bool bShadowDetection = true;
/*
//added to remove the shadow
unsigned char nShadowDetection = 0;
float fTau = 0.5;
//static const unsigned char nShadowDetection =( unsigned char)0;
*/
// Function Headers
void help();
void processImages(char* firstFrameFilename);
void help()
{
cout
<< "This program shows how to use background subtraction methods provided by " << endl
<< " OpenCV. You can process images (-img)." << endl
<< "Usage:" << endl
<< "./bs -img <image filename>}" << endl
<< "for example: ./bs -img /data/images/1.png" << endl
<< endl;
}
// morphological operation
void morphOps(Mat &thresh){
//create structuring elements that will be used to "erode" and "dilate" the image
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2)); //2x2 element for erosion
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1)); //1x1 element for dilation
erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);
dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);
}
// main function
int main(int argc, char* argv[])
{
//print help information
help();
//check for the input parameter correctness
if(argc != 3) {
cerr <<"Incorret input list" << endl;
cerr <<"exiting..." << endl;
return EXIT_FAILURE;
}
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG2 ");
//create Background Subtractor objects
//pMOG2 = new BackgroundSubtractorMOG2();
pMOG2 = new BackgroundSubtractorMOG2( history, varThreshold, bShadowDetection);
//BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=1);
if(strcmp(argv[1], "-img") == 0) {
//input data coming from a sequence of images
processImages(argv[2]);
}
else {
//error in reading input parameters
cerr <<"Please, check the input parameters." << endl;
cerr <<"Exiting..." << endl;
return EXIT_FAILURE;
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
//function processImages
void processImages(char* firstFrameFilename) {
//read the first file of the sequence
frame = imread(firstFrameFilename);
if(frame.empty()){
//error in opening the first image
cerr << "Unable to open first image frame: " << firstFrameFilename << endl;
exit(EXIT_FAILURE);
}
//current image filename
string fn(firstFrameFilename);
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//update the background model
pMOG2->operator()(frame, fgMaskMOG2,-1);
//get the frame number and write it on the current frame
size_t index = fn.find_last_of("/");
if(index == string::npos) {
index = fn.find_last_of("\\");
}
size_t index2 = fn.find_last_of(".");
string prefix = fn.substr(0,index+1);
string suffix = fn.substr(index2);
string frameNumberString = fn.substr(index+1, index2-index-1);
istringstream iss(frameNumberString);
int frameNumber = 0;
iss >> frameNumber;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
morphOps(fgMaskMOG2);
imshow("FG Mask MOG2 ", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey(1);
//search for the next image in the sequence
ostringstream oss;
oss << (frameNumber + 1);
string nextFrameNumberString = oss.str();
string nextFrameFilename = prefix + nextFrameNumberString + suffix;
//read the next frame
frame = imread(nextFrameFilename);
if(frame.empty()){
//error in opening the next image in the sequence
cerr << "Unable to open image frame: " << nextFrameFilename << endl;
exit(EXIT_FAILURE);
}
//update the path of the current frame
fn.assign(nextFrameFilename);
// save subtracted images
string imageToSave =("output_MOG_" + frameNumberString + ".png");
bool saved = imwrite( "D:\\SO\\temp\\" +imageToSave,fgMaskMOG2);
if(!saved) {
cerr << "Unable to save " << imageToSave << endl;
}
}
}
Take a look at the documentation.
In your code you have
bool bShadowDetection = true;
change it to
bool bShadowDetection = false;
EDIT:
OpenCV 3's BackgroundSubtractorMOG2 class has a setShadowValue(int value) function that sets the gray value used to mark shadows.
Setting that gray value to zero removes the shadow from the mask.
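For reference, a minimal OpenCV 3.x sketch of both options (this is not the asker's 2.3.1 code; in 3.x the subtractor is created with createBackgroundSubtractorMOG2, and you can either turn shadow detection off at construction or keep it on and set the shadow gray value to zero):
// OpenCV 3.x sketch: two ways to keep shadows out of the foreground mask.
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
    cv::createBackgroundSubtractorMOG2(1250, 16.0, false); // detectShadows = false (option 1)

// Option 2: keep shadow detection on but paint shadow pixels with gray value 0,
// so they merge into the background of the mask:
// mog2->setDetectShadows(true);
// mog2->setShadowValue(0);

cv::Mat frame = cv::imread("1.png"); // any input frame
cv::Mat fgMask;
mog2->apply(frame, fgMask); // fgMask now holds 0 for background and 255 for foreground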
It depends on what you really want to see. If you want to separate the shadows from your segmentation, keep
bool bShadowDetection = true;
and use
cv::threshold(Mask, Mask, 254, 255, cv::THRESH_BINARY);
after MOG2->apply().
You'll get exactly the part of the mask that is 255 (the foreground), since shadow pixels are marked with the default gray value of 127.
And sorry for reanimating this...