How to access RGB image data - C++

I want to compare the data at each pixel with a colour, then find the contour and take the centroid points of the contour. I am using the following statement to find the contour data; am I wrong about it?
int pos = i * w * Channels + j; //channels is 3 as rgb
// if any data exists
if (data->imageData[pos]>0)
The code is like this:
for (int i = x; i < x+h; i++) //height of frame pixels
{
for (int j = y; j < y+w; j++)//width of frame pixels
{
int pos = i * w * Channels + j; //channels is 3 as rgb
// if any data exists
if (data->imageData[pos]>0) //Taking data (here is the problem how to take)
{
xPos += j;
yPos += i;
nPix++;
}
}
}
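For a packed 3-channel IplImage the linear index generally needs both the row stride (widthStep) and a per-pixel channel offset, so the statement above is likely the problem. A minimal sketch of what I believe the indexing should look like, assuming 8-bit BGR data and the same variable names as above:
int pos = i * data->widthStep + j * Channels; // start of pixel at row i, column j
// imageData is a signed char*, so cast to uchar before comparing
uchar b = (uchar)data->imageData[pos + 0];
uchar g = (uchar)data->imageData[pos + 1];
uchar r = (uchar)data->imageData[pos + 2];
if (b > 0 || g > 0 || r > 0) // treat the pixel as foreground if any channel is non-zero
{
xPos += j;
yPos += i;
nPix++;
}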

I use the following code structure
/**
* @brief Calculate greeness from an RGB image
*
* Performs the greeness pixelwise transform on the input image.
* Greeness is defined as
* Greeness = 255*G/sqrt(R^2+G^2+B^2)
* The function assumes that the two images have identical resolution.
*
* @param imSrc Input RGB image.
* @param imDst Output grayscale (greeness) image.
*/
void rgbToGreeness( IplImage *imSrc , IplImage* imDst) {
// Allocate variables
int tmp_pix;
uchar * _SrcPtr, * _DstPtr;
// Iterate over the image line by line
for(int y = 0 ; y < imSrc->height ; y++ )
{
// Locate pointers to the first data element in the current line
_SrcPtr = ( uchar* )( imSrc->imageData + y * imSrc->widthStep );
_DstPtr = ( uchar* )( imDst->imageData + y * imDst->widthStep );
// Iterate over the elements in the current line
for( int x = 0 ; x < imSrc->width ; x++ )
{
// Greeness = 255*G/sqrt(R^2+G^2+B^2)
tmp_pix = (int) (255*_SrcPtr[3*x+1]/pow(pow((float)_SrcPtr[3*x],2) + pow((float)_SrcPtr[3*x+1], 2) + pow((float)_SrcPtr[3*x+2], 2), (float) 0.5));
//If value is larger than 255, set it to 255 and lower than 0 set it to 0
_DstPtr[x] = (uchar) ( ( tmp_pix < 0 ) ? 0 : ( ( tmp_pix > 255 ) ? 255 : tmp_pix ) );
}
}
}
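A minimal usage sketch for this function, assuming the old OpenCV C API (cvLoadImage/cvCreateImage) and an example file name; the destination must be allocated as a single-channel image of the same size:
IplImage* imSrc = cvLoadImage("input.jpg", CV_LOAD_IMAGE_COLOR); // 8-bit, 3 channels
IplImage* imDst = cvCreateImage(cvGetSize(imSrc), IPL_DEPTH_8U, 1); // same size, 1 channel
rgbToGreeness(imSrc, imDst);
// ... use imDst, e.g. cvSaveImage("greeness.png", imDst); ...
cvReleaseImage(&imDst);
cvReleaseImage(&imSrc);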

Here is some code to access RGB data of a pixel in an image
IplImage* img=cvLoadImage(fileName);
CvScalar s;
s=cvGet2D(img,i,j); // get the (i,j) pixel value
s.val[0]=111; // B-channel
s.val[1]=111; // G-channel
s.val[2]=111; // R-channel
cvSet2D(img,i,j,s); // set the (i,j) pixel value
Source (modified a little): http://www.cs.iit.edu/~agam/cs512/lect-notes/opencv-intro/opencv-intro.html#SECTION00053000000000000000

As requested, here is my exact code where I want to calculate centroids from the contour.
My exact code is like this:
1) Taking an RGB image as input.
2) x=0, y=0, w=width of frame, h=height of frame are the data passed in.
void cRecursiveCentroids::ComputeCentroid(int x, int y, int w, int h, IplImage *data, bool splitOnUpDown, int level, int id, int addToId){
if (level == m_Levels-1 ) return;
int Channels = data->nChannels; // Number of channels
std::cout << "Channels: " << Channels << "\n";
int xPos = 0;
int yPos = 0;
int nPix = 0;
for (int i = x; i < x+h; i++) //Tracing the contour
{
for (int j = y; j < y+w; j++)
{
int pos = i * m_Wid * Channels + j; // Here may be the error i am thinking
// if any data exists
if (data->imageData[pos]>0)
{
xPos += j;
//std::cout << "xPos: " << xPos << "\n";
yPos += i;
// std::cout << "yPos: " << yPos << "\n";
nPix++;
}
}
}
Check = nPix;
if (nPix > 0){ // Calculating Position
xPos = (int)((float)xPos / (float)nPix);
yPos = (int)((float)yPos / (float)nPix);
int num = ( id + addToId ) > 16 ? 16 : (id+addToId);
m_Cent[num].posx = xPos;
m_Cent[num].posy = yPos;
m_Cent[num].level = level;
splitOnUpDown = !splitOnUpDown;
level = level+1;
if (splitOnUpDown) //Recursive calling for centroids
{
id *= 2;
ComputeCentroid(x,y,w,(yPos - y), data, splitOnUpDown, level, id, addToId);
ComputeCentroid(x,yPos,w,h-(yPos-y), data, splitOnUpDown, level, id+1, addToId);
} else {
id *= 2;
ComputeCentroid(x,y,(xPos-x),h, data, splitOnUpDown, level, id, addToId);
ComputeCentroid(xPos,y,w - (xPos-x),h, data, splitOnUpDown, level, id+1, addToId);
}
}
DrawCentroidPoints(); //Draw Centroid Points
}

Related

Find the average colour on screen in SDL

In SDL we're trying to find the average colour of the screen. To do so, we're reading all the pixel colour values and putting them into an array (performance is not a concern). For some reason, however, GetPixel always returns the colour (0,0,0,0). I've already established that RenderReadPixels works correctly, since saving a screenshot works just fine.
const Uint32 format = SDL_PIXELFORMAT_ARGB8888;
SDL_Surface* surface = SDL_CreateRGBSurfaceWithFormat(0, width, height, 32, format);
SDL_RenderReadPixels(renderer, NULL, format, surface->pixels, surface->pitch);
float* coverage = new float[width*height]; // * allocates memory
coverage[0] = 1;
for (int i = 0; i < width; i++)
{
for (int j = 0; j < height; j++)
{
SDL_Color col;
col = GetPixel(surface, i, j);
coverage[i * height + j] = (1/3)*(col.r + col.b + col.g); //Return coverage value at i, j
std::cout << coverage[i * height + j]; //Always returns 0
std::cout << "\n";
}
}
SDL_Color GetPixel(SDL_Surface* srf, int x, int y)
{
SDL_Color color;
SDL_GetRGBA(get_pixel32(srf, x, y), srf->format, &color.r, &color.g, &color.b, &color.a);
return color;
}
Uint32 get_pixel32(SDL_Surface* surface, int x, int y)
{
//Convert the pixels to 32 bit
Uint32* pixels = (Uint32*)surface->pixels;
//Get the requested pixel
return pixels[(y * surface->w) + x];
}
1/3 is always 0 because both operands are integers, so the division is integer division in C++.
Best be explicit about what you want:
coverage[i * height + j] = float(col.r + col.b + col.g) / 3.0;
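As a follow-up, a minimal sketch of computing the actual average colour once the reads work, assuming the surface, width, height and GetPixel from the question (the variable names here are mine):
// accumulate per-channel sums as doubles to avoid overflow and truncation
double sumR = 0, sumG = 0, sumB = 0;
for (int x = 0; x < width; x++)
{
for (int y = 0; y < height; y++)
{
SDL_Color col = GetPixel(surface, x, y);
sumR += col.r;
sumG += col.g;
sumB += col.b;
}
}
double nPixels = double(width) * double(height);
SDL_Color average;
average.r = Uint8(sumR / nPixels);
average.g = Uint8(sumG / nPixels);
average.b = Uint8(sumB / nPixels);
average.a = 255;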

I'm reading the RGB values of pixels from a BMP file and not getting the correct values

I followed the code at this link read pixel value in bmp file to be able to read the RGB values of pixels. When the entire image is one color and I read a random pixel's values, they are correct. After this, I tried to make the function also find how many unique colors there are, so I added a box of a different color to the image, but the function still only finds one color. I'm wondering if I'm somehow not looking at all the bytes contained in the BMP, but I'm not sure how that would happen, as I'm new to this.
To make sure the code wasn't finding differently colored pixels but failing to add them to the list of unique pixels, I tried printing output whenever a color was found that differed from the one that is always found, but no output ever came from it.
struct Color {
int R = -1;
int G = -1;
int B = -1;
};
unsigned char* readBMP(char* filename) {
int i;
FILE* f = fopen(filename, "rb");
unsigned char info[54];
fread(info, sizeof(unsigned char), 54, f);
int width = *(int*)&info[18]; //the reason *(int*) is used here because there's an integer stored at 18 in the array that indicates how wide the BMP is
int height = *(int*)&info[22]; // same reasoning for *(int*)
int size = 3 * width * height;
unsigned char* data = new unsigned char[size];
fread(data, sizeof(unsigned char), size, f);
fclose(f);
// windows has BMP saved as BGR tuples and this switches it to RGB
for(i = 0; i < size; i += 3){
unsigned char tmp = data[i];
data[i] = data[i+2];
data[i+2] = tmp;
}
i = 0; // i is the x value of the pixel that is having its RGB values checked
int j = 0; // j is the y value of the pixel that is having its RGB values checked
unsigned char R = data[3 * (i * width + j)]; // value of R of the pixel at (i,j)
unsigned char G = data[3 * (i * width + j) + 1]; // value of G of the pixel at (i,j)
unsigned char B = data[3 * (i * width + j) + 2]; // value of B of the pixel at (i,j)
std::cout << "value of R is " << int(R);
std::cout << " value of G is " << int(G);
std::cout << " value of B is " << int(B);
Color num_colors[5];
int count;
int z;
int flag;
int iterator;
int sum;
for(count = 0; count < size; count += 1){
unsigned char R = data[3 * (i * width + j)];
unsigned char G = data[3 * (i * width + j) + 1];
unsigned char B = data[3 * (i * width + j) + 2];
sum = int(R) + int(G) + int(B);
if(sum != 301) {// 301 is the sum of the RGB values of the color that the program does manage to find
std::cout << sum;
}
flag = 0;
for(z = 0; z < 5; z += 1){
if(num_colors[z].R == R && num_colors[z].G == G && num_colors[z].B == B){
flag = 1;
}
}
if(flag == 1){
continue;
}
iterator = 0;
while(num_colors[iterator].R != -1){
iterator += 1;
}
num_colors[iterator].R = R;
num_colors[iterator].G = G;
num_colors[iterator].B = B;
}
int number = 0;
for(int r = 0; r < 5; r += 1){
std::cout << "\nValue of R here: " << num_colors[r].R;
if(num_colors[r].R != -1){
number += 1;
}
}
std::cout << "\nNumber of colors in image: " << number;
return data;
}
https://imgur.com/a/dXllIWL
This is the picture I'm using, so there should be two colors found, but the code only finds red pixels.
Your problem is that you are always checking the RGB values at (0,0):
i = 0; // i is the x value of the pixel that is having its RGB values checked
int j = 0; // j is the y value of the pixel that is having its RGB values checked
...
for(count = 0; count < size; count += 1){
unsigned char R = data[3 * (i * width + j)];
unsigned char G = data[3 * (i * width + j) + 1];
unsigned char B = data[3 * (i * width + j) + 2];
i and j define the X and Y position of the pixel you are checking, but notice that you never change them in the loop. Your loop will keep doing the same thing over and over again. What you probably want is a double loop, going through all coordinates in your image:
for(int y=0; y<height; y++)
for(int x=0; x<width; x++){
unsigned char R = data[3 * (y * width + x) + 0];
unsigned char G = data[3 * (y * width + x) + 1];
unsigned char B = data[3 * (y * width + x) + 2];
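Building on that, a minimal sketch of counting unique colors over the whole image, swapping the fixed-size array for a std::set (data, width and height as in the question):
#include <iostream>
#include <set>
#include <tuple>

std::set<std::tuple<int, int, int>> colors;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
int base = 3 * (y * width + x);
colors.insert(std::make_tuple((int)data[base], (int)data[base + 1], (int)data[base + 2]));
}
}
std::cout << "Number of colors in image: " << colors.size() << "\n";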

Image Shearing C++

I'm trying to shear an image along the x-axis, using OpenCV to load the image and the following algorithm to shear it: x′ = x + y·Bx. But for some reason, I end up with the following shear:
My source code looks like this:
#include "stdafx.h"
#include "opencv2\opencv.hpp"
using namespace std;
using namespace cv;
int main()
{
Mat src = imread("B2DBy.jpg", 1);
if (src.empty())
cout << "Error: Loading image" << endl;
int r1, c1; // tranformed point
int rows, cols; // original image rows and columns
rows = src.rows;
cols = src.cols;
float Bx = 2; // amount of shearing in x-axis
float By = 0; // amount of shearing in y-axis
int maxXOffset = abs(cols * Bx);
int maxYOffset = abs(rows * By);
Mat out = Mat::ones(src.rows + maxYOffset, src.cols + maxXOffset, src.type()); // create output image to be the same as the source
for (int r = 0; r < out.rows; r++) // loop through the image
{
for (int c = 0; c < out.cols; c++)
{
r1 = r + c * By - maxYOffset; // map old point to new
c1 = r * Bx + c - maxXOffset;
if (r1 >= 0 && r1 <= out.rows && c1 >= 0 && c1 <= out.cols) // check if the point is within the boundaries
{
out.at<uchar>(r, c) = src.at<uchar>(r1, c1); // set value
}
}
}
namedWindow("Source image", CV_WINDOW_AUTOSIZE);
namedWindow("Rotated image", CV_WINDOW_AUTOSIZE);
imshow("Source image", src);
imshow("Rotated image", out);
waitKey(0);
return 0;
}
EDIT
Fixed it myself. I didn't need to subtract the offset. Here's the updated source code:
Mat forward(Mat img) {
Mat umg = img;
int y1, x1; // tranformed point
int rows, cols; // original image rows and columns
rows = umg.rows;
cols = umg.cols;
float Bx = 0.7; // amount of shearing in x-axis
float By = 0; // amount of shearing in y-axis
int maxXOffset = abs(rows * Bx);
int maxYOffset = abs(cols * By);
Mat out = Mat::ones(rows + maxYOffset, cols + maxXOffset, umg.type()); // create output image to be the same as the source
for (int y = 0; y < rows; y++) // loop through the image
{
for (int x = 0; x < cols; x++)
{
y1 = y + x * By; // map old point to new
x1 = y * Bx + x;
out.at<uchar>(y1, x1) = umg.at<uchar>(y, x); // set value
}
}
return out;
}
Mat backwards(Mat img) {
Mat umg = img;
int y1, x1; // tranformed point
int rows, cols; // original image rows and columns
rows = umg.rows;
cols = umg.cols;
float Bx = 0.7; // amount of shearing in x-axis
float By = 0; // amount of shearing in y-axis
int maxXOffset = abs(rows * Bx);
int maxYOffset = abs(cols * By);
Mat out = Mat::ones(rows + maxYOffset, cols + maxXOffset, umg.type()); // create output image to be the same as the source
for (int y = 0; y < rows; y++) // loop through the image
{
for (int x = 0; x < cols; x++)
{
//y1 = y + x * By; // map old point to new
//x1 = y * Bx + x;
y1 = (1 / (1 - Bx*By)) * (y + x * By);
x1 = (1 / (1 - Bx*By)) * (y * Bx + x);
out.at<uchar>(y1, x1) = umg.at<uchar>(y, x); // set value
}
}
return out;
}
int main()
{
Mat src = imread("B2DBy.jpg", 0);
if (src.empty())
cout << "Error: Loading image" << endl;
Mat forwards = forward(src);
Mat back = backwards(src);
namedWindow("Source image", CV_WINDOW_NORMAL);
imshow("Source image", src);
imshow("back", back);
imshow("forward image", forwards);
waitKey(0);
return 0;
}
I found some time to work on this.
Now I understand what you tried to achieve with the offset computation, but I'm not sure whether yours is correct.
Just change all the cv::Vec3b to unsigned char or uchar and load as grayscale, if wanted.
Please try this code and maybe you'll find your error:
// no interpolation yet
// cv::Vec3b only
cv::Mat shear(const cv::Mat & input, float Bx, float By)
{
if (Bx*By == 1)
{
throw("Shearing: Bx*By==1 is forbidden");
}
if (input.type() != CV_8UC3) return cv::Mat();
// shearing:
// x'=x+y·Bx
// y'=y+x*By
// shear the extreme positions to find out new image size:
std::vector<cv::Point2f> extremePoints;
extremePoints.push_back(cv::Point2f(0, 0));
extremePoints.push_back(cv::Point2f(input.cols, 0));
extremePoints.push_back(cv::Point2f(input.cols, input.rows));
extremePoints.push_back(cv::Point2f(0, input.rows));
for (unsigned int i = 0; i < extremePoints.size(); ++i)
{
cv::Point2f & pt = extremePoints[i];
pt = cv::Point2f(pt.x + pt.y*Bx, pt.y + pt.x*By);
}
cv::Rect offsets = cv::boundingRect(extremePoints);
cv::Point2f offset = -offsets.tl();
cv::Size resultSize = offsets.size();
cv::Mat shearedImage = cv::Mat::zeros(resultSize, input.type()); // every pixel here is implicitely shifted by "offset"
// perform the shearing by back-transformation
for (int j = 0; j < shearedImage.rows; ++j)
{
for (int i = 0; i < shearedImage.cols; ++i)
{
cv::Point2f pp(i, j);
pp = pp - offset; // go back to original coordinate system
// go back to original pixel:
// x'=x+y·Bx
// y'=y+x*By
// y = y'-x*By
// x = x' -(y'-x*By)*Bx
// x = +x*By*Bx - y'*Bx +x'
// x*(1-By*Bx) = -y'*Bx +x'
// x = (-y'*Bx +x')/(1-By*Bx)
cv::Point2f p;
p.x = (-pp.y*Bx + pp.x) / (1 - By*Bx);
p.y = pp.y - p.x*By;
if ((p.x >= 0 && p.x < input.cols) && (p.y >= 0 && p.y < input.rows))
{
// TODO: interpolate, if wanted (p is floating point precision and can be placed between two pixels)!
shearedImage.at<cv::Vec3b>(j, i) = input.at<cv::Vec3b>(p);
}
}
}
return shearedImage;
}
int main(int argc, char* argv[])
{
cv::Mat input = cv::imread("C:/StackOverflow/Input/Lenna.png");
cv::Mat output = shear(input, 0.7, 0);
//cv::Mat output = shear(input, -0.7, 0);
//cv::Mat output = shear(input, 0, 0.7);
cv::imshow("input", input);
cv::imshow("output", output);
cv::waitKey(0);
return 0;
}
Giving me these outputs for the 3 sample lines:

Hough transform returns the collinear and semi collinear points

I have points in an image and I need to detect the most collinear ones. The fastest method is the Hough transform, but I have to modify the OpenCV implementation: I need the semi-collinear points to be returned with the detected line, which is why I modified the polar line struct. A tolerance threshold is also needed to pick up nearly collinear points, as shown in the image. Can someone help with how to tune this threshold?
I need at least four semi-collinear points to detect the line to which they belong.
The points of the first image were detected by 6 overlapping lines, the points of the middle image were detected by none, and the third image's points were detected by three lines.
What is the best way to get rid of the overlapping lines? Or how do I tune the tolerance threshold so that the semi-collinear points are detected by only one line?
This is my own function call:
vector<CvLinePolar2> lines;
CvMat c_image = source1; // loaded image
HoughLinesStandard(&c_image,1,CV_PI/180,4,&lines,INT_MAX);
typedef struct CvLinePolar2
{
float rho;
float angle;
vector<CvPoint> points;
};
void HoughLinesStandard( const CvMat* img, float rho, float theta,
int threshold, vector<CvLinePolar2> *lines, int linesMax= INT_MAX )
{
cv::AutoBuffer<int> _accum, _sort_buf;
cv::AutoBuffer<float> _tabSin, _tabCos;
const uchar* image;
int step, width, height;
int numangle, numrho;
int total = 0;
int i, j;
float irho = 1 / rho;
double scale;
vector<vector<CvPoint>> lpoints;
CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
image = img->data.ptr;
step = img->step;
width = img->cols;
height = img->rows;
numangle = cvRound(CV_PI / theta);
numrho = cvRound(((width + height) * 2 + 1) / rho);
_accum.allocate((numangle+2) * (numrho+2));
_sort_buf.allocate(numangle * numrho);
_tabSin.allocate(numangle);
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
//memset( lpoints, 0, sizeof(lpoints) );
lpoints.resize(sizeof(accum[0]) * (numangle+2) * (numrho+2));
float ang = 0;
for(int n = 0; n < numangle; ang += theta, n++ )
{
tabSin[n] = (float)(sin(ang) * irho);
tabCos[n] = (float)(cos(ang) * irho);
}
// stage 1. fill accumulator
for( i = 0; i < height; i++ )
for( j = 0; j < width; j++ )
{
if( image[i * step + j] != 0 )
{
CvPoint pt;
pt.x = j; pt.y = i;
for(int n = 0; n < numangle; n++ )
{
int r = cvRound( j * tabCos[n] + i * tabSin[n] );
r += (numrho - 1) / 2;
int ind = (n+1) * (numrho+2) + r+1;
int s = accum[ind];
accum[ind]++;
lpoints[ind].push_back(pt);
}
}
}
// stage 2. find local maximums
for(int r = 0; r < numrho; r++ )
for(int n = 0; n < numangle; n++ )
{
int base = (n+1) * (numrho+2) + r+1;
if( accum[base] > threshold &&
accum[base] > accum[base - 1] && accum[base] >= accum[base + 1] &&
accum[base] > accum[base - numrho - 2] && accum[base] >= accum[base + numrho + 2] )
sort_buf[total++] = base;
}
// stage 3. sort the detected lines by accumulator value
icvHoughSortDescent32s( sort_buf, total, accum );
// stage 4. store the first min(total,linesMax) lines to the output buffer
linesMax = MIN(linesMax, total);
scale = 1./(numrho+2);
for( i = 0; i < linesMax; i++ )
{
CvLinePolar2 line;
int idx = sort_buf[i];
int n = cvFloor(idx*scale) - 1;
int r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = n * theta;
line.points = lpoints[idx];
lines->push_back(line);
}
}
One approach is non-maximal suppression to thin out the candidate set for potential lines. Once you've identified the thinned potential lines you could then compute an average of the remaining lines that would satisfy some angular or spatial difference threshold.
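A rough sketch of that kind of suppression over the already-sorted lines; this is my own illustration, not tested against the modified HoughLinesStandard above, and deltaRho/deltaTheta are hypothetical tolerance parameters to tune:
#include <cmath>
#include <vector>

// keep a line only if no stronger, already-kept line is within the given
// rho/theta tolerances; "sorted" is assumed ordered by accumulator value
std::vector<CvLinePolar2> suppressLines(const std::vector<CvLinePolar2>& sorted,
float deltaRho, float deltaTheta)
{
std::vector<CvLinePolar2> kept;
for (size_t i = 0; i < sorted.size(); i++)
{
bool tooClose = false;
for (size_t k = 0; k < kept.size(); k++)
{
if (std::fabs(sorted[i].rho - kept[k].rho) < deltaRho &&
std::fabs(sorted[i].angle - kept[k].angle) < deltaTheta)
{
tooClose = true;
break;
}
}
if (!tooClose)
kept.push_back(sorted[i]);
}
return kept;
}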
Try HoughLinesP (see the OpenCV reference).
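A minimal usage sketch of the C++ API; the file name is just an example, and the threshold, minLineLength and maxLineGap values are placeholders to tune for how many gaps a line may bridge:
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
// binary single-channel image containing the points
cv::Mat points = cv::imread("points.png", cv::IMREAD_GRAYSCALE);
std::vector<cv::Vec4i> segments;
// rho = 1 pixel, theta = 1 degree, threshold = 4 votes
cv::HoughLinesP(points, segments, 1, CV_PI / 180, 4, 10, 5);
// each cv::Vec4i holds (x1, y1, x2, y2) of one detected segment
return 0;
}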

Generate a plane with triangle strips

What would be the best algorithm to generate a list of vertices to draw a plane using triangle strips?
I'm looking for a function which receives the plane's width and height and returns a float array containing correctly indexed vertices.
width represents the number of vertices per row.
height represents the number of vertices per column.
float* getVertices( int width, int height ) {
...
}
void render() {
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, getVertices(width,height));
glDrawArrays(GL_TRIANGLE_STRIP, 0, width*height);
glDisableClientState(GL_VERTEX_ARRAY);
}
Thank you all. I've coded this. Is it correct? Or is the generated strip somehow wrong?
int width;
int height;
float* vertices = 0;
int* indices = 0;
int getVerticesCount( int width, int height ) {
return width * height * 3;
}
int getIndicesCount( int width, int height ) {
return (width*height) + (width-1)*(height-2);
}
float* getVertices( int width, int height ) {
if ( vertices ) return vertices;
vertices = new float[ getVerticesCount( width, height ) ];
int i = 0;
for ( int row=0; row<height; row++ ) {
for ( int col=0; col<width; col++ ) {
vertices[i++] = (float) col;
vertices[i++] = 0.0f;
vertices[i++] = (float) row;
}
}
return vertices;
}
int* getIndices( int width, int height ) {
if ( indices ) return indices;
indices = new int[ getIndicesCount( width, height ) ];
int i = 0;
for ( int row=0; row<height-1; row++ ) {
if ( (row&1)==0 ) { // even rows
for ( int col=0; col<width; col++ ) {
indices[i++] = col + row * width;
indices[i++] = col + (row+1) * width;
}
} else { // odd rows
for ( int col=width-1; col>0; col-- ) {
indices[i++] = col + (row+1) * width;
indices[i++] = col - 1 + row * width;
}
}
}
if ( (height&1) && height>2 ) {
indices[i++] = (height-1) * width;
}
return indices;
}
void render() {
glEnableClientState( GL_VERTEX_ARRAY );
glVertexPointer( 3, GL_FLOAT, 0, getVertices(width,height) );
glDrawElements( GL_TRIANGLE_STRIP, getIndicesCount(width,height), GL_UNSIGNED_INT, getIndices(width,height) );
glDisableClientState( GL_VERTEX_ARRAY );
}
With width=4 and height=4 this is what I got:
And here I'm modifying some vertex height:
Here is some code that does this (not tested, but you get the idea at least):
void make_plane(int rows, int columns, float *vertices, int *indices) {
// Set up vertices
for (int r = 0; r < rows; ++r) {
for (int c = 0; c < columns; ++c) {
int index = r*columns + c;
vertices[3*index + 0] = (float) c;
vertices[3*index + 1] = (float) r;
vertices[3*index + 2] = 0.0f;
}
}
// Set up indices
int i = 0;
for (int r = 0; r < rows - 1; ++r) {
indices[i++] = r * columns;
for (int c = 0; c < columns; ++c) {
indices[i++] = r * columns + c;
indices[i++] = (r + 1) * columns + c;
}
indices[i++] = (r + 1) * columns + (columns - 1);
}
}
The first loop sets up the vertex array in a standard rectangular grid. There are R*C vertices.
The second loop sets up the indices. In general, there are two vertices per square in the grid. Each vertex will cause a new triangle to be drawn (with the previous two vertices), so each square is drawn with two triangles.
The first and last vertex of each row are duplicated. This means there are two triangles of zero area (degenerate triangles) between each row. This allows us to draw the entire grid in one big triangle strip. This technique is called stitching.
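A possible way to call it, using the counts implied by the loops above (rows*columns vertices with 3 floats each, and 2*columns + 2 indices per strip row); the grid size here is just an example:
int rows = 4, columns = 4;
float *vertices = new float[rows * columns * 3]; // 3 floats per vertex
int *indices = new int[(rows - 1) * (2 * columns + 2)]; // 2*columns + 2 indices per strip row
make_plane(rows, columns, vertices, indices);

glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glDrawElements(GL_TRIANGLE_STRIP, (rows - 1) * (2 * columns + 2), GL_UNSIGNED_INT, indices);
glDisableClientState(GL_VERTEX_ARRAY);

delete[] vertices;
delete[] indices;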
None of the code above generates a correct mesh. A very good article about how to make a strip of triangles on a simple plane: http://www.learnopengles.com/android-lesson-eight-an-introduction-to-index-buffer-objects-ibos/
Here is my test code, which is actually tested and fully working:
int plane_width = 4; // amount of columns
int plane_height = 2; // amount of rows
int total_vertices = (plane_width + 1) * (plane_height + 1);
planeVert = new CIwFVec2[total_vertices];
memset(planeVert, 0, sizeof(CIwFVec2) * total_vertices);
int numIndPerRow = plane_width * 2 + 2;
int numIndDegensReq = (plane_height - 1) * 2;
int total_indices = numIndPerRow * plane_height + numIndDegensReq;
planeInd = new uint16[total_indices];
make_plane(plane_width, plane_height, planeVert, planeInd);
...
void make_plane(int width, int height, CIwFVec2 *vertices, uint16 *indices)
{
width++;
height++;
int size = sizeof(CIwFVec2);
// Set up vertices
for(int y = 0; y < height; y++)
{
int base = y * width;
for(int x = 0; x < width; x++)
{
int index = base + x;
CIwFVec2 *v = vertices + index;
v->x = (float) x;
v->y = (float) y;
Debug::PrintDebug("%d: %f, %f", index, v->x, v->y);
}
}
Debug::PrintDebug("-------------------------");
// Set up indices
int i = 0;
height--;
for(int y = 0; y < height; y++)
{
int base = y * width;
//indices[i++] = (uint16)base;
for(int x = 0; x < width; x++)
{
indices[i++] = (uint16)(base + x);
indices[i++] = (uint16)(base + width + x);
}
// add a degenerate triangle (except in a last row)
if(y < height - 1)
{
indices[i++] = (uint16)((y + 1) * width + (width - 1));
indices[i++] = (uint16)((y + 1) * width);
}
}
for(int ind=0; ind < i; ind++)
Debug::PrintDebug("%d ", indices[ind]);
}
I was doing something similar, and using the first two answers I came up with this (tested, C#, XNA):
// center x,z on origin
float offset = worldSize / 2.0f, scale = worldSize / (float)vSize;
// create local vertices
VertexPositionColor[] vertices = new VertexPositionColor[vSize * vSize];
for (uint z = 0; z < vSize; z++) {
for (uint x = 0; x < vSize; x++) {
uint index = x + (z * vSize);
vertices[index].Position = new Vector3((scale*(float)x) - offset,
heightValue,
(scale*(float)z) - offset);
vertices[index].Color = Color.White;
}
}
// create local indices
var indices = new System.Collections.Generic.List<IndexType>();
for (int z = 0; z < vSize - 1; z++) {
// degenerate index on non-first row
if (z != 0) indices.Add((IndexType)(z * vSize));
// main strip
for (int x = 0; x < vSize; x++) {
indices.Add((IndexType)(z * vSize + x));
indices.Add((IndexType)((z + 1) * vSize + x));
}
// degenerate index on non-last row
if (z != (vSize-2)) indices.Add((IndexType)((z + 1) * vSize + (vSize - 1)));
}
This is easily convertible to C++; just make indices an std::vector (a rough sketch of the converted index generation follows the notes below).
The notable features of my solution are:
a) It doesn't need to change the winding order per substrip - adding two points creates two degenerate triangles, so the order is correct for the next substrip.
b) You should conditionally add the first and last degenerate-triangle vertices.
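A rough C++ conversion of the index-generation part only, as I read the C# above (untested; IndexType, the vSize parameter and the std::vector return are my assumptions):
#include <cstdint>
#include <vector>

typedef uint16_t IndexType; // assumption: 16-bit indices, as in the C# version

std::vector<IndexType> makeGridIndices(int vSize)
{
std::vector<IndexType> indices;
for (int z = 0; z < vSize - 1; z++) {
// degenerate index on non-first row
if (z != 0) indices.push_back(IndexType(z * vSize));
// main strip
for (int x = 0; x < vSize; x++) {
indices.push_back(IndexType(z * vSize + x));
indices.push_back(IndexType((z + 1) * vSize + x));
}
// degenerate index on non-last row
if (z != vSize - 2) indices.push_back(IndexType((z + 1) * vSize + (vSize - 1)));
}
return indices;
}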