How to read tiled images with OpenImageIO and read_tiles() - c++

I have a problem reading a tiled image on Windows with Visual Studio and OIIO 2.0.8.
For testing I rendered an image with Arnold, once with the tiled option checked and once without it. Reading the scanline image works fine, but the tiled rendering does not read anything: in debug mode I can see that the tilePixels array does not change at all before and after reading a tile, even though the read_tiles call always returns true.
Maybe someone can have a look and tell me if there is an obvious problem.
This is the still somewhat chaotic code I use:
std::string filename = "C:/daten/images/tiledRender.exr";
auto in = ImageInput::open(filename);
if (in)
{
    const ImageSpec &spec = in->spec(); // image dimensions and tile size
    int tw = spec.tile_width;
    int th = spec.tile_height;
    int w = spec.width;
    int h = spec.height;
    int numBytesPerPixel = 3;
    size_t numBytesPerImage = w * h * numBytesPerPixel;
    size_t numBytesPerLine = w * numBytesPerPixel;
    std::vector<unsigned char> pixels(numBytesPerImage, 120);
    unsigned char* line = &pixels[0];
    unsigned char* bit = image->bits(); // this comes from QImage
    if (tw == 0) // no tiles, read scanlines
    {
        qDebug() << "Found scanline rendering.\n";
        for (int i = 0; i < h; i++)
        {
            bool success = in->read_scanlines(0, 0, i, i + 1, 0, 0, 3, TypeDesc::UCHAR, line);
            if (!success)
                qDebug() << "read scanline problem at scanline " << i << "\n";
            line += numBytesPerLine;
        }
        memcpy(bit, &pixels[0], numBytesPerImage);
    }
    else {
        qDebug() << "Found tiled rendering.\n";
        int numTilePixels = tw * th;
        int numBytesPerTile = numTilePixels * 3;
        std::vector<unsigned char> tilePixels(numBytesPerTile, 80);
        unsigned char* tilePtr = &tilePixels[0];
        for (int x = 0; x < w; x += tw)
        {
            for (int y = 0; y < h; y += th)
            {
                int ttw = tw;
                int tth = th;
                if ((x + tw) >= w)
                    ttw = w - x;
                if ((y + th) >= h)
                    tth = h - y;
                bool success = in->read_tiles(0, 0, x, x + ttw, y, y + tth, 0, 0, 0, 3, TypeDesc::UCHAR, tilePtr);
                if (!success)
                    qDebug() << "read tiles problem\n";
            }
        }
    }
}

The solution lies in the way the tiles are read. The range parameters of read_tiles are begin/end pairs where the end is exclusive, so even for a 2D image the z range has to span one slice: instead of zbegin = 0 and zend = 0, I have to use zend = 1.
So instead of:
bool success = in->read_tiles(0, 0, x, x + ttw, y, y + tth, 0, 0, 0, 3, TypeDesc::UCHAR, tilePtr);
it has to be:
bool success = in->read_tiles(0, 0, x, x + ttw, y, y + tth, 0, 1, 0, 3, TypeDesc::UCHAR, tilePtr);
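For completeness, here is a minimal sketch of the corrected tile loop, including the step the snippet above never did: copying each tile into the full-image buffer. It assumes the in, w, h, tw, th and pixels variables from the code above, and that with default strides read_tiles packs the requested region contiguously (ttw * tth pixels for an edge tile):

for (int x = 0; x < w; x += tw)
{
    for (int y = 0; y < h; y += th)
    {
        int ttw = std::min(tw, w - x);
        int tth = std::min(th, h - y);
        std::vector<unsigned char> tilePixels(ttw * tth * 3);
        if (!in->read_tiles(0, 0, x, x + ttw, y, y + tth, 0, 1, 0, 3,
                            TypeDesc::UCHAR, tilePixels.data()))
            qDebug() << "read tiles problem at tile" << x << y;
        // copy the tile row by row into its place in the full image
        for (int row = 0; row < tth; ++row)
            memcpy(&pixels[((y + row) * w + x) * 3],
                   &tilePixels[row * ttw * 3],
                   ttw * 3);
    }
}
in->close();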

Related

Half-precision PyTorch float tensors have same performance as single precision float tensors?

Background: I've implemented the antiobject/"field AI" pattern (https://home.cs.colorado.edu/~ralex/papers/PDF/OOPSLA06antiobjects.pdf) for single diffusion using LibTorch/PyTorch.
This works fine, but in the process of running it on the GPU and optimizing it, I've run into a problem. I have a Titan V, which I believe excels at half-precision float math. However, when I make the tensors torch::kHalf, the performance is the same. (I've also tried torch::kFloat16). Any ideas?
The code that I timed is in update():
#define SDL_MAIN_HANDLED
#include <simple2d.h>
#include <torch/torch.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/cuda/CUDAEvent.h>
#include <math.h>
#include <chrono>
#define DEBUG_NO_DRAW
torch::Device gpu(torch::kCUDA);
torch::Device cpu(torch::kCPU);
torch::Device device = gpu;
const int windowLength = 1000;
const int64_t length = 500;
const float diffusionRate = 0.25;
const int obstacleCount = 4000;
const int entityCount = 1000;
float cellLength = windowLength / length;
torch::Tensor scent = torch::zeros({ length, length }, device).to(torch::kHalf);
torch::Tensor up, down, left, right;
torch::Tensor topWallMask = torch::ones({ length, length }, device).to(torch::kHalf);
torch::Tensor bottomWallMask = torch::ones({ length, length }, device).to(torch::kHalf);
torch::Tensor leftWallMask = torch::ones({ length, length }, device).to(torch::kHalf);
torch::Tensor rightWallMask = torch::ones({ length, length }, device).to(torch::kHalf);
torch::Tensor obstaclesMask = torch::ones({ length, length }, device).to(torch::kHalf);
torch::Tensor entities = torch::zeros({ length, length }, device).to(torch::kHalf);
c10::cuda::CUDAStream stream = c10::cuda::getCurrentCUDAStream();
std::time_t *lastFpsUpdate = NULL;
std::time_t *currentTime = new std::time_t();
int frameAccumulator = 0;
std::vector<long> updateDurations;
void update() {
    torch::NoGradGuard no_grad;
    AT_CUDA_CHECK(cudaStreamSynchronize(stream));
    auto startTime = std::chrono::high_resolution_clock::now();
    down = scent.roll(1, 0) * obstaclesMask * topWallMask;
    up = scent.roll(-1, 0) * obstaclesMask * bottomWallMask;
    right = scent.roll(1, 1) * obstaclesMask * leftWallMask;
    left = scent.roll(-1, 1) * obstaclesMask * rightWallMask;
    scent = scent + ((down - scent) + (up - scent) + (right - scent) + (left - scent)) * diffusionRate;
    scent = torch::max(scent, entities);
    AT_CUDA_CHECK(cudaStreamSynchronize(stream));
    auto endTime = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime);
    updateDurations.push_back(duration.count());
}
void render() {
    if (lastFpsUpdate == NULL) {
        lastFpsUpdate = new std::time_t();
        std::time(lastFpsUpdate);
    }
    torch::Tensor sqrtedScent = scent.sqrt().to(torch::kFloat).to(cpu); // just to make darker scents a little brighter for display
    auto obstaclesMaskCPU = obstaclesMask.to(torch::kFloat).to(cpu);
    auto sqrtedScentAccessor = sqrtedScent.accessor<float, 2>();
    auto obstaclesMaskAccessor = obstaclesMaskCPU.accessor<float, 2>();
    float r = 0, g = 0, b = 0, a = 0;
#ifndef DEBUG_NO_DRAW
    S2D_DrawQuad(
        0, 0, 0, 0, 0, 1,
        windowLength, 0, 0, 0, 0, 1,
        windowLength, windowLength, 0, 0, 0, 1,
        0, windowLength, 0, 0, 0, 1);
#endif
    for (int i = 0; i < length; i++) {
        for (int j = 0; j < length; j++) {
            if (obstaclesMaskAccessor[i][j] == 0) {
                r = 1; g = 1; b = 1; a = 1;
            }
            else {
                r = 1; g = 0; b = 0; a = sqrtedScentAccessor[i][j];
            }
#ifndef DEBUG_NO_DRAW
            S2D_DrawQuad(cellLength * j, cellLength * i, r, g, b, a,
                cellLength * (j + 1), cellLength * i, r, g, b, a,
                cellLength * (j + 1), cellLength * (i + 1), r, g, b, a,
                cellLength * j, cellLength * (i + 1), r, g, b, a);
#endif
        }
    }
    frameAccumulator++;
    std::time(currentTime);
    if (std::difftime(*currentTime, *lastFpsUpdate) > 1.0) {
        std::cout << "FPS: " << frameAccumulator << std::endl;
        frameAccumulator = 0;
        *lastFpsUpdate = *currentTime;
        int updateCount = updateDurations.size();
        long totalUpdateTime = 0;
        for (int i = 0; i < updateCount; i++) {
            totalUpdateTime += updateDurations[i];
        }
        long averageUpdateTime = totalUpdateTime / updateCount;
        std::cout << "AverageUpdateTime: " << averageUpdateTime << "us" << std::endl;
        updateDurations.clear();
    }
}
int main() {
    if (torch::cuda::is_available()) {
        std::cout << "CUDA is available!" << std::endl;
    }
    std::cout << "Using " << (device == cpu ? "CPU" : "GPU") << std::endl;
    for (int i = 0; i < length; i++) {
        topWallMask[0][i] = 0;
        bottomWallMask[length - 1][i] = 0;
        leftWallMask[i][0] = 0;
        rightWallMask[i][length - 1] = 0;
    }
    for (int i = 0; i < obstacleCount; i++) {
        int x = rand() % length;
        int y = rand() % length;
        obstaclesMask[x][y] = 0;
    }
    //std::cout << obstaclesMask << std::endl;
    for (int i = 0; i < entityCount; i++) {
        int x = rand() % length;
        int y = rand() % length;
        if (obstaclesMask[x][y].item() == 0)
            continue;
        entities[x][y] = 1;
    }
    S2D_Window* window = S2D_CreateWindow(
        "Collab Diffuse", windowLength, windowLength, update, render, 0
    );
    S2D_Show(window);
    return 0;
}
In both single precision and half precision versions of the code, update() takes about 2700 microseconds.
I'm using PyTorch/LibTorch 1.7.1.
Any other performance tips would be appreciated. (I'm aware drawing pixel by pixel is very slow, so I plan to switch from Simple2D to something else that can draw bitmaps from memory).
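For anyone reproducing the timing: below is a minimal sketch of the same measurement done with CUDA events instead of std::chrono plus explicit stream synchronization (the CUDAEvent header is already included above; this assumes the standard CUDA runtime event API and is untested here):

cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, stream); // stream is the c10::cuda::CUDAStream from above
// ... the tensor ops being timed ...
cudaEventRecord(stop, stream);
cudaEventSynchronize(stop);
float elapsedMs = 0.0f;
cudaEventElapsedTime(&elapsedMs, start, stop); // GPU-side elapsed time in milliseconds
cudaEventDestroy(start);
cudaEventDestroy(stop);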

glTexImage2D slicing and alignment issues appearing in window

I'm working on making my own topographic map, and I have been using .hgt files from NASA.
I'm loading the files using
void MapImage::load_map_file(const char* filename) {
    std::ifstream file(filename, std::ios::in | std::ios::binary);
    if (!file) {
        std::cout << "Error opening file!" << std::endl;
    }
    std::vector<short> tempHeight(TOTAL_SIZE);
    unsigned char buffer[2];
    for (int i = 0; i < TOTAL_SIZE; i++) {
        if (!file.read(reinterpret_cast<char*>(buffer), sizeof(buffer))) {
            std::cout << "Error reading file!" << std::endl;
        }
        // .hgt samples are big-endian 16-bit integers, so assemble each short by hand
        tempHeight[i] = (buffer[0] << 8) | buffer[1];
    }
    height = tempHeight;
}
And then adding them to an in-memory bitmap using:
void MapImage::loadTextureImage() {
    img_tex = 0;
    glGenTextures(1, &img_tex);
    int x, y, w, h;
    w = h = SRTM_SIZE;
    unsigned char* img;
    img = (unsigned char*)malloc(3 * w * h);
    memset(img, 0, 3 * w * h); // sizeof(img) would only be the size of the pointer
    int g = 0;
    double height_color;
    /*
    for (int i = 0; i < TOTAL_SIZE; i++) {
        height_color = (float)height[i] / 10.0;
        g = (height_color * 255);
        if (g > 255) g = 255;
        img[i * 3 + 2] = (unsigned char)0;
        img[i * 3 + 1] = (unsigned char)g;
        img[i * 3 + 0] = (unsigned char)0;
    }
    */
    for (int i = 0; i < w; i++) {
        for (int j = 0; j < h; ++j) {
            x = i;
            y = (h - 1) - j;
            height_color = (float)height[j + (i * w)] / 10.0;
            g = (height_color * 255);
            if (g > 255) g = 255;
            img[(x + y * w) * 3 + 2] = (unsigned char)0;
            img[(x + y * w) * 3 + 1] = (unsigned char)g;
            img[(x + y * w) * 3] = (unsigned char)0;
        }
    }
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, img_tex);
    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        GL_RGB,
        w,
        h,
        0,
        GL_RGB,
        GL_UNSIGNED_BYTE,
        img
    );
}
However, this results in an image with the corner sliced off, like this:
Using the commented-out version in loadTextureImage() looks slightly different, but the same corner is sliced off.
Does anyone have a hint as to what's going on? I've tried using an image as a texture, loaded with the stbi library, and that works fine, so I'm not sure where this is going wrong.
(The coordinates for the image are N10E099.)
This looks like row misalignment, caused by the 3-wide colour data. Try using the following call just before glTexImage2D:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
This alignment value, which is 4 by default, is used by glTexImage2D and friends whenever texture data is read to be sent to the GPU.
There is no verification that it matches what the data actually looks like, so in cases like yours where a row doesn't end on a 4-byte boundary, the first few bytes of the next row will be skipped, leading to this diagonal distortion.
Texture data transfers in the other direction (from the GPU to client memory) are aligned via glPixelStorei(GL_PACK_ALIGNMENT, 1);.
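Put together, the upload in loadTextureImage() would look like the following sketch; alternatively, keep the default alignment and pad each row to a multiple of 4 bytes, or use 4-byte RGBA pixels:

glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // rows of w * 3 bytes are tightly packed
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, img_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, img);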

How do I create a dynamic array of arrays (of arrays)?

I'm trying to create a dynamic array of arrays (of arrays), but for some reason the data gets corrupted. I'm using the data to generate a texture in an OpenGL application.
The following code works fine:
unsigned char imageData[64][64][3];
for (int i = 0; i < 64; i++)
{
    for (int j = 0; j < 64; j++)
    {
        unsigned char r = 0, g = 0, b = 0;
        if (i < 32)
        {
            if (j < 32)
                r = 255;
            else
                b = 255;
        }
        else
        {
            if (j < 32)
                g = 255;
        }
        imageData[i][j][0] = r;
        imageData[i][j][1] = g;
        imageData[i][j][2] = b;
    }
    std::cout << std::endl;
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData);
Problem is, I want to be able to create a texture of any size (not just 64*64). So I'm trying this:
unsigned char*** imageData = new unsigned char**[64]();
for (int i = 0; i < 64; i++)
{
    imageData[i] = new unsigned char*[64]();
    for (int j = 0; j < 64; j++)
    {
        imageData[i][j] = new unsigned char[3]();
        unsigned char r = 0, g = 0, b = 0;
        if (i < 32)
        {
            if (j < 32)
                r = 255;
            else
                b = 255;
        }
        else
        {
            if (j < 32)
                g = 255;
        }
        imageData[i][j][0] = r;
        imageData[i][j][1] = g;
        imageData[i][j][2] = b;
    }
    std::cout << std::endl;
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData);
But that doesn't work; the image gets all messed up, so I assume I'm creating the array of arrays (of arrays) incorrectly. What am I doing wrong?
Also, I guess I should be using vectors instead. But how can I cast the vector of vectors of vectors data into a (void*)?
This line contains multiple bugs:
unsigned char* pixel = &(imageData[(y * height) + x]);
You should multiply x by height and add y. And there's also the fact that each pixel is actually 3 bytes. Some issues that led to this bug in your code (and will lead to others):
You should be using std::vector. You can call std::vector::data to get a pointer to the underlying data to interface with C APIs.
You should have a class that represents a pixel. This handles the offsetting correctly, gives things names, and makes the code clearer.
Whenever you work with a multi-dimensional array encoded into a single-dimensional one, carefully write an access function that takes care of the indexing, so you can test it separately.
struct Pixel {
    unsigned char red;
    unsigned char green; // member order matches the GL_RGB byte order
    unsigned char blue;
};
struct TwoDimPixelArray {
    TwoDimPixelArray(int width, int height)
        : m_width(width), m_height(height)
    {
        m_vector.resize(m_width * m_height);
    }
    Pixel& get(int x, int y) {
        return m_vector[x * m_height + y];
    }
    Pixel* data() { return m_vector.data(); }
private:
    int m_width;
    int m_height;
    std::vector<Pixel> m_vector;
};
int width = 64;
int height = 64;
TwoDimPixelArray imageData(width, height);
for (int x = 0; x != width; ++x) {
    for (int y = 0; y != height; ++y) {
        auto& pixel = imageData.get(x, y);
        // ... pixel.red = something, pixel.green = something, etc.
    }
}
glTexImage2D(target, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData.data());
You need to use contiguous memory for it to work with OpenGL.
My solution is inspired by the previous answers, with a different indexing system:
unsigned char* imageData = new unsigned char[width * height * 3];
unsigned char r, g, b;
const unsigned int row_size_bytes = width * 3;
for (unsigned int x = 0; x < width; x++) {
    unsigned int current_column_offset_bytes = x * 3;
    for (unsigned int y = 0; y < height; y++) {
        unsigned int one_dim_offset = y * row_size_bytes + current_column_offset_bytes;
        unsigned char* pixel = &(imageData[one_dim_offset]);
        // r, g, b computed as in the question's loops
        pixel[0] = r;
        pixel[1] = g;
        pixel[2] = b;
    }
}
Unfortunately it's untested, but I'm confident, assuming sizeof(char) is 1.
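To also answer the (void *) question from the post: a vector of vectors (of vectors) cannot be handed to glTexImage2D, because its storage is not contiguous, but a single flat std::vector can, through data(). A minimal sketch along the same lines (width, height, target and the r, g, b values are assumed from the surrounding code):

#include <vector>
std::vector<unsigned char> imageData(width * height * 3); // contiguous and zero-initialized
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        unsigned char* pixel = &imageData[(y * width + x) * 3];
        pixel[0] = r; // r, g, b computed as in the question's loops
        pixel[1] = g;
        pixel[2] = b;
    }
}
glTexImage2D(target, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData.data());

The vector also frees its memory automatically, which the new[] versions do not.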

CImg: Image binarization result fails

So, the problem with my following code is that the result of the image binarization becomes too dark (there is even an example image of mine whose binary image comes out wholly black).
I have been searching for a mistake in my code for a very long time and have found nothing that looks problematic to me.
Below is the image I want to binarize:
Image before binarized - in my code is named: "hildebrantmed.bmp"
Below is the resulting binary image:
Image after binarized
Before I show you my source code, here are the 'rules' for the image binarization (this is an assignment I recently got):
I am not allowed to use any libraries other than CImg.
The programming language to use is C/C++, no other.
Normally, Otsu's method is the choice. However, I may be allowed to use other algorithms if they are better.
Lastly, here is my source code:
#include <iostream>
#include <CImg.h>
using namespace std;
using namespace cimg_library;
/**
 * Generate histogram of the grayscale image
 */
int * generate_histogram(CImg<unsigned char> img)
{
    int histogram[256];
    // initialize default values for histogram
    for (int i = 0; i < 256; i++)
    {
        histogram[i] = 0;
    }
    // increment intensity for histogram
    for (int i = 0; i < img.height(); i++)
    {
        for (int j = 0; j < img.width(); j++)
        {
            int gray_value = img(j, i, 0, 0);
            histogram[gray_value]++;
        }
    }
    return histogram;
}
/**
 * Find threshold value from the grayscale image's histogram
 */
int otsu_threshold(CImg<unsigned char> img)
{
    int * histogram = generate_histogram(img); // image histogram
    int total = img.width() * img.height(); // total pixels
    double sum = 0;
    int i;
    for (i = 0; i < 256; i++)
    {
        sum += i * histogram[i];
    }
    double sumB = 0;
    int wB = 0;
    int wF = 0;
    double var_max = 0;
    int threshold = 0;
    for (i = 0; i < 256; i++)
    {
        wB += histogram[i];
        if (wB == 0) continue;
        wF = total - wB;
        if (wF == 0) continue;
        sumB += (double)(i * histogram[i]);
        double mB = sumB / wB;
        double mF = (sum - sumB) / wF;
        double var_between = (double)wB * (double)wF * (mB - mF) * (mB - mF);
        if (var_between > var_max)
        {
            var_max = var_between;
            threshold = i;
        }
    }
    return threshold;
}
/**
 * Main function
 */
int main(int argc, char * argv[])
{
    // retrieve image from its path
    CImg<unsigned char> img("hildebrantmed.bmp");
    const int width = img.width();
    const int height = img.height();
    // initialize a new image for img's grayscale
    CImg<unsigned char> gray_img(width, height, 1, 1, 0);
    // from RGB divided into three separate channels
    CImg<unsigned char> imgR(width, height, 1, 3, 0);
    CImg<unsigned char> imgG(width, height, 1, 3, 0);
    CImg<unsigned char> imgB(width, height, 1, 3, 0);
    // for all (x, y) pixels in image
    cimg_forXY(img, x, y)
    {
        imgR(x, y, 0, 0) = img(x, y, 0, 0),
        imgG(x, y, 0, 1) = img(x, y, 0, 1),
        imgB(x, y, 0, 2) = img(x, y, 0, 2);
        // separate the channels
        int R = (int)img(x, y, 0, 0);
        int G = (int)img(x, y, 0, 1);
        int B = (int)img(x, y, 0, 2);
        // obtain gray value from different weights of RGB channels
        int gray_value = (int)(0.299 * R + 0.587 * G + 0.114 * B);
        gray_img(x, y, 0, 0) = gray_value;
    }
    // find threshold of grayscale image
    int threshold = otsu_threshold(gray_img);
    // initialize a binary image version of img
    CImg<unsigned char> binary_img(width, height, 1, 1, 0);
    // for every (x, y) pixel in gray_img
    cimg_forXY(img, x, y)
    {
        int gray_value = gray_img(x, y, 0, 0);
        // COMPARE gray_value with threshold
        int binary_value;
        // gray_value > threshold: 255 (white)
        if (gray_value > threshold) binary_value = 255;
        // gray_value < threshold: 0 (black)
        else binary_value = 0;
        // assign binary_value to each of binary_img's pixels
        binary_img(x, y, 0, 0) = binary_value;
    }
    // display the images
    CImgDisplay src_disp(img, "Source image");
    CImgDisplay gray_disp(gray_img, "Grayscale image");
    CImgDisplay binary_disp(binary_img, "Binary image");
    while (!src_disp.is_closed() && !gray_disp.is_closed() && !binary_disp.is_closed())
    {
        src_disp.wait();
        gray_disp.wait();
    }
    return 0;
}
If you find that another algorithm would work better, please provide the algorithm and its source code in your answer. Thanks for your attention.
First error: you're returning a pointer to a local array, which is destroyed as soon as the generate_histogram function ends.
To make it work properly, you should supply the pointer to an array from the calling function, something like:
{
    //[....]
    int histogram[256];
    generate_histogram(img, histogram);
    //[....]
}
void generate_histogram(CImg<unsigned char> img, int* arHistogram)
{
    //[....]
}
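Put together, a corrected version of the function might look like the following minimal sketch (it also takes the image by const reference to avoid copying it on every call, and uses CImg's cimg_forXY macro like the rest of the question's code):

void generate_histogram(const CImg<unsigned char>& img, int* histogram)
{
    for (int i = 0; i < 256; i++)
        histogram[i] = 0; // start from an empty histogram
    cimg_forXY(img, x, y)
    {
        histogram[img(x, y, 0, 0)]++; // count each gray value
    }
}

and in otsu_threshold:

int histogram[256];
generate_histogram(img, histogram);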

Parse a tile map into efficient horizontal and vertical bounding boxes

I'm working on a simple platformer game in C++. Everything is working great, except that I want to group tiles into bounding boxes in order to have fewer passes of the physics function (I'm targeting both PCs and embedded devices, so it's all about speed).
What the following function does is load a simple map format:
"[char *header][char bytesize][bytesize w][bytesize h][char tw][char th][char *map]"
It then reads the array as a 2D map and groups vertical lines when possible.
The question is: could someone show me how to group these tiles into efficient bounding boxes on both the vertical and horizontal axes?
Here's an image to help explain (I know my grammar and spelling are terrible): http://thetooth.name/dev/blocks_bic.png
void Environment::load_map(char* mapPath){
    cl("Loading Map: %s ", mapPath);
    FILE* mapFile = fopen(mapPath, "rb");
    FILE* mapInfoFile = fopen(strcat(substr(mapPath, 0, strlen(mapPath)-3), "bmd"), "rb");
    if (mapFile == NULL || mapInfoFile == NULL)
    {
        cl("[ERROR]\n");
        throw KLGLException("Error loading map file!");
        return;
    }
    size_t wordSize;
    char tBuffer[8] = {};
    int w = 0;
    int h = 0;
    int tileWidth = 0;
    int tileHeight = 0;
    fread(tBuffer, 1, 7, mapFile);
    if (strcmp(tBuffer, "STME1.0"))
    {
        cl("[BADHEADER]");
    }
    fill_n(tBuffer, 8, '\0');
    fread(tBuffer, 1, 1, mapFile);
    if (!strcmp(tBuffer, "B"))
    {
        wordSize = sizeof(char);
    }else{
        wordSize = sizeof(int);
    }
    fseek(mapFile, 8, SEEK_SET);
    fread(&w, wordSize, 1, mapFile);
    fread(&h, wordSize, 1, mapFile);
    fread(&tileWidth, 1, 1, mapFile);
    fread(&tileHeight, 1, 1, mapFile);
#define lvLookup y*w+x
    fill_n(mapData, (w*h)+1, '\0');
    fill_n(mapMask, (w*h)+1, '\0');
    // Read files into memory... back to front and inside out...
    for(int y = 0; y < h; y++){
        for(int x = 0; x < w; x++){
            fread(&mapData[lvLookup], 1, 1, mapFile);
            fread(&mapMask[lvLookup], 1, 1, mapInfoFile);
        }
    }
    fclose(mapFile);
    fclose(mapInfoFile);
    // Parse map data into our geometry vectors
    for(int x = 0; x < w; x++){
        for(int y = 0; y < h; y++){
            if(mapData[lvLookup] > 0){
                int xl = x;
                int yl = y;
                while(mapData[yl*w+x] != 0/* && mapMask[yl*w+x] == 0*/){
                    yl++;
                }
                platforms->push_back(Platform(x*tileWidth, y*tileHeight, 1*tileWidth, (yl-y)*tileHeight, lvLookup, mapData, mapMask));
                y = yl-1;
            }
        }
    }
    cl("[OK]\n");
}
Thanks in advance!
This is a little ugly, but I think it should work as a starting point. It works by scanning left to right, top to bottom, looking for whole columns that are alike. The challenging part is keeping track of consecutive complete vertical blocks and outputting "partial" runs correctly.
void find_bounds()
{
    int startx = 0;
    char last = mapData[0];
    for (int x = 0; x < w; x++) {
        int starty = 0;
        for (int y = 0; y < h; y++) {
            char c = mapData[x+y*w];
            if (c != last) {
                if (starty == 0) {
                    // finish last run of complete vertical blocks
                    if (startx != x) {
                        // it ran more than one column, output those first
                        span(last, startx, 0, x-1, h-1);
                        startx = x;
                    }
                    // and a run from the start of this column
                    span(last, x, 0, x, y);
                } else {
                    // a run within a column
                    span(last, x, starty, x, y);
                }
                last = c;
                starty = y;
            }
        }
        // had a run within this column or processing the last column, finish it up
        if (starty || x == w-1) {
            span(last, x, starty, x, h-1);
            startx = x + 1;
        }
    }
}
Test-suite (the first two should correspond to the second and third cases from your illustration):
#include <iostream>
#include <vector>
using namespace std;
const int w = 8, h = 8;
char mapData[w*h+1] =
#if 1
    "xxxxxxxx"
    "xxxxxxxx"
    "xxxxxxxx"
    " xxxxxxx"
    "xxxxxxxx"
    "xxxxxxxx"
    "xxxxxxxx"
    "xxxxxxxx"
#elif 0
    "xxxxxxxx"
    "xxxxxxxx"
    "xxx xxxx"
    "xxxxxxxx"
    "xxxxxxxx"
    "xxxxxxxx"
    "xxxxxxxx"
    "xxxxxxxx"
#else
    "xxxxxxxx"
    "xx xxxxx"
    "xxxxxxxx"
    "xx xxxxx"
    "xxxx xxx"
    "xxxxxxxx"
    "xxxxxxxx"
    "xxxxxxxx"
#endif
;
void span(char type, int x0, int y0, int x1, int y1)
{
    if (!(x0 == x1 && y0 == y1))
        cout << type << " " << x0 << ", " << y0 << " -> " << x1 << ", " << y1 << "\n";
}
void find_bounds(); // defined above
int main()
{
    find_bounds();
}