I am trying to generate a single UV texture map in the format of the SURREAL dataset. There is a notebook in the original DensePose repository that discusses how to apply texture transfer using an image from SMPL: github.com/facebookresearch/DensePose/blob/master/notebooks/DensePose-RCNN-Texture-Transfer.ipynb
However, in this case I am trying to use the outputs we get from DensePose directly:
In dump mode, I get the uv coordinates in data[0]['pred_densepose'][0].uv with dimensions: torch.Size([2, 1098, 529])
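For completeness, the dump itself came from a command of this form (same config and weights as the show command below; the output file name is my choice):
python3 apply_net.py dump configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml model_final_de6e7a.pkl input.jpg --output results.pkl -v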
I overlaid the output from running inference on an image with the dp_u,dp_v visualization on a black background. Here is the link to the image: https://densepose.s3.amazonaws.com/test1uv.0001.png
This is the command I used to get this inference: python3 apply_net.py show configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml model_final_de6e7a.pkl input.jpg dp_u,dp_v -v --output output.png
This is the link to the original image: https://densepose.s3.amazonaws.com/02_1_front.jpg
Using these components, I am trying to generate the 24-part UV texture map in the same format as SMPL:
https://densepose.s3.amazonaws.com/extracted_smpl_texture_apprearance.png
https://densepose.s3.amazonaws.com/texture_from_SURREAL.png
It would be extremely helpful if someone can share how to solve this problem. Please let me know if additional information is needed.
I don't know if the problem still persists or whether you were able to find a solution, but in case anyone else faces the same issues, here is my solution. I put together several pieces of code and ideas from the official GitHub issue page for DensePose (https://github.com/facebookresearch/DensePose/issues/68).
I assume that we already have the output of the apply_net.py utility from the GitHub DensePose repository. From your post, that is the data output (the one you obtained data[0]['pred_densepose'][0].uv from).
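If you still need to read that dump first, a minimal sketch (assuming the dump was written to results.pkl; torch must be importable, since the pickle contains tensors):

import pickle

with open('results.pkl', 'rb') as f:
    data = pickle.load(f)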
Let's do some coding:
import copy
import cv2
import matplotlib
import numpy as np
from matplotlib import pyplot as plt

matplotlib.use('TkAgg')

# I assume the data are stored in pickle, and you are able to read them
results = data[0]
IMAGE_FILE = 'path/to/image.png'

def parse_iuv(result):
    i = result['pred_densepose'][0].labels.cpu().numpy().astype(float)
    uv = (result['pred_densepose'][0].uv.cpu().numpy() * 255.0).astype(float)
    iuv = np.stack((uv[1, :, :], uv[0, :, :], i))
    iuv = np.transpose(iuv, (1, 2, 0))
    return iuv
def parse_bbox(result):
    return result["pred_boxes_XYXY"][0].cpu().numpy()
def concat_textures(array):
    # stack the 24 part textures into a 4 x 6 atlas
    texture = []
    for i in range(4):
        tmp = array[6 * i]
        for j in range(6 * i + 1, 6 * i + 6):
            tmp = np.concatenate((tmp, array[j]), axis=1)
        texture = tmp if len(texture) == 0 else np.concatenate((texture, tmp), axis=0)
    return texture
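As a quick sanity check (my own addition, not part of the original flow): 24 part tiles of 200 x 200 x 3 should concatenate into a 4 x 6 atlas of shape (800, 1200, 3):

dummy = np.zeros((24, 200, 200, 3))
assert concat_textures(dummy).shape == (800, 1200, 3)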
def interpolate_tex(tex):
    # code adapted from https://github.com/facebookresearch/DensePose/issues/68
    valid_mask = np.array((tex.sum(0) != 0) * 1, dtype='uint8')
    radius_increase = 10
    kernel = np.ones((radius_increase, radius_increase), np.uint8)
    dilated_mask = cv2.dilate(valid_mask, kernel, iterations=1)
    region_to_fill = dilated_mask - valid_mask
    invalid_region = 1 - valid_mask
    actual_part_max = tex.max()
    actual_part_min = tex.min()
    actual_part_uint = np.array((tex - actual_part_min) / (actual_part_max - actual_part_min) * 255,
                                dtype='uint8')
    actual_part_uint = cv2.inpaint(actual_part_uint.transpose((1, 2, 0)), invalid_region, 1,
                                   cv2.INPAINT_TELEA).transpose((2, 0, 1))
    actual_part = (actual_part_uint / 255.0) * (actual_part_max - actual_part_min) + actual_part_min
    # only keep the dilated region
    actual_part = actual_part * dilated_mask
    return actual_part
def get_texture(im, iuv, bbox, tex_part_size=200):
    # this part of the code creates an iuv image that matches
    # the size of the original image (the iuv from DensePose is placed
    # within the pose bounding box)
    im = im.transpose(2, 1, 0) / 255
    image_w, image_h = im.shape[1], im.shape[2]
    bbox[2] = bbox[2] - bbox[0]
    bbox[3] = bbox[3] - bbox[1]
    x, y, w, h = [int(v) for v in bbox]
    bg = np.zeros((image_h, image_w, 3))
    bg[y:y + h, x:x + w, :] = iuv
    iuv = bg
    iuv = iuv.transpose((2, 1, 0))
    i, u, v = iuv[2], iuv[1], iuv[0]

    # the following code iterates over the parts and creates a texture
    # of size `tex_part_size x tex_part_size` for each of them
    n_parts = 24
    texture = np.zeros((n_parts, 3, tex_part_size, tex_part_size))

    for part_id in range(1, n_parts + 1):
        generated = np.zeros((3, tex_part_size, tex_part_size))
        x, y = u[i == part_id], v[i == part_id]
        # transform uv coordinates to current UV texture coordinates:
        tex_u_coo = (x * (tex_part_size - 1) / 255).astype(int)
        tex_v_coo = (y * (tex_part_size - 1) / 255).astype(int)
        # clip due to issues encountered in the DensePose output;
        # for an unknown reason, some `uv` coordinates are out of the [0, 1] bound
        tex_u_coo = np.clip(tex_u_coo, 0, tex_part_size - 1)
        tex_v_coo = np.clip(tex_v_coo, 0, tex_part_size - 1)
        # write the corresponding pixels from the original image to the UV texture;
        # iterate over range(3) because of the 3 channels
        for channel in range(3):
            generated[channel][tex_v_coo, tex_u_coo] = im[channel][i == part_id]
        # this part is not crucial, but gives you better results
        # (the texture comes out smoother)
        if np.sum(generated) > 0:
            generated = interpolate_tex(generated)
        # assign the part to the final texture carrier
        texture[part_id - 1] = generated[:, ::-1, :]

    # concatenate the part textures into the 2D UV plane
    tex_concat = np.zeros((24, tex_part_size, tex_part_size, 3))
    for i in range(texture.shape[0]):
        tex_concat[i] = texture[i].transpose(2, 1, 0)
    tex = concat_textures(tex_concat)
    return tex
iuv = parse_iuv(results)
bbox = parse_bbox(results)
image = cv2.imread(IMAGE_FILE)[:, :, ::-1]
uv_texture = get_texture(image, iuv, bbox)
# plot texture or do whatever you like
plt.imshow(uv_texture)
plt.show()
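If you would rather save the atlas than display it, something along these lines should work (the values are already scaled to [0, 1] by get_texture; I clip defensively):

plt.imsave('uv_texture.png', np.clip(uv_texture, 0, 1))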
Enjoy
I am trying to implement the architecture below and am not sure I am applying gradient tape properly.
In the architecture above, outputs are taken from multiple layers, shown in the blue boxes. Each blue box is termed a loss branch in the paper and contains two losses, namely cross-entropy and L2 loss. I wrote the architecture in TensorFlow 2 and am using gradient tape for custom training. One thing I am not sure about is how I should update the losses using gradient tape.
I have two queries:
How am I supposed to use gradient tape for multiple losses in this scenario? I am interested in seeing code!
For instance, consider the 3rd blue box (3rd loss branch) in the image above, where we take inputs from the conv 13 layer and get two outputs, one for classification and the other for regression.
After computing the losses, how am I supposed to update the weights: should I update all the layers above (from conv 1 to conv 13), or only the weights of the layers that produced conv 13 (conv 11, 12 and 13)?
I am also attaching a link where I posted a question yesterday in detail.
Below is the snippet I have tried for gradient descent. Please correct me if I am wrong.
images = batch.data[0]
images = (images - 127.5) / 127.5
targets = batch.label

with tensorflow.GradientTape() as tape:
    outputs = self.net(images)
    loss = self.loss_criterion(outputs, targets)
    self.scheduler(i, self.optimizer)

grads = tape.gradient(loss, self.net.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.net.trainable_variables))
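One pattern I have seen for several branch losses is to sum them into a single scalar inside the tape, so that one gradient call updates every layer that feeds any branch (a sketch of that idea, not code from the paper):

with tensorflow.GradientTape() as tape:
    outputs = self.net(images)
    losses = self.loss_criterion(outputs, targets)  # one loss per branch
    total_loss = tensorflow.add_n(losses)           # single scalar for backprop
grads = tape.gradient(total_loss, self.net.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.net.trainable_variables))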
Below is the code for the custom loss function, which is used as loss_criterion above.
losses = []
for i in range(self.num_output_scales):
    pred_score = outputs[i * 2]
    pred_bbox = outputs[i * 2 + 1]
    gt_mask = targets[i * 2]
    gt_label = targets[i * 2 + 1]

    pred_score_softmax = tensorflow.nn.softmax(pred_score, axis=1)
    loss_mask = tensorflow.ones(pred_score_softmax.shape, tensorflow.float32)

    if self.hnm_ratio > 0:
        pos_flag = (gt_label[:, 0, :, :] > 0.5)
        pos_num = tensorflow.math.reduce_sum(tensorflow.cast(pos_flag, dtype=tensorflow.float32))

        if pos_num > 0:
            neg_flag = (gt_label[:, 1, :, :] > 0.5)
            neg_num = tensorflow.math.reduce_sum(tensorflow.cast(neg_flag, dtype=tensorflow.float32))
            neg_num_selected = min(int(self.hnm_ratio * pos_num), int(neg_num))
            neg_prob = tensorflow.where(neg_flag, pred_score_softmax[:, 1, :, :],
                                        tensorflow.zeros_like(pred_score_softmax[:, 1, :, :]))
            neg_prob_sort = tensorflow.sort(tensorflow.reshape(neg_prob, shape=(1, -1)), direction='ASCENDING')
            prob_threshold = neg_prob_sort[0][int(neg_num_selected)]
            neg_grad_flag = (neg_prob <= prob_threshold)
            loss_mask = tensorflow.concat([tensorflow.expand_dims(pos_flag, axis=1),
                                           tensorflow.expand_dims(neg_grad_flag, axis=1)], axis=1)
        else:
            neg_choice_ratio = 0.1
            neg_num_selected = int(tensorflow.cast(tensorflow.size(pred_score_softmax[:, 1, :, :]),
                                                   dtype=tensorflow.float32) * neg_choice_ratio)
            neg_prob = pred_score_softmax[:, 1, :, :]
            neg_prob_sort = tensorflow.sort(tensorflow.reshape(neg_prob, shape=(1, -1)), direction='ASCENDING')
            prob_threshold = neg_prob_sort[0][int(neg_num_selected)]
            neg_grad_flag = (neg_prob <= prob_threshold)
            loss_mask = tensorflow.concat([tensorflow.expand_dims(pos_flag, axis=1),
                                           tensorflow.expand_dims(neg_grad_flag, axis=1)], axis=1)

    pred_score_softmax_masked = tensorflow.where(loss_mask, pred_score_softmax,
                                                 tensorflow.zeros_like(pred_score_softmax, dtype=tensorflow.float32))
    pred_score_log = tensorflow.math.log(pred_score_softmax_masked)
    score_cross_entropy = - tensorflow.where(loss_mask, gt_label[:, :2, :, :],
                                             tensorflow.zeros_like(gt_label[:, :2, :, :], dtype=tensorflow.float32)) * pred_score_log
    loss_score = tensorflow.math.reduce_sum(score_cross_entropy) / \
        tensorflow.cast(tensorflow.size(score_cross_entropy), tensorflow.float32)

    mask_bbox = gt_mask[:, 2:6, :, :]
    predict_bbox = pred_bbox * mask_bbox
    label_bbox = gt_label[:, 2:6, :, :] * mask_bbox
    # L2 loss of boxes
    # loss_bbox = tensorflow.math.reduce_sum(tensorflow.nn.l2_loss((label_bbox - predict_bbox)) ** 2) / 2
    loss_bbox = mse(label_bbox, predict_bbox) / tensorflow.math.reduce_sum(mask_bbox)

    # adding only the losses relevant to a branch and sending them for backprop
    losses.append(loss_score + loss_bbox)
    # losses.append(loss_bbox)

    # adding all losses and sending to backprop - approach 1
    # loss_cls += loss_score
    # loss_reg += loss_bbox
    # loss_branch.append(loss_score)
    # loss_branch.append(loss_bbox)
    # loss = loss_cls + loss_reg

return losses
I am not getting any error, but my losses aren't minimizing. Here is the log for my training.
Can someone please help me fix this?
I'd like to understand why, when I convert the PIL image imageRGB to a float array arrayRGB_f and use matplotlib's imshow() without a cmap, it looks either black or strange and unreadable, even though PIL's imageRGB.show() looks fine and each of the individual R, G, B channels shown with cmap='gray' looks okay as well.
I have workarounds, but I just don't understand why this happens.
matplotlib.__version__ returns '2.0.2' and I'm using MacOS with an Anaconda installation.
See this answer for more on the conversion of a ttf rendering to a 1-bit image.
FYI, the output of the print statements is:
float64 (41, 101, 3)
int64 (41, 101, 3)
int64 (41, 101)
int64 (41, 101)
[Screenshots in the original post compare imageRGB.show() and plt.imshow() side by side for three cases: fontname = 'default'; fontname = 'Arial Unicode.ttf'; and font = ImageFont.truetype(fontname, 20).]
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import matplotlib.pyplot as plt

# fontname = 'Arial Unicode.ttf'
fontname = 'default'
if fontname == 'default':
    font = ImageFont.load_default()
else:
    font = ImageFont.truetype(fontname, 12)

string = "Hello " + fontname[:6]
ww, hh = 101, 41
threshold = 80  # https://stackoverflow.com/a/47546095/3904031

imageRGB = Image.new('RGB', (ww, hh))
draw = ImageDraw.Draw(imageRGB)
draw.text((10, 12), string, font=font, fill=(255, 255, 255, 255))  # R, G, B, alpha; draw.text returns None
image8bit = imageRGB.convert("L")
image1bit = image8bit.point(lambda x: 0 if x < threshold else 1, mode='1')  # https://stackoverflow.com/a/47546095/3904031

arrayRGB = np.array(list(imageRGB.getdata())).reshape(hh, ww, 3)
arrayRGB_f = arrayRGB.astype(float)
array8bit = np.array(list(image8bit.getdata())).reshape(hh, ww)
array1bit = np.array(list(image1bit.getdata())).reshape(hh, ww)

for a in (arrayRGB_f, arrayRGB, array8bit, array1bit):
    print(a.dtype, a.shape)

imageRGB.show()

if True:
    plt.figure()
    a = arrayRGB_f
    plt.subplot(2, 2, 1)
    plt.imshow(a)  # , interpolation='nearest', cmap='gray',
    for i in range(3):
        plt.subplot(2, 2, 2 + i)
        plt.imshow(a[:, :, i], cmap='gray')
    plt.suptitle('arrayRGB_f, fontname = ' + fontname)
    plt.show()
I can't find an ideal duplicate so I'll post an answer.
As @ImportanceOfBeingErnest mentions, when .imshow() is given an n x m x 3 or n x m x 4 float array, it expects the values to be normalized between 0.0 and 1.0.
The best way to do this is:
arrayRGB_f = arrayRGB.astype(float)/255.
though this seems to work as well:
arrayRGB_f = arrayRGB.astype(float)
arrayRGB_f = arrayRGB_f / arrayRGB_f.max()
For longer discussions, see this and this.
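A small self-contained illustration of the effect (my own sketch, not from the linked discussions): the same random RGB data shown as raw 0..255 floats and normalized to [0, 1]:

import numpy as np
import matplotlib.pyplot as plt

rgb = (np.random.rand(41, 101, 3) * 255).astype(float)
plt.subplot(2, 1, 1)
plt.imshow(rgb)         # floats outside [0, 1]: clipped or garbled, depending on version
plt.subplot(2, 1, 2)
plt.imshow(rgb / 255.)  # normalized floats: renders as expected
plt.show()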
I would like to know how to save images captured by OpenCV into a directory created by the code, which should stay on the server...
import cv2, sys, numpy, os

haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'  # all the face data will be stored in this folder
sub_data = raw_input('enter your name')  # these are sub-datasets of the folder; for my faces I've used my name
path = os.path.join(datasets, sub_data)
if not os.path.isdir(path):
    os.mkdir(path)
(width, height) = (130, 100)  # defining the size of images

face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)  # '0' is used for my webcam; if you have any other camera attached, use '1'

# The program loops until it has 100 images of the face.
count = 1
while count < 101:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        cv2.imwrite('%s/%s.png' % (path, count), face_resize)
        count += 1
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
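Two details worth adding for the directory and saving part (a sketch, not the original code): on Python 3, os.makedirs with exist_ok creates nested directories in one call, and cv2.imwrite returns False rather than raising when the path is bad:

import os
import cv2
import numpy

path = os.path.join('datasets', 'some_name')  # hypothetical sub_data value
os.makedirs(path, exist_ok=True)              # also creates missing parent dirs, no error if present
img = numpy.zeros((100, 130), numpy.uint8)    # stand-in for face_resize
ok = cv2.imwrite(os.path.join(path, '1.png'), img)
if not ok:
    print('imwrite failed - check the path and extension')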
I have some problems. The first is that I can't update the plot limits of the y axis, and the second is that I want to see 6 lines, one from each sensor; as you can see in the picture, I see only one, and if I make some changes I see all the sensors' variations in one line.
Here is the code where I create the plot, and a picture of it:
http://i.imgur.com/ogFoMDJ.png?1
# Flag variables
self.isLogging = False
# Create data buffers
self.N = 70
self.n = range(self.N)
self.M = 6 # just one lead - i.e. 1 number per sample
self.x = 0 * numpy.ones(self.N, numpy.int)
# Data logging file
self.f = 0
# Create plot area and axes
self.x_max = 500
self.x_min = 330
self.fig = Figure(facecolor='#e4e4e4')
self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
self.canvas.SetPosition((330,50))
self.canvas.SetSize((550,280))
self.ax = self.fig.add_axes([0.08,0.1,0.86,0.8])
self.ax.autoscale(False)
self.ax.set_xlim(0, self.N - 1)
self.ax.set_ylim(self.x_min, self.x_max)
self.ax.plot(self.n,self.x)
# Filter taps
self.taps = [0, 0, 0]
# Create timer to read incoming data and scroll plot
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.GetSample, self.timer)
And here is where I grab the data and try to update the limits of the plot:
if len(sample_string) != 6:
    sample_string = sample_string[0:-1]

self.taps[1:3] = self.taps[0:2]
self.taps[0] = int(array[1])
# value = 0.5 * self.taps[0] + 0.5 * self.taps[2]
value = self.taps[0]

self.x[0:self.N - 1] = self.x[1:]
self.x[self.N - 1] = value

# print sample to data logging file
if self.f != 0:
    self.f.write(str(value))
    self.f.write("\n")

# update plot limits
maxval = max(self.x[:])
minval = min(self.x[:])
self.x_max += ((maxval + 10) - self.x_max) / 100.0
self.x_min -= (self.x_min - (minval - 10)) / 100.0

# Update plot
self.ax.cla()
self.ax.autoscale(False)
self.ax.set_xlim(0, self.N - 1)
self.ax.set_ylim(self.x_min, self.x_max)
self.ax.plot(self.n, self.x)
self.canvas.draw()

if b7 == True:
    self.textctrl0.Clear()
    self.textctrl0.AppendText(array[1])
    self.textctrl1.Clear()
    self.textctrl1.AppendText(array[2])
    self.textctrl2.Clear()
    self.textctrl2.AppendText(array[3])
    self.textctrl3.Clear()
    self.textctrl3.AppendText(array[4])
    self.textctrl4.Clear()
    self.textctrl4.AppendText(array[5])
    self.textctrl5.Clear()
    self.textctrl5.AppendText(array[6])
    b7 = False
P.S. I removed the faulty code where I tried to add the other sensors; here is only the working code for the one-sensor plot.
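For the six-lines problem, a sketch of what I would try (assuming array[1]..array[6] each hold one sample per sensor): keep an N x M buffer and pass the whole 2-D array to ax.plot, which draws one line per column:

# in __init__: one column per sensor instead of a 1-D buffer
self.x = numpy.zeros((self.N, self.M))

# in the timer handler: scroll and append the new row of 6 samples
self.x[:-1, :] = self.x[1:, :]
self.x[-1, :] = [int(array[k]) for k in range(1, 7)]
self.ax.cla()
self.ax.set_xlim(0, self.N - 1)
self.ax.set_ylim(self.x.min() - 10, self.x.max() + 10)
self.ax.plot(self.n, self.x)  # 2-D y: one line per sensor column
self.canvas.draw()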
I'm new to OpenStreetMap and Mapnik.
I'm trying to export a map image which will be geo-referenced
(so it can be used in other applications).
I've installed OSM and Mapnik inside an Ubuntu virtual machine.
I've tried using the generate_image.py script, but the generated image does not match the bounding box. My Python knowledge is not good enough for me to fix the script.
I've also tried the nik2img.py script in verbose mode, for example:
nik2img.py osm.xml sarajevo.png --srs 900913 --bbox 18.227 43.93 18.511 43.765 --dimensions 10000 10000
and tried using the bounding box from the log:
Step: 11 // --> Map long/lat bbox: Envelope(18.2164733537,43.765,18.5215266463,43.93)
Unfortunately, the generated image still does not match the bounding box :(
How can I change the scripts so that I can georeference the generated image?
Or do you know an easier way to accomplish this task?
The image I'm getting from the http://www.openstreetmap.org/ export is nicely geo-referenced, but it's not big enough :(
I've managed to change generate_tiles.py to generate 1024x1024 images together with the correct bounding box.
The changed script is available below.
#!/usr/bin/python
from math import pi, cos, sin, log, exp, atan
from subprocess import call
import sys, os
from Queue import Queue
import mapnik
import threading

DEG_TO_RAD = pi / 180
RAD_TO_DEG = 180 / pi

# Default number of rendering threads to spawn, should be roughly equal to number of CPU cores available
NUM_THREADS = 4

def minmax(a, b, c):
    a = max(a, b)
    a = min(a, c)
    return a
class GoogleProjection:
    def __init__(self, levels=18):
        self.Bc = []
        self.Cc = []
        self.zc = []
        self.Ac = []
        c = 1024
        for d in range(0, levels):
            e = c / 2
            self.Bc.append(c / 360.0)
            self.Cc.append(c / (2 * pi))
            self.zc.append((e, e))
            self.Ac.append(c)
            c *= 2

    def fromLLtoPixel(self, ll, zoom):
        d = self.zc[zoom]
        e = round(d[0] + ll[0] * self.Bc[zoom])
        f = minmax(sin(DEG_TO_RAD * ll[1]), -0.9999, 0.9999)
        g = round(d[1] + 0.5 * log((1 + f) / (1 - f)) * -self.Cc[zoom])
        return (e, g)

    def fromPixelToLL(self, px, zoom):
        e = self.zc[zoom]
        f = (px[0] - e[0]) / self.Bc[zoom]
        g = (px[1] - e[1]) / -self.Cc[zoom]
        h = RAD_TO_DEG * (2 * atan(exp(g)) - 0.5 * pi)
        return (f, h)
class RenderThread:
    def __init__(self, tile_dir, mapfile, q, printLock, maxZoom):
        self.tile_dir = tile_dir
        self.q = q
        self.m = mapnik.Map(1024, 1024)
        self.printLock = printLock
        # Load style XML
        mapnik.load_map(self.m, mapfile, True)
        # Obtain <Map> projection
        self.prj = mapnik.Projection(self.m.srs)
        # Projects between tile pixel co-ordinates and LatLong (EPSG:4326)
        self.tileproj = GoogleProjection(maxZoom + 1)

    def render_tile(self, tile_uri, x, y, z):
        # Calculate pixel positions of bottom-left & top-right
        p0 = (x * 1024, (y + 1) * 1024)
        p1 = ((x + 1) * 1024, y * 1024)

        # Convert to LatLong (EPSG:4326)
        l0 = self.tileproj.fromPixelToLL(p0, z)
        l1 = self.tileproj.fromPixelToLL(p1, z)

        # Convert to map projection (e.g. mercator co-ords EPSG:900913)
        c0 = self.prj.forward(mapnik.Coord(l0[0], l0[1]))
        c1 = self.prj.forward(mapnik.Coord(l1[0], l1[1]))

        # Bounding box for the tile
        if hasattr(mapnik, 'mapnik_version') and mapnik.mapnik_version() >= 800:
            bbox = mapnik.Box2d(c0.x, c0.y, c1.x, c1.y)
        else:
            bbox = mapnik.Envelope(c0.x, c0.y, c1.x, c1.y)
        render_size = 1024
        self.m.resize(render_size, render_size)
        self.m.zoom_to_box(bbox)
        self.m.buffer_size = 128

        # Render image with default Agg renderer
        im = mapnik.Image(render_size, render_size)
        mapnik.render(self.m, im)
        im.save(tile_uri, 'png256')
        print "Rendered: ", tile_uri, "; ", l0, "; ", l1

        # Write geo-coding information (MapInfo .tab sidecar for the rendered tile)
        file = open(tile_uri[:-4] + ".tab", 'w')
        file.write("!table\n")
        file.write("!version 300\n")
        file.write("!charset WindowsLatin2\n")
        file.write("Definition Table\n")
        file.write("  File \"" + tile_uri[:-4] + ".png\"\n")  # was ".jpg"; the tile is saved as .png above
        file.write("  Type \"RASTER\"\n")
        file.write("  (" + str(l0[0]) + "," + str(l1[1]) + ") (0,0) Label \"Pt 1\",\n")
        file.write("  (" + str(l1[0]) + "," + str(l1[1]) + ") (1023,0) Label \"Pt 2\",\n")
        file.write("  (" + str(l1[0]) + "," + str(l0[1]) + ") (1023,1023) Label \"Pt 3\",\n")
        file.write("  (" + str(l0[0]) + "," + str(l0[1]) + ") (0,1023) Label \"Pt 4\"\n")
        file.write("  CoordSys Earth Projection 1, 104\n")
        file.write("  Units \"degree\"\n")
        file.close()
    def loop(self):
        while True:
            # Fetch a tile from the queue and render it
            r = self.q.get()
            if (r == None):
                self.q.task_done()
                break
            else:
                (name, tile_uri, x, y, z) = r

            exists = ""
            if os.path.isfile(tile_uri):
                exists = "exists"
            else:
                self.render_tile(tile_uri, x, y, z)
            bytes = os.stat(tile_uri)[6]
            empty = ''
            if bytes == 103:
                empty = " Empty Tile "
            self.printLock.acquire()
            print name, ":", z, x, y, exists, empty
            self.printLock.release()
            self.q.task_done()
def render_tiles(bbox, mapfile, tile_dir, minZoom=1, maxZoom=18, name="unknown", num_threads=NUM_THREADS):
    print "render_tiles(", bbox, mapfile, tile_dir, minZoom, maxZoom, name, ")"

    # Launch rendering threads
    queue = Queue(32)
    printLock = threading.Lock()
    renderers = {}
    for i in range(num_threads):
        renderer = RenderThread(tile_dir, mapfile, queue, printLock, maxZoom)
        render_thread = threading.Thread(target=renderer.loop)
        render_thread.start()
        # print "Started render thread %s" % render_thread.getName()
        renderers[i] = render_thread

    if not os.path.isdir(tile_dir):
        os.mkdir(tile_dir)

    gprj = GoogleProjection(maxZoom + 1)

    ll0 = (bbox[0], bbox[3])
    ll1 = (bbox[2], bbox[1])

    for z in range(minZoom, maxZoom + 1):
        px0 = gprj.fromLLtoPixel(ll0, z)
        px1 = gprj.fromLLtoPixel(ll1, z)

        # check if we have directories in place
        zoom = "%s" % z
        if not os.path.isdir(tile_dir + zoom):
            os.mkdir(tile_dir + zoom)
        for x in range(int(px0[0] / 1024.0), int(px1[0] / 1024.0) + 1):
            # Validate x co-ordinate
            if (x < 0) or (x >= 2 ** z):
                continue
            # check if we have directories in place
            str_x = "%s" % x
            if not os.path.isdir(tile_dir + zoom + '/' + str_x):
                os.mkdir(tile_dir + zoom + '/' + str_x)
            for y in range(int(px0[1] / 1024.0), int(px1[1] / 1024.0) + 1):
                # Validate y co-ordinate
                if (y < 0) or (y >= 2 ** z):
                    continue
                str_y = "%s" % y
                tile_uri = tile_dir + zoom + '_' + str_x + '_' + str_y + '.png'
                # Submit tile to be rendered into the queue
                t = (name, tile_uri, x, y, z)
                queue.put(t)

    # Signal render threads to exit by sending empty request to queue
    for i in range(num_threads):
        queue.put(None)
    # wait for pending rendering jobs to complete
    queue.join()
    for i in range(num_threads):
        renderers[i].join()
if __name__ == "__main__":
    home = os.environ['HOME']
    try:
        mapfile = "/home/emir/bin/mapnik/osm.xml"  # os.environ['MAPNIK_MAP_FILE']
    except KeyError:
        mapfile = "/home/emir/bin/mapnik/osm.xml"
    try:
        tile_dir = os.environ['MAPNIK_TILE_DIR']
    except KeyError:
        tile_dir = home + "/osm/tiles/"

    if not tile_dir.endswith('/'):
        tile_dir = tile_dir + '/'

    #-------------------------------------------------------------------------
    #
    # Change the following for different bounding boxes and zoom levels
    #
    # render sarajevo at 16 zoom level
    bbox = (18.256, 43.785, 18.485, 43.907)
    render_tiles(bbox, mapfile, tile_dir, 16, 16, "World")
Try Maperitive's export-bitmap command; it generates various georeferencing sidecar files (world file, KML, OziExplorer .MAP file).
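If you want plain world files instead of the MapInfo .tab sidecars the modified script writes, here is a sketch of a writer (my own addition; it assumes l0 and l1 are the bottom-left and top-right lon/lat corners, as in render_tile above):

def write_world_file(tile_uri, l0, l1, size=1024):
    # world file lines: x pixel size, y skew, x skew, y pixel size (negative),
    # then lon and lat of the center of the top-left pixel
    px_w = (l1[0] - l0[0]) / size
    px_h = -(l1[1] - l0[1]) / size
    with open(tile_uri[:-4] + '.pgw', 'w') as wf:
        for v in (px_w, 0.0, 0.0, px_h, l0[0] + px_w / 2, l1[1] + px_h / 2):
            wf.write('%.10f\n' % v)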