OpenCV python module error - python-2.7

The code below does face detection and recognition with a Haar cascade classifier, running on OpenCV 3.2.0 and Python 2.7. It throws a module error. Is there anybody who can help me out? It would be a big help for me. Thank you.
# facerec.py
import cv2, sys, numpy, os

size = 1
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'

# Part 1: Create fisherRecognizer
print('Training...')

# Create a list of images and a list of corresponding names
(images, lables, names, id) = ([], [], {}, 0)

# Get the folders containing the training data
for (subdirs, dirs, files) in os.walk(fn_dir):
    # Loop through each folder named after the subject in the photos
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        # Loop through each photo in the folder
        for filename in os.listdir(subjectpath):
            # Skip non-image formats
            f_name, f_extension = os.path.splitext(filename)
            if (f_extension.lower() not in
                    ['.png', '.jpg', '.jpeg', '.gif', '.pgm']):
                print("Skipping " + filename + ", wrong file type")
                continue
            path = subjectpath + '/' + filename
            lable = id
            # Add to training data
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        id += 1

(im_width, im_height) = (112, 92)

# Create a Numpy array from the two lists above
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.createFisherFaceRecognizer()
model.train(images, lables)

# Part 2: Use fisherRecognizer on camera stream
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
while True:
    # Loop until the camera is working
    rval = False
    while not rval:
        # Put the image from the webcam into 'frame'
        (rval, frame) = webcam.read()
        if not rval:
            print("Failed to open webcam. Trying again...")

    # Flip the image (optional)
    frame = cv2.flip(frame, 1, 0)

    # Convert to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Resize to speed up detection (optional, change size above)
    mini = cv2.resize(gray, (int(gray.shape[1] / size), int(gray.shape[0] / size)))

    # Detect faces and loop through each one
    faces = haar_cascade.detectMultiScale(mini)
    for i in range(len(faces)):
        face_i = faces[i]

        # Coordinates of face after scaling back by `size`
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))

        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

        # Write the name of the recognized face
        cv2.putText(frame,
                    '%s - %.0f' % (names[prediction[0]], prediction[1]),
                    (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))

    # Show the image and check for ESC being pressed
    cv2.imshow('OpenCV', frame)
    key = cv2.waitKey(10)
    if key == 27:
        break

It looks to me like you have not heeded the comment in your own code. The error message tells you that the module cv2 does not define face and the comment seems to warn about the same thing.
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.createFisherFaceRecognizer()
What happens if you heed the comment and code
model = cv2.createFisherFaceRecognizer()
instead?
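For reference, a small sketch of a version guard that handles both API layouts (assuming the stock factory names for each release line; note that OpenCV 3.3+ renamed the factory to cv2.face.FisherFaceRecognizer_create):

if hasattr(cv2, 'face'):
    # OpenCV 3.x built with the contrib modules
    model = cv2.face.createFisherFaceRecognizer()
else:
    # plain OpenCV 2.4.x
    model = cv2.createFisherFaceRecognizer()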

I just solved the error by installing the opencv_contrib modules from GitHub.
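For pip-based setups there is no need to build from source; the contrib modules (including cv2.face) ship in a separate PyPI package, so this usually suffices:

pip install opencv-contrib-python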

Find homography for stitching

I’m working on the following task:
I have 6 fisheye cameras and would like to produce a 360 degree stitched image.
After carrying out the calibration procedure with findChessboardCorners and calibrateCamera, I obtained the intrinsic and extrinsic matrices.
Starting from the 6 images with fish-eye effect, through the fisheye.initUndistortRectifyMap function, I obtained the 6 planar images.
The two planar images from above are reported below.
Now I should do the stitching to get a 360 degree image.
I tried to do this using the cv2.createStitcher function, but this doesn’t always work, also I would like to have access to the homography matrix to determine the static matrices of the system.
So I tried to calculate the homography matrix myself, identifying the common keypoints between two images with the SIFT algorithm and keeping the keypoints that match best.
I then stitched the two images using the warpPerspective function.
I believe the procedure is correct up to the calculation of the keypoints, but I do not understand why the final result is not good.
In fact, in the attempted stitch the second image is completely deformed / changed in perspective, and part of the right image is lost.
Here is the code:
import cv2
import numpy as np

def cvshow(name, img):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def sift_kp(image):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    kp, des = sift.detectAndCompute(image, None)
    kp_image = cv2.drawKeypoints(gray_image, kp, None)
    return kp_image, kp, des

def get_good_match(des1, des2):
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)  # des1 is the template image, des2 is the matching image
    matches = sorted(matches, key=lambda x: x[0].distance / x[1].distance)
    good = []
    for m, n in matches:
        if m.distance < 0.55 * n.distance:
            good.append(m)
    return good

def drawMatches(imageA, imageB, kpsA, kpsB, matches, status):
    # Initialize the visualization picture, connect the A and B pictures left and right together
    (hA, wA) = imageA.shape[:2]
    (hB, wB) = imageB.shape[:2]
    vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
    vis[0:hA, 0:wA] = imageA
    vis[0:hB, wA:] = imageB
    # Joint traversal, draw matching pairs
    for ((trainIdx, queryIdx), s) in zip(matches, status):
        # When the point pair is matched successfully, draw it on the visualization
        if s == 1:
            # Draw matching pairs
            ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
            ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
            cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
    # Return visualization results
    return vis

# Panorama stitching
def siftimg_rightlignment(img_right, img_left):
    _, kp1, des1 = sift_kp(img_right)
    _, kp2, des2 = sift_kp(img_left)
    goodMatch = get_good_match(des1, des2)
    # When there are more than 4 filtered matching pairs: calculate the perspective transformation matrix
    if len(goodMatch) > 4:
        # Get the point coordinates of the matching pairs
        ptsA = np.float32([kp1[m.queryIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
        ptsB = np.float32([kp2[m.trainIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
        ransacReprojThreshold = 4
        H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold)
        print(H)
        # H = np.array([[-3.95002617e-01, -7.49813070e-02, 4.43642683e+02], [-4.06655962e-01, 5.27365057e-01, 1.20636875e+02], [-1.60149798e-03, -3.69708507e-05, 1.00000000e+00]])
        # findHomography uses RANSAC to select the best sets of matching points and then computes H, a 3*3 matrix
        # Change the viewpoint of the right picture; result is the transformed picture
        result = cv2.warpPerspective(img_right, H, (img_right.shape[1] + img_left.shape[1], img_right.shape[0]))
        cvshow('result_medium', result)
        # Paste the left picture onto the left end of the result picture
        result[0:img_left.shape[0], 0:img_left.shape[1]] = img_left
        return result

# Feature matching + panoramic stitching
# Read the pictures to stitch (note the placement of the left and right pictures)
# The graphic on the right is the one that gets transformed
img_left = cv2.imread(r'\planar\0.png')
img_right = cv2.imread(r'\planar\5.png')
img_right = cv2.resize(img_right, None, fx=0.5, fy=0.3)
# Ensure that the two images are the same size
img_left = cv2.resize(img_left, (img_right.shape[1], img_right.shape[0]))
kpimg_right, kp1, des1 = sift_kp(img_right)
kpimg_left, kp2, des2 = sift_kp(img_left)
# Display the original image and the image after keypoint detection at the same time
cvshow('img_left', np.hstack((img_left, kpimg_left)))
cvshow('img_right', np.hstack((img_right, kpimg_right)))
goodMatch = get_good_match(des1, des2)
all_goodmatch_img = cv2.drawMatches(img_right, kp1, img_left, kp2, goodMatch, None, flags=2)
# goodmatch_img shows only the first matches, goodMatch[:10]
goodmatch_img = cv2.drawMatches(img_right, kp1, img_left, kp2, goodMatch[:10], None, flags=2)
cvshow('Keypoint Matches1', all_goodmatch_img)
cvshow('Keypoint Matches2', goodmatch_img)
# Stitch the pictures into a panorama
result = siftimg_rightlignment(img_right, img_left)
cvshow('result', result)
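As an aside, the high-level Stitcher API mentioned in the question is driven roughly as follows; this is only a minimal sketch (the factory is cv2.Stitcher_create in OpenCV 4.x and cv2.createStitcher in 3.x, and the image paths are placeholders), and note that it does not expose the homographies:

import cv2

images = [cv2.imread(p) for p in ['planar/0.png', 'planar/5.png']]
stitcher = cv2.Stitcher_create(cv2.Stitcher_PANORAMA)
status, pano = stitcher.stitch(images)
if status == cv2.Stitcher_OK:
    cv2.imwrite('pano.png', pano)
else:
    print('stitching failed with status %d' % status)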

Create UV Texture map from DensePose Output

I am trying to generate a single UV-texture map in the format of the SURREAL dataset. There is a notebook in the original DensePose repository that discusses how to apply texture transfer using an image from SMPL: github.com/facebookresearch/DensePose/blob/master/notebooks/DensePose-RCNN-Texture-Transfer.ipynb
However, in this case I am trying to use the outputs we get from DensePose directly:
In dump mode, I get the uv coordinates in data[0]['pred_densepose'][0].uv with dimensions: torch.Size([2, 1098, 529])
I overlaid the output from running inference on an image with the dp_u,dp_v visualization on a black background. Here is the link to the image: https://densepose.s3.amazonaws.com/test1uv.0001.png
This is the command I used to get this inference: python3 apply_net.py show configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml model_final_de6e7a.pkl input.jpg dp_u,dp_v -v --output output.png
This is the link to the original image: https://densepose.s3.amazonaws.com/02_1_front.jpg
Using these components, I am trying to generate the 24 part uv texture map in the same format as SMPL:
https://densepose.s3.amazonaws.com/extracted_smpl_texture_apprearance.png
https://densepose.s3.amazonaws.com/texture_from_SURREAL.png
It would be extremely helpful if someone can share how to solve this problem. Please let me know if additional information is needed.
I don't know if the problem still persists or whether you were able to find a solution. In case anyone else runs into the same issue, here is my solution. I put together several different codes and ideas from the official GitHub issue page for DensePose (https://github.com/facebookresearch/DensePose/issues/68).
I assume that we already have the output of the apply_net.py utility from the GitHub DensePose repository. From your post it is a data output (the one you were able to obtain data[0]['pred_densepose'][0].uv from).
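If the results came from the dump action and were saved with pickle (the same assumption the code below makes), reading them back is one call; the file name here is just a placeholder:

import pickle

# 'results.pkl' stands for whatever --output name was used with apply_net.py
with open('results.pkl', 'rb') as f:
    data = pickle.load(f)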
Let's do some coding:
import copy
import cv2
import matplotlib
import numpy as np
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt

# I assume the data are stored in pickle, and you are able to read them
results = data[0]
IMAGE_FILE = 'path/to/image.png'

def parse_iuv(result):
    i = result['pred_densepose'][0].labels.cpu().numpy().astype(float)
    uv = (result['pred_densepose'][0].uv.cpu().numpy() * 255.0).astype(float)
    iuv = np.stack((uv[1, :, :], uv[0, :, :], i))
    iuv = np.transpose(iuv, (1, 2, 0))
    return iuv

def parse_bbox(result):
    return result["pred_boxes_XYXY"][0].cpu().numpy()

def concat_textures(array):
    texture = []
    for i in range(4):
        tmp = array[6 * i]
        for j in range(6 * i + 1, 6 * i + 6):
            tmp = np.concatenate((tmp, array[j]), axis=1)
        texture = tmp if len(texture) == 0 else np.concatenate((texture, tmp), axis=0)
    return texture

def interpolate_tex(tex):
    # code is adopted from https://github.com/facebookresearch/DensePose/issues/68
    valid_mask = np.array((tex.sum(0) != 0) * 1, dtype='uint8')
    radius_increase = 10
    kernel = np.ones((radius_increase, radius_increase), np.uint8)
    dilated_mask = cv2.dilate(valid_mask, kernel, iterations=1)
    region_to_fill = dilated_mask - valid_mask
    invalid_region = 1 - valid_mask
    actual_part_max = tex.max()
    actual_part_min = tex.min()
    actual_part_uint = np.array((tex - actual_part_min) / (actual_part_max - actual_part_min) * 255, dtype='uint8')
    actual_part_uint = cv2.inpaint(actual_part_uint.transpose((1, 2, 0)), invalid_region, 1,
                                   cv2.INPAINT_TELEA).transpose((2, 0, 1))
    actual_part = (actual_part_uint / 255.0) * (actual_part_max - actual_part_min) + actual_part_min
    # only use the dilated part
    actual_part = actual_part * dilated_mask
    return actual_part

def get_texture(im, iuv, bbox, tex_part_size=200):
    # this part of the code creates an iuv image which corresponds
    # to the size of the original image (iuv from DensePose is placed
    # within the pose bounding box).
    im = im.transpose(2, 1, 0) / 255
    image_w, image_h = im.shape[1], im.shape[2]
    bbox[2] = bbox[2] - bbox[0]
    bbox[3] = bbox[3] - bbox[1]
    x, y, w, h = [int(v) for v in bbox]
    bg = np.zeros((image_h, image_w, 3))
    bg[y:y + h, x:x + w, :] = iuv
    iuv = bg
    iuv = iuv.transpose((2, 1, 0))
    i, u, v = iuv[2], iuv[1], iuv[0]

    # the following part of the code iterates over parts and creates textures
    # of size `tex_part_size x tex_part_size`
    n_parts = 24
    texture = np.zeros((n_parts, 3, tex_part_size, tex_part_size))
    for part_id in range(1, n_parts + 1):
        generated = np.zeros((3, tex_part_size, tex_part_size))
        x, y = u[i == part_id], v[i == part_id]
        # transform uv coordinates to current UV texture coordinates:
        tex_u_coo = (x * (tex_part_size - 1) / 255).astype(int)
        tex_v_coo = (y * (tex_part_size - 1) / 255).astype(int)
        # clipping due to issues encountered in DensePose output;
        # for an unknown reason, some `uv` coos are out of the bound [0, 1]
        tex_u_coo = np.clip(tex_u_coo, 0, tex_part_size - 1)
        tex_v_coo = np.clip(tex_v_coo, 0, tex_part_size - 1)
        # write corresponding pixels from the original image to the UV texture
        # iterate in range(3) due to 3 channels
        for channel in range(3):
            generated[channel][tex_v_coo, tex_u_coo] = im[channel][i == part_id]
        # this part is not crucial, but gives you better results
        # (the texture comes out smoother)
        if np.sum(generated) > 0:
            generated = interpolate_tex(generated)
        # assign the part to the final texture carrier
        texture[part_id - 1] = generated[:, ::-1, :]

    # concatenate textures and create a 2D plane (UV)
    tex_concat = np.zeros((24, tex_part_size, tex_part_size, 3))
    for i in range(texture.shape[0]):
        tex_concat[i] = texture[i].transpose(2, 1, 0)
    tex = concat_textures(tex_concat)
    return tex

iuv = parse_iuv(results)
bbox = parse_bbox(results)
image = cv2.imread(IMAGE_FILE)[:, :, ::-1]
uv_texture = get_texture(image, iuv, bbox)

# plot the texture or do whatever you like
plt.imshow(uv_texture)
plt.show()
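To write the atlas to disk instead of just plotting it, a one-liner sketch (assuming, as above, that the texture values lie in [0, 1]):

plt.imsave('uv_texture.png', uv_texture.clip(0, 1))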
Enjoy

How to add an argument to look up a certain keyword from the same directory

I have a list of images to make a collage named in a certain pattern.
Ex.
yahoo_jp.png
yahoo_us.png
yahoo_uk.png
yahoo_cn.png
All files are in the same directory. Currently, I can only send a command that makes a collage out of all images in a folder, but what I want is to pass a keyword from the shell command, look up the files in the folder whose names match it, and then make a collage from those.
Ex. shell command:
make_collage.py -o my_collage.png -w 540 -i 840 new argument --> -a "_us"
When this command is run, it should make a collage only from files whose names contain the keyword "_us", so the output would be a collage containing only the "_us" images.
import argparse
import os
import random

from PIL import Image

def make_collage(images, filename, width, init_height):
    """
    Make a collage image with a width equal to `width` from `images` and save to `filename`.
    """
    if not images:
        print('No images for collage found!')
        return False

    margin_size = 2
    # run until a suitable arrangement of images is found
    while True:
        # copy images to images_list
        images_list = images[:]
        coefs_lines = []
        images_line = []
        x = 0
        while images_list:
            # get first image and resize to `init_height`
            img_path = images_list.pop(0)
            img = Image.open(img_path)
            img.thumbnail((width, init_height))
            # when `x` goes beyond the `width`, start the next line
            if x > width:
                coefs_lines.append((float(x) / width, images_line))
                images_line = []
                x = 0
            x += img.size[0] + margin_size
            images_line.append(img_path)
        # finally add the last line with images
        coefs_lines.append((float(x) / width, images_line))

        # compact the lines by reducing `init_height` if any line has one or fewer images
        if len(coefs_lines) <= 1:
            break
        if any(map(lambda c: len(c[1]) <= 1, coefs_lines)):
            # reduce `init_height`
            init_height -= 10
        else:
            break

    # get output height
    out_height = 0
    for coef, imgs_line in coefs_lines:
        if imgs_line:
            out_height += int(init_height / coef) + margin_size
    if not out_height:
        print('Height of collage could not be 0!')
        return False

    collage_image = Image.new('RGB', (width, int(out_height)), (35, 35, 35))
    # put images into the collage
    y = 0
    for coef, imgs_line in coefs_lines:
        if imgs_line:
            x = 0
            for img_path in imgs_line:
                img = Image.open(img_path)
                # if we need to enlarge an image, use `resize`, otherwise use `thumbnail`, it's faster
                k = (init_height / coef) / img.size[1]
                if k > 1:
                    img = img.resize((int(img.size[0] * k), int(img.size[1] * k)), Image.ANTIALIAS)
                else:
                    img.thumbnail((int(width / coef), int(init_height / coef)), Image.ANTIALIAS)
                if collage_image:
                    collage_image.paste(img, (int(x), int(y)))
                x += img.size[0] + margin_size
            y += int(init_height / coef) + margin_size
    collage_image.save(filename)
    return True

def main():
    # prepare argument parser
    parse = argparse.ArgumentParser(description='Photo collage maker')
    parse.add_argument('-f', '--folder', dest='folder', help='folder with images (*.jpg, *.jpeg, *.png)', default='.')
    parse.add_argument('-o', '--output', dest='output', help='output collage image filename', default='collage.png')
    parse.add_argument('-w', '--width', dest='width', type=int, help='resulting collage image width')
    parse.add_argument('-i', '--init_height', dest='init_height', type=int, help='initial height for resize the images')
    parse.add_argument('-s', '--shuffle', action='store_true', dest='shuffle', help='enable images shuffle')
    args = parse.parse_args()

    if not args.width or not args.init_height:
        parse.print_help()
        exit(1)

    # get images
    files = [os.path.join(args.folder, fn) for fn in os.listdir(args.folder)]
    images = [fn for fn in files if os.path.splitext(fn)[1].lower() in ('.jpg', '.jpeg', '.png')]
    if not images:
        print('No images for making collage! Please select other directory with images!')
        exit(1)

    # shuffle images if needed
    if args.shuffle:
        random.shuffle(images)

    print('Making collage...')
    res = make_collage(images, args.output, args.width, args.init_height)
    if not res:
        print('Failed to create collage!')
        exit(1)
    print('Collage is ready!')

if __name__ == '__main__':
    main()
The easiest way would be to use glob.glob() along with os.listdir(), although glob() uses shell-style wildcards, so you'll need to enter *_us*.
First import it:
from glob import glob
Then add a "pattern" optional positional arg:
parse.add_argument('-p', '--pattern', nargs="?", default=None, help="enter a grep-like expansion.")
Finally, change the line under # get images to something like this:
if args.pattern:
    files = glob(os.path.join(args.folder, args.pattern))
else:
    files = [os.path.join(args.folder, fn) for fn in os.listdir(args.folder)]
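With that in place, the example command from the question becomes something like this (the pattern is quoted so the shell does not expand it before Python sees it):

python make_collage.py -o my_collage.png -w 540 -i 840 -p "*_us*"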

How to save images to a server folder via sockets using python and opencv?

I would like to know how to save images captured by OpenCV into a directory, created by the code, that should live on the server...
import cv2, sys, numpy, os

haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'  # All the face data will be present in this folder
sub_data = raw_input('enter your name')  # These are sub datasets of the folder; for my faces I've used my name
path = os.path.join(datasets, sub_data)
if not os.path.isdir(path):
    os.mkdir(path)
(width, height) = (130, 100)  # defining the size of images

face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)  # '0' is used for my webcam; if you have any other camera attached, use '1'

# The program loops until it has 100 images of the face.
count = 1
while count < 101:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        cv2.imwrite('%s/%s.png' % (path, count), face_resize)
    count += 1
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
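The posted code only writes the crops to a local folder; the socket part is not shown anywhere in the question, so what follows is just a minimal sketch of one possible approach, where the host, port, and length-prefix framing are all my assumptions: the capture loop encodes each face crop with cv2.imencode and sends it over a TCP connection, and the receiving side writes the bytes into the dataset directory on the server.

import cv2
import socket
import struct

def recvall(conn, n):
    # read exactly n bytes from the connection (or None if it closed)
    data = b''
    while len(data) < n:
        chunk = conn.recv(n - len(data))
        if not chunk:
            return None
        data += chunk
    return data

def send_image(sock, face_resize):
    # encode the crop as PNG bytes and send it with a 4-byte length prefix
    ok, buf = cv2.imencode('.png', face_resize)
    if ok:
        data = buf.tobytes()
        sock.sendall(struct.pack('>I', len(data)) + data)

def receive_images(save_dir, host='0.0.0.0', port=9999):
    # run this on the server; it writes every received image into save_dir
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    count = 0
    while True:
        header = recvall(conn, 4)
        if header is None:
            break
        size = struct.unpack('>I', header)[0]
        data = recvall(conn, size)
        if data is None:
            break
        count += 1
        with open('%s/%s.png' % (save_dir, count), 'wb') as f:
            f.write(data)
    conn.close()

In the capture loop above, cv2.imwrite(...) would then be replaced by send_image(sock, face_resize), with sock created once via socket.create_connection((server_ip, 9999)).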

Problems with global variables (Python)

I have a problem with the global variables in my code. In SCRIPT1.py I use many variables from a little module config.py, which only contains variables that I also need in other modules of my code. But when running SCRIPT1.py I get an error (see ERROR below). I have no idea why it doesn't work with config.(name of variable)... I found this approach for sharing variables across all of your modules on Stack Overflow with a lot of good votes. What am I doing wrong?
At first my code contained config.costSurfaceA instead of costSurfaceArray (for example in 'def createPath'), but when running it with this variable it gave me a syntax error because of the dot in 'config.costSurfaceA'. I replaced it all by 'costSurfaceArray' and added 'config.costSurfaceA = costSurfaceArray' in the if statement just to keep it as a global. But I have the feeling this is all too much work for nothing..
Thanks in advance for helping me! I know it is a lot of code, but I think it's all important for understanding.
SCRIPT1.py
from osgeo import gdal, osr
from skimage.graph import route_through_array
import numpy as np
import Save_Array_To_Excel_01
import config

def ask_costsurfacepath_path():
    config.costsurfacepath = input('please enter the system path where to find the cost-surface-IMG file (ex: /Users/PeterVanvoorden/Documents/GroepT/Thesis/Branched_Testfile.img): ')

def ask_outputpath_path():
    config.outputpath = input('please enter the system path where to save the outputpath IMG file (ex: /Users/PeterVanvoorden/Documents/GroepT/Thesis/Branched_Testfile.img): ')

def raster2array(rasterfn):
    print 'raster2array'
    raster = gdal.Open(rasterfn)
    band = raster.GetRasterBand(1)
    array = band.ReadAsArray()
    return array

def coord2pixelOffset(rasterfn, x, y):
    print 'coord2pixelOffset'
    raster = gdal.Open(rasterfn)
    geotransform = raster.GetGeoTransform()
    originX = geotransform[0]      # East/West location of Upper Left corner
    originY = geotransform[3]      # North/South location of Upper Left corner
    pixelWidth = geotransform[1]   # X pixel size
    pixelHeight = geotransform[5]  # Y pixel size
    xOffset = int((x - originX) / pixelWidth)
    yOffset = int((y - originY) / pixelHeight)
    return xOffset, yOffset

def createPath(CostSurfacefn, costSurfaceArray, startCoord, stopCoord):
    print 'createpath'
    # coordinates to array index
    startCoordX = startCoord[0]
    startCoordY = startCoord[1]
    startIndexX, startIndexY = coord2pixelOffset(CostSurfacefn, startCoordX, startCoordY)
    stopCoordX = stopCoord[0]
    stopCoordY = stopCoord[1]
    stopIndexX, stopIndexY = coord2pixelOffset(CostSurfacefn, stopCoordX, stopCoordY)
    # create path
    indices, weight = route_through_array(costSurfaceArray, (startIndexY, startIndexX), (stopIndexY, stopIndexX), geometric=True, fully_connected=True)
    indices = np.array(indices).T
    path = np.zeros_like(costSurfaceArray)
    path[indices[0], indices[1]] = 1
    return path

def array2raster(newRasterfn, rasterfn, array):
    print 'array2raster'
    raster = gdal.Open(rasterfn)
    geotransform = raster.GetGeoTransform()
    originX = geotransform[0]      # East/West location of Upper Left corner
    originY = geotransform[3]      # North/South location of Upper Left corner
    pixelWidth = geotransform[1]   # X pixel size
    pixelHeight = geotransform[5]  # Y pixel size
    cols = array.shape[1]
    rows = array.shape[0]
    driver = gdal.GetDriverByName('GTiff')
    outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Byte)
    outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(array)
    outRasterSRS = osr.SpatialReference()
    outRasterSRS.ImportFromWkt(raster.GetProjectionRef())
    outRaster.SetProjection(outRasterSRS.ExportToWkt())
    outband.FlushCache()

def main(CostSurfacefn, outputPathfn, startCoord, stopCoord):
    print 'main'
    costSurfaceArray = raster2array(CostSurfacefn)  # creates array from cost surface raster
    config.costSurfaceA = costSurfaceArray
    config.pathArray = createPath(CostSurfacefn, costSurfaceArray, startCoord, stopCoord)  # creates path array
    Save_Array_To_Excel_01.Save_Array(config.pathArray)  # Save array to csv file
    array2raster(outputPathfn, CostSurfacefn, config.pathArray)  # converts path array to raster

if __name__ == "__main__":
    ask_costsurfacepath_path()
    ask_outputpath_path()
    CostSurfacefn = config.costsurfacepath
    print config.costsurfacepath
    startCoord = (config.startX, config.startY)
    stopCoord = (config.stopX, config.stopY)
    outputPathfn = config.outputpath
    main(CostSurfacefn, outputPathfn, startCoord, stopCoord)
config.py
# Configuration file with all global variables
# number of properties
number = None
# different permutations of properties
permutations = list()
# properties array containing:
# * first column = ID first property [0]
# * second column = ID second property [1]
# * third column = distance between two properties [2]
# * fourth column = estimated cost [3]
properties_array = None
# lowest price until now
lowest_price = 10**10000
# path with this lowest price
lowest_path = None
# current price (needs to be compared with lowest price)
current_price = 0
# current path (needs to be compared with lowest path)
current_path = [1]
# path to place where to save properties list
plist_path = None
# Array of the path
pathArray = None
# Array of the map
costSurfaceA = None
# current start X coordinate
startX = 0
# current start Y coordinate
startY = 0
# current stop X coordinate
stopX = 0
# current stop Y coordinate
stopY = 0
# path to costsurface IMG file
costsurfacepath = 0
# path to output path from Least cost path analysis
outputpath = 0
ERROR
please enter the system path where to put the file as a STRING (ex: /Users/PeterVanvoorden/Documents/GroepT/Thesis/Branched_Testfile.csv): '/User/PeterVanvoorden/Desktop/Shell.csv'
You entered: /User/PeterVanvoorden/Desktop/Shell.csv
please enter the system path where to find the cost-surface-IMG file (ex: /Users/PeterVanvoorden/Documents/GroepT/Thesis/Branched_Testfile.img): '/User/PeterVanvoorden/Desktop/clipsmall.img'
please enter the system path where to save the outputpath IMG file (ex: /Users/PeterVanvoorden/Documents/GroepT/Thesis/Branched_Testfile.img): '/User/PeterVanvoorden/Desktop/Shellimg.img'
/User/PeterVanvoorden/Desktop/clipsmall.img
main
raster2array
Traceback (most recent call last):
  File "/Users/PeterVanvoorden/Documents/GroepT/Thesis/f_python_standalone/python_files/Working_Files/Least_cost_path_analysis_01_outputArray.py", line 97, in <module>
    main(CostSurfacefn,outputPathfn,startCoord,stopCoord)
  File "/Users/PeterVanvoorden/Documents/GroepT/Thesis/f_python_standalone/python_files/Working_Files/Least_cost_path_analysis_01_outputArray.py", line 76, in main
    costSurfaceArray = raster2array(CostSurfacefn) # creates array from cost surface raster
  File "/Users/PeterVanvoorden/Documents/GroepT/Thesis/f_python_standalone/python_files/Working_Files/Least_cost_path_analysis_01_outputArray.py", line 17, in raster2array
    band = raster.GetRasterBand(1)
AttributeError: 'NoneType' object has no attribute 'GetRasterBand'
You can pass values around like this:
def ask_costsurfacepath_path():
    costsurfacepath = input('please enter ...')
    return costsurfacepath
... and in the __name__ == '__main__' block:
CostSurfacefn = ask_costsurfacepath_path()
...
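Separately, the traceback itself points at a different problem: gdal.Open returns None rather than raising when it cannot open a file (unless gdal.UseExceptions() is enabled), and the path entered in the log starts with '/User/...' while the examples use '/Users/...'. A guard like this sketch makes such failures explicit:

def raster2array(rasterfn):
    raster = gdal.Open(rasterfn)
    if raster is None:
        # gdal.Open signals failure by returning None, not by raising
        raise IOError('could not open raster file: %s' % rasterfn)
    band = raster.GetRasterBand(1)
    return band.ReadAsArray()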