Keep receiving python-opencv 3.4.2 error: (-215:Assertion failed) (size_t)knn <= index_->size() in function 'runKnnSearch_'

I am trying to compare two images, but I keep getting the following error in Python:

matches = flann.knnMatch(des1, des2, k=2)
cv2.error: OpenCV(3.4.2) /home/pi/opencv_python/opencv/modules/flann/src/miniflann.cpp:487: error: (-215:Assertion failed) (size_t)knn <= index_->size() in function 'runKnnSearch_'

Here is the code:
# Create SIFT
sift = cv2.xfeatures2d.SIFT_create()
kp_1, desc_1 = sift.detectAndCompute(original, None)

index_params = dict(algorithm=0, trees=15)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)  # <- is this correct?

# Load all images into an array
all_images_to_compare = []
titles = []
for f in glob.iglob("/home/pi/dataset/*"):
    image = cv2.imread(f)
    titles.append(f)
    all_images_to_compare.append(image)

for image_to_compare, title in zip(all_images_to_compare, titles):
    # 1) Check if the 2 images are equal
    if original.shape == image_to_compare.shape:
        print("The images have same size and channels")
        difference = cv2.subtract(original, image_to_compare)
        b, g, r = cv2.split(difference)
        if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
            print("Similarity: 100% (equal size and channels)")

    # 2) Check for similarities between the 2 images
    kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)
    matches = flann.knnMatch(desc_1, desc_2, k=2)  # <- error occurs here
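The assertion requires k to be no larger than the number of descriptors in the train set (desc_2 here), so it fires whenever some image in the dataset yields fewer than 2 SIFT keypoints (a blank, tiny, or unreadable image, for instance). A minimal guard, assuming that is the cause:

```python
# Hypothetical guard: skip dataset images that yield fewer descriptors than k
kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)
if desc_2 is None or len(desc_2) < 2:
    print("Skipping {}: fewer than 2 descriptors".format(title))
    continue
matches = flann.knnMatch(desc_1, desc_2, k=2)
```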

Pyomo error when passing rule from objective

I am running the following Pyomo code:
C = list(datadict.keys())

model = ConcreteModel()
model.IDX = range(23)
model.zIDX = range(1)

def _initialize_rule(model, i):
    return datadict[C[i]]['Int']

def _bounds_rule(model, i):
    return (datadict[C[i]]['Min'], datadict[C[i]]['Max'])

# declare decision variables
model.x = Var(model.IDX, initialize=_initialize_rule, domain=NonNegativeReals, bounds=_bounds_rule)
model.z = Var(model.zIDX, initialize=1, domain=NonNegativeReals, bounds=(0, None))

model.c1 = Constraint(
    expr=sum(model.x[i] * datadict[C[i]]["A"] for i in model.IDX) == budget
)

def _maa_rule(m):
    v = BlockVector(23)
    for i in m.IDX:
        if channelData[C[i]]["A"] == 0:
            v.set_block(i, 1)
        else:
            v.set_block(i, m.x[i])
    inputArr = v.flatten()
    # Month
    inputArr = np.append(inputArr, [dateData['Month']])
    # DOW
    inputArr = np.append(inputArr, [dateData['DOW']])
    # DOY
    inputArr = np.append(inputArr, [dateData['DOY']])
    # Quarter
    inputArr = np.append(inputArr, [dateData['Quarter']])
    # fracDOY
    inputArr = np.append(inputArr, [dateData['fracDOY']])
    X_arr = X_scaler.transform(inputArr)
    I_arr = Y_Inflow_scaler.inverse_transform(automlInflow.predict(X_arr).reshape(-1, 1))
    O_arr = Y_Outflow_scaler.inverse_transform(automlOutflow.predict(X_arr).reshape(-1, 1))
    m.z[i] = I_arr[0][0] - O_arr[0][0]
    return m.z[i]

model.obj = Objective(rule=_maa_rule, sense=maximize)
But I get the following error when I try to run the code...
ERROR: Rule failed when generating expression for Objective obj with index
None: AssertionError: Blocks need to be numpy arrays or BlockVectors
ERROR: Constructing component 'obj' from data=None failed: AssertionError:
Blocks need to be numpy arrays or BlockVectors
---------------------------------------------------------------------------
AssertionError: Blocks need to be numpy arrays or BlockVectors
I have tried creating a BlockVector and then assigning the predicted values to it, but nothing seems to work.
Any help would be GREATLY appreciated.
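For what it's worth, the assertion in the traceback comes from pynumero's BlockVector itself: every block must be a numpy array or a nested BlockVector, so set_block(i, 1) (a plain int) and set_block(i, m.x[i]) (a Pyomo variable, not an array) are the likely triggers. A minimal sketch of what the container accepts, assuming a pynumero version that exposes set_block as in the question:

```python
import numpy as np
from pyomo.contrib.pynumero.sparse import BlockVector

v = BlockVector(2)
v.set_block(0, np.array([1.0]))        # OK: a numpy array
v.set_block(1, np.array([2.0, 3.0]))   # OK: blocks may differ in length
print(v.flatten())                     # [1. 2. 3.]
# v.set_block(0, 1)  # would raise: Blocks need to be numpy arrays or BlockVectors
```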

Is this method of calculating the top-5 accuracy in pytorch correct?

I am trying to validate the findings of a paper by testing it on the same model architecture and the same dataset reported by the paper. I have been using the ImageNet script provided in the official PyTorch repository's examples section to do so.
class AverageMeter(object):
    """Computes and stores the average and current value
    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
top1 = AverageMeter()
top5 = AverageMeter()

# switch to evaluate mode
model.eval()

with torch.no_grad():
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        print(f"Processing {batch_idx+1}/{len(test_loader)}")
        inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)

        # compute output
        outputs = model(inputs)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        print(prec1, prec5)
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

print(top1)
print(top5)
However, the top-5 error I get from this script does not match the one reported in the paper. Can anyone tell me what is wrong with this particular snippet?
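As a sanity check of the accuracy() logic on its own, here is a tiny hand-checkable example (all values invented for illustration); if it prints the expected numbers, the mismatch with the paper is more likely in preprocessing or the checkpoint than in this snippet:

```python
import torch

# Toy batch: 3 samples, 4 classes
output = torch.tensor([[0.1, 0.9, 0.0, 0.0],   # best guess: class 1
                       [0.1, 0.3, 0.5, 0.1],   # best: class 2, second: class 1
                       [0.2, 0.3, 0.4, 0.1]])  # best: class 2
target = torch.tensor([1, 1, 2])
batch_size = target.size(0)

_, pred = output.topk(2, 1, True, True)   # (3, 2) indices, best first
pred = pred.t()                           # (2, 3)
correct = pred.eq(target.view(1, -1).expand_as(pred))
for k in (1, 2):
    correct_k = correct[:k].reshape(-1).float().sum(0)
    print(k, (correct_k * (100.0 / batch_size)).item())
# expected: top-1 ~ 66.67 (samples 0 and 2 are right), top-2 = 100.0
```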

Why am I getting "TypeError: Failed to convert object of type <type 'dict'> to Tensor."?

I'm new to TF and ML.
Details about the data: features (x) are a (70, 70, 70) tensor per sample; y is a float per sample.
TFRecords were created with the following code:
def convert_to_tf_records():
    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def _float64_feature(value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

    tfrecords_filename = 'A-100-h2-h2o.tfrecords'
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)

    # Get data from db for now.
    db = connect('results-60-70.db')
    data = db.select(selection='Ti')

    i = 0
    for row in data:
        desc = np.array(json.loads(row.descriptor), dtype=np.float32)
        print(desc.shape)
        be = float(row.binding_energy) * 23  # Convert to Kcal/mol ?
        desc = desc.flatten()
        desc = desc.tostring()
        example = tf.train.Example(features=tf.train.Features(feature={
            'voxel_grid': _bytes_feature(desc),
            'binding_energy': _float64_feature(be)}))
        writer.write(example.SerializeToString())
        i += 1
        if i >= 10:
            break
Input function:
def my_input_function(fname, perform_shuffle=False, repeat_count=None):
    def _parse_elements(example):
        features = tf.parse_single_example(example, features={
            'voxel_grid': tf.FixedLenFeature([], tf.string),
            'binding_energy': tf.FixedLenFeature([], tf.float32)})
        vg = tf.decode_raw(features['voxel_grid'], tf.float32)
        vg = tf.reshape(vg, [70, 70, 70])
        vg = tf.convert_to_tensor(vg, dtype=tf.float32)
        vg = {'voxel_grid': vg}
        e = tf.cast(features['binding_energy'], tf.float32)
        return vg, e

    def input_function():
        dataset = tf.data.TFRecordDataset(fname).map(_parse_elements)
        dataset = dataset.repeat(repeat_count)
        dataset = dataset.batch(5)
        dataset = dataset.prefetch(1)
        if perform_shuffle:
            dataset.shuffle(20)
        iterator = dataset.make_one_shot_iterator()
        batch_features, batch_labels = iterator.get_next()
        return batch_features, batch_labels

    return input_function
Model function:
def my_model_function(features, labels, mode):
    if mode == tf.estimator.ModeKeys.PREDICT:
        tf.logging.info("my_model_fn: PREDICT, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.EVAL:
        tf.logging.info("my_model_fn: EVAL, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.TRAIN:
        tf.logging.info("my_model_fn: TRAIN, {}".format(mode))

    feature_columns = [tf.feature_column.numeric_column('voxel_grid', shape=(70, 70, 70), dtype=tf.float32)]

    # Create the layer of input
    input_layer = tf.feature_column.input_layer(features, feature_columns)
    input_layer = tf.reshape(input_layer, [-1, 70, 70, 70, 1])

    # Convolution layers
    conv1 = tf.layers.conv3d(inputs=input_layer, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    conv2 = tf.layers.conv3d(inputs=conv1, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    pool3 = tf.layers.max_pooling3d(inputs=conv2, pool_size=[2, 2, 2], strides=2)
    flat = tf.layers.flatten(pool3)
    dense1 = tf.layers.dense(inputs=flat, units=10, activation=tf.nn.relu)
    dense2 = tf.layers.dense(inputs=dense1, units=10, activation=tf.nn.relu)
    output = tf.layers.dense(inputs=dense2, units=1)

    predictions = {'binding_energy': output}

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss
    loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics
    eval_metric_ops = {"mse": tf.metrics.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])}
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
When calling model.train using

model = tf.estimator.Estimator(model_fn=my_model_function, model_dir='./model_dir')
model.train(input_fn=my_input_function('A-100-h2-h2o.tfrecords'), steps=100)

I get the following error:

TypeError: Failed to convert object of type <type 'dict'> to Tensor.
Found it! Changing

# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

to

# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])

solves the issue: predictions is a dict ({'binding_energy': output}), while tf.losses.mean_squared_error expects a tensor, which is exactly what the "Failed to convert object of type <type 'dict'> to Tensor" message is complaining about.
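A stripped-down repro of the same message, for anyone who wants to see the failure in isolation (TF 1.x API, as in the question):

```python
import tensorflow as tf  # TF 1.x

labels = tf.constant([[1.0], [2.0]])
output = tf.constant([[1.5], [2.5]])

# Passing the tensor works:
loss = tf.losses.mean_squared_error(labels=labels, predictions=output)

# Passing the dict reproduces the error:
# tf.losses.mean_squared_error(labels=labels, predictions={'binding_energy': output})
# TypeError: Failed to convert object of type <type 'dict'> to Tensor.
```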

What is the error in the below re.findall() code?

import re

for test in range(int(input())):
    a = input()                    # input a string
    n = a.replace("=", "")         # if the string contains '=', remove it
    gg = re.findall(r">+", n)      # find runs of >
    l1 = len(max(gg, key=len))     # longest run of consecutive >
    hh = re.findall(r"<+", n)      # find runs of <
    l2 = len(max(hh, key=len))     # longest run of consecutive <
    print(max(l1, l2) + 1)         # print max of the two + 1
Input is:
4
<<<
<><
<=>
<=<
I am encountering an error when I run the above code. I based the syntax on other SO answers, but I still get the error:

Traceback (most recent call last):
  File "/home/fea0d5e04ac92cb3a1e4f041940f2dfc.py", line 8, in <module>
    l2 = len(max(hh, key=len))
ValueError: max() arg is an empty sequence
max fails on an empty sequence.
Python >= 3.4
max takes an optional default argument, returned when the sequence is empty. Use an empty string as the default so the surrounding len() yields 0:
l1 = len(max(gg, key=len, default=""))  # count consecutive >
Python < 3.4
Add a guard that checks whether the list is empty:
if len(hh) > 0:
    l2 = len(max(hh, key=len))  # count consecutive <
else:
    l2 = 0
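To see the fix on the question's own failing input (the last test case, <=<, which contains no > at all):

```python
import re

n = "<=<".replace("=", "")              # "<<": no '>' anywhere
gg = re.findall(r">+", n)               # [] -> max() alone would raise here
l1 = len(max(gg, key=len, default=""))  # 0
hh = re.findall(r"<+", n)               # ['<<']
l2 = len(max(hh, key=len, default=""))  # 2
print(max(l1, l2) + 1)                  # 3
```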

OpenCV 3.2 NameError: global name 'FLANN_INDEX_LSH' is not defined

I am currently trying to implement ORB with FLANN. I have read the documentation, and it says that when using ORB with FLANN you have to use:
index_params = dict(algorithm = FLANN_INDEX_LSH,
                    table_number = 6,       # 12
                    key_size = 12,          # 20
                    multi_probe_level = 1)  # 2
And here is my code:
def useFLANN(img1, img2, kp1, kp2, des1, des2, setDraw, type):
    # Fast Library for Approximate Nearest Neighbors
    MIN_MATCH_COUNT = 10
    FLANN_INDEX_KDTREE = 0

    if type == True:
        # Detect with ORB
        index_params = dict(algorithm = FLANN_INDEX_LSH,
                            table_number = 6,       # 12
                            key_size = 12,          # 20
                            multi_probe_level = 1)  # 2
    else:
        # Detect with others such as SURF, SIFT
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)

    # It specifies the number of times the trees in the index should be recursively
    # traversed. Higher values give better precision but also take more time.
    search_params = dict(checks = 60)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    else:
        print "Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT)
        matchesMask = None

    totalDistance = 0
    for g in good:
        totalDistance += g.distance

    if setDraw == True:
        draw_params = dict(matchColor = (0, 255, 0),   # draw matches in green color
                           singlePointColor = None,
                           matchesMask = matchesMask,  # draw only inliers
                           flags = 2)
        img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
        plt.imshow(img3, 'gray'), plt.show()

    return totalDistance
The problem is that when I run the program, it says that FLANN_INDEX_LSH is not defined. I don't know what to do; is FLANN_INDEX_LSH buggy in OpenCV 3.2?
Note: when I use SIFT/SURF with FLANN, FLANN_INDEX_KDTREE works perfectly.
It's not buggy. FLANN_INDEX_LSH is simply not defined in OpenCV's Python API. You can define it yourself as follows:
FLANN_INDEX_LSH = 6
and continue with your code. For a comprehensive list, refer to the official docs.
Change it to:
algorithm=6, # FLANN_INDEX_LSH
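Putting both answers together, a minimal sketch of matching ORB descriptors with the FLANN/LSH index (the image paths are hypothetical):

```python
import cv2

# The FLANN index constants are not exposed by OpenCV's Python API,
# so define the one you need yourself (6 = LSH, per the FLANN docs)
FLANN_INDEX_LSH = 6

img1 = cv2.imread("query.png", cv2.IMREAD_GRAYSCALE)  # hypothetical paths
img2 = cv2.imread("train.png", cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

index_params = dict(algorithm = FLANN_INDEX_LSH,
                    table_number = 6,
                    key_size = 12,
                    multi_probe_level = 1)
flann = cv2.FlannBasedMatcher(index_params, dict(checks = 60))
matches = flann.knnMatch(des1, des2, k=2)
print(len(matches))
```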