What's the equivalent of np.delete in libtorch? - c++

It seems we don't have an np.delete equivalent in libtorch yet, so how can we emulate its behavior? For example I'm trying to rewrite the following bit of code in libtorch:
ids = np.delete( ids, np.concatenate([[last], np.where(overlap > overlap_threshold)[0]] ) )
How should I go about this? I thought about slicing, but I'm not sure if there are implications involved that I'm not aware of. This is what I came up with:
neg = torch.where(overlap < overlap_threshold)[0]
ids = ids[neg].clone()
libtorch:
auto neg = torch::where(overlap < overlap_threshold)[0];
ids.index_put_({Slice()}, ids.index({neg}));
//or simply
ids = ids.index({neg}).clone();
And here is an example demo to check that both produce the same result:
import numpy as np

x1 = np.asarray([125., 152., 155., 155., 202.])
y1 = np.asarray([52., 72., 92., 95., 95.])
x2 = np.asarray([145., 172., 175., 175., 222.])
y2 = np.asarray([72., 92., 112., 115., 115.])
score = np.asarray([0.60711509, 0.63444906, 0.85604602, 0.60021192, 0.70115328])
area = (x2 - x1 + 1.0) * (y2 - y1 + 1.0)
ids = np.argsort(score)
overlap_threshold = 0.5
mode = 'union'
while len(ids) > 0:
    # grab index of the largest value
    last = len(ids) - 1
    i = ids[last]
    # left top corner of intersection boxes
    ix1 = np.maximum(x1[i], x1[ids[:last]])
    iy1 = np.maximum(y1[i], y1[ids[:last]])
    # right bottom corner of intersection boxes
    ix2 = np.minimum(x2[i], x2[ids[:last]])
    iy2 = np.minimum(y2[i], y2[ids[:last]])
    # width and height of intersection boxes
    w = np.maximum(0.0, ix2 - ix1 + 1.0)
    h = np.maximum(0.0, iy2 - iy1 + 1.0)
    # intersections' areas
    inter = w * h
    if mode == 'min':
        overlap = inter / np.minimum(area[i], area[ids[:last]])
    elif mode == 'union':
        # intersection over union (IoU)
        overlap = inter / (area[i] + area[ids[:last]] - inter)
    # delete all boxes where overlap is too big
    # ids = np.delete(ids, np.concatenate([[last], np.where(overlap > overlap_threshold)[0]]))
    neg = np.where(overlap <= overlap_threshold)[0]
    ids = ids[neg]
    print(f'ids: {ids}')
And here is the C++ counterpart in libtorch:
// assumes #include <torch/torch.h>, #include <string>, and using namespace torch::indexing; (for Slice/None)
void test5()
{
    auto x1 = torch::tensor({ 125., 152., 155., 155., 202. });
    auto y1 = torch::tensor({ 52., 72., 92., 95., 95. });
    auto x2 = torch::tensor({ 145., 172., 175., 175., 222. });
    auto y2 = torch::tensor({ 72., 92., 112., 115., 115. });
    auto score = torch::tensor({ 0.60711509, 0.63444906, 0.85604602, 0.60021192, 0.70115328 });
    auto area = (x2 - x1 + 1.0) * (y2 - y1 + 1.0);
    auto ids = torch::argsort(score);
    auto overlap_threshold = 0.5;
    std::string mode = "union";  // std::string so the == comparisons below compare contents, not pointers
    while (ids.sizes()[0] > 0)
    {
        // grab index of the largest value
        auto last = ids.sizes()[0] - 1;
        auto i = ids[last];
        // left top corner of intersection boxes
        auto ix1 = torch::max(x1[i], x1.index({ ids.index({ Slice(None, last) }) }));
        auto iy1 = torch::max(y1[i], y1.index({ ids.index({ Slice(None, last) }) }));
        // right bottom corner of intersection boxes
        auto ix2 = torch::min(x2[i], x2.index({ ids.index({ Slice(None, last) }) }));
        auto iy2 = torch::min(y2[i], y2.index({ ids.index({ Slice(None, last) }) }));
        // width and height of intersection boxes
        auto w = torch::max(torch::tensor(0.0), ix2 - ix1 + 1.0);
        auto h = torch::max(torch::tensor(0.0), iy2 - iy1 + 1.0);
        // intersections' areas
        auto inter = w * h;
        torch::Tensor overlap;
        if (mode == "min")
        {
            overlap = inter / torch::min(area[i], area.index({ ids.index({ Slice(None, last) }) }));
        }
        else if (mode == "union")
        {
            // intersection over union (IoU)
            overlap = inter / (area[i] + area.index({ ids.index({ Slice(None, last) }) }) - inter);
        }
        // delete all boxes where overlap is too big
        // ids = np.delete(ids, np.concatenate([[last], np.where(overlap > overlap_threshold)[0]]))
        auto neg = torch::where(overlap < overlap_threshold)[0];
        ids = ids.index({ neg });
        std::cout << "ids: " << ids << std::endl;
    }
}
Both of them print the same output, so is there something I'm missing here, or is this actually a reasonable way of implementing delete in libtorch?
What other, possibly more efficient, ways are there to implement/emulate np.delete()?

It seems this is a reasonable way of doing it, as pointed out in the comments: reverse the condition and keep only the elements that satisfy the new condition.
I would also like to fix a slight issue in my original post.
The correct form equivalent to the Python:
ids = np.delete(ids, np.concatenate([[last], np.where(overlap > overlap_threshold)[0]] ))
would be :
auto neg = torch::where(overlap <= overlap_threshold)[0];
ids = ids.index({ neg });
mind the <=!
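As a quick sanity check, here is a small numpy sketch (with made-up ids and overlap values of my own) showing that the reversed-condition indexing gives exactly the same result as the original np.delete call. It works because overlap only covers ids[:last], so the kept indices can never include last, which is why dropping [last] explicitly becomes unnecessary:
import numpy as np

ids = np.array([3, 0, 1, 4, 2])            # hypothetical sorted indices
overlap = np.array([0.1, 0.7, 0.4, 0.55])  # hypothetical overlaps for ids[:last]
overlap_threshold = 0.5
last = len(ids) - 1

# original form: drop `last` plus every index whose overlap is too big
deleted = np.delete(ids, np.concatenate([[last], np.where(overlap > overlap_threshold)[0]]))

# reversed-condition form: keep only the indices whose overlap is small enough
kept = ids[np.where(overlap <= overlap_threshold)[0]]

assert np.array_equal(deleted, kept)  # both give [3, 1] here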

Related

'for' loop and 'lists' in plotly - R

I want to add some shapes to my chart:
p <- p %>% layout(shapes = list(list(type = "rect", fillcolor = kolorrecesji, line = list(color = kolorrecesji), opacity = op_1, x0 = x0_1, x1 = x1_1, xref = "x", y0 = min_y, y1 = max_y, yref = "y"),
list(type = "rect", fillcolor = kolorrecesji, line = list(color = kolorrecesji), opacity = op_2, x0 = x0_2, x1 = x1_2, xref = "x", y0 = min_y, y1 = max_y, yref = "y"),
list(type = "rect", fillcolor = kolorrecesji, line = list(color = kolorrecesji), opacity = op_3, x0 = x0_3, x1 = x1_3, xref = "x", y0 = min_y, y1 = max_y, yref = "y"),
list(type = "rect", fillcolor = kolorrecesji, line = list(color = kolorrecesji), opacity = op_4, x0 = x0_4, x1 = x1_4, xref = "x", y0 = min_y, y1 = max_y, yref = "y"),
...
Can I have a dynamic number of these shapes?
I would prefer to keep the shapes in a data frame with different x0, x1, fillcolor and opacity (or a parameter deciding whether each one is drawn) and use a for loop rather than listing them one by one as above.
rgds & thks
Grzegorz

How to modify a variable when a while loop is running Python

I am using wxPython along with VPython to make an orbit simulator; however, I'm having trouble getting the sliders in the GUI to affect the simulation. I assume it's because I am trying to get the number associated with the slider to feed into a while loop while it is running.
So my question is: how do I get the function SetRate to update inside the while loop located at the bottom of the code? (I have checked that the slider is returning values.)
Here is my code for reference:
Value = 1.0
dt = 100.0
def InputValue(Value):
    dt = Value

def SetRate(evt):
    global Value
    Value = SpeedOfSimulation.GetValue()
    return Value
w = window(menus=True, title="Planetary Orbits",x=0, y=0, width = 1000, height = 1000)
Screen = display(window = w, x = 30, y = 30, width = 700, height = 500)
gdisplay(window = w, x = 80, y = 80 , width = 40, height = 20)
p = w.panel # Refers to the full region of the window in which to place widgets
SpeedOfSimulation = wx.Slider(p, pos=(800,10), size=(200,100), minValue=0, maxValue=1000)
SpeedOfSimulation.Bind(wx.EVT_SCROLL, SetRate)
TestData = [2, 0, 0, 0, 6371e3, 5.98e24, 0, 0, 0, 384400e3, 0, 0, 1737e3, 7.35e22, 0, 1e3, 0]
Nstars = TestData[0] # change this to have more or fewer stars
G = 6.7e-11 # Universal gravitational constant
# Typical values
Msun = 2E30
Rsun = 2E9
vsun = 0.8*sqrt(G*Msun/Rsun)
Stars = []
colors = [color.red, color.green, color.blue,
color.yellow, color.cyan, color.magenta]
PositionList = []
MomentumList = []
MassList = []
RadiusList = []
for i in range(0, Nstars):
    s = i*8
    x = TestData[s+1]
    y = TestData[s+2]
    z = TestData[s+3]
    Radius = TestData[s+4]
    Stars = Stars + [sphere(pos=(x,y,z), radius=Radius, color=colors[i % 6],
                            make_trail=True, interval=10)]
    Mass = TestData[s+5]
    SpeedX = TestData[s+6]
    SpeedY = TestData[s+7]
    SpeedZ = TestData[s+8]
    px = Mass*(SpeedX)
    py = Mass*(SpeedY)
    pz = Mass*(SpeedZ)
    PositionList.append((x,y,z))
    MomentumList.append((px,py,pz))
    MassList.append(Mass)
    RadiusList.append(Radius)
pos = array(PositionList)
Momentum = array(MomentumList)
Mass = array(MassList)
Mass.shape = (Nstars,1) # Numeric Python: (1 by Nstars) vs. (Nstars by 1)
Radii = array(RadiusList)
vcm = sum(Momentum)/sum(Mass) # velocity of center of mass
Momentum = Momentum-Mass*vcm # make total initial momentum equal zero
Nsteps = 0
time = clock()
Nhits = 0
while True:
    InputValue(Value)  # represents the change in time
    rate(100000)  # no more than 100 loops per second on fast computers
    # Compute all forces on all stars
    r = pos - pos[:,newaxis]  # all pairs of star-to-star vectors (r is the relative position vector)
    for n in range(Nstars):
        r[n,n] = 1e6  # otherwise the self-forces are infinite
    rmag = sqrt(sum(square(r),-1))  # star-to-star scalar distances
    hit = less_equal(rmag, Radii+Radii[:,newaxis]) - identity(Nstars)
    hitlist = sort(nonzero(hit.flat)[0]).tolist()  # 1,2 encoded as 1*Nstars+2
    F = G*Mass*Mass[:,newaxis]*r/rmag[:,:,newaxis]**3  # all force pairs
    for n in range(Nstars):
        F[n,n] = 0  # no self-forces
    Momentum = Momentum + sum(F,1)*dt
    # Having updated all momenta, now update all positions
    pos = pos + (Momentum/Mass)*dt
    # Update positions of display objects; add trail
    for i in range(Nstars):
        Stars[i].pos = pos[i]
I know nothing about VPython, but in a normal wxPython app you would use a wx.Timer instead of a while loop.
Here is an example of wx.Timer, modified from https://www.blog.pythonlibrary.org/2009/08/25/wxpython-using-wx-timers/
You will want to separate the while-loop part from your SetRate class method and put it in update.
import wx
class MyForm(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY, "Timer Tutorial 1",
                          size=(500, 500))
        # Add a panel so it looks correct on all platforms
        panel = wx.Panel(self, wx.ID_ANY)
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.update, self.timer)
        self.timer.Start(20)  # call update() roughly every 20 ms

        SpeedOfSimulation = wx.Slider(panel, pos=(800, 10), size=(200, 100),
                                      minValue=0, maxValue=1000)
        SpeedOfSimulation.Bind(wx.EVT_SCROLL, self.SetRate)
        self.SpeedOfSimulation = SpeedOfSimulation

    def SetRate(self, evt):
        # slider callback; the current value is read again in update(), so nothing else is needed here
        pass

    def update(self, event):
        # Compute all forces on all stars (the body of the old while loop goes here)
        SpeedOfSimulation = self.SpeedOfSimulation.GetValue()
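For completeness, a minimal sketch of how such a frame could be driven (my own assumption about the surrounding app code, not part of the original answer):
if __name__ == "__main__":
    app = wx.App(False)
    frame = MyForm()
    frame.Show()
    app.MainLoop()  # the wx.Timer now calls update() repeatedly instead of a blocking while loop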

Combine overlapping rectangles (python)

After researching, I came across a few questions similar to this: OpenCV groupRectangles - getting grouped and ungrouped rectangles (most are in C++). However, none of them are solid. I want to combine the overlapping rectangles into a single one.
Image
My progress:
for cnt in large_contours:
x,y,w,h = cv2.boundingRect(cnt)
mec=x,y,w,h
rectVec=cv2.rectangle(img_and_contours,(x,y),(x+w,y+h),(0,255,0),2)
#cv2.rectangle(img_and_contours, cv2.boundingRect(large_contours[cnt]),(0,255,0));
rectList, weights = cv2.groupRectangles(mec, 3,0.2)
I only posted a piece of my code. I was hoping groupRectangles would do what I wanted, but it did nothing and instead gave me an error:
rectList,weights = cv2.groupRectangles(mec,3,0.2)
TypeError: rectList
Here is the piece of code which worked for me
def merge_overlapping_zones(zones, delta_overpap=30):
    index = 0
    if zones is None: return zones
    while index < len(zones):
        no_Over_Lap = False
        while no_Over_Lap == False and len(zones) > 1 and index < len(zones):
            zone1 = zones[index]
            tmpZones = np.delete(zones, index, 0)
            tmpZones = [tImageZone(*a) for a in tmpZones]
            for i in range(0, len(tmpZones)):
                zone2 = tmpZones[i]
                # check left side broken
                if zone2.x >= delta_overpap and zone2.y >= delta_overpap:
                    t = tImageZone(zone2.x - delta_overpap, zone2.y - delta_overpap,
                                   zone2.w + 2 * delta_overpap, zone2.h + 2 * delta_overpap)
                elif zone2.x >= delta_overpap:
                    t = tImageZone(zone2.x - delta_overpap, zone2.y,
                                   zone2.w + 2 * delta_overpap, zone2.h + 2 * delta_overpap)
                else:
                    t = tImageZone(zone2.x, zone2.y - delta_overpap,
                                   zone2.w + 2 * delta_overpap, zone2.h + 2 * delta_overpap)
                if (is_zone_overlap(zone1, t) or is_zone_overlap(zone1, zone2)):
                    tmpZones[i] = merge_zone(zone1, zone2)
                    zones = tmpZones
                    no_Over_Lap = False
                    break
                no_Over_Lap = True
        index += 1
    return zones
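Note that the snippet above relies on a tImageZone type and on is_zone_overlap / merge_zone helpers that are not shown (and on numpy imported as np). A minimal sketch of what those helpers might look like, as my own assumption rather than the original author's code:
from collections import namedtuple

tImageZone = namedtuple('tImageZone', ['x', 'y', 'w', 'h'])

def is_zone_overlap(z1, z2):
    # True if the two axis-aligned rectangles intersect
    return not (z1.x + z1.w < z2.x or z2.x + z2.w < z1.x or
                z1.y + z1.h < z2.y or z2.y + z2.h < z1.y)

def merge_zone(z1, z2):
    # smallest rectangle that contains both inputs
    x = min(z1.x, z2.x)
    y = min(z1.y, z2.y)
    w = max(z1.x + z1.w, z2.x + z2.w) - x
    h = max(z1.y + z1.h, z2.y + z2.h) - y
    return tImageZone(x, y, w, h)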
There is an algorithm called **non-maximum suppression**. The function takes an array of rectangles as input and outputs only the rectangles that survive the suppression; heavily overlapping ones are discarded. Here is the code (from pyimagesearch):
import numpy as np

def non_max_suppression_fast(boxes, overlapThresh):
    # if there are no boxes, return an empty list
    if len(boxes) == 0:
        return []
    # if the bounding boxes are integers, convert them to floats --
    # this is important since we'll be doing a bunch of divisions
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    # initialize the list of picked indexes
    pick = []
    # grab the coordinates of the bounding boxes
    x1 = boxes[:,0]
    y1 = boxes[:,1]
    x2 = boxes[:,2]
    y2 = boxes[:,3]
    # compute the area of the bounding boxes and sort the bounding
    # boxes by the bottom-right y-coordinate of the bounding box
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    # keep looping while some indexes still remain in the indexes
    # list
    while len(idxs) > 0:
        # grab the last index in the indexes list and add the
        # index value to the list of picked indexes
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        # find the largest (x, y) coordinates for the start of
        # the bounding box and the smallest (x, y) coordinates
        # for the end of the bounding box
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        # compute the width and height of the bounding box
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        # compute the ratio of overlap
        overlap = (w * h) / area[idxs[:last]]
        # delete all indexes from the index list that have an
        # overlap greater than the provided threshold
        idxs = np.delete(idxs, np.concatenate(([last],
            np.where(overlap > overlapThresh)[0])))
    # return only the bounding boxes that were picked using the
    # integer data type
    return boxes[pick].astype("int")
Hope it can help you.

generate N random numbers from a skew normal distribution using numpy

I need a function in python to return N random numbers from a skew normal distribution. The skew needs to be taken as a parameter.
e.g. my current use is
x = numpy.random.randn(1000)
and the ideal function would be e.g.
x = randn_skew(1000, skew=0.7)
Solution needs to conform with: python version 2.7, numpy v.1.9
A similar answer is here: skew normal distribution in scipy. However, this generates a PDF, not the random numbers.
I start by generating the PDF curves for reference:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats

NUM_SAMPLES = 100000
SKEW_PARAMS = [-3, 0]

def skew_norm_pdf(x, e=0, w=1, a=0):
    # adapted from:
    # http://stackoverflow.com/questions/5884768/skew-normal-distribution-in-scipy
    t = (x-e) / w
    return 2.0 * w * stats.norm.pdf(t) * stats.norm.cdf(a*t)

# generate the skew normal PDF for reference:
location = 0.0
scale = 1.0
x = np.linspace(-5, 5, 100)
plt.subplots(figsize=(12, 4))
for alpha_skew in SKEW_PARAMS:
    p = skew_norm_pdf(x, location, scale, alpha_skew)
    # n.b. note that alpha is a parameter that controls skew, but the 'skewness'
    # as measured will be different. see the wikipedia page:
    # https://en.wikipedia.org/wiki/Skew_normal_distribution
    plt.plot(x, p)
Next I found a VB implementation of sampling random numbers from the skew normal distribution and converted it to Python:
# literal adaption from:
# http://stackoverflow.com/questions/4643285/how-to-generate-random-numbers-that-follow-skew-normal-distribution-in-matlab
# original at:
# http://www.ozgrid.com/forum/showthread.php?t=108175
def rand_skew_norm(fAlpha, fLocation, fScale):
    sigma = fAlpha / np.sqrt(1.0 + fAlpha**2)
    afRN = np.random.randn(2)
    u0 = afRN[0]
    v = afRN[1]
    u1 = sigma*u0 + np.sqrt(1.0 - sigma**2) * v
    if u0 >= 0:
        return u1*fScale + fLocation
    return (-u1)*fScale + fLocation

def randn_skew(N, skew=0.0):
    return [rand_skew_norm(skew, 0, 1) for x in range(N)]
# lets check they at least visually match the PDF:
plt.subplots(figsize=(12, 4))
for alpha_skew in SKEW_PARAMS:
    p = randn_skew(NUM_SAMPLES, alpha_skew)
    sns.distplot(p)
And then wrote a quick version which (without extensive testing) appears to be correct:
def randn_skew_fast(N, alpha=0.0, loc=0.0, scale=1.0):
    sigma = alpha / np.sqrt(1.0 + alpha**2)
    u0 = np.random.randn(N)
    v = np.random.randn(N)
    u1 = (sigma*u0 + np.sqrt(1.0 - sigma**2)*v) * scale
    u1[u0 < 0] *= -1
    u1 = u1 + loc
    return u1

# lets check again
plt.subplots(figsize=(12, 4))
for alpha_skew in SKEW_PARAMS:
    p = randn_skew_fast(NUM_SAMPLES, alpha_skew)
    sns.distplot(p)
from scipy.stats import skewnorm
a=10
data= skewnorm.rvs(a, size=1000)
Here, a is the skewness parameter; for details see:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skewnorm.html
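If you also need a location and scale, skewnorm.rvs accepts them directly (example values of my own choosing):
from scipy.stats import skewnorm

# a = skewness, loc = location, scale = spread, size = number of samples
data = skewnorm.rvs(a=10, loc=0.0, scale=2.0, size=1000)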
Adapted from the rsnorm function in the fGarch R package:
import numpy

def random_snorm(n, mean=0, sd=1, xi=1.5):
    def random_snorm_aux(n, xi):
        weight = xi/(xi + 1/xi)
        z = numpy.random.uniform(-weight, 1-weight, n)
        xi_ = xi**numpy.sign(z)
        random = -numpy.absolute(numpy.random.normal(0, 1, n))/xi_ * numpy.sign(z)
        m1 = 2/numpy.sqrt(2 * numpy.pi)
        mu = m1 * (xi - 1/xi)
        sigma = numpy.sqrt((1 - m1**2) * (xi**2 + 1/xi**2) + 2 * m1**2 - 1)
        return (random - mu)/sigma

    return random_snorm_aux(n, xi) * sd + mean
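A quick usage sketch with arbitrary example parameters (my addition, assuming the random_snorm function above):
samples = random_snorm(1000, mean=0.0, sd=1.0, xi=1.5)
print(numpy.mean(samples), numpy.std(samples))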

updating function arguments python in each iterations

I am trying to update my function arguments after each iteration but have failed to do so. Kindly check my code, because I am new to Python. My task is to calculate xps (a collection of positions) and v2ps (a collection of velocities) after each iteration and plot them against each other. Basically, this program represents the collision of objects moving vertically downward, where one of the objects also collides with the plane above which they are moving.
acc_grav = 10
m1 =float(input(" Input mass of ball one, m1: "))
m2 =float(input(" Input mass of ball two, m2: "))
time_steps =10000
num_coll_bounce = 0
num_ball_coll = 0
eps=1.e-6
def ball_coll(x1_old, v1_old, x2_old, v2_old, time_ball_coll):
    v1 = v1_old - acc_grav*time_ball_coll
    v2 = v2_old - acc_grav*time_ball_coll
    x1 = x1_old + time_ball_coll*v1_old - 0.5*acc_grav*(time_ball_coll)**2
    x2 = x2_old + time_ball_coll*v2_old - 0.5*acc_grav*(time_ball_coll)**2
    v1_ball_coll = (v1*(m1-m2)+(2*m2*v2))/(m1+m2)
    v2_ball_coll = (v2*(m2-m1)+(2*m1*v1))/(m1+m2)
    cumlv2 = v2
    return [v1, v2, x1, x2, v1_ball_coll, v2_ball_coll]

def floor_coll(x1_old, v1_old, x2_old, v2_old, time_floor_coll):
    v1 = v1_old - acc_grav*time_floor_coll
    v2 = v2_old - acc_grav*time_floor_coll
    x1 = 0  # at the time of bounce
    x2 = x2_old + time_floor_coll*v2_old - 0.5*acc_grav*time_floor_coll**2
    # update velocities following rules for collision with walls
    v1_bounce = -v1
    v2_bounce = v2
    return [v1, v2, x1, x2, v1_bounce, v2_bounce]
for i in range(0, 10):
    x1_0 = 1
    x2_0 = 3 - (i-1)*0.1
    v1_0 = 2
    v2_0 = 2*v1_0
    xps = []
    v2ps = []
    for n in range(time_steps-1):
        time_ball_coll = (x2_0-x1_0)/(v1_0 - v2_0)
        time_floor_coll = (v1_0 + (v1_0**2 + 2*acc_grav*x1_0)**1/2)/acc_grav
        if ((time_ball_coll - time_floor_coll) < eps and v1_0 - v2_0 > 0):
            num_coll_bounce = num_coll_bounce + 1
            num_ball_coll = num_ball_coll + 1
            ball_coll(x1_0, v1_0, x2_0, v2_0, time_ball_coll)
            # xps[n] = x2_0
            # v2ps(n,num_ballcoll) = v2ini
            xps.append(x2_0)
            v2ps.append(v2_0)
        else:
            num_coll_bounce = num_coll_bounce + 1
            floor_coll(x1_0, v1_0, x2_0, v2_0, time_floor_coll)
            # x1_old,v1_old,x2_old,v2_old,time_floor_coll = dd2
            x_1.append(x1_0)
            x_2.append(x2_0)