Python 2.7, Tkinter, make a point "trace" a waveform - python-2.7

I'm trying to make a widget that demonstrates the relationship between a sine wave and its phasor diagram. I want a pointer that follows the user's mouse movements, but instead of moving freely around the page, I want it to stick to the waveform I've plotted and only follow the coordinates that make up that line. Kind of like a slider, but along a sine wave! Any suggestions? Thanks. Here's what I've got so far:
import math
import Tkinter as tk
from Tkinter import PhotoImage, Canvas
from PIL import Image, ImageTk

sinwidget = tk.Tk()
main_canvas = tk.Canvas(sinwidget, width=400, height=400, bg="white")
main_canvas.pack()

def callback(event):
    canvas = event.widget
    x = canvas.canvasx(event.x)
    y = canvas.canvasy(event.y)
    print canvas.find_closest(x, y)
    draw(event.x, event.y)

def draw(x, y):
    box.coords(pointer, x-20, y-20, x+20, y+20)

def exit_():
    sinwidget.destroy()

box = main_canvas
box.bind('<Motion>', callback)
box.pack()

pointer = box.create_rectangle(0.2, 0.2, 0.2, 0.2)

wavelength = 360
height = 400
center = height // 2
degree = 1
increment = 0.0175
amplitude = -80

sin = []
for x in range(360):
    sin.append(x * degree)
    sin.append(int(math.sin(x * increment) * amplitude) + center)

sinwave = main_canvas.create_line(sin, fill="red", width=2.0)
xaxis = main_canvas.create_line(0, center, wavelength, center, fill="black", width=3.0)
yaxis = main_canvas.create_line(2, 100, 2, 300, fill="black", width=3.0)

exit_button = tk.Button(sinwidget, text="exit", command=exit_)
exit_button.pack()

sinwidget.mainloop()
Thanks for any help you can offer!
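One possible approach (a sketch, not from the original post): every point on the red curve is generated from x by the same formula used to build the sin list, so the callback can ignore the mouse's y coordinate and recompute the pointer's y from its x, which clamps the pointer to the waveform. Replacing the callback above with something like this would do it:

def callback(event):
    # clamp x to the plotted wavelength, then put the pointer on the curve
    x = max(0, min(int(event.x), wavelength - 1))
    y = int(math.sin(x * increment) * amplitude) + center
    draw(x, y)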

Related

Make part of image transparent with pyglet

I have two images, with the "desert" image drawn over the "winter" image:
import pyglet

desert_img = pyglet.image.load('assets/desert.jpg')
desert = pyglet.sprite.Sprite(desert_img, x=50, y=50)
winter_img = pyglet.image.load('assets/winter.jpg')
winter = pyglet.sprite.Sprite(winter_img, x=0, y=0)

window = pyglet.window.Window()

@window.event
def on_draw():
    winter.draw()
    desert.draw()

pyglet.app.run()
The result is that the desert sprite is drawn on top of the winter one. I would like to draw a square of transparency on the desert image (the winter image should be visible inside this square). Is that possible? How can I do it?
I found many questions about making the image itself transparent (PNG, alpha channel, ...), but none that do what I want.
Based on Torxed's suggestion, replace the image content with transparent bytes wherever we want it to be transparent:
import io

from PIL import Image
from PIL.PngImagePlugin import PngImageFile
import pyglet

def replace_content_with_transparency(img: PngImageFile, x, y, width, height):
    # note: width and height are used here as end coordinates of the region
    pixels = img.load()
    for i in range(x, width):
        for j in range(y, height):
            pixels[i, j] = (0, 0, 0, 0)

desert_png = Image.open('assets/desert.png')
replace_content_with_transparency(desert_png, 32, 32, 123, 123)

# re-encode the modified image in memory and hand it to pyglet
fake_file = io.BytesIO()
desert_png.save(fake_file, format='PNG')

desert_img = pyglet.image.load('img.png', file=fake_file)
desert = pyglet.sprite.Sprite(desert_img, x=50, y=50)
winter_img = pyglet.image.load('assets/winter.jpg')
winter = pyglet.sprite.Sprite(winter_img, x=0, y=0)

window = pyglet.window.Window()

@window.event
def on_draw():
    winter.draw()
    desert.draw()

pyglet.app.run()
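An alternative sketch (not from the original answer) that avoids the per-pixel loop: paste a fully transparent RGBA patch over the region with PIL. The 91x91 patch size here is an assumption chosen to cover the same 32..123 box used above:

from PIL import Image

desert_png = Image.open('assets/desert.png').convert('RGBA')
# hypothetical patch covering the same region as replace_content_with_transparency(desert_png, 32, 32, 123, 123)
patch = Image.new('RGBA', (91, 91), (0, 0, 0, 0))
desert_png.paste(patch, (32, 32))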

pyglet resolution of screen into variables

Is there any way to get the screen's height and width into variables using pyglet? I am able to print them but not to extract the values.
import pyglet
platform = pyglet.window.get_platform()
display = platform.get_default_display()
screen = display.get_screens()
which gives:
>>> screen
[XlibScreen(display=<pyglet.canvas.xlib.XlibDisplay object at 0x7f4644cf0990>, x=0, y=0, width=1366, height=768, xinerama=0)]
>>>
Any idea? Thanks in advance.
It should be as simple as this:
platform = pyglet.window.get_platform()
display = platform.get_default_display()
screen = display.get_default_screen()
screen_width = screen.width
screen_height = screen.height
In newer versions of pyglet, pyglet.window.get_platform() is deprecated and has since been removed, so the code becomes:
display = pyglet.canvas.Display()
screen = display.get_default_screen()
screen_width = screen.width
screen_height = screen.height
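For example, a minimal usage sketch (opening a window sized to the detected screen is just an illustration, not part of the original answer):

import pyglet

display = pyglet.canvas.Display()
screen = display.get_default_screen()
print(screen.width, screen.height)

# e.g. open a window that fills the detected screen size
window = pyglet.window.Window(width=screen.width, height=screen.height)
pyglet.app.run()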

How to keep a Tkinter widget on top of the others

I have some code that moves images left and right but I do not want them to appear on top of the right border, which I draw as a rectangle.
What are the options in Tkinter to keep a widget (in my example a rectangle) on top of some other widgets (in my code a tile, which is an image)?
I am drawing the rectangle and the image on one canvas.
I can imagine that using two canvases could do the trick, but are there any other options/settings?
Thanks
import Tkinter as tk # for Python2
import PIL.Image, PIL.ImageTk
win = tk.Tk()
#Create a canvas
canvas = tk.Canvas(win, height = 500, width = 500)
#Create a rectangle on the right of the canvas
rect = canvas.create_rectangle(250, 0, 500, 250, width = 2, fill = "red")
#Create an image
SPRITE = PIL.Image.open("sprite.png")
tilePIL = SPRITE.resize((100, 100))
tilePI = PIL.ImageTk.PhotoImage(tilePIL)
tile = canvas.create_image(100, 100, image = tilePI, tags = "a tag")
#Place the canvas
canvas.grid(row = 1, column = 0, rowspan = 5)
#Move the tile to the right.
#The tile will go on top of red rectangle. How to keep the rectangle on top of the tile?
canvas.coords(tile, (300, 100))
canvas.mainloop()
Use the tag_raise() method to move the rectangle above the tile in the canvas display order:
canvas.tag_raise(rect)
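In context (a sketch based on the question's code), raise the rectangle after creating the items, or again right after each move, so it always draws above the tile:

# move the tile, then make sure the border rectangle stays on top
canvas.coords(tile, 300, 100)
canvas.tag_raise(rect)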

How to obtain the contour plot data for each scatter points?

I have plotted a contour plot as the background, which represents the altitude of the area.
100 scatter points were added to represent the real pollutant emission sources. Is there a method to obtain the altitude at each of these points?
This is my code:
%matplotlib inline
import csv

import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np

fig = plt.figure(figsize=(16, 16))
ax = plt.subplot()
xi, yi = np.linspace(195.2260, 391.2260, 50), np.linspace(4108.9341, 4304.9341, 50)
height = np.array(list(csv.reader(open("/Users/HYF/Documents/SJZ_vis/Concentration/work/terr_grd.csv", "rb"), delimiter=','))).astype('float')
cmap = cm.get_cmap(name='terrain', lut=None)
terrf = plt.contourf(xi, yi, height, 100, cmap=cmap)
terr = plt.contour(xi, yi, height, 100, colors='k', alpha=0.5)
plt.clabel(terr, fontsize=7, inline=20)
ax.autoscale(False)
point = plt.scatter(dat_so2["xp"], dat_so2["yp"], marker='o', c="grey", s=40)
ax.autoscale(False)
for i in range(0, len(dat_so2["xp"]), 1):
    plt.text(dat_so2["xp"][i], dat_so2["yp"][i], str(i), color="White", fontsize=16)
ax.set_xlim(225, 275)
ax.set_ylim(4200, 4260)
plt.show()
You can do this with scipy.interpolate.interp2d
For example, you could add to your code:
from scipy import interpolate

hfunc = interpolate.interp2d(xi, yi, height)

pointheights = np.zeros(dat_so2["xp"].shape)
for i, (x, y) in enumerate(zip(dat_so2["xp"], dat_so2["yp"])):
    pointheights[i] = hfunc(x, y)
Putting this together with the rest of your script, and some sample data, gives this (I've simplified a couple of things here, but you get the idea):
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from scipy import interpolate

fig = plt.figure(figsize=(8, 8))
ax = plt.subplot()

#xi,yi = np.linspace(195.2260,391.2260,50),np.linspace(4108.9341,4304.9341,50)
xi, yi = np.linspace(225, 275, 50), np.linspace(4200, 4260, 50)

# A made up function of height (in place of your data)
XI, YI = np.meshgrid(xi, yi)
height = (XI - 230.)**2 + (YI - 4220.)**2
#height=np.array(list(csv.reader(open("/Users/HYF/Documents/SJZ_vis/Concentration/work/terr_grd.csv","rb"),delimiter=','))).astype('float')

cmap = cm.get_cmap(name='terrain', lut=None)
terrf = plt.contourf(xi, yi, height, 10, cmap=cmap)
terr = plt.contour(xi, yi, height, 10, colors='k', alpha=0.5)
plt.clabel(terr, fontsize=7, inline=20)
ax.autoscale(False)

# Some made up sample points
dat_so2 = np.array([(230, 4210), (240, 4220), (250, 4230), (260, 4240), (270, 4250)], dtype=[("xp", "f4"), ("yp", "f4")])
point = plt.scatter(dat_so2["xp"], dat_so2["yp"], marker='o', c="grey", s=40)

# The interpolation function
hfunc = interpolate.interp2d(xi, yi, height)

# Now, for each point, lets interpolate the height
pointheights = np.zeros(dat_so2["xp"].shape)
for i, (x, y) in enumerate(zip(dat_so2["xp"], dat_so2["yp"])):
    pointheights[i] = hfunc(x, y)
print pointheights

ax.autoscale(False)
for i in range(0, len(dat_so2["xp"]), 1):
    plt.text(dat_so2["xp"][i], dat_so2["yp"][i], str(i), color="White", fontsize=16)
    # We can also add a height label to the plot
    plt.text(dat_so2["xp"][i], dat_so2["yp"][i], "{:4.1f}".format(pointheights[i]), color="black", fontsize=16, ha='right', va='top')

ax.set_xlim(225, 275)
ax.set_ylim(4200, 4260)
plt.show()
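Note that scipy.interpolate.interp2d has since been deprecated and removed in recent SciPy releases. A roughly equivalent lookup (a sketch, assuming the same xi, yi, height and dat_so2 arrays as above) can be done with RegularGridInterpolator:

from scipy.interpolate import RegularGridInterpolator

# height has shape (len(yi), len(xi)), so the grid axes are passed as (yi, xi)
hfunc = RegularGridInterpolator((yi, xi), height)

# query all sample points at once; note the (y, x) ordering of the query columns
pointheights = hfunc(np.column_stack([dat_so2["yp"], dat_so2["xp"]]))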

Opencv: detect mouse position clicking over a picture

I have this code in which I simply display an image using OpenCV:
import numpy as np
import cv2

class LoadImage:
    def loadImage(self):
        self.img = cv2.imread('photo.png')
        cv2.imshow('Test', self.img)
        self.pressedkey = cv2.waitKey(0)
        # Wait for ESC key to exit
        if self.pressedkey == 27:
            cv2.destroyAllWindows()

# Start of the main program here
if __name__ == "__main__":
    LI = LoadImage()
    LI.loadImage()
Once the window is displayed with the photo in it, I want to print to the console (terminal) the position of the mouse when I click on the picture. I have no idea how to do this. Any help, please?
Here is an example mouse callback function that captures the left-button double-click:
def draw_circle(event, x, y, flags, param):
    global mouseX, mouseY
    if event == cv2.EVENT_LBUTTONDBLCLK:
        cv2.circle(img, (x, y), 100, (255, 0, 0), -1)
        mouseX, mouseY = x, y
You then need to bind that function to a window that will capture the mouse click
img = np.zeros((512,512,3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle)
Then, in an infinite processing loop (or whatever you want):
while(1):
    cv2.imshow('image', img)
    k = cv2.waitKey(20) & 0xFF
    if k == 27:
        break
    elif k == ord('a'):
        print mouseX, mouseY
What Does This Code Do?
It stores the mouse position in the global variables mouseX and mouseY every time you double-click inside the black window that will be created.
elif k == ord('a'):
    print mouseX, mouseY
will print the currently stored mouse-click location every time you press the a key.
Code "Borrowed" from here.
Below is my implementation:
No need to store the click position, ONLY display it:
def onMouse(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        # draw circle here (etc...)
        print('x = %d, y = %d' % (x, y))

cv2.setMouseCallback('WindowName', onMouse)
If you want to use the positions elsewhere in your code, you can obtain the coordinates this way:
posList = []

def onMouse(event, x, y, flags, param):
    global posList
    if event == cv2.EVENT_LBUTTONDOWN:
        posList.append((x, y))

cv2.setMouseCallback('WindowName', onMouse)
posNp = np.array(posList)  # convert to NumPy for later use
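A minimal complete sketch of how these snippets fit together (the window name 'WindowName' and the image file name are assumptions, not from the original answer):

import cv2
import numpy as np

posList = []

def onMouse(event, x, y, flags, param):
    # record every left-button click
    if event == cv2.EVENT_LBUTTONDOWN:
        posList.append((x, y))

img = cv2.imread('photo.png')          # hypothetical image file
cv2.namedWindow('WindowName')
cv2.setMouseCallback('WindowName', onMouse)

while True:
    cv2.imshow('WindowName', img)
    if cv2.waitKey(20) & 0xFF == 27:   # ESC quits
        break

cv2.destroyAllWindows()
posNp = np.array(posList)              # collected click coordinates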
import cv2

def on_click(event, x, y, p1, p2):
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(lastImage, (x, y), 3, (255, 0, 0), -1)

cv2.namedWindow('image')
cv2.setMouseCallback('image', on_click)
cv2.imshow("image", img)
You can detect the mouse position by handling the various mouse click events over the picture.
Just remember one thing when handling mouse click events: you have to use the same window name everywhere you call cv2.imshow or cv2.namedWindow.
I have given working code that uses Python 3.x and OpenCV in the following Stack Overflow post:
https://stackoverflow.com/a/60445099/11493115
You can refer to that answer for a better explanation.
Code:
import cv2
import numpy as np

#This will display all the available mouse click events
events = [i for i in dir(cv2) if 'EVENT' in i]
print(events)

#This variable we use to store the pixel location
refPt = []

#click event function
def click_event(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        print(x, ",", y)
        refPt.append([x, y])
        font = cv2.FONT_HERSHEY_SIMPLEX
        strXY = str(x)+", "+str(y)
        cv2.putText(img, strXY, (x, y), font, 0.5, (255, 255, 0), 2)
        cv2.imshow("image", img)
    if event == cv2.EVENT_RBUTTONDOWN:
        blue = img[y, x, 0]
        green = img[y, x, 1]
        red = img[y, x, 2]
        font = cv2.FONT_HERSHEY_SIMPLEX
        strBGR = str(blue)+", "+str(green)+","+str(red)
        cv2.putText(img, strBGR, (x, y), font, 0.5, (0, 255, 255), 2)
        cv2.imshow("image", img)

#Here, you need to change the image name and its path according to your directory
img = cv2.imread("D:/pictures/abc.jpg")
cv2.imshow("image", img)

#calling the mouse click event
cv2.setMouseCallback("image", click_event)

cv2.waitKey(0)
cv2.destroyAllWindows()
Here is a class-based implementation of an OpenCV mouse callback for getting points on an image:
import cv2
import numpy as np

#events = [i for i in dir(cv2) if 'EVENT' in i]
#print (events)

class MousePts:
    def __init__(self, windowname, img):
        self.windowname = windowname
        self.img1 = img.copy()
        self.img = self.img1.copy()
        cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)
        cv2.imshow(windowname, img)
        self.curr_pt = []
        self.point = []

    def select_point(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.point.append([x, y])
            #print(self.point)
            cv2.circle(self.img, (x, y), 5, (0, 255, 0), -1)
        elif event == cv2.EVENT_MOUSEMOVE:
            self.curr_pt = [x, y]
            #print(self.point)

    def getpt(self, count=1, img=None):
        if img is not None:
            self.img = img
        else:
            self.img = self.img1.copy()
        cv2.namedWindow(self.windowname, cv2.WINDOW_NORMAL)
        cv2.imshow(self.windowname, self.img)
        cv2.setMouseCallback(self.windowname, self.select_point)
        self.point = []
        while(1):
            cv2.imshow(self.windowname, self.img)
            k = cv2.waitKey(20) & 0xFF
            if k == 27 or len(self.point) >= count:
                break
            #print(self.point)
        cv2.setMouseCallback(self.windowname, lambda *args: None)
        #cv2.destroyAllWindows()
        return self.point, self.img

if __name__ == '__main__':
    img = np.zeros((512, 512, 3), np.uint8)
    windowname = 'image'
    coordinateStore = MousePts(windowname, img)

    pts, img = coordinateStore.getpt(3)
    print(pts)

    pts, img = coordinateStore.getpt(3, img)
    print(pts)

    cv2.imshow(windowname, img)
    cv2.waitKey(0)
I have ported the PyIgnition library from Pygame to OpenCV2. Find the code at https://github.com/bunkahle/particlescv2
There are also several examples of how to use the particle engine in Python.
In case you want to get the coordinates by hovering over the image in Python 3, you could try this:
import numpy as np
import cv2 as cv
import os
import sys

# Reduce the size of image by this number to show completely in screen
descalingFactor = 2

# mouse callback function, which will print the coordinates in console
def print_coord(event, x, y, flags, param):
    if event == cv.EVENT_MOUSEMOVE:
        print(f'{x*descalingFactor, y*descalingFactor}\r', end="")

img = cv.imread(cv.samples.findFile('TestImage.png'))
imgheight, imgwidth = img.shape[:2]
resizedImg = cv.resize(img, (int(imgwidth/descalingFactor), int(imgheight/descalingFactor)), interpolation=cv.INTER_AREA)

cv.namedWindow('Get Coordinates')
cv.setMouseCallback('Get Coordinates', print_coord)
cv.imshow('Get Coordinates', resizedImg)
cv.waitKey(0)
If anyone wants a multi-process-based GUI for drawing points and dragging to move them, here is a single-file script for the same.