Pygame - rendering multiline text using Sprites - python-2.7

My struggle is that I can render a single line of text with Sprites in Pygame, and I can also render multi-line text without Sprites, but I can't figure out how to render multi-line text using Sprites.
I've got this for rendering single-line text using Sprites (I am going to skip the basic Pygame boilerplate - background, init, etc.):
class Score(pygame.sprite.Sprite):
    ...
    def update(self, lives, score):
        ...
        self.text = "LIVES: %d SCORE: %d" % (lives, score)
        self.image = self.font.render(self.text, True, self.color)
        self.rect = self.image.get_rect()
        ...

def main():
    ...
    score = pygame.sprite.RenderUpdates()
    score.add(Score())
    ...
    while 1:
        ...
        score.clear(screen, background)
        score_list = score.draw(screen)
        pygame.display.update(score_list)
        score.update(lives, score)
        ...
I just want to know whether it's even possible to do this with my render method, or whether I should focus on doing it some other way.
And maybe one related question: is this method the right way to render objects (images) in Pygame?
Thank you for any help.

You could render the strings with FONT.render first, then create a transparent surface and blit the separate text surfaces onto it. To figure out the width of the transparent base image, you can use the .get_width() method of the separate text surfaces and for the height you can use FONT.get_height().
import pygame as pg

pg.init()
screen = pg.display.set_mode((640, 480))
clock = pg.time.Clock()
FONT = pg.font.Font(None, 40)
BLUE = pg.Color('dodgerblue1')


class Score(pg.sprite.Sprite):

    def __init__(self, pos):
        super(Score, self).__init__()
        self.lives = 3
        self.score = 0
        self.rect = pg.Rect(pos, (1, 1))
        self.update_image()

    def update_image(self):
        height = FONT.get_height()
        # Put the rendered text surfaces into this list.
        text_surfaces = []
        for txt in ('lives {}'.format(self.lives),
                    'score {}'.format(self.score)):
            text_surfaces.append(FONT.render(txt, True, BLUE))
        # The width of the widest surface.
        width = max(txt_surf.get_width() for txt_surf in text_surfaces)
        # A transparent surface onto which we blit the text surfaces.
        self.image = pg.Surface((width, height*2), pg.SRCALPHA)
        for y, txt_surf in enumerate(text_surfaces):
            self.image.blit(txt_surf, (0, y*height))
        # Get a new rect (if you maybe want to make the text clickable).
        self.rect = self.image.get_rect(topleft=self.rect.topleft)


def main():
    score = Score((100, 100))
    all_sprites = pg.sprite.Group(score)
    done = False
    while not done:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                done = True
            elif event.type == pg.KEYDOWN:
                if event.key == pg.K_s:
                    # Increment the score and update the image.
                    # It would be nicer to turn the score into a
                    # property and update the image automatically.
                    score.score += 1
                    score.update_image()
        all_sprites.update()
        screen.fill((30, 30, 30))
        all_sprites.draw(screen)
        pg.display.flip()
        clock.tick(30)


if __name__ == '__main__':
    main()
    pg.quit()

Related

Mouse click results in unexpected behavior using wxPython & wx.Panel

I am writing an application in which the following steps are intended to be executed:
1 - user clicks in the blue area (a wx.Panel) and a white circle appears;
2 - user clicks the Select Start button;
3 - user clicks in the white circle and it changes to green (by drawing a green circle over the white one).
The problem is that when step 3 is executed, the green circle does not appear where the mouse is clicked. For example, clicking in the blue area registers a point at (223, 486). After clicking the Select Start button and then clicking in the white circle, a point registers at (211, 464), and a green circle appears outside the white circle. I don't understand why this is happening and would appreciate any help in resolving the problem.
Using Python 2.7 with wxPython 3.0.3.0 on macOS.
import wx


class Test(wx.Frame):

    def __init__(self, parent, title):
        super(Test, self).__init__(parent, title=title, size=(820, 900))
        self.startSelected = False
        self.radius = 10
        self.panel = wx.Panel(self, size=(800, 800))
        self.panel.SetBackgroundColour(wx.Colour(0, 0, 200))
        self.panel.Bind(wx.EVT_LEFT_DOWN, self.onMouseDown)
        self.gbs = wx.GridBagSizer(0, 0)
        self.startBtn = wx.Button(self, wx.ID_ANY, label='Select Start')
        self.startBtn.Bind(wx.EVT_BUTTON, self.selectStart)
        self.gbs.Add(self.panel, span=(1, 4), pos=(1, 1), flag=wx.EXPAND | wx.ALL)
        self.gbs.Add(self.startBtn, pos=(2, 1))
        self.SetSizer(self.gbs)

    def onMouseDown(self, e):
        pt = e.GetPosition()
        print pt.x, pt.y
        if e.LeftDown():
            if self.startSelected:
                color = wx.Colour(0, 255, 0)
                self.paint(pt, color)
            else:
                color = wx.Colour(255, 255, 255)
                self.paint(pt, color)

    def paint(self, pt, color):
        dc = wx.ClientDC(self)
        b = wx.Brush(color)
        dc.SetBrush(b)
        dc.DrawCircle(pt.x, pt.y, self.radius)

    def selectStart(self, e):
        self.startSelected = True


if __name__ == '__main__':
    app = wx.App()
    Test(None, 'Test')
    app.MainLoop()
A wx.ClientDC is relative to the window passed to it. As coded in the paint method, dc = wx.ClientDC(self) uses self, which is the wx.Frame. Since the drawing is done on the wx.Panel, dc = wx.ClientDC(self.panel) should be used instead. This results in the expected behavior.
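For reference, a minimal sketch of the corrected paint method (only the target of the device context changes; the rest of the class stays the same):
    def paint(self, pt, color):
        # Create the DC on the panel so the panel-relative coordinates
        # reported by its mouse event line up with where the circle is drawn.
        dc = wx.ClientDC(self.panel)
        b = wx.Brush(color)
        dc.SetBrush(b)
        dc.DrawCircle(pt.x, pt.y, self.radius)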

Each blitted image in the list disappears instead of remaining on screen. I want to fill up the whole screen with images as they are blitted at random locations.

import pygame
import random

pygame.init()
screen = pygame.display.set_mode((476, 800))
clock = pygame.time.Clock()
win = pygame.display.set_mode((500, 600))
pygame.display.set_caption("DOGGO GAME")
dogimg = pygame.image.load('dog.png')

# doglist will contain the image rects so that multiple images/entities
# will fill the screen.
doglist = []
dogx = []
dogy = []

# BLOT triggers the creation of an element (a dog image rect).
BLOT = pygame.USEREVENT
pygame.time.set_timer(BLOT, 500)

# drawdog blits the images in the doglist.
def drawdog(doglist):
    for dog in doglist:
        screen.blit(dogimg, new_dog)

run = True
while run:
    #pygame.time.delay(50)
    win.fill((0, 0, 0))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if event.type == BLOT:
            dogimg = pygame.image.load('dog.png')
            dogranx = random.randint(50, 200)
            dograny = random.randint(50, 450)
            new_dog = dogimg.get_rect(midtop=(dogranx, dograny))
            doglist.append(new_dog)
            print(doglist)
    drawdog(doglist)
    pygame.display.update()
    clock.tick(1)

pygame.quit()
I was able to correct it by replacing:
    for dog in doglist:
with
    for new_dog in doglist:
In the former version I was also using extend; I changed it to append, used the latter loop variable, and it worked.
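For what it's worth, a minimal sketch of an equivalent fix that keeps the original loop variable name and simply blits each stored rect, instead of reusing the outer new_dog variable (assuming the same screen, dogimg and doglist names as above):
def drawdog(doglist):
    # Blit the dog image at every rect collected so far.
    for dog in doglist:
        screen.blit(dogimg, dog)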

OpenCV: detect mouse position when clicking over a picture

I have this code in which I simply display an image using OpenCV:
import numpy as np
import cv2


class LoadImage:
    def loadImage(self):
        self.img = cv2.imread('photo.png')
        cv2.imshow('Test', self.img)
        self.pressedkey = cv2.waitKey(0)
        # Wait for ESC key to exit
        if self.pressedkey == 27:
            cv2.destroyAllWindows()


# Start of the main program here
if __name__ == "__main__":
    LI = LoadImage()
    LI.loadImage()
Once the window is displayed with the photo in it, I want to print to the console (terminal) the position of the mouse when I click on the picture. I have no idea how to do this. Any help please?
Here is an example mouse callback function that captures a left-button double-click:
def draw_circle(event, x, y, flags, param):
    global mouseX, mouseY
    if event == cv2.EVENT_LBUTTONDBLCLK:
        cv2.circle(img, (x, y), 100, (255, 0, 0), -1)
        mouseX, mouseY = x, y
You then need to bind that function to a window that will capture the mouse clicks:
img = np.zeros((512,512,3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle)
Then, in an infinite processing loop (or whatever you want):
while(1):
    cv2.imshow('image', img)
    k = cv2.waitKey(20) & 0xFF
    if k == 27:
        break
    elif k == ord('a'):
        print mouseX, mouseY
What Does This Code Do?
It stores the mouse position in the global variables mouseX and mouseY every time you double-click inside the black window that will be created.
elif k == ord('a'):
    print mouseX, mouseY
will print the currently stored mouse click location every time you press the a key.
Code "borrowed" from here.
Below is my implementation.
If you don't need to store the click position and only want to display it:
def onMouse(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        # draw circle here (etc...)
        print('x = %d, y = %d' % (x, y))

cv2.setMouseCallback('WindowName', onMouse)
If you want to use the positions in other places in your code, you can obtain the coordinates in the following way:
posList = []

def onMouse(event, x, y, flags, param):
    global posList
    if event == cv2.EVENT_LBUTTONDOWN:
        posList.append((x, y))

cv2.setMouseCallback('WindowName', onMouse)

posNp = np.array(posList)  # convert to NumPy for later use
import cv2

# lastImage is assumed to be the image currently being displayed.
def on_click(event, x, y, p1, p2):
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(lastImage, (x, y), 3, (255, 0, 0), -1)

cv2.namedWindow('image')
cv2.setMouseCallback('image', on_click)
cv2.imshow('image', img)
You can detect the mouse position when clicking over a picture by handling the various mouse click events.
The one thing to remember while handling mouse click events is that you have to use the same window name everywhere you call cv2.imshow or cv2.namedWindow.
I have given working code that uses Python 3.x and OpenCV in the following Stack Overflow post:
https://stackoverflow.com/a/60445099/11493115
You can refer to the above link for a better explanation.
Code:
import cv2
import numpy as np

# This will display all the available mouse click events
events = [i for i in dir(cv2) if 'EVENT' in i]
print(events)

# This variable is used to store the pixel locations
refPt = []

# click event function
def click_event(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        print(x, ",", y)
        refPt.append([x, y])
        font = cv2.FONT_HERSHEY_SIMPLEX
        strXY = str(x) + ", " + str(y)
        cv2.putText(img, strXY, (x, y), font, 0.5, (255, 255, 0), 2)
        cv2.imshow("image", img)
    if event == cv2.EVENT_RBUTTONDOWN:
        blue = img[y, x, 0]
        green = img[y, x, 1]
        red = img[y, x, 2]
        font = cv2.FONT_HERSHEY_SIMPLEX
        strBGR = str(blue) + ", " + str(green) + "," + str(red)
        cv2.putText(img, strBGR, (x, y), font, 0.5, (0, 255, 255), 2)
        cv2.imshow("image", img)

# Here, you need to change the image name and its path according to your directory
img = cv2.imread("D:/pictures/abc.jpg")
cv2.imshow("image", img)

# calling the mouse click event
cv2.setMouseCallback("image", click_event)

cv2.waitKey(0)
cv2.destroyAllWindows()
Here is a class-based implementation of an OpenCV mouse callback for getting points on an image:
import cv2
import numpy as np

#events = [i for i in dir(cv2) if 'EVENT' in i]
#print (events)


class MousePts:
    def __init__(self, windowname, img):
        self.windowname = windowname
        self.img1 = img.copy()
        self.img = self.img1.copy()
        cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)
        cv2.imshow(windowname, img)
        self.curr_pt = []
        self.point = []

    def select_point(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.point.append([x, y])
            #print(self.point)
            cv2.circle(self.img, (x, y), 5, (0, 255, 0), -1)
        elif event == cv2.EVENT_MOUSEMOVE:
            self.curr_pt = [x, y]
            #print(self.point)

    def getpt(self, count=1, img=None):
        if img is not None:
            self.img = img
        else:
            self.img = self.img1.copy()
        cv2.namedWindow(self.windowname, cv2.WINDOW_NORMAL)
        cv2.imshow(self.windowname, self.img)
        cv2.setMouseCallback(self.windowname, self.select_point)
        self.point = []
        while(1):
            cv2.imshow(self.windowname, self.img)
            k = cv2.waitKey(20) & 0xFF
            if k == 27 or len(self.point) >= count:
                break
            #print(self.point)
        cv2.setMouseCallback(self.windowname, lambda *args: None)
        #cv2.destroyAllWindows()
        return self.point, self.img


if __name__ == '__main__':
    img = np.zeros((512, 512, 3), np.uint8)
    windowname = 'image'
    coordinateStore = MousePts(windowname, img)

    pts, img = coordinateStore.getpt(3)
    print(pts)

    pts, img = coordinateStore.getpt(3, img)
    print(pts)

    cv2.imshow(windowname, img)
    cv2.waitKey(0)
I have ported the PyIgnition library from Pygame to OpenCV2. Find the code at https://github.com/bunkahle/particlescv2
There are also several examples of how to use the particle engine with Python.
In case you want to get the coordinates by hovering over the image in Python 3, you could try this:
import numpy as np
import cv2 as cv
import os
import sys

# Reduce the size of the image by this number to show it completely on screen
descalingFactor = 2

# mouse callback function, which will print the coordinates to the console
def print_coord(event, x, y, flags, param):
    if event == cv.EVENT_MOUSEMOVE:
        print(f'{x*descalingFactor, y*descalingFactor}\r', end="")

img = cv.imread(cv.samples.findFile('TestImage.png'))
imgheight, imgwidth = img.shape[:2]
resizedImg = cv.resize(img, (int(imgwidth/descalingFactor), int(imgheight/descalingFactor)),
                       interpolation=cv.INTER_AREA)

cv.namedWindow('Get Coordinates')
cv.setMouseCallback('Get Coordinates', print_coord)
cv.imshow('Get Coordinates', resizedImg)
cv.waitKey(0)
If anyone wants a multi-process-based GUI for drawing points and dragging to move them, here is a single-file script for the same.

Draggable point in matplotlib - How to retrieve point coordinates

I would like to be able to move a point (a matplotlib.patches.Ellipse) along a vertical line. To do so, I'm calling the Draggable Rectangle class (http://matplotlib.org/1.3.1/users/event_handling.html) inside a PySide QtGui.QWidget.
The dragging of my ellipse works fine, but I'm stuck on how to retrieve the final y-coordinate of the ellipse when I release the mouse button (I want to have a QLabel next to the plot that contains this y-coordinate). From on_release I have its final position:
def on_release(self, event):
    if DraggablePoint.lock is not self:
        return
    self.point.newYcoordinate = self.point.center[1]
    self.press = None
    DraggablePoint.lock = None
    # draw everything but the selected point and store the pixel buffer
    canvas = self.point.figure.canvas
    axes = self.point.axes
    self.point.set_animated(True)
    canvas.draw()
    self.background = canvas.copy_from_bbox(self.point.axes.bbox)
    # now redraw just the point
    axes.draw_artist(self.point)
    # and blit just the redrawn area
    canvas.blit(axes.bbox)
But how can I connect this to the text of my QLabel (and ensure that the label is updated each time the ellipse is moved)? The ellipse and its draggable wrapper are created like this:
self.FlexibleValue = patches.Ellipse(xy=(0.5, 0.5), width=0.2, height=0.2, edgecolor='r', facecolor='r', lw=2)
self.axesResp.add_patch(self.FlexibleValue)
self.dragValue = DraggablePoint(self.FlexibleValue)
self.dragValue.connect()
This was solved by adding a signal, as tcaswell suggested:
def on_release(self, event):
    ...
    self.signal.updatedSignal.emit(str(self.point.newYcoordinate))
and then retrieving the y-coordinate:
self.dragValue.signal.updatedSignal.connect(self.updatedValue)
with:
def updatedValue(self, newValue):
    self.labelValue.setText(newValue)
and:
class UpdatedSignal(QtCore.QObject):
    updatedSignal = QtCore.Signal(str)
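For completeness, a minimal sketch of where that signal object could live. This assumes the DraggablePoint class from the matplotlib event-handling example, and the signal attribute name is an assumption rather than part of the original code:
class DraggablePoint(object):
    lock = None  # only one point can be animated at a time

    def __init__(self, point):
        self.point = point
        self.press = None
        self.background = None
        # Hypothetical wiring: a QObject carrying the Qt signal, so that
        # on_release can emit the new y-coordinate to any connected slot.
        self.signal = UpdatedSignal()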

Aligning Label in Frame Tkinter

I am new to Python and even newer to Tkinter.
I am currently practicing how to use Frames and Labels, and the problem I am encountering is that when I put Labels on a frame with some buttons next to each label, the alignment is not good to look at.
Here is the code:
from Tkinter import *


class GUI():
    def __init__(self):
        self.namelist = ["Mark", "Anna", "Jason", "Lenna", "Leo", "Zucharich",
                         "Robinson", "AReallyLongNameThatMightExist"]
        self.canvas = Canvas(width=1200, height=700)
        self.canvas.pack(expand=YES, fill=BOTH)

    def Friends(self):
        controlframe = Frame(self.canvas)
        controlframe.place(x=600, y=300)
        # Frame for showing names of friends
        for x in self.namelist:
            frame = Frame(controlframe)
            frame.pack()
            Name = Label(frame, text="%s " % x).pack(side=LEFT)
            chatButton = Button(frame, text="Chat").pack(side=LEFT)
            delButton = Button(frame, text="Delete").pack(side=LEFT)
            setcloseButton = Button(frame, text="Set Close").pack(side=LEFT)
            setgroupButton = Button(frame, text="Set Group").pack(side=LEFT)
        mainloop()


GUI = GUI()
GUI.Friends()
What should I do so that the alignment of each Label (the name) and its buttons matches the other rows, so that they form a rectangle and not a zigzag?
It is almost always better in Tk to use the grid geometry manager. It is much more flexible once you come to understand how it works. Converting your example to use grid solves your problem, as shown below, but you should experiment with it a bit. Try removing the sticky="W" from the label, for instance, and see how the centering of the widgets within a row or column can be controlled. To get your frame responding to resizes sensibly, you should also investigate the columnconfigure and rowconfigure options of the grid geometry manager (a minimal sketch follows the code below).
from Tkinter import *


class GUI():
    def __init__(self):
        self.namelist = ["Mark", "Anna", "Jason", "Lenna",
                         "Leo", "Zucharich", "Robinson",
                         "AReallyLongNameThatMightExist"]
        self.canvas = Canvas(width=1200, height=700)
        self.canvas.pack(expand=YES, fill=BOTH)

    def Friends(self):
        frame = Frame(self.canvas)
        frame.place(x=600, y=300)
        # Frame for showing names of friends
        row = 0
        for x in self.namelist:
            label = Label(frame, text="%s " % x)
            chatButton = Button(frame, text="Chat")
            delButton = Button(frame, text="Delete")
            setcloseButton = Button(frame, text="Set Close")
            setgroupButton = Button(frame, text="Set Group")
            label.grid(row=row, column=0, sticky="W")
            chatButton.grid(row=row, column=1)
            delButton.grid(row=row, column=2)
            setcloseButton.grid(row=row, column=3)
            setgroupButton.grid(row=row, column=4)
            row = row + 1
        mainloop()


GUI = GUI()
GUI.Friends()
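As a rough illustration of the columnconfigure/rowconfigure hint above, a minimal sketch that could go inside Friends() just before mainloop(); the weight values are an assumption, so adjust them to taste:
        # Let the name column absorb extra horizontal space and let
        # every row grow evenly when the frame is resized.
        frame.columnconfigure(0, weight=1)
        for r in range(len(self.namelist)):
            frame.rowconfigure(r, weight=1)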