Is there a way in SwiftUI to track multiple gestures at once? I want my main view to be able to track several fingers dragging simultaneously.
ZStack {
    Color.black
        .edgesIgnoringSafeArea(.all)
        .gesture(DragGesture(minimumDistance: 0)
            .onChanged { value in
                // some logic
            }
            .onEnded { value in
                // more logic
            })
    // other code
}
I have this code, but only one drag gesture is ever processed at a time: if one finger is dragging and I try to add another, the first one stops.
I am trying to achieve an effect where multiple fingers are on the screen at once, each simultaneously dragging its own circle (one circle follows each finger).
I see simultaneous gestures in Apple's documentation, but that refers to having a single gesture trigger multiple blocks.
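For reference, what that documentation describes looks like the sketch below (mine, not from the original question): two gesture types recognized together on one view, which still does not expose individual finger positions.
// A sketch of SwiftUI's built-in simultaneity: a drag and a pinch recognized
// on the same view at the same time. Each gesture still reports one aggregate
// value; you never see per-finger locations.
Color.black
    .gesture(
        DragGesture()
            .onChanged { value in /* one drag value */ }
            .simultaneously(with:
                MagnificationGesture()
                    .onChanged { scale in /* one pinch value */ }
            )
    )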
The solution (How to detect a tap gesture location in SwiftUI?) that uses UIViewRepresentable would work, although it's not a pure SwiftUI solution (I've looked for a pure SwiftUI one but cannot find it and would be interested to see one). I've updated the code as below; you would need to change numberOfTouchPoints to 5 and then use the callback to get the values into your view.
struct TapView: UIViewRepresentable {
    var tappedCallback: (() -> Void)

    func makeUIView(context: UIViewRepresentableContext<TapView>) -> TapView.UIViewType {
        let v = UIView(frame: .zero)
        let gesture = NFingerGestureRecognizer(target: context.coordinator,
                                               action: #selector(Coordinator.tapped))
        v.addGestureRecognizer(gesture)
        return v
    }

    class Coordinator: NSObject {
        var tappedCallback: (() -> Void)

        init(tappedCallback: @escaping (() -> Void)) {
            self.tappedCallback = tappedCallback
        }

        @objc func tapped(gesture: NFingerGestureRecognizer) {
            for i in 0..<gesture.numberOfTouches {
                print(gesture.location(ofTouch: i, in: gesture.view))
            }
            self.tappedCallback()
        }
    }

    func makeCoordinator() -> TapView.Coordinator {
        return Coordinator(tappedCallback: self.tappedCallback)
    }

    func updateUIView(_ uiView: UIView,
                      context: UIViewRepresentableContext<TapView>) {
    }
}
class NFingerGestureRecognizer: UIGestureRecognizer {

    var numberOfTouchPoints: Int = 2

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent) {
        if self.state == .possible {
            if self.numberOfTouches == numberOfTouchPoints { // the expected multi-finger interaction is starting
                self.state = .began
                print("multi-finger drag - start")
            }
        } else { // check to see if there are more touches than expected
            if self.numberOfTouches > numberOfTouchPoints {
                self.state = .failed
                print("multi-finger drag - failed")
            }
        }
        //printInternalTouches()
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent) {
        if self.state != .possible {
            self.state = .changed // we are tracking the multi-finger interaction
            //printInternalTouches()
            print("changed")
        }
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent) {
        if numberOfTouches == numberOfTouchPoints {
            state = .ended
        } else {
            state = .failed
        }
        //printInternalTouches()
        print("ended")
    }

    private func printInternalTouches() {
        // print the internal touch locations - should be the better solution
        print("Internal Locations")
        for i in 0..<self.numberOfTouches {
            print(self.location(ofTouch: i, in: self.view))
        }
    }
}
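A minimal sketch of how the callback might be wired into a view (the counter state is illustrative, not part of the original answer):
// Hypothetical usage: TapView sits behind the content and bumps a counter
// each time the recognizer fires through Coordinator.tapped.
struct ContentView: View {
    @State private var touchCount = 0

    var body: some View {
        ZStack {
            Color.black.edgesIgnoringSafeArea(.all)
            TapView {
                touchCount += 1
            }
            Text("Touches: \(touchCount)")
                .foregroundColor(.white)
        }
    }
}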
I combined Paul's answer with the outdated example from Apple to create a way to detect as many touches as you like.
Update 3: (commented code is for debug output)
import Foundation
import UIKit

class NFingerGestureRecognizer: UIGestureRecognizer {

    var tappedCallback: (UITouch, CGPoint?) -> Void

    var touchViews = [UITouch: CGPoint]()

    init(target: Any?, tappedCallback: @escaping (UITouch, CGPoint?) -> ()) {
        self.tappedCallback = tappedCallback
        super.init(target: target, action: nil)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent) {
        for touch in touches {
            let location = touch.location(in: touch.view)
            // print("Start: (\(location.x)/\(location.y))")
            touchViews[touch] = location
            tappedCallback(touch, location)
        }
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent) {
        for touch in touches {
            let newLocation = touch.location(in: touch.view)
            // let oldLocation = touchViews[touch]!
            // print("Move: (\(oldLocation.x)/\(oldLocation.y)) -> (\(newLocation.x)/\(newLocation.y))")
            touchViews[touch] = newLocation
            tappedCallback(touch, newLocation)
        }
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent) {
        for touch in touches {
            // let oldLocation = touchViews[touch]!
            // print("End: (\(oldLocation.x)/\(oldLocation.y))")
            touchViews.removeValue(forKey: touch)
            tappedCallback(touch, nil)
        }
    }
}
Update:
You need to wrap this into a UIViewRepresentable to use it as a SwiftUI component:
import Foundation
import SwiftUI
struct TapView: UIViewRepresentable {

    var tappedCallback: (UITouch, CGPoint?) -> Void

    func makeUIView(context: UIViewRepresentableContext<TapView>) -> TapView.UIViewType {
        let v = UIView(frame: .zero)
        let gesture = NFingerGestureRecognizer(target: context.coordinator, tappedCallback: tappedCallback)
        v.addGestureRecognizer(gesture)
        return v
    }

    func updateUIView(_ uiView: UIView, context: UIViewRepresentableContext<TapView>) {
        // empty
    }
}
Update 2:
A little example of how to use TapView in SwiftUI:
var body: some View {
    guard let img = UIImage(named: "background.png") else {
        fatalError("Unable to load image")
    }

    return GeometryReader { geometry in
        ZStack {
            Image(uiImage: img)
                .resizable()
                .aspectRatio(geometry.size, contentMode: .fill)
                .edgesIgnoringSafeArea(.all)
            TapView { touch, optLocation in
                // touch can be used as a dictionary key.
                // optLocation is nil when the touch ends.
            }
        }
    }
}
I've implemented this solution and it works fine until there is another view within the same parent view as the one with the GeometryReader. When I add another view, all simultaneous touches after the first have incorrect X or Y values for optLocation.
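One hedged workaround (untested against that exact layout) is to report a location that does not depend on the representable's own frame, such as window coordinates, and convert it in your view:
// Inside the recognizer's touches handlers, passing nil (the window) instead
// of touch.view yields coordinates that do not shift when sibling views
// change the layout.
let windowLocation = touch.location(in: nil)
tappedCallback(touch, windowLocation)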
Because I need to know whether the user taps with a finger or a pencil, I had to create a UIViewRepresentable with a UITapGestureRecognizer. Everything works fine when this view is used with other SwiftUI views. But when I stack two of these views and clip the top one, the bottom one never catches any tap event.
Here is my custom view:
struct UIKitView: UIViewRepresentable {

    let color: UIColor

    func makeUIView(context: Context) -> UIView {
        let view = UIView()
        view.backgroundColor = color
        let tapGesture = UITapGestureRecognizer(target: context.coordinator, action: #selector(Coordinator.handleTap(gesture:)))
        view.addGestureRecognizer(tapGesture)
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) { }

    func makeCoordinator() -> Coordinator {
        Coordinator(color: color)
    }

    class Coordinator: NSObject {
        let color: UIColor

        init(color: UIColor) {
            self.color = color
        }

        @objc func handleTap(gesture: UITapGestureRecognizer) {
            print("Tapped in \(color == .yellow ? "yellow" : "red")")
        }
    }
}
And here how it is used in SwiftUI View:
ZStack {
    UIKitView(color: .yellow)
    UIKitView(color: .red)
        .clipShape(Circle())   // not required, just visual
        .contentShape(Circle())
}
The tap is always caught by the red view.
Then I overrode the hitTest() function of my UIViewRepresentable to check whether the tap is inside the clipping shape. The tap is then detected in the red circle, but nothing triggers anymore outside of it.
Any idea why?
You can override this method in CircleView:
override func point(inside point: CGPoint, with event: UIEvent?) -> Bool {
    let center = CGPoint(x: bounds.size.width / 2, y: bounds.size.height / 2)
    return pow(center.x - point.x, 2) + pow(center.y - point.y, 2) <= pow(bounds.size.width / 2, 2)
}
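For the override to take effect, it has to live in the UIView subclass that makeUIView returns; a sketch of the wiring (the subclass split is my assumption, not spelled out in the answer):
// Hypothetical wiring: the clipped red view uses a CircleView (a UIView
// subclass containing the point(inside:with:) override above), so taps
// outside the circle fall through to the yellow view underneath.
func makeUIView(context: Context) -> UIView {
    let view = CircleView()
    view.backgroundColor = color
    let tapGesture = UITapGestureRecognizer(target: context.coordinator,
                                            action: #selector(Coordinator.handleTap(gesture:)))
    view.addGestureRecognizer(tapGesture)
    return view
}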
FYI, there is also a mistake in your UIViewRepresentable; updateUIView needs to update the color:
func updateUIView(_ uiView: UIView, context: Context) {
    uiView.backgroundColor = color
}
I'm implementing a SwiftUI control over a UIView drawing surface, and often both are used at the same time. The problem is that starting with the SwiftUI control blocks the UIKit UITouch events. And yet, it works when you start with the UITouch first and then add the SwiftUI control.
Here is the example code:
import SwiftUI

struct ContentView: View {

    @GestureState private var touchXY: CGPoint = .zero

    var body: some View {
        ZStack {
            TouchRepresentable()
            Rectangle()
                .foregroundColor(.red)
                .frame(width: 128, height: 128)
                .gesture(DragGesture(minimumDistance: 0)
                    .updating($touchXY) { (value, touchXY, _) in touchXY = value.location })
                .onChange(of: touchXY) {
                    print(String(format: "SwiftUI(%.2g,%.2g)", $0.x, $0.y), terminator: " ")
                }
                //.allowsHitTesting(true) no difference
        }
    }
}

struct TouchRepresentable: UIViewRepresentable {
    typealias Context = UIViewRepresentableContext<TouchRepresentable>
    public func makeUIView(context: Context) -> TouchView { return TouchView() }
    public func updateUIView(_ uiView: TouchView, context: Context) {}
}

class TouchView: UIView, UIGestureRecognizerDelegate {

    func updateTouches(_ touches: Set<UITouch>, with event: UIEvent?) {
        for touch in touches {
            let location = touch.location(in: nil)
            print(String(format: "UIKit(%g,%g) ", round(location.x), round(location.y)), terminator: " ")
        }
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) { updateTouches(touches, with: event) }
    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) { updateTouches(touches, with: event) }
    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) { updateTouches(touches, with: event) }
    override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent?) { updateTouches(touches, with: event) }
}
Because this is multitouch, you need to test on a real device.
Sequence A:
finger 1 starts dragging outside of red square (UIKit)
finger 2 simultaneously drags inside of red square (SwiftUI)
=> Console shows both UIKit and SwiftUI touch positions
Sequence B:
finger 1 starts dragging inside of red square (SwiftUI)
finger 2 simultaneously drags outside of red square (UIKit)
=> Console shows only SwiftUI touch positions
I would like to get both positions in Sequence B.
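One hedged thing to try (a sketch, not verified against this exact setup) is attaching the SwiftUI drag with simultaneousGesture instead of gesture, which asks SwiftUI not to claim the touch sequence exclusively:
// Same red rectangle as above, but the drag is attached with
// .simultaneousGesture so it is recognized alongside other touch handling
// rather than blocking it.
Rectangle()
    .foregroundColor(.red)
    .frame(width: 128, height: 128)
    .simultaneousGesture(DragGesture(minimumDistance: 0)
        .updating($touchXY) { (value, touchXY, _) in touchXY = value.location })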
I am attempting to place an object (a view of a square) on the screen and then drag it immediately. What I have achieved is the following:
I can drag existing objects that are already on the screen.
I can place new objects on the screen, but they do not drag along immediately. I need to lift my finger, and then tap on them again in order to drag.
How can I achieve the functionality: Place object on screen and immediately start dragging? I am missing something here. My code:
ContentView with square placement logic:
struct ContentView: View {
    let screenSize = UIScreen.main.bounds

    @State private var squares: [SquareView] = []
    @State private var offsets = [CGSize](repeating: .zero, count: 300)

    var body: some View {
        GeometryReader { geo in
            ForEach(squares, id: \.self) { square in
                square
                    .position(x: square.startXLocation, y: square.startYLocation)
            }
            .ignoresSafeArea()
        }
        .onTouch(perform: updateLocation)
        .onAppear {
            for i in 0...2 {
                let xLocation = Double.random(in: 50...(screenSize.width - 150))
                let yLocation = Double.random(in: 50...(screenSize.height - 150))
                let square = SquareView(sideLength: 40, number: i, startXLocation: xLocation, startYLocation: yLocation)
                squares.append(square)
            }
        }
    }

    func updateLocation(_ location: CGPoint, type: TouchType) {
        var square = SquareView(sideLength: 50, number: Int.random(in: 20...99), startXLocation: location.x, startYLocation: location.y)
        if type == .started {
            squares.append(square)
            square.startXLocation = location.x
            square.startYLocation = location.y
        }
        if type == .moved {
            let newSquare = squares.last!
            newSquare.offset = CGSize(width: location.x - newSquare.startXLocation, height: location.y - newSquare.startYLocation)
        }
        if type == .ended {
            // Don't need to do anything here
        }
    }
}
The squares that I place, with the logic to drag them on the screen:
struct SquareView: View, Hashable {
    let colors: [Color] = [.green, .red, .blue, .yellow]
    let sideLength: Double
    let number: Int
    var startXLocation: Double
    var startYLocation: Double

    @State private var squareColor: Color = .yellow
    @State var startOffset: CGSize = .zero
    @State var offset: CGSize = .zero

    var body: some View {
        ZStack {
            Rectangle()
                .frame(width: sideLength, height: sideLength)
                .foregroundColor(squareColor)
                .onAppear {
                    squareColor = colors.randomElement()!
                }
            Text("\(number)")
        } // ZStack
        .offset(offset)
        .gesture(
            DragGesture()
                .onChanged { gesture in
                    offset.width = gesture.translation.width + startOffset.width
                    offset.height = gesture.translation.height + startOffset.height
                }
                .onEnded { value in
                    startOffset.width = value.location.x
                    startOffset.height = value.location.y
                }
        )
    }

    static func == (lhs: SquareView, rhs: SquareView) -> Bool {
        return lhs.number == rhs.number
    }

    func hash(into hasher: inout Hasher) {
        hasher.combine(number)
    }
}
The struct used to detect the touch location on the screen (not relevant for the actual question, but necessary to reconstruct the program). Adapted from code by Paul Hudson, hackingwithswift.com:
// The types of touches users want to be notified about
struct TouchType: OptionSet {
    let rawValue: Int

    static let started = TouchType(rawValue: 1 << 0)
    static let moved = TouchType(rawValue: 1 << 1)
    static let ended = TouchType(rawValue: 1 << 2)
    static let all: TouchType = [.started, .moved, .ended]
}
// Our UIKit to SwiftUI wrapper view
struct TouchLocatingView: UIViewRepresentable {
    // A closure to call when touch data has arrived
    var onUpdate: (CGPoint, TouchType) -> Void

    // The list of touch types to be notified of
    var types = TouchType.all

    // Whether touch reporting is limited to the view's bounds (set false to keep
    // receiving touches after the user's finger has left the view)
    var limitToBounds = true

    func makeUIView(context: Context) -> TouchLocatingUIView {
        // Create the underlying UIView, passing in our configuration
        let view = TouchLocatingUIView()
        view.onUpdate = onUpdate
        view.touchTypes = types
        view.limitToBounds = limitToBounds
        return view
    }

    func updateUIView(_ uiView: TouchLocatingUIView, context: Context) {
    }
    // The internal UIView responsible for catching taps
    class TouchLocatingUIView: UIView {
        // Internal copies of our settings
        var onUpdate: ((CGPoint, TouchType) -> Void)?
        var touchTypes: TouchType = .all
        var limitToBounds = true

        // Our main initializer, making sure interaction is enabled.
        override init(frame: CGRect) {
            super.init(frame: frame)
            isUserInteractionEnabled = true
        }

        // Just in case you're using storyboards!
        required init?(coder: NSCoder) {
            super.init(coder: coder)
            isUserInteractionEnabled = true
        }

        // Triggered when a touch starts.
        override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
            guard let touch = touches.first else { return }
            let location = touch.location(in: self)
            send(location, forEvent: .started)
        }

        // Triggered when an existing touch moves.
        override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
            guard let touch = touches.first else { return }
            let location = touch.location(in: self)
            send(location, forEvent: .moved)
        }

        // Triggered when the user lifts a finger.
        override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
            guard let touch = touches.first else { return }
            let location = touch.location(in: self)
            send(location, forEvent: .ended)
        }

        // Triggered when the user's touch is interrupted, e.g. by a low battery alert.
        override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent?) {
            guard let touch = touches.first else { return }
            let location = touch.location(in: self)
            send(location, forEvent: .ended)
        }

        // Send a touch location only if the user asked for it
        func send(_ location: CGPoint, forEvent event: TouchType) {
            guard touchTypes.contains(event) else {
                return
            }

            if limitToBounds == false || bounds.contains(location) {
                onUpdate?(CGPoint(x: round(location.x), y: round(location.y)), event)
            }
        }
    }
}
// A custom SwiftUI view modifier that overlays a view with our UIView subclass.
struct TouchLocater: ViewModifier {
    var type: TouchType = .all
    var limitToBounds = true
    let perform: (CGPoint, TouchType) -> Void

    func body(content: Content) -> some View {
        content
            .background(
                TouchLocatingView(onUpdate: perform, types: type, limitToBounds: limitToBounds)
            )
            // .overlay(
            //     TouchLocatingView(onUpdate: perform, types: type, limitToBounds: limitToBounds)
            // )
    }
}
// A new method on View that makes it easier to apply our touch locater view.
extension View {
    func onTouch(type: TouchType = .all, limitToBounds: Bool = true, perform: @escaping (CGPoint, TouchType) -> Void) -> some View {
        self.modifier(TouchLocater(type: type, limitToBounds: limitToBounds, perform: perform))
    }
}
// Finally, here's some example code you can try out.
struct ContentView1: View {
    var body: some View {
        VStack {
            Text("This will track all touches, inside bounds only.")
                .padding()
                .background(.red)
                .onTouch(perform: updateLocation)

            Text("This will track all touches, ignoring bounds – you can start a touch inside, then carry on moving it outside.")
                .padding()
                .background(.blue)
                .onTouch(limitToBounds: false, perform: updateLocation)

            Text("This will track only starting touches, inside bounds only.")
                .padding()
                .background(.green)
                .onTouch(type: .started, perform: updateLocation)
        }
    }

    func updateLocation(_ location: CGPoint, type: TouchType) {
        print(location, type)
    }
}
A possible approach is to handle both dragging and creation in the "area" (a background container), while the "item" views are just rendered where needed.
Find below a simplified demo (built with Xcode 13.2 / iOS 15.2); see also the comments in the code.
Note: tap detection on an already existing item is left as an exercise for you (a possible helper is sketched after the demo).
extension CGPoint: Identifiable { // just a helper for demo
    public var id: String { "\(x)-\(y)" }
}

struct TapAndDragDemo: View {
    @State private var points: [CGPoint] = []  // << persistent
    @State private var point: CGPoint?         // << current
    @GestureState private var dragState: CGSize = CGSize.zero

    var body: some View {
        Color.clear.overlay( // << area
            Group {
                ForEach(points) { // << stored `items`
                    Rectangle()
                        .frame(width: 24, height: 24)
                        .position(x: $0.x, y: $0.y)
                }
                if let curr = point { // << active `item`
                    Rectangle().fill(Color.red)
                        .frame(width: 24, height: 24)
                        .position(x: curr.x, y: curr.y)
                }
            }
        )
        .contentShape(Rectangle()) // << make area tappable
        .gesture(DragGesture(minimumDistance: 0.0)
            .updating($dragState) { drag, state, _ in
                state = drag.translation
            }
            .onChanged {
                point = $0.location // track drag current
            }
            .onEnded {
                points.append($0.location) // push to stored
                point = nil
            }
        )
    }
}
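A possible helper for that tap-detection exercise (hypothetical, sized to the demo's 24-point squares):
// Returns the index of a stored point whose 24x24 square contains the given
// location, if any; it could be called from onEnded before appending a new point.
func indexOfItem(at location: CGPoint) -> Int? {
    points.firstIndex { abs($0.x - location.x) <= 12 && abs($0.y - location.y) <= 12 }
}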
I am showing a camera preview view on a .sheet.
I need to detect when the user's finger touches the preview area and when the user's finger leaves the preview area.
The only solution I have found to do that was this:
CameraPreview()
    .gesture(DragGesture(minimumDistance: 0)
        .onChanged { _ in
            // finger touches
        }
        .onEnded { _ in
            // finger leaves
        })
but as this preview is being displayed on a sheet and sheets must be closed by dragging them from the top down, this drag gesture prevents the sheet drag gesture from working.
Any ideas?
NOTE: I do not need to detect dragging. I need to detect single tap on a view and then receive a callback when the finger leaves that view.
You need to use UIViewRepresentable for this. Here are the steps:
Create a custom version of AnyGestureRecognizer taken from here:
class AnyGestureRecognizer: UIGestureRecognizer {
    var onCallback: () -> Void
    var offCallback: () -> Void

    init(target: Any?, onCallback: @escaping () -> Void, offCallback: @escaping () -> Void) {
        self.onCallback = onCallback
        self.offCallback = offCallback
        super.init(target: target, action: nil)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent) {
        if let touchedView = touches.first?.view, touchedView is UIControl {
            state = .cancelled
            offCallback()
        } else if let touchedView = touches.first?.view as? UITextView, touchedView.isEditable {
            state = .cancelled
            offCallback()
        } else {
            state = .began
            onCallback()
        }
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent) {
        state = .ended
        offCallback()
    }

    override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent) {
        state = .cancelled
        offCallback()
    }
}
Create a custom UIViewRepresentable view and attach the AnyGestureRecognizer to it:
struct GestureView: UIViewRepresentable {
    var onCallback: () -> Void
    var offCallback: () -> Void

    func makeUIView(context: UIViewRepresentableContext<GestureView>) -> UIView {
        let view = UIView(frame: .zero)
        let gesture = AnyGestureRecognizer(
            target: context.coordinator,
            onCallback: onCallback,
            offCallback: offCallback
        )
        gesture.delegate = context.coordinator
        view.addGestureRecognizer(gesture)
        return view
    }

    func updateUIView(_ uiView: UIView, context: UIViewRepresentableContext<GestureView>) {}

    class Coordinator: NSObject {}

    func makeCoordinator() -> GestureView.Coordinator {
        Coordinator()
    }
}
extension GestureView.Coordinator: UIGestureRecognizerDelegate {
    func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool {
        true
    }
}
Use it as an overlay:
struct ContentView: View {
    var body: some View {
        Color.blue
            .sheet(isPresented: .constant(true)) {
                Color.red
                    .overlay(
                        GestureView(onCallback: { print("start") }, offCallback: { print("end") })
                    )
            }
    }
}
The title says it all: I can't get focus on a UIViewRepresentable in SwiftUI on tvOS. Any view that conforms to the View protocol can be focused without problems, but neither UIViewRepresentable nor UIViewControllerRepresentable can.
Any ideas?
This code works:
struct ContentView: View {
    var body: some View {
        Text("Hello, World!")
            .focusable()
            .onExitCommand {
                print("Menu button pressed")
            }
    }
}
This one does not:
struct ContentView: View {
    var body: some View {
        ViewRepresentable()
            .focusable()
            .onExitCommand {
                print("Menu button pressed")
            }
    }
}

struct ViewRepresentable: UIViewRepresentable {
    func makeUIView(context: Context) -> UIView {
        let view = UIView()
        view.backgroundColor = .red
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {
    }
}
Thanks!
You need to make a UIView subclass that overrides canBecomeFocused to return true. You probably also want to override didUpdateFocus(in:with:) if you want to respond to focus changes.
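A minimal sketch of just those two pieces (the type names are mine, not from the original answer):
// Minimal focusable UIView for tvOS: focus is opt-in via canBecomeFocused.
class FocusableUIView: UIView {
    override var canBecomeFocused: Bool { true }

    override func didUpdateFocus(in context: UIFocusUpdateContext, with coordinator: UIFocusAnimationCoordinator) {
        // React to gaining or losing focus, e.g. by changing appearance.
        backgroundColor = isFocused ? .white : .red
    }
}

struct FocusableRepresentable: UIViewRepresentable {
    func makeUIView(context: Context) -> FocusableUIView { FocusableUIView() }
    func updateUIView(_ uiView: FocusableUIView, context: Context) {}
}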
For reference, here is a utility class I made to let me have focusable UIViews and respond to remote swipes and clicks.
import UIKit

@available(iOS, unavailable)
@available(macCatalyst, unavailable)
class UserInputView: UIView {

    weak var pressDelegate: PressableDelegate?
    weak var touchDelegate: TouchableDelegate?

    var touchStarted: CGFloat = 0
    var lastTouch: CGFloat = 0

    override init(frame: CGRect) {
        super.init(frame: frame)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        touchStarted = touches.first?.location(in: nil).x ?? 0
        lastTouch = touchStarted
        touchDelegate?.touchDown()
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
        let thisTouch = touches.first?.location(in: nil).x ?? 0
        touchDelegate?.touchMove(amount: Double((thisTouch - lastTouch) / (touches.first?.window?.frame.width ?? 1920)))
        lastTouch = touches.first?.location(in: nil).x ?? 0
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
        touchDelegate?.touchUp()
    }

    override func pressesBegan(_ presses: Set<UIPress>, with event: UIPressesEvent?) {
        if isClick(presses) {
            pressDelegate?.press(pressed: true)
        } else {
            superview?.pressesBegan(presses, with: event)
        }
    }

    override func pressesEnded(_ presses: Set<UIPress>, with event: UIPressesEvent?) {
        if isClick(presses) {
            pressDelegate?.press(pressed: false)
            pressDelegate?.clicked()
        } else {
            superview?.pressesEnded(presses, with: event)
        }
    }

    private func isClick(_ presses: Set<UIPress>?) -> Bool {
        return presses?.map({ $0.type }).contains(.select) ?? false
    }

    override func didUpdateFocus(in context: UIFocusUpdateContext, with coordinator: UIFocusAnimationCoordinator) {
        pressDelegate?.focus(focused: isFocused)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override var canBecomeFocused: Bool {
        return true
    }
}
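The two delegate protocols are not shown in the answer; inferred from the call sites above, they might look like this:
// Hypothetical reconstructions based only on how UserInputView calls them.
protocol PressableDelegate: AnyObject {
    func press(pressed: Bool)
    func clicked()
    func focus(focused: Bool)
}

protocol TouchableDelegate: AnyObject {
    func touchDown()
    func touchMove(amount: Double)
    func touchUp()
}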