Gesture interfering with sheet drag - SwiftUI

I am showing a camera preview view on a .sheet.
I need to detect when the user's finger touches the preview area and when the user's finger leaves the preview area.
The only solution I have found is this:
CameraPreview()
    .gesture(DragGesture(minimumDistance: 0)
        .onChanged { _ in
            // finger touches
        }
        .onEnded { _ in
            // finger leaves
        })
However, the preview is displayed on a sheet, and sheets are dismissed by dragging down from the top, so this drag gesture prevents the sheet's own drag gesture from working.
Any ideas?
NOTE: I do not need to detect dragging. I need to detect a single touch on a view and then receive a callback when the finger leaves that view.

You need to use UIViewRepresentable for this. Here are the steps:
Create a custom version of AnyGestureRecognizer taken from here:
class AnyGestureRecognizer: UIGestureRecognizer {
    var onCallback: () -> Void
    var offCallback: () -> Void

    init(target: Any?, onCallback: @escaping () -> Void, offCallback: @escaping () -> Void) {
        self.onCallback = onCallback
        self.offCallback = offCallback
        super.init(target: target, action: nil)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent) {
        if let touchedView = touches.first?.view, touchedView is UIControl {
            state = .cancelled
            offCallback()
        } else if let touchedView = touches.first?.view as? UITextView, touchedView.isEditable {
            state = .cancelled
            offCallback()
        } else {
            state = .began
            onCallback()
        }
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent) {
        state = .ended
        offCallback()
    }

    override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent) {
        state = .cancelled
        offCallback()
    }
}
Create a custom UIViewRepresentable view and attach the AnyGestureRecognizer to it:
struct GestureView: UIViewRepresentable {
    var onCallback: () -> Void
    var offCallback: () -> Void

    func makeUIView(context: UIViewRepresentableContext<GestureView>) -> UIView {
        let view = UIView(frame: .zero)
        let gesture = AnyGestureRecognizer(
            target: context.coordinator,
            onCallback: onCallback,
            offCallback: offCallback
        )
        gesture.delegate = context.coordinator
        view.addGestureRecognizer(gesture)
        return view
    }

    func updateUIView(_ uiView: UIView, context: UIViewRepresentableContext<GestureView>) {}

    class Coordinator: NSObject {}

    func makeCoordinator() -> GestureView.Coordinator {
        Coordinator()
    }
}

extension GestureView.Coordinator: UIGestureRecognizerDelegate {
    func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool {
        true
    }
}
Because the delegate allows simultaneous recognition, the sheet's own drag-to-dismiss gesture keeps working. Use it as an overlay:
struct ContentView: View {
    var body: some View {
        Color.blue
            .sheet(isPresented: .constant(true)) {
                Color.red
                    .overlay(
                        GestureView(onCallback: { print("start") }, offCallback: { print("end") })
                    )
            }
    }
}
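For the original camera use case, the same overlay goes on the preview itself. A minimal sketch, assuming CameraPreview is the asker's own view:

CameraPreview()
    .overlay(
        GestureView(
            onCallback: { /* finger touched the preview */ },
            offCallback: { /* finger left the preview */ }
        )
    )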

Related

SwiftUI: In SplitView, how can I detect if the master view is visible?

When SwiftUI creates a SplitView, it adds a toolbar button that hides/shows the Master view. How can I detect this change so that I can resize the font in the detail screen and use all the space optimally?
I've tried using .onChange with geometry but can't seem to get that to work.
If you're using iOS 16, you can use NavigationSplitView with NavigationSplitViewVisibility.
Example:
struct MySplitView: View {
    @State private var columnVisibility: NavigationSplitViewVisibility = .all

    var bothAreShown: Bool { columnVisibility != .detailOnly }

    var body: some View {
        NavigationSplitView(columnVisibility: $columnVisibility) {
            Text("Master Column")
        } detail: {
            Text("Detail Column")
            Text(bothAreShown ? "Both are shown" : "Just detail shown")
        }
    }
}
After tinkering with this for a while, I arrived at this solution:
struct ContentView: View {
    @State var isOpen = true

    var body: some View {
        NavigationView {
            VStack {
                Text("Primary")
                    .onUIKitAppear {
                        isOpen.toggle()
                    }
                    .onAppear {
                        print("hello")
                        isOpen.toggle()
                    }
                    .onDisappear {
                        isOpen.toggle()
                        print("hello: bye")
                    }
                    .navigationTitle("options")
            }
            Text("Secondary").font(isOpen ? .body : .title)
        }
        .navigationViewStyle(.columns)
    }
}
onUIKitAppear is a custom extension suggested by Apple that runs its action only once the view has been presented to the user: https://developer.apple.com/forums/thread/655338?page=2
struct UIKitAppear: UIViewControllerRepresentable {
    let action: () -> Void

    func makeUIViewController(context: Context) -> UIAppearViewController {
        let vc = UIAppearViewController()
        vc.delegate = context.coordinator
        return vc
    }

    func makeCoordinator() -> Coordinator {
        Coordinator(action: self.action)
    }

    func updateUIViewController(_ controller: UIAppearViewController, context: Context) {}

    class Coordinator: ActionRepresentable {
        var action: () -> Void

        init(action: @escaping () -> Void) {
            self.action = action
        }

        func remoteAction() {
            action()
        }
    }
}

protocol ActionRepresentable: AnyObject {
    func remoteAction()
}

class UIAppearViewController: UIViewController {
    weak var delegate: ActionRepresentable?
    var savedView: UIView?

    override func viewDidLoad() {
        super.viewDidLoad()
        self.savedView = UILabel()
        if let _view = self.savedView {
            view.addSubview(_view)
        }
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        delegate?.remoteAction()
    }

    override func viewDidDisappear(_ animated: Bool) {
        super.viewDidDisappear(animated)
        view.removeFromSuperview()
        savedView?.removeFromSuperview()
    }
}

public extension View {
    func onUIKitAppear(_ perform: @escaping () -> Void) -> some View {
        self.background(UIKitAppear(action: perform))
    }
}

UISearchController how to activate from SwiftUI

I found an integration of UISearchController in SwiftUI, but I don't know how to make it become active.
I found this:
I want the search bar to become active when a Bool changes in the SwiftUI view, with an @State for example.
If I add a Binding to the view modifier and set the isActive property of the searchController in
ViewControllerResolver { viewController in
    viewController.navigationItem.searchController = self.searchBar.searchController
    viewController.navigationItem.hidesSearchBarWhenScrolling = false
}
then it doesn't become active.
I'm not really familiar with UIKit; perhaps somebody knows how to correctly activate the search bar so that one can start typing a search.
class SearchBar: NSObject, ObservableObject {
    @Published var text: String = ""
    let searchController: UISearchController = UISearchController(searchResultsController: nil)

    override init() {
        super.init()
        self.searchController.obscuresBackgroundDuringPresentation = false
        self.searchController.searchResultsUpdater = self
    }
}

extension SearchBar: UISearchResultsUpdating {
    func updateSearchResults(for searchController: UISearchController) {
        // Publish search bar text changes.
        if let searchBarText = searchController.searchBar.text {
            self.text = searchBarText
        }
    }
}
final class ViewControllerResolver: UIViewControllerRepresentable {
    let onResolve: (UIViewController) -> Void

    init(onResolve: @escaping (UIViewController) -> Void) {
        self.onResolve = onResolve
    }

    func makeUIViewController(context: Context) -> ParentResolverViewController {
        ParentResolverViewController(onResolve: onResolve)
    }

    func updateUIViewController(_ uiViewController: ParentResolverViewController, context: Context) {
    }
}

class ParentResolverViewController: UIViewController {
    let onResolve: (UIViewController) -> Void

    init(onResolve: @escaping (UIViewController) -> Void) {
        self.onResolve = onResolve
        super.init(nibName: nil, bundle: nil)
    }

    required init?(coder: NSCoder) {
        fatalError("Use init(onResolve:) to instantiate ParentResolverViewController.")
    }

    override func didMove(toParent parent: UIViewController?) {
        super.didMove(toParent: parent)
        if let parent = parent {
            onResolve(parent)
        }
    }
}

struct SearchBarModifier: ViewModifier {
    let searchBar: SearchBar

    func body(content: Content) -> some View {
        content
            .overlay(
                ViewControllerResolver { viewController in
                    viewController.navigationItem.searchController = self.searchBar.searchController
                    viewController.navigationItem.hidesSearchBarWhenScrolling = false
                }
                .frame(width: 0, height: 0)
            )
    }
}

extension View {
    func add(_ searchBar: SearchBar) -> some View {
        return self.modifier(SearchBarModifier(searchBar: searchBar))
    }
}
To activate a UISearchBar (which is what you're using), just do:
searchController.searchBar.becomeFirstResponder()
(from this answer)
Now all we need to do is reference searchController.searchBar from the SwiftUI view. First, add a function to your SearchBar class.
class SearchBar: NSObject, ObservableObject {
    @Published var text: String = ""
    let searchController: UISearchController = UISearchController(searchResultsController: nil)

    override init() {
        super.init()
        self.searchController.obscuresBackgroundDuringPresentation = false
        self.searchController.searchResultsUpdater = self
    }

    /// add this function
    func activate() {
        searchController.searchBar.becomeFirstResponder()
    }
}
Then, just call it. I think this is better than setting an @State, but if you require that, let me know and I'll edit my answer.
struct ContentView: View {
    @StateObject var searchBar = SearchBar()

    var body: some View {
        NavigationView {
            Button(action: {
                searchBar.activate() /// activate the search bar
            }) {
                Text("Activate search bar")
            }
            .modifier(SearchBarModifier(searchBar: searchBar))
            .navigationTitle("Navigation View")
        }
    }
}
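If you do prefer driving it from state, here is a rough sketch of that variant (my addition, not part of the original answer); it flips a Bool and calls activate() from onChange. The view name and flag are hypothetical:

struct SearchActivationView: View {
    @StateObject var searchBar = SearchBar()
    @State private var isSearching = false // hypothetical flag, not from the answer

    var body: some View {
        NavigationView {
            Button("Activate search bar") {
                isSearching = true
            }
            .modifier(SearchBarModifier(searchBar: searchBar))
            .navigationTitle("Navigation View")
            .onChange(of: isSearching) { active in
                // Activate the UIKit search bar whenever the flag becomes true.
                if active {
                    searchBar.activate()
                }
            }
        }
    }
}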

SwiftUI: Multitouch gesture / Multiple Gestures

Is there a way in SwiftUI to track multiple gestures at once? I want my one main view to be able to keep track of multiple fingers dragging at once.
ZStack {
    Color.black
        .edgesIgnoringSafeArea(.all)
        .gesture(DragGesture(minimumDistance: 0)
            .onChanged { (value) in
                //some logic
            }.onEnded { (value) in
                //more logic
            })
    //other code
}
I have this code, but I can only ever have one drag gesture being processed at a time. If one finger is dragging and I then try to add another, the first one stops.
I am trying to achieve an effect where multiple fingers are on screen at once. Each finger is dragging a circle simultaneously (one circle is following each finger).
I see simultaneous gestures in Apple's documentation, but this refers to having one gesture trigger multiple actions.
The solution that uses UIViewRepresentable (How to detect a tap gesture location in SwiftUI?) would work, although it's not a pure SwiftUI solution (I've looked for one but cannot find it and would be interested to see one). I've updated the code below; you would need to change numberOfTouchPoints to 5 and then use the callback to get the values into your view.
struct TapView: UIViewRepresentable {
    var tappedCallback: (() -> Void)

    func makeUIView(context: UIViewRepresentableContext<TapView>) -> TapView.UIViewType {
        let v = UIView(frame: .zero)
        let gesture = NFingerGestureRecognizer(target: context.coordinator,
                                               action: #selector(Coordinator.tapped))
        v.addGestureRecognizer(gesture)
        return v
    }

    class Coordinator: NSObject {
        var tappedCallback: (() -> Void)

        init(tappedCallback: @escaping (() -> Void)) {
            self.tappedCallback = tappedCallback
        }

        @objc func tapped(gesture: NFingerGestureRecognizer) {
            for i in 0..<gesture.numberOfTouches {
                print(gesture.location(ofTouch: i, in: gesture.view))
            }
            self.tappedCallback()
        }
    }

    func makeCoordinator() -> TapView.Coordinator {
        return Coordinator(tappedCallback: self.tappedCallback)
    }

    func updateUIView(_ uiView: UIView,
                      context: UIViewRepresentableContext<TapView>) {
    }
}
class NFingerGestureRecognizer: UIGestureRecognizer {
    var numberOfTouchPoints: Int = 2

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent) {
        if self.state == .possible {
            if (self.numberOfTouches == numberOfTouchPoints) { // the expected number of fingers is down
                self.state = .began
                print("two finger drag - Start")
            }
        } else { // check to see if there are more touches
            if (self.numberOfTouches > numberOfTouchPoints) { // too many fingers, fail the gesture
                self.state = .failed
                print("two finger drag - failed")
            }
        }
        //printInternalTouches()
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent) {
        if (self.state != .possible) {
            self.state = .changed // we are looking for a multi-finger interaction
            //printInternalTouches()
            print("changed")
        }
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent) {
        if numberOfTouches == 2 {
            state = .ended
        } else {
            state = .failed
        }
        //printInternalTouches()
        print("ended")
    }

    private func printInternalTouches() {
        // now print the internal touch locations - should be the better solution
        print("Internal Locations")
        for i in 0..<self.numberOfTouches {
            print(self.location(ofTouch: i, in: self.view))
        }
    }
}
I combined Paul's answer with the outdated example from Apple to create a way to detect as many touches as you like.
Update 3: (commented code is for debug output)
import Foundation
import UIKit

class NFingerGestureRecognizer: UIGestureRecognizer {
    var tappedCallback: (UITouch, CGPoint?) -> Void
    var touchViews = [UITouch: CGPoint]()

    init(target: Any?, tappedCallback: @escaping (UITouch, CGPoint?) -> ()) {
        self.tappedCallback = tappedCallback
        super.init(target: target, action: nil)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent) {
        for touch in touches {
            let location = touch.location(in: touch.view)
            // print("Start: (\(location.x)/\(location.y))")
            touchViews[touch] = location
            tappedCallback(touch, location)
        }
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent) {
        for touch in touches {
            let newLocation = touch.location(in: touch.view)
            // let oldLocation = touchViews[touch]!
            // print("Move: (\(oldLocation.x)/\(oldLocation.y)) -> (\(newLocation.x)/\(newLocation.y))")
            touchViews[touch] = newLocation
            tappedCallback(touch, newLocation)
        }
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent) {
        for touch in touches {
            // let oldLocation = touchViews[touch]!
            // print("End: (\(oldLocation.x)/\(oldLocation.y))")
            touchViews.removeValue(forKey: touch)
            tappedCallback(touch, nil)
        }
    }
}
Update:
You need to wrap this in a UIViewRepresentable to use it as a SwiftUI component:
import Foundation
import SwiftUI

struct TapView: UIViewRepresentable {
    var tappedCallback: (UITouch, CGPoint?) -> Void

    func makeUIView(context: UIViewRepresentableContext<TapView>) -> TapView.UIViewType {
        let v = UIView(frame: .zero)
        let gesture = NFingerGestureRecognizer(target: context.coordinator, tappedCallback: tappedCallback)
        v.addGestureRecognizer(gesture)
        return v
    }

    func updateUIView(_ uiView: UIView, context: UIViewRepresentableContext<TapView>) {
        // empty
    }
}
Update 2:
A little example of how to use TapView in SwiftUI:
var body: some View {
    guard let img = UIImage(named: "background.png") else {
        fatalError("Unable to load image")
    }

    return GeometryReader { geometry in
        ZStack {
            Image(uiImage: img)
                .resizable()
                .aspectRatio(geometry.size, contentMode: .fill)
                .edgesIgnoringSafeArea(.all)
            TapView { touch, optLocation in
                // touch can be used as a dictionary key.
                // optLocation is nil when the touch ends.
            }
        }
    }
}
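To get back to the original goal (one circle following each finger), a rough sketch built on the TapView above could look like this. It is my addition, not part of the answer, and it assumes the TapView fills the ZStack so the touch locations and the circle positions share the same coordinate space:

struct MultiTouchCirclesView: View {
    // UITouch is an NSObject, so it can serve as a dictionary key for the touch's lifetime.
    @State private var touches: [UITouch: CGPoint] = [:]

    var body: some View {
        ZStack {
            Color.black.edgesIgnoringSafeArea(.all)
            TapView { touch, optLocation in
                if let location = optLocation {
                    touches[touch] = location          // touch began or moved
                } else {
                    touches.removeValue(forKey: touch) // touch ended
                }
            }
            ForEach(Array(touches.keys), id: \.self) { touch in
                Circle()
                    .fill(Color.white)
                    .frame(width: 60, height: 60)
                    .position(touches[touch] ?? .zero)
            }
        }
    }
}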
I've implemented this solution and it works fine until there is another view within the same parent view as the one with the GeometryReader. When I add another view, all simultaneous touches after the first have incorrect X or Y values for optLocation.

How to make UIViewRepresentable focusable on tvOS in SwiftUI?

The title says it all: I can't get focus on a UIViewRepresentable in SwiftUI on tvOS. Any view that conforms to the View protocol can be focused without problems, but neither UIViewRepresentable nor UIViewControllerRepresentable can.
Any ideas?
This code works:
struct ContentView: View {
    var body: some View {
        Text("Hello, World!")
            .focusable()
            .onExitCommand {
                print("Menu button pressed")
            }
    }
}
This one does not:
struct ContentView: View {
    var body: some View {
        ViewRepresentable()
            .focusable()
            .onExitCommand {
                print("Menu button pressed")
            }
    }
}

struct ViewRepresentable: UIViewRepresentable {
    func makeUIView(context: Context) -> UIView {
        let view = UIView()
        view.backgroundColor = .red
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {
    }
}
Thanks!
You need to make a UIView subclass that overrides canBecomeFocused to return true. You probably also want to override didUpdateFocus(in:with:) if you want to respond to focus changes.
For reference, here is a utility class I made to give me focusable UIViews that respond to remote swipes and clicks.
import UIKit

@available(iOS, unavailable)
@available(macCatalyst, unavailable)
class UserInputView: UIView {
    // PressableDelegate and TouchableDelegate are the author's own delegate protocols.
    weak var pressDelegate: PressableDelegate?
    weak var touchDelegate: TouchableDelegate?

    var touchStarted: CGFloat = 0
    var lastTouch: CGFloat = 0

    override init(frame: CGRect) {
        super.init(frame: frame)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        touchStarted = touches.first?.location(in: nil).x ?? 0
        lastTouch = touchStarted
        touchDelegate?.touchDown()
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
        let thisTouch = touches.first?.location(in: nil).x ?? 0
        touchDelegate?.touchMove(amount: Double((thisTouch - lastTouch) / (touches.first?.window?.frame.width ?? 1920)))
        lastTouch = touches.first?.location(in: nil).x ?? 0
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
        touchDelegate?.touchUp()
    }

    override func pressesBegan(_ presses: Set<UIPress>, with event: UIPressesEvent?) {
        if isClick(presses) {
            pressDelegate?.press(pressed: true)
        } else {
            superview?.pressesBegan(presses, with: event)
        }
    }

    override func pressesEnded(_ presses: Set<UIPress>, with event: UIPressesEvent?) {
        if isClick(presses) {
            pressDelegate?.press(pressed: false)
            pressDelegate?.clicked()
        } else {
            superview?.pressesEnded(presses, with: event)
        }
    }

    private func isClick(_ presses: Set<UIPress>?) -> Bool {
        return presses?.map({ $0.type }).contains(.select) ?? false
    }

    override func didUpdateFocus(in context: UIFocusUpdateContext, with coordinator: UIFocusAnimationCoordinator) {
        pressDelegate?.focus(focused: isFocused)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override var canBecomeFocused: Bool {
        return true
    }
}
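Stripped down to just the focus part, a minimal sketch (my own names, not from the answer) looks like this: a UIView subclass that opts into focus, wrapped in a UIViewRepresentable.

// Hypothetical names for illustration; only canBecomeFocused is strictly required.
class FocusableUIView: UIView {
    override var canBecomeFocused: Bool { true }

    override func didUpdateFocus(in context: UIFocusUpdateContext,
                                 with coordinator: UIFocusAnimationCoordinator) {
        // React to focus changes, e.g. by highlighting the view.
        backgroundColor = isFocused ? .white : .red
    }
}

struct FocusableRepresentable: UIViewRepresentable {
    func makeUIView(context: Context) -> FocusableUIView {
        FocusableUIView()
    }

    func updateUIView(_ uiView: FocusableUIView, context: Context) {}
}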

SwiftUI NavigationBar height

How do I get the current NavigationBar height? In UIKit we could use
navigationController?.navigationBar.frame.height
but I can't find anything equivalent for SwiftUI...
Based on this post (thanks to Asperi): https://stackoverflow.com/a/59972635/12299030
struct NavBarAccessor: UIViewControllerRepresentable {
    var callback: (UINavigationBar) -> Void
    private let proxyController = ViewController()

    func makeUIViewController(context: UIViewControllerRepresentableContext<NavBarAccessor>) -> UIViewController {
        proxyController.callback = callback
        return proxyController
    }

    func updateUIViewController(_ uiViewController: UIViewController, context: UIViewControllerRepresentableContext<NavBarAccessor>) {
    }

    typealias UIViewControllerType = UIViewController

    private class ViewController: UIViewController {
        var callback: (UINavigationBar) -> Void = { _ in }

        override func viewWillAppear(_ animated: Bool) {
            super.viewWillAppear(animated)
            if let navBar = self.navigationController {
                self.callback(navBar.navigationBar)
            }
        }
    }
}
And then we can call this from any View:
.background(NavBarAccessor { navBar in
    print(">> NavBar height: \(navBar.bounds.height)")
    // !! use as needed, in calculations, @State, etc.
})
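For example, a minimal sketch (my addition, not from the answer, with a hypothetical view name) of feeding the measured height into @State so it can drive layout:

struct MeasuredView: View {
    @State private var navBarHeight: CGFloat = 0

    var body: some View {
        NavigationView {
            Text("Nav bar is \(Int(navBarHeight)) pt tall")
                .navigationTitle("Demo")
                .background(NavBarAccessor { navBar in
                    // Store the measured height; viewWillAppear runs on the main thread.
                    navBarHeight = navBar.bounds.height
                })
        }
    }
}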
Building on @yoprst's response, rather than configuring calculations and @State variables, you could also return a View to insert directly into the hierarchy:
struct NavigationBarAccessor: UIViewControllerRepresentable {
    var callback: (UINavigationBar) -> (AnyView)
    private let proxyViewController = ProxyViewController()

    func makeUIViewController(context: UIViewControllerRepresentableContext<NavigationBarAccessor>) -> UIViewController {
        self.proxyViewController.callback = callback
        return proxyViewController
    }

    func updateUIViewController(_ uiViewController: UIViewController, context: UIViewControllerRepresentableContext<NavigationBarAccessor>) {
    }

    private class ProxyViewController: UIViewController {
        var callback: ((UINavigationBar) -> AnyView)?

        override func viewWillAppear(_ animated: Bool) {
            super.viewWillAppear(animated)
            if let navigationBar = self.navigationController?.navigationBar {
                _ = self.callback?(navigationBar)
            }
        }
    }
}
Usage:
VStack {
    NavigationBarAccessor { navigationBar in
        Spacer()
            .frame(height: navigationBar.frame.height)
    }
}
Use GeometryReader, like this:
ZStack {
    GeometryReader { geometry in
        Rectangle()
            .fill(Color.blue)
            .frame(height: geometry.safeAreaInsets.bottom + 44.0)
    }
    Text("Content")
}
.edgesIgnoringSafeArea(.top)