Why doesn't my AVCaptureVideoPreviewLayer render in the layer? - SwiftUI

I'm trying to implement a camera preview view in SwiftUI, for which I have the following code:
import SwiftUI
import AVFoundation

struct CameraPreview: UIViewRepresentable {
    let session: AVCaptureSession

    func makeUIView(context: Context) -> UIView {
        let view = UIView()
        view.backgroundColor = .gray
        let videoPreviewLayer = AVCaptureVideoPreviewLayer(session: session)
        videoPreviewLayer.frame = view.bounds
        videoPreviewLayer.videoGravity = .resizeAspectFill
        videoPreviewLayer.connection?.videoOrientation = .portrait
        view.layer.addSublayer(videoPreviewLayer)
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {
        for layer in uiView.layer.sublayers ?? [] {
            layer.frame = uiView.bounds
        }
    }
}
However, I do see the gray background color that I set on the view, but it never starts showing the camera output. I've set up an AVCaptureVideoDataOutputSampleBufferDelegate class and I can see the frames being captured and processed, yet for some reason the preview layer never renders the output.
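For context, my session setup looks roughly like this (a simplified sketch; the helper name and queue label are placeholders, and the real delegate class lives elsewhere in my app):

func configureSession(_ session: AVCaptureSession, delegate: AVCaptureVideoDataOutputSampleBufferDelegate) {
    session.beginConfiguration()
    if let device = AVCaptureDevice.default(for: .video),
       let input = try? AVCaptureDeviceInput(device: device),
       session.canAddInput(input) {
        session.addInput(input)
    }
    let output = AVCaptureVideoDataOutput()
    output.setSampleBufferDelegate(delegate, queue: DispatchQueue(label: "camera.frames"))
    if session.canAddOutput(output) {
        session.addOutput(output)
    }
    session.commitConfiguration()
    session.startRunning()
}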
I have this other snippet that DOES render the output, but it does so by making the preview layer the view's root layer, which is what I want to avoid. Here's the code that works:
struct CameraPreview: UIViewRepresentable {
    let session: AVCaptureSession

    func makeUIView(context: Context) -> UIView {
        let view = VideoView()
        view.backgroundColor = .gray
        view.previewLayer.session = session
        view.previewLayer.videoGravity = .resizeAspectFill
        view.previewLayer.connection?.videoOrientation = .portrait
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {
        for layer in uiView.layer.sublayers ?? [] {
            layer.frame = uiView.bounds
        }
    }

    class VideoView: UIView {
        override class var layerClass: AnyClass {
            AVCaptureVideoPreviewLayer.self
        }

        var previewLayer: AVCaptureVideoPreviewLayer {
            layer as! AVCaptureVideoPreviewLayer
        }
    }
}
Some examples I found suggest I should be able to show the preview like I do in the first example. I've tried initializing the session with inputs both before and after the preview view is created, with the same result. Am I missing anything? Am I failing to retain the layer, or is there a special session configuration to look out for? To make it work I simply swap the implementations, and the one with the inner class does render.
Any help is really appreciated.
Some resources:
https://nsscreencast.com/episodes/296-camera-capture-preview-layer-sample-buffer
https://www.appcoda.com/avfoundation-swift-guide/
https://developer.apple.com/documentation/vision/recognizing_objects_in_live_capture

Related

Remove title when pushing EKCalendarChooser to Navigation Stack with SwiftUI

I'm working on an app where I want to push the EKCalendarChooser View Controller to the navigation stack with a navigation link. Everything works as expected apart from the fact that I can't get rid of some magic title/label.
I want to hide the title marked with the red rectangle in the image.
I'm using the following code to push the view:
NavigationLink(destination: CalendarChooser(eventStore: self.eventStore)
    .edgesIgnoringSafeArea([.top, .bottom])
    .navigationTitle("My Navigation Title")) {
        Text("Calendar Selection")
}
And this is my UIViewControllerRepresentable
import SwiftUI
import EventKitUI

struct CalendarChooser: UIViewControllerRepresentable {
    @Environment(\.presentationMode) var presentationMode
    let eventStore: EKEventStore

    func makeCoordinator() -> Coordinator {
        return Coordinator(self)
    }

    func makeUIViewController(context: UIViewControllerRepresentableContext<CalendarChooser>) -> UINavigationController {
        let chooser = EKCalendarChooser(selectionStyle: .multiple, displayStyle: .allCalendars, entityType: .event, eventStore: eventStore)
        // selectableCalendarsFromSettings is a helper defined elsewhere in my app
        chooser.selectedCalendars = Set(eventStore.selectableCalendarsFromSettings)
        chooser.delegate = context.coordinator
        chooser.showsDoneButton = false
        chooser.showsCancelButton = false
        return UINavigationController(rootViewController: chooser)
    }

    func updateUIViewController(_ uiViewController: UINavigationController, context: UIViewControllerRepresentableContext<CalendarChooser>) {
    }

    class Coordinator: NSObject, UINavigationControllerDelegate, EKCalendarChooserDelegate {
        var parent: CalendarChooser

        init(_ parent: CalendarChooser) {
            self.parent = parent
        }

        func calendarChooserDidFinish(_ calendarChooser: EKCalendarChooser) {
            let selectedCalendarIDs = calendarChooser.selectedCalendars.compactMap { $0.calendarIdentifier }
            // savedCalendarIDs and .calendarSelectionDidChange are app-specific helpers
            UserDefaults.savedCalendarIDs = selectedCalendarIDs
            NotificationCenter.default.post(name: .calendarSelectionDidChange, object: nil)
            parent.presentationMode.wrappedValue.dismiss()
        }

        func calendarChooserDidCancel(_ calendarChooser: EKCalendarChooser) {
            parent.presentationMode.wrappedValue.dismiss()
        }
    }
}
Note that I'm not even sure that I'm on the right track here, and I'm open to any solution.
I think I've found a solution to my own problem. With a small modification to my UIViewControllerRepresentable, the view looks the way I want it to. More specifically, the change is in the updateUIViewController function:
func updateUIViewController(_ uiViewController: UINavigationController, context: UIViewControllerRepresentableContext<CalendarChooser>) {
    uiViewController.setNavigationBarHidden(true, animated: false) // This line!
}
By doing this I keep the navigation controls and title from the navigation link.

How to add a customized InfoWindow to markers in google-maps swift ui?

I tried to make a customized info window view like the one below in SwiftUI, without any success.
Since this question doesn't have too much detail, I will be going off of some assumptions. First, I am assuming that you are calling the MapView through a UIViewRepresentable (GMSMapView is a UIView rather than a view controller, so that is the fitting wrapper).
I am not too familiar with the Google Maps SDK, but this is possible through the GMSMapViewDelegate methods. After implementing the proper GMSMapViewDelegate method, you can use a ZStack to present the image that you would like to show.
For example:
import SwiftUI
import GoogleMaps

struct MapView: UIViewRepresentable {
    var parentView: ContentView

    func makeUIView(context: Context) -> GMSMapView {
        let mapView = GMSMapView()
        mapView.delegate = context.coordinator // forward marker taps to the Coordinator
        return mapView
    }

    func updateUIView(_ uiView: GMSMapView, context: Context) {
    }

    func makeCoordinator() -> Coordinator {
        return Coordinator(self)
    }

    class Coordinator: NSObject, GMSMapViewDelegate {
        var parent: MapView

        init(_ parent: MapView) {
            self.parent = parent
        }

        // Use the proper Google Maps delegate method to find out if a marker was
        // tapped, then show the image by doing:
        // parent.parentView.isShowingInformationImage = true
    }
}
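For instance, the marker-tap callback could look like this (a sketch using GMSMapViewDelegate's mapView(_:didTap:) method; it assumes the parentView wiring above):

import GoogleMaps

extension MapView.Coordinator {
    // Called by the map when a marker is tapped.
    // Returning true consumes the tap and suppresses the default info window.
    func mapView(_ mapView: GMSMapView, didTap marker: GMSMarker) -> Bool {
        parent.parentView.isShowingInformationImage = true
        return true
    }
}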
In your SwiftUI view that you would like to put this MapView in, you can do the following:
struct ContentView: View {
    @State var isShowingInformationImage = false

    var body: some View {
        ZStack {
            if isShowingInformationImage {
                // Call the View containing the image
            }
            MapView(parentView: self)
        }
    }
}

RealityKit – Loading Reality Composer scenes with SwiftUI

I'm trying to load different models onto the user's face using SwiftUI, RealityKit and ARKit.
import SwiftUI
import RealityKit
import ARKit
import Combine

struct AugmentedRealityView: UIViewRepresentable {
    @Binding var modelName: String

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)
        let configuration = ARFaceTrackingConfiguration()
        arView.session.run(configuration, options: [.removeExistingAnchors,
                                                    .resetTracking])
        loadModel(name: modelName, arView: arView)
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) { }

    private func loadModel(name: String, arView: ARView) {
        var cancellable: AnyCancellable? = nil
        cancellable = ModelEntity.loadAsync(named: name).sink(
            receiveCompletion: { loadCompletion in
                if case let .failure(error) = loadCompletion {
                    print("Unable to load model: \(error.localizedDescription)")
                }
                cancellable?.cancel()
            },
            receiveValue: { model in
                let faceAnchor = AnchorEntity(.face)
                arView.scene.addAnchor(faceAnchor)
                faceAnchor.addChild(model)
                model.scale = [1, 1, 1]
            })
    }
}
This is how I load them, but when the camera view opens and loads one model, the other models won't load afterwards. Can someone help me out?
When the value of your Binding changes, SwiftUI calls your updateUIView(_:context:) implementation, which does nothing.
Additionally, you are not storing the AnyCancellable. When the token returned by sink gets deallocated, the request is cancelled. That can result in unexpected failures when trying to load larger models.
To fix both of these issues, use a Coordinator.
import UIKit
import RealityKit
import SwiftUI
import Combine
import ARKit

struct AugmentedRealityView: UIViewRepresentable {
    class Coordinator {
        private var token: AnyCancellable?
        private var currentModelName: String?

        fileprivate func loadModel(_ name: String, into arView: ARView) {
            // Only load the model if the name differs from the previous one
            guard name != currentModelName else {
                return
            }
            currentModelName = name
            // This is optional: when the token gets overwritten,
            // the previous request is cancelled automatically
            token?.cancel()
            token = ModelEntity.loadAsync(named: name).sink(
                receiveCompletion: { loadCompletion in
                    if case let .failure(error) = loadCompletion {
                        print("Unable to load model: \(error.localizedDescription)")
                    }
                },
                receiveValue: { model in
                    // Anchor to the detected face, matching the face-tracking configuration
                    let faceAnchor = AnchorEntity(.face)
                    arView.scene.addAnchor(faceAnchor)
                    faceAnchor.addChild(model)
                    model.scale = [1, 1, 1]
                })
        }

        fileprivate func cancelRequest() {
            token?.cancel()
        }
    }

    @Binding var modelName: String

    func makeCoordinator() -> Coordinator {
        Coordinator()
    }

    static func dismantleUIView(_ uiView: ARView, coordinator: Coordinator) {
        coordinator.cancelRequest()
    }

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)
        let configuration = ARFaceTrackingConfiguration()
        arView.session.run(configuration, options: [.removeExistingAnchors,
                                                    .resetTracking])
        context.coordinator.loadModel(modelName, into: arView)
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {
        context.coordinator.loadModel(modelName, into: uiView)
    }
}
We create a nested Coordinator class that holds the AnyCancellable token and move the loadModel function into the Coordinator.
Unlike a SwiftUI View, the Coordinator is a class that lives as long as your view is visible (always remember that SwiftUI might create and destroy your View at will; its lifecycle is not tied to the actual "view" that is shown on screen).
In our loadModel method we double-check that the value of our Binding actually changed, so that we don't cancel an ongoing request for the same model when SwiftUI updates our View, e.g. because of a change in the environment.
Then we implement the makeCoordinator function to construct one of our Coordinator objects.
Both in makeUIView and in updateUIView we call the loadModel function on our Coordinator.
The dismantleUIView method is optional. When the Coordinator gets deconstructed, our token is released as well, which triggers Combine to cancel any ongoing request.
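For completeness, here is a minimal sketch of how the Binding could drive model swaps from SwiftUI (the model names here are hypothetical placeholders):

struct ModelSwitcherView: View {
    @State private var modelName = "glasses" // hypothetical model file name

    var body: some View {
        VStack {
            AugmentedRealityView(modelName: $modelName)
            Button("Switch model") {
                // Changing the binding triggers updateUIView,
                // which asks the Coordinator to load the new model
                modelName = "hat" // another hypothetical model name
            }
        }
    }
}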

GLKView in SwiftUI?

How can I use GLKView in SwiftUI? I'm using CIFilter but would like to apply filters through GLKit / OpenGL. Any ideas?
import SwiftUI
import CoreImage.CIFilterBuiltins

struct ContentView: View {
    @State private var image: Image?

    var body: some View {
        VStack {
            image?
                .resizable()
                .scaledToFit()
        }
        .onAppear(perform: loadImage)
    }

    func loadImage() {
        guard let inputImage = UIImage(named: "squirrel"),
              let ciImage = CIImage(image: inputImage) else {
            return
        }
        let context = CIContext()
        let blur = CIFilter.gaussianBlur()
        blur.inputImage = ciImage
        blur.radius = 20
        guard let outputImage = blur.outputImage else {
            return
        }
        if let cgImg = context.createCGImage(outputImage, from: ciImage.extent) {
            let uiImg = UIImage(cgImage: cgImg)
            image = Image(uiImage: uiImg)
        }
    }
}
Here's a working GLKView in SwiftUI using UIViewControllerRepresentable.
A few things to keep in mind.
GLKit was deprecated with the release of iOS 12, nearly 2 years ago. While I hope Apple won't kill it anytime soon (way too many apps still use it), they recommend using Metal or an MTKView instead. Most of the technique here is still the way to go for SwiftUI.
I worked with SwiftUI in hopes of making my next CoreImage app be a "pure" SwiftUI app until I had too many UIKit needs to bring in. I stopped working on this around Beta 6. The code works but is clearly not production ready. The repo for this is here.
I'm more comfortable working with models instead of putting code for things like using a CIFilter directly in my views. I'll assume you know how to create a view model and have it be an EnvironmentObject. If not look at my code in the repo.
Your code references a SwiftUI Image view - I never found any documentation that suggests it uses the GPU (as a GLKView does) so you won't find anything like that in my code. If you are looking for real-time performance when changing attributes, I found this to work very well.
Starting with a GLKView, here's my code:
import UIKit
import GLKit
import CoreImage

class ImageView: GLKView {
    var renderContext: CIContext
    var myClearColor: UIColor!
    var rgb: (Int?, Int?, Int?)!

    public var image: CIImage! {
        didSet {
            setNeedsDisplay()
        }
    }

    public var clearColor: UIColor! {
        didSet {
            myClearColor = clearColor
        }
    }

    public init() {
        let eaglContext = EAGLContext(api: .openGLES2)
        renderContext = CIContext(eaglContext: eaglContext!)
        super.init(frame: CGRect.zero)
        context = eaglContext!
    }

    override public init(frame: CGRect, context: EAGLContext) {
        renderContext = CIContext(eaglContext: context)
        super.init(frame: frame, context: context)
        enableSetNeedsDisplay = true
    }

    public required init?(coder aDecoder: NSCoder) {
        let eaglContext = EAGLContext(api: .openGLES2)
        renderContext = CIContext(eaglContext: eaglContext!)
        super.init(coder: aDecoder)
        context = eaglContext!
    }

    override public func draw(_ rect: CGRect) {
        if let image = image {
            let imageSize = image.extent.size
            var drawFrame = CGRect(x: 0, y: 0, width: CGFloat(drawableWidth), height: CGFloat(drawableHeight))
            let imageAR = imageSize.width / imageSize.height
            let viewAR = drawFrame.width / drawFrame.height
            // Letterbox/pillarbox the image so it renders as scaleAspectFit
            if imageAR > viewAR {
                drawFrame.origin.y += (drawFrame.height - drawFrame.width / imageAR) / 2.0
                drawFrame.size.height = drawFrame.width / imageAR
            } else {
                drawFrame.origin.x += (drawFrame.width - drawFrame.height * imageAR) / 2.0
                drawFrame.size.width = drawFrame.height * imageAR
            }
            rgb = (0, 0, 0)
            rgb = myClearColor.rgb() // UIColor.rgb() is a helper extension from the linked repo
            glClearColor(Float(rgb.0!) / 256.0, Float(rgb.1!) / 256.0, Float(rgb.2!) / 256.0, 0.0)
            glClear(0x00004000) // GL_COLOR_BUFFER_BIT
            // Set the blend mode to "source over" so that Core Image will use it
            glEnable(0x0BE2) // GL_BLEND
            glBlendFunc(1, 0x0303) // GL_ONE, GL_ONE_MINUS_SRC_ALPHA
            renderContext.draw(image, in: drawFrame, from: image.extent)
        }
    }
}
This is very old production code, taken from objc.io issue 21 dated February 2015! Of note is that it encapsulates a CIContext, needs its own clear color defined before using its draw method, and renders an image as scaleAspectFit. If you try using this in UIKit, it'll likely work perfectly.
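For instance, plain UIKit usage might look like this (a hypothetical sketch; the image name is a placeholder):

let glkImageView = ImageView()
glkImageView.clearColor = .black // must be set before draw(_:) runs
if let uiImage = UIImage(named: "vermont.jpg") {
    glkImageView.image = CIImage(image: uiImage) // setting image triggers setNeedsDisplay
}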
Next, a "wrapper" UIViewController:
class ImageViewVC: UIViewController {
    var model: Model!
    var imageView = ImageView()

    override func viewDidLoad() {
        super.viewDidLoad()
        view = imageView
        NotificationCenter.default.addObserver(self, selector: #selector(updateImage), name: .updateImage, object: nil)
    }

    override func viewDidLayoutSubviews() {
        imageView.setNeedsDisplay()
    }

    override func traitCollectionDidChange(_ previousTraitCollection: UITraitCollection?) {
        if traitCollection.userInterfaceStyle == .light {
            imageView.clearColor = UIColor.white
        } else {
            imageView.clearColor = UIColor.black
        }
    }

    @objc func updateImage() {
        imageView.image = model.ciFinal
        imageView.setNeedsDisplay()
    }
}
I did this for a few reasons, pretty much adding up to the fact that I'm not a Combine expert.
First, note that the view model (model) cannot access the EnvironmentObject directly. That's a SwiftUI object and UIKit doesn't know about it. I think an ObservableObject may work, but I never found the right way to do it.
Second, note the use of NotificationCenter. I spent a week last year trying to get Combine to "just work" - particularly in the opposite direction of having a UIButton tap notify my model of a change - and found that this is really the easiest way. It's even easier than using delegate methods.
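For reference, the .updateImage name used in these snippets is a custom Notification.Name; a declaration along these lines (the exact one lives in the repo) makes it available:

extension Notification.Name {
    // Custom notification name assumed by the addObserver/post calls above
    static let updateImage = Notification.Name("updateImage")
}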
Next, exposing the VC as a representable:
struct GLKViewerVC: UIViewControllerRepresentable {
    @EnvironmentObject var model: Model
    let glkViewVC = ImageViewVC()

    func makeUIViewController(context: Context) -> ImageViewVC {
        return glkViewVC
    }

    func updateUIViewController(_ uiViewController: ImageViewVC, context: Context) {
        glkViewVC.model = model
    }
}
The only thing of note is that this is where I set the model variable in the VC. I'm sure it's possible to get rid of the VC entirely and use a UIViewRepresentable, but I'm more comfortable with this setup.
Next, my model:
import UIKit
import Combine
import CoreImage

class Model: ObservableObject {
    var objectWillChange = PassthroughSubject<Void, Never>()
    var uiOriginal: UIImage?
    var ciInput: CIImage?
    var ciFinal: CIImage?

    init() {
        uiOriginal = UIImage(named: "vermont.jpg")
        // resizeToBoundingSquare and rotateImage are helper extensions from the linked repo
        uiOriginal = uiOriginal!.resizeToBoundingSquare(640)
        ciInput = CIImage(image: uiOriginal!)?.rotateImage()
        let filter = CIFilter(name: "CIPhotoEffectNoir")
        filter?.setValue(ciInput, forKey: "inputImage")
        ciFinal = filter?.outputImage
    }
}
Nothing to see here at all, but understand that in SceneDelegate, where you instantiate this, it will trigger the init and set up the filtered image.
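That instantiation follows the standard SwiftUI 1.0 SceneDelegate pattern, roughly like this (a sketch, not the repo's exact code):

// In SceneDelegate.scene(_:willConnectTo:options:)
let contentView = ContentView().environmentObject(Model())
if let windowScene = scene as? UIWindowScene {
    let window = UIWindow(windowScene: windowScene)
    window.rootViewController = UIHostingController(rootView: contentView)
    self.window = window
    window.makeKeyAndVisible()
}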
Finally, ContentView:
struct ContentView: View {
    @EnvironmentObject var model: Model

    var body: some View {
        VStack {
            GLKViewerVC()
            Button(action: {
                self.showImage()
            }) {
                VStack {
                    Image(systemName: "tv").font(Font.body.weight(.bold))
                    Text("Show image").font(Font.body.weight(.bold))
                }
                .frame(width: 80, height: 80)
            }
        }
    }

    func showImage() {
        NotificationCenter.default.post(name: .updateImage, object: nil, userInfo: nil)
    }
}
SceneDelegate instantiates the view model, which now has the altered CIImage, and the button beneath the GLKView (an instance of GLKViewerVC, which is just a SwiftUI View) sends a notification to update the image.
Apple's WWDC 2022 contained a tutorial/video entitled "Display EDR Content with Core Image, Metal, and SwiftUI" which describes how to blend Core Image with Metal and SwiftUI. It points to some new sample code entitled "Generating an Animation with a Core Image Render Destination" (here).
While it doesn't address your question about using GLKView, it does provide some elegant, clean, Apple-sanctioned code for using Metal within SwiftUI.
This sample project is very CoreImage-centric (which matches your background with CIFilter), but I wish Apple would post more sample-code examples showing Metal integrated with SwiftUI.

Where to put delegates of CLLocationManager using Swift UI?

I've been on and off Swift for years, so sorry if I'm doing something stupid; the SwiftUI thing isn't helping my confusion either. I have the code I'm working with at the bottom.
So I'm making a geofencing app using SwiftUI. I've got the basics up and running but still have a bit to go with it.
I have the geofencing coordinates and related data inside the UserData environment object, which I get from a JSON file elsewhere.
I have a SwiftUI object that shows a map with the current position and initializes a CLLocationManager to deal with geofencing.
I'm trying to implement the delegate in the same file with
locationManager.delegate = self
in setupManager(), but it causes:
Cannot assign value of type 'GeofencingView' to type 'CLLocationManagerDelegate?'
The lack of SwiftUI-specific information online seems to be causing the most confusion for me at the moment. I think what I should do is either:
create a customized NSObject that handles the delegates (rough sketch below), but in that case I'm not sure how to pass the @EnvironmentObject, or
find out how to put the delegates on the UIViewRepresentable object.
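Here's a rough, untested sketch of what I mean by the first option (UserData is my environment object; how to inject it is exactly what I'm unsure about):

import CoreLocation

// Untested sketch: a plain NSObject that owns the CLLocationManagerDelegate callbacks
class GeofenceDelegate: NSObject, CLLocationManagerDelegate {
    var userData: UserData? // would somehow need to be handed the environment object

    func locationManager(_ manager: CLLocationManager, didEnterRegion region: CLRegion) {
        print("Entered region: \(region.identifier)")
    }

    func locationManager(_ manager: CLLocationManager, didExitRegion region: CLRegion) {
        print("Exited region: \(region.identifier)")
    }
}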
Any pointers on what I'm doing wrong would be greatly appreciated.
import SwiftUI
import MapKit
import CoreLocation

struct GeofencingView: UIViewRepresentable {
    @EnvironmentObject var userData: UserData
    var notification = LocalNotificationManager()
    var locationManager = CLLocationManager()

    func setupManager() {
        locationManager.desiredAccuracy = kCLLocationAccuracyBest
        locationManager.requestWhenInUseAuthorization()
        locationManager.requestAlwaysAuthorization()
        self.startGeofenceMonitoring()
    }

    func startGeofenceMonitoring() {
        print("MapListView::startGeofenceMonitoring")
        for landmark in self.userData.landmarks {
            // Note: CLLocationCoordinate2DMake takes latitude first, then longitude
            let monitoringCoordinate = CLLocationCoordinate2DMake(landmark.locationCoordinate.latitude, landmark.locationCoordinate.longitude)
            let monitoringRegion = CLCircularRegion(center: monitoringCoordinate, radius: 20.0, identifier: "\(landmark.id)")
            locationManager.startMonitoring(for: monitoringRegion)
        }
    }

    func makeUIView(context: Context) -> MKMapView {
        setupManager()
        let mapView = MKMapView(frame: UIScreen.main.bounds)
        mapView.showsUserLocation = true
        mapView.userTrackingMode = .follow
        return mapView
    }

    func updateUIView(_ uiView: MKMapView, context: Context) {
    }
}

struct GeofencingView_Preview: PreviewProvider {
    static var previews: some View {
        GeofencingView()
            .environmentObject(UserData())
    }
}