SwiftUI - Resize AVCaptureVideoPreviewLayer

I have the following code:
struct CameraPreview: UIViewRepresentable {
@ObservedObject var camera: Camera
class CameraView: UIView {
override class var layerClass: AnyClass {
AVCaptureVideoPreviewLayer.self
}
var videoPreviewLayer: AVCaptureVideoPreviewLayer {
return layer as! AVCaptureVideoPreviewLayer
}
}
func makeUIView(context: Context) -> CameraView {
let cameraView = CameraView()
cameraView.videoPreviewLayer.session = camera.session
cameraView.videoPreviewLayer.videoGravity = .resizeAspect
cameraView.videoPreviewLayer.cornerRadius = 12
cameraView.videoPreviewLayer.connection?.videoOrientation = .portrait
cameraView.videoPreviewLayer.backgroundColor = CGColor(red: 1.0, green: 0, blue: 0, alpha: 1)
camera.start()
return cameraView
}
func updateUIView(_ cameraView: CameraView, context: Context) {
}
}
But I haven’t been able to resize it in my SwiftUI view to the camera preview’s true size; I’m stuck with a frame that is bigger than it actually needs to be:
Is there a way to make the AVCaptureVideoPreviewLayer/UIViewRepresentable frame only the size of the camera preview?

Set cameraView.videoPreviewLayer.videoGravity = .resize and it fits.
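If you prefer to keep .resizeAspect rather than stretching the video, another option is to constrain the SwiftUI frame to the camera's aspect ratio. A minimal sketch, assuming a 4:3 portrait capture format (the real ratio should come from your session's active format):
// Sketch: give the representable the camera's aspect ratio so its frame
// shrinks to the preview instead of filling all available space.
// The 3:4 ratio is an assumption for a portrait 4:3 format.
CameraPreview(camera: camera)
    .aspectRatio(3.0 / 4.0, contentMode: .fit)
    .cornerRadius(12)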

Related

UITapGestureRecognizer not triggered when used by 2 overlapping UIViewRepresentable

Because I need to know whether the user taps with a finger or the pencil, I had to create a UIViewRepresentable with a UITapGestureRecognizer. Everything works fine when this view is used with other SwiftUI views. But when I stack 2 of these views and clip the top one, the bottom one never catches any tap event.
Here is my custom view:
struct UIKitView: UIViewRepresentable {
let color: UIColor
func makeUIView(context: Context) -> UIView {
let view = UIView()
view.backgroundColor = color
let tapGesture = UITapGestureRecognizer(target: context.coordinator, action: #selector(Coordinator.handleTap(gesture:)))
view.addGestureRecognizer(tapGesture)
return view
}
func updateUIView(_ uiView: UIView, context: Context) { }
func makeCoordinator() -> Coordinator {
Coordinator(color: color)
}
class Coordinator: NSObject {
let color: UIColor
init(color: UIColor) {
self.color = color
}
@objc func handleTap(gesture: UITapGestureRecognizer) {
print("Tapped in \(color == .yellow ? "yellow" : "red")")
}
}
}
And here how it is used in SwiftUI View:
ZStack {
UIKitView(color: .yellow)
UIKitView(color: .red)
.clipShape(Circle()) // not required, just visual
.contentShape(Circle())
}
The tap is always caught by the red view.
Then I've overridden the hitTest() function of my UIViewRepresentable to check if the tap is inside the clipping shape. The tap is then detected in the red circle, but nothing triggers anymore outside.
Any idea why?
You can override this method in your custom UIView subclass (CircleView here):
override func point(inside point: CGPoint, with event: UIEvent?) -> Bool {
let center = CGPoint(x: bounds.size.width/2, y: bounds.size.height/2)
return pow(center.x-point.x, 2) + pow(center.y - point.y, 2) <= pow(bounds.size.width/2, 2)
}
Source
FYI, there is also a mistake in your UIViewRepresentable: updateUIView should apply the color to the passed-in view, i.e.
func updateUIView(_ uiView: UIView, context: Context) {
uiView.backgroundColor = color
}
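For reference, a minimal sketch of how that override could be wired in; the CircleView subclass is illustrative (the question's code uses a plain UIView), and makeUIView would return it for the clipped view:
// Sketch: a UIView subclass that only claims taps inside an inscribed circle,
// so taps outside the clip shape fall through to the view underneath.
class CircleView: UIView {
    override func point(inside point: CGPoint, with event: UIEvent?) -> Bool {
        let center = CGPoint(x: bounds.width / 2, y: bounds.height / 2)
        return pow(center.x - point.x, 2) + pow(center.y - point.y, 2) <= pow(bounds.width / 2, 2)
    }
}

// In makeUIView, use `let view = CircleView()` instead of `let view = UIView()`
// for the view that is clipped to a circle.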

Is there an easy way to pinch to zoom and drag any View in SwiftUI?

I have been looking for a short, reusable piece of code that allows zooming and dragging any view in SwiftUI, and also changing the scale independently.
This would be the answer.
The interesting part that I add is that the scale of the zoomed view can be controlled from outside via a binding property. So we don't need to rely only on the pinch gesture, but can add a double tap to jump to the maximum scale, return to the normal scale, or have a slider (for instance) that changes the scale as we please.
I owe the bulk of this code to jtbandes in his answer to this question.
Here you have in a single file the code of the Zoomable and Scrollable view and a Test View to show how it works:
import SwiftUI
let maxAllowedScale = 4.0
struct TestZoomableScrollView: View {
@State private var scale: CGFloat = 1.0
var doubleTapGesture: some Gesture {
TapGesture(count: 2).onEnded {
if scale < maxAllowedScale / 2 {
scale = maxAllowedScale
} else {
scale = 1.0
}
}
}
var body: some View {
VStack(alignment: .center) {
Spacer()
ZoomableScrollView(scale: $scale) {
Image("foto_producto")
.resizable()
.scaledToFit()
.frame(width: 200, height: 200)
}
.frame(width: 300, height: 300)
.border(.black)
.gesture(doubleTapGesture)
Spacer()
Text("Change the scale")
Slider(value: $scale, in: 0.5...maxAllowedScale + 0.5)
.padding(.horizontal)
Spacer()
}
}
}
struct ZoomableScrollView<Content: View>: UIViewRepresentable {
private var content: Content
@Binding private var scale: CGFloat
init(scale: Binding<CGFloat>, @ViewBuilder content: () -> Content) {
self._scale = scale
self.content = content()
}
func makeUIView(context: Context) -> UIScrollView {
// set up the UIScrollView
let scrollView = UIScrollView()
scrollView.delegate = context.coordinator // for viewForZooming(in:)
scrollView.maximumZoomScale = maxAllowedScale
scrollView.minimumZoomScale = 1
scrollView.showsVerticalScrollIndicator = false
scrollView.showsHorizontalScrollIndicator = false
scrollView.bouncesZoom = true
// Create a UIHostingController to hold our SwiftUI content
let hostedView = context.coordinator.hostingController.view!
hostedView.translatesAutoresizingMaskIntoConstraints = true
hostedView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
hostedView.frame = scrollView.bounds
scrollView.addSubview(hostedView)
return scrollView
}
func makeCoordinator() -> Coordinator {
return Coordinator(hostingController: UIHostingController(rootView: self.content), scale: $scale)
}
func updateUIView(_ uiView: UIScrollView, context: Context) {
// update the hosting controller's SwiftUI content
context.coordinator.hostingController.rootView = self.content
uiView.zoomScale = scale
assert(context.coordinator.hostingController.view.superview == uiView)
}
class Coordinator: NSObject, UIScrollViewDelegate {
var hostingController: UIHostingController<Content>
@Binding var scale: CGFloat
init(hostingController: UIHostingController<Content>, scale: Binding<CGFloat>) {
self.hostingController = hostingController
self._scale = scale
}
func viewForZooming(in scrollView: UIScrollView) -> UIView? {
return hostingController.view
}
func scrollViewDidEndZooming(_ scrollView: UIScrollView, with view: UIView?, atScale scale: CGFloat) {
self.scale = scale
}
}
}
I think it's the shortest, easiest way to get the desired behaviour, and it works reliably, which I haven't found in the other solutions offered here. For example, zooming out is smooth, whereas it is usually jerky without this approach.
The slider has that range to show how the minimum and maximum values are respected; in a real app the range would be 1...maxAllowedScale.
As for the double tap, the behaviour can be changed very easily depending on what you prefer.
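For example, a hedged variant that doubles the scale on each double tap and wraps back to 1.0 once the next step would exceed the maximum (the step factor of 2 is just an illustration):
// Sketch: step the zoom by 2x on each double tap, wrapping back to 1.0
// once the next step would exceed maxAllowedScale.
var steppingDoubleTapGesture: some Gesture {
    TapGesture(count: 2).onEnded {
        let next = scale * 2
        scale = next > maxAllowedScale ? 1.0 : next
    }
}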
I hope this helps anyone who's looking for this feature.

Draw overlay on AVPlayer SwiftUI

I am building an app where I want to draw rectangles on a video. The rectangles will come from a neural network, and their positions will be relative to the top-left corner of the image frame.
To achieve that, I need to figure out a way to draw overlays on the AVPlayer view and use the actual frame as reference.
Is there an easy way of doing it in SwiftUI (which I started to learn a couple of days ago)?
The code I have now is just about drawing something the way I want it, i.e. I want to find out how to position the rectangle with respect to the image/video. The orange dot should be the origin if the black rectangle is the video.
Current code:
import SwiftUI
import AVKit
struct PlayVideo: View {
var moviePath: URL?
var player: AVPlayer
init(fileName: String) {
moviePath = fileURL(for: fileName)
player = AVPlayer(url: moviePath!)
}
var body: some View {
VideoPlayer(player: player)
.scaledToFill()
.overlay(
Rectangle()
.fill(Color.blue)
.frame(width: 20, height: 20),
alignment: .center)
Button(action: {
player.rate = 0.04
}, label: {
Text("Play Slow")
})
}
}
I tried .position(x:y:), but that seems to be relative to the parent frame.
EDIT
I have tried a new approach:
import SwiftUI
import AVKit
var player = AVPlayer()
struct PlayVideo: View {
var moviePath: URL
init(fileName: String) {
moviePath = fileURL(for: fileName)
}
var body: some View {
PlayerView(url: moviePath)
Button(action: {
player.rate = 0.04
}, label: {
Text("Play Slow")
})
}
}
struct PlayVideo_Previews: PreviewProvider {
static var previews: some View {
PlayVideo(fileName: "path")
}
}
struct PlayerView: UIViewRepresentable {
var url_: URL
init(url: URL) {
url_ = url
}
func updateUIView(_ uiView: UIView, context: UIViewRepresentableContext<PlayerView>) {
}
func makeUIView(context: Context) -> UIView {
return PlayerUIView(frame: .zero, url: url_)
}
}
class PlayerUIView: UIView {
private let playerLayer = AVPlayerLayer()
init(frame: CGRect, url: URL) {
super.init(frame: frame)
player = AVPlayer(url: url)
player.play()
playerLayer.player = player
layer.addSublayer(playerLayer)
// Create a CGRect object which is used to render a rectangle.
let rectFrame: CGRect = CGRect(x:CGFloat(0), y:CGFloat(0), width:CGFloat(100), height:CGFloat(50))
// Create a UIView object which use above CGRect object.
let greenView = UIView(frame: rectFrame)
// Set UIView background color.
greenView.backgroundColor = UIColor.green
layer.addSublayer(greenView.layer)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func layoutSubviews() {
super.layoutSubviews()
playerLayer.frame = bounds
}
}
As you can see, there is a problem with the offset between the video and the view.
I imagine one could add a second view on top of the video view with the same size and position and then draw on that. But the problem remains: how to get the size and position of the video correctly.
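One way to find that is AVPlayerLayer's videoRect property, which reports the rectangle the video image actually occupies inside the layer's bounds. A minimal sketch, adapting the PlayerUIView above (the overlayView and its 100x50 size are illustrative; note that videoRect stays .zero until the item's presentation size is known, so layout needs to run again once playback is ready):
import UIKit
import AVFoundation

// Sketch: place an overlay relative to the visible video image using
// AVPlayerLayer.videoRect instead of the view's full bounds.
class OverlayPlayerUIView: UIView {
    private let playerLayer = AVPlayerLayer()
    private let overlayView = UIView()

    init(frame: CGRect, url: URL) {
        super.init(frame: frame)
        playerLayer.player = AVPlayer(url: url)
        layer.addSublayer(playerLayer)
        overlayView.backgroundColor = UIColor.green.withAlphaComponent(0.4)
        addSubview(overlayView) // a subview stays above the player layer
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        playerLayer.frame = bounds
        // videoRect is the frame of the video image inside playerLayer
        let videoRect = playerLayer.videoRect
        // Example: a 100x50 rectangle anchored at the video's top-left corner
        overlayView.frame = CGRect(x: videoRect.minX, y: videoRect.minY,
                                   width: 100, height: 50)
    }
}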

How can I make SceneView's background transparent?

I want to open a 3D model and make its background transparent, so that I can see the UI behind the SceneView. I've tried this code, but the SceneView becomes white, not transparent.
struct ModelView: View {
var body: some View {
ZStack {
Text("Behind Text Behind Text Behind Text")
SceneView(
scene: { () -> SCNScene in
let scene = SCNScene()
scene.background.contents = UIColor.clear
return scene
}(),
pointOfView: { () -> SCNNode in
let cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.position = SCNVector3(x: 0, y: 0, z: 10)
return cameraNode
}(),
options: [
.allowsCameraControl,
.temporalAntialiasingEnabled,
]
)
}
}
}
I use Xcode 12.5 and an iPhone 8.
EDIT 1:
Thanks to the comments below, I decided to try new approaches but they still don't work.
Approach #1
First, I tried to create a MySceneView using SCNView through UIViewRepresentable:
struct MySceneView: UIViewRepresentable {
typealias UIViewType = SCNView
typealias Context = UIViewRepresentableContext<MySceneView>
func updateUIView(_ uiView: UIViewType, context: Context) {}
func makeUIView(context: Context) -> UIViewType {
let view = SCNView()
view.allowsCameraControl = true
view.isTemporalAntialiasingEnabled = true
view.autoenablesDefaultLighting = true
view.scene = MySceneView.scene
return view
}
static let scene: SCNScene = {
let scene = SCNScene(named: "art.scnassets/man.obj")!
scene.background.contents = UIColor.clear
let cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.position = SCNVector3(x: 0, y: 0, z: 10)
scene.rootNode.addChildNode(cameraNode)
return scene
}()
}
Approach #2
I tried using SpriteView, here is the code:
ZStack {
Text("Behind Text Behind Text Behind Text")
SpriteView(scene: { () -> SKScene in
let scene = SKScene()
scene.backgroundColor = UIColor.clear
let model = SK3DNode(viewportSize: .init(width: 200, height: 200))
model.scnScene = MySceneView.scene
scene.addChild(model)
return scene
}(), options: [.allowsTransparency])
}
Update:
A much simpler solution is to use UIViewRepresentable: create an SCNView and set its backgroundColor to .clear.
Old:
Thanks George_E, your idea with SpriteKit worked perfectly. Here is the code:
SpriteView(scene: { () -> SKScene in
let scene = SKScene()
scene.backgroundColor = UIColor.clear
let model = SK3DNode(viewportSize: .init(width: 200, height: 200))
model.scnScene = {
let scene = SCNScene(named: "art.scnassets/man.obj")!
scene.background.contents = UIColor.clear
let cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.position = SCNVector3(x: 0, y: 0, z: 10)
scene.rootNode.addChildNode(cameraNode)
return scene
}()
scene.addChild(model)
return scene
}(), options: [.allowsTransparency])
I wasn't able to find a fully working snippet here, but thanks to the answers from Arutyun I managed to compile a working solution without the need for SpriteKit.
import SceneKit
import SwiftUI
struct MySceneView: UIViewRepresentable {
typealias UIViewType = SCNView
typealias Context = UIViewRepresentableContext<MySceneView>
func updateUIView(_ uiView: UIViewType, context: Context) {}
func makeUIView(context: Context) -> UIViewType {
let view = SCNView()
view.backgroundColor = UIColor.clear // this is key!
view.allowsCameraControl = true
view.autoenablesDefaultLighting = true
// load the object here, could load a .scn file too
view.scene = SCNScene(named: "model.obj")!
return view
}
}
And use it just like a regular view:
import SwiftUI
struct SceneDemoView: View {
var body: some View {
ZStack{
// => background views here
MySceneView()
.frame( // set frame as required
maxWidth: .infinity,
maxHeight: .infinity,
alignment: .center
)
}
}
}
struct SceneDemoView_Previews: PreviewProvider {
static var previews: some View {
SceneDemoView()
}
}
With SwiftUI:
It is slightly different in SwiftUI using a SpriteView.
To implement a transparent SpriteView in SwiftUI, you have to use the 'options' parameter:
1. Configure your SKScene with a clear background (both the view AND the scene)
2. Configure your SpriteView with the correct option
// 1. configure your scene in 'didMove'
override func didMove(to view: SKView) {
self.backgroundColor = .clear
view.backgroundColor = .clear
}
and most importantly:
// 2. configure your SpriteView with 'allowsTransparency'
SpriteView(scene: YourSKScene(), options: [.allowsTransparency])
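Putting both steps together, a minimal sketch (the TransparentScene subclass and the ZStack contents are illustrative):
import SwiftUI
import SpriteKit

// Sketch: an SKScene that clears both its own background and its hosting
// SKView's background, presented with .allowsTransparency so the SwiftUI
// content behind it remains visible.
class TransparentScene: SKScene {
    override func didMove(to view: SKView) {
        backgroundColor = .clear
        view.backgroundColor = .clear
    }
}

struct TransparentSpriteDemo: View {
    var body: some View {
        ZStack {
            Text("Behind Text Behind Text")
            SpriteView(scene: TransparentScene(size: CGSize(width: 200, height: 200)),
                       options: [.allowsTransparency])
        }
    }
}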
I didn't like using SpriteKit to make a SceneKit scene's background transparent because you completely lose access to the SCNView. Here is what I believe to be the correct approach.
Create a Scene Kit Scene file named GameScene.scn
Drop your 3D object into the GameScene
Use the code below
/// The SCNView view
struct GameSceneView: UIViewRepresentable {
@ObservedObject var viewModel: GameSceneViewModel
func makeUIView(context: UIViewRepresentableContext<GameSceneView>) -> SCNView {
let view = SCNView()
view.backgroundColor = viewModel.backgroundColor
view.allowsCameraControl = viewModel.allowsCameraControls
view.autoenablesDefaultLighting = viewModel.autoenablesDefaultLighting
view.scene = viewModel.scene
return view
}
func updateUIView(_ uiView: SCNView, context: UIViewRepresentableContext<GameSceneView>) {}
}
/// The view model supplying the SCNScene and its properties
class GameSceneViewModel: ObservableObject {
@Published var scene: SCNScene?
@Published var backgroundColor: UIColor
@Published var allowsCameraControls: Bool
@Published var autoenablesDefaultLighting: Bool
init(
sceneName: String = "GameScene.scn",
cameraName: String = "camera",
backgroundColor: UIColor = .clear,
allowsCameraControls: Bool = true,
autoenablesDefaultLighting: Bool = true
) {
self.scene = SCNScene(named: sceneName)
self.backgroundColor = backgroundColor
self.allowsCameraControls = allowsCameraControls
self.autoenablesDefaultLighting = autoenablesDefaultLighting
scene?.background.contents = backgroundColor
scene?.rootNode.childNode(withName: cameraName, recursively: false)
}
}
/// Usage
struct ContentView: View {
var body: some View {
VStack {
GameSceneView(viewModel: GameSceneViewModel())
}
.background(Color.blue)
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
}
}

Trouble to make a custom UIView aspect scale fit/fill with SwiftUI

There is no public API in SwiftUI to respond to the resizable modifier of the View protocol. Only Image in SwiftUI works with .resizable(). A custom UIView, such as a UIView for GIFs, is not resizable right now.
I use SDWebImageSwiftUI's AnimatedImage, which is backed by the UIKit view SDAnimatedImageView. AnimatedImage does not respond to .resizable(), .scaledToFit(), .aspectRatio(contentMode: .fit), etc. WebImage is backed by SwiftUI's Image, so it works fine.
import SwiftUI
import SDWebImageSwiftUI
struct ContentView: View {
let url = URL(string: "https://media.giphy.com/media/H62DGtBRwgbrxWXh6t/giphy.gif")!
var body: some View {
VStack {
AnimatedImage(url: url)
.scaledToFit()
.frame(width: 100, height: 100)
WebImage(url: url)
.scaledToFit()
.frame(width: 100, height: 100)
}
}
}
Not sure if it's an Apple bug. I expect a custom view like SDWebImageSwiftUI's AnimatedImage to respond to SwiftUI size-related modifiers like .scaledToFit().
Related issue: https://github.com/SDWebImage/SDWebImageSwiftUI/issues/3
SwiftUI uses the compression resistance priority and the content hugging priority to decide what resizing is possible.
If you want to resize a view below its intrinsic content size, you need to reduce the compression resistance priority.
Example:
func makeUIView(context: Context) -> UIView {
let imageView = UIImageView(image: UIImage(named: "yourImage")!)
imageView.setContentCompressionResistancePriority(.defaultLow, for: .horizontal)
imageView.setContentCompressionResistancePriority(.defaultLow, for: .vertical)
return imageView
}
This will allow you to set .frame(width:height:) to any size you want.
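If, conversely, the view should be allowed to grow beyond its intrinsic content size, you can lower the content hugging priority as well; a small sketch on the same (hypothetical) image view:
// Sketch: also lower content hugging so the view may be stretched larger
// than its intrinsic content size when SwiftUI offers more space.
imageView.setContentHuggingPriority(.defaultLow, for: .horizontal)
imageView.setContentHuggingPriority(.defaultLow, for: .vertical)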
I finally found a solution.
Make a UIView wrapper around the SDAnimatedImageView or UIImageView, then override layoutSubviews() to set the frame of the subview.
Here is my full code.
SDWebImageSwiftUI has also released a new version which uses a wrapper to solve this problem.
class ImageModel: ObservableObject {
@Published var url: URL?
@Published var contentMode: UIView.ContentMode = .scaleAspectFill
}
struct WebImage: UIViewRepresentable {
@ObservedObject var imageModel = ImageModel()
func makeUIView(context: UIViewRepresentableContext<WebImage>) -> ImageView {
let uiView = ImageView(imageModel: imageModel)
return uiView
}
func updateUIView(_ uiView: ImageView, context: UIViewRepresentableContext<WebImage>) {
uiView.imageView.sd_setImage(with: imageModel.url)
uiView.imageView.contentMode = imageModel.contentMode
}
func url(_ url: URL?) -> Self {
imageModel.url = url
return self
}
func scaledToFit() -> Self {
imageModel.contentMode = .scaleAspectFit
return self
}
func scaledToFill() -> Self {
imageModel.contentMode = .scaleAspectFill
return self
}
}
class ImageView: UIView {
let imageView = UIImageView()
init(imageModel: ImageModel) {
super.init(frame: .zero)
addSubview(imageView)
}
override func layoutSubviews() {
super.layoutSubviews()
imageView.frame = bounds
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
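Usage then looks much like the SDWebImageSwiftUI example above; a sketch using the wrapper's own modifiers (the GIF URL is the one from the question, and GifDemoView is an illustrative name):
// Sketch: the wrapper is configured through its own url/scaledToFit modifiers
// and then framed like any other SwiftUI view.
struct GifDemoView: View {
    let url = URL(string: "https://media.giphy.com/media/H62DGtBRwgbrxWXh6t/giphy.gif")

    var body: some View {
        WebImage()
            .url(url)
            .scaledToFit()
            .frame(width: 100, height: 100)
    }
}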