I am practicing with SwiftUI and making a meme maker that has labels, produced from a TextField, which can be moved and resized. I also want to be able to do this with images from the user's photo library. I am able to get one image, but if I try to add more, each new one just replaces the first. I tried adding the images to an array, but then the images will not show up on the memeImageView.
Image property
@State private var image = UIImage()
Button
Button {
self.isShowPhotoLibrary = true
} label: {
Text("Add Image")
.foregroundColor(Color.yellow)
}.sheet(isPresented: $isShowPhotoLibrary) {
ImagePicker(sourceType: .photoLibrary, selectedImage: self.$image)
}
MemeImageView
var memeImageView: some View {
ZStack {
KFImage(URL(string: meme.url ?? ""))
.placeholder {
ProgressView()
}
.resizable()
.aspectRatio(contentMode: .fit)
.frame(height: UIScreen.main.bounds.height / 2.5)
ForEach(addedLabels, id:\.self) { label in
DraggableLabel(text: label)
}
DraggableImage(image: image)
}
.clipped()
}
Attempt using an array. I also tried making three buttons to add up to three images, each as its own property, thinking that the initial property was being overridden.
My image array
@State private var addedImages = [UIImage?]()
Button
Button {
self.isShowPhotoLibrary = true
addedImages.append(image)
} label: {
Text("Add Image")
.foregroundColor(Color.yellow)
}.sheet(isPresented: $isShowPhotoLibrary) {
ImagePicker(sourceType: .photoLibrary, selectedImage: self.$image)
}
var memeImageView: some View {
ZStack {
KFImage(URL(string: meme.url ?? ""))
.placeholder {
ProgressView()
}
.resizable()
.aspectRatio(contentMode: .fit)
.frame(height: UIScreen.main.bounds.height / 2.5)
ForEach(addedLabels, id:\.self) { label in
DraggableLabel(text: label)
}
ForEach(0..<addedImages.count) { index in
DraggableImage(image: addedImages[index]!)
}
}
.clipped()
}
Where I call MemeImageView.
var body: some View {
VStack(spacing: 12) {
memeImageView
ForEach(0..<(meme.boxCount ?? 0)) { i in
TextField("Statement \(i + 1)", text: $addedLabels[i])
.padding(.horizontal, 8)
.padding(.vertical, 4)
.background(Color.gray.opacity(0.25))
.cornerRadius(5)
.onTapGesture {
self.endEditing()
}
}
.padding(.horizontal)
}.onTapGesture {
self.endEditing()
}
// Gets a new Image
Button {
self.isShowPhotoLibrary = true
addedImages.append(image)
} label: {
Text("Add Image")
.foregroundColor(Color.yellow)
}.sheet(isPresented: $isShowPhotoLibrary) {
ImagePicker(sourceType: .photoLibrary, selectedImage: self.$image)
}
Spacer()
// Saves Image
Button {
// takes a screenshot and crops it
if let image = memeImageView.takeScreenshot(origin: CGPoint(x: 0, y: UIApplication.shared.windows[0].safeAreaInsets.top + navBarHeight + 1), size: CGSize(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height / 2.5)) {
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
presentationMode.wrappedValue.dismiss() // dismisses the view
}
}
label: {
Text("Save image")
.foregroundColor(Color.yellow)
}.frame( width: 150, height: 50)
.overlay(
RoundedRectangle(cornerRadius: 25)
.stroke(Color.red, lineWidth: 3)
)
.navigationBarTitle(meme.name ?? "Meme", displayMode: .inline)
.background(NavBarAccessor { navBar in
self.navBarHeight = navBar.bounds.height
})
}
For reproducing (as close as possible to how my actual project is set up):
Content View
import SwiftUI
struct ContentView: View {
var body: some View {
DragImageView()
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
}
}
DragImageView:
import SwiftUI
struct DragImageView: View {
//===================
// MARK: Properties
//===================
@State private var addedImages = [UIImage?]()
@State private var isShowPhotoLibrary = false
@State private var image = UIImage()
var body: some View {
VStack(spacing: 12) {
imageView
}
// Gets a new Image
Button {
self.isShowPhotoLibrary = true
addedImages.append(image)
} label: {
Text("Add Image")
.foregroundColor(Color.yellow)
}.sheet(isPresented: $isShowPhotoLibrary) {
ImagePicker(sourceType: .photoLibrary, selectedImage: self.$image)
}
Spacer()
}
var imageView: some View {
ZStack {
DraggableImage(image: image)
}
//.clipped()
}
// This will dismiss the keyboard
private func endEditing() {
UIApplication.shared.endEditing()
}
}
// Allows for the keyboard to be dismissed
extension UIApplication {
func endEditing() {
sendAction(#selector(UIResponder.resignFirstResponder), to: nil, from: nil, for: nil)
}
}
DraggableImage:
import SwiftUI
struct DraggableImage: View {
// Drag Gesture
@State private var currentPosition: CGSize = .zero
@State private var newPosition: CGSize = .zero
// Rotation Gesture
@State private var rotation: Double = 0.0
// Scale Gesture
@State private var scale: CGFloat = 1.0
// The different states the frame of the image could be
private enum WidthState: Int {
case full, half, third, fourth
}
@State private var widthState: WidthState = .full
@State private var currentWidth: CGFloat = 100 //UIScreen.main.bounds.width
var image: UIImage
var body: some View {
VStack {
Image(uiImage: self.image)
.resizable()
.scaledToFill()
.frame(width: self.currentWidth)
.lineLimit(nil)
}
.scaleEffect(scale) // Scale based on our state
.rotationEffect(Angle.degrees(rotation)) // Rotate based on the state
.offset(x: self.currentPosition.width, // Offset by the drag difference from its current position
y: self.currentPosition.height)
.gesture(
// Two finger rotation
RotationGesture()
.onChanged { angle in
self.rotation = angle.degrees // keep track of the angle for state
}
// We want it to work with the scale effect, so the user can scale and rotate at the same time
.simultaneously(with:
MagnificationGesture()
.onChanged { scale in
self.scale = scale.magnitude // Keep track of the scale
})
// Update the drag's new position to be wherever it was last dragged to (we don't want to reset it back to its current position)
.simultaneously(with: DragGesture()
.onChanged { value in
self.currentPosition = CGSize(width: value.translation.width + self.newPosition.width,
height: value.translation.height + self.newPosition.height)
}
.onEnded { value in
self.newPosition = self.currentPosition
})
)
/// Have to do double tap first or else it will never work with the single tap
.onTapGesture(count: 2) {
// Update our widthState to be the next one in the enum, or start back at .full
self.widthState = WidthState(rawValue: self.widthState.rawValue + 1) ?? .full
self.currentWidth = UIScreen.main.bounds.width / CGFloat(self.widthState.rawValue)
}
}
}
ImagePicker:
import UIKit
import SwiftUI
struct ImagePicker: UIViewControllerRepresentable {
var sourceType: UIImagePickerController.SourceType = .photoLibrary
@Binding var selectedImage: UIImage
@Environment(\.presentationMode) private var presentationMode
func makeUIViewController(context: UIViewControllerRepresentableContext<ImagePicker>) -> UIImagePickerController {
let imagePicker = UIImagePickerController()
imagePicker.allowsEditing = false
imagePicker.sourceType = sourceType
imagePicker.delegate = context.coordinator
return imagePicker
}
func updateUIViewController(_ uiViewController: UIImagePickerController, context: UIViewControllerRepresentableContext<ImagePicker>) {
}
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
final class Coordinator: NSObject, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
var parent: ImagePicker
init(_ parent: ImagePicker) {
self.parent = parent
}
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
if let image = info[UIImagePickerController.InfoKey.originalImage] as? UIImage {
parent.selectedImage = image
}
parent.presentationMode.wrappedValue.dismiss()
}
}
}
I should add that this is to make memes, so the user-picked images go on top of the view that I save to the camera roll.
I'm not 100% clear on what the exact desired output should be, but this should get you started (explained below):
struct DragImageView: View {
//===================
// MARK: Properties
//===================
@State private var addedImages = [UIImage]()
@State private var isShowPhotoLibrary = false
var bindingForImage: Binding<UIImage> {
Binding<UIImage> { () -> UIImage in
return addedImages.last ?? UIImage()
} set: { (newImage) in
addedImages.append(newImage)
print("Images: \(addedImages.count)")
}
}
var body: some View {
VStack(spacing: 12) {
imageView
}
// Gets a new Image
Button {
self.isShowPhotoLibrary = true
} label: {
Text("Add Image")
.foregroundColor(Color.yellow)
}.sheet(isPresented: $isShowPhotoLibrary) {
ImagePicker(sourceType: .photoLibrary, selectedImage: bindingForImage)
}
Spacer()
}
var imageView: some View {
VStack {
ForEach(addedImages, id: \.self) { image in
DraggableImage(image: image)
}
}
}
// This will dismiss the keyboard
private func endEditing() {
UIApplication.shared.endEditing()
}
}
addedImages is now an array of non-optional UIImages
There's a custom Binding for the image picker. When it receives a new image, it appends it to the end of the array.
In var imageView, there's a VStack instead of a ZStack so that multiple images can get displayed (instead of stacked on top of each other) and a ForEach loop to iterate through the images.
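Since the stated goal is a meme, where the picked images sit on top of the meme template and DraggableImage already positions itself with its own drag offset, the same array-plus-custom-Binding idea can also feed a ZStack overlay instead of a VStack. A minimal, untested sketch of just that subview (the name memeOverlay is made up; it assumes the addedImages array and DraggableImage from above):

var memeOverlay: some View {
    ZStack {
        // Indices are used as the ForEach id so that picking the same photo
        // twice does not produce duplicate \.self identifiers.
        ForEach(addedImages.indices, id: \.self) { index in
            DraggableImage(image: addedImages[index])
        }
    }
}

Dropping this into the ZStack of memeImageView (in place of the single DraggableImage) keeps every picked image draggable on top of the meme background.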
I am trying to create two synced ScrollViews in SwiftUI such that scrolling in one will result in the same scrolling in the other.
I am using the ScrollViewOffset struct shown at the bottom to get a scroll view offset value, but I'm having trouble figuring out how to scroll the other view.
I seem to be able to 'hack' it by preventing scrolling in one view and setting the content position() on the other. Is there any way to actually scroll the scroll view's content to a position? I know ScrollViewReader allows scrolling to display specific content items, but I can't seem to find anything that will scroll the contents to an offset position.
The problem with using position() is that it does not actually change the ScrollView's scroller positions - there seems to be no ScrollView.scrollContentsTo(point: CGPoint).
@State private var scrollOffset1: CGPoint = .zero
HStack {
ScrollViewOffset(onOffsetChange: { offset in
scrollOffset1 = offset
print("New ScrollView1 offset: \(offset)")
}, content: {
VStack {
ImageView(filteredImageProvider: self.provider)
.frame(width: imageWidth, height: imageHeight)
}
.frame(width: imageWidth + (geometry.size.width - 20) * 2, height: imageHeight + (geometry.size.height - 20) * 2)
.border(Color.white)
.id(0)
})
ScrollView([]) {
VStack {
ImageView(filteredImageProvider: self.provider, showEdits: false)
.frame(width: imageWidth, height: imageHeight)
}
.frame(width: imageWidth + (geometry.size.width - 20) * 2, height: imageHeight + (geometry.size.height - 20) * 2)
.border(Color.white)
.id(0)
.position(x: scrollOffset1.x, y: scrollOffset1.y + (imageHeight + (geometry.size.height - 20) * 2)/2)
}
}
//
// ScrollViewOffset.swift
// ZoomView
//
//
import Foundation
import SwiftUI
struct ScrollViewOffset<Content: View>: View {
let onOffsetChange: (CGPoint) -> Void
let content: () -> Content
init(
onOffsetChange: @escaping (CGPoint) -> Void,
@ViewBuilder content: @escaping () -> Content
) {
self.onOffsetChange = onOffsetChange
self.content = content
}
var body: some View {
ScrollView([.horizontal, .vertical]) {
offsetReader
content()
.padding(.top, -8)
}
.coordinateSpace(name: "frameLayer")
.onPreferenceChange(ScrollOffsetPreferenceKey.self, perform: onOffsetChange)
}
var offsetReader: some View {
GeometryReader { proxy in
Color.clear
.preference(
key: ScrollOffsetPreferenceKey.self,
value: proxy.frame(in: .named("frameLayer")).origin
)
}
.frame(width: 0, height: 0)
}
}
private struct ScrollOffsetPreferenceKey: PreferenceKey {
static var defaultValue: CGPoint = .zero
static func reduce(value: inout CGPoint, nextValue: () -> CGPoint) {}
}
Synced scroll views.
In this example, you can scroll the LHS scroll view and the RHS scroll view will be synchronised to the same position. The scroll view on the RHS is disabled, and its position is simply synchronised by applying an offset.
Using the same logic and code, you can make both the LHS and RHS scroll views stay in sync when either of them is scrolled.
import SwiftUI
struct ContentView: View {
@State private var offset = CGFloat.zero
var body: some View {
HStack(alignment: .top) {
// MainScrollView
ScrollView {
VStack {
ForEach(0..<100) { i in
Text("Item \(i)").padding()
}
}
.background( GeometryReader {
Color.clear.preference(key: ViewOffsetKey.self,
value: -$0.frame(in: .named("scroll")).origin.y)
})
.onPreferenceChange(ViewOffsetKey.self) { value in
print("offset >> \(value)")
offset = value
}
}
.coordinateSpace(name: "scroll")
// Synchronised with ScrollView above
ScrollView {
VStack {
ForEach(0..<100) { i in
Text("Item \(i)").padding()
}
}
.offset(y: -offset)
}
.disabled(true)
}
}
}
struct ViewOffsetKey: PreferenceKey {
typealias Value = CGFloat
static var defaultValue = CGFloat.zero
static func reduce(value: inout Value, nextValue: () -> Value) {
value += nextValue()
}
}
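The original question scrolls in both axes (ScrollView([.horizontal, .vertical])), while the example above only mirrors the vertical offset. A hedged sketch of a two-axis variant using a CGPoint preference key; the names SyncedPanes and ViewOffsetPointKey are made up for this sketch:

import SwiftUI

struct SyncedPanes: View {
    @State private var offset: CGPoint = .zero

    // Placeholder content wide and tall enough to scroll in both directions
    private var grid: some View {
        VStack {
            ForEach(0..<50) { row in
                HStack {
                    ForEach(0..<20) { col in
                        Text("\(row),\(col)").padding(8)
                    }
                }
            }
        }
    }

    var body: some View {
        HStack(alignment: .top) {
            // Leader: reports its content origin in both axes
            ScrollView([.horizontal, .vertical]) {
                grid
                    .background(GeometryReader { geo in
                        Color.clear.preference(
                            key: ViewOffsetPointKey.self,
                            value: CGPoint(x: -geo.frame(in: .named("sync")).origin.x,
                                           y: -geo.frame(in: .named("sync")).origin.y))
                    })
                    .onPreferenceChange(ViewOffsetPointKey.self) { offset = $0 }
            }
            .coordinateSpace(name: "sync")

            // Follower: disabled, it simply mirrors the leader's offset
            ScrollView([.horizontal, .vertical]) {
                grid.offset(x: -offset.x, y: -offset.y)
            }
            .disabled(true)
        }
    }
}

struct ViewOffsetPointKey: PreferenceKey {
    static var defaultValue: CGPoint = .zero
    static func reduce(value: inout CGPoint, nextValue: () -> CGPoint) {
        let next = nextValue()
        value = CGPoint(x: value.x + next.x, y: value.y + next.y)
    }
}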
With the current API of ScrollView, this is not possible. While you can get the contentOffset of the scrollView using methods that are widely available on the internet, the ScrollViewReader that is used to programmatically scroll a ScrollView only allows you to scroll to specific views, instead of to a contentOffset.
To achieve this functionality, you are going to have to wrap UIScrollView. Here is an implementation, although it isn't 100% stable, and is missing a good amount of scrollView functionality:
import SwiftUI
import UIKit
public struct ScrollableView<Content: View>: UIViewControllerRepresentable {
@Binding var offset: CGPoint
var content: () -> Content
public init(_ offset: Binding<CGPoint>, @ViewBuilder content: @escaping () -> Content) {
self._offset = offset
self.content = content
}
public func makeUIViewController(context: Context) -> UIScrollViewViewController {
let vc = UIScrollViewViewController()
vc.hostingController.rootView = AnyView(self.content())
vc.scrollView.setContentOffset(offset, animated: false)
vc.delegate = context.coordinator
return vc
}
public func updateUIViewController(_ viewController: UIScrollViewViewController, context: Context) {
viewController.hostingController.rootView = AnyView(self.content())
// Allow for deceleration to be done by the scrollView
if !viewController.scrollView.isDecelerating {
viewController.scrollView.setContentOffset(offset, animated: false)
}
}
public func makeCoordinator() -> Coordinator {
Coordinator(contentOffset: _offset)
}
public class Coordinator: NSObject, UIScrollViewDelegate {
let contentOffset: Binding<CGPoint>
init(contentOffset: Binding<CGPoint>) {
self.contentOffset = contentOffset
}
public func scrollViewDidScroll(_ scrollView: UIScrollView) {
contentOffset.wrappedValue = scrollView.contentOffset
}
}
}
public class UIScrollViewViewController: UIViewController {
lazy var scrollView: UIScrollView = UIScrollView()
var hostingController: UIHostingController<AnyView> = UIHostingController(rootView: AnyView(EmptyView()))
weak var delegate: UIScrollViewDelegate?
public override func viewDidLoad() {
super.viewDidLoad()
self.scrollView.delegate = delegate
self.view.addSubview(self.scrollView)
self.pinEdges(of: self.scrollView, to: self.view)
self.hostingController.willMove(toParent: self)
self.scrollView.addSubview(self.hostingController.view)
self.pinEdges(of: self.hostingController.view, to: self.scrollView)
self.hostingController.didMove(toParent: self)
}
func pinEdges(of viewA: UIView, to viewB: UIView) {
viewA.translatesAutoresizingMaskIntoConstraints = false
viewB.addConstraints([
viewA.leadingAnchor.constraint(equalTo: viewB.leadingAnchor),
viewA.trailingAnchor.constraint(equalTo: viewB.trailingAnchor),
viewA.topAnchor.constraint(equalTo: viewB.topAnchor),
viewA.bottomAnchor.constraint(equalTo: viewB.bottomAnchor),
])
}
}
struct ScrollableView_Previews: PreviewProvider {
static var previews: some View {
Wrapper()
}
struct Wrapper: View {
@State var offset: CGPoint = .init(x: 0, y: 50)
var body: some View {
HStack {
ScrollableView($offset, content: {
ForEach(0...100, id: \.self) { id in
Text("\(id)")
}
})
ScrollableView($offset, content: {
ForEach(0...100, id: \.self) { id in
Text("\(id)")
}
})
VStack {
Text("x: \(offset.x) y: \(offset.y)")
Button("Top", action: {
offset = .zero
})
.buttonStyle(.borderedProminent)
}
.frame(width: 200)
.padding()
}
}
}
}
Using SwiftUI 2.0, I am hoping to find a way to capture the resized image after the user has selected how they want to see it in the frame from the scroll view (ZoomScrollView).
I know there are complex examples out there, but I was hoping to find a simpler way to capture this in SwiftUI 2.0. In all my searching I've seen references to using a ZStack and some masks or overlays, but I can't find a simple, good example.
I am hoping someone can update my example with the ZStack, masks, etc. and show how to extract the image for saving, or provide a better example.
import SwiftUI
struct ContentView: View {
@Environment(\.presentationMode) var presentationMode
@State var isAccepted: Bool = false
@State var isShowingImagePicker = false
@State var isShowingActionPicker = false
@State var sourceType: UIImagePickerController.SourceType = .camera
@State var image: UIImage?
var body: some View {
HStack {
Color(UIColor.systemYellow).frame(width: 8)
VStack(alignment: .leading) {
HStack {
Spacer()
VStack {
if image != nil {
ZoomScrollView {
Image(uiImage: image!)
.resizable()
.scaledToFit()
}
.frame(width: 300, height: 300, alignment: .center)
.clipShape(Circle())
} else {
Image(systemName: "person.crop.circle")
.resizable()
.font(.system(size: 32, weight: .light))
.frame(width: 300, height: 300, alignment: .center)
.cornerRadius(180)
.foregroundColor(Color(.systemGray))
.clipShape(Circle())
}
}
Spacer()
}
Spacer()
HStack {
Button(action: {
self.isShowingActionPicker = true
}, label: {
Text("Select Image")
.foregroundColor(.blue)
})
.frame(width: 130)
.actionSheet(isPresented: $isShowingActionPicker, content: {
ActionSheet(title: Text("Select a profile avatar picture"), message: nil, buttons: [
.default(Text("Camera"), action: {
self.isShowingImagePicker = true
self.sourceType = .camera
}),
.default(Text("Photo Library"), action: {
self.isShowingImagePicker = true
self.sourceType = .photoLibrary
}),
.cancel()
])
})
.sheet(isPresented: $isShowingImagePicker) {
imagePicker(image: $image, isShowingImagePicker: $isShowingImagePicker ,sourceType: self.sourceType)
}
Spacer()
// Save button
Button(action: {
// Save Image here... print for now just see if file dimensions are the right size
print("saved: ", image!)
self.presentationMode.wrappedValue.dismiss()
}
) {
HStack {
Text("Save").foregroundColor(isAccepted ? .gray : .blue)
}
}
.frame(width: 102)
.padding(.top)
.padding(.bottom)
//.buttonStyle(RoundedCorners())
.disabled(isAccepted) // Disable if if already isAccepted is true
}
}
Spacer()
Color(UIColor.systemYellow).frame(width: 8)
}
.padding(.top, UIApplication.shared.windows.first?.safeAreaInsets.top)
.background(Color(UIColor.systemYellow))
}
}
struct ZoomScrollView<Content: View>: UIViewRepresentable {
private var content: Content
init(@ViewBuilder content: () -> Content) {
self.content = content()
}
func makeUIView(context: Context) -> UIScrollView {
// set up the UIScrollView
let scrollView = UIScrollView()
scrollView.delegate = context.coordinator // for viewForZooming(in:)
scrollView.maximumZoomScale = 20
scrollView.minimumZoomScale = 1
scrollView.bouncesZoom = true
// create a UIHostingController to hold our SwiftUI content
let hostedView = context.coordinator.hostingController.view!
hostedView.translatesAutoresizingMaskIntoConstraints = true
hostedView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
hostedView.frame = scrollView.bounds
scrollView.addSubview(hostedView)
return scrollView
}
func makeCoordinator() -> Coordinator {
return Coordinator(hostingController: UIHostingController(rootView: self.content))
}
func updateUIView(_ uiView: UIScrollView, context: Context) {
// update the hosting controller's SwiftUI content
context.coordinator.hostingController.rootView = self.content
assert(context.coordinator.hostingController.view.superview == uiView)
}
// MARK: - Coordinator
class Coordinator: NSObject, UIScrollViewDelegate {
var hostingController: UIHostingController<Content>
init(hostingController: UIHostingController<Content>) {
self.hostingController = hostingController
}
func viewForZooming(in scrollView: UIScrollView) -> UIView? {
return hostingController.view
}
}
}
struct imagePicker:UIViewControllerRepresentable {
@Binding var image: UIImage?
@Binding var isShowingImagePicker: Bool
typealias UIViewControllerType = UIImagePickerController
typealias Coordinator = imagePickerCoordinator
var sourceType:UIImagePickerController.SourceType = .camera
func makeUIViewController(context: Context) -> UIImagePickerController {
let picker = UIImagePickerController()
picker.sourceType = sourceType
picker.delegate = context.coordinator
return picker
}
func makeCoordinator() -> imagePickerCoordinator {
return imagePickerCoordinator(image: $image, isShowingImagePicker: $isShowingImagePicker)
}
func updateUIViewController(_ uiViewController: UIImagePickerController, context: Context) {}
}
class imagePickerCoordinator: NSObject, UINavigationControllerDelegate, UIImagePickerControllerDelegate {
@Binding var image: UIImage?
@Binding var isShowingImagePicker: Bool
init(image:Binding<UIImage?>, isShowingImagePicker: Binding<Bool>) {
_image = image
_isShowingImagePicker = isShowingImagePicker
}
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
if let uiimage = info[UIImagePickerController.InfoKey.originalImage] as? UIImage {
image = uiimage
isShowingImagePicker = false
}
}
func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
isShowingImagePicker = false
}
}
I just want to return the image that's zoomed in the circle. The image can be square (re: the 300x300 frame), that's fine; I just need the zoomed image, not the whole screen or the original image.
The following changes were successful, based on the comments:
Add the following State variables:
@State private var rect: CGRect = .zero
@State private var uiimage: UIImage? = nil // resized image
Added "RectGetter" to the picked image frame after image selected selected
if image != nil {
ZoomScrollView {
Image(uiImage: image!)
.resizable()
.scaledToFit()
}
.frame(width: 300, height: 300, alignment: .center)
.clipShape(Circle())
.background(RectGetter(rect: $rect))
Here is the struct and extension I added
extension UIView {
func asImage(rect: CGRect) -> UIImage {
let renderer = UIGraphicsImageRenderer(bounds: rect)
return renderer.image { rendererContext in
layer.render(in: rendererContext.cgContext)
}
}
}
struct RectGetter: View {
@Binding var rect: CGRect
var body: some View {
GeometryReader { proxy in
self.createView(proxy: proxy)
}
}
func createView(proxy: GeometryProxy) -> some View {
DispatchQueue.main.async {
self.rect = proxy.frame(in: .global)
}
return Rectangle().fill(Color.clear)
}
}
Lastly, I set the image to save:
self.uiimage = UIApplication.shared.windows[0].rootViewController?.view.asImage(rect: self.rect)
This assumes the root controller. However, in my production app I had to point to self
self.uiimage = UIApplication.shared.windows[0].self.asImage(rect: self.rect)
Then I was able to save that image.
A couple of notes. The image returned is the rectangle, which is fine. However, due to the way the image is captured, the part of the rectangle outside the circular clip shape keeps the background color - in this case yellow at the four corners outside the circle. There is probably a way to have some sort of z-order mask that overlays the image for display while you are resizing it, but then accesses the right layer and saves the full rectangle picture. If anyone wants to suggest something further, that would be a cleaner solution, but this works assuming you will always display the picture in the same crop shape it was saved in. One possible cleanup for the corners is sketched below.
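One untested way to clean up those corners is to re-render the captured UIImage through a circular clip so everything outside the circle becomes transparent rather than the background color. The helper name croppedToCircle is made up for this sketch:

import UIKit

extension UIImage {
    // Hypothetical helper: re-draws the image inside a circular clip so the
    // area outside the circle is transparent instead of the capture background.
    func croppedToCircle() -> UIImage {
        let format = UIGraphicsImageRendererFormat.default()
        format.opaque = false // keep transparency outside the clip
        let renderer = UIGraphicsImageRenderer(size: size, format: format)
        return renderer.image { _ in
            let bounds = CGRect(origin: .zero, size: size)
            UIBezierPath(ovalIn: bounds).addClip()
            draw(in: bounds)
        }
    }
}

// e.g. self.uiimage = UIApplication.shared.windows[0].rootViewController?
//          .view.asImage(rect: self.rect).croppedToCircle()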
I'm new to SwiftUI and Combine. What I'm trying to build is a manual camera app, and there are only 4 UI components:
CaptureButton for taking a shot with the camera
FocusPicker for manually controlling the camera focus
OffsetView for displaying the level of exposure
CameraPreviewRepresentable for integrating the UIKit camera into a SwiftUI view
I also added privacy request entries to the Info.plist file to allow the camera feature and saving to the Apple Photos app.
For updating data and passing it to the UI, I'm using CameraViewModel, with currentCameraSubject and the currentCamera publisher to surface new values from AVCaptureDevice and set them on CameraViewModel.
I'm noticing a really strange behavior/bug in FocusPicker: when I start interacting with it and picking a new focus, it constantly snaps back to its starting position whenever OffsetView gets a new value.
Interestingly enough, when OffsetView keeps the same value, FocusPicker behaves normally. I do not know why this is happening. Please help, it's really frustrating to fix.
By the way, it will only work on a real device.
Here's all the code:
import SwiftUI
//@main
//struct StackOverflowCamApp: App {
// var cameraViewModel = CameraViewModel(focusLensPosition: 0)
// let cameraController: CustomCameraController = CustomCameraController()
//
// var body: some Scene {
// WindowGroup {
// ContentView(cameraViewModel: cameraViewModel, cameraController: cameraController)
// }
// }
//}
struct ContentView: View {
@State private var didTapCapture = false
@ObservedObject var cameraViewModel: CameraViewModel
let cameraController: CustomCameraController
var body: some View {
VStack {
ZStack {
CameraPreviewRepresentable(didTapCapture: $didTapCapture, cameraViewModel: cameraViewModel, cameraController: cameraController)
.frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .center)
VStack {
FocusPicker(selectedFocus: $cameraViewModel.focusChoice)
Text(String(format: "%.2f", cameraViewModel.focusLensPosition))
.foregroundColor(.red)
.font(.largeTitle)
}
.frame(maxWidth: .infinity, alignment: .leading)
}
.edgesIgnoringSafeArea(.all)
Spacer()
OffsetView(levelValue: cameraViewModel.exposureTargetOffset, height: 100)
.frame(maxWidth: .infinity, alignment: .leading)
CaptureButton(didTapCapture: $didTapCapture)
.frame(width: 100, height: 100, alignment: .center)
.padding(.bottom, 20)
}
}
}
struct CaptureButton: View {
@Binding var didTapCapture: Bool
var body: some View {
Button {
didTapCapture.toggle()
} label: {
Image(systemName: "photo")
.font(.largeTitle)
.padding(30)
.background(Color.red)
.foregroundColor(.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(Color.red)
)
}
}
}
struct OffsetView: View {
var levelValue: Float
let height: CGFloat
var body: some View {
ZStack {
Rectangle()
.foregroundColor(.red)
.frame(maxWidth: height / 2, maxHeight: height, alignment: .trailing)
Rectangle()
.foregroundColor(.orange)
.frame(maxWidth: height / 2, maxHeight: height / 20, alignment: .trailing)
.offset(x: 0, y: min(CGFloat(-levelValue) * height / 2, height / 2))
}
}
}
struct FocusPicker: View {
@Binding var selectedFocus: FocusChoice
var body: some View {
Picker(selection: $selectedFocus, label: Text("")) {
ForEach(0..<FocusChoice.allCases.count) {
Text("\(FocusChoice.allCases[$0].caption)")
.foregroundColor(.white)
.font(.subheadline)
.fontWeight(.medium)
.tag(FocusChoice.allCases[$0])
}
.animation(.none)
.background(Color.clear)
.pickerStyle(WheelPickerStyle())
}
.frame(width: 60, height: 200)
.border(Color.gray, width: 5)
.clipped()
}
}
import SwiftUI
import Combine
import AVFoundation
struct CameraPreviewRepresentable: UIViewControllerRepresentable {
@Environment(\.presentationMode) var presentationMode
@Binding var didTapCapture: Bool
@ObservedObject var cameraViewModel: CameraViewModel
let cameraController: CustomCameraController
func makeUIViewController(context: Context) -> CustomCameraController {
cameraController.delegate = context.coordinator
return cameraController
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {
if didTapCapture {
cameraViewController.didTapRecord()
}
// checking if the new value is different from the previous value
if cameraViewModel.focusChoice.rawValue != cameraViewController.manualFocusValue {
cameraViewController.manualFocusValue = cameraViewModel.focusChoice.rawValue
}
}
func makeCoordinator() -> Coordinator {
Coordinator(self, cameraViewModel: cameraViewModel)
}
class Coordinator: NSObject, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate {
let parent: CameraPreviewRepresentable
var cameraViewModel: CameraViewModel
var tokens = Set<AnyCancellable>()
init(_ parent: CameraPreviewRepresentable, cameraViewModel: CameraViewModel) {
self.parent = parent
self.cameraViewModel = cameraViewModel
super.init()
// for showing focus lens position
self.parent.cameraController.currentCamera
.filter { $0 != nil }
.flatMap { $0!.publisher(for: \.lensPosition) }
.assign(to: \.focusLensPosition, on: cameraViewModel)
.store(in: &tokens)
// for showing exposure offset
self.parent.cameraController.currentCamera
.filter { $0 != nil }
.flatMap { $0!.publisher(for: \.exposureTargetOffset) }
.assign(to: \.exposureTargetOffset, on: cameraViewModel)
.store(in: &tokens)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
parent.didTapCapture = false
if let imageData = photo.fileDataRepresentation(), let image = UIImage(data: imageData) {
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}
parent.presentationMode.wrappedValue.dismiss()
}
}
}
import Combine
import AVFoundation
class CameraViewModel: ObservableObject {
@Published var focusLensPosition: Float = 0
@Published var exposureTargetOffset: Float = 0
@Published var focusChoice: FocusChoice = .infinity
private var tokens = Set<AnyCancellable>()
init(focusLensPosition: Float) {
self.focusLensPosition = focusLensPosition
}
}
enum FocusChoice: Float, CaseIterable {
case infinity = 1
case ft_30 = 0.95
case ft_15 = 0.9
case ft_10 = 0.85
case ft_7 = 0.8
case ft_5 = 0.5
case ft_4 = 0.7
case ft_3_5 = 0.65
case ft_3 = 0.6
case auto = 0
}
extension FocusChoice {
var caption: String {
switch self {
case .infinity: return "∞ft"
case .ft_30: return "30"
case .ft_15: return "15"
case .ft_10: return "10"
case .ft_7: return "7"
case .ft_5: return "5"
case .ft_4: return "4"
case .ft_3_5: return "3.5"
case .ft_3: return "3"
case .auto: return "Auto"
}
}
}
import UIKit
import Combine
import AVFoundation
class CustomCameraController: UIViewController {
var image: UIImage?
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
lazy var currentCamera: AnyPublisher<AVCaptureDevice?, Never> = currentCameraSubject.eraseToAnyPublisher()
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
private var currentCameraSubject = CurrentValueSubject<AVCaptureDevice?, Never>(nil)
var manualFocusValue: Float = 1 {
didSet {
guard manualFocusValue != 0 else {
setAutoLensPosition()
return
}
setFocusLensPosition(manualValue: manualFocusValue)
}
}
//DELEGATE
var delegate: AVCapturePhotoCaptureDelegate?
func setFocusLensPosition(manualValue: Float) {
do {
try currentCameraSubject.value!.lockForConfiguration()
currentCameraSubject.value!.focusMode = .locked
currentCameraSubject.value!.setFocusModeLocked(lensPosition: manualValue, completionHandler: nil)
currentCameraSubject.value!.unlockForConfiguration()
} catch let error {
print(error.localizedDescription)
}
}
func setAutoLensPosition() {
do {
try currentCameraSubject.value!.lockForConfiguration()
currentCameraSubject.value!.focusMode = .continuousAutoFocus
currentCameraSubject.value!.unlockForConfiguration()
} catch let error {
print(error.localizedDescription)
}
}
func didTapRecord() {
let settings = AVCapturePhotoSettings()
photoOutput?.capturePhoto(with: settings, delegate: delegate!)
}
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession() {
captureSession.sessionPreset = .photo
}
func setupDevice() {
let deviceDiscoverySession =
AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
mediaType: .video,
position: .unspecified)
for device in deviceDiscoverySession.devices {
switch device.position {
case .front:
self.frontCamera = device
case .back:
self.backCamera = device
default:
break
}
}
self.currentCameraSubject.send(self.backCamera)
}
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCameraSubject.value!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer() {
self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
let deviceOrientation = UIDevice.current.orientation
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation(rawValue: deviceOrientation.rawValue)!
self.cameraPreviewLayer?.frame = self.view.frame
self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession() {
captureSession.startRunning()
}
}
Your ContentView gets updated all the time by the Published values. To fix that, we first remove the ObservedObject declaration from the view model inside the ContentView and declare it like this:
let cameraViewModel: CameraViewModel
Now we will get some errors. For the FocusPicker, just use a proxy Binding:
FocusPicker(selectedFocus: Binding<FocusChoice>(
get: {
cameraViewModel.focusChoice
},
set: {
cameraViewModel.focusChoice = $0
}
))
For the updating text, just create another view. Here we do use ObservedObject:
struct TextView: View {
@ObservedObject var cameraViewModel: CameraViewModel
var body: some View {
Text(String(format: "%.2f", cameraViewModel.focusLensPosition))
.foregroundColor(.red)
.font(.largeTitle)
}
}
Same for the OffsetView - add an ObservedObject there:
struct OffsetView: View {
@ObservedObject var viewModel: CameraViewModel
let height: CGFloat
var body: some View {
ZStack {
Rectangle()
.foregroundColor(.red)
.frame(maxWidth: height / 2, maxHeight: height, alignment: .trailing)
Rectangle()
.foregroundColor(.orange)
.frame(maxWidth: height / 2, maxHeight: height / 20, alignment: .trailing)
.offset(x: 0, y: min(CGFloat(-viewModel.exposureTargetOffset) * height / 2, height / 2))
}
}
}
The ContentView will then look like that:
struct ContentView: View {
@State private var didTapCapture = false
let cameraViewModel: CameraViewModel
let cameraController: CustomCameraController
var body: some View {
VStack {
ZStack {
CameraPreviewRepresentable(didTapCapture: $didTapCapture, cameraViewModel: cameraViewModel, cameraController: cameraController)
.frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .center)
VStack {
FocusPicker(selectedFocus: Binding<FocusChoice>(
get: {
cameraViewModel.focusChoice
},
set: {
cameraViewModel.focusChoice = $0
}
))
TextView(cameraViewModel: cameraViewModel)
}
.frame(maxWidth: .infinity, alignment: .leading)
}
.edgesIgnoringSafeArea(.all)
Spacer()
OffsetView(viewModel: cameraViewModel, height: 100)
.frame(maxWidth: .infinity, alignment: .leading)
CaptureButton(didTapCapture: $didTapCapture)
.frame(width: 100, height: 100, alignment: .center)
.padding(.bottom, 20)
}
}
}
Hence, we do not have any ObservedObject anymore in the ContentView and our Picker just works fine.
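As a small convenience, the proxy Binding can also live on the view model itself so call sites don't repeat the get/set closures. A hedged sketch; focusChoiceBinding is a made-up name, not part of the answer above:

import SwiftUI

extension CameraViewModel {
    // Hypothetical convenience so views can write
    // FocusPicker(selectedFocus: cameraViewModel.focusChoiceBinding)
    var focusChoiceBinding: Binding<FocusChoice> {
        Binding(
            get: { self.focusChoice },
            set: { self.focusChoice = $0 }
        )
    }
}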
I'm new to SwiftUI and manual camera functionality, and I really need help.
So I'm trying to build a SwiftUI camera view that wraps a UIKit camera to control the focus lens position via a SwiftUI picker view and display the focus value below it, and eventually I want a correlation between AVCaptureDevice.lensPosition (from 0 to 1.0) and the feet values displayed in the focus picker view. But for now, I only want to display that focus number on screen.
The problem is that when I try to update the focus via the coordinator's focus observation and set it on the camera view model, nothing happens. Please help 🙌
Here's the code:
import SwiftUI
import AVFoundation
import Combine
struct ContentView: View {
@State private var didTapCapture = false
@State private var focusLensPosition: Float = 0
@ObservedObject var cameraViewModel = CameraViewModel(focusLensPosition: 0)
var body: some View {
VStack {
ZStack {
CameraPreviewRepresentable(didTapCapture: $didTapCapture, cameraViewModel: cameraViewModel)
.frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .center)
VStack {
FocusPicker(selectedFocus: $focusLensPosition)
Text(String(cameraViewModel.focusLensPosition))
.foregroundColor(.red)
.font(.largeTitle)
}
.frame(maxWidth: .infinity, alignment: .leading)
}
.edgesIgnoringSafeArea(.all)
Spacer()
CaptureButton(didTapCapture: $didTapCapture)
.frame(width: 100, height: 100, alignment: .center)
.padding(.bottom, 20)
}
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
}
}
struct CaptureButton: View {
@Binding var didTapCapture: Bool
var body: some View {
Button {
didTapCapture.toggle()
} label: {
Image(systemName: "photo")
.font(.largeTitle)
.padding(30)
.background(Color.red)
.foregroundColor(.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(Color.red)
)
}
}
}
struct CameraPreviewRepresentable: UIViewControllerRepresentable {
@Environment(\.presentationMode) var presentationMode
@Binding var didTapCapture: Bool
@ObservedObject var cameraViewModel: CameraViewModel
let cameraController: CustomCameraController = CustomCameraController()
func makeUIViewController(context: Context) -> CustomCameraController {
cameraController.delegate = context.coordinator
return cameraController
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {
if (self.didTapCapture) {
cameraViewController.didTapRecord()
}
}
func makeCoordinator() -> Coordinator {
Coordinator(self, cameraViewModel: cameraViewModel)
}
class Coordinator: NSObject, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate {
let parent: CameraPreviewRepresentable
var cameraViewModel: CameraViewModel
var focusLensPositionObserver: NSKeyValueObservation?
init(_ parent: CameraPreviewRepresentable, cameraViewModel: CameraViewModel) {
self.parent = parent
self.cameraViewModel = cameraViewModel
super.init()
focusLensPositionObserver = self.parent.cameraController.currentCamera?.observe(\.lensPosition, options: [.new]) { [weak self] camera, _ in
print(Float(camera.lensPosition))
//announcing changes via Publisher
self?.cameraViewModel.focusLensPosition = camera.lensPosition
}
}
deinit {
focusLensPositionObserver = nil
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
parent.didTapCapture = false
if let imageData = photo.fileDataRepresentation(), let image = UIImage(data: imageData) {
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}
parent.presentationMode.wrappedValue.dismiss()
}
}
}
class CameraViewModel: ObservableObject {
@Published var focusLensPosition: Float = 0
init(focusLensPosition: Float) {
self.focusLensPosition = focusLensPosition
}
}
class CustomCameraController: UIViewController {
var image: UIImage?
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
//DELEGATE
var delegate: AVCapturePhotoCaptureDelegate?
func showFocusLensPosition() -> Float {
// guard let camera = currentCamera else { return 0 }
// try! currentCamera!.lockForConfiguration()
// currentCamera!.focusMode = .autoFocus
//// currentCamera!.setFocusModeLocked(lensPosition: currentCamera!.lensPosition, completionHandler: nil)
// currentCamera!.unlockForConfiguration()
return currentCamera!.lensPosition
}
func didTapRecord() {
let settings = AVCapturePhotoSettings()
photoOutput?.capturePhoto(with: settings, delegate: delegate!)
}
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession() {
captureSession.sessionPreset = .photo
}
func setupDevice() {
let deviceDiscoverySession =
AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
mediaType: .video,
position: .unspecified)
for device in deviceDiscoverySession.devices {
switch device.position {
case .front:
self.frontCamera = device
case .back:
self.backCamera = device
default:
break
}
}
self.currentCamera = self.backCamera
}
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer() {
self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
let deviceOrientation = UIDevice.current.orientation
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation(rawValue: deviceOrientation.rawValue)!
self.cameraPreviewLayer?.frame = self.view.frame
// view.transform = CGAffineTransform(scaleX: 0.5, y: 0.5)
self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession() {
captureSession.startRunning()
}
}
struct FocusPicker: View {
var feets = ["∞ ft", "30", "15", "10", "7", "5", "4", "3.5", "3", "2.5", "2", "1.5", "1", "0.5", "Auto"]
@Binding var selectedFocus: Float
var body: some View {
Picker(selection: $selectedFocus, label: Text("")) {
ForEach(0 ..< feets.count) {
Text(feets[$0])
.foregroundColor(.white)
.font(.subheadline)
.fontWeight(.medium)
}
.animation(.none)
.background(Color.clear)
.pickerStyle(WheelPickerStyle())
}
.frame(width: 60, height: 200)
.border(Color.gray, width: 5)
.clipped()
}
}
The problem with your provided code is that the type of selectedFocus within the FocusPicker view should be Int rather than Float. So one option is to change this type to Int and find a way to express the AVCaptureDevice.lensPosition as an Int within the given range.
The second option is to replace the feets array with an enumeration. By making the enumeration conform to the CustomStringConvertible protocol, you can even provide a proper description. Please see my example below.
I've stripped your code a bit as you just wanted to display the number in the first step and thus the code is more comprehensible.
My working example:
import SwiftUI
import Combine
struct ContentView: View {
@ObservedObject var cameraViewModel = CameraViewModel(focusLensPosition: 0.5)
var body: some View {
VStack {
ZStack {
VStack {
FocusPicker(selectedFocus: $cameraViewModel.focusLensPosition)
Text(String(self.cameraViewModel.focusLensPosition))
.foregroundColor(.red)
.font(.largeTitle)
}
.frame(maxWidth: .infinity, alignment: .leading)
}
.edgesIgnoringSafeArea(.all)
}
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
}
}
class CameraViewModel: ObservableObject {
@Published var focusLensPosition: Float
init(focusLensPosition: Float) {
self.focusLensPosition = focusLensPosition
}
}
enum Feets: Float, CustomStringConvertible, CaseIterable, Identifiable {
case case1 = 0.0
case case2 = 0.5
case case3 = 1.0
var id: Float { self.rawValue }
var description: String {
get {
switch self {
case .case1:
return "∞ ft"
case .case2:
return "4"
case .case3:
return "Auto"
}
}
}
}
struct FocusPicker: View {
@Binding var selectedFocus: Float
var body: some View {
Picker(selection: $selectedFocus, label: Text("")) {
ForEach(Feets.allCases) { feet in
Text(feet.description)
}
.animation(.none)
.background(Color.clear)
.pickerStyle(WheelPickerStyle())
}
.frame(width: 60, height: 200)
.border(Color.gray, width: 5)
.clipped()
}
}
I'm building a custom modal, and when I drag the modal, any subviews that have animations attached animate while I'm dragging. How do I stop this from happening?
I thought about passing down an @EnvironmentObject with an isDragging flag, but it's not very scalable (and doesn't work well with custom ButtonStyles).
import SwiftUI
struct ContentView: View {
var body: some View {
Text("Hello, world!")
.padding()
.showModal(isShowing: .constant(true))
}
}
extension View {
func showModal(isShowing: Binding<Bool>) -> some View {
ViewOverlay(isShowing: isShowing, presenting: { self })
}
}
struct ViewOverlay<Presenting>: View where Presenting: View {
@Binding var isShowing: Bool
let presenting: () -> Presenting
@State var bottomState: CGFloat = 0
var body: some View {
ZStack(alignment: .center) {
presenting().blur(radius: isShowing ? 1 : 0)
VStack {
if isShowing {
Container()
.background(Color.red)
.offset(y: bottomState)
.gesture(
DragGesture()
.onChanged { value in
bottomState = value.translation.height
}
.onEnded { _ in
if bottomState > 50 {
withAnimation {
isShowing = false
}
}
bottomState = 0
})
.transition(.move(edge: .bottom))
}
}
}
}
}
struct Container: View {
var body: some View {
// I want this to not animate when dragging the modal
Text("CONTAINER")
.frame(maxWidth: .infinity, maxHeight: 200)
.animation(.spring())
}
}
UPDATE:
extension View {
func animationsDisabled(_ disabled: Bool) -> some View {
transaction { (tx: inout Transaction) in
tx.animation = tx.animation
tx.disablesAnimations = disabled
}
}
}
Container()
.animationsDisabled(isDragging || bottomState > 0)
In real life the Container contains a button with an animation on its pressed state
struct MyButtonStyle: ButtonStyle {
func makeBody(configuration: Self.Configuration) -> some View {
configuration.label
.scaleEffect(configuration.isPressed ? 0.9 : 1)
.animation(.spring())
}
}
I added the animationsDisabled function to the child view, which does in fact stop the children moving during the drag.
What it doesn't do is stop the animation when the modal is initially slid in or dismissed.
Is there a way to know when a view is essentially not moving / transitioning?
Theoretically SwiftUI should not propagate the animation in this case, however I'm not sure if this is a bug. I would not use animation in Container in that generic way - the more I use animations, the more I tend to tie them directly to specific values.
Anyway... here is a possible workaround - break animation visibility by injecting a different hosting controller in the middle.
Tested with Xcode 12 / iOS 14
struct ViewOverlay<Presenting>: View where Presenting: View {
@Binding var isShowing: Bool
let presenting: () -> Presenting
@State var bottomState: CGFloat = 0
var body: some View {
ZStack(alignment: .center) {
presenting().blur(radius: isShowing ? 1 : 0)
VStack {
Color.clear
if isShowing {
HelperView {
Container()
.background(Color.red)
}
.offset(y: bottomState)
.gesture(
DragGesture()
.onChanged { value in
bottomState = value.translation.height
}
.onEnded { _ in
if bottomState > 50 {
withAnimation {
isShowing = false
}
}
bottomState = 0
})
.transition(.move(edge: .bottom))
}
Color.clear
}
}
}
}
struct HelperView<Content: View>: UIViewRepresentable {
let content: () -> Content
func makeUIView(context: Context) -> UIView {
let controller = UIHostingController(rootView: content())
return controller.view
}
func updateUIView(_ uiView: UIView, context: Context) {
}
}
In Container, declare a binding var so you can pass the bottomState to the Container View:
struct Container: View {
@Binding var bottomState: CGFloat
.
.
.
.
}
Don't forget to pass bottomState to your Container view wherever you use it:
Container(bottomState: $bottomState)
Now in your Container View, you just need to declare that you don't want an animation while bottomState is being changed:
Text("CONTAINER")
.frame(maxWidth: .infinity, maxHeight: 200)
.animation(nil, value: bottomState) // You Need To Add This
.animation(.spring())
In .animation(nil, value: bottomState), by passing nil you are asking SwiftUI for no animations while the value of bottomState is being changed.
This approach is tested using Xcode 12 GM, iOS 14.0.1.
You must use the modifiers on the Text in the order I put them. That means that this will work:
.animation(nil, value: bottomState)
.animation(.spring())
but this won't work:
.animation(.spring())
.animation(nil, value: bottomState)
I also made sure that adding .animation(nil, value: bottomState) only disables animations while bottomState is being changed; the .animation(.spring()) still works whenever bottomState is not changing.
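The same value-scoping idea also applies to the button style from the question's update: scoping the spring animation to configuration.isPressed keeps it from firing while the enclosing modal is dragged. A hedged sketch of a revised MyButtonStyle:

import SwiftUI

struct MyButtonStyle: ButtonStyle {
    func makeBody(configuration: Self.Configuration) -> some View {
        configuration.label
            .scaleEffect(configuration.isPressed ? 0.9 : 1)
            // Animate only when the pressed state itself changes, not when the
            // parent modal moves the button around during a drag.
            .animation(.spring(), value: configuration.isPressed)
    }
}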
So this is my updated answer. I don't think there is a pretty way to do it, so now I am doing it with a custom button.
import SwiftUI
struct ContentView: View {
@State var isShowing = false
var body: some View {
Text("Hello, world!")
.padding()
.onTapGesture(count: 1, perform: {
withAnimation(.spring()) {
self.isShowing.toggle()
}
})
.showModal(isShowing: self.$isShowing)
}
}
extension View {
func showModal(isShowing: Binding<Bool>) -> some View {
ViewOverlay(isShowing: isShowing, presenting: { self })
}
}
struct ViewOverlay<Presenting>: View where Presenting: View {
@Binding var isShowing: Bool
let presenting: () -> Presenting
@State var bottomState: CGFloat = 0
@State var isDragging = false
var body: some View {
ZStack(alignment: .center) {
presenting().blur(radius: isShowing ? 1 : 0)
VStack {
if isShowing {
Container()
.background(Color.red)
.offset(y: bottomState)
.gesture(
DragGesture()
.onChanged { value in
isDragging = true
bottomState = value.translation.height
}
.onEnded { _ in
isDragging = false
if bottomState > 50 {
withAnimation(.spring()) {
isShowing = false
}
}
bottomState = 0
})
.transition(.move(edge: .bottom))
}
}
}
}
}
struct Container: View {
var body: some View {
CustomButton(action: {}, label: {
Text("Pressme")
})
.frame(maxWidth: .infinity, maxHeight: 200)
}
}
struct CustomButton<Label>: View where Label: View {
@State var isPressed = false
var action: () -> ()
var label: () -> Label
var body: some View {
label()
.scaleEffect(self.isPressed ? 0.9 : 1.0)
.gesture(DragGesture(minimumDistance: 0).onChanged({_ in
withAnimation(.spring()) {
self.isPressed = true
}
}).onEnded({_ in
withAnimation(.spring()) {
self.isPressed = false
action()
}
}))
}
}
The problem is that you can't use implicit animations inside the container, as they will be animated when it moves. So you need to explicitly set an animation using withAnimation for the button press as well, which I now did with a custom button and a DragGesture.
It is the difference between explicit and implicit animation.
Take a look at this video where this topic is explored in detail:
https://www.youtube.com/watch?v=3krC2c56ceQ&list=PLpGHT1n4-mAtTj9oywMWoBx0dCGd51_yG&index=11
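A minimal hedged sketch of that distinction, independent of the code above (AnimationKindsDemo is a made-up name):

import SwiftUI

struct AnimationKindsDemo: View {
    @State private var scale: CGFloat = 1

    var body: some View {
        VStack(spacing: 20) {
            // Implicit: the animation is attached to the view and fires for any
            // change that affects it, including movement caused by a parent.
            Text("Implicit")
                .scaleEffect(scale)
                .animation(.spring())

            // Explicit: only state changes wrapped in withAnimation animate,
            // so unrelated layout changes (such as a drag) stay un-animated.
            Button("Toggle scale") {
                withAnimation(.spring()) {
                    scale = scale == 1 ? 0.9 : 1
                }
            }
        }
    }
}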