Rewrite Code

Simon Rieger 2023-12-15 18:17:50 +01:00
parent a54db5d11e
commit 58ea7b8f48
2 changed files with 58 additions and 104 deletions


@@ -4,139 +4,93 @@
 //
 //  Created by Simon Rieger on 15.12.23.
 //
 
 import SwiftUI
 import AVFoundation
 import Vision
 
-struct LiveTextRecognitionView: View {
-    @State private var recognizedText = ""
+struct ContentView: View {
+    @State private var recognizedText: String = ""
 
     var body: some View {
         VStack {
             CameraView(recognizedText: $recognizedText)
                 .edgesIgnoringSafeArea(.all)
-                .onDisappear {
-                    CameraView.stopSession()
-                }
 
-            Text("Live erkannter Text:")
-                .padding()
-            Text(recognizedText)
+            Text("Recognized Text: \(recognizedText)")
                 .padding()
-                .background(Color.white.opacity(0.7))
         }
     }
 }
 
-struct LiveTextRecognitionView_Previews: PreviewProvider {
-    static var previews: some View {
-        LiveTextRecognitionView()
-    }
-}
-
-struct CameraView: UIViewRepresentable {
+struct CameraView: UIViewControllerRepresentable {
     @Binding var recognizedText: String
 
     class Coordinator: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
-        var recognizedText: Binding<String>
-        var request: VNRecognizeTextRequest?
-
-        init(recognizedText: Binding<String>) {
-            self.recognizedText = recognizedText
-            super.init()
-            setupVision()
-        }
-
-        func setupVision() {
-            request = VNRecognizeTextRequest(completionHandler: { (request, error) in
-                guard let observations = request.results as? [VNRecognizedTextObservation] else { return }
-                var recognizedText = ""
-                for observation in observations {
-                    guard let topCandidate = observation.topCandidates(1).first else { continue }
-                    recognizedText += topCandidate.string + "\n"
-                }
-                self.recognizedText.wrappedValue = recognizedText
-            })
-            request?.recognitionLevel = .accurate
+        var parent: CameraView
+
+        init(parent: CameraView) {
+            self.parent = parent
         }
 
         func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
             guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
 
+            let request = VNRecognizeTextRequest { (request, error) in
+                if let error = error {
+                    print("Error recognizing text: \(error)")
+                    return
+                }
+
+                if let results = request.results as? [VNRecognizedTextObservation] {
+                    let text = results.compactMap { observation in
+                        observation.topCandidates(1).first?.string
+                    }.joined(separator: "\n")
+
+                    DispatchQueue.main.async {
+                        self.parent.recognizedText = text
+                    }
+                }
+            }
+
             let handler = VNImageRequestHandler(cvPixelBuffer: imageBuffer, options: [:])
             do {
-                try handler.perform([request!])
+                try handler.perform([request])
             } catch {
                 print("Error performing OCR: \(error)")
             }
         }
     }
 
-    static var session: AVCaptureSession?
-
-    static func startSession() {
-        session?.startRunning()
-    }
-
-    static func stopSession() {
-        session?.stopRunning()
-    }
-
     func makeCoordinator() -> Coordinator {
-        return Coordinator(recognizedText: $recognizedText)
+        return Coordinator(parent: self)
     }
 
-    func makeUIView(context: Context) -> UIView {
-        let view = UIView()
-
-        let session = AVCaptureSession()
-
-        guard let device = AVCaptureDevice.default(for: .video) else { return view }
-        let input = try? AVCaptureDeviceInput(device: device)
-
-        if session.canAddInput(input!) {
-            session.addInput(input!)
-        }
+    func makeUIViewController(context: Context) -> UIViewController {
+        let viewController = UIViewController()
+        let captureSession = AVCaptureSession()
+
+        guard let camera = AVCaptureDevice.default(for: .video) else { return viewController }
+
+        do {
+            let input = try AVCaptureDeviceInput(device: camera)
+            captureSession.addInput(input)
+        } catch {
+            print("Error setting up camera input: \(error)")
+            return viewController
+        }
 
         let output = AVCaptureVideoDataOutput()
         output.setSampleBufferDelegate(context.coordinator, queue: DispatchQueue(label: "cameraQueue"))
+        captureSession.addOutput(output)
 
-        if session.canAddOutput(output) {
-            session.addOutput(output)
-        }
-
-        // Todo: get PreviewLayer working
-        let previewLayer = AVCaptureVideoPreviewLayer(session: session)
+        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
+        previewLayer.frame = viewController.view.layer.bounds
         previewLayer.videoGravity = .resizeAspectFill
-        previewLayer.frame = view.layer.bounds
-        view.layer.addSublayer(previewLayer)
-
-        CameraView.session = session
-
-        return view
-    }
-
-    func updateUIView(_ uiView: UIView, context: Context) {
-        uiView.frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
-        if context.coordinator.request == nil {
-            context.coordinator.setupVision()
-        }
-        if AVCaptureDevice.authorizationStatus(for: .video) == .authorized {
-            CameraView.startSession()
-        } else {
-            AVCaptureDevice.requestAccess(for: .video) { granted in
-                if granted {
-                    CameraView.startSession()
-                }
-            }
-        }
+        viewController.view.layer.addSublayer(previewLayer)
+
+        captureSession.startRunning()
+
+        return viewController
    }
+
+    func updateUIViewController(_ uiViewController: UIViewController, context: Context) {}
 }
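
Note that the rewrite drops two things the old view had: the camera-permission check (AVCaptureDevice.requestAccess) before starting capture, and the .accurate recognition level that setupVision() set on the Vision request. It also calls captureSession.startRunning() directly inside makeUIViewController, and that is a blocking call on the main thread. A minimal sketch of how the permission check and an off-main-thread start could be layered back in; the helper name is hypothetical and not part of this commit:

    import AVFoundation

    // Hypothetical helper (not in this commit): ask for camera access first,
    // then start the session off the main thread, because
    // AVCaptureSession.startRunning() blocks until capture is running.
    func startWhenAuthorized(_ session: AVCaptureSession) {
        AVCaptureDevice.requestAccess(for: .video) { granted in
            guard granted else { return }
            DispatchQueue.global(qos: .userInitiated).async {
                session.startRunning()
            }
        }
    }

In makeUIViewController this would replace the bare captureSession.startRunning() call. Similarly, the per-frame request in captureOutput could set request.recognitionLevel = .accurate, as the removed setupVision() did.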


@@ -11,7 +11,7 @@ import SwiftUI
 struct intelliScan_analytic_engineApp: App {
     var body: some Scene {
         WindowGroup {
-            LiveTextRecognitionView()
+            ContentView()
         }
     }
 }
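
Both the old and the new view assume the app target's Info.plist declares an NSCameraUsageDescription entry; without that key, iOS terminates the app on its first attempt to access the camera.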