From 2b460751346f74fa8ec615d72f581dbd1515ca4b Mon Sep 17 00:00:00 2001
From: Apple <>
Date: Mon, 18 Jun 2018 16:39:52 -0700
Subject: [PATCH] Fixes a bug in CameraViewController that forced the buffer to
 use the lowest-resolution depth format rather than the highest.

---
 README.md                                    |   84 +-
 TrueDepthStreamer.xcodeproj/project.pbxproj  |   42 +-
 TrueDepthStreamer/AppDelegate.swift          |    2 +-
 TrueDepthStreamer/CameraViewController.swift | 1015 +++++++++---------
 TrueDepthStreamer/DepthToJETConverter.swift  |  264 ++---
 TrueDepthStreamer/FilterRenderer.swift       |  202 ++--
 TrueDepthStreamer/HistogramCalculator.h      |   14 +-
 TrueDepthStreamer/HistogramCalculator.m      |    2 -
 TrueDepthStreamer/PreviewMetalView.swift     |  664 ++++++------
 TrueDepthStreamer/VideoMixer.swift           |  302 +++---
 10 files changed, 1298 insertions(+), 1293 deletions(-)

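For reference, the depth-format selection that the updated README listing and `CameraViewController.configureSession()` walk through reads end to end as the sketch below. The helper name and the `videoDevice` parameter are illustrative only; the filter, the `max(by:)` comparison, and the device-locking step follow the code in this patch.

``` swift
import AVFoundation
import CoreMedia
import CoreVideo

/// Sketch only: mirrors the format-selection code shown in the README
/// and in configureSession(). The function name is hypothetical.
func selectHighestResolutionDepthFormat(for videoDevice: AVCaptureDevice) {
    let depthFormats = videoDevice.activeFormat.supportedDepthDataFormats

    // Keep only the half-precision floating-point depth formats.
    let filtered = depthFormats.filter({
        CMFormatDescriptionGetMediaSubType($0.formatDescription) == kCVPixelFormatType_DepthFloat16
    })

    // Choose the widest (highest-resolution) of the remaining formats.
    let selectedFormat = filtered.max(by: { first, second in
        CMVideoFormatDescriptionGetDimensions(first.formatDescription).width <
            CMVideoFormatDescriptionGetDimensions(second.formatDescription).width
    })

    // Lock the device configuration to apply the chosen format,
    // as the README describes.
    do {
        try videoDevice.lockForConfiguration()
        videoDevice.activeDepthDataFormat = selectedFormat
        videoDevice.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
    }
}
```

Using `max(by:)` over the filtered formats is what selects the highest-resolution Float16 format rather than the lowest.
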
diff --git a/README.md b/README.md
index 2c0080f..156c735 100644
--- a/README.md
+++ b/README.md
@@ -22,7 +22,7 @@ The `startRunning` method is a blocking call that may take time to execute. Disp
 
 ``` swift
 sessionQueue.async {
-          self.configureSession()
+    self.configureSession()
 }
 ```
 
@@ -39,11 +39,11 @@ Explicitly add this output type to your capture session:
 ``` swift
 session.addOutput(depthDataOutput)
 depthDataOutput.isFilteringEnabled = false
-         if let connection = depthDataOutput.connection(with: .depthData) {
-             connection.isEnabled = true
-         } else {
-             print("No AVCaptureConnection")
-         }
+if let connection = depthDataOutput.connection(with: .depthData) {
+    connection.isEnabled = true
+} else {
+    print("No AVCaptureConnection")
+}
 ```
 
 Search for the highest resolution available with floating-point depth values, and lock the configuration to the format.
@@ -51,10 +51,10 @@ Search for the highest resolution available with floating-point depth values, an
 ``` swift
 let depthFormats = videoDevice.activeFormat.supportedDepthDataFormats
 let filtered = depthFormats.filter({
-    $0.formatDescription.mediaSubType == kCVPixelFormatType_DepthFloat16
+    CMFormatDescriptionGetMediaSubType($0.formatDescription) == kCVPixelFormatType_DepthFloat16
 })
 let selectedFormat = filtered.max(by: {
-    first, second in first.formatDescription.videoDimensions.width < second.formatDescription.videoDimensions.width
+    first, second in CMVideoFormatDescriptionGetDimensions(first.formatDescription).width < CMVideoFormatDescriptionGetDimensions(second.formatDescription).width
 })
 
 do {
@@ -86,22 +86,22 @@ The sample uses JET color coding to distinguish depth values, ranging from red (
 
 ``` swift
 var cvTextureOut: CVMetalTexture?
-      CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, textureFormat, width, height, 0, &cvTextureOut)
-      guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
-          print("Depth converter failed to create preview texture")
-          CVMetalTextureCacheFlush(textureCache, 0)
-          return nil
-      }
+CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, textureFormat, width, height, 0, &cvTextureOut)
+guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
+    print("Depth converter failed to create preview texture")
+    CVMetalTextureCacheFlush(textureCache, 0)
+    return nil
+}
 ```
 
 ## Visualize Depth Data in 3D
 
 The sample’s 3D viewer renders data as a point cloud.  Control the camera with the following gestures:
 
-	•	Pinch to zoom.
-	•	Pan to move the camera around the center.
-	•	Rotate with two fingers to turn the camera angle.
-	•	Double-tap the screen to reset the initial; position.
+* Pinch to zoom.  
+* Pan to move the camera around the center.  
+* Rotate with two fingers to turn the camera angle.  
+* Double-tap the screen to reset the initial position.  
 
 The sample implements a 3D point cloud as a `PointCloudMetalView`.  It uses a Metal vertex shader to control geometry and a Metal fragment shader to color individual vertices, keeping the depth texture and color texture separate:
 
@@ -160,28 +160,28 @@ Processing depth data from a live stream may cause the device to heat up.  Keep
 ``` swift
 @objc
 func thermalStateChanged(notification: NSNotification) {
-       if let processInfo = notification.object as? ProcessInfo {
-           showThermalState(state: processInfo.thermalState)
-       }
-   }
-
-   func showThermalState(state: ProcessInfo.ThermalState) {
-       DispatchQueue.main.async {
-           var thermalStateString = "UNKNOWN"
-           if state == .nominal {
-               thermalStateString = "NOMINAL"
-           } else if state == .fair {
-               thermalStateString = "FAIR"
-           } else if state == .serious {
-               thermalStateString = "SERIOUS"
-           } else if state == .critical {
-               thermalStateString = "CRITICAL"
-           }
-           
-           let message = NSLocalizedString("Thermal state: \(thermalStateString)", comment: "Alert message when thermal state has changed")
-           let alertController = UIAlertController(title: "TrueDepthStreamer", message: message, preferredStyle: .alert)
-           alertController.addAction(UIAlertAction(title: NSLocalizedString("OK", comment: "Alert OK button"), style: .cancel, handler: nil))
-           self.present(alertController, animated: true, completion: nil)
-       }
-   }
+    if let processInfo = notification.object as? ProcessInfo {
+        showThermalState(state: processInfo.thermalState)
+    }
+}
+
+func showThermalState(state: ProcessInfo.ThermalState) {
+    DispatchQueue.main.async {
+        var thermalStateString = "UNKNOWN"
+        if state == .nominal {
+            thermalStateString = "NOMINAL"
+        } else if state == .fair {
+            thermalStateString = "FAIR"
+        } else if state == .serious {
+            thermalStateString = "SERIOUS"
+        } else if state == .critical {
+            thermalStateString = "CRITICAL"
+        }
+        
+        let message = NSLocalizedString("Thermal state: \(thermalStateString)", comment: "Alert message when thermal state has changed")
+        let alertController = UIAlertController(title: "TrueDepthStreamer", message: message, preferredStyle: .alert)
+        alertController.addAction(UIAlertAction(title: NSLocalizedString("OK", comment: "Alert OK button"), style: .cancel, handler: nil))
+        self.present(alertController, animated: true, completion: nil)
+    }
+}
 ```
diff --git a/TrueDepthStreamer.xcodeproj/project.pbxproj b/TrueDepthStreamer.xcodeproj/project.pbxproj
index 1d95377..ed1681a 100644
--- a/TrueDepthStreamer.xcodeproj/project.pbxproj
+++ b/TrueDepthStreamer.xcodeproj/project.pbxproj
@@ -31,6 +31,8 @@
 		2633CF1C1E7C65D500FC80E1 /* DepthToJETConverter.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DepthToJETConverter.swift; sourceTree = "<group>"; };
 		2672370D1E79BFBF003D2EAA /* DepthToJET.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = DepthToJET.metal; sourceTree = "<group>"; };
 		267ED84D1ED7965A00898078 /* TrueDepthStreamer-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "TrueDepthStreamer-Bridging-Header.h"; sourceTree = "<group>"; };
+		26C3826026C380D000000001 /* SampleCode.xcconfig */ = {isa = PBXFileReference; name = SampleCode.xcconfig; path = Configuration/SampleCode.xcconfig; sourceTree = "<group>"; };
+		28908F9028908E9000000001 /* LICENSE.txt */ = {isa = PBXFileReference; includeInIndex = 1; path = LICENSE.txt; sourceTree = "<group>"; };
 		6D20BC0F1FFE665100496684 /* PointCloudMetalView.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = PointCloudMetalView.mm; sourceTree = "<group>"; };
 		C3444A731FFE6110002D901D /* PointCloudMetalView.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PointCloudMetalView.h; sourceTree = "<group>"; };
 		C3444A741FFE6110002D901D /* AAPLTransforms.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = AAPLTransforms.mm; sourceTree = "<group>"; };
@@ -38,8 +40,6 @@
 		C3444A781FFE6110002D901D /* AAPLTransforms.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AAPLTransforms.h; sourceTree = "<group>"; };
 		C3B256711FDE655100617DD7 /* HistogramCalculator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = HistogramCalculator.m; sourceTree = "<group>"; };
 		C3B256721FDE655200617DD7 /* HistogramCalculator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HistogramCalculator.h; sourceTree = "<group>"; };
-		C8798080C879905000000001 /* SampleCode.xcconfig */ = {isa = PBXFileReference; name = SampleCode.xcconfig; path = Configuration/SampleCode.xcconfig; sourceTree = "<group>"; };
-		C87E9A10C87E98B000000001 /* LICENSE.txt */ = {isa = PBXFileReference; includeInIndex = 1; path = LICENSE.txt; sourceTree = "<group>"; };
 		E414FC6C1D5921FD0007C979 /* README.md */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = "<group>"; };
 		E422DFB71CEF894F0047D7A4 /* TrueDepthStreamer.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = TrueDepthStreamer.app; sourceTree = BUILT_PRODUCTS_DIR; };
 		E422DFBF1CEF894F0047D7A4 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; };
@@ -75,39 +75,39 @@
 			path = Shaders;
 			sourceTree = "<group>";
 		};
-		8F7DEDF83CEB2358D1CCC092 /* Configuration */ = {
+		26C3860026C3856000000001 /* Configuration */ = {
 			isa = PBXGroup;
 			children = (
+				26C3826026C380D000000001 /* SampleCode.xcconfig */,
 			);
 			name = Configuration;
 			sourceTree = "<group>";
 		};
-		C3B2FE6F1FFA8B9A00D8BC60 /* PointCloud */ = {
+		28908660289084B000000001 /* LICENSE */ = {
 			isa = PBXGroup;
 			children = (
-				C3444A781FFE6110002D901D /* AAPLTransforms.h */,
-				C3444A741FFE6110002D901D /* AAPLTransforms.mm */,
-				C3444A731FFE6110002D901D /* PointCloudMetalView.h */,
-				6D20BC0F1FFE665100496684 /* PointCloudMetalView.mm */,
+				28908F9028908E9000000001 /* LICENSE.txt */,
 			);
-			path = PointCloud;
+			name = LICENSE;
+			path = LICENSE;
 			sourceTree = "<group>";
 		};
-		C87994B0C87990B000000001 /* Configuration */ = {
+		8F7DEDF83CEB2358D1CCC092 /* Configuration */ = {
 			isa = PBXGroup;
 			children = (
-				C8798080C879905000000001 /* SampleCode.xcconfig */,
 			);
 			name = Configuration;
 			sourceTree = "<group>";
 		};
-		C87EB090C87EB06000000001 /* LICENSE */ = {
+		C3B2FE6F1FFA8B9A00D8BC60 /* PointCloud */ = {
 			isa = PBXGroup;
 			children = (
-				C87E9A10C87E98B000000001 /* LICENSE.txt */,
+				C3444A781FFE6110002D901D /* AAPLTransforms.h */,
+				C3444A741FFE6110002D901D /* AAPLTransforms.mm */,
+				C3444A731FFE6110002D901D /* PointCloudMetalView.h */,
+				6D20BC0F1FFE665100496684 /* PointCloudMetalView.mm */,
 			);
-			name = LICENSE;
-			path = LICENSE;
+			path = PointCloud;
 			sourceTree = "<group>";
 		};
 		E422DFAE1CEF894F0047D7A4 = {
@@ -117,8 +117,8 @@
 				E422DFB91CEF894F0047D7A4 /* TrueDepthStreamer */,
 				E422DFB81CEF894F0047D7A4 /* Products */,
 				8F7DEDF83CEB2358D1CCC092 /* Configuration */,
-				C87994B0C87990B000000001 /* Configuration */,
-				C87EB090C87EB06000000001 /* LICENSE */,
+				26C3860026C3856000000001 /* Configuration */,
+				28908660289084B000000001 /* LICENSE */,
 			);
 			sourceTree = "<group>";
 		};
@@ -266,7 +266,7 @@
 /* Begin XCBuildConfiguration section */
 		E422DFC71CEF894F0047D7A4 /* Debug */ = {
 			isa = XCBuildConfiguration;
-			baseConfigurationReference = C8798080C879905000000001 /* SampleCode.xcconfig */;
+			baseConfigurationReference = 26C3826026C380D000000001 /* SampleCode.xcconfig */;
 			buildSettings = {
 				ALWAYS_SEARCH_USER_PATHS = NO;
 				ASSETCATALOG_COMPRESSION = lossless;
@@ -327,7 +327,7 @@
 		};
 		E422DFC81CEF894F0047D7A4 /* Release */ = {
 			isa = XCBuildConfiguration;
-			baseConfigurationReference = C8798080C879905000000001 /* SampleCode.xcconfig */;
+			baseConfigurationReference = 26C3826026C380D000000001 /* SampleCode.xcconfig */;
 			buildSettings = {
 				ALWAYS_SEARCH_USER_PATHS = NO;
 				ASSETCATALOG_COMPRESSION = "respect-asset-catalog";
@@ -382,7 +382,7 @@
 		};
 		E422DFCA1CEF894F0047D7A4 /* Debug */ = {
 			isa = XCBuildConfiguration;
-			baseConfigurationReference = C8798080C879905000000001 /* SampleCode.xcconfig */;
+			baseConfigurationReference = 26C3826026C380D000000001 /* SampleCode.xcconfig */;
 			buildSettings = {
 				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
 				CLANG_ENABLE_MODULES = YES;
@@ -408,7 +408,7 @@
 		};
 		E422DFCB1CEF894F0047D7A4 /* Release */ = {
 			isa = XCBuildConfiguration;
-			baseConfigurationReference = C8798080C879905000000001 /* SampleCode.xcconfig */;
+			baseConfigurationReference = 26C3826026C380D000000001 /* SampleCode.xcconfig */;
 			buildSettings = {
 				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
 				CLANG_ENABLE_MODULES = YES;
diff --git a/TrueDepthStreamer/AppDelegate.swift b/TrueDepthStreamer/AppDelegate.swift
index ecb30b2..22ceb15 100644
--- a/TrueDepthStreamer/AppDelegate.swift
+++ b/TrueDepthStreamer/AppDelegate.swift
@@ -9,5 +9,5 @@ import UIKit
 
 @UIApplicationMain
 class AppDelegate: UIResponder, UIApplicationDelegate {
-	var window: UIWindow?
+    var window: UIWindow?
 }
diff --git a/TrueDepthStreamer/CameraViewController.swift b/TrueDepthStreamer/CameraViewController.swift
index e1ef027..69e87bb 100644
--- a/TrueDepthStreamer/CameraViewController.swift
+++ b/TrueDepthStreamer/CameraViewController.swift
@@ -13,59 +13,59 @@ import Accelerate
 
 @available(iOS 11.1, *)
 class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDelegate {
-
-	// MARK: - Properties
-
-	@IBOutlet weak private var resumeButton: UIButton!
-
-	@IBOutlet weak private var cameraUnavailableLabel: UILabel!
-
+    
+    // MARK: - Properties
+    
+    @IBOutlet weak private var resumeButton: UIButton!
+    
+    @IBOutlet weak private var cameraUnavailableLabel: UILabel!
+    
     @IBOutlet weak private var jetView: PreviewMetalView!
-
+    
     @IBOutlet weak private var depthSmoothingSwitch: UISwitch!
-
+    
     @IBOutlet weak private var mixFactorSlider: UISlider!
-
+    
     @IBOutlet weak private var touchDepth: UILabel!
-
+    
     @IBOutlet weak var autoPanningSwitch: UISwitch!
     
     private enum SessionSetupResult {
-		case success
-		case notAuthorized
-		case configurationFailed
-	}
-
-	private var setupResult: SessionSetupResult = .success
-
-	private let session = AVCaptureSession()
-
-	private var isSessionRunning = false
-
-	// Communicate with the session and other session objects on this queue.
-	private let sessionQueue = DispatchQueue(label: "session queue", attributes: [], autoreleaseFrequency: .workItem)
-	private var videoDeviceInput: AVCaptureDeviceInput!
-
-	private let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
-
-	private let videoDataOutput = AVCaptureVideoDataOutput()
-	private let depthDataOutput = AVCaptureDepthDataOutput()
-	private var outputSynchronizer: AVCaptureDataOutputSynchronizer?
-
-	private let videoDepthMixer = VideoMixer()
-
-	private let videoDepthConverter = DepthToJETConverter()
-
-	private var renderingEnabled = true
-
-	private let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInTrueDepthCamera],
-	                                                                           mediaType: .video,
-	                                                                           position: .front)
-
-	private var statusBarOrientation: UIInterfaceOrientation = .portrait
+        case success
+        case notAuthorized
+        case configurationFailed
+    }
+    
+    private var setupResult: SessionSetupResult = .success
+    
+    private let session = AVCaptureSession()
+    
+    private var isSessionRunning = false
+    
+    // Communicate with the session and other session objects on this queue.
+    private let sessionQueue = DispatchQueue(label: "session queue", attributes: [], autoreleaseFrequency: .workItem)
+    private var videoDeviceInput: AVCaptureDeviceInput!
+    
+    private let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
+    
+    private let videoDataOutput = AVCaptureVideoDataOutput()
+    private let depthDataOutput = AVCaptureDepthDataOutput()
+    private var outputSynchronizer: AVCaptureDataOutputSynchronizer?
+    
+    private let videoDepthMixer = VideoMixer()
+    
+    private let videoDepthConverter = DepthToJETConverter()
+    
+    private var renderingEnabled = true
+    
+    private let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInTrueDepthCamera],
+                                                                               mediaType: .video,
+                                                                               position: .front)
+    
+    private var statusBarOrientation: UIInterfaceOrientation = .portrait
     
     private var touchDetected = false
-
+    
     private var touchCoordinates = CGPoint(x: 0, y: 0)
     
     @IBOutlet weak private var cloudView: PointCloudMetalView!
@@ -81,36 +81,36 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
     private var lastZoom = Float(0.0)
     
     private var lastXY = CGPoint(x: 0, y: 0)
-
+    
     private var JETEnabled = false
     
     private var viewFrameSize = CGSize()
     
     private var autoPanningIndex = Int(0) // start with auto-panning on
     
-	// MARK: - View Controller Life Cycle
-
-	override func viewDidLoad() {
-		super.viewDidLoad()
-
+    // MARK: - View Controller Life Cycle
+    
+    override func viewDidLoad() {
+        super.viewDidLoad()
+        
         viewFrameSize = self.view.frame.size
         
         let tapGestureJET = UITapGestureRecognizer(target: self, action: #selector(focusAndExposeTap))
         jetView.addGestureRecognizer(tapGestureJET)
-
+        
         let pressGestureJET = UILongPressGestureRecognizer(target: self, action: #selector(handleLongPressJET))
         pressGestureJET.minimumPressDuration = 0.05
         pressGestureJET.cancelsTouchesInView = false
         jetView.addGestureRecognizer(pressGestureJET)
-
+        
         let pinchGesture = UIPinchGestureRecognizer(target: self, action: #selector(handlePinch))
         cloudView.addGestureRecognizer(pinchGesture)
-
+        
         let doubleTapGesture = UITapGestureRecognizer(target: self, action: #selector(handleDoubleTap))
         doubleTapGesture.numberOfTapsRequired = 2
         doubleTapGesture.numberOfTouchesRequired = 1
         cloudView.addGestureRecognizer(doubleTapGesture)
-
+        
         let rotateGesture = UIRotationGestureRecognizer(target: self, action: #selector(handleRotate))
         cloudView.addGestureRecognizer(rotateGesture)
         
@@ -120,151 +120,153 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
         cloudView.addGestureRecognizer(panOneFingerGesture)
         
         cloudToJETSegCtrl.selectedSegmentIndex = 1
-
-		// Check video authorization status, video access is required
-		switch AVCaptureDevice.authorizationStatus(for: .video) {
-			case .authorized:
-				// The user has previously granted access to the camera
-				break
-
-			case .notDetermined:
-				/*
-					The user has not yet been presented with the option to grant video access
-					We suspend the session queue to delay session setup until the access request has completed
-				*/
-				sessionQueue.suspend()
-				AVCaptureDevice.requestAccess(for: .video, completionHandler: { granted in
-					if !granted {
-						self.setupResult = .notAuthorized
-					}
-					self.sessionQueue.resume()
-				})
-
-			default:
-				// The user has previously denied access
-				setupResult = .notAuthorized
-		}
-
-		/*
-			Setup the capture session.
-			In general it is not safe to mutate an AVCaptureSession or any of its
-			inputs, outputs, or connections from multiple threads at the same time.
-			
-			Why not do all of this on the main queue?
-			Because AVCaptureSession.startRunning() is a blocking call which can
-			take a long time. We dispatch session setup to the sessionQueue so
-			that the main queue isn't blocked, which keeps the UI responsive.
-		*/
-		sessionQueue.async {
+        
+        // Check video authorization status, video access is required
+        switch AVCaptureDevice.authorizationStatus(for: .video) {
+        case .authorized:
+            // The user has previously granted access to the camera
+            break
+            
+        case .notDetermined:
+            /*
+             The user has not yet been presented with the option to grant video access
+             We suspend the session queue to delay session setup until the access request has completed
+             */
+            sessionQueue.suspend()
+            AVCaptureDevice.requestAccess(for: .video, completionHandler: { granted in
+                if !granted {
+                    self.setupResult = .notAuthorized
+                }
+                self.sessionQueue.resume()
+            })
+            
+        default:
+            // The user has previously denied access
+            setupResult = .notAuthorized
+        }
+        
+        /*
+         Set up the capture session.
+         In general it is not safe to mutate an AVCaptureSession or any of its
+         inputs, outputs, or connections from multiple threads at the same time.
+         
+         Why not do all of this on the main queue?
+         Because AVCaptureSession.startRunning() is a blocking call which can
+         take a long time. We dispatch session setup to the sessionQueue so
+         that the main queue isn't blocked, which keeps the UI responsive.
+         */
+        sessionQueue.async {
             self.configureSession()
-		}
-	}
-
-	override func viewWillAppear(_ animated: Bool) {
-		super.viewWillAppear(animated)
-
-		let interfaceOrientation = UIApplication.shared.statusBarOrientation
-		statusBarOrientation = interfaceOrientation
-
-		let initialThermalState = ProcessInfo.processInfo.thermalState
-		if initialThermalState == .serious || initialThermalState == .critical {
-			showThermalState(state: initialThermalState)
-		}
-
-		sessionQueue.async {
-			switch self.setupResult {
-				case .success:
-					// Only setup observers and start the session running if setup succeeded
-					self.addObservers()
-					let videoOrientation = self.videoDataOutput.connection(with: .video)!.videoOrientation
-					let videoDevicePosition = self.videoDeviceInput.device.position
-					let rotation = PreviewMetalView.Rotation(with: interfaceOrientation, videoOrientation: videoOrientation, cameraPosition: videoDevicePosition)
-					self.jetView.mirroring = (videoDevicePosition == .front)
-					if let rotation = rotation {
-						self.jetView.rotation = rotation
-					}
-					self.dataOutputQueue.async {
-						self.renderingEnabled = true
-					}
-
-					self.session.startRunning()
-					self.isSessionRunning = self.session.isRunning
-
-				case .notAuthorized:
-					DispatchQueue.main.async {
-						let message = NSLocalizedString("TrueDepthStreamer doesn't have permission to use the camera, please change privacy settings",
-						                                comment: "Alert message when the user has denied access to the camera")
-						let alertController = UIAlertController(title: "TrueDepthStreamer", message: message, preferredStyle: .alert)
-						alertController.addAction(UIAlertAction(title: NSLocalizedString("OK", comment: "Alert OK button"),
-						                                        style: .cancel,
-						                                        handler: nil))
-						alertController.addAction(UIAlertAction(title: NSLocalizedString("Settings", comment: "Alert button to open Settings"),
-						                                        style: .`default`,
-						                                        handler: { _ in
-                                                                    UIApplication.shared.open(URL(string: UIApplication.openSettingsURLString)!,
-																	                          options: [:],
-																	                          completionHandler: nil)
-						}))
-
-						self.present(alertController, animated: true, completion: nil)
-					}
-
-				case .configurationFailed:
-					DispatchQueue.main.async {
-                        self.cameraUnavailableLabel.isHidden = false
-                        self.cameraUnavailableLabel.alpha = 0.0
-                        UIView.animate(withDuration: 0.25) {
-                            self.cameraUnavailableLabel.alpha = 1.0
-                        }
-					}
-			}
-		}
-	}
-
-	override func viewWillDisappear(_ animated: Bool) {
-		dataOutputQueue.async {
-			self.renderingEnabled = false
-		}
-		sessionQueue.async {
-			if self.setupResult == .success {
-				self.session.stopRunning()
-				self.isSessionRunning = self.session.isRunning
-			}
-		}
-
-		super.viewWillDisappear(animated)
-	}
-
-	@objc
-	func didEnterBackground(notification: NSNotification) {
-		// Free up resources
-		dataOutputQueue.async {
-			self.renderingEnabled = false
-//            if let videoFilter = self.videoFilter {
-//                videoFilter.reset()
-//            }
-			self.videoDepthMixer.reset()
-			self.videoDepthConverter.reset()
-			self.jetView.pixelBuffer = nil
-			self.jetView.flushTextureCache()
-		}
-	}
-
-	@objc
-	func willEnterForground(notification: NSNotification) {
-		dataOutputQueue.async {
-			self.renderingEnabled = true
-		}
-	}
-
-	// You can use this opportunity to take corrective action to help cool the system down.
-	@objc
-	func thermalStateChanged(notification: NSNotification) {
+        }
+    }
+    
+    override func viewWillAppear(_ animated: Bool) {
+        super.viewWillAppear(animated)
+        
+        let interfaceOrientation = UIApplication.shared.statusBarOrientation
+        statusBarOrientation = interfaceOrientation
+        
+        let initialThermalState = ProcessInfo.processInfo.thermalState
+        if initialThermalState == .serious || initialThermalState == .critical {
+            showThermalState(state: initialThermalState)
+        }
+        
+        sessionQueue.async {
+            switch self.setupResult {
+            case .success:
+                // Only set up observers and start the session running if setup succeeded
+                self.addObservers()
+                let videoOrientation = self.videoDataOutput.connection(with: .video)!.videoOrientation
+                let videoDevicePosition = self.videoDeviceInput.device.position
+                let rotation = PreviewMetalView.Rotation(with: interfaceOrientation,
+                                                         videoOrientation: videoOrientation,
+                                                         cameraPosition: videoDevicePosition)
+                self.jetView.mirroring = (videoDevicePosition == .front)
+                if let rotation = rotation {
+                    self.jetView.rotation = rotation
+                }
+                self.dataOutputQueue.async {
+                    self.renderingEnabled = true
+                }
+                
+                self.session.startRunning()
+                self.isSessionRunning = self.session.isRunning
+                
+            case .notAuthorized:
+                DispatchQueue.main.async {
+                    let message = NSLocalizedString("TrueDepthStreamer doesn't have permission to use the camera, please change privacy settings",
+                                                    comment: "Alert message when the user has denied access to the camera")
+                    let alertController = UIAlertController(title: "TrueDepthStreamer", message: message, preferredStyle: .alert)
+                    alertController.addAction(UIAlertAction(title: NSLocalizedString("OK", comment: "Alert OK button"),
+                                                            style: .cancel,
+                                                            handler: nil))
+                    alertController.addAction(UIAlertAction(title: NSLocalizedString("Settings", comment: "Alert button to open Settings"),
+                                                            style: .`default`,
+                                                            handler: { _ in
+                                                                UIApplication.shared.open(URL(string: UIApplication.openSettingsURLString)!,
+                                                                                          options: [:],
+                                                                                          completionHandler: nil)
+                    }))
+                    
+                    self.present(alertController, animated: true, completion: nil)
+                }
+                
+            case .configurationFailed:
+                DispatchQueue.main.async {
+                    self.cameraUnavailableLabel.isHidden = false
+                    self.cameraUnavailableLabel.alpha = 0.0
+                    UIView.animate(withDuration: 0.25) {
+                        self.cameraUnavailableLabel.alpha = 1.0
+                    }
+                }
+            }
+        }
+    }
+    
+    override func viewWillDisappear(_ animated: Bool) {
+        dataOutputQueue.async {
+            self.renderingEnabled = false
+        }
+        sessionQueue.async {
+            if self.setupResult == .success {
+                self.session.stopRunning()
+                self.isSessionRunning = self.session.isRunning
+            }
+        }
+        
+        super.viewWillDisappear(animated)
+    }
+    
+    @objc
+    func didEnterBackground(notification: NSNotification) {
+        // Free up resources
+        dataOutputQueue.async {
+            self.renderingEnabled = false
+            //            if let videoFilter = self.videoFilter {
+            //                videoFilter.reset()
+            //            }
+            self.videoDepthMixer.reset()
+            self.videoDepthConverter.reset()
+            self.jetView.pixelBuffer = nil
+            self.jetView.flushTextureCache()
+        }
+    }
+    
+    @objc
+    func willEnterForground(notification: NSNotification) {
+        dataOutputQueue.async {
+            self.renderingEnabled = true
+        }
+    }
+    
+    // You can use this opportunity to take corrective action to help cool the system down.
+    @objc
+    func thermalStateChanged(notification: NSNotification) {
         if let processInfo = notification.object as? ProcessInfo {
             showThermalState(state: processInfo.thermalState)
         }
     }
-
+    
     func showThermalState(state: ProcessInfo.ThermalState) {
         DispatchQueue.main.async {
             var thermalStateString = "UNKNOWN"
@@ -284,148 +286,148 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
             self.present(alertController, animated: true, completion: nil)
         }
     }
-	override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
-		return .all
-	}
-
-	override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
-		super.viewWillTransition(to: size, with: coordinator)
-
-		coordinator.animate(
-			alongsideTransition: { _ in
-				let interfaceOrientation = UIApplication.shared.statusBarOrientation
-				self.statusBarOrientation = interfaceOrientation
-				self.sessionQueue.async {
-					/*
-						The photo orientation is based on the interface orientation. You could also set the orientation of the photo connection based
-						on the device orientation by observing UIDeviceOrientationDidChangeNotification.
-					*/
-					let videoOrientation = self.videoDataOutput.connection(with: .video)!.videoOrientation
-					if let rotation = PreviewMetalView.Rotation(with: interfaceOrientation, videoOrientation: videoOrientation,
+    override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
+        return .all
+    }
+    
+    override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
+        super.viewWillTransition(to: size, with: coordinator)
+        
+        coordinator.animate(
+            alongsideTransition: { _ in
+                let interfaceOrientation = UIApplication.shared.statusBarOrientation
+                self.statusBarOrientation = interfaceOrientation
+                self.sessionQueue.async {
+                    /*
+                     The photo orientation is based on the interface orientation. You could also set the orientation of the photo connection based
+                     on the device orientation by observing UIDeviceOrientationDidChangeNotification.
+                     */
+                    let videoOrientation = self.videoDataOutput.connection(with: .video)!.videoOrientation
+                    if let rotation = PreviewMetalView.Rotation(with: interfaceOrientation, videoOrientation: videoOrientation,
                                                                 cameraPosition: self.videoDeviceInput.device.position) {
-						self.jetView.rotation = rotation
-					}
-				}
-			}, completion: nil
-		)
-	}
-
-	// MARK: - KVO and Notifications
-
-	private var sessionRunningContext = 0
-
-	private func addObservers() {
-		NotificationCenter.default.addObserver(self, selector: #selector(didEnterBackground),
+                        self.jetView.rotation = rotation
+                    }
+                }
+        }, completion: nil
+        )
+    }
+    
+    // MARK: - KVO and Notifications
+    
+    private var sessionRunningContext = 0
+    
+    private func addObservers() {
+        NotificationCenter.default.addObserver(self, selector: #selector(didEnterBackground),
                                                name: UIApplication.didEnterBackgroundNotification, object: nil)
-		NotificationCenter.default.addObserver(self, selector: #selector(willEnterForground),
+        NotificationCenter.default.addObserver(self, selector: #selector(willEnterForground),
                                                name: UIApplication.willEnterForegroundNotification, object: nil)
-		NotificationCenter.default.addObserver(self, selector: #selector(thermalStateChanged),
+        NotificationCenter.default.addObserver(self, selector: #selector(thermalStateChanged),
                                                name: ProcessInfo.thermalStateDidChangeNotification,	object: nil)
-		NotificationCenter.default.addObserver(self, selector: #selector(sessionRuntimeError),
+        NotificationCenter.default.addObserver(self, selector: #selector(sessionRuntimeError),
                                                name: NSNotification.Name.AVCaptureSessionRuntimeError, object: session)
-
-		session.addObserver(self, forKeyPath: "running", options: NSKeyValueObservingOptions.new, context: &sessionRunningContext)
-
-		/*
-			A session can only run when the app is full screen. It will be interrupted
-			in a multi-app layout, introduced in iOS 9, see also the documentation of
-			AVCaptureSessionInterruptionReason. Add observers to handle these session
-			interruptions and show a preview is paused message. See the documentation
-			of AVCaptureSessionWasInterruptedNotification for other interruption reasons.
-		*/
-		NotificationCenter.default.addObserver(self, selector: #selector(sessionWasInterrupted),
+        
+        session.addObserver(self, forKeyPath: "running", options: NSKeyValueObservingOptions.new, context: &sessionRunningContext)
+        
+        /*
+         A session can only run when the app is full screen. It will be interrupted
+         in a multi-app layout, introduced in iOS 9, see also the documentation of
+         AVCaptureSessionInterruptionReason. Add observers to handle these session
+         interruptions and show a preview is paused message. See the documentation
+         of AVCaptureSessionWasInterruptedNotification for other interruption reasons.
+         */
+        NotificationCenter.default.addObserver(self, selector: #selector(sessionWasInterrupted),
                                                name: NSNotification.Name.AVCaptureSessionWasInterrupted,
                                                object: session)
-		NotificationCenter.default.addObserver(self, selector: #selector(sessionInterruptionEnded),
+        NotificationCenter.default.addObserver(self, selector: #selector(sessionInterruptionEnded),
                                                name: NSNotification.Name.AVCaptureSessionInterruptionEnded,
                                                object: session)
-		NotificationCenter.default.addObserver(self, selector: #selector(subjectAreaDidChange),
+        NotificationCenter.default.addObserver(self, selector: #selector(subjectAreaDidChange),
                                                name: NSNotification.Name.AVCaptureDeviceSubjectAreaDidChange,
                                                object: videoDeviceInput.device)
-	}
-
-	deinit {
-		NotificationCenter.default.removeObserver(self)
-		session.removeObserver(self, forKeyPath: "running", context: &sessionRunningContext)
-	}
-
-	override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey: Any]?, context: UnsafeMutableRawPointer?) {
-		if context != &sessionRunningContext {
-			super.observeValue(forKeyPath: keyPath, of: object, change: change, context: context)
-		}
-	}
-
-	// MARK: - Session Management
-
-	// Call this on the session queue
-	private func configureSession() {
-		if setupResult != .success {
-			return
-		}
-
-		let defaultVideoDevice: AVCaptureDevice? = videoDeviceDiscoverySession.devices.first
-
-		guard let videoDevice = defaultVideoDevice else {
-			print("Could not find any video device")
-			setupResult = .configurationFailed
-			return
-		}
-
-		do {
-			videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
-		} catch {
-			print("Could not create video device input: \(error)")
-			setupResult = .configurationFailed
-			return
-		}
-
-		session.beginConfiguration()
-
-		session.sessionPreset = AVCaptureSession.Preset.vga640x480
-
-		// Add a video input
-		guard session.canAddInput(videoDeviceInput) else {
-			print("Could not add video device input to the session")
-			setupResult = .configurationFailed
-			session.commitConfiguration()
-			return
-		}
-		session.addInput(videoDeviceInput)
-
-		// Add a video data output
-		if session.canAddOutput(videoDataOutput) {
-			session.addOutput(videoDataOutput)
-			videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
-		} else {
-			print("Could not add video data output to the session")
-			setupResult = .configurationFailed
-			session.commitConfiguration()
-			return
-		}
-
-		// Add a depth data output
-		if session.canAddOutput(depthDataOutput) {
-			session.addOutput(depthDataOutput)
-			depthDataOutput.isFilteringEnabled = false
+    }
+    
+    deinit {
+        NotificationCenter.default.removeObserver(self)
+        session.removeObserver(self, forKeyPath: "running", context: &sessionRunningContext)
+    }
+    
+    override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey: Any]?, context: UnsafeMutableRawPointer?) {
+        if context != &sessionRunningContext {
+            super.observeValue(forKeyPath: keyPath, of: object, change: change, context: context)
+        }
+    }
+    
+    // MARK: - Session Management
+    
+    // Call this on the session queue
+    private func configureSession() {
+        if setupResult != .success {
+            return
+        }
+        
+        let defaultVideoDevice: AVCaptureDevice? = videoDeviceDiscoverySession.devices.first
+        
+        guard let videoDevice = defaultVideoDevice else {
+            print("Could not find any video device")
+            setupResult = .configurationFailed
+            return
+        }
+        
+        do {
+            videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
+        } catch {
+            print("Could not create video device input: \(error)")
+            setupResult = .configurationFailed
+            return
+        }
+        
+        session.beginConfiguration()
+        
+        session.sessionPreset = AVCaptureSession.Preset.vga640x480
+        
+        // Add a video input
+        guard session.canAddInput(videoDeviceInput) else {
+            print("Could not add video device input to the session")
+            setupResult = .configurationFailed
+            session.commitConfiguration()
+            return
+        }
+        session.addInput(videoDeviceInput)
+        
+        // Add a video data output
+        if session.canAddOutput(videoDataOutput) {
+            session.addOutput(videoDataOutput)
+            videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
+        } else {
+            print("Could not add video data output to the session")
+            setupResult = .configurationFailed
+            session.commitConfiguration()
+            return
+        }
+        
+        // Add a depth data output
+        if session.canAddOutput(depthDataOutput) {
+            session.addOutput(depthDataOutput)
+            depthDataOutput.isFilteringEnabled = false
             if let connection = depthDataOutput.connection(with: .depthData) {
                 connection.isEnabled = true
             } else {
                 print("No AVCaptureConnection")
             }
-		} else {
-			print("Could not add depth data output to the session")
-			setupResult = .configurationFailed
-			session.commitConfiguration()
-			return
-		}
+        } else {
+            print("Could not add depth data output to the session")
+            setupResult = .configurationFailed
+            session.commitConfiguration()
+            return
+        }
         
         // Search for highest resolution with half-point depth values
         let depthFormats = videoDevice.activeFormat.supportedDepthDataFormats
         let filtered = depthFormats.filter({
-            $0.formatDescription.mediaSubType == kCVPixelFormatType_DepthFloat16
+            CMFormatDescriptionGetMediaSubType($0.formatDescription) == kCVPixelFormatType_DepthFloat16
         })
         let selectedFormat = filtered.max(by: {
-            first, second in first.formatDescription.videoDimensions.width < second.formatDescription.videoDimensions.width
+            first, second in CMVideoFormatDescriptionGetDimensions(first.formatDescription).width < CMVideoFormatDescriptionGetDimensions(second.formatDescription).width
         })
         
         do {
@@ -443,51 +445,51 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
         // The first output in the dataOutputs array, in this case the AVCaptureVideoDataOutput, is the "master" output.
         outputSynchronizer = AVCaptureDataOutputSynchronizer(dataOutputs: [videoDataOutput, depthDataOutput])
         outputSynchronizer!.setDelegate(self, queue: dataOutputQueue)
-		session.commitConfiguration()
-	}
-
+        session.commitConfiguration()
+    }
+    
     private func focus(with focusMode: AVCaptureDevice.FocusMode,
                        exposureMode: AVCaptureDevice.ExposureMode,
                        at devicePoint: CGPoint,
                        monitorSubjectAreaChange: Bool) {
-		sessionQueue.async {
-			let videoDevice = self.videoDeviceInput.device
-
-			do {
-				try videoDevice.lockForConfiguration()
-				if videoDevice.isFocusPointOfInterestSupported && videoDevice.isFocusModeSupported(focusMode) {
-					videoDevice.focusPointOfInterest = devicePoint
-					videoDevice.focusMode = focusMode
-				}
-
-				if videoDevice.isExposurePointOfInterestSupported && videoDevice.isExposureModeSupported(exposureMode) {
-					videoDevice.exposurePointOfInterest = devicePoint
-					videoDevice.exposureMode = exposureMode
-				}
-
-				videoDevice.isSubjectAreaChangeMonitoringEnabled = monitorSubjectAreaChange
-				videoDevice.unlockForConfiguration()
-			} catch {
-				print("Could not lock device for configuration: \(error)")
-			}
-		}
-	}
-
-	@IBAction private func changeMixFactor(_ sender: UISlider) {
-		let mixFactor = sender.value
-
-		dataOutputQueue.async {
-			self.videoDepthMixer.mixFactor = mixFactor
-		}
-	}
-
-	@IBAction private func changeDepthSmoothing(_ sender: UISwitch) {
+        sessionQueue.async {
+            let videoDevice = self.videoDeviceInput.device
+            
+            do {
+                try videoDevice.lockForConfiguration()
+                if videoDevice.isFocusPointOfInterestSupported && videoDevice.isFocusModeSupported(focusMode) {
+                    videoDevice.focusPointOfInterest = devicePoint
+                    videoDevice.focusMode = focusMode
+                }
+                
+                if videoDevice.isExposurePointOfInterestSupported && videoDevice.isExposureModeSupported(exposureMode) {
+                    videoDevice.exposurePointOfInterest = devicePoint
+                    videoDevice.exposureMode = exposureMode
+                }
+                
+                videoDevice.isSubjectAreaChangeMonitoringEnabled = monitorSubjectAreaChange
+                videoDevice.unlockForConfiguration()
+            } catch {
+                print("Could not lock device for configuration: \(error)")
+            }
+        }
+    }
+    
+    @IBAction private func changeMixFactor(_ sender: UISlider) {
+        let mixFactor = sender.value
+        
+        dataOutputQueue.async {
+            self.videoDepthMixer.mixFactor = mixFactor
+        }
+    }
+    
+    @IBAction private func changeDepthSmoothing(_ sender: UISwitch) {
         let smoothingEnabled = sender.isOn
         
         sessionQueue.async {
             self.depthDataOutput.isFilteringEnabled = smoothingEnabled
         }
-	}
+    }
     
     @IBAction func changeCloudToJET(_ sender: UISegmentedControl) {
         JETEnabled = (sender.selectedSegmentIndex == 0)
@@ -498,132 +500,132 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
             } else {
                 self.depthDataOutput.isFilteringEnabled = false
             }
-
+            
             self.cloudView.isHidden = JETEnabled
             self.jetView.isHidden = !JETEnabled
         }
     }
-
-	@IBAction private func focusAndExposeTap(_ gesture: UITapGestureRecognizer) {
-		let location = gesture.location(in: jetView)
-		guard let texturePoint = jetView.texturePointForView(point: location) else {
-			return
-		}
-
-		let textureRect = CGRect(origin: texturePoint, size: .zero)
-		let deviceRect = videoDataOutput.metadataOutputRectConverted(fromOutputRect: textureRect)
-		focus(with: .autoFocus, exposureMode: .autoExpose, at: deviceRect.origin, monitorSubjectAreaChange: true)
+    
+    @IBAction private func focusAndExposeTap(_ gesture: UITapGestureRecognizer) {
+        let location = gesture.location(in: jetView)
+        guard let texturePoint = jetView.texturePointForView(point: location) else {
+            return
+        }
+        
+        let textureRect = CGRect(origin: texturePoint, size: .zero)
+        let deviceRect = videoDataOutput.metadataOutputRectConverted(fromOutputRect: textureRect)
+        focus(with: .autoFocus, exposureMode: .autoExpose, at: deviceRect.origin, monitorSubjectAreaChange: true)
+    }
+    
+    @objc
+    func subjectAreaDidChange(notification: NSNotification) {
+        let devicePoint = CGPoint(x: 0.5, y: 0.5)
+        focus(with: .continuousAutoFocus, exposureMode: .continuousAutoExposure, at: devicePoint, monitorSubjectAreaChange: false)
+    }
+    
+    @objc
+    func sessionWasInterrupted(notification: NSNotification) {
+        // In iOS 9 and later, the userInfo dictionary contains information on why the session was interrupted.
+        if let userInfoValue = notification.userInfo?[AVCaptureSessionInterruptionReasonKey] as AnyObject?,
+            let reasonIntegerValue = userInfoValue.integerValue,
+            let reason = AVCaptureSession.InterruptionReason(rawValue: reasonIntegerValue) {
+            print("Capture session was interrupted with reason \(reason)")
+            
+            if reason == .videoDeviceInUseByAnotherClient {
+                // Simply fade-in a button to enable the user to try to resume the session running.
+                resumeButton.isHidden = false
+                resumeButton.alpha = 0.0
+                UIView.animate(withDuration: 0.25) {
+                    self.resumeButton.alpha = 1.0
+                }
+            } else if reason == .videoDeviceNotAvailableWithMultipleForegroundApps {
+                // Simply fade-in a label to inform the user that the camera is unavailable.
+                cameraUnavailableLabel.isHidden = false
+                cameraUnavailableLabel.alpha = 0.0
+                UIView.animate(withDuration: 0.25) {
+                    self.cameraUnavailableLabel.alpha = 1.0
+                }
+            }
+        }
+    }
+    
+    @objc
+    func sessionInterruptionEnded(notification: NSNotification) {
+        if !resumeButton.isHidden {
+            UIView.animate(withDuration: 0.25,
+                           animations: {
+                            self.resumeButton.alpha = 0
+            }, completion: { _ in
+                self.resumeButton.isHidden = true
+            }
+            )
+        }
+        if !cameraUnavailableLabel.isHidden {
+            UIView.animate(withDuration: 0.25,
+                           animations: {
+                            self.cameraUnavailableLabel.alpha = 0
+            }, completion: { _ in
+                self.cameraUnavailableLabel.isHidden = true
+            }
+            )
+        }
+    }
+    
+    @objc
+    func sessionRuntimeError(notification: NSNotification) {
+        guard let errorValue = notification.userInfo?[AVCaptureSessionErrorKey] as? NSError else {
+            return
+        }
+        
+        let error = AVError(_nsError: errorValue)
+        print("Capture session runtime error: \(error)")
+        
+        /*
+         Automatically try to restart the session running if media services were
+         reset and the last start running succeeded. Otherwise, enable the user
+         to try to resume the session running.
+         */
+        if error.code == .mediaServicesWereReset {
+            sessionQueue.async {
+                if self.isSessionRunning {
+                    self.session.startRunning()
+                    self.isSessionRunning = self.session.isRunning
+                } else {
+                    DispatchQueue.main.async {
+                        self.resumeButton.isHidden = false
+                    }
+                }
+            }
+        } else {
+            resumeButton.isHidden = false
+        }
+    }
+    
+    @IBAction private func resumeInterruptedSession(_ sender: UIButton) {
+        sessionQueue.async {
+            /*
+             The session might fail to start running. A failure to start the session running will be communicated via
+             a session runtime error notification. To avoid repeatedly failing to start the session
+             running, we only try to restart the session running in the session runtime error handler
+             if we aren't trying to resume the session running.
+             */
+            self.session.startRunning()
+            self.isSessionRunning = self.session.isRunning
+            if !self.session.isRunning {
+                DispatchQueue.main.async {
+                    let message = NSLocalizedString("Unable to resume", comment: "Alert message when unable to resume the session running")
+                    let alertController = UIAlertController(title: "TrueDepthStreamer", message: message, preferredStyle: .alert)
+                    let cancelAction = UIAlertAction(title: NSLocalizedString("OK", comment: "Alert OK button"), style: .cancel, handler: nil)
+                    alertController.addAction(cancelAction)
+                    self.present(alertController, animated: true, completion: nil)
+                }
+            } else {
+                DispatchQueue.main.async {
+                    self.resumeButton.isHidden = true
+                }
+            }
+        }
     }
-
-	@objc
-	func subjectAreaDidChange(notification: NSNotification) {
-		let devicePoint = CGPoint(x: 0.5, y: 0.5)
-		focus(with: .continuousAutoFocus, exposureMode: .continuousAutoExposure, at: devicePoint, monitorSubjectAreaChange: false)
-	}
-
-	@objc
-	func sessionWasInterrupted(notification: NSNotification) {
-		// In iOS 9 and later, the userInfo dictionary contains information on why the session was interrupted.
-		if let userInfoValue = notification.userInfo?[AVCaptureSessionInterruptionReasonKey] as AnyObject?,
-			let reasonIntegerValue = userInfoValue.integerValue,
-			let reason = AVCaptureSession.InterruptionReason(rawValue: reasonIntegerValue) {
-			print("Capture session was interrupted with reason \(reason)")
-
-			if reason == .videoDeviceInUseByAnotherClient {
-				// Simply fade-in a button to enable the user to try to resume the session running.
-				resumeButton.isHidden = false
-				resumeButton.alpha = 0.0
-				UIView.animate(withDuration: 0.25) {
-					self.resumeButton.alpha = 1.0
-				}
-			} else if reason == .videoDeviceNotAvailableWithMultipleForegroundApps {
-				// Simply fade-in a label to inform the user that the camera is unavailable.
-				cameraUnavailableLabel.isHidden = false
-				cameraUnavailableLabel.alpha = 0.0
-				UIView.animate(withDuration: 0.25) {
-					self.cameraUnavailableLabel.alpha = 1.0
-				}
-			}
-		}
-	}
-
-	@objc
-	func sessionInterruptionEnded(notification: NSNotification) {
-		if !resumeButton.isHidden {
-			UIView.animate(withDuration: 0.25,
-				animations: {
-					self.resumeButton.alpha = 0
-				}, completion: { _ in
-					self.resumeButton.isHidden = true
-				}
-			)
-		}
-		if !cameraUnavailableLabel.isHidden {
-			UIView.animate(withDuration: 0.25,
-				animations: {
-					self.cameraUnavailableLabel.alpha = 0
-				}, completion: { _ in
-					self.cameraUnavailableLabel.isHidden = true
-				}
-			)
-		}
-	}
-
-	@objc
-	func sessionRuntimeError(notification: NSNotification) {
-		guard let errorValue = notification.userInfo?[AVCaptureSessionErrorKey] as? NSError else {
-			return
-		}
-
-		let error = AVError(_nsError: errorValue)
-		print("Capture session runtime error: \(error)")
-
-		/*
-			Automatically try to restart the session running if media services were
-			reset and the last start running succeeded. Otherwise, enable the user
-			to try to resume the session running.
-		*/
-		if error.code == .mediaServicesWereReset {
-			sessionQueue.async {
-				if self.isSessionRunning {
-					self.session.startRunning()
-					self.isSessionRunning = self.session.isRunning
-				} else {
-					DispatchQueue.main.async {
-						self.resumeButton.isHidden = false
-					}
-				}
-			}
-		} else {
-			resumeButton.isHidden = false
-		}
-	}
-
-	@IBAction private func resumeInterruptedSession(_ sender: UIButton) {
-		sessionQueue.async {
-			/*
-				The session might fail to start running. A failure to start the session running will be communicated via
-				a session runtime error notification. To avoid repeatedly failing to start the session
-				running, we only try to restart the session running in the session runtime error handler
-				if we aren't trying to resume the session running.
-			*/
-			self.session.startRunning()
-			self.isSessionRunning = self.session.isRunning
-			if !self.session.isRunning {
-				DispatchQueue.main.async {
-					let message = NSLocalizedString("Unable to resume", comment: "Alert message when unable to resume the session running")
-					let alertController = UIAlertController(title: "TrueDepthStreamer", message: message, preferredStyle: .alert)
-					let cancelAction = UIAlertAction(title: NSLocalizedString("OK", comment: "Alert OK button"), style: .cancel, handler: nil)
-					alertController.addAction(cancelAction)
-					self.present(alertController, animated: true, completion: nil)
-				}
-			} else {
-				DispatchQueue.main.async {
-					self.resumeButton.isHidden = true
-				}
-			}
-		}
-	}
     
     // MARK: - Point cloud view gestures
     
@@ -696,24 +698,24 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
             gesture.rotation = 0
         }
     }
-
+    
     // MARK: - JET view Depth label gesture
-
-    @IBAction private func handleLongPressJET(gesture: UILongPressGestureRecognizer) {
     
+    @IBAction private func handleLongPressJET(gesture: UILongPressGestureRecognizer) {
+        
         switch gesture.state {
-            case .began:
-                touchDetected = true
-                let pnt: CGPoint = gesture.location(in: self.jetView)
-                touchCoordinates = pnt
-            case .changed:
-                let pnt: CGPoint = gesture.location(in: self.jetView)
-                touchCoordinates = pnt
-            case .possible, .ended, .cancelled, .failed:
-                touchDetected = false
-                DispatchQueue.main.async {
-                    self.touchDepth.text = ""
-                }
+        case .began:
+            touchDetected = true
+            let pnt: CGPoint = gesture.location(in: self.jetView)
+            touchCoordinates = pnt
+        case .changed:
+            let pnt: CGPoint = gesture.location(in: self.jetView)
+            touchCoordinates = pnt
+        case .possible, .ended, .cancelled, .failed:
+            touchDetected = false
+            DispatchQueue.main.async {
+                self.touchDepth.text = ""
+            }
         }
     }
     
@@ -724,7 +726,7 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
             self.autoPanningIndex = -1
         }
     }
-
+    
     // MARK: - Video + Depth Frame Processing
     
     func dataOutputSynchronizer(_ synchronizer: AVCaptureDataOutputSynchronizer,
@@ -733,7 +735,7 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
         if !renderingEnabled {
             return
         }
-
+        
         // Read all outputs
         guard renderingEnabled,
             let syncedDepthData: AVCaptureSynchronizedDepthData =
@@ -750,9 +752,10 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
         
         let depthData = syncedDepthData.depthData
         let depthPixelBuffer = depthData.depthDataMap
-        guard let videoPixelBuffer = syncedVideoData.sampleBuffer.imageBuffer,
-            let formatDescription = syncedVideoData.sampleBuffer.formatDescription else {
-            return
+        let sampleBuffer = syncedVideoData.sampleBuffer
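+        // Extract the video pixel buffer and format description via the CMSampleBuffer C APIs; drop the frame if either is missing.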
+        guard let videoPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
+            let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer) else {
+                return
         }
         
         if JETEnabled {
@@ -763,9 +766,9 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
                  to cover the dispatch_async call.
                  */
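+                // Wrap the depth pixel buffer in a video format description so the converter can size its textures and output pool.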
                 var depthFormatDescription: CMFormatDescription?
-                CMFormatDescription.createForVideo(allocator: kCFAllocatorDefault,
-                                                   imageBuffer: depthPixelBuffer,
-                                                   formatDescriptionOut: &depthFormatDescription)
+                CMVideoFormatDescriptionCreateForImageBuffer(allocator: kCFAllocatorDefault,
+                                                             imageBuffer: depthPixelBuffer,
+                                                             formatDescriptionOut: &depthFormatDescription)
                 videoDepthConverter.prepare(with: depthFormatDescription!, outputRetainedBufferCountHint: 2)
             }
             
@@ -783,7 +786,7 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
                 print("Unable to combine video and depth")
                 return
             }
-                
+            
             jetView.pixelBuffer = mixedBuffer
             
             updateDepthLabel(depthFrame: depthPixelBuffer, videoFrame: videoPixelBuffer)
@@ -822,14 +825,14 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
            // Scale factor from video-frame coordinates to depth-map coordinates
             let scale = CGFloat(CVPixelBufferGetWidth(depthFrame)) / CGFloat(CVPixelBufferGetWidth(videoFrame))
             let depthPoint = CGPoint(x: CGFloat(CVPixelBufferGetWidth(depthFrame)) - 1.0 - texturePoint.x * scale, y: texturePoint.y * scale)
-
+            
             assert(kCVPixelFormatType_DepthFloat16 == CVPixelBufferGetPixelFormatType(depthFrame))
             CVPixelBufferLockBaseAddress(depthFrame, .readOnly)
             let rowData = CVPixelBufferGetBaseAddress(depthFrame)! + Int(depthPoint.y) * CVPixelBufferGetBytesPerRow(depthFrame)
            // Swift does not have a Float16 data type. Use UInt16 instead, and then convert to Float32
             var f16Pixel = rowData.assumingMemoryBound(to: UInt16.self)[Int(depthPoint.x)]
             CVPixelBufferUnlockBaseAddress(depthFrame, .readOnly)
-
+            
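+            // Convert the half-float depth sample to a 32-bit Float using a 1x1 vImage buffer pair.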
             var f32Pixel = Float(0.0)
             var src = vImage_Buffer(data: &f16Pixel, height: 1, width: 1, rowBytes: 2)
             var dst = vImage_Buffer(data: &f32Pixel, height: 1, width: 1, rowBytes: 4)
@@ -854,15 +857,15 @@ class CameraViewController: UIViewController, AVCaptureDataOutputSynchronizerDel
 }
 
 extension AVCaptureVideoOrientation {
-	init?(interfaceOrientation: UIInterfaceOrientation) {
-		switch interfaceOrientation {
-		case .portrait: self = .portrait
-		case .portraitUpsideDown: self = .portraitUpsideDown
-		case .landscapeLeft: self = .landscapeLeft
-		case .landscapeRight: self = .landscapeRight
-		default: return nil
-		}
-	}
+    init?(interfaceOrientation: UIInterfaceOrientation) {
+        switch interfaceOrientation {
+        case .portrait: self = .portrait
+        case .portraitUpsideDown: self = .portraitUpsideDown
+        case .landscapeLeft: self = .landscapeLeft
+        case .landscapeRight: self = .landscapeRight
+        default: return nil
+        }
+    }
 }
 
 extension PreviewMetalView.Rotation {
diff --git a/TrueDepthStreamer/DepthToJETConverter.swift b/TrueDepthStreamer/DepthToJETConverter.swift
index bfc6f4c..fd3be05 100644
--- a/TrueDepthStreamer/DepthToJETConverter.swift
+++ b/TrueDepthStreamer/DepthToJETConverter.swift
@@ -95,20 +95,20 @@ class ColorTable: NSObject {
 }
 
 class DepthToJETConverter: FilterRenderer {
-
-	var description: String = "Depth to JET Converter"
-
-	var isPrepared = false
-
-	private(set) var inputFormatDescription: CMFormatDescription?
-
-	private(set) var outputFormatDescription: CMFormatDescription?
-
-	private var inputTextureFormat: MTLPixelFormat = .invalid
-
-	private var outputPixelBufferPool: CVPixelBufferPool!
     
-	private let metalDevice = MTLCreateSystemDefaultDevice()!
+    var description: String = "Depth to JET Converter"
+    
+    var isPrepared = false
+    
+    private(set) var inputFormatDescription: CMFormatDescription?
+    
+    private(set) var outputFormatDescription: CMFormatDescription?
+    
+    private var inputTextureFormat: MTLPixelFormat = .invalid
+    
+    private var outputPixelBufferPool: CVPixelBufferPool!
+    
+    private let metalDevice = MTLCreateSystemDefaultDevice()!
     
     private let jetParams = JETParams()
     
@@ -118,24 +118,24 @@ class DepthToJETConverter: FilterRenderer {
     
     private let histogramBuffer: MTLBuffer
     
-	private var computePipelineState: MTLComputePipelineState?
-
+    private var computePipelineState: MTLComputePipelineState?
+    
     private lazy var commandQueue: MTLCommandQueue? = {
         return self.metalDevice.makeCommandQueue()
     }()
-
-	private var textureCache: CVMetalTextureCache!
-		
-	private var colorBuf: MTLBuffer?
-
-	required init() {
-		let defaultLibrary = metalDevice.makeDefaultLibrary()!
-		let kernelFunction = defaultLibrary.makeFunction(name: "depthToJET")
-		do {
-			computePipelineState = try metalDevice.makeComputePipelineState(function: kernelFunction!)
-		} catch {
-			fatalError("Unable to create depth converter pipeline state. (\(error))")
-		}
+    
+    private var textureCache: CVMetalTextureCache!
+    
+    private var colorBuf: MTLBuffer?
+    
+    required init() {
+        let defaultLibrary = metalDevice.makeDefaultLibrary()!
+        let kernelFunction = defaultLibrary.makeFunction(name: "depthToJET")
+        do {
+            computePipelineState = try metalDevice.makeComputePipelineState(function: kernelFunction!)
+        } catch {
+            fatalError("Unable to create depth converter pipeline state. (\(error))")
+        }
         
         guard let histBuffer = metalDevice.makeBuffer(
             length: MemoryLayout<Float>.size * Int(jetParams.histogramSize),
@@ -148,95 +148,98 @@ class DepthToJETConverter: FilterRenderer {
         guard let jetBuffer = metalDevice.makeBuffer(length: MemoryLayout<JETParams>.size, options: .storageModeShared) else {
             fatalError("Failed to allocate buffer for histogram size")
         }
-
+        
         jetBuffer.contents().bindMemory(to: JETParams.self, capacity: 1)
             .assign(repeating: self.jetParams, count: 1)
-
+        
         self.jetParamsBuffer = jetBuffer
-	}
-
-	static private func allocateOutputBufferPool(with formatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int) -> CVPixelBufferPool? {
-		let inputDimensions = formatDescription.videoDimensions
-		let outputPixelBufferAttributes: [String: Any] = [
-			kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
-			kCVPixelBufferWidthKey as String: Int(inputDimensions.width),
-			kCVPixelBufferHeightKey as String: Int(inputDimensions.height),
-			kCVPixelBufferIOSurfacePropertiesKey as String: [:]
-		]
-
-		let poolAttributes = [kCVPixelBufferPoolMinimumBufferCountKey as String: outputRetainedBufferCountHint]
-		var cvPixelBufferPool: CVPixelBufferPool?
-		// Create a pixel buffer pool with the same pixel attributes as the input format description
-		CVPixelBufferPoolCreate(kCFAllocatorDefault, poolAttributes as NSDictionary?, outputPixelBufferAttributes as NSDictionary?, &cvPixelBufferPool)
-		guard let pixelBufferPool = cvPixelBufferPool else {
-			assertionFailure("Allocation failure: Could not create pixel buffer pool")
-			return nil
-		}
-		return pixelBufferPool
-	}
-
-	func prepare(with formatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int) {
-		reset()
-
-		outputPixelBufferPool = DepthToJETConverter.allocateOutputBufferPool(with: formatDescription,
-		                                                                           outputRetainedBufferCountHint: outputRetainedBufferCountHint)
-		if outputPixelBufferPool == nil {
-			return
-		}
-
-		var pixelBuffer: CVPixelBuffer?
-		var pixelBufferFormatDescription: CMFormatDescription?
-		_ = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &pixelBuffer)
-		if let pixelBuffer = pixelBuffer {
-			CMFormatDescription.createForVideo(allocator: kCFAllocatorDefault, imageBuffer: pixelBuffer, formatDescriptionOut: &pixelBufferFormatDescription)
-		}
-		pixelBuffer = nil
-
-		inputFormatDescription = formatDescription
-		outputFormatDescription = pixelBufferFormatDescription
-
-		let inputMediaSubType = formatDescription.mediaSubType
-		if inputMediaSubType == kCVPixelFormatType_DepthFloat16 {
-			inputTextureFormat = .r16Float
-		} else {
-			assertionFailure("Input format not supported")
-		}
-
-		var metalTextureCache: CVMetalTextureCache?
-		if CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, metalDevice, nil, &metalTextureCache) != kCVReturnSuccess {
-			assertionFailure("Unable to allocate depth converter texture cache")
-		} else {
-			textureCache = metalTextureCache
-		}
-
+    }
+    
+    static private func allocateOutputBufferPool(with formatDescription: CMFormatDescription,
+                                                 outputRetainedBufferCountHint: Int) -> CVPixelBufferPool? {
+        let inputDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
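+        // Output buffers keep the input dimensions but use 32-bit BGRA, and are IOSurface-backed so Metal can wrap them as textures.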
+        let outputBufferAttributes: [String: Any] = [
+            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
+            kCVPixelBufferWidthKey as String: Int(inputDimensions.width),
+            kCVPixelBufferHeightKey as String: Int(inputDimensions.height),
+            kCVPixelBufferIOSurfacePropertiesKey as String: [:]
+        ]
+        
+        let poolAttributes = [kCVPixelBufferPoolMinimumBufferCountKey as String: outputRetainedBufferCountHint]
+        var cvPixelBufferPool: CVPixelBufferPool?
+        // Create a pixel buffer pool that matches the input dimensions but holds 32-bit BGRA output buffers
+        CVPixelBufferPoolCreate(kCFAllocatorDefault, poolAttributes as NSDictionary?, outputBufferAttributes as NSDictionary?, &cvPixelBufferPool)
+        guard let pixelBufferPool = cvPixelBufferPool else {
+            assertionFailure("Allocation failure: Could not create pixel buffer pool")
+            return nil
+        }
+        return pixelBufferPool
+    }
+    
+    func prepare(with formatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int) {
+        reset()
+        
+        outputPixelBufferPool = DepthToJETConverter.allocateOutputBufferPool(with: formatDescription,
+                                                                             outputRetainedBufferCountHint: outputRetainedBufferCountHint)
+        if outputPixelBufferPool == nil {
+            return
+        }
+        
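+        // Allocate one throwaway buffer from the pool just to derive the output format description.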
+        var pixelBuffer: CVPixelBuffer?
+        var pixelBufferFormatDescription: CMFormatDescription?
+        _ = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &pixelBuffer)
+        if let pixelBuffer = pixelBuffer {
+            CMVideoFormatDescriptionCreateForImageBuffer(allocator: kCFAllocatorDefault,
+                                                         imageBuffer: pixelBuffer,
+                                                         formatDescriptionOut: &pixelBufferFormatDescription)
+        }
+        pixelBuffer = nil
+        
+        inputFormatDescription = formatDescription
+        outputFormatDescription = pixelBufferFormatDescription
+        
+        let inputMediaSubType = CMFormatDescriptionGetMediaSubType(formatDescription)
+        if inputMediaSubType == kCVPixelFormatType_DepthFloat16 {
+            inputTextureFormat = .r16Float
+        } else {
+            assertionFailure("Input format not supported")
+        }
+        
+        var metalTextureCache: CVMetalTextureCache?
+        if CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, metalDevice, nil, &metalTextureCache) != kCVReturnSuccess {
+            assertionFailure("Unable to allocate depth converter texture cache")
+        } else {
+            textureCache = metalTextureCache
+        }
+        
         let colorTable = ColorTable(metalDevice: metalDevice, size: self.colors)
         colorBuf = colorTable.getColorTable()
-
-		isPrepared = true
-	}
-
-	func reset() {
-		outputPixelBufferPool = nil
-		outputFormatDescription = nil
-		inputFormatDescription = nil
-		textureCache = nil
-		isPrepared = false
-	}
-
+        
+        isPrepared = true
+    }
+    
+    func reset() {
+        outputPixelBufferPool = nil
+        outputFormatDescription = nil
+        inputFormatDescription = nil
+        textureCache = nil
+        isPrepared = false
+    }
+    
     // MARK: - Depth to JET Conversion
     
-	func render(pixelBuffer: CVPixelBuffer) -> CVPixelBuffer? {
-		if !isPrepared {
-			assertionFailure("Invalid state: Not prepared")
-			return nil
-		}
-
-		var newPixelBuffer: CVPixelBuffer?
-		CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &newPixelBuffer)
-		guard let outputPixelBuffer = newPixelBuffer else {
-			print("Allocation failure: Could not get pixel buffer from pool (\(self.description))")
-			return nil
-		}
+    func render(pixelBuffer: CVPixelBuffer) -> CVPixelBuffer? {
+        if !isPrepared {
+            assertionFailure("Invalid state: Not prepared")
+            return nil
+        }
+        
+        var newPixelBuffer: CVPixelBuffer?
+        CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &newPixelBuffer)
+        guard let outputPixelBuffer = newPixelBuffer else {
+            print("Allocation failure: Could not get pixel buffer from pool (\(self.description))")
+            return nil
+        }
         
         let hist = histogramBuffer.contents().bindMemory(to: Float.self, capacity: Int(self.jetParams.histogramSize))
         
@@ -247,12 +250,12 @@ class DepthToJETConverter: FilterRenderer {
                                           minDepth: 0.0,
                                           maxDepth: 5.0,
                                           binningFactor: self.jetParams.binningFactor)
-
+        
         guard let outputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: outputPixelBuffer, textureFormat: .bgra8Unorm),
-              let inputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: pixelBuffer, textureFormat: inputTextureFormat) else {
-            return nil
+            let inputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: pixelBuffer, textureFormat: inputTextureFormat) else {
+                return nil
         }
-
+        
         // Set up command queue, buffer, and encoder
         guard let commandQueue = commandQueue,
             let commandBuffer = commandQueue.makeCommandBuffer(),
@@ -261,7 +264,7 @@ class DepthToJETConverter: FilterRenderer {
                 CVMetalTextureCacheFlush(textureCache!, 0)
                 return nil
         }
-
+        
         commandEncoder.label = "Depth to JET"
         commandEncoder.setComputePipelineState(computePipelineState!)
         commandEncoder.setTexture(inputTexture, index: 0)
@@ -269,7 +272,7 @@ class DepthToJETConverter: FilterRenderer {
         commandEncoder.setBuffer(self.jetParamsBuffer, offset: 0, index: 0)
         commandEncoder.setBuffer(self.histogramBuffer, offset: 0, index: 1)
         commandEncoder.setBuffer(colorBuf, offset: 0, index: 2)
-
+        
         // Set up thread groups as described in https://developer.apple.com/reference/metal/mtlcomputecommandencoder
         let width = computePipelineState!.threadExecutionWidth
         let height = computePipelineState!.maxTotalThreadsPerThreadgroup / width
@@ -278,28 +281,27 @@ class DepthToJETConverter: FilterRenderer {
                                           height: (inputTexture.height + height - 1) / height,
                                           depth: 1)
         commandEncoder.dispatchThreadgroups(threadgroupsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
-
+        
         commandEncoder.endEncoding()
-
+        
         commandBuffer.commit()
-
-		return outputPixelBuffer
-	}
-
+        
+        return outputPixelBuffer
+    }
+    
     func makeTextureFromCVPixelBuffer(pixelBuffer: CVPixelBuffer, textureFormat: MTLPixelFormat) -> MTLTexture? {
-		let width = CVPixelBufferGetWidth(pixelBuffer)
-		let height = CVPixelBufferGetHeight(pixelBuffer)
-
-		// Create a Metal texture from the image buffer
-		var cvTextureOut: CVMetalTexture?
+        let width = CVPixelBufferGetWidth(pixelBuffer)
+        let height = CVPixelBufferGetHeight(pixelBuffer)
+        
+        // Create a Metal texture from the image buffer
+        var cvTextureOut: CVMetalTexture?
         CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, textureFormat, width, height, 0, &cvTextureOut)
         guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
             print("Depth converter failed to create preview texture")
             CVMetalTextureCacheFlush(textureCache, 0)
             return nil
         }
-
-		return texture
-	}
-
+        
+        return texture
+    }
 }
diff --git a/TrueDepthStreamer/FilterRenderer.swift b/TrueDepthStreamer/FilterRenderer.swift
index 2b105b2..0e55c32 100644
--- a/TrueDepthStreamer/FilterRenderer.swift
+++ b/TrueDepthStreamer/FilterRenderer.swift
@@ -8,111 +8,113 @@ Filter renderer protocol.
 import CoreMedia
 
 protocol FilterRenderer: class {
-
-	var description: String { get }
-
-	var isPrepared: Bool { get }
-
-	// Prepare resources.
-	// The outputRetainedBufferCountHint tells out of place renderers how many of
-	// their output buffers will be held onto by the downstream pipeline at one time.
-	// This can be used by the renderer to size and preallocate their pools.
-	func prepare(with inputFormatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int)
-
-	// Release resources.
-	func reset()
-
-	// The format description of the output pixel buffers.
-	var outputFormatDescription: CMFormatDescription? { get }
-
-	// The format description of the input pixel buffers.
-	var inputFormatDescription: CMFormatDescription? { get }
-
-	// Render pixel buffer.
-	func render(pixelBuffer: CVPixelBuffer) -> CVPixelBuffer?
+    
+    var description: String { get }
+    
+    var isPrepared: Bool { get }
+    
+    // Prepare resources.
+    // The outputRetainedBufferCountHint tells out-of-place renderers how many of
+    // their output buffers the downstream pipeline holds onto at one time.
+    // Renderers can use this to size and preallocate their buffer pools.
+    func prepare(with inputFormatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int)
+    
+    // Release resources.
+    func reset()
+    
+    // The format description of the output pixel buffers.
+    var outputFormatDescription: CMFormatDescription? { get }
+    
+    // The format description of the input pixel buffers.
+    var inputFormatDescription: CMFormatDescription? { get }
+    
+    // Render pixel buffer.
+    func render(pixelBuffer: CVPixelBuffer) -> CVPixelBuffer?
 }
 
 func allocateOutputBufferPool(with inputFormatDescription: CMFormatDescription,
                               outputRetainedBufferCountHint: Int) ->(
-	outputBufferPool: CVPixelBufferPool?,
-	outputColorSpace: CGColorSpace?,
-	outputFormatDescription: CMFormatDescription?) {
-
-	let inputMediaSubType = inputFormatDescription.mediaSubType
-	if inputMediaSubType != kCVPixelFormatType_32BGRA {
-		assertionFailure("Invalid input pixel buffer type \(inputMediaSubType)")
-		return (nil, nil, nil)
-	}
-
-	let inputDimensions = inputFormatDescription.videoDimensions
-	var pixelBufferAttributes: [String: Any] = [
-		kCVPixelBufferPixelFormatTypeKey as String: UInt(inputMediaSubType),
-		kCVPixelBufferWidthKey as String: Int(inputDimensions.width),
-		kCVPixelBufferHeightKey as String: Int(inputDimensions.height),
-		kCVPixelBufferIOSurfacePropertiesKey as String: [:]
-	]
-
-	// Get pixel buffer attributes and color space from the input format description
-	var cgColorSpace = CGColorSpaceCreateDeviceRGB()
-	if let inputFormatDescriptionExtension = inputFormatDescription.extensions as Dictionary? {
-		let colorPrimaries = inputFormatDescriptionExtension[kCVImageBufferColorPrimariesKey]
-
-		if let colorPrimaries = colorPrimaries {
-			var colorSpaceProperties: [String: AnyObject] = [kCVImageBufferColorPrimariesKey as String: colorPrimaries]
-
-			if let yCbCrMatrix = inputFormatDescriptionExtension[kCVImageBufferYCbCrMatrixKey] {
-				colorSpaceProperties[kCVImageBufferYCbCrMatrixKey as String] = yCbCrMatrix
-			}
-
-			if let transferFunction = inputFormatDescriptionExtension[kCVImageBufferTransferFunctionKey] {
-				colorSpaceProperties[kCVImageBufferTransferFunctionKey as String] = transferFunction
-			}
-
-			pixelBufferAttributes[kCVBufferPropagatedAttachmentsKey as String] = colorSpaceProperties
-		}
-
-		if let cvColorspace = inputFormatDescriptionExtension[kCVImageBufferCGColorSpaceKey] {
-			cgColorSpace = cvColorspace as! CGColorSpace
-		} else if (colorPrimaries as? String) == (kCVImageBufferColorPrimaries_P3_D65 as String) {
-			cgColorSpace = CGColorSpace(name: CGColorSpace.displayP3)!
-		}
-	}
-
-	// Create a pixel buffer pool with the same pixel attributes as the input format description
-	let poolAttributes = [kCVPixelBufferPoolMinimumBufferCountKey as String: outputRetainedBufferCountHint]
-	var cvPixelBufferPool: CVPixelBufferPool?
-	CVPixelBufferPoolCreate(kCFAllocatorDefault, poolAttributes as NSDictionary?, pixelBufferAttributes as NSDictionary?, &cvPixelBufferPool)
-	guard let pixelBufferPool = cvPixelBufferPool else {
-		assertionFailure("Allocation failure: Could not allocate pixel buffer pool")
-		return (nil, nil, nil)
-	}
-
-	preallocateBuffers(pool: pixelBufferPool, allocationThreshold: outputRetainedBufferCountHint)
-
-	// Get output format description
-	var pixelBuffer: CVPixelBuffer?
-	var outputFormatDescription: CMFormatDescription?
-	let auxAttributes = [kCVPixelBufferPoolAllocationThresholdKey as String: outputRetainedBufferCountHint] as NSDictionary
-	CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(kCFAllocatorDefault, pixelBufferPool, auxAttributes, &pixelBuffer)
-	if let pixelBuffer = pixelBuffer {
-		CMFormatDescription.createForVideo(allocator: kCFAllocatorDefault, imageBuffer: pixelBuffer, formatDescriptionOut: &outputFormatDescription)
-    }
-	pixelBuffer = nil
-
-	return (pixelBufferPool, cgColorSpace, outputFormatDescription)
+    outputBufferPool: CVPixelBufferPool?,
+    outputColorSpace: CGColorSpace?,
+    outputFormatDescription: CMFormatDescription?) {
+        
+        let inputMediaSubType = CMFormatDescriptionGetMediaSubType(inputFormatDescription)
+        if inputMediaSubType != kCVPixelFormatType_32BGRA {
+            assertionFailure("Invalid input pixel buffer type \(inputMediaSubType)")
+            return (nil, nil, nil)
+        }
+        
+        let inputDimensions = CMVideoFormatDescriptionGetDimensions(inputFormatDescription)
+        var pixelBufferAttributes: [String: Any] = [
+            kCVPixelBufferPixelFormatTypeKey as String: UInt(inputMediaSubType),
+            kCVPixelBufferWidthKey as String: Int(inputDimensions.width),
+            kCVPixelBufferHeightKey as String: Int(inputDimensions.height),
+            kCVPixelBufferIOSurfacePropertiesKey as String: [:]
+        ]
+        
+        // Get pixel buffer attributes and color space from the input format description
+        var cgColorSpace = CGColorSpaceCreateDeviceRGB()
+        if let inputFormatDescriptionExtension = CMFormatDescriptionGetExtensions(inputFormatDescription) as Dictionary? {
+            let colorPrimaries = inputFormatDescriptionExtension[kCVImageBufferColorPrimariesKey]
+            
+            if let colorPrimaries = colorPrimaries {
+                var colorSpaceProperties: [String: AnyObject] = [kCVImageBufferColorPrimariesKey as String: colorPrimaries]
+                
+                if let yCbCrMatrix = inputFormatDescriptionExtension[kCVImageBufferYCbCrMatrixKey] {
+                    colorSpaceProperties[kCVImageBufferYCbCrMatrixKey as String] = yCbCrMatrix
+                }
+                
+                if let transferFunction = inputFormatDescriptionExtension[kCVImageBufferTransferFunctionKey] {
+                    colorSpaceProperties[kCVImageBufferTransferFunctionKey as String] = transferFunction
+                }
+                
+                pixelBufferAttributes[kCVBufferPropagatedAttachmentsKey as String] = colorSpaceProperties
+            }
+            
+            if let cvColorspace = inputFormatDescriptionExtension[kCVImageBufferCGColorSpaceKey] {
+                cgColorSpace = cvColorspace as! CGColorSpace
+            } else if (colorPrimaries as? String) == (kCVImageBufferColorPrimaries_P3_D65 as String) {
+                cgColorSpace = CGColorSpace(name: CGColorSpace.displayP3)!
+            }
+        }
+        
+        // Create a pixel buffer pool with the same pixel attributes as the input format description
+        let poolAttributes = [kCVPixelBufferPoolMinimumBufferCountKey as String: outputRetainedBufferCountHint]
+        var cvPixelBufferPool: CVPixelBufferPool?
+        CVPixelBufferPoolCreate(kCFAllocatorDefault, poolAttributes as NSDictionary?, pixelBufferAttributes as NSDictionary?, &cvPixelBufferPool)
+        guard let pixelBufferPool = cvPixelBufferPool else {
+            assertionFailure("Allocation failure: Could not allocate pixel buffer pool")
+            return (nil, nil, nil)
+        }
+        
+        preallocateBuffers(pool: pixelBufferPool, allocationThreshold: outputRetainedBufferCountHint)
+        
+        // Get output format description
+        var pixelBuffer: CVPixelBuffer?
+        var outputFormatDescription: CMFormatDescription?
+        let auxAttributes = [kCVPixelBufferPoolAllocationThresholdKey as String: outputRetainedBufferCountHint] as NSDictionary
+        CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(kCFAllocatorDefault, pixelBufferPool, auxAttributes, &pixelBuffer)
+        if let pixelBuffer = pixelBuffer {
+            CMVideoFormatDescriptionCreateForImageBuffer(allocator: kCFAllocatorDefault,
+                                                         imageBuffer: pixelBuffer,
+                                                         formatDescriptionOut: &outputFormatDescription)
+        }
+        pixelBuffer = nil
+        
+        return (pixelBufferPool, cgColorSpace, outputFormatDescription)
 }
 
 private func preallocateBuffers(pool: CVPixelBufferPool, allocationThreshold: Int) {
-	var pixelBuffers = [CVPixelBuffer]()
-	var error: CVReturn = kCVReturnSuccess
-	let auxAttributes = [kCVPixelBufferPoolAllocationThresholdKey as String: allocationThreshold] as NSDictionary
-	var pixelBuffer: CVPixelBuffer?
-	while error == kCVReturnSuccess {
-		error = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(kCFAllocatorDefault, pool, auxAttributes, &pixelBuffer)
-		if let pixelBuffer = pixelBuffer {
-			pixelBuffers.append(pixelBuffer)
-		}
-		pixelBuffer = nil
-	}
-	pixelBuffers.removeAll()
+    var pixelBuffers = [CVPixelBuffer]()
+    var error: CVReturn = kCVReturnSuccess
+    let auxAttributes = [kCVPixelBufferPoolAllocationThresholdKey as String: allocationThreshold] as NSDictionary
+    var pixelBuffer: CVPixelBuffer?
+    while error == kCVReturnSuccess {
+        error = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(kCFAllocatorDefault, pool, auxAttributes, &pixelBuffer)
+        if let pixelBuffer = pixelBuffer {
+            pixelBuffers.append(pixelBuffer)
+        }
+        pixelBuffer = nil
+    }
+    pixelBuffers.removeAll()
 }
diff --git a/TrueDepthStreamer/HistogramCalculator.h b/TrueDepthStreamer/HistogramCalculator.h
index 0825f3b..e0079c6 100644
--- a/TrueDepthStreamer/HistogramCalculator.h
+++ b/TrueDepthStreamer/HistogramCalculator.h
@@ -13,13 +13,13 @@ Class for performing histogram equalization efficiently
 
 @interface HistogramCalculator : NSObject
 
-+(void) calcHistogramForPixelBuffer:(CVPixelBufferRef)pixelBuffer
-                           toBuffer:(float*)histogram
-                           withSize:(int)size
-                          forColors:(int)colors
-                           minDepth:(float)minDepth
-                           maxDepth:(float)maxDepth
-                      binningFactor:(int)factor;
++ (void) calcHistogramForPixelBuffer:(CVPixelBufferRef)pixelBuffer
+                            toBuffer:(float*)histogram
+                            withSize:(int)size
+                           forColors:(int)colors
+                            minDepth:(float)minDepth
+                            maxDepth:(float)maxDepth
+                       binningFactor:(int)factor;
 
 @end
 
diff --git a/TrueDepthStreamer/HistogramCalculator.m b/TrueDepthStreamer/HistogramCalculator.m
index 5fd851b..25a84f9 100644
--- a/TrueDepthStreamer/HistogramCalculator.m
+++ b/TrueDepthStreamer/HistogramCalculator.m
@@ -51,8 +51,6 @@ Class for performing histogram equalization efficiently
     
     for (int i = 1; i < size; ++i)
         histogram[i] = colors - histogram[i];
-    
-
 }
 
 @end
diff --git a/TrueDepthStreamer/PreviewMetalView.swift b/TrueDepthStreamer/PreviewMetalView.swift
index bc12613..f053dde 100644
--- a/TrueDepthStreamer/PreviewMetalView.swift
+++ b/TrueDepthStreamer/PreviewMetalView.swift
@@ -10,292 +10,292 @@ import Metal
 import MetalKit
 
 class PreviewMetalView: MTKView {
-
-	enum Rotation: Int {
-		case rotate0Degrees
-		case rotate90Degrees
-		case rotate180Degrees
-		case rotate270Degrees
-	}
-
-	var mirroring = false {
-		didSet {
-			syncQueue.sync {
-				internalMirroring = mirroring
-			}
-		}
-	}
-
-	private var internalMirroring: Bool = false
-
-	var rotation: Rotation = .rotate0Degrees {
-		didSet {
-			syncQueue.sync {
-				internalRotation = rotation
-			}
-		}
-	}
-
-	private var internalRotation: Rotation = .rotate0Degrees
-
-	var pixelBuffer: CVPixelBuffer? {
-		didSet {
-			syncQueue.sync {
-				internalPixelBuffer = pixelBuffer
-			}
-		}
-	}
-
-	private var internalPixelBuffer: CVPixelBuffer?
-
-	private let syncQueue = DispatchQueue(label: "Preview View Sync Queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
-
-	private var textureCache: CVMetalTextureCache?
-
-	private var textureWidth: Int = 0
-
-	private var textureHeight: Int = 0
-
-	private var textureMirroring = false
-
-	private var textureRotation: Rotation = .rotate0Degrees
-
-	private var sampler: MTLSamplerState!
-
-	private var renderPipelineState: MTLRenderPipelineState!
-
-	private var commandQueue: MTLCommandQueue?
-
-	private var vertexCoordBuffer: MTLBuffer!
-
-	private var textCoordBuffer: MTLBuffer!
-
-	private var internalBounds: CGRect!
-
-	private var textureTranform: CGAffineTransform?
-
-	func texturePointForView(point: CGPoint) -> CGPoint? {
-		var result: CGPoint?
-		guard let transform = textureTranform else {
-			return result
-		}
-		let transformPoint = point.applying(transform)
-
-		if CGRect(origin: .zero, size: CGSize(width: textureWidth, height: textureHeight)).contains(transformPoint) {
-			result = transformPoint
-		} else {
-			print("Invalid point \(point) result point \(transformPoint)")
-		}
-
-		return result
-	}
-
-	func viewPointForTexture(point: CGPoint) -> CGPoint? {
-		var result: CGPoint?
-		guard let transform = textureTranform?.inverted() else {
-			return result
-		}
-		let transformPoint = point.applying(transform)
-
-		if internalBounds.contains(transformPoint) {
-			result = transformPoint
-		} else {
-			print("Invalid point \(point) result point \(transformPoint)")
-		}
-
-		return result
-	}
-
-	func flushTextureCache() {
-		textureCache = nil
-	}
-
-	private func setupTransform(width: Int, height: Int, mirroring: Bool, rotation: Rotation) {
-		var scaleX: Float = 1.0
-		var scaleY: Float = 1.0
-		var resizeAspect: Float = 1.0
-
-		internalBounds = self.bounds
-		textureWidth = width
-		textureHeight = height
-		textureMirroring = mirroring
-		textureRotation = rotation
-
-		if textureWidth > 0 && textureHeight > 0 {
-			switch textureRotation {
-				case .rotate0Degrees, .rotate180Degrees:
-					scaleX = Float(internalBounds.width / CGFloat(textureWidth))
-					scaleY = Float(internalBounds.height / CGFloat(textureHeight))
-
-				case .rotate90Degrees, .rotate270Degrees:
-					scaleX = Float(internalBounds.width / CGFloat(textureHeight))
-					scaleY = Float(internalBounds.height / CGFloat(textureWidth))
-			}
-		}
-		// Resize aspect
-		resizeAspect = min(scaleX, scaleY)
-		if scaleX < scaleY {
-			scaleY = scaleX / scaleY
-			scaleX = 1.0
-		} else {
-			scaleX = scaleY / scaleX
-			scaleY = 1.0
-		}
-
-		if textureMirroring {
-			scaleX *= -1.0
-		}
-
-		// Vertex coordinate takes the gravity into account
-		let vertexData: [Float] = [
-			-scaleX, -scaleY, 0.0, 1.0,
-			scaleX, -scaleY, 0.0, 1.0,
-			-scaleX, scaleY, 0.0, 1.0,
-			scaleX, scaleY, 0.0, 1.0
-			]
-		vertexCoordBuffer = device!.makeBuffer(bytes: vertexData, length: vertexData.count * MemoryLayout<Float>.size, options: [])
-
-		// Texture coordinate takes the rotation into account
-		var textData: [Float]
-		switch textureRotation {
-		case .rotate0Degrees:
-			textData = [
-				0.0, 1.0,
-				1.0, 1.0,
-				0.0, 0.0,
-				1.0, 0.0
-			]
-
-		case .rotate180Degrees:
-			textData = [
-				1.0, 0.0,
-				0.0, 0.0,
-				1.0, 1.0,
-				0.0, 1.0
-			]
-
-		case .rotate90Degrees:
-			textData = [
-				1.0, 1.0,
-				1.0, 0.0,
-				0.0, 1.0,
-				0.0, 0.0
-			]
-
-		case .rotate270Degrees:
-			textData = [
-				0.0, 0.0,
-				0.0, 1.0,
-				1.0, 0.0,
-				1.0, 1.0
-			]
-		}
-		textCoordBuffer = device?.makeBuffer(bytes: textData, length: textData.count * MemoryLayout<Float>.size, options: [])
-
-		// Calculate the transform from texture coordinates to view coordinates
-		var transform = CGAffineTransform.identity
-		if textureMirroring {
-			transform = transform.concatenating(CGAffineTransform(scaleX: -1, y: 1))
-			transform = transform.concatenating(CGAffineTransform(translationX: CGFloat(textureWidth), y: 0))
-		}
-
-		switch textureRotation {
-			case .rotate0Degrees:
-				transform = transform.concatenating(CGAffineTransform(rotationAngle: CGFloat(0)))
-
-			case .rotate180Degrees:
-				transform = transform.concatenating(CGAffineTransform(rotationAngle: CGFloat(Double.pi)))
-				transform = transform.concatenating(CGAffineTransform(translationX: CGFloat(textureWidth), y: CGFloat(textureHeight)))
-
-			case .rotate90Degrees:
-				transform = transform.concatenating(CGAffineTransform(rotationAngle: CGFloat(Double.pi) / 2))
-				transform = transform.concatenating(CGAffineTransform(translationX: CGFloat(textureHeight), y: 0))
-
-			case .rotate270Degrees:
-				transform = transform.concatenating(CGAffineTransform(rotationAngle: 3 * CGFloat(Double.pi) / 2))
-				transform = transform.concatenating(CGAffineTransform(translationX: 0, y: CGFloat(textureWidth)))
-		}
-
-		transform = transform.concatenating(CGAffineTransform(scaleX: CGFloat(resizeAspect), y: CGFloat(resizeAspect)))
-		let tranformRect = CGRect(origin: .zero, size: CGSize(width: textureWidth, height: textureHeight)).applying(transform)
-		let transX = (internalBounds.size.width - tranformRect.size.width) / 2
-		let transY = (internalBounds.size.height - tranformRect.size.height) / 2
-		transform = transform.concatenating(CGAffineTransform(translationX: transX, y: transY))
-		textureTranform = transform.inverted()
-	}
-
-	required init(coder: NSCoder) {
-		super.init(coder: coder)
-
-		device = MTLCreateSystemDefaultDevice()
-
-		configureMetal()
-
-		createTextureCache()
-
-		colorPixelFormat = .bgra8Unorm
-	}
-
-	func configureMetal() {
-		let defaultLibrary = device!.makeDefaultLibrary()!
-		let pipelineDescriptor = MTLRenderPipelineDescriptor()
-		pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
-		pipelineDescriptor.vertexFunction = defaultLibrary.makeFunction(name: "vertexPassThrough")
-		pipelineDescriptor.fragmentFunction = defaultLibrary.makeFunction(name: "fragmentPassThrough")
-
-		// To determine how our textures are sampled, we create a sampler descriptor, which
-		// will be used to ask for a sampler state object from our device below.
-		let samplerDescriptor = MTLSamplerDescriptor()
-		samplerDescriptor.sAddressMode = .clampToEdge
-		samplerDescriptor.tAddressMode = .clampToEdge
-		samplerDescriptor.minFilter = .linear
-		samplerDescriptor.magFilter = .linear
-		sampler = device!.makeSamplerState(descriptor: samplerDescriptor)
-
-		do {
-			renderPipelineState = try device!.makeRenderPipelineState(descriptor: pipelineDescriptor)
-		} catch {
-			fatalError("Unable to create preview Metal view pipeline state. (\(error))")
-		}
-
-		commandQueue = device!.makeCommandQueue()
-	}
-
-	func createTextureCache() {
-		var newTextureCache: CVMetalTextureCache?
-		if CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, device!, nil, &newTextureCache) == kCVReturnSuccess {
-			textureCache = newTextureCache
-		} else {
-			assertionFailure("Unable to allocate texture cache")
-		}
-	}
-
-	override func draw(_ rect: CGRect) {
-		var pixelBuffer: CVPixelBuffer?
-		var mirroring = false
-		var rotation: Rotation = .rotate0Degrees
-
-		syncQueue.sync {
-			pixelBuffer = internalPixelBuffer
-			mirroring = internalMirroring
-			rotation = internalRotation
-		}
-
-		guard let drawable = currentDrawable,
-			let currentRenderPassDescriptor = currentRenderPassDescriptor,
-			let previewPixelBuffer = pixelBuffer else {
-				return
-		}
-
-		// Create a Metal texture from the image buffer
-		let width = CVPixelBufferGetWidth(previewPixelBuffer)
-		let height = CVPixelBufferGetHeight(previewPixelBuffer)
-
-		if textureCache == nil {
-			createTextureCache()
-		}
-		var cvTextureOut: CVMetalTexture?
-		CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
+    
+    enum Rotation: Int {
+        case rotate0Degrees
+        case rotate90Degrees
+        case rotate180Degrees
+        case rotate270Degrees
+    }
+    
+    var mirroring = false {
+        didSet {
+            syncQueue.sync {
+                internalMirroring = mirroring
+            }
+        }
+    }
+    
+    private var internalMirroring: Bool = false
+    
+    var rotation: Rotation = .rotate0Degrees {
+        didSet {
+            syncQueue.sync {
+                internalRotation = rotation
+            }
+        }
+    }
+    
+    private var internalRotation: Rotation = .rotate0Degrees
+    
+    var pixelBuffer: CVPixelBuffer? {
+        didSet {
+            syncQueue.sync {
+                internalPixelBuffer = pixelBuffer
+            }
+        }
+    }
+    
+    private var internalPixelBuffer: CVPixelBuffer?
+    
+    private let syncQueue = DispatchQueue(label: "Preview View Sync Queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
+    
+    private var textureCache: CVMetalTextureCache?
+    
+    private var textureWidth: Int = 0
+    
+    private var textureHeight: Int = 0
+    
+    private var textureMirroring = false
+    
+    private var textureRotation: Rotation = .rotate0Degrees
+    
+    private var sampler: MTLSamplerState!
+    
+    private var renderPipelineState: MTLRenderPipelineState!
+    
+    private var commandQueue: MTLCommandQueue?
+    
+    private var vertexCoordBuffer: MTLBuffer!
+    
+    private var textCoordBuffer: MTLBuffer!
+    
+    private var internalBounds: CGRect!
+    
+    private var textureTranform: CGAffineTransform?
+    
+    func texturePointForView(point: CGPoint) -> CGPoint? {
+        var result: CGPoint?
+        guard let transform = textureTranform else {
+            return result
+        }
+        let transformPoint = point.applying(transform)
+        
+        if CGRect(origin: .zero, size: CGSize(width: textureWidth, height: textureHeight)).contains(transformPoint) {
+            result = transformPoint
+        } else {
+            print("Invalid point \(point) result point \(transformPoint)")
+        }
+        
+        return result
+    }
+    
+    func viewPointForTexture(point: CGPoint) -> CGPoint? {
+        var result: CGPoint?
+        guard let transform = textureTranform?.inverted() else {
+            return result
+        }
+        let transformPoint = point.applying(transform)
+        
+        if internalBounds.contains(transformPoint) {
+            result = transformPoint
+        } else {
+            print("Invalid point \(point) result point \(transformPoint)")
+        }
+        
+        return result
+    }
+    
+    func flushTextureCache() {
+        textureCache = nil
+    }
+    
+    private func setupTransform(width: Int, height: Int, mirroring: Bool, rotation: Rotation) {
+        var scaleX: Float = 1.0
+        var scaleY: Float = 1.0
+        var resizeAspect: Float = 1.0
+        
+        internalBounds = self.bounds
+        textureWidth = width
+        textureHeight = height
+        textureMirroring = mirroring
+        textureRotation = rotation
+        
+        if textureWidth > 0 && textureHeight > 0 {
+            switch textureRotation {
+            case .rotate0Degrees, .rotate180Degrees:
+                scaleX = Float(internalBounds.width / CGFloat(textureWidth))
+                scaleY = Float(internalBounds.height / CGFloat(textureHeight))
+                
+            case .rotate90Degrees, .rotate270Degrees:
+                scaleX = Float(internalBounds.width / CGFloat(textureHeight))
+                scaleY = Float(internalBounds.height / CGFloat(textureWidth))
+            }
+        }
+        // Resize aspect: fit the texture inside the view while preserving its aspect ratio
+        resizeAspect = min(scaleX, scaleY)
+        if scaleX < scaleY {
+            scaleY = scaleX / scaleY
+            scaleX = 1.0
+        } else {
+            scaleX = scaleY / scaleX
+            scaleY = 1.0
+        }
+        
+        if textureMirroring {
+            scaleX *= -1.0
+        }
+        
+        // Vertex coordinate takes the gravity into account
+        let vertexData: [Float] = [
+            -scaleX, -scaleY, 0.0, 1.0,
+            scaleX, -scaleY, 0.0, 1.0,
+            -scaleX, scaleY, 0.0, 1.0,
+            scaleX, scaleY, 0.0, 1.0
+        ]
+        vertexCoordBuffer = device!.makeBuffer(bytes: vertexData, length: vertexData.count * MemoryLayout<Float>.size, options: [])
+        
+        // Texture coordinate takes the rotation into account
+        var textData: [Float]
+        switch textureRotation {
+        case .rotate0Degrees:
+            textData = [
+                0.0, 1.0,
+                1.0, 1.0,
+                0.0, 0.0,
+                1.0, 0.0
+            ]
+            
+        case .rotate180Degrees:
+            textData = [
+                1.0, 0.0,
+                0.0, 0.0,
+                1.0, 1.0,
+                0.0, 1.0
+            ]
+            
+        case .rotate90Degrees:
+            textData = [
+                1.0, 1.0,
+                1.0, 0.0,
+                0.0, 1.0,
+                0.0, 0.0
+            ]
+            
+        case .rotate270Degrees:
+            textData = [
+                0.0, 0.0,
+                0.0, 1.0,
+                1.0, 0.0,
+                1.0, 1.0
+            ]
+        }
+        textCoordBuffer = device?.makeBuffer(bytes: textData, length: textData.count * MemoryLayout<Float>.size, options: [])
+        
+        // Calculate the transform from texture coordinates to view coordinates
+        var transform = CGAffineTransform.identity
+        if textureMirroring {
+            transform = transform.concatenating(CGAffineTransform(scaleX: -1, y: 1))
+            transform = transform.concatenating(CGAffineTransform(translationX: CGFloat(textureWidth), y: 0))
+        }
+        
+        switch textureRotation {
+        case .rotate0Degrees:
+            transform = transform.concatenating(CGAffineTransform(rotationAngle: CGFloat(0)))
+            
+        case .rotate180Degrees:
+            transform = transform.concatenating(CGAffineTransform(rotationAngle: CGFloat(Double.pi)))
+            transform = transform.concatenating(CGAffineTransform(translationX: CGFloat(textureWidth), y: CGFloat(textureHeight)))
+            
+        case .rotate90Degrees:
+            transform = transform.concatenating(CGAffineTransform(rotationAngle: CGFloat(Double.pi) / 2))
+            transform = transform.concatenating(CGAffineTransform(translationX: CGFloat(textureHeight), y: 0))
+            
+        case .rotate270Degrees:
+            transform = transform.concatenating(CGAffineTransform(rotationAngle: 3 * CGFloat(Double.pi) / 2))
+            transform = transform.concatenating(CGAffineTransform(translationX: 0, y: CGFloat(textureWidth)))
+        }
+        
+        transform = transform.concatenating(CGAffineTransform(scaleX: CGFloat(resizeAspect), y: CGFloat(resizeAspect)))
+        let tranformRect = CGRect(origin: .zero, size: CGSize(width: textureWidth, height: textureHeight)).applying(transform)
+        let transX = (internalBounds.size.width - tranformRect.size.width) / 2
+        let transY = (internalBounds.size.height - tranformRect.size.height) / 2
+        transform = transform.concatenating(CGAffineTransform(translationX: transX, y: transY))
+        textureTranform = transform.inverted()
+    }
+    
+    required init(coder: NSCoder) {
+        super.init(coder: coder)
+        
+        device = MTLCreateSystemDefaultDevice()
+        
+        configureMetal()
+        
+        createTextureCache()
+        
+        colorPixelFormat = .bgra8Unorm
+    }
+    
+    func configureMetal() {
+        let defaultLibrary = device!.makeDefaultLibrary()!
+        let pipelineDescriptor = MTLRenderPipelineDescriptor()
+        pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
+        pipelineDescriptor.vertexFunction = defaultLibrary.makeFunction(name: "vertexPassThrough")
+        pipelineDescriptor.fragmentFunction = defaultLibrary.makeFunction(name: "fragmentPassThrough")
+        
+        // To determine how our textures are sampled, we create a sampler descriptor, which
+        // will be used to ask for a sampler state object from our device below.
+        let samplerDescriptor = MTLSamplerDescriptor()
+        samplerDescriptor.sAddressMode = .clampToEdge
+        samplerDescriptor.tAddressMode = .clampToEdge
+        samplerDescriptor.minFilter = .linear
+        samplerDescriptor.magFilter = .linear
+        sampler = device!.makeSamplerState(descriptor: samplerDescriptor)
+        
+        do {
+            renderPipelineState = try device!.makeRenderPipelineState(descriptor: pipelineDescriptor)
+        } catch {
+            fatalError("Unable to create preview Metal view pipeline state. (\(error))")
+        }
+        
+        commandQueue = device!.makeCommandQueue()
+    }
+    
+    func createTextureCache() {
+        var newTextureCache: CVMetalTextureCache?
+        if CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, device!, nil, &newTextureCache) == kCVReturnSuccess {
+            textureCache = newTextureCache
+        } else {
+            assertionFailure("Unable to allocate texture cache")
+        }
+    }
+    
+    override func draw(_ rect: CGRect) {
+        var pixelBuffer: CVPixelBuffer?
+        var mirroring = false
+        var rotation: Rotation = .rotate0Degrees
+        
+        syncQueue.sync {
+            pixelBuffer = internalPixelBuffer
+            mirroring = internalMirroring
+            rotation = internalRotation
+        }
+        
+        guard let drawable = currentDrawable,
+            let currentRenderPassDescriptor = currentRenderPassDescriptor,
+            let previewPixelBuffer = pixelBuffer else {
+                return
+        }
+        
+        // Create a Metal texture from the image buffer
+        let width = CVPixelBufferGetWidth(previewPixelBuffer)
+        let height = CVPixelBufferGetHeight(previewPixelBuffer)
+        
+        if textureCache == nil {
+            createTextureCache()
+        }
+        var cvTextureOut: CVMetalTexture?
+        CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
                                                   textureCache!,
                                                   previewPixelBuffer,
                                                   nil,
@@ -304,50 +304,50 @@ class PreviewMetalView: MTKView {
                                                   height,
                                                   0,
                                                   &cvTextureOut)
-		guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
-				print("Failed to create preview texture")
-
-				CVMetalTextureCacheFlush(textureCache!, 0)
-				return
-		}
-
-		if texture.width != textureWidth ||
-			texture.height != textureHeight ||
-			self.bounds != internalBounds ||
-			mirroring != textureMirroring ||
-			rotation != textureRotation {
-			setupTransform(width: texture.width, height: texture.height, mirroring: mirroring, rotation: rotation)
-		}
-
-		// Set up command buffer and encoder
-		guard let commandQueue = commandQueue else {
-			print("Failed to create Metal command queue")
-			CVMetalTextureCacheFlush(textureCache!, 0)
-			return
-		}
-		
-		guard let commandBuffer = commandQueue.makeCommandBuffer() else {
-			print("Failed to create Metal command buffer")
-			CVMetalTextureCacheFlush(textureCache!, 0)
-			return
-		}
-		
-		guard let commandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: currentRenderPassDescriptor) else {
-			print("Failed to create Metal command encoder")
-			CVMetalTextureCacheFlush(textureCache!, 0)
-			return
-		}
-		
-		commandEncoder.label = "Preview display"
-		commandEncoder.setRenderPipelineState(renderPipelineState!)
-		commandEncoder.setVertexBuffer(vertexCoordBuffer, offset: 0, index: 0)
-		commandEncoder.setVertexBuffer(textCoordBuffer, offset: 0, index: 1)
-		commandEncoder.setFragmentTexture(texture, index: 0)
-		commandEncoder.setFragmentSamplerState(sampler, index: 0)
-		commandEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4)
-		commandEncoder.endEncoding()
-		
-		commandBuffer.present(drawable) // Draw to the screen
-		commandBuffer.commit()
-	}
+        guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
+            print("Failed to create preview texture")
+            
+            CVMetalTextureCacheFlush(textureCache!, 0)
+            return
+        }
+        
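+        // Rebuild the display transform whenever the texture size, view bounds, mirroring, or rotation has changed.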
+        if texture.width != textureWidth ||
+            texture.height != textureHeight ||
+            self.bounds != internalBounds ||
+            mirroring != textureMirroring ||
+            rotation != textureRotation {
+            setupTransform(width: texture.width, height: texture.height, mirroring: mirroring, rotation: rotation)
+        }
+        
+        // Set up command buffer and encoder
+        guard let commandQueue = commandQueue else {
+            print("Failed to create Metal command queue")
+            CVMetalTextureCacheFlush(textureCache!, 0)
+            return
+        }
+        
+        guard let commandBuffer = commandQueue.makeCommandBuffer() else {
+            print("Failed to create Metal command buffer")
+            CVMetalTextureCacheFlush(textureCache!, 0)
+            return
+        }
+        
+        guard let commandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: currentRenderPassDescriptor) else {
+            print("Failed to create Metal command encoder")
+            CVMetalTextureCacheFlush(textureCache!, 0)
+            return
+        }
+        
+        commandEncoder.label = "Preview display"
+        commandEncoder.setRenderPipelineState(renderPipelineState!)
+        commandEncoder.setVertexBuffer(vertexCoordBuffer, offset: 0, index: 0)
+        commandEncoder.setVertexBuffer(textCoordBuffer, offset: 0, index: 1)
+        commandEncoder.setFragmentTexture(texture, index: 0)
+        commandEncoder.setFragmentSamplerState(sampler, index: 0)
+        commandEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4)
+        commandEncoder.endEncoding()
+        
+        commandBuffer.present(drawable) // Draw to the screen
+        commandBuffer.commit()
+    }
 }
diff --git a/TrueDepthStreamer/VideoMixer.swift b/TrueDepthStreamer/VideoMixer.swift
index 3cb1206..3fcaca2 100644
--- a/TrueDepthStreamer/VideoMixer.swift
+++ b/TrueDepthStreamer/VideoMixer.swift
@@ -11,169 +11,169 @@ import Metal
 import MetalKit
 
 class VideoMixer {
-
-	var description: String = "Video Mixer"
-
-	var isPrepared = false
-
-	private(set) var inputFormatDescription: CMFormatDescription?
-
-	private(set) var outputFormatDescription: CMFormatDescription?
-
-	private var outputPixelBufferPool: CVPixelBufferPool?
-
-	private let metalDevice = MTLCreateSystemDefaultDevice()!
-
-	private var renderPipelineState: MTLRenderPipelineState?
-
-	private var sampler: MTLSamplerState?
-
-	private var textureCache: CVMetalTextureCache!
-
+    
+    var description: String = "Video Mixer"
+    
+    var isPrepared = false
+    
+    private(set) var inputFormatDescription: CMFormatDescription?
+    
+    private(set) var outputFormatDescription: CMFormatDescription?
+    
+    private var outputPixelBufferPool: CVPixelBufferPool?
+    
+    private let metalDevice = MTLCreateSystemDefaultDevice()!
+    
+    private var renderPipelineState: MTLRenderPipelineState?
+    
+    private var sampler: MTLSamplerState?
+    
+    private var textureCache: CVMetalTextureCache!
+    
     private lazy var commandQueue: MTLCommandQueue? = {
         return self.metalDevice.makeCommandQueue()
     }()
-
+    
     private var fullRangeVertexBuffer: MTLBuffer?
-
-	var mixFactor: Float = 0.5
-
-	init() {
+    
+    var mixFactor: Float = 0.5
+    
+    init() {
         let vertexData: [Float] = [
             -1.0, 1.0,
             1.0, 1.0,
             -1.0, -1.0,
             1.0, -1.0
-            ]
-
-		fullRangeVertexBuffer = metalDevice.makeBuffer(bytes: vertexData, length: vertexData.count * MemoryLayout<Float>.size, options: [])
-
-		let defaultLibrary = metalDevice.makeDefaultLibrary()!
-
-		let pipelineDescriptor = MTLRenderPipelineDescriptor()
-		pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
-		pipelineDescriptor.vertexFunction = defaultLibrary.makeFunction(name: "vertexMixer")
-		pipelineDescriptor.fragmentFunction = defaultLibrary.makeFunction(name: "fragmentMixer")
-
-		do {
-			renderPipelineState = try metalDevice.makeRenderPipelineState(descriptor: pipelineDescriptor)
-		} catch {
-			fatalError("Unable to create video mixer pipeline state. (\(error))")
-		}
-
-		// To determine how our textures are sampled, we create a sampler descriptor, which
-		// is used to ask for a sampler state object from our device.
-		let samplerDescriptor = MTLSamplerDescriptor()
-		samplerDescriptor.minFilter = .linear
-		samplerDescriptor.magFilter = .linear
-		sampler = metalDevice.makeSamplerState(descriptor: samplerDescriptor)
-	}
-
-	func prepare(with videoFormatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int) {
-		reset()
-
-		(outputPixelBufferPool, _, outputFormatDescription) = allocateOutputBufferPool(with: videoFormatDescription,
-		                                                                               outputRetainedBufferCountHint: outputRetainedBufferCountHint)
-		if outputPixelBufferPool == nil {
-			return
-		}
-		inputFormatDescription = videoFormatDescription
-
-		var metalTextureCache: CVMetalTextureCache?
-		if CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, metalDevice, nil, &metalTextureCache) != kCVReturnSuccess {
-			assertionFailure("Unable to allocate video mixer texture cache")
-		} else {
-			textureCache = metalTextureCache
-		}
-
-		isPrepared = true
-	}
-
-	func reset() {
-		outputPixelBufferPool = nil
-		outputFormatDescription = nil
-		inputFormatDescription = nil
-		textureCache = nil
-		isPrepared = false
-	}
-
+        ]
+        
+        fullRangeVertexBuffer = metalDevice.makeBuffer(bytes: vertexData, length: vertexData.count * MemoryLayout<Float>.size, options: [])
+        
+        let defaultLibrary = metalDevice.makeDefaultLibrary()!
+        
+        let pipelineDescriptor = MTLRenderPipelineDescriptor()
+        pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
+        pipelineDescriptor.vertexFunction = defaultLibrary.makeFunction(name: "vertexMixer")
+        pipelineDescriptor.fragmentFunction = defaultLibrary.makeFunction(name: "fragmentMixer")
+        
+        do {
+            renderPipelineState = try metalDevice.makeRenderPipelineState(descriptor: pipelineDescriptor)
+        } catch {
+            fatalError("Unable to create video mixer pipeline state. (\(error))")
+        }
+        
+        // To determine how our textures are sampled, we create a sampler descriptor, which
+        // is used to ask for a sampler state object from our device.
+        let samplerDescriptor = MTLSamplerDescriptor()
+        samplerDescriptor.minFilter = .linear
+        samplerDescriptor.magFilter = .linear
+        sampler = metalDevice.makeSamplerState(descriptor: samplerDescriptor)
+    }
+    
+    func prepare(with videoFormatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int) {
+        reset()
+        
+        (outputPixelBufferPool, _, outputFormatDescription) = allocateOutputBufferPool(with: videoFormatDescription,
+                                                                                       outputRetainedBufferCountHint: outputRetainedBufferCountHint)
+        if outputPixelBufferPool == nil {
+            return
+        }
+        inputFormatDescription = videoFormatDescription
+        
+        var metalTextureCache: CVMetalTextureCache?
+        if CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, metalDevice, nil, &metalTextureCache) != kCVReturnSuccess {
+            assertionFailure("Unable to allocate video mixer texture cache")
+        } else {
+            textureCache = metalTextureCache
+        }
+        
+        isPrepared = true
+    }
+    
+    func reset() {
+        outputPixelBufferPool = nil
+        outputFormatDescription = nil
+        inputFormatDescription = nil
+        textureCache = nil
+        isPrepared = false
+    }
+    
     struct MixerParameters {
         var mixFactor: Float
     }
-
-	func mix(videoPixelBuffer: CVPixelBuffer, depthPixelBuffer: CVPixelBuffer) -> CVPixelBuffer? {
-		if !isPrepared {
-			assertionFailure("Invalid state: Not prepared")
-			return nil
-		}
-
-		var newPixelBuffer: CVPixelBuffer?
-		CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &newPixelBuffer)
-		guard let outputPixelBuffer = newPixelBuffer else {
-			print("Allocation failure: Could not get pixel buffer from pool (\(self.description))")
-			return nil
-		}
-		guard let outputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: outputPixelBuffer),
-			let inputTexture0 = makeTextureFromCVPixelBuffer(pixelBuffer: videoPixelBuffer),
-			let inputTexture1 = makeTextureFromCVPixelBuffer(pixelBuffer: depthPixelBuffer) else {
-				return nil
-		}
-
-		var parameters = MixerParameters(mixFactor: mixFactor)
-
-		let renderPassDescriptor = MTLRenderPassDescriptor()
+    
+    func mix(videoPixelBuffer: CVPixelBuffer, depthPixelBuffer: CVPixelBuffer) -> CVPixelBuffer? {
+        if !isPrepared {
+            assertionFailure("Invalid state: Not prepared")
+            return nil
+        }
+        
+        var newPixelBuffer: CVPixelBuffer?
+        CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &newPixelBuffer)
+        guard let outputPixelBuffer = newPixelBuffer else {
+            print("Allocation failure: Could not get pixel buffer from pool (\(self.description))")
+            return nil
+        }
+        guard let outputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: outputPixelBuffer),
+            let inputTexture0 = makeTextureFromCVPixelBuffer(pixelBuffer: videoPixelBuffer),
+            let inputTexture1 = makeTextureFromCVPixelBuffer(pixelBuffer: depthPixelBuffer) else {
+                return nil
+        }
+        
+        var parameters = MixerParameters(mixFactor: mixFactor)
+        
+        let renderPassDescriptor = MTLRenderPassDescriptor()
         renderPassDescriptor.colorAttachments[0].texture = outputTexture
-
-		guard let fullRangeVertexBuffer = fullRangeVertexBuffer else {
-			print("Failed to create Metal vertex buffer")
-			CVMetalTextureCacheFlush(textureCache!, 0)
-			return nil
-		}
-		
-		guard let sampler = sampler else {
-			print("Failed to create Metal sampler")
-			CVMetalTextureCacheFlush(textureCache!, 0)
-			return nil
-		}
-		
-		// Set up command queue, buffer, and encoder
-		guard let commandQueue = commandQueue,
-			let commandBuffer = commandQueue.makeCommandBuffer(),
-			let commandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else {
-				print("Failed to create Metal command queue")
-				CVMetalTextureCacheFlush(textureCache!, 0)
-				return nil
-		}
-		
-		commandEncoder.label = "Video Mixer"
-		commandEncoder.setRenderPipelineState(renderPipelineState!)
-		commandEncoder.setVertexBuffer(fullRangeVertexBuffer, offset: 0, index: 0)
-		commandEncoder.setFragmentTexture(inputTexture0, index: 0)
-		commandEncoder.setFragmentTexture(inputTexture1, index: 1)
-		commandEncoder.setFragmentSamplerState(sampler, index: 0)
-		commandEncoder.setFragmentBytes( UnsafeMutableRawPointer(&parameters), length: MemoryLayout<MixerParameters>.size, index: 0)
-		commandEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4)
-		commandEncoder.endEncoding()
-		
-		commandBuffer.commit()
-
+        
+        guard let fullRangeVertexBuffer = fullRangeVertexBuffer else {
+            print("Failed to create Metal vertex buffer")
+            CVMetalTextureCacheFlush(textureCache!, 0)
+            return nil
+        }
+        
+        guard let sampler = sampler else {
+            print("Failed to create Metal sampler")
+            CVMetalTextureCacheFlush(textureCache!, 0)
+            return nil
+        }
+        
+        // Set up command queue, buffer, and encoder
+        guard let commandQueue = commandQueue,
+            let commandBuffer = commandQueue.makeCommandBuffer(),
+            let commandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else {
+                print("Failed to create Metal command queue, buffer, or encoder")
+                CVMetalTextureCacheFlush(textureCache!, 0)
+                return nil
+        }
+        
+        commandEncoder.label = "Video Mixer"
+        commandEncoder.setRenderPipelineState(renderPipelineState!)
+        commandEncoder.setVertexBuffer(fullRangeVertexBuffer, offset: 0, index: 0)
+        commandEncoder.setFragmentTexture(inputTexture0, index: 0)
+        commandEncoder.setFragmentTexture(inputTexture1, index: 1)
+        commandEncoder.setFragmentSamplerState(sampler, index: 0)
+        commandEncoder.setFragmentBytes(UnsafeMutableRawPointer(&parameters), length: MemoryLayout<MixerParameters>.size, index: 0)
+        commandEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4)
+        commandEncoder.endEncoding()
+        
+        commandBuffer.commit()
+        
         return outputPixelBuffer
-	}
-
-	func makeTextureFromCVPixelBuffer(pixelBuffer: CVPixelBuffer) -> MTLTexture? {
-		let width = CVPixelBufferGetWidth(pixelBuffer)
-		let height = CVPixelBufferGetHeight(pixelBuffer)
-
-		// Create a Metal texture from the image buffer
-		var cvTextureOut: CVMetalTexture?
-		CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, .bgra8Unorm, width, height, 0, &cvTextureOut)
-		guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
-			print("Video mixer failed to create preview texture")
-
-			CVMetalTextureCacheFlush(textureCache, 0)
-			return nil
-		}
-
-		return texture
-	}
+    }
+    
+    func makeTextureFromCVPixelBuffer(pixelBuffer: CVPixelBuffer) -> MTLTexture? {
+        let width = CVPixelBufferGetWidth(pixelBuffer)
+        let height = CVPixelBufferGetHeight(pixelBuffer)
+        
+        // Create a Metal texture from the image buffer
+        var cvTextureOut: CVMetalTexture?
+        CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, .bgra8Unorm, width, height, 0, &cvTextureOut)
+        guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
+            print("Video mixer failed to create preview texture")
+            
+            CVMetalTextureCacheFlush(textureCache, 0)
+            return nil
+        }
+        
+        return texture
+    }
 }
-- 
GitLab