// Requires `import Vision` and the compiled CarClassifier model in the project.
// Load the Core ML model through Vision
guard let model = try? VNCoreMLModel(for: CarClassifier().model) else {
    fatalError("Can't load the CarClassifier ML model")
}

// Create a Vision request with a completion handler
let request = VNCoreMLRequest(model: model) { [weak self] request, error in
    guard let results = request.results as? [VNClassificationObservation] else { return }

    // Take the five most confident classifications
    let topResults = results.prefix(5)

    // Update the UI on the main queue
    DispatchQueue.main.async {
        self?.answerLabel.text = topResults
            .map { "\(Int($0.confidence * 100))% \($0.identifier)" }
            .joined(separator: "\n")
    }
}

// Run the Core ML classifier on a background queue
let handler = VNImageRequestHandler(ciImage: image)
DispatchQueue.global(qos: .userInteractive).async {
    do {
        try handler.perform([request])
    } catch {
        print(error)
    }
}
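For context, here is a minimal sketch of how the snippet above might be invoked from an image picker; the `detectScene(image:)` wrapper is an assumption (a method containing the code above), not part of the original listing, but it shows where the `CIImage` comes from.

// Hypothetical call site, inside a UIViewController that conforms to
// UIImagePickerControllerDelegate & UINavigationControllerDelegate.
func imagePickerController(_ picker: UIImagePickerController,
                           didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
    picker.dismiss(animated: true)

    // Convert the picked UIImage to a CIImage for the Vision request handler
    guard let uiImage = info[.originalImage] as? UIImage,
          let ciImage = CIImage(image: uiImage) else {
        print("Couldn't convert UIImage to CIImage")
        return
    }

    detectScene(image: ciImage)   // assumed wrapper around the classification code above
}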