import Foundation
import FoundationModels

/// Errors that can occur when using Apple Intelligence
enum AppleIntelligenceError: Error, CustomStringConvertible {
    case modelNotAvailable
    case generationFailed(String)
    case sessionCreationFailed

    var description: String {
        switch self {
        case .modelNotAvailable:
            return "Apple Intelligence model is not available on this device"
        case .generationFailed(let reason):
            return "Generation failed: \(reason)"
        case .sessionCreationFailed:
            return "Failed to create language model session"
        }
    }
}

/// Service wrapper for Apple Intelligence Foundation Models
actor AppleIntelligenceService {
    /// The language model session
    private var session: LanguageModelSession?

    /// Whether the model is available
    private(set) var isAvailable: Bool = false

    /// Initialize and check model availability
    init() async {
        await checkAvailability()
    }

    /// Check if Apple Intelligence is available
    private func checkAvailability() async {
        let availability = SystemLanguageModel.default.availability
        switch availability {
        case .available:
            isAvailable = true
            session = LanguageModelSession()
        case .unavailable:
            isAvailable = false
        @unknown default:
            isAvailable = false
        }
    }

    /// Get the current model status as a string
    func getModelStatus() -> String {
        let availability = SystemLanguageModel.default.availability
        switch availability {
        case .available:
            return "available"
        case .unavailable(let reason):
            return "unavailable: \(reason)"
        @unknown default:
            return "unknown"
        }
    }

    /// Generate a completion for the given prompt (non-streaming).
    /// `temperature` and `maxTokens` are accepted for API compatibility but are
    /// not currently forwarded to the session.
    func complete(prompt: String, temperature: Float?, maxTokens: Int?) async throws -> String {
        guard isAvailable, let session = session else {
            throw AppleIntelligenceError.modelNotAvailable
        }
        let response = try await session.respond(to: prompt)
        return response.content
    }

    /// Generate a streaming completion for the given prompt.
    /// `temperature` and `maxTokens` are accepted for API compatibility but are
    /// not currently forwarded to the session.
    func streamComplete(
        prompt: String,
        temperature: Float?,
        maxTokens: Int?
    ) -> AsyncThrowingStream<String, Error> {
        AsyncThrowingStream { continuation in
            Task {
                guard self.isAvailable, let session = self.session else {
                    continuation.finish(throwing: AppleIntelligenceError.modelNotAvailable)
                    return
                }
                do {
                    // Yield each partial snapshot's content as it arrives.
                    let stream = session.streamResponse(to: prompt)
                    for try await partialResponse in stream {
                        continuation.yield(partialResponse.content)
                    }
                    continuation.finish()
                } catch {
                    continuation.finish(throwing: AppleIntelligenceError.generationFailed(error.localizedDescription))
                }
            }
        }
    }
}
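
// Minimal usage sketch (an assumption, not part of the service above): shows how a
// caller in an async context might drive the actor. The `service` name and the
// prompts are illustrative only; availability still depends on the device.
//
//     let service = await AppleIntelligenceService()
//     print(await service.getModelStatus())
//
//     // One-shot completion
//     let answer = try await service.complete(
//         prompt: "Summarize this note in one sentence.",
//         temperature: nil,
//         maxTokens: nil
//     )
//     print(answer)
//
//     // Streaming completion
//     for try await chunk in await service.streamComplete(
//         prompt: "List three uses for an on-device language model.",
//         temperature: nil,
//         maxTokens: nil
//     ) {
//         print(chunk)
//     }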