- Add Vision framework integration for image analysis (OCR, classification)
- Add image attachment support in chat UI with drag & drop
- Add recent images sidebar from Downloads/Desktop
- Add copy to clipboard button for assistant responses
- Add gRPC reflection service with toggle in settings
- Create proper .proto file and generate Swift code
- Add server restart when toggling reflection setting
- Fix port number formatting in settings (remove comma grouping; see the sketch after this list)
- Update gRPC dependencies to v2.x

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
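On the port-formatting item above: a minimal sketch of how integer grouping can be suppressed in a SwiftUI settings field. The `PortSettingView` name and the `port` binding are illustrative only, not taken from the actual settings code in this commit:

```swift
import SwiftUI

struct PortSettingView: View {
    // Hypothetical binding; the real settings model is not shown in this commit.
    @State private var port: Int = 50051

    var body: some View {
        // .grouping(.never) renders 50051 instead of 50,051.
        TextField("Port", value: $port, format: .number.grouping(.never))
    }
}
```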
import Foundation
import FoundationModels

/// Errors that can occur when using Apple Intelligence
public enum AppleIntelligenceError: Error, CustomStringConvertible, Sendable {
    case modelNotAvailable
    case generationFailed(String)
    case sessionCreationFailed
    case imageAnalysisFailed(String)

    public var description: String {
        switch self {
        case .modelNotAvailable:
            return "Apple Intelligence model is not available on this device"
        case .generationFailed(let reason):
            return "Generation failed: \(reason)"
        case .sessionCreationFailed:
            return "Failed to create language model session"
        case .imageAnalysisFailed(let reason):
            return "Image analysis failed: \(reason)"
        }
    }
}

/// Service wrapper for Apple Intelligence Foundation Models
public actor AppleIntelligenceService {
    /// The language model session
    private var session: LanguageModelSession?

    /// Vision analysis service for image processing
    private let visionService = VisionAnalysisService()

    /// Whether the model is available
    public private(set) var isAvailable: Bool = false

    /// Initialize and check model availability
    public init() async {
        await checkAvailability()
    }

    /// Check if Apple Intelligence is available
    private func checkAvailability() async {
        let availability = SystemLanguageModel.default.availability
        switch availability {
        case .available:
            isAvailable = true
            session = LanguageModelSession()
        case .unavailable:
            isAvailable = false
        @unknown default:
            isAvailable = false
        }
    }

    /// Get the current model status as a string
    public func getModelStatus() -> String {
        let availability = SystemLanguageModel.default.availability
        switch availability {
        case .available:
            return "available"
        case .unavailable(let reason):
            return "unavailable: \(reason)"
        @unknown default:
            return "unknown"
        }
    }

    /// Generate a completion for the given prompt (non-streaming)
    public func complete(
        prompt: String,
        temperature: Float?,
        maxTokens: Int?,
        images: [(data: Data, filename: String?)] = []
    ) async throws -> (text: String, analyses: [VisionAnalysisResult]) {
        guard isAvailable, let session = session else {
            throw AppleIntelligenceError.modelNotAvailable
        }

        // Analyze images if provided
        var analyses: [VisionAnalysisResult] = []
        var enhancedPrompt = prompt

        if !images.isEmpty {
            do {
                analyses = try await visionService.analyzeMultiple(images: images)
                let analysesWithFilenames = zip(analyses, images).map { (result: $0.0, filename: $0.1.filename) }
                let context = await visionService.formatAnalysesAsPromptContext(analyses: analysesWithFilenames)
                enhancedPrompt = context + "\n\n" + prompt
            } catch {
                throw AppleIntelligenceError.imageAnalysisFailed(error.localizedDescription)
            }
        }

        let response = try await session.respond(to: enhancedPrompt)
        return (text: response.content, analyses: analyses)
    }

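    // NOTE: `temperature` and `maxTokens` are accepted here (and in
    // streamComplete below) for API compatibility but are not forwarded to the
    // model. If forwarding is desired, FoundationModels' GenerationOptions
    // should support something like this untested sketch:
    //     let options = GenerationOptions(temperature: temperature.map(Double.init),
    //                                     maximumResponseTokens: maxTokens)
    //     let response = try await session.respond(to: enhancedPrompt, options: options)
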
    /// Generate a streaming completion for the given prompt
    public func streamComplete(
        prompt: String,
        temperature: Float?,
        maxTokens: Int?,
        images: [(data: Data, filename: String?)] = []
    ) -> AsyncThrowingStream<(text: String, analyses: [VisionAnalysisResult]?), Error> {
        AsyncThrowingStream { continuation in
            Task {
                guard self.isAvailable, let session = self.session else {
                    continuation.finish(throwing: AppleIntelligenceError.modelNotAvailable)
                    return
                }

                // Analyze images first if provided
                var analyses: [VisionAnalysisResult] = []
                var enhancedPrompt = prompt

                if !images.isEmpty {
                    do {
                        analyses = try await self.visionService.analyzeMultiple(images: images)
                        let analysesWithFilenames = zip(analyses, images).map { (result: $0.0, filename: $0.1.filename) }
                        let context = await self.visionService.formatAnalysesAsPromptContext(analyses: analysesWithFilenames)
                        enhancedPrompt = context + "\n\n" + prompt
                    } catch {
                        continuation.finish(throwing: AppleIntelligenceError.imageAnalysisFailed(error.localizedDescription))
                        return
                    }
                }

                do {
                    let stream = session.streamResponse(to: enhancedPrompt)
                    var isFirst = true
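                    // NOTE: FoundationModels streaming yields cumulative snapshots,
                    // so each `partialResponse.content` appears to hold the full
                    // text generated so far; consumers should replace, not append.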
                    for try await partialResponse in stream {
                        // Include analyses only in first chunk
                        if isFirst {
                            continuation.yield((text: partialResponse.content, analyses: analyses))
                            isFirst = false
                        } else {
                            continuation.yield((text: partialResponse.content, analyses: nil))
                        }
                    }
                    continuation.finish()
                } catch {
                    continuation.finish(throwing: AppleIntelligenceError.generationFailed(error.localizedDescription))
                }
            }
        }
    }
}
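
To make the service's surface concrete, here is a hedged usage sketch. It assumes the surrounding target provides `VisionAnalysisService`/`VisionAnalysisResult` as used above; the call site itself (the function name, prompts, and the PNG path) is illustrative only:

```swift
// Illustrative call site; not part of the committed file.
func runExample() async {
    let service = await AppleIntelligenceService()
    guard await service.isAvailable else {
        print(await service.getModelStatus())
        return
    }

    do {
        // Non-streaming completion with an attached image (hypothetical path).
        let imageData = try Data(contentsOf: URL(fileURLWithPath: "/tmp/screenshot.png"))
        let (text, analyses) = try await service.complete(
            prompt: "Summarize the attached screenshot.",
            temperature: nil,   // currently not forwarded to the model
            maxTokens: nil,
            images: [(data: imageData, filename: "screenshot.png")]
        )
        print("Vision analyses: \(analyses.count)")
        print(text)

        // Streaming variant: each yielded `text` is a snapshot, so print the latest.
        for try await chunk in service.streamComplete(
            prompt: "Say hello.", temperature: nil, maxTokens: nil
        ) {
            print(chunk.text)
        }
    } catch {
        print("Error: \(error)")
    }
}
```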