- Add Vision framework integration for image analysis (OCR, classification)
- Add image attachment support in chat UI with drag & drop
- Add recent images sidebar from Downloads/Desktop
- Add copy to clipboard button for assistant responses
- Add gRPC reflection service with toggle in settings
- Create proper .proto file and generate Swift code
- Add server restart when toggling reflection setting
- Fix port number formatting in settings (remove comma grouping)
- Update gRPC dependencies to v2.x

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
import Foundation
import GRPCCore
import GRPCProtobuf
import GRPCNIOTransportHTTP2

/// gRPC service provider for Apple Intelligence
public struct AppleIntelligenceProvider: Appleintelligence_AppleIntelligenceService.ServiceProtocol {
    /// The underlying AI service
    private let service: AppleIntelligenceService

    /// Optional API key for authentication
    private let apiKey: String?

    public init(service: AppleIntelligenceService, apiKey: String? = nil) {
        self.service = service
        self.apiKey = apiKey
    }

    // MARK: - ServiceProtocol Implementation

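    /// Handles a unary completion request: runs the prompt (plus any attached
    /// images) through the underlying service and returns a single response.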
    public func complete(
        request: GRPCCore.ServerRequest<Appleintelligence_CompletionRequest>,
        context: GRPCCore.ServerContext
    ) async throws -> GRPCCore.ServerResponse<Appleintelligence_CompletionResponse> {
        try validateApiKey(metadata: request.metadata)

        let message = request.message

        // Convert protobuf images to service format
        let images = message.images.map { img in
            (data: img.data, filename: img.filename.isEmpty ? nil : img.filename)
        }

        let (text, analyses) = try await service.complete(
            prompt: message.prompt,
            temperature: message.hasTemperature ? message.temperature : nil,
            maxTokens: message.hasMaxTokens ? Int(message.maxTokens) : nil,
            images: images
        )

        var response = Appleintelligence_CompletionResponse()
        response.id = UUID().uuidString
        response.text = text
        response.finishReason = "stop"

        // Include analysis results if requested
        if message.includeAnalysis {
            response.imageAnalyses = analyses.map { analysis in
                var protoAnalysis = Appleintelligence_ImageAnalysis()
                protoAnalysis.textContent = analysis.textContent
                protoAnalysis.labels = analysis.labels
                protoAnalysis.description_p = analysis.description
                return protoAnalysis
            }
        }

        return ServerResponse(message: response)
    }

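    /// Handles a streaming completion request: writes incremental text deltas
    /// to the client as chunks, then a final chunk carrying the finish reason.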
    public func streamComplete(
        request: GRPCCore.ServerRequest<Appleintelligence_CompletionRequest>,
        context: GRPCCore.ServerContext
    ) async throws -> GRPCCore.StreamingServerResponse<Appleintelligence_CompletionChunk> {
        try validateApiKey(metadata: request.metadata)

        let message = request.message
        let completionId = UUID().uuidString

        // Convert protobuf images to service format
        let images = message.images.map { img in
            (data: img.data, filename: img.filename.isEmpty ? nil : img.filename)
        }

        return StreamingServerResponse { writer in
            let stream = await self.service.streamComplete(
                prompt: message.prompt,
                temperature: message.hasTemperature ? message.temperature : nil,
                maxTokens: message.hasMaxTokens ? Int(message.maxTokens) : nil,
                images: images
            )

            var lastContent = ""
            var isFirstChunk = true
            for try await (partialResponse, analyses) in stream {
                // Calculate the delta (new text since last response)
                let delta: String
                if partialResponse.hasPrefix(lastContent) {
                    delta = String(partialResponse.dropFirst(lastContent.count))
                } else {
                    delta = partialResponse
                }
                lastContent = partialResponse

                if !delta.isEmpty || isFirstChunk {
                    var chunk = Appleintelligence_CompletionChunk()
                    chunk.id = completionId
                    chunk.delta = delta
                    chunk.isFinal = false

                    // Include analyses in first chunk if requested
                    if isFirstChunk && message.includeAnalysis, let analyses = analyses {
                        chunk.imageAnalyses = analyses.map { analysis in
                            var protoAnalysis = Appleintelligence_ImageAnalysis()
                            protoAnalysis.textContent = analysis.textContent
                            protoAnalysis.labels = analysis.labels
                            protoAnalysis.description_p = analysis.description
                            return protoAnalysis
                        }
                    }

                    try await writer.write(chunk)
                    isFirstChunk = false
                }
            }

            // Send final chunk
            var finalChunk = Appleintelligence_CompletionChunk()
            finalChunk.id = completionId
            finalChunk.delta = ""
            finalChunk.isFinal = true
            finalChunk.finishReason = "stop"
            try await writer.write(finalChunk)

            return [:]
        }
    }

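    /// Reports whether the underlying model is currently available, along
    /// with a human-readable model status string.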
    public func health(
        request: GRPCCore.ServerRequest<Appleintelligence_HealthRequest>,
        context: GRPCCore.ServerContext
    ) async throws -> GRPCCore.ServerResponse<Appleintelligence_HealthResponse> {
        let isHealthy = await service.isAvailable
        let modelStatus = await service.getModelStatus()

        var response = Appleintelligence_HealthResponse()
        response.healthy = isHealthy
        response.modelStatus = modelStatus

        return ServerResponse(message: response)
    }

    // MARK: - Private Helpers

    /// Validate the API key if one is configured.
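    /// Expects the request metadata to carry `authorization: Bearer <key>`.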
    private func validateApiKey(metadata: Metadata) throws {
        guard let expectedKey = apiKey else {
            return // No API key required
        }

        // Look for Authorization header in metadata
        let authValues = metadata["authorization"]
        guard let authHeader = authValues.first(where: { _ in true }),
              case .string(let authString) = authHeader,
              authString.hasPrefix("Bearer ") else {
            throw RPCError(code: .unauthenticated, message: "Missing or invalid Authorization header")
        }

        let providedKey = String(authString.dropFirst("Bearer ".count))
        guard providedKey == expectedKey else {
            throw RPCError(code: .unauthenticated, message: "Invalid API key")
        }
    }
}