- Restructure project into three targets: - AppleIntelligenceCore: Shared gRPC service code - AppleIntelligenceServer: CLI server - AppleIntelligenceApp: Menu bar app - Menu bar app features: - Toggle server on/off from menu bar - Chat window with streaming AI responses - Settings: host, port, API key, auto-start, launch at login - Proper window focus handling for menu bar apps - Add build scripts for distribution: - build-app.sh: Creates signed .app bundle - create-dmg.sh: Creates distributable DMG 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
97 lines
2.8 KiB
Swift
97 lines
2.8 KiB
Swift
import Foundation
|
|
import AppleIntelligenceCore
|
|
|
|
/// Drives the chat UI: holds the transcript, forwards prompts to the
/// `AppleIntelligenceService`, and streams partial responses back into the
/// trailing assistant message. Main-actor isolated because it mutates
/// `@Observable` state the view layer renders.
@MainActor
@Observable
final class ChatViewModel {
    /// Full conversation transcript, in display order.
    var messages: [ChatMessage] = []
    /// Text currently typed in the input field.
    var inputText: String = ""
    /// True while a completion request is in flight.
    var isLoading: Bool = false
    /// Last error surfaced to the user, or nil when none.
    var errorMessage: String?

    private var service: AppleIntelligenceService?
    private var currentTask: Task<Void, Never>?

    /// Creates the underlying service. Call before `sendMessage()`;
    /// until then `sendMessage()` fails with `.modelNotAvailable`.
    func initialize() async {
        service = await AppleIntelligenceService()
    }

    /// Whether the underlying model is available; false until `initialize()` has run.
    var isServiceAvailable: Bool {
        get async {
            await service?.isAvailable ?? false
        }
    }

    /// Sends the trimmed input text as a user message and streams the
    /// assistant's reply into a placeholder message. No-op when the input
    /// is blank or a request is already running.
    func sendMessage() {
        let text = inputText.trimmingCharacters(in: .whitespacesAndNewlines)
        guard !text.isEmpty, !isLoading else { return }

        // Record the user message and reset input/error state.
        messages.append(ChatMessage(role: .user, content: text))
        inputText = ""
        errorMessage = nil

        // Placeholder the stream writes into. `let`: the struct itself is
        // never mutated here — updates go through `messages[index]`.
        let assistantMessage = ChatMessage(role: .assistant, content: "", isStreaming: true)
        messages.append(assistantMessage)

        isLoading = true

        currentTask = Task {
            do {
                guard let service else {
                    throw AppleIntelligenceError.modelNotAvailable
                }

                let stream = await service.streamComplete(
                    prompt: text,
                    temperature: nil,
                    maxTokens: nil
                )

                // Each element carries the accumulated response so far;
                // mirror it into the trailing assistant message as it grows.
                for try await partialResponse in stream {
                    if let index = messages.lastIndex(where: { $0.role == .assistant }) {
                        messages[index].content = partialResponse
                    }
                }

                // Normal completion: stop the streaming indicator.
                if let index = messages.lastIndex(where: { $0.role == .assistant }) {
                    messages[index].isStreaming = false
                }
            } catch is CancellationError {
                // User-initiated stop (stopGeneration()): keep any partial
                // content and show no error. stopGeneration() already
                // cleared the streaming flag and `isLoading`.
            } catch {
                errorMessage = error.localizedDescription
                if let index = messages.lastIndex(where: { $0.role == .assistant && $0.content.isEmpty }) {
                    // Nothing was produced — drop the empty placeholder.
                    messages.remove(at: index)
                } else if let index = messages.lastIndex(where: { $0.isStreaming }) {
                    // Partial content arrived before the failure — keep it,
                    // but stop the streaming indicator so the UI settles.
                    messages[index].isStreaming = false
                }
            }

            isLoading = false
            // Release the finished task so stale handles don't linger.
            currentTask = nil
        }
    }

    /// Cancels any in-flight generation, keeping partial output.
    func stopGeneration() {
        currentTask?.cancel()
        currentTask = nil
        isLoading = false

        // Mark any streaming message as complete so the UI stops animating.
        if let index = messages.lastIndex(where: { $0.isStreaming }) {
            messages[index].isStreaming = false
        }
    }

    /// Stops generation and wipes the transcript and error state.
    func clearChat() {
        stopGeneration()
        messages.removeAll()
        errorMessage = nil
    }
}
|