//
// MockData.swift
// oAI
//
// Mock data for Phase 1 testing
//
import Foundation

extension Message {
    static let mockUser1 = Message(
        role: .user,
        content: "What is SwiftUI and how does it compare to UIKit?",
        tokens: 12,
        cost: 0.00002
    )

    static let mockAssistant1 = Message(
        role: .assistant,
        content: """
        SwiftUI is Apple's modern declarative framework for building user interfaces across all Apple platforms. Here's how it compares to UIKit:

        ## Key Differences

        **SwiftUI (Declarative)**
        - Describe WHAT the UI should look like
        - State-driven updates
        - Less boilerplate code
        - Cross-platform by default

        **UIKit (Imperative)**
        - Describe HOW to build the UI step-by-step
        - Manual state management
        - More verbose
        - iOS/tvOS focused

        ## Example

        ```swift
        // SwiftUI
        struct ContentView: View {
            @State private var count = 0

            var body: some View {
                VStack {
                    Text("Count: \\(count)")
                    Button("Increment") {
                        count += 1
                    }
                }
            }
        }
        ```

        ```swift
        // UIKit equivalent
        class ViewController: UIViewController {
            var count = 0
            let label = UILabel()
            let button = UIButton()

            override func viewDidLoad() {
                super.viewDidLoad()
                setupUI()
            }

            @objc func incrementTapped() {
                count += 1
                label.text = "Count: \\(count)"
            }

            func setupUI() {
                // ... lots more setup code
            }
        }
        ```

        SwiftUI is recommended for new projects, especially if targeting iOS 14+ and multiple platforms.
        """,
        tokens: 245,
        cost: 0.00049
    )

    static let mockUser2 = Message(
        role: .user,
        content: "Can you explain async/await in Swift?",
        tokens: 8,
        cost: 0.00001
    )

    static let mockAssistant2 = Message(
        role: .assistant,
        content: """
        Async/await is Swift's modern concurrency model introduced in Swift 5.5. It makes asynchronous code look and behave like synchronous code.

        ## Basic Example

        ```swift
        // Old way with completion handlers
        func fetchData(completion: @escaping (Result<Data, Error>) -> Void) {
            URLSession.shared.dataTask(with: url) { data, response, error in
                if let error = error {
                    completion(.failure(error))
                } else if let data = data {
                    completion(.success(data))
                }
            }.resume()
        }

        // New way with async/await
        func fetchData() async throws -> Data {
            let (data, _) = try await URLSession.shared.data(from: url)
            return data
        }
        ```

        ## Key Benefits

        1. **Readability**: No callback pyramids
        2. **Error handling**: Standard try/catch
        3. **Sequential logic**: Reads top-to-bottom
        4. **Cancellation**: Built-in with Task

        ## Usage

        ```swift
        Task {
            do {
                let data = try await fetchData()
                await MainActor.run {
                    // Update UI
                }
            } catch {
                print("Error: \\(error)")
            }
        }
        ```

        Much cleaner than completion handlers!
        """,
        tokens: 189,
        cost: 0.00038
    )

    static let mockSystem = Message(
        role: .system,
        content: "Conversation cleared. Starting fresh.",
        tokens: nil,
        cost: nil
    )

    static let mockMessages = [mockUser1, mockAssistant1, mockUser2, mockAssistant2]
}
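
// Usage sketch (not part of the original file): a rough illustration of how the Phase 1
// mock messages might be summed up, e.g. for a token/cost footer in a debug build.
// Assumes `tokens` and `cost` are optional, as the nil values in `mockSystem` imply.
#if DEBUG
extension Message {
    /// Total token count across the canned mock conversation, ignoring nil values.
    static var mockTotalTokens: Int {
        mockMessages.compactMap(\.tokens).reduce(0, +)
    }

    /// Total estimated cost of the canned mock conversation.
    static var mockTotalCost: Double {
        mockMessages.compactMap(\.cost).reduce(0, +)
    }
}
#endif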

extension ModelInfo {
    static let mockModels = [
        ModelInfo(
            id: "anthropic/claude-sonnet-4",
            name: "Claude Sonnet 4",
            description: "Balanced intelligence and speed for most tasks",
            contextLength: 200_000,
            pricing: Pricing(prompt: 3.0, completion: 15.0),
            capabilities: ModelCapabilities(vision: true, tools: true, online: true)
        ),
        ModelInfo(
            id: "anthropic/claude-opus-4",
            name: "Claude Opus 4",
            description: "Most capable model for complex tasks",
            contextLength: 200_000,
            pricing: Pricing(prompt: 15.0, completion: 75.0),
            capabilities: ModelCapabilities(vision: true, tools: true, online: true)
        ),
        ModelInfo(
            id: "anthropic/claude-haiku-4",
            name: "Claude Haiku 4",
            description: "Fast and efficient for simple tasks",
            contextLength: 200_000,
            pricing: Pricing(prompt: 0.8, completion: 4.0),
            capabilities: ModelCapabilities(vision: true, tools: true, online: true)
        ),
        ModelInfo(
            id: "openai/gpt-4o",
            name: "GPT-4o",
            description: "OpenAI's flagship multimodal model",
            contextLength: 128_000,
            pricing: Pricing(prompt: 2.5, completion: 10.0),
            capabilities: ModelCapabilities(vision: true, tools: true, online: false)
        ),
        ModelInfo(
            id: "openai/gpt-4o-mini",
            name: "GPT-4o Mini",
            description: "Faster and cheaper GPT-4o variant",
            contextLength: 128_000,
            pricing: Pricing(prompt: 0.15, completion: 0.6),
            capabilities: ModelCapabilities(vision: true, tools: true, online: false)
        ),
        ModelInfo(
            id: "openai/o1",
            name: "o1",
            description: "Advanced reasoning model for complex problems",
            contextLength: 200_000,
            pricing: Pricing(prompt: 15.0, completion: 60.0),
            capabilities: ModelCapabilities(vision: false, tools: false, online: false)
        ),
        ModelInfo(
            id: "google/gemini-pro-1.5",
            name: "Gemini Pro 1.5",
            description: "Google's advanced multimodal model",
            contextLength: 2_000_000,
            pricing: Pricing(prompt: 1.25, completion: 5.0),
            capabilities: ModelCapabilities(vision: true, tools: true, online: false)
        ),
        ModelInfo(
            id: "meta-llama/llama-3.1-405b",
            name: "Llama 3.1 405B",
            description: "Meta's largest open source model",
            contextLength: 128_000,
            pricing: Pricing(prompt: 2.7, completion: 2.7),
            capabilities: ModelCapabilities(vision: false, tools: true, online: false)
        )
    ]
}
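
// Usage sketch (illustrative only): filtering the mock catalogue by capability, the way a
// model picker might. Assumes `ModelInfo` exposes the `capabilities` value passed above
// as a property with `vision`/`tools` Bools.
#if DEBUG
extension ModelInfo {
    /// Mock models that advertise vision support.
    static var mockVisionModels: [ModelInfo] {
        mockModels.filter { $0.capabilities.vision }
    }

    /// Mock models that support tool calling.
    static var mockToolModels: [ModelInfo] {
        mockModels.filter { $0.capabilities.tools }
    }
}
#endif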

extension Conversation {
    static let mockConversation1 = Conversation(
        name: "SwiftUI Discussion",
        messages: [Message.mockUser1, Message.mockAssistant1],
        createdAt: Date().addingTimeInterval(-86400), // 1 day ago
        updatedAt: Date().addingTimeInterval(-3600)   // 1 hour ago
    )

    static let mockConversation2 = Conversation(
        name: "Async/Await Tutorial",
        messages: [Message.mockUser2, Message.mockAssistant2],
        createdAt: Date().addingTimeInterval(-172800), // 2 days ago
        updatedAt: Date().addingTimeInterval(-7200)    // 2 hours ago
    )

    static let mockConversations = [mockConversation1, mockConversation2]
}
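
// Usage sketch (illustrative only): ordering the mock conversations the way a sidebar
// might, most recently updated first. Assumes `Conversation` exposes `updatedAt` as a
// stored `Date`, matching the initializer label above.
#if DEBUG
extension Conversation {
    /// Mock conversations sorted with the most recently updated first.
    static var mockConversationsByRecency: [Conversation] {
        mockConversations.sorted { $0.updatedAt > $1.updatedAt }
    }
}
#endif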