Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Fixed

- AI chat freeze when large queries or results are included in the system prompt (#774)
- AI chat panel not updating when switching database connections
- Schema restored on reconnect for PostgreSQL, Redshift, and BigQuery (#777)
- Database restored after auto-reconnect (was lost when connection dropped)
- Redis database selection persisted across sessions
Expand Down
20 changes: 17 additions & 3 deletions TablePro/Core/AI/AIPromptTemplates.swift
Original file line number Diff line number Diff line change
Expand Up @@ -12,19 +12,33 @@ enum AIPromptTemplates {
/// Build a prompt asking AI to explain a query
@MainActor static func explainQuery(_ query: String, databaseType: DatabaseType = .mysql) -> String {
let (typeName, lang) = queryInfo(for: databaseType)
return "Explain this \(typeName):\n\n```\(lang)\n\(query)\n```"
return explainQuery(query, typeName: typeName, language: lang)
}

/// Build a prompt asking AI to optimize a query
@MainActor static func optimizeQuery(_ query: String, databaseType: DatabaseType = .mysql) -> String {
let (typeName, lang) = queryInfo(for: databaseType)
return "Optimize this \(typeName) for better performance:\n\n```\(lang)\n\(query)\n```"
return optimizeQuery(query, typeName: typeName, language: lang)
}

/// Build a prompt asking AI to fix a query that produced an error
@MainActor static func fixError(query: String, error: String, databaseType: DatabaseType = .mysql) -> String {
let (typeName, lang) = queryInfo(for: databaseType)
return "This \(typeName) failed with an error. Please fix it.\n\nQuery:\n```\(lang)\n\(query)\n```\n\nError: \(error)"
return fixError(query: query, error: error, typeName: typeName, language: lang)
}

// MARK: - Non-isolated overloads

/// Build a prompt asking the AI to explain the given query.
/// - Parameters:
///   - query: The query text to explain, embedded verbatim in a fenced code block.
///   - typeName: Name of the query type, interpolated into the prompt text.
///   - language: Code-fence tag used for syntax highlighting.
static func explainQuery(_ query: String, typeName: String, language: String) -> String {
    let fence = "```"
    return "Explain this \(typeName):\n\n\(fence)\(language)\n\(query)\n\(fence)"
}

/// Build a prompt asking the AI to optimize the given query for performance.
/// - Parameters:
///   - query: The query text to optimize, embedded verbatim in a fenced code block.
///   - typeName: Name of the query type, interpolated into the prompt text.
///   - language: Code-fence tag used for syntax highlighting.
static func optimizeQuery(_ query: String, typeName: String, language: String) -> String {
    let codeBlock = ["```\(language)", query, "```"].joined(separator: "\n")
    return "Optimize this \(typeName) for better performance:\n\n" + codeBlock
}

/// Build a prompt asking the AI to repair a query that produced an error.
/// - Parameters:
///   - query: The failing query, embedded verbatim in a fenced code block.
///   - error: The error message reported for the query, appended after the code block.
///   - typeName: Name of the query type, interpolated into the prompt text.
///   - language: Code-fence tag used for syntax highlighting.
static func fixError(query: String, error: String, typeName: String, language: String) -> String {
    var prompt = "This \(typeName) failed with an error. Please fix it."
    prompt += "\n\nQuery:\n```\(language)\n\(query)\n```"
    prompt += "\n\nError: \(error)"
    return prompt
}

@MainActor private static func queryInfo(for databaseType: DatabaseType) -> (typeName: String, language: String) {
Expand Down
9 changes: 6 additions & 3 deletions TablePro/Core/AI/AIProviderFactory.swift
Original file line number Diff line number Diff line change
Expand Up @@ -36,18 +36,21 @@ enum AIProviderFactory {
case .claude:
provider = AnthropicProvider(
endpoint: config.endpoint,
apiKey: apiKey ?? ""
apiKey: apiKey ?? "",
maxOutputTokens: config.maxOutputTokens ?? 4_096
)
case .gemini:
provider = GeminiProvider(
endpoint: config.endpoint,
apiKey: apiKey ?? ""
apiKey: apiKey ?? "",
maxOutputTokens: config.maxOutputTokens ?? 8_192
)
case .openAI, .openRouter, .ollama, .custom:
provider = OpenAICompatibleProvider(
endpoint: config.endpoint,
apiKey: apiKey,
providerType: config.type
providerType: config.type,
maxOutputTokens: config.maxOutputTokens
)
}
cache[config.id] = (apiKey, provider)
Expand Down
6 changes: 5 additions & 1 deletion TablePro/Core/AI/AISchemaContext.swift
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,11 @@ struct AISchemaContext {
let query = currentQuery,
!query.isEmpty {
let lang = editorLanguage.codeBlockTag
parts.append("\n## Current Query\n```\(lang)\n\(query)\n```")
let maxQueryLength = 2_000
let truncated = query.count > maxQueryLength
? String(query.prefix(maxQueryLength)) + "\n-- ... truncated"
: query
parts.append("\n## Current Query\n```\(lang)\n\(truncated)\n```")
}

if settings.includeQueryResults,
Expand Down
7 changes: 5 additions & 2 deletions TablePro/Core/AI/AnthropicProvider.swift
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,13 @@ final class AnthropicProvider: AIProvider {

private let endpoint: String
private let apiKey: String
private let maxOutputTokens: Int
private let session: URLSession

init(endpoint: String, apiKey: String) {
/// Creates a provider that talks to the Anthropic Messages API (`/v1/messages`).
/// - Parameters:
///   - endpoint: Base URL; a single trailing "/" is stripped so request paths can be appended safely.
///   - apiKey: API key; surrounding whitespace and newlines are trimmed before use.
///   - maxOutputTokens: Per-request cap on generated tokens when the caller does not supply one (default 4_096).
init(endpoint: String, apiKey: String, maxOutputTokens: Int = 4_096) {
self.endpoint = endpoint.hasSuffix("/") ? String(endpoint.dropLast()) : endpoint
// Trim to guard against stray whitespace from copy/pasted keys.
self.apiKey = apiKey.trimmingCharacters(in: .whitespacesAndNewlines)
self.maxOutputTokens = maxOutputTokens
// Ephemeral configuration: no persistent cache/cookies for API traffic.
self.session = URLSession(configuration: .ephemeral)
}

Expand Down Expand Up @@ -182,9 +184,10 @@ final class AnthropicProvider: AIProvider {
messages: [AIChatMessage],
model: String,
systemPrompt: String?,
maxTokens: Int = 4_096,
maxTokens: Int? = nil,
stream: Bool = true
) throws -> URLRequest {
let maxTokens = maxTokens ?? maxOutputTokens
guard let url = URL(string: "\(endpoint)/v1/messages") else {
throw AIProviderError.invalidEndpoint(endpoint)
}
Expand Down
37 changes: 26 additions & 11 deletions TablePro/Core/AI/GeminiProvider.swift
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,13 @@ final class GeminiProvider: AIProvider {

private let endpoint: String
private let apiKey: String
private let maxOutputTokens: Int
private let session: URLSession

init(endpoint: String, apiKey: String) {
/// Creates a provider that talks to the Gemini API.
/// - Parameters:
///   - endpoint: Base URL; a single trailing "/" is stripped so request paths can be appended safely.
///   - apiKey: API key; surrounding whitespace and newlines are trimmed before use.
///   - maxOutputTokens: Value sent as `generationConfig.maxOutputTokens` on requests (default 8_192).
init(endpoint: String, apiKey: String, maxOutputTokens: Int = 8_192) {
self.endpoint = endpoint.hasSuffix("/") ? String(endpoint.dropLast()) : endpoint
// Trim to guard against stray whitespace from copy/pasted keys.
self.apiKey = apiKey.trimmingCharacters(in: .whitespacesAndNewlines)
self.maxOutputTokens = maxOutputTokens
// Ephemeral configuration: no persistent cache/cookies for API traffic.
self.session = URLSession(configuration: .ephemeral)
}

Expand Down Expand Up @@ -106,6 +108,14 @@ final class GeminiProvider: AIProvider {
}
}

/// Static fallback list of Gemini model identifiers, returned by
/// fetchAvailableModels() when the live models endpoint is unreachable,
/// responds with an error, or yields no usable entries.
/// NOTE(review): this list will drift as Google releases models — keep in sync.
private static let knownModels = [
"gemini-2.5-flash",
"gemini-2.5-pro",
"gemini-2.0-flash",
"gemini-1.5-flash",
"gemini-1.5-pro"
]

func fetchAvailableModels() async throws -> [String] {
guard let url = URL(string: "\(endpoint)/v1beta/models") else {
throw AIProviderError.invalidEndpoint(endpoint)
Expand All @@ -115,36 +125,41 @@ final class GeminiProvider: AIProvider {
request.httpMethod = "GET"
request.setValue(apiKey, forHTTPHeaderField: "x-goog-api-key")

let (data, response) = try await session.data(for: request)
let data: Data
let response: URLResponse
do {
(data, response) = try await session.data(for: request)
} catch {
return Self.knownModels
}

guard let httpResponse = response as? HTTPURLResponse else {
throw AIProviderError.networkError("Invalid response")
return Self.knownModels
}

guard httpResponse.statusCode == 200 else {
throw mapHTTPError(
statusCode: httpResponse.statusCode,
body: String(data: data, encoding: .utf8) ?? ""
)
return Self.knownModels
}

guard let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],
let models = json["models"] as? [[String: Any]]
else {
return []
return Self.knownModels
}

return models.compactMap { model -> String? in
let fetched = models.compactMap { model -> String? in
guard let name = model["name"] as? String,
let methods = model["supportedGenerationMethods"] as? [String],
methods.contains("generateContent")
else { return nil }
// Strip "models/" prefix: "models/gemini-2.0-flash" "gemini-2.0-flash"
// Strip "models/" prefix: "models/gemini-2.0-flash" -> "gemini-2.0-flash"
if name.hasPrefix("models/") {
return String(name.dropFirst(7))
}
return name
}

return fetched.isEmpty ? Self.knownModels : fetched
}

func testConnection() async throws -> Bool {
Expand Down Expand Up @@ -196,7 +211,7 @@ final class GeminiProvider: AIProvider {
request.setValue(apiKey, forHTTPHeaderField: "x-goog-api-key")

var body: [String: Any] = [
"generationConfig": ["maxOutputTokens": 8_192]
"generationConfig": ["maxOutputTokens": maxOutputTokens]
]

if let systemPrompt, !systemPrompt.isEmpty {
Expand Down
38 changes: 27 additions & 11 deletions TablePro/Core/AI/InlineSuggestionManager.swift
Original file line number Diff line number Diff line change
Expand Up @@ -234,22 +234,38 @@ final class InlineSuggestionManager {
systemPrompt: systemPrompt
)

let flushInterval: ContinuousClock.Duration = .milliseconds(50)
var lastFlushTime: ContinuousClock.Instant = .now

for try await event in stream {
guard !Task.isCancelled else { break }
switch event {
case .text(let token):
if case .text(let token) = event {
accumulated += token
// Progressive update: show partial ghost text as tokens arrive
await MainActor.run { [weak self, accumulated] in
guard let self else { return }
let cleaned = self.cleanSuggestion(accumulated)
if !cleaned.isEmpty {
self.currentSuggestion = cleaned
self.showGhostText(cleaned, at: self.suggestionOffset)
if ContinuousClock.now - lastFlushTime >= flushInterval {
let snapshot = accumulated
await MainActor.run { [weak self] in
guard let self else { return }
let cleaned = self.cleanSuggestion(snapshot)
if !cleaned.isEmpty {
self.currentSuggestion = cleaned
self.showGhostText(cleaned, at: self.suggestionOffset)
}
}
lastFlushTime = .now
}
}
}

// Final flush
if !Task.isCancelled, !accumulated.isEmpty {
let snapshot = accumulated
await MainActor.run { [weak self] in
guard let self else { return }
let cleaned = self.cleanSuggestion(snapshot)
if !cleaned.isEmpty {
self.currentSuggestion = cleaned
self.showGhostText(cleaned, at: self.suggestionOffset)
}
case .usage:
break
}
}

Expand Down
8 changes: 7 additions & 1 deletion TablePro/Core/AI/OpenAICompatibleProvider.swift
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,14 @@ final class OpenAICompatibleProvider: AIProvider {
private let endpoint: String
private let apiKey: String?
private let providerType: AIProviderType
private let maxOutputTokens: Int?
private let session: URLSession

init(endpoint: String, apiKey: String?, providerType: AIProviderType) {
/// Creates a provider speaking an OpenAI-compatible chat API.
/// - Parameters:
///   - endpoint: Base URL; a single trailing "/" is stripped so request paths can be appended safely.
///   - apiKey: Optional API key (e.g. local providers may not need one); trimmed of surrounding whitespace when present.
///   - providerType: Concrete provider variant; used to gate provider-specific request options.
///   - maxOutputTokens: When non-nil, sent as `max_tokens` on requests; when nil no cap is sent.
init(endpoint: String, apiKey: String?, providerType: AIProviderType, maxOutputTokens: Int? = nil) {
self.endpoint = endpoint.hasSuffix("/") ? String(endpoint.dropLast()) : endpoint
// Trim to guard against stray whitespace from copy/pasted keys.
self.apiKey = apiKey?.trimmingCharacters(in: .whitespacesAndNewlines)
self.providerType = providerType
self.maxOutputTokens = maxOutputTokens
// Ephemeral configuration: no persistent cache/cookies for API traffic.
self.session = URLSession(configuration: .ephemeral)
}

Expand Down Expand Up @@ -254,6 +256,10 @@ final class OpenAICompatibleProvider: AIProvider {
"stream": true
]

if let maxOutputTokens {
body["max_tokens"] = maxOutputTokens
}

// Request usage stats in stream (OpenAI/OpenRouter support this)
if providerType != .ollama {
body["stream_options"] = ["include_usage": true]
Expand Down
15 changes: 14 additions & 1 deletion TablePro/Core/Storage/AIChatStorage.swift
Original file line number Diff line number Diff line change
Expand Up @@ -59,12 +59,25 @@ actor AIChatStorage {

// MARK: - Public Methods

/// Maximum encoded size for a single conversation file (500 KB).
/// Conversations whose JSON encoding exceeds this are trimmed before being written in save(_:).
private static let maxFileSize = 500_000

/// Number of most-recent messages retained when a conversation exceeds
/// maxFileSize and must be trimmed (older messages are dropped).
private static let trimmedMessageCount = 50

/// Save a conversation to disk
func save(_ conversation: AIConversation) {
let fileURL = directory.appendingPathComponent("\(conversation.id.uuidString).json")

do {
let data = try Self.encoder.encode(conversation)
var data = try Self.encoder.encode(conversation)

if data.count > Self.maxFileSize {
var trimmed = conversation
trimmed.messages = Array(trimmed.messages.suffix(Self.trimmedMessageCount))
data = try Self.encoder.encode(trimmed)
}

try data.write(to: fileURL, options: [.atomic, .completeFileProtectionUntilFirstUserAuthentication])
} catch {
Self.logger.error("Failed to save conversation \(conversation.id): \(error.localizedDescription)")
Expand Down
5 changes: 4 additions & 1 deletion TablePro/Models/AI/AIModels.swift
Original file line number Diff line number Diff line change
Expand Up @@ -60,21 +60,24 @@ struct AIProviderConfig: Codable, Equatable, Identifiable {
var model: String
var endpoint: String
var isEnabled: Bool
var maxOutputTokens: Int?

init(
id: UUID = UUID(),
name: String = "",
type: AIProviderType = .claude,
model: String = "",
endpoint: String = "",
isEnabled: Bool = true
isEnabled: Bool = true,
maxOutputTokens: Int? = nil
) {
self.id = id
self.name = name
self.type = type
self.model = model
self.endpoint = endpoint.isEmpty ? type.defaultEndpoint : endpoint
self.isEnabled = isEnabled
self.maxOutputTokens = maxOutputTokens
}
}

Expand Down
Loading
Loading