// CodeIntelTools.swift
// CxSwiftAgent — Code Intelligence MCP Tools
//
// 10 tools: code_complete, code_explain, code_review, code_refactor,
//           code_fix, code_document, code_test, code_security,
//           code_symbols, code_diff_explain
//
// Uses URLSession for LLM API calls to config.llmEndpoint.

import Foundation

enum CodeIntelTools {

    /// Registers the ten code-intelligence tools on `server`.
    ///
    /// Every LLM-backed tool funnels through `callLLM`, which falls back to
    /// `localFallback` (static analysis) when no endpoint is configured.
    /// All tools are read-only (`readOnlyHint: true`): none of them mutates
    /// the workspace; `code_symbols` only reads a file and records the access
    /// in `memory`.
    static func register(on server: MCPServer, config: AgentConfig, memory: AgentMemory) {

        // ── code_complete ─────────────────────────────────────────────
        server.registerTool(
            "code_complete",
            description: "Generate code completion given context and a prompt.",
            inputSchema: [
                "type": "object",
                "required": ["prompt"],
                "properties": [
                    "prompt": ["type": "string", "description": "What to generate."],
                    "context": ["type": "string", "description": "Surrounding code context."],
                    "language": ["type": "string", "description": "Programming language."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let prompt = args["prompt"] as? String ?? ""
            let context = args["context"] as? String ?? ""
            let lang = args["language"] as? String ?? "swift"
            let sysPrompt = "You are an expert \(lang) programmer. Generate clean, idiomatic code."
            // Only wrap the prompt in a fenced context block when context was supplied.
            let userPrompt = context.isEmpty ? prompt : "Context:\n```\n\(context)\n```\n\n\(prompt)"
            return await callLLM(system: sysPrompt, user: userPrompt, config: config)
        }

        // ── code_explain ──────────────────────────────────────────────
        server.registerTool(
            "code_explain",
            description: "Explain what a piece of code does.",
            inputSchema: [
                "type": "object",
                "required": ["code"],
                "properties": [
                    "code": ["type": "string", "description": "Code to explain."],
                    "detail": ["type": "string", "description": "Detail level: brief, normal, detailed (default: normal)."],
                    // FIX: the handler reads `context` but the schema did not declare it.
                    "context": ["type": "string", "description": "Optional mode switch: 'general_assistant' or 'user query' for chat-style prompts."],
                    "history": ["type": "string", "description": "JSON-encoded conversation history for multi-turn context."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let code = args["code"] as? String ?? ""
            let detail = args["detail"] as? String ?? "normal"
            let context = args["context"] as? String ?? ""
            // FIX: use the shared helper instead of an inline copy of the same parse.
            let history = parseHistory(args)

            // `context` doubles as a mode switch: the IDE's chat UI routes general
            // assistant turns through this tool with sentinel values.
            let sysPrompt: String
            let userPrompt: String
            if context == "general_assistant" {
                sysPrompt = """
                You are CxIDE Assistant, an expert coding AI embedded in a macOS IDE. \
                You help developers write code, answer programming questions, and assist with software engineering tasks. \
                Be concise, practical, and give code examples when helpful. \
                When asked to create or generate code, produce complete, working code — never give step-by-step GUI instructions. \
                You can create files, edit code, run builds, and perform git operations. \
                Continue conversations naturally — remember what was discussed and build on previous messages. \
                Always respond as a coding assistant, not a generic chatbot.
                """
                userPrompt = code
            } else if context == "user query" {
                sysPrompt = "You are an expert programming assistant in a macOS IDE. Respond concisely and provide code when relevant."
                userPrompt = code
            } else {
                sysPrompt = "You are an expert code explainer. Provide \(detail) explanations."
                userPrompt = "Explain this code:\n```\n\(code)\n```"
            }

            return await callLLM(
                system: sysPrompt,
                user: userPrompt,
                config: config,
                history: history
            )
        }

        // ── code_review ───────────────────────────────────────────────
        server.registerTool(
            "code_review",
            description: "Review code for bugs, style issues, and improvements.",
            inputSchema: [
                "type": "object",
                "required": ["code"],
                "properties": [
                    "code": ["type": "string", "description": "Code to review."],
                    "language": ["type": "string", "description": "Programming language."],
                    "focus": ["type": "string", "description": "Review focus: bugs, style, performance, security, all (default: all)."],
                    // FIX: handler calls parseHistory(args); advertise the parameter.
                    "history": ["type": "string", "description": "JSON-encoded conversation history for multi-turn context."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let code = args["code"] as? String ?? ""
            let focus = args["focus"] as? String ?? "all"
            let lang = args["language"] as? String ?? ""
            let history = parseHistory(args)
            return await callLLM(
                system: "You are a senior code reviewer. Focus on: \(focus). Be constructive and specific.",
                user: "Review this \(lang) code:\n```\(lang)\n\(code)\n```",
                config: config,
                history: history
            )
        }

        // ── code_refactor ─────────────────────────────────────────────
        server.registerTool(
            "code_refactor",
            description: "Suggest refactored version of code with explanation.",
            inputSchema: [
                "type": "object",
                "required": ["code"],
                "properties": [
                    "code": ["type": "string", "description": "Code to refactor."],
                    "goal": ["type": "string", "description": "Refactoring goal (e.g. 'extract method', 'simplify')."],
                    // FIX: handler calls parseHistory(args); advertise the parameter.
                    "history": ["type": "string", "description": "JSON-encoded conversation history for multi-turn context."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let code = args["code"] as? String ?? ""
            let goal = args["goal"] as? String ?? "improve readability and maintainability"
            let history = parseHistory(args)
            return await callLLM(
                system: "You are an expert refactoring assistant.",
                user: "Refactor this code. Goal: \(goal)\n```\n\(code)\n```\nReturn the refactored code and explain changes.",
                config: config,
                history: history
            )
        }

        // ── code_fix ──────────────────────────────────────────────────
        server.registerTool(
            "code_fix",
            description: "Fix bugs or errors in code given an error message.",
            inputSchema: [
                "type": "object",
                "required": ["code"],
                "properties": [
                    "code": ["type": "string", "description": "Code with the bug."],
                    "error": ["type": "string", "description": "Error message or description."],
                    // FIX: handler calls parseHistory(args); advertise the parameter.
                    "history": ["type": "string", "description": "JSON-encoded conversation history for multi-turn context."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let code = args["code"] as? String ?? ""
            let error = args["error"] as? String ?? "unknown error"
            let history = parseHistory(args)
            return await callLLM(
                system: "You are a debugging expert. Fix the code and explain the root cause.",
                user: "Fix this code:\n```\n\(code)\n```\nError: \(error)",
                config: config,
                history: history
            )
        }

        // ── code_document ─────────────────────────────────────────────
        server.registerTool(
            "code_document",
            description: "Generate documentation comments for code.",
            inputSchema: [
                "type": "object",
                "required": ["code"],
                "properties": [
                    "code": ["type": "string", "description": "Code to document."],
                    "style": ["type": "string", "description": "Doc style: swift, jsdoc, javadoc, docstring (default: swift)."],
                    // FIX: handler calls parseHistory(args); advertise the parameter.
                    "history": ["type": "string", "description": "JSON-encoded conversation history for multi-turn context."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let code = args["code"] as? String ?? ""
            let style = args["style"] as? String ?? "swift"
            let history = parseHistory(args)
            return await callLLM(
                system: "You are a documentation expert. Generate \(style)-style documentation comments.",
                user: "Add documentation to this code:\n```\n\(code)\n```",
                config: config,
                history: history
            )
        }

        // ── code_test ─────────────────────────────────────────────────
        server.registerTool(
            "code_test",
            description: "Generate unit tests for code.",
            inputSchema: [
                "type": "object",
                "required": ["code"],
                "properties": [
                    "code": ["type": "string", "description": "Code to test."],
                    "framework": ["type": "string", "description": "Test framework: xctest, swift-testing, jest, pytest (default: xctest)."],
                    // FIX: handler calls parseHistory(args); advertise the parameter.
                    "history": ["type": "string", "description": "JSON-encoded conversation history for multi-turn context."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let code = args["code"] as? String ?? ""
            let framework = args["framework"] as? String ?? "xctest"
            let history = parseHistory(args)
            return await callLLM(
                system: "You are a test engineering expert. Generate comprehensive \(framework) tests.",
                user: "Generate unit tests for:\n```\n\(code)\n```\nUse \(framework). Cover edge cases.",
                config: config,
                history: history
            )
        }

        // ── code_security ─────────────────────────────────────────────
        server.registerTool(
            "code_security",
            description: "Analyze code for security vulnerabilities (OWASP Top 10).",
            inputSchema: [
                "type": "object",
                "required": ["code"],
                "properties": [
                    "code": ["type": "string", "description": "Code to analyze."],
                    "language": ["type": "string", "description": "Programming language."],
                    // FIX: handler calls parseHistory(args); advertise the parameter.
                    "history": ["type": "string", "description": "JSON-encoded conversation history for multi-turn context."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let code = args["code"] as? String ?? ""
            let lang = args["language"] as? String ?? ""
            let history = parseHistory(args)
            return await callLLM(
                system: "You are a security auditor. Analyze for OWASP Top 10 vulnerabilities. " + "Rate severity (critical/high/medium/low) and suggest fixes.",
                user: "Security audit this \(lang) code:\n```\(lang)\n\(code)\n```",
                config: config,
                history: history
            )
        }

        // ── code_symbols ──────────────────────────────────────────────
        server.registerTool(
            "code_symbols",
            description: "Extract symbols (classes, functions, variables, types) from source code.",
            inputSchema: [
                "type": "object",
                "required": ["path"],
                "properties": [
                    "path": ["type": "string", "description": "Source file path."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            // resolvePath also acts as the sandbox check: nil means outside allowed roots.
            guard let path = config.resolvePath(args["path"] as? String ?? "") else {
                return err("Path not allowed")
            }
            guard let content = try? String(contentsOfFile: path, encoding: .utf8) else {
                return err("Cannot read file")
            }

            var symbols: [String] = []
            let lines = content.components(separatedBy: "\n")
            // Keyword prefix → symbol kind. Purely line-based heuristics, multi-language.
            let patterns: [(String, String)] = [
                ("func ", "function"),
                ("class ", "class"),
                ("struct ", "struct"),
                ("enum ", "enum"),
                ("protocol ", "protocol"),
                ("var ", "variable"),
                ("let ", "constant"),
                ("typealias ", "typealias"),
                ("extension ", "extension"),
                ("def ", "function"), // Python
                ("function ", "function"), // JS/PHP
                ("sub ", "function"), // Perl
                ("package ", "package"), // Perl/Go
            ]
            for (i, line) in lines.enumerated() {
                let trimmed = line.trimmingCharacters(in: .whitespaces)
                for (keyword, kind) in patterns {
                    // Also match Swift access modifiers before the keyword.
                    if trimmed.hasPrefix(keyword) || trimmed.hasPrefix("public \(keyword)")
                        || trimmed.hasPrefix("private \(keyword)") || trimmed.hasPrefix("internal \(keyword)") {
                        // Strip everything after the identifier: params, type, body, generics.
                        let name = trimmed.components(separatedBy: keyword).last?
                            .components(separatedBy: "(").first?
                            .components(separatedBy: ":").first?
                            .components(separatedBy: "{").first?
                            .components(separatedBy: "<").first?
                            .trimmingCharacters(in: .whitespaces) ?? "?"
                        symbols.append(" L\(i + 1): \(kind) \(name)")
                        break // first matching pattern wins for this line
                    }
                }
            }

            // Fire-and-forget: record the access without delaying the response.
            Task { await memory.recordFileAccess(path: path, action: "symbols") }
            return ok(symbols.isEmpty ? "No symbols found" : symbols.joined(separator: "\n"))
        }

        // ── code_diff_explain ─────────────────────────────────────────
        server.registerTool(
            "code_diff_explain",
            description: "Explain a code diff in natural language.",
            inputSchema: [
                "type": "object",
                "required": ["diff"],
                "properties": [
                    "diff": ["type": "string", "description": "Unified diff text."],
                ] as [String: Any],
            ] as [String: Any],
            annotations: ToolAnnotations(readOnlyHint: true)
        ) { args in
            let diff = args["diff"] as? String ?? ""
            return await callLLM(
                system: "You are a code change analyst. Explain diffs clearly and concisely.",
                user: "Explain this diff:\n```diff\n\(diff)\n```",
                config: config
            )
        }
    }

    // MARK: - LLM Helper

    /// Sends an OpenAI-style chat-completions request and returns MCP content parts.
    ///
    /// - Parameters:
    ///   - system: System prompt (first message).
    ///   - user: Current user message (last message).
    ///   - config: Fallback source for endpoint/model/API key.
    ///   - history: Prior `{"role": ..., "content": ...}` turns inserted between
    ///     the system and user messages.
    /// - Returns: `ok(content)` on success, `err(...)` on API/parse errors, or the
    ///   local static-analysis fallback when no usable endpoint is configured or
    ///   the network request throws.
    private static func callLLM(system: String, user: String, config: AgentConfig,
                                history: [[String: String]] = []) async -> [[String: Any]] {
        // Read live env vars so model switching in the UI takes effect immediately.
        let endpoint = ProcessInfo.processInfo.environment["CX_LLM_ENDPOINT"] ?? config.llmEndpoint
        let model = ProcessInfo.processInfo.environment["CX_LLM_MODEL"] ?? config.llmModel
        let apiKey = ProcessInfo.processInfo.environment["NVIDIA_API_KEY"] ?? config.nvidiaApiKey

        // Check if we have a real LLM endpoint configured.
        guard !endpoint.isEmpty else { return localFallback(system: system, user: user) }

        // Accept any non-default endpoint, or the default if an API key is set.
        let isDefault = endpoint == "http://localhost:8080"
        guard !isDefault || !apiKey.isEmpty else { return localFallback(system: system, user: user) }

        guard let url = URL(string: endpoint) else {
            return err("Invalid LLM endpoint URL: \(endpoint)")
        }

        var request = URLRequest(url: url)
        request.httpMethod = "POST"
        request.setValue("application/json", forHTTPHeaderField: "Content-Type")
        request.timeoutInterval = 30
        if !apiKey.isEmpty {
            request.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")
        }

        // Build messages array: system + history + current user message.
        var messages: [[String: Any]] = [["role": "system", "content": system]]
        for msg in history { messages.append(msg as [String: Any]) }
        messages.append(["role": "user", "content": user])

        let body: [String: Any] = [
            "model": model,
            "messages": messages,
            "temperature": 0.2,
            "max_tokens": 4096,
        ]
        request.httpBody = try? JSONSerialization.data(withJSONObject: body)

        do {
            let (data, response) = try await URLSession.shared.data(for: request)
            guard let httpResponse = response as? HTTPURLResponse else {
                return err("Invalid HTTP response")
            }
            guard httpResponse.statusCode == 200 else {
                let responseBody = String(data: data, encoding: .utf8) ?? ""
                return err("LLM API returned \(httpResponse.statusCode): \(responseBody.prefix(200))")
            }
            // OpenAI-compatible shape: choices[0].message.content
            guard let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],
                  let choices = json["choices"] as? [[String: Any]],
                  let message = choices.first?["message"] as? [String: Any],
                  let content = message["content"] as? String
            else {
                return err("Failed to parse LLM response")
            }
            return ok(content)
        } catch {
            // Network error — fall back to local analysis instead of showing raw error.
            return localFallback(system: system, user: user)
        }
    }

    /// Wraps `text` as a single MCP text content part.
    private static func ok(_ text: String) -> [[String: Any]] {
        [["type": "text", "text": text]]
    }

    /// Wraps `message` as an MCP text content part prefixed with "Error: ".
    private static func err(_ message: String) -> [[String: Any]] {
        [["type": "text", "text": "Error: \(message)"]]
    }

    /// Parse conversation history from tool arguments.
    /// Returns [] when `history` is absent, not a string, or not valid JSON of
    /// the expected `[{"role": ..., "content": ...}]` shape.
    private static func parseHistory(_ args: [String: Any]) -> [[String: String]] {
        guard let historyJSON = args["history"] as? String,
              let data = historyJSON.data(using: .utf8),
              let parsed = try? JSONSerialization.jsonObject(with: data) as? [[String: String]]
        else { return [] }
        return parsed
    }

    // MARK: - Local Fallback (no LLM configured)

    /// Keyword-driven static analysis used when no LLM endpoint is reachable.
    private static func localFallback(system: String, user: String) -> [[String: Any]] {
        let sysLower = system.lowercased()
        // Extract code from the user prompt if present
        let codeBlock: String
        // NOTE(review): SOURCE is truncated mid-expression below — the `..<` upper
        // bound, the codeBlock assignment, and the stats setup (lineCount/nonEmpty/
        // functions/classes/comments) are missing from the visible text. The tail of
        // this function continues unchanged after this chunk; verify against VCS.
        if let start = user.range(of: "```"), let end = user.range(of: "```", range: start.upperBound..
// NOTE(review): SOURCE appears truncated just before this point — the
// `if trimmed.count >` guard and the definitions of `issues`, `lines`,
// `lineCount`, `nonEmpty`, `functions`, `classes`, and `comments` are not
// visible in this chunk. Verify the missing span against version control.
120 {
                    issues.append("Line \(i+1): exceeds 120 characters (\(trimmed.count))")
                }
                // NOTE(review): the `contains("!")` clause is redundant here —
                // any line containing "as!" necessarily contains "!".
                if trimmed.contains("!") && trimmed.contains("as!") {
                    issues.append("Line \(i+1): force cast detected (`as!`)")
                }
                if trimmed.contains("try!") {
                    issues.append("Line \(i+1): force try detected (`try!`)")
                }
                // Heuristic: a non-comment line with a `:` that ends in `!` may
                // declare an implicitly unwrapped optional.
                if trimmed.contains("implicitlyUnwrappedOptional") || (trimmed.contains(": ") && trimmed.hasSuffix("!") && !trimmed.hasPrefix("//")) {
                    issues.append("Line \(i+1): possible implicitly unwrapped optional")
                }
            }
            // Assemble the markdown review summary: stats line, then up to 10 issues.
            var result = "**Code Review** (local static analysis)\n\n"
            result += "📊 \(lineCount) lines, \(functions) functions, \(classes) types\n\n"
            if issues.isEmpty {
                result += "✅ No obvious issues found in static analysis.\n"
            } else {
                result += "⚠️ Found \(issues.count) potential issue(s):\n"
                for issue in issues.prefix(10) { result += "• \(issue)\n" }
            }
            result += "\n💡 Configure an LLM for deeper semantic review."
            return ok(result)
        }

        // "fix" prompts: no local auto-fix is possible; return a checklist instead.
        if sysLower.contains("fix") {
            return ok("**Local Analysis**: The code has \(lineCount) lines with \(functions) functions.\n\n" + "I can't automatically fix code without an LLM. Common things to check:\n" + "• Missing `import` statements\n• Unbalanced braces or parentheses\n• Type mismatches\n• Optional unwrapping issues\n\n" + "💡 Set `CX_LLM_ENDPOINT` to enable AI-powered fixes.")
        }

        // "test" prompts: report what could be tested, but generate nothing locally.
        if sysLower.contains("test") {
            return ok("**Test Generation** requires an LLM.\n\n" + "Detected \(functions) function(s) and \(classes) type(s) that could be tested.\n\n" + "💡 Set `CX_LLM_ENDPOINT` to auto-generate unit tests.")
        }

        // "security" prompts: cheap case-insensitive pattern scan per line.
        if sysLower.contains("security") {
            var findings: [String] = []
            for (i, line) in lines.enumerated() {
                let t = line.lowercased()
                // Possible hardcoded secrets by keyword.
                if t.contains("password") || t.contains("secret") || t.contains("api_key") || t.contains("apikey") {
                    findings.append("Line \(i+1): possible hardcoded credential")
                }
                // Cleartext HTTP (localhost exempted).
                if t.contains("http://") && !t.contains("localhost") {
                    findings.append("Line \(i+1): non-HTTPS URL")
                }
                // Dynamic execution sinks.
                if t.contains("eval(") || t.contains("exec(") {
                    findings.append("Line \(i+1): dynamic code execution")
                }
            }
            var result = "**Security Scan** (local pattern matching)\n\n"
            if findings.isEmpty {
                result += "✅ No obvious security issues detected.\n"
            } else {
                result += "🔒 Found \(findings.count) potential concern(s):\n"
                for f in findings.prefix(10) { result += "• \(f)\n" }
            }
            result += "\n💡 Set `CX_LLM_ENDPOINT` for comprehensive security analysis."
            return ok(result)
        }

        // "refactor"/"document" prompts: stats only; both need an LLM.
        if sysLower.contains("refactor") || sysLower.contains("document") {
            return ok("**\(sysLower.contains("refactor") ? "Refactoring" : "Documentation")** requires an LLM.\n\n" + "Code stats: \(lineCount) lines, \(functions) functions, \(classes) types.\n\n" + "💡 Set `CX_LLM_ENDPOINT` to enable this feature.")
        }

        // Generic fallback
        return ok("📊 **Code Stats**: \(lineCount) lines, \(nonEmpty) non-empty, \(functions) functions, \(classes) types, \(comments) comments.\n\n" + "💡 For AI-powered responses, configure `CX_LLM_ENDPOINT` and optionally `NVIDIA_API_KEY`.")
    }
}