Compare commits
104 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| d86888704f | |||
| de6b6e20a5 | |||
| 1e8a5e08ed | |||
| 218ebbf32f | |||
| c49e7f4b22 | |||
| 9588c8c562 | |||
| 1948ac1284 | |||
| 3f92b7d963 | |||
| 5553e61dbf | |||
| 7f987737f9 | |||
| 5182f86133 | |||
| a50099ad74 | |||
| 20ba5523ee | |||
| 0b2b3701dc | |||
| 438b05b8a3 | |||
| e2a31b192f | |||
| b827d3d047 | |||
| 9c0cf274a3 | |||
| 85ae319690 | |||
| 449f133a1f | |||
| 2f6b03ef65 | |||
| d4030dc598 | |||
| 3271697f6b | |||
| cbfef5a5df | |||
| 52efd5f341 | |||
| 200cdbc4bd | |||
| 8525819ab4 | |||
| bcd52d526c | |||
| 7effade1d3 | |||
| dc0fee2ee3 | |||
| ea04a25ed6 | |||
| 282dcdce88 | |||
| b49f58bc16 | |||
| cdc425ae93 | |||
| 3525cb3949 | |||
| 9d85420bf6 | |||
| 641c95131f | |||
| 708c626176 | |||
| 5210e196f2 | |||
| 30c375b6c5 | |||
| baf49b1e69 | |||
| 96e0436d43 | |||
| 498e6e61b6 | |||
| 99064b6c41 | |||
| ee58b0ac32 | |||
| 990f93d467 | |||
| 44a00619b5 | |||
| 6923ee439f | |||
| c997b19b53 | |||
| c9daf68fea | |||
| ba9d083088 | |||
| 825dfc0722 | |||
| 3e4eacd1d3 | |||
| 23253219a3 | |||
| cc2b85a86d | |||
| 58dd6f3efa | |||
| c81d0f1593 | |||
| ae0dd3fc51 | |||
| 80dffa9f41 | |||
| ab0ae4fe04 | |||
| d31e068277 | |||
| 690f5c7056 | |||
| 0da8a3f193 | |||
| 15f81d9728 | |||
| b80db89391 | |||
| f413a63c5a | |||
| 33ad3797a1 | |||
| 55e6b0583d | |||
| ae9c3af096 | |||
| 0bd560b408 | |||
| 083b621b7d | |||
| d2a193e5c1 | |||
| acbfe47a4b | |||
| 60c859b3ab | |||
| 82078afd6d | |||
| 7851af14a9 | |||
| c2f5ccea3b | |||
| fab63d224b | |||
| 15e5c1206b | |||
| 38aba1a6bb | |||
| d0d3079df5 | |||
| 56de1170ee | |||
| 952e4819fe | |||
| 5ac0d152cb | |||
| 40c44470e8 | |||
| 5c37df1b22 | |||
| 5e81185df3 | |||
| 7534c9ef8d | |||
| 9545a4b3ad | |||
| e94df2c48a | |||
| cdf95002fc | |||
| 4c066bf2da | |||
| e57844e742 | |||
| 33d11ae223 | |||
| 05e90d3e2b | |||
| fe414d49e6 | |||
| d002d35bde | |||
| c9c3d17db0 | |||
| a909455f97 | |||
| 67381b02db | |||
| 235f84fa19 | |||
| 9c777c8429 | |||
| 0b17a0f4c8 | |||
| 2eabe55fe6 |
34
.github/workflows/macos-check.yml
vendored
Normal file
34
.github/workflows/macos-check.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: macos-check
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
pull_request:
|
||||
branches:
|
||||
- dev
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: cargo check (macOS)
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Cache Cargo registry
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Cargo check
|
||||
run: cargo check --workspace --all-features
|
||||
@@ -9,6 +9,7 @@ repos:
|
||||
- id: trailing-whitespace
|
||||
- id: end-of-file-fixer
|
||||
- id: check-yaml
|
||||
args: ['--allow-multiple-documents']
|
||||
- id: check-toml
|
||||
- id: check-merge-conflict
|
||||
- id: check-added-large-files
|
||||
|
||||
@@ -1,3 +1,61 @@
|
||||
---
|
||||
kind: pipeline
|
||||
name: pr-checks
|
||||
|
||||
when:
|
||||
event:
|
||||
- push
|
||||
- pull_request
|
||||
|
||||
steps:
|
||||
- name: fmt-clippy-test
|
||||
image: rust:1.83
|
||||
commands:
|
||||
- rustup component add rustfmt clippy
|
||||
- cargo fmt --all -- --check
|
||||
- cargo clippy --workspace --all-features -- -D warnings
|
||||
- cargo test --workspace --all-features
|
||||
|
||||
---
|
||||
kind: pipeline
|
||||
name: security-audit
|
||||
|
||||
when:
|
||||
event:
|
||||
- push
|
||||
- cron
|
||||
branch:
|
||||
- dev
|
||||
cron: weekly-security
|
||||
|
||||
steps:
|
||||
- name: cargo-audit
|
||||
image: rust:1.83
|
||||
commands:
|
||||
- cargo install cargo-audit --locked
|
||||
- cargo audit
|
||||
|
||||
---
|
||||
kind: pipeline
|
||||
name: release-tests
|
||||
|
||||
when:
|
||||
event: tag
|
||||
tag: v*
|
||||
|
||||
steps:
|
||||
- name: workspace-tests
|
||||
image: rust:1.83
|
||||
commands:
|
||||
- rustup component add llvm-tools-preview
|
||||
- cargo install cargo-llvm-cov --locked
|
||||
- cargo llvm-cov --workspace --all-features --summary-only
|
||||
- cargo llvm-cov --workspace --all-features --lcov --output-path coverage.lcov --no-run
|
||||
|
||||
---
|
||||
kind: pipeline
|
||||
name: release
|
||||
|
||||
when:
|
||||
event: tag
|
||||
tag: v*
|
||||
@@ -5,6 +63,9 @@ when:
|
||||
variables:
|
||||
- &rust_image 'rust:1.83'
|
||||
|
||||
depends_on:
|
||||
- release-tests
|
||||
|
||||
matrix:
|
||||
include:
|
||||
# Linux
|
||||
@@ -116,6 +177,11 @@ steps:
|
||||
sha256sum ${ARTIFACT}.tar.gz > ${ARTIFACT}.tar.gz.sha256
|
||||
fi
|
||||
|
||||
- name: release-notes
|
||||
image: *rust_image
|
||||
commands:
|
||||
- scripts/release-notes.sh "${CI_COMMIT_TAG}" release-notes.md
|
||||
|
||||
- name: release
|
||||
image: plugins/gitea-release
|
||||
settings:
|
||||
@@ -128,4 +194,4 @@ steps:
|
||||
- ${ARTIFACT}.zip
|
||||
- ${ARTIFACT}.zip.sha256
|
||||
title: Release ${CI_COMMIT_TAG}
|
||||
note: "Release ${CI_COMMIT_TAG}"
|
||||
note_file: release-notes.md
|
||||
|
||||
798
AGENTS.md
Normal file
798
AGENTS.md
Normal file
@@ -0,0 +1,798 @@
|
||||
# AGENTS.md - AI Agent Instructions for Owlen Development
|
||||
|
||||
This document provides comprehensive context and guidelines for AI agents (Claude, GPT-4, etc.) working on the Owlen codebase.
|
||||
|
||||
## Project Overview
|
||||
|
||||
**Owlen** is a local-first, terminal-based AI assistant built in Rust using the Ratatui TUI framework. It implements a Model Context Protocol (MCP) architecture for modular tool execution and supports both local (Ollama) and cloud LLM providers.
|
||||
|
||||
**Core Philosophy:**
|
||||
- **Local-first**: Prioritize local LLMs (Ollama) with cloud as fallback
|
||||
- **Privacy-focused**: No telemetry, user data stays on device
|
||||
- **MCP-native**: All operations through MCP servers for modularity
|
||||
- **Terminal-native**: Vim-style modal interaction in a beautiful TUI
|
||||
|
||||
**Current Status:** v1.0 - MCP-only architecture (Phase 10 complete)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Project Structure
|
||||
|
||||
```
|
||||
owlen/
|
||||
├── crates/
|
||||
│ ├── owlen-core/ # Core types, config, provider traits
|
||||
│ ├── owlen-tui/ # Ratatui-based terminal interface
|
||||
│ ├── owlen-cli/ # Command-line interface
|
||||
│ ├── owlen-ollama/ # Ollama provider implementation
|
||||
│ ├── owlen-mcp-llm-server/ # LLM inference as MCP server
|
||||
│ ├── owlen-mcp-client/ # MCP client library
|
||||
│ ├── owlen-mcp-server/ # Base MCP server framework
|
||||
│ ├── owlen-mcp-code-server/ # Code execution in Docker
|
||||
│ └── owlen-mcp-prompt-server/ # Prompt management server
|
||||
├── docs/ # Documentation
|
||||
├── themes/ # TUI color themes
|
||||
└── .agents/ # Agent development plans
|
||||
```
|
||||
|
||||
### Key Technologies
|
||||
|
||||
- **Language**: Rust 1.83+
|
||||
- **TUI**: Ratatui with Crossterm backend
|
||||
- **Async Runtime**: Tokio
|
||||
- **Config**: TOML (serde)
|
||||
- **HTTP Client**: reqwest
|
||||
- **LLM Providers**: Ollama (primary), with extensibility for OpenAI/Anthropic
|
||||
- **Protocol**: JSON-RPC 2.0 over STDIO/HTTP/WebSocket
|
||||
|
||||
## Current Features (v1.0)
|
||||
|
||||
### Core Capabilities
|
||||
|
||||
1. **MCP Architecture** (Phase 3-10 complete)
|
||||
- All LLM interactions via MCP servers
|
||||
- Local and remote MCP client support
|
||||
- STDIO, HTTP, WebSocket transports
|
||||
- Automatic failover with health checks
|
||||
|
||||
2. **Provider System**
|
||||
- Ollama (local and cloud)
|
||||
- Configurable per-provider settings
|
||||
- API key management with env variable expansion
|
||||
- Model switching via TUI (`:m` command)
|
||||
|
||||
3. **Agentic Loop** (ReAct pattern)
|
||||
- THOUGHT → ACTION → OBSERVATION cycle
|
||||
- Tool discovery and execution
|
||||
- Configurable iteration limits
|
||||
- Emergency stop (Ctrl+C)
|
||||
|
||||
4. **Mode System**
|
||||
- Chat mode: Limited tool availability
|
||||
- Code mode: Full tool access
|
||||
- Tool filtering by mode
|
||||
- Runtime mode switching
|
||||
|
||||
5. **Session Management**
|
||||
- Auto-save conversations
|
||||
- Session persistence with encryption
|
||||
- Description generation
|
||||
- Session timeout management
|
||||
|
||||
6. **Security**
|
||||
- Docker sandboxing for code execution
|
||||
- Tool whitelisting
|
||||
- Permission prompts for dangerous operations
|
||||
- Network isolation options
|
||||
|
||||
### TUI Features
|
||||
|
||||
- Vim-style modal editing (Normal, Insert, Visual, Command modes)
|
||||
- Multi-panel layout (conversation, status, input)
|
||||
- Syntax highlighting for code blocks
|
||||
- Theme system (10+ built-in themes)
|
||||
- Scrollback history (configurable limit)
|
||||
- Word wrap and visual selection
|
||||
|
||||
## Development Guidelines
|
||||
|
||||
### Code Style
|
||||
|
||||
1. **Rust Best Practices**
|
||||
- Use `rustfmt` (pre-commit hook enforced)
|
||||
- Run `cargo clippy` before commits
|
||||
- Prefer `Result` over `panic!` for errors
|
||||
- Document public APIs with `///` comments
|
||||
|
||||
2. **Error Handling**
|
||||
- Use `owlen_core::Error` enum for all errors
|
||||
- Chain errors with context (`.map_err(|e| Error::X(format!(...)))`)
|
||||
- Never unwrap in library code (tests OK)
|
||||
|
||||
3. **Async Patterns**
|
||||
- All I/O operations must be async
|
||||
- Use `tokio::spawn` for background tasks
|
||||
- Prefer `tokio::sync::mpsc` for channels
|
||||
- Always set timeouts for network operations
|
||||
|
||||
4. **Testing**
|
||||
- Unit tests in same file (`#[cfg(test)] mod tests`)
|
||||
- Use mock implementations from `test_utils` modules
|
||||
- Integration tests in `crates/*/tests/`
|
||||
- All public APIs must have tests
|
||||
|
||||
### File Organization
|
||||
|
||||
**When editing existing files:**
|
||||
1. Read the entire file first (use `Read` tool)
|
||||
2. Preserve existing code style and formatting
|
||||
3. Update related tests in the same commit
|
||||
4. Keep changes atomic and focused
|
||||
|
||||
**When creating new files:**
|
||||
1. Check `crates/owlen-core/src/` for similar modules
|
||||
2. Follow existing module structure
|
||||
3. Add to `lib.rs` with appropriate visibility
|
||||
4. Document module purpose with `//!` header
|
||||
|
||||
### Configuration
|
||||
|
||||
**Config file**: `~/.config/owlen/config.toml`
|
||||
|
||||
Example structure:
|
||||
```toml
|
||||
[general]
|
||||
default_provider = "ollama"
|
||||
default_model = "llama3.2:latest"
|
||||
enable_streaming = true
|
||||
|
||||
[mcp]
|
||||
# MCP is always enabled in v1.0+
|
||||
|
||||
[providers.ollama]
|
||||
provider_type = "ollama"
|
||||
base_url = "http://localhost:11434"
|
||||
|
||||
[providers.ollama-cloud]
|
||||
provider_type = "ollama-cloud"
|
||||
base_url = "https://ollama.com"
|
||||
api_key = "$OLLAMA_API_KEY"
|
||||
|
||||
[ui]
|
||||
theme = "default_dark"
|
||||
word_wrap = true
|
||||
|
||||
[security]
|
||||
enable_sandboxing = true
|
||||
allowed_tools = ["web_search", "code_exec"]
|
||||
```
|
||||
|
||||
### Common Tasks
|
||||
|
||||
#### Adding a New Provider
|
||||
|
||||
1. Create `crates/owlen-{provider}/` crate
|
||||
2. Implement `owlen_core::provider::Provider` trait
|
||||
3. Add to `owlen_core::router::ProviderRouter`
|
||||
4. Update config schema in `owlen_core::config`
|
||||
5. Add tests with `MockProvider` pattern
|
||||
6. Document in `docs/provider-implementation.md`
|
||||
|
||||
#### Adding a New MCP Server
|
||||
|
||||
1. Create `crates/owlen-mcp-{name}-server/` crate
|
||||
2. Implement JSON-RPC 2.0 protocol handlers
|
||||
3. Define tool descriptors with JSON schemas
|
||||
4. Add sandboxing/security checks
|
||||
5. Register in `mcp_servers` config array
|
||||
6. Document tool capabilities
|
||||
|
||||
#### Adding a TUI Feature
|
||||
|
||||
1. Modify `crates/owlen-tui/src/chat_app.rs`
|
||||
2. Update keybinding handlers
|
||||
3. Extend UI rendering in `draw()` method
|
||||
4. Add to help screen (`?` command)
|
||||
5. Test with different terminal sizes
|
||||
6. Ensure theme compatibility
|
||||
|
||||
## Feature Parity Roadmap
|
||||
|
||||
Based on analysis of OpenAI Codex and Claude Code, here are prioritized features to implement:
|
||||
|
||||
### Phase 11: MCP Client Enhancement (HIGHEST PRIORITY)
|
||||
|
||||
**Goal**: Full MCP client capabilities to access ecosystem tools
|
||||
|
||||
**Features:**
|
||||
1. **MCP Server Management**
|
||||
- `owlen mcp add/list/remove` commands
|
||||
- Three config scopes: local, project (`.mcp.json`), user
|
||||
- Environment variable expansion in config
|
||||
- OAuth 2.0 authentication for remote servers
|
||||
|
||||
2. **MCP Resource References**
|
||||
- `@github:issue://123` syntax
|
||||
- `@postgres:schema://users` syntax
|
||||
- Auto-completion for resources
|
||||
|
||||
3. **MCP Prompts as Slash Commands**
|
||||
- `/mcp__github__list_prs`
|
||||
- Dynamic command registration
|
||||
|
||||
**Implementation:**
|
||||
- Extend `owlen-mcp-client` crate
|
||||
- Add `.mcp.json` parsing to `owlen-core::config`
|
||||
- Update TUI command parser for `@` and `/mcp__` syntax
|
||||
- Add OAuth flow to TUI
|
||||
|
||||
**Files to modify:**
|
||||
- `crates/owlen-mcp-client/src/lib.rs`
|
||||
- `crates/owlen-core/src/config.rs`
|
||||
- `crates/owlen-tui/src/command_parser.rs`
|
||||
|
||||
### Phase 12: Approval & Sandbox System (HIGHEST PRIORITY)
|
||||
|
||||
**Goal**: Safe agentic behavior with user control
|
||||
|
||||
**Features:**
|
||||
1. **Three-tier Approval Modes**
|
||||
- `suggest`: Prompt the user for approval on ALL file writes and shell commands (default)
|
||||
- `auto-edit`: Auto-approve file changes, prompt for shell
|
||||
- `full-auto`: Auto-approve everything (requires Git repo)
|
||||
|
||||
2. **Platform-specific Sandboxing**
|
||||
- Linux: Docker with network isolation
|
||||
- macOS: Apple Seatbelt (`sandbox-exec`)
|
||||
- Windows: AppContainer or Job Objects
|
||||
|
||||
3. **Permission Management**
|
||||
- `/permissions` command in TUI
|
||||
- Tool allowlist (e.g., `Edit`, `Bash(git commit:*)`)
|
||||
- Stored in `.owlen/settings.json` (project) or `~/.owlen.json` (user)
|
||||
|
||||
**Implementation:**
|
||||
- New `owlen-core::approval` module
|
||||
- Extend `owlen-core::sandbox` with platform detection
|
||||
- Update `owlen-mcp-code-server` to use new sandbox
|
||||
- Add permission storage to config system
|
||||
|
||||
**Files to create:**
|
||||
- `crates/owlen-core/src/approval.rs`
|
||||
- `crates/owlen-core/src/sandbox/linux.rs`
|
||||
- `crates/owlen-core/src/sandbox/macos.rs`
|
||||
- `crates/owlen-core/src/sandbox/windows.rs`
|
||||
|
||||
### Phase 13: Project Documentation System (HIGH PRIORITY)
|
||||
|
||||
**Goal**: Massive usability improvement with project context
|
||||
|
||||
**Features:**
|
||||
1. **OWLEN.md System**
|
||||
- `OWLEN.md` at repo root (checked into git)
|
||||
- `OWLEN.local.md` (gitignored, personal)
|
||||
- `~/.config/owlen/OWLEN.md` (global)
|
||||
- Support nested OWLEN.md in monorepos
|
||||
|
||||
2. **Auto-generation**
|
||||
- `/init` command to generate project-specific OWLEN.md
|
||||
- Analyze codebase structure
|
||||
- Detect build system, test framework
|
||||
- Suggest common commands
|
||||
|
||||
3. **Live Updates**
|
||||
- `#` command to add instructions to OWLEN.md
|
||||
- Context-aware insertion (relevant section)
|
||||
|
||||
**Contents of OWLEN.md:**
|
||||
- Common bash commands
|
||||
- Code style guidelines
|
||||
- Testing instructions
|
||||
- Core files and utilities
|
||||
- Known quirks/warnings
|
||||
|
||||
**Implementation:**
|
||||
- New `owlen-core::project_doc` module
|
||||
- File discovery algorithm (walk up directory tree)
|
||||
- Markdown parser for sections
|
||||
- TUI commands: `/init`, `#`
|
||||
|
||||
**Files to create:**
|
||||
- `crates/owlen-core/src/project_doc.rs`
|
||||
- `crates/owlen-tui/src/commands/init.rs`
|
||||
|
||||
### Phase 14: Non-Interactive Mode (HIGH PRIORITY)
|
||||
|
||||
**Goal**: Enable CI/CD integration and automation
|
||||
|
||||
**Features:**
|
||||
1. **Headless Execution**
|
||||
```bash
|
||||
owlen exec "fix linting errors" --approval-mode auto-edit
|
||||
owlen --quiet "update CHANGELOG" --json
|
||||
```
|
||||
|
||||
2. **Environment Variables**
|
||||
- `OWLEN_QUIET_MODE=1`
|
||||
- `OWLEN_DISABLE_PROJECT_DOC=1`
|
||||
- `OWLEN_APPROVAL_MODE=full-auto`
|
||||
|
||||
3. **JSON Output**
|
||||
- Structured output for parsing
|
||||
- Exit codes for success/failure
|
||||
- Progress events on stderr
|
||||
|
||||
**Implementation:**
|
||||
- New `owlen-cli` subcommand: `exec`
|
||||
- Extend `owlen-core::session` with non-interactive mode
|
||||
- Add JSON serialization for results
|
||||
- Environment variable parsing in config
|
||||
|
||||
**Files to modify:**
|
||||
- `crates/owlen-cli/src/main.rs`
|
||||
- `crates/owlen-core/src/session.rs`
|
||||
|
||||
### Phase 15: Multi-Provider Expansion (HIGH PRIORITY)
|
||||
|
||||
**Goal**: Support cloud providers while maintaining local-first
|
||||
|
||||
**Providers to add:**
|
||||
1. OpenAI (GPT-4, o1, o4-mini)
|
||||
2. Anthropic (Claude 3.5 Sonnet, Opus)
|
||||
3. Google (Gemini Ultra, Pro)
|
||||
4. Mistral AI
|
||||
|
||||
**Configuration:**
|
||||
```toml
|
||||
[providers.openai]
|
||||
api_key = "${OPENAI_API_KEY}"
|
||||
model = "o4-mini"
|
||||
enabled = true
|
||||
|
||||
[providers.anthropic]
|
||||
api_key = "${ANTHROPIC_API_KEY}"
|
||||
model = "claude-3-5-sonnet"
|
||||
enabled = true
|
||||
```
|
||||
|
||||
**Runtime Switching:**
|
||||
```
|
||||
:model ollama/starcoder
|
||||
:model openai/o4-mini
|
||||
:model anthropic/claude-3-5-sonnet
|
||||
```
|
||||
|
||||
**Implementation:**
|
||||
- Create `owlen-openai`, `owlen-anthropic`, `owlen-google` crates
|
||||
- Implement `Provider` trait for each
|
||||
- Add runtime model switching to TUI
|
||||
- Maintain Ollama as default
|
||||
|
||||
**Files to create:**
|
||||
- `crates/owlen-openai/src/lib.rs`
|
||||
- `crates/owlen-anthropic/src/lib.rs`
|
||||
- `crates/owlen-google/src/lib.rs`
|
||||
|
||||
### Phase 16: Custom Slash Commands (MEDIUM PRIORITY)
|
||||
|
||||
**Goal**: User and team-defined workflows
|
||||
|
||||
**Features:**
|
||||
1. **Command Directories**
|
||||
- `~/.owlen/commands/` (user, available everywhere)
|
||||
- `.owlen/commands/` (project, checked into git)
|
||||
- Support `$ARGUMENTS` keyword
|
||||
|
||||
2. **Example Structure**
|
||||
```markdown
|
||||
# .owlen/commands/fix-github-issue.md
|
||||
Please analyze and fix GitHub issue: $ARGUMENTS.
|
||||
1. Use `gh issue view` to get details
|
||||
2. Implement changes
|
||||
3. Write and run tests
|
||||
4. Create PR
|
||||
```
|
||||
|
||||
3. **TUI Integration**
|
||||
- Auto-complete for custom commands
|
||||
- Help text from command files
|
||||
- Parameter validation
|
||||
|
||||
**Implementation:**
|
||||
- New `owlen-core::commands` module
|
||||
- Command discovery and parsing
|
||||
- Template expansion
|
||||
- TUI command registration
|
||||
|
||||
**Files to create:**
|
||||
- `crates/owlen-core/src/commands.rs`
|
||||
- `crates/owlen-tui/src/commands/custom.rs`
|
||||
|
||||
### Phase 17: Plugin System (MEDIUM PRIORITY)
|
||||
|
||||
**Goal**: One-command installation of tool collections
|
||||
|
||||
**Features:**
|
||||
1. **Plugin Structure**
|
||||
```json
|
||||
{
|
||||
"name": "github-workflow",
|
||||
"version": "1.0.0",
|
||||
"commands": [
|
||||
{"name": "pr", "file": "commands/pr.md"}
|
||||
],
|
||||
"mcp_servers": [
|
||||
{
|
||||
"name": "github",
|
||||
"command": "${OWLEN_PLUGIN_ROOT}/bin/github-mcp"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
2. **Installation**
|
||||
```bash
|
||||
owlen plugin install github-workflow
|
||||
owlen plugin list
|
||||
owlen plugin remove github-workflow
|
||||
```
|
||||
|
||||
3. **Discovery**
|
||||
- `~/.owlen/plugins/` directory
|
||||
- Git repository URLs
|
||||
- Plugin registry (future)
|
||||
|
||||
**Implementation:**
|
||||
- New `owlen-core::plugins` module
|
||||
- Plugin manifest parser
|
||||
- Installation/removal logic
|
||||
- Sandboxing for plugin code
|
||||
|
||||
**Files to create:**
|
||||
- `crates/owlen-core/src/plugins.rs`
|
||||
- `crates/owlen-cli/src/commands/plugin.rs`
|
||||
|
||||
### Phase 18: Extended Thinking Modes (MEDIUM PRIORITY)
|
||||
|
||||
**Goal**: Progressive computation budgets for complex tasks
|
||||
|
||||
**Modes:**
|
||||
- `think` - basic extended thinking
|
||||
- `think hard` - increased computation
|
||||
- `think harder` - more computation
|
||||
- `ultrathink` - maximum budget
|
||||
|
||||
**Implementation:**
|
||||
- Extend `owlen-core::types::ChatParameters`
|
||||
- Add thinking mode to TUI commands
|
||||
- Configure per-provider max tokens
|
||||
|
||||
**Files to modify:**
|
||||
- `crates/owlen-core/src/types.rs`
|
||||
- `crates/owlen-tui/src/command_parser.rs`
|
||||
|
||||
### Phase 19: Git Workflow Automation (MEDIUM PRIORITY)
|
||||
|
||||
**Goal**: Streamline common Git operations
|
||||
|
||||
**Features:**
|
||||
1. Auto-commit message generation
|
||||
2. PR creation via `gh` CLI
|
||||
3. Rebase conflict resolution
|
||||
4. File revert operations
|
||||
5. Git history analysis
|
||||
|
||||
**Implementation:**
|
||||
- New `owlen-mcp-git-server` crate
|
||||
- Tools: `commit`, `create_pr`, `rebase`, `revert`, `history`
|
||||
- Integration with TUI commands
|
||||
|
||||
**Files to create:**
|
||||
- `crates/owlen-mcp-git-server/src/lib.rs`
|
||||
|
||||
### Phase 20: Enterprise Features (LOW PRIORITY)
|
||||
|
||||
**Goal**: Team and enterprise deployment support
|
||||
|
||||
**Features:**
|
||||
1. **Managed Configuration**
|
||||
- `/etc/owlen/managed-mcp.json` (Linux)
|
||||
- Restrict user additions with `useEnterpriseMcpConfigOnly`
|
||||
|
||||
2. **Audit Logging**
|
||||
- Log all file writes and shell commands
|
||||
- Structured JSON logs
|
||||
- Tamper-proof storage
|
||||
|
||||
3. **Team Collaboration**
|
||||
- Shared OWLEN.md across team
|
||||
- Project-scoped MCP servers in `.mcp.json`
|
||||
- Approval policy enforcement
|
||||
|
||||
**Implementation:**
|
||||
- Extend `owlen-core::config` with managed settings
|
||||
- New `owlen-core::audit` module
|
||||
- Enterprise deployment documentation
|
||||
|
||||
## Testing Requirements
|
||||
|
||||
### Test Coverage Goals
|
||||
|
||||
- **Unit tests**: 80%+ coverage for `owlen-core`
|
||||
- **Integration tests**: All MCP servers, providers
|
||||
- **TUI tests**: Key workflows (not pixel-perfect)
|
||||
|
||||
### Test Organization
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::provider::test_utils::MockProvider;
|
||||
use crate::mcp::test_utils::MockMcpClient;
|
||||
|
||||
#[test]
|
||||
fn test_feature() {
|
||||
// Setup
|
||||
let provider = MockProvider::new();
|
||||
|
||||
// Execute
|
||||
let result = provider.chat(request).await;
|
||||
|
||||
// Assert
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
cargo test --all # All tests
|
||||
cargo test --lib -p owlen-core # Core library tests
|
||||
cargo test --test integration # Integration tests
|
||||
```
|
||||
|
||||
## Documentation Standards
|
||||
|
||||
### Code Documentation
|
||||
|
||||
1. **Module-level** (`//!` at top of file):
|
||||
```rust
|
||||
//! Brief module description
|
||||
//!
|
||||
//! Detailed explanation of module purpose,
|
||||
//! key types, and usage examples.
|
||||
```
|
||||
|
||||
2. **Public APIs** (`///` above items):
|
||||
```rust
|
||||
/// Brief description
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `arg1` - Description
|
||||
///
|
||||
/// # Returns
|
||||
/// Description of return value
|
||||
///
|
||||
/// # Errors
|
||||
/// When this function returns an error
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let result = function(arg);
|
||||
/// ```
|
||||
pub fn function(arg: Type) -> Result<Output> {
|
||||
// implementation
|
||||
}
|
||||
```
|
||||
|
||||
3. **Private items**: Optional, use for complex logic
|
||||
|
||||
### User Documentation
|
||||
|
||||
Location: `docs/` directory
|
||||
|
||||
Files to maintain:
|
||||
- `architecture.md` - System design
|
||||
- `configuration.md` - Config reference
|
||||
- `migration-guide.md` - Version upgrades
|
||||
- `troubleshooting.md` - Common issues
|
||||
- `provider-implementation.md` - Adding providers
|
||||
- `faq.md` - Frequently asked questions
|
||||
|
||||
## Git Workflow
|
||||
|
||||
### Branch Strategy
|
||||
|
||||
- `main` - stable releases only
|
||||
- `dev` - active development (default)
|
||||
- `feature/*` - new features
|
||||
- `fix/*` - bug fixes
|
||||
- `docs/*` - documentation only
|
||||
|
||||
### Commit Messages
|
||||
|
||||
Follow conventional commits:
|
||||
|
||||
```
|
||||
type(scope): brief description
|
||||
|
||||
Detailed explanation of changes.
|
||||
|
||||
Breaking changes, if any.
|
||||
|
||||
🤖 Generated with [Claude Code](https://claude.com/claude-code)
|
||||
|
||||
Co-Authored-By: Claude <noreply@anthropic.com>
|
||||
```
|
||||
|
||||
Types: `feat`, `fix`, `docs`, `refactor`, `test`, `chore`
|
||||
|
||||
### Pre-commit Hooks
|
||||
|
||||
Automatically run:
|
||||
- `cargo fmt` (formatting)
|
||||
- `cargo check` (compilation)
|
||||
- `cargo clippy` (linting)
|
||||
- YAML/TOML validation
|
||||
- Trailing whitespace removal
|
||||
|
||||
## Performance Guidelines
|
||||
|
||||
### Optimization Priorities
|
||||
|
||||
1. **Startup time**: < 500ms cold start
|
||||
2. **First token latency**: < 2s for local models
|
||||
3. **Memory usage**: < 100MB base, < 500MB with conversation
|
||||
4. **Responsiveness**: TUI redraws < 16ms (60 FPS)
|
||||
|
||||
### Profiling
|
||||
|
||||
```bash
|
||||
cargo build --release --features profiling
|
||||
valgrind --tool=callgrind target/release/owlen
|
||||
kcachegrind callgrind.out.*
|
||||
```
|
||||
|
||||
### Async Performance
|
||||
|
||||
- Avoid blocking in async contexts
|
||||
- Use `tokio::spawn` for CPU-intensive work
|
||||
- Set timeouts on all network operations
|
||||
- Cancel tasks on shutdown
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Threat Model
|
||||
|
||||
**Trusted:**
|
||||
- User's local machine
|
||||
- User-installed Ollama models
|
||||
- User configuration files
|
||||
|
||||
**Untrusted:**
|
||||
- MCP server responses
|
||||
- Web search results
|
||||
- Code execution output
|
||||
- Cloud LLM responses
|
||||
|
||||
### Security Measures
|
||||
|
||||
1. **Input Validation**
|
||||
- Sanitize all MCP tool arguments
|
||||
- Validate JSON schemas strictly
|
||||
- Escape shell commands
|
||||
|
||||
2. **Sandboxing**
|
||||
- Docker for code execution
|
||||
- Network isolation
|
||||
- Filesystem restrictions
|
||||
|
||||
3. **Secrets Management**
|
||||
- Never log API keys
|
||||
- Use environment variables
|
||||
- Encrypt sensitive config fields
|
||||
|
||||
4. **Dependency Auditing**
|
||||
```bash
|
||||
cargo audit
|
||||
cargo deny check
|
||||
```
|
||||
|
||||
## Debugging Tips
|
||||
|
||||
### Enable Debug Logging
|
||||
|
||||
```bash
|
||||
OWLEN_DEBUG_OLLAMA=1 owlen # Ollama requests
|
||||
RUST_LOG=debug owlen # All debug logs
|
||||
RUST_BACKTRACE=1 owlen # Stack traces
|
||||
```
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Timeout on Ollama**
|
||||
- Check `ollama ps` for loaded models
|
||||
- Increase timeout in config
|
||||
- Restart Ollama service
|
||||
|
||||
2. **MCP Server Not Found**
|
||||
- Verify `mcp_servers` config
|
||||
- Check server binary exists
|
||||
- Test server manually with STDIO
|
||||
|
||||
3. **TUI Rendering Issues**
|
||||
- Test in different terminals
|
||||
- Check terminal size (`tput cols; tput lines`)
|
||||
- Verify theme compatibility
|
||||
|
||||
## Contributing
|
||||
|
||||
### Before Submitting PR
|
||||
|
||||
1. Run full test suite: `cargo test --all`
|
||||
2. Check formatting: `cargo fmt -- --check`
|
||||
3. Run linter: `cargo clippy -- -D warnings`
|
||||
4. Update documentation if API changed
|
||||
5. Add tests for new features
|
||||
6. Update CHANGELOG.md
|
||||
|
||||
### PR Description Template
|
||||
|
||||
```markdown
|
||||
## Summary
|
||||
Brief description of changes
|
||||
|
||||
## Type of Change
|
||||
- [ ] Bug fix
|
||||
- [ ] New feature
|
||||
- [ ] Breaking change
|
||||
- [ ] Documentation update
|
||||
|
||||
## Testing
|
||||
Describe tests performed
|
||||
|
||||
## Checklist
|
||||
- [ ] Tests added/updated
|
||||
- [ ] Documentation updated
|
||||
- [ ] CHANGELOG.md updated
|
||||
- [ ] No clippy warnings
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
### External Documentation
|
||||
|
||||
- [Ratatui Docs](https://ratatui.rs/)
|
||||
- [Tokio Tutorial](https://tokio.rs/tokio/tutorial)
|
||||
- [MCP Specification](https://modelcontextprotocol.io/)
|
||||
- [Ollama API](https://github.com/ollama/ollama/blob/main/docs/api.md)
|
||||
|
||||
### Internal Documentation
|
||||
|
||||
- `.agents/new_phases.md` - 10-phase migration plan (completed)
|
||||
- `docs/phase5-mode-system.md` - Mode system design
|
||||
- `docs/migration-guide.md` - v0.x → v1.0 migration
|
||||
|
||||
### Community
|
||||
|
||||
- GitHub Issues: Bug reports and feature requests
|
||||
- GitHub Discussions: Questions and ideas
|
||||
- AUR Package: `owlen-git` (Arch Linux)
|
||||
|
||||
## Version History
|
||||
|
||||
- **v1.0.0** (current) - MCP-only architecture, Phase 10 complete
|
||||
- **v0.2.0** - Added web search, code execution servers
|
||||
- **v0.1.0** - Initial release with Ollama support
|
||||
|
||||
## License
|
||||
|
||||
Owlen is open source software. See LICENSE file for details.
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-10-11
|
||||
**Maintained By**: Owlen Development Team
|
||||
**For AI Agents**: Follow these guidelines when modifying Owlen codebase. Prioritize MCP client enhancement (Phase 11) and approval system (Phase 12) for feature parity with Codex/Claude Code while maintaining local-first philosophy.
|
||||
33
CHANGELOG.md
33
CHANGELOG.md
@@ -11,12 +11,45 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
- Comprehensive documentation suite including guides for architecture, configuration, testing, and more.
|
||||
- Rustdoc examples for core components like `Provider` and `SessionController`.
|
||||
- Module-level documentation for `owlen-tui`.
|
||||
- Provider integration tests (`crates/owlen-providers/tests`) covering registration, routing, and health status handling for the new `ProviderManager`.
|
||||
- TUI message and generation tests that exercise the non-blocking event loop, background worker, and message dispatch.
|
||||
- Ollama integration can now talk to Ollama Cloud when an API key is configured.
|
||||
- Ollama provider will also read `OLLAMA_API_KEY` / `OLLAMA_CLOUD_API_KEY` environment variables when no key is stored in the config.
|
||||
- `owlen config doctor`, `owlen config path`, and `owlen upgrade` CLI commands to automate migrations and surface manual update steps.
|
||||
- Startup provider health check with actionable hints when Ollama or remote MCP servers are unavailable.
|
||||
- `dev/check-windows.sh` helper script for on-demand Windows cross-checks.
|
||||
- Global F1 keybinding for the in-app help overlay and a clearer status hint on launch.
|
||||
- Automatic fallback to the new `ansi_basic` theme when the active terminal only advertises 16-color support.
|
||||
- Offline provider shim that keeps the TUI usable while primary providers are unreachable and communicates recovery steps inline.
|
||||
- `owlen cloud` subcommands (`setup`, `status`, `models`, `logout`) for managing Ollama Cloud credentials without hand-editing config files.
|
||||
- Tabbed model selector that separates local and cloud providers, including cloud indicators in the UI.
|
||||
- Footer status line includes provider connectivity/credential summaries (e.g., cloud auth failures, missing API keys).
|
||||
- Secure credential vault integration for Ollama Cloud API keys when `privacy.encrypt_local_data = true`.
|
||||
- Input panel respects a new `ui.input_max_rows` setting so long prompts expand predictably before scrolling kicks in.
|
||||
- Command palette offers fuzzy `:model` filtering and `:provider` completions for fast switching.
|
||||
- Message rendering caches wrapped lines and throttles streaming redraws to keep the TUI responsive on long sessions.
|
||||
- Model picker badges now inspect provider capabilities so vision/audio/thinking models surface the correct icons even when descriptions are sparse.
|
||||
- Chat history honors `ui.scrollback_lines`, trimming older rows to keep the TUI responsive and surfacing a "↓ New messages" badge whenever updates land off-screen.
|
||||
|
||||
### Changed
|
||||
- The main `README.md` has been updated to be more concise and link to the new documentation.
|
||||
- Default configuration now pre-populates both `providers.ollama` and `providers.ollama-cloud` entries so switching between local and cloud backends is a single setting change.
|
||||
- `McpMode` support was restored with explicit validation; `remote_only`, `remote_preferred`, and `local_only` now behave predictably.
|
||||
- Configuration loading performs structural validation and fails fast on missing default providers or invalid MCP definitions.
|
||||
- Ollama provider error handling now distinguishes timeouts, missing models, and authentication failures.
|
||||
- `owlen` warns when the active terminal likely lacks 256-color support.
|
||||
- `config.toml` now carries a schema version (`1.2.0`) and is migrated automatically; deprecated keys such as `agent.max_tool_calls` trigger warnings instead of hard failures.
|
||||
- Model selector navigation (Tab/Shift-Tab) now switches between local and cloud tabs while preserving selection state.
|
||||
- Header displays the active model together with its provider (e.g., `Model (Provider)`), improving clarity when swapping backends.
|
||||
- Documentation refreshed to cover the message handler architecture, the background health worker, multi-provider configuration, and the new provider onboarding checklist.
|
||||
|
||||
---
|
||||
|
||||
## [0.1.11] - 2025-10-18
|
||||
|
||||
### Changed
|
||||
- Bump workspace packages and distribution metadata to version `0.1.11`.
|
||||
|
||||
## [0.1.10] - 2025-10-03
|
||||
|
||||
### Added
|
||||
|
||||
@@ -10,6 +10,10 @@ This project and everyone participating in it is governed by the [Owlen Code of
|
||||
|
||||
## How Can I Contribute?
|
||||
|
||||
### Repository map
|
||||
|
||||
Need a quick orientation before diving in? Start with the curated [repo map](docs/repo-map.md) for a two-level directory overview. If you move folders around, regenerate it with `scripts/gen-repo-map.sh`.
|
||||
|
||||
### Reporting Bugs
|
||||
|
||||
This is one of the most helpful ways you can contribute. Before creating a bug report, please check a few things:
|
||||
@@ -40,6 +44,7 @@ The process for submitting a pull request is as follows:
|
||||
6. **Add a clear, concise commit message.** We follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification.
|
||||
7. **Push to your fork** and submit a pull request to Owlen's `main` branch.
|
||||
8. **Include a clear description** of the problem and solution. Include the relevant issue number if applicable.
|
||||
9. **Declare AI assistance.** If any part of the patch was generated with an AI tool (e.g., ChatGPT, Claude Code), call that out in the PR description. A human maintainer must review and approve AI-assisted changes before merge.
|
||||
|
||||
## Development Setup
|
||||
|
||||
|
||||
34
Cargo.toml
34
Cargo.toml
@@ -4,13 +4,20 @@ members = [
|
||||
"crates/owlen-core",
|
||||
"crates/owlen-tui",
|
||||
"crates/owlen-cli",
|
||||
"crates/owlen-ollama",
|
||||
"crates/owlen-providers",
|
||||
"crates/mcp/server",
|
||||
"crates/mcp/llm-server",
|
||||
"crates/mcp/client",
|
||||
"crates/mcp/code-server",
|
||||
"crates/mcp/prompt-server",
|
||||
"crates/owlen-markdown",
|
||||
"xtask",
|
||||
]
|
||||
exclude = []
|
||||
|
||||
[workspace.package]
|
||||
version = "0.1.9"
|
||||
edition = "2021"
|
||||
version = "0.1.11"
|
||||
edition = "2024"
|
||||
authors = ["Owlibou"]
|
||||
license = "AGPL-3.0"
|
||||
repository = "https://somegit.dev/Owlibou/owlen"
|
||||
@@ -34,12 +41,28 @@ tui-textarea = "0.6"
|
||||
# HTTP client and JSON handling
|
||||
reqwest = { version = "0.12", default-features = false, features = ["json", "stream", "rustls-tls"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
serde_json = { version = "1.0" }
|
||||
|
||||
# Utilities
|
||||
uuid = { version = "1.0", features = ["v4", "serde"] }
|
||||
anyhow = "1.0"
|
||||
thiserror = "1.0"
|
||||
thiserror = "2.0"
|
||||
nix = "0.29"
|
||||
which = "6.0"
|
||||
tempfile = "3.8"
|
||||
jsonschema = "0.17"
|
||||
aes-gcm = "0.10"
|
||||
ring = "0.17"
|
||||
keyring = "3.0"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
urlencoding = "2.1"
|
||||
regex = "1.10"
|
||||
rpassword = "7.3"
|
||||
sqlx = { version = "0.7", default-features = false, features = ["runtime-tokio-rustls", "sqlite", "macros", "uuid", "chrono", "migrate"] }
|
||||
log = "0.4"
|
||||
dirs = "5.0"
|
||||
serde_yaml = "0.9"
|
||||
handlebars = "6.0"
|
||||
|
||||
# Configuration
|
||||
toml = "0.8"
|
||||
@@ -58,7 +81,6 @@ async-trait = "0.1"
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
|
||||
# Dev dependencies
|
||||
tempfile = "3.8"
|
||||
tokio-test = "0.4"
|
||||
|
||||
# For more keys and their definitions, see https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
2
PKGBUILD
2
PKGBUILD
@@ -1,6 +1,6 @@
|
||||
# Maintainer: vikingowl <christian@nachtigall.dev>
|
||||
pkgname=owlen
|
||||
pkgver=0.1.9
|
||||
pkgver=0.1.11
|
||||
pkgrel=1
|
||||
pkgdesc="Terminal User Interface LLM client for Ollama with chat and code assistance features"
|
||||
arch=('x86_64')
|
||||
|
||||
109
README.md
109
README.md
@@ -3,16 +3,17 @@
|
||||
> Terminal-native assistant for running local language models with a comfortable TUI.
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
## What Is OWLEN?
|
||||
|
||||
OWLEN is a Rust-powered, terminal-first interface for interacting with local large
|
||||
language models. It provides a responsive chat workflow that runs against
|
||||
[Ollama](https://ollama.com/) with a focus on developer productivity, vim-style navigation,
|
||||
and seamless session management—all without leaving your terminal.
|
||||
OWLEN is a Rust-powered, terminal-first interface for interacting with local and cloud
|
||||
language models. It provides a responsive chat workflow that now routes through a
|
||||
multi-provider manager—handling local Ollama, Ollama Cloud, and future MCP-backed providers—
|
||||
with a focus on developer productivity, vim-style navigation, and seamless session
|
||||
management—all without leaving your terminal.
|
||||
|
||||
## Alpha Status
|
||||
|
||||
@@ -30,8 +31,22 @@ The OWLEN interface features a clean, multi-panel layout with vim-inspired navig
|
||||
- **Streaming Responses**: Real-time token streaming from Ollama.
|
||||
- **Advanced Text Editing**: Multi-line input, history, and clipboard support.
|
||||
- **Session Management**: Save, load, and manage conversations.
|
||||
- **Code Side Panel**: Switch to code mode (`:mode code`) and open files inline with `:open <path>` for LLM-assisted coding.
|
||||
- **Theming System**: 10 built-in themes and support for custom themes.
|
||||
- **Modular Architecture**: Extensible provider system (currently Ollama).
|
||||
- **Modular Architecture**: Extensible provider system orchestrated by the new `ProviderManager`, ready for additional MCP-backed providers.
|
||||
- **Dual-Source Model Picker**: Merge local and cloud catalogues with real-time availability badges powered by the background health worker.
|
||||
- **Non-Blocking UI Loop**: Asynchronous generation tasks and provider health checks run off-thread, keeping the TUI responsive even while streaming long replies.
|
||||
- **Guided Setup**: `owlen config doctor` upgrades legacy configs and verifies your environment in seconds.
|
||||
|
||||
## Security & Privacy
|
||||
|
||||
Owlen is designed to keep data local by default while still allowing controlled access to remote tooling.
|
||||
|
||||
- **Local-first execution**: All LLM calls flow through the bundled MCP LLM server which talks to a local Ollama instance. If the server is unreachable, Owlen stays usable in “offline mode” and surfaces clear recovery instructions.
|
||||
- **Sandboxed tooling**: Code execution runs in Docker according to the MCP Code Server settings, and future releases will extend this to other OS-level sandboxes (`sandbox-exec` on macOS, Windows job objects).
|
||||
- **Session storage**: Conversations are stored under the platform data directory and can be encrypted at rest. Set `privacy.encrypt_local_data = true` in `config.toml` to enable AES-GCM storage protected by a user-supplied passphrase.
|
||||
- **Network access**: No telemetry is sent. The only outbound requests occur when you explicitly enable remote tooling (e.g., web search) or configure a cloud LLM provider. Each tool is opt-in via `privacy` and `tools` configuration sections.
|
||||
- **Config migrations**: Every saved `config.toml` carries a schema version and is upgraded automatically; deprecated keys trigger warnings so security-related settings are not silently ignored.
|
||||
|
||||
## Getting Started
|
||||
|
||||
@@ -42,18 +57,28 @@ The OWLEN interface features a clean, multi-panel layout with vim-inspired navig
|
||||
|
||||
### Installation
|
||||
|
||||
#### Linux & macOS
|
||||
The recommended way to install on Linux and macOS is to clone the repository and install using `cargo`.
|
||||
Pick the option that matches your platform and appetite for source builds:
|
||||
|
||||
| Platform | Package / Command | Notes |
|
||||
| --- | --- | --- |
|
||||
| Arch Linux | `yay -S owlen-git` | Builds from the latest `dev` branch via AUR. |
|
||||
| Other Linux | `cargo install --path crates/owlen-cli --locked --force` | Requires Rust 1.75+ and a running Ollama daemon. |
|
||||
| macOS | `cargo install --path crates/owlen-cli --locked --force` | macOS 12+ tested. Install Ollama separately (`brew install ollama`). The binary links against the system OpenSSL – ensure Command Line Tools are installed. |
|
||||
| Windows (experimental) | `cargo install --path crates/owlen-cli --locked --force` | Enable the GNU toolchain (`rustup target add x86_64-pc-windows-gnu`) and install Ollama for Windows preview builds. Some optional tools (e.g., Docker-based code execution) are currently disabled. |
|
||||
|
||||
If you prefer containerised builds, use the provided `Dockerfile` as a base image and copy out `target/release/owlen`.
|
||||
|
||||
Run the helper scripts to sanity-check platform coverage:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/Owlibou/owlen.git
|
||||
cd owlen
|
||||
cargo install --path crates/owlen-cli
|
||||
```
|
||||
**Note for macOS**: While this method works, official binary releases for macOS are planned for the future.
|
||||
# Windows compatibility smoke test (GNU toolchain)
|
||||
scripts/check-windows.sh
|
||||
|
||||
#### Windows
|
||||
The Windows build has not been thoroughly tested yet. Installation is possible via the same `cargo install` method, but it is considered experimental at this time.
|
||||
# Reproduce CI packaging locally (choose a target from .woodpecker.yml)
|
||||
dev/local_build.sh x86_64-unknown-linux-gnu
|
||||
```
|
||||
|
||||
> **Tip (macOS):** On the first launch macOS Gatekeeper may quarantine the binary. Clear the attribute (`xattr -d com.apple.quarantine $(which owlen)`) or build from source locally to avoid notarisation prompts.
|
||||
|
||||
### Running OWLEN
|
||||
|
||||
@@ -66,13 +91,26 @@ If you built from source without installing, you can run it with:
|
||||
./target/release/owlen
|
||||
```
|
||||
|
||||
### Updating
|
||||
|
||||
Owlen does not auto-update. Run `owlen upgrade` at any time to print the recommended manual steps (pull the repository and reinstall with `cargo install --path crates/owlen-cli --force`). Arch Linux users can update via the `owlen-git` AUR package.
|
||||
|
||||
## Using the TUI
|
||||
|
||||
OWLEN uses a modal, vim-inspired interface. Press `?` in Normal mode to view the help screen with all keybindings.
|
||||
OWLEN uses a modal, vim-inspired interface. Press `F1` (available from any mode) or `?` in Normal mode to view the help screen with all keybindings.
|
||||
|
||||
- **Normal Mode**: Navigate with `h/j/k/l`, `w/b`, `gg/G`.
|
||||
- **Editing Mode**: Enter with `i` or `a`. Send messages with `Enter`.
|
||||
- **Command Mode**: Enter with `:`. Access commands like `:quit`, `:save`, `:theme`.
|
||||
- **Command Mode**: Enter with `:`. Access commands like `:quit`, `:w`, `:session save`, `:theme`.
|
||||
- **Quick Exit**: Press `Ctrl+C` twice in Normal mode to quit quickly (first press still cancels active generations).
|
||||
- **Tutorial Command**: Type `:tutorial` any time for a quick summary of the most important keybindings.
|
||||
- **MCP Slash Commands**: Owlen auto-registers zero-argument MCP tools as slash commands—type `/mcp__github__list_prs` (for example) to pull remote context directly into the chat log.
|
||||
|
||||
Model discovery commands worth remembering:
|
||||
|
||||
- `:models --local` or `:models --cloud` jump directly to the corresponding section in the picker.
|
||||
- `:cloud setup [--force-cloud-base-url]` stores your cloud API key without clobbering an existing local base URL (unless you opt in with the flag).
|
||||
When a catalogue is unreachable, Owlen now tags the picker with `Local unavailable` / `Cloud unavailable` so you can recover without guessing.
|
||||
|
||||
## Documentation
|
||||
|
||||
@@ -82,17 +120,47 @@ For more detailed information, please refer to the following documents:
|
||||
- **[CHANGELOG.md](CHANGELOG.md)**: A log of changes for each version.
|
||||
- **[docs/architecture.md](docs/architecture.md)**: An overview of the project's architecture.
|
||||
- **[docs/troubleshooting.md](docs/troubleshooting.md)**: Help with common issues.
|
||||
- **[docs/provider-implementation.md](docs/provider-implementation.md)**: A guide for adding new providers.
|
||||
- **[docs/repo-map.md](docs/repo-map.md)**: Snapshot of the workspace layout and key crates.
|
||||
- **[docs/provider-implementation.md](docs/provider-implementation.md)**: Trait-level details for implementing providers.
|
||||
- **[docs/adding-providers.md](docs/adding-providers.md)**: Step-by-step checklist for wiring a provider into the multi-provider architecture and test suite.
|
||||
- **Experimental providers staging area**: [crates/providers/experimental/README.md](crates/providers/experimental/README.md) records the placeholder crates (OpenAI, Anthropic, Gemini) and their current status.
|
||||
- **[docs/platform-support.md](docs/platform-support.md)**: Current OS support matrix and cross-check instructions.
|
||||
|
||||
## Configuration
|
||||
|
||||
OWLEN stores its configuration in `~/.config/owlen/config.toml`. This file is created on the first run and can be customized. You can also add custom themes in `~/.config/owlen/themes/`.
|
||||
OWLEN stores its configuration in the standard platform-specific config directory:
|
||||
|
||||
| Platform | Location |
|
||||
|----------|----------|
|
||||
| Linux | `~/.config/owlen/config.toml` |
|
||||
| macOS | `~/Library/Application Support/owlen/config.toml` |
|
||||
| Windows | `%APPDATA%\owlen\config.toml` |
|
||||
|
||||
Use `owlen config path` to print the exact location on your machine and `owlen config doctor` to migrate a legacy config automatically.
|
||||
You can also add custom themes alongside the config directory (e.g., `~/.config/owlen/themes/`).
|
||||
|
||||
See the [themes/README.md](themes/README.md) for more details on theming.
|
||||
|
||||
## Testing
|
||||
|
||||
Owlen uses standard Rust tooling for verification. Run the full test suite with:
|
||||
|
||||
```bash
|
||||
cargo test
|
||||
```
|
||||
|
||||
Unit tests cover the command palette state machine, agent response parsing, and key MCP abstractions. Formatting and lint checks can be run with `cargo fmt --all` and `cargo clippy` respectively.
|
||||
|
||||
## Roadmap
|
||||
|
||||
We are actively working on enhancing the code client, adding more providers (OpenAI, Anthropic), and improving the overall user experience. See the [Roadmap section in the old README](https://github.com/Owlibou/owlen/blob/main/README.md?plain=1#L295) for more details.
|
||||
Upcoming milestones focus on feature parity with modern code assistants while keeping Owlen local-first:
|
||||
|
||||
1. **Phase 11 – MCP client enhancements**: `owlen mcp add/list/remove`, resource references (`@github:issue://123`), and MCP prompt slash commands.
|
||||
2. **Phase 12 – Approval & sandboxing**: Three-tier approval modes plus platform-specific sandboxes (Docker, `sandbox-exec`, Windows job objects).
|
||||
3. **Phase 13 – Project documentation system**: Automatic `OWLEN.md` generation, contextual updates, and nested project support.
|
||||
4. **Phase 15 – Provider expansion**: OpenAI, Anthropic, and other cloud providers layered onto the existing Ollama-first architecture.
|
||||
|
||||
See `AGENTS.md` for the long-form roadmap and design notes.
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -101,3 +169,4 @@ Contributions are highly welcome! Please see our **[Contributing Guide](CONTRIBU
|
||||
## License
|
||||
|
||||
This project is licensed under the GNU Affero General Public License v3.0. See the [LICENSE](LICENSE) file for details.
|
||||
For commercial or proprietary integrations that cannot adopt AGPL, please reach out to the maintainers to discuss alternative licensing arrangements.
|
||||
|
||||
21
SECURITY.md
21
SECURITY.md
@@ -17,3 +17,24 @@ To report a security vulnerability, please email the project lead at [security@o
|
||||
You will receive a response from us within 48 hours. If the issue is confirmed, we will release a patch as soon as possible, depending on the complexity of the issue.
|
||||
|
||||
Please do not report security vulnerabilities through public GitHub issues.
|
||||
|
||||
## Design Overview
|
||||
|
||||
Owlen ships with a local-first architecture:
|
||||
|
||||
- **Process isolation** – The TUI speaks to language models through a separate MCP LLM server. Tool execution (code, web, filesystem) occurs in dedicated MCP processes so a crash or hang cannot take down the UI.
|
||||
- **Sandboxing** – The MCP Code Server executes snippets in Docker containers. Upcoming releases will extend this to platform sandboxes (`sandbox-exec` on macOS, Windows job objects) as described in our roadmap.
|
||||
- **Network posture** – No telemetry is emitted. The application only reaches the network when a user explicitly enables remote tools (web search, remote MCP servers) or configures cloud providers. All tools require allow-listing in `config.toml`.
|
||||
|
||||
## Data Handling
|
||||
|
||||
- **Sessions** – Conversations are stored in the user’s data directory (`~/.local/share/owlen` on Linux, equivalent paths on macOS/Windows). Enable `privacy.encrypt_local_data = true` to wrap the session store in AES-GCM encryption protected by a passphrase (`OWLEN_MASTER_PASSWORD` or an interactive prompt).
|
||||
- **Credentials** – API tokens are resolved from the config file or environment variables at runtime and are never written to logs.
|
||||
- **Remote calls** – When remote search or cloud LLM tooling is on, only the minimum payload (prompt, tool arguments) is sent. All outbound requests go through the MCP servers so they can be audited or disabled centrally.
|
||||
|
||||
## Supply-Chain Safeguards
|
||||
|
||||
- The repository includes a git `pre-commit` configuration that runs `cargo fmt`, `cargo check`, and `cargo clippy -- -D warnings` on every commit.
|
||||
- Pull requests generated with the assistance of AI tooling must receive manual maintainer review before merging. Contributors are asked to declare AI involvement in their PR description so maintainers can double-check the changes.
|
||||
|
||||
Additional recommendations for operators (e.g., running Owlen on shared systems) are maintained in `docs/security.md` (planned) and the issue tracker.
|
||||
|
||||
29
config.toml
Normal file
29
config.toml
Normal file
@@ -0,0 +1,29 @@
|
||||
[general]
|
||||
default_provider = "ollama_local"
|
||||
default_model = "llama3.2:latest"
|
||||
|
||||
[privacy]
|
||||
encrypt_local_data = true
|
||||
|
||||
[providers.ollama_local]
|
||||
enabled = true
|
||||
provider_type = "ollama"
|
||||
base_url = "http://localhost:11434"
|
||||
|
||||
[providers.ollama_cloud]
|
||||
enabled = false
|
||||
provider_type = "ollama_cloud"
|
||||
base_url = "https://ollama.com"
|
||||
api_key_env = "OLLAMA_CLOUD_API_KEY"
|
||||
|
||||
[providers.openai]
|
||||
enabled = false
|
||||
provider_type = "openai"
|
||||
base_url = "https://api.openai.com/v1"
|
||||
api_key_env = "OPENAI_API_KEY"
|
||||
|
||||
[providers.anthropic]
|
||||
enabled = false
|
||||
provider_type = "anthropic"
|
||||
base_url = "https://api.anthropic.com/v1"
|
||||
api_key_env = "ANTHROPIC_API_KEY"
|
||||
12
crates/mcp/client/Cargo.toml
Normal file
12
crates/mcp/client/Cargo.toml
Normal file
@@ -0,0 +1,12 @@
|
||||
[package]
|
||||
name = "owlen-mcp-client"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
description = "Dedicated MCP client library for Owlen, exposing remote MCP server communication"
|
||||
license = "AGPL-3.0"
|
||||
|
||||
[dependencies]
|
||||
owlen-core = { path = "../../owlen-core" }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
17
crates/mcp/client/src/lib.rs
Normal file
17
crates/mcp/client/src/lib.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
//! Owlen MCP client library.
//!
//! This crate provides a thin façade over the remote MCP client implementation
//! inside `owlen-core`. It re‑exports the most useful types so downstream
//! crates can depend only on `owlen-mcp-client` without pulling in the entire
//! core crate internals.

// Configuration types for scoped MCP server definitions.
pub use owlen_core::config::{McpConfigScope, ScopedMcpServer};
// Concrete client implementation for talking to remote MCP servers.
pub use owlen_core::mcp::remote_client::RemoteMcpClient;
// Core MCP protocol types (client trait, tool call/descriptor/response).
pub use owlen_core::mcp::{McpClient, McpToolCall, McpToolDescriptor, McpToolResponse};

// Re‑export the core Provider trait so that the MCP client can also be used as an LLM provider.
pub use owlen_core::Provider as McpProvider;

// Note: The `RemoteMcpClient` type provides its own `new` constructor in the core
// crate. Users can call `RemoteMcpClient::new()` directly. No additional wrapper
// is needed here.
|
||||
22
crates/mcp/code-server/Cargo.toml
Normal file
22
crates/mcp/code-server/Cargo.toml
Normal file
@@ -0,0 +1,22 @@
|
||||
[package]
|
||||
name = "owlen-mcp-code-server"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
description = "MCP server exposing safe code execution tools for Owlen"
|
||||
license = "AGPL-3.0"
|
||||
|
||||
[dependencies]
|
||||
owlen-core = { path = "../../owlen-core" }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
bollard = "0.17"
|
||||
tempfile = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
|
||||
[lib]
|
||||
name = "owlen_mcp_code_server"
|
||||
path = "src/lib.rs"
|
||||
186
crates/mcp/code-server/src/lib.rs
Normal file
186
crates/mcp/code-server/src/lib.rs
Normal file
@@ -0,0 +1,186 @@
|
||||
//! MCP server exposing code execution tools with Docker sandboxing.
|
||||
//!
|
||||
//! This server provides:
|
||||
//! - compile_project: Build projects (Rust, Node.js, Python)
|
||||
//! - run_tests: Execute test suites
|
||||
//! - format_code: Run code formatters
|
||||
//! - lint_code: Run linters
|
||||
|
||||
pub mod sandbox;
|
||||
pub mod tools;
|
||||
|
||||
use owlen_core::mcp::protocol::{
|
||||
ErrorCode, InitializeParams, InitializeResult, PROTOCOL_VERSION, RequestId, RpcError,
|
||||
RpcErrorResponse, RpcRequest, RpcResponse, ServerCapabilities, ServerInfo, methods,
|
||||
};
|
||||
use owlen_core::tools::{Tool, ToolResult};
|
||||
use serde_json::{Value, json};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{self, AsyncBufReadExt, AsyncWriteExt};
|
||||
|
||||
use tools::{CompileProjectTool, FormatCodeTool, LintCodeTool, RunTestsTool};
|
||||
|
||||
/// Tool registry for the code server
|
||||
#[allow(dead_code)]
|
||||
struct ToolRegistry {
|
||||
tools: HashMap<String, Box<dyn Tool + Send + Sync>>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl ToolRegistry {
|
||||
fn new() -> Self {
|
||||
let mut tools: HashMap<String, Box<dyn Tool + Send + Sync>> = HashMap::new();
|
||||
tools.insert(
|
||||
"compile_project".to_string(),
|
||||
Box::new(CompileProjectTool::new()),
|
||||
);
|
||||
tools.insert("run_tests".to_string(), Box::new(RunTestsTool::new()));
|
||||
tools.insert("format_code".to_string(), Box::new(FormatCodeTool::new()));
|
||||
tools.insert("lint_code".to_string(), Box::new(LintCodeTool::new()));
|
||||
Self { tools }
|
||||
}
|
||||
|
||||
fn list_tools(&self) -> Vec<owlen_core::mcp::McpToolDescriptor> {
|
||||
self.tools
|
||||
.values()
|
||||
.map(|tool| owlen_core::mcp::McpToolDescriptor {
|
||||
name: tool.name().to_string(),
|
||||
description: tool.description().to_string(),
|
||||
input_schema: tool.schema(),
|
||||
requires_network: tool.requires_network(),
|
||||
requires_filesystem: tool.requires_filesystem(),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
async fn execute(&self, name: &str, args: Value) -> Result<ToolResult, String> {
|
||||
self.tools
|
||||
.get(name)
|
||||
.ok_or_else(|| format!("Tool not found: {}", name))?
|
||||
.execute(args)
|
||||
.await
|
||||
.map_err(|e| e.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let mut stdin = io::BufReader::new(io::stdin());
|
||||
let mut stdout = io::stdout();
|
||||
|
||||
let registry = Arc::new(ToolRegistry::new());
|
||||
|
||||
loop {
|
||||
let mut line = String::new();
|
||||
match stdin.read_line(&mut line).await {
|
||||
Ok(0) => break, // EOF
|
||||
Ok(_) => {
|
||||
let req: RpcRequest = match serde_json::from_str(&line) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
let err = RpcErrorResponse::new(
|
||||
RequestId::Number(0),
|
||||
RpcError::parse_error(format!("Parse error: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let resp = handle_request(req.clone(), registry.clone()).await;
|
||||
match resp {
|
||||
Ok(r) => {
|
||||
let s = serde_json::to_string(&r)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
}
|
||||
Err(e) => {
|
||||
let err = RpcErrorResponse::new(req.id.clone(), e);
|
||||
let s = serde_json::to_string(&err)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Error reading stdin: {}", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
async fn handle_request(
|
||||
req: RpcRequest,
|
||||
registry: Arc<ToolRegistry>,
|
||||
) -> Result<RpcResponse, RpcError> {
|
||||
match req.method.as_str() {
|
||||
methods::INITIALIZE => {
|
||||
let params: InitializeParams =
|
||||
serde_json::from_value(req.params.unwrap_or_else(|| json!({})))
|
||||
.map_err(|e| RpcError::invalid_params(format!("Invalid init params: {}", e)))?;
|
||||
if !params.protocol_version.eq(PROTOCOL_VERSION) {
|
||||
return Err(RpcError::new(
|
||||
ErrorCode::INVALID_REQUEST,
|
||||
format!(
|
||||
"Incompatible protocol version. Client: {}, Server: {}",
|
||||
params.protocol_version, PROTOCOL_VERSION
|
||||
),
|
||||
));
|
||||
}
|
||||
let result = InitializeResult {
|
||||
protocol_version: PROTOCOL_VERSION.to_string(),
|
||||
server_info: ServerInfo {
|
||||
name: "owlen-mcp-code-server".to_string(),
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
},
|
||||
capabilities: ServerCapabilities {
|
||||
supports_tools: Some(true),
|
||||
supports_resources: Some(false),
|
||||
supports_streaming: Some(false),
|
||||
},
|
||||
};
|
||||
let payload = serde_json::to_value(result).map_err(|e| {
|
||||
RpcError::internal_error(format!("Failed to serialize initialize result: {}", e))
|
||||
})?;
|
||||
Ok(RpcResponse::new(req.id, payload))
|
||||
}
|
||||
methods::TOOLS_LIST => {
|
||||
let tools = registry.list_tools();
|
||||
Ok(RpcResponse::new(req.id, json!(tools)))
|
||||
}
|
||||
methods::TOOLS_CALL => {
|
||||
let call = serde_json::from_value::<owlen_core::mcp::McpToolCall>(
|
||||
req.params.unwrap_or_else(|| json!({})),
|
||||
)
|
||||
.map_err(|e| RpcError::invalid_params(format!("Invalid tool call: {}", e)))?;
|
||||
|
||||
let result: ToolResult = registry
|
||||
.execute(&call.name, call.arguments)
|
||||
.await
|
||||
.map_err(|e| RpcError::internal_error(format!("Tool execution failed: {}", e)))?;
|
||||
|
||||
let resp = owlen_core::mcp::McpToolResponse {
|
||||
name: call.name,
|
||||
success: result.success,
|
||||
output: result.output,
|
||||
metadata: result.metadata,
|
||||
duration_ms: result.duration.as_millis() as u128,
|
||||
};
|
||||
let payload = serde_json::to_value(resp).map_err(|e| {
|
||||
RpcError::internal_error(format!("Failed to serialize tool response: {}", e))
|
||||
})?;
|
||||
Ok(RpcResponse::new(req.id, payload))
|
||||
}
|
||||
_ => Err(RpcError::method_not_found(&req.method)),
|
||||
}
|
||||
}
|
||||
250
crates/mcp/code-server/src/sandbox.rs
Normal file
250
crates/mcp/code-server/src/sandbox.rs
Normal file
@@ -0,0 +1,250 @@
|
||||
//! Docker-based sandboxing for secure code execution
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use bollard::Docker;
|
||||
use bollard::container::{
|
||||
Config, CreateContainerOptions, RemoveContainerOptions, StartContainerOptions,
|
||||
WaitContainerOptions,
|
||||
};
|
||||
use bollard::models::{HostConfig, Mount, MountTypeEnum};
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
/// Result of executing code in a sandbox
#[derive(Debug, Clone)]
pub struct ExecutionResult {
    // Captured standard output of the sandboxed command (UTF-8 lossy).
    pub stdout: String,
    // Captured standard error of the sandboxed command (UTF-8 lossy).
    pub stderr: String,
    // Container exit status; 124 is used when the command timed out
    // (mirroring coreutils `timeout`), 1 when waiting on the container failed.
    pub exit_code: i64,
    // True when the command exceeded the sandbox's wall-clock timeout
    // and was forcibly killed.
    pub timed_out: bool,
}

/// Docker-based sandbox executor
pub struct Sandbox {
    // Handle to the local Docker daemon.
    docker: Docker,
    // Container memory cap in bytes.
    memory_limit: i64,
    // CPU quota in microseconds per 100ms scheduling period
    // (50000 == 50% of one core).
    cpu_quota: i64,
    // Wall-clock limit for a single command, in seconds.
    timeout_secs: u64,
}
|
||||
|
||||
impl Sandbox {
|
||||
/// Create a new sandbox with default resource limits
|
||||
pub fn new() -> Result<Self> {
|
||||
let docker =
|
||||
Docker::connect_with_local_defaults().context("Failed to connect to Docker daemon")?;
|
||||
|
||||
Ok(Self {
|
||||
docker,
|
||||
memory_limit: 512 * 1024 * 1024, // 512MB
|
||||
cpu_quota: 50000, // 50% of one core
|
||||
timeout_secs: 30,
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute a command in a sandboxed container
|
||||
pub async fn execute(
|
||||
&self,
|
||||
image: &str,
|
||||
cmd: &[&str],
|
||||
workspace: Option<&Path>,
|
||||
env: HashMap<String, String>,
|
||||
) -> Result<ExecutionResult> {
|
||||
let container_name = format!("owlen-sandbox-{}", uuid::Uuid::new_v4());
|
||||
|
||||
// Prepare volume mount if workspace provided
|
||||
let mounts = if let Some(ws) = workspace {
|
||||
vec![Mount {
|
||||
target: Some("/workspace".to_string()),
|
||||
source: Some(ws.to_string_lossy().to_string()),
|
||||
typ: Some(MountTypeEnum::BIND),
|
||||
read_only: Some(false),
|
||||
..Default::default()
|
||||
}]
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
// Create container config
|
||||
let host_config = HostConfig {
|
||||
memory: Some(self.memory_limit),
|
||||
cpu_quota: Some(self.cpu_quota),
|
||||
network_mode: Some("none".to_string()), // No network access
|
||||
mounts: Some(mounts),
|
||||
auto_remove: Some(true),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let config = Config {
|
||||
image: Some(image.to_string()),
|
||||
cmd: Some(cmd.iter().map(|s| s.to_string()).collect()),
|
||||
working_dir: Some("/workspace".to_string()),
|
||||
env: Some(env.iter().map(|(k, v)| format!("{}={}", k, v)).collect()),
|
||||
host_config: Some(host_config),
|
||||
attach_stdout: Some(true),
|
||||
attach_stderr: Some(true),
|
||||
tty: Some(false),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Create container
|
||||
let container = self
|
||||
.docker
|
||||
.create_container(
|
||||
Some(CreateContainerOptions {
|
||||
name: container_name.clone(),
|
||||
..Default::default()
|
||||
}),
|
||||
config,
|
||||
)
|
||||
.await
|
||||
.context("Failed to create container")?;
|
||||
|
||||
// Start container
|
||||
self.docker
|
||||
.start_container(&container.id, None::<StartContainerOptions<String>>)
|
||||
.await
|
||||
.context("Failed to start container")?;
|
||||
|
||||
// Wait for container with timeout
|
||||
let wait_result =
|
||||
tokio::time::timeout(std::time::Duration::from_secs(self.timeout_secs), async {
|
||||
let mut wait_stream = self
|
||||
.docker
|
||||
.wait_container(&container.id, None::<WaitContainerOptions<String>>);
|
||||
|
||||
use futures::StreamExt;
|
||||
if let Some(result) = wait_stream.next().await {
|
||||
result
|
||||
} else {
|
||||
Err(bollard::errors::Error::IOError {
|
||||
err: std::io::Error::other("Container wait stream ended unexpectedly"),
|
||||
})
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
let (exit_code, timed_out) = match wait_result {
|
||||
Ok(Ok(result)) => (result.status_code, false),
|
||||
Ok(Err(e)) => {
|
||||
eprintln!("Container wait error: {}", e);
|
||||
(1, false)
|
||||
}
|
||||
Err(_) => {
|
||||
// Timeout - kill the container
|
||||
let _ = self
|
||||
.docker
|
||||
.kill_container(
|
||||
&container.id,
|
||||
None::<bollard::container::KillContainerOptions<String>>,
|
||||
)
|
||||
.await;
|
||||
(124, true)
|
||||
}
|
||||
};
|
||||
|
||||
// Get logs
|
||||
let logs = self.docker.logs(
|
||||
&container.id,
|
||||
Some(bollard::container::LogsOptions::<String> {
|
||||
stdout: true,
|
||||
stderr: true,
|
||||
..Default::default()
|
||||
}),
|
||||
);
|
||||
|
||||
use futures::StreamExt;
|
||||
let mut stdout = String::new();
|
||||
let mut stderr = String::new();
|
||||
|
||||
let log_result = tokio::time::timeout(std::time::Duration::from_secs(5), async {
|
||||
let mut logs = logs;
|
||||
while let Some(log) = logs.next().await {
|
||||
match log {
|
||||
Ok(bollard::container::LogOutput::StdOut { message }) => {
|
||||
stdout.push_str(&String::from_utf8_lossy(&message));
|
||||
}
|
||||
Ok(bollard::container::LogOutput::StdErr { message }) => {
|
||||
stderr.push_str(&String::from_utf8_lossy(&message));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
if log_result.is_err() {
|
||||
eprintln!("Timeout reading container logs");
|
||||
}
|
||||
|
||||
// Remove container (auto_remove should handle this, but be explicit)
|
||||
let _ = self
|
||||
.docker
|
||||
.remove_container(
|
||||
&container.id,
|
||||
Some(RemoveContainerOptions {
|
||||
force: true,
|
||||
..Default::default()
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(ExecutionResult {
|
||||
stdout,
|
||||
stderr,
|
||||
exit_code,
|
||||
timed_out,
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute in a Rust environment
|
||||
pub async fn execute_rust(&self, workspace: &Path, cmd: &[&str]) -> Result<ExecutionResult> {
|
||||
self.execute("rust:1.75-slim", cmd, Some(workspace), HashMap::new())
|
||||
.await
|
||||
}
|
||||
|
||||
/// Execute in a Python environment
|
||||
pub async fn execute_python(&self, workspace: &Path, cmd: &[&str]) -> Result<ExecutionResult> {
|
||||
self.execute("python:3.11-slim", cmd, Some(workspace), HashMap::new())
|
||||
.await
|
||||
}
|
||||
|
||||
/// Execute in a Node.js environment
|
||||
pub async fn execute_node(&self, workspace: &Path, cmd: &[&str]) -> Result<ExecutionResult> {
|
||||
self.execute("node:20-slim", cmd, Some(workspace), HashMap::new())
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Sandbox {
    /// Panics if the Docker daemon is unreachable; callers that need to
    /// handle connection errors should use `Sandbox::new` instead.
    fn default() -> Self {
        Self::new().expect("Failed to create default sandbox")
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    // Smoke test of the full container lifecycle: create, start, wait,
    // collect logs, remove. Ignored by default because it needs a running
    // Docker daemon and pulls the rust image on first use.
    #[tokio::test]
    #[ignore] // Requires Docker daemon
    async fn test_sandbox_rust_compile() {
        let sandbox = Sandbox::new().unwrap();
        let temp_dir = TempDir::new().unwrap();

        // Create a simple Rust project
        std::fs::write(
            temp_dir.path().join("main.rs"),
            "fn main() { println!(\"Hello from sandbox!\"); }",
        )
        .unwrap();

        // The temp dir is bind-mounted at /workspace inside the container,
        // so rustc sees main.rs at its working directory.
        let result = sandbox
            .execute_rust(temp_dir.path(), &["rustc", "main.rs"])
            .await
            .unwrap();

        assert_eq!(result.exit_code, 0);
        assert!(!result.timed_out);
    }
}
|
||||
417
crates/mcp/code-server/src/tools.rs
Normal file
417
crates/mcp/code-server/src/tools.rs
Normal file
@@ -0,0 +1,417 @@
|
||||
//! Code execution tools using Docker sandboxing
|
||||
|
||||
use crate::sandbox::Sandbox;
|
||||
use async_trait::async_trait;
|
||||
use owlen_core::Result;
|
||||
use owlen_core::tools::{Tool, ToolResult};
|
||||
use serde_json::{Value, json};
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Tool for compiling projects (Rust, Node.js, Python)
pub struct CompileProjectTool {
    // Docker sandbox in which build commands are executed.
    sandbox: Sandbox,
}

impl Default for CompileProjectTool {
    // NOTE: inherits Sandbox::default(), which panics if the Docker
    // daemon is unreachable.
    fn default() -> Self {
        Self::new()
    }
}

impl CompileProjectTool {
    /// Create the tool with a default-configured Docker sandbox.
    pub fn new() -> Self {
        Self {
            sandbox: Sandbox::default(),
        }
    }
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for CompileProjectTool {
|
||||
fn name(&self) -> &'static str {
|
||||
"compile_project"
|
||||
}
|
||||
|
||||
fn description(&self) -> &'static str {
|
||||
"Compile a project (Rust, Node.js, Python). Detects project type automatically."
|
||||
}
|
||||
|
||||
fn schema(&self) -> Value {
|
||||
json!({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"project_path": {
|
||||
"type": "string",
|
||||
"description": "Path to the project root"
|
||||
},
|
||||
"project_type": {
|
||||
"type": "string",
|
||||
"enum": ["rust", "node", "python"],
|
||||
"description": "Project type (auto-detected if not specified)"
|
||||
}
|
||||
},
|
||||
"required": ["project_path"]
|
||||
})
|
||||
}
|
||||
|
||||
async fn execute(&self, args: Value) -> Result<ToolResult> {
|
||||
let project_path = args
|
||||
.get("project_path")
|
||||
.and_then(|v| v.as_str())
|
||||
.ok_or_else(|| owlen_core::Error::InvalidInput("Missing project_path".into()))?;
|
||||
|
||||
let path = PathBuf::from(project_path);
|
||||
if !path.exists() {
|
||||
return Ok(ToolResult::error("Project path does not exist"));
|
||||
}
|
||||
|
||||
// Detect project type
|
||||
let project_type = if let Some(pt) = args.get("project_type").and_then(|v| v.as_str()) {
|
||||
pt.to_string()
|
||||
} else if path.join("Cargo.toml").exists() {
|
||||
"rust".to_string()
|
||||
} else if path.join("package.json").exists() {
|
||||
"node".to_string()
|
||||
} else if path.join("setup.py").exists() || path.join("pyproject.toml").exists() {
|
||||
"python".to_string()
|
||||
} else {
|
||||
return Ok(ToolResult::error("Could not detect project type"));
|
||||
};
|
||||
|
||||
// Execute compilation
|
||||
let result = match project_type.as_str() {
|
||||
"rust" => self.sandbox.execute_rust(&path, &["cargo", "build"]).await,
|
||||
"node" => {
|
||||
self.sandbox
|
||||
.execute_node(&path, &["npm", "run", "build"])
|
||||
.await
|
||||
}
|
||||
"python" => {
|
||||
// Python typically doesn't need compilation, but we can check syntax
|
||||
self.sandbox
|
||||
.execute_python(&path, &["python", "-m", "compileall", "."])
|
||||
.await
|
||||
}
|
||||
_ => return Ok(ToolResult::error("Unsupported project type")),
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(exec_result) => {
|
||||
if exec_result.timed_out {
|
||||
Ok(ToolResult::error("Compilation timed out"))
|
||||
} else if exec_result.exit_code == 0 {
|
||||
Ok(ToolResult::success(json!({
|
||||
"success": true,
|
||||
"stdout": exec_result.stdout,
|
||||
"stderr": exec_result.stderr,
|
||||
"project_type": project_type
|
||||
})))
|
||||
} else {
|
||||
Ok(ToolResult::success(json!({
|
||||
"success": false,
|
||||
"exit_code": exec_result.exit_code,
|
||||
"stdout": exec_result.stdout,
|
||||
"stderr": exec_result.stderr,
|
||||
"project_type": project_type
|
||||
})))
|
||||
}
|
||||
}
|
||||
Err(e) => Ok(ToolResult::error(&format!("Compilation failed: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Tool for running test suites
pub struct RunTestsTool {
    // Docker sandbox in which test commands are executed.
    sandbox: Sandbox,
}

impl Default for RunTestsTool {
    // NOTE: inherits Sandbox::default(), which panics if the Docker
    // daemon is unreachable.
    fn default() -> Self {
        Self::new()
    }
}

impl RunTestsTool {
    /// Create the tool with a default-configured Docker sandbox.
    pub fn new() -> Self {
        Self {
            sandbox: Sandbox::default(),
        }
    }
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for RunTestsTool {
|
||||
fn name(&self) -> &'static str {
|
||||
"run_tests"
|
||||
}
|
||||
|
||||
fn description(&self) -> &'static str {
|
||||
"Run tests for a project (Rust, Node.js, Python)"
|
||||
}
|
||||
|
||||
fn schema(&self) -> Value {
|
||||
json!({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"project_path": {
|
||||
"type": "string",
|
||||
"description": "Path to the project root"
|
||||
},
|
||||
"test_filter": {
|
||||
"type": "string",
|
||||
"description": "Optional test filter/pattern"
|
||||
}
|
||||
},
|
||||
"required": ["project_path"]
|
||||
})
|
||||
}
|
||||
|
||||
async fn execute(&self, args: Value) -> Result<ToolResult> {
|
||||
let project_path = args
|
||||
.get("project_path")
|
||||
.and_then(|v| v.as_str())
|
||||
.ok_or_else(|| owlen_core::Error::InvalidInput("Missing project_path".into()))?;
|
||||
|
||||
let path = PathBuf::from(project_path);
|
||||
if !path.exists() {
|
||||
return Ok(ToolResult::error("Project path does not exist"));
|
||||
}
|
||||
|
||||
let test_filter = args.get("test_filter").and_then(|v| v.as_str());
|
||||
|
||||
// Detect project type and run tests
|
||||
let result = if path.join("Cargo.toml").exists() {
|
||||
let cmd = if let Some(filter) = test_filter {
|
||||
vec!["cargo", "test", filter]
|
||||
} else {
|
||||
vec!["cargo", "test"]
|
||||
};
|
||||
self.sandbox.execute_rust(&path, &cmd).await
|
||||
} else if path.join("package.json").exists() {
|
||||
self.sandbox.execute_node(&path, &["npm", "test"]).await
|
||||
} else if path.join("pytest.ini").exists()
|
||||
|| path.join("setup.py").exists()
|
||||
|| path.join("pyproject.toml").exists()
|
||||
{
|
||||
let cmd = if let Some(filter) = test_filter {
|
||||
vec!["pytest", "-k", filter]
|
||||
} else {
|
||||
vec!["pytest"]
|
||||
};
|
||||
self.sandbox.execute_python(&path, &cmd).await
|
||||
} else {
|
||||
return Ok(ToolResult::error("Could not detect test framework"));
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(exec_result) => Ok(ToolResult::success(json!({
|
||||
"success": exec_result.exit_code == 0 && !exec_result.timed_out,
|
||||
"exit_code": exec_result.exit_code,
|
||||
"stdout": exec_result.stdout,
|
||||
"stderr": exec_result.stderr,
|
||||
"timed_out": exec_result.timed_out
|
||||
}))),
|
||||
Err(e) => Ok(ToolResult::error(&format!("Tests failed to run: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Tool for formatting code
pub struct FormatCodeTool {
    // Docker sandbox in which formatter commands are executed.
    sandbox: Sandbox,
}

impl Default for FormatCodeTool {
    // NOTE: inherits Sandbox::default(), which panics if the Docker
    // daemon is unreachable.
    fn default() -> Self {
        Self::new()
    }
}

impl FormatCodeTool {
    /// Create the tool with a default-configured Docker sandbox.
    pub fn new() -> Self {
        Self {
            sandbox: Sandbox::default(),
        }
    }
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for FormatCodeTool {
|
||||
fn name(&self) -> &'static str {
|
||||
"format_code"
|
||||
}
|
||||
|
||||
fn description(&self) -> &'static str {
|
||||
"Format code using project-appropriate formatter (rustfmt, prettier, black)"
|
||||
}
|
||||
|
||||
fn schema(&self) -> Value {
|
||||
json!({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"project_path": {
|
||||
"type": "string",
|
||||
"description": "Path to the project root"
|
||||
},
|
||||
"check_only": {
|
||||
"type": "boolean",
|
||||
"description": "Only check formatting without modifying files",
|
||||
"default": false
|
||||
}
|
||||
},
|
||||
"required": ["project_path"]
|
||||
})
|
||||
}
|
||||
|
||||
async fn execute(&self, args: Value) -> Result<ToolResult> {
|
||||
let project_path = args
|
||||
.get("project_path")
|
||||
.and_then(|v| v.as_str())
|
||||
.ok_or_else(|| owlen_core::Error::InvalidInput("Missing project_path".into()))?;
|
||||
|
||||
let path = PathBuf::from(project_path);
|
||||
if !path.exists() {
|
||||
return Ok(ToolResult::error("Project path does not exist"));
|
||||
}
|
||||
|
||||
let check_only = args
|
||||
.get("check_only")
|
||||
.and_then(|v| v.as_bool())
|
||||
.unwrap_or(false);
|
||||
|
||||
// Detect project type and run formatter
|
||||
let result = if path.join("Cargo.toml").exists() {
|
||||
let cmd = if check_only {
|
||||
vec!["cargo", "fmt", "--", "--check"]
|
||||
} else {
|
||||
vec!["cargo", "fmt"]
|
||||
};
|
||||
self.sandbox.execute_rust(&path, &cmd).await
|
||||
} else if path.join("package.json").exists() {
|
||||
let cmd = if check_only {
|
||||
vec!["npx", "prettier", "--check", "."]
|
||||
} else {
|
||||
vec!["npx", "prettier", "--write", "."]
|
||||
};
|
||||
self.sandbox.execute_node(&path, &cmd).await
|
||||
} else if path.join("setup.py").exists() || path.join("pyproject.toml").exists() {
|
||||
let cmd = if check_only {
|
||||
vec!["black", "--check", "."]
|
||||
} else {
|
||||
vec!["black", "."]
|
||||
};
|
||||
self.sandbox.execute_python(&path, &cmd).await
|
||||
} else {
|
||||
return Ok(ToolResult::error("Could not detect project type"));
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(exec_result) => Ok(ToolResult::success(json!({
|
||||
"success": exec_result.exit_code == 0,
|
||||
"formatted": !check_only && exec_result.exit_code == 0,
|
||||
"stdout": exec_result.stdout,
|
||||
"stderr": exec_result.stderr
|
||||
}))),
|
||||
Err(e) => Ok(ToolResult::error(&format!("Formatting failed: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Tool for linting code
pub struct LintCodeTool {
    // Docker sandbox in which linter commands are executed.
    sandbox: Sandbox,
}

impl Default for LintCodeTool {
    // NOTE: inherits Sandbox::default(), which panics if the Docker
    // daemon is unreachable.
    fn default() -> Self {
        Self::new()
    }
}

impl LintCodeTool {
    /// Create the tool with a default-configured Docker sandbox.
    pub fn new() -> Self {
        Self {
            sandbox: Sandbox::default(),
        }
    }
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for LintCodeTool {
|
||||
fn name(&self) -> &'static str {
|
||||
"lint_code"
|
||||
}
|
||||
|
||||
fn description(&self) -> &'static str {
|
||||
"Lint code using project-appropriate linter (clippy, eslint, pylint)"
|
||||
}
|
||||
|
||||
fn schema(&self) -> Value {
|
||||
json!({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"project_path": {
|
||||
"type": "string",
|
||||
"description": "Path to the project root"
|
||||
},
|
||||
"fix": {
|
||||
"type": "boolean",
|
||||
"description": "Automatically fix issues if possible",
|
||||
"default": false
|
||||
}
|
||||
},
|
||||
"required": ["project_path"]
|
||||
})
|
||||
}
|
||||
|
||||
async fn execute(&self, args: Value) -> Result<ToolResult> {
|
||||
let project_path = args
|
||||
.get("project_path")
|
||||
.and_then(|v| v.as_str())
|
||||
.ok_or_else(|| owlen_core::Error::InvalidInput("Missing project_path".into()))?;
|
||||
|
||||
let path = PathBuf::from(project_path);
|
||||
if !path.exists() {
|
||||
return Ok(ToolResult::error("Project path does not exist"));
|
||||
}
|
||||
|
||||
let fix = args.get("fix").and_then(|v| v.as_bool()).unwrap_or(false);
|
||||
|
||||
// Detect project type and run linter
|
||||
let result = if path.join("Cargo.toml").exists() {
|
||||
let cmd = if fix {
|
||||
vec!["cargo", "clippy", "--fix", "--allow-dirty"]
|
||||
} else {
|
||||
vec!["cargo", "clippy"]
|
||||
};
|
||||
self.sandbox.execute_rust(&path, &cmd).await
|
||||
} else if path.join("package.json").exists() {
|
||||
let cmd = if fix {
|
||||
vec!["npx", "eslint", ".", "--fix"]
|
||||
} else {
|
||||
vec!["npx", "eslint", "."]
|
||||
};
|
||||
self.sandbox.execute_node(&path, &cmd).await
|
||||
} else if path.join("setup.py").exists() || path.join("pyproject.toml").exists() {
|
||||
// pylint doesn't have auto-fix
|
||||
self.sandbox.execute_python(&path, &["pylint", "."]).await
|
||||
} else {
|
||||
return Ok(ToolResult::error("Could not detect project type"));
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(exec_result) => {
|
||||
let issues_found = exec_result.exit_code != 0;
|
||||
Ok(ToolResult::success(json!({
|
||||
"success": true,
|
||||
"issues_found": issues_found,
|
||||
"exit_code": exec_result.exit_code,
|
||||
"stdout": exec_result.stdout,
|
||||
"stderr": exec_result.stderr
|
||||
})))
|
||||
}
|
||||
Err(e) => Ok(ToolResult::error(&format!("Linting failed: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
||||
16
crates/mcp/llm-server/Cargo.toml
Normal file
16
crates/mcp/llm-server/Cargo.toml
Normal file
@@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "owlen-mcp-llm-server"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
|
||||
[dependencies]
|
||||
owlen-core = { path = "../../owlen-core" }
|
||||
tokio = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
tokio-stream = { workspace = true }
|
||||
|
||||
[[bin]]
|
||||
name = "owlen-mcp-llm-server"
|
||||
path = "src/main.rs"
|
||||
597
crates/mcp/llm-server/src/main.rs
Normal file
597
crates/mcp/llm-server/src/main.rs
Normal file
@@ -0,0 +1,597 @@
|
||||
#![allow(
|
||||
unused_imports,
|
||||
unused_variables,
|
||||
dead_code,
|
||||
clippy::unnecessary_cast,
|
||||
clippy::manual_flatten,
|
||||
clippy::empty_line_after_outer_attr
|
||||
)]
|
||||
|
||||
use owlen_core::Provider;
|
||||
use owlen_core::ProviderConfig;
|
||||
use owlen_core::config::{Config as OwlenConfig, ensure_provider_config};
|
||||
use owlen_core::mcp::protocol::{
|
||||
ErrorCode, InitializeParams, InitializeResult, PROTOCOL_VERSION, RequestId, RpcError,
|
||||
RpcErrorResponse, RpcNotification, RpcRequest, RpcResponse, ServerCapabilities, ServerInfo,
|
||||
methods,
|
||||
};
|
||||
use owlen_core::mcp::{McpToolCall, McpToolDescriptor, McpToolResponse};
|
||||
use owlen_core::providers::OllamaProvider;
|
||||
use owlen_core::types::{ChatParameters, ChatRequest, Message};
|
||||
use serde::Deserialize;
|
||||
use serde_json::{Value, json};
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{self, AsyncBufReadExt, AsyncWriteExt};
|
||||
use tokio_stream::StreamExt;
|
||||
|
||||
// Suppress warnings are handled by the crate-level attribute at the top.
|
||||
|
||||
/// Arguments for the generate_text tool
#[derive(Debug, Deserialize)]
struct GenerateTextArgs {
    // Conversation history; each entry carries a role and content string.
    messages: Vec<Message>,
    // Optional sampling temperature, forwarded to the provider unchanged.
    temperature: Option<f32>,
    // Optional cap on the number of tokens to generate.
    max_tokens: Option<u32>,
    // Model identifier, e.g. "llama3.2:latest".
    model: String,
    // Whether the caller requested a streamed response.
    stream: bool,
}
|
||||
|
||||
/// Simple tool descriptor for generate_text
// Advertises the LLM generation tool: message array, optional sampling
// knobs, mandatory model and stream flag. Requires network access to reach
// the Ollama backend.
fn generate_text_descriptor() -> McpToolDescriptor {
    McpToolDescriptor {
        name: "generate_text".to_string(),
        description: "Generate text using Ollama LLM. Each message must have 'role' (user/assistant/system) and 'content' (string) fields.".to_string(),
        input_schema: json!({
            "type": "object",
            "properties": {
                "messages": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "role": {
                                "type": "string",
                                "enum": ["user", "assistant", "system"],
                                "description": "The role of the message sender"
                            },
                            "content": {
                                "type": "string",
                                "description": "The message content"
                            }
                        },
                        "required": ["role", "content"]
                    },
                    "description": "Array of message objects with role and content"
                },
                "temperature": {"type": ["number", "null"], "description": "Sampling temperature (0.0-2.0)"},
                "max_tokens": {"type": ["integer", "null"], "description": "Maximum tokens to generate"},
                "model": {"type": "string", "description": "Model name (e.g., llama3.2:latest)"},
                "stream": {"type": "boolean", "description": "Whether to stream the response"}
            },
            "required": ["messages", "model", "stream"]
        }),
        requires_network: true,
        requires_filesystem: vec![],
    }
}

/// Tool descriptor for resources/get (read file)
// Read-only filesystem tool; the emphatic wording steers the model away
// from passing directory paths.
fn resources_get_descriptor() -> McpToolDescriptor {
    McpToolDescriptor {
        name: "resources/get".to_string(),
        description: "Read and return the TEXT CONTENTS of a single FILE. Use this to read the contents of code files, config files, or text documents. Do NOT use for directories.".to_string(),
        input_schema: json!({
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "Path to the FILE (not directory) to read"}
            },
            "required": ["path"]
        }),
        requires_network: false,
        requires_filesystem: vec!["read".to_string()],
    }
}

/// Tool descriptor for resources/list (list directory)
// Read-only directory listing; note that "path" is not in the schema's
// required list, so callers may omit it.
fn resources_list_descriptor() -> McpToolDescriptor {
    McpToolDescriptor {
        name: "resources/list".to_string(),
        description: "List the NAMES of all files and directories in a directory. Use this to see what files exist in a folder, or to list directory contents. Returns an array of file/directory names.".to_string(),
        input_schema: json!({
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "Path to the DIRECTORY to list (use '.' for current directory)"}
            }
        }),
        requires_network: false,
        requires_filesystem: vec!["read".to_string()],
    }
}
|
||||
|
||||
/// Build a provider from the Owlen configuration.
///
/// The provider name comes from the `OWLEN_PROVIDER` environment variable if
/// set, otherwise from the config's default provider. Only Ollama-based
/// provider types are supported by this server.
fn provider_from_config() -> Result<Arc<dyn Provider>, RpcError> {
    // Fall back to a default configuration when none can be loaded.
    let mut config = OwlenConfig::load(None).unwrap_or_default();
    let requested_name =
        env::var("OWLEN_PROVIDER").unwrap_or_else(|_| config.general.default_provider.clone());
    let provider_key = canonical_provider_name(&requested_name);
    // Materialize a config entry for the provider if it is missing, so the
    // lookup below can succeed for known-but-unconfigured providers.
    if config.provider(&provider_key).is_none() {
        ensure_provider_config(&mut config, &provider_key);
    }
    let provider_cfg: ProviderConfig =
        config.provider(&provider_key).cloned().ok_or_else(|| {
            RpcError::internal_error(format!(
                "Provider '{provider_key}' not found in configuration"
            ))
        })?;

    match provider_cfg.provider_type.as_str() {
        "ollama" | "ollama_cloud" => {
            let provider = OllamaProvider::from_config(&provider_cfg, Some(&config.general))
                .map_err(|e| {
                    RpcError::internal_error(format!(
                        "Failed to init Ollama provider from config: {e}"
                    ))
                })?;
            Ok(Arc::new(provider) as Arc<dyn Provider>)
        }
        other => Err(RpcError::internal_error(format!(
            "Unsupported provider type '{other}' for MCP LLM server"
        ))),
    }
}
|
||||
|
||||
fn create_provider() -> Result<Arc<dyn Provider>, RpcError> {
|
||||
if let Ok(url) = env::var("OLLAMA_URL") {
|
||||
let provider = OllamaProvider::new(&url).map_err(|e| {
|
||||
RpcError::internal_error(format!("Failed to init Ollama provider: {e}"))
|
||||
})?;
|
||||
return Ok(Arc::new(provider) as Arc<dyn Provider>);
|
||||
}
|
||||
|
||||
provider_from_config()
|
||||
}
|
||||
|
||||
/// Normalize a user-supplied provider name to its canonical config key.
///
/// Trims whitespace, lowercases, and unifies dashes to underscores; empty
/// input and the aliases "ollama"/"ollama_local" all map to "ollama_local".
/// Any other normalized name passes through unchanged.
fn canonical_provider_name(name: &str) -> String {
    let key = name.trim().to_ascii_lowercase().replace('-', "_");
    if key.is_empty() || key == "ollama" || key == "ollama_local" {
        "ollama_local".to_string()
    } else {
        key
    }
}
|
||||
|
||||
async fn handle_generate_text(args: GenerateTextArgs) -> Result<String, RpcError> {
|
||||
let provider = create_provider()?;
|
||||
|
||||
let parameters = ChatParameters {
|
||||
temperature: args.temperature,
|
||||
max_tokens: args.max_tokens.map(|v| v as u32),
|
||||
stream: args.stream,
|
||||
extra: HashMap::new(),
|
||||
};
|
||||
|
||||
let request = ChatRequest {
|
||||
model: args.model,
|
||||
messages: args.messages,
|
||||
parameters,
|
||||
tools: None,
|
||||
};
|
||||
|
||||
// Use streaming API and collect output
|
||||
let mut stream = provider
|
||||
.stream_prompt(request)
|
||||
.await
|
||||
.map_err(|e| RpcError::internal_error(format!("Chat request failed: {}", e)))?;
|
||||
let mut content = String::new();
|
||||
while let Some(chunk) = stream.next().await {
|
||||
match chunk {
|
||||
Ok(resp) => {
|
||||
content.push_str(&resp.message.content);
|
||||
if resp.is_final {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(RpcError::internal_error(format!("Stream error: {}", e)));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(content)
|
||||
}
|
||||
|
||||
/// Dispatch a single JSON-RPC request and produce its result payload.
///
/// `TOOLS_CALL` is intentionally NOT handled here: tool calls may emit
/// incremental streaming notifications, so the main loop handles them
/// directly and this function returns an internal error for that method.
async fn handle_request(req: &RpcRequest) -> Result<Value, RpcError> {
    match req.method.as_str() {
        methods::INITIALIZE => {
            // Unlike some servers, missing params here is an error rather
            // than defaulting to an empty object.
            let params = req
                .params
                .as_ref()
                .ok_or_else(|| RpcError::invalid_params("Missing params for initialize"))?;
            let init: InitializeParams = serde_json::from_value(params.clone())
                .map_err(|e| RpcError::invalid_params(format!("Invalid init params: {}", e)))?;
            // Strict check: only an exact protocol version match is accepted.
            if !init.protocol_version.eq(PROTOCOL_VERSION) {
                return Err(RpcError::new(
                    ErrorCode::INVALID_REQUEST,
                    format!(
                        "Incompatible protocol version. Client: {}, Server: {}",
                        init.protocol_version, PROTOCOL_VERSION
                    ),
                ));
            }
            // Advertise this server's identity and capabilities (tools and
            // streaming supported; resources are served via tools instead).
            let result = InitializeResult {
                protocol_version: PROTOCOL_VERSION.to_string(),
                server_info: ServerInfo {
                    name: "owlen-mcp-llm-server".to_string(),
                    version: env!("CARGO_PKG_VERSION").to_string(),
                },
                capabilities: ServerCapabilities {
                    supports_tools: Some(true),
                    supports_resources: Some(false),
                    supports_streaming: Some(true),
                },
            };
            serde_json::to_value(result).map_err(|e| {
                RpcError::internal_error(format!("Failed to serialize init result: {}", e))
            })
        }
        methods::TOOLS_LIST => {
            // Static tool catalogue: text generation plus two file tools.
            let tools = vec![
                generate_text_descriptor(),
                resources_get_descriptor(),
                resources_list_descriptor(),
            ];
            Ok(json!(tools))
        }
        // New method to list available Ollama models via the provider.
        methods::MODELS_LIST => {
            let provider = create_provider()?;
            let models = provider
                .list_models()
                .await
                .map_err(|e| RpcError::internal_error(format!("Failed to list models: {}", e)))?;
            serde_json::to_value(models).map_err(|e| {
                RpcError::internal_error(format!("Failed to serialize model list: {}", e))
            })
        }
        methods::TOOLS_CALL => {
            // For streaming we will send incremental notifications directly from here.
            // The caller (main loop) will handle writing the final response.
            Err(RpcError::internal_error(
                "TOOLS_CALL should be handled in main loop for streaming",
            ))
        }
        _ => Err(RpcError::method_not_found(&req.method)),
    }
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let root = env::current_dir()?; // not used but kept for parity
|
||||
let mut stdin = io::BufReader::new(io::stdin());
|
||||
let mut stdout = io::stdout();
|
||||
loop {
|
||||
let mut line = String::new();
|
||||
match stdin.read_line(&mut line).await {
|
||||
Ok(0) => break,
|
||||
Ok(_) => {
|
||||
let req: RpcRequest = match serde_json::from_str(&line) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
let err = RpcErrorResponse::new(
|
||||
RequestId::Number(0),
|
||||
RpcError::parse_error(format!("Parse error: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let id = req.id.clone();
|
||||
// Streaming tool calls (generate_text) are handled specially to emit incremental notifications.
|
||||
if req.method == methods::TOOLS_CALL {
|
||||
// Parse the tool call
|
||||
let params = match &req.params {
|
||||
Some(p) => p,
|
||||
None => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::invalid_params("Missing params for tool call"),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let call: McpToolCall = match serde_json::from_value(params.clone()) {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::invalid_params(format!("Invalid tool call: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// Dispatch based on the requested tool name.
|
||||
// Handle resources tools manually.
|
||||
if call.name.starts_with("resources/get") {
|
||||
let path = call
|
||||
.arguments
|
||||
.get("path")
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("");
|
||||
match std::fs::read_to_string(path) {
|
||||
Ok(content) => {
|
||||
let response = McpToolResponse {
|
||||
name: call.name,
|
||||
success: true,
|
||||
output: json!(content),
|
||||
metadata: HashMap::new(),
|
||||
duration_ms: 0,
|
||||
};
|
||||
let payload = match serde_json::to_value(&response) {
|
||||
Ok(value) => value,
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::internal_error(format!(
|
||||
"Failed to serialize resource response: {}",
|
||||
e
|
||||
)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let final_resp = RpcResponse::new(id.clone(), payload);
|
||||
let s = serde_json::to_string(&final_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::internal_error(format!("Failed to read file: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if call.name.starts_with("resources/list") {
|
||||
let path = call
|
||||
.arguments
|
||||
.get("path")
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or(".");
|
||||
match std::fs::read_dir(path) {
|
||||
Ok(entries) => {
|
||||
let mut names = Vec::new();
|
||||
for entry in entries.flatten() {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
names.push(name.to_string());
|
||||
}
|
||||
}
|
||||
let response = McpToolResponse {
|
||||
name: call.name,
|
||||
success: true,
|
||||
output: json!(names),
|
||||
metadata: HashMap::new(),
|
||||
duration_ms: 0,
|
||||
};
|
||||
let payload = match serde_json::to_value(&response) {
|
||||
Ok(value) => value,
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::internal_error(format!(
|
||||
"Failed to serialize directory listing: {}",
|
||||
e
|
||||
)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let final_resp = RpcResponse::new(id.clone(), payload);
|
||||
let s = serde_json::to_string(&final_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::internal_error(format!("Failed to list dir: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Expect generate_text tool for the remaining path.
|
||||
if call.name != "generate_text" {
|
||||
let err_resp =
|
||||
RpcErrorResponse::new(id.clone(), RpcError::tool_not_found(&call.name));
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
let args: GenerateTextArgs =
|
||||
match serde_json::from_value(call.arguments.clone()) {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::invalid_params(format!("Invalid arguments: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Initialize provider and start streaming
|
||||
let provider = match create_provider() {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::internal_error(format!(
|
||||
"Failed to initialize provider: {:?}",
|
||||
e
|
||||
)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let parameters = ChatParameters {
|
||||
temperature: args.temperature,
|
||||
max_tokens: args.max_tokens.map(|v| v as u32),
|
||||
stream: true,
|
||||
extra: HashMap::new(),
|
||||
};
|
||||
let request = ChatRequest {
|
||||
model: args.model,
|
||||
messages: args.messages,
|
||||
parameters,
|
||||
tools: None,
|
||||
};
|
||||
let mut stream = match provider.stream_prompt(request).await {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::internal_error(format!("Chat request failed: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// Accumulate full content while sending incremental progress notifications
|
||||
let mut final_content = String::new();
|
||||
while let Some(chunk) = stream.next().await {
|
||||
match chunk {
|
||||
Ok(resp) => {
|
||||
// Append chunk to the final content buffer
|
||||
final_content.push_str(&resp.message.content);
|
||||
// Emit a progress notification for the UI
|
||||
let notif = RpcNotification::new(
|
||||
"tools/call/progress",
|
||||
Some(json!({ "content": resp.message.content })),
|
||||
);
|
||||
let s = serde_json::to_string(¬if)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
if resp.is_final {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::internal_error(format!("Stream error: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
// After streaming, send the final tool response containing the full content
|
||||
let final_output = final_content.clone();
|
||||
let response = McpToolResponse {
|
||||
name: call.name,
|
||||
success: true,
|
||||
output: json!(final_output),
|
||||
metadata: HashMap::new(),
|
||||
duration_ms: 0,
|
||||
};
|
||||
let payload = match serde_json::to_value(&response) {
|
||||
Ok(value) => value,
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
id.clone(),
|
||||
RpcError::internal_error(format!(
|
||||
"Failed to serialize final streaming response: {}",
|
||||
e
|
||||
)),
|
||||
);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let final_resp = RpcResponse::new(id.clone(), payload);
|
||||
let s = serde_json::to_string(&final_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
// Non‑streaming requests are handled by the generic handler
|
||||
match handle_request(&req).await {
|
||||
Ok(res) => {
|
||||
let resp = RpcResponse::new(id, res);
|
||||
let s = serde_json::to_string(&resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
}
|
||||
Err(err) => {
|
||||
let err_resp = RpcErrorResponse::new(id, err);
|
||||
let s = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Read error: {}", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
21
crates/mcp/prompt-server/Cargo.toml
Normal file
21
crates/mcp/prompt-server/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
|
||||
[package]
|
||||
name = "owlen-mcp-prompt-server"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
description = "MCP server that renders prompt templates (YAML) for Owlen"
|
||||
license = "AGPL-3.0"
|
||||
|
||||
[dependencies]
|
||||
owlen-core = { path = "../../owlen-core" }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
handlebars = { workspace = true }
|
||||
dirs = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
|
||||
[lib]
|
||||
name = "owlen_mcp_prompt_server"
|
||||
path = "src/lib.rs"
|
||||
415
crates/mcp/prompt-server/src/lib.rs
Normal file
415
crates/mcp/prompt-server/src/lib.rs
Normal file
@@ -0,0 +1,415 @@
|
||||
//! MCP server for rendering prompt templates with YAML storage and Handlebars rendering.
|
||||
//!
|
||||
//! Templates are stored in `~/.config/owlen/prompts/` as YAML files.
|
||||
//! Provides full Handlebars templating support for dynamic prompt generation.
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use handlebars::Handlebars;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{Value, json};
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use owlen_core::mcp::protocol::{
|
||||
ErrorCode, InitializeParams, InitializeResult, PROTOCOL_VERSION, RequestId, RpcError,
|
||||
RpcErrorResponse, RpcRequest, RpcResponse, ServerCapabilities, ServerInfo, methods,
|
||||
};
|
||||
use owlen_core::mcp::{McpToolCall, McpToolDescriptor, McpToolResponse};
|
||||
use tokio::io::{self, AsyncBufReadExt, AsyncWriteExt};
|
||||
|
||||
/// Prompt template definition
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct PromptTemplate {
|
||||
/// Template name
|
||||
pub name: String,
|
||||
/// Template version
|
||||
pub version: String,
|
||||
/// Optional mode restriction
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mode: Option<String>,
|
||||
/// Handlebars template content
|
||||
pub template: String,
|
||||
/// Template description
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub description: Option<String>,
|
||||
}
|
||||
|
||||
/// Prompt server managing templates
|
||||
pub struct PromptServer {
|
||||
templates: Arc<RwLock<HashMap<String, PromptTemplate>>>,
|
||||
handlebars: Handlebars<'static>,
|
||||
templates_dir: PathBuf,
|
||||
}
|
||||
|
||||
impl PromptServer {
|
||||
/// Create a new prompt server
|
||||
pub fn new() -> Result<Self> {
|
||||
let templates_dir = Self::get_templates_dir()?;
|
||||
|
||||
// Create templates directory if it doesn't exist
|
||||
if !templates_dir.exists() {
|
||||
fs::create_dir_all(&templates_dir)?;
|
||||
Self::create_default_templates(&templates_dir)?;
|
||||
}
|
||||
|
||||
let mut server = Self {
|
||||
templates: Arc::new(RwLock::new(HashMap::new())),
|
||||
handlebars: Handlebars::new(),
|
||||
templates_dir,
|
||||
};
|
||||
|
||||
// Load all templates
|
||||
server.load_templates()?;
|
||||
|
||||
Ok(server)
|
||||
}
|
||||
|
||||
/// Get the templates directory path
|
||||
fn get_templates_dir() -> Result<PathBuf> {
|
||||
let config_dir = dirs::config_dir().context("Could not determine config directory")?;
|
||||
Ok(config_dir.join("owlen").join("prompts"))
|
||||
}
|
||||
|
||||
/// Create default template examples
|
||||
fn create_default_templates(dir: &Path) -> Result<()> {
|
||||
let chat_mode_system = PromptTemplate {
|
||||
name: "chat_mode_system".to_string(),
|
||||
version: "1.0".to_string(),
|
||||
mode: Some("chat".to_string()),
|
||||
description: Some("System prompt for chat mode".to_string()),
|
||||
template: r#"You are Owlen, a helpful AI assistant. You have access to these tools:
|
||||
{{#each tools}}
|
||||
- {{name}}: {{description}}
|
||||
{{/each}}
|
||||
|
||||
Use the ReAct pattern:
|
||||
THOUGHT: Your reasoning
|
||||
ACTION: tool_name
|
||||
ACTION_INPUT: {"param": "value"}
|
||||
|
||||
When you have enough information:
|
||||
FINAL_ANSWER: Your response"#
|
||||
.to_string(),
|
||||
};
|
||||
|
||||
let code_mode_system = PromptTemplate {
|
||||
name: "code_mode_system".to_string(),
|
||||
version: "1.0".to_string(),
|
||||
mode: Some("code".to_string()),
|
||||
description: Some("System prompt for code mode".to_string()),
|
||||
template: r#"You are Owlen in code mode, with full development capabilities. You have access to:
|
||||
{{#each tools}}
|
||||
- {{name}}: {{description}}
|
||||
{{/each}}
|
||||
|
||||
Use the ReAct pattern to solve coding tasks:
|
||||
THOUGHT: Analyze what needs to be done
|
||||
ACTION: tool_name (compile_project, run_tests, format_code, lint_code, etc.)
|
||||
ACTION_INPUT: {"param": "value"}
|
||||
|
||||
Continue iterating until the task is complete, then provide:
|
||||
FINAL_ANSWER: Summary of what was done"#
|
||||
.to_string(),
|
||||
};
|
||||
|
||||
// Save templates
|
||||
let chat_path = dir.join("chat_mode_system.yaml");
|
||||
let code_path = dir.join("code_mode_system.yaml");
|
||||
|
||||
fs::write(chat_path, serde_yaml::to_string(&chat_mode_system)?)?;
|
||||
fs::write(code_path, serde_yaml::to_string(&code_mode_system)?)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load all templates from the templates directory
|
||||
fn load_templates(&mut self) -> Result<()> {
|
||||
let entries = fs::read_dir(&self.templates_dir)?;
|
||||
|
||||
for entry in entries {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
|
||||
if path.extension().and_then(|s| s.to_str()) == Some("yaml")
|
||||
|| path.extension().and_then(|s| s.to_str()) == Some("yml")
|
||||
{
|
||||
match self.load_template(&path) {
|
||||
Ok(template) => {
|
||||
// Register with Handlebars
|
||||
if let Err(e) = self
|
||||
.handlebars
|
||||
.register_template_string(&template.name, &template.template)
|
||||
{
|
||||
eprintln!(
|
||||
"Warning: Failed to register template {}: {}",
|
||||
template.name, e
|
||||
);
|
||||
} else {
|
||||
let mut templates = self.templates.blocking_write();
|
||||
templates.insert(template.name.clone(), template);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Warning: Failed to load template {:?}: {}", path, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load a single template from file
|
||||
fn load_template(&self, path: &Path) -> Result<PromptTemplate> {
|
||||
let content = fs::read_to_string(path)?;
|
||||
let template: PromptTemplate = serde_yaml::from_str(&content)?;
|
||||
Ok(template)
|
||||
}
|
||||
|
||||
/// Get a template by name
|
||||
pub async fn get_template(&self, name: &str) -> Option<PromptTemplate> {
|
||||
let templates = self.templates.read().await;
|
||||
templates.get(name).cloned()
|
||||
}
|
||||
|
||||
/// List all available templates
|
||||
pub async fn list_templates(&self) -> Vec<String> {
|
||||
let templates = self.templates.read().await;
|
||||
templates.keys().cloned().collect()
|
||||
}
|
||||
|
||||
/// Render a template with given variables
|
||||
pub fn render_template(&self, name: &str, vars: &Value) -> Result<String> {
|
||||
self.handlebars
|
||||
.render(name, vars)
|
||||
.context("Failed to render template")
|
||||
}
|
||||
|
||||
/// Reload all templates from disk
|
||||
pub async fn reload_templates(&mut self) -> Result<()> {
|
||||
{
|
||||
let mut templates = self.templates.write().await;
|
||||
templates.clear();
|
||||
}
|
||||
self.handlebars = Handlebars::new();
|
||||
self.load_templates()
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let mut stdin = io::BufReader::new(io::stdin());
|
||||
let mut stdout = io::stdout();
|
||||
|
||||
let server = Arc::new(tokio::sync::Mutex::new(PromptServer::new()?));
|
||||
|
||||
loop {
|
||||
let mut line = String::new();
|
||||
match stdin.read_line(&mut line).await {
|
||||
Ok(0) => break, // EOF
|
||||
Ok(_) => {
|
||||
let req: RpcRequest = match serde_json::from_str(&line) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
let err = RpcErrorResponse::new(
|
||||
RequestId::Number(0),
|
||||
RpcError::parse_error(format!("Parse error: {}", e)),
|
||||
);
|
||||
let s = serde_json::to_string(&err)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let resp = handle_request(req.clone(), server.clone()).await;
|
||||
match resp {
|
||||
Ok(r) => {
|
||||
let s = serde_json::to_string(&r)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
}
|
||||
Err(e) => {
|
||||
let err = RpcErrorResponse::new(req.id.clone(), e);
|
||||
let s = serde_json::to_string(&err)?;
|
||||
stdout.write_all(s.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Error reading stdin: {}", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
async fn handle_request(
|
||||
req: RpcRequest,
|
||||
server: Arc<tokio::sync::Mutex<PromptServer>>,
|
||||
) -> Result<RpcResponse, RpcError> {
|
||||
match req.method.as_str() {
|
||||
methods::INITIALIZE => {
|
||||
let params: InitializeParams =
|
||||
serde_json::from_value(req.params.unwrap_or_else(|| json!({})))
|
||||
.map_err(|e| RpcError::invalid_params(format!("Invalid init params: {}", e)))?;
|
||||
if !params.protocol_version.eq(PROTOCOL_VERSION) {
|
||||
return Err(RpcError::new(
|
||||
ErrorCode::INVALID_REQUEST,
|
||||
format!(
|
||||
"Incompatible protocol version. Client: {}, Server: {}",
|
||||
params.protocol_version, PROTOCOL_VERSION
|
||||
),
|
||||
));
|
||||
}
|
||||
let result = InitializeResult {
|
||||
protocol_version: PROTOCOL_VERSION.to_string(),
|
||||
server_info: ServerInfo {
|
||||
name: "owlen-mcp-prompt-server".to_string(),
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
},
|
||||
capabilities: ServerCapabilities {
|
||||
supports_tools: Some(true),
|
||||
supports_resources: Some(false),
|
||||
supports_streaming: Some(false),
|
||||
},
|
||||
};
|
||||
let payload = serde_json::to_value(result).map_err(|e| {
|
||||
RpcError::internal_error(format!("Failed to serialize initialize result: {}", e))
|
||||
})?;
|
||||
Ok(RpcResponse::new(req.id, payload))
|
||||
}
|
||||
methods::TOOLS_LIST => {
|
||||
let tools = vec![
|
||||
McpToolDescriptor {
|
||||
name: "get_prompt".to_string(),
|
||||
description: "Retrieve a prompt template by name".to_string(),
|
||||
input_schema: json!({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string", "description": "Template name"}
|
||||
},
|
||||
"required": ["name"]
|
||||
}),
|
||||
requires_network: false,
|
||||
requires_filesystem: vec![],
|
||||
},
|
||||
McpToolDescriptor {
|
||||
name: "render_prompt".to_string(),
|
||||
description: "Render a prompt template with Handlebars variables".to_string(),
|
||||
input_schema: json!({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string", "description": "Template name"},
|
||||
"vars": {"type": "object", "description": "Variables for Handlebars rendering"}
|
||||
},
|
||||
"required": ["name"]
|
||||
}),
|
||||
requires_network: false,
|
||||
requires_filesystem: vec![],
|
||||
},
|
||||
McpToolDescriptor {
|
||||
name: "list_prompts".to_string(),
|
||||
description: "List all available prompt templates".to_string(),
|
||||
input_schema: json!({"type": "object", "properties": {}}),
|
||||
requires_network: false,
|
||||
requires_filesystem: vec![],
|
||||
},
|
||||
McpToolDescriptor {
|
||||
name: "reload_prompts".to_string(),
|
||||
description: "Reload all prompts from disk".to_string(),
|
||||
input_schema: json!({"type": "object", "properties": {}}),
|
||||
requires_network: false,
|
||||
requires_filesystem: vec![],
|
||||
},
|
||||
];
|
||||
Ok(RpcResponse::new(req.id, json!(tools)))
|
||||
}
|
||||
methods::TOOLS_CALL => {
|
||||
let call: McpToolCall = serde_json::from_value(req.params.unwrap_or_else(|| json!({})))
|
||||
.map_err(|e| RpcError::invalid_params(format!("Invalid tool call: {}", e)))?;
|
||||
|
||||
let result = match call.name.as_str() {
|
||||
"get_prompt" => {
|
||||
let name = call
|
||||
.arguments
|
||||
.get("name")
|
||||
.and_then(|v| v.as_str())
|
||||
.ok_or_else(|| RpcError::invalid_params("Missing 'name' parameter"))?;
|
||||
|
||||
let srv = server.lock().await;
|
||||
match srv.get_template(name).await {
|
||||
Some(template) => match serde_json::to_value(template) {
|
||||
Ok(serialized) => {
|
||||
json!({"success": true, "template": serialized})
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(RpcError::internal_error(format!(
|
||||
"Failed to serialize template '{}': {}",
|
||||
name, e
|
||||
)));
|
||||
}
|
||||
},
|
||||
None => json!({"success": false, "error": "Template not found"}),
|
||||
}
|
||||
}
|
||||
"render_prompt" => {
|
||||
let name = call
|
||||
.arguments
|
||||
.get("name")
|
||||
.and_then(|v| v.as_str())
|
||||
.ok_or_else(|| RpcError::invalid_params("Missing 'name' parameter"))?;
|
||||
|
||||
let default_vars = json!({});
|
||||
let vars = call.arguments.get("vars").unwrap_or(&default_vars);
|
||||
|
||||
let srv = server.lock().await;
|
||||
match srv.render_template(name, vars) {
|
||||
Ok(rendered) => json!({"success": true, "rendered": rendered}),
|
||||
Err(e) => json!({"success": false, "error": e.to_string()}),
|
||||
}
|
||||
}
|
||||
"list_prompts" => {
|
||||
let srv = server.lock().await;
|
||||
let templates = srv.list_templates().await;
|
||||
json!({"success": true, "templates": templates})
|
||||
}
|
||||
"reload_prompts" => {
|
||||
let mut srv = server.lock().await;
|
||||
match srv.reload_templates().await {
|
||||
Ok(_) => json!({"success": true, "message": "Prompts reloaded"}),
|
||||
Err(e) => json!({"success": false, "error": e.to_string()}),
|
||||
}
|
||||
}
|
||||
_ => return Err(RpcError::method_not_found(&call.name)),
|
||||
};
|
||||
|
||||
let resp = McpToolResponse {
|
||||
name: call.name,
|
||||
success: result
|
||||
.get("success")
|
||||
.and_then(|v| v.as_bool())
|
||||
.unwrap_or(false),
|
||||
output: result,
|
||||
metadata: HashMap::new(),
|
||||
duration_ms: 0,
|
||||
};
|
||||
|
||||
let payload = serde_json::to_value(resp).map_err(|e| {
|
||||
RpcError::internal_error(format!("Failed to serialize tool response: {}", e))
|
||||
})?;
|
||||
Ok(RpcResponse::new(req.id, payload))
|
||||
}
|
||||
_ => Err(RpcError::method_not_found(&req.method)),
|
||||
}
|
||||
}
|
||||
3
crates/mcp/prompt-server/templates/example.yaml
Normal file
3
crates/mcp/prompt-server/templates/example.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
prompt: |
|
||||
Hello {{name}}!
|
||||
Your role is: {{role}}.
|
||||
12
crates/mcp/server/Cargo.toml
Normal file
12
crates/mcp/server/Cargo.toml
Normal file
@@ -0,0 +1,12 @@
|
||||
[package]
|
||||
name = "owlen-mcp-server"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
|
||||
[dependencies]
|
||||
tokio = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
path-clean = "1.0"
|
||||
owlen-core = { path = "../../owlen-core" }
|
||||
246
crates/mcp/server/src/main.rs
Normal file
246
crates/mcp/server/src/main.rs
Normal file
@@ -0,0 +1,246 @@
|
||||
use owlen_core::mcp::protocol::{
|
||||
ErrorCode, InitializeParams, InitializeResult, PROTOCOL_VERSION, RequestId, RpcError,
|
||||
RpcErrorResponse, RpcRequest, RpcResponse, ServerCapabilities, ServerInfo, is_compatible,
|
||||
};
|
||||
use path_clean::PathClean;
|
||||
use serde::Deserialize;
|
||||
use std::env;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use tokio::io::{self, AsyncBufReadExt, AsyncWriteExt};
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct FileArgs {
|
||||
path: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct WriteArgs {
|
||||
path: String,
|
||||
content: String,
|
||||
}
|
||||
|
||||
async fn handle_request(req: &RpcRequest, root: &Path) -> Result<serde_json::Value, RpcError> {
|
||||
match req.method.as_str() {
|
||||
"initialize" => {
|
||||
let params = req
|
||||
.params
|
||||
.as_ref()
|
||||
.ok_or_else(|| RpcError::invalid_params("Missing params for initialize"))?;
|
||||
|
||||
let init_params: InitializeParams =
|
||||
serde_json::from_value(params.clone()).map_err(|e| {
|
||||
RpcError::invalid_params(format!("Invalid initialize params: {}", e))
|
||||
})?;
|
||||
|
||||
// Check protocol version compatibility
|
||||
if !is_compatible(&init_params.protocol_version, PROTOCOL_VERSION) {
|
||||
return Err(RpcError::new(
|
||||
ErrorCode::INVALID_REQUEST,
|
||||
format!(
|
||||
"Incompatible protocol version. Client: {}, Server: {}",
|
||||
init_params.protocol_version, PROTOCOL_VERSION
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
// Build initialization result
|
||||
let result = InitializeResult {
|
||||
protocol_version: PROTOCOL_VERSION.to_string(),
|
||||
server_info: ServerInfo {
|
||||
name: "owlen-mcp-server".to_string(),
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
},
|
||||
capabilities: ServerCapabilities {
|
||||
supports_tools: Some(false),
|
||||
supports_resources: Some(true), // Supports read, write, delete
|
||||
supports_streaming: Some(false),
|
||||
},
|
||||
};
|
||||
|
||||
Ok(serde_json::to_value(result).map_err(|e| {
|
||||
RpcError::internal_error(format!("Failed to serialize result: {}", e))
|
||||
})?)
|
||||
}
|
||||
"resources/list" => {
|
||||
let params = req
|
||||
.params
|
||||
.as_ref()
|
||||
.ok_or_else(|| RpcError::invalid_params("Missing params"))?;
|
||||
let args: FileArgs = serde_json::from_value(params.clone())
|
||||
.map_err(|e| RpcError::invalid_params(format!("Invalid params: {}", e)))?;
|
||||
resources_list(&args.path, root).await
|
||||
}
|
||||
"resources/get" => {
|
||||
let params = req
|
||||
.params
|
||||
.as_ref()
|
||||
.ok_or_else(|| RpcError::invalid_params("Missing params"))?;
|
||||
let args: FileArgs = serde_json::from_value(params.clone())
|
||||
.map_err(|e| RpcError::invalid_params(format!("Invalid params: {}", e)))?;
|
||||
resources_get(&args.path, root).await
|
||||
}
|
||||
"resources/write" => {
|
||||
let params = req
|
||||
.params
|
||||
.as_ref()
|
||||
.ok_or_else(|| RpcError::invalid_params("Missing params"))?;
|
||||
let args: WriteArgs = serde_json::from_value(params.clone())
|
||||
.map_err(|e| RpcError::invalid_params(format!("Invalid params: {}", e)))?;
|
||||
resources_write(&args.path, &args.content, root).await
|
||||
}
|
||||
"resources/delete" => {
|
||||
let params = req
|
||||
.params
|
||||
.as_ref()
|
||||
.ok_or_else(|| RpcError::invalid_params("Missing params"))?;
|
||||
let args: FileArgs = serde_json::from_value(params.clone())
|
||||
.map_err(|e| RpcError::invalid_params(format!("Invalid params: {}", e)))?;
|
||||
resources_delete(&args.path, root).await
|
||||
}
|
||||
_ => Err(RpcError::method_not_found(&req.method)),
|
||||
}
|
||||
}
|
||||
|
||||
fn sanitize_path(path: &str, root: &Path) -> Result<PathBuf, RpcError> {
|
||||
let path = Path::new(path);
|
||||
let path = if path.is_absolute() {
|
||||
path.strip_prefix("/")
|
||||
.map_err(|_| RpcError::invalid_params("Invalid path"))?
|
||||
.to_path_buf()
|
||||
} else {
|
||||
path.to_path_buf()
|
||||
};
|
||||
|
||||
let full_path = root.join(path).clean();
|
||||
|
||||
if !full_path.starts_with(root) {
|
||||
return Err(RpcError::path_traversal());
|
||||
}
|
||||
|
||||
Ok(full_path)
|
||||
}
|
||||
|
||||
async fn resources_list(path: &str, root: &Path) -> Result<serde_json::Value, RpcError> {
|
||||
let full_path = sanitize_path(path, root)?;
|
||||
|
||||
let entries = fs::read_dir(full_path).map_err(|e| {
|
||||
RpcError::new(
|
||||
ErrorCode::RESOURCE_NOT_FOUND,
|
||||
format!("Failed to read directory: {}", e),
|
||||
)
|
||||
})?;
|
||||
|
||||
let mut result = Vec::new();
|
||||
for entry in entries {
|
||||
let entry = entry.map_err(|e| {
|
||||
RpcError::internal_error(format!("Failed to read directory entry: {}", e))
|
||||
})?;
|
||||
result.push(entry.file_name().to_string_lossy().to_string());
|
||||
}
|
||||
|
||||
Ok(serde_json::json!(result))
|
||||
}
|
||||
|
||||
async fn resources_get(path: &str, root: &Path) -> Result<serde_json::Value, RpcError> {
|
||||
let full_path = sanitize_path(path, root)?;
|
||||
|
||||
let content = fs::read_to_string(full_path).map_err(|e| {
|
||||
RpcError::new(
|
||||
ErrorCode::RESOURCE_NOT_FOUND,
|
||||
format!("Failed to read file: {}", e),
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(serde_json::json!(content))
|
||||
}
|
||||
|
||||
async fn resources_write(
|
||||
path: &str,
|
||||
content: &str,
|
||||
root: &Path,
|
||||
) -> Result<serde_json::Value, RpcError> {
|
||||
let full_path = sanitize_path(path, root)?;
|
||||
// Ensure parent directory exists
|
||||
if let Some(parent) = full_path.parent() {
|
||||
std::fs::create_dir_all(parent).map_err(|e| {
|
||||
RpcError::internal_error(format!("Failed to create parent directories: {}", e))
|
||||
})?;
|
||||
}
|
||||
std::fs::write(full_path, content)
|
||||
.map_err(|e| RpcError::internal_error(format!("Failed to write file: {}", e)))?;
|
||||
Ok(serde_json::json!(null))
|
||||
}
|
||||
|
||||
async fn resources_delete(path: &str, root: &Path) -> Result<serde_json::Value, RpcError> {
|
||||
let full_path = sanitize_path(path, root)?;
|
||||
if full_path.is_file() {
|
||||
std::fs::remove_file(full_path)
|
||||
.map_err(|e| RpcError::internal_error(format!("Failed to delete file: {}", e)))?;
|
||||
Ok(serde_json::json!(null))
|
||||
} else {
|
||||
Err(RpcError::new(
|
||||
ErrorCode::RESOURCE_NOT_FOUND,
|
||||
"Path does not refer to a file",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let root = env::current_dir()?;
|
||||
let mut stdin = io::BufReader::new(io::stdin());
|
||||
let mut stdout = io::stdout();
|
||||
|
||||
loop {
|
||||
let mut line = String::new();
|
||||
match stdin.read_line(&mut line).await {
|
||||
Ok(0) => {
|
||||
// EOF
|
||||
break;
|
||||
}
|
||||
Ok(_) => {
|
||||
let req: RpcRequest = match serde_json::from_str(&line) {
|
||||
Ok(req) => req,
|
||||
Err(e) => {
|
||||
let err_resp = RpcErrorResponse::new(
|
||||
RequestId::Number(0),
|
||||
RpcError::parse_error(format!("Parse error: {}", e)),
|
||||
);
|
||||
let resp_str = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(resp_str.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let request_id = req.id.clone();
|
||||
|
||||
match handle_request(&req, &root).await {
|
||||
Ok(result) => {
|
||||
let resp = RpcResponse::new(request_id, result);
|
||||
let resp_str = serde_json::to_string(&resp)?;
|
||||
stdout.write_all(resp_str.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
}
|
||||
Err(error) => {
|
||||
let err_resp = RpcErrorResponse::new(request_id, error);
|
||||
let resp_str = serde_json::to_string(&err_resp)?;
|
||||
stdout.write_all(resp_str.as_bytes()).await?;
|
||||
stdout.write_all(b"\n").await?;
|
||||
stdout.flush().await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
// Handle read error
|
||||
eprintln!("Error reading from stdin: {}", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -10,8 +10,7 @@ description = "Command-line interface for OWLEN LLM client"
|
||||
|
||||
[features]
|
||||
default = ["chat-client"]
|
||||
chat-client = []
|
||||
code-client = []
|
||||
chat-client = ["owlen-tui"]
|
||||
|
||||
[[bin]]
|
||||
name = "owlen"
|
||||
@@ -21,15 +20,24 @@ required-features = ["chat-client"]
|
||||
[[bin]]
|
||||
name = "owlen-code"
|
||||
path = "src/code_main.rs"
|
||||
required-features = ["code-client"]
|
||||
required-features = ["chat-client"]
|
||||
|
||||
[[bin]]
|
||||
name = "owlen-agent"
|
||||
path = "src/agent_main.rs"
|
||||
required-features = ["chat-client"]
|
||||
|
||||
[dependencies]
|
||||
owlen-core = { path = "../owlen-core" }
|
||||
owlen-tui = { path = "../owlen-tui" }
|
||||
owlen-ollama = { path = "../owlen-ollama" }
|
||||
owlen-providers = { path = "../owlen-providers" }
|
||||
# Optional TUI dependency, enabled by the "chat-client" feature.
|
||||
owlen-tui = { path = "../owlen-tui", optional = true }
|
||||
log = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
|
||||
# CLI framework
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
clap = { workspace = true, features = ["derive"] }
|
||||
|
||||
# Async runtime
|
||||
tokio = { workspace = true }
|
||||
@@ -43,3 +51,10 @@ crossterm = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
regex = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
dirs = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true }
|
||||
tokio-test = { workspace = true }
|
||||
|
||||
31
crates/owlen-cli/build.rs
Normal file
31
crates/owlen-cli/build.rs
Normal file
@@ -0,0 +1,31 @@
|
||||
use std::process::Command;
|
||||
|
||||
fn main() {
|
||||
const MIN_VERSION: (u32, u32, u32) = (1, 75, 0);
|
||||
|
||||
let rustc = std::env::var("RUSTC").unwrap_or_else(|_| "rustc".into());
|
||||
let output = Command::new(&rustc)
|
||||
.arg("--version")
|
||||
.output()
|
||||
.expect("failed to invoke rustc");
|
||||
|
||||
let version_line = String::from_utf8_lossy(&output.stdout);
|
||||
let version_str = version_line.split_whitespace().nth(1).unwrap_or("0.0.0");
|
||||
let sanitized = version_str.split('-').next().unwrap_or(version_str);
|
||||
|
||||
let mut parts = sanitized
|
||||
.split('.')
|
||||
.map(|part| part.parse::<u32>().unwrap_or(0));
|
||||
let current = (
|
||||
parts.next().unwrap_or(0),
|
||||
parts.next().unwrap_or(0),
|
||||
parts.next().unwrap_or(0),
|
||||
);
|
||||
|
||||
if current < MIN_VERSION {
|
||||
panic!(
|
||||
"owlen requires rustc {}.{}.{} or newer (found {version_line})",
|
||||
MIN_VERSION.0, MIN_VERSION.1, MIN_VERSION.2
|
||||
);
|
||||
}
|
||||
}
|
||||
61
crates/owlen-cli/src/agent_main.rs
Normal file
61
crates/owlen-cli/src/agent_main.rs
Normal file
@@ -0,0 +1,61 @@
|
||||
//! Simple entry point for the ReAct agentic executor.
|
||||
//!
|
||||
//! Usage: `owlen-agent "<prompt>" [--model <model>] [--max-iter <n>]`
|
||||
//!
|
||||
//! This binary demonstrates Phase 4 without the full TUI. It creates an
|
||||
//! OllamaProvider, a RemoteMcpClient, runs the AgentExecutor and prints the
|
||||
//! final answer.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use owlen_cli::agent::{AgentConfig, AgentExecutor};
|
||||
use owlen_core::mcp::remote_client::RemoteMcpClient;
|
||||
|
||||
/// Command‑line arguments for the agent binary.
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(
|
||||
name = "owlen-agent",
|
||||
author,
|
||||
version,
|
||||
about = "Run the ReAct agent via MCP"
|
||||
)]
|
||||
struct Args {
|
||||
/// The initial user query.
|
||||
prompt: String,
|
||||
/// Model to use (defaults to Ollama default).
|
||||
#[arg(long)]
|
||||
model: Option<String>,
|
||||
/// Maximum ReAct iterations.
|
||||
#[arg(long, default_value_t = 10)]
|
||||
max_iter: usize,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let args = Args::parse();
|
||||
|
||||
// Initialise the MCP LLM client – it implements Provider and talks to the
|
||||
// MCP LLM server which wraps Ollama. This ensures all communication goes
|
||||
// through the MCP architecture (Phase 10 requirement).
|
||||
let provider = Arc::new(RemoteMcpClient::new()?);
|
||||
|
||||
// The MCP client also serves as the tool client for resource operations
|
||||
let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
|
||||
|
||||
let config = AgentConfig {
|
||||
max_iterations: args.max_iter,
|
||||
model: args.model.unwrap_or_else(|| "llama3.2:latest".to_string()),
|
||||
..AgentConfig::default()
|
||||
};
|
||||
|
||||
let executor = AgentExecutor::new(provider, mcp_client, config);
|
||||
match executor.run(args.prompt).await {
|
||||
Ok(result) => {
|
||||
println!("\n✓ Agent completed in {} iterations", result.iterations);
|
||||
println!("\nFinal answer:\n{}", result.answer);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => Err(anyhow::anyhow!(e)),
|
||||
}
|
||||
}
|
||||
326
crates/owlen-cli/src/bootstrap.rs
Normal file
326
crates/owlen-cli/src/bootstrap.rs
Normal file
@@ -0,0 +1,326 @@
|
||||
use std::borrow::Cow;
|
||||
use std::io;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use async_trait::async_trait;
|
||||
use crossterm::{
|
||||
event::{DisableBracketedPaste, DisableMouseCapture, EnableBracketedPaste, EnableMouseCapture},
|
||||
execute,
|
||||
terminal::{EnterAlternateScreen, LeaveAlternateScreen, disable_raw_mode, enable_raw_mode},
|
||||
};
|
||||
use futures::stream;
|
||||
use owlen_core::{
|
||||
ChatStream, Error, Provider,
|
||||
config::{Config, McpMode},
|
||||
mcp::remote_client::RemoteMcpClient,
|
||||
mode::Mode,
|
||||
provider::ProviderManager,
|
||||
providers::OllamaProvider,
|
||||
session::{ControllerEvent, SessionController},
|
||||
storage::StorageManager,
|
||||
types::{ChatRequest, ChatResponse, Message, ModelInfo},
|
||||
};
|
||||
use owlen_tui::{
|
||||
ChatApp, SessionEvent,
|
||||
app::App as RuntimeApp,
|
||||
config,
|
||||
tui_controller::{TuiController, TuiRequest},
|
||||
ui,
|
||||
};
|
||||
use ratatui::{Terminal, prelude::CrosstermBackend};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::commands::cloud::{load_runtime_credentials, set_env_var};
|
||||
|
||||
pub async fn launch(initial_mode: Mode) -> Result<()> {
|
||||
set_env_var("OWLEN_AUTO_CONSENT", "1");
|
||||
|
||||
let color_support = detect_terminal_color_support();
|
||||
let mut cfg = config::try_load_config().unwrap_or_default();
|
||||
let _ = cfg.refresh_mcp_servers(None);
|
||||
|
||||
if let Some(previous_theme) = apply_terminal_theme(&mut cfg, &color_support) {
|
||||
let term_label = match &color_support {
|
||||
TerminalColorSupport::Limited { term } => Cow::from(term.as_str()),
|
||||
TerminalColorSupport::Full => Cow::from("current terminal"),
|
||||
};
|
||||
eprintln!(
|
||||
"Terminal '{}' lacks full 256-color support. Using '{}' theme instead of '{}'.",
|
||||
term_label, BASIC_THEME_NAME, previous_theme
|
||||
);
|
||||
} else if let TerminalColorSupport::Limited { term } = &color_support {
|
||||
eprintln!(
|
||||
"Warning: terminal '{}' may not fully support 256-color themes.",
|
||||
term
|
||||
);
|
||||
}
|
||||
|
||||
cfg.validate()?;
|
||||
let storage = Arc::new(StorageManager::new().await?);
|
||||
load_runtime_credentials(&mut cfg, storage.clone()).await?;
|
||||
|
||||
let (tui_tx, _tui_rx) = mpsc::unbounded_channel::<TuiRequest>();
|
||||
let tui_controller = Arc::new(TuiController::new(tui_tx));
|
||||
|
||||
let provider = build_provider(&cfg)?;
|
||||
let mut offline_notice: Option<String> = None;
|
||||
let provider = match provider.health_check().await {
|
||||
Ok(_) => provider,
|
||||
Err(err) => {
|
||||
let hint = if matches!(cfg.mcp.mode, McpMode::RemotePreferred | McpMode::RemoteOnly)
|
||||
&& !cfg.effective_mcp_servers().is_empty()
|
||||
{
|
||||
"Ensure the configured MCP server is running and reachable."
|
||||
} else {
|
||||
"Ensure Ollama is running (`ollama serve`) and reachable at the configured base_url."
|
||||
};
|
||||
let notice =
|
||||
format!("Provider health check failed: {err}. {hint} Continuing in offline mode.");
|
||||
eprintln!("{notice}");
|
||||
offline_notice = Some(notice.clone());
|
||||
let fallback_model = cfg
|
||||
.general
|
||||
.default_model
|
||||
.clone()
|
||||
.unwrap_or_else(|| "offline".to_string());
|
||||
Arc::new(OfflineProvider::new(notice, fallback_model)) as Arc<dyn Provider>
|
||||
}
|
||||
};
|
||||
|
||||
let (controller_event_tx, controller_event_rx) = mpsc::unbounded_channel::<ControllerEvent>();
|
||||
let controller = SessionController::new(
|
||||
provider,
|
||||
cfg,
|
||||
storage.clone(),
|
||||
tui_controller,
|
||||
false,
|
||||
Some(controller_event_tx),
|
||||
)
|
||||
.await?;
|
||||
let provider_manager = Arc::new(ProviderManager::default());
|
||||
let mut runtime = RuntimeApp::new(provider_manager);
|
||||
let (mut app, mut session_rx) = ChatApp::new(controller, controller_event_rx).await?;
|
||||
app.initialize_models().await?;
|
||||
if let Some(notice) = offline_notice.clone() {
|
||||
app.set_status_message(¬ice);
|
||||
app.set_system_status(notice);
|
||||
}
|
||||
|
||||
app.set_mode(initial_mode).await;
|
||||
|
||||
enable_raw_mode()?;
|
||||
let mut stdout = io::stdout();
|
||||
execute!(
|
||||
stdout,
|
||||
EnterAlternateScreen,
|
||||
EnableMouseCapture,
|
||||
EnableBracketedPaste
|
||||
)?;
|
||||
let backend = CrosstermBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend)?;
|
||||
|
||||
let result = run_app(&mut terminal, &mut runtime, &mut app, &mut session_rx).await;
|
||||
|
||||
config::save_config(&app.config())?;
|
||||
|
||||
disable_raw_mode()?;
|
||||
execute!(
|
||||
terminal.backend_mut(),
|
||||
LeaveAlternateScreen,
|
||||
DisableMouseCapture,
|
||||
DisableBracketedPaste
|
||||
)?;
|
||||
terminal.show_cursor()?;
|
||||
|
||||
if let Err(err) = result {
|
||||
println!("{err:?}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn build_provider(cfg: &Config) -> Result<Arc<dyn Provider>> {
|
||||
match cfg.mcp.mode {
|
||||
McpMode::RemotePreferred => {
|
||||
let remote_result = if let Some(mcp_server) = cfg.effective_mcp_servers().first() {
|
||||
RemoteMcpClient::new_with_config(mcp_server)
|
||||
} else {
|
||||
RemoteMcpClient::new()
|
||||
};
|
||||
|
||||
match remote_result {
|
||||
Ok(client) => Ok(Arc::new(client) as Arc<dyn Provider>),
|
||||
Err(err) if cfg.mcp.allow_fallback => {
|
||||
log::warn!(
|
||||
"Remote MCP client unavailable ({}); falling back to local provider.",
|
||||
err
|
||||
);
|
||||
build_local_provider(cfg)
|
||||
}
|
||||
Err(err) => Err(anyhow!(err)),
|
||||
}
|
||||
}
|
||||
McpMode::RemoteOnly => {
|
||||
let mcp_server = cfg.effective_mcp_servers().first().ok_or_else(|| {
|
||||
anyhow!("[[mcp_servers]] must be configured when [mcp].mode = \"remote_only\"")
|
||||
})?;
|
||||
let client = RemoteMcpClient::new_with_config(mcp_server)?;
|
||||
Ok(Arc::new(client) as Arc<dyn Provider>)
|
||||
}
|
||||
McpMode::LocalOnly | McpMode::Legacy => build_local_provider(cfg),
|
||||
McpMode::Disabled => Err(anyhow!(
|
||||
"MCP mode 'disabled' is not supported by the owlen TUI"
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_local_provider(cfg: &Config) -> Result<Arc<dyn Provider>> {
|
||||
let provider_name = cfg.general.default_provider.clone();
|
||||
let provider_cfg = cfg.provider(&provider_name).ok_or_else(|| {
|
||||
anyhow!(format!(
|
||||
"No provider configuration found for '{provider_name}' in [providers]"
|
||||
))
|
||||
})?;
|
||||
|
||||
match provider_cfg.provider_type.as_str() {
|
||||
"ollama" | "ollama_cloud" => {
|
||||
let provider = OllamaProvider::from_config(provider_cfg, Some(&cfg.general))?;
|
||||
Ok(Arc::new(provider) as Arc<dyn Provider>)
|
||||
}
|
||||
other => Err(anyhow!(format!(
|
||||
"Provider type '{other}' is not supported in legacy/local MCP mode"
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
const BASIC_THEME_NAME: &str = "ansi_basic";
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
enum TerminalColorSupport {
|
||||
Full,
|
||||
Limited { term: String },
|
||||
}
|
||||
|
||||
fn detect_terminal_color_support() -> TerminalColorSupport {
|
||||
let term = std::env::var("TERM").unwrap_or_else(|_| "unknown".to_string());
|
||||
let colorterm = std::env::var("COLORTERM").unwrap_or_default();
|
||||
let term_lower = term.to_lowercase();
|
||||
let color_lower = colorterm.to_lowercase();
|
||||
|
||||
let supports_extended = term_lower.contains("256color")
|
||||
|| color_lower.contains("truecolor")
|
||||
|| color_lower.contains("24bit")
|
||||
|| color_lower.contains("fullcolor");
|
||||
|
||||
if supports_extended {
|
||||
TerminalColorSupport::Full
|
||||
} else {
|
||||
TerminalColorSupport::Limited { term }
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_terminal_theme(cfg: &mut Config, support: &TerminalColorSupport) -> Option<String> {
|
||||
match support {
|
||||
TerminalColorSupport::Full => None,
|
||||
TerminalColorSupport::Limited { .. } => {
|
||||
if cfg.ui.theme != BASIC_THEME_NAME {
|
||||
let previous = std::mem::replace(&mut cfg.ui.theme, BASIC_THEME_NAME.to_string());
|
||||
Some(previous)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct OfflineProvider {
|
||||
reason: String,
|
||||
placeholder_model: String,
|
||||
}
|
||||
|
||||
impl OfflineProvider {
|
||||
fn new(reason: String, placeholder_model: String) -> Self {
|
||||
Self {
|
||||
reason,
|
||||
placeholder_model,
|
||||
}
|
||||
}
|
||||
|
||||
fn friendly_response(&self, requested_model: &str) -> ChatResponse {
|
||||
let mut message = String::new();
|
||||
message.push_str("⚠️ Owlen is running in offline mode.\n\n");
|
||||
message.push_str(&self.reason);
|
||||
if !requested_model.is_empty() && requested_model != self.placeholder_model {
|
||||
message.push_str(&format!(
|
||||
"\n\nYou requested model '{}', but no providers are reachable.",
|
||||
requested_model
|
||||
));
|
||||
}
|
||||
message.push_str(
|
||||
"\n\nStart your preferred provider (e.g. `ollama serve`) or switch providers with `:provider` once connectivity is restored.",
|
||||
);
|
||||
|
||||
ChatResponse {
|
||||
message: Message::assistant(message),
|
||||
usage: None,
|
||||
is_streaming: false,
|
||||
is_final: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Provider for OfflineProvider {
|
||||
fn name(&self) -> &str {
|
||||
"offline"
|
||||
}
|
||||
|
||||
async fn list_models(&self) -> Result<Vec<ModelInfo>, Error> {
|
||||
Ok(vec![ModelInfo {
|
||||
id: self.placeholder_model.clone(),
|
||||
provider: "offline".to_string(),
|
||||
name: format!("Offline (fallback: {})", self.placeholder_model),
|
||||
description: Some("Placeholder model used while no providers are reachable".into()),
|
||||
context_window: None,
|
||||
capabilities: vec![],
|
||||
supports_tools: false,
|
||||
}])
|
||||
}
|
||||
|
||||
async fn send_prompt(&self, request: ChatRequest) -> Result<ChatResponse, Error> {
|
||||
Ok(self.friendly_response(&request.model))
|
||||
}
|
||||
|
||||
async fn stream_prompt(&self, request: ChatRequest) -> Result<ChatStream, Error> {
|
||||
let response = self.friendly_response(&request.model);
|
||||
Ok(Box::pin(stream::iter(vec![Ok(response)])))
|
||||
}
|
||||
|
||||
async fn health_check(&self) -> Result<(), Error> {
|
||||
Err(Error::Provider(anyhow!(
|
||||
"offline provider cannot reach any backing models"
|
||||
)))
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &(dyn std::any::Any + Send + Sync) {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_app(
|
||||
terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
|
||||
runtime: &mut RuntimeApp,
|
||||
app: &mut ChatApp,
|
||||
session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
|
||||
) -> Result<()> {
|
||||
let mut render = |terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
|
||||
state: &mut ChatApp|
|
||||
-> Result<()> {
|
||||
terminal.draw(|f| ui::render_chat(f, state))?;
|
||||
Ok(())
|
||||
};
|
||||
|
||||
runtime.run(terminal, app, session_rx, &mut render).await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,103 +1,16 @@
|
||||
//! OWLEN Code Mode - TUI client optimized for coding assistance
|
||||
//! Owlen CLI entrypoint optimised for code-first workflows.
|
||||
#![allow(dead_code, unused_imports)]
|
||||
|
||||
mod bootstrap;
|
||||
mod commands;
|
||||
mod mcp;
|
||||
|
||||
use anyhow::Result;
|
||||
use clap::{Arg, Command};
|
||||
use owlen_core::session::SessionController;
|
||||
use owlen_ollama::OllamaProvider;
|
||||
use owlen_tui::{config, ui, AppState, CodeApp, Event, EventHandler, SessionEvent};
|
||||
use std::io;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use owlen_core::config as core_config;
|
||||
use owlen_core::mode::Mode;
|
||||
use owlen_tui::config;
|
||||
|
||||
use crossterm::{
|
||||
event::{DisableMouseCapture, EnableMouseCapture},
|
||||
execute,
|
||||
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
|
||||
};
|
||||
use ratatui::{backend::CrosstermBackend, Terminal};
|
||||
|
||||
#[tokio::main]
|
||||
#[tokio::main(flavor = "multi_thread")]
|
||||
async fn main() -> Result<()> {
|
||||
let matches = Command::new("owlen-code")
|
||||
.about("OWLEN Code Mode - TUI optimized for programming assistance")
|
||||
.version(env!("CARGO_PKG_VERSION"))
|
||||
.arg(
|
||||
Arg::new("model")
|
||||
.short('m')
|
||||
.long("model")
|
||||
.value_name("MODEL")
|
||||
.help("Preferred model to use for this session"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let mut config = config::try_load_config().unwrap_or_default();
|
||||
|
||||
if let Some(model) = matches.get_one::<String>("model") {
|
||||
config.general.default_model = Some(model.clone());
|
||||
}
|
||||
|
||||
let provider_cfg = config::ensure_ollama_config(&mut config).clone();
|
||||
let provider = Arc::new(OllamaProvider::from_config(
|
||||
&provider_cfg,
|
||||
Some(&config.general),
|
||||
)?);
|
||||
|
||||
let controller = SessionController::new(provider, config.clone());
|
||||
let (mut app, mut session_rx) = CodeApp::new(controller);
|
||||
app.inner_mut().initialize_models().await?;
|
||||
|
||||
let cancellation_token = CancellationToken::new();
|
||||
let (event_tx, event_rx) = mpsc::unbounded_channel();
|
||||
let event_handler = EventHandler::new(event_tx, cancellation_token.clone());
|
||||
let event_handle = tokio::spawn(async move { event_handler.run().await });
|
||||
|
||||
enable_raw_mode()?;
|
||||
let mut stdout = io::stdout();
|
||||
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
|
||||
let backend = CrosstermBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend)?;
|
||||
|
||||
let result = run_app(&mut terminal, &mut app, event_rx, &mut session_rx).await;
|
||||
|
||||
cancellation_token.cancel();
|
||||
event_handle.await?;
|
||||
|
||||
config::save_config(app.inner().config())?;
|
||||
|
||||
disable_raw_mode()?;
|
||||
execute!(
|
||||
terminal.backend_mut(),
|
||||
LeaveAlternateScreen,
|
||||
DisableMouseCapture
|
||||
)?;
|
||||
terminal.show_cursor()?;
|
||||
|
||||
if let Err(err) = result {
|
||||
println!("{err:?}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_app(
|
||||
terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
|
||||
app: &mut CodeApp,
|
||||
mut event_rx: mpsc::UnboundedReceiver<Event>,
|
||||
session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
|
||||
) -> Result<()> {
|
||||
loop {
|
||||
terminal.draw(|f| ui::render_chat(f, app.inner_mut()))?;
|
||||
|
||||
tokio::select! {
|
||||
Some(event) = event_rx.recv() => {
|
||||
if let AppState::Quit = app.handle_event(event).await? {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
Some(session_event) = session_rx.recv() => {
|
||||
app.handle_session_event(session_event)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
bootstrap::launch(Mode::Code).await
|
||||
}
|
||||
|
||||
479
crates/owlen-cli/src/commands/cloud.rs
Normal file
479
crates/owlen-cli/src/commands/cloud.rs
Normal file
@@ -0,0 +1,479 @@
|
||||
use std::ffi::OsStr;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{Context, Result, anyhow, bail};
|
||||
use clap::Subcommand;
|
||||
use owlen_core::LlmProvider;
|
||||
use owlen_core::ProviderConfig;
|
||||
use owlen_core::config::{
|
||||
self as core_config, Config, OLLAMA_CLOUD_API_KEY_ENV, OLLAMA_CLOUD_BASE_URL,
|
||||
OLLAMA_CLOUD_ENDPOINT_KEY, OLLAMA_MODE_KEY,
|
||||
};
|
||||
use owlen_core::credentials::{ApiCredentials, CredentialManager, OLLAMA_CLOUD_CREDENTIAL_ID};
|
||||
use owlen_core::encryption;
|
||||
use owlen_core::providers::OllamaProvider;
|
||||
use owlen_core::storage::StorageManager;
|
||||
use serde_json::Value;
|
||||
|
||||
const DEFAULT_CLOUD_ENDPOINT: &str = OLLAMA_CLOUD_BASE_URL;
|
||||
const CLOUD_ENDPOINT_KEY: &str = OLLAMA_CLOUD_ENDPOINT_KEY;
|
||||
const CLOUD_PROVIDER_KEY: &str = "ollama_cloud";
|
||||
|
||||
#[derive(Debug, Subcommand)]
|
||||
pub enum CloudCommand {
|
||||
/// Configure Ollama Cloud credentials
|
||||
Setup {
|
||||
/// API key passed directly on the command line (prompted when omitted)
|
||||
#[arg(long)]
|
||||
api_key: Option<String>,
|
||||
/// Override the cloud endpoint (default: https://ollama.com)
|
||||
#[arg(long)]
|
||||
endpoint: Option<String>,
|
||||
/// Provider name to configure (default: ollama_cloud)
|
||||
#[arg(long, default_value = "ollama_cloud")]
|
||||
provider: String,
|
||||
/// Overwrite the provider base URL with the cloud endpoint
|
||||
#[arg(long)]
|
||||
force_cloud_base_url: bool,
|
||||
},
|
||||
/// Check connectivity to Ollama Cloud
|
||||
Status {
|
||||
/// Provider name to check (default: ollama_cloud)
|
||||
#[arg(long, default_value = "ollama_cloud")]
|
||||
provider: String,
|
||||
},
|
||||
/// List available cloud-hosted models
|
||||
Models {
|
||||
/// Provider name to query (default: ollama_cloud)
|
||||
#[arg(long, default_value = "ollama_cloud")]
|
||||
provider: String,
|
||||
},
|
||||
/// Remove stored Ollama Cloud credentials
|
||||
Logout {
|
||||
/// Provider name to clear (default: ollama_cloud)
|
||||
#[arg(long, default_value = "ollama_cloud")]
|
||||
provider: String,
|
||||
},
|
||||
}
|
||||
|
||||
pub async fn run_cloud_command(command: CloudCommand) -> Result<()> {
|
||||
match command {
|
||||
CloudCommand::Setup {
|
||||
api_key,
|
||||
endpoint,
|
||||
provider,
|
||||
force_cloud_base_url,
|
||||
} => setup(provider, api_key, endpoint, force_cloud_base_url).await,
|
||||
CloudCommand::Status { provider } => status(provider).await,
|
||||
CloudCommand::Models { provider } => models(provider).await,
|
||||
CloudCommand::Logout { provider } => logout(provider).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn setup(
|
||||
provider: String,
|
||||
api_key: Option<String>,
|
||||
endpoint: Option<String>,
|
||||
force_cloud_base_url: bool,
|
||||
) -> Result<()> {
|
||||
let provider = canonical_provider_name(&provider);
|
||||
let mut config = crate::config::try_load_config().unwrap_or_default();
|
||||
let endpoint =
|
||||
normalize_endpoint(&endpoint.unwrap_or_else(|| DEFAULT_CLOUD_ENDPOINT.to_string()));
|
||||
|
||||
let base_changed = {
|
||||
let entry = ensure_provider_entry(&mut config, &provider);
|
||||
entry.enabled = true;
|
||||
configure_cloud_endpoint(entry, &endpoint, force_cloud_base_url)
|
||||
};
|
||||
|
||||
let key = match api_key {
|
||||
Some(value) if !value.trim().is_empty() => value,
|
||||
_ => {
|
||||
let prompt = format!("Enter API key for {provider}: ");
|
||||
encryption::prompt_password(&prompt)?
|
||||
}
|
||||
};
|
||||
|
||||
if config.privacy.encrypt_local_data {
|
||||
let storage = Arc::new(StorageManager::new().await?);
|
||||
let manager = unlock_credential_manager(&config, storage.clone())?;
|
||||
let credentials = ApiCredentials {
|
||||
api_key: key.clone(),
|
||||
endpoint: endpoint.clone(),
|
||||
};
|
||||
manager
|
||||
.store_credentials(OLLAMA_CLOUD_CREDENTIAL_ID, &credentials)
|
||||
.await?;
|
||||
// Ensure plaintext key is not persisted to disk.
|
||||
if let Some(entry) = config.providers.get_mut(&provider) {
|
||||
entry.api_key = None;
|
||||
}
|
||||
} else if let Some(entry) = config.providers.get_mut(&provider) {
|
||||
entry.api_key = Some(key.clone());
|
||||
}
|
||||
|
||||
crate::config::save_config(&config)?;
|
||||
println!("Saved Ollama configuration for provider '{provider}'.");
|
||||
if config.privacy.encrypt_local_data {
|
||||
println!("API key stored securely in the encrypted credential vault.");
|
||||
} else {
|
||||
println!("API key stored in plaintext configuration (encryption disabled).");
|
||||
}
|
||||
if !force_cloud_base_url && !base_changed {
|
||||
println!(
|
||||
"Local base URL preserved; cloud endpoint stored as {}.",
|
||||
CLOUD_ENDPOINT_KEY
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn status(provider: String) -> Result<()> {
|
||||
let provider = canonical_provider_name(&provider);
|
||||
let mut config = crate::config::try_load_config().unwrap_or_default();
|
||||
let storage = Arc::new(StorageManager::new().await?);
|
||||
let manager = if config.privacy.encrypt_local_data {
|
||||
Some(unlock_credential_manager(&config, storage.clone())?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let api_key = hydrate_api_key(&mut config, manager.as_ref()).await?;
|
||||
{
|
||||
let entry = ensure_provider_entry(&mut config, &provider);
|
||||
entry.enabled = true;
|
||||
configure_cloud_endpoint(entry, DEFAULT_CLOUD_ENDPOINT, false);
|
||||
}
|
||||
|
||||
let provider_cfg = config
|
||||
.provider(&provider)
|
||||
.cloned()
|
||||
.ok_or_else(|| anyhow!("Provider '{provider}' is not configured"))?;
|
||||
|
||||
let endpoint =
|
||||
resolve_cloud_endpoint(&provider_cfg).unwrap_or_else(|| DEFAULT_CLOUD_ENDPOINT.to_string());
|
||||
let mut runtime_cfg = provider_cfg.clone();
|
||||
runtime_cfg.base_url = Some(endpoint.clone());
|
||||
runtime_cfg.extra.insert(
|
||||
OLLAMA_MODE_KEY.to_string(),
|
||||
Value::String("cloud".to_string()),
|
||||
);
|
||||
|
||||
let ollama = OllamaProvider::from_config(&runtime_cfg, Some(&config.general))
|
||||
.with_context(|| "Failed to construct Ollama provider. Run `owlen cloud setup` first.")?;
|
||||
|
||||
match ollama.health_check().await {
|
||||
Ok(_) => {
|
||||
println!("✓ Connected to {provider} ({})", endpoint);
|
||||
if api_key.is_none() && config.privacy.encrypt_local_data {
|
||||
println!(
|
||||
"Warning: No API key stored; connection succeeded via environment variables."
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
println!("✗ Failed to reach {provider}: {err}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn models(provider: String) -> Result<()> {
|
||||
let provider = canonical_provider_name(&provider);
|
||||
let mut config = crate::config::try_load_config().unwrap_or_default();
|
||||
let storage = Arc::new(StorageManager::new().await?);
|
||||
let manager = if config.privacy.encrypt_local_data {
|
||||
Some(unlock_credential_manager(&config, storage.clone())?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
hydrate_api_key(&mut config, manager.as_ref()).await?;
|
||||
|
||||
{
|
||||
let entry = ensure_provider_entry(&mut config, &provider);
|
||||
entry.enabled = true;
|
||||
configure_cloud_endpoint(entry, DEFAULT_CLOUD_ENDPOINT, false);
|
||||
}
|
||||
|
||||
let provider_cfg = config
|
||||
.provider(&provider)
|
||||
.cloned()
|
||||
.ok_or_else(|| anyhow!("Provider '{provider}' is not configured"))?;
|
||||
|
||||
let endpoint =
|
||||
resolve_cloud_endpoint(&provider_cfg).unwrap_or_else(|| DEFAULT_CLOUD_ENDPOINT.to_string());
|
||||
let mut runtime_cfg = provider_cfg.clone();
|
||||
runtime_cfg.base_url = Some(endpoint);
|
||||
runtime_cfg.extra.insert(
|
||||
OLLAMA_MODE_KEY.to_string(),
|
||||
Value::String("cloud".to_string()),
|
||||
);
|
||||
|
||||
let ollama = OllamaProvider::from_config(&runtime_cfg, Some(&config.general))
|
||||
.with_context(|| "Failed to construct Ollama provider. Run `owlen cloud setup` first.")?;
|
||||
|
||||
match ollama.list_models().await {
|
||||
Ok(models) => {
|
||||
if models.is_empty() {
|
||||
println!("No cloud models reported by '{}'.", provider);
|
||||
} else {
|
||||
println!("Models available via '{}':", provider);
|
||||
for model in models {
|
||||
if let Some(description) = &model.description {
|
||||
println!(" - {} ({})", model.id, description);
|
||||
} else {
|
||||
println!(" - {}", model.id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
bail!("Failed to list models: {err}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn logout(provider: String) -> Result<()> {
|
||||
let provider = canonical_provider_name(&provider);
|
||||
let mut config = crate::config::try_load_config().unwrap_or_default();
|
||||
let storage = Arc::new(StorageManager::new().await?);
|
||||
|
||||
if config.privacy.encrypt_local_data {
|
||||
let manager = unlock_credential_manager(&config, storage.clone())?;
|
||||
manager
|
||||
.delete_credentials(OLLAMA_CLOUD_CREDENTIAL_ID)
|
||||
.await?;
|
||||
}
|
||||
|
||||
if let Some(entry) = config.providers.get_mut(&provider) {
|
||||
entry.api_key = None;
|
||||
entry.enabled = false;
|
||||
}
|
||||
|
||||
crate::config::save_config(&config)?;
|
||||
println!("Cleared credentials for provider '{provider}'.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn ensure_provider_entry<'a>(config: &'a mut Config, provider: &str) -> &'a mut ProviderConfig {
|
||||
core_config::ensure_provider_config_mut(config, provider)
|
||||
}
|
||||
|
||||
fn configure_cloud_endpoint(entry: &mut ProviderConfig, endpoint: &str, force: bool) -> bool {
|
||||
let normalized = normalize_endpoint(endpoint);
|
||||
let previous_base = entry.base_url.clone();
|
||||
entry.extra.insert(
|
||||
CLOUD_ENDPOINT_KEY.to_string(),
|
||||
Value::String(normalized.clone()),
|
||||
);
|
||||
|
||||
if entry.api_key_env.is_none() {
|
||||
entry.api_key_env = Some(OLLAMA_CLOUD_API_KEY_ENV.to_string());
|
||||
}
|
||||
|
||||
if force
|
||||
|| entry
|
||||
.base_url
|
||||
.as_ref()
|
||||
.map(|value| value.trim().is_empty())
|
||||
.unwrap_or(true)
|
||||
{
|
||||
entry.base_url = Some(normalized.clone());
|
||||
}
|
||||
|
||||
if force {
|
||||
entry.enabled = true;
|
||||
}
|
||||
|
||||
entry.base_url != previous_base
|
||||
}
|
||||
|
||||
fn resolve_cloud_endpoint(cfg: &ProviderConfig) -> Option<String> {
|
||||
if let Some(value) = cfg
|
||||
.extra
|
||||
.get(CLOUD_ENDPOINT_KEY)
|
||||
.and_then(|value| value.as_str())
|
||||
.map(normalize_endpoint)
|
||||
{
|
||||
return Some(value);
|
||||
}
|
||||
|
||||
cfg.base_url
|
||||
.as_ref()
|
||||
.map(|value| value.trim_end_matches('/').to_string())
|
||||
.filter(|value| !value.is_empty())
|
||||
}
|
||||
|
||||
fn normalize_endpoint(endpoint: &str) -> String {
|
||||
let trimmed = endpoint.trim().trim_end_matches('/');
|
||||
if trimmed.is_empty() {
|
||||
DEFAULT_CLOUD_ENDPOINT.to_string()
|
||||
} else {
|
||||
trimmed.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
fn canonical_provider_name(provider: &str) -> String {
|
||||
let normalized = provider.trim().to_ascii_lowercase().replace('-', "_");
|
||||
match normalized.as_str() {
|
||||
"" => CLOUD_PROVIDER_KEY.to_string(),
|
||||
"ollama" => CLOUD_PROVIDER_KEY.to_string(),
|
||||
"ollama_cloud" => CLOUD_PROVIDER_KEY.to_string(),
|
||||
value => value.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set a process-wide environment variable.
///
/// Accepts anything convertible to `OsStr` for both key and value.
pub(crate) fn set_env_var<K: AsRef<OsStr>, V: AsRef<OsStr>>(key: K, value: V) {
    // Safety: the CLI updates process-wide environment variables during startup while no
    // other threads are mutating the environment.
    unsafe {
        std::env::set_var(key, value);
    }
}
|
||||
|
||||
fn set_env_if_missing(var: &str, value: &str) {
|
||||
if std::env::var(var)
|
||||
.map(|v| v.trim().is_empty())
|
||||
.unwrap_or(true)
|
||||
{
|
||||
set_env_var(var, value);
|
||||
}
|
||||
}
|
||||
|
||||
fn unlock_credential_manager(
|
||||
config: &Config,
|
||||
storage: Arc<StorageManager>,
|
||||
) -> Result<Arc<CredentialManager>> {
|
||||
if !config.privacy.encrypt_local_data {
|
||||
bail!("Credential manager requested but encryption is disabled");
|
||||
}
|
||||
|
||||
let secure_path = vault_path(&storage)?;
|
||||
let handle = unlock_vault(&secure_path)?;
|
||||
let master_key = Arc::new(handle.data.master_key.clone());
|
||||
Ok(Arc::new(CredentialManager::new(
|
||||
storage,
|
||||
master_key.clone(),
|
||||
)))
|
||||
}
|
||||
|
||||
fn vault_path(storage: &StorageManager) -> Result<PathBuf> {
|
||||
let base_dir = storage
|
||||
.database_path()
|
||||
.parent()
|
||||
.map(|p| p.to_path_buf())
|
||||
.or_else(dirs::data_local_dir)
|
||||
.unwrap_or_else(|| PathBuf::from("."));
|
||||
Ok(base_dir.join("encrypted_data.json"))
|
||||
}
|
||||
|
||||
/// Unlock (or create) the encrypted vault at `path`.
///
/// Existing vault: try `OWLEN_MASTER_PASSWORD` first; otherwise prompt for the
/// master password, allowing up to three attempts. A successful interactive
/// unlock caches the password in `OWLEN_MASTER_PASSWORD` for the session.
///
/// Missing vault: create it interactively, then offer to cache a master
/// password in the environment when none is set yet.
fn unlock_vault(path: &Path) -> Result<encryption::VaultHandle> {
    use std::env;

    if path.exists() {
        // Non-interactive path: a non-blank OWLEN_MASTER_PASSWORD wins.
        if let Some(password) = env::var("OWLEN_MASTER_PASSWORD")
            .ok()
            .map(|value| value.trim().to_string())
            .filter(|password| !password.is_empty())
        {
            return encryption::unlock_with_password(path.to_path_buf(), &password)
                .context("Failed to unlock vault with OWLEN_MASTER_PASSWORD");
        }

        // Interactive path: up to three password attempts.
        for attempt in 0..3 {
            let password = encryption::prompt_password("Enter master password: ")?;
            match encryption::unlock_with_password(path.to_path_buf(), &password) {
                Ok(handle) => {
                    // Cache the working password so later commands in this
                    // process can unlock without prompting again.
                    set_env_var("OWLEN_MASTER_PASSWORD", password);
                    return Ok(handle);
                }
                Err(err) => {
                    eprintln!("Failed to unlock vault: {err}");
                    // Propagate the final failure instead of retrying forever.
                    if attempt == 2 {
                        return Err(err);
                    }
                }
            }
        }

        bail!("Unable to unlock encrypted credential vault");
    }

    // No vault yet: create one interactively.
    let handle = encryption::unlock_interactive(path.to_path_buf())?;
    // Offer to cache a session password when the env var is unset or blank.
    if env::var("OWLEN_MASTER_PASSWORD")
        .map(|v| v.trim().is_empty())
        .unwrap_or(true)
    {
        let password = encryption::prompt_password("Cache master password for this session: ")?;
        set_env_var("OWLEN_MASTER_PASSWORD", password);
    }
    Ok(handle)
}
|
||||
|
||||
/// Load the cloud API key into the environment and configuration.
///
/// Sources, in priority order:
/// 1. the encrypted credential store (when a manager is supplied), which also
///    refreshes the cloud endpoint on the provider entry;
/// 2. the inline `api_key` on the cloud provider's configuration.
///
/// Environment variables are only set when currently unset/blank, so explicit
/// user overrides are preserved. Returns the key that was found, if any.
async fn hydrate_api_key(
    config: &mut Config,
    manager: Option<&Arc<CredentialManager>>,
) -> Result<Option<String>> {
    let credentials = match manager {
        Some(manager) => manager.get_credentials(OLLAMA_CLOUD_CREDENTIAL_ID).await?,
        None => None,
    };

    if let Some(credentials) = credentials {
        let key = credentials.api_key.trim().to_string();
        if !key.is_empty() {
            set_env_if_missing("OLLAMA_API_KEY", &key);
            set_env_if_missing("OLLAMA_CLOUD_API_KEY", &key);
        }

        // Keep the configured endpoint in sync with the stored credentials
        // (non-forcing: a user-set base_url is respected).
        let cfg = core_config::ensure_provider_config_mut(config, CLOUD_PROVIDER_KEY);
        configure_cloud_endpoint(cfg, &credentials.endpoint, false);
        // NOTE(review): returns Some(key) even when the stored key trims to
        // empty — confirm callers treat Some("") as "no usable key".
        return Ok(Some(key));
    }

    // Fallback: a non-blank inline key from the configuration file.
    if let Some(key) = config
        .provider(CLOUD_PROVIDER_KEY)
        .and_then(|cfg| cfg.api_key.as_ref())
        .map(|value| value.trim())
        .filter(|value| !value.is_empty())
    {
        set_env_if_missing("OLLAMA_API_KEY", key);
        set_env_if_missing("OLLAMA_CLOUD_API_KEY", key);
        return Ok(Some(key.to_string()));
    }
    Ok(None)
}
|
||||
|
||||
pub async fn load_runtime_credentials(
|
||||
config: &mut Config,
|
||||
storage: Arc<StorageManager>,
|
||||
) -> Result<()> {
|
||||
if config.privacy.encrypt_local_data {
|
||||
let manager = unlock_credential_manager(config, storage.clone())?;
|
||||
hydrate_api_key(config, Some(&manager)).await?;
|
||||
} else {
|
||||
hydrate_api_key(config, None).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // All recognised aliases (case, hyphenation, empty input) must collapse
    // to the canonical cloud provider key.
    #[test]
    fn canonicalises_provider_names() {
        assert_eq!(canonical_provider_name("OLLAMA_CLOUD"), CLOUD_PROVIDER_KEY);
        assert_eq!(canonical_provider_name(" ollama-cloud"), CLOUD_PROVIDER_KEY);
        assert_eq!(canonical_provider_name(""), CLOUD_PROVIDER_KEY);
    }
}
|
||||
4
crates/owlen-cli/src/commands/mod.rs
Normal file
4
crates/owlen-cli/src/commands/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
//! Command implementations for the `owlen` CLI.

/// Ollama Cloud credential management (`owlen cloud …`).
pub mod cloud;
/// Provider and model management (`owlen providers …`, `owlen models`).
pub mod providers;
|
||||
651
crates/owlen-cli/src/commands/providers.rs
Normal file
651
crates/owlen-cli/src/commands/providers.rs
Normal file
@@ -0,0 +1,651 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use clap::{Args, Subcommand};
|
||||
use owlen_core::ProviderConfig;
|
||||
use owlen_core::config::{self as core_config, Config};
|
||||
use owlen_core::provider::{
|
||||
AnnotatedModelInfo, ModelProvider, ProviderManager, ProviderStatus, ProviderType,
|
||||
};
|
||||
use owlen_core::storage::StorageManager;
|
||||
use owlen_providers::ollama::{OllamaCloudProvider, OllamaLocalProvider};
|
||||
use owlen_tui::config as tui_config;
|
||||
|
||||
use super::cloud;
|
||||
|
||||
/// CLI subcommands for provider management (`owlen providers …`).
#[derive(Debug, Subcommand)]
pub enum ProvidersCommand {
    /// List configured providers and their metadata.
    List,
    /// Run health checks against providers.
    Status {
        /// Optional provider identifier to check.
        #[arg(value_name = "PROVIDER")]
        provider: Option<String>,
    },
    /// Enable a provider in the configuration.
    Enable {
        /// Provider identifier to enable.
        provider: String,
    },
    /// Disable a provider in the configuration.
    Disable {
        /// Provider identifier to disable.
        provider: String,
    },
}
|
||||
|
||||
/// Arguments for the `owlen models` command.
#[derive(Debug, Default, Args)]
pub struct ModelsArgs {
    /// Restrict output to a specific provider.
    #[arg(long)]
    pub provider: Option<String>,
}
|
||||
|
||||
/// Dispatch an `owlen providers` subcommand to its implementation.
pub async fn run_providers_command(command: ProvidersCommand) -> Result<()> {
    match command {
        ProvidersCommand::List => list_providers(),
        ProvidersCommand::Status { provider } => status_providers(provider.as_deref()).await,
        ProvidersCommand::Enable { provider } => toggle_provider(&provider, true),
        ProvidersCommand::Disable { provider } => toggle_provider(&provider, false),
    }
}
|
||||
|
||||
/// Entry point for `owlen models`: list models, optionally for one provider.
pub async fn run_models_command(args: ModelsArgs) -> Result<()> {
    list_models(args.provider.as_deref()).await
}
|
||||
|
||||
fn list_providers() -> Result<()> {
|
||||
let config = tui_config::try_load_config().unwrap_or_default();
|
||||
let default_provider = canonical_provider_id(&config.general.default_provider);
|
||||
|
||||
let mut rows = Vec::new();
|
||||
for (id, cfg) in &config.providers {
|
||||
let type_label = describe_provider_type(id, cfg);
|
||||
let auth_label = describe_auth(cfg, requires_auth(id, cfg));
|
||||
let enabled = if cfg.enabled { "yes" } else { "no" };
|
||||
let default = if id == &default_provider { "*" } else { "" };
|
||||
let base = cfg
|
||||
.base_url
|
||||
.as_ref()
|
||||
.map(|value| value.trim().to_string())
|
||||
.unwrap_or_else(|| "-".to_string());
|
||||
|
||||
rows.push(ProviderListRow {
|
||||
id: id.to_string(),
|
||||
type_label,
|
||||
enabled: enabled.to_string(),
|
||||
default: default.to_string(),
|
||||
auth: auth_label,
|
||||
base_url: base,
|
||||
});
|
||||
}
|
||||
|
||||
rows.sort_by(|a, b| a.id.cmp(&b.id));
|
||||
|
||||
let id_width = rows
|
||||
.iter()
|
||||
.map(|row| row.id.len())
|
||||
.max()
|
||||
.unwrap_or(8)
|
||||
.max("Provider".len());
|
||||
let enabled_width = rows
|
||||
.iter()
|
||||
.map(|row| row.enabled.len())
|
||||
.max()
|
||||
.unwrap_or(7)
|
||||
.max("Enabled".len());
|
||||
let default_width = rows
|
||||
.iter()
|
||||
.map(|row| row.default.len())
|
||||
.max()
|
||||
.unwrap_or(7)
|
||||
.max("Default".len());
|
||||
let type_width = rows
|
||||
.iter()
|
||||
.map(|row| row.type_label.len())
|
||||
.max()
|
||||
.unwrap_or(4)
|
||||
.max("Type".len());
|
||||
let auth_width = rows
|
||||
.iter()
|
||||
.map(|row| row.auth.len())
|
||||
.max()
|
||||
.unwrap_or(4)
|
||||
.max("Auth".len());
|
||||
|
||||
println!(
|
||||
"{:<id_width$} {:<enabled_width$} {:<default_width$} {:<type_width$} {:<auth_width$} Base URL",
|
||||
"Provider",
|
||||
"Enabled",
|
||||
"Default",
|
||||
"Type",
|
||||
"Auth",
|
||||
id_width = id_width,
|
||||
enabled_width = enabled_width,
|
||||
default_width = default_width,
|
||||
type_width = type_width,
|
||||
auth_width = auth_width,
|
||||
);
|
||||
|
||||
for row in rows {
|
||||
println!(
|
||||
"{:<id_width$} {:<enabled_width$} {:<default_width$} {:<type_width$} {:<auth_width$} {}",
|
||||
row.id,
|
||||
row.enabled,
|
||||
row.default,
|
||||
row.type_label,
|
||||
row.auth,
|
||||
row.base_url,
|
||||
id_width = id_width,
|
||||
enabled_width = enabled_width,
|
||||
default_width = default_width,
|
||||
type_width = type_width,
|
||||
auth_width = auth_width,
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Run health checks against providers (`owlen providers status [PROVIDER]`).
///
/// Loads the config, validates the optional filter, hydrates credentials
/// (may prompt for the vault password), registers every enabled provider,
/// refreshes their health, and prints one status row per provider.
async fn status_providers(filter: Option<&str>) -> Result<()> {
    let mut config = tui_config::try_load_config().unwrap_or_default();
    let filter = filter.map(canonical_provider_id);
    verify_provider_filter(&config, filter.as_deref())?;

    let storage = Arc::new(StorageManager::new().await?);
    cloud::load_runtime_credentials(&mut config, storage.clone()).await?;

    let manager = ProviderManager::new(&config);
    let records = register_enabled_providers(&manager, &config, filter.as_deref()).await?;
    // Health is probed only for providers that were actually registered.
    let health = manager.refresh_health().await;

    let mut rows = Vec::new();
    for record in records {
        let status = health.get(&record.id).copied();
        rows.push(ProviderStatusRow::from_record(record, status));
    }

    rows.sort_by(|a, b| a.id.cmp(&b.id));
    print_status_rows(&rows);
    Ok(())
}
|
||||
|
||||
/// List the models exposed by configured providers (`owlen models`).
///
/// Mirrors `status_providers` setup (config, filter check, credentials,
/// provider registration), then queries every registered provider for its
/// models and prints them grouped per provider.
async fn list_models(filter: Option<&str>) -> Result<()> {
    let mut config = tui_config::try_load_config().unwrap_or_default();
    let filter = filter.map(canonical_provider_id);
    verify_provider_filter(&config, filter.as_deref())?;

    let storage = Arc::new(StorageManager::new().await?);
    cloud::load_runtime_credentials(&mut config, storage.clone()).await?;

    let manager = ProviderManager::new(&config);
    let records = register_enabled_providers(&manager, &config, filter.as_deref()).await?;
    let models = manager
        .list_all_models()
        .await
        .map_err(|err| anyhow!(err))?;
    let statuses = manager.provider_statuses().await;

    print_models(records, models, statuses);
    Ok(())
}
|
||||
|
||||
fn verify_provider_filter(config: &Config, filter: Option<&str>) -> Result<()> {
|
||||
if let Some(filter) = filter
|
||||
&& !config.providers.contains_key(filter)
|
||||
{
|
||||
return Err(anyhow!(
|
||||
"Provider '{}' is not defined in configuration.",
|
||||
filter
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Enable or disable a provider (`owlen providers enable|disable NAME`).
///
/// Snapshots the pieces of config it may mutate (default provider, the
/// `ollama_local` enabled flag, the target's enabled flag) so the whole
/// change can be rolled back if the resulting config fails validation.
/// Disabling the current default provider promotes a fallback, enabling
/// `ollama_local` as a last resort.
fn toggle_provider(provider: &str, enable: bool) -> Result<()> {
    let mut config = tui_config::try_load_config().unwrap_or_default();
    let canonical = canonical_provider_id(provider);
    if canonical.is_empty() {
        return Err(anyhow!("Provider name cannot be empty."));
    }

    // Snapshot state that the steps below may mutate, for rollback on
    // validation failure.
    let previous_default = config.general.default_provider.clone();
    let previous_fallback_enabled = config.providers.get("ollama_local").map(|cfg| cfg.enabled);

    let previous_enabled;
    {
        // Scoped so the mutable borrow of `config` ends before the next step.
        let entry = core_config::ensure_provider_config_mut(&mut config, &canonical);
        previous_enabled = entry.enabled;
        // No-op request: report and leave the config untouched.
        if previous_enabled == enable {
            println!(
                "Provider '{}' is already {}.",
                canonical,
                if enable { "enabled" } else { "disabled" }
            );
            return Ok(());
        }
        entry.enabled = enable;
    }

    // Disabling the default provider requires choosing a replacement default.
    if !enable && config.general.default_provider == canonical {
        if let Some(candidate) = choose_fallback_provider(&config, &canonical) {
            config.general.default_provider = candidate.clone();
            println!(
                "Default provider set to '{}' because '{}' was disabled.",
                candidate, canonical
            );
        } else {
            // No enabled provider remains: force-enable the local one.
            let entry = core_config::ensure_provider_config_mut(&mut config, "ollama_local");
            entry.enabled = true;
            config.general.default_provider = "ollama_local".to_string();
            println!(
                "Enabled 'ollama_local' and made it default because no other providers are active."
            );
        }
    }

    // Validate the mutated config; on failure restore every snapshot taken
    // above so the on-disk config is never left invalid.
    if let Err(err) = config.validate() {
        {
            let entry = core_config::ensure_provider_config_mut(&mut config, &canonical);
            entry.enabled = previous_enabled;
        }
        config.general.default_provider = previous_default;
        if let Some(enabled) = previous_fallback_enabled
            && let Some(entry) = config.providers.get_mut("ollama_local")
        {
            entry.enabled = enabled;
        }
        return Err(anyhow!(err));
    }

    tui_config::save_config(&config).map_err(|err| anyhow!(err))?;

    println!(
        "{} provider '{}'.",
        if enable { "Enabled" } else { "Disabled" },
        canonical
    );
    Ok(())
}
|
||||
|
||||
fn choose_fallback_provider(config: &Config, exclude: &str) -> Option<String> {
|
||||
if exclude != "ollama_local"
|
||||
&& let Some(cfg) = config.providers.get("ollama_local")
|
||||
&& cfg.enabled
|
||||
{
|
||||
return Some("ollama_local".to_string());
|
||||
}
|
||||
|
||||
let mut candidates: Vec<String> = config
|
||||
.providers
|
||||
.iter()
|
||||
.filter(|(id, cfg)| cfg.enabled && id.as_str() != exclude)
|
||||
.map(|(id, _)| id.clone())
|
||||
.collect();
|
||||
candidates.sort();
|
||||
candidates.into_iter().next()
|
||||
}
|
||||
|
||||
/// Build a `ProviderRecord` per configured provider and register the enabled
/// ones with `manager`.
///
/// Disabled providers get a record but are not instantiated; instantiation
/// failures are captured in `registration_error` rather than aborting the
/// whole listing. Records are returned sorted by provider id.
async fn register_enabled_providers(
    manager: &ProviderManager,
    config: &Config,
    filter: Option<&str>,
) -> Result<Vec<ProviderRecord>> {
    let default_provider = canonical_provider_id(&config.general.default_provider);
    let mut records = Vec::new();

    for (id, cfg) in &config.providers {
        // Honour the optional provider filter.
        if let Some(filter) = filter
            && id != filter
        {
            continue;
        }

        let mut record = ProviderRecord::from_config(id, cfg, id == &default_provider);
        // Disabled providers are listed but never instantiated.
        if !cfg.enabled {
            records.push(record);
            continue;
        }

        match instantiate_provider(id, cfg) {
            Ok(provider) => {
                // Prefer the provider's own metadata over config-derived guesses.
                let metadata = provider.metadata().clone();
                record.provider_type_label = provider_type_label(metadata.provider_type);
                record.requires_auth = metadata.requires_auth;
                record.metadata = Some(metadata);
                manager.register_provider(provider).await;
            }
            Err(err) => {
                // Keep the record; the error is surfaced in the output instead.
                record.registration_error = Some(err.to_string());
            }
        }

        records.push(record);
    }

    records.sort_by(|a, b| a.id.cmp(&b.id));
    Ok(records)
}
|
||||
|
||||
/// Construct a concrete provider from its configuration entry.
///
/// Dispatches on the (case-insensitive) `provider_type`, with the well-known
/// ids `ollama_local` / `ollama_cloud` accepted as fallbacks when the type
/// field is missing or different. Any other type is rejected with an error.
fn instantiate_provider(id: &str, cfg: &ProviderConfig) -> Result<Arc<dyn ModelProvider>> {
    let kind = cfg.provider_type.trim().to_ascii_lowercase();
    if kind == "ollama" || id == "ollama_local" {
        let provider = OllamaLocalProvider::new(cfg.base_url.clone(), None, None)
            .map_err(|err| anyhow!(err))?;
        Ok(Arc::new(provider))
    } else if kind == "ollama_cloud" || id == "ollama_cloud" {
        let provider = OllamaCloudProvider::new(cfg.base_url.clone(), cfg.api_key.clone(), None)
            .map_err(|err| anyhow!(err))?;
        Ok(Arc::new(provider))
    } else {
        Err(anyhow!(
            "Provider '{}' uses unsupported type '{}'.",
            id,
            // Show "unknown" instead of an empty string in the error message.
            if kind.is_empty() {
                "unknown"
            } else {
                kind.as_str()
            }
        ))
    }
}
|
||||
|
||||
fn describe_provider_type(id: &str, cfg: &ProviderConfig) -> String {
|
||||
if cfg.provider_type.trim().eq_ignore_ascii_case("ollama") || id.ends_with("_local") {
|
||||
"Local".to_string()
|
||||
} else if cfg
|
||||
.provider_type
|
||||
.trim()
|
||||
.eq_ignore_ascii_case("ollama_cloud")
|
||||
|| id.contains("cloud")
|
||||
{
|
||||
"Cloud".to_string()
|
||||
} else {
|
||||
"Custom".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
fn requires_auth(id: &str, cfg: &ProviderConfig) -> bool {
|
||||
cfg.api_key.is_some()
|
||||
|| cfg.api_key_env.is_some()
|
||||
|| matches!(id, "ollama_cloud" | "openai" | "anthropic")
|
||||
}
|
||||
|
||||
fn describe_auth(cfg: &ProviderConfig, required: bool) -> String {
|
||||
if let Some(env) = cfg
|
||||
.api_key_env
|
||||
.as_ref()
|
||||
.map(|value| value.trim())
|
||||
.filter(|value| !value.is_empty())
|
||||
{
|
||||
format!("env:{env}")
|
||||
} else if cfg
|
||||
.api_key
|
||||
.as_ref()
|
||||
.map(|value| !value.trim().is_empty())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
"config".to_string()
|
||||
} else if required {
|
||||
"required".to_string()
|
||||
} else {
|
||||
"-".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalize a raw provider id: trim, lower-case, map the known Ollama
/// aliases to their canonical keys, and fold hyphens to underscores.
/// An effectively empty input yields the empty string.
fn canonical_provider_id(raw: &str) -> String {
    let normalized = raw.trim().to_ascii_lowercase();
    if normalized.is_empty() {
        return normalized;
    }

    if normalized == "ollama" || normalized == "ollama-local" {
        "ollama_local".to_string()
    } else if normalized == "ollama_cloud" || normalized == "ollama-cloud" {
        "ollama_cloud".to_string()
    } else {
        normalized.replace('-', "_")
    }
}
|
||||
|
||||
/// Display label for a provider's self-reported `ProviderType`.
fn provider_type_label(provider_type: ProviderType) -> String {
    match provider_type {
        ProviderType::Local => "Local".to_string(),
        ProviderType::Cloud => "Cloud".to_string(),
    }
}
|
||||
|
||||
/// (indicator, label) pair used when rendering a provider health status.
fn provider_status_strings(status: ProviderStatus) -> (&'static str, &'static str) {
    match status {
        ProviderStatus::Available => ("OK", "available"),
        ProviderStatus::Unavailable => ("ERR", "unavailable"),
        ProviderStatus::RequiresSetup => ("SETUP", "requires setup"),
    }
}
|
||||
|
||||
/// Render the `owlen providers status` table.
///
/// Column widths are sized to the longest cell, never narrower than the
/// header; the State column holds "INDICATOR label" (hence the `+ 1` for the
/// separating space).
fn print_status_rows(rows: &[ProviderStatusRow]) {
    let id_width = rows
        .iter()
        .map(|row| row.id.len())
        .max()
        .unwrap_or(8)
        .max("Provider".len());
    let type_width = rows
        .iter()
        .map(|row| row.provider_type.len())
        .max()
        .unwrap_or(4)
        .max("Type".len());
    // Width of the combined "indicator label" cell.
    let status_width = rows
        .iter()
        .map(|row| row.indicator.len() + 1 + row.status_label.len())
        .max()
        .unwrap_or(6)
        .max("State".len());

    println!(
        "{:<id_width$} {:<4} {:<type_width$} {:<status_width$} Details",
        "Provider",
        "Def",
        "Type",
        "State",
        id_width = id_width,
        type_width = type_width,
        status_width = status_width,
    );

    for row in rows {
        // "*" marks the default provider, "-" everything else.
        let def = if row.default_provider { "*" } else { "-" };
        let details = row.detail.as_deref().unwrap_or("-");
        println!(
            "{:<id_width$} {:<4} {:<type_width$} {:<status_width$} {}",
            row.id,
            def,
            row.provider_type,
            format!("{} {}", row.indicator, row.status_label),
            details,
            id_width = id_width,
            type_width = type_width,
            status_width = status_width,
        );
    }
}
|
||||
|
||||
/// Render the `owlen models` output: one section per provider with its
/// models listed underneath.
///
/// A provider with registered metadata but no health entry is shown as
/// unavailable; disabled providers and registration failures short-circuit
/// their section with an explanatory line instead of a model list.
fn print_models(
    records: Vec<ProviderRecord>,
    models: Vec<AnnotatedModelInfo>,
    statuses: HashMap<String, ProviderStatus>,
) {
    // Group the flat model list by owning provider id.
    let mut grouped: HashMap<String, Vec<AnnotatedModelInfo>> = HashMap::new();
    for info in models {
        grouped
            .entry(info.provider_id.clone())
            .or_default()
            .push(info);
    }

    for record in records {
        // Missing health entry for a successfully registered, enabled provider
        // is treated as "unavailable" rather than unknown.
        let status = statuses.get(&record.id).copied().or_else(|| {
            if record.metadata.is_some() && record.registration_error.is_none() && record.enabled {
                Some(ProviderStatus::Unavailable)
            } else {
                None
            }
        });

        // Header line pieces: disabled and broken providers override the
        // health-derived indicator.
        let (indicator, label, status_value) = if !record.enabled {
            ("-", "disabled", None)
        } else if record.registration_error.is_some() {
            ("ERR", "error", None)
        } else if let Some(status) = status {
            let (indicator, label) = provider_status_strings(status);
            (indicator, label, Some(status))
        } else {
            ("?", "unknown", None)
        };

        let title = if record.default_provider {
            format!("{} (default)", record.id)
        } else {
            record.id.clone()
        };
        println!(
            "{} {} [{}] {}",
            indicator, title, record.provider_type_label, label
        );

        // Registration failures: show the error and skip the model list.
        if let Some(err) = &record.registration_error {
            println!(" error: {}", err);
            println!();
            continue;
        }

        if !record.enabled {
            println!(" provider disabled");
            println!();
            continue;
        }

        if let Some(entries) = grouped.get(&record.id) {
            // Clone so the shared map stays intact while this copy is sorted.
            let mut entries = entries.clone();
            entries.sort_by(|a, b| a.model.name.cmp(&b.model.name));
            if entries.is_empty() {
                println!(" (no models reported)");
            } else {
                for entry in entries {
                    let mut line = format!(" - {}", entry.model.name);
                    // Append a non-blank description after an em dash.
                    if let Some(description) = &entry.model.description
                        && !description.trim().is_empty()
                    {
                        line.push_str(&format!(" — {}", description.trim()));
                    }
                    println!("{}", line);
                }
            }
        } else {
            println!(" (no models reported)");
        }

        // Hint at missing credentials when setup is the blocker.
        if let Some(ProviderStatus::RequiresSetup) = status_value
            && record.requires_auth
        {
            println!(" configure provider credentials or API key");
        }
        println!();
    }
}
|
||||
|
||||
/// One pre-rendered row of the `owlen providers list` table.
struct ProviderListRow {
    /// Provider identifier (the key in `config.providers`).
    id: String,
    /// Category label from `describe_provider_type` ("Local"/"Cloud"/"Custom").
    type_label: String,
    /// "yes" / "no" rendering of the enabled flag.
    enabled: String,
    /// "*" when this is the default provider, otherwise empty.
    default: String,
    /// Credential source from `describe_auth` (e.g. "env:NAME", "config").
    auth: String,
    /// Trimmed base URL, or "-" when none is configured.
    base_url: String,
}
|
||||
|
||||
/// Per-provider bookkeeping gathered while registering providers for the
/// status/models commands.
struct ProviderRecord {
    /// Provider identifier.
    id: String,
    /// Enabled flag from configuration.
    enabled: bool,
    /// Whether this provider is the configured default.
    default_provider: bool,
    /// Display label; config-derived initially, replaced by the provider's
    /// own metadata once it is instantiated.
    provider_type_label: String,
    /// Whether credentials are expected (config-derived, then metadata).
    requires_auth: bool,
    /// Error message when the provider could not be instantiated.
    registration_error: Option<String>,
    /// Metadata reported by the instantiated provider, when registration
    /// succeeded.
    metadata: Option<owlen_core::provider::ProviderMetadata>,
}
|
||||
|
||||
impl ProviderRecord {
    /// Build the config-only view of a provider; metadata fields start empty
    /// and are filled in after successful instantiation.
    fn from_config(id: &str, cfg: &ProviderConfig, default_provider: bool) -> Self {
        Self {
            id: id.to_string(),
            enabled: cfg.enabled,
            default_provider,
            provider_type_label: describe_provider_type(id, cfg),
            requires_auth: requires_auth(id, cfg),
            registration_error: None,
            metadata: None,
        }
    }
}
|
||||
|
||||
/// One pre-rendered row of the `owlen providers status` table.
struct ProviderStatusRow {
    /// Provider identifier.
    id: String,
    /// Display label for the provider category.
    provider_type: String,
    /// Whether this provider is the configured default.
    default_provider: bool,
    /// Short status tag ("OK", "ERR", "SETUP", "-", "?").
    indicator: String,
    /// Longer status word shown next to the indicator.
    status_label: String,
    /// Optional extra detail (error text or setup hint).
    detail: Option<String>,
}
|
||||
|
||||
impl ProviderStatusRow {
    /// Convert a `ProviderRecord` plus its optional health status into a
    /// display row. Precedence: disabled > registration error > health
    /// status > unknown.
    fn from_record(record: ProviderRecord, status: Option<ProviderStatus>) -> Self {
        // Disabled providers never show health information.
        if !record.enabled {
            return Self {
                id: record.id,
                provider_type: record.provider_type_label,
                default_provider: record.default_provider,
                indicator: "-".to_string(),
                status_label: "disabled".to_string(),
                detail: None,
            };
        }

        // Instantiation failures carry the error text into the Details column.
        if let Some(err) = record.registration_error {
            return Self {
                id: record.id,
                provider_type: record.provider_type_label,
                default_provider: record.default_provider,
                indicator: "ERR".to_string(),
                status_label: "error".to_string(),
                detail: Some(err),
            };
        }

        if let Some(status) = status {
            let (indicator, label) = provider_status_strings(status);
            return Self {
                id: record.id,
                provider_type: record.provider_type_label,
                default_provider: record.default_provider,
                indicator: indicator.to_string(),
                status_label: label.to_string(),
                // Hint at missing credentials when setup is what's blocking.
                detail: if matches!(status, ProviderStatus::RequiresSetup) && record.requires_auth {
                    Some("credentials required".to_string())
                } else {
                    None
                },
            };
        }

        // Enabled, registered, but no health entry: status is unknown.
        Self {
            id: record.id,
            provider_type: record.provider_type_label,
            default_provider: record.default_provider,
            indicator: "?".to_string(),
            status_label: "unknown".to_string(),
            detail: None,
        }
    }
}
|
||||
8
crates/owlen-cli/src/lib.rs
Normal file
8
crates/owlen-cli/src/lib.rs
Normal file
@@ -0,0 +1,8 @@
|
||||
//! Library portion of the `owlen-cli` crate.
|
||||
//!
|
||||
//! It currently only re‑exports the `agent` module used by the standalone
|
||||
//! `owlen-agent` binary. Additional shared functionality can be added here in
|
||||
//! the future.
|
||||
|
||||
// Re-export agent module from owlen-core
|
||||
pub use owlen_core::agent;
|
||||
@@ -1,124 +1,228 @@
|
||||
#![allow(clippy::collapsible_if)] // TODO: Remove once Rust 2024 let-chains are available
|
||||
|
||||
//! OWLEN CLI - Chat TUI client
|
||||
|
||||
mod bootstrap;
|
||||
mod commands;
|
||||
mod mcp;
|
||||
|
||||
use anyhow::Result;
|
||||
use clap::{Arg, Command};
|
||||
use owlen_core::session::SessionController;
|
||||
use owlen_ollama::OllamaProvider;
|
||||
use owlen_tui::{config, ui, AppState, ChatApp, Event, EventHandler, SessionEvent};
|
||||
use std::io;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crossterm::{
|
||||
event::{DisableBracketedPaste, DisableMouseCapture, EnableBracketedPaste, EnableMouseCapture},
|
||||
execute,
|
||||
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
|
||||
use clap::{Parser, Subcommand};
|
||||
use commands::{
|
||||
cloud::{CloudCommand, run_cloud_command},
|
||||
providers::{ModelsArgs, ProvidersCommand, run_models_command, run_providers_command},
|
||||
};
|
||||
use ratatui::{backend::CrosstermBackend, Terminal};
|
||||
use mcp::{McpCommand, run_mcp_command};
|
||||
use owlen_core::config as core_config;
|
||||
use owlen_core::config::McpMode;
|
||||
use owlen_core::mode::Mode;
|
||||
use owlen_tui::config;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let matches = Command::new("owlen")
|
||||
.about("OWLEN - A chat-focused TUI client for Ollama")
|
||||
.version(env!("CARGO_PKG_VERSION"))
|
||||
.arg(
|
||||
Arg::new("model")
|
||||
.short('m')
|
||||
.long("model")
|
||||
.value_name("MODEL")
|
||||
.help("Preferred model to use for this session"),
|
||||
)
|
||||
.get_matches();
|
||||
/// Owlen - Terminal UI for LLM chat
#[derive(Parser, Debug)]
#[command(name = "owlen")]
#[command(about = "Terminal UI for LLM chat via MCP", long_about = None)]
struct Args {
    /// Start in code mode (enables all tools)
    #[arg(long, short = 'c')]
    code: bool,
    /// Optional subcommand; when absent the interactive TUI is launched.
    #[command(subcommand)]
    command: Option<OwlenCommand>,
}
|
||||
|
||||
/// Top-level `owlen` subcommands (non-interactive mode).
#[derive(Debug, Subcommand)]
enum OwlenCommand {
    /// Inspect or upgrade configuration files
    #[command(subcommand)]
    Config(ConfigCommand),
    /// Manage Ollama Cloud credentials
    #[command(subcommand)]
    Cloud(CloudCommand),
    /// Manage model providers
    #[command(subcommand)]
    Providers(ProvidersCommand),
    /// List models exposed by configured providers
    Models(ModelsArgs),
    /// Manage MCP server registrations
    #[command(subcommand)]
    Mcp(McpCommand),
    /// Show manual steps for updating Owlen to the latest revision
    Upgrade,
}
|
||||
|
||||
/// `owlen config` subcommands.
#[derive(Debug, Subcommand)]
enum ConfigCommand {
    /// Automatically upgrade legacy configuration values and ensure validity
    Doctor,
    /// Print the resolved configuration file path
    Path,
}
|
||||
|
||||
/// Dispatch a parsed top-level subcommand to its implementation.
async fn run_command(command: OwlenCommand) -> Result<()> {
    match command {
        OwlenCommand::Config(config_cmd) => run_config_command(config_cmd),
        OwlenCommand::Cloud(cloud_cmd) => run_cloud_command(cloud_cmd).await,
        OwlenCommand::Providers(provider_cmd) => run_providers_command(provider_cmd).await,
        OwlenCommand::Models(args) => run_models_command(args).await,
        OwlenCommand::Mcp(mcp_cmd) => run_mcp_command(mcp_cmd),
        // Upgrade only prints instructions; it performs no update itself.
        OwlenCommand::Upgrade => {
            println!(
                "To update Owlen from source:\n git pull\n cargo install --path crates/owlen-cli --force"
            );
            println!(
                "If you installed from the AUR, use your package manager (e.g., yay -S owlen-git)."
            );
            Ok(())
        }
    }
}
|
||||
|
||||
/// Dispatch an `owlen config` subcommand.
fn run_config_command(command: ConfigCommand) -> Result<()> {
    match command {
        ConfigCommand::Doctor => run_config_doctor(),
        // Print where the configuration file is resolved to live.
        ConfigCommand::Path => {
            let path = core_config::default_config_path();
            println!("{}", path.display());
            Ok(())
        }
    }
}
|
||||
|
||||
fn run_config_doctor() -> Result<()> {
|
||||
let config_path = core_config::default_config_path();
|
||||
let existed = config_path.exists();
|
||||
let mut config = config::try_load_config().unwrap_or_default();
|
||||
let _ = config.refresh_mcp_servers(None);
|
||||
let mut changes = Vec::new();
|
||||
|
||||
if let Some(model) = matches.get_one::<String>("model") {
|
||||
config.general.default_model = Some(model.clone());
|
||||
if !existed {
|
||||
changes.push("created configuration file from defaults".to_string());
|
||||
}
|
||||
|
||||
// Prepare provider from configuration
|
||||
let provider_cfg = config::ensure_ollama_config(&mut config).clone();
|
||||
let provider = Arc::new(OllamaProvider::from_config(
|
||||
&provider_cfg,
|
||||
Some(&config.general),
|
||||
)?);
|
||||
if config.provider(&config.general.default_provider).is_none() {
|
||||
config.general.default_provider = "ollama_local".to_string();
|
||||
changes.push("default provider missing; reset to 'ollama_local'".to_string());
|
||||
}
|
||||
|
||||
let controller = SessionController::new(provider, config.clone());
|
||||
let (mut app, mut session_rx) = ChatApp::new(controller);
|
||||
app.initialize_models().await?;
|
||||
for key in ["ollama_local", "ollama_cloud", "openai", "anthropic"] {
|
||||
if !config.providers.contains_key(key) {
|
||||
core_config::ensure_provider_config_mut(&mut config, key);
|
||||
changes.push(format!("added default configuration for provider '{key}'"));
|
||||
}
|
||||
}
|
||||
|
||||
// Event infrastructure
|
||||
let cancellation_token = CancellationToken::new();
|
||||
let (event_tx, event_rx) = mpsc::unbounded_channel();
|
||||
let event_handler = EventHandler::new(event_tx, cancellation_token.clone());
|
||||
let event_handle = tokio::spawn(async move { event_handler.run().await });
|
||||
if let Some(entry) = config.providers.get_mut("ollama_local") {
|
||||
if entry.provider_type.trim().is_empty() || entry.provider_type != "ollama" {
|
||||
entry.provider_type = "ollama".to_string();
|
||||
changes.push("normalised providers.ollama_local.provider_type to 'ollama'".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// Terminal setup
|
||||
enable_raw_mode()?;
|
||||
let mut stdout = io::stdout();
|
||||
execute!(
|
||||
stdout,
|
||||
EnterAlternateScreen,
|
||||
EnableMouseCapture,
|
||||
EnableBracketedPaste
|
||||
)?;
|
||||
let backend = CrosstermBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend)?;
|
||||
let mut ensure_default_enabled = true;
|
||||
|
||||
let result = run_app(&mut terminal, &mut app, event_rx, &mut session_rx).await;
|
||||
if !config.providers.values().any(|cfg| cfg.enabled) {
|
||||
let entry = core_config::ensure_provider_config_mut(&mut config, "ollama_local");
|
||||
if !entry.enabled {
|
||||
entry.enabled = true;
|
||||
changes.push("no providers were enabled; enabled 'ollama_local'".to_string());
|
||||
}
|
||||
if config.general.default_provider != "ollama_local" {
|
||||
config.general.default_provider = "ollama_local".to_string();
|
||||
changes.push(
|
||||
"default provider reset to 'ollama_local' because no providers were enabled"
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
ensure_default_enabled = false;
|
||||
}
|
||||
|
||||
// Shutdown
|
||||
cancellation_token.cancel();
|
||||
event_handle.await?;
|
||||
if ensure_default_enabled {
|
||||
let default_id = config.general.default_provider.clone();
|
||||
if let Some(default_cfg) = config.providers.get(&default_id) {
|
||||
if !default_cfg.enabled {
|
||||
if let Some(new_default) = config
|
||||
.providers
|
||||
.iter()
|
||||
.filter(|(id, cfg)| cfg.enabled && *id != &default_id)
|
||||
.map(|(id, _)| id.clone())
|
||||
.min()
|
||||
{
|
||||
config.general.default_provider = new_default.clone();
|
||||
changes.push(format!(
|
||||
"default provider '{default_id}' was disabled; switched default to '{new_default}'"
|
||||
));
|
||||
} else {
|
||||
let entry =
|
||||
core_config::ensure_provider_config_mut(&mut config, "ollama_local");
|
||||
if !entry.enabled {
|
||||
entry.enabled = true;
|
||||
changes.push(
|
||||
"enabled 'ollama_local' because default provider was disabled"
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
if config.general.default_provider != "ollama_local" {
|
||||
config.general.default_provider = "ollama_local".to_string();
|
||||
changes.push(
|
||||
"default provider reset to 'ollama_local' because previous default was disabled"
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Persist configuration updates (e.g., selected model)
|
||||
config::save_config(app.config())?;
|
||||
match config.mcp.mode {
|
||||
McpMode::Legacy => {
|
||||
config.mcp.mode = McpMode::LocalOnly;
|
||||
config.mcp.warn_on_legacy = true;
|
||||
changes.push("converted [mcp].mode = 'legacy' to 'local_only'".to_string());
|
||||
}
|
||||
McpMode::RemoteOnly if config.effective_mcp_servers().is_empty() => {
|
||||
config.mcp.mode = McpMode::RemotePreferred;
|
||||
config.mcp.allow_fallback = true;
|
||||
changes.push(
|
||||
"downgraded remote-only configuration to remote_preferred because no servers are defined"
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
McpMode::RemotePreferred
|
||||
if !config.mcp.allow_fallback && config.effective_mcp_servers().is_empty() =>
|
||||
{
|
||||
config.mcp.allow_fallback = true;
|
||||
changes.push(
|
||||
"enabled [mcp].allow_fallback because no remote servers are configured".to_string(),
|
||||
);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
disable_raw_mode()?;
|
||||
execute!(
|
||||
terminal.backend_mut(),
|
||||
LeaveAlternateScreen,
|
||||
DisableMouseCapture,
|
||||
DisableBracketedPaste
|
||||
)?;
|
||||
terminal.show_cursor()?;
|
||||
config.validate()?;
|
||||
config::save_config(&config)?;
|
||||
|
||||
if let Err(err) = result {
|
||||
println!("{err:?}");
|
||||
if changes.is_empty() {
|
||||
println!(
|
||||
"Configuration already up to date: {}",
|
||||
config_path.display()
|
||||
);
|
||||
} else {
|
||||
println!("Updated {}:", config_path.display());
|
||||
for change in changes {
|
||||
println!(" - {change}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_app(
|
||||
terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
|
||||
app: &mut ChatApp,
|
||||
mut event_rx: mpsc::UnboundedReceiver<Event>,
|
||||
session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
|
||||
) -> Result<()> {
|
||||
loop {
|
||||
// Advance loading animation frame
|
||||
app.advance_loading_animation();
|
||||
|
||||
terminal.draw(|f| ui::render_chat(f, app))?;
|
||||
|
||||
// Process any pending LLM requests AFTER UI has been drawn
|
||||
app.process_pending_llm_request().await?;
|
||||
|
||||
tokio::select! {
|
||||
Some(event) = event_rx.recv() => {
|
||||
if let AppState::Quit = app.handle_event(event).await? {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
Some(session_event) = session_rx.recv() => {
|
||||
app.handle_session_event(session_event)?;
|
||||
}
|
||||
// Add a timeout to keep the animation going even when there are no events
|
||||
_ = tokio::time::sleep(tokio::time::Duration::from_millis(100)) => {
|
||||
// This will cause the loop to continue and advance the animation
|
||||
}
|
||||
}
|
||||
#[tokio::main(flavor = "multi_thread")]
|
||||
async fn main() -> Result<()> {
|
||||
// Parse command-line arguments
|
||||
let Args { code, command } = Args::parse();
|
||||
if let Some(command) = command {
|
||||
return run_command(command).await;
|
||||
}
|
||||
let initial_mode = if code { Mode::Code } else { Mode::Chat };
|
||||
bootstrap::launch(initial_mode).await
|
||||
}
|
||||
|
||||
259
crates/owlen-cli/src/mcp.rs
Normal file
259
crates/owlen-cli/src/mcp.rs
Normal file
@@ -0,0 +1,259 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use clap::{Args, Subcommand, ValueEnum};
|
||||
use owlen_core::config::{self as core_config, Config, McpConfigScope, McpServerConfig};
|
||||
use owlen_tui::config as tui_config;
|
||||
|
||||
// Top-level `owlen mcp` subcommands. The `///` variant docs below are
// rendered by clap as CLI help text — treat them as user-facing strings.
#[derive(Debug, Subcommand)]
pub enum McpCommand {
    /// Add or update an MCP server in the selected scope
    Add(AddArgs),
    /// List MCP servers across scopes
    List(ListArgs),
    /// Remove an MCP server from a scope
    Remove(RemoveArgs),
}
|
||||
|
||||
pub fn run_mcp_command(command: McpCommand) -> Result<()> {
|
||||
match command {
|
||||
McpCommand::Add(args) => handle_add(args),
|
||||
McpCommand::List(args) => handle_list(args),
|
||||
McpCommand::Remove(args) => handle_remove(args),
|
||||
}
|
||||
}
|
||||
|
||||
// CLI-facing scope selector; converts into `McpConfigScope` via `From`.
// Variant order and names feed clap's value parsing/help — do not reorder.
#[derive(Debug, Clone, Copy, ValueEnum, Default)]
pub enum ScopeArg {
    User,
    // Project is the default scope when --scope is omitted.
    #[default]
    Project,
    Local,
}
|
||||
|
||||
impl From<ScopeArg> for McpConfigScope {
|
||||
fn from(value: ScopeArg) -> Self {
|
||||
match value {
|
||||
ScopeArg::User => McpConfigScope::User,
|
||||
ScopeArg::Project => McpConfigScope::Project,
|
||||
ScopeArg::Local => McpConfigScope::Local,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Arguments for `owlen mcp add`. The `///` docs double as clap help text.
#[derive(Debug, Args)]
pub struct AddArgs {
    /// Logical name used to reference the server
    pub name: String,
    /// Command or endpoint invoked for the server
    pub command: String,
    /// Transport mechanism (stdio, http, websocket)
    #[arg(long, default_value = "stdio")]
    pub transport: String,
    /// Configuration scope to write the server into
    #[arg(long, value_enum, default_value_t = ScopeArg::Project)]
    pub scope: ScopeArg,
    /// Environment variables (KEY=VALUE) passed to the server process
    // --env is repeatable; pairs are validated in handle_add.
    #[arg(long = "env")]
    pub env: Vec<String>,
    /// Additional arguments appended when launching the server
    // trailing_var_arg lets arbitrary tokens be forwarded to the server binary.
    #[arg(trailing_var_arg = true, value_name = "ARG")]
    pub args: Vec<String>,
}
|
||||
|
||||
// Arguments for `owlen mcp list`. The `///` docs double as clap help text.
#[derive(Debug, Args, Default)]
pub struct ListArgs {
    /// Restrict output to a specific configuration scope
    #[arg(long, value_enum)]
    pub scope: Option<ScopeArg>,
    /// Display only the effective servers (after precedence resolution)
    #[arg(long)]
    pub effective_only: bool,
}
|
||||
|
||||
// Arguments for `owlen mcp remove`. The `///` docs double as clap help text.
#[derive(Debug, Args)]
pub struct RemoveArgs {
    /// Name of the server to remove
    pub name: String,
    /// Optional explicit scope to remove from
    // When omitted, the scope is resolved by `Config::remove_mcp_server`.
    #[arg(long, value_enum)]
    pub scope: Option<ScopeArg>,
}
|
||||
|
||||
fn handle_add(args: AddArgs) -> Result<()> {
|
||||
let mut config = load_config()?;
|
||||
let scope: McpConfigScope = args.scope.into();
|
||||
let mut env_map = HashMap::new();
|
||||
for pair in &args.env {
|
||||
let (key, value) = pair
|
||||
.split_once('=')
|
||||
.ok_or_else(|| anyhow!("Environment pairs must use KEY=VALUE syntax: '{}'", pair))?;
|
||||
if key.trim().is_empty() {
|
||||
return Err(anyhow!("Environment variable name cannot be empty"));
|
||||
}
|
||||
env_map.insert(key.trim().to_string(), value.to_string());
|
||||
}
|
||||
|
||||
let server = McpServerConfig {
|
||||
name: args.name.clone(),
|
||||
command: args.command.clone(),
|
||||
args: args.args.clone(),
|
||||
transport: args.transport.to_lowercase(),
|
||||
env: env_map,
|
||||
oauth: None,
|
||||
};
|
||||
|
||||
config.add_mcp_server(scope, server.clone(), None)?;
|
||||
if matches!(scope, McpConfigScope::User) {
|
||||
tui_config::save_config(&config)?;
|
||||
}
|
||||
|
||||
if let Some(path) = core_config::mcp_scope_path(scope, None) {
|
||||
println!(
|
||||
"Registered MCP server '{}' in {} scope ({})",
|
||||
server.name,
|
||||
scope,
|
||||
path.display()
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
"Registered MCP server '{}' in {} scope.",
|
||||
server.name, scope
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Implements `owlen mcp list`: print configured MCP servers (and resources)
/// per scope, marking with '*' the entries that survive precedence resolution.
fn handle_list(args: ListArgs) -> Result<()> {
    let mut config = load_config()?;
    config.refresh_mcp_servers(None)?;

    let scoped = config.scoped_mcp_servers();
    if scoped.is_empty() {
        println!("No MCP servers configured.");
        return Ok(());
    }

    // Optional scope filter requested on the command line.
    let filter_scope = args.scope.map(|scope| scope.into());
    // Identifying keys (name, command, transport) of the effective servers,
    // used to decide which scoped rows get the '*' marker below.
    let effective = config.effective_mcp_servers();
    let mut active = HashSet::new();
    for server in effective {
        active.insert((
            server.name.clone(),
            server.command.clone(),
            server.transport.to_lowercase(),
        ));
    }

    println!(
        "{:<2} {:<8} {:<20} {:<10} Command",
        "", "Scope", "Name", "Transport"
    );
    for entry in scoped {
        // Skip entries outside the requested scope, if one was given.
        if filter_scope
            .as_ref()
            .is_some_and(|target_scope| entry.scope != *target_scope)
        {
            continue;
        }

        let payload = format_command_line(&entry.config.command, &entry.config.args);
        let key = (
            entry.config.name.clone(),
            entry.config.command.clone(),
            entry.config.transport.to_lowercase(),
        );
        // '*' marks servers that are effective after precedence resolution.
        let marker = if active.contains(&key) { "*" } else { " " };

        if args.effective_only && marker != "*" {
            continue;
        }

        println!(
            "{} {:<8} {:<20} {:<10} {}",
            marker, entry.scope, entry.config.name, entry.config.transport, payload
        );
    }

    // Resources get their own table, using the same marker and filter rules.
    let scoped_resources = config.scoped_mcp_resources();
    if !scoped_resources.is_empty() {
        println!();
        println!("{:<2} {:<8} {:<30} Title", "", "Scope", "Resource");
        let effective_keys: HashSet<(String, String)> = config
            .effective_mcp_resources()
            .iter()
            .map(|res| (res.server.clone(), res.uri.clone()))
            .collect();

        for entry in scoped_resources {
            if filter_scope
                .as_ref()
                .is_some_and(|target_scope| entry.scope != *target_scope)
            {
                continue;
            }

            let key = (entry.config.server.clone(), entry.config.uri.clone());
            let marker = if effective_keys.contains(&key) {
                "*"
            } else {
                " "
            };
            if args.effective_only && marker != "*" {
                continue;
            }

            // Resources are displayed in '@server:uri' reference form.
            let reference = format!("@{}:{}", entry.config.server, entry.config.uri);
            let title = entry.config.title.as_deref().unwrap_or("—");

            println!("{} {:<8} {:<30} {}", marker, entry.scope, reference, title);
        }
    }

    Ok(())
}
|
||||
|
||||
fn handle_remove(args: RemoveArgs) -> Result<()> {
|
||||
let mut config = load_config()?;
|
||||
let scope_hint = args.scope.map(|scope| scope.into());
|
||||
let result = config.remove_mcp_server(scope_hint, &args.name, None)?;
|
||||
|
||||
match result {
|
||||
Some(scope) => {
|
||||
if matches!(scope, McpConfigScope::User) {
|
||||
tui_config::save_config(&config)?;
|
||||
}
|
||||
|
||||
if let Some(path) = core_config::mcp_scope_path(scope, None) {
|
||||
println!(
|
||||
"Removed MCP server '{}' from {} scope ({})",
|
||||
args.name,
|
||||
scope,
|
||||
path.display()
|
||||
);
|
||||
} else {
|
||||
println!("Removed MCP server '{}' from {} scope.", args.name, scope);
|
||||
}
|
||||
}
|
||||
None => {
|
||||
println!("No MCP server named '{}' was found.", args.name);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn load_config() -> Result<Config> {
|
||||
let mut config = tui_config::try_load_config().unwrap_or_default();
|
||||
config.refresh_mcp_servers(None)?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
/// Render a command and its arguments as a single space-separated line.
fn format_command_line(command: &str, args: &[String]) -> String {
    match args {
        [] => command.to_owned(),
        rest => {
            // Build in one buffer instead of format!/join to avoid
            // the intermediate joined string.
            let mut line = String::from(command);
            for arg in rest {
                line.push(' ');
                line.push_str(arg);
            }
            line
        }
    }
}
|
||||
266
crates/owlen-cli/tests/agent_tests.rs
Normal file
266
crates/owlen-cli/tests/agent_tests.rs
Normal file
@@ -0,0 +1,266 @@
|
||||
//! Integration tests for the ReAct agent loop functionality.
|
||||
//!
|
||||
//! These tests verify that the agent executor correctly:
|
||||
//! - Parses ReAct formatted responses
|
||||
//! - Executes tool calls
|
||||
//! - Handles multi-step workflows
|
||||
//! - Recovers from errors
|
||||
//! - Respects iteration limits
|
||||
|
||||
use owlen_cli::agent::{AgentConfig, AgentExecutor, LlmResponse};
|
||||
use owlen_core::mcp::remote_client::RemoteMcpClient;
|
||||
use std::sync::Arc;
|
||||
|
||||
// Verifies that a well-formed THOUGHT/ACTION/ACTION_INPUT triple parses into
// an LlmResponse::ToolCall with the JSON arguments preserved.
#[tokio::test]
async fn test_react_parsing_tool_call() {
    let executor = create_test_executor();

    // Test parsing a tool call with JSON arguments
    let text = "THOUGHT: I should search for information\nACTION: web_search\nACTION_INPUT: {\"query\": \"rust async programming\"}\n";

    let result = executor.parse_response(text);

    match result {
        Ok(LlmResponse::ToolCall {
            thought,
            tool_name,
            arguments,
        }) => {
            assert_eq!(thought, "I should search for information");
            assert_eq!(tool_name, "web_search");
            assert_eq!(arguments["query"], "rust async programming");
        }
        other => panic!("Expected ToolCall, got: {:?}", other),
    }
}
|
||||
|
||||
// Verifies that a THOUGHT followed by FINAL_ANSWER parses into
// LlmResponse::FinalAnswer with both fields captured.
#[tokio::test]
async fn test_react_parsing_final_answer() {
    let executor = create_test_executor();

    let text = "THOUGHT: I have enough information now\nFINAL_ANSWER: The answer is 42\n";

    let result = executor.parse_response(text);

    match result {
        Ok(LlmResponse::FinalAnswer { thought, answer }) => {
            assert_eq!(thought, "I have enough information now");
            assert_eq!(answer, "The answer is 42");
        }
        other => panic!("Expected FinalAnswer, got: {:?}", other),
    }
}
|
||||
|
||||
// Documents (rather than prescribes) how the parser treats a THOUGHT that
// spans multiple lines — only the content up to the first newline is required
// to be present in the captured thought.
#[tokio::test]
async fn test_react_parsing_with_multiline_thought() {
    let executor = create_test_executor();

    let text = "THOUGHT: This is a complex\nmulti-line thought\nACTION: list_files\nACTION_INPUT: {\"path\": \".\"}\n";

    let result = executor.parse_response(text);

    // The regex currently only captures until first newline
    // This test documents current behavior
    match result {
        Ok(LlmResponse::ToolCall { thought, .. }) => {
            // Regex pattern stops at first \n after THOUGHT:
            assert!(thought.contains("This is a complex"));
        }
        other => panic!("Expected ToolCall, got: {:?}", other),
    }
}
|
||||
|
||||
// End-to-end smoke test: one query that should resolve with a single tool
// call. Ignored by default because it needs a live MCP LLM server.
#[tokio::test]
#[ignore] // Requires MCP LLM server to be running
async fn test_agent_single_tool_scenario() {
    // This test requires a running MCP LLM server (which wraps Ollama)
    let provider = Arc::new(RemoteMcpClient::new().unwrap());
    // Provider and MCP client share one underlying RemoteMcpClient.
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig {
        max_iterations: 5,
        model: "llama3.2".to_string(),
        temperature: Some(0.7),
        max_tokens: None,
    };

    let executor = AgentExecutor::new(provider, mcp_client, config);

    // Simple query that should complete in one tool call
    let result = executor
        .run("List files in the current directory".to_string())
        .await;

    match result {
        Ok(agent_result) => {
            assert!(
                !agent_result.answer.is_empty(),
                "Answer should not be empty"
            );
            println!("Agent answer: {}", agent_result.answer);
        }
        Err(e) => {
            // It's okay if this fails due to LLM not following format
            println!("Agent test skipped: {}", e);
        }
    }
}
|
||||
|
||||
// Exercises a query that needs several tool calls before an answer emerges.
// Ignored by default because it needs a live Ollama instance.
#[tokio::test]
#[ignore] // Requires Ollama to be running
async fn test_agent_multi_step_workflow() {
    // Test a query that requires multiple tool calls
    let provider = Arc::new(RemoteMcpClient::new().unwrap());
    // Provider and MCP client share one underlying RemoteMcpClient.
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig {
        max_iterations: 10,
        model: "llama3.2".to_string(),
        temperature: Some(0.5), // Lower temperature for more consistent behavior
        max_tokens: None,
    };

    let executor = AgentExecutor::new(provider, mcp_client, config);

    // Query requiring multiple steps: list -> read -> analyze
    let result = executor
        .run("Find all Rust files and tell me which one contains 'Agent'".to_string())
        .await;

    match result {
        Ok(agent_result) => {
            assert!(!agent_result.answer.is_empty());
            println!("Multi-step answer: {:?}", agent_result);
        }
        Err(e) => {
            println!("Multi-step test skipped: {}", e);
        }
    }
}
|
||||
|
||||
// Checks that a tiny max_iterations budget stops the agent: either the limit
// error surfaces, or the LLM finishes (or fails to follow ReAct) first.
#[tokio::test]
#[ignore] // Requires Ollama
async fn test_agent_iteration_limit() {
    let provider = Arc::new(RemoteMcpClient::new().unwrap());
    // Provider and MCP client share one underlying RemoteMcpClient.
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig {
        max_iterations: 2, // Very low limit to test enforcement
        model: "llama3.2".to_string(),
        temperature: Some(0.7),
        max_tokens: None,
    };

    let executor = AgentExecutor::new(provider, mcp_client, config);

    // Complex query that would require many iterations
    let result = executor
        .run("Perform an exhaustive analysis of all files".to_string())
        .await;

    // Should hit the iteration limit (or parse error if LLM doesn't follow format)
    match result {
        Err(e) => {
            let error_str = format!("{}", e);
            // Accept either iteration limit error or parse error (LLM didn't follow ReAct format)
            assert!(
                error_str.contains("Maximum iterations")
                    || error_str.contains("2")
                    || error_str.contains("parse"),
                "Expected iteration limit or parse error, got: {}",
                error_str
            );
            println!("Test passed: agent stopped with error: {}", error_str);
        }
        Ok(_) => {
            // It's possible the LLM completed within 2 iterations
            println!("Agent completed within iteration limit");
        }
    }
}
|
||||
|
||||
// Checks that a small iteration budget bounds tool usage: the agent must
// either stop with a budget/limit error or complete (or mis-format) early.
#[tokio::test]
#[ignore] // Requires Ollama
async fn test_agent_tool_budget_enforcement() {
    let provider = Arc::new(RemoteMcpClient::new().unwrap());
    // Provider and MCP client share one underlying RemoteMcpClient.
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig {
        max_iterations: 3, // Very low iteration limit to enforce budget
        model: "llama3.2".to_string(),
        temperature: Some(0.7),
        max_tokens: None,
    };

    let executor = AgentExecutor::new(provider, mcp_client, config);

    // Query that would require many tool calls
    let result = executor
        .run("Read every file in the project and summarize them all".to_string())
        .await;

    // Should hit the tool call budget (or parse error if LLM doesn't follow format)
    match result {
        Err(e) => {
            let error_str = format!("{}", e);
            // Accept either budget error or parse error (LLM didn't follow ReAct format)
            assert!(
                error_str.contains("Maximum iterations")
                    || error_str.contains("budget")
                    || error_str.contains("parse"),
                "Expected budget or parse error, got: {}",
                error_str
            );
            println!("Test passed: agent stopped with error: {}", error_str);
        }
        Ok(_) => {
            println!("Agent completed within tool budget");
        }
    }
}
|
||||
|
||||
// Helper function to create a test executor
// For parsing tests, we don't need a real connection
fn create_test_executor() -> AgentExecutor {
    // For parsing tests, we can accept the error from RemoteMcpClient::new()
    // since we're only testing parse_response which doesn't use the MCP client
    let provider = match RemoteMcpClient::new() {
        Ok(client) => Arc::new(client),
        Err(_) => {
            // If MCP server binary doesn't exist, parsing tests can still run
            // by using a dummy client that will never be called
            // This is a workaround for unit tests that only need parse_response
            // NOTE(review): the comment above is aspirational — there is no
            // dummy client; this branch panics, so even the parsing tests
            // require the MCP server binary to have been built.
            panic!("MCP server binary not found - build the project first with: cargo build --all");
        }
    };

    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig::default();
    AgentExecutor::new(provider, mcp_client, config)
}
|
||||
|
||||
// Pins the AgentConfig::default() contract so accidental changes to the
// defaults are caught at test time.
#[test]
fn test_agent_config_defaults() {
    let config = AgentConfig::default();

    assert_eq!(config.max_iterations, 15);
    assert_eq!(config.model, "llama3.2:latest");
    assert_eq!(config.temperature, Some(0.7));
    // max_tool_calls field removed - agent now tracks iterations instead
}
|
||||
|
||||
// Sanity-checks that explicitly constructed AgentConfig values are stored
// unchanged (no normalization on construction).
#[test]
fn test_agent_config_custom() {
    let config = AgentConfig {
        max_iterations: 15,
        model: "custom-model".to_string(),
        temperature: Some(0.5),
        max_tokens: Some(2000),
    };

    assert_eq!(config.max_iterations, 15);
    assert_eq!(config.model, "custom-model");
    assert_eq!(config.temperature, Some(0.5));
    assert_eq!(config.max_tokens, Some(2000));
}
|
||||
@@ -9,23 +9,45 @@ homepage.workspace = true
|
||||
description = "Core traits and types for OWLEN LLM client"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.75"
|
||||
log = "0.4.20"
|
||||
serde = { version = "1.0.188", features = ["derive"] }
|
||||
serde_json = "1.0.105"
|
||||
thiserror = "1.0.48"
|
||||
tokio = { version = "1.32.0", features = ["full"] }
|
||||
anyhow = { workspace = true }
|
||||
log = { workspace = true }
|
||||
regex = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
unicode-segmentation = "1.11"
|
||||
unicode-width = "0.1"
|
||||
uuid = { version = "1.4.1", features = ["v4", "serde"] }
|
||||
textwrap = "0.16.0"
|
||||
futures = "0.3.28"
|
||||
async-trait = "0.1.73"
|
||||
toml = "0.8.0"
|
||||
shellexpand = "3.1.0"
|
||||
dirs = "5.0"
|
||||
uuid = { workspace = true }
|
||||
textwrap = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
futures-util = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
toml = { workspace = true }
|
||||
shellexpand = { workspace = true }
|
||||
dirs = { workspace = true }
|
||||
ratatui = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
jsonschema = { workspace = true }
|
||||
which = { workspace = true }
|
||||
nix = { workspace = true }
|
||||
aes-gcm = { workspace = true }
|
||||
ring = { workspace = true }
|
||||
keyring = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
crossterm = { workspace = true }
|
||||
urlencoding = { workspace = true }
|
||||
rpassword = { workspace = true }
|
||||
sqlx = { workspace = true }
|
||||
duckduckgo = "0.2.0"
|
||||
reqwest = { workspace = true, features = ["default"] }
|
||||
reqwest_011 = { version = "0.11", package = "reqwest" }
|
||||
path-clean = "1.0"
|
||||
tokio-stream = { workspace = true }
|
||||
tokio-tungstenite = "0.21"
|
||||
tungstenite = "0.21"
|
||||
ollama-rs = { version = "0.3", features = ["stream", "headers"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
httpmock = "0.7"
|
||||
|
||||
12
crates/owlen-core/migrations/0001_create_conversations.sql
Normal file
12
crates/owlen-core/migrations/0001_create_conversations.sql
Normal file
@@ -0,0 +1,12 @@
|
||||
-- Conversation index: lightweight metadata columns for listing, plus the
-- full serialized conversation in `data` (format defined by the application).
CREATE TABLE IF NOT EXISTS conversations (
    id TEXT PRIMARY KEY,
    name TEXT,
    description TEXT,
    model TEXT NOT NULL,
    message_count INTEGER NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL,
    data TEXT NOT NULL
);

-- Supports listing conversations most-recently-updated first.
CREATE INDEX IF NOT EXISTS idx_conversations_updated_at ON conversations(updated_at DESC);
|
||||
@@ -0,0 +1,7 @@
|
||||
-- Encrypted secret storage: one nonce/ciphertext pair per key.
-- NOTE(review): cipher not determined by this schema — presumably AES-GCM
-- given the crate's aes-gcm dependency; confirm in the storage code.
CREATE TABLE IF NOT EXISTS secure_items (
    key TEXT PRIMARY KEY,
    nonce BLOB NOT NULL,
    ciphertext BLOB NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
);
|
||||
421
crates/owlen-core/src/agent.rs
Normal file
421
crates/owlen-core/src/agent.rs
Normal file
@@ -0,0 +1,421 @@
|
||||
//! Agentic execution loop with ReAct pattern support.
|
||||
//!
|
||||
//! This module provides the core agent orchestration logic that allows an LLM
|
||||
//! to reason about tasks, execute tools, and observe results in an iterative loop.
|
||||
|
||||
use crate::Provider;
|
||||
use crate::mcp::{McpClient, McpToolCall, McpToolDescriptor, McpToolResponse};
|
||||
use crate::types::{ChatParameters, ChatRequest, Message};
|
||||
use crate::{Error, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Maximum number of agent iterations before stopping
|
||||
const DEFAULT_MAX_ITERATIONS: usize = 15;
|
||||
|
||||
/// Parsed response from the LLM in ReAct format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum LlmResponse {
    /// LLM wants to execute a tool
    ToolCall {
        /// Reasoning captured from the THOUGHT line.
        thought: String,
        /// Tool named on the ACTION line.
        tool_name: String,
        /// JSON value parsed from the ACTION_INPUT line.
        arguments: serde_json::Value,
    },
    /// LLM has reached a final answer
    FinalAnswer { thought: String, answer: String },
    /// LLM is just reasoning without taking action
    Reasoning { thought: String },
}
|
||||
|
||||
/// Parse error when LLM response doesn't match expected format.
// The #[error] strings are the user-visible messages (via thiserror).
#[derive(Debug, thiserror::Error)]
pub enum ParseError {
    #[error("No recognizable pattern found in response")]
    NoPattern,
    #[error("Missing required field: {0}")]
    MissingField(String),
    #[error("Invalid JSON in ACTION_INPUT: {0}")]
    InvalidJson(String),
}
|
||||
|
||||
/// Result of an agent execution.
#[derive(Debug, Clone)]
pub struct AgentResult {
    /// Final answer from the agent
    pub answer: String,
    /// Number of iterations taken
    pub iterations: usize,
    /// All messages exchanged during execution (useful for transcripts/debugging)
    pub messages: Vec<Message>,
    /// Whether the agent completed successfully
    pub success: bool,
}
|
||||
|
||||
/// Configuration for agent execution.
#[derive(Debug, Clone)]
pub struct AgentConfig {
    /// Maximum number of iterations (defaults to DEFAULT_MAX_ITERATIONS)
    pub max_iterations: usize,
    /// Model to use for reasoning
    pub model: String,
    /// Temperature for LLM sampling (None = provider default)
    pub temperature: Option<f32>,
    /// Max tokens per LLM call (None = provider default)
    pub max_tokens: Option<u32>,
}
|
||||
|
||||
impl Default for AgentConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_iterations: DEFAULT_MAX_ITERATIONS,
|
||||
model: "llama3.2:latest".to_string(),
|
||||
temperature: Some(0.7),
|
||||
max_tokens: Some(4096),
|
||||
}
|
||||
}
|
||||
}
|
||||