feat: add OpenAI-compat adapter for Ollama and llama.cpp

Thin wrapper over OpenAI adapter with custom base URLs.
Ollama: localhost:11434/v1, llama.cpp: localhost:8080/v1.
No API key required for local providers.

Fixed: initial tool call args captured on first chunk
(Ollama sends complete args in one chunk, not as deltas).

Live verified: text + tool calling with qwen3:14b on Ollama.
Five providers now live: Mistral, Anthropic, OpenAI, Google, Ollama.
This commit is contained in:
2026-04-03 13:47:30 +02:00
parent d26b07c509
commit 54ae24d11c
3 changed files with 54 additions and 6 deletions

View File

@@ -72,10 +72,11 @@ func (s *openaiStream) Next() bool {
for _, tc := range delta.ToolCalls {
existing, ok := s.toolCalls[tc.Index]
if !ok {
// New tool call
// New tool call — capture initial arguments too
existing = &toolCallState{
id: tc.ID,
name: tc.Function.Name,
args: tc.Function.Arguments,
}
s.toolCalls[tc.Index] = existing
s.hadToolCalls = true
@@ -90,8 +91,8 @@ func (s *openaiStream) Next() bool {
}
}
// Accumulate arguments
if tc.Function.Arguments != "" {
// Accumulate arguments (subsequent chunks)
if tc.Function.Arguments != "" && ok {
existing.args += tc.Function.Arguments
s.cur = stream.Event{
Type: stream.EventToolCallDelta,

View File

@@ -0,0 +1,41 @@
// Package openaicompat provides OpenAI-compatible provider adapters
// for Ollama, llama.cpp, and other servers that implement the OpenAI API.
package openaicompat
import (
"somegit.dev/Owlibou/gnoma/internal/provider"
oaiprov "somegit.dev/Owlibou/gnoma/internal/provider/openai"
)
const (
ollamaDefaultURL = "http://localhost:11434/v1"
llamacppDefaultURL = "http://localhost:8080/v1"
)
// newLocal applies defaults for a local OpenAI-compatible server and
// delegates to the OpenAI provider implementation. Empty fields in cfg
// are filled with the given base URL, placeholder API key, and model;
// caller-supplied values always win.
func newLocal(cfg provider.ProviderConfig, baseURL, apiKey, model string) (provider.Provider, error) {
	if cfg.BaseURL == "" {
		cfg.BaseURL = baseURL
	}
	if cfg.APIKey == "" {
		// Local servers don't require a real key, but the OpenAI client
		// expects a non-empty one.
		cfg.APIKey = apiKey
	}
	if cfg.Model == "" {
		cfg.Model = model
	}
	return oaiprov.New(cfg)
}

// NewOllama creates a provider for a local Ollama instance.
func NewOllama(cfg provider.ProviderConfig) (provider.Provider, error) {
	return newLocal(cfg, ollamaDefaultURL, "ollama", "qwen3:8b")
}

// NewLlamaCpp creates a provider for a local llama.cpp server.
func NewLlamaCpp(cfg provider.ProviderConfig) (provider.Provider, error) {
	return newLocal(cfg, llamacppDefaultURL, "llamacpp", "default")
}